Radish alpha
r
rad:z6cFWeWpnZNHh9rUW8phgA3b5yGt
Git libraries for Radicle
Radicle
Git
Merge remote-tracking branch 'origin/workspace-management'
Fintan Halpenny committed 3 years ago
commit 6de53f72d4dda7fce78b2bec496159119ab186d3
parent 165f25f
88 files changed +4657 -4672
modified Cargo.toml
@@ -1,13 +1,8 @@
[workspace]
members = [
-
  "git-commit",
  "git-storage",
-
  "git-trailers",
-
  "link-git",
  "radicle-git-ext",
  "radicle-std-ext",
  "radicle-surf",
-
  # TODO: port gitd-lib over
-
  # "cli/gitd-lib",
  "test",
]
added archived/git-commit/Cargo.toml
@@ -0,0 +1,19 @@
+
[package]
+
name = "git-commit"
+
version = "0.3.0"
+
license = "MIT OR Apache-2.0"
+
edition = "2021"
+
authors = [
+
  "Alexis Sellier <alexis@radicle.xyz>",
+
  "Fintan Halpenny <fintan.halpenny@gmail.com>",
+
]
+
description = "A small library for parsing, displaying and creating a git commit"
+
keywords = ["git", "git-commit", "git-trailers", "radicle"]
+

+
[dependencies]
+
thiserror = "1"
+

+
[dependencies.git2]
+
version = "0.16.1"
+
default-features = false
+
features = ["vendored-libgit2"]
added archived/git-commit/src/author.rs
@@ -0,0 +1,129 @@
+
use std::{
+
    fmt,
+
    num::ParseIntError,
+
    str::{self, FromStr},
+
};
+

+
use thiserror::Error;
+

+
/// The data for indicating authorship of an action within a
/// [`super::Commit`].
///
/// Rendered by its `Display` impl in git's signature-line form
/// `<name> <<email>> <time>`, and parsed back from that form by its
/// `FromStr` impl.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct Author {
    /// Name corresponding to `user.name` in the git config.
    ///
    /// Note: this must not contain `<` or `>`.
    pub name: String,
    /// Email corresponding to `user.email` in the git config.
    ///
    /// Note: this must not contain `<` or `>`.
    pub email: String,
    /// The time of this author's action.
    pub time: Time,
}
+

+
/// The time of a [`Author`]'s action.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct Time {
    // Seconds since the Unix epoch (see `Time::seconds`).
    seconds: i64,
    // Timezone offset. `Time::offset` documents this as minutes, but
    // `FromStr for Author` stores the raw ±HHMM digits here unconverted
    // (e.g. "+0200" parses to 200, not 120).
    // NOTE(review): confirm which representation is canonical.
    offset: i32,
}
+

+
impl Time {
    /// Construct a [`Time`] from epoch seconds and a timezone offset.
    pub fn new(seconds: i64, offset: i32) -> Self {
        Self { seconds, offset }
    }

    /// Return the time, in seconds, since the epoch.
    pub fn seconds(&self) -> i64 {
        self.seconds
    }

    /// Return the timezone offset, in minutes.
    pub fn offset(&self) -> i32 {
        self.offset
    }
}
+

+
impl From<Time> for git2::Time {
+
    fn from(t: Time) -> Self {
+
        Self::new(t.seconds, t.offset)
+
    }
+
}
+

+
impl From<git2::Time> for Time {
    /// Build a [`Time`] from libgit2's representation, taking the epoch
    /// seconds and the offset in minutes.
    fn from(t: git2::Time) -> Self {
        Self {
            seconds: t.seconds(),
            offset: t.offset_minutes(),
        }
    }
}
+

+
impl fmt::Display for Time {
    /// Formats as `<seconds> <sign><4-digit offset>`, matching the
    /// timestamp portion of a git `author`/`committer` line.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // NOTE(review): `Time::offset` is documented as minutes, but this
        // prints the absolute value directly as a zero-padded 4-digit
        // field. Git's ±HHMM form only round-trips if `offset` actually
        // holds the raw HHMM digits (as `FromStr for Author` stores them);
        // a value of 120 *minutes* would render as "+0120", not "+0200".
        // TODO: confirm the intended representation.
        let sign = if self.offset.is_negative() { '-' } else { '+' };
        write!(f, "{} {}{:0>4}", self.seconds, sign, self.offset.abs())
    }
}
+

+
impl fmt::Display for Author {
+
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+
        write!(f, "{} <{}> {}", self.name, self.email, self.time,)
+
    }
+
}
+

+
impl TryFrom<&Author> for git2::Signature<'_> {
+
    type Error = git2::Error;
+

+
    fn try_from(person: &Author) -> Result<Self, Self::Error> {
+
        let time = git2::Time::new(person.time.seconds, person.time.offset);
+
        git2::Signature::new(&person.name, &person.email, &time)
+
    }
+
}
+

+
impl<'a> TryFrom<&git2::Signature<'a>> for Author {
+
    type Error = str::Utf8Error;
+

+
    fn try_from(value: &git2::Signature<'a>) -> Result<Self, Self::Error> {
+
        Ok(Self {
+
            name: str::from_utf8(value.name_bytes())?.to_string(),
+
            email: str::from_utf8(value.email_bytes())?.to_string(),
+
            time: value.when().into(),
+
        })
+
    }
+
}
+

+
#[derive(Debug, Error)]
+
pub enum ParseError {
+
    #[error("missing '{0}' while parsing person signature")]
+
    Missing(&'static str),
+
    #[error("offset was incorrect format while parsing person signature")]
+
    Offset(#[source] ParseIntError),
+
    #[error("time was incorrect format while parsing person signature")]
+
    Time(#[source] ParseIntError),
+
    #[error("time offset is expected to be '+'/'-' for a person siganture")]
+
    UnknownOffset,
+
}
+

+
impl FromStr for Author {
    type Err = ParseError;

    /// Parses a signature line of the form
    /// `<name> <email> <time> <offset>` (i.e. the value of a commit's
    /// `author`/`committer` header).
    ///
    /// Parsing walks from the *end* of the line, because the name may
    /// itself contain spaces: offset first, then time, then email, and
    /// whatever remains is the name.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut components = s.split(' ');
        // NOTE: `split` always yields at least one item, so this first
        // `next_back` cannot actually return `None`; the `Missing` arm is
        // defensive.
        let offset = match components.next_back() {
            None => return Err(ParseError::Missing("offset")),
            // e.g. "+0200" parses to the raw integer 200.
            // NOTE(review): that is the ±HHMM digits, not minutes —
            // confirm the intended unit of `Time::offset`.
            Some(offset) => offset.parse::<i32>().map_err(ParseError::Offset)?,
        };
        let time = match components.next_back() {
            None => return Err(ParseError::Missing("time")),
            Some(time) => time.parse::<i64>().map_err(ParseError::Time)?,
        };
        let time = Time::new(time, offset);

        // The email appears as `<addr>`; strip the surrounding angle
        // brackets.
        let email = components
            .next_back()
            .ok_or(ParseError::Missing("email"))?
            .trim_matches(|c| c == '<' || c == '>')
            .to_owned();
        // Everything left over (possibly containing spaces) is the name.
        let name = components.collect::<Vec<_>>().join(" ");
        Ok(Self { name, email, time })
    }
}
added archived/git-commit/src/headers.rs
@@ -0,0 +1,71 @@
+
use std::borrow::Cow;
+

+
const BEGIN_SSH: &str = "-----BEGIN SSH SIGNATURE-----\n";
+
const BEGIN_PGP: &str = "-----BEGIN PGP SIGNATURE-----\n";
+

+
/// A collection of headers stored in a [`super::Commit`].
+
///
+
/// Note: these do not include `tree`, `parent`, `author`, and `committer`.
+
#[derive(Clone, Debug, Default)]
+
pub struct Headers(pub(super) Vec<(String, String)>);
+

+
/// A `gpgsig` signature stored in a [`super::Commit`].
+
pub enum Signature<'a> {
+
    /// A PGP signature, i.e. starts with `-----BEGIN PGP SIGNATURE-----`.
+
    Pgp(Cow<'a, str>),
+
    /// A SSH signature, i.e. starts with `-----BEGIN SSH SIGNATURE-----`.
+
    Ssh(Cow<'a, str>),
+
}
+

+
impl<'a> Signature<'a> {
+
    fn from_str(s: &'a str) -> Result<Self, UnknownScheme> {
+
        if s.starts_with(BEGIN_SSH) {
+
            Ok(Signature::Ssh(Cow::Borrowed(s)))
+
        } else if s.starts_with(BEGIN_PGP) {
+
            Ok(Signature::Pgp(Cow::Borrowed(s)))
+
        } else {
+
            Err(UnknownScheme)
+
        }
+
    }
+
}
+

+
pub struct UnknownScheme;
+

+
impl<'a> ToString for Signature<'a> {
+
    fn to_string(&self) -> String {
+
        match self {
+
            Signature::Pgp(pgp) => pgp.to_string(),
+
            Signature::Ssh(ssh) => ssh.to_string(),
+
        }
+
    }
+
}
+

+
impl Headers {
+
    pub fn new() -> Self {
+
        Headers(Vec::new())
+
    }
+

+
    pub fn iter(&self) -> impl Iterator<Item = (&str, &str)> {
+
        self.0.iter().map(|(k, v)| (k.as_str(), v.as_str()))
+
    }
+

+
    pub fn values<'a>(&'a self, name: &'a str) -> impl Iterator<Item = &'a str> + '_ {
+
        self.iter()
+
            .filter_map(move |(k, v)| (k == name).then_some(v))
+
    }
+

+
    pub fn signatures(&self) -> impl Iterator<Item = Signature> + '_ {
+
        self.0.iter().filter_map(|(k, v)| {
+
            if k == "gpgsig" {
+
                Signature::from_str(v).ok()
+
            } else {
+
                None
+
            }
+
        })
+
    }
+

+
    /// Push a header to the end of the headers section.
+
    pub fn push(&mut self, name: &str, value: &str) {
+
        self.0.push((name.to_owned(), value.trim().to_owned()));
+
    }
+
}
added archived/git-commit/src/lib.rs
@@ -0,0 +1,295 @@
+
//! The `git-commit` crate provides parsing and displaying of a [git
//! commit][git-commit].
+
//!
+
//! The [`Commit`] data can be constructed using the `FromStr`
+
//! implementation, or by converting from a `git2::Buf`.
+
//!
+
//! The [`Headers`] can be accessed via [`Commit::headers`]. If the
+
//! signatures of the commit are of particular interest, the
+
//! [`Commit::signatures`] method can be used, which returns a series of
+
//! [`Signature`]s.
+
//!
+
//! [git-commit]: https://git-scm.com/book/en/v2/Git-Internals-Git-Objects
+

+
use std::{
+
    fmt::Write as _,
+
    str::{self, FromStr},
+
};
+

+
use git2::{ObjectType, Oid};
+

+
pub mod author;
+
pub use author::Author;
+

+
pub mod headers;
+
pub use headers::{Headers, Signature};
+

+
pub mod trailers;
+
pub use trailers::{OwnedTrailer, Trailer, Trailers};
+

+
/// A git commit in its object description form, i.e. the output of
/// `git cat-file` for a commit object.
#[derive(Debug)]
pub struct Commit {
    // The tree object this commit points to.
    tree: Oid,
    // Zero or more parent commits (merges have several).
    parents: Vec<Oid>,
    // Value of the `author` header.
    author: Author,
    // Value of the `committer` header.
    committer: Author,
    // Remaining headers (e.g. `gpgsig`), in file order; excludes `tree`,
    // `parent`, `author` and `committer`.
    headers: Headers,
    // The commit message body, with the trailer paragraph stripped when
    // it could be matched during parsing.
    message: String,
    // Trailers parsed from the final paragraph of the message.
    trailers: Vec<OwnedTrailer>,
}
+

+
impl Commit {
    /// Construct a [`Commit`] from its constituent parts.
    ///
    /// `trailers` accepts anything convertible into [`OwnedTrailer`]s,
    /// e.g. borrowed [`Trailer`]s.
    pub fn new<I, T>(
        tree: Oid,
        parents: Vec<Oid>,
        author: Author,
        committer: Author,
        headers: Headers,
        message: String,
        trailers: I,
    ) -> Self
    where
        I: IntoIterator<Item = T>,
        OwnedTrailer: From<T>,
    {
        let trailers = trailers.into_iter().map(OwnedTrailer::from).collect();
        Self {
            tree,
            parents,
            author,
            committer,
            headers,
            message,
            trailers,
        }
    }

    /// Read the [`Commit`] from the `repo` that is expected to be found at
    /// `oid`.
    pub fn read(repo: &git2::Repository, oid: Oid) -> Result<Self, error::Read> {
        let odb = repo.odb()?;
        let object = odb.read(oid)?;
        Ok(Commit::try_from(object.data())?)
    }

    /// Write the given [`Commit`] to the `repo`. The resulting `Oid`
    /// is the identifier for this commit.
    pub fn write(&self, repo: &git2::Repository) -> Result<Oid, git2::Error> {
        let odb = repo.odb()?;
        odb.write(ObjectType::Commit, self.to_string().as_bytes())
    }

    /// The tree [`Oid`] this commit points to.
    pub fn tree(&self) -> Oid {
        self.tree
    }

    /// The parent [`Oid`]s of this commit.
    pub fn parents(&self) -> impl Iterator<Item = Oid> + '_ {
        self.parents.iter().copied()
    }

    /// The author of this commit, i.e. the header corresponding to `author`.
    pub fn author(&self) -> &Author {
        &self.author
    }

    /// The committer of this commit, i.e. the header corresponding to
    /// `committer`.
    pub fn committer(&self) -> &Author {
        &self.committer
    }

    /// The message body of this commit.
    pub fn message(&self) -> &str {
        &self.message
    }

    /// The [`Signature`]s found in this commit, i.e. the headers corresponding
    /// to `gpgsig`.
    pub fn signatures(&self) -> impl Iterator<Item = Signature> + '_ {
        self.headers.signatures()
    }

    /// The [`Headers`] found in this commit.
    ///
    /// Note: these do not include `tree`, `parent`, `author`, and `committer`.
    pub fn headers(&self) -> impl Iterator<Item = (&str, &str)> {
        self.headers.iter()
    }

    /// Iterate over the [`Headers`] values that match the provided `name`.
    pub fn values<'a>(&'a self, name: &'a str) -> impl Iterator<Item = &'a str> + '_ {
        self.headers.values(name)
    }

    /// Push a header to the end of the headers section.
    pub fn push_header(&mut self, name: &str, value: &str) {
        // NOTE: `Headers::push` also trims the value, so this outer
        // `trim()` is redundant but harmless.
        self.headers.push(name, value.trim());
    }

    /// Iterate over the trailers parsed from the commit message.
    pub fn trailers(&self) -> impl Iterator<Item = &OwnedTrailer> {
        self.trailers.iter()
    }
}
+

+
pub mod error {
+
    use std::str;
+

+
    use thiserror::Error;
+

+
    use super::author;
+

+
    #[derive(Debug, Error)]
+
    pub enum Read {
+
        #[error(transparent)]
+
        Git(#[from] git2::Error),
+
        #[error(transparent)]
+
        Parse(#[from] Parse),
+
    }
+

+
    #[derive(Debug, Error)]
+
    pub enum Parse {
+
        #[error(transparent)]
+
        Author(#[from] author::ParseError),
+
        #[error("invalid '{header}'")]
+
        InvalidHeader {
+
            header: &'static str,
+
            #[source]
+
            err: git2::Error,
+
        },
+
        #[error("invalid git commit object format")]
+
        InvalidFormat,
+
        #[error("missing '{0}' while parsing commit")]
+
        Missing(&'static str),
+
        #[error("error occurred while checking for git-trailers: {0}")]
+
        Trailers(#[source] git2::Error),
+
        #[error(transparent)]
+
        Utf8(#[from] str::Utf8Error),
+
    }
+
}
+

+
impl TryFrom<git2::Buf> for Commit {
+
    type Error = error::Parse;
+

+
    fn try_from(value: git2::Buf) -> Result<Self, Self::Error> {
+
        value.as_str().ok_or(error::Parse::InvalidFormat)?.parse()
+
    }
+
}
+

+
impl TryFrom<&[u8]> for Commit {
+
    type Error = error::Parse;
+

+
    fn try_from(data: &[u8]) -> Result<Self, Self::Error> {
+
        Commit::from_str(str::from_utf8(data)?)
+
    }
+
}
+

+
impl FromStr for Commit {
    type Err = error::Parse;

    /// Parses the object description form of a commit (the output of
    /// `git cat-file commit`): a header section, a blank line, then the
    /// message.
    ///
    /// The first header line must be `tree <oid>`; subsequent lines are
    /// `parent`, `author`, `committer`, or arbitrary extra headers
    /// (including multi-line `gpgsig` blocks, whose continuation lines
    /// begin with a space). Trailers are split out of the tail of the
    /// message.
    fn from_str(buffer: &str) -> Result<Self, Self::Err> {
        // The first blank line separates headers from the message body.
        let (header, message) = buffer
            .split_once("\n\n")
            .ok_or(error::Parse::InvalidFormat)?;
        let mut lines = header.lines();

        // `tree` is mandatory and must come first.
        let tree = match lines.next() {
            Some(tree) => tree
                .strip_prefix("tree ")
                .map(git2::Oid::from_str)
                .transpose()
                .map_err(|err| error::Parse::InvalidHeader {
                    header: "tree",
                    err,
                })?
                .ok_or(error::Parse::Missing("tree"))?,
            None => return Err(error::Parse::Missing("tree")),
        };

        let mut parents = Vec::new();
        let mut author: Option<Author> = None;
        let mut committer: Option<Author> = None;
        let mut headers = Headers::new();

        for line in lines {
            // Check if a signature is still being parsed: a leading space
            // marks a continuation of the previous header's value, so the
            // rest of the line is appended to it. A continuation with no
            // preceding header is malformed.
            if let Some(rest) = line.strip_prefix(' ') {
                let value: &mut String = headers
                    .0
                    .last_mut()
                    .map(|(_, v)| v)
                    .ok_or(error::Parse::InvalidFormat)?;
                value.push('\n');
                value.push_str(rest);
                continue;
            }

            // Otherwise a header line is `<name> <value>`; lines without a
            // space are silently ignored.
            if let Some((name, value)) = line.split_once(' ') {
                match name {
                    "parent" => parents.push(git2::Oid::from_str(value).map_err(|err| {
                        error::Parse::InvalidHeader {
                            header: "parent",
                            err,
                        }
                    })?),
                    "author" => author = Some(value.parse::<Author>()?),
                    "committer" => committer = Some(value.parse::<Author>()?),
                    _ => headers.push(name, value),
                }
                continue;
            }
        }

        let trailers = trailers::Trailers::parse(message).map_err(error::Parse::Trailers)?;

        // Strip the rendered trailer paragraph off the message so the two
        // are stored separately; if the rendering doesn't match the tail
        // exactly, the message is kept whole.
        let message = message
            .strip_suffix(&trailers.to_string(": "))
            .unwrap_or(message)
            .to_string();

        let trailers = trailers.iter().map(OwnedTrailer::from).collect();

        Ok(Self {
            tree,
            parents,
            author: author.ok_or(error::Parse::Missing("author"))?,
            committer: committer.ok_or(error::Parse::Missing("committer"))?,
            headers,
            message,
            trailers,
        })
    }
}
+

+
impl ToString for Commit {
    /// Renders the commit back into its object description form:
    /// `tree`, `parent`s, `author`, `committer`, extra headers (with
    /// multi-line values re-indented by one space), a blank line, the
    /// trimmed message, and finally the trailers as their own paragraph.
    ///
    /// NOTE: the exact byte output matters — the round-trip tests in
    /// `t/src/commit.rs` assert `parse(s).to_string() == s`.
    fn to_string(&self) -> String {
        let mut buf = String::new();

        writeln!(buf, "tree {}", self.tree).ok();

        for parent in &self.parents {
            writeln!(buf, "parent {parent}").ok();
        }

        writeln!(buf, "author {}", self.author).ok();
        writeln!(buf, "committer {}", self.committer).ok();

        for (name, value) in self.headers.iter() {
            // Continuation lines of multi-line values (e.g. `gpgsig`) are
            // prefixed with a single space, mirroring the parser.
            writeln!(buf, "{name} {}", value.replace('\n', "\n ")).ok();
        }
        writeln!(buf).ok();
        write!(buf, "{}", self.message.trim()).ok();
        writeln!(buf).ok();

        // Separate the trailer paragraph from the message with one blank
        // line, but only when there are trailers to write.
        if !self.trailers.is_empty() {
            writeln!(buf).ok();
        }
        for trailer in self.trailers.iter() {
            writeln!(buf, "{}", Trailer::from(trailer).display(": ")).ok();
        }
        buf
    }
}
added archived/git-commit/src/trailers.rs
@@ -0,0 +1,214 @@
+
use std::{borrow::Cow, fmt, fmt::Write, ops::Deref, str::FromStr};
+

+
use git2::{MessageTrailersStrs, MessageTrailersStrsIterator};
+

+
/// A Git commit's set of trailers that are left in the commit's
/// message.
///
/// Trailers are key/value pairs in the last paragraph of a message,
/// not including any patches or conflicts that may be present.
///
/// # Usage
///
/// To construct `Trailers`, you can use [`Trailers::parse`] or its
/// `FromStr` implementation.
///
/// To iterate over the trailers, you can use [`Trailers::iter`].
///
/// To render the trailers to a `String`, you can use
/// [`Trailers::to_string`] or its `Display` implementation (note that
/// it will default to using `": "` as the separator).
///
/// # Examples
///
/// ```text
/// Add new functionality
///
/// Making code better with new functionality.
///
/// X-Signed-Off-By: Alex Sellier
/// X-Co-Authored-By: Fintan Halpenny
/// ```
///
/// The trailers in the above example are:
///
/// ```text
/// X-Signed-Off-By: Alex Sellier
/// X-Co-Authored-By: Fintan Halpenny
/// ```
pub struct Trailers {
    // Wraps libgit2's parsed trailer set; iteration and rendering are
    // layered on top of it.
    inner: MessageTrailersStrs,
}
+

+
impl Trailers {
+
    pub fn parse(message: &str) -> Result<Self, git2::Error> {
+
        Ok(Self {
+
            inner: git2::message_trailers_strs(message)?,
+
        })
+
    }
+

+
    pub fn iter(&self) -> Iter<'_> {
+
        Iter {
+
            inner: self.inner.iter(),
+
        }
+
    }
+

+
    pub fn to_string<'a, S>(&self, sep: S) -> String
+
    where
+
        S: Separator<'a>,
+
    {
+
        let mut buf = String::new();
+
        for (i, trailer) in self.iter().enumerate() {
+
            if i > 0 {
+
                writeln!(buf).ok();
+
            }
+

+
            write!(buf, "{}", trailer.display(sep.sep_for(&trailer.token))).ok();
+
        }
+
        writeln!(buf).ok();
+
        buf
+
    }
+
}
+

+
impl fmt::Display for Trailers {
+
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+
        f.write_str(&self.to_string(": "))
+
    }
+
}
+

+
pub trait Separator<'a> {
+
    fn sep_for(&self, token: &Token) -> &'a str;
+
}
+

+
impl<'a> Separator<'a> for &'a str {
+
    fn sep_for(&self, _: &Token) -> &'a str {
+
        self
+
    }
+
}
+

+
impl<'a, F> Separator<'a> for F
+
where
+
    F: Fn(&Token) -> &'a str,
+
{
+
    fn sep_for(&self, token: &Token) -> &'a str {
+
        self(token)
+
    }
+
}
+

+
impl FromStr for Trailers {
+
    type Err = git2::Error;
+

+
    fn from_str(s: &str) -> Result<Self, Self::Err> {
+
        Self::parse(s)
+
    }
+
}
+

+
pub struct Iter<'a> {
+
    inner: MessageTrailersStrsIterator<'a>,
+
}
+

+
impl<'a> Iterator for Iter<'a> {
+
    type Item = Trailer<'a>;
+

+
    fn next(&mut self) -> Option<Self::Item> {
+
        let (token, value) = self.inner.next()?;
+
        Some(Trailer {
+
            token: Token(token),
+
            value: Cow::Borrowed(value),
+
        })
+
    }
+
}
+

+
#[derive(Debug, Clone, Eq, PartialEq)]
+
pub struct Token<'a>(&'a str);
+

+
impl Deref for Token<'_> {
+
    type Target = str;
+

+
    fn deref(&self) -> &Self::Target {
+
        self.0
+
    }
+
}
+

+
pub struct Display<'a> {
+
    trailer: &'a Trailer<'a>,
+
    separator: &'a str,
+
}
+

+
impl<'a> fmt::Display for Display<'a> {
+
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+
        write!(
+
            f,
+
            "{}{}{}",
+
            self.trailer.token.deref(),
+
            self.separator,
+
            self.trailer.value,
+
        )
+
    }
+
}
+

+
/// A trailer is a key/value pair found in the last paragraph of a Git
+
/// commit message, not including any patches or conflicts that may be
+
/// present.
+
#[derive(Debug, Clone, Eq, PartialEq)]
+
pub struct Trailer<'a> {
+
    pub token: Token<'a>,
+
    pub value: Cow<'a, str>,
+
}
+

+
impl<'a> Trailer<'a> {
+
    pub fn display(&'a self, separator: &'a str) -> Display<'a> {
+
        Display {
+
            trailer: self,
+
            separator,
+
        }
+
    }
+

+
    pub fn to_owned(&self) -> OwnedTrailer {
+
        OwnedTrailer::from(self)
+
    }
+
}
+

+
/// A version of the [`Trailer`] which owns its token and
+
/// value. Useful for when you need to carry trailers around in a long
+
/// lived data structure.
+
#[derive(Debug)]
+
pub struct OwnedTrailer {
+
    pub token: OwnedToken,
+
    pub value: String,
+
}
+

+
#[derive(Debug)]
+
pub struct OwnedToken(String);
+

+
impl Deref for OwnedToken {
+
    type Target = str;
+

+
    fn deref(&self) -> &Self::Target {
+
        &self.0
+
    }
+
}
+

+
impl<'a> From<&Trailer<'a>> for OwnedTrailer {
+
    fn from(t: &Trailer<'a>) -> Self {
+
        OwnedTrailer {
+
            token: OwnedToken(t.token.0.to_string()),
+
            value: t.value.to_string(),
+
        }
+
    }
+
}
+

+
impl<'a> From<Trailer<'a>> for OwnedTrailer {
+
    fn from(t: Trailer<'a>) -> Self {
+
        (&t).into()
+
    }
+
}
+

+
impl<'a> From<&'a OwnedTrailer> for Trailer<'a> {
+
    fn from(t: &'a OwnedTrailer) -> Self {
+
        Trailer {
+
            token: Token(t.token.0.as_str()),
+
            value: Cow::from(&t.value),
+
        }
+
    }
+
}
added archived/git-commit/t/Cargo.toml
@@ -0,0 +1,26 @@
+
[package]
+
name = "git-commit-test"
+
version = "0.1.0"
+
license = "MIT OR Apache-2.0"
+
edition = "2021"
+

+
publish = false
+

+
[lib]
+
doctest = false
+
test = true
+
doc = false
+

+
[features]
+
test = []
+

+
[dev-dependencies.git-commit]
+
path = ".."
+

+
[dev-dependencies.git2]
+
version = "0.16.1"
+
default-features = false
+
features = ["vendored-libgit2"]
+

+
[dev-dependencies.test-helpers]
+
path = "../../test/test-helpers"

\ No newline at end of file
added archived/git-commit/t/src/commit.rs
@@ -0,0 +1,149 @@
+
use std::str::FromStr as _;
+

+
use git_commit::Commit;
+

+
const NO_TRAILER: &str = "\
+
tree 50d6ef440728217febf9e35716d8b0296608d7f8
+
parent 0ad95dbdfe9fdf81938ca419cf740469173e2022
+
parent a4ec9e07e1b2e6f37f7119651ae3bb63b79988b6
+
author Fintan Halpenny <fintan.halpenny@gmail.com> 1669292989 +0000
+
committer Fintan Halpenny <fintan.halpenny@gmail.com> 1669292989 +0000
+

+
Merge remote-tracking branch 'origin/surf/organise-tests'
+

+
* origin/surf/organise-tests:
+
  radicle-surf: organise tests
+
";
+

+
const SINGLE_TRAILER: &str = "\
+
tree 50d6ef440728217febf9e35716d8b0296608d7f8
+
parent 0ad95dbdfe9fdf81938ca419cf740469173e2022
+
parent a4ec9e07e1b2e6f37f7119651ae3bb63b79988b6
+
author Fintan Halpenny <fintan.halpenny@gmail.com> 1669292989 +0000
+
committer Fintan Halpenny <fintan.halpenny@gmail.com> 1669292989 +0000
+

+
Merge remote-tracking branch 'origin/surf/organise-tests'
+

+
* origin/surf/organise-tests:
+
  radicle-surf: organise tests
+

+
Signed-off-by: Fintan Halpenny <fintan.halpenny@gmail.com>
+
";
+

+
const UNSIGNED: &str = "\
+
tree c66cc435f83ed0fba90ed4500e9b4b96e9bd001b
+
parent af06ad645133f580a87895353508053c5de60716
+
author Alexis Sellier <alexis@radicle.xyz> 1664467633 +0200
+
committer Alexis Sellier <alexis@radicle.xyz> 1664786099 -0200
+

+
Add SSH functionality with new `radicle-ssh`
+

+
We borrow code from `thrussh`, refactored to be runtime-less.
+

+
X-Signed-Off-By: Alex Sellier
+
X-Co-Authored-By: Fintan Halpenny
+
";
+

+
const SSH_SIGNATURE: &str = "\
+
-----BEGIN SSH SIGNATURE-----
+
U1NIU0lHAAAAAQAAADMAAAALc3NoLWVkMjU1MTkAAAAgvjrQogRxxLjzzWns8+mKJAGzEX
+
4fm2ALoN7pyvD2ttQAAAADZ2l0AAAAAAAAAAZzaGE1MTIAAABTAAAAC3NzaC1lZDI1NTE5
+
AAAAQIQvhIewOgGfnXLgR5Qe1ZEr2vjekYXTdOfNWICi6ZiosgfZnIqV0enCPC4arVqQg+
+
GPp0HqxaB911OnSAr6bwU=
+
-----END SSH SIGNATURE-----";
+

+
const PGP_SIGNATURE: &str = "\
+
-----BEGIN PGP SIGNATURE-----
+
iQIzBAABCAAdFiEEHe7BWIo9taTY6TIiJVL7b2QGbLcFAmNcDhsACgkQJVL7b2QG
+
bLcc9Q//RgKf5N4enta9AuszGJZvdFhMPfIDUdw+WAZA6Z8zDPb/aAXZrPP/KIOM
+
zmX08FTqjP9B9YeWrEcFuAtxsRNqbDKrfpko9Y6bTsdrAJg3WIypBb9F8YDKJ6BO
+
CORJJqWOsLW129jW+mJDhcE0YTvPlcMiMI2qjVXKhU6Ag11W8IRZyTb9tvEaDjBR
+
YUnkPvgubv61K9BeUKexE2MakPBldaQtl0MF1Dk7/zo5btLd+KP0SOUKEhuMEu5b
+
LATHHdiYjt/2Xz7q8EcrFxXUaipxZe89dfTdi2ooJQw3ZDqjDHsGTHpDeBuzuSaJ
+
9fKVRwFz/78onfHPhmU4wfUhh+Fcl90p5/T+4dt2K6cr+7rq078e+aJYxkX2d0MG
+
PG0xGP0RN4g+X92K1kGuzoe4870xAnRTNh5nUB+X9snO8tVqQZTb0M2yI+sTsKrv
+
w/f+uiqL6e9DgIxlO5dgiNHCVoCs1QJ900jUGisrlzS4+n6GzMsG6s3c01X4yY9G
+
Ou/kGkMsn7tqejqC9RufygcchCFZqYwaHQwPkiYhfYGMarMpoCFvll0h8tSparpS
+
nnpAQXVdu8m3v1YdPUuTg5ksxSOe9HCIlVXGFhxy3iqCVRn+51FRnUI63rMTOm9/
+
LBqzvji02lDUPGqPgXfcCS0ty8FM2flBIXnwb8TDzCaPYhf53+U=
+
=6dw2
+
-----END PGP SIGNATURE-----";
+

+
const SIGNED: &str = "\
+
tree c66cc435f83ed0fba90ed4500e9b4b96e9bd001b
+
parent af06ad645133f580a87895353508053c5de60716
+
author Alexis Sellier <alexis@radicle.xyz> 1664467633 +0200
+
committer Alexis Sellier <alexis@radicle.xyz> 1664786099 -0200
+
other e6fe3c97619deb8ab4198620f9a7eb79d98363dd
+
gpgsig -----BEGIN SSH SIGNATURE-----
+
 U1NIU0lHAAAAAQAAADMAAAALc3NoLWVkMjU1MTkAAAAgvjrQogRxxLjzzWns8+mKJAGzEX
+
 4fm2ALoN7pyvD2ttQAAAADZ2l0AAAAAAAAAAZzaGE1MTIAAABTAAAAC3NzaC1lZDI1NTE5
+
 AAAAQIQvhIewOgGfnXLgR5Qe1ZEr2vjekYXTdOfNWICi6ZiosgfZnIqV0enCPC4arVqQg+
+
 GPp0HqxaB911OnSAr6bwU=
+
 -----END SSH SIGNATURE-----
+
gpgsig -----BEGIN PGP SIGNATURE-----
+
 iQIzBAABCAAdFiEEHe7BWIo9taTY6TIiJVL7b2QGbLcFAmNcDhsACgkQJVL7b2QG
+
 bLcc9Q//RgKf5N4enta9AuszGJZvdFhMPfIDUdw+WAZA6Z8zDPb/aAXZrPP/KIOM
+
 zmX08FTqjP9B9YeWrEcFuAtxsRNqbDKrfpko9Y6bTsdrAJg3WIypBb9F8YDKJ6BO
+
 CORJJqWOsLW129jW+mJDhcE0YTvPlcMiMI2qjVXKhU6Ag11W8IRZyTb9tvEaDjBR
+
 YUnkPvgubv61K9BeUKexE2MakPBldaQtl0MF1Dk7/zo5btLd+KP0SOUKEhuMEu5b
+
 LATHHdiYjt/2Xz7q8EcrFxXUaipxZe89dfTdi2ooJQw3ZDqjDHsGTHpDeBuzuSaJ
+
 9fKVRwFz/78onfHPhmU4wfUhh+Fcl90p5/T+4dt2K6cr+7rq078e+aJYxkX2d0MG
+
 PG0xGP0RN4g+X92K1kGuzoe4870xAnRTNh5nUB+X9snO8tVqQZTb0M2yI+sTsKrv
+
 w/f+uiqL6e9DgIxlO5dgiNHCVoCs1QJ900jUGisrlzS4+n6GzMsG6s3c01X4yY9G
+
 Ou/kGkMsn7tqejqC9RufygcchCFZqYwaHQwPkiYhfYGMarMpoCFvll0h8tSparpS
+
 nnpAQXVdu8m3v1YdPUuTg5ksxSOe9HCIlVXGFhxy3iqCVRn+51FRnUI63rMTOm9/
+
 LBqzvji02lDUPGqPgXfcCS0ty8FM2flBIXnwb8TDzCaPYhf53+U=
+
 =6dw2
+
 -----END PGP SIGNATURE-----
+

+
Add SSH functionality with new `radicle-ssh`
+

+
We borrow code from `thrussh`, refactored to be runtime-less.
+

+
X-Signed-Off-By: Alex Sellier
+
X-Co-Authored-By: Fintan Halpenny
+
";
+

+
#[test]
+
fn test_push_header() {
+
    let mut commit = Commit::from_str(UNSIGNED).unwrap();
+

+
    commit.push_header("other", "e6fe3c97619deb8ab4198620f9a7eb79d98363dd");
+
    commit.push_header("gpgsig", SSH_SIGNATURE);
+
    commit.push_header("gpgsig", PGP_SIGNATURE);
+

+
    assert_eq!(commit.to_string(), SIGNED);
+
}
+

+
#[test]
+
fn test_get_header() {
+
    let commit = Commit::from_str(SIGNED).unwrap();
+

+
    assert_eq!(
+
        commit
+
            .signatures()
+
            .map(|sig| sig.to_string())
+
            .collect::<Vec<_>>(),
+
        vec![SSH_SIGNATURE.to_owned(), PGP_SIGNATURE.to_owned()]
+
    );
+
    assert_eq!(
+
        commit.values("other").collect::<Vec<_>>(),
+
        vec![String::from("e6fe3c97619deb8ab4198620f9a7eb79d98363dd")],
+
    );
+
    assert!(commit.values("unknown").next().is_none());
+
}
+

+
#[test]
+
fn test_conversion() {
+
    assert_eq!(
+
        Commit::from_str(NO_TRAILER).unwrap().to_string(),
+
        NO_TRAILER
+
    );
+
    assert_eq!(
+
        Commit::from_str(SINGLE_TRAILER).unwrap().to_string(),
+
        SINGLE_TRAILER
+
    );
+
    assert_eq!(Commit::from_str(SIGNED).unwrap().to_string(), SIGNED);
+
    assert_eq!(Commit::from_str(UNSIGNED).unwrap().to_string(), UNSIGNED);
+
}
added archived/git-commit/t/src/integration.rs
@@ -0,0 +1,28 @@
+
use std::io;
+

+
use git_commit::Commit;
+
use test_helpers::tempdir::WithTmpDir;
+

+
#[test]
+
fn valid_commits() {
+
    let radicle_git = format!(
+
        "file://{}",
+
        git2::Repository::discover(".").unwrap().path().display()
+
    );
+
    let repo = WithTmpDir::new(|path| {
+
        let repo = git2::Repository::clone(&radicle_git, path)
+
            .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
+
        Ok::<_, io::Error>(repo)
+
    })
+
    .unwrap();
+

+
    let mut walk = repo.revwalk().unwrap();
+
    walk.push_head().unwrap();
+

+
    // take the first 20 commits and make sure we can parse them
+
    for oid in walk.take(20) {
+
        let oid = oid.unwrap();
+
        let commit = Commit::read(&repo, oid);
+
        assert!(commit.is_ok(), "Oid: {oid}, Error: {commit:?}")
+
    }
+
}
added archived/git-commit/t/src/lib.rs
@@ -0,0 +1,5 @@
+
#[cfg(test)]
+
mod commit;
+

+
#[cfg(test)]
+
mod integration;
added archived/git-trailers/Cargo.toml
@@ -0,0 +1,20 @@
+
[package]
+
name = "git-trailers"
+
version = "0.1.0"
+
authors = [
+
  "Nuno Alexandre <hi@nunoalexandre.com>",
+
  "Kim Altintop <kim@eagain.st>",
+
  "Fintan Halpenny <fintan.halpenny@gmail.com>",
+
]
+
edition = "2018"
+
license = "GPL-3.0-or-later"
+
description = "Library to support parsing and display git trailers <https://git-scm.com/docs/git-interpret-trailers>"
+
keywords = ["git"]
+

+
[lib]
+
doctest = false
+
test = false
+

+
[dependencies]
+
nom = "7.1"
+
thiserror = "1.0"
added archived/git-trailers/src/lib.rs
@@ -0,0 +1,300 @@
+
// Copyright © 2019-2020 The Radicle Foundation <hello@radicle.foundation>
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use std::{borrow::Cow, convert::TryFrom, fmt, ops::Deref};
+

+
use thiserror::Error;
+

+
#[derive(Debug, Error)]
+
#[non_exhaustive]
+
pub enum Error {
+
    #[error("the trailers paragraph is missing in the given message")]
+
    MissingParagraph,
+

+
    #[error("trailing data after trailers section: '{0}")]
+
    Trailing(String),
+

+
    #[error(transparent)]
+
    Parse(#[from] nom::Err<nom::error::Error<String>>),
+
}
+

+
#[derive(Debug, Clone, Eq, PartialEq)]
+
pub struct Trailer<'a> {
+
    pub token: Token<'a>,
+
    pub values: Vec<Cow<'a, str>>,
+
}
+

+
impl<'a> Trailer<'a> {
+
    pub fn display(&'a self, separator: &'a str) -> Display<'a> {
+
        Display {
+
            trailer: self,
+
            separator,
+
        }
+
    }
+

+
    pub fn to_owned(&self) -> OwnedTrailer {
+
        OwnedTrailer::from(self)
+
    }
+
}
+

+
#[derive(Debug, Clone, Eq, PartialEq)]
+
pub struct Token<'a>(&'a str);
+

+
/// A version of the [`Trailer`] which owns its token and values. Useful
/// for when you need to carry trailers around in a long lived data
/// structure.
#[derive(Debug)]
pub struct OwnedTrailer {
    pub token: OwnedToken,
    pub values: Vec<String>,
}
+

+
#[derive(Debug)]
+
pub struct OwnedToken(String);
+

+
impl<'a> From<&Trailer<'a>> for OwnedTrailer {
+
    fn from(t: &Trailer<'a>) -> Self {
+
        OwnedTrailer {
+
            token: OwnedToken(t.token.0.to_string()),
+
            values: t.values.iter().map(|v| v.to_string()).collect(),
+
        }
+
    }
+
}
+

+
impl<'a> From<Trailer<'a>> for OwnedTrailer {
+
    fn from(t: Trailer<'a>) -> Self {
+
        (&t).into()
+
    }
+
}
+

+
impl<'a> From<&'a OwnedTrailer> for Trailer<'a> {
+
    fn from(t: &'a OwnedTrailer) -> Self {
+
        Trailer {
+
            token: Token(t.token.0.as_str()),
+
            values: t.values.iter().map(Cow::from).collect(),
+
        }
+
    }
+
}
+

+
impl Deref for OwnedToken {
+
    type Target = str;
+

+
    fn deref(&self) -> &Self::Target {
+
        &self.0
+
    }
+
}
+

+
/// Errors returned when validating a string as a [`Token`].
#[derive(Debug, Error)]
#[non_exhaustive]
pub enum InvalidToken {
    /// Characters remained after the valid token prefix.
    #[error("trailing characters: '{0}'")]
    Trailing(String),

    /// The underlying `nom` parser failed.
    #[error(transparent)]
    Parse(#[from] nom::Err<nom::error::Error<String>>),
}
+

+
impl<'a> TryFrom<&'a str> for Token<'a> {
+
    type Error = InvalidToken;
+

+
    fn try_from(s: &'a str) -> Result<Self, Self::Error> {
+
        match parser::token(s) {
+
            Ok((rest, token)) if rest.is_empty() => Ok(token),
+
            Ok((trailing, _)) => Err(InvalidToken::Trailing(trailing.to_owned())),
+
            Err(e) => Err(e.to_owned().into()),
+
        }
+
    }
+
}
+

+
impl Deref for Token<'_> {
+
    type Target = str;
+

+
    fn deref(&self) -> &Self::Target {
+
        self.0
+
    }
+
}
+

+
pub struct Display<'a> {
+
    trailer: &'a Trailer<'a>,
+
    separator: &'a str,
+
}
+

+
impl<'a> fmt::Display for Display<'a> {
+
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+
        write!(
+
            f,
+
            "{}{}{}",
+
            self.trailer.token.deref(),
+
            self.separator,
+
            self.trailer.values.join("\n  ")
+
        )
+
    }
+
}
+

+
/// Strategy for choosing the separator string when rendering trailers.
/// Implemented for a fixed `&str` and for closures keyed on the [`Token`].
pub trait Separator<'a> {
    /// The separator to place between `token` and its values.
    fn sep_for(&self, token: &Token) -> &'a str;
}

/// A fixed separator, used for every token.
impl<'a> Separator<'a> for &'a str {
    fn sep_for(&self, _: &Token) -> &'a str {
        self
    }
}

/// A per-token separator, chosen by a closure.
impl<'a, F> Separator<'a> for F
where
    F: Fn(&Token) -> &'a str,
{
    fn sep_for(&self, token: &Token) -> &'a str {
        self(token)
    }
}
+

+
/// Renders a slice of [`Trailer`]s, one per line; created via [`display`].
pub struct DisplayMany<'a, S> {
    // Chooses the separator for each trailer's token.
    separator: S,
    trailers: &'a [Trailer<'a>],
}
+

+
impl<'a, S> fmt::Display for DisplayMany<'a, S>
+
where
+
    S: Separator<'a>,
+
{
+
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+
        for (i, trailer) in self.trailers.iter().enumerate() {
+
            if i > 0 {
+
                writeln!(f)?
+
            }
+

+
            write!(
+
                f,
+
                "{}",
+
                trailer.display(self.separator.sep_for(&trailer.token))
+
            )?
+
        }
+

+
        Ok(())
+
    }
+
}
+

+
/// Parse the trailers of the given message. It looks up the last paragraph
+
/// of the message and attempts to parse each of its lines as a [Trailer].
+
/// Fails if no trailers paragraph is found or if at least one trailer
+
/// fails to be parsed.
+
pub fn parse<'a>(message: &'a str, separators: &'a str) -> Result<Vec<Trailer<'a>>, Error> {
+
    let trailers_paragraph =
+
        match parser::paragraphs(message.trim_end()).map(|(_, ps)| ps.last().cloned()) {
+
            Ok(None) | Err(_) => return Err(Error::MissingParagraph),
+
            Ok(Some(p)) => {
+
                if p.is_empty() {
+
                    return Err(Error::MissingParagraph);
+
                }
+
                p
+
            },
+
        };
+

+
    match parser::trailers(trailers_paragraph, separators) {
+
        Ok((rest, trailers)) if rest.is_empty() => Ok(trailers),
+
        Ok((unparseable, _)) => Err(Error::Trailing(unparseable.to_owned())),
+
        Err(e) => Err(e.to_owned().into()),
+
    }
+
}
+

+
/// Render a slice of trailers.
+
///
+
/// The `separator` can be either a string slice, or a closure which may choose
+
/// a different separator for each [`Token`] encountered. Note that multiline
+
/// trailers are rendered with a fixed indent, so the result is not
+
/// layout-preserving.
+
pub fn display<'a, S>(separator: S, trailers: &'a [Trailer<'a>]) -> DisplayMany<'a, S>
+
where
+
    S: Separator<'a>,
+
{
+
    DisplayMany {
+
        separator,
+
        trailers,
+
    }
+
}
+

+
/// `nom`-based building blocks for [`super::parse`].
pub mod parser {
    use std::borrow::Cow;

    use super::{Token, Trailer};
    use nom::{
        branch::alt,
        bytes::complete::{tag, take_until, take_while1},
        character::complete::{line_ending, not_line_ending, one_of, space0, space1},
        combinator::{map, rest},
        multi::{many0, separated_list1},
        sequence::{delimited, preceded, separated_pair, terminated},
        IResult,
    };

    // Paragraphs are separated by a blank line (two consecutive newlines).
    const EMPTY_LINE: &str = "\n\n";

    /// Split the input into paragraphs (blank-line separated chunks).
    pub fn paragraphs(s: &str) -> IResult<&str, Vec<&str>> {
        separated_list1(tag(EMPTY_LINE), paragraph)(s)
    }

    /// A paragraph: everything up to the next blank line, or the rest of
    /// the input if there is none.
    pub fn paragraph(s: &str) -> IResult<&str, &str> {
        alt((take_until(EMPTY_LINE), rest))(s)
    }

    /// Parse all the possible trailers.
    /// It stops when it can no longer parse valid trailers.
    pub fn trailers<'a>(s: &'a str, separators: &'a str) -> IResult<&'a str, Vec<Trailer<'a>>> {
        many0(|s| trailer(s, separators))(s)
    }

    /// Parse a trailer, which can have an inlined or multilined value.
    pub fn trailer<'a>(s: &'a str, separators: &'a str) -> IResult<&'a str, Trailer<'a>> {
        // token, then one of `separators` (optionally space-padded), then values.
        let mut parser = separated_pair(token, |s| separator(separators, s), values);
        let (rest, (token, values)) = parser(s)?;
        Ok((rest, Trailer { token, values }))
    }

    /// Parse a trailer token.
    pub(super) fn token(s: &str) -> IResult<&str, Token> {
        // A token is a non-empty run of alphanumerics and `-`.
        take_while1(|c: char| c.is_alphanumeric() || c == '-')(s)
            .map(|(i, token_str)| (i, Token(token_str)))
    }

    /// Parse the trailer separator, which can be delimited by spaces.
    fn separator<'a>(separators: &'a str, s: &'a str) -> IResult<&'a str, char> {
        delimited(space0, one_of(separators), space0)(s)
    }

    /// Parse the trailer values, which gathers the value after the separator
    /// (if any) and possible following multilined values, indented by a
    /// space.
    fn values(s: &str) -> IResult<&str, Vec<Cow<'_, str>>> {
        let (r, opt_inline_value) = until_eol_or_eof(s)?;
        let (r, mut values) = multiline_values(r)?;
        // An empty inline value (e.g. `Token:\n`) contributes no entry.
        if !opt_inline_value.is_empty() {
            values.insert(0, opt_inline_value.into())
        }
        Ok((r, values))
    }

    /// Zero or more indented continuation lines, one value each.
    fn multiline_values(s: &str) -> IResult<&str, Vec<Cow<'_, str>>> {
        many0(map(indented_line_contents, Cow::from))(s)
    }

    /// Up to the end of line, or the remaining input if there is no newline.
    fn until_eol_or_eof(s: &str) -> IResult<&str, &str> {
        alt((until_eol, rest))(s)
    }

    /// Parse an indented line, i.e, a line that starts with a space.
    /// Extracts the line contents, ignoring the indentation and the
    /// new line character.
    fn indented_line_contents(s: &str) -> IResult<&str, &str> {
        preceded(space1, until_eol_or_eof)(s)
    }

    /// Consume the input until the end of the line, ignoring the new line
    /// character.
    fn until_eol(s: &str) -> IResult<&str, &str> {
        terminated(not_line_ending, line_ending)(s)
    }
}
added archived/git-trailers/t/Cargo.toml
@@ -0,0 +1,22 @@
+
[package]
+
name = "git-trailers-test"
+
version = "0.1.0"
+
edition = "2021"
+
license = "GPL-3.0-or-later"
+

+
publish = false
+

+
[lib]
+
doctest = false
+
test = true
+
doc = false
+

+
[features]
+
test = []
+

+
[dev-dependencies]
+
assert_matches = "1.5"
+
pretty_assertions = "1.1"
+

+
[dev-dependencies.git-trailers]
+
path = ".."
added archived/git-trailers/t/src/lib.rs
@@ -0,0 +1,2 @@
+
#[cfg(test)]
+
mod tests;
added archived/git-trailers/t/src/tests.rs
@@ -0,0 +1,155 @@
+
// Copyright © 2019-2020 The Radicle Foundation <hello@radicle.foundation>
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use std::{borrow::Cow, convert::TryFrom as _, ops::Deref as _};
+

+
use assert_matches::assert_matches;
+
use git_trailers::{display, parse, Error, Token, Trailer};
+
use pretty_assertions::assert_eq;
+

+
// Trailers with single, multiline, and empty values all parse from the
// final paragraph; the leading subject/description paragraphs are ignored.
#[test]
fn parse_message_with_valid_trailers() {
    let msg = r#"Subject

A multiline
description.

Co-authored-by: John Doe <john.doe@test.com>
Ticket: #42
Tested-by:
    John <john@test.com>
    Jane <jane@test.com>
Just-a-token:

"#;
    assert_eq!(
        parse(msg, ":").unwrap(),
        vec![
            new_trailer("Co-authored-by", &["John Doe <john.doe@test.com>"]),
            new_trailer("Ticket", &["#42"]),
            new_trailer(
                "Tested-by",
                &["John <john@test.com>", "Jane <jane@test.com>"]
            ),
            new_trailer("Just-a-token", &[]),
        ]
    )
}

// Any character of the `separators` set may separate a token from its
// values; a separator may also carry surrounding spaces.
#[test]
fn parse_message_trailers_with_custom_separators() {
    let separators = ":=$";
    let msg = r#"Subject

A multiline
description.

Co-authored-by: John Doe <john.doe@test.com>
Ticket = #42
Tested-by $User <user@test.com>
    John <john@test.com>
    Jane <jane@test.com>
"#;
    assert_eq!(
        parse(msg, separators).unwrap(),
        vec![
            new_trailer("Co-authored-by", &["John Doe <john.doe@test.com>"]),
            new_trailer("Ticket", &["#42"]),
            new_trailer(
                "Tested-by",
                &[
                    "User <user@test.com>",
                    "John <john@test.com>",
                    "Jane <jane@test.com>"
                ]
            ),
        ]
    )
}
+

+
// A line without any token yields `Error::Trailing` carrying the
// unparseable remainder of the paragraph.
#[test]
fn parse_message_trailers_with_missing_token() {
    let msg = r#"Subject

Good-trailer: true
John Doe <john.doe@test.com> # Unparsable token due to missing token"#;
    assert_matches!(
        parse(msg, ":"),
        Err(Error::Trailing(s))
            if s == "John Doe <john.doe@test.com> # Unparsable token due to missing token"
    )
}

// Tokens are alphanumeric-or-`-` only; anything else stops parsing and is
// reported as trailing data.
#[test]
fn parse_message_trailers_with_invalid_token() {
    let msg = r#"Subject

Good-trailer: true
&!#: John Doe <john.doe@test.com> # Unparsable token due to invalid token"#;
    assert_matches!(
        parse(msg, ":"),
        Err(Error::Trailing(s))
            if s == "&!#: John Doe <john.doe@test.com> # Unparsable token due to invalid token"
    )
}

// A message consisting solely of a trailers paragraph parses fine.
#[test]
fn parse_message_with_only_trailers() {
    let msg = r#"Co-authored-by: John Doe <john.doe@test.com>
Ticket: #42
Tested-by: Tester <tester@test.com>
"#;
    assert_eq!(
        parse(msg, ":").unwrap(),
        vec![
            new_trailer("Co-authored-by", &["John Doe <john.doe@test.com>"]),
            new_trailer("Ticket", &["#42"]),
            new_trailer("Tested-by", &["Tester <tester@test.com>"]),
        ]
    )
}

// An empty message has no paragraphs at all.
#[test]
fn parse_empty_message() {
    let msg = "";
    assert_matches!(parse(msg, ":"), Err(Error::MissingParagraph))
}
+

+
// parse → display with a fixed separator round-trips a message that uses
// the canonical two-space continuation indent.
#[test]
fn display_static() {
    let msg = r#"Tested-by: Alice
  Bob
  Carol
  Dylan
Acked-by: Eve"#;

    let parsed = parse(msg, ":").unwrap();
    let rendered = format!("{}", display(": ", &parsed));
    assert_eq!(&rendered, msg);
}

// A closure separator can pick a different separator per token, here
// rendering `Fixes` GitHub-style (`Fixes #42`).
#[test]
fn display_dynamic() {
    let msg = r#"Co-authored-by: John Doe <john.doe@test.com>
Tested-by: Tester <tester@test.com>
Fixes #42"#;

    let parsed = parse(msg, ":#").unwrap();
    let rendered = format!(
        "{}",
        display(
            |t: &Token| if t.deref() == "Fixes" { " #" } else { ": " },
            &parsed
        )
    );
    assert_eq!(rendered, msg)
}

// Test helper: build a Trailer from a token literal and value literals.
fn new_trailer<'a>(token: &'a str, values: &[&'a str]) -> Trailer<'a> {
    Trailer {
        token: Token::try_from(token).unwrap(),
        values: values.iter().map(|s| Cow::from(*s)).collect(),
    }
}
added archived/link-git/Cargo.toml
@@ -0,0 +1,64 @@
+
[package]
+
name = "link-git"
+
version = "0.1.0"
+
authors = ["Kim Altintop <kim@eagain.st>"]
+
edition = "2018"
+
license = "GPL-3.0-or-later"
+

+
description = "Core git types and functionality"
+

+
[lib]
+
doctest = false
+
test = false
+

+
[dependencies]
+
arc-swap = "1.4.0"
+
async-process = "1.1.0"
+
async-trait = "0.1"
+
blocking = "1.0.2"
+
bstr = "0.2"
+
futures-lite = "1.12.0"
+
futures-util = "0.3.15"
+
lazy_static = "1.4.0"
+
im = "15.0.0"
+
once_cell = "1.10"
+
parking_lot = "0.12"
+
pin-project = "1.0.7"
+
regex = "1.5.4"
+
rustc-hash = "1.1.0"
+
tempfile = "3.4"
+
thiserror = "1.0.30"
+
tracing = "0.1"
+
versions = "3.0.2"
+

+
# gitoxide
+
git-actor = "^0.6.0"
+
git-hash = "^0.8.0"
+
git-lock = "^1.0.1"
+
git-object = "^0.15.1"
+
git-odb = "^0.24.0"
+
git-ref = "^0.9.0"
+
git-traverse = "^0.10.0"
+

+
[dependencies.git-features]
+
version = "^0.17.0"
+
features = ["progress", "parallel", "zlib-ng-compat"]
+

+
[dependencies.git-pack]
+
version = "^0.14.0"
+
features = ["object-cache-dynamic", "pack-cache-lru-static", "pack-cache-lru-dynamic"]
+

+
[dependencies.git-packetline]
+
version = "^0.12.0"
+
features = ["async-io"]
+

+
[dependencies.git-protocol]
+
version = "^0.12.0"
+
features = ["async-client"]
+

+
# compat
+
[dependencies.git2]
+
version = "0.16.1"
+
default-features = false
+
features = ["vendored-libgit2"]
+
optional = true
added archived/link-git/src/lib.rs
@@ -0,0 +1,20 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
// `async_trait` is used via its attribute macro across the crate.
#[macro_use]
extern crate async_trait;

pub mod odb;
pub mod protocol;
pub mod refs;
pub use refs::db as refdb;
// The libgit2 compatibility layer is only built when the optional `git2`
// dependency is enabled.
#[cfg(feature = "git2")]
pub mod service;

// Re-export the gitoxide crates under shorter names.
pub use git_actor as actor;
pub use git_hash as hash;
pub use git_lock as lock;
pub use git_object as object;
pub use git_traverse as traverse;
added archived/link-git/src/odb.rs
@@ -0,0 +1,51 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use git_hash::oid;
+
use thiserror::Error;
+

+
pub mod backend;
+
pub mod index;
+
pub mod pack;
+
pub mod window;
+

+
pub use git_pack::{cache, data::Object};
+

+
/// Errors returned by [`Odb::find`].
#[derive(Debug, Error)]
pub enum Error {
    /// Lookup via the pack indices (or loading pack data) failed.
    #[error(transparent)]
    Packed(#[from] index::error::Lookup<pack::error::Data>),

    /// Reading a loose object failed.
    #[error(transparent)]
    Loose(#[from] git_odb::loose::find::Error),
}
+

+
/// An object database combining a loose-object store with a packed store.
/// Packed objects are consulted first by [`Odb::find`].
pub struct Odb<I, D> {
    /// Loose object backend.
    pub loose: backend::Loose,
    /// Packed backend, parameterised over pack index and pack-data cache.
    pub packed: backend::Packed<I, D>,
}
+

+
impl<I, D> Odb<I, D>
+
where
+
    I: index::Index,
+
    D: window::Cache,
+
{
+
    pub fn contains(&self, id: impl AsRef<oid>) -> bool {
+
        self.packed.contains(id.as_ref()) || self.loose.contains(id)
+
    }
+

+
    pub fn find<'a>(
+
        &self,
+
        id: impl AsRef<oid>,
+
        buf: &'a mut Vec<u8>,
+
        cache: &mut impl cache::DecodeEntry,
+
    ) -> Result<Option<Object<'a>>, Error> {
+
        let id = id.as_ref();
+
        if self.packed.contains(id) {
+
            return self.packed.find(id, buf, cache).map_err(Into::into);
+
        }
+
        self.loose.try_find(id, buf).map_err(Into::into)
+
    }
+
}
added archived/link-git/src/odb/backend.rs
@@ -0,0 +1,36 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use git_hash::oid;
+
use git_pack::{cache::DecodeEntry, data::Object};
+

+
use super::{index, pack, window};
+

+
/// The loose-object backend is gitoxide's store as-is.
pub type Loose = git_odb::loose::Store;

/// Packed-object backend: a pack [`index`] plus a pack-data [`window`] cache.
pub struct Packed<I, D> {
    /// Pack index lookup.
    pub index: I,
    /// Cache of open pack data files.
    pub data: D,
}

impl<I, D> Packed<I, D>
where
    I: index::Index,
    D: window::Cache,
{
    /// `true` if any known pack index contains `id`.
    pub fn contains(&self, id: impl AsRef<oid>) -> bool {
        self.index.contains(id)
    }

    /// Look up and decode `id` into `buf`, loading pack data through the
    /// window cache on demand.
    pub fn find<'a>(
        &self,
        id: impl AsRef<oid>,
        buf: &'a mut Vec<u8>,
        cache: &mut impl DecodeEntry,
    ) -> Result<Option<Object<'a>>, index::error::Lookup<pack::error::Data>> {
        self.index
            .lookup(|info| self.data.get(info), id, buf, cache)
    }
}
added archived/link-git/src/odb/index.rs
@@ -0,0 +1,307 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use std::{
+
    fs,
+
    io,
+
    path::{Path, PathBuf},
+
    sync::Arc,
+
};
+

+
use arc_swap::ArcSwap;
+
use git_hash::oid;
+
use git_pack::{
+
    cache::DecodeEntry,
+
    data::{Object, ResolvedBase},
+
};
+
use parking_lot::Mutex;
+
use tracing::trace;
+

+
use super::pack;
+

+
pub use git_pack::index::File as IndexFile;
+

+
mod metrics;
+
pub use metrics::{Metrics, Stats, StatsView};
+

+
/// Error types for pack-index discovery and lookup.
pub mod error {
    use super::*;
    use thiserror::Error;

    /// Scanning `objects/pack` for index files failed.
    #[derive(Debug, Error)]
    pub enum Discover {
        /// A pack index file could not be opened/parsed.
        #[error(transparent)]
        Index(#[from] pack::error::Index),

        /// Reading the pack directory failed.
        #[error(transparent)]
        Io(#[from] io::Error),
    }

    /// Object lookup failed. `E` is the error of the caller-supplied
    /// pack-data loader.
    #[derive(Debug, Error)]
    pub enum Lookup<E> {
        /// Loading the pack data file failed.
        #[error(transparent)]
        Lookup(E),

        /// Re-scanning the pack directory failed.
        #[error(transparent)]
        Reload(#[from] Discover),

        /// Decoding the pack entry failed.
        #[error(transparent)]
        Decode(#[from] git_pack::data::decode_entry::Error),
    }
}
+

+
/// Lookup of objects by id across a set of pack indices.
pub trait Index {
    /// `true` if some pack index contains `id`.
    fn contains(&self, id: impl AsRef<oid>) -> bool;

    /// Find `id` and decode it into `buf`.
    ///
    /// `pack_cache` resolves a pack's [`pack::Info`] to its (possibly
    /// cached) data file; `cache` is the delta-decode cache.
    fn lookup<'a, F, E>(
        &self,
        pack_cache: F,
        id: impl AsRef<oid>,
        buf: &'a mut Vec<u8>,
        cache: &mut impl DecodeEntry,
    ) -> Result<Option<Object<'a>>, error::Lookup<E>>
    where
        F: FnOnce(&pack::Info) -> Result<Arc<pack::Data>, E>;
}
+

+
/// An [`Index`] which can be shared between threads.
///
/// [`Shared`] assumes that:
///
/// * newer packs are likely to contain recent objects
/// * lookups tend to favour recent objects
/// * lookups tend to expect the object to be found (the object id is either
///   pointed to by a ref, or linked to by an existing object)
///
/// Thus, it:
///
/// * orders indices found in `GIT_DIR/objects/pack` by modification time, and
///   queries the more recent ones first
/// * attempts to rescan `GIT_DIR/objects/pack` when an object id was _not_
///   found (assuming that this is due to a compaction)
///
/// Unless a reload occurs, lookups are lock-free and mostly wait-free. Writes
/// ([`Shared::push`], [`Shared::reload`]) are guarded by a [`Mutex`].
// TODO: consecutive lookups also tend to resolve to the same pack, so we could
// remember the index into the `im::Vector` where we found a match and look
// there first. This is what libgit2 does, but the heuristic is not necessarily
// true when `Shared` is shared across multiple concurrent link replication
// tasks; per-namespace packs are independent pre-compaction.
pub struct Shared<M> {
    // Directory the indices were discovered in; rescanned on miss.
    pack_dir: PathBuf,
    // Newest-first list of indices; swapped atomically on push/reload.
    indices: ArcSwap<im::Vector<Arc<pack::Index>>>,
    // Serialises writers (push/reload); readers never take it.
    write: Mutex<()>,
    // Metrics sink; `()` disables collection.
    stats: M,
}
+

+
impl Shared<()> {
+
    pub fn open(git_dir: impl AsRef<Path>) -> Result<Self, error::Discover> {
+
        let pack_dir = git_dir.as_ref().join("objects").join("pack");
+
        let indices = discover(&pack_dir)?;
+

+
        Ok(Self {
+
            pack_dir,
+
            indices: ArcSwap::new(Arc::new(indices)),
+
            write: Mutex::new(()),
+
            stats: (),
+
        })
+
    }
+
}
+

+
impl<M> Shared<M>
where
    M: Metrics,
{
    /// Enable [`Stats`] collection on this index.
    pub fn with_stats(self) -> Shared<Stats> {
        self.with_metrics(Stats::default())
    }

    /// Replace the metrics sink with `m`, keeping all indices.
    pub fn with_metrics<N: Metrics>(self, m: N) -> Shared<N> {
        Shared {
            pack_dir: self.pack_dir,
            indices: self.indices,
            write: self.write,
            stats: m,
        }
    }

    /// Snapshot of the current metrics, including the index count.
    pub fn stats(&self) -> M::Snapshot {
        self.stats.snapshot(self.len())
    }

    /// Add a newly discovered [`pack::Index`].
    ///
    /// This index will be considered first by subsequent lookups. Note that it
    /// is only guaranteed that the index will be visible to readers if it
    /// resides in the `git_dir` this [`Shared`] was initialised with.
    pub fn push(&self, idx: pack::Index) {
        // Writers are serialised by `write`; readers keep using the old
        // vector until the `store` below.
        let lock = self.write.lock();
        let mut new = self.indices.load_full();
        // Front = most recent = queried first.
        Arc::make_mut(&mut new).push_front(Arc::new(idx));
        self.indices.store(new);
        drop(lock);

        self.stats.record_push()
    }

    /// Re-scan the packs directory and replace the in-memory indices with the
    /// result.
    ///
    /// If the application can intercept compaction events, this method can be
    /// used to release memory early. Otherwise it is not required to call this
    /// method, as [`Shared`] manages reloads automatically.
    pub fn reload(&self) -> Result<(), error::Discover> {
        let lock = self.write.lock();
        let indices = discover(&self.pack_dir)?;
        self.indices.store(Arc::new(indices));
        drop(lock);

        self.stats.record_reload();

        Ok(())
    }

    /// `true` if no pack indices are currently loaded.
    pub fn is_empty(&self) -> bool {
        self.indices.load().is_empty()
    }

    /// Number of pack indices currently loaded.
    pub fn len(&self) -> usize {
        self.indices.load().len()
    }

    // Two-pass membership test: on a miss in the first pass, rescan the
    // pack dir (compaction may have moved the object) and try once more.
    fn contains(&self, id: impl AsRef<oid>) -> bool {
        for i in 0..2 {
            for idx in self.indices.load().iter() {
                if idx.contains(&id) {
                    self.stats.record_hit();
                    return true;
                }
            }

            // A failed reload is reported as a miss rather than an error.
            if i == 0 && self.reload().is_err() {
                self.stats.record_miss();
                return false;
            }
        }

        self.stats.record_miss();
        false
    }

    // Two-pass lookup mirroring `contains`: reload once on a miss, then
    // give up with `Ok(None)`.
    fn lookup<'a, F, E>(
        &self,
        pack_cache: F,
        id: impl AsRef<oid>,
        buf: &'a mut Vec<u8>,
        cache: &mut impl DecodeEntry,
    ) -> Result<Option<Object<'a>>, error::Lookup<E>>
    where
        F: FnOnce(&pack::Info) -> Result<Arc<pack::Data>, E>,
    {
        for i in 0..2 {
            for idx in self.indices.load().iter() {
                if let Some(ofs) = idx.ofs(&id) {
                    self.stats.record_hit();
                    return load_obj(ofs, idx, pack_cache, buf, cache).map(Some);
                }
            }

            if i == 0 {
                self.reload()?;
            }
        }

        self.stats.record_miss();
        Ok(None)
    }
}
+

+
/// Decode the object at pack offset `ofs` of the pack described by `idx`,
/// loading the pack data through `pack_cache` and resolving delta bases
/// within the same pack.
fn load_obj<'a, F, E>(
    ofs: u64,
    idx: &pack::Index,
    pack_cache: F,
    buf: &'a mut Vec<u8>,
    cache: &mut impl DecodeEntry,
) -> Result<Object<'a>, error::Lookup<E>>
where
    F: FnOnce(&pack::Info) -> Result<Arc<pack::Data>, E>,
{
    let data = pack_cache(&idx.info).map_err(error::Lookup::Lookup)?;
    let pack = data.file();
    let entry = pack.entry(ofs);
    let obj = pack
        .decode_entry(
            entry,
            buf,
            // Delta bases are resolved via the same index, in-pack only.
            |id, _| idx.ofs(id).map(|ofs| ResolvedBase::InPack(pack.entry(ofs))),
            cache,
        )
        .map(move |out| Object {
            kind: out.kind,
            data: buf.as_slice(),
            pack_location: None,
        })?;

    Ok(obj)
}
+

+
/// Scan `pack_dir` for `*.idx` files and open them, newest (by mtime)
/// first. A missing directory yields an empty vector, not an error.
fn discover(pack_dir: impl AsRef<Path>) -> Result<im::Vector<Arc<pack::Index>>, error::Discover> {
    let pack_dir = pack_dir.as_ref();
    let pack_dir_disp = pack_dir.display();
    trace!("discovering packs at {}", pack_dir_disp);
    match fs::read_dir(pack_dir) {
        Ok(iter) => {
            let mut paths = Vec::new();
            for entry in iter {
                let entry = entry?;
                let path = entry.path();
                trace!("{}", path.display());
                let meta = entry.metadata()?;
                // Only regular files with an `.idx` extension are indices.
                if meta.file_type().is_file() && path.extension().unwrap_or_default() == "idx" {
                    let mtime = meta.modified()?;
                    paths.push((path, mtime));
                }
            }
            // Sort oldest-first, then reverse below so the newest index
            // ends up at the front of the vector.
            paths.sort_by(|(_, mtime_a), (_, mtime_b)| mtime_a.cmp(mtime_b));

            let indices = paths
                .into_iter()
                .rev()
                .map(|(path, _)| Ok(pack::Index::open(path).map(Arc::new)?))
                .collect::<Result<_, error::Discover>>()?;

            Ok(indices)
        },
        // It's not an error if the directory doesn't exist, the repository
        // could contain only loose objects
        Err(e) if e.kind() == io::ErrorKind::NotFound => {
            trace!("not a directory: {}", pack_dir_disp);
            Ok(im::Vector::new())
        },
        Err(e) => Err(e.into()),
    }
}
+

+
/// [`Index`] is implemented by forwarding to the inherent (private)
/// two-pass methods above.
impl<M> Index for Shared<M>
where
    M: Metrics,
{
    fn contains(&self, id: impl AsRef<oid>) -> bool {
        self.contains(id)
    }

    fn lookup<'a, F, E>(
        &self,
        pack_cache: F,
        id: impl AsRef<oid>,
        buf: &'a mut Vec<u8>,
        cache: &mut impl DecodeEntry,
    ) -> Result<Option<Object<'a>>, error::Lookup<E>>
    where
        F: FnOnce(&pack::Info) -> Result<Arc<pack::Data>, E>,
    {
        self.lookup(pack_cache, id, buf, cache)
    }
}
added archived/link-git/src/odb/index/metrics.rs
@@ -0,0 +1,80 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use std::sync::atomic::{AtomicUsize, Ordering};
+

+
/// Point-in-time snapshot of [`Stats`].
pub struct StatsView {
    /// Total number of times a lookup was successful.
    pub hits: usize,
    /// Total number of times a lookup was unsuccessful.
    pub misses: usize,
    /// Total number of times an index was added explicitly via
    /// [`super::Shared::push`].
    pub pushes: usize,
    /// Total number of reloads via [`super::Shared::reload`].
    pub reloads: usize,
    /// Number of [`crate::odb::pack::Index`]es currently held.
    pub indices: usize,
}

/// Atomic counters backing [`StatsView`]; safe to update concurrently.
#[derive(Default)]
pub struct Stats {
    hits: AtomicUsize,
    misses: AtomicUsize,
    pushes: AtomicUsize,
    reloads: AtomicUsize,
}

/// Sink for index events. Implemented by [`Stats`] (counting) and by `()`
/// (no-op, zero cost).
pub trait Metrics {
    /// What [`Metrics::snapshot`] produces.
    type Snapshot;

    fn record_hit(&self);
    fn record_miss(&self);
    fn record_push(&self);
    fn record_reload(&self);

    /// Produce a snapshot; `indices` is the current index count, supplied
    /// by the caller.
    fn snapshot(&self, indices: usize) -> Self::Snapshot;
}
+

+
impl Metrics for Stats {
    type Snapshot = StatsView;

    // Counters are monotonic and independent, so Relaxed ordering is used
    // throughout.
    fn record_hit(&self) {
        self.hits.fetch_add(1, Ordering::Relaxed);
    }

    fn record_miss(&self) {
        self.misses.fetch_add(1, Ordering::Relaxed);
    }

    fn record_push(&self) {
        self.pushes.fetch_add(1, Ordering::Relaxed);
    }

    fn record_reload(&self) {
        self.reloads.fetch_add(1, Ordering::Relaxed);
    }

    fn snapshot(&self, indices: usize) -> Self::Snapshot {
        StatsView {
            hits: self.hits.load(Ordering::Relaxed),
            misses: self.misses.load(Ordering::Relaxed),
            pushes: self.pushes.load(Ordering::Relaxed),
            reloads: self.reloads.load(Ordering::Relaxed),
            indices,
        }
    }
}

/// No-op metrics: every record is discarded and the snapshot is `()`.
impl Metrics for () {
    type Snapshot = ();

    fn record_hit(&self) {}
    fn record_miss(&self) {}
    fn record_push(&self) {}
    fn record_reload(&self) {}

    fn snapshot(&self, _: usize) -> Self::Snapshot {}
}
added archived/link-git/src/odb/pack.rs
@@ -0,0 +1,138 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use std::{
+
    path::{Path, PathBuf},
+
    sync::atomic::{AtomicUsize, Ordering},
+
};
+

+
use git_hash::{oid, ObjectId};
+
use git_pack::{data, index};
+
use rustc_hash::FxHasher;
+
use tracing::warn;
+

+
/// Error types for opening pack data and index files.
pub mod error {
    use super::*;
    use thiserror::Error;

    /// Opening/parsing the `.pack` data file failed.
    #[derive(Debug, Error)]
    #[error("failed to load pack data from {path:?}")]
    pub struct Data {
        pub path: PathBuf,
        pub source: data::header::decode::Error,
    }

    /// Opening/parsing the `.idx` index file failed.
    #[derive(Debug, Error)]
    #[error("failed to load pack index from {path:?}")]
    pub struct Index {
        pub path: PathBuf,
        pub source: index::init::Error,
    }
}
+

+
/// An open pack data file, tagged with the hash of its [`Info`] and an
/// access counter used by cache eviction.
pub struct Data {
    /// Identity hash, copied from [`Info::hash`].
    pub hash: u64,
    // Access counter; bumped via `hit`, read via `hits`.
    hits: AtomicUsize,
    file: data::File,
}

impl Data {
    /// Record one access to this pack (Relaxed counter increment).
    pub fn hit(&self) {
        self.hits.fetch_add(1, Ordering::Relaxed);
    }

    /// Number of recorded accesses so far.
    pub fn hits(&self) -> usize {
        self.hits.load(Ordering::Relaxed)
    }

    /// The underlying gitoxide pack data file.
    pub fn file(&self) -> &data::File {
        &self.file
    }
}

impl AsRef<data::File> for Data {
    fn as_ref(&self) -> &data::File {
        self.file()
    }
}
+

+
/// Identity of a pack: a stable hash plus the path of its `.pack` file.
/// Cheap to clone; the data file is only opened via [`Info::data`].
#[derive(Clone, PartialEq, Eq)]
pub struct Info {
    pub(super) hash: u64,
    /// Path to the `.pack` data file belonging to this pack.
    pub data_path: PathBuf,
}

impl Info {
    /// Open the `.pack` file this [`Info`] points at, with a fresh hit
    /// counter.
    pub fn data(&self) -> Result<Data, error::Data> {
        let file = data::File::at(&self.data_path).map_err(|source| error::Data {
            path: self.data_path.clone(),
            source,
        })?;
        Ok(Data {
            hash: self.hash,
            hits: AtomicUsize::new(0),
            file,
        })
    }
}
+

+
pub struct Index {
+
    pub info: Info,
+
    file: index::File,
+
}
+

+
impl Index {
+
    pub fn open(path: impl AsRef<Path>) -> Result<Self, error::Index> {
+
        let path = path.as_ref();
+
        let file = index::File::at(path).map_err(|source| error::Index {
+
            path: path.to_path_buf(),
+
            source,
+
        })?;
+
        let data_path = path.with_extension("pack");
+
        let hash = {
+
            let file_name = path
+
                .file_name()
+
                .expect("must have a file name, we opened it")
+
                .to_string_lossy();
+
            // XXX: inexplicably, gitoxide omits the "pack-" prefix
+
            let sha_hex = file_name.strip_prefix("pack-").unwrap_or(&file_name);
+
            match ObjectId::from_hex(&sha_hex.as_bytes()[..40]) {
+
                Err(e) => {
+
                    warn!(
+
                        "unconventional pack name {:?}, falling back to fxhash: {}",
+
                        path, e
+
                    );
+
                    hash(path)
+
                },
+
                Ok(oid) => {
+
                    let mut buf = [0u8; 8];
+
                    buf.copy_from_slice(&oid.sha1()[..8]);
+
                    u64::from_be_bytes(buf)
+
                },
+
            }
+
        };
+
        let info = Info { hash, data_path };
+

+
        Ok(Self { file, info })
+
    }
+

+
    pub fn contains(&self, id: impl AsRef<oid>) -> bool {
+
        self.file.lookup(id).is_some()
+
    }
+

+
    pub fn ofs(&self, id: impl AsRef<oid>) -> Option<u64> {
+
        self.file
+
            .lookup(id)
+
            .map(|idx| self.file.pack_offset_at_index(idx))
+
    }
+
}
+

+
fn hash(p: &Path) -> u64 {
+
    use std::hash::{Hash as _, Hasher as _};
+

+
    let mut hasher = FxHasher::default();
+
    p.hash(&mut hasher);
+
    hasher.finish()
+
}
added archived/link-git/src/odb/window.rs
@@ -0,0 +1,183 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use std::sync::Arc;
+

+
use arc_swap::{ArcSwap, Guard};
+
use parking_lot::Mutex;
+

+
use super::pack;
+

+
mod metrics;
+
pub use metrics::{Metrics, Stats, StatsView};
+

+
/// A threadsafe, shareable cache of packfiles.
pub trait Cache {
    /// Statistics snapshot type returned by [`Cache::stats`].
    type Stats;

    /// Return a point-in-time snapshot of the cache statistics.
    fn stats(&self) -> Self::Stats;

    /// Look up the pack data described by `info`, loading it from disk on a
    /// cache miss.
    fn get(&self, info: &pack::Info) -> Result<Arc<pack::Data>, pack::error::Data>;
}
+

+
// `Fixed` implements `Cache` by forwarding to its inherent methods of the
// same name (defined below).
impl<M, const B: usize, const S: usize> Cache for Fixed<M, B, S>
where
    M: Metrics,
{
    type Stats = M::Snapshot;

    fn stats(&self) -> Self::Stats {
        // Resolves to the inherent `Fixed::stats`.
        self.stats()
    }

    fn get(&self, info: &pack::Info) -> Result<Arc<pack::Data>, pack::error::Data> {
        // Resolves to the inherent `Fixed::get`.
        self.get(info)
    }
}
+

+
/// 128 open files (16 buckets × 8 slots)
pub type Small<S> = Fixed<S, 16, 8>;
/// 512 open files (32 buckets × 16 slots)
pub type Medium<S> = Fixed<S, 32, 16>;
/// 1024 open files (64 buckets × 16 slots)
pub type Large<S> = Fixed<S, 64, 16>;
/// 2048 open files (128 buckets × 16 slots)
pub type XLarge<S> = Fixed<S, 128, 16>;
+

+
/// A fixed-size [`Cache`].
///
/// [`Fixed`] is essentially a very simple, fixed-capacity hashtable. When a
/// pack (data-) file is requested via [`Cache::get`], the file is loaded
/// (typically `mmap`ed) from disk if it is not already in the cache. Otherwise,
/// a pointer to the already loaded file is returned. Old entries are replaced
/// on an approximate LRU basis when the cache becomes full (this means that old
/// entries are **not** evicted when there is still space).
///
/// The implementation is a somewhat dumbed-down version of JGit's
/// `WindowCache`. The main differences are that the table buckets are of fixed
/// size (`SLOTS`), instead of a linked list. This means that the cache does not
/// allow to (temporarily) commit more entries than its nominal capacity.
///
/// Reading cached values is lock-free and mostly wait-free. Modifications are
/// guarded by locks on individual buckets; if a cache miss occurs, multiple
/// threads requesting the same entry will be blocked until one of them
/// succeeded loading the data and updating the cache. Writers will _not_,
/// however, contend with readers (unlike `RwLock`).
///
/// This favours usage patterns where different threads tend to request disjoint
/// sets of packfiles, and of course their hashes colliding relatively
/// infrequently.
pub struct Fixed<M, const BUCKETS: usize, const SLOTS: usize> {
    /// The table: `BUCKETS` buckets of `SLOTS` slots each. A whole bucket is
    /// replaced atomically on modification (read-copy-update via `ArcSwap`),
    /// so readers never block.
    entries: [ArcSwap<[Option<Arc<pack::Data>>; SLOTS]>; BUCKETS],
    /// One writer lock per bucket; readers never take these.
    locks: [Mutex<()>; BUCKETS],
    /// Pluggable metrics collector (`()` for no-op).
    stats: M,
}
+

+
// Compile-time assertion that `Fixed` is `Send + Sync` whenever its metrics
// collector is — the impl only typechecks if that holds.
trait AssertSendSync: Send + Sync {}
impl<M, const B: usize, const S: usize> AssertSendSync for Fixed<M, B, S> where M: Send + Sync {}
+

+
// Identity `AsRef`, so APIs can be generic over `impl AsRef<Fixed<..>>` and
// accept either the cache itself or pointers to it.
impl<M, const B: usize, const S: usize> AsRef<Fixed<M, B, S>> for Fixed<M, B, S> {
    fn as_ref(&self) -> &Fixed<M, B, S> {
        self
    }
}
+

+
// `Default` is only provided for the metric-less cache; opt into metrics
// afterwards via `with_stats` / `with_metrics`.
impl<const B: usize, const S: usize> Default for Fixed<(), B, S> {
    fn default() -> Self {
        Self {
            // `[(); N].map(..)` initialises the const-generic arrays without
            // requiring the element type to be `Copy` or `Default`.
            entries: [(); B].map(|_| ArcSwap::new(Arc::new([(); S].map(|_| None)))),
            locks: [(); B].map(|_| Mutex::new(())),
            stats: (),
        }
    }
}
+

+
impl<M, const B: usize, const S: usize> Fixed<M, B, S>
where
    M: Metrics,
{
    /// Equip this cache with the default [`Stats`] collector.
    pub fn with_stats(self) -> Fixed<Stats, B, S> {
        self.with_metrics(Stats::default())
    }

    /// Swap the metrics collector, keeping all cached entries and locks.
    pub fn with_metrics<N: Metrics>(self, m: N) -> Fixed<N, B, S> {
        Fixed {
            entries: self.entries,
            locks: self.locks,
            stats: m,
        }
    }

    /// Snapshot the collected metrics.
    ///
    /// `open_files` is computed on the fly by counting occupied slots across
    /// all buckets.
    pub fn stats(&self) -> M::Snapshot {
        let open_files = self
            .entries
            .iter()
            .map(|bucket| bucket.load().iter().flatten().count())
            .sum();
        self.stats.snapshot(open_files)
    }

    /// Look up `info` in the cache, loading the pack data from disk on a miss.
    ///
    /// The read path is lock-free; on a miss the bucket lock is taken and the
    /// bucket re-checked, so concurrent requests for the same pack load it
    /// only once. When the bucket is full, the entry with the fewest recorded
    /// hits is replaced (approximate LRU).
    pub fn get(&self, info: &pack::Info) -> Result<Arc<pack::Data>, pack::error::Data> {
        // Bucket selection by pack hash; collisions merely share a bucket,
        // the `entry.hash == info.hash` comparison below decides identity.
        let idx = info.hash as usize % self.entries.len();

        // Fast path: lock-free read of the current bucket snapshot.
        let bucket = self.entries[idx].load();
        for entry in bucket.iter().flatten() {
            if entry.hash == info.hash {
                self.stats.record_hit();
                entry.hit();
                return Ok(Arc::clone(entry));
            }
        }
        drop(bucket);

        self.stats.record_miss();

        // Cache miss, try to load the data file
        let lock = self.locks[idx].lock();
        // Did someone else win the race for the lock?
        let bucket = self.entries[idx].load();
        for entry in bucket.iter().flatten() {
            if entry.hash == info.hash {
                self.stats.record_hit();
                entry.hit();
                return Ok(Arc::clone(entry));
            }
        }
        // No, proceed
        self.stats.record_load();
        let data = Arc::new(info.data()?);

        // Find an empty slot, or swap with the least popular
        let mut access = usize::MAX;
        let mut evict = 0;
        for (i, e) in bucket.iter().enumerate() {
            match e {
                Some(entry) => {
                    let hits = entry.hits();
                    if hits < access {
                        access = hits;
                        evict = i;
                    }
                },
                None => {
                    // An empty slot always wins over evicting a live entry.
                    evict = i;
                    break;
                },
            }
        }
        let mut entries = Guard::into_inner(bucket);
        {
            // This costs `SLOTS` refcount increments if the slot is currently
            // borrowed.
            let mutti = Arc::make_mut(&mut entries);
            mutti[evict] = Some(Arc::clone(&data));
        }
        // Publish the modified bucket before releasing the writer lock.
        self.entries[idx].store(entries);
        drop(lock);

        data.hit();
        Ok(data)
    }
}
added archived/link-git/src/odb/window/metrics.rs
@@ -0,0 +1,79 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use std::sync::atomic::{AtomicUsize, Ordering};
+

+
use tracing::trace;
+

+
/// Point-in-time view of the counters collected by [`Stats`].
pub struct StatsView {
    /// Total number of times the requested data was found in the cache.
    pub cache_hits: usize,
    /// Total number of times the requested data was not found in the cache.
    ///
    /// Note that a cache hit can occur after a miss if another thread was
    /// faster to fill in the missing entry. Thus, `cache_hits + cache_misses`
    /// does not necessarily sum up to the number of cache accesses.
    pub cache_misses: usize,
    /// Total number of times a pack file was attempted to be loaded from disk
    /// (incl. failed attempts).
    pub file_loads: usize,
    /// Total number of pack files the cache holds on to.
    pub open_files: usize,
}
+

+
/// Atomic counters backing the default [`Metrics`] implementation.
#[derive(Default)]
pub struct Stats {
    // Cache hits, cf. `StatsView::cache_hits`.
    hits: AtomicUsize,
    // Cache misses, cf. `StatsView::cache_misses`.
    miss: AtomicUsize,
    // Pack file load attempts, cf. `StatsView::file_loads`.
    load: AtomicUsize,
}
+

+
/// Hooks for recording cache events, making metrics collection pluggable
/// (implemented by `()` as a no-op).
pub trait Metrics {
    /// Type returned by [`Metrics::snapshot`].
    type Snapshot;

    /// Called on every cache hit.
    fn record_hit(&self);
    /// Called on every cache miss.
    fn record_miss(&self);
    /// Called whenever a pack file load is attempted.
    fn record_load(&self);

    /// Produce a point-in-time view; `open_files` is supplied by the cache.
    fn snapshot(&self, open_files: usize) -> Self::Snapshot;
}
+

+
// Default collector: relaxed atomic counters plus `trace!` log lines.
impl Metrics for Stats {
    type Snapshot = StatsView;

    fn record_hit(&self) {
        trace!("cache hit");
        self.hits.fetch_add(1, Ordering::Relaxed);
    }

    fn record_miss(&self) {
        trace!("cache miss");
        self.miss.fetch_add(1, Ordering::Relaxed);
    }

    fn record_load(&self) {
        trace!("pack load");
        self.load.fetch_add(1, Ordering::Relaxed);
    }

    fn snapshot(&self, open_files: usize) -> Self::Snapshot {
        // Relaxed loads suffice: the counters are independent and the view
        // makes no cross-counter consistency promise.
        StatsView {
            cache_hits: self.hits.load(Ordering::Relaxed),
            cache_misses: self.miss.load(Ordering::Relaxed),
            file_loads: self.load.load(Ordering::Relaxed),
            open_files,
        }
    }
}
+

+
// No-op metrics: recording costs nothing and the snapshot is `()`.
impl Metrics for () {
    type Snapshot = ();

    fn record_hit(&self) {}
    fn record_miss(&self) {}
    fn record_load(&self) {}

    fn snapshot(&self, _: usize) -> Self::Snapshot {}
}
added archived/link-git/src/protocol.rs
@@ -0,0 +1,30 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use bstr::ByteSlice as _;
+
use git_protocol::transport::client;
+
use versions::Version;
+

+
pub mod fetch;
+
pub mod ls;
+
pub mod packwriter;
+
pub mod take;
+
pub mod transport;
+
pub mod upload_pack;
+

+
pub use fetch::{fetch, Ref};
+
pub use ls::ls_refs;
+
pub use packwriter::PackWriter;
+
pub use upload_pack::upload_pack;
+

+
pub use git_hash::{oid, ObjectId};
+

+
fn remote_git_version(caps: &client::Capabilities) -> Option<Version> {
+
    let agent = caps.capability("agent").and_then(|cap| {
+
        cap.value()
+
            .and_then(|bs| bs.to_str().map(|s| s.to_owned()).ok())
+
    })?;
+
    Version::new(agent.strip_prefix("git/")?)
+
}
added archived/link-git/src/protocol/fetch.rs
@@ -0,0 +1,290 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use std::{
+
    future::Future,
+
    io,
+
    mem,
+
    pin::Pin,
+
    sync::{
+
        atomic::{AtomicBool, Ordering},
+
        Arc,
+
    },
+
    task::{Context, Poll},
+
};
+

+
use bstr::{BString, ByteSlice as _};
+
use futures_lite::{
+
    future,
+
    io::{AsyncBufRead, AsyncRead, AsyncWrite},
+
};
+
use git_features::progress::{self, Progress};
+
use git_protocol::{
+
    fetch::{response, Action, Arguments, Delegate, DelegateBlocking, LsRefsAction, Response},
+
    transport::client,
+
};
+
use once_cell::sync::Lazy;
+
use pin_project::{pin_project, pinned_drop};
+
use versions::Version;
+

+
pub use git_hash::ObjectId;
+
pub use git_protocol::fetch::Ref;
+

+
use super::{packwriter::PackWriter, remote_git_version, transport};
+

+
// Work around `git-upload-pack` not handling namespaces properly,
+
//
+
// cf. https://lore.kernel.org/git/CD2XNXHACAXS.13J6JTWZPO1JA@schmidt/
+
// Fixed in `git.git` 1ab13eb, which should land in 2.34
+
//
+
// Based on testing with git 2.25.1 in Ubuntu 20.04, this workaround is
+
// not needed. Hence the checked version is lowered to 2.25.0.
+
fn must_namespace_want_ref(caps: &client::Capabilities) -> bool {
+
    static FIXED_AFTER: Lazy<Version> = Lazy::new(|| Version::new("2.25.0").unwrap());
+

+
    remote_git_version(caps)
+
        .map(|version| version <= *FIXED_AFTER)
+
        .unwrap_or(false)
+
}
+

+
/// Options for [`fetch`].
#[derive(Debug)]
pub struct Options {
    /// The remote (logical) repository to fetch from.
    ///
    /// Normally, this is the path to a repo on the remote side (eg.
    /// `/git.git`). `radicle-link` serves only a single namespaced repo, so
    /// this value should be the name of a namespace.
    pub repo: BString,

    /// [Extra Parameters][extra] to send with the initial transport header.
    ///
    /// [extra]: https://git.kernel.org/pub/scm/git/git.git/tree/Documentation/technical/pack-protocol.txt#n52
    pub extra_params: Vec<(String, Option<String>)>,

    /// [`ObjectId`]s to send as `want` lines.
    pub wants: Vec<ObjectId>,

    /// [`ObjectId`]s to send as `have` lines.
    pub haves: Vec<ObjectId>,

    /// Known refs to ask the server to include in the packfile.
    pub want_refs: Vec<BString>,
}
+

+
/// Result of a successful [`fetch`].
#[derive(Debug)]
pub struct Outputs<T> {
    /// The `wanted-refs` as acknowledged by the server.
    pub wanted_refs: Vec<Ref>,
    /// If a packfile was received successfully, some info about it.
    pub pack: Option<T>,
}
+

+
impl<T> Default for Outputs<T> {
+
    fn default() -> Self {
+
        Self {
+
            wanted_refs: Vec::new(),
+
            pack: None,
+
        }
+
    }
+
}
+

+
/// [`Delegate`] driving the fetch end of the [pack protocol].
///
/// [pack protocol]: https://git.kernel.org/pub/scm/git/git.git/tree/Documentation/technical/pack-protocol.txt
pub struct Fetch<P, O> {
    // What to want / have and which namespace to fetch from.
    opt: Options,
    // Sink for the received packfile.
    pack_writer: P,
    // Accumulated results, retrievable via `outputs` / `take_outputs`.
    out: Outputs<O>,
    // Set in `prepare_fetch` based on the server's git version.
    need_namespaced_want_ref: bool,
}
+

+
impl<P, O> Fetch<P, O> {
+
    pub fn new(opt: Options, pack_writer: P) -> Self {
+
        Self {
+
            opt,
+
            pack_writer,
+
            out: Outputs::default(),
+
            need_namespaced_want_ref: false,
+
        }
+
    }
+

+
    pub fn outputs(&self) -> &Outputs<O> {
+
        &self.out
+
    }
+

+
    pub fn take_outputs(&mut self) -> Outputs<O> {
+
        mem::take(&mut self.out)
+
    }
+
}
+

+
impl<P: PackWriter> DelegateBlocking for Fetch<P, P::Output> {
    fn handshake_extra_parameters(&self) -> Vec<(String, Option<String>)> {
        self.opt.extra_params.clone()
    }

    fn prepare_ls_refs(
        &mut self,
        _: &client::Capabilities,
        _: &mut Vec<BString>,
        _: &mut Vec<(&str, Option<&str>)>,
    ) -> io::Result<LsRefsAction> {
        // This delegate only fetches; ref listing is a separate command
        // (cf. `super::ls`), so skip the `ls-refs` round-trip.
        Ok(LsRefsAction::Skip)
    }

    fn prepare_fetch(
        &mut self,
        _: git_protocol::transport::Protocol,
        caps: &client::Capabilities,
        _: &mut Vec<(&str, Option<&str>)>,
        _: &[Ref],
    ) -> io::Result<Action> {
        // `want-ref` lines are only valid if the server advertises
        // `ref-in-want`; fail early instead of sending an invalid request.
        if !self.opt.want_refs.is_empty() && !remote_supports_ref_in_want(caps) {
            return Err(io::Error::new(
                io::ErrorKind::Unsupported,
                "`want-ref`s given, but server does not support `ref-in-want`",
            ));
        }

        // Asking for nothing at all is a caller error.
        if self.opt.wants.is_empty() && self.opt.want_refs.is_empty() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "`fetch` is empty",
            ));
        }

        // Decide, from the server's git version, whether `want-ref`s must be
        // namespace-prefixed (cf. `must_namespace_want_ref`).
        self.need_namespaced_want_ref = must_namespace_want_ref(caps);

        Ok(Action::Continue)
    }

    fn negotiate(
        &mut self,
        _: &[Ref],
        args: &mut Arguments,
        _: Option<&Response>,
    ) -> io::Result<Action> {
        for oid in &self.opt.wants {
            args.want(oid);
        }

        for oid in &self.opt.haves {
            args.have(oid)
        }

        for name in &self.opt.want_refs {
            if self.need_namespaced_want_ref {
                // Old servers resolve `want-ref` outside the namespace, so
                // spell out the full namespaced ref name.
                let want_ref = format!("refs/namespaces/{}/{}", self.opt.repo, name);
                args.want_ref(BString::from(want_ref).as_bstr());
            } else {
                args.want_ref(name.as_bstr());
            }
        }

        // send done, as we don't bother with further negotiation
        Ok(Action::Cancel)
    }
}
+

+
// NOTE(review): `#[async_trait]` is used here but no `use async_trait::…`
// is visible among this file's imports — presumably it is in scope via the
// crate root; verify this compiles as-is.
#[async_trait(?Send)]
impl<P: PackWriter> Delegate for Fetch<P, P::Output> {
    async fn receive_pack(
        &mut self,
        pack: impl AsyncBufRead + Unpin + 'async_trait,
        prog: impl Progress,
        _: &[Ref],
        resp: &Response,
    ) -> io::Result<()> {
        // Strip any namespaces leaked by the other end due to workarounds
        let namespace = format!("refs/namespaces/{}/", self.opt.repo);
        self.out.wanted_refs.extend(resp.wanted_refs().iter().map(
            |response::WantedRef { id, path }| {
                Ref::Direct {
                    path: path
                        .strip_prefix(namespace.as_bytes())
                        .map(BString::from)
                        .unwrap_or_else(|| path.clone()),
                    object: *id,
                }
            },
        ));
        // Stream the packfile into the configured sink and remember the
        // sink's summary of what was written.
        let out = self.pack_writer.write_pack(pack, prog)?;
        self.out.pack = Some(out);

        Ok(())
    }
}
+

+
/// Future created by the [`fetch`] function.
///
/// Ensures that a running inner [`PackWriter`] is cancelled when the
/// [`Fetching`] future is dropped without also dropping the [`AsyncRead`] data
/// source.
#[pin_project(PinnedDrop)]
struct Fetching<T> {
    // Cancellation flag shared with the `PackWriter` built in `fetch`.
    stop: Arc<AtomicBool>,
    // The background task driving the protocol.
    #[pin]
    task: T,
}
+

+
#[pinned_drop]
impl<T> PinnedDrop for Fetching<T> {
    fn drop(self: Pin<&mut Self>) {
        // Signal the pack writer to cancel if the future is dropped mid-fetch.
        self.stop.store(true, Ordering::Release)
    }
}
+

+
impl<T> Future for Fetching<T>
+
where
+
    T: Future,
+
{
+
    type Output = T::Output;
+

+
    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
+
        self.project().task.poll(cx)
+
    }
+
}
+

+
/// Run the fetch end of the pack protocol over the `recv` / `send` pair.
///
/// The protocol is driven on a background thread via `blocking::unblock`;
/// the returned future resolves to the accumulated [`Outputs`]. Dropping the
/// future sets the stop flag handed to `build_pack_writer`, giving the pack
/// writer a chance to cancel itself.
pub fn fetch<B, P, R, W>(
    opt: Options,
    build_pack_writer: B,
    recv: R,
    send: W,
) -> impl Future<Output = io::Result<Outputs<P::Output>>>
where
    B: FnOnce(Arc<AtomicBool>) -> P,
    P: PackWriter + Send + 'static,
    P::Output: Send + 'static,
    R: AsyncRead + Unpin + Send + 'static,
    W: AsyncWrite + Unpin + Send + 'static,
{
    let stop = Arc::new(AtomicBool::new(false));
    let task = blocking::unblock({
        let mut conn = transport::Stateless::new(opt.repo.clone(), recv, send);
        let pack_writer = build_pack_writer(Arc::clone(&stop));

        move || {
            let mut delegate = Fetch::new(opt, pack_writer);
            // Drive the async protocol to completion from the blocking
            // thread; the connection stays open (`AllowReuse`).
            future::block_on(git_protocol::fetch(
                &mut conn,
                &mut delegate,
                |_| unreachable!("credentials helper requested"),
                progress::Discard,
                git_protocol::FetchConnection::AllowReuse,
            ))
            .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;

            Ok(delegate.out)
        }
    });

    Fetching { stop, task }
}
+

+
fn remote_supports_ref_in_want(caps: &client::Capabilities) -> bool {
+
    caps.capability("fetch")
+
        .and_then(|cap| cap.supports("ref-in-want"))
+
        .unwrap_or(false)
+
}
added archived/link-git/src/protocol/ls.rs
@@ -0,0 +1,151 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use std::io;
+

+
use bstr::{BString, ByteVec as _};
+
use futures_lite::io::{AsyncBufRead, AsyncRead, AsyncWrite};
+
use git_features::progress::{self, Progress};
+
use git_protocol::{
+
    fetch::{Action, Arguments, Delegate, DelegateBlocking, LsRefsAction, Response},
+
    transport::client,
+
};
+
use once_cell::sync::Lazy;
+
use versions::Version;
+

+
pub use git_protocol::fetch::Ref;
+

+
use super::{remote_git_version, transport};
+

+
// Work around `git-upload-pack` not handling namespaces properly
+
//
+
// cf. https://lore.kernel.org/git/pMV5dJabxOBTD8kJBaPuWK0aS6OJhRQ7YFGwfhPCeSJEbPDrIFBza36nXBCgUCeUJWGmpjPI1rlOGvZJEh71Ruz4SqljndUwOCoBUDRHRDU=@eagain.st/
+
//
+
// Based on testing with git 2.25.1 in Ubuntu 20.04, this workaround is
+
// not needed. Hence the checked version is lowered to 2.25.0.
+
fn must_namespace(caps: &client::Capabilities) -> bool {
+
    static MIN_GIT_VERSION_NAMESPACES: Lazy<Version> =
+
        Lazy::new(|| Version::new("2.25.0").unwrap());
+

+
    remote_git_version(caps)
+
        .map(|version| version < *MIN_GIT_VERSION_NAMESPACES)
+
        .unwrap_or(false)
+
}
+

+
/// Options for [`ls_refs`].
#[derive(Debug)]
pub struct Options {
    /// The remote (logical) repository to fetch from.
    ///
    /// Normally, this is the path to a repo on the remote side (eg.
    /// `/git.git`). `radicle-link` serves only a single namespaced repo, so
    /// this value should be the name of a namespace.
    pub repo: BString,

    /// [Extra Parameters][extra] to send with the initial transport header.
    ///
    /// [extra]: https://git.kernel.org/pub/scm/git/git.git/tree/Documentation/technical/pack-protocol.txt#n52
    pub extra_params: Vec<(String, Option<String>)>,

    /// Prefixes of refs to ask the server to advertise via `ls-refs`.
    ///
    /// If the [`Vec`] is empty, the server is asked to return all refs it knows
    /// about. Otherwise, the server is asked to only return refs matching
    /// the given prefixes.
    pub ref_prefixes: Vec<BString>,
}
+

+
/// [`Delegate`] for running a stateless `ls-refs` command.
pub struct LsRefs {
    // Options the command was created with.
    opt: Options,
    // Refs advertised by the server, collected in `prepare_fetch`.
    out: Vec<Ref>,
}
+

+
impl LsRefs {
+
    pub fn new(opt: Options) -> Self {
+
        Self {
+
            opt,
+
            out: Vec::new(),
+
        }
+
    }
+
}
+

+
impl DelegateBlocking for LsRefs {
    fn handshake_extra_parameters(&self) -> Vec<(String, Option<String>)> {
        self.opt.extra_params.clone()
    }

    fn prepare_ls_refs(
        &mut self,
        caps: &client::Capabilities,
        args: &mut Vec<BString>,
        _: &mut Vec<(&str, Option<&str>)>,
    ) -> io::Result<LsRefsAction> {
        // Old servers don't resolve ref prefixes relative to the namespace,
        // so prepend `refs/namespaces/<repo>/` ourselves when needed
        // (cf. `must_namespace`).
        let must_namespace = must_namespace(caps);
        for prefix in &self.opt.ref_prefixes {
            let mut arg = BString::from("ref-prefix ");
            if must_namespace {
                arg.push_str("refs/namespaces/");
                arg.push_str(&self.opt.repo);
                arg.push_char('/');
            }
            arg.push_str(prefix);
            args.push(arg)
        }
        Ok(LsRefsAction::Continue)
    }

    fn prepare_fetch(
        &mut self,
        _: git_protocol::transport::Protocol,
        _: &client::Capabilities,
        _: &mut Vec<(&str, Option<&str>)>,
        refs: &[Ref],
    ) -> io::Result<Action> {
        // We only wanted the advertisement: record the refs and cancel
        // before any `fetch` command is sent.
        self.out.extend_from_slice(refs);
        Ok(Action::Cancel)
    }

    fn negotiate(
        &mut self,
        _: &[Ref],
        _: &mut Arguments,
        _: Option<&Response>,
    ) -> io::Result<Action> {
        unreachable!("`negotiate` called even though no `fetch` command was sent")
    }
}
+

+
// NOTE(review): `#[async_trait]` is used without a visible import in this
// file — presumably in scope via the crate root; verify.
#[async_trait(?Send)]
impl Delegate for LsRefs {
    async fn receive_pack(
        &mut self,
        _: impl AsyncBufRead + Unpin + 'async_trait,
        _: impl Progress,
        _: &[Ref],
        _: &Response,
    ) -> io::Result<()> {
        // `prepare_fetch` always cancels, so no pack can ever arrive.
        unreachable!("`receive_pack` called even though no `fetch` command was sent")
    }
}
+

+
/// Run a stateless `ls-refs` against the remote reachable via the `recv` /
/// `send` pair, returning the refs the server advertised.
///
/// # Errors
///
/// Protocol and transport failures are mapped to [`io::Error`] (kind `Other`).
pub async fn ls_refs<R, W>(opt: Options, recv: R, send: W) -> io::Result<Vec<Ref>>
where
    R: AsyncRead + Unpin,
    W: AsyncWrite + Unpin,
{
    let mut conn = transport::Stateless::new(opt.repo.clone(), recv, send);
    let mut delegate = LsRefs::new(opt);
    // The delegate cancels after the advertisement, so this performs only
    // the handshake + `ls-refs` round-trip; the connection is left reusable.
    git_protocol::fetch(
        &mut conn,
        &mut delegate,
        |_| unreachable!("credentials helper requested"),
        progress::Discard,
        git_protocol::FetchConnection::AllowReuse,
    )
    .await
    .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;

    Ok(delegate.out)
}
added archived/link-git/src/protocol/packwriter.rs
@@ -0,0 +1,271 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use std::{
+
    io,
+
    path::{Path, PathBuf},
+
    sync::{
+
        atomic::{AtomicBool, Ordering},
+
        Arc,
+
    },
+
};
+

+
use futures_lite::io::{AsyncBufRead, BlockOn};
+
use git_features::progress::Progress;
+
use git_hash::ObjectId;
+
use git_odb::{self as odb, pack};
+

+
use super::take::TryTake;
+

+
#[cfg(feature = "git2")]
+
pub use libgit::Libgit;
+

+
/// What to do with the `packfile` response.
///
/// _This is mostly the same as [`git_protocol::fetch::Delegate`], but without
/// incurring the [`git_protocol::fetch::DelegateBlocking`] super-trait
/// constraint. We can simply make [`crate::protocol::fetch::Fetch`] parametric
/// over the packfile sink._
pub trait PackWriter {
    /// Summary value describing what was written (eg. indexing statistics).
    type Output;

    /// Consume the packfile stream `pack`, reporting to `progress`.
    ///
    /// # Errors
    ///
    /// Any I/O or indexing failure is surfaced as an [`io::Error`].
    fn write_pack(
        &self,
        pack: impl AsyncBufRead + Unpin,
        progress: impl Progress,
    ) -> io::Result<Self::Output>;
}
+

+
/// Tuning options for [`PackWriter`] implementations.
#[derive(Clone, Copy, Debug)]
pub struct Options {
    /// How many threads the packfile indexer is allowed to spawn. `None` means
    /// unlimited.
    pub max_indexer_threads: Option<usize>,
    /// The maximum size in bytes of the packfile.
    ///
    /// If the remote sends a larger file, the transfer will be aborted.
    pub max_pack_bytes: u64,
}
+

+
impl Default for Options {
+
    fn default() -> Self {
+
        Self {
+
            max_indexer_threads: Some(1),
+
            max_pack_bytes: u64::MAX,
+
        }
+
    }
+
}
+

+
#[cfg(feature = "git2")]
pub mod libgit {
    use super::*;

    /// Indexing statistics reported by libgit2 once the pack was committed.
    #[derive(Clone, Copy, Debug)]
    pub struct PackReceived {
        pub objects: usize,
        pub local_objects: usize,
        pub deltas: usize,
    }

    impl From<git2::Progress<'_>> for PackReceived {
        fn from(p: git2::Progress<'_>) -> Self {
            Self {
                objects: p.indexed_objects(),
                local_objects: p.local_objects(),
                deltas: p.indexed_deltas(),
            }
        }
    }

    /// [`PackWriter`] backed by libgit2's object-database pack writer.
    pub struct Libgit {
        // Transfer limits (only `max_pack_bytes` applies here).
        opt: Options,
        // Repository whose object database receives the pack.
        repo: git2::Repository,
        // Cooperative cancellation flag, checked between blocking phases.
        stop: Arc<AtomicBool>,
    }

    impl Libgit {
        pub fn new(opt: Options, repo: git2::Repository, stop: Arc<AtomicBool>) -> Self {
            Self { opt, repo, stop }
        }

        // Fail with `Interrupted` if cancellation was requested.
        fn guard_cancelled(&self) -> io::Result<()> {
            if self.stop.load(Ordering::Acquire) {
                Err(io::Error::new(io::ErrorKind::Interrupted, "cancelled"))
            } else {
                Ok(())
            }
        }
    }

    impl PackWriter for Libgit {
        type Output = Option<PackReceived>;

        fn write_pack(
            &self,
            pack: impl AsyncBufRead + Unpin,
            _: impl Progress,
        ) -> io::Result<Self::Output> {
            let mut out = None;

            let odb = self.repo.odb().map_err(io_error)?;
            let mut writer = odb.packwriter().map_err(io_error)?;

            // Stream the (size-capped) pack into libgit2's pack writer,
            // checking for cancellation before each blocking phase.
            self.guard_cancelled()?;
            io::copy(
                &mut BlockOn::new(TryTake::new(pack, self.opt.max_pack_bytes)),
                &mut writer,
            )?;

            self.guard_cancelled()?;
            // The progress callback fires during `commit`; capture the last
            // report so it can be returned as this writer's output.
            writer
                .progress(|p| {
                    out = Some(p.to_owned());
                    true
                })
                .commit()
                .map(|_| ())
                .map_err(io_error)?;
            // Convince borrowchk that `out` can not possibly be borrowed anymore
            drop(writer);

            Ok(out.map(Into::into))
        }
    }

    // Wrap a libgit2 error into an `io::Error` of kind `Other`.
    fn io_error(e: git2::Error) -> io::Error {
        io::Error::new(io::ErrorKind::Other, e)
    }
}
+

+
/// Outcome of writing a pack (and its index) via gitoxide.
pub type PackReceived = pack::bundle::write::Outcome;
+

+
/// A lookup function to help "thicken" thin packs by finding missing base
/// objects.
///
/// The impl provided for [`odb::linked::Store`] does not use any pack caching.
pub trait Thickener {
    /// Find the object `id`, decoding it into `buf`; `None` if not present.
    fn find_object<'a>(&self, id: ObjectId, buf: &'a mut Vec<u8>)
        -> Option<pack::data::Object<'a>>;
}
+

+
impl Thickener for odb::linked::Store {
    fn find_object<'a>(
        &self,
        id: ObjectId,
        buf: &'a mut Vec<u8>,
    ) -> Option<pack::data::Object<'a>> {
        use git_odb::FindExt as _;

        // `cache::Never`: no pack caching — base-object lookups are expected
        // to be rare (only when the incoming pack is actually thin).
        self.find(id, buf, &mut pack::cache::Never).ok()
    }
}
+

+
/// A factory spewing out new [`Thickener`]s with static lifetimes.
///
/// `gitoxide` doesn't currently allow us to initialise thickening lazily (the
/// pack file may not be thin after all), but requires a static lookup function.
/// Instead of initialising a new [`odb::linked::Store`] for every pack stream,
/// users may share a pre-initialised object database provided appropriate
/// thread safety measures.
pub trait BuildThickener {
    /// Error when the backing object database cannot be opened.
    type Error: std::error::Error + Send + Sync + 'static;
    /// The thickener type this factory produces.
    type Thick: Thickener + 'static;

    /// Create a fresh [`Thickener`].
    fn build_thickener(&self) -> Result<Self::Thick, Self::Error>;
}
+

+
/// [`BuildThickener`] which opens the object database under `git_dir` anew
/// for every pack stream.
pub struct StandardThickener {
    // Repository directory; objects are looked up in `git_dir/objects`.
    git_dir: PathBuf,
}
+

+
impl StandardThickener {
+
    pub fn new(git_dir: impl Into<PathBuf>) -> Self {
+
        let git_dir = git_dir.into();
+
        Self { git_dir }
+
    }
+
}
+

+
impl BuildThickener for StandardThickener {
    type Error = odb::linked::init::Error;
    type Thick = odb::linked::Store;

    fn build_thickener(&self) -> Result<Self::Thick, Self::Error> {
        // Open a fresh linked object database rooted at `git_dir/objects`.
        odb::linked::Store::at(self.git_dir.join("objects"))
    }
}
+

+
/// The default [`PackWriter`].
///
/// Writes the packfile into the given output directory, along with a v2
/// index. The packfile is verified.
pub struct Standard<F> {
    /// Repository directory; packs land in `git_dir/objects/pack`.
    git_dir: PathBuf,
    /// Indexer and size-limit options.
    opt: Options,
    /// Factory for thin-pack base-object lookup.
    thick: F,
    /// Cooperative cancellation flag; also set by this type's `Drop`.
    stop: Arc<AtomicBool>,
}
+

+
impl<F> Standard<F> {
+
    pub fn new(git_dir: impl AsRef<Path>, opt: Options, thick: F, stop: Arc<AtomicBool>) -> Self {
+
        Self {
+
            git_dir: git_dir.as_ref().to_owned(),
+
            opt,
+
            thick,
+
            stop,
+
        }
+
    }
+
}
+

+
impl<F> Drop for Standard<F> {
    fn drop(&mut self) {
        // Interrupt an in-flight `Bundle::write_to_directory`, which is
        // handed `&self.stop` in `write_pack`.
        self.stop.store(true, Ordering::Release);
    }
}
+

+
impl<F: BuildThickener> PackWriter for Standard<F> {
    type Output = PackReceived;

    fn write_pack(
        &self,
        pack: impl AsyncBufRead + Unpin,
        prog: impl Progress,
    ) -> io::Result<Self::Output> {
        use pack::{bundle::write::Options, data::input::Mode, index::Version, Bundle};

        let opts = Options {
            thread_limit: self.opt.max_indexer_threads,
            index_kind: Version::V2,
            // Verify the pack while indexing it.
            iteration_mode: Mode::Verify,
        };
        // Built eagerly: gitoxide requires a static lookup function even
        // though the incoming pack may not be thin (cf. `BuildThickener`).
        let thickener = self
            .thick
            .build_thickener()
            .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
        // Stream the (size-capped) pack into `objects/pack`, writing the v2
        // index alongside; `self.stop` lets `Drop` interrupt the write.
        Bundle::write_to_directory(
            BlockOn::new(TryTake::new(pack, self.opt.max_pack_bytes)),
            Some(self.git_dir.join("objects").join("pack")),
            prog,
            &self.stop,
            Some(Box::new(move |oid, buf| thickener.find_object(oid, buf))),
            opts,
        )
        .map_err(|e| io::Error::new(io::ErrorKind::Other, e))
    }
}
+

+
/// No-op [`PackWriter`] which just drains the input.
pub struct Discard;
+

+
impl PackWriter for Discard {
+
    type Output = u64;
+

+
    fn write_pack(
+
        &self,
+
        pack: impl AsyncBufRead + Unpin,
+
        _: impl Progress,
+
    ) -> io::Result<Self::Output> {
+
        io::copy(&mut BlockOn::new(pack), &mut io::sink())
+
    }
+
}
added archived/link-git/src/protocol/take.rs
@@ -0,0 +1,76 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use std::{
+
    io,
+
    pin::Pin,
+
    task::{Context, Poll},
+
};
+

+
use futures_lite::io::{AsyncBufRead, AsyncRead};
+

+
/// Like [`futures_lite::io::Take`], but returns an error if and when the
/// `limit` is exceeded.
///
/// Note that, unlike [`futures_lite::io::Take`], if a single poll reads past
/// the limit, the excess bytes are _not_ discarded. Instead, an error is
/// returned on the next poll.
pub struct TryTake<R> {
    // Remaining byte budget; a poll with this at 0 fails.
    limit: u64,
    // The wrapped reader.
    inner: R,
}
+

+
impl<R> TryTake<R> {
+
    pub fn new(inner: R, limit: u64) -> Self {
+
        Self { limit, inner }
+
    }
+
}
+

+
impl<R> AsyncRead for TryTake<R>
+
where
+
    R: AsyncRead + Unpin,
+
{
+
    fn poll_read(
+
        self: Pin<&mut Self>,
+
        cx: &mut Context,
+
        buf: &mut [u8],
+
    ) -> Poll<Result<usize, io::Error>> {
+
        if self.limit == 0 {
+
            return Poll::Ready(Err(io::Error::new(
+
                io::ErrorKind::Other,
+
                "max input size exceeded",
+
            )));
+
        }
+

+
        let this = self.get_mut();
+
        Pin::new(&mut this.inner).poll_read(cx, buf).map(|ready| {
+
            if let Ok(siz) = ready {
+
                this.limit = this.limit.saturating_sub(siz as u64);
+
            }
+

+
            ready
+
        })
+
    }
+
}
+

+
impl<R> AsyncBufRead for TryTake<R>
+
where
+
    R: AsyncBufRead + Unpin,
+
{
+
    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<&[u8], io::Error>> {
+
        if self.limit == 0 {
+
            return Poll::Ready(Err(io::Error::new(
+
                io::ErrorKind::Other,
+
                "max input size exceeded",
+
            )));
+
        }
+

+
        Pin::new(&mut self.get_mut().inner).poll_fill_buf(cx)
+
    }
+

+
    fn consume(self: Pin<&mut Self>, amt: usize) {
+
        Pin::new(&mut self.get_mut().inner).consume(amt)
+
    }
+
}
added archived/link-git/src/protocol/transport.rs
@@ -0,0 +1,84 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use async_trait::async_trait;
use bstr::BString;
use futures_lite::io::{AsyncRead, AsyncWrite};
use git_protocol::transport::{
    client::{
        self,
        git::{ConnectMode, Connection},
        SetServiceResponse,
        Transport,
        TransportWithoutIO,
    },
    Protocol,
    Service,
};
+

+
pub struct Stateless<R, W> {
+
    inner: Connection<R, W>,
+
}
+

+
impl<R, W> Stateless<R, W>
+
where
+
    R: AsyncRead + Unpin,
+
    W: AsyncWrite + Unpin,
+
{
+
    pub fn new(repo: BString, recv: R, send: W) -> Self {
+
        let url = format!("rad://{repo}");
+
        let inner = Connection::new(
+
            recv,
+
            send,
+
            Protocol::V2,
+
            repo,
+
            None::<(String, Option<u16>)>,
+
            ConnectMode::Daemon,
+
        )
+
        .custom_url(Some(url));
+

+
        Self { inner }
+
    }
+
}
+

+
impl<R, W> TransportWithoutIO for Stateless<R, W>
where
    R: AsyncRead + Unpin,
    W: AsyncWrite + Unpin,
{
    // Delegate request framing to the underlying connection.
    fn request(
        &mut self,
        write_mode: client::WriteMode,
        on_into_read: client::MessageKind,
    ) -> Result<client::RequestWriter<'_>, client::Error> {
        self.inner.request(write_mode, on_into_read)
    }

    fn to_url(&self) -> String {
        self.inner.to_url()
    }

    // This transport only ever speaks protocol v2 (see `Stateless::new`).
    fn supported_protocol_versions(&self) -> &[Protocol] {
        &[Protocol::V2]
    }

    // Stateless: every request must be self-contained.
    fn connection_persists_across_multiple_requests(&self) -> bool {
        false
    }
}
+

+
// NOTE(review): `async_trait` does not appear in this file's `use`
// declarations — confirm `use async_trait::async_trait;` is present at the
// top of the file, or this attribute will not resolve.
#[async_trait(?Send)]
impl<R, W> Transport for Stateless<R, W>
where
    R: AsyncRead + Unpin,
    W: AsyncWrite + Unpin,
{
    // Delegate the service handshake to the underlying connection.
    async fn handshake<'a>(
        &mut self,
        service: Service,
        extra_parameters: &'a [(&'a str, Option<&'a str>)],
    ) -> Result<SetServiceResponse<'_>, client::Error> {
        self.inner.handshake(service, extra_parameters).await
    }
}
added archived/link-git/src/protocol/upload_pack.rs
@@ -0,0 +1,238 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use std::{future::Future, io, path::Path, process::ExitStatus, str::FromStr};
+

+
use async_process::{Command, Stdio};
+
use futures_lite::io::{copy, AsyncBufReadExt as _, AsyncRead, AsyncWrite, BufReader};
+
use futures_util::try_join;
+
use git_packetline::{self as packetline, PacketLineRef};
+
use once_cell::sync::Lazy;
+
use versions::Version;
+

+
mod legacy;
+

+
/// The `git-upload-pack` request header sent by a client, in git-daemon
/// style: `git-upload-pack <path>\0[host=<host>[:<port>]]\0\0[key[=val]\0…]`.
#[derive(Debug, PartialEq, Eq)]
pub struct Header {
    /// Repository path exactly as sent on the wire.
    pub path: String,
    /// Optional `host=` parameter, split into host name and optional port.
    pub host: Option<(String, Option<u16>)>,
    /// Any further `key` or `key=value` parameters after the double-NUL.
    pub extra: Vec<(String, Option<String>)>,
}

impl FromStr for Header {
    type Err = &'static str;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let rest = s
            .strip_prefix("git-upload-pack ")
            .ok_or("unsupported service")?;
        let mut parts = rest.split_terminator('\0');

        let path = match parts.next() {
            None => return Err("missing path"),
            Some("") => return Err("empty path"),
            Some(p) => p.to_owned(),
        };

        let host = match parts.next() {
            None | Some("") => None,
            Some(part) => {
                let host = part.strip_prefix("host=").ok_or("invalid host")?;
                match host.split_once(':') {
                    None => Some((host.to_owned(), None)),
                    Some((name, port)) => {
                        let port: u16 = port.parse().map_err(|_| "invalid port")?;
                        Some((name.to_owned(), Some(port)))
                    },
                }
            },
        };

        // Everything after the `\0\0` separator; leading empty parts are the
        // separator itself and are skipped.
        let extra = parts
            .skip_while(|p| p.is_empty())
            .map(|p| match p.split_once('=') {
                None => (p.to_owned(), None),
                Some((k, v)) => (k.to_owned(), Some(v.to_owned())),
            })
            .collect();

        Ok(Self { path, host, extra })
    }
}
+

+
/// Serve one `git-upload-pack` request over the `recv` / `send` byte streams.
///
/// Reads the request [`Header`] (accepting both a pktline-framed header and
/// the legacy unframed form), then returns the header together with a future
/// which, when awaited, drives a `git upload-pack` child process to
/// completion and resolves to its exit status.
///
/// # Errors
///
/// Fails with `InvalidData` if the header cannot be parsed, or with
/// `UnexpectedEof` if the stream ends before a header arrives. The returned
/// future yields any I/O error from the child process or the stream copies.
pub async fn upload_pack<R, W>(
    git_dir: impl AsRef<Path>,
    recv: R,
    mut send: W,
) -> io::Result<(Header, impl Future<Output = io::Result<ExitStatus>>)>
where
    R: AsyncRead + Unpin,
    W: AsyncWrite + Unpin,
{
    let mut recv = BufReader::new(recv);
    // Peek at the first byte to decide how the header is framed.
    let header: Header = match recv.fill_buf().await?.first() {
        // legacy clients don't send a proper pktline header :(
        // (their line starts directly with `g` of "git-upload-pack")
        Some(b'g') => {
            let mut buf = String::with_capacity(256);
            recv.read_line(&mut buf).await?;
            buf.parse().map_err(invalid_data)?
        },
        Some(_) => {
            let mut pktline = packetline::StreamingPeekableIter::new(recv, &[]);
            // Two nested `Result`s: outer is I/O, inner is pktline decoding.
            let pkt = pktline
                .read_line()
                .await
                .ok_or_else(|| invalid_data("missing header"))?
                .map_err(invalid_data)?
                .map_err(invalid_data)?;
            let hdr = match pkt {
                PacketLineRef::Data(data) => std::str::from_utf8(data)
                    .map_err(invalid_data)?
                    .parse()
                    .map_err(invalid_data),
                _ => Err(invalid_data("not a header packet")),
            }?;
            // Reclaim the buffered reader so the remaining request bytes can
            // be piped to the child process below.
            recv = pktline.into_inner();

            hdr
        },
        None => {
            return Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                "expected header",
            ))
        },
    };

    let namespace = header
        .path
        // legacy clients redundantly send a full URN
        .strip_prefix("rad:git:")
        .map(ToOwned::to_owned)
        .unwrap_or_else(|| header.path.clone());
    // Protocol version requested via the `version` extra parameter;
    // anything unrecognised (or absent) degrades to version 0.
    let protocol_version = header
        .extra
        .iter()
        .find_map(|kv| match kv {
            (ref k, Some(v)) if k == "version" => {
                let version = match v.as_str() {
                    "2" => 2,
                    "1" => 1,
                    _ => 0,
                };
                Some(version)
            },
            _ => None,
        })
        .unwrap_or(0);
    // legacy
    let stateless_ls = header.extra.iter().any(|(k, _)| k == "ls");

    let fut = async move {
        if protocol_version < 2 {
            if stateless_ls {
                // Legacy ref advertisement is handled entirely in-process.
                return legacy::advertise_refs(git_dir, &namespace, recv, send).await;
            }
        } else {
            advertise_capabilities(&mut send).await?;
        }

        let mut child = {
            let mut cmd = Command::new("git");
            cmd.current_dir(git_dir)
                // Scrub the environment; keep only PATH and git tracing vars.
                .env_clear()
                .envs(
                    std::env::vars()
                        .filter(|(key, _)| key == "PATH" || key.starts_with("GIT_TRACE")),
                )
                .env("GIT_PROTOCOL", format!("version={protocol_version}"))
                .env("GIT_NAMESPACE", namespace)
                .args([
                    "-c",
                    "uploadpack.allowanysha1inwant=true",
                    "-c",
                    "uploadpack.allowrefinwant=true",
                    "-c",
                    "lsrefs.unborn=ignore",
                    "upload-pack",
                    "--strict",
                    "--stateless-rpc",
                    ".",
                ])
                .stdout(Stdio::piped())
                .stdin(Stdio::piped())
                .stderr(Stdio::inherit())
                .kill_on_drop(true)
                .reap_on_drop(true)
                .spawn()?
        };

        let mut stdin = child.stdin.take().unwrap();
        let mut stdout = child.stdout.take().unwrap();

        // Pump client -> child and child -> client concurrently while
        // awaiting the child's exit; any failure aborts the whole join.
        try_join!(
            copy(&mut recv, &mut stdin),
            copy(&mut stdout, &mut send),
            child.status(),
        )
        .map(|(_, _, status)| status)
    };

    Ok((header, fut))
}
+

+
async fn advertise_capabilities<W>(mut send: W) -> io::Result<()>
+
where
+
    W: AsyncWrite + Unpin,
+
{
+
    // Thou shallt not upgrade your `git` installation while a link instance is
+
    // running!
+
    static GIT_VERSION: Lazy<Version> = Lazy::new(|| git_version().unwrap());
+
    static AGENT: Lazy<Vec<u8>> = Lazy::new(|| format!("agent=git/{}", *GIT_VERSION).into_bytes());
+
    static CAPABILITIES: Lazy<[&[u8]; 4]> = Lazy::new(|| {
+
        [
+
            b"version 2",
+
            AGENT.as_slice(),
+
            b"object-format=sha1",
+
            b"fetch=ref-in-want",
+
        ]
+
    });
+

+
    for cap in *CAPABILITIES {
+
        packetline::encode::text_to_write(cap, &mut send).await?;
+
    }
+
    packetline::encode::flush_to_write(&mut send).await?;
+

+
    Ok(())
+
}
+

+
fn git_version() -> io::Result<Version> {
+
    let out = std::process::Command::new("git")
+
        .arg("--version")
+
        .output()?;
+
    if !out.status.success() {
+
        return Err(io::Error::new(
+
            io::ErrorKind::Other,
+
            "failed to read `git` version",
+
        ));
+
    }
+

+
    // parse: git version 2.30.1 <other optional tokens>
+
    out.stdout
+
        .split(|x| x == &b' ')
+
        .nth(2)
+
        .and_then(|s| {
+
            let s = std::str::from_utf8(s).ok()?;
+
            Version::new(s.trim())
+
        })
+
        .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "failed to parse `git` version"))
+
}
+

+
/// Shorthand for constructing an [`io::Error`] of kind
/// [`io::ErrorKind::InvalidData`] from any boxable error value.
fn invalid_data<E>(inner: E) -> io::Error
where
    E: Into<Box<dyn std::error::Error + Sync + Send>>,
{
    let kind = io::ErrorKind::InvalidData;
    io::Error::new(kind, inner)
}
added archived/link-git/src/protocol/upload_pack/legacy.rs
@@ -0,0 +1,102 @@
+
// Copyright © 2019-2020 The Radicle Foundation <hello@radicle.foundation>
+
// Copyright © 2021      The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use std::{io, path::Path, process::ExitStatus};
+

+
use async_process::{Command, Stdio};
+
use futures_lite::io::{copy, AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _};
+
use futures_util::try_join;
+
use git_ref::{
+
    file::{Store as Refdb, WriteReflog},
+
    FullName,
+
    Reference,
+
};
+

+
/// Handle a legacy (pre-v2) stateless `ls` request: write the ref
/// advertisement for `namespace` to `send`.
///
/// All refs are hidden by default via `uploadpack.hiderefs=refs/`; the
/// namespace's refs are un-hidden, as are any refs inside the namespace whose
/// path ends in `rad/ids/<anything>` (found by scanning the refdb first).
pub(super) async fn advertise_refs<R, W>(
    git_dir: impl AsRef<Path>,
    namespace: &str,
    mut recv: R,
    mut send: W,
) -> io::Result<ExitStatus>
where
    R: AsyncRead + Unpin,
    W: AsyncWrite + Unpin,
{
    // Collect the identity refs to un-hide on a blocking thread, since the
    // refdb access is synchronous I/O.
    let unhide = blocking::unblock({
        let git_dir = git_dir.as_ref().to_path_buf();
        let prefix = Path::new("refs")
            .join("namespaces")
            .join(namespace)
            .join("refs");
        move || -> io::Result<Vec<FullName>> {
            let refdb = Refdb::at(git_dir, WriteReflog::Disable);
            let packed = refdb
                .packed_buffer()
                .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;

            let refs = refdb
                .iter_prefixed(packed.as_ref(), prefix)?
                .filter_map(|r| r.ok().map(|Reference { name, .. }| name))
                .filter(|name| {
                    // Keep names whose trailing components match
                    // `…/rad/ids/<anything>`: zip the reversed components
                    // against the reversed pattern and skip the wildcard
                    // final component ("any").
                    const PATTERN: &[u8] = b"rad/ids/any";
                    const SEPARAT: u8 = b'/';
                    name.as_bstr()
                        .rsplit(|b| b == &SEPARAT)
                        .zip(PATTERN.rsplit(|b| b == &SEPARAT))
                        .skip(1)
                        .all(|(a, b)| a == b)
                })
                .collect::<Vec<_>>();

            Ok(refs)
        }
    })
    .await?;

    let mut child = {
        let mut cmd = Command::new("git");
        cmd.current_dir(git_dir)
            // Scrub the environment; keep only PATH and git tracing vars.
            .env_clear()
            .envs(std::env::vars().filter(|(key, _)| key == "PATH" || key.starts_with("GIT_TRACE")))
            .arg("-c")
            .arg("uploadpack.hiderefs=refs/")
            .arg("-c")
            .arg(format!("uploadpack.hiderefs=!refs/namespaces/{namespace}",));

        // A leading `!` in `uploadpack.hiderefs` negates hiding for that
        // prefix, i.e. un-hides the ref.
        for r in unhide {
            cmd.arg("-c")
                .arg(format!("uploadpack.hiderefs=!{}", r.as_bstr()));
        }

        cmd.args([
            "upload-pack",
            "--strict",
            "--timeout=5",
            "--stateless-rpc",
            "--advertise-refs",
            ".",
        ])
        .stdout(Stdio::piped())
        .stderr(Stdio::inherit())
        .kill_on_drop(true)
        .reap_on_drop(true)
        .spawn()?
    };
    let mut stdout = child.stdout.take().unwrap();

    // Pkt-line service announcement (`001e` = 30 bytes) followed by a
    // flush-pkt, as required by the smart HTTP/stateless protocol.
    const HEADER: &[u8] = b"001e# service=git-upload-pack\n0000";
    send.write_all(HEADER).await?;
    let status = try_join!(copy(&mut stdout, &mut send), child.status()).map(|x| x.1);

    // Read one byte off the read stream to ensure it is driven to completion
    // (we expect EOF immediately). Failure to do so may cause resource leaks.
    //
    // Cf. 900b6cf6 (replication: Ensure git stream is closed, 2021-04-26)
    let mut buf = [0; 1];
    recv.read(&mut buf).await?;

    status
}
added archived/link-git/src/refs.rs
@@ -0,0 +1,7 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
pub mod db;
+
pub use git_ref::*;
added archived/link-git/src/refs/db.rs
@@ -0,0 +1,269 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use std::{
+
    collections::BTreeSet,
+
    convert::TryInto,
+
    io,
+
    path::{Path, PathBuf},
+
    sync::Arc,
+
    time::{Duration, SystemTime},
+
};
+

+
use git_ref::{
+
    file::{self, iter::LooseThenPacked, Transaction, WriteReflog},
+
    packed,
+
    FullName,
+
    PartialNameRef,
+
    Reference,
+
    Target,
+
};
+
use parking_lot::RwLock;
+

+
/// Error types for the refdb wrapper.
pub mod error {
    use super::*;
    use thiserror::Error;

    /// Errors opening the refdb.
    #[derive(Debug, Error)]
    pub enum Open {
        #[error("failed to take a snapshot of packed-refs")]
        Snapshot(#[from] Snapshot),

        #[error(transparent)]
        Io(#[from] io::Error),
    }

    /// Errors taking a snapshot of the `packed-refs` file.
    #[derive(Debug, Error)]
    pub enum Snapshot {
        #[error("failed to lock packed-refs")]
        Lock(#[from] git_lock::acquire::Error),

        #[error("failed to open packed-refs")]
        Open(#[from] packed::buffer::open::Error),

        #[error(transparent)]
        Io(#[from] io::Error),
    }

    /// Errors following a symbolic reference.
    #[derive(Debug, Error)]
    pub enum Follow {
        #[error("cyclic symref: {0:?}")]
        Cycle(FullName),

        #[error("reference {0:?} not found")]
        NotFound(FullName),

        #[error("max symref depth {0} exceeded")]
        DepthLimitExceeded(usize),

        #[error(transparent)]
        Find(#[from] file::find::Error),
    }
}
+

+
/// Threadsafe refdb with shareable `packed-refs` memory buffer.
///
/// Packed refs are a delicate business: they are written by an external
/// process, [`git-pack-refs`], _or_ when a packed ref is deleted. It may also
/// be that no `packed-refs` currently exist.
///
/// The only way we can be certain to operate on a consistent view of what is
/// committed to disk is to check if the `packed-refs` file has changed since we
/// last read it. This would be quite expensive to do for small operations.
/// Thus, the caller is responsible for determining just how much they can
/// afford to see possibly out-of-date data: the [`Refdb::snapshot`] method
/// checks if the previously loaded `packed-refs` appear to be out-of-date, and
/// reloads them if necessary. The resulting [`Snapshot`] contains a pointer to
/// an immutable memory buffer of the packed refs which can be shared between
/// threads, or cloned.
///
/// [`git-pack-refs`]: https://git-scm.com/docs/git-pack-refs
#[derive(Clone)]
pub struct Refdb {
    // Loose ref store; also knows where `packed-refs` lives on disk.
    store: file::Store,
    // Most recently loaded `packed-refs`; `None` if the file didn't exist
    // at the last (re)load.
    packed: Arc<RwLock<Option<Packed>>>,
}
+

+
impl Refdb {
    /// Open the refdb at `git_dir`, taking an initial snapshot of
    /// `packed-refs` (if the file exists).
    pub fn open(git_dir: impl Into<PathBuf>) -> Result<Self, error::Open> {
        let store = file::Store::at(git_dir, WriteReflog::Normal);
        let packed = Arc::new(RwLock::new(Packed::open(store.packed_refs_path())?));
        Ok(Self { store, packed })
    }

    /// Obtain a [`Snapshot`] of the current state, reloading `packed-refs`
    /// from disk if it appears to have changed since the last load.
    pub fn snapshot(&self) -> Result<Snapshot, error::Snapshot> {
        let read = self.packed.read();
        match &*read {
            None => {
                // Release the read lock before taking the write lock in
                // `reload` to avoid deadlock.
                drop(read);
                // always modified, because it was None and now is Some
                self.reload(|_| true)
            },

            Some(packed) => {
                if packed.is_modified()? {
                    let mtime = packed.mtime;
                    drop(read);
                    // we don't care what the mtime is, only that we have a
                    // different value than before
                    self.reload(|packed1| packed1.mtime != mtime)
                } else {
                    Ok(Snapshot {
                        store: self.store.clone(),
                        packed: Some(packed.buf.clone()),
                    })
                }
            },
        }
    }

    /// Re-read `packed-refs` under the write lock.
    ///
    /// `modified_while_blocked` is evaluated after the write lock is
    /// acquired: if it returns `true`, another thread already reloaded while
    /// we were blocked, and that buffer is returned without touching disk.
    fn reload<F>(&self, modified_while_blocked: F) -> Result<Snapshot, error::Snapshot>
    where
        F: FnOnce(&Packed) -> bool,
    {
        let mut write = self.packed.write();
        if let Some(packed) = &*write {
            if modified_while_blocked(packed) {
                return Ok(Snapshot {
                    store: self.store.clone(),
                    packed: Some(packed.buf.clone()),
                });
            }
        }

        match Packed::open(self.store.packed_refs_path())? {
            Some(packed) => {
                let buf = packed.buf.clone();
                *write = Some(packed);
                Ok(Snapshot {
                    store: self.store.clone(),
                    packed: Some(buf),
                })
            },

            None => {
                // The file vanished (e.g. all packed refs were deleted).
                *write = None;
                Ok(Snapshot {
                    store: self.store.clone(),
                    packed: None,
                })
            },
        }
    }
}
+

+
/// A view of the refdb based on one loaded `packed-refs` buffer.
///
/// Cloning is cheap: the packed buffer is shared via [`Arc`].
#[derive(Clone)]
pub struct Snapshot {
    store: file::Store,
    // `None` if no `packed-refs` file existed when the snapshot was taken.
    packed: Option<Arc<packed::Buffer>>,
}
+

+
impl Snapshot {
    /// Look up a reference by (partial) name, consulting loose refs and the
    /// snapshot's `packed-refs` buffer. `Ok(None)` if it doesn't exist.
    pub fn find<'a, N, E>(&self, name: N) -> Result<Option<Reference>, file::find::Error>
    where
        N: TryInto<PartialNameRef<'a>, Error = E>,
        file::find::Error: From<E>,
    {
        self.store.try_find(name, self.packed.as_deref())
    }

    /// Begin a ref [`Transaction`] on the underlying store.
    pub fn transaction(&self) -> Transaction {
        self.store.transaction()
    }

    /// Iterate loose and packed refs, optionally restricted to `prefix`.
    pub fn iter(&self, prefix: Option<impl AsRef<Path>>) -> io::Result<LooseThenPacked> {
        let packed = self.packed.as_deref();
        match prefix {
            None => self.store.iter(packed),
            Some(p) => self.store.iter_prefixed(packed, p),
        }
    }

    /// Follow a symbolic reference until a direct reference is found.
    ///
    /// If `symref` is a direct reference, a copy of it is returned. No more
    /// than five symbolic references will be followed, and cyclic
    /// references are detected. Both result in an error to be returned.
    ///
    /// Note that following is not the same as "peeling": no access to the
    /// object database is made, and thus no assumptions about the kind of
    /// object the reference ultimately points to can be made.
    pub fn follow(&self, symref: &Reference) -> Result<Reference, error::Follow> {
        match &symref.target {
            Target::Peeled(_) => Ok(symref.clone()),
            Target::Symbolic(name) => {
                // `seen` records every name visited, serving both cycle
                // detection and the depth accounting below.
                let mut seen = BTreeSet::new();
                seen.insert(symref.name.clone());

                let mut next = self
                    .find(name.to_partial())?
                    .ok_or_else(|| error::Follow::NotFound(name.clone()))?;
                seen.insert(name.clone());

                const MAX_DEPTH: usize = 5;
                loop {
                    match next.target {
                        Target::Peeled(_) => return Ok(next),
                        Target::Symbolic(sym) => {
                            // +1 accounts for the hop we are about to take.
                            if seen.len() + 1 > MAX_DEPTH {
                                return Err(error::Follow::DepthLimitExceeded(MAX_DEPTH));
                            }

                            if seen.contains(&sym) {
                                return Err(error::Follow::Cycle(sym));
                            }
                            next = self
                                .find(sym.to_partial())?
                                .ok_or_else(|| error::Follow::NotFound(sym.clone()))?;
                            seen.insert(sym);
                        },
                    }
                }
            },
        }
    }
}
+

+
/// An opened `packed-refs` buffer plus the file mtime observed when it was
/// loaded, used for cheap staleness checks.
struct Packed {
    buf: Arc<packed::Buffer>,
    path: PathBuf,
    mtime: SystemTime,
}
+

+
impl Packed {
+
    fn open(path: PathBuf) -> Result<Option<Self>, error::Snapshot> {
+
        use git_lock::{acquire, Marker};
+

+
        let _lock = Marker::acquire_to_hold_resource(
+
            &path,
+
            acquire::Fail::AfterDurationWithBackoff(Duration::from_millis(500)),
+
            None,
+
        )?;
+
        match path.metadata() {
+
            // `git-lock` will happily lock a non-existent file
+
            Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(None),
+
            Err(e) => Err(e.into()),
+

+
            Ok(meta) => {
+
                let mtime = meta.modified()?;
+
                let buf = Arc::new(packed::Buffer::open(&path, 32 * 1024)?);
+
                Ok(Some(Self { buf, path, mtime }))
+
            },
+
        }
+
    }
+

+
    fn is_modified(&self) -> io::Result<bool> {
+
        match self.path.metadata() {
+
            // it existed before, so gone is modified
+
            Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(true),
+
            Err(e) => Err(e),
+

+
            Ok(meta) => {
+
                let mtime = meta.modified()?;
+
                Ok(self.mtime == mtime)
+
            },
+
        }
+
    }
+
}
added archived/link-git/src/service.rs
@@ -0,0 +1,126 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use std::{fmt::Debug, ops::Deref, str::FromStr};
+

+
use git2::transport::Service as GitService;
+
use lazy_static::lazy_static;
+

+
lazy_static! {
    // Parses e.g. `git-upload-pack '/some/path'`: capture 1 is the service
    // name, capture 2 the quoted path with an optional leading `/` stripped.
    static ref SERVICE_REGEX: regex::Regex = regex::Regex::new(r"(\S+) '/?(.+)'").unwrap();
}

/// Newtype around [`git2::transport::Service`] providing `Display`, `Debug`
/// and conversion impls.
#[derive(Clone, Copy, PartialEq)]
pub struct Service(pub GitService);
+

+
/// A service and URN as passed to the exec_request of an SSH server by git when
+
/// talking to an SSH remote. The `FromStr` implementation for this type expects
+
/// a string of the form:
+
///
+
/// `<request type> /<path>`
+
///
+
/// Where the request type is either `upload-pack` or `receive-pack`, the
+
/// leading slash before the urn is optional, and the `path` is whatever the
+
/// `FromStr` of `Path` provides.
+
#[derive(Debug, Clone)]
+
pub struct SshService<Path> {
+
    pub service: Service,
+
    pub path: Path,
+
}
+

+
impl<Path> SshService<Path> {
+
    pub fn is_upload(&self) -> bool {
+
        match self.service.0 {
+
            GitService::UploadPackLs | GitService::UploadPack => true,
+
            GitService::ReceivePackLs | GitService::ReceivePack => false,
+
        }
+
    }
+

+
    pub fn is_receive(&self) -> bool {
+
        !self.is_upload()
+
    }
+
}
+

+
// Zero-cost conversions between the newtype and the underlying
// `git2::transport::Service`.

impl From<GitService> for Service {
    fn from(g: GitService) -> Self {
        Service(g)
    }
}

impl From<Service> for GitService {
    fn from(s: Service) -> Self {
        s.0
    }
}
+

+
/// Errors parsing an [`SshService`] from an exec-request string.
#[derive(thiserror::Error, Debug)]
pub enum ParseService {
    #[error("the exec str must be in the form <service> <urn>")]
    Format,
    /// The path component failed to parse; the boxed error originates from
    /// the `Path` type's `FromStr`.
    #[error(transparent)]
    Namespace(Box<dyn std::error::Error + Send + Sync + 'static>),
    #[error("unknown service {0}")]
    UnknownService(String),
}
+

+
impl Debug for Service {
+
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+
        f.debug_tuple("Service")
+
            .field(match self.0 {
+
                GitService::UploadPackLs => &"UploadPackLs",
+
                GitService::UploadPack => &"UploadPack",
+
                GitService::ReceivePackLs => &"ReceivePackLs",
+
                GitService::ReceivePack => &"ReceivePack",
+
            })
+
            .finish()
+
    }
+
}
+

+
impl std::fmt::Display for Service {
+
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+
        match self.0 {
+
            GitService::UploadPack => write!(f, "upload-pack"),
+
            GitService::UploadPackLs => write!(f, "upload-pack-ls"),
+
            GitService::ReceivePack => write!(f, "receive-pack"),
+
            GitService::ReceivePackLs => write!(f, "receive-pack-ls"),
+
        }
+
    }
+
}
+

+
// Allow a `Service` to be used wherever a `&GitService` is expected.
impl Deref for Service {
    type Target = GitService;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
+

+
impl<Path> FromStr for SshService<Path>
+
where
+
    Path: FromStr,
+
    Path::Err: std::error::Error + Send + Sync + 'static,
+
{
+
    type Err = ParseService;
+

+
    fn from_str(exec_str: &str) -> Result<Self, Self::Err> {
+
        let cap = SERVICE_REGEX
+
            .captures_iter(exec_str)
+
            .next()
+
            .ok_or(ParseService::Format)?;
+
        debug_assert!(cap.len() == 3);
+
        let service_str: &str = &cap[1];
+
        let urn_str = &cap[2];
+

+
        let path = urn_str
+
            .parse()
+
            .map_err(|err| ParseService::Namespace(Box::new(err)))?;
+
        let service = match service_str {
+
            "git-upload-pack" => Ok(Service(GitService::UploadPack)),
+
            "git-receive-pack" => Ok(Service(GitService::ReceivePack)),
+
            other => Err(ParseService::UnknownService(other.to_string())),
+
        }?;
+
        Ok(Self { service, path })
+
    }
+
}
added archived/link-git/t/Cargo.toml
@@ -0,0 +1,31 @@
+
[package]
+
name = "link-git-test"
+
version = "0.1.0"
+
edition = "2021"
+
license = "GPL-3.0-or-later"
+

+
publish = false
+

+
[lib]
+
doctest = false
+
test = true
+
doc = false
+

+
[features]
+
test = []
+

+
[dev-dependencies]
+
anyhow = "1"
+
bstr = "0.2"
+
futures = "0.3"
+
futures_ringbuf = "0.3"
+
tempfile = "3.4"
+

+
[dev-dependencies.git2]
+
version = "0.16.1"
+
default-features = false
+
features = ["vendored-libgit2"]
+

+
[dev-dependencies.link-git]
+
path = ".."
+
features = ["git2"]

\ No newline at end of file
added archived/link-git/t/src/integration.rs
@@ -0,0 +1,6 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
mod protocol;
added archived/link-git/t/src/integration/protocol.rs
@@ -0,0 +1,383 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use std::{
+
    collections::BTreeSet,
+
    io,
+
    path::Path,
+
    sync::{atomic::AtomicBool, Arc},
+
};
+

+
use bstr::ByteSlice as _;
+
use futures::{AsyncReadExt as _, TryFutureExt as _};
+
use link_git::protocol::{fetch, ls, packwriter, upload_pack, ObjectId, PackWriter, Ref};
+
use tempfile::{tempdir, TempDir};
+

+
/// Create a bare fixture repository under namespace `foo` containing:
/// `refs/heads/main` at an initial commit with an empty tree,
/// `refs/heads/next` one commit ahead of `main`, and
/// `refs/pulls/1/head` pointing at the same commit as `next`.
fn upstream() -> TempDir {
    let tmp = tempdir().unwrap();

    let repo = git2::Repository::init_bare(&tmp).unwrap();
    let auth = git2::Signature::now("apollo", "apollo@cree.de").unwrap();

    // Empty tree shared by both commits.
    let tree = {
        let empty = repo.treebuilder(None).unwrap();
        let oid = empty.write().unwrap();
        repo.find_tree(oid).unwrap()
    };
    let base = {
        let oid = repo
            .commit(
                Some("refs/namespaces/foo/refs/heads/main"),
                &auth,
                &auth,
                "initial",
                &tree,
                &[],
            )
            .unwrap();
        repo.find_commit(oid).unwrap()
    };
    let next = repo
        .commit(
            Some("refs/namespaces/foo/refs/heads/next"),
            &auth,
            &auth,
            "ng",
            &tree,
            &[&base],
        )
        .unwrap();
    repo.reference(
        "refs/namespaces/foo/refs/pulls/1/head",
        next,
        true,
        "pee arrr",
    )
    .unwrap();

    tmp
}
+

+
/// Gather `(name, target)` for every reference in `repo`, failing on the
/// first iteration error.
///
/// The `unwrap`s assume refs created by these tests: valid UTF-8 names with
/// direct targets.
fn collect_refs(repo: &git2::Repository) -> Result<Vec<(String, git2::Oid)>, git2::Error> {
    let mut out = Vec::new();
    for r in repo.references()? {
        let r = r?;
        out.push((r.name().unwrap().to_owned(), r.target().unwrap()));
    }
    Ok(out)
}
+

+
fn update_tips<'a, T>(repo: &git2::Repository, tips: T) -> Result<(), anyhow::Error>
+
where
+
    T: IntoIterator<Item = &'a Ref>,
+
{
+
    for r in tips {
+
        match r {
+
            Ref::Direct { path, object } => {
+
                repo.reference(
+
                    path.to_str()?,
+
                    git2::Oid::from_bytes(object.as_slice())?,
+
                    true,
+
                    "",
+
                )?;
+
            },
+
            x => anyhow::bail!("unexpected ref variant: {:?}", x),
+
        }
+
    }
+

+
    Ok(())
+
}
+

+
/// Walk the commit graph starting from the ref named `tip`, returning the
/// visited commit ids in revwalk order.
fn collect_history(repo: &git2::Repository, tip: &str) -> Result<Vec<git2::Oid>, git2::Error> {
    let mut walk = repo.revwalk()?;
    walk.push_ref(tip)?;
    let mut history = Vec::new();
    for oid in walk {
        history.push(oid?);
    }
    Ok(history)
}
+

+
fn run_ls_refs<R: AsRef<Path>>(remote: R, opt: ls::Options) -> io::Result<Vec<Ref>> {
+
    let (client, server) = futures_ringbuf::Endpoint::pair(256, 256);
+
    let client = async move {
+
        let (recv, send) = client.split();
+
        ls::ls_refs(opt, recv, send).await
+
    };
+
    let server = {
+
        let (recv, send) = server.split();
+
        upload_pack::upload_pack(&remote, recv, send).and_then(|(_hdr, run)| run)
+
    };
+

+
    let (client_out, server_out) =
+
        futures::executor::block_on(futures::future::try_join(client, server))?;
+
    assert!(server_out.success());
+
    Ok(client_out)
+
}
+

+
fn run_fetch<R, B, P>(
+
    remote: R,
+
    opt: fetch::Options,
+
    build_pack_writer: B,
+
) -> io::Result<fetch::Outputs<P::Output>>
+
where
+
    R: AsRef<Path>,
+
    B: FnOnce(Arc<AtomicBool>) -> P,
+
    P: PackWriter + Send + 'static,
+
    P::Output: Send + 'static,
+
{
+
    let (client, server) = futures_ringbuf::Endpoint::pair(256, 256);
+
    let client = async move {
+
        let (recv, send) = client.split();
+
        fetch::fetch(opt, build_pack_writer, recv, send).await
+
    };
+
    let server = {
+
        let (recv, send) = server.split();
+
        upload_pack::upload_pack(&remote, recv, send).and_then(|(_hdr, run)| run)
+
    };
+

+
    let (client_out, server_out) =
+
        futures::executor::block_on(futures::future::try_join(client, server))?;
+
    assert!(server_out.success());
+
    Ok(client_out)
+
}
+

+
#[test]
fn smoke() {
    let remote = upstream();

    let ls_opt = ls::Options {
        repo: "foo".into(),
        extra_params: vec![],
        ref_prefixes: vec!["refs/heads/".into(), "refs/pulls/".into()],
    };
    let refs = run_ls_refs(&remote, ls_opt).unwrap();

    // The server must advertise exactly the three refs `upstream` set up.
    assert_eq!(
        refs.iter().map(|r| r.unpack().0).collect::<BTreeSet<_>>(),
        [
            "refs/heads/main".into(),
            "refs/heads/next".into(),
            "refs/pulls/1/head".into()
        ]
        .iter()
        .collect::<BTreeSet<_>>()
    );

    // Fetching everything we were advertised must yield a pack.
    let fetch_opt = fetch::Options {
        repo: "foo".into(),
        extra_params: vec![],
        haves: vec![],
        wants: vec![],
        want_refs: refs.iter().map(|r| r.unpack().0.clone()).collect(),
    };
    let out = run_fetch(&remote, fetch_opt, |_| packwriter::Discard).unwrap();

    assert!(out.pack.is_some());
}
+

+
#[test]
fn want_ref() {
    let remote = upstream();

    let opt = fetch::Options {
        repo: "foo".into(),
        extra_params: vec![],
        haves: vec![],
        wants: vec![],
        want_refs: vec!["refs/heads/main".into(), "refs/pulls/1/head".into()],
    };
    let out = run_fetch(&remote, opt, |_| packwriter::Discard).unwrap();

    assert!(out.pack.is_some());
    // The server must confirm exactly the refs we asked for.
    assert_eq!(
        out.wanted_refs
            .iter()
            .map(|r| r.unpack().0)
            .collect::<BTreeSet<_>>(),
        ["refs/heads/main".into(), "refs/pulls/1/head".into()]
            .iter()
            .collect::<BTreeSet<_>>()
    )
}
+

+
#[test]
#[should_panic(expected = "`fetch` is empty")]
fn empty_fetch() {
    let remote = upstream();

    // Neither `wants` nor `want_refs` set: the client refuses to send an
    // empty fetch request, so the `unwrap` below panics.
    let opt = fetch::Options {
        repo: "foo".into(),
        extra_params: vec![],
        haves: vec![],
        wants: vec![],
        want_refs: vec![],
    };
    run_fetch(&remote, opt, |_| packwriter::Discard).unwrap();
}
+

+
fn clone_with<R, L, B, P>(remote: R, local: L, build_pack_writer: B)
+
where
+
    R: AsRef<Path>,
+
    L: AsRef<Path>,
+
    B: FnOnce(Arc<AtomicBool>) -> P,
+
    P: PackWriter + Send + 'static,
+
    P::Output: Send + 'static,
+
{
+
    let refs = run_ls_refs(
+
        &remote,
+
        ls::Options {
+
            repo: "foo".into(),
+
            extra_params: vec![],
+
            ref_prefixes: vec!["refs/heads/".into(), "refs/pulls/".into()],
+
        },
+
    )
+
    .unwrap();
+
    let out = run_fetch(
+
        &remote,
+
        fetch::Options {
+
            repo: "foo".into(),
+
            extra_params: vec![],
+
            haves: vec![],
+
            wants: vec![],
+
            want_refs: refs.iter().map(|r| r.unpack().0.clone()).collect(),
+
        },
+
        build_pack_writer,
+
    )
+
    .unwrap();
+

+
    assert!(out.pack.is_some());
+

+
    let remote_repo = git2::Repository::open(remote).unwrap();
+
    remote_repo.set_namespace("foo").unwrap();
+
    let local_repo = git2::Repository::open(&local).unwrap();
+

+
    update_tips(&local_repo, &out.wanted_refs).unwrap();
+

+
    let mut remote_refs = collect_refs(&remote_repo).unwrap();
+
    let mut local_refs = collect_refs(&local_repo).unwrap();
+

+
    remote_refs.sort();
+
    local_refs.sort();
+

+
    assert_eq!(remote_refs, local_refs);
+
}
+

+
#[test]
fn clone_libgit() {
    let remote = upstream();
    let local = tempdir().unwrap();
    let repo = git2::Repository::init(&local).unwrap();

    // Write the pack via libgit2's odb.
    clone_with(&remote, &local, move |stop| {
        packwriter::Libgit::new(packwriter::Options::default(), repo, stop)
    })
}
+

+
#[test]
fn clone_gitoxide() {
    let remote = upstream();
    let local = tempdir().unwrap();
    let repo = git2::Repository::init(&local).unwrap();

    // Write the pack via the gitoxide-backed writer.
    clone_with(&remote, &local, move |stop| {
        packwriter::Standard::new(
            repo.path(),
            packwriter::Options::default(),
            packwriter::StandardThickener::new(repo.path()),
            stop,
        )
    })
}
+

+
fn thin_pack_with<R, L, B, P>(remote: R, local: L, build_pack_writer: B)
+
where
+
    R: AsRef<Path>,
+
    L: AsRef<Path>,
+
    B: Fn(Arc<AtomicBool>) -> P,
+
    P: PackWriter + Send + 'static,
+
    P::Output: Send + 'static,
+
{
+
    // Clone main only
+
    {
+
        let out = run_fetch(
+
            &remote,
+
            fetch::Options {
+
                repo: "foo".into(),
+
                extra_params: vec![],
+
                haves: vec![],
+
                wants: vec![],
+
                want_refs: vec!["refs/heads/main".into()],
+
            },
+
            &build_pack_writer,
+
        )
+
        .unwrap();
+
        assert!(out.pack.is_some());
+
    }
+

+
    let remote_repo = git2::Repository::open(&remote).unwrap();
+
    remote_repo.set_namespace("foo").unwrap();
+
    let local_repo = git2::Repository::open(&local).unwrap();
+

+
    // Fetch next, which is ahead of main
+
    {
+
        let head = remote_repo.refname_to_id("refs/heads/main").unwrap();
+
        let out = run_fetch(
+
            &remote,
+
            fetch::Options {
+
                repo: "foo".into(),
+
                extra_params: vec![],
+
                haves: vec![ObjectId::from_20_bytes(head.as_bytes())],
+
                wants: vec![],
+
                want_refs: vec!["refs/heads/next".into()],
+
            },
+
            build_pack_writer,
+
        )
+
        .unwrap();
+
        assert!(out.pack.is_some());
+

+
        update_tips(&local_repo, &out.wanted_refs).unwrap();
+
    }
+

+
    let remote_history = collect_history(&remote_repo, "refs/heads/next").unwrap();
+
    let local_history = collect_history(&local_repo, "refs/heads/next").unwrap();
+

+
    assert!(!remote_history.is_empty());
+
    assert_eq!(remote_history, local_history)
+
}
+

+
#[test]
#[ignore]
fn thin_pack_libgit() {
    let remote = upstream();
    let local = tempdir().unwrap();

    thin_pack_with(&remote, &local, |stop| {
        // A fresh repository handle per fetch round, since the writer
        // consumes it by value.
        let repo = git2::Repository::init(&local).unwrap();
        packwriter::Libgit::new(packwriter::Options::default(), repo, stop)
    })
}
+

+
#[test]
fn thin_pack_gitoxide() {
    let remote = upstream();
    let local = tempdir().unwrap();
    let local_repo = git2::Repository::init(&local).unwrap();
    // Own the git-dir path so the closure can be called more than once.
    let git_dir = local_repo.path().to_owned();

    thin_pack_with(&remote, &local, move |stop| {
        packwriter::Standard::new(
            &git_dir,
            packwriter::Options::default(),
            packwriter::StandardThickener::new(&git_dir),
            stop,
        )
    })
}
added archived/link-git/t/src/lib.rs
@@ -0,0 +1,7 @@
+
// Copyright © 2022 The Radicle Link Contributors
+
// SPDX-License-Identifier: GPL-3.0-or-later
+

+
#[cfg(test)]
+
mod integration;
+
#[cfg(test)]
+
mod tests;
added archived/link-git/t/src/tests.rs
@@ -0,0 +1,6 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
mod protocol;
added archived/link-git/t/src/tests/protocol.rs
@@ -0,0 +1,7 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
mod take;
+
mod upload_pack;
added archived/link-git/t/src/tests/protocol/take.rs
@@ -0,0 +1,47 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use futures::{executor::block_on, io::Cursor, AsyncReadExt as _};
+
use link_git::protocol::take::TryTake;
+
use std::io;
+

+
#[test]
fn when_within_limit() {
    let input = b"the world is everything that is the case";
    // The cap is one byte larger than the input, so reading to EOF must
    // succeed and hand back the input unchanged.
    let output = block_on(async move {
        let mut out = Vec::with_capacity(input.len());
        let mut reader = TryTake::new(Cursor::new(input), input.len() as u64 + 1);
        reader.read_to_end(&mut out).await?;
        Ok::<_, io::Error>(out)
    })
    .unwrap();

    assert_eq!(input, output.as_slice())
}
+

+
#[test]
fn when_limit_exceeded() {
    let input = b"what is the case, the fact, is the existence of atomic facts";
    // A 10-byte cap on a longer input must surface an error.
    let err =
        block_on(TryTake::new(Cursor::new(input), 10).read_to_end(&mut Vec::new())).unwrap_err();

    assert_eq!(err.to_string(), "max input size exceeded")
}
+

+
#[test]
fn excess_bytes_remain() {
    let input = b"whereof one cannot speak, thereof one must be silent";
    // With the cap set exactly to the input length, reading to EOF errors —
    // but everything read before the failure must remain in the buffer.
    let output = block_on(async move {
        let mut out = Vec::with_capacity(input.len());
        let res = TryTake::new(Cursor::new(input), input.len() as u64)
            .read_to_end(&mut out)
            .await;
        assert!(res.is_err());
        out
    });

    assert_eq!(input, output.as_slice())
}
added archived/link-git/t/src/tests/protocol/upload_pack.rs
@@ -0,0 +1,129 @@
+
// Copyright © 2021 The Radicle Link Contributors
+
//
+
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
+
// Linking Exception. For full terms see the included LICENSE file.
+

+
use link_git::protocol::upload_pack;
+

+
/// Parsing of the `git-upload-pack` request header line.
mod header {
    use super::*;
    use std::str::FromStr as _;

    #[test]
    fn service_must_be_upload_pack() {
        // Only the upload-pack service is understood.
        let parsed = upload_pack::Header::from_str("git-receive-pack ");
        assert_eq!(parsed, Err("unsupported service"))
    }

    #[test]
    fn no_path() {
        let parsed = upload_pack::Header::from_str("git-upload-pack ");
        assert_eq!(parsed, Err("missing path"))
    }

    #[test]
    fn empty_path() {
        let parsed = upload_pack::Header::from_str("git-upload-pack \0host=lolhost:123\0");
        assert_eq!(parsed, Err("empty path"))
    }

    #[test]
    fn host_and_port() {
        let parsed =
            upload_pack::Header::from_str("git-upload-pack /git.git\0host=lolhost:123\0").unwrap();
        let expected = upload_pack::Header {
            path: "/git.git".to_owned(),
            host: Some(("lolhost".to_owned(), Some(123))),
            extra: vec![],
        };
        assert_eq!(parsed, expected)
    }

    #[test]
    fn host_without_port() {
        let parsed =
            upload_pack::Header::from_str("git-upload-pack /git.git\0host=lolhost\0").unwrap();
        let expected = upload_pack::Header {
            path: "/git.git".to_owned(),
            host: Some(("lolhost".to_owned(), None)),
            extra: vec![],
        };
        assert_eq!(parsed, expected)
    }

    #[test]
    fn no_host() {
        let parsed = upload_pack::Header::from_str("git-upload-pack /git.git\0").unwrap();
        let expected = upload_pack::Header {
            path: "/git.git".to_owned(),
            host: None,
            extra: vec![],
        };
        assert_eq!(parsed, expected)
    }

    #[test]
    fn empty_host() {
        // A bare `\0\0` after the path counts as no host at all.
        let parsed = upload_pack::Header::from_str("git-upload-pack /git.git\0\0").unwrap();
        let expected = upload_pack::Header {
            path: "/git.git".to_owned(),
            host: None,
            extra: vec![],
        };
        assert_eq!(parsed, expected)
    }

    #[test]
    fn no_host_extra() {
        // Extra parameters may follow even when the host section is empty.
        let parsed =
            upload_pack::Header::from_str("git-upload-pack /git.git\0\0version=42\0").unwrap();
        let expected = upload_pack::Header {
            path: "/git.git".to_owned(),
            host: None,
            extra: vec![("version".to_owned(), Some("42".to_owned()))],
        };
        assert_eq!(parsed, expected)
    }

    #[test]
    fn host_port_extra() {
        let parsed = upload_pack::Header::from_str(
            "git-upload-pack /git.git\0host=lolhost:123\0\0version=42\0",
        )
        .unwrap();
        let expected = upload_pack::Header {
            path: "/git.git".to_owned(),
            host: Some(("lolhost".to_owned(), Some(123))),
            extra: vec![("version".to_owned(), Some("42".to_owned()))],
        };
        assert_eq!(parsed, expected)
    }

    #[test]
    fn host_extra_extra() {
        // Extra params may be valueless (`foo`) or key=value pairs.
        let parsed = upload_pack::Header::from_str(
            "git-upload-pack /git.git\0host=lolhost\0\0version=42\0foo\0n=69\0",
        )
        .unwrap();
        let expected = upload_pack::Header {
            path: "/git.git".to_owned(),
            host: Some(("lolhost".to_owned(), None)),
            extra: vec![
                ("version".to_owned(), Some("42".to_owned())),
                ("foo".to_owned(), None),
                ("n".to_owned(), Some("69".to_owned())),
            ],
        };
        assert_eq!(parsed, expected)
    }
}
deleted git-commit/Cargo.toml
@@ -1,19 +0,0 @@
-
[package]
-
name = "git-commit"
-
version = "0.3.0"
-
license = "MIT OR Apache-2.0"
-
edition = "2021"
-
authors = [
-
  "Alexis Sellier <alexis@radicle.xyz>",
-
  "Fintan Halpenny <fintan.halpenny@gmail.com>",
-
]
-
description = "A small library for parsing, displaying and creating a git commit"
-
keywords = ["git", "git-commit", "git-trailers", "radicle"]
-

-
[dependencies]
-
thiserror = "1"
-

-
[dependencies.git2]
-
version = "0.16.1"
-
default-features = false
-
features = ["vendored-libgit2"]
deleted git-commit/src/author.rs
@@ -1,129 +0,0 @@
-
use std::{
-
    fmt,
-
    num::ParseIntError,
-
    str::{self, FromStr},
-
};
-

-
use thiserror::Error;
-

-
/// The data for indicating authorship of an action within a
-
/// [`super::Commit`].
-
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
-
pub struct Author {
-
    /// Name corresponding to `user.name` in the git config.
-
    ///
-
    /// Note: this must not contain `<` or `>`.
-
    pub name: String,
-
    /// Email corresponding to `user.email` in the git config.
-
    ///
-
    /// Note: this must not contain `<` or `>`.
-
    pub email: String,
-
    /// The time of this author's action.
-
    pub time: Time,
-
}
-

-
/// The time of a [`Author`]'s action.
-
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
-
pub struct Time {
-
    seconds: i64,
-
    offset: i32,
-
}
-

-
impl Time {
-
    pub fn new(seconds: i64, offset: i32) -> Self {
-
        Self { seconds, offset }
-
    }
-

-
    /// Return the time, in seconds, since the epoch.
-
    pub fn seconds(&self) -> i64 {
-
        self.seconds
-
    }
-

-
    /// Return the timezone offset, in minutes.
-
    pub fn offset(&self) -> i32 {
-
        self.offset
-
    }
-
}
-

-
impl From<Time> for git2::Time {
-
    fn from(t: Time) -> Self {
-
        Self::new(t.seconds, t.offset)
-
    }
-
}
-

-
impl From<git2::Time> for Time {
-
    fn from(t: git2::Time) -> Self {
-
        Self::new(t.seconds(), t.offset_minutes())
-
    }
-
}
-

-
impl fmt::Display for Time {
-
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-
        let sign = if self.offset.is_negative() { '-' } else { '+' };
-
        write!(f, "{} {}{:0>4}", self.seconds, sign, self.offset.abs())
-
    }
-
}
-

-
impl fmt::Display for Author {
-
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-
        write!(f, "{} <{}> {}", self.name, self.email, self.time,)
-
    }
-
}
-

-
impl TryFrom<&Author> for git2::Signature<'_> {
-
    type Error = git2::Error;
-

-
    fn try_from(person: &Author) -> Result<Self, Self::Error> {
-
        let time = git2::Time::new(person.time.seconds, person.time.offset);
-
        git2::Signature::new(&person.name, &person.email, &time)
-
    }
-
}
-

-
impl<'a> TryFrom<&git2::Signature<'a>> for Author {
-
    type Error = str::Utf8Error;
-

-
    fn try_from(value: &git2::Signature<'a>) -> Result<Self, Self::Error> {
-
        Ok(Self {
-
            name: str::from_utf8(value.name_bytes())?.to_string(),
-
            email: str::from_utf8(value.email_bytes())?.to_string(),
-
            time: value.when().into(),
-
        })
-
    }
-
}
-

-
#[derive(Debug, Error)]
-
pub enum ParseError {
-
    #[error("missing '{0}' while parsing person signature")]
-
    Missing(&'static str),
-
    #[error("offset was incorrect format while parsing person signature")]
-
    Offset(#[source] ParseIntError),
-
    #[error("time was incorrect format while parsing person signature")]
-
    Time(#[source] ParseIntError),
-
    #[error("time offset is expected to be '+'/'-' for a person siganture")]
-
    UnknownOffset,
-
}
-

-
impl FromStr for Author {
-
    type Err = ParseError;
-

-
    fn from_str(s: &str) -> Result<Self, Self::Err> {
-
        let mut components = s.split(' ');
-
        let offset = match components.next_back() {
-
            None => return Err(ParseError::Missing("offset")),
-
            Some(offset) => offset.parse::<i32>().map_err(ParseError::Offset)?,
-
        };
-
        let time = match components.next_back() {
-
            None => return Err(ParseError::Missing("time")),
-
            Some(time) => time.parse::<i64>().map_err(ParseError::Time)?,
-
        };
-
        let time = Time::new(time, offset);
-

-
        let email = components
-
            .next_back()
-
            .ok_or(ParseError::Missing("email"))?
-
            .trim_matches(|c| c == '<' || c == '>')
-
            .to_owned();
-
        let name = components.collect::<Vec<_>>().join(" ");
-
        Ok(Self { name, email, time })
-
    }
-
}
deleted git-commit/src/headers.rs
@@ -1,71 +0,0 @@
-
use std::borrow::Cow;
-

-
const BEGIN_SSH: &str = "-----BEGIN SSH SIGNATURE-----\n";
-
const BEGIN_PGP: &str = "-----BEGIN PGP SIGNATURE-----\n";
-

-
/// A collection of headers stored in a [`super::Commit`].
-
///
-
/// Note: these do not include `tree`, `parent`, `author`, and `committer`.
-
#[derive(Clone, Debug, Default)]
-
pub struct Headers(pub(super) Vec<(String, String)>);
-

-
/// A `gpgsig` signature stored in a [`super::Commit`].
-
pub enum Signature<'a> {
-
    /// A PGP signature, i.e. starts with `-----BEGIN PGP SIGNATURE-----`.
-
    Pgp(Cow<'a, str>),
-
    /// A SSH signature, i.e. starts with `-----BEGIN SSH SIGNATURE-----`.
-
    Ssh(Cow<'a, str>),
-
}
-

-
impl<'a> Signature<'a> {
-
    fn from_str(s: &'a str) -> Result<Self, UnknownScheme> {
-
        if s.starts_with(BEGIN_SSH) {
-
            Ok(Signature::Ssh(Cow::Borrowed(s)))
-
        } else if s.starts_with(BEGIN_PGP) {
-
            Ok(Signature::Pgp(Cow::Borrowed(s)))
-
        } else {
-
            Err(UnknownScheme)
-
        }
-
    }
-
}
-

-
pub struct UnknownScheme;
-

-
impl<'a> ToString for Signature<'a> {
-
    fn to_string(&self) -> String {
-
        match self {
-
            Signature::Pgp(pgp) => pgp.to_string(),
-
            Signature::Ssh(ssh) => ssh.to_string(),
-
        }
-
    }
-
}
-

-
impl Headers {
-
    pub fn new() -> Self {
-
        Headers(Vec::new())
-
    }
-

-
    pub fn iter(&self) -> impl Iterator<Item = (&str, &str)> {
-
        self.0.iter().map(|(k, v)| (k.as_str(), v.as_str()))
-
    }
-

-
    pub fn values<'a>(&'a self, name: &'a str) -> impl Iterator<Item = &'a str> + '_ {
-
        self.iter()
-
            .filter_map(move |(k, v)| (k == name).then_some(v))
-
    }
-

-
    pub fn signatures(&self) -> impl Iterator<Item = Signature> + '_ {
-
        self.0.iter().filter_map(|(k, v)| {
-
            if k == "gpgsig" {
-
                Signature::from_str(v).ok()
-
            } else {
-
                None
-
            }
-
        })
-
    }
-

-
    /// Push a header to the end of the headers section.
-
    pub fn push(&mut self, name: &str, value: &str) {
-
        self.0.push((name.to_owned(), value.trim().to_owned()));
-
    }
-
}
deleted git-commit/src/lib.rs
@@ -1,295 +0,0 @@
-
//! The `git-commit` crate provides parsing a displaying of a [git
-
//! commit][git-commit].
-
//!
-
//! The [`Commit`] data can be constructed using the `FromStr`
-
//! implementation, or by converting from a `git2::Buf`.
-
//!
-
//! The [`Headers`] can be accessed via [`Commit::headers`]. If the
-
//! signatures of the commit are of particular interest, the
-
//! [`Commit::signatures`] method can be used, which returns a series of
-
//! [`Signature`]s.
-
//!
-
//! [git-commit]: https://git-scm.com/book/en/v2/Git-Internals-Git-Objects
-

-
use std::{
-
    fmt::Write as _,
-
    str::{self, FromStr},
-
};
-

-
use git2::{ObjectType, Oid};
-

-
pub mod author;
-
pub use author::Author;
-

-
pub mod headers;
-
pub use headers::{Headers, Signature};
-

-
pub mod trailers;
-
pub use trailers::{OwnedTrailer, Trailer, Trailers};
-

-
/// A git commit in its object description form, i.e. the output of
-
/// `git cat-file` for a commit object.
-
#[derive(Debug)]
-
pub struct Commit {
-
    tree: Oid,
-
    parents: Vec<Oid>,
-
    author: Author,
-
    committer: Author,
-
    headers: Headers,
-
    message: String,
-
    trailers: Vec<OwnedTrailer>,
-
}
-

-
impl Commit {
-
    pub fn new<I, T>(
-
        tree: Oid,
-
        parents: Vec<Oid>,
-
        author: Author,
-
        committer: Author,
-
        headers: Headers,
-
        message: String,
-
        trailers: I,
-
    ) -> Self
-
    where
-
        I: IntoIterator<Item = T>,
-
        OwnedTrailer: From<T>,
-
    {
-
        let trailers = trailers.into_iter().map(OwnedTrailer::from).collect();
-
        Self {
-
            tree,
-
            parents,
-
            author,
-
            committer,
-
            headers,
-
            message,
-
            trailers,
-
        }
-
    }
-

-
    /// Read the [`Commit`] from the `repo` that is expected to be found at
-
    /// `oid`.
-
    pub fn read(repo: &git2::Repository, oid: Oid) -> Result<Self, error::Read> {
-
        let odb = repo.odb()?;
-
        let object = odb.read(oid)?;
-
        Ok(Commit::try_from(object.data())?)
-
    }
-

-
    /// Write the given [`Commit`] to the `repo`. The resulting `Oid`
-
    /// is the identifier for this commit.
-
    pub fn write(&self, repo: &git2::Repository) -> Result<Oid, git2::Error> {
-
        let odb = repo.odb()?;
-
        odb.write(ObjectType::Commit, self.to_string().as_bytes())
-
    }
-

-
    /// The tree [`Oid`] this commit points to.
-
    pub fn tree(&self) -> Oid {
-
        self.tree
-
    }
-

-
    /// The parent [`Oid`]s of this commit.
-
    pub fn parents(&self) -> impl Iterator<Item = Oid> + '_ {
-
        self.parents.iter().copied()
-
    }
-

-
    /// The author of this commit, i.e. the header corresponding to `author`.
-
    pub fn author(&self) -> &Author {
-
        &self.author
-
    }
-

-
    /// The committer of this commit, i.e. the header corresponding to
-
    /// `committer`.
-
    pub fn committer(&self) -> &Author {
-
        &self.committer
-
    }
-

-
    /// The message body of this commit.
-
    pub fn message(&self) -> &str {
-
        &self.message
-
    }
-

-
    /// The [`Signature`]s found in this commit, i.e. the headers corresponding
-
    /// to `gpgsig`.
-
    pub fn signatures(&self) -> impl Iterator<Item = Signature> + '_ {
-
        self.headers.signatures()
-
    }
-

-
    /// The [`Headers`] found in this commit.
-
    ///
-
    /// Note: these do not include `tree`, `parent`, `author`, and `committer`.
-
    pub fn headers(&self) -> impl Iterator<Item = (&str, &str)> {
-
        self.headers.iter()
-
    }
-

-
    /// Iterate over the [`Headers`] values that match the provided `name`.
-
    pub fn values<'a>(&'a self, name: &'a str) -> impl Iterator<Item = &'a str> + '_ {
-
        self.headers.values(name)
-
    }
-

-
    /// Push a header to the end of the headers section.
-
    pub fn push_header(&mut self, name: &str, value: &str) {
-
        self.headers.push(name, value.trim());
-
    }
-

-
    pub fn trailers(&self) -> impl Iterator<Item = &OwnedTrailer> {
-
        self.trailers.iter()
-
    }
-
}
-

-
pub mod error {
-
    use std::str;
-

-
    use thiserror::Error;
-

-
    use super::author;
-

-
    #[derive(Debug, Error)]
-
    pub enum Read {
-
        #[error(transparent)]
-
        Git(#[from] git2::Error),
-
        #[error(transparent)]
-
        Parse(#[from] Parse),
-
    }
-

-
    #[derive(Debug, Error)]
-
    pub enum Parse {
-
        #[error(transparent)]
-
        Author(#[from] author::ParseError),
-
        #[error("invalid '{header}'")]
-
        InvalidHeader {
-
            header: &'static str,
-
            #[source]
-
            err: git2::Error,
-
        },
-
        #[error("invalid git commit object format")]
-
        InvalidFormat,
-
        #[error("missing '{0}' while parsing commit")]
-
        Missing(&'static str),
-
        #[error("error occurred while checking for git-trailers: {0}")]
-
        Trailers(#[source] git2::Error),
-
        #[error(transparent)]
-
        Utf8(#[from] str::Utf8Error),
-
    }
-
}
-

-
impl TryFrom<git2::Buf> for Commit {
-
    type Error = error::Parse;
-

-
    fn try_from(value: git2::Buf) -> Result<Self, Self::Error> {
-
        value.as_str().ok_or(error::Parse::InvalidFormat)?.parse()
-
    }
-
}
-

-
impl TryFrom<&[u8]> for Commit {
-
    type Error = error::Parse;
-

-
    fn try_from(data: &[u8]) -> Result<Self, Self::Error> {
-
        Commit::from_str(str::from_utf8(data)?)
-
    }
-
}
-

-
impl FromStr for Commit {
-
    type Err = error::Parse;
-

-
    fn from_str(buffer: &str) -> Result<Self, Self::Err> {
-
        let (header, message) = buffer
-
            .split_once("\n\n")
-
            .ok_or(error::Parse::InvalidFormat)?;
-
        let mut lines = header.lines();
-

-
        let tree = match lines.next() {
-
            Some(tree) => tree
-
                .strip_prefix("tree ")
-
                .map(git2::Oid::from_str)
-
                .transpose()
-
                .map_err(|err| error::Parse::InvalidHeader {
-
                    header: "tree",
-
                    err,
-
                })?
-
                .ok_or(error::Parse::Missing("tree"))?,
-
            None => return Err(error::Parse::Missing("tree")),
-
        };
-

-
        let mut parents = Vec::new();
-
        let mut author: Option<Author> = None;
-
        let mut committer: Option<Author> = None;
-
        let mut headers = Headers::new();
-

-
        for line in lines {
-
            // Check if a signature is still being parsed
-
            if let Some(rest) = line.strip_prefix(' ') {
-
                let value: &mut String = headers
-
                    .0
-
                    .last_mut()
-
                    .map(|(_, v)| v)
-
                    .ok_or(error::Parse::InvalidFormat)?;
-
                value.push('\n');
-
                value.push_str(rest);
-
                continue;
-
            }
-

-
            if let Some((name, value)) = line.split_once(' ') {
-
                match name {
-
                    "parent" => parents.push(git2::Oid::from_str(value).map_err(|err| {
-
                        error::Parse::InvalidHeader {
-
                            header: "parent",
-
                            err,
-
                        }
-
                    })?),
-
                    "author" => author = Some(value.parse::<Author>()?),
-
                    "committer" => committer = Some(value.parse::<Author>()?),
-
                    _ => headers.push(name, value),
-
                }
-
                continue;
-
            }
-
        }
-

-
        let trailers = trailers::Trailers::parse(message).map_err(error::Parse::Trailers)?;
-

-
        let message = message
-
            .strip_suffix(&trailers.to_string(": "))
-
            .unwrap_or(message)
-
            .to_string();
-

-
        let trailers = trailers.iter().map(OwnedTrailer::from).collect();
-

-
        Ok(Self {
-
            tree,
-
            parents,
-
            author: author.ok_or(error::Parse::Missing("author"))?,
-
            committer: committer.ok_or(error::Parse::Missing("committer"))?,
-
            headers,
-
            message,
-
            trailers,
-
        })
-
    }
-
}
-

-
impl ToString for Commit {
-
    fn to_string(&self) -> String {
-
        let mut buf = String::new();
-

-
        writeln!(buf, "tree {}", self.tree).ok();
-

-
        for parent in &self.parents {
-
            writeln!(buf, "parent {parent}").ok();
-
        }
-

-
        writeln!(buf, "author {}", self.author).ok();
-
        writeln!(buf, "committer {}", self.committer).ok();
-

-
        for (name, value) in self.headers.iter() {
-
            writeln!(buf, "{name} {}", value.replace('\n', "\n ")).ok();
-
        }
-
        writeln!(buf).ok();
-
        write!(buf, "{}", self.message.trim()).ok();
-
        writeln!(buf).ok();
-

-
        if !self.trailers.is_empty() {
-
            writeln!(buf).ok();
-
        }
-
        for trailer in self.trailers.iter() {
-
            writeln!(buf, "{}", Trailer::from(trailer).display(": ")).ok();
-
        }
-
        buf
-
    }
-
}
deleted git-commit/src/trailers.rs
@@ -1,214 +0,0 @@
-
use std::{borrow::Cow, fmt, fmt::Write, ops::Deref, str::FromStr};
-

-
use git2::{MessageTrailersStrs, MessageTrailersStrsIterator};
-

-
/// A Git commit's set of trailers that are left in the commit's
-
/// message.
-
///
-
/// Trailers are key/value pairs in the last paragraph of a message,
-
/// not including any patches or conflicts that may be present.
-
///
-
/// # Usage
-
///
-
/// To construct `Trailers`, you can use [`Trailers::parse`] or its
-
/// `FromStr` implementation.
-
///
-
/// To iterate over the trailers, you can use [`Trailers::iter`].
-
///
-
/// To render the trailers to a `String`, you can use
-
/// [`Trailers::to_string`] or its `Display` implementation (note that
-
/// it will default to using `": "` as the separator.
-
///
-
/// # Examples
-
///
-
/// ```text
-
/// Add new functionality
-
///
-
/// Making code better with new functionality.
-
///
-
/// X-Signed-Off-By: Alex Sellier
-
/// X-Co-Authored-By: Fintan Halpenny
-
/// ```
-
///
-
/// The trailers in the above example are:
-
///
-
/// ```text
-
/// X-Signed-Off-By: Alex Sellier
-
/// X-Co-Authored-By: Fintan Halpenny
-
/// ```
-
pub struct Trailers {
-
    inner: MessageTrailersStrs,
-
}
-

-
impl Trailers {
-
    pub fn parse(message: &str) -> Result<Self, git2::Error> {
-
        Ok(Self {
-
            inner: git2::message_trailers_strs(message)?,
-
        })
-
    }
-

-
    pub fn iter(&self) -> Iter<'_> {
-
        Iter {
-
            inner: self.inner.iter(),
-
        }
-
    }
-

-
    pub fn to_string<'a, S>(&self, sep: S) -> String
-
    where
-
        S: Separator<'a>,
-
    {
-
        let mut buf = String::new();
-
        for (i, trailer) in self.iter().enumerate() {
-
            if i > 0 {
-
                writeln!(buf).ok();
-
            }
-

-
            write!(buf, "{}", trailer.display(sep.sep_for(&trailer.token))).ok();
-
        }
-
        writeln!(buf).ok();
-
        buf
-
    }
-
}
-

-
impl fmt::Display for Trailers {
-
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-
        f.write_str(&self.to_string(": "))
-
    }
-
}
-

-
pub trait Separator<'a> {
-
    fn sep_for(&self, token: &Token) -> &'a str;
-
}
-

-
impl<'a> Separator<'a> for &'a str {
-
    fn sep_for(&self, _: &Token) -> &'a str {
-
        self
-
    }
-
}
-

-
impl<'a, F> Separator<'a> for F
-
where
-
    F: Fn(&Token) -> &'a str,
-
{
-
    fn sep_for(&self, token: &Token) -> &'a str {
-
        self(token)
-
    }
-
}
-

-
impl FromStr for Trailers {
-
    type Err = git2::Error;
-

-
    fn from_str(s: &str) -> Result<Self, Self::Err> {
-
        Self::parse(s)
-
    }
-
}
-

-
pub struct Iter<'a> {
-
    inner: MessageTrailersStrsIterator<'a>,
-
}
-

-
impl<'a> Iterator for Iter<'a> {
-
    type Item = Trailer<'a>;
-

-
    fn next(&mut self) -> Option<Self::Item> {
-
        let (token, value) = self.inner.next()?;
-
        Some(Trailer {
-
            token: Token(token),
-
            value: Cow::Borrowed(value),
-
        })
-
    }
-
}
-

-
#[derive(Debug, Clone, Eq, PartialEq)]
-
pub struct Token<'a>(&'a str);
-

-
impl Deref for Token<'_> {
-
    type Target = str;
-

-
    fn deref(&self) -> &Self::Target {
-
        self.0
-
    }
-
}
-

-
pub struct Display<'a> {
-
    trailer: &'a Trailer<'a>,
-
    separator: &'a str,
-
}
-

-
impl<'a> fmt::Display for Display<'a> {
-
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-
        write!(
-
            f,
-
            "{}{}{}",
-
            self.trailer.token.deref(),
-
            self.separator,
-
            self.trailer.value,
-
        )
-
    }
-
}
-

-
/// A trailer is a key/value pair found in the last paragraph of a Git
-
/// commit message, not including any patches or conflicts that may be
-
/// present.
-
#[derive(Debug, Clone, Eq, PartialEq)]
-
pub struct Trailer<'a> {
-
    pub token: Token<'a>,
-
    pub value: Cow<'a, str>,
-
}
-

-
impl<'a> Trailer<'a> {
-
    pub fn display(&'a self, separator: &'a str) -> Display<'a> {
-
        Display {
-
            trailer: self,
-
            separator,
-
        }
-
    }
-

-
    pub fn to_owned(&self) -> OwnedTrailer {
-
        OwnedTrailer::from(self)
-
    }
-
}
-

-
/// A version of the [`Trailer`] which owns its token and
-
/// value. Useful for when you need to carry trailers around in a long
-
/// lived data structure.
-
#[derive(Debug)]
-
pub struct OwnedTrailer {
-
    pub token: OwnedToken,
-
    pub value: String,
-
}
-

-
#[derive(Debug)]
-
pub struct OwnedToken(String);
-

-
impl Deref for OwnedToken {
-
    type Target = str;
-

-
    fn deref(&self) -> &Self::Target {
-
        &self.0
-
    }
-
}
-

-
impl<'a> From<&Trailer<'a>> for OwnedTrailer {
-
    fn from(t: &Trailer<'a>) -> Self {
-
        OwnedTrailer {
-
            token: OwnedToken(t.token.0.to_string()),
-
            value: t.value.to_string(),
-
        }
-
    }
-
}
-

-
impl<'a> From<Trailer<'a>> for OwnedTrailer {
-
    fn from(t: Trailer<'a>) -> Self {
-
        (&t).into()
-
    }
-
}
-

-
impl<'a> From<&'a OwnedTrailer> for Trailer<'a> {
-
    fn from(t: &'a OwnedTrailer) -> Self {
-
        Trailer {
-
            token: Token(t.token.0.as_str()),
-
            value: Cow::from(&t.value),
-
        }
-
    }
-
}
deleted git-commit/t/Cargo.toml
@@ -1,26 +0,0 @@
-
[package]
-
name = "git-commit-test"
-
version = "0.1.0"
-
license = "MIT OR Apache-2.0"
-
edition = "2021"
-

-
publish = false
-

-
[lib]
-
doctest = false
-
test = true
-
doc = false
-

-
[features]
-
test = []
-

-
[dev-dependencies.git-commit]
-
path = ".."
-

-
[dev-dependencies.git2]
-
version = "0.16.1"
-
default-features = false
-
features = ["vendored-libgit2"]
-

-
[dev-dependencies.test-helpers]
-
path = "../../test/test-helpers"

\ No newline at end of file
deleted git-commit/t/src/commit.rs
@@ -1,149 +0,0 @@
-
use std::str::FromStr as _;
-

-
use git_commit::Commit;
-

-
const NO_TRAILER: &str = "\
-
tree 50d6ef440728217febf9e35716d8b0296608d7f8
-
parent 0ad95dbdfe9fdf81938ca419cf740469173e2022
-
parent a4ec9e07e1b2e6f37f7119651ae3bb63b79988b6
-
author Fintan Halpenny <fintan.halpenny@gmail.com> 1669292989 +0000
-
committer Fintan Halpenny <fintan.halpenny@gmail.com> 1669292989 +0000
-

-
Merge remote-tracking branch 'origin/surf/organise-tests'
-

-
* origin/surf/organise-tests:
-
  radicle-surf: organise tests
-
";
-

-
const SINGLE_TRAILER: &str = "\
-
tree 50d6ef440728217febf9e35716d8b0296608d7f8
-
parent 0ad95dbdfe9fdf81938ca419cf740469173e2022
-
parent a4ec9e07e1b2e6f37f7119651ae3bb63b79988b6
-
author Fintan Halpenny <fintan.halpenny@gmail.com> 1669292989 +0000
-
committer Fintan Halpenny <fintan.halpenny@gmail.com> 1669292989 +0000
-

-
Merge remote-tracking branch 'origin/surf/organise-tests'
-

-
* origin/surf/organise-tests:
-
  radicle-surf: organise tests
-

-
Signed-off-by: Fintan Halpenny <fintan.halpenny@gmail.com>
-
";
-

-
const UNSIGNED: &str = "\
-
tree c66cc435f83ed0fba90ed4500e9b4b96e9bd001b
-
parent af06ad645133f580a87895353508053c5de60716
-
author Alexis Sellier <alexis@radicle.xyz> 1664467633 +0200
-
committer Alexis Sellier <alexis@radicle.xyz> 1664786099 -0200
-

-
Add SSH functionality with new `radicle-ssh`
-

-
We borrow code from `thrussh`, refactored to be runtime-less.
-

-
X-Signed-Off-By: Alex Sellier
-
X-Co-Authored-By: Fintan Halpenny
-
";
-

-
const SSH_SIGNATURE: &str = "\
-
-----BEGIN SSH SIGNATURE-----
-
U1NIU0lHAAAAAQAAADMAAAALc3NoLWVkMjU1MTkAAAAgvjrQogRxxLjzzWns8+mKJAGzEX
-
4fm2ALoN7pyvD2ttQAAAADZ2l0AAAAAAAAAAZzaGE1MTIAAABTAAAAC3NzaC1lZDI1NTE5
-
AAAAQIQvhIewOgGfnXLgR5Qe1ZEr2vjekYXTdOfNWICi6ZiosgfZnIqV0enCPC4arVqQg+
-
GPp0HqxaB911OnSAr6bwU=
-
-----END SSH SIGNATURE-----";
-

-
const PGP_SIGNATURE: &str = "\
-
-----BEGIN PGP SIGNATURE-----
-
iQIzBAABCAAdFiEEHe7BWIo9taTY6TIiJVL7b2QGbLcFAmNcDhsACgkQJVL7b2QG
-
bLcc9Q//RgKf5N4enta9AuszGJZvdFhMPfIDUdw+WAZA6Z8zDPb/aAXZrPP/KIOM
-
zmX08FTqjP9B9YeWrEcFuAtxsRNqbDKrfpko9Y6bTsdrAJg3WIypBb9F8YDKJ6BO
-
CORJJqWOsLW129jW+mJDhcE0YTvPlcMiMI2qjVXKhU6Ag11W8IRZyTb9tvEaDjBR
-
YUnkPvgubv61K9BeUKexE2MakPBldaQtl0MF1Dk7/zo5btLd+KP0SOUKEhuMEu5b
-
LATHHdiYjt/2Xz7q8EcrFxXUaipxZe89dfTdi2ooJQw3ZDqjDHsGTHpDeBuzuSaJ
-
9fKVRwFz/78onfHPhmU4wfUhh+Fcl90p5/T+4dt2K6cr+7rq078e+aJYxkX2d0MG
-
PG0xGP0RN4g+X92K1kGuzoe4870xAnRTNh5nUB+X9snO8tVqQZTb0M2yI+sTsKrv
-
w/f+uiqL6e9DgIxlO5dgiNHCVoCs1QJ900jUGisrlzS4+n6GzMsG6s3c01X4yY9G
-
Ou/kGkMsn7tqejqC9RufygcchCFZqYwaHQwPkiYhfYGMarMpoCFvll0h8tSparpS
-
nnpAQXVdu8m3v1YdPUuTg5ksxSOe9HCIlVXGFhxy3iqCVRn+51FRnUI63rMTOm9/
-
LBqzvji02lDUPGqPgXfcCS0ty8FM2flBIXnwb8TDzCaPYhf53+U=
-
=6dw2
-
-----END PGP SIGNATURE-----";
-

-
const SIGNED: &str = "\
-
tree c66cc435f83ed0fba90ed4500e9b4b96e9bd001b
-
parent af06ad645133f580a87895353508053c5de60716
-
author Alexis Sellier <alexis@radicle.xyz> 1664467633 +0200
-
committer Alexis Sellier <alexis@radicle.xyz> 1664786099 -0200
-
other e6fe3c97619deb8ab4198620f9a7eb79d98363dd
-
gpgsig -----BEGIN SSH SIGNATURE-----
-
 U1NIU0lHAAAAAQAAADMAAAALc3NoLWVkMjU1MTkAAAAgvjrQogRxxLjzzWns8+mKJAGzEX
-
 4fm2ALoN7pyvD2ttQAAAADZ2l0AAAAAAAAAAZzaGE1MTIAAABTAAAAC3NzaC1lZDI1NTE5
-
 AAAAQIQvhIewOgGfnXLgR5Qe1ZEr2vjekYXTdOfNWICi6ZiosgfZnIqV0enCPC4arVqQg+
-
 GPp0HqxaB911OnSAr6bwU=
-
 -----END SSH SIGNATURE-----
-
gpgsig -----BEGIN PGP SIGNATURE-----
-
 iQIzBAABCAAdFiEEHe7BWIo9taTY6TIiJVL7b2QGbLcFAmNcDhsACgkQJVL7b2QG
-
 bLcc9Q//RgKf5N4enta9AuszGJZvdFhMPfIDUdw+WAZA6Z8zDPb/aAXZrPP/KIOM
-
 zmX08FTqjP9B9YeWrEcFuAtxsRNqbDKrfpko9Y6bTsdrAJg3WIypBb9F8YDKJ6BO
-
 CORJJqWOsLW129jW+mJDhcE0YTvPlcMiMI2qjVXKhU6Ag11W8IRZyTb9tvEaDjBR
-
 YUnkPvgubv61K9BeUKexE2MakPBldaQtl0MF1Dk7/zo5btLd+KP0SOUKEhuMEu5b
-
 LATHHdiYjt/2Xz7q8EcrFxXUaipxZe89dfTdi2ooJQw3ZDqjDHsGTHpDeBuzuSaJ
-
 9fKVRwFz/78onfHPhmU4wfUhh+Fcl90p5/T+4dt2K6cr+7rq078e+aJYxkX2d0MG
-
 PG0xGP0RN4g+X92K1kGuzoe4870xAnRTNh5nUB+X9snO8tVqQZTb0M2yI+sTsKrv
-
 w/f+uiqL6e9DgIxlO5dgiNHCVoCs1QJ900jUGisrlzS4+n6GzMsG6s3c01X4yY9G
-
 Ou/kGkMsn7tqejqC9RufygcchCFZqYwaHQwPkiYhfYGMarMpoCFvll0h8tSparpS
-
 nnpAQXVdu8m3v1YdPUuTg5ksxSOe9HCIlVXGFhxy3iqCVRn+51FRnUI63rMTOm9/
-
 LBqzvji02lDUPGqPgXfcCS0ty8FM2flBIXnwb8TDzCaPYhf53+U=
-
 =6dw2
-
 -----END PGP SIGNATURE-----
-

-
Add SSH functionality with new `radicle-ssh`
-

-
We borrow code from `thrussh`, refactored to be runtime-less.
-

-
X-Signed-Off-By: Alex Sellier
-
X-Co-Authored-By: Fintan Halpenny
-
";
-

-
#[test]
-
fn test_push_header() {
-
    let mut commit = Commit::from_str(UNSIGNED).unwrap();
-

-
    commit.push_header("other", "e6fe3c97619deb8ab4198620f9a7eb79d98363dd");
-
    commit.push_header("gpgsig", SSH_SIGNATURE);
-
    commit.push_header("gpgsig", PGP_SIGNATURE);
-

-
    assert_eq!(commit.to_string(), SIGNED);
-
}
-

-
#[test]
-
fn test_get_header() {
-
    let commit = Commit::from_str(SIGNED).unwrap();
-

-
    assert_eq!(
-
        commit
-
            .signatures()
-
            .map(|sig| sig.to_string())
-
            .collect::<Vec<_>>(),
-
        vec![SSH_SIGNATURE.to_owned(), PGP_SIGNATURE.to_owned()]
-
    );
-
    assert_eq!(
-
        commit.values("other").collect::<Vec<_>>(),
-
        vec![String::from("e6fe3c97619deb8ab4198620f9a7eb79d98363dd")],
-
    );
-
    assert!(commit.values("unknown").next().is_none());
-
}
-

-
#[test]
-
fn test_conversion() {
-
    assert_eq!(
-
        Commit::from_str(NO_TRAILER).unwrap().to_string(),
-
        NO_TRAILER
-
    );
-
    assert_eq!(
-
        Commit::from_str(SINGLE_TRAILER).unwrap().to_string(),
-
        SINGLE_TRAILER
-
    );
-
    assert_eq!(Commit::from_str(SIGNED).unwrap().to_string(), SIGNED);
-
    assert_eq!(Commit::from_str(UNSIGNED).unwrap().to_string(), UNSIGNED);
-
}
deleted git-commit/t/src/integration.rs
@@ -1,28 +0,0 @@
-
use std::io;
-

-
use git_commit::Commit;
-
use test_helpers::tempdir::WithTmpDir;
-

-
#[test]
-
fn valid_commits() {
-
    let radicle_git = format!(
-
        "file://{}",
-
        git2::Repository::discover(".").unwrap().path().display()
-
    );
-
    let repo = WithTmpDir::new(|path| {
-
        let repo = git2::Repository::clone(&radicle_git, path)
-
            .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
-
        Ok::<_, io::Error>(repo)
-
    })
-
    .unwrap();
-

-
    let mut walk = repo.revwalk().unwrap();
-
    walk.push_head().unwrap();
-

-
    // take the first 20 commits and make sure we can parse them
-
    for oid in walk.take(20) {
-
        let oid = oid.unwrap();
-
        let commit = Commit::read(&repo, oid);
-
        assert!(commit.is_ok(), "Oid: {oid}, Error: {commit:?}")
-
    }
-
}
deleted git-commit/t/src/lib.rs
@@ -1,5 +0,0 @@
-
#[cfg(test)]
-
mod commit;
-

-
#[cfg(test)]
-
mod integration;
deleted git-trailers/Cargo.toml
@@ -1,20 +0,0 @@
-
[package]
-
name = "git-trailers"
-
version = "0.1.0"
-
authors = [
-
  "Nuno Alexandre <hi@nunoalexandre.com>",
-
  "Kim Altintop <kim@eagain.st>",
-
  "Fintan Halpenny <fintan.halpenny@gmail.com>",
-
]
-
edition = "2018"
-
license = "GPL-3.0-or-later"
-
description = "Library to support parsing and display git trailers <https://git-scm.com/docs/git-interpret-trailers>"
-
keywords = ["git"]
-

-
[lib]
-
doctest = false
-
test = false
-

-
[dependencies]
-
nom = "7.1"
-
thiserror = "1.0"
deleted git-trailers/src/lib.rs
@@ -1,300 +0,0 @@
-
// Copyright © 2019-2020 The Radicle Foundation <hello@radicle.foundation>
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use std::{borrow::Cow, convert::TryFrom, fmt, ops::Deref};
-

-
use thiserror::Error;
-

-
#[derive(Debug, Error)]
-
#[non_exhaustive]
-
pub enum Error {
-
    #[error("the trailers paragraph is missing in the given message")]
-
    MissingParagraph,
-

-
    #[error("trailing data after trailers section: '{0}")]
-
    Trailing(String),
-

-
    #[error(transparent)]
-
    Parse(#[from] nom::Err<nom::error::Error<String>>),
-
}
-

-
#[derive(Debug, Clone, Eq, PartialEq)]
-
pub struct Trailer<'a> {
-
    pub token: Token<'a>,
-
    pub values: Vec<Cow<'a, str>>,
-
}
-

-
impl<'a> Trailer<'a> {
-
    pub fn display(&'a self, separator: &'a str) -> Display<'a> {
-
        Display {
-
            trailer: self,
-
            separator,
-
        }
-
    }
-

-
    pub fn to_owned(&self) -> OwnedTrailer {
-
        OwnedTrailer::from(self)
-
    }
-
}
-

-
#[derive(Debug, Clone, Eq, PartialEq)]
-
pub struct Token<'a>(&'a str);
-

-
/// A version of the Trailer<'a> which owns it's token and values. Useful for
-
/// when you need to carry trailers around in a long lived data structure.
-
#[derive(Debug)]
-
pub struct OwnedTrailer {
-
    pub token: OwnedToken,
-
    pub values: Vec<String>,
-
}
-

-
#[derive(Debug)]
-
pub struct OwnedToken(String);
-

-
impl<'a> From<&Trailer<'a>> for OwnedTrailer {
-
    fn from(t: &Trailer<'a>) -> Self {
-
        OwnedTrailer {
-
            token: OwnedToken(t.token.0.to_string()),
-
            values: t.values.iter().map(|v| v.to_string()).collect(),
-
        }
-
    }
-
}
-

-
impl<'a> From<Trailer<'a>> for OwnedTrailer {
-
    fn from(t: Trailer<'a>) -> Self {
-
        (&t).into()
-
    }
-
}
-

-
impl<'a> From<&'a OwnedTrailer> for Trailer<'a> {
-
    fn from(t: &'a OwnedTrailer) -> Self {
-
        Trailer {
-
            token: Token(t.token.0.as_str()),
-
            values: t.values.iter().map(Cow::from).collect(),
-
        }
-
    }
-
}
-

-
impl Deref for OwnedToken {
-
    type Target = str;
-

-
    fn deref(&self) -> &Self::Target {
-
        &self.0
-
    }
-
}
-

-
#[derive(Debug, Error)]
-
#[non_exhaustive]
-
pub enum InvalidToken {
-
    #[error("trailing characters: '{0}'")]
-
    Trailing(String),
-

-
    #[error(transparent)]
-
    Parse(#[from] nom::Err<nom::error::Error<String>>),
-
}
-

-
impl<'a> TryFrom<&'a str> for Token<'a> {
-
    type Error = InvalidToken;
-

-
    fn try_from(s: &'a str) -> Result<Self, Self::Error> {
-
        match parser::token(s) {
-
            Ok((rest, token)) if rest.is_empty() => Ok(token),
-
            Ok((trailing, _)) => Err(InvalidToken::Trailing(trailing.to_owned())),
-
            Err(e) => Err(e.to_owned().into()),
-
        }
-
    }
-
}
-

-
impl Deref for Token<'_> {
-
    type Target = str;
-

-
    fn deref(&self) -> &Self::Target {
-
        self.0
-
    }
-
}
-

-
pub struct Display<'a> {
-
    trailer: &'a Trailer<'a>,
-
    separator: &'a str,
-
}
-

-
impl<'a> fmt::Display for Display<'a> {
-
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-
        write!(
-
            f,
-
            "{}{}{}",
-
            self.trailer.token.deref(),
-
            self.separator,
-
            self.trailer.values.join("\n  ")
-
        )
-
    }
-
}
-

-
pub trait Separator<'a> {
-
    fn sep_for(&self, token: &Token) -> &'a str;
-
}
-

-
impl<'a> Separator<'a> for &'a str {
-
    fn sep_for(&self, _: &Token) -> &'a str {
-
        self
-
    }
-
}
-

-
impl<'a, F> Separator<'a> for F
-
where
-
    F: Fn(&Token) -> &'a str,
-
{
-
    fn sep_for(&self, token: &Token) -> &'a str {
-
        self(token)
-
    }
-
}
-

-
pub struct DisplayMany<'a, S> {
-
    separator: S,
-
    trailers: &'a [Trailer<'a>],
-
}
-

-
impl<'a, S> fmt::Display for DisplayMany<'a, S>
-
where
-
    S: Separator<'a>,
-
{
-
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-
        for (i, trailer) in self.trailers.iter().enumerate() {
-
            if i > 0 {
-
                writeln!(f)?
-
            }
-

-
            write!(
-
                f,
-
                "{}",
-
                trailer.display(self.separator.sep_for(&trailer.token))
-
            )?
-
        }
-

-
        Ok(())
-
    }
-
}
-

-
/// Parse the trailers of the given message. It looks up the last paragraph
-
/// of the message and attempts to parse each of its lines as a [Trailer].
-
/// Fails if no trailers paragraph is found or if at least one trailer
-
/// fails to be parsed.
-
pub fn parse<'a>(message: &'a str, separators: &'a str) -> Result<Vec<Trailer<'a>>, Error> {
-
    let trailers_paragraph =
-
        match parser::paragraphs(message.trim_end()).map(|(_, ps)| ps.last().cloned()) {
-
            Ok(None) | Err(_) => return Err(Error::MissingParagraph),
-
            Ok(Some(p)) => {
-
                if p.is_empty() {
-
                    return Err(Error::MissingParagraph);
-
                }
-
                p
-
            },
-
        };
-

-
    match parser::trailers(trailers_paragraph, separators) {
-
        Ok((rest, trailers)) if rest.is_empty() => Ok(trailers),
-
        Ok((unparseable, _)) => Err(Error::Trailing(unparseable.to_owned())),
-
        Err(e) => Err(e.to_owned().into()),
-
    }
-
}
-

-
/// Render a slice of trailers.
-
///
-
/// The `separator` can be either a string slice, or a closure which may choose
-
/// a different separator for each [`Token`] encountered. Note that multiline
-
/// trailers are rendered with a fixed indent, so the result is not
-
/// layout-preserving.
-
pub fn display<'a, S>(separator: S, trailers: &'a [Trailer<'a>]) -> DisplayMany<'a, S>
-
where
-
    S: Separator<'a>,
-
{
-
    DisplayMany {
-
        separator,
-
        trailers,
-
    }
-
}
-

-
pub mod parser {
-
    use std::borrow::Cow;
-

-
    use super::{Token, Trailer};
-
    use nom::{
-
        branch::alt,
-
        bytes::complete::{tag, take_until, take_while1},
-
        character::complete::{line_ending, not_line_ending, one_of, space0, space1},
-
        combinator::{map, rest},
-
        multi::{many0, separated_list1},
-
        sequence::{delimited, preceded, separated_pair, terminated},
-
        IResult,
-
    };
-

-
    const EMPTY_LINE: &str = "\n\n";
-

-
    pub fn paragraphs(s: &str) -> IResult<&str, Vec<&str>> {
-
        separated_list1(tag(EMPTY_LINE), paragraph)(s)
-
    }
-

-
    pub fn paragraph(s: &str) -> IResult<&str, &str> {
-
        alt((take_until(EMPTY_LINE), rest))(s)
-
    }
-

-
    /// Parse all the possible trailers.
-
    /// It stops when it can no longer parse valid trailers.
-
    pub fn trailers<'a>(s: &'a str, separators: &'a str) -> IResult<&'a str, Vec<Trailer<'a>>> {
-
        many0(|s| trailer(s, separators))(s)
-
    }
-

-
    /// Parse a trailer, which can have an inlined or multilined value.
-
    pub fn trailer<'a>(s: &'a str, separators: &'a str) -> IResult<&'a str, Trailer<'a>> {
-
        let mut parser = separated_pair(token, |s| separator(separators, s), values);
-
        let (rest, (token, values)) = parser(s)?;
-
        Ok((rest, Trailer { token, values }))
-
    }
-

-
    /// Parse a trailer token.
-
    pub(super) fn token(s: &str) -> IResult<&str, Token> {
-
        take_while1(|c: char| c.is_alphanumeric() || c == '-')(s)
-
            .map(|(i, token_str)| (i, Token(token_str)))
-
    }
-

-
    /// Parse the trailer separator, which can be delimited by spaces.
-
    fn separator<'a>(separators: &'a str, s: &'a str) -> IResult<&'a str, char> {
-
        delimited(space0, one_of(separators), space0)(s)
-
    }
-

-
    /// Parse the trailer values, which gathers the value after the separator
-
    /// (if any) and possible following multilined values, indented by a
-
    /// space.
-
    fn values(s: &str) -> IResult<&str, Vec<Cow<'_, str>>> {
-
        let (r, opt_inline_value) = until_eol_or_eof(s)?;
-
        let (r, mut values) = multiline_values(r)?;
-
        if !opt_inline_value.is_empty() {
-
            values.insert(0, opt_inline_value.into())
-
        }
-
        Ok((r, values))
-
    }
-

-
    fn multiline_values(s: &str) -> IResult<&str, Vec<Cow<'_, str>>> {
-
        many0(map(indented_line_contents, Cow::from))(s)
-
    }
-

-
    fn until_eol_or_eof(s: &str) -> IResult<&str, &str> {
-
        alt((until_eol, rest))(s)
-
    }
-

-
    /// Parse an indented line, i.e, a line that starts with a space.
-
    /// Extracts the line contents, ignoring the indentation and the
-
    /// new line character.
-
    fn indented_line_contents(s: &str) -> IResult<&str, &str> {
-
        preceded(space1, until_eol_or_eof)(s)
-
    }
-

-
    /// Consume the input until the end of the line, ignoring the new line
-
    /// character.
-
    fn until_eol(s: &str) -> IResult<&str, &str> {
-
        terminated(not_line_ending, line_ending)(s)
-
    }
-
}
deleted git-trailers/t/Cargo.toml
@@ -1,22 +0,0 @@
-
[package]
-
name = "git-trailers-test"
-
version = "0.1.0"
-
edition = "2021"
-
license = "GPL-3.0-or-later"
-

-
publish = false
-

-
[lib]
-
doctest = false
-
test = true
-
doc = false
-

-
[features]
-
test = []
-

-
[dev-dependencies]
-
assert_matches = "1.5"
-
pretty_assertions = "1.1"
-

-
[dev-dependencies.git-trailers]
-
path = ".."
deleted git-trailers/t/src/lib.rs
@@ -1,2 +0,0 @@
-
#[cfg(test)]
-
mod tests;
deleted git-trailers/t/src/tests.rs
@@ -1,155 +0,0 @@
-
// Copyright © 2019-2020 The Radicle Foundation <hello@radicle.foundation>
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use std::{borrow::Cow, convert::TryFrom as _, ops::Deref as _};
-

-
use assert_matches::assert_matches;
-
use git_trailers::{display, parse, Error, Token, Trailer};
-
use pretty_assertions::assert_eq;
-

-
#[test]
-
fn parse_message_with_valid_trailers() {
-
    let msg = r#"Subject
-

-
A multiline
-
description.
-

-
Co-authored-by: John Doe <john.doe@test.com>
-
Ticket: #42
-
Tested-by:
-
    John <john@test.com>
-
    Jane <jane@test.com>
-
Just-a-token:
-

-
"#;
-
    assert_eq!(
-
        parse(msg, ":").unwrap(),
-
        vec![
-
            new_trailer("Co-authored-by", &["John Doe <john.doe@test.com>"]),
-
            new_trailer("Ticket", &["#42"]),
-
            new_trailer(
-
                "Tested-by",
-
                &["John <john@test.com>", "Jane <jane@test.com>"]
-
            ),
-
            new_trailer("Just-a-token", &[]),
-
        ]
-
    )
-
}
-

-
#[test]
-
fn parse_message_trailers_with_custom_separators() {
-
    let separators = ":=$";
-
    let msg = r#"Subject
-

-
A multiline
-
description.
-

-
Co-authored-by: John Doe <john.doe@test.com>
-
Ticket = #42
-
Tested-by $User <user@test.com>
-
    John <john@test.com>
-
    Jane <jane@test.com>
-
"#;
-
    assert_eq!(
-
        parse(msg, separators).unwrap(),
-
        vec![
-
            new_trailer("Co-authored-by", &["John Doe <john.doe@test.com>"]),
-
            new_trailer("Ticket", &["#42"]),
-
            new_trailer(
-
                "Tested-by",
-
                &[
-
                    "User <user@test.com>",
-
                    "John <john@test.com>",
-
                    "Jane <jane@test.com>"
-
                ]
-
            ),
-
        ]
-
    )
-
}
-

-
#[test]
-
fn parse_message_trailers_with_missing_token() {
-
    let msg = r#"Subject
-

-
Good-trailer: true
-
John Doe <john.doe@test.com> # Unparsable token due to missing token"#;
-
    assert_matches!(
-
        parse(msg, ":"),
-
        Err(Error::Trailing(s))
-
            if s == "John Doe <john.doe@test.com> # Unparsable token due to missing token"
-
    )
-
}
-

-
#[test]
-
fn parse_message_trailers_with_invalid_token() {
-
    let msg = r#"Subject
-

-
Good-trailer: true
-
&!#: John Doe <john.doe@test.com> # Unparsable token due to invalid token"#;
-
    assert_matches!(
-
        parse(msg, ":"),
-
        Err(Error::Trailing(s))
-
            if s == "&!#: John Doe <john.doe@test.com> # Unparsable token due to invalid token"
-
    )
-
}
-

-
#[test]
-
fn parse_message_with_only_trailers() {
-
    let msg = r#"Co-authored-by: John Doe <john.doe@test.com>
-
Ticket: #42
-
Tested-by: Tester <tester@test.com>
-
"#;
-
    assert_eq!(
-
        parse(msg, ":").unwrap(),
-
        vec![
-
            new_trailer("Co-authored-by", &["John Doe <john.doe@test.com>"]),
-
            new_trailer("Ticket", &["#42"]),
-
            new_trailer("Tested-by", &["Tester <tester@test.com>"]),
-
        ]
-
    )
-
}
-

-
#[test]
-
fn parse_empty_message() {
-
    let msg = "";
-
    assert_matches!(parse(msg, ":"), Err(Error::MissingParagraph))
-
}
-

-
#[test]
-
fn display_static() {
-
    let msg = r#"Tested-by: Alice
-
  Bob
-
  Carol
-
  Dylan
-
Acked-by: Eve"#;
-

-
    let parsed = parse(msg, ":").unwrap();
-
    let rendered = format!("{}", display(": ", &parsed));
-
    assert_eq!(&rendered, msg);
-
}
-

-
#[test]
-
fn display_dynamic() {
-
    let msg = r#"Co-authored-by: John Doe <john.doe@test.com>
-
Tested-by: Tester <tester@test.com>
-
Fixes #42"#;
-

-
    let parsed = parse(msg, ":#").unwrap();
-
    let rendered = format!(
-
        "{}",
-
        display(
-
            |t: &Token| if t.deref() == "Fixes" { " #" } else { ": " },
-
            &parsed
-
        )
-
    );
-
    assert_eq!(rendered, msg)
-
}
-

-
fn new_trailer<'a>(token: &'a str, values: &[&'a str]) -> Trailer<'a> {
-
    Trailer {
-
        token: Token::try_from(token).unwrap(),
-
        values: values.iter().map(|s| Cow::from(*s)).collect(),
-
    }
-
}
deleted link-git/Cargo.toml
@@ -1,64 +0,0 @@
-
[package]
-
name = "link-git"
-
version = "0.1.0"
-
authors = ["Kim Altintop <kim@eagain.st>"]
-
edition = "2018"
-
license = "GPL-3.0-or-later"
-

-
description = "Core git types and functionality"
-

-
[lib]
-
doctest = false
-
test = false
-

-
[dependencies]
-
arc-swap = "1.4.0"
-
async-process = "1.1.0"
-
async-trait = "0.1"
-
blocking = "1.0.2"
-
bstr = "0.2"
-
futures-lite = "1.12.0"
-
futures-util = "0.3.15"
-
lazy_static = "1.4.0"
-
im = "15.0.0"
-
once_cell = "1.10"
-
parking_lot = "0.12"
-
pin-project = "1.0.7"
-
regex = "1.5.4"
-
rustc-hash = "1.1.0"
-
tempfile = "3.4"
-
thiserror = "1.0.30"
-
tracing = "0.1"
-
versions = "3.0.2"
-

-
# gitoxide
-
git-actor = "^0.6.0"
-
git-hash = "^0.8.0"
-
git-lock = "^1.0.1"
-
git-object = "^0.15.1"
-
git-odb = "^0.24.0"
-
git-ref = "^0.9.0"
-
git-traverse = "^0.10.0"
-

-
[dependencies.git-features]
-
version = "^0.17.0"
-
features = ["progress", "parallel", "zlib-ng-compat"]
-

-
[dependencies.git-pack]
-
version = "^0.14.0"
-
features = ["object-cache-dynamic", "pack-cache-lru-static", "pack-cache-lru-dynamic"]
-

-
[dependencies.git-packetline]
-
version = "^0.12.0"
-
features = ["async-io"]
-

-
[dependencies.git-protocol]
-
version = "^0.12.0"
-
features = ["async-client"]
-

-
# compat
-
[dependencies.git2]
-
version = "0.16.1"
-
default-features = false
-
features = ["vendored-libgit2"]
-
optional = true
deleted link-git/src/lib.rs
@@ -1,20 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
#[macro_use]
-
extern crate async_trait;
-

-
pub mod odb;
-
pub mod protocol;
-
pub mod refs;
-
pub use refs::db as refdb;
-
#[cfg(feature = "git2")]
-
pub mod service;
-

-
pub use git_actor as actor;
-
pub use git_hash as hash;
-
pub use git_lock as lock;
-
pub use git_object as object;
-
pub use git_traverse as traverse;
deleted link-git/src/odb.rs
@@ -1,51 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use git_hash::oid;
-
use thiserror::Error;
-

-
pub mod backend;
-
pub mod index;
-
pub mod pack;
-
pub mod window;
-

-
pub use git_pack::{cache, data::Object};
-

-
#[derive(Debug, Error)]
-
pub enum Error {
-
    #[error(transparent)]
-
    Packed(#[from] index::error::Lookup<pack::error::Data>),
-

-
    #[error(transparent)]
-
    Loose(#[from] git_odb::loose::find::Error),
-
}
-

-
pub struct Odb<I, D> {
-
    pub loose: backend::Loose,
-
    pub packed: backend::Packed<I, D>,
-
}
-

-
impl<I, D> Odb<I, D>
-
where
-
    I: index::Index,
-
    D: window::Cache,
-
{
-
    pub fn contains(&self, id: impl AsRef<oid>) -> bool {
-
        self.packed.contains(id.as_ref()) || self.loose.contains(id)
-
    }
-

-
    pub fn find<'a>(
-
        &self,
-
        id: impl AsRef<oid>,
-
        buf: &'a mut Vec<u8>,
-
        cache: &mut impl cache::DecodeEntry,
-
    ) -> Result<Option<Object<'a>>, Error> {
-
        let id = id.as_ref();
-
        if self.packed.contains(id) {
-
            return self.packed.find(id, buf, cache).map_err(Into::into);
-
        }
-
        self.loose.try_find(id, buf).map_err(Into::into)
-
    }
-
}
deleted link-git/src/odb/backend.rs
@@ -1,36 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use git_hash::oid;
-
use git_pack::{cache::DecodeEntry, data::Object};
-

-
use super::{index, pack, window};
-

-
pub type Loose = git_odb::loose::Store;
-

-
pub struct Packed<I, D> {
-
    pub index: I,
-
    pub data: D,
-
}
-

-
impl<I, D> Packed<I, D>
-
where
-
    I: index::Index,
-
    D: window::Cache,
-
{
-
    pub fn contains(&self, id: impl AsRef<oid>) -> bool {
-
        self.index.contains(id)
-
    }
-

-
    pub fn find<'a>(
-
        &self,
-
        id: impl AsRef<oid>,
-
        buf: &'a mut Vec<u8>,
-
        cache: &mut impl DecodeEntry,
-
    ) -> Result<Option<Object<'a>>, index::error::Lookup<pack::error::Data>> {
-
        self.index
-
            .lookup(|info| self.data.get(info), id, buf, cache)
-
    }
-
}
deleted link-git/src/odb/index.rs
@@ -1,307 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use std::{
-
    fs,
-
    io,
-
    path::{Path, PathBuf},
-
    sync::Arc,
-
};
-

-
use arc_swap::ArcSwap;
-
use git_hash::oid;
-
use git_pack::{
-
    cache::DecodeEntry,
-
    data::{Object, ResolvedBase},
-
};
-
use parking_lot::Mutex;
-
use tracing::trace;
-

-
use super::pack;
-

-
pub use git_pack::index::File as IndexFile;
-

-
mod metrics;
-
pub use metrics::{Metrics, Stats, StatsView};
-

-
pub mod error {
-
    use super::*;
-
    use thiserror::Error;
-

-
    #[derive(Debug, Error)]
-
    pub enum Discover {
-
        #[error(transparent)]
-
        Index(#[from] pack::error::Index),
-

-
        #[error(transparent)]
-
        Io(#[from] io::Error),
-
    }
-

-
    #[derive(Debug, Error)]
-
    pub enum Lookup<E> {
-
        #[error(transparent)]
-
        Lookup(E),
-

-
        #[error(transparent)]
-
        Reload(#[from] Discover),
-

-
        #[error(transparent)]
-
        Decode(#[from] git_pack::data::decode_entry::Error),
-
    }
-
}
-

-
pub trait Index {
-
    fn contains(&self, id: impl AsRef<oid>) -> bool;
-

-
    fn lookup<'a, F, E>(
-
        &self,
-
        pack_cache: F,
-
        id: impl AsRef<oid>,
-
        buf: &'a mut Vec<u8>,
-
        cache: &mut impl DecodeEntry,
-
    ) -> Result<Option<Object<'a>>, error::Lookup<E>>
-
    where
-
        F: FnOnce(&pack::Info) -> Result<Arc<pack::Data>, E>;
-
}
-

-
/// An [`Index`] which can be shared between threads.
-
///
-
/// [`Shared`] assumes that:
-
///
-
/// * newer packs are likely to contain recent objects
-
/// * lookups tend to favour recent objects
-
/// * lookups tend to expect the object to be found (the object id is either
-
///   pointed to by a ref, or linked to by an existing object)
-
///
-
/// Thus, it:
-
///
-
/// * orders indices found in `GIT_DIR/objects/pack` by modification time, and
-
///   queries the more recent ones first
-
/// * attempts to rescan `GIT_DIR/objects/pack` when an object id was _not_
-
///   found (assuming that this is due to a compaction)
-
///
-
/// Unless a reload occurs, lookups are lock-free and mostly wait-free. Writes
-
/// ([`Shared::push`], [`Shared::reload`]) are guarded by a [`Mutex`].
-
// TODO: consecutive lookups also tend to resolve to the same pack, so we could
-
// remember the index into the `im::Vector` where we found a match and look
-
// there first. This is what libgit2 does, but the heuristic is not necessarily
-
// true when `Shared` is shared across multiple concurrent link replication
-
// tasks; per-namespace packs are independent pre-compaction.
-
pub struct Shared<M> {
-
    pack_dir: PathBuf,
-
    indices: ArcSwap<im::Vector<Arc<pack::Index>>>,
-
    write: Mutex<()>,
-
    stats: M,
-
}
-

-
impl Shared<()> {
-
    pub fn open(git_dir: impl AsRef<Path>) -> Result<Self, error::Discover> {
-
        let pack_dir = git_dir.as_ref().join("objects").join("pack");
-
        let indices = discover(&pack_dir)?;
-

-
        Ok(Self {
-
            pack_dir,
-
            indices: ArcSwap::new(Arc::new(indices)),
-
            write: Mutex::new(()),
-
            stats: (),
-
        })
-
    }
-
}
-

-
impl<M> Shared<M>
-
where
-
    M: Metrics,
-
{
-
    pub fn with_stats(self) -> Shared<Stats> {
-
        self.with_metrics(Stats::default())
-
    }
-

-
    pub fn with_metrics<N: Metrics>(self, m: N) -> Shared<N> {
-
        Shared {
-
            pack_dir: self.pack_dir,
-
            indices: self.indices,
-
            write: self.write,
-
            stats: m,
-
        }
-
    }
-

-
    pub fn stats(&self) -> M::Snapshot {
-
        self.stats.snapshot(self.len())
-
    }
-

-
    /// Add a newly discovered [`pack::Index`].
-
    ///
-
    /// This index will be considered first by subsequent lookups. Note that it
-
    /// is only guaranteed that the index will be visible to readers if it
-
    /// resides in the `git_dir` this [`Shared`] was initialised with.
-
    pub fn push(&self, idx: pack::Index) {
-
        let lock = self.write.lock();
-
        let mut new = self.indices.load_full();
-
        Arc::make_mut(&mut new).push_front(Arc::new(idx));
-
        self.indices.store(new);
-
        drop(lock);
-

-
        self.stats.record_push()
-
    }
-

-
    /// Re-scan the packs directory and replace the in-memory indices with the
-
    /// result.
-
    ///
-
    /// If the application can intercept compaction events, this method can be
-
    /// used to release memory early. Otherwise it is not required to call this
-
    /// method, as [`Shared`] manages reloads automatically.
-
    pub fn reload(&self) -> Result<(), error::Discover> {
-
        let lock = self.write.lock();
-
        let indices = discover(&self.pack_dir)?;
-
        self.indices.store(Arc::new(indices));
-
        drop(lock);
-

-
        self.stats.record_reload();
-

-
        Ok(())
-
    }
-

-
    pub fn is_empty(&self) -> bool {
-
        self.indices.load().is_empty()
-
    }
-

-
    pub fn len(&self) -> usize {
-
        self.indices.load().len()
-
    }
-

-
    fn contains(&self, id: impl AsRef<oid>) -> bool {
-
        for i in 0..2 {
-
            for idx in self.indices.load().iter() {
-
                if idx.contains(&id) {
-
                    self.stats.record_hit();
-
                    return true;
-
                }
-
            }
-

-
            if i == 0 && self.reload().is_err() {
-
                self.stats.record_miss();
-
                return false;
-
            }
-
        }
-

-
        self.stats.record_miss();
-
        false
-
    }
-

-
    fn lookup<'a, F, E>(
-
        &self,
-
        pack_cache: F,
-
        id: impl AsRef<oid>,
-
        buf: &'a mut Vec<u8>,
-
        cache: &mut impl DecodeEntry,
-
    ) -> Result<Option<Object<'a>>, error::Lookup<E>>
-
    where
-
        F: FnOnce(&pack::Info) -> Result<Arc<pack::Data>, E>,
-
    {
-
        for i in 0..2 {
-
            for idx in self.indices.load().iter() {
-
                if let Some(ofs) = idx.ofs(&id) {
-
                    self.stats.record_hit();
-
                    return load_obj(ofs, idx, pack_cache, buf, cache).map(Some);
-
                }
-
            }
-

-
            if i == 0 {
-
                self.reload()?;
-
            }
-
        }
-

-
        self.stats.record_miss();
-
        Ok(None)
-
    }
-
}
-

-
fn load_obj<'a, F, E>(
-
    ofs: u64,
-
    idx: &pack::Index,
-
    pack_cache: F,
-
    buf: &'a mut Vec<u8>,
-
    cache: &mut impl DecodeEntry,
-
) -> Result<Object<'a>, error::Lookup<E>>
-
where
-
    F: FnOnce(&pack::Info) -> Result<Arc<pack::Data>, E>,
-
{
-
    let data = pack_cache(&idx.info).map_err(error::Lookup::Lookup)?;
-
    let pack = data.file();
-
    let entry = pack.entry(ofs);
-
    let obj = pack
-
        .decode_entry(
-
            entry,
-
            buf,
-
            |id, _| idx.ofs(id).map(|ofs| ResolvedBase::InPack(pack.entry(ofs))),
-
            cache,
-
        )
-
        .map(move |out| Object {
-
            kind: out.kind,
-
            data: buf.as_slice(),
-
            pack_location: None,
-
        })?;
-

-
    Ok(obj)
-
}
-

-
fn discover(pack_dir: impl AsRef<Path>) -> Result<im::Vector<Arc<pack::Index>>, error::Discover> {
-
    let pack_dir = pack_dir.as_ref();
-
    let pack_dir_disp = pack_dir.display();
-
    trace!("discovering packs at {}", pack_dir_disp);
-
    match fs::read_dir(pack_dir) {
-
        Ok(iter) => {
-
            let mut paths = Vec::new();
-
            for entry in iter {
-
                let entry = entry?;
-
                let path = entry.path();
-
                trace!("{}", path.display());
-
                let meta = entry.metadata()?;
-
                if meta.file_type().is_file() && path.extension().unwrap_or_default() == "idx" {
-
                    let mtime = meta.modified()?;
-
                    paths.push((path, mtime));
-
                }
-
            }
-
            paths.sort_by(|(_, mtime_a), (_, mtime_b)| mtime_a.cmp(mtime_b));
-

-
            let indices = paths
-
                .into_iter()
-
                .rev()
-
                .map(|(path, _)| Ok(pack::Index::open(path).map(Arc::new)?))
-
                .collect::<Result<_, error::Discover>>()?;
-

-
            Ok(indices)
-
        },
-
        // It's not an error if the directory doesn't exist, the repository
-
        // could contain only loose objects
-
        Err(e) if e.kind() == io::ErrorKind::NotFound => {
-
            trace!("not a directory: {}", pack_dir_disp);
-
            Ok(im::Vector::new())
-
        },
-
        Err(e) => Err(e.into()),
-
    }
-
}
-

-
impl<M> Index for Shared<M>
-
where
-
    M: Metrics,
-
{
-
    fn contains(&self, id: impl AsRef<oid>) -> bool {
-
        self.contains(id)
-
    }
-

-
    fn lookup<'a, F, E>(
-
        &self,
-
        pack_cache: F,
-
        id: impl AsRef<oid>,
-
        buf: &'a mut Vec<u8>,
-
        cache: &mut impl DecodeEntry,
-
    ) -> Result<Option<Object<'a>>, error::Lookup<E>>
-
    where
-
        F: FnOnce(&pack::Info) -> Result<Arc<pack::Data>, E>,
-
    {
-
        self.lookup(pack_cache, id, buf, cache)
-
    }
-
}
deleted link-git/src/odb/index/metrics.rs
@@ -1,80 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use std::sync::atomic::{AtomicUsize, Ordering};
-

-
pub struct StatsView {
-
    /// Total number of times a lookup was successful.
-
    pub hits: usize,
-
    /// Total number of times a lookup was unsuccessful.
-
    pub misses: usize,
-
    /// Total number of times an index was added explicitly via
-
    /// [`super::Shared::push`].
-
    pub pushes: usize,
-
    /// Total number of reloads via [`super::Shared::reload`].
-
    pub reloads: usize,
-
    /// Number of [`crate::odb::pack::Index`]es currently held.
-
    pub indices: usize,
-
}
-

-
#[derive(Default)]
-
pub struct Stats {
-
    hits: AtomicUsize,
-
    misses: AtomicUsize,
-
    pushes: AtomicUsize,
-
    reloads: AtomicUsize,
-
}
-

-
pub trait Metrics {
-
    type Snapshot;
-

-
    fn record_hit(&self);
-
    fn record_miss(&self);
-
    fn record_push(&self);
-
    fn record_reload(&self);
-

-
    fn snapshot(&self, indices: usize) -> Self::Snapshot;
-
}
-

-
impl Metrics for Stats {
-
    type Snapshot = StatsView;
-

-
    fn record_hit(&self) {
-
        self.hits.fetch_add(1, Ordering::Relaxed);
-
    }
-

-
    fn record_miss(&self) {
-
        self.misses.fetch_add(1, Ordering::Relaxed);
-
    }
-

-
    fn record_push(&self) {
-
        self.pushes.fetch_add(1, Ordering::Relaxed);
-
    }
-

-
    fn record_reload(&self) {
-
        self.reloads.fetch_add(1, Ordering::Relaxed);
-
    }
-

-
    fn snapshot(&self, indices: usize) -> Self::Snapshot {
-
        StatsView {
-
            hits: self.hits.load(Ordering::Relaxed),
-
            misses: self.misses.load(Ordering::Relaxed),
-
            pushes: self.pushes.load(Ordering::Relaxed),
-
            reloads: self.reloads.load(Ordering::Relaxed),
-
            indices,
-
        }
-
    }
-
}
-

-
impl Metrics for () {
-
    type Snapshot = ();
-

-
    fn record_hit(&self) {}
-
    fn record_miss(&self) {}
-
    fn record_push(&self) {}
-
    fn record_reload(&self) {}
-

-
    fn snapshot(&self, _: usize) -> Self::Snapshot {}
-
}
deleted link-git/src/odb/pack.rs
@@ -1,138 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use std::{
-
    path::{Path, PathBuf},
-
    sync::atomic::{AtomicUsize, Ordering},
-
};
-

-
use git_hash::{oid, ObjectId};
-
use git_pack::{data, index};
-
use rustc_hash::FxHasher;
-
use tracing::warn;
-

-
pub mod error {
-
    use super::*;
-
    use thiserror::Error;
-

-
    #[derive(Debug, Error)]
-
    #[error("failed to load pack data from {path:?}")]
-
    pub struct Data {
-
        pub path: PathBuf,
-
        pub source: data::header::decode::Error,
-
    }
-

-
    #[derive(Debug, Error)]
-
    #[error("failed to load pack index from {path:?}")]
-
    pub struct Index {
-
        pub path: PathBuf,
-
        pub source: index::init::Error,
-
    }
-
}
-

-
pub struct Data {
-
    pub hash: u64,
-
    hits: AtomicUsize,
-
    file: data::File,
-
}
-

-
impl Data {
-
    pub fn hit(&self) {
-
        self.hits.fetch_add(1, Ordering::Relaxed);
-
    }
-

-
    pub fn hits(&self) -> usize {
-
        self.hits.load(Ordering::Relaxed)
-
    }
-

-
    pub fn file(&self) -> &data::File {
-
        &self.file
-
    }
-
}
-

-
impl AsRef<data::File> for Data {
-
    fn as_ref(&self) -> &data::File {
-
        self.file()
-
    }
-
}
-

-
#[derive(Clone, PartialEq, Eq)]
-
pub struct Info {
-
    pub(super) hash: u64,
-
    pub data_path: PathBuf,
-
}
-

-
impl Info {
-
    pub fn data(&self) -> Result<Data, error::Data> {
-
        let file = data::File::at(&self.data_path).map_err(|source| error::Data {
-
            path: self.data_path.clone(),
-
            source,
-
        })?;
-
        Ok(Data {
-
            hash: self.hash,
-
            hits: AtomicUsize::new(0),
-
            file,
-
        })
-
    }
-
}
-

-
pub struct Index {
-
    pub info: Info,
-
    file: index::File,
-
}
-

-
impl Index {
-
    pub fn open(path: impl AsRef<Path>) -> Result<Self, error::Index> {
-
        let path = path.as_ref();
-
        let file = index::File::at(path).map_err(|source| error::Index {
-
            path: path.to_path_buf(),
-
            source,
-
        })?;
-
        let data_path = path.with_extension("pack");
-
        let hash = {
-
            let file_name = path
-
                .file_name()
-
                .expect("must have a file name, we opened it")
-
                .to_string_lossy();
-
            // XXX: inexplicably, gitoxide omits the "pack-" prefix
-
            let sha_hex = file_name.strip_prefix("pack-").unwrap_or(&file_name);
-
            match ObjectId::from_hex(&sha_hex.as_bytes()[..40]) {
-
                Err(e) => {
-
                    warn!(
-
                        "unconventional pack name {:?}, falling back to fxhash: {}",
-
                        path, e
-
                    );
-
                    hash(path)
-
                },
-
                Ok(oid) => {
-
                    let mut buf = [0u8; 8];
-
                    buf.copy_from_slice(&oid.sha1()[..8]);
-
                    u64::from_be_bytes(buf)
-
                },
-
            }
-
        };
-
        let info = Info { hash, data_path };
-

-
        Ok(Self { file, info })
-
    }
-

-
    pub fn contains(&self, id: impl AsRef<oid>) -> bool {
-
        self.file.lookup(id).is_some()
-
    }
-

-
    pub fn ofs(&self, id: impl AsRef<oid>) -> Option<u64> {
-
        self.file
-
            .lookup(id)
-
            .map(|idx| self.file.pack_offset_at_index(idx))
-
    }
-
}
-

-
fn hash(p: &Path) -> u64 {
-
    use std::hash::{Hash as _, Hasher as _};
-

-
    let mut hasher = FxHasher::default();
-
    p.hash(&mut hasher);
-
    hasher.finish()
-
}
deleted link-git/src/odb/window.rs
@@ -1,183 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use std::sync::Arc;
-

-
use arc_swap::{ArcSwap, Guard};
-
use parking_lot::Mutex;
-

-
use super::pack;
-

-
mod metrics;
-
pub use metrics::{Metrics, Stats, StatsView};
-

-
/// A threadsafe, shareable cache of packfiles.
-
pub trait Cache {
-
    type Stats;
-

-
    fn stats(&self) -> Self::Stats;
-

-
    fn get(&self, info: &pack::Info) -> Result<Arc<pack::Data>, pack::error::Data>;
-
}
-

-
impl<M, const B: usize, const S: usize> Cache for Fixed<M, B, S>
-
where
-
    M: Metrics,
-
{
-
    type Stats = M::Snapshot;
-

-
    fn stats(&self) -> Self::Stats {
-
        self.stats()
-
    }
-

-
    fn get(&self, info: &pack::Info) -> Result<Arc<pack::Data>, pack::error::Data> {
-
        self.get(info)
-
    }
-
}
-

-
/// 128 open files
-
pub type Small<S> = Fixed<S, 16, 8>;
-
/// 512 open files
-
pub type Medium<S> = Fixed<S, 32, 16>;
-
/// 1024 open files
-
pub type Large<S> = Fixed<S, 64, 16>;
-
/// 2048 open files
-
pub type XLarge<S> = Fixed<S, 128, 16>;
-

-
/// A fixed-size [`Cache`].
-
///
-
/// [`Fixed`] is essentially a very simple, fixed-capacity hashtable. When a
-
/// pack (data-) file is requested via [`Cache::get`], the file is loaded
-
/// (typically `mmap`ed) from disk if it is not already in the cache. Otherwise,
-
/// a pointer to the already loaded file is returned. Old entries are replaced
-
/// on an approximate LRU basis when the cache becomes full (this means that old
-
/// entries are **not** evicted when there is still space).
-
///
-
/// The implementation is a somewhat dumbed-down version of JGit's
-
/// `WindowCache`. The main differences are that the table buckets are of fixed
-
/// size (`SLOTS`), instead of a linked list. This means that the cache does not
-
/// allow to (temporarily) commit more entries than its nominal capacity.
-
///
-
/// Reading cached values is lock-free and mostly wait-free. Modifications are
-
/// guarded by locks on individual buckets; if a cache miss occurs, multiple
-
/// threads requesting the same entry will be blocked until one of them
-
/// succeeded loading the data and updating the cache. Writers will _not_,
-
/// however, contend with readers (unlike `RwLock`).
-
///
-
/// This favours usage patterns where different threads tend to request disjoint
-
/// sets of packfiles, and of course their hashes colliding relatively
-
/// infrequently.
-
pub struct Fixed<M, const BUCKETS: usize, const SLOTS: usize> {
-
    entries: [ArcSwap<[Option<Arc<pack::Data>>; SLOTS]>; BUCKETS],
-
    locks: [Mutex<()>; BUCKETS],
-
    stats: M,
-
}
-

-
trait AssertSendSync: Send + Sync {}
-
impl<M, const B: usize, const S: usize> AssertSendSync for Fixed<M, B, S> where M: Send + Sync {}
-

-
impl<M, const B: usize, const S: usize> AsRef<Fixed<M, B, S>> for Fixed<M, B, S> {
-
    fn as_ref(&self) -> &Fixed<M, B, S> {
-
        self
-
    }
-
}
-

-
impl<const B: usize, const S: usize> Default for Fixed<(), B, S> {
-
    fn default() -> Self {
-
        Self {
-
            entries: [(); B].map(|_| ArcSwap::new(Arc::new([(); S].map(|_| None)))),
-
            locks: [(); B].map(|_| Mutex::new(())),
-
            stats: (),
-
        }
-
    }
-
}
-

-
impl<M, const B: usize, const S: usize> Fixed<M, B, S>
-
where
-
    M: Metrics,
-
{
-
    pub fn with_stats(self) -> Fixed<Stats, B, S> {
-
        self.with_metrics(Stats::default())
-
    }
-

-
    pub fn with_metrics<N: Metrics>(self, m: N) -> Fixed<N, B, S> {
-
        Fixed {
-
            entries: self.entries,
-
            locks: self.locks,
-
            stats: m,
-
        }
-
    }
-

-
    pub fn stats(&self) -> M::Snapshot {
-
        let open_files = self
-
            .entries
-
            .iter()
-
            .map(|bucket| bucket.load().iter().flatten().count())
-
            .sum();
-
        self.stats.snapshot(open_files)
-
    }
-

-
    pub fn get(&self, info: &pack::Info) -> Result<Arc<pack::Data>, pack::error::Data> {
-
        let idx = info.hash as usize % self.entries.len();
-

-
        let bucket = self.entries[idx].load();
-
        for entry in bucket.iter().flatten() {
-
            if entry.hash == info.hash {
-
                self.stats.record_hit();
-
                entry.hit();
-
                return Ok(Arc::clone(entry));
-
            }
-
        }
-
        drop(bucket);
-

-
        self.stats.record_miss();
-

-
        // Cache miss, try to load the data file
-
        let lock = self.locks[idx].lock();
-
        // Did someone else win the race for the lock?
-
        let bucket = self.entries[idx].load();
-
        for entry in bucket.iter().flatten() {
-
            if entry.hash == info.hash {
-
                self.stats.record_hit();
-
                entry.hit();
-
                return Ok(Arc::clone(entry));
-
            }
-
        }
-
        // No, proceed
-
        self.stats.record_load();
-
        let data = Arc::new(info.data()?);
-

-
        // Find an empty slot, or swap with the least popular
-
        let mut access = usize::MAX;
-
        let mut evict = 0;
-
        for (i, e) in bucket.iter().enumerate() {
-
            match e {
-
                Some(entry) => {
-
                    let hits = entry.hits();
-
                    if hits < access {
-
                        access = hits;
-
                        evict = i;
-
                    }
-
                },
-
                None => {
-
                    evict = i;
-
                    break;
-
                },
-
            }
-
        }
-
        let mut entries = Guard::into_inner(bucket);
-
        {
-
            // This costs `SLOTS` refcount increments if the slot is currently
-
            // borrowed.
-
            let mutti = Arc::make_mut(&mut entries);
-
            mutti[evict] = Some(Arc::clone(&data));
-
        }
-
        self.entries[idx].store(entries);
-
        drop(lock);
-

-
        data.hit();
-
        Ok(data)
-
    }
-
}
deleted link-git/src/odb/window/metrics.rs
@@ -1,79 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use std::sync::atomic::{AtomicUsize, Ordering};
-

-
use tracing::trace;
-

-
pub struct StatsView {
-
    /// Total number of times the requested data was found in the cache.
-
    pub cache_hits: usize,
-
    /// Total number of times the requested data was not found in the cache.
-
    ///
-
    /// Note that a cache hit can occur after a miss if another thread was
-
    /// faster to fill in the missing entry. Thus, `cache_hits + cache_misses`
-
    /// does not necessarily sum up to the number of cache accesses.
-
    pub cache_misses: usize,
-
    /// Total number of times a pack file was attempted to be loaded from disk
-
    /// (incl. failed attempts).
-
    pub file_loads: usize,
-
    /// Total number of pack files the cache holds on to.
-
    pub open_files: usize,
-
}
-

-
#[derive(Default)]
-
pub struct Stats {
-
    hits: AtomicUsize,
-
    miss: AtomicUsize,
-
    load: AtomicUsize,
-
}
-

-
pub trait Metrics {
-
    type Snapshot;
-

-
    fn record_hit(&self);
-
    fn record_miss(&self);
-
    fn record_load(&self);
-

-
    fn snapshot(&self, open_files: usize) -> Self::Snapshot;
-
}
-

-
impl Metrics for Stats {
-
    type Snapshot = StatsView;
-

-
    fn record_hit(&self) {
-
        trace!("cache hit");
-
        self.hits.fetch_add(1, Ordering::Relaxed);
-
    }
-

-
    fn record_miss(&self) {
-
        trace!("cache miss");
-
        self.miss.fetch_add(1, Ordering::Relaxed);
-
    }
-

-
    fn record_load(&self) {
-
        trace!("pack load");
-
        self.load.fetch_add(1, Ordering::Relaxed);
-
    }
-

-
    fn snapshot(&self, open_files: usize) -> Self::Snapshot {
-
        StatsView {
-
            cache_hits: self.hits.load(Ordering::Relaxed),
-
            cache_misses: self.miss.load(Ordering::Relaxed),
-
            file_loads: self.load.load(Ordering::Relaxed),
-
            open_files,
-
        }
-
    }
-
}
-

-
impl Metrics for () {
-
    type Snapshot = ();
-

-
    fn record_hit(&self) {}
-
    fn record_miss(&self) {}
-
    fn record_load(&self) {}
-

-
    fn snapshot(&self, _: usize) -> Self::Snapshot {}
-
}
deleted link-git/src/protocol.rs
@@ -1,30 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use bstr::ByteSlice as _;
-
use git_protocol::transport::client;
-
use versions::Version;
-

-
pub mod fetch;
-
pub mod ls;
-
pub mod packwriter;
-
pub mod take;
-
pub mod transport;
-
pub mod upload_pack;
-

-
pub use fetch::{fetch, Ref};
-
pub use ls::ls_refs;
-
pub use packwriter::PackWriter;
-
pub use upload_pack::upload_pack;
-

-
pub use git_hash::{oid, ObjectId};
-

-
fn remote_git_version(caps: &client::Capabilities) -> Option<Version> {
-
    let agent = caps.capability("agent").and_then(|cap| {
-
        cap.value()
-
            .and_then(|bs| bs.to_str().map(|s| s.to_owned()).ok())
-
    })?;
-
    Version::new(agent.strip_prefix("git/")?)
-
}
deleted link-git/src/protocol/fetch.rs
@@ -1,290 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use std::{
-
    future::Future,
-
    io,
-
    mem,
-
    pin::Pin,
-
    sync::{
-
        atomic::{AtomicBool, Ordering},
-
        Arc,
-
    },
-
    task::{Context, Poll},
-
};
-

-
use bstr::{BString, ByteSlice as _};
-
use futures_lite::{
-
    future,
-
    io::{AsyncBufRead, AsyncRead, AsyncWrite},
-
};
-
use git_features::progress::{self, Progress};
-
use git_protocol::{
-
    fetch::{response, Action, Arguments, Delegate, DelegateBlocking, LsRefsAction, Response},
-
    transport::client,
-
};
-
use once_cell::sync::Lazy;
-
use pin_project::{pin_project, pinned_drop};
-
use versions::Version;
-

-
pub use git_hash::ObjectId;
-
pub use git_protocol::fetch::Ref;
-

-
use super::{packwriter::PackWriter, remote_git_version, transport};
-

-
// Work around `git-upload-pack` not handling namespaces properly,
-
//
-
// cf. https://lore.kernel.org/git/CD2XNXHACAXS.13J6JTWZPO1JA@schmidt/
-
// Fixed in `git.git` 1ab13eb, which should land in 2.34
-
//
-
// Based on testing with git 2.25.1 in Ubuntu 20.04, this workaround is
-
// not needed. Hence the checked version is lowered to 2.25.0.
-
fn must_namespace_want_ref(caps: &client::Capabilities) -> bool {
-
    static FIXED_AFTER: Lazy<Version> = Lazy::new(|| Version::new("2.25.0").unwrap());
-

-
    remote_git_version(caps)
-
        .map(|version| version <= *FIXED_AFTER)
-
        .unwrap_or(false)
-
}
-

-
#[derive(Debug)]
-
pub struct Options {
-
    /// The remote (logical) repository to fetch from.
-
    ///
-
    /// Normally, this is the path to a repo on the remote side (eg.
-
    /// `/git.git`). `radicle-link` serves only a single namespaced repo, so
-
    /// this value should be the name of a namespace.
-
    pub repo: BString,
-

-
    /// [Extra Parameters][extra] to send with the initial transport header.
-
    ///
-
    /// [extra]: https://git.kernel.org/pub/scm/git/git.git/tree/Documentation/technical/pack-protocol.txt#n52
-
    pub extra_params: Vec<(String, Option<String>)>,
-

-
    /// [`ObjectId`]s to send as `want` lines.
-
    pub wants: Vec<ObjectId>,
-

-
    /// [`ObjectId`]s to send as `have` lines.
-
    pub haves: Vec<ObjectId>,
-

-
    /// Known refs to ask the server to include in the packfile.
-
    pub want_refs: Vec<BString>,
-
}
-

-
/// Result of a succesful [`fetch`].
-
#[derive(Debug)]
-
pub struct Outputs<T> {
-
    /// The `wanted-refs` as acknowledged by the server.
-
    pub wanted_refs: Vec<Ref>,
-
    /// If a packfile was received successfully, some info about it.
-
    pub pack: Option<T>,
-
}
-

-
impl<T> Default for Outputs<T> {
-
    fn default() -> Self {
-
        Self {
-
            wanted_refs: Vec::new(),
-
            pack: None,
-
        }
-
    }
-
}
-

-
/// [`Delegate`] driving the fetch end of the [pack protocol].
-
///
-
/// [pack protocol]: https://git.kernel.org/pub/scm/git/git.git/tree/Documentation/technical/pack-protocol.txt
-
pub struct Fetch<P, O> {
-
    opt: Options,
-
    pack_writer: P,
-
    out: Outputs<O>,
-
    need_namespaced_want_ref: bool,
-
}
-

-
impl<P, O> Fetch<P, O> {
-
    pub fn new(opt: Options, pack_writer: P) -> Self {
-
        Self {
-
            opt,
-
            pack_writer,
-
            out: Outputs::default(),
-
            need_namespaced_want_ref: false,
-
        }
-
    }
-

-
    pub fn outputs(&self) -> &Outputs<O> {
-
        &self.out
-
    }
-

-
    pub fn take_outputs(&mut self) -> Outputs<O> {
-
        mem::take(&mut self.out)
-
    }
-
}
-

-
impl<P: PackWriter> DelegateBlocking for Fetch<P, P::Output> {
-
    fn handshake_extra_parameters(&self) -> Vec<(String, Option<String>)> {
-
        self.opt.extra_params.clone()
-
    }
-

-
    fn prepare_ls_refs(
-
        &mut self,
-
        _: &client::Capabilities,
-
        _: &mut Vec<BString>,
-
        _: &mut Vec<(&str, Option<&str>)>,
-
    ) -> io::Result<LsRefsAction> {
-
        Ok(LsRefsAction::Skip)
-
    }
-

-
    fn prepare_fetch(
-
        &mut self,
-
        _: git_protocol::transport::Protocol,
-
        caps: &client::Capabilities,
-
        _: &mut Vec<(&str, Option<&str>)>,
-
        _: &[Ref],
-
    ) -> io::Result<Action> {
-
        if !self.opt.want_refs.is_empty() && !remote_supports_ref_in_want(caps) {
-
            return Err(io::Error::new(
-
                io::ErrorKind::Unsupported,
-
                "`want-ref`s given, but server does not support `ref-in-want`",
-
            ));
-
        }
-

-
        if self.opt.wants.is_empty() && self.opt.want_refs.is_empty() {
-
            return Err(io::Error::new(
-
                io::ErrorKind::InvalidData,
-
                "`fetch` is empty",
-
            ));
-
        }
-

-
        self.need_namespaced_want_ref = must_namespace_want_ref(caps);
-

-
        Ok(Action::Continue)
-
    }
-

-
    fn negotiate(
-
        &mut self,
-
        _: &[Ref],
-
        args: &mut Arguments,
-
        _: Option<&Response>,
-
    ) -> io::Result<Action> {
-
        for oid in &self.opt.wants {
-
            args.want(oid);
-
        }
-

-
        for oid in &self.opt.haves {
-
            args.have(oid)
-
        }
-

-
        for name in &self.opt.want_refs {
-
            if self.need_namespaced_want_ref {
-
                let want_ref = format!("refs/namespaces/{}/{}", self.opt.repo, name);
-
                args.want_ref(BString::from(want_ref).as_bstr());
-
            } else {
-
                args.want_ref(name.as_bstr());
-
            }
-
        }
-

-
        // send done, as we don't bother with further negotiation
-
        Ok(Action::Cancel)
-
    }
-
}
-

-
#[async_trait(?Send)]
-
impl<P: PackWriter> Delegate for Fetch<P, P::Output> {
-
    async fn receive_pack(
-
        &mut self,
-
        pack: impl AsyncBufRead + Unpin + 'async_trait,
-
        prog: impl Progress,
-
        _: &[Ref],
-
        resp: &Response,
-
    ) -> io::Result<()> {
-
        // Strip any namespaces leaked by the other end due to workarounds
-
        let namespace = format!("refs/namespaces/{}/", self.opt.repo);
-
        self.out.wanted_refs.extend(resp.wanted_refs().iter().map(
-
            |response::WantedRef { id, path }| {
-
                Ref::Direct {
-
                    path: path
-
                        .strip_prefix(namespace.as_bytes())
-
                        .map(BString::from)
-
                        .unwrap_or_else(|| path.clone()),
-
                    object: *id,
-
                }
-
            },
-
        ));
-
        let out = self.pack_writer.write_pack(pack, prog)?;
-
        self.out.pack = Some(out);
-

-
        Ok(())
-
    }
-
}
-

-
/// Future created by the [`fetch`] function.
-
///
-
/// Ensures that a running inner [`PackWriter`] is cancelled when the
-
/// [`Fetching`] future is dropped without also dropping the [`AsyncRead`] data
-
/// source.
-
#[pin_project(PinnedDrop)]
-
struct Fetching<T> {
-
    stop: Arc<AtomicBool>,
-
    #[pin]
-
    task: T,
-
}
-

-
#[pinned_drop]
-
impl<T> PinnedDrop for Fetching<T> {
-
    fn drop(self: Pin<&mut Self>) {
-
        self.stop.store(true, Ordering::Release)
-
    }
-
}
-

-
impl<T> Future for Fetching<T>
-
where
-
    T: Future,
-
{
-
    type Output = T::Output;
-

-
    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
-
        self.project().task.poll(cx)
-
    }
-
}
-

-
pub fn fetch<B, P, R, W>(
-
    opt: Options,
-
    build_pack_writer: B,
-
    recv: R,
-
    send: W,
-
) -> impl Future<Output = io::Result<Outputs<P::Output>>>
-
where
-
    B: FnOnce(Arc<AtomicBool>) -> P,
-
    P: PackWriter + Send + 'static,
-
    P::Output: Send + 'static,
-
    R: AsyncRead + Unpin + Send + 'static,
-
    W: AsyncWrite + Unpin + Send + 'static,
-
{
-
    let stop = Arc::new(AtomicBool::new(false));
-
    let task = blocking::unblock({
-
        let mut conn = transport::Stateless::new(opt.repo.clone(), recv, send);
-
        let pack_writer = build_pack_writer(Arc::clone(&stop));
-

-
        move || {
-
            let mut delegate = Fetch::new(opt, pack_writer);
-
            future::block_on(git_protocol::fetch(
-
                &mut conn,
-
                &mut delegate,
-
                |_| unreachable!("credentials helper requested"),
-
                progress::Discard,
-
                git_protocol::FetchConnection::AllowReuse,
-
            ))
-
            .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
-

-
            Ok(delegate.out)
-
        }
-
    });
-

-
    Fetching { stop, task }
-
}
-

-
fn remote_supports_ref_in_want(caps: &client::Capabilities) -> bool {
-
    caps.capability("fetch")
-
        .and_then(|cap| cap.supports("ref-in-want"))
-
        .unwrap_or(false)
-
}
deleted link-git/src/protocol/ls.rs
@@ -1,151 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use std::io;
-

-
use bstr::{BString, ByteVec as _};
-
use futures_lite::io::{AsyncBufRead, AsyncRead, AsyncWrite};
-
use git_features::progress::{self, Progress};
-
use git_protocol::{
-
    fetch::{Action, Arguments, Delegate, DelegateBlocking, LsRefsAction, Response},
-
    transport::client,
-
};
-
use once_cell::sync::Lazy;
-
use versions::Version;
-

-
pub use git_protocol::fetch::Ref;
-

-
use super::{remote_git_version, transport};
-

-
// Work around `git-upload-pack` not handling namespaces properly
-
//
-
// cf. https://lore.kernel.org/git/pMV5dJabxOBTD8kJBaPuWK0aS6OJhRQ7YFGwfhPCeSJEbPDrIFBza36nXBCgUCeUJWGmpjPI1rlOGvZJEh71Ruz4SqljndUwOCoBUDRHRDU=@eagain.st/
-
//
-
// Based on testing with git 2.25.1 in Ubuntu 20.04, this workaround is
-
// not needed. Hence the checked version is lowered to 2.25.0.
-
fn must_namespace(caps: &client::Capabilities) -> bool {
-
    static MIN_GIT_VERSION_NAMESPACES: Lazy<Version> =
-
        Lazy::new(|| Version::new("2.25.0").unwrap());
-

-
    remote_git_version(caps)
-
        .map(|version| version < *MIN_GIT_VERSION_NAMESPACES)
-
        .unwrap_or(false)
-
}
-

-
#[derive(Debug)]
-
pub struct Options {
-
    /// The remote (logical) repository to fetch from.
-
    ///
-
    /// Normally, this is the path to a repo on the remote side (eg.
-
    /// `/git.git`). `radicle-link` serves only a single namespaced repo, so
-
    /// this value should be the name of a namespace.
-
    pub repo: BString,
-

-
    /// [Extra Parameters][extra] to send with the initial transport header.
-
    ///
-
    /// [extra]: https://git.kernel.org/pub/scm/git/git.git/tree/Documentation/technical/pack-protocol.txt#n52
-
    pub extra_params: Vec<(String, Option<String>)>,
-

-
    /// Prefixes of refs to ask the server to advertise via `ls-refs`.
-
    ///
-
    /// If the [`Vec`] is empty, the server is asked to return all refs it knows
-
    /// about. Otherwise, the server is asked to only return refs matching
-
    /// the given prefixes.
-
    pub ref_prefixes: Vec<BString>,
-
}
-

-
/// [`Delegate`] for running a stateless `ls-refs` command.
-
pub struct LsRefs {
-
    opt: Options,
-
    out: Vec<Ref>,
-
}
-

-
impl LsRefs {
-
    pub fn new(opt: Options) -> Self {
-
        Self {
-
            opt,
-
            out: Vec::new(),
-
        }
-
    }
-
}
-

-
impl DelegateBlocking for LsRefs {
-
    fn handshake_extra_parameters(&self) -> Vec<(String, Option<String>)> {
-
        self.opt.extra_params.clone()
-
    }
-

-
    fn prepare_ls_refs(
-
        &mut self,
-
        caps: &client::Capabilities,
-
        args: &mut Vec<BString>,
-
        _: &mut Vec<(&str, Option<&str>)>,
-
    ) -> io::Result<LsRefsAction> {
-
        let must_namespace = must_namespace(caps);
-
        for prefix in &self.opt.ref_prefixes {
-
            let mut arg = BString::from("ref-prefix ");
-
            if must_namespace {
-
                arg.push_str("refs/namespaces/");
-
                arg.push_str(&self.opt.repo);
-
                arg.push_char('/');
-
            }
-
            arg.push_str(prefix);
-
            args.push(arg)
-
        }
-
        Ok(LsRefsAction::Continue)
-
    }
-

-
    fn prepare_fetch(
-
        &mut self,
-
        _: git_protocol::transport::Protocol,
-
        _: &client::Capabilities,
-
        _: &mut Vec<(&str, Option<&str>)>,
-
        refs: &[Ref],
-
    ) -> io::Result<Action> {
-
        self.out.extend_from_slice(refs);
-
        Ok(Action::Cancel)
-
    }
-

-
    fn negotiate(
-
        &mut self,
-
        _: &[Ref],
-
        _: &mut Arguments,
-
        _: Option<&Response>,
-
    ) -> io::Result<Action> {
-
        unreachable!("`negotiate` called even though no `fetch` command was sent")
-
    }
-
}
-

-
#[async_trait(?Send)]
-
impl Delegate for LsRefs {
-
    async fn receive_pack(
-
        &mut self,
-
        _: impl AsyncBufRead + Unpin + 'async_trait,
-
        _: impl Progress,
-
        _: &[Ref],
-
        _: &Response,
-
    ) -> io::Result<()> {
-
        unreachable!("`receive_pack` called even though no `fetch` command was sent")
-
    }
-
}
-

-
pub async fn ls_refs<R, W>(opt: Options, recv: R, send: W) -> io::Result<Vec<Ref>>
-
where
-
    R: AsyncRead + Unpin,
-
    W: AsyncWrite + Unpin,
-
{
-
    let mut conn = transport::Stateless::new(opt.repo.clone(), recv, send);
-
    let mut delegate = LsRefs::new(opt);
-
    git_protocol::fetch(
-
        &mut conn,
-
        &mut delegate,
-
        |_| unreachable!("credentials helper requested"),
-
        progress::Discard,
-
        git_protocol::FetchConnection::AllowReuse,
-
    )
-
    .await
-
    .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
-

-
    Ok(delegate.out)
-
}
deleted link-git/src/protocol/packwriter.rs
@@ -1,271 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use std::{
-
    io,
-
    path::{Path, PathBuf},
-
    sync::{
-
        atomic::{AtomicBool, Ordering},
-
        Arc,
-
    },
-
};
-

-
use futures_lite::io::{AsyncBufRead, BlockOn};
-
use git_features::progress::Progress;
-
use git_hash::ObjectId;
-
use git_odb::{self as odb, pack};
-

-
use super::take::TryTake;
-

-
#[cfg(feature = "git2")]
-
pub use libgit::Libgit;
-

-
/// What to do with the `packfile` response.
-
///
-
/// _This is mostly the same as [`git_protocol::fetch::Delegate`], but without
-
/// incurring the [`git_protocol::fetch::DelegateBlocking`] super-trait
-
/// constraint. We can simply make [`crate::protocol::fetch::Fetch`] parametric
-
/// over the packfile sink._
-
pub trait PackWriter {
-
    type Output;
-

-
    fn write_pack(
-
        &self,
-
        pack: impl AsyncBufRead + Unpin,
-
        progress: impl Progress,
-
    ) -> io::Result<Self::Output>;
-
}
-

-
#[derive(Clone, Copy, Debug)]
-
pub struct Options {
-
    /// How many threads the packfile indexer is allowed to spawn. `None` means
-
    /// unlimited.
-
    pub max_indexer_threads: Option<usize>,
-
    /// The maximum size in bytes of the packfile.
-
    ///
-
    /// If the remote sends a larger file, the transfer will be aborted.
-
    pub max_pack_bytes: u64,
-
}
-

-
impl Default for Options {
-
    fn default() -> Self {
-
        Self {
-
            max_indexer_threads: Some(1),
-
            max_pack_bytes: u64::MAX,
-
        }
-
    }
-
}
-

-
#[cfg(feature = "git2")]
-
pub mod libgit {
-
    use super::*;
-

-
    #[derive(Clone, Copy, Debug)]
-
    pub struct PackReceived {
-
        pub objects: usize,
-
        pub local_objects: usize,
-
        pub deltas: usize,
-
    }
-

-
    impl From<git2::Progress<'_>> for PackReceived {
-
        fn from(p: git2::Progress<'_>) -> Self {
-
            Self {
-
                objects: p.indexed_objects(),
-
                local_objects: p.local_objects(),
-
                deltas: p.indexed_deltas(),
-
            }
-
        }
-
    }
-

-
    pub struct Libgit {
-
        opt: Options,
-
        repo: git2::Repository,
-
        stop: Arc<AtomicBool>,
-
    }
-

-
    impl Libgit {
-
        pub fn new(opt: Options, repo: git2::Repository, stop: Arc<AtomicBool>) -> Self {
-
            Self { opt, repo, stop }
-
        }
-

-
        fn guard_cancelled(&self) -> io::Result<()> {
-
            if self.stop.load(Ordering::Acquire) {
-
                Err(io::Error::new(io::ErrorKind::Interrupted, "cancelled"))
-
            } else {
-
                Ok(())
-
            }
-
        }
-
    }
-

-
    impl PackWriter for Libgit {
-
        type Output = Option<PackReceived>;
-

-
        fn write_pack(
-
            &self,
-
            pack: impl AsyncBufRead + Unpin,
-
            _: impl Progress,
-
        ) -> io::Result<Self::Output> {
-
            let mut out = None;
-

-
            let odb = self.repo.odb().map_err(io_error)?;
-
            let mut writer = odb.packwriter().map_err(io_error)?;
-

-
            self.guard_cancelled()?;
-
            io::copy(
-
                &mut BlockOn::new(TryTake::new(pack, self.opt.max_pack_bytes)),
-
                &mut writer,
-
            )?;
-

-
            self.guard_cancelled()?;
-
            writer
-
                .progress(|p| {
-
                    out = Some(p.to_owned());
-
                    true
-
                })
-
                .commit()
-
                .map(|_| ())
-
                .map_err(io_error)?;
-
            // Convince borrowchk that `out` can not possibly be borrowed anymore
-
            drop(writer);
-

-
            Ok(out.map(Into::into))
-
        }
-
    }
-

-
    fn io_error(e: git2::Error) -> io::Error {
-
        io::Error::new(io::ErrorKind::Other, e)
-
    }
-
}
-

-
pub type PackReceived = pack::bundle::write::Outcome;
-

-
/// A lookup function to help "thicken" thin packs by finding missing base
-
/// objects.
-
///
-
/// The impl provided for [`odb::linked::Store`] does not use any pack caching.
-
pub trait Thickener {
-
    fn find_object<'a>(&self, id: ObjectId, buf: &'a mut Vec<u8>)
-
        -> Option<pack::data::Object<'a>>;
-
}
-

-
impl Thickener for odb::linked::Store {
-
    fn find_object<'a>(
-
        &self,
-
        id: ObjectId,
-
        buf: &'a mut Vec<u8>,
-
    ) -> Option<pack::data::Object<'a>> {
-
        use git_odb::FindExt as _;
-

-
        self.find(id, buf, &mut pack::cache::Never).ok()
-
    }
-
}
-

-
/// A factory spewing out new [`Thickener`]s with static lifetimes.
-
///
-
/// `gitoxide` doesn't currently allow us to initialise thickening lazily (the
-
/// pack file may not be thin after all), but requires a static lookup function.
-
/// Instead of initialising a new [`odb::linked::Store`] for every pack stream,
-
/// users may share a pre-initialised object database provided appropriate
-
/// thread safety measures.
-
pub trait BuildThickener {
-
    type Error: std::error::Error + Send + Sync + 'static;
-
    type Thick: Thickener + 'static;
-

-
    fn build_thickener(&self) -> Result<Self::Thick, Self::Error>;
-
}
-

-
pub struct StandardThickener {
-
    git_dir: PathBuf,
-
}
-

-
impl StandardThickener {
-
    pub fn new(git_dir: impl Into<PathBuf>) -> Self {
-
        let git_dir = git_dir.into();
-
        Self { git_dir }
-
    }
-
}
-

-
impl BuildThickener for StandardThickener {
-
    type Error = odb::linked::init::Error;
-
    type Thick = odb::linked::Store;
-

-
    fn build_thickener(&self) -> Result<Self::Thick, Self::Error> {
-
        odb::linked::Store::at(self.git_dir.join("objects"))
-
    }
-
}
-

-
/// The default [`PackWriter`].
-
///
-
/// Writes the packfile into the given output directory, along with a v2
-
/// index. The packfile is verified.
-
pub struct Standard<F> {
-
    git_dir: PathBuf,
-
    opt: Options,
-
    thick: F,
-
    stop: Arc<AtomicBool>,
-
}
-

-
impl<F> Standard<F> {
-
    pub fn new(git_dir: impl AsRef<Path>, opt: Options, thick: F, stop: Arc<AtomicBool>) -> Self {
-
        Self {
-
            git_dir: git_dir.as_ref().to_owned(),
-
            opt,
-
            thick,
-
            stop,
-
        }
-
    }
-
}
-

-
impl<F> Drop for Standard<F> {
-
    fn drop(&mut self) {
-
        self.stop.store(true, Ordering::Release);
-
    }
-
}
-

-
impl<F: BuildThickener> PackWriter for Standard<F> {
-
    type Output = PackReceived;
-

-
    fn write_pack(
-
        &self,
-
        pack: impl AsyncBufRead + Unpin,
-
        prog: impl Progress,
-
    ) -> io::Result<Self::Output> {
-
        use pack::{bundle::write::Options, data::input::Mode, index::Version, Bundle};
-

-
        let opts = Options {
-
            thread_limit: self.opt.max_indexer_threads,
-
            index_kind: Version::V2,
-
            iteration_mode: Mode::Verify,
-
        };
-
        let thickener = self
-
            .thick
-
            .build_thickener()
-
            .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
-
        Bundle::write_to_directory(
-
            BlockOn::new(TryTake::new(pack, self.opt.max_pack_bytes)),
-
            Some(self.git_dir.join("objects").join("pack")),
-
            prog,
-
            &self.stop,
-
            Some(Box::new(move |oid, buf| thickener.find_object(oid, buf))),
-
            opts,
-
        )
-
        .map_err(|e| io::Error::new(io::ErrorKind::Other, e))
-
    }
-
}
-

-
/// No-op [`PackWriter`] which just drains the input.
-
pub struct Discard;
-

-
impl PackWriter for Discard {
-
    type Output = u64;
-

-
    fn write_pack(
-
        &self,
-
        pack: impl AsyncBufRead + Unpin,
-
        _: impl Progress,
-
    ) -> io::Result<Self::Output> {
-
        io::copy(&mut BlockOn::new(pack), &mut io::sink())
-
    }
-
}
deleted link-git/src/protocol/take.rs
@@ -1,76 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use std::{
-
    io,
-
    pin::Pin,
-
    task::{Context, Poll},
-
};
-

-
use futures_lite::io::{AsyncBufRead, AsyncRead};
-

-
/// Like [`futures_lite::io::Take`], but returns an error if and when the
-
/// `limit` is exceeded.
-
///
-
/// Note that, unlike [`futures_lite::io::Take`], if a single poll reads past
-
/// the limit, the excess bytes are _not_ discarded. Instead, an error is
-
/// returned on the next poll.
-
pub struct TryTake<R> {
-
    limit: u64,
-
    inner: R,
-
}
-

-
impl<R> TryTake<R> {
-
    pub fn new(inner: R, limit: u64) -> Self {
-
        Self { limit, inner }
-
    }
-
}
-

-
impl<R> AsyncRead for TryTake<R>
-
where
-
    R: AsyncRead + Unpin,
-
{
-
    fn poll_read(
-
        self: Pin<&mut Self>,
-
        cx: &mut Context,
-
        buf: &mut [u8],
-
    ) -> Poll<Result<usize, io::Error>> {
-
        if self.limit == 0 {
-
            return Poll::Ready(Err(io::Error::new(
-
                io::ErrorKind::Other,
-
                "max input size exceeded",
-
            )));
-
        }
-

-
        let this = self.get_mut();
-
        Pin::new(&mut this.inner).poll_read(cx, buf).map(|ready| {
-
            if let Ok(siz) = ready {
-
                this.limit = this.limit.saturating_sub(siz as u64);
-
            }
-

-
            ready
-
        })
-
    }
-
}
-

-
impl<R> AsyncBufRead for TryTake<R>
-
where
-
    R: AsyncBufRead + Unpin,
-
{
-
    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<&[u8], io::Error>> {
-
        if self.limit == 0 {
-
            return Poll::Ready(Err(io::Error::new(
-
                io::ErrorKind::Other,
-
                "max input size exceeded",
-
            )));
-
        }
-

-
        Pin::new(&mut self.get_mut().inner).poll_fill_buf(cx)
-
    }
-

-
    fn consume(self: Pin<&mut Self>, amt: usize) {
-
        Pin::new(&mut self.get_mut().inner).consume(amt)
-
    }
-
}
deleted link-git/src/protocol/transport.rs
@@ -1,84 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use bstr::BString;
-
use futures_lite::io::{AsyncRead, AsyncWrite};
-
use git_protocol::transport::{
-
    client::{
-
        self,
-
        git::{ConnectMode, Connection},
-
        SetServiceResponse,
-
        Transport,
-
        TransportWithoutIO,
-
    },
-
    Protocol,
-
    Service,
-
};
-

-
pub struct Stateless<R, W> {
-
    inner: Connection<R, W>,
-
}
-

-
impl<R, W> Stateless<R, W>
-
where
-
    R: AsyncRead + Unpin,
-
    W: AsyncWrite + Unpin,
-
{
-
    pub fn new(repo: BString, recv: R, send: W) -> Self {
-
        let url = format!("rad://{repo}");
-
        let inner = Connection::new(
-
            recv,
-
            send,
-
            Protocol::V2,
-
            repo,
-
            None::<(String, Option<u16>)>,
-
            ConnectMode::Daemon,
-
        )
-
        .custom_url(Some(url));
-

-
        Self { inner }
-
    }
-
}
-

-
impl<R, W> TransportWithoutIO for Stateless<R, W>
-
where
-
    R: AsyncRead + Unpin,
-
    W: AsyncWrite + Unpin,
-
{
-
    fn request(
-
        &mut self,
-
        write_mode: client::WriteMode,
-
        on_into_read: client::MessageKind,
-
    ) -> Result<client::RequestWriter<'_>, client::Error> {
-
        self.inner.request(write_mode, on_into_read)
-
    }
-

-
    fn to_url(&self) -> String {
-
        self.inner.to_url()
-
    }
-

-
    fn supported_protocol_versions(&self) -> &[Protocol] {
-
        &[Protocol::V2]
-
    }
-

-
    fn connection_persists_across_multiple_requests(&self) -> bool {
-
        false
-
    }
-
}
-

-
#[async_trait(?Send)]
-
impl<R, W> Transport for Stateless<R, W>
-
where
-
    R: AsyncRead + Unpin,
-
    W: AsyncWrite + Unpin,
-
{
-
    async fn handshake<'a>(
-
        &mut self,
-
        service: Service,
-
        extra_parameters: &'a [(&'a str, Option<&'a str>)],
-
    ) -> Result<SetServiceResponse<'_>, client::Error> {
-
        self.inner.handshake(service, extra_parameters).await
-
    }
-
}
deleted link-git/src/protocol/upload_pack.rs
@@ -1,238 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use std::{future::Future, io, path::Path, process::ExitStatus, str::FromStr};
-

-
use async_process::{Command, Stdio};
-
use futures_lite::io::{copy, AsyncBufReadExt as _, AsyncRead, AsyncWrite, BufReader};
-
use futures_util::try_join;
-
use git_packetline::{self as packetline, PacketLineRef};
-
use once_cell::sync::Lazy;
-
use versions::Version;
-

-
mod legacy;
-

-
#[derive(Debug, PartialEq, Eq)]
-
pub struct Header {
-
    pub path: String,
-
    pub host: Option<(String, Option<u16>)>,
-
    pub extra: Vec<(String, Option<String>)>,
-
}
-

-
impl FromStr for Header {
-
    type Err = &'static str;
-

-
    fn from_str(s: &str) -> Result<Self, Self::Err> {
-
        let mut parts = s
-
            .strip_prefix("git-upload-pack ")
-
            .ok_or("unsupported service")?
-
            .split_terminator('\0');
-

-
        let path = parts.next().ok_or("missing path").and_then(|path| {
-
            if path.is_empty() {
-
                Err("empty path")
-
            } else {
-
                Ok(path.to_owned())
-
            }
-
        })?;
-
        let host = match parts.next() {
-
            None | Some("") => None,
-
            Some(host) => match host.strip_prefix("host=") {
-
                None => return Err("invalid host"),
-
                Some(host) => match host.split_once(':') {
-
                    None => Some((host.to_owned(), None)),
-
                    Some((host, port)) => {
-
                        let port = port.parse::<u16>().or(Err("invalid port"))?;
-
                        Some((host.to_owned(), Some(port)))
-
                    },
-
                },
-
            },
-
        };
-
        let extra = parts
-
            .skip_while(|part| part.is_empty())
-
            .map(|part| match part.split_once('=') {
-
                None => (part.to_owned(), None),
-
                Some((k, v)) => (k.to_owned(), Some(v.to_owned())),
-
            })
-
            .collect();
-

-
        Ok(Self { path, host, extra })
-
    }
-
}
-

-
pub async fn upload_pack<R, W>(
-
    git_dir: impl AsRef<Path>,
-
    recv: R,
-
    mut send: W,
-
) -> io::Result<(Header, impl Future<Output = io::Result<ExitStatus>>)>
-
where
-
    R: AsyncRead + Unpin,
-
    W: AsyncWrite + Unpin,
-
{
-
    let mut recv = BufReader::new(recv);
-
    let header: Header = match recv.fill_buf().await?.first() {
-
        // legacy clients don't send a proper pktline header :(
-
        Some(b'g') => {
-
            let mut buf = String::with_capacity(256);
-
            recv.read_line(&mut buf).await?;
-
            buf.parse().map_err(invalid_data)?
-
        },
-
        Some(_) => {
-
            let mut pktline = packetline::StreamingPeekableIter::new(recv, &[]);
-
            let pkt = pktline
-
                .read_line()
-
                .await
-
                .ok_or_else(|| invalid_data("missing header"))?
-
                .map_err(invalid_data)?
-
                .map_err(invalid_data)?;
-
            let hdr = match pkt {
-
                PacketLineRef::Data(data) => std::str::from_utf8(data)
-
                    .map_err(invalid_data)?
-
                    .parse()
-
                    .map_err(invalid_data),
-
                _ => Err(invalid_data("not a header packet")),
-
            }?;
-
            recv = pktline.into_inner();
-

-
            hdr
-
        },
-
        None => {
-
            return Err(io::Error::new(
-
                io::ErrorKind::UnexpectedEof,
-
                "expected header",
-
            ))
-
        },
-
    };
-

-
    let namespace = header
-
        .path
-
        // legacy clients redundantly send a full URN
-
        .strip_prefix("rad:git:")
-
        .map(ToOwned::to_owned)
-
        .unwrap_or_else(|| header.path.clone());
-
    let protocol_version = header
-
        .extra
-
        .iter()
-
        .find_map(|kv| match kv {
-
            (ref k, Some(v)) if k == "version" => {
-
                let version = match v.as_str() {
-
                    "2" => 2,
-
                    "1" => 1,
-
                    _ => 0,
-
                };
-
                Some(version)
-
            },
-
            _ => None,
-
        })
-
        .unwrap_or(0);
-
    // legacy
-
    let stateless_ls = header.extra.iter().any(|(k, _)| k == "ls");
-

-
    let fut = async move {
-
        if protocol_version < 2 {
-
            if stateless_ls {
-
                return legacy::advertise_refs(git_dir, &namespace, recv, send).await;
-
            }
-
        } else {
-
            advertise_capabilities(&mut send).await?;
-
        }
-

-
        let mut child = {
-
            let mut cmd = Command::new("git");
-
            cmd.current_dir(git_dir)
-
                .env_clear()
-
                .envs(
-
                    std::env::vars()
-
                        .filter(|(key, _)| key == "PATH" || key.starts_with("GIT_TRACE")),
-
                )
-
                .env("GIT_PROTOCOL", format!("version={protocol_version}"))
-
                .env("GIT_NAMESPACE", namespace)
-
                .args([
-
                    "-c",
-
                    "uploadpack.allowanysha1inwant=true",
-
                    "-c",
-
                    "uploadpack.allowrefinwant=true",
-
                    "-c",
-
                    "lsrefs.unborn=ignore",
-
                    "upload-pack",
-
                    "--strict",
-
                    "--stateless-rpc",
-
                    ".",
-
                ])
-
                .stdout(Stdio::piped())
-
                .stdin(Stdio::piped())
-
                .stderr(Stdio::inherit())
-
                .kill_on_drop(true)
-
                .reap_on_drop(true)
-
                .spawn()?
-
        };
-

-
        let mut stdin = child.stdin.take().unwrap();
-
        let mut stdout = child.stdout.take().unwrap();
-

-
        try_join!(
-
            copy(&mut recv, &mut stdin),
-
            copy(&mut stdout, &mut send),
-
            child.status(),
-
        )
-
        .map(|(_, _, status)| status)
-
    };
-

-
    Ok((header, fut))
-
}
-

-
async fn advertise_capabilities<W>(mut send: W) -> io::Result<()>
-
where
-
    W: AsyncWrite + Unpin,
-
{
-
    // Thou shallt not upgrade your `git` installation while a link instance is
-
    // running!
-
    static GIT_VERSION: Lazy<Version> = Lazy::new(|| git_version().unwrap());
-
    static AGENT: Lazy<Vec<u8>> = Lazy::new(|| format!("agent=git/{}", *GIT_VERSION).into_bytes());
-
    static CAPABILITIES: Lazy<[&[u8]; 4]> = Lazy::new(|| {
-
        [
-
            b"version 2",
-
            AGENT.as_slice(),
-
            b"object-format=sha1",
-
            b"fetch=ref-in-want",
-
        ]
-
    });
-

-
    for cap in *CAPABILITIES {
-
        packetline::encode::text_to_write(cap, &mut send).await?;
-
    }
-
    packetline::encode::flush_to_write(&mut send).await?;
-

-
    Ok(())
-
}
-

-
fn git_version() -> io::Result<Version> {
-
    let out = std::process::Command::new("git")
-
        .arg("--version")
-
        .output()?;
-
    if !out.status.success() {
-
        return Err(io::Error::new(
-
            io::ErrorKind::Other,
-
            "failed to read `git` version",
-
        ));
-
    }
-

-
    // parse: git version 2.30.1 <other optional tokens>
-
    out.stdout
-
        .split(|x| x == &b' ')
-
        .nth(2)
-
        .and_then(|s| {
-
            let s = std::str::from_utf8(s).ok()?;
-
            Version::new(s.trim())
-
        })
-
        .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "failed to parse `git` version"))
-
}
-

-
fn invalid_data<E>(inner: E) -> io::Error
-
where
-
    E: Into<Box<dyn std::error::Error + Sync + Send>>,
-
{
-
    io::Error::new(io::ErrorKind::InvalidData, inner)
-
}
deleted link-git/src/protocol/upload_pack/legacy.rs
@@ -1,102 +0,0 @@
-
// Copyright © 2019-2020 The Radicle Foundation <hello@radicle.foundation>
-
// Copyright © 2021      The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use std::{io, path::Path, process::ExitStatus};
-

-
use async_process::{Command, Stdio};
-
use futures_lite::io::{copy, AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _};
-
use futures_util::try_join;
-
use git_ref::{
-
    file::{Store as Refdb, WriteReflog},
-
    FullName,
-
    Reference,
-
};
-

-
pub(super) async fn advertise_refs<R, W>(
-
    git_dir: impl AsRef<Path>,
-
    namespace: &str,
-
    mut recv: R,
-
    mut send: W,
-
) -> io::Result<ExitStatus>
-
where
-
    R: AsyncRead + Unpin,
-
    W: AsyncWrite + Unpin,
-
{
-
    let unhide = blocking::unblock({
-
        let git_dir = git_dir.as_ref().to_path_buf();
-
        let prefix = Path::new("refs")
-
            .join("namespaces")
-
            .join(namespace)
-
            .join("refs");
-
        move || -> io::Result<Vec<FullName>> {
-
            let refdb = Refdb::at(git_dir, WriteReflog::Disable);
-
            let packed = refdb
-
                .packed_buffer()
-
                .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
-

-
            let refs = refdb
-
                .iter_prefixed(packed.as_ref(), prefix)?
-
                .filter_map(|r| r.ok().map(|Reference { name, .. }| name))
-
                .filter(|name| {
-
                    const PATTERN: &[u8] = b"rad/ids/any";
-
                    const SEPARAT: u8 = b'/';
-
                    name.as_bstr()
-
                        .rsplit(|b| b == &SEPARAT)
-
                        .zip(PATTERN.rsplit(|b| b == &SEPARAT))
-
                        .skip(1)
-
                        .all(|(a, b)| a == b)
-
                })
-
                .collect::<Vec<_>>();
-

-
            Ok(refs)
-
        }
-
    })
-
    .await?;
-

-
    let mut child = {
-
        let mut cmd = Command::new("git");
-
        cmd.current_dir(git_dir)
-
            .env_clear()
-
            .envs(std::env::vars().filter(|(key, _)| key == "PATH" || key.starts_with("GIT_TRACE")))
-
            .arg("-c")
-
            .arg("uploadpack.hiderefs=refs/")
-
            .arg("-c")
-
            .arg(format!("uploadpack.hiderefs=!refs/namespaces/{namespace}",));
-

-
        for r in unhide {
-
            cmd.arg("-c")
-
                .arg(format!("uploadpack.hiderefs=!{}", r.as_bstr()));
-
        }
-

-
        cmd.args([
-
            "upload-pack",
-
            "--strict",
-
            "--timeout=5",
-
            "--stateless-rpc",
-
            "--advertise-refs",
-
            ".",
-
        ])
-
        .stdout(Stdio::piped())
-
        .stderr(Stdio::inherit())
-
        .kill_on_drop(true)
-
        .reap_on_drop(true)
-
        .spawn()?
-
    };
-
    let mut stdout = child.stdout.take().unwrap();
-

-
    const HEADER: &[u8] = b"001e# service=git-upload-pack\n0000";
-
    send.write_all(HEADER).await?;
-
    let status = try_join!(copy(&mut stdout, &mut send), child.status()).map(|x| x.1);
-

-
    // Read one byte off the read stream to ensure it is driven to completion
-
    // (we expect EOF immediately). Failure to do so may cause resource leaks.
-
    //
-
    // Cf. 900b6cf6 (replication: Ensure git stream is closed, 2021-04-26)
-
    let mut buf = [0; 1];
-
    recv.read(&mut buf).await?;
-

-
    status
-
}
deleted link-git/src/refs.rs
@@ -1,7 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
pub mod db;
-
pub use git_ref::*;
deleted link-git/src/refs/db.rs
@@ -1,269 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use std::{
-
    collections::BTreeSet,
-
    convert::TryInto,
-
    io,
-
    path::{Path, PathBuf},
-
    sync::Arc,
-
    time::{Duration, SystemTime},
-
};
-

-
use git_ref::{
-
    file::{self, iter::LooseThenPacked, Transaction, WriteReflog},
-
    packed,
-
    FullName,
-
    PartialNameRef,
-
    Reference,
-
    Target,
-
};
-
use parking_lot::RwLock;
-

-
pub mod error {
-
    use super::*;
-
    use thiserror::Error;
-

-
    #[derive(Debug, Error)]
-
    pub enum Open {
-
        #[error("failed to take a snapshot of packed-refs")]
-
        Snapshot(#[from] Snapshot),
-

-
        #[error(transparent)]
-
        Io(#[from] io::Error),
-
    }
-

-
    #[derive(Debug, Error)]
-
    pub enum Snapshot {
-
        #[error("failed to lock packed-refs")]
-
        Lock(#[from] git_lock::acquire::Error),
-

-
        #[error("failed to open packed-refs")]
-
        Open(#[from] packed::buffer::open::Error),
-

-
        #[error(transparent)]
-
        Io(#[from] io::Error),
-
    }
-

-
    #[derive(Debug, Error)]
-
    pub enum Follow {
-
        #[error("cyclic symref: {0:?}")]
-
        Cycle(FullName),
-

-
        #[error("reference {0:?} not found")]
-
        NotFound(FullName),
-

-
        #[error("max symref depth {0} exceeded")]
-
        DepthLimitExceeded(usize),
-

-
        #[error(transparent)]
-
        Find(#[from] file::find::Error),
-
    }
-
}
-

-
/// Threadsafe refdb with shareable `packed-refs` memory buffer.
-
///
-
/// Packed refs are a delicate business: they are written by an external
-
/// process, [`git-pack-refs`], _or_ when a packed ref is deleted. It may also
-
/// be that no `packed-refs` currently exist.
-
///
-
/// The only way we can be certain to operate on a consistent view of what is
-
/// committed to disk is to check if the `packed-refs` file has changed since we
-
/// last read it. This would be quite expensive to do for small operations.
-
/// Thus, the caller is responsible for determining just how much they can
-
/// afford to see possibly out-of-date data: the [`Refdb::snapshot`] method
-
/// checks if the previously loaded `packed-refs` appear to be out-of-date, and
-
/// reloads them if necessary. The resulting [`Snapshot`] contains a pointer to
-
/// an immutable memory buffer of the packed refs which can be shared between
-
/// threads, or cloned.
-
///
-
/// [`git-pack-refs`]: https://git-scm.com/docs/git-pack-refs
-
#[derive(Clone)]
-
pub struct Refdb {
-
    store: file::Store,
-
    packed: Arc<RwLock<Option<Packed>>>,
-
}
-

-
impl Refdb {
-
    pub fn open(git_dir: impl Into<PathBuf>) -> Result<Self, error::Open> {
-
        let store = file::Store::at(git_dir, WriteReflog::Normal);
-
        let packed = Arc::new(RwLock::new(Packed::open(store.packed_refs_path())?));
-
        Ok(Self { store, packed })
-
    }
-

-
    pub fn snapshot(&self) -> Result<Snapshot, error::Snapshot> {
-
        let read = self.packed.read();
-
        match &*read {
-
            None => {
-
                drop(read);
-
                // always modified, because it was None and now is Some
-
                self.reload(|_| true)
-
            },
-

-
            Some(packed) => {
-
                if packed.is_modified()? {
-
                    let mtime = packed.mtime;
-
                    drop(read);
-
                    // we don't care what the mtime is, only that we have a
-
                    // different value than before
-
                    self.reload(|packed1| packed1.mtime != mtime)
-
                } else {
-
                    Ok(Snapshot {
-
                        store: self.store.clone(),
-
                        packed: Some(packed.buf.clone()),
-
                    })
-
                }
-
            },
-
        }
-
    }
-

-
    fn reload<F>(&self, modified_while_blocked: F) -> Result<Snapshot, error::Snapshot>
-
    where
-
        F: FnOnce(&Packed) -> bool,
-
    {
-
        let mut write = self.packed.write();
-
        if let Some(packed) = &*write {
-
            if modified_while_blocked(packed) {
-
                return Ok(Snapshot {
-
                    store: self.store.clone(),
-
                    packed: Some(packed.buf.clone()),
-
                });
-
            }
-
        }
-

-
        match Packed::open(self.store.packed_refs_path())? {
-
            Some(packed) => {
-
                let buf = packed.buf.clone();
-
                *write = Some(packed);
-
                Ok(Snapshot {
-
                    store: self.store.clone(),
-
                    packed: Some(buf),
-
                })
-
            },
-

-
            None => {
-
                *write = None;
-
                Ok(Snapshot {
-
                    store: self.store.clone(),
-
                    packed: None,
-
                })
-
            },
-
        }
-
    }
-
}
-

-
#[derive(Clone)]
-
pub struct Snapshot {
-
    store: file::Store,
-
    packed: Option<Arc<packed::Buffer>>,
-
}
-

-
impl Snapshot {
-
    pub fn find<'a, N, E>(&self, name: N) -> Result<Option<Reference>, file::find::Error>
-
    where
-
        N: TryInto<PartialNameRef<'a>, Error = E>,
-
        file::find::Error: From<E>,
-
    {
-
        self.store.try_find(name, self.packed.as_deref())
-
    }
-

-
    pub fn transaction(&self) -> Transaction {
-
        self.store.transaction()
-
    }
-

-
    pub fn iter(&self, prefix: Option<impl AsRef<Path>>) -> io::Result<LooseThenPacked> {
-
        let packed = self.packed.as_deref();
-
        match prefix {
-
            None => self.store.iter(packed),
-
            Some(p) => self.store.iter_prefixed(packed, p),
-
        }
-
    }
-

-
    /// Follow a symbolic reference until a direct reference is found.
-
    ///
-
    /// If `symref` is a direct reference, a copy of it is returned. No more
-
    /// than five symbolic references will be followed, and cyclic
-
    /// references are detected. Both result in an error to be returned.
-
    ///
-
    /// Note that following is not the same as "peeling": no access to the
-
    /// object database is made, and thus no assumptions about the kind of
-
    /// object the reference ultimately points to can be made.
-
    pub fn follow(&self, symref: &Reference) -> Result<Reference, error::Follow> {
-
        match &symref.target {
-
            Target::Peeled(_) => Ok(symref.clone()),
-
            Target::Symbolic(name) => {
-
                let mut seen = BTreeSet::new();
-
                seen.insert(symref.name.clone());
-

-
                let mut next = self
-
                    .find(name.to_partial())?
-
                    .ok_or_else(|| error::Follow::NotFound(name.clone()))?;
-
                seen.insert(name.clone());
-

-
                const MAX_DEPTH: usize = 5;
-
                loop {
-
                    match next.target {
-
                        Target::Peeled(_) => return Ok(next),
-
                        Target::Symbolic(sym) => {
-
                            if seen.len() + 1 > MAX_DEPTH {
-
                                return Err(error::Follow::DepthLimitExceeded(MAX_DEPTH));
-
                            }
-

-
                            if seen.contains(&sym) {
-
                                return Err(error::Follow::Cycle(sym));
-
                            }
-
                            next = self
-
                                .find(sym.to_partial())?
-
                                .ok_or_else(|| error::Follow::NotFound(sym.clone()))?;
-
                            seen.insert(sym);
-
                        },
-
                    }
-
                }
-
            },
-
        }
-
    }
-
}
-

-
struct Packed {
-
    buf: Arc<packed::Buffer>,
-
    path: PathBuf,
-
    mtime: SystemTime,
-
}
-

-
impl Packed {
-
    fn open(path: PathBuf) -> Result<Option<Self>, error::Snapshot> {
-
        use git_lock::{acquire, Marker};
-

-
        let _lock = Marker::acquire_to_hold_resource(
-
            &path,
-
            acquire::Fail::AfterDurationWithBackoff(Duration::from_millis(500)),
-
            None,
-
        )?;
-
        match path.metadata() {
-
            // `git-lock` will happily lock a non-existent file
-
            Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(None),
-
            Err(e) => Err(e.into()),
-

-
            Ok(meta) => {
-
                let mtime = meta.modified()?;
-
                let buf = Arc::new(packed::Buffer::open(&path, 32 * 1024)?);
-
                Ok(Some(Self { buf, path, mtime }))
-
            },
-
        }
-
    }
-

-
    fn is_modified(&self) -> io::Result<bool> {
-
        match self.path.metadata() {
-
            // it existed before, so gone is modified
-
            Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(true),
-
            Err(e) => Err(e),
-

-
            Ok(meta) => {
-
                let mtime = meta.modified()?;
-
                Ok(self.mtime == mtime)
-
            },
-
        }
-
    }
-
}
deleted link-git/src/service.rs
@@ -1,126 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use std::{fmt::Debug, ops::Deref, str::FromStr};
-

-
use git2::transport::Service as GitService;
-
use lazy_static::lazy_static;
-

-
lazy_static! {
-
    static ref SERVICE_REGEX: regex::Regex = regex::Regex::new(r"(\S+) '/?(.+)'").unwrap();
-
}
-

-
#[derive(Clone, Copy, PartialEq)]
-
pub struct Service(pub GitService);
-

-
/// A service and URN as passed to the exec_request of an SSH server by git when
-
/// talking to an SSH remote. The `FromStr` implementation for this type expects
-
/// a string of the form:
-
///
-
/// `<request type> /<path>`
-
///
-
/// Where the request type is either `upload-pack` or `receive-pack`, the
-
/// leading slash before the urn is optional, and the `path` is whatever the
-
/// `FromStr` of `Path` provides.
-
#[derive(Debug, Clone)]
-
pub struct SshService<Path> {
-
    pub service: Service,
-
    pub path: Path,
-
}
-

-
impl<Path> SshService<Path> {
-
    pub fn is_upload(&self) -> bool {
-
        match self.service.0 {
-
            GitService::UploadPackLs | GitService::UploadPack => true,
-
            GitService::ReceivePackLs | GitService::ReceivePack => false,
-
        }
-
    }
-

-
    pub fn is_receive(&self) -> bool {
-
        !self.is_upload()
-
    }
-
}
-

-
impl From<GitService> for Service {
-
    fn from(g: GitService) -> Self {
-
        Service(g)
-
    }
-
}
-

-
impl From<Service> for GitService {
-
    fn from(s: Service) -> Self {
-
        s.0
-
    }
-
}
-

-
#[derive(thiserror::Error, Debug)]
-
pub enum ParseService {
-
    #[error("the exec str must be in the form <service> <urn>")]
-
    Format,
-
    #[error(transparent)]
-
    Namespace(Box<dyn std::error::Error + Send + Sync + 'static>),
-
    #[error("unknown service {0}")]
-
    UnknownService(String),
-
}
-

-
impl Debug for Service {
-
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
-
        f.debug_tuple("Service")
-
            .field(match self.0 {
-
                GitService::UploadPackLs => &"UploadPackLs",
-
                GitService::UploadPack => &"UploadPack",
-
                GitService::ReceivePackLs => &"ReceivePackLs",
-
                GitService::ReceivePack => &"ReceivePack",
-
            })
-
            .finish()
-
    }
-
}
-

-
impl std::fmt::Display for Service {
-
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-
        match self.0 {
-
            GitService::UploadPack => write!(f, "upload-pack"),
-
            GitService::UploadPackLs => write!(f, "upload-pack-ls"),
-
            GitService::ReceivePack => write!(f, "receive-pack"),
-
            GitService::ReceivePackLs => write!(f, "receive-pack-ls"),
-
        }
-
    }
-
}
-

-
impl Deref for Service {
-
    type Target = GitService;
-

-
    fn deref(&self) -> &Self::Target {
-
        &self.0
-
    }
-
}
-

-
impl<Path> FromStr for SshService<Path>
-
where
-
    Path: FromStr,
-
    Path::Err: std::error::Error + Send + Sync + 'static,
-
{
-
    type Err = ParseService;
-

-
    fn from_str(exec_str: &str) -> Result<Self, Self::Err> {
-
        let cap = SERVICE_REGEX
-
            .captures_iter(exec_str)
-
            .next()
-
            .ok_or(ParseService::Format)?;
-
        debug_assert!(cap.len() == 3);
-
        let service_str: &str = &cap[1];
-
        let urn_str = &cap[2];
-

-
        let path = urn_str
-
            .parse()
-
            .map_err(|err| ParseService::Namespace(Box::new(err)))?;
-
        let service = match service_str {
-
            "git-upload-pack" => Ok(Service(GitService::UploadPack)),
-
            "git-receive-pack" => Ok(Service(GitService::ReceivePack)),
-
            other => Err(ParseService::UnknownService(other.to_string())),
-
        }?;
-
        Ok(Self { service, path })
-
    }
-
}
deleted link-git/t/Cargo.toml
@@ -1,31 +0,0 @@
-
[package]
-
name = "link-git-test"
-
version = "0.1.0"
-
edition = "2021"
-
license = "GPL-3.0-or-later"
-

-
publish = false
-

-
[lib]
-
doctest = false
-
test = true
-
doc = false
-

-
[features]
-
test = []
-

-
[dev-dependencies]
-
anyhow = "1"
-
bstr = "0.2"
-
futures = "0.3"
-
futures_ringbuf = "0.3"
-
tempfile = "3.4"
-

-
[dev-dependencies.git2]
-
version = "0.16.1"
-
default-features = false
-
features = ["vendored-libgit2"]
-

-
[dev-dependencies.link-git]
-
path = ".."
-
features = ["git2"]

\ No newline at end of file
deleted link-git/t/src/integration.rs
@@ -1,6 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
mod protocol;
deleted link-git/t/src/integration/protocol.rs
@@ -1,383 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use std::{
-
    collections::BTreeSet,
-
    io,
-
    path::Path,
-
    sync::{atomic::AtomicBool, Arc},
-
};
-

-
use bstr::ByteSlice as _;
-
use futures::{AsyncReadExt as _, TryFutureExt as _};
-
use link_git::protocol::{fetch, ls, packwriter, upload_pack, ObjectId, PackWriter, Ref};
-
use tempfile::{tempdir, TempDir};
-

-
fn upstream() -> TempDir {
-
    let tmp = tempdir().unwrap();
-

-
    let repo = git2::Repository::init_bare(&tmp).unwrap();
-
    let auth = git2::Signature::now("apollo", "apollo@cree.de").unwrap();
-

-
    let tree = {
-
        let empty = repo.treebuilder(None).unwrap();
-
        let oid = empty.write().unwrap();
-
        repo.find_tree(oid).unwrap()
-
    };
-
    let base = {
-
        let oid = repo
-
            .commit(
-
                Some("refs/namespaces/foo/refs/heads/main"),
-
                &auth,
-
                &auth,
-
                "initial",
-
                &tree,
-
                &[],
-
            )
-
            .unwrap();
-
        repo.find_commit(oid).unwrap()
-
    };
-
    let next = repo
-
        .commit(
-
            Some("refs/namespaces/foo/refs/heads/next"),
-
            &auth,
-
            &auth,
-
            "ng",
-
            &tree,
-
            &[&base],
-
        )
-
        .unwrap();
-
    repo.reference(
-
        "refs/namespaces/foo/refs/pulls/1/head",
-
        next,
-
        true,
-
        "pee arrr",
-
    )
-
    .unwrap();
-

-
    tmp
-
}
-

-
/// Gather all (name, target) pairs in `repo`, failing on the first error.
fn collect_refs(repo: &git2::Repository) -> Result<Vec<(String, git2::Oid)>, git2::Error> {
    let mut out = Vec::new();
    for r in repo.references()? {
        let r = r?;
        out.push((r.name().unwrap().to_owned(), r.target().unwrap()));
    }
    Ok(out)
}
-

-
fn update_tips<'a, T>(repo: &git2::Repository, tips: T) -> Result<(), anyhow::Error>
-
where
-
    T: IntoIterator<Item = &'a Ref>,
-
{
-
    for r in tips {
-
        match r {
-
            Ref::Direct { path, object } => {
-
                repo.reference(
-
                    path.to_str()?,
-
                    git2::Oid::from_bytes(object.as_slice())?,
-
                    true,
-
                    "",
-
                )?;
-
            },
-
            x => anyhow::bail!("unexpected ref variant: {:?}", x),
-
        }
-
    }
-

-
    Ok(())
-
}
-

-
/// Walk the commit graph reachable from `tip`, newest first.
fn collect_history(repo: &git2::Repository, tip: &str) -> Result<Vec<git2::Oid>, git2::Error> {
    let mut walk = repo.revwalk()?;
    walk.push_ref(tip)?;
    walk.collect()
}
-

-
fn run_ls_refs<R: AsRef<Path>>(remote: R, opt: ls::Options) -> io::Result<Vec<Ref>> {
-
    let (client, server) = futures_ringbuf::Endpoint::pair(256, 256);
-
    let client = async move {
-
        let (recv, send) = client.split();
-
        ls::ls_refs(opt, recv, send).await
-
    };
-
    let server = {
-
        let (recv, send) = server.split();
-
        upload_pack::upload_pack(&remote, recv, send).and_then(|(_hdr, run)| run)
-
    };
-

-
    let (client_out, server_out) =
-
        futures::executor::block_on(futures::future::try_join(client, server))?;
-
    assert!(server_out.success());
-
    Ok(client_out)
-
}
-

-
fn run_fetch<R, B, P>(
-
    remote: R,
-
    opt: fetch::Options,
-
    build_pack_writer: B,
-
) -> io::Result<fetch::Outputs<P::Output>>
-
where
-
    R: AsRef<Path>,
-
    B: FnOnce(Arc<AtomicBool>) -> P,
-
    P: PackWriter + Send + 'static,
-
    P::Output: Send + 'static,
-
{
-
    let (client, server) = futures_ringbuf::Endpoint::pair(256, 256);
-
    let client = async move {
-
        let (recv, send) = client.split();
-
        fetch::fetch(opt, build_pack_writer, recv, send).await
-
    };
-
    let server = {
-
        let (recv, send) = server.split();
-
        upload_pack::upload_pack(&remote, recv, send).and_then(|(_hdr, run)| run)
-
    };
-

-
    let (client_out, server_out) =
-
        futures::executor::block_on(futures::future::try_join(client, server))?;
-
    assert!(server_out.success());
-
    Ok(client_out)
-
}
-

-
#[test]
fn smoke() {
    let remote = upstream();

    // Advertisement should cover exactly the three refs `upstream` created.
    let refs = run_ls_refs(
        &remote,
        ls::Options {
            repo: "foo".into(),
            extra_params: vec![],
            ref_prefixes: vec!["refs/heads/".into(), "refs/pulls/".into()],
        },
    )
    .unwrap();

    let advertised = refs.iter().map(|r| r.unpack().0).collect::<BTreeSet<_>>();
    let expected = [
        "refs/heads/main".into(),
        "refs/heads/next".into(),
        "refs/pulls/1/head".into(),
    ]
    .iter()
    .collect::<BTreeSet<_>>();
    assert_eq!(advertised, expected);

    // Fetching everything that was advertised must yield a pack.
    let out = run_fetch(
        &remote,
        fetch::Options {
            repo: "foo".into(),
            extra_params: vec![],
            haves: vec![],
            wants: vec![],
            want_refs: refs.iter().map(|r| r.unpack().0.clone()).collect(),
        },
        |_| packwriter::Discard,
    )
    .unwrap();

    assert!(out.pack.is_some());
}
-

-
#[test]
fn want_ref() {
    let remote = upstream();

    let out = run_fetch(
        &remote,
        fetch::Options {
            repo: "foo".into(),
            extra_params: vec![],
            haves: vec![],
            wants: vec![],
            want_refs: vec!["refs/heads/main".into(), "refs/pulls/1/head".into()],
        },
        |_| packwriter::Discard,
    )
    .unwrap();

    assert!(out.pack.is_some());

    // The server must confirm exactly the refs we asked for.
    let confirmed = out
        .wanted_refs
        .iter()
        .map(|r| r.unpack().0)
        .collect::<BTreeSet<_>>();
    let requested = ["refs/heads/main".into(), "refs/pulls/1/head".into()]
        .iter()
        .collect::<BTreeSet<_>>();
    assert_eq!(confirmed, requested)
}
-

-
#[test]
#[should_panic(expected = "`fetch` is empty")]
fn empty_fetch() {
    let remote = upstream();
    // Asking for nothing at all is a programming error and must panic.
    let opt = fetch::Options {
        repo: "foo".into(),
        extra_params: vec![],
        haves: vec![],
        wants: vec![],
        want_refs: vec![],
    };
    run_fetch(&remote, opt, |_| packwriter::Discard).unwrap();
}
-

-
fn clone_with<R, L, B, P>(remote: R, local: L, build_pack_writer: B)
-
where
-
    R: AsRef<Path>,
-
    L: AsRef<Path>,
-
    B: FnOnce(Arc<AtomicBool>) -> P,
-
    P: PackWriter + Send + 'static,
-
    P::Output: Send + 'static,
-
{
-
    let refs = run_ls_refs(
-
        &remote,
-
        ls::Options {
-
            repo: "foo".into(),
-
            extra_params: vec![],
-
            ref_prefixes: vec!["refs/heads/".into(), "refs/pulls/".into()],
-
        },
-
    )
-
    .unwrap();
-
    let out = run_fetch(
-
        &remote,
-
        fetch::Options {
-
            repo: "foo".into(),
-
            extra_params: vec![],
-
            haves: vec![],
-
            wants: vec![],
-
            want_refs: refs.iter().map(|r| r.unpack().0.clone()).collect(),
-
        },
-
        build_pack_writer,
-
    )
-
    .unwrap();
-

-
    assert!(out.pack.is_some());
-

-
    let remote_repo = git2::Repository::open(remote).unwrap();
-
    remote_repo.set_namespace("foo").unwrap();
-
    let local_repo = git2::Repository::open(&local).unwrap();
-

-
    update_tips(&local_repo, &out.wanted_refs).unwrap();
-

-
    let mut remote_refs = collect_refs(&remote_repo).unwrap();
-
    let mut local_refs = collect_refs(&local_repo).unwrap();
-

-
    remote_refs.sort();
-
    local_refs.sort();
-

-
    assert_eq!(remote_refs, local_refs);
-
}
-

-
#[test]
fn clone_libgit() {
    let remote = upstream();
    let local = tempdir().unwrap();
    let local_repo = git2::Repository::init(&local).unwrap();

    // Exercise the libgit2-backed pack writer.
    clone_with(&remote, &local, move |stop| {
        packwriter::Libgit::new(packwriter::Options::default(), local_repo, stop)
    })
}
-

-
#[test]
fn clone_gitoxide() {
    let remote = upstream();
    let local = tempdir().unwrap();
    let local_repo = git2::Repository::init(&local).unwrap();

    // Exercise the gitoxide-backed ("standard") pack writer.
    clone_with(&remote, &local, move |stop| {
        packwriter::Standard::new(
            local_repo.path(),
            packwriter::Options::default(),
            packwriter::StandardThickener::new(local_repo.path()),
            stop,
        )
    })
}
-

-
fn thin_pack_with<R, L, B, P>(remote: R, local: L, build_pack_writer: B)
-
where
-
    R: AsRef<Path>,
-
    L: AsRef<Path>,
-
    B: Fn(Arc<AtomicBool>) -> P,
-
    P: PackWriter + Send + 'static,
-
    P::Output: Send + 'static,
-
{
-
    // Clone main only
-
    {
-
        let out = run_fetch(
-
            &remote,
-
            fetch::Options {
-
                repo: "foo".into(),
-
                extra_params: vec![],
-
                haves: vec![],
-
                wants: vec![],
-
                want_refs: vec!["refs/heads/main".into()],
-
            },
-
            &build_pack_writer,
-
        )
-
        .unwrap();
-
        assert!(out.pack.is_some());
-
    }
-

-
    let remote_repo = git2::Repository::open(&remote).unwrap();
-
    remote_repo.set_namespace("foo").unwrap();
-
    let local_repo = git2::Repository::open(&local).unwrap();
-

-
    // Fetch next, which is ahead of main
-
    {
-
        let head = remote_repo.refname_to_id("refs/heads/main").unwrap();
-
        let out = run_fetch(
-
            &remote,
-
            fetch::Options {
-
                repo: "foo".into(),
-
                extra_params: vec![],
-
                haves: vec![ObjectId::from_20_bytes(head.as_bytes())],
-
                wants: vec![],
-
                want_refs: vec!["refs/heads/next".into()],
-
            },
-
            build_pack_writer,
-
        )
-
        .unwrap();
-
        assert!(out.pack.is_some());
-

-
        update_tips(&local_repo, &out.wanted_refs).unwrap();
-
    }
-

-
    let remote_history = collect_history(&remote_repo, "refs/heads/next").unwrap();
-
    let local_history = collect_history(&local_repo, "refs/heads/next").unwrap();
-

-
    assert!(!remote_history.is_empty());
-
    assert_eq!(remote_history, local_history)
-
}
-

-
#[test]
#[ignore]
fn thin_pack_libgit() {
    let remote = upstream();
    let local = tempdir().unwrap();

    // The repo is re-opened per transfer since the builder is `Fn`.
    thin_pack_with(&remote, &local, |stop| {
        let local_repo = git2::Repository::init(&local).unwrap();
        packwriter::Libgit::new(packwriter::Options::default(), local_repo, stop)
    })
}
-

-
#[test]
fn thin_pack_gitoxide() {
    let remote = upstream();
    let local = tempdir().unwrap();
    let local_repo = git2::Repository::init(&local).unwrap();
    let git_dir = local_repo.path().to_owned();

    thin_pack_with(&remote, &local, move |stop| {
        packwriter::Standard::new(
            &git_dir,
            packwriter::Options::default(),
            packwriter::StandardThickener::new(&git_dir),
            stop,
        )
    })
}
deleted link-git/t/src/lib.rs
@@ -1,7 +0,0 @@
-
// Copyright © 2022 The Radicle Link Contributors
-
// SPDX-License-Identifier: GPL-3.0-or-later
-

-
#[cfg(test)]
-
mod integration;
-
#[cfg(test)]
-
mod tests;
deleted link-git/t/src/tests.rs
@@ -1,6 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
mod protocol;
deleted link-git/t/src/tests/protocol.rs
@@ -1,7 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
mod take;
-
mod upload_pack;
deleted link-git/t/src/tests/protocol/take.rs
@@ -1,47 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use futures::{executor::block_on, io::Cursor, AsyncReadExt as _};
-
use link_git::protocol::take::TryTake;
-
use std::io;
-

-
#[test]
fn when_within_limit() {
    let input = b"the world is everything that is the case";
    // One byte of headroom: the read completes without hitting the cap.
    let limit = input.len() as u64 + 1;

    let read = block_on(async move {
        let mut buf = Vec::with_capacity(input.len());
        let mut taker = TryTake::new(Cursor::new(input), limit);
        taker.read_to_end(&mut buf).await?;
        Ok::<_, io::Error>(buf)
    })
    .unwrap();

    assert_eq!(input, read.as_slice())
}
-

-
#[test]
fn when_limit_exceeded() {
    let input = b"what is the case, the fact, is the existence of atomic facts";
    // A cap far below the input length must surface as an error.
    let err =
        block_on(TryTake::new(Cursor::new(input), 10).read_to_end(&mut Vec::new())).unwrap_err();

    assert_eq!(err.to_string(), "max input size exceeded")
}
-

-
#[test]
fn excess_bytes_remain() {
    let input = b"whereof one cannot speak, thereof one must be silent";
    let collected = block_on(async move {
        let mut buf = Vec::with_capacity(input.len());
        // Limit equals the input length, so the final read overflows —
        // but everything read up to that point must still be in `buf`.
        let res = TryTake::new(Cursor::new(input), input.len() as u64)
            .read_to_end(&mut buf)
            .await;
        assert!(res.is_err());
        buf
    });

    assert_eq!(input, collected.as_slice())
}
deleted link-git/t/src/tests/protocol/upload_pack.rs
@@ -1,129 +0,0 @@
-
// Copyright © 2021 The Radicle Link Contributors
-
//
-
// This file is part of radicle-link, distributed under the GPLv3 with Radicle
-
// Linking Exception. For full terms see the included LICENSE file.
-

-
use link_git::protocol::upload_pack;
-

-
mod header {
-
    use super::*;
-
    use std::str::FromStr as _;
-

-
    #[test]
-
    fn service_must_be_upload_pack() {
-
        assert_eq!(
-
            upload_pack::Header::from_str("git-receive-pack "),
-
            Err("unsupported service")
-
        )
-
    }
-

-
    #[test]
-
    fn no_path() {
-
        assert_eq!(
-
            upload_pack::Header::from_str("git-upload-pack "),
-
            Err("missing path")
-
        )
-
    }
-

-
    #[test]
-
    fn empty_path() {
-
        assert_eq!(
-
            upload_pack::Header::from_str("git-upload-pack \0host=lolhost:123\0"),
-
            Err("empty path")
-
        )
-
    }
-

-
    #[test]
-
    fn host_and_port() {
-
        assert_eq!(
-
            upload_pack::Header::from_str("git-upload-pack /git.git\0host=lolhost:123\0").unwrap(),
-
            upload_pack::Header {
-
                path: "/git.git".to_owned(),
-
                host: Some(("lolhost".to_owned(), Some(123))),
-
                extra: vec![]
-
            }
-
        )
-
    }
-

-
    #[test]
-
    fn host_without_port() {
-
        assert_eq!(
-
            upload_pack::Header::from_str("git-upload-pack /git.git\0host=lolhost\0").unwrap(),
-
            upload_pack::Header {
-
                path: "/git.git".to_owned(),
-
                host: Some(("lolhost".to_owned(), None)),
-
                extra: vec![]
-
            }
-
        )
-
    }
-

-
    #[test]
-
    fn no_host() {
-
        assert_eq!(
-
            upload_pack::Header::from_str("git-upload-pack /git.git\0").unwrap(),
-
            upload_pack::Header {
-
                path: "/git.git".to_owned(),
-
                host: None,
-
                extra: vec![]
-
            }
-
        )
-
    }
-

-
    #[test]
-
    fn empty_host() {
-
        assert_eq!(
-
            upload_pack::Header::from_str("git-upload-pack /git.git\0\0").unwrap(),
-
            upload_pack::Header {
-
                path: "/git.git".to_owned(),
-
                host: None,
-
                extra: vec![]
-
            }
-
        )
-
    }
-

-
    #[test]
-
    fn no_host_extra() {
-
        assert_eq!(
-
            upload_pack::Header::from_str("git-upload-pack /git.git\0\0version=42\0").unwrap(),
-
            upload_pack::Header {
-
                path: "/git.git".to_owned(),
-
                host: None,
-
                extra: vec![("version".to_owned(), Some("42".to_owned()))]
-
            }
-
        )
-
    }
-

-
    #[test]
-
    fn host_port_extra() {
-
        assert_eq!(
-
            upload_pack::Header::from_str(
-
                "git-upload-pack /git.git\0host=lolhost:123\0\0version=42\0"
-
            )
-
            .unwrap(),
-
            upload_pack::Header {
-
                path: "/git.git".to_owned(),
-
                host: Some(("lolhost".to_owned(), Some(123))),
-
                extra: vec![("version".to_owned(), Some("42".to_owned()))]
-
            }
-
        )
-
    }
-

-
    #[test]
-
    fn host_extra_extra() {
-
        assert_eq!(
-
            upload_pack::Header::from_str(
-
                "git-upload-pack /git.git\0host=lolhost\0\0version=42\0foo\0n=69\0"
-
            )
-
            .unwrap(),
-
            upload_pack::Header {
-
                path: "/git.git".to_owned(),
-
                host: Some(("lolhost".to_owned(), None)),
-
                extra: vec![
-
                    ("version".to_owned(), Some("42".to_owned())),
-
                    ("foo".to_owned(), None),
-
                    ("n".to_owned(), Some("69".to_owned()))
-
                ]
-
            }
-
        )
-
    }
-
}
modified radicle-git-ext/Cargo.toml
@@ -1,9 +1,11 @@
[package]
name = "radicle-git-ext"
-
version = "0.2.1"
+
version = "0.3.0"
authors = [
+
  "Alexis Sellier <alexis@radicle.xyz>",
  "Kim Altintop <kim@eagain.st>",
  "Fintan Halpenny <fintan.halpenny@gmail.com",
+
  "Han Xu <keepsimple@gmail.com>",
]
edition = "2021"
license = "GPL-3.0-or-later"
modified radicle-surf/Cargo.toml
@@ -38,7 +38,7 @@ default-features = false
features = ["vendored-libgit2"]

[dependencies.radicle-git-ext]
-
version = "0.2.0"
+
version = "0.3.0"
path = "../radicle-git-ext"
features = ["serde"]

modified test/Cargo.toml
@@ -11,22 +11,10 @@ doctest = false
test = true
doc = false

-
[dev-dependencies.git-commit-test]
-
path = "../git-commit/t"
-
features = ["test"]
-

[dev-dependencies.radicle-git-ext-test]
path = "../radicle-git-ext/t"
features = ["test"]

-
[dev-dependencies.git-trailers-test]
-
path = "../git-trailers/t"
-
features = ["test"]
-

-
[dev-dependencies.link-git-test]
-
path = "../link-git/t"
-
features = ["test"]
-

[dev-dependencies.radicle-surf-test]
path = "../radicle-surf/t"
features = ["test"]