diff --git a/Cargo.toml b/Cargo.toml
index 8da538f8..b09ef16d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -20,6 +20,7 @@ composefs = { version = "0.3.0", path = "crates/composefs", default-features = f
 composefs-oci = { version = "0.3.0", path = "crates/composefs-oci", default-features = false }
 composefs-boot = { version = "0.3.0", path = "crates/composefs-boot", default-features = false }
 composefs-http = { version = "0.3.0", path = "crates/composefs-http", default-features = false }
+composefs-ostree = { version = "0.3.0", path = "crates/composefs-ostree", default-features = false }
 
 [profile.dev.package.sha2]
 # this is *really* slow otherwise
diff --git a/crates/cfsctl/Cargo.toml b/crates/cfsctl/Cargo.toml
index 4901f490..6ffe0f66 100644
--- a/crates/cfsctl/Cargo.toml
+++ b/crates/cfsctl/Cargo.toml
@@ -11,9 +11,10 @@ rust-version.workspace = true
 version.workspace = true
 
 [features]
-default = ['pre-6.15', 'oci']
+default = ['pre-6.15', 'oci', 'ostree']
 http = ['composefs-http']
 oci = ['composefs-oci']
+ostree = ['composefs-ostree']
 rhel9 = ['composefs/rhel9']
 'pre-6.15' = ['composefs/pre-6.15']
 
@@ -24,6 +25,7 @@ composefs = { workspace = true }
 composefs-boot = { workspace = true }
 composefs-oci = { workspace = true, optional = true }
 composefs-http = { workspace = true, optional = true }
+composefs-ostree = { workspace = true, optional = true }
 env_logger = { version = "0.11.0", default-features = false }
 hex = { version = "0.4.0", default-features = false }
 rustix = { version = "1.0.0", default-features = false, features = ["fs", "process"] }
diff --git a/crates/cfsctl/src/main.rs b/crates/cfsctl/src/main.rs
index 6c81c0ea..43c89c9d 100644
--- a/crates/cfsctl/src/main.rs
+++ b/crates/cfsctl/src/main.rs
@@ -111,6 +111,31 @@ enum OciCommand {
     },
 }
 
+#[cfg(feature = "ostree")]
+#[derive(Debug, Subcommand)]
+enum OstreeCommand {
+    PullLocal {
+        ostree_repo_path: PathBuf,
+        ostree_ref: String,
+        #[clap(long)]
+        base_name: Option<String>,
+    },
+    Pull {
+        ostree_repo_url: String,
+        ostree_ref: String,
+        #[clap(long)]
+        base_name: Option<String>,
+    },
+    CreateImage {
+        commit_name: String,
+        #[clap(long)]
+        image_name: Option<String>,
+    },
+    Inspect {
+        commit_name: String,
+    },
+}
+
 /// Common options for reading a filesystem from a path
 #[derive(Debug, Parser)]
 struct FsReadOptions {
@@ -151,6 +176,12 @@ enum Command {
         #[clap(subcommand)]
         cmd: OciCommand,
     },
+    /// Commands for dealing with OSTree commits
+    #[cfg(feature = "ostree")]
+    Ostree {
+        #[clap(subcommand)]
+        cmd: OstreeCommand,
+    },
     /// Mounts a composefs, possibly enforcing fsverity of the image
     Mount {
         /// the name of the image to mount, either an fs-verity hash or prefixed with 'ref/'
@@ -374,6 +405,50 @@ where
             let id = fs.compute_image_id();
             println!("{}", id.to_hex());
         }
+        #[cfg(feature = "ostree")]
+        Command::Ostree { cmd: ostree_cmd } => match ostree_cmd {
+            OstreeCommand::PullLocal {
+                ref ostree_repo_path,
+                ref ostree_ref,
+                base_name,
+            } => {
+                let verity = composefs_ostree::pull_local(
+                    &Arc::new(repo),
+                    ostree_repo_path,
+                    ostree_ref,
+                    base_name.as_deref(),
+                )
+                .await?;
+
+                println!("verity {}", verity.to_hex());
+            }
+            OstreeCommand::Pull {
+                ref ostree_repo_url,
+                ref ostree_ref,
+                base_name,
+            } => {
+                let verity = composefs_ostree::pull(
+                    &Arc::new(repo),
+                    ostree_repo_url,
+                    ostree_ref,
+                    base_name.as_deref(),
+                )
+                .await?;
+
+                println!("verity {}", verity.to_hex());
+            }
+            OstreeCommand::CreateImage {
+                ref commit_name,
+                ref image_name,
+            } => {
+                let fs = composefs_ostree::create_filesystem(&repo, commit_name)?;
+                let image_id = fs.commit_image(&repo, image_name.as_deref())?;
+                println!("{}", image_id.to_id());
+            }
+            OstreeCommand::Inspect { ref commit_name } => {
+                composefs_ostree::inspect(&repo, commit_name)?;
+            }
+        },
         Command::CreateImage {
             fs_opts,
             ref image_name,
diff --git a/crates/composefs-ostree/Cargo.toml b/crates/composefs-ostree/Cargo.toml
new file mode 100644
index 00000000..24c07db3
--- /dev/null
+++ b/crates/composefs-ostree/Cargo.toml
@@ -0,0 +1,29 @@
+[package]
+name = "composefs-ostree"
+description = "ostree support for composefs"
+keywords = ["composefs", "ostree"]
+
+edition.workspace = true
+license.workspace = true
+readme.workspace = true
+repository.workspace = true
+rust-version.workspace = true
+version.workspace = true
+
+[dependencies]
+anyhow = { version = "1.0.87", default-features = false }
+composefs = { workspace = true }
+configparser = { version = "3.1.0", features = [] }
+flate2 = { version = "1.1.2", default-features = true }
+gvariant = { version = "0.5.0", default-features = true }
+hex = { version = "0.4.0", default-features = false, features = ["std"] }
+rustix = { version = "1.0.0", default-features = false, features = ["fs", "mount", "process", "std"] }
+sha2 = { version = "0.10.1", default-features = false }
+zerocopy = { version = "0.8.0", default-features = false, features = ["derive", "std"] }
+reqwest = { version = "0.12.15", features = ["zstd"] }
+
+[dev-dependencies]
+similar-asserts = "1.7.0"
+
+[lints]
+workspace = true
diff --git a/crates/composefs-ostree/src/commit.rs b/crates/composefs-ostree/src/commit.rs
new file mode 100644
index 00000000..cb0f4e7a
--- /dev/null
+++ b/crates/composefs-ostree/src/commit.rs
@@ -0,0 +1,587 @@
+//! Ostree commit splitstream implementation
+
+/* Implementation of the ostree commit splitstream format
+ *
+ * Commit splitstreams map a set of ostree sha256 digests to the
+ * content of the corresponding ostree objects. Each entry consists
+ * of some data plus an optional ObjectID referencing an external
+ * object. If there is an external reference, the data is the header
+ * of the ostree object.
+ *
+ * The file format is intended to be stored in a splitstream and
+ * uses the splitstream header to reference the external object ids.
+ *
+ * An object file has this format:
+ * (All integers are little-endian)
+ *
+ * header:
+ *   +-----------------------------------+
+ *   | u32: index of commit object       |
+ *   | u32: flags                        |
+ *   +-----------------------------------+
+ *
+ * buckets:
+ *   256 x (indexes are into ostree_ids)
+ *   +-----------------------------------+
+ *   | u32: end index of bucket          |
+ *   +-----------------------------------+
+ *
+ * ostree_ids:
+ *   n_objects x (sorted)
+ *   +-----------------------------------+
+ *   | [u8; 32] ostree object id         |
+ *   +-----------------------------------+
+ *
+ * object_data:
+ *   n_objects x (same order as ostree_ids)
+ *   +-----------------------------------+
+ *   | u32: offset to per-object data    |
+ *   | u32: length of per-object data    |
+ *   | u32: index of external object ref |
+ *   |      or u32::MAX if none          |
+ *   +-----------------------------------+
+ *
+ * Offsets are 8-byte aligned and relative to the end of the
+ * object_data array.
+ *
+ */
+use anyhow::{bail, Error, Result};
+use gvariant::aligned_bytes::{AlignedBuf, AlignedSlice, AsAligned, TryAsAligned, A8};
+use std::{fmt, io::Read, mem::size_of, sync::Arc};
+use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};
+
+use gvariant::{gv, Marker, Structure};
+use sha2::{Digest, Sha256};
+use std::{collections::BTreeMap, ffi::OsStr, os::unix::ffi::OsStrExt};
+
+use composefs::{
+    fsverity::FsVerityHashValue,
+    repository::Repository,
+    tree::{Directory, FileSystem, Inode, Leaf, LeafContent, RegularFile, Stat},
+    util::Sha256Digest,
+};
+
+use crate::repo::split_sized_variant;
+
+const OSTREE_COMMIT_CONTENT_TYPE: u64 = 0xAFE138C18C463EF1;
+
+const S_IFMT: u32 = 0o170000;
+const S_IFLNK: u32 = 0o120000;
+
+#[derive(Debug, FromBytes, Immutable, IntoBytes, KnownLayout)]
+#[repr(C)]
+struct CommitHeader {
+    commit_id: u32,
+    flags: u32,
+    bucket_ends: [u32; 256],
+}
+
+#[derive(Debug, FromBytes, Immutable, KnownLayout)]
+#[repr(C)]
+struct Sha256DigestArray {
+    ids: [Sha256Digest],
+}
+
+const NO_EXTERNAL_INDEX: u32 = u32::MAX;
+
+#[derive(Debug, FromBytes, Immutable, IntoBytes, KnownLayout, Clone)]
+#[repr(C)]
+struct DataRef {
+    offset: u32,
+    size: u32,
+    external_index: u32,
+}
+
+impl DataRef {
+    pub fn new(offset: usize, size: usize, external_index: Option<usize>) -> Self {
+        DataRef {
+            offset: u32::to_le(offset as u32),
+            size: u32::to_le(size as u32),
+            external_index: u32::to_le(match external_index {
+                Some(idx) => idx as u32,
+                None => NO_EXTERNAL_INDEX,
+            }),
+        }
+    }
+    pub fn get_offset(&self) -> usize {
+        u32::from_le(self.offset) as usize
+    }
+    pub fn get_size(&self) -> usize {
+        u32::from_le(self.size) as usize
+    }
+    pub fn get_external_index(&self) -> Option<usize> {
+        match u32::from_le(self.external_index) {
+            NO_EXTERNAL_INDEX => None,
+            idx => Some(idx as usize),
+        }
+    }
+}
+
+#[derive(Debug, FromBytes, Immutable, IntoBytes, KnownLayout)]
+#[repr(C)]
+struct DataRefs {
+    datas: [DataRef],
+}
+
+#[derive(Debug)]
+struct WriterEntry<ObjectID: FsVerityHashValue> {
+    ostree_id: Sha256Digest,
+    external_object: Option<ObjectID>,
+    data: AlignedBuf,
+}
+
+#[derive(Debug)]
+pub(crate) struct CommitWriter<ObjectID: FsVerityHashValue> {
+    commit_id: Option<Sha256Digest>,
+    map: Vec<WriterEntry<ObjectID>>,
+}
+
+fn align8(x: usize) -> usize {
+    (x + 7) & !7
+}
+
+impl<ObjectID: FsVerityHashValue> CommitWriter<ObjectID> {
+    pub fn new() -> Self {
+        CommitWriter {
+            commit_id: None,
+            map: vec![],
+        }
+    }
+
+    fn lookup_idx(&self, ostree_id: &Sha256Digest) -> Option<usize> {
+        self.map
+            .binary_search_by_key(ostree_id, |e| e.ostree_id)
+            .ok()
+    }
+
+    pub fn contains(&self, ostree_id: &Sha256Digest) -> bool {
+        self.lookup_idx(ostree_id).is_some()
+    }
+
+    pub fn set_commit_id(&mut self, id: &Sha256Digest) {
+        self.commit_id = Some(*id);
+    }
+
+    pub fn insert(
+        &mut self,
+        ostree_id: &Sha256Digest,
+        external_object: Option<&ObjectID>,
+        data: &[u8],
+    ) {
+        match self.map.binary_search_by_key(ostree_id, |e| e.ostree_id) {
+            Ok(_idx) => {}
+            Err(idx) => {
+                let mut aligned_data = AlignedBuf::new();
+                aligned_data.with_vec(|v| v.extend_from_slice(data));
+                self.map.insert(
+                    idx,
+                    WriterEntry {
+                        ostree_id: *ostree_id,
+                        external_object: external_object.cloned(),
+                        data: aligned_data,
+                    },
+                );
+            }
+        }
+    }
+
+    pub fn serialize(
+        &self,
+        repo: &Arc<Repository<ObjectID>>,
+        content_id: &str,
+    ) -> Result<ObjectID> {
+        let mut ss = repo.create_stream(OSTREE_COMMIT_CONTENT_TYPE);
+
+        /* Ensure we can index and count items using u32 (leaving one for NO_EXTERNAL_INDEX) */
+        let item_count = self.map.len();
+        if item_count > (NO_EXTERNAL_INDEX - 1) as usize {
+            return Err(Error::msg("Too many items in object map"));
+        }
+
+        let main_idx = if let Some(objid) = &self.commit_id {
+            if let Some(idx) = self.lookup_idx(objid) {
+                idx
+            } else {
+                return Err(Error::msg("commit object not in commit"));
+            }
+        } else {
+            return Err(Error::msg("No commit id set"));
+        };
+
+        let mut header = CommitHeader {
+            commit_id: u32::to_le(main_idx as u32),
+            flags: 0,
+            bucket_ends: [0; 256],
+        };
+
+        // Compute data offsets and add external object references
+        let mut data_size = 0usize;
+        let mut data_offsets = vec![0usize; item_count];
+        for (i, e) in self.map.iter().enumerate() {
+            data_offsets[i] = data_size;
+            data_size += align8(e.data.len());
+        }
+
+        // Ensure all data can be indexed by u32
+        if data_size > u32::MAX as usize {
+            return Err(Error::msg("Too large data in object map"));
+        }
+
+        // Compute bucket ends
+        for e in self.map.iter() {
+            // Initially end is just the count
+            header.bucket_ends[e.ostree_id[0] as usize] += 1;
+        }
+        for i in 1..256 {
+            // Then we sum them up to the end
+            header.bucket_ends[i] += header.bucket_ends[i - 1];
+        }
+        // Convert buckets to little endian
+        for i in 0..256 {
+            header.bucket_ends[i] = u32::to_le(header.bucket_ends[i]);
+        }
+
+        // Add header
+        ss.write_inline(header.as_bytes());
+        // Add mapped ids
+        for e in self.map.iter() {
+            ss.write_inline(&e.ostree_id);
+        }
+        // Add data refs
+        for (i, e) in self.map.iter().enumerate() {
+            let idx = e
+                .external_object
+                .as_ref()
+                .map(|external_object| ss.add_object_ref(external_object));
+            let d = DataRef::new(data_offsets[i], e.data.len(), idx);
+            ss.write_inline(d.as_bytes());
+        }
+
+        // Add 8-aligned data chunks
+        for e in self.map.iter() {
+            ss.write_inline(&e.data);
+            // Pad to 8
+            let padding = align8(e.data.len()) - e.data.len();
+            if padding > 0 {
+                ss.write_inline(&vec![0u8; padding]);
+            }
+        }
+
+        repo.write_stream(ss, content_id, None)
+    }
+}
+
+#[derive(Debug)]
+struct ReaderEntry<ObjectID: FsVerityHashValue> {
+    ostree_id: Sha256Digest,
+    data_offset: usize,
+    data_size: usize,
+    external_object: Option<ObjectID>,
+}
+
+pub(crate) struct CommitReader<ObjectID: FsVerityHashValue> {
+    map: Vec<ReaderEntry<ObjectID>>,
+    commit_id: Sha256Digest,
+    bucket_ends: [u32; 256],
+    data: AlignedBuf,
+}
+
+impl<ObjectID: FsVerityHashValue> fmt::Debug for CommitReader<ObjectID> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut m = f.debug_map();
+        for e in self.map.iter() {
+            m.entry(
+                &hex::encode(e.ostree_id),
+                &format!("{:?}", self.lookup(&e.ostree_id).unwrap()),
+            );
+        }
+        m.finish()
+    }
+}
+
+fn validate_buckets(buckets: &[u32; 256]) -> Result<()> {
+    for i in 1..256 {
+        // Bucket ends are (non-strictly) increasing
+        if buckets[i] < buckets[i - 1] {
+            return Err(Error::msg("Invalid commit bucket data"));
+        }
+    }
+    Ok(())
+}
+
+impl<ObjectID: FsVerityHashValue> CommitReader<ObjectID> {
+    pub fn load(repo: &Repository<ObjectID>, content_id: &str) -> Result<Self> {
+        let mut ss = repo.open_stream(content_id, None, Some(OSTREE_COMMIT_CONTENT_TYPE))?;
+
+        let mut buf = AlignedBuf::new();
+
+        buf.with_vec(|v| v.resize(size_of::<CommitHeader>(), 0u8));
+        let n_read = ss.read(&mut buf)?;
+        if n_read != buf.len() {
+            return Err(Error::msg("Not enough data"));
+        }
+
+        let h =
+            CommitHeader::ref_from_bytes(&buf).map_err(|_e| Error::msg("Invalid commit header"))?;
+
+        let commit_id_idx = u32::from_le(h.commit_id) as usize;
+
+        let mut buckets: [u32; 256] = h.bucket_ends;
+        for b in buckets.iter_mut() {
+            *b = u32::from_le(*b);
+        }
+        validate_buckets(&buckets)?;
+        let item_count = buckets[255] as usize;
+
+        if commit_id_idx >= item_count {
+            return Err(Error::msg("commit id out of bounds"));
+        }
+
+        buf.with_vec(|v| v.resize(item_count * size_of::<Sha256Digest>(), 0u8));
+        let n_read = ss.read(&mut buf)?;
Err(Error::msg("Not enough data")); + }; + let ostree_ids = Sha256DigestArray::ref_from_bytes(&buf) + .map_err(|_e| Error::msg("Invalid commit array"))?; + + if ostree_ids.ids.len() != item_count { + return Err(Error::msg("Invalid commit array")); + } + + let commit_id = ostree_ids.ids[commit_id_idx]; + + let mut map = Vec::>::with_capacity(item_count); + for i in 0..item_count { + map.push(ReaderEntry { + ostree_id: ostree_ids.ids[i], + data_offset: 0, + data_size: 0, + external_object: None, + }) + } + + buf.with_vec(|v| v.resize(item_count * size_of::(), 0u8)); + let n_read = ss.read(&mut buf)?; + if n_read != buf.len() { + return Err(Error::msg("Not enough data")); + }; + + let data_refs = + DataRefs::ref_from_bytes(&buf).map_err(|_e| Error::msg("Invalid commit array"))?; + + if data_refs.datas.len() != item_count { + return Err(Error::msg("Invalid commit array")); + } + + for (i, item) in map.iter_mut().enumerate() { + let data = &data_refs.datas[i]; + + item.data_offset = data.get_offset(); + item.data_size = data.get_size(); + item.external_object = if let Some(idx) = data.get_external_index() { + ss.lookup_external_ref(idx).cloned() + } else { + None + }; + } + + buf.with_vec(|v| { + v.clear(); + ss.read_to_end(v) + })?; + + Ok(CommitReader { + map, + commit_id, + data: buf, + bucket_ends: buckets, + }) + } + + fn get_data(&self, entry: &ReaderEntry) -> &AlignedSlice { + let start = entry.data_offset; + let end = start + entry.data_size; + // The unwrap here is safe, because data is always 8 aligned + self.data[start..end].try_as_aligned().unwrap() + } + + fn get_bucket(&self, ostree_id: &Sha256Digest) -> (usize, usize) { + let first = ostree_id[0] as usize; + let start = if first == 0 { + 0 + } else { + self.bucket_ends[first - 1] + }; + let end = self.bucket_ends[first]; + (start as usize, end as usize) + } + + pub fn lookup( + &self, + ostree_id: &Sha256Digest, + ) -> Option<(Option<&ObjectID>, &AlignedSlice)> { + let (start, end) = self.get_bucket(ostree_id); + let in_bucket = &self.map[start..end]; + let index = match in_bucket.binary_search_by_key(ostree_id, |e| e.ostree_id) { + Ok(i) => i, + Err(..) 
+        };
+        let entry = &in_bucket[index];
+        Some((entry.external_object.as_ref(), self.get_data(entry)))
+    }
+
+    pub fn lookup_data(&self, ostree_id: &Sha256Digest) -> Option<&AlignedSlice<A8>> {
+        if let Some((None, data)) = self.lookup(ostree_id) {
+            Some(data)
+        } else {
+            None
+        }
+    }
+
+    pub fn iter(
+        &self,
+    ) -> impl Iterator<Item = (&Sha256Digest, Option<&ObjectID>, &AlignedSlice<A8>)> {
+        self.map
+            .iter()
+            .map(|e| (&e.ostree_id, e.external_object.as_ref(), self.get_data(e)))
+    }
+
+    fn create_filesystem_file(&self, id: &Sha256Digest) -> Result<Leaf<ObjectID>> {
+        let (maybe_obj_id, file_header) = self.lookup(id).ok_or(Error::msg(format!(
+            "Unexpectedly missing ostree file object {}",
+            hex::encode(id)
+        )))?;
+
+        let (_sized_data, variant_data, remaining_data) = split_sized_variant(file_header)?;
+
+        let data = gv!("(tuuuusa(ayay))").cast(variant_data.try_as_aligned()?);
+        let (size, uid, gid, mode, _zero, symlink_target, xattrs_data) = data.to_tuple();
+        let mut xattrs = BTreeMap::<Box<OsStr>, Box<[u8]>>::new();
+        for x in xattrs_data.iter() {
+            let (key, value) = x.to_tuple();
+            xattrs.insert(OsStr::from_bytes(key).into(), Box::from(value));
+        }
+
+        let stat = Stat {
+            st_mode: u32::from_be(*mode),
+            st_uid: u32::from_be(*uid),
+            st_gid: u32::from_be(*gid),
+            st_mtim_sec: 0,
+            xattrs: xattrs.into(),
+        };
+
+        let content = if (stat.st_mode & S_IFMT) == S_IFLNK {
+            LeafContent::Symlink(OsStr::new(symlink_target.to_str()).into())
+        } else {
+            let file = if let Some(obj_id) = maybe_obj_id {
+                if !remaining_data.is_empty() {
+                    bail!("Unexpected trailing file data");
+                }
+                RegularFile::External(obj_id.clone(), u64::from_be(*size))
+            } else {
+                RegularFile::Inline(remaining_data.into())
+            };
+            LeafContent::Regular(file)
+        };
+
+        Ok(Leaf { stat, content })
+    }
+
+    fn create_filesystem_dir(
+        &self,
+        dirtree_id: &Sha256Digest,
+        dirmeta_id: &Sha256Digest,
+    ) -> Result<Directory<ObjectID>> {
+        let (_obj_id, dirmeta) = self.lookup(dirmeta_id).ok_or(Error::msg(format!(
+            "Unexpectedly missing ostree dirmeta object {}",
+            hex::encode(dirmeta_id)
+        )))?;
+        let (_obj_id, dirtree) = self.lookup(dirtree_id).ok_or(Error::msg(format!(
+            "Unexpectedly missing ostree dirtree object {}",
+            hex::encode(dirtree_id)
+        )))?;
+
+        let dirmeta_sha = Sha256::digest(dirmeta);
+        if *dirmeta_sha != *dirmeta_id {
+            bail!(
+                "Invalid dirmeta checksum {:?}, expected {:?}",
+                dirmeta_sha,
+                dirmeta_id
+            );
+        }
+        let dirtree_sha = Sha256::digest(dirtree);
+        if *dirtree_sha != *dirtree_id {
+            bail!(
+                "Invalid dirtree checksum {:?}, expected {:?}",
+                dirtree_sha,
+                dirtree_id
+            );
+        }
+
+        let data = gv!("(uuua(ayay))").cast(dirmeta.as_aligned());
+        let (uid, gid, mode, xattrs_data) = data.to_tuple();
+        let mut xattrs = BTreeMap::<Box<OsStr>, Box<[u8]>>::new();
+        for x in xattrs_data.iter() {
+            let (key, value) = x.to_tuple();
+            xattrs.insert(OsStr::from_bytes(key).into(), Box::from(value));
+        }
+
+        let stat = Stat {
+            st_mode: u32::from_be(*mode),
+            st_uid: u32::from_be(*uid),
+            st_gid: u32::from_be(*gid),
+            st_mtim_sec: 0,
+            xattrs: xattrs.into(),
+        };
+
+        let mut directory = Directory::new(stat);
+
+        let tree_data = gv!("(a(say)a(sayay))").cast(dirtree.as_aligned());
+        let (files_data, dirs_data) = tree_data.to_tuple();
+
+        for f in files_data.iter() {
+            let (name, checksum) = f.to_tuple();
+
+            let file = self.create_filesystem_file(checksum.try_into()?)?;
+            directory.insert(OsStr::new(name.to_str()), Inode::Leaf(file.into()));
+        }
+
+        for d in dirs_data.iter() {
+            let (name, tree_checksum, meta_checksum) = d.to_tuple();
+
+            let subdir =
+                self.create_filesystem_dir(tree_checksum.try_into()?, meta_checksum.try_into()?)?;
+
+            directory.insert(
+                OsStr::new(name.to_str()),
+                Inode::Directory(Box::new(subdir)),
+            );
+        }
+
+        Ok(directory)
+    }
+
+    /// Create a tree::FileSystem for the commit
+    pub fn create_filesystem(&self) -> Result<FileSystem<ObjectID>> {
+        let commit = self
+            .lookup_data(&self.commit_id)
+            .ok_or(Error::msg("Unexpectedly missing commit object"))?;
+
+        let data = gv!("(a{sv}aya(say)sstayay)").cast(commit);
+        let (
+            _metadata_data,
+            _parent_checksum,
+            _related_objects,
+            _subject,
+            _body,
+            _timestamp,
+            root_tree,
+            root_metadata,
+        ) = data.to_tuple();
+
+        let root = self.create_filesystem_dir(root_tree.try_into()?, root_metadata.try_into()?)?;
+
+        Ok(FileSystem::<ObjectID> { root })
+    }
+}
diff --git a/crates/composefs-ostree/src/lib.rs b/crates/composefs-ostree/src/lib.rs
new file mode 100644
index 00000000..6ea77b1e
--- /dev/null
+++ b/crates/composefs-ostree/src/lib.rs
@@ -0,0 +1,81 @@
+//! Rust bindings and utilities for working with composefs repositories and ostree
+//!
+
+use anyhow::Result;
+use rustix::fs::CWD;
+use std::{path::Path, sync::Arc};
+
+use composefs::{fsverity::FsVerityHashValue, repository::Repository, tree::FileSystem};
+
+pub mod commit;
+pub mod pull;
+pub mod repo;
+
+use crate::commit::CommitReader;
+use crate::pull::PullOperation;
+use crate::repo::{LocalRepo, RemoteRepo};
+
+/// Pull from a local ostree repo into the repository
+pub async fn pull_local<ObjectID: FsVerityHashValue>(
+    repo: &Arc<Repository<ObjectID>>,
+    ostree_repo_path: &Path,
+    ostree_ref: &str,
+    base_reference: Option<&str>,
+) -> Result<ObjectID> {
+    let ostree_repo = LocalRepo::open_path(repo, CWD, ostree_repo_path)?;
+
+    let commit_checksum = ostree_repo.read_ref(ostree_ref)?;
+
+    let mut op = PullOperation::<ObjectID, LocalRepo<ObjectID>>::new(repo, ostree_repo);
+    if let Some(base_name) = base_reference {
+        op.add_base(base_name)?;
+    }
+
+    op.pull_commit(&commit_checksum).await
+}
+
+/// Pull from a remote ostree repo into the repository
+pub async fn pull<ObjectID: FsVerityHashValue>(
+    repo: &Arc<Repository<ObjectID>>,
+    ostree_repo_url: &str,
+    ostree_ref: &str,
+    base_reference: Option<&str>,
+) -> Result<ObjectID> {
+    let ostree_repo = RemoteRepo::new(repo, ostree_repo_url)?;
+
+    let commit_checksum = ostree_repo.resolve_ref(ostree_ref).await?;
+
+    let mut op = PullOperation::<ObjectID, RemoteRepo<ObjectID>>::new(repo, ostree_repo);
+    if let Some(base_name) = base_reference {
+        op.add_base(base_name)?;
+    }
+
+    op.pull_commit(&commit_checksum).await
+}
+
+/// Creates a filesystem from the given OSTree commit.
+pub fn create_filesystem<ObjectID: FsVerityHashValue>(
+    repo: &Repository<ObjectID>,
+    commit_name: &str,
+) -> Result<FileSystem<ObjectID>> {
+    let commit = CommitReader::<ObjectID>::load(repo, commit_name)?;
+    let fs = commit.create_filesystem()?;
+
+    Ok(fs)
+}
+
+/// Inspects a commit
+pub fn inspect<ObjectID: FsVerityHashValue>(
+    repo: &Repository<ObjectID>,
+    commit_name: &str,
+) -> Result<()> {
+    let objmap = CommitReader::<ObjectID>::load(repo, commit_name)?;
+
+    for (ostree_digest, maybe_obj_id, _data) in objmap.iter() {
+        if let Some(obj_id) = maybe_obj_id {
+            println!("Ostree {} => {:?}", hex::encode(ostree_digest), obj_id);
+        }
+    }
+
+    Ok(())
+}
diff --git a/crates/composefs-ostree/src/pull.rs b/crates/composefs-ostree/src/pull.rs
new file mode 100644
index 00000000..b453194b
--- /dev/null
+++ b/crates/composefs-ostree/src/pull.rs
@@ -0,0 +1,272 @@
+//! Ostree pull support
+
+use anyhow::{bail, Result};
+use composefs::{fsverity::FsVerityHashValue, repository::Repository, util::Sha256Digest};
+use gvariant::aligned_bytes::{AlignedBuf, AsAligned};
+use gvariant::{gv, Marker, Structure};
+use sha2::{Digest, Sha256};
+use std::collections::{HashSet, VecDeque};
+use std::{fmt, sync::Arc};
+
+use crate::commit::{CommitReader, CommitWriter};
+use crate::repo::{ObjectType, OstreeRepo};
+
+struct Outstanding {
+    id: Sha256Digest,
+    obj_type: ObjectType,
+}
+
+impl fmt::Debug for Outstanding {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Outstanding")
+            .field("id", &hex::encode(self.id))
+            .field("obj_type", &self.obj_type)
+            .finish()
+    }
+}
+
+#[derive(Debug)]
+pub(crate) struct PullOperation<ObjectID: FsVerityHashValue, RepoType: OstreeRepo<ObjectID>> {
+    repo: Arc<Repository<ObjectID>>,
+    writer: CommitWriter<ObjectID>,
+    commit_id: Option<Sha256Digest>,
+    ostree_repo: RepoType,
+    base_commits: Vec<CommitReader<ObjectID>>,
+    outstanding: VecDeque<Outstanding>,
+    // All ids that were ever enqueued (including already fetched and currently being fetched)
+    fetched: HashSet<Sha256Digest>,
+}
+
+impl<ObjectID: FsVerityHashValue, RepoType: OstreeRepo<ObjectID>>
+    PullOperation<ObjectID, RepoType>
+{
+    pub fn new(repo: &Arc<Repository<ObjectID>>, ostree_repo: RepoType) -> Self {
+        PullOperation {
+            repo: repo.clone(),
+            commit_id: None,
+            writer: CommitWriter::<ObjectID>::new(),
+            ostree_repo,
+            outstanding: VecDeque::new(),
+            base_commits: vec![],
+            fetched: HashSet::new(),
+        }
+    }
+
+    pub fn add_base(&mut self, base_name: &str) -> Result<()> {
+        let base = CommitReader::<ObjectID>::load(&self.repo, base_name)?;
+        self.base_commits.push(base);
+        Ok(())
+    }
+
+    fn enqueue_fetch(&mut self, id: &Sha256Digest, obj_type: ObjectType) {
+        // To avoid fetching twice, even if the id is not in the outstanding list
+        // (for example we may be currently downloading it) we keep all ids we ever
+        // fetch in a set
+        if self.fetched.contains(id) {
+            return;
+        }
+        self.fetched.insert(*id);
+        // We request metadata objects first
+        if obj_type == ObjectType::File {
+            self.outstanding
+                .push_back(Outstanding { id: *id, obj_type });
+        } else {
+            self.outstanding
+                .push_front(Outstanding { id: *id, obj_type });
+        }
+    }
+
+    fn insert_commit(&mut self, id: &Sha256Digest, data: &[u8]) {
+        self.writer.insert(id, None, data);
+        self.writer.set_commit_id(id);
+        self.commit_id = Some(*id);
+    }
+
+    fn insert_dirmeta(&mut self, id: &Sha256Digest, data: &[u8]) {
+        self.writer.insert(id, None, data);
+    }
+
+    fn insert_dirtree(&mut self, id: &Sha256Digest, data: &[u8]) {
+        self.writer.insert(id, None, data);
+    }
+
+    fn insert_file(
+        &mut self,
+        id: &Sha256Digest,
+        obj_id: Option<&ObjectID>,
+        file_header: AlignedBuf,
+    ) {
+        self.writer.insert(id, obj_id, &file_header);
+    }
+
+    fn maybe_fetch_file(&mut self, id: &Sha256Digest) {
+        if self.writer.contains(id) {
+            return;
+        }
+
+        for base in self.base_commits.iter() {
+            if let Some((obj_id, file_header)) = base.lookup(id) {
+                self.add_file(id, obj_id.cloned().as_ref(), file_header.to_owned());
+                return;
+            }
+        }
+
+        self.enqueue_fetch(id, ObjectType::File);
+    }
+
+    fn add_file(&mut self, id: &Sha256Digest, obj_id: Option<&ObjectID>, file_header: AlignedBuf) {
+        self.insert_file(id, obj_id, file_header);
+    }
+
+    fn maybe_fetch_dirmeta(&mut self, id: &Sha256Digest) {
+        if self.writer.contains(id) {
+            return;
+        }
+
+        for base in self.base_commits.iter() {
+            if let Some(dirmeta) = base.lookup_data(id) {
+                self.add_dirmeta(id, dirmeta.to_owned());
+                return;
+            }
+        }
+
+        self.enqueue_fetch(id, ObjectType::DirMeta);
+    }
+
+    fn add_dirmeta(&mut self, id: &Sha256Digest, data: AlignedBuf) {
+        self.insert_dirmeta(id, &data);
+    }
+
+    fn maybe_fetch_dirtree(&mut self, id: &Sha256Digest) {
+        if self.writer.contains(id) {
+            return;
+        }
+
+        for base in self.base_commits.iter() {
+            if let Some(dirtree) = base.lookup_data(id) {
+                self.add_dirtree(id, dirtree.to_owned());
+                return;
+            }
+        }
+
+        self.enqueue_fetch(id, ObjectType::DirTree);
+    }
+
+    fn add_dirtree(&mut self, id: &Sha256Digest, buf: AlignedBuf) {
+        let data = gv!("(a(say)a(sayay))").cast(buf.as_aligned());
+        let (files_data, dirs_data) = data.to_tuple();
+
+        for f in files_data.iter() {
+            let (_name, checksum) = f.to_tuple();
+
+            self.maybe_fetch_file(checksum.try_into().unwrap());
+        }
+
+        for d in dirs_data.iter() {
+            let (_name, tree_checksum, meta_checksum) = d.to_tuple();
+
+            self.maybe_fetch_dirmeta(meta_checksum.try_into().unwrap());
+            self.maybe_fetch_dirtree(tree_checksum.try_into().unwrap());
+        }
+
+        self.insert_dirtree(id, &buf);
+    }
+
+    fn add_commit(&mut self, id: &Sha256Digest, buf: AlignedBuf) {
+        let data = gv!("(a{sv}aya(say)sstayay)").cast(&buf);
+        let (
+            _metadata_data,
+            _parent_checksum,
+            _related_objects,
+            _subject,
+            _body,
+            _timestamp,
+            root_tree,
+            root_metadata,
+        ) = data.to_tuple();
+
+        self.maybe_fetch_dirmeta(root_metadata.try_into().unwrap());
+        self.maybe_fetch_dirtree(root_tree.try_into().unwrap());
+
+        self.insert_commit(id, &buf);
+    }
+
+    pub async fn pull_commit(&mut self, commit_id: &Sha256Digest) -> Result<ObjectID> {
+        let content_id = format!("ostree-commit-{}", hex::encode(commit_id));
+        if let Some(objid) = self.repo.has_stream(&content_id)? {
+            return Ok(objid);
+        }
+
+        self.enqueue_fetch(commit_id, ObjectType::Commit);
+
+        // TODO: Support deltas
+
+        // TODO: At least for http we should make parallel fetches
+        while !self.outstanding.is_empty() {
+            let fetch = self.outstanding.pop_front().unwrap();
+            println!(
+                "Fetching ostree {:?} object {}",
+                fetch.obj_type,
+                hex::encode(fetch.id)
+            );
+
+            match fetch.obj_type {
+                ObjectType::Commit => {
+                    let data = self
+                        .ostree_repo
+                        .fetch_object(&fetch.id, fetch.obj_type)
+                        .await?;
+                    let data_sha = Sha256::digest(&*data);
+                    if *data_sha != fetch.id {
+                        bail!(
+                            "Invalid commit checksum {:?}, expected {:?}",
+                            data_sha,
+                            fetch.id
+                        );
+                    }
+                    self.add_commit(&fetch.id, data);
+                }
+                ObjectType::DirMeta => {
+                    let data = self
+                        .ostree_repo
+                        .fetch_object(&fetch.id, fetch.obj_type)
+                        .await?;
+                    let data_sha = Sha256::digest(&*data);
+                    if *data_sha != fetch.id {
+                        bail!(
+                            "Invalid dirmeta checksum {:?}, expected {:?}",
+                            data_sha,
+                            fetch.id
+                        );
+                    }
+                    self.add_dirmeta(&fetch.id, data);
+                }
+                ObjectType::DirTree => {
+                    let data = self
+                        .ostree_repo
+                        .fetch_object(&fetch.id, fetch.obj_type)
+                        .await?;
+                    let data_sha = Sha256::digest(&*data);
+                    if *data_sha != fetch.id {
+                        bail!(
+                            "Invalid dirtree checksum {:?}, expected {:?}",
+                            data_sha,
+                            fetch.id
+                        );
+                    }
+                    self.add_dirtree(&fetch.id, data);
+                }
+                ObjectType::File => {
+                    let (file_header, obj_id) = self.ostree_repo.fetch_file(&fetch.id).await?;
+
+                    self.add_file(&fetch.id, obj_id.as_ref(), file_header);
+                }
+                _ => {}
+            }
+        }
+
+        let commit_id = self.writer.serialize(&self.repo, &content_id)?;
+
+        Ok(commit_id)
+    }
+}
diff --git a/crates/composefs-ostree/src/repo.rs b/crates/composefs-ostree/src/repo.rs
new file mode 100644
index 00000000..909c2f88
--- /dev/null
+++ b/crates/composefs-ostree/src/repo.rs
@@ -0,0 +1,544 @@
+//! Ostree repo support
+
+use anyhow::{bail, Context, Error, Result};
+use configparser::ini::Ini;
+use flate2::read::DeflateDecoder;
+use gvariant::aligned_bytes::{AlignedBuf, AlignedSlice, A8};
+use gvariant::{gv, Marker, Structure};
+use reqwest::{Client, Url};
+use rustix::fd::AsRawFd;
+use rustix::fs::{fstat, openat, readlinkat, FileType, Mode, OFlags};
+use rustix::io::Errno;
+use sha2::{Digest, Sha256};
+use std::{
+    fs::File,
+    future::Future,
+    io::{empty, Read},
+    os::fd::{AsFd, OwnedFd},
+    path::Path,
+    sync::Arc,
+};
+use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};
+
+use composefs::{
+    fsverity::FsVerityHashValue,
+    repository::Repository,
+    util::{parse_sha256, ErrnoFilter, Sha256Digest},
+    INLINE_CONTENT_MAX,
+};
+
+#[derive(Debug, PartialEq, Copy, Clone)]
+pub(crate) enum RepoMode {
+    Bare,
+    Archive,
+    BareUser,
+    BareUserOnly,
+    BareSplitXAttrs,
+}
+
+#[allow(dead_code)]
+#[derive(Debug, PartialEq)]
+pub(crate) enum ObjectType {
+    File,
+    DirTree,
+    DirMeta,
+    Commit,
+    TombstoneCommit,
+    PayloadLink,
+    FileXAttrs,
+    FileXAttrsLink,
+}
+
+impl ObjectType {
+    pub fn extension(&self, repo_mode: RepoMode) -> &'static str {
+        match self {
+            ObjectType::File => {
+                if repo_mode == RepoMode::Archive {
+                    ".filez"
+                } else {
+                    ".file"
+                }
+            }
+            ObjectType::DirTree => ".dirtree",
+            ObjectType::DirMeta => ".dirmeta",
+            ObjectType::Commit => ".commit",
+            ObjectType::TombstoneCommit => ".commit-tombstone",
+            ObjectType::PayloadLink => ".payload-link",
+            ObjectType::FileXAttrs => ".file-xattrs",
+            ObjectType::FileXAttrsLink => ".file-xattrs-link",
+        }
+    }
+}
+
+impl RepoMode {
+    pub fn parse(s: &str) -> Result<Self> {
+        match s {
+            "bare" => Ok(RepoMode::Bare),
+            "archive" => Ok(RepoMode::Archive),
+            "archive-z2" => Ok(RepoMode::Archive),
+            "bare-user" => Ok(RepoMode::BareUser),
+            "bare-user-only" => Ok(RepoMode::BareUserOnly),
+            "bare-split-xattrs" => Ok(RepoMode::BareSplitXAttrs),
+            _ => Err(Error::msg(format!("Unsupported repo mode {}", s))),
+        }
+    }
+}
+
+fn get_object_pathname(mode: RepoMode, checksum: &Sha256Digest, object_type: ObjectType) -> String {
+    format!(
+        "{:02x}/{}{}",
+        checksum[0],
+        hex::encode(&checksum[1..]),
+        object_type.extension(mode)
+    )
+}
+
+fn size_prefix(data: &[u8]) -> AlignedBuf {
+    let mut buf = AlignedBuf::new();
+    let svh = SizedVariantHeader {
+        size: u32::to_be(data.len() as u32),
+        padding: 0,
+    };
+    buf.with_vec(|v| v.extend_from_slice(svh.as_bytes()));
+    buf.with_vec(|v| v.extend_from_slice(data));
+    buf
+}
+
+pub(crate) fn get_sized_variant_size(data: &[u8]) -> Result<usize> {
+    let variant_header_size = size_of::<SizedVariantHeader>();
+    if data.len() < variant_header_size {
+        bail!("Sized variant too small");
+    }
+
+    let aligned: AlignedBuf = data[0..variant_header_size].to_vec().into();
+    let h = SizedVariantHeader::ref_from_bytes(&aligned)
+        .map_err(|e| Error::msg(format!("Sized variant header: {:?}", e)))?;
+    Ok(u32::from_be(h.size) as usize)
+}
+
+pub(crate) fn split_sized_variant(data: &[u8]) -> Result<(&[u8], &[u8], &[u8])> {
+    let variant_size = get_sized_variant_size(data)?;
+    let header_size = size_of::<SizedVariantHeader>();
+    if data.len() < header_size + variant_size {
+        bail!("Sized variant too small");
+    }
+
+    let sized_data = &data[0..header_size + variant_size];
+    let variant_data = &data[header_size..header_size + variant_size];
+    let remaining_data = &data[header_size + variant_size..];
+
+    Ok((sized_data, variant_data, remaining_data))
+}
+
+pub(crate) fn ostree_zlib_file_header_to_regular(zlib_header_data: &AlignedSlice<A8>) -> Vec<u8> {
gv!("(tuuuusa(ayay))").cast(zlib_header_data); + let (_size, uid, gid, mode, zero, symlink_target, xattrs_data) = data.to_tuple(); + let mut s = Vec::<(&[u8], &[u8])>::new(); + for x in xattrs_data.iter() { + let (key, value) = x.to_tuple(); + s.push((key, value)) + } + + gv!("(uuuusa(ayay))").serialize_to_vec(&(*uid, *gid, *mode, *zero, symlink_target.to_str(), &s)) +} + +/* This is how ostree stores gvariants on disk when used as a header for filez objects */ +#[derive(Debug, FromBytes, Immutable, IntoBytes, KnownLayout)] +#[repr(C)] +pub(crate) struct SizedVariantHeader { + size: u32, + padding: u32, +} + +pub(crate) trait OstreeRepo { + fn fetch_object( + &self, + checksum: &Sha256Digest, + object_type: ObjectType, + ) -> impl Future>; + fn fetch_file( + &self, + checksum: &Sha256Digest, + ) -> impl Future)>>; +} + +#[derive(Debug)] +pub(crate) struct RemoteRepo { + repo: Arc>, + client: Client, + url: Url, +} + +impl RemoteRepo { + pub fn new(repo: &Arc>, url: &str) -> Result { + Ok(RemoteRepo { + repo: repo.clone(), + client: Client::new(), + url: Url::parse(url)?, + }) + } + + pub async fn resolve_ref(&self, ref_name: &str) -> Result { + // TODO: Support summary format + let path = format!("refs/heads/{}", ref_name); + let url = self.url.join(&path)?; + + let t = self + .client + .get(url.clone()) + .send() + .await? + .text() + .await + .with_context(|| format!("Cannot get ostree ref at {}", url))?; + + Ok(parse_sha256(t.trim())?) + } +} + +impl OstreeRepo for RemoteRepo { + async fn fetch_object( + &self, + checksum: &Sha256Digest, + object_type: ObjectType, + ) -> Result { + let path = format!( + "objects/{}", + get_object_pathname(RepoMode::Archive, checksum, object_type) + ); + let url = self.url.join(&path)?; + + let response = self.client.get(url.clone()).send().await?; + response.error_for_status_ref()?; + let b = response + .bytes() + .await + .with_context(|| format!("Cannot get ostree object at {}", url))?; + + Ok(b.to_vec().into()) + } + + async fn fetch_file(&self, checksum: &Sha256Digest) -> Result<(AlignedBuf, Option)> { + let path = format!( + "objects/{}", + get_object_pathname(RepoMode::Archive, checksum, ObjectType::File) + ); + let url = self.url.join(&path)?; + + let response = self.client.get(url.clone()).send().await?; + response.error_for_status_ref()?; + + let data = response + .bytes() + .await + .with_context(|| format!("Cannot get ostree file at {}", url))?; + + let (file_header, variant_data, compressed_data) = split_sized_variant(&data)?; + + // Force align the data as there is a gvariant-rs bug (https://github.com/ostreedev/gvariant-rs/pull/9) + let mut aligned_variant_data = AlignedBuf::new(); + aligned_variant_data.with_vec(|v| v.extend_from_slice(variant_data)); + + // Compute the checksum of (regular) header + data + let mut hasher = Sha256::new(); + let regular_header = ostree_zlib_file_header_to_regular(&aligned_variant_data); + let sized_regular_header = size_prefix(®ular_header); + hasher.update(&*sized_regular_header); + + // Decompress rest + let mut uncompressed = DeflateDecoder::new(compressed_data); + + // TODO: Stream files into repo instead of reading it all + + let mut file_content = Vec::new(); + uncompressed.read_to_end(&mut file_content)?; + + hasher.update(&file_content); + let actual_checksum = hasher.finalize(); + if *actual_checksum != *checksum { + bail!( + "Unexpected file checksum {:?}, expected {:?}", + actual_checksum, + checksum + ); + } + + let mut file_data = file_header.to_vec(); + let obj_id = if file_content.len() <= 
+            file_data.extend_from_slice(&file_content);
+            None
+        } else {
+            Some(self.repo.ensure_object(&file_content)?)
+        };
+
+        Ok((file_data.into(), obj_id))
+    }
+}
+
+#[derive(Debug)]
+pub(crate) struct LocalRepo<ObjectID: FsVerityHashValue> {
+    repo: Arc<Repository<ObjectID>>,
+    mode: RepoMode,
+    dir: OwnedFd,
+    objects: OwnedFd,
+}
+
+impl<ObjectID: FsVerityHashValue> LocalRepo<ObjectID> {
+    pub fn open_path(
+        repo: &Arc<Repository<ObjectID>>,
+        dirfd: impl AsFd,
+        path: impl AsRef<Path>,
+    ) -> Result<Self> {
+        let path = path.as_ref();
+        let repofd = openat(
+            &dirfd,
+            path,
+            OFlags::RDONLY | OFlags::CLOEXEC,
+            Mode::empty(),
+        )
+        .with_context(|| format!("Cannot open ostree repository at {}", path.display()))?;
+
+        let configfd = openat(
+            &repofd,
+            "config",
+            OFlags::RDONLY | OFlags::CLOEXEC,
+            Mode::empty(),
+        )
+        .with_context(|| format!("Cannot open ostree repo config file at {}", path.display()))?;
+
+        let mut config_data = String::new();
+
+        File::from(configfd)
+            .read_to_string(&mut config_data)
+            .with_context(|| "Can't read config file")?;
+
+        let mut config = Ini::new();
+        let map = config
+            .read(config_data)
+            .map_err(Error::msg)
+            .with_context(|| "Can't read config file")?;
+
+        let core = if let Some(core_map) = map.get("core") {
+            core_map
+        } else {
+            return Err(Error::msg("No [core] section in config"));
+        };
+
+        let mode = if let Some(Some(mode)) = core.get("mode") {
+            RepoMode::parse(mode)?
+        } else {
+            return Err(Error::msg("No mode in [core] section in config"));
+        };
+
+        if mode != RepoMode::Archive && mode != RepoMode::BareUserOnly {
+            return Err(Error::msg(format!("Unsupported repo mode {mode:?}")));
+        }
+
+        let objectsfd = openat(
+            &repofd,
+            "objects",
+            OFlags::PATH | OFlags::CLOEXEC | OFlags::DIRECTORY,
+            0o666.into(),
+        )
+        .with_context(|| {
+            format!(
+                "Cannot open ostree repository objects directory at {}",
+                path.display()
+            )
+        })?;
+
+        Ok(Self {
+            repo: repo.clone(),
+            mode,
+            dir: repofd,
+            objects: objectsfd,
+        })
+    }
+
+    pub fn open_object_flags(
+        &self,
+        checksum: &Sha256Digest,
+        object_type: ObjectType,
+        flags: OFlags,
+    ) -> Result<OwnedFd> {
+        let path = get_object_pathname(self.mode, checksum, object_type);
+
+        openat(&self.objects, &path, flags | OFlags::CLOEXEC, Mode::empty())
+            .with_context(|| format!("Cannot open ostree object at {}", path))
+    }
+
+    pub fn open_object(&self, checksum: &Sha256Digest, object_type: ObjectType) -> Result<OwnedFd> {
+        self.open_object_flags(checksum, object_type, OFlags::RDONLY | OFlags::NOFOLLOW)
+    }
+
+    pub fn read_ref(&self, ref_name: &str) -> Result<Sha256Digest> {
+        let path1 = format!("refs/{}", ref_name);
+        let path2 = format!("refs/heads/{}", ref_name);
+
+        let fd1 = openat(
+            &self.dir,
+            &path1,
+            OFlags::RDONLY | OFlags::CLOEXEC,
+            Mode::empty(),
+        )
+        .filter_errno(Errno::NOENT)
+        .with_context(|| format!("Cannot open ostree ref at {}", path1))?;
+
+        let fd = if let Some(fd) = fd1 {
+            fd
+        } else {
+            openat(
+                &self.dir,
+                &path2,
+                OFlags::RDONLY | OFlags::CLOEXEC,
+                Mode::empty(),
+            )
+            .with_context(|| format!("Cannot open ostree ref at {}", path2))?
+        };
+
+        let mut buffer = String::new();
+        File::from(fd)
+            .read_to_string(&mut buffer)
+            .with_context(|| "Can't read ref file")?;
+
+        Ok(parse_sha256(buffer.trim())?)
+    }
+
+    async fn fetch_file_bare(
+        &self,
+        checksum: &Sha256Digest,
+    ) -> Result<(AlignedBuf, Box<dyn Read>)> {
+        let path_fd =
+            self.open_object_flags(checksum, ObjectType::File, OFlags::PATH | OFlags::NOFOLLOW)?;
+
+        let st = fstat(&path_fd)?;
+
+        let filetype = FileType::from_raw_mode(st.st_mode);
+
+        let symlink_target = if filetype.is_symlink() {
+            readlinkat(&path_fd, "", [])?.into_string()?
+        } else {
+            String::from("")
+        };
+
+        let xattrs = Vec::<(&[u8], &[u8])>::new();
+
+        let (uid, gid, mode) = match self.mode {
+            RepoMode::Bare => {
+                // TODO: Read xattrs from disk
+                (st.st_uid, st.st_gid, st.st_mode)
+            }
+            RepoMode::BareUser => {
+                // TODO: read user.ostreemeta xattr
+                bail!("BareUser not supported yet")
+            }
+            RepoMode::BareUserOnly => (0, 0, st.st_mode),
+            _ => {
+                bail!("Unsupported repo mode {:?}", self.mode)
+            }
+        };
+
+        let v = gv!("(tuuuusa(ayay))").serialize_to_vec(&(
+            u64::to_be(st.st_size as u64),
+            u32::to_be(uid),
+            u32::to_be(gid),
+            u32::to_be(mode),
+            u32::to_be(0), // rdev
+            &symlink_target,
+            &xattrs,
+        ));
+
+        let zlib_header = size_prefix(&v);
+
+        if filetype.is_symlink() {
+            Ok((zlib_header, Box::new(empty())))
+        } else {
+            let fd_path = format!("/proc/self/fd/{}", path_fd.as_fd().as_raw_fd());
+            Ok((zlib_header, Box::new(File::open(fd_path)?)))
+        }
+    }
+
+    async fn fetch_file_archive(
+        &self,
+        checksum: &Sha256Digest,
+    ) -> Result<(AlignedBuf, Box<dyn Read>)> {
+        let fd = self.open_object(checksum, ObjectType::File)?;
+        let mut file = File::from(fd);
+
+        let mut header_buf = AlignedBuf::new();
+
+        // Read variant size header
+        let header_size = size_of::<SizedVariantHeader>();
+        header_buf.with_vec(|v| {
+            v.resize(header_size, 0u8);
+            file.read_exact(v)
+        })?;
+
+        // Read variant
+        let variant_size = get_sized_variant_size(&header_buf)?;
+        header_buf.with_vec(|v| {
+            v.resize(header_size + variant_size, 0u8);
+            file.read_exact(&mut v[header_size..])
+        })?;
+
+        // Decompress rest
+        Ok((header_buf, Box::new(DeflateDecoder::new(file))))
+    }
+}
+
+impl<ObjectID: FsVerityHashValue> OstreeRepo<ObjectID> for LocalRepo<ObjectID> {
+    async fn fetch_object(
+        &self,
+        checksum: &Sha256Digest,
+        object_type: ObjectType,
+    ) -> Result<AlignedBuf> {
+        let fd = self.open_object(checksum, object_type)?;
+
+        let mut buffer = Vec::new();
+        File::from(fd).read_to_end(&mut buffer)?;
+        Ok(buffer.into())
+    }
+
+    async fn fetch_file(&self, checksum: &Sha256Digest) -> Result<(AlignedBuf, Option<ObjectID>)> {
+        let (mut header_buf, mut rest) = if self.mode == RepoMode::Archive {
+            self.fetch_file_archive(checksum).await?
+        } else {
+            self.fetch_file_bare(checksum).await?
+        };
+
+        // Force align the data as there is a gvariant-rs bug (https://github.com/ostreedev/gvariant-rs/pull/9)
+        let mut aligned_variant_data = AlignedBuf::new();
+        let header_size = size_of::<SizedVariantHeader>();
+        aligned_variant_data.with_vec(|v| v.extend_from_slice(&header_buf[header_size..]));
+
+        // Compute the checksum of (regular) header + data
+        let mut hasher = Sha256::new();
+        let regular_header = ostree_zlib_file_header_to_regular(&aligned_variant_data);
+        let sized_regular_header = size_prefix(&regular_header);
+        hasher.update(&*sized_regular_header);
+
+        // TODO: Stream files into repo instead of reading it all
+        let mut file_content = Vec::new();
+        rest.read_to_end(&mut file_content)?;
+        hasher.update(&file_content);
+
+        // Ensure matching checksum
+        let actual_checksum = hasher.finalize();
+        if *actual_checksum != *checksum {
+            bail!(
+                "Unexpected file checksum {}, expected {}",
+                hex::encode(actual_checksum),
+                hex::encode(checksum)
+            );
+        }
+
+        let obj_id = if file_content.len() <= INLINE_CONTENT_MAX {
+            header_buf.with_vec(|v| v.extend_from_slice(&file_content));
+            None
+        } else {
+            Some(self.repo.ensure_object(&file_content)?)
+        };
+
+        Ok((header_buf, obj_id))
+    }
+}
diff --git a/crates/composefs/src/splitstream.rs b/crates/composefs/src/splitstream.rs
index 2bdb04e2..f5844351 100644
--- a/crates/composefs/src/splitstream.rs
+++ b/crates/composefs/src/splitstream.rs
@@ -742,6 +742,11 @@ impl<ObjectID: FsVerityHashValue> SplitStreamReader<ObjectID> {
         self.named_refs
     }
 
+    /// Look up the digest of an external reference by index
+    pub fn lookup_external_ref(&self, idx: usize) -> Option<&ObjectID> {
+        self.object_refs.get(idx)
+    }
+
     fn ensure_chunk(
         &mut self,
         eof_ok: bool,
diff --git a/crates/composefs/src/util.rs b/crates/composefs/src/util.rs
index 88f7809e..7412eae7 100644
--- a/crates/composefs/src/util.rs
+++ b/crates/composefs/src/util.rs
@@ -112,7 +112,10 @@ pub fn parse_sha256(string: impl AsRef<str>) -> Result<Sha256Digest> {
     Ok(value)
 }
 
-pub(crate) trait ErrnoFilter<T> {
+/// Utility for filtering ErrnoResult errors.
+pub trait ErrnoFilter<T> {
+    /// Maps an ErrnoResult<T> into ErrnoResult<Option<T>>, where the result is
+    /// None if the errno matched the given errno (often used with ENOENT).
     fn filter_errno(self, ignored: Errno) -> ErrnoResult<Option<T>>;
 }
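Example usage (editor's sketch, not part of the patch): the snippet below shows how the new composefs-ostree public API composes, mirroring what the cfsctl subcommands above do. The repository value, repo path, ref name, and checksum are hypothetical placeholders; the composefs_ostree calls themselves match the signatures added in this patch.

    use std::{path::Path, sync::Arc};

    use anyhow::Result;
    use composefs::{fsverity::Sha256HashValue, repository::Repository};

    async fn demo(repo: Repository<Sha256HashValue>) -> Result<()> {
        let repo = Arc::new(repo);

        // Pull an ostree commit from a local repo into a commit splitstream,
        // returning the fs-verity digest of the stream. Passing a base name
        // instead of None would reuse objects from a previously pulled commit.
        let verity = composefs_ostree::pull_local(
            &repo,
            Path::new("/ostree/repo"),  // hypothetical ostree repo path
            "exampleos/x86_64/stable",  // hypothetical ref
            None,
        )
        .await?;
        println!("verity {}", verity.to_hex());

        // The pull names the stream "ostree-commit-<sha256>"; that name is
        // what the CreateImage and Inspect subcommands take as commit_name.
        let commit_name = format!("ostree-commit-{}", "0123..."); // placeholder checksum
        let _fs = composefs_ostree::create_filesystem(&repo, &commit_name)?;
        // `_fs` can then be committed as an image (cfsctl calls fs.commit_image()).

        // Print the external objects referenced by the commit stream.
        composefs_ostree::inspect(&repo, &commit_name)?;

        Ok(())
    }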
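As a further aid for reviewing the commit splitstream format described at the top of crates/composefs-ostree/src/commit.rs, here is a standalone illustration (editor's sketch, not PR code) of the 256-entry bucket index and the 8-byte alignment rule:

    // bucket_ends[b] is the exclusive end index, within the sorted ostree_ids
    // array, of all ids whose first byte is <= b; a lookup therefore only has
    // to binary-search the slice belonging to the id's first byte.
    fn bucket_range(bucket_ends: &[u32; 256], first_byte: u8) -> (usize, usize) {
        let b = first_byte as usize;
        let start = if b == 0 { 0 } else { bucket_ends[b - 1] as usize };
        (start, bucket_ends[b] as usize)
    }

    // Per-object data offsets are rounded up to the next multiple of 8,
    // matching commit.rs's align8().
    fn align8(x: usize) -> usize {
        (x + 7) & !7
    }

    fn main() {
        let mut ends = [3u32; 256];
        ends[0] = 2; // two ids start with byte 0x00
        ends[1] = 2; // no ids start with byte 0x01; one id starts with 0x02
        assert_eq!(bucket_range(&ends, 0x00), (0, 2));
        assert_eq!(bucket_range(&ends, 0x01), (2, 2));
        assert_eq!(bucket_range(&ends, 0x02), (2, 3));
        assert_eq!(align8(13), 16);
        assert_eq!(align8(16), 16);
    }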