1 change: 1 addition & 0 deletions core-dockpack/Cargo.toml
@@ -16,3 +16,4 @@ futures-util = "0.3.31"
tokio-util = { version = "0.7.14", features = ["io"] }
tokio-tar = "0.3.1"
futures-core = "0.3.31"
anyhow = "1.0.98"
13 changes: 10 additions & 3 deletions core-dockpack/src/cmd_processes/build/build_dockerfile.rs
@@ -1,20 +1,27 @@
//! Builds a Dockerfile from a directory

use anyhow::{Context, Result};
use std::fs::File;
use std::io::Write;

// directory is the build context

pub fn create_dockerfile(directory: &str) -> Result<(), String> {
pub fn create_dockerfile(directory: &str) -> Result<()> {
let docker_file_content = "FROM scratch\nCOPY . .\n".to_string();

let dockerfile_path = format!("{}/Dockerfile", directory);

let mut dockerfile = File::create(&dockerfile_path).map_err(|e| e.to_string())?;
let mut dockerfile = File::create(&dockerfile_path)
.with_context(|| format!("Error creatining file at path {}", dockerfile_path))?;

dockerfile
.write_all(docker_file_content.as_bytes())
.map_err(|e| e.to_string())?;
.with_context(|| {
format!(
"Could not write all from scratch content to dockerfile at path {}",
dockerfile_path,
)
})?;

Ok(())
}
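With create_dockerfile now returning anyhow::Result<()>, callers can propagate failures with the ? operator instead of mapping errors to String. A minimal usage sketch, assuming the build module is publicly reachable and using an example build-context path:

use anyhow::Result;
use core_dockpack::cmd_processes::build::build_dockerfile::create_dockerfile;

fn main() -> Result<()> {
    // Writes a minimal "FROM scratch / COPY . ." Dockerfile into the build context.
    create_dockerfile("./build_context")?;
    Ok(())
}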
5 changes: 3 additions & 2 deletions core-dockpack/src/cmd_processes/pull/unpack_files.rs
@@ -1,5 +1,6 @@
//! The API for unpacking Docker images into a directory.
use crate::utils::{cache, docker_commands, unpacking};
use anyhow::Result;
use std::path::PathBuf;

/// Unpacks the files from a Docker image into a directory.
@@ -10,7 +11,7 @@ use std::path::PathBuf;
///
/// # Returns
/// The path to the directory where the Docker image files are stored.
pub async fn unpack_files_from_image(image: &str, directory: &str) -> Result<String, String> {
pub async fn unpack_files_from_image(image: &str, directory: &str) -> Result<String> {
let main_path = PathBuf::from(directory);
cache::wipe_and_create_cache(&main_path);

@@ -46,6 +47,6 @@ mod tests {
assert!(result.is_ok());
let path = result.unwrap();
assert!(Path::new(&path).exists());
// fs::remove_dir_all(directory).unwrap();
fs::remove_dir_all(directory).unwrap();
}
}
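Since unpack_files_from_image is async and now returns anyhow::Result<String>, a caller needs a Tokio runtime. A hedged usage sketch; the image name and cache directory are example values, and the tokio macros/runtime features are assumed to be enabled:

use core_dockpack::cmd_processes::pull::unpack_files::unpack_files_from_image;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Pulls the image, exports it, and unpacks its layers into the directory.
    let path = unpack_files_from_image("alpine:latest", "./cache/alpine").await?;
    println!("files unpacked to {}", path);
    Ok(())
}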
36 changes: 18 additions & 18 deletions core-dockpack/src/cmd_processes/push/execute_push.rs
@@ -1,55 +1,55 @@
use crate::utils::cache;
use anyhow::{Context, Result};
use bollard::models::{CreateImageInfo, PushImageInfo};
use bollard::query_parameters::{CreateImageOptionsBuilder, PushImageOptionsBuilder};
use bollard::Docker;
use futures_util::stream::TryStreamExt;
use tokio::fs::File;
use tokio_util::io::ReaderStream;

async fn dir_to_tar(dir: &str, image_name: &str) -> Result<(), String> {
let tar_name = &format!("{}.tar", cache::process_image_name(image_name));
let tar = File::create(tar_name)
async fn dir_to_tar(dir: &str, image_name: &str) -> Result<String> {
let tar_name = format!("{}.tar", cache::process_image_name(image_name));
let tar = File::create(&tar_name)
.await
.expect("Could not create archive file");
.with_context(|| "Could not create archive file")?;
let mut tar = tokio_tar::Builder::new(tar);
tar.append_dir_all("", dir)
.await
.expect("Could not add path to target");
.with_context(|| "Could not add path to target")?;
tar.finish()
.await
.expect("An error occured in converting dir to tar");
Ok(())
.with_context(|| "An error occured in converting dir to tar")?;
Ok(tar_name)
}

pub async fn execute_docker_build(directory: &str, image: &str) -> Result<(), String> {
pub async fn execute_docker_build(directory: &str, image: &str) -> Result<()> {
// Convert directory to a tar file
dir_to_tar(directory, image).await?;
let tar_path = dir_to_tar(directory, image).await?;

let file = File::open(&format!("{}.tar", image))
let file = File::open(&tar_path)
.await
.expect("Could not find archive.");
.with_context(|| format!("Could not find archive at path {}", tar_path))?;
let stream = ReaderStream::new(file);

let docker = Docker::connect_with_socket_defaults()
.expect("Could no connect to docker socket. Is docker running?");
.with_context(|| "Could not connect to docker socket. Is docker running?")?;

let options = CreateImageOptionsBuilder::default()
.from_src("-") // from_src must be "-" when sending the archive in the request body
.repo(image) // The name of the image in the docker daemon.
.tag("1.0.0") // The tag of this particular image.
.build();
let _: Vec<CreateImageInfo> = docker
.create_image(Some(options), Some(bollard::body_try_stream(stream)), None)
.try_collect()
.await
.expect("Could not create image");
.with_context(|| "Could not create image")?;

let options = PushImageOptionsBuilder::new().tag("latest").build();
let _: Vec<PushImageInfo> = docker
.push_image(&cache::process_image_name(image), Some(options), None)
.push_image(image, Some(options), None)
.try_collect()
.await
.expect("Could not push image");
.with_context(|| "Could not push image")?;

Ok(())
}
@@ -73,7 +73,7 @@ mod tests {

assert!(result.is_ok());

// fs::remove_dir_all(directory).expect("Failed to remove test directory");
fs::remove_dir_all(directory).expect("Failed to remove test directory");
}

#[tokio::test]
@@ -89,6 +89,6 @@
let result = execute_docker_build(directory, image_name).await;
assert!(result.is_ok());

// fs::remove_dir_all(directory).expect("Failed to remove test directory");
fs::remove_dir_all(directory).expect("Failed to remove test directory");
}
}
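execute_docker_build now uses the tar path returned by dir_to_tar, so the archive it opens always matches the file that was written (process_image_name can alter the name, so the old format!("{}.tar", image) could point at a different file). A usage sketch under the assumption that the push module is exported like the pull module; the directory and registry-qualified image name are examples:

use core_dockpack::cmd_processes::push::execute_push::execute_docker_build;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Re-packs an unpacked image directory into a tar, imports it, and pushes it.
    execute_docker_build("./cache/alpine", "localhost:5000/alpine-copy").await?;
    Ok(())
}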
34 changes: 19 additions & 15 deletions core-dockpack/src/utils/docker_commands.rs
@@ -1,27 +1,28 @@
//! Defines the actions around downloading and unpacking docker images to access the files.
use super::cache::process_image_name;
use bollard::image::CreateImageOptions;
use anyhow::{anyhow, Context, Result};
use bollard::query_parameters::CreateImageOptionsBuilder;
use bollard::Docker;
use futures_util::stream::TryStreamExt;
use futures_util::StreamExt;
use std::default::Default;
use std::process::Command;
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use tokio_tar::Archive;

async fn pull_image(image_name: &str, docker: &Docker) -> Result<(), String> {
let options = Some(CreateImageOptions {
from_image: image_name,
..Default::default()
});
async fn pull_image(image_name: &str, docker: &Docker) -> Result<()> {
let options = Some(
CreateImageOptionsBuilder::new()
.from_image(image_name)
.build(),
);
println!("image_name: {}", image_name);
// confirmed: this works
docker
.create_image(options, None, None)
.try_collect::<Vec<_>>()
.await
.map_err(|err| err.to_string())?;
.with_context(|| "Error creating image")?;
Ok(())
}

@@ -36,7 +37,7 @@ async fn pull_image(image_name: &str, docker: &Docker) -> Result<(), String> {
///
/// # Returns
/// The path to where the compressed Docker image files are stored
pub async fn save_docker_image(image_name: &str, tar_path: &str) -> Result<String, String> {
pub async fn save_docker_image(image_name: &str, tar_path: &str) -> Result<String> {
// pull image
// // TODO: consider moving to receive in the function
let docker = Docker::connect_with_socket_defaults()
@@ -56,17 +57,17 @@ pub async fn save_docker_image(image_name: &str, tar_path: &str) -> Result<Strin
let mut tar = docker.export_image(image_name);
let mut archive_file = File::create(file_path)
.await
.map_err(|err| err.to_string())?;
.with_context(|| "Could not create file path for tar file")?;
while let Some(chunk) = tar.next().await {
let data = chunk.map_err(|err| err.to_string())?;
let data = chunk.with_context(|| "Could not read bytes from image export stream")?;
archive_file
.write_all(&data)
.await
.map_err(|err| err.to_string())?;
.with_context(|| "Error writing bytes to file")?;
archive_file
.sync_all()
.await
.map_err(|err| err.to_string())?;
.with_context(|| "Error syncing all data to file")?;
}
println!("Synced to tar file");

@@ -75,12 +76,15 @@ pub async fn save_docker_image(image_name: &str, tar_path: &str) -> Result<Strin
.expect("Could not reopen archive file");
let mut archive = Archive::new(file);

archive.unpack(tar_path).await.map_err(|e| e.to_string())?;
archive
.unpack(tar_path)
.await
.with_context(|| "Error unpacking tar file")?;
//
// // return statement
Ok(match tar_path.to_str() {
Some(v) => v.to_string(),
None => return Err("Failed to convert path to string".to_string()),
None => return Err(anyhow!("Failed to convert path to string")),
})
}

24 changes: 13 additions & 11 deletions core-dockpack/src/utils/unpacking.rs
@@ -1,4 +1,5 @@
//! Defines the actions around unpacking compressed Docker files from the manifest.
use anyhow::{anyhow, Context, Result};
use flate2::read::GzDecoder;
use serde_json::Value;
use std::fs::File;
@@ -48,16 +49,17 @@ fn read_json_file<P: AsRef<Path>>(path: P) -> std::io::Result<Value> {
///
/// # Returns
/// The path to where the layers are extracted.
pub fn extract_layers(main_path: &str, unpack_path: &str) -> Result<String, String> {
pub fn extract_layers(main_path: &str, unpack_path: &str) -> Result<String> {
let manifest_path = std::path::Path::new(main_path).join("manifest.json");
let blobs_dir = std::path::Path::new(main_path);
let unpack_path = std::path::Path::new(unpack_path);

if !unpack_path.exists() {
std::fs::create_dir_all(unpack_path).map_err(|e| e.to_string())?;
std::fs::create_dir_all(unpack_path).with_context(|| "Error creating directory")?;
}

let manifest = read_json_file(&manifest_path).map_err(|e| e.to_string())?;
let manifest =
read_json_file(&manifest_path).with_context(|| "Error reading json manifest file")?;

if let Some(layers) = manifest[0]["Layers"].as_array() {
println!("Found {} layers in manifest", layers.len());
@@ -70,25 +72,26 @@ pub fn extract_layers(main_path: &str, unpack_path: &str) -> Result<String, Stri
Some(layer) => layer,
None => {
return Err(
"Failed to get the layer path when extracting a layer from the Docker image".to_string()
anyhow!("Failed to get the layer path when extracting a layer from the Docker image")
);
}
});

// Extract the layer's tarball to a directory
let mut tar_file = File::open(&layer_path).map_err(|e| e.to_string())?;
let if_gzipped = check_if_gzipped(&mut tar_file).map_err(|e| e.to_string())?;
let mut tar_file = File::open(&layer_path).with_context(|| "Error reading tar file")?;
let if_gzipped = check_if_gzipped(&mut tar_file)
.with_context(|| "Error checking if tar fle is gzipped")?;
match if_gzipped {
true => {
println!("Layer is gzipped");
let decompressed = GzDecoder::new(tar_file);
let mut archive = Archive::new(decompressed);
archive.unpack(unpack_path).map_err(|e| e.to_string())?;
archive.unpack(unpack_path)?;
}
false => {
println!("Layer is not gzipped");
let mut archive = Archive::new(tar_file);
archive.unpack(unpack_path).map_err(|e| e.to_string())?;
archive.unpack(unpack_path)?;
}
}
}
@@ -99,10 +102,9 @@ pub fn extract_layers(main_path: &str, unpack_path: &str) -> Result<String, Stri
Ok(match unpack_path.to_str() {
Some(v) => v.to_string(),
None => {
return Err(
return Err(anyhow!(
"Failed to convert path to string when extracting layers from the Docker image"
.to_string(),
);
));
}
})
}
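extract_layers depends on a check_if_gzipped helper that is not part of this diff. For context, such a check typically peeks at the two gzip magic bytes (0x1f, 0x8b) and rewinds the reader; the sketch below is a hypothetical illustration, not the crate's actual implementation:

use std::fs::File;
use std::io::{Read, Seek, SeekFrom};

fn check_if_gzipped(file: &mut File) -> std::io::Result<bool> {
    let mut magic = [0u8; 2];
    file.read_exact(&mut magic)?;
    // Rewind so the caller can read the layer from the start afterwards.
    file.seek(SeekFrom::Start(0))?;
    Ok(magic == [0x1f, 0x8b])
}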
1 change: 1 addition & 0 deletions coredockpack/Cargo.toml
@@ -5,6 +5,7 @@ edition = "2021"

[dependencies]
core-dockpack = { path = "../core-dockpack" }
tokio = "1.45.1"


[lib]
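Note: tokio::runtime::Runtime::new(), used in coredockpack/src/lib.rs below, is gated behind tokio's rt-multi-thread feature, and tokio's default feature set is empty. If the crate does not build with the plain dependency above, the line would need the feature enabled, for example (an assumption, not part of this PR):

tokio = { version = "1.45.1", features = ["rt-multi-thread"] }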
7 changes: 5 additions & 2 deletions coredockpack/src/lib.rs
@@ -1,6 +1,7 @@
use core_dockpack::cmd_processes::pull::unpack_files::unpack_files_from_image;
use std::ffi::{CStr, CString};
use std::os::raw::c_char;
use tokio::runtime::Runtime;

/// Unpacks the files from a Docker image into a directory.
///
@@ -13,15 +14,17 @@ use std::os::raw::c_char;
/// On error, returns a null pointer.
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub async extern "C" fn unpack_files_from_image_c(
pub extern "C" fn unpack_files_from_image_c(
image: *const c_char,
directory: *const c_char,
) -> *const c_char {
// Convert C strings to Rust strings
let image = unsafe { CStr::from_ptr(image).to_string_lossy().into_owned() };
let directory = unsafe { CStr::from_ptr(directory).to_string_lossy().into_owned() };

match unpack_files_from_image(&image, &directory).await {
let rt = match Runtime::new() {
Ok(rt) => rt,
Err(_) => return std::ptr::null(),
};
let result = rt.block_on(unpack_files_from_image(&image, &directory));
match result {
Ok(path) => {
let c_string = CString::new(path).unwrap();
c_string.into_raw() // Return the C string
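Because unpack_files_from_image_c returns the CString via into_raw, ownership of the allocation passes to the caller; without a way to hand the pointer back, the string leaks. A companion free function along these lines is the usual pattern (hypothetical, not part of this PR; it reuses the c_char and CString imports already in lib.rs):

#[no_mangle]
pub extern "C" fn free_unpack_result(ptr: *mut c_char) {
    if ptr.is_null() {
        return;
    }
    // Retake ownership so the CString is dropped and its buffer freed.
    unsafe {
        let _ = CString::from_raw(ptr);
    }
}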