Merge pull request #11426 from charludo/fix/genpolicy-corruption-of-layer-cache-file
genpolicy: prevent corruption of the layer cache file
Commit: 0a57e09259
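Judging from the diff, the old code re-read and rewrote the JSON layer cache once per layer with ad-hoc locking, which could corrupt the file when several genpolicy instances ran concurrently. The new ImageLayersCache below loads the cache once under a shared advisory lock, keeps the layers in memory, and rewrites the whole file once at the end of the run under an exclusive lock, using the fs2 crate. A minimal standalone sketch of that locking pattern (the cache path and the "[]" payload are placeholders, not values from this PR):

// Sketch only: the fs2 advisory-locking pattern adopted by this PR.
// "layers-cache.json" and the "[]" payload are made up for illustration.
use fs2::FileExt;
use std::fs::OpenOptions;
use std::io::Write;

fn main() -> std::io::Result<()> {
    // Read side: open without truncating and take a shared lock; if another
    // process holds an exclusive lock, try_lock_shared fails immediately and
    // the caller can simply skip the cache instead of blocking.
    let file = OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .truncate(false)
        .open("layers-cache.json")?;
    FileExt::try_lock_shared(&file)?;
    // ... deserialize the cached layers here ...
    FileExt::unlock(&file)?;

    // Write side: truncate and rewrite the whole file under an exclusive lock,
    // so two writers cannot interleave partial JSON.
    let out = OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .open("layers-cache.json")?;
    FileExt::try_lock_exclusive(&out)?;
    writeln!(&out, "[]")?;
    FileExt::unlock(&out)?;
    Ok(())
}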
src/tools/genpolicy/src/layers_cache.rs (new file, 97 lines)
@@ -0,0 +1,97 @@
// Copyright (c) 2025 Edgeless Systems GmbH
//
// SPDX-License-Identifier: Apache-2.0
//

use crate::registry::ImageLayer;

use fs2::FileExt;
use log::{debug, warn};
use std::fs::OpenOptions;
use std::sync::{Arc, Mutex};

#[derive(Debug, Clone)]
pub struct ImageLayersCache {
    inner: Arc<Mutex<Vec<ImageLayer>>>,
    filename: Option<String>,
}

impl ImageLayersCache {
    pub fn new(layers_cache_file_path: &Option<String>) -> Self {
        let layers = match ImageLayersCache::try_new(layers_cache_file_path) {
            Ok(layers) => layers,
            Err(e) => {
                warn!("Could not read image layers cache: {e}");
                Vec::new()
            }
        };
        Self {
            inner: Arc::new(Mutex::new(layers)),
            filename: layers_cache_file_path.clone(),
        }
    }

    fn try_new(layers_cache_file_path: &Option<String>) -> std::io::Result<Vec<ImageLayer>> {
        match &layers_cache_file_path {
            Some(filename) => {
                let file = OpenOptions::new()
                    .read(true)
                    .write(true)
                    .create(true)
                    .truncate(false)
                    .open(filename)?;
                // Using try_lock_shared allows this genpolicy instance to make progress even if another concurrent instance holds a lock.
                // In this case, the cache will simply not be used for this instance.
                FileExt::try_lock_shared(&file)?;

                let initial_state: Vec<ImageLayer> = match serde_json::from_reader(&file) {
                    Ok(data) => data,
                    Err(e) if e.is_eof() => Vec::new(), // empty file
                    Err(e) => {
                        FileExt::unlock(&file)?;
                        return Err(e.into());
                    }
                };
                FileExt::unlock(&file)?;
                Ok(initial_state)
            }
            None => Ok(Vec::new()),
        }
    }

    pub fn get_layer(&self, diff_id: &str) -> Option<ImageLayer> {
        let layers = self.inner.lock().unwrap();
        layers
            .iter()
            .find(|layer| layer.diff_id == diff_id)
            .cloned()
    }

    pub fn insert_layer(&self, layer: &ImageLayer) {
        let mut layers = self.inner.lock().unwrap();
        layers.push(layer.clone());
    }

    pub fn persist(&self) {
        if let Err(e) = self.try_persist() {
            warn!("Could not persist image layers cache: {e}");
        }
    }

    fn try_persist(&self) -> std::io::Result<()> {
        let Some(ref filename) = self.filename else {
            return Ok(());
        };
        debug!("Persisting image layers cache...");
        let layers = self.inner.lock().unwrap();
        let file = OpenOptions::new()
            .write(true)
            .truncate(true)
            .create(true)
            .open(filename)?;
        FileExt::try_lock_exclusive(&file)?;
        serde_json::to_writer_pretty(&file, &*layers)?;
        FileExt::unlock(&file)?;
        Ok(())
    }
}
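For reference before the wiring hunks below: the call pattern the rest of this PR uses is to check the cache, compute and insert on a miss, and persist once at the end of main(). A rough sketch (not code from the diff; the empty ImageLayer built on a miss stands in for the real dm-verity computation done by get_verity_and_users):

// Sketch only: how callers are expected to use ImageLayersCache.
use crate::layers_cache::ImageLayersCache;
use crate::registry::ImageLayer;

fn lookup_or_compute(cache: &ImageLayersCache, diff_id: &str) -> ImageLayer {
    if let Some(layer) = cache.get_layer(diff_id) {
        return layer; // cache hit: verity hash, passwd and group are reused
    }
    // Cache miss: the real code pulls the layer and computes its dm-verity
    // root hash here, then remembers the result for later runs.
    let layer = ImageLayer {
        diff_id: diff_id.to_string(),
        verity_hash: String::new(),
        passwd: String::new(),
        group: String::new(),
    };
    cache.insert_layer(&layer);
    layer
}

// At the end of a run, main() calls config.layers_cache.persist() once to
// write the cache back to disk (see the main.rs hunk further down).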
@@ -9,6 +9,7 @@ pub mod cronjob;
 pub mod daemon_set;
 pub mod deployment;
 pub mod job;
+pub mod layers_cache;
 pub mod list;
 pub mod mount_and_storage;
 pub mod no_policy;
@@ -11,6 +11,7 @@ mod cronjob;
 mod daemon_set;
 mod deployment;
 mod job;
+mod layers_cache;
 mod list;
 mod mount_and_storage;
 mod no_policy;
@@ -52,5 +53,6 @@ async fn main() {

     debug!("Exporting policy to yaml file...");
     policy.export_policy();
+    config.layers_cache.persist();
     info!("Success!");
 }
@@ -7,13 +7,13 @@
 #![allow(non_snake_case)]

 use crate::containerd;
+use crate::layers_cache::ImageLayersCache;
 use crate::policy;
 use crate::utils::Config;
 use crate::verity;

 use anyhow::{anyhow, bail, Result};
 use docker_credential::{CredentialRetrievalError, DockerCredential};
-use fs2::FileExt;
 use log::{debug, info, warn, LevelFilter};
 use oci_client::{
     client::{linux_amd64_resolver, ClientConfig, ClientProtocol},
@@ -23,10 +23,7 @@ use oci_client::{
 };
 use serde::{Deserialize, Serialize};
 use sha2::{digest::typenum::Unsigned, digest::OutputSizeUser, Sha256};
-use std::{
-    collections::BTreeMap, fs::OpenOptions, io, io::BufWriter, io::Read, io::Seek, io::Write,
-    path::Path,
-};
+use std::{collections::BTreeMap, io, io::Read, io::Seek, io::Write, path::Path};
 use tokio::io::AsyncWriteExt;

 /// Container image properties obtained from an OCI repository.
@@ -165,7 +162,7 @@ impl Container {
         debug!("config_layer: {:?}", &config_layer);

         let image_layers = get_image_layers(
-            config.layers_cache_file_path.clone(),
+            &config.layers_cache,
             &mut client,
             &reference,
             &manifest,
@@ -439,7 +436,7 @@ impl Container {
 }

 async fn get_image_layers(
-    layers_cache_file_path: Option<String>,
+    layers_cache: &ImageLayersCache,
     client: &mut Client,
     reference: &Reference,
     manifest: &manifest::OciImageManifest,
@@ -455,20 +452,16 @@ async fn get_image_layers(
             || layer.media_type.eq(manifest::IMAGE_LAYER_GZIP_MEDIA_TYPE)
         {
             if layer_index < config_layer.rootfs.diff_ids.len() {
-                let (verity_hash, passwd, group) = get_verity_and_users(
-                    layers_cache_file_path.clone(),
+                let mut imageLayer = get_verity_and_users(
+                    layers_cache,
                     client,
                     reference,
                     &layer.digest,
                     &config_layer.rootfs.diff_ids[layer_index].clone(),
                 )
                 .await?;
-                layers.push(ImageLayer {
-                    diff_id: config_layer.rootfs.diff_ids[layer_index].clone(),
-                    verity_hash: verity_hash.to_owned(),
-                    passwd: passwd.to_owned(),
-                    group: group.to_owned(),
-                });
+                imageLayer.diff_id = config_layer.rootfs.diff_ids[layer_index].clone();
+                layers.push(imageLayer);
             } else {
                 return Err(anyhow!("Too many Docker gzip layers"));
             }
@@ -481,12 +474,18 @@ async fn get_image_layers(
 }

 async fn get_verity_and_users(
-    layers_cache_file_path: Option<String>,
+    layers_cache: &ImageLayersCache,
     client: &mut Client,
     reference: &Reference,
     layer_digest: &str,
     diff_id: &str,
-) -> Result<(String, String, String)> {
+) -> Result<ImageLayer> {
+    if let Some(layer) = layers_cache.get_layer(diff_id) {
+        info!("Using cache file");
+        info!("dm-verity root hash: {}", layer.verity_hash);
+        return Ok(layer);
+    }
+
     let temp_dir = tempfile::tempdir_in(".")?;
     let base_dir = temp_dir.path();
     // Use file names supported by both Linux and Windows.
@@ -497,21 +496,6 @@ async fn get_verity_and_users(
     let mut compressed_path = decompressed_path.clone();
     compressed_path.set_extension("gz");

-    let mut verity_hash = "".to_string();
-    let mut passwd = "".to_string();
-    let mut group = "".to_string();
-    let mut error_message = "".to_string();
-    let mut error = false;
-
-    // get value from store and return if it exists
-    if let Some(path) = layers_cache_file_path.as_ref() {
-        (verity_hash, passwd, group) = read_verity_and_users_from_store(path, diff_id)?;
-        info!("Using cache file");
-        info!("dm-verity root hash: {verity_hash}");
-    }
-
-    // create the layer files
-    if verity_hash.is_empty() {
     if let Err(e) = create_decompressed_layer_file(
         client,
         reference,
@@ -521,116 +505,29 @@ async fn get_verity_and_users(
     )
     .await
     {
-            error_message = format!("Failed to create verity hash for {layer_digest}, error {e}");
-            error = true
-        };
-
-        if !error {
-            match get_verity_hash_and_users(&decompressed_path) {
-                Err(e) => {
-                    error_message = format!("Failed to get verity hash {e}");
-                    error = true;
-                }
-                Ok(res) => {
-                    (verity_hash, passwd, group) = res;
-                    if let Some(path) = layers_cache_file_path.as_ref() {
-                        add_verity_and_users_to_store(
-                            path,
-                            diff_id,
-                            &verity_hash,
-                            &passwd,
-                            &group,
-                        )?;
-                    }
-                    info!("dm-verity root hash: {verity_hash}");
-                }
-            }
-        }
-    }
-
-    temp_dir.close()?;
-    if error {
-        // remove the cache file if we're using it
-        if let Some(path) = layers_cache_file_path.as_ref() {
-            std::fs::remove_file(path)?;
-        }
-        bail!(error_message);
-    }
-    Ok((verity_hash, passwd, group))
-}
-
-// the store is a json file that matches layer hashes to verity hashes
-pub fn add_verity_and_users_to_store(
-    cache_file: &str,
-    diff_id: &str,
-    verity_hash: &str,
-    passwd: &str,
-    group: &str,
-) -> Result<()> {
-    // open the json file in read mode, create it if it doesn't exist
-    let read_file = OpenOptions::new()
-        .read(true)
-        .write(true)
-        .create(true)
-        .truncate(false)
-        .open(cache_file)?;
-
-    // Return empty vector if the file is malformed
-    let mut data: Vec<ImageLayer> = serde_json::from_reader(read_file).unwrap_or_default();
-
-    // Add new data to the deserialized JSON
-    data.push(ImageLayer {
-        diff_id: diff_id.to_string(),
-        verity_hash: verity_hash.to_string(),
-        passwd: passwd.to_string(),
-        group: group.to_string(),
-    });
-
-    // Serialize in pretty format
-    let serialized = serde_json::to_string_pretty(&data)?;
-
-    // Open the JSON file to write
-    let file = OpenOptions::new().write(true).open(cache_file)?;
-
-    // try to lock the file, if it fails, get the error
-    let result = file.try_lock_exclusive();
-    if result.is_err() {
-        warn!("Waiting to lock file: {cache_file}");
-        file.lock_exclusive()?;
-    }
-    // Write the serialized JSON to the file
-    let mut writer = BufWriter::new(&file);
-    writeln!(writer, "{}", serialized)?;
-    writer.flush()?;
-    fs2::FileExt::unlock(&file)?;
-    Ok(())
-}
-
-// helper function to read the verity hash from the store
-// returns empty string if not found or file does not exist
-pub fn read_verity_and_users_from_store(
-    cache_file: &str,
-    diff_id: &str,
-) -> Result<(String, String, String)> {
-    match OpenOptions::new().read(true).open(cache_file) {
-        Ok(file) => match serde_json::from_reader(file) {
-            Result::<Vec<ImageLayer>, _>::Ok(layers) => {
-                for layer in layers {
-                    if layer.diff_id == diff_id {
-                        return Ok((layer.verity_hash, layer.passwd, layer.group));
-                    }
-                }
-            }
-            Err(e) => {
-                warn!("read_verity_and_users_from_store: failed to read cached image layers: {e}");
-            }
-        },
-        Err(e) => {
-            info!("read_verity_and_users_from_store: failed to open cache file: {e}");
-        }
-    }
-
-    Ok((String::new(), String::new(), String::new()))
-}
+        temp_dir.close()?;
+        bail!(format!(
+            "Failed to create verity hash for {layer_digest}, error {e}"
+        ));
+    };
+
+    match get_verity_hash_and_users(&decompressed_path) {
+        Err(e) => {
+            temp_dir.close()?;
+            bail!(format!("Failed to get verity hash {e}"));
+        }
+        Ok((verity_hash, passwd, group)) => {
+            info!("dm-verity root hash: {verity_hash}");
+            let layer = ImageLayer {
+                diff_id: diff_id.to_string(),
+                verity_hash,
+                passwd,
+                group,
+            };
+            layers_cache.insert_layer(&layer);
+            Ok(layer)
+        }
+    }
+}

 async fn create_decompressed_layer_file(
@@ -5,9 +5,9 @@

 // Allow Docker image config field names.
 #![allow(non_snake_case)]
+use crate::layers_cache::ImageLayersCache;
 use crate::registry::{
-    add_verity_and_users_to_store, get_verity_hash_and_users, read_verity_and_users_from_store,
-    Container, DockerConfigLayer, ImageLayer, WHITEOUT_MARKER,
+    get_verity_hash_and_users, Container, DockerConfigLayer, ImageLayer, WHITEOUT_MARKER,
 };
 use crate::utils::Config;

@@ -60,13 +60,8 @@ impl Container {
         let config_layer = get_config_layer(image_ref_str, k8_cri_image_client)
             .await
             .unwrap();
-        let image_layers = get_image_layers(
-            config.layers_cache_file_path.clone(),
-            &manifest,
-            &config_layer,
-            &ctrd_client,
-        )
-        .await?;
+        let image_layers =
+            get_image_layers(&config.layers_cache, &manifest, &config_layer, &ctrd_client).await?;

         // Find the last layer with an /etc/* file, respecting whiteouts.
         let mut passwd = String::new();
@@ -275,7 +270,7 @@ pub fn build_auth(reference: &Reference) -> Option<AuthConfig> {
 }

 pub async fn get_image_layers(
-    layers_cache_file_path: Option<String>,
+    layers_cache: &ImageLayersCache,
     manifest: &serde_json::Value,
     config_layer: &DockerConfigLayer,
     client: &containerd_client::Client,
@@ -291,19 +286,14 @@ pub async fn get_image_layers(
             || layer_media_type.eq("application/vnd.oci.image.layer.v1.tar+gzip")
         {
             if layer_index < config_layer.rootfs.diff_ids.len() {
-                let (verity_hash, passwd, group) = get_verity_and_users(
-                    layers_cache_file_path.clone(),
+                let mut imageLayer = get_verity_and_users(
+                    layers_cache,
                     layer["digest"].as_str().unwrap(),
                     client,
                     &config_layer.rootfs.diff_ids[layer_index].clone(),
                 )
                 .await?;
-                let imageLayer = ImageLayer {
-                    diff_id: config_layer.rootfs.diff_ids[layer_index].clone(),
-                    verity_hash,
-                    passwd,
-                    group,
-                };
+                imageLayer.diff_id = config_layer.rootfs.diff_ids[layer_index].clone();
                 layersVec.push(imageLayer);
             } else {
                 return Err(anyhow!("Too many Docker gzip layers"));
@@ -316,11 +306,17 @@ pub async fn get_image_layers(
 }

 async fn get_verity_and_users(
-    layers_cache_file_path: Option<String>,
+    layers_cache: &ImageLayersCache,
     layer_digest: &str,
     client: &containerd_client::Client,
     diff_id: &str,
-) -> Result<(String, String, String)> {
+) -> Result<ImageLayer> {
+    if let Some(layer) = layers_cache.get_layer(diff_id) {
+        info!("Using cache file");
+        info!("dm-verity root hash: {}", layer.verity_hash);
+        return Ok(layer);
+    }
+
     let temp_dir = tempfile::tempdir_in(".")?;
     let base_dir = temp_dir.path();
     // Use file names supported by both Linux and Windows.
@@ -331,63 +327,34 @@ async fn get_verity_and_users(
     let mut compressed_path = decompressed_path.clone();
     compressed_path.set_extension("gz");

-    let mut verity_hash = "".to_string();
-    let mut passwd = "".to_string();
-    let mut group = "".to_string();
-    let mut error_message = "".to_string();
-    let mut error = false;
-
-    if let Some(path) = layers_cache_file_path.as_ref() {
-        (verity_hash, passwd, group) = read_verity_and_users_from_store(path, diff_id)?;
-        info!("Using cache file");
-        info!("dm-verity root hash: {verity_hash}");
-    }
-
-    if verity_hash.is_empty() {
-        // go find verity hash if not found in cache
-        if let Err(e) = create_decompressed_layer_file(
-            client,
-            layer_digest,
-            &decompressed_path,
-            &compressed_path,
-        )
-        .await
-        {
-            error = true;
-            error_message = format!("Failed to create verity hash for {layer_digest}, error {e}");
-        }
-
-        if !error {
-            match get_verity_hash_and_users(&decompressed_path) {
-                Err(e) => {
-                    error_message = format!("Failed to get verity hash {e}");
-                    error = true;
-                }
-                Ok(res) => {
-                    (verity_hash, passwd, group) = res;
-                    if let Some(path) = layers_cache_file_path.as_ref() {
-                        add_verity_and_users_to_store(
-                            path,
-                            diff_id,
-                            &verity_hash,
-                            &passwd,
-                            &group,
-                        )?;
-                    }
-                    info!("dm-verity root hash: {verity_hash}");
-                }
-            }
-        }
-    }
-    temp_dir.close()?;
-    if error {
-        // remove the cache file if we're using it
-        if let Some(path) = layers_cache_file_path.as_ref() {
-            std::fs::remove_file(path)?;
-        }
-        bail!(error_message);
-    }
-    Ok((verity_hash, passwd, group))
-}
+    // go find verity hash if not found in cache
+    if let Err(e) =
+        create_decompressed_layer_file(client, layer_digest, &decompressed_path, &compressed_path)
+            .await
+    {
+        temp_dir.close()?;
+        bail!(format!(
+            "Failed to create verity hash for {layer_digest}, error {e}"
+        ));
+    }
+
+    match get_verity_hash_and_users(&decompressed_path) {
+        Err(e) => {
+            temp_dir.close()?;
+            bail!(format!("Failed to get verity hash {e}"));
+        }
+        Ok((verity_hash, passwd, group)) => {
+            info!("dm-verity root hash: {verity_hash}");
+            let layer = ImageLayer {
+                diff_id: diff_id.to_string(),
+                verity_hash,
+                passwd,
+                group,
+            };
+            layers_cache.insert_layer(&layer);
+            Ok(layer)
+        }
+    }
+}

 async fn create_decompressed_layer_file(
@@ -3,6 +3,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //

+use crate::layers_cache;
 use crate::settings;
 use clap::Parser;

@@ -123,7 +124,7 @@ pub struct Config {
     pub raw_out: bool,
     pub base64_out: bool,
     pub containerd_socket_path: Option<String>,
-    pub layers_cache_file_path: Option<String>,
+    pub layers_cache: layers_cache::ImageLayersCache,
     pub version: bool,
 }

@@ -161,7 +162,7 @@ impl Config {
             raw_out: args.raw_out,
             base64_out: args.base64_out,
             containerd_socket_path: args.containerd_socket_path,
-            layers_cache_file_path,
+            layers_cache: layers_cache::ImageLayersCache::new(&layers_cache_file_path),
             version: args.version,
         }
     }
@@ -73,7 +73,7 @@ mod tests {
             config_files: None,
             containerd_socket_path: None, // Some(String::from("/var/run/containerd/containerd.sock")),
            insecure_registries: Vec::new(),
-            layers_cache_file_path: None,
+            layers_cache: genpolicy::layers_cache::ImageLayersCache::new(&None),
             raw_out: false,
             rego_rules_path: workdir.join("rules.rego").to_str().unwrap().to_string(),
             runtime_class_names: Vec::new(),