Merge pull request #8822 from microsoft/danmihai1/cargo-clippy
genpolicy: cargo clippy fixes
Commit 0dc00ae373
@@ -75,13 +75,13 @@ impl yaml::K8sResource for List {
     }
 
     fn serialize(&mut self, policy: &str) -> String {
-        let policies: Vec<&str> = policy.split(":").collect();
+        let policies: Vec<&str> = policy.split(':').collect();
         let len = policies.len();
         assert!(len == self.resources.len());
 
         self.items.clear();
-        for i in 0..len {
-            let yaml = self.resources[i].serialize(policies[i]);
+        for (i, p) in policies.iter().enumerate().take(len) {
+            let yaml = self.resources[i].serialize(p);
             let document = serde_yaml::Deserializer::from_str(&yaml);
             let doc_value = Value::deserialize(document).unwrap();
             self.items.push(doc_value.clone());
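For reference, a minimal standalone sketch of the two clippy patterns addressed in the hunk above: clippy::single_char_pattern (use a char rather than a one-character string pattern) and clippy::needless_range_loop (iterate with enumerate() instead of indexing over 0..len). The function and input below are invented for illustration and are not part of the kata-containers sources.

// Illustrative only: char pattern + enumerate() instead of an index loop.
fn split_and_print(policy: &str) {
    // clippy::single_char_pattern -- ':' is cheaper than ":".
    let parts: Vec<&str> = policy.split(':').collect();

    // clippy::needless_range_loop -- no manual indexing needed.
    for (i, part) in parts.iter().enumerate() {
        println!("part {i}: {part}");
    }
}

fn main() {
    split_and_print("first:second:third");
}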
@@ -3,8 +3,6 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-use clap::Parser;
-use env_logger;
 use log::{debug, info};
 
 mod config_map;
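Dropping `use env_logger;` works because clippy::single_component_path_imports flags imports of a single-segment path: the crate name is already in scope, so `env_logger::init()` resolves without the `use`. A minimal sketch, assuming the env_logger and log crates are available as dependencies:

// Illustrative only: no `use env_logger;` is required for this call,
// because `env_logger` is a single-segment (crate-name) path.
fn init_logging() {
    env_logger::init();
    log::debug!("logging initialized");
}

fn main() {
    init_logging();
}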
@@ -31,88 +29,10 @@ mod verity;
 mod volume;
 mod yaml;
 
-#[derive(Debug, Parser)]
-struct CommandLineOptions {
-    #[clap(
-        short,
-        long,
-        help = "Kubernetes input/output YAML file path. stdin/stdout get used if this option is not specified."
-    )]
-    yaml_file: Option<String>,
-
-    #[clap(
-        short,
-        long,
-        help = "Optional Kubernetes config map YAML input file path"
-    )]
-    config_map_file: Option<String>,
-
-    #[clap(
-        short = 'j',
-        long,
-        default_value_t = String::from("genpolicy-settings.json"),
-        help = "genpolicy settings file name"
-    )]
-    settings_file_name: String,
-
-    #[clap(
-        short,
-        long,
-        default_value_t = String::from("."),
-        help = "Path to the rules.rego and settings input files"
-    )]
-    input_files_path: String,
-
-    #[clap(
-        short,
-        long,
-        help = "Create and use a cache of container image layer contents and dm-verity information (in ./layers_cache/)"
-    )]
-    use_cached_files: bool,
-
-    #[clap(
-        short,
-        long,
-        help = "Print the output Rego policy text to standard output"
-    )]
-    raw_out: bool,
-
-    #[clap(
-        short,
-        long,
-        help = "Print the base64 encoded output Rego policy to standard output"
-    )]
-    base64_out: bool,
-
-    #[clap(
-        short,
-        long,
-        help = "Ignore unsupported input Kubernetes YAML fields. This is not recommeded unless you understand exactly how genpolicy works!"
-    )]
-    silent_unsupported_fields: bool,
-}
-
 #[tokio::main]
 async fn main() {
     env_logger::init();
 
-    let args = CommandLineOptions::parse();
-
-    let mut config_map_files = Vec::new();
-    if let Some(config_map_file) = &args.config_map_file {
-        config_map_files.push(config_map_file.clone());
-    }
-
-    let config = utils::Config::new(
-        args.use_cached_files,
-        args.yaml_file,
-        &args.input_files_path,
-        &args.settings_file_name,
-        &config_map_files,
-        args.silent_unsupported_fields,
-        args.raw_out,
-        args.base64_out,
-    );
+    let config = utils::Config::new();
 
     debug!("Creating policy from yaml, settings, and rules.rego files...");
     let mut policy = policy::AgentPolicy::from_files(&config).await.unwrap();
@@ -32,9 +32,9 @@ pub fn get_policy_mounts(
     };
 
     for s_mount in settings_mounts {
-        if keep_settings_mount(settings, &s_mount, &yaml_container.volumeMounts) {
+        if keep_settings_mount(settings, s_mount, &yaml_container.volumeMounts) {
             let mut mount = s_mount.clone();
-            adjust_termination_path(&mut mount, &yaml_container);
+            adjust_termination_path(&mut mount, yaml_container);
 
             if mount.source.is_empty() && mount.type_.eq("bind") {
                 if let Some(file_name) = Path::new(&mount.destination).file_name() {

@@ -54,12 +54,11 @@ pub fn get_policy_mounts(
                 policy_mount.options = mount.options.iter().map(String::from).collect();
             } else {
                 // Add a new mount.
-                if !is_pause_container {
-                    if s_mount.destination.eq("/etc/hostname")
-                        || s_mount.destination.eq("/etc/resolv.conf")
-                    {
-                        mount.options.push(rootfs_access.to_string());
-                    }
+                if !is_pause_container
+                    && (s_mount.destination.eq("/etc/hostname")
+                        || s_mount.destination.eq("/etc/resolv.conf"))
+                {
+                    mount.options.push(rootfs_access.to_string());
                 }
                 p_mounts.push(mount);
             }
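The hunk above folds a nested `if` into one condition, which is what clippy::collapsible_if asks for. A minimal standalone sketch with invented names (not the repository's helper):

// Illustrative only (clippy::collapsible_if): one combined condition
// instead of two nested `if` blocks.
fn needs_rootfs_option(is_pause_container: bool, destination: &str) -> bool {
    !is_pause_container
        && (destination == "/etc/hostname" || destination == "/etc/resolv.conf")
}

fn main() {
    assert!(needs_rootfs_option(false, "/etc/hostname"));
    assert!(!needs_rootfs_option(true, "/etc/resolv.conf"));
}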
@@ -49,7 +49,7 @@ impl yaml::K8sResource for NoPolicyResource {
     }
 
     fn generate_policy(&self, _agent_policy: &policy::AgentPolicy) -> String {
-        return "".to_string();
+        "".to_string()
     }
 
     fn serialize(&mut self, _policy: &str) -> String {
@@ -729,7 +729,7 @@ impl Container {
                         capabilities.Permitted.clear();
                         capabilities.Effective.clear();
                     } else {
-                        let cap = "CAP_".to_string() + &c;
+                        let cap = "CAP_".to_string() + c;
 
                         capabilities.Bounding.retain(|x| !x.eq(&cap));
                         capabilities.Permitted.retain(|x| !x.eq(&cap));

@@ -739,7 +739,7 @@ impl Container {
         }
         if let Some(add) = &yaml_capabilities.add {
             for c in add {
-                let cap = "CAP_".to_string() + &c;
+                let cap = "CAP_".to_string() + c;
 
                 if !capabilities.Bounding.contains(&cap) {
                     capabilities.Bounding.push(cap.clone());

@@ -779,7 +779,7 @@ fn compress_capabilities(capabilities: &mut Vec<String>, defaults: &policy::Comm
        ""
    };
 
-    if default_caps.len() != 0 {
+    if !default_caps.is_empty() {
        capabilities.clear();
        capabilities.push(default_caps.to_string());
    }
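Two lints show up in the capability handling above: clippy::needless_borrow (`+ &c` adds a borrow that `+ c` already provides) and clippy::len_zero (prefer is_empty() over comparing len() with 0). A standalone sketch with made-up data:

// Illustrative only: needless_borrow and len_zero in one small function.
fn prefix_caps(caps: &[String]) -> Vec<String> {
    let mut out = Vec::new();
    for c in caps {
        // `c` is already a reference here, so no extra `&` is needed.
        out.push("CAP_".to_string() + c);
    }
    // Prefer is_empty() over `len() != 0`.
    if !out.is_empty() {
        out.sort();
    }
    out
}

fn main() {
    let caps = vec!["NET_ADMIN".to_string(), "SYS_TIME".to_string()];
    println!("{:?}", prefix_caps(&caps));
}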
@@ -393,7 +393,7 @@ impl AgentPolicy {
 
         if let Some(config_map_files) = &config.config_map_files {
             for file in config_map_files {
-                config_maps.push(config_map::ConfigMap::new(&file)?);
+                config_maps.push(config_map::ConfigMap::new(file)?);
             }
         }
 

@@ -429,13 +429,11 @@ impl AgentPolicy {
                 .create(true)
                 .open(yaml_file)
                 .unwrap()
-                .write_all(&yaml_string.as_bytes())
+                .write_all(yaml_string.as_bytes())
                 .unwrap();
         } else {
             // When input YAML came through stdin, print the output YAML to stdout.
-            std::io::stdout()
-                .write_all(&yaml_string.as_bytes())
-                .unwrap();
+            std::io::stdout().write_all(yaml_string.as_bytes()).unwrap();
         }
     }
 

@@ -443,12 +441,8 @@ impl AgentPolicy {
         let yaml_containers = resource.get_containers();
         let mut policy_containers = Vec::new();
 
-        for i in 0..yaml_containers.len() {
-            policy_containers.push(self.get_container_policy(
-                resource,
-                &yaml_containers[i],
-                i == 0,
-            ));
+        for (i, yaml_container) in yaml_containers.iter().enumerate() {
+            policy_containers.push(self.get_container_policy(resource, yaml_container, i == 0));
         }
 
         let policy_data = policy::PolicyData {

@@ -698,8 +692,8 @@ fn substitute_env_variables(env: &mut Vec<String>) {
     for i in 0..env.len() {
         let components: Vec<&str> = env[i].split('=').collect();
         if components.len() == 2 {
-            if let Some((start, end)) = find_subst_target(&components[1]) {
-                if let Some(new_value) = substitute_variable(&components[1], start, end, env) {
+            if let Some((start, end)) = find_subst_target(components[1]) {
+                if let Some(new_value) = substitute_variable(components[1], start, end, env) {
                     let new_var = format!("{}={new_value}", &components[0]);
                     debug!("Replacing env variable <{}> with <{new_var}>", &env[i]);
                     env[i] = new_var;

@@ -719,7 +713,7 @@ fn find_subst_target(env_value: &str) -> Option<(usize, usize)> {
     if let Some(mut start) = env_value.find("$(") {
         start += 2;
         if env_value.len() > start {
-            if let Some(end) = env_value[start..].find(")") {
+            if let Some(end) = env_value[start..].find(')') {
                 return Some((start, start + end));
             }
         }

@@ -735,7 +729,7 @@ fn substitute_variable(
     env: &Vec<String>,
 ) -> Option<String> {
     // Variables generated by this application.
-    let internal_vars = vec![
+    let internal_vars = [
         "bundle-id",
         "host-ip",
         "node-name",
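The last change above swaps `vec![...]` for a plain array, per clippy::useless_vec: a literal list that is only read does not need a heap allocation. Minimal sketch with invented names:

// Illustrative only (clippy::useless_vec): a read-only literal list can
// be a stack array rather than a Vec.
fn is_internal_var(name: &str) -> bool {
    let internal_vars = ["bundle-id", "host-ip", "node-name"];
    internal_vars.contains(&name)
}

fn main() {
    assert!(is_internal_var("host-ip"));
    assert!(!is_internal_var("HOSTNAME"));
}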
@@ -9,7 +9,7 @@
 use crate::policy;
 use crate::verity;
 
-use anyhow::{anyhow, Result};
+use anyhow::{anyhow, bail, Result};
 use docker_credential::{CredentialRetrievalError, DockerCredential};
 use log::warn;
 use log::{debug, info, LevelFilter};

@@ -17,7 +17,8 @@ use oci_distribution::client::{linux_amd64_resolver, ClientConfig};
 use oci_distribution::{manifest, secrets::RegistryAuth, Client, Reference};
 use serde::{Deserialize, Serialize};
 use sha2::{digest::typenum::Unsigned, digest::OutputSizeUser, Sha256};
-use std::{io, io::Seek, io::Write, path::Path};
+use std::io::{self, Seek, Write};
+use std::path::{Path, PathBuf};
 use tokio::{fs, io::AsyncWriteExt};
 
 /// Container image properties obtained from an OCI repository.
@@ -253,10 +254,33 @@ async fn get_image_layers(
     Ok(layers)
 }
 
-fn delete_files(decompressed_path: &Path, compressed_path: &Path, verity_path: &Path) {
-    let _ = fs::remove_file(&decompressed_path);
-    let _ = fs::remove_file(&compressed_path);
-    let _ = fs::remove_file(&verity_path);
+fn get_verity_path(base_dir: &Path, file_name: &str) -> PathBuf {
+    let mut verity_path: PathBuf = base_dir.join(file_name);
+    verity_path.set_extension("verity");
+    verity_path
 }
 
+fn get_decompressed_path(verity_path: &Path) -> PathBuf {
+    let mut decompressed_path = verity_path.to_path_buf().clone();
+    decompressed_path.set_extension("tar");
+    decompressed_path
+}
+
+fn get_compressed_path(decompressed_path: &Path) -> PathBuf {
+    let mut compressed_path = decompressed_path.to_path_buf().clone();
+    compressed_path.set_extension("gz");
+    compressed_path
+}
+
+async fn delete_files(base_dir: &Path, file_name: &str) {
+    let verity_path = get_verity_path(base_dir, file_name);
+    let _ = fs::remove_file(&verity_path).await;
+
+    let decompressed_path = get_decompressed_path(&verity_path);
+    let _ = fs::remove_file(&decompressed_path).await;
+
+    let compressed_path = get_compressed_path(&decompressed_path);
+    let _ = fs::remove_file(&compressed_path).await;
+}
+
 async fn get_verity_hash(
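The helpers added above derive each cached-layer file name from the previous one via PathBuf::set_extension. A small standalone sketch of that chaining (the "sha256-abc123" file name is made up for the example):

// Illustrative only: deriving .verity/.tar/.gz siblings from one base name.
use std::path::{Path, PathBuf};

fn get_verity_path(base_dir: &Path, file_name: &str) -> PathBuf {
    let mut p = base_dir.join(file_name);
    p.set_extension("verity");
    p
}

fn main() {
    let verity = get_verity_path(Path::new("layers_cache"), "sha256-abc123");
    let mut tar = verity.clone();
    tar.set_extension("tar"); // layers_cache/sha256-abc123.tar
    let mut gz = tar.clone();
    gz.set_extension("gz"); // layers_cache/sha256-abc123.gz
    println!("{:?} {:?} {:?}", verity, tar, gz);
}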
@@ -265,23 +289,11 @@ async fn get_verity_hash(
     reference: &Reference,
     layer_digest: &str,
 ) -> Result<String> {
-    let base_dir = std::path::Path::new("layers_cache");
-
     // Use file names supported by both Linux and Windows.
-    let file_name = str::replace(&layer_digest, ":", "-");
+    let file_name = str::replace(layer_digest, ":", "-");
 
-    let mut decompressed_path = base_dir.join(file_name);
-    decompressed_path.set_extension("tar");
-
-    let mut compressed_path = decompressed_path.clone();
-    compressed_path.set_extension("gz");
-
-    let mut verity_path = decompressed_path.clone();
-    verity_path.set_extension("verity");
-
-    let mut verity_hash = "".to_string();
-    let mut error_message = "".to_string();
-    let mut error = false;
+    let base_dir = std::path::Path::new("layers_cache");
+    let verity_path = get_verity_path(base_dir, &file_name);
 
     if use_cached_files && verity_path.exists() {
         info!("Using cached file {:?}", &verity_path);

@@ -290,40 +302,27 @@ async fn get_verity_hash(
         client,
         reference,
         layer_digest,
-        &base_dir,
-        &decompressed_path,
-        &compressed_path,
-        &verity_path,
+        base_dir,
+        &get_decompressed_path(&verity_path),
     )
     .await
     {
-        error = true;
-        error_message = format!("Failed to create verity hash for {layer_digest}, error {e}");
+        delete_files(base_dir, &file_name).await;
+        bail!("{e}");
     }
-
-    if !error {
-        match std::fs::read_to_string(&verity_path) {
-            Err(e) => {
-                error = true;
-                error_message = format!("Failed to read {:?}, error {e}", &verity_path);
-            }
-            Ok(v) => {
-                verity_hash = v;
-                info!("dm-verity root hash: {verity_hash}");
-            }
+    match std::fs::read_to_string(&verity_path) {
+        Err(e) => {
+            delete_files(base_dir, &file_name).await;
+            bail!("Failed to read {:?}, error {e}", &verity_path);
+        }
+        Ok(v) => {
+            if !use_cached_files {
+                let _ = std::fs::remove_dir_all(base_dir);
+            }
+            info!("dm-verity root hash: {v}");
+            Ok(v)
         }
     }
-
-    if !use_cached_files {
-        let _ = std::fs::remove_dir_all(&base_dir);
-    } else if error {
-        delete_files(&decompressed_path, &compressed_path, &verity_path);
-    }
-
-    if error {
-        panic!("{error_message}");
-    } else {
-        Ok(verity_hash)
-    }
 }
 
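The rework above drops the `error`/`error_message` flags and the final panic! in favor of returning early with anyhow's bail!, so callers decide how to handle the failure. A minimal sketch of that pattern (the file name is invented):

// Illustrative only: early bail! instead of error flags and panic!.
use anyhow::{bail, Result};

fn read_hash(path: &str) -> Result<String> {
    match std::fs::read_to_string(path) {
        Err(e) => bail!("Failed to read {path}, error {e}"),
        Ok(v) => Ok(v.trim().to_string()),
    }
}

fn main() {
    match read_hash("missing.verity") {
        Ok(h) => println!("hash: {h}"),
        Err(e) => eprintln!("{e}"),
    }
}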
@@ -333,27 +332,23 @@ async fn create_verity_hash_file(
     reference: &Reference,
     layer_digest: &str,
     base_dir: &Path,
-    decompressed_path: &Path,
-    compressed_path: &Path,
-    verity_path: &Path,
+    decompressed_path: &PathBuf,
 ) -> Result<()> {
     if use_cached_files && decompressed_path.exists() {
         info!("Using cached file {:?}", &decompressed_path);
     } else {
-        std::fs::create_dir_all(&base_dir)?;
-
+        std::fs::create_dir_all(base_dir)?;
         create_decompressed_layer_file(
             use_cached_files,
             client,
             reference,
             layer_digest,
-            &decompressed_path,
-            &compressed_path,
+            decompressed_path,
         )
         .await?;
     }
 
-    do_create_verity_hash_file(decompressed_path, verity_path)
+    do_create_verity_hash_file(decompressed_path)
 }
 
 async fn create_decompressed_layer_file(
@@ -361,9 +356,10 @@ async fn create_decompressed_layer_file(
     client: &mut Client,
     reference: &Reference,
     layer_digest: &str,
-    decompressed_path: &Path,
-    compressed_path: &Path,
+    decompressed_path: &PathBuf,
 ) -> Result<()> {
+    let compressed_path = get_compressed_path(decompressed_path);
+
     if use_cached_files && compressed_path.exists() {
         info!("Using cached file {:?}", &compressed_path);
     } else {

@@ -372,20 +368,20 @@ async fn create_decompressed_layer_file(
             .await
             .map_err(|e| anyhow!(e))?;
         client
-            .pull_blob(&reference, layer_digest, &mut file)
+            .pull_blob(reference, layer_digest, &mut file)
             .await
             .map_err(|e| anyhow!(e))?;
         file.flush().await.map_err(|e| anyhow!(e))?;
     }
 
     info!("Decompressing layer");
-    let compressed_file = std::fs::File::open(&compressed_path).map_err(|e| anyhow!(e))?;
+    let compressed_file = std::fs::File::open(compressed_path).map_err(|e| anyhow!(e))?;
     let mut decompressed_file = std::fs::OpenOptions::new()
         .read(true)
         .write(true)
         .create(true)
         .truncate(true)
-        .open(&decompressed_path)?;
+        .open(decompressed_path)?;
     let mut gz_decoder = flate2::read::GzDecoder::new(compressed_file);
     std::io::copy(&mut gz_decoder, &mut decompressed_file).map_err(|e| anyhow!(e))?;
 
@@ -397,12 +393,15 @@ async fn create_decompressed_layer_file(
     Ok(())
 }
 
-fn do_create_verity_hash_file(path: &Path, verity_path: &Path) -> Result<()> {
+fn do_create_verity_hash_file(decompressed_path: &PathBuf) -> Result<()> {
     info!("Calculating dm-verity root hash");
-    let mut file = std::fs::File::open(path)?;
+    let mut file = std::fs::File::open(decompressed_path)?;
     let size = file.seek(std::io::SeekFrom::End(0))?;
     if size < 4096 {
-        return Err(anyhow!("Block device {:?} is too small: {size}", &path));
+        return Err(anyhow!(
+            "Block device {:?} is too small: {size}",
+            &decompressed_path
+        ));
     }
 
     let salt = [0u8; <Sha256 as OutputSizeUser>::OutputSize::USIZE];

@@ -410,6 +409,8 @@ fn do_create_verity_hash_file(path: &Path, verity_path: &Path) -> Result<()> {
     let hash = verity::traverse_file(&mut file, 0, false, v, &mut verity::no_write)?;
     let result = format!("{:x}", hash);
 
+    let mut verity_path = decompressed_path.clone();
+    verity_path.set_extension("verity");
     let mut verity_file = std::fs::File::create(verity_path).map_err(|e| anyhow!(e))?;
     verity_file
         .write_all(result.as_bytes())
@@ -428,7 +429,7 @@ fn build_auth(reference: &Reference) -> RegistryAuth {
 
     let server = reference
         .resolve_registry()
-        .strip_suffix("/")
+        .strip_suffix('/')
         .unwrap_or_else(|| reference.resolve_registry());
 
     match docker_credential::get_credential(server) {
@@ -44,7 +44,7 @@ impl Secret {
         if my_name.eq(name) {
             if let Some(data) = &self.data {
                 if let Some(value) = data.get(&key_ref.key) {
-                    let value_bytes = general_purpose::STANDARD.decode(&value).unwrap();
+                    let value_bytes = general_purpose::STANDARD.decode(value).unwrap();
                     let value_string = std::str::from_utf8(&value_bytes).unwrap();
                     return Some(value_string.to_string());
                 }
@@ -200,7 +200,7 @@ impl StatefulSet {
                 source:
                     "^/run/kata-containers/shared/containers/$(bundle-id)-[a-z0-9]{16}-"
                         .to_string()
-                        + &file_name
+                        + file_name
                         + "$",
                 options: vec![
                     "rbind".to_string(),
@@ -3,8 +3,70 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+use clap::Parser;
 use log::debug;
 
+#[derive(Debug, Parser)]
+struct CommandLineOptions {
+    #[clap(
+        short,
+        long,
+        help = "Kubernetes input/output YAML file path. stdin/stdout get used if this option is not specified."
+    )]
+    yaml_file: Option<String>,
+
+    #[clap(
+        short,
+        long,
+        help = "Optional Kubernetes config map YAML input file path"
+    )]
+    config_map_file: Option<String>,
+
+    #[clap(
+        short = 'j',
+        long,
+        default_value_t = String::from("genpolicy-settings.json"),
+        help = "genpolicy settings file name"
+    )]
+    settings_file_name: String,
+
+    #[clap(
+        short,
+        long,
+        default_value_t = String::from("."),
+        help = "Path to the rules.rego and settings input files"
+    )]
+    input_files_path: String,
+
+    #[clap(
+        short,
+        long,
+        help = "Create and use a cache of container image layer contents and dm-verity information (in ./layers_cache/)"
+    )]
+    use_cached_files: bool,
+
+    #[clap(
+        short,
+        long,
+        help = "Print the output Rego policy text to standard output"
+    )]
+    raw_out: bool,
+
+    #[clap(
+        short,
+        long,
+        help = "Print the base64 encoded output Rego policy to standard output"
+    )]
+    base64_out: bool,
+
+    #[clap(
+        short,
+        long,
+        help = "Ignore unsupported input Kubernetes YAML fields. This is not recommeded unless you understand exactly how genpolicy works!"
+    )]
+    silent_unsupported_fields: bool,
+}
+
 /// Application configuration, derived from on command line parameters.
 #[derive(Clone, Debug)]
 pub struct Config {
@@ -21,21 +83,13 @@ pub struct Config {
 }
 
 impl Config {
-    pub fn new(
-        use_cache: bool,
-        yaml_file: Option<String>,
-        input_files_path: &str,
-        settings_file_name: &str,
-        config_map_files: &Vec<String>,
-        silent_unsupported_fields: bool,
-        raw_out: bool,
-        base64_out: bool,
-    ) -> Self {
-        let rules_file = format!("{input_files_path}/rules.rego");
-        debug!("Rules file: {rules_file}");
+    pub fn new() -> Self {
+        let args = CommandLineOptions::parse();
 
-        let settings_file = format!("{input_files_path}/{settings_file_name}");
-        debug!("Settings file: {settings_file}");
+        let mut config_map_files = Vec::new();
+        if let Some(config_map_file) = &args.config_map_file {
+            config_map_files.push(config_map_file.clone());
+        }
 
         let cm_files = if !config_map_files.is_empty() {
             Some(config_map_files.clone())

@@ -43,15 +97,21 @@ impl Config {
             None
         };
 
+        let rules_file = format!("{}/rules.rego", &args.input_files_path);
+        debug!("Rules file: {rules_file}");
+
+        let settings_file = format!("{}/{}", &args.input_files_path, &args.settings_file_name);
+        debug!("Settings file: {settings_file}");
+
         Self {
-            use_cache,
-            yaml_file,
+            use_cache: args.use_cached_files,
+            yaml_file: args.yaml_file,
             rules_file,
             settings_file,
             config_map_files: cm_files,
-            silent_unsupported_fields,
-            raw_out,
-            base64_out,
+            silent_unsupported_fields: args.silent_unsupported_fields,
+            raw_out: args.raw_out,
+            base64_out: args.base64_out,
         }
     }
 }
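After this refactor the configuration type parses the command line itself, which is why main() above shrinks to `let config = utils::Config::new();`. A standalone sketch of the same pattern with invented names (it assumes the clap crate with its derive feature, mirroring the #[clap(...)] style used in this file):

// Illustrative only: the config constructor owns CLI parsing, so the
// caller never forwards a long parameter list.
use clap::Parser;

#[derive(Debug, Parser)]
struct Cli {
    #[clap(short, long, default_value_t = String::from("."))]
    input_files_path: String,
}

#[derive(Clone, Debug)]
struct AppConfig {
    rules_file: String,
}

impl AppConfig {
    fn new() -> Self {
        let args = Cli::parse();
        Self {
            rules_file: format!("{}/rules.rego", args.input_files_path),
        }
    }
}

fn main() {
    let cfg = AppConfig::new();
    println!("rules file: {}", cfg.rules_file);
}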
@@ -27,7 +27,6 @@ use core::fmt::Debug;
 use log::debug;
 use protocols::agent;
 use serde::{Deserialize, Serialize};
-use serde_yaml;
 use std::boxed;
 use std::collections::BTreeMap;
 use std::fs::read_to_string;
@@ -95,7 +94,7 @@
 ) -> anyhow::Result<(boxed::Box<dyn K8sResource + Sync + Send>, String)> {
     let header = get_yaml_header(yaml)?;
     let kind: &str = &header.kind;
-    let d = serde_yaml::Deserializer::from_str(&yaml);
+    let d = serde_yaml::Deserializer::from_str(yaml);
 
     match kind {
         "ConfigMap" => {

@@ -202,7 +201,7 @@ pub fn new_k8s_resource(
 
 pub fn get_input_yaml(yaml_file: &Option<String>) -> anyhow::Result<String> {
     let yaml_string = if let Some(yaml) = yaml_file {
-        read_to_string(&yaml)?
+        read_to_string(yaml)?
     } else {
         std::io::read_to_string(std::io::stdin())?
     };

@@ -211,7 +210,7 @@ pub fn get_input_yaml(yaml_file: &Option<String>) -> anyhow::Result<String> {
 }
 
 pub fn get_yaml_header(yaml: &str) -> anyhow::Result<YamlHeader> {
-    return Ok(serde_yaml::from_str(yaml)?);
+    Ok(serde_yaml::from_str(yaml)?)
 }
 
 pub async fn k8s_resource_init(spec: &mut pod::PodSpec, use_cache: bool) {

@@ -267,7 +266,7 @@ pub fn add_policy_annotation(
 
     let path_components = metadata_path.split('.');
     for name in path_components {
-        ancestor = ancestor.get_mut(&name).unwrap();
+        ancestor = ancestor.get_mut(name).unwrap();
     }
 
     if let Some(annotations) = ancestor.get_mut(&annotations_key) {