Mirror of https://github.com/sergi0g/cup.git
Many many many changes, honestly just read the release notes
Sergio authored on 2025-02-28 20:43:49 +02:00 · committed by GitHub
parent b12acba745 · commit 0f9c5d1466
141 changed files with 4527 additions and 5848 deletions


@@ -1,63 +1,155 @@
use futures::future::join_all;
use itertools::Itertools;
use rustc_hash::{FxHashMap, FxHashSet};
use crate::{
config::Config, image::Image, registry::{check_auth, get_token}, utils::new_reqwest_client
docker::get_images_from_docker_daemon,
http::Client,
registry::{check_auth, get_token},
structs::{image::Image, update::Update},
utils::request::{get_response_body, parse_json},
Context,
};
use crate::registry::get_latest_digest;
/// Fetches image data from other Cup instances
async fn get_remote_updates(ctx: &Context, client: &Client, refresh: bool) -> Vec<Update> {
let mut remote_images = Vec::new();
/// Trait for a type that implements a function `unique` that removes any duplicates.
/// In this case, it will be used for a Vec.
pub trait Unique<T> {
fn unique(&mut self) -> Vec<T>;
}
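// e.g. vec![1, 2, 2, 3].unique() yields vec![1, 2, 3]: the first occurrence of
// each element is kept and the original order is preserved (see the impl below).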
let handles: Vec<_> = ctx.config.servers
.iter()
.map(|(name, url)| async move {
let base_url = if url.starts_with("http://") || url.starts_with("https://") {
format!("{}/api/v3/", url.trim_end_matches('/'))
} else {
format!("https://{}/api/v3/", url.trim_end_matches('/'))
};
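// e.g. "my-server:8000/" becomes "https://my-server:8000/api/v3/", while an
// explicit "http://10.0.0.5:8000" stays on plain HTTP as "http://10.0.0.5:8000/api/v3/"
// (hostnames illustrative).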
let json_url = base_url.clone() + "json";
if refresh {
let refresh_url = base_url + "refresh";
match client.get(&refresh_url, vec![], false).await {
Ok(response) => {
if response.status() != 200 {
ctx.logger.warn(format!("GET {}: Failed to refresh server. Server returned invalid response code: {}",refresh_url,response.status()));
return Vec::new();
}
},
Err(e) => {
ctx.logger.warn(format!("GET {}: Failed to refresh server. {}", refresh_url, e));
return Vec::new();
},
}
impl<T> Unique<T> for Vec<T>
where
T: Clone + Eq + std::hash::Hash,
{
/// Remove duplicates from Vec
fn unique(self: &mut Vec<T>) -> Self {
let mut seen: FxHashSet<T> = FxHashSet::default();
self.retain(|item| seen.insert(item.clone()));
self.to_vec()
}
match client.get(&json_url, vec![], false).await {
Ok(response) => {
if response.status() != 200 {
ctx.logger.warn(format!("GET {}: Failed to fetch updates from server. Server returned invalid response code: {}",json_url,response.status()));
return Vec::new();
}
let json = parse_json(&get_response_body(response).await);
ctx.logger.debug(format!("JSON response for {}: {}", name, json));
if let Some(updates) = json["images"].as_array() {
let mut server_updates: Vec<Update> = updates
.iter()
.filter_map(|img| serde_json::from_value(img.clone()).ok())
.collect();
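// Each element is deserialized independently; anything that does not parse as an
// `Update` is dropped by `filter_map(... .ok())` instead of failing the whole response.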
// Add server origin to each image
for update in &mut server_updates {
update.server = Some(name.clone());
update.status = update.get_status();
}
ctx.logger.debug(format!("Updates for {}: {:#?}", name, server_updates));
return server_updates;
}
Vec::new()
}
Err(e) => {
ctx.logger.warn(format!("GET {}: Failed to fetch updates from server. {}", json_url, e));
Vec::new()
},
}
})
.collect();
for mut images in join_all(handles).await {
remote_images.append(&mut images);
}
remote_images
}
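// Illustrative sketch (not part of this commit) of the fan-out pattern used by
// `get_remote_updates` above: build one future per server, run them concurrently
// with the `join_all` already imported at the top of this file, then flatten the
// per-server results. Server names are made up.
async fn fan_out_demo() -> Vec<String> {
    let servers = vec!["alpha", "beta"];
    let handles: Vec<_> = servers
        .iter()
        .map(|name| async move {
            // Stand-in for the HTTP round trip each real future performs.
            vec![format!("{}-update", name)]
        })
        .collect();
    let mut all = Vec::new();
    for mut part in join_all(handles).await {
        all.append(&mut part);
    }
    // join_all preserves input order, so: ["alpha-update", "beta-update"]
    all
}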
/// Returns a list of updates for all images passed in.
pub async fn get_updates(images: &[Image], config: &Config) -> Vec<(String, Option<bool>)> {
pub async fn get_updates(
references: &Option<Vec<String>>,
refresh: bool,
ctx: &Context,
) -> Vec<Update> {
let client = Client::new(ctx);
// Get local images
ctx.logger.debug("Retrieving images to be checked");
let mut images = get_images_from_docker_daemon(ctx, references).await;
// Add extra images from references
if let Some(refs) = references {
let image_refs: FxHashSet<&String> = images.iter().map(|image| &image.reference).collect();
let extra = refs
.iter()
.filter(|&reference| !image_refs.contains(reference))
.map(|reference| Image::from_reference(reference))
.collect::<Vec<Image>>();
images.extend(extra);
}
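// `images` now holds the images retrieved from the Docker daemon plus any
// explicitly requested references that were not found locally.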
// Get remote images from other servers
let remote_updates = if !ctx.config.servers.is_empty() {
ctx.logger.debug("Fetching updates from remote servers");
get_remote_updates(ctx, &client, refresh).await
} else {
Vec::new()
};
ctx.logger.debug(format!(
"Checking {:?}",
images.iter().map(|image| &image.reference).collect_vec()
));
// Get a list of unique registries our images belong to. We are unwrapping the registry because it's guaranteed to be there.
let registries: Vec<&String> = images
.iter()
.map(|image| image.registry.as_ref().unwrap())
.collect::<Vec<&String>>()
.unique();
.map(|image| &image.parts.registry)
.unique()
.collect::<Vec<&String>>();
// Create request client. All network requests share the same client for better performance.
// This client is also configured to retry a failed request up to 3 times with exponential backoff in between.
let client = new_reqwest_client();
let client = Client::new(ctx);
// Create a map of images indexed by registry. This solution seems quite inefficient, since each iteration causes a key to be looked up. I can't find anything better at the moment.
let mut image_map: FxHashMap<&String, Vec<&Image>> = FxHashMap::default();
for image in images {
for image in &images {
image_map
.entry(image.registry.as_ref().unwrap())
.entry(&image.parts.registry)
.or_default()
.push(image);
}
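// image_map now groups the images by their registry, so each registry's token
// request below can cover all of its images at once.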
// Retrieve an authentication token (if required) for each registry.
let mut tokens: FxHashMap<&String, Option<String>> = FxHashMap::default();
let mut tokens: FxHashMap<&str, Option<String>> = FxHashMap::default();
for registry in registries {
let credentials = config.authentication.get(registry);
match check_auth(registry, config, &client).await {
let credentials = if let Some(registry_config) = ctx.config.registries.get(registry) {
&registry_config.authentication
} else {
&None
};
match check_auth(registry, ctx, &client).await {
Some(auth_url) => {
let token = get_token(
image_map.get(registry).unwrap(),
&auth_url,
&credentials,
credentials,
&client,
)
.await;
@@ -69,27 +161,41 @@ pub async fn get_updates(images: &[Image], config: &Config) -> Vec<(String, Opti
}
}
// Create a Vec to store futures so we can await them all at once.
let mut handles = Vec::new();
// Loop through images and get the latest digest for each
for image in images {
let token = tokens.get(&image.registry.as_ref().unwrap()).unwrap();
let future = get_latest_digest(image, token.as_ref(), config, &client);
handles.push(future);
ctx.logger.debug(format!("Tokens: {:?}", tokens));
let ignored_registries = ctx
.config
.registries
.iter()
.filter_map(|(registry, registry_config)| {
if registry_config.ignore {
Some(registry)
} else {
None
}
})
.collect::<Vec<&String>>();
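// Registries whose config sets `ignore` are collected here and skipped in the loop below.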
let mut handles = Vec::with_capacity(images.len());
// Loop through images and check for updates
for image in &images {
let is_ignored = ignored_registries.contains(&&image.parts.registry)
|| ctx
.config
.images
.exclude
.iter()
.any(|item| image.reference.starts_with(item));
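// An image is skipped when its registry is ignored or when its reference starts
// with any configured exclude entry, e.g. an exclude of "ghcr.io/example/" (illustrative).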
if !is_ignored {
let token = tokens.get(image.parts.registry.as_str()).unwrap();
let future = image.check(token.as_deref(), ctx, &client);
handles.push(future);
}
}
// Await all the futures
let final_images = join_all(handles).await;
let mut result: Vec<(String, Option<bool>)> = Vec::with_capacity(images.len());
final_images
.iter()
.for_each(|image| match &image.remote_digest {
Some(digest) => {
let has_update = !image.local_digests.as_ref().unwrap().contains(digest);
result.push((image.reference.clone(), Some(has_update)))
}
None => result.push((image.reference.clone(), None)),
});
result
let images = join_all(handles).await;
let mut updates: Vec<Update> = images.iter().map(|image| image.to_update()).collect();
updates.extend_from_slice(&remote_updates);
updates
}
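// A minimal sketch of how the new entry point might be driven, assuming a
// `Context` has already been constructed elsewhere (its constructor is not part
// of this diff). `&None` means "check every image the daemon reports"; `true`
// asks any configured remote Cup servers to refresh before answering.
async fn print_all_updates(ctx: &Context) {
    let updates = get_updates(&None, true, ctx).await;
    for update in updates {
        // `Update` implements Debug (it is pretty-printed with {:#?} earlier in this file).
        println!("{:#?}", update);
    }
}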