Mirror of https://github.com/armbian/imager.git, synced 2026-01-06 12:31:28 -08:00
Apply rustfmt formatting and fix Clippy warnings
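Almost every hunk below is mechanical: a `log_info!`/`log_error!` call that had grown past rustfmt's default 100-column `max_width` is rewrapped with one argument per line, `use` items are re-sorted, and a handful of Clippy suggestions are applied. Within each hunk the pre-format line generally appears first, followed by its reformatted replacement. A representative before/after, taken from the first hunk (the exact rustfmt configuration is not shown in this commit; default settings are assumed):

// Before: a single call that exceeds the default max_width of 100 columns.
log_error!("board_queries", "Images not loaded when requesting board: {}", board_slug);

// After `cargo fmt`: rustfmt breaks the macro arguments onto separate lines.
log_error!(
    "board_queries",
    "Images not loaded when requesting board: {}",
    board_slug
);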
@@ -62,7 +62,11 @@ pub async fn get_images_for_board(
let json_guard = state.images_json.lock().await;
let json = json_guard.as_ref().ok_or_else(|| {
log_error!("board_queries", "Images not loaded when requesting board: {}", board_slug);
log_error!(
"board_queries",
"Images not loaded when requesting board: {}",
board_slug
);
"Images not loaded. Call get_boards first.".to_string()
})?;
@@ -24,7 +24,12 @@ pub struct CustomImageInfo {
pub async fn check_needs_decompression(image_path: String) -> Result<bool, String> {
let path = PathBuf::from(&image_path);
let needs = needs_decompression(&path);
log_info!("custom_image", "Check decompression for {}: {}", image_path, needs);
log_info!(
"custom_image",
"Check decompression for {}: {}",
image_path,
needs
);
Ok(needs)
}

@@ -52,7 +57,11 @@ pub async fn decompress_custom_image(
match &result {
Ok(path) => {
log_info!("custom_image", "Decompression completed: {}", path.display());
log_info!(
"custom_image",
"Decompression completed: {}",
path.display()
);
}
Err(e) => {
log_error!("custom_image", "Decompression failed: {}", e);

@@ -64,9 +73,7 @@ pub async fn decompress_custom_image(
/// Select a custom image file using native file picker
#[tauri::command]
pub async fn select_custom_image(
window: tauri::Window,
) -> Result<Option<CustomImageInfo>, String> {
pub async fn select_custom_image(window: tauri::Window) -> Result<Option<CustomImageInfo>, String> {
use tauri_plugin_dialog::DialogExt;
log_info!("custom_image", "Opening file picker dialog");

@@ -84,17 +91,19 @@ pub async fn select_custom_image(
match file_path {
Some(file_path) => {
let path_buf = file_path
.as_path()
.ok_or_else(|| {
log_error!("custom_image", "Invalid path: not a valid file path");
"Invalid path: not a valid file path".to_string()
})?;
let metadata = std::fs::metadata(path_buf)
.map_err(|e| {
log_error!("custom_image", "Failed to read file info for {:?}: {}", path_buf, e);
format!("Failed to read file info: {}", e)
})?;
let path_buf = file_path.as_path().ok_or_else(|| {
log_error!("custom_image", "Invalid path: not a valid file path");
"Invalid path: not a valid file path".to_string()
})?;
let metadata = std::fs::metadata(path_buf).map_err(|e| {
log_error!(
"custom_image",
"Failed to read file info for {:?}: {}",
path_buf,
e
);
format!("Failed to read file info: {}", e)
})?;
let name = path_buf
.file_name()

@@ -102,7 +111,12 @@ pub async fn select_custom_image(
.unwrap_or("unknown")
.to_string();
log_info!("custom_image", "Selected custom image: {} ({} bytes)", name, metadata.len());
log_info!(
"custom_image",
"Selected custom image: {} ({} bytes)",
name,
metadata.len()
);
Ok(Some(CustomImageInfo {
path: path_buf.to_string_lossy().to_string(),
@@ -7,8 +7,8 @@ pub mod custom_image;
pub mod operations;
pub mod progress;
pub mod scraping;
pub mod system;
mod state;
pub mod system;

// Re-export state for use in main.rs
pub use state::AppState;
@@ -19,18 +19,31 @@ use super::state::AppState;
/// Returns true if authorized, false if user cancelled
#[tauri::command]
pub async fn request_write_authorization(device_path: String) -> Result<bool, String> {
log_info!("operations", "Requesting write authorization for device: {}", device_path);
log_info!(
"operations",
"Requesting write authorization for device: {}",
device_path
);
let result = request_authorization(&device_path);
match &result {
Ok(authorized) => {
if *authorized {
log_info!("operations", "Authorization granted for {}", device_path);
} else {
log_info!("operations", "Authorization denied/cancelled for {}", device_path);
log_info!(
"operations",
"Authorization denied/cancelled for {}",
device_path
);
}
}
Err(e) => {
log_error!("operations", "Authorization failed for {}: {}", device_path, e);
log_error!(
"operations",
"Authorization failed for {}: {}",
device_path,
e
);
}
}
result

@@ -80,7 +93,13 @@ pub async fn flash_image(
verify: bool,
state: State<'_, AppState>,
) -> Result<(), String> {
log_info!("operations", "Starting flash: {} -> {} (verify: {})", image_path, device_path, verify);
log_info!(
"operations",
"Starting flash: {} -> {} (verify: {})",
image_path,
device_path,
verify
);
let path = PathBuf::from(&image_path);
let flash_state = state.flash_state.clone();

@@ -103,7 +122,11 @@ pub async fn delete_downloaded_image(image_path: String) -> Result<(), String> {
let cache_dir = get_cache_dir(config::app::NAME);
if !path.starts_with(&cache_dir) {
log_error!("operations", "Attempted to delete file outside cache: {}", image_path);
log_error!(
"operations",
"Attempted to delete file outside cache: {}",
image_path
);
return Err("Cannot delete files outside cache directory".to_string());
}
@@ -35,9 +35,15 @@ pub async fn get_download_progress(state: State<'_, AppState>) -> Result<Downloa
let ds = &state.download_state;
let total = ds.total_bytes.load(std::sync::atomic::Ordering::SeqCst);
let downloaded = ds.downloaded_bytes.load(std::sync::atomic::Ordering::SeqCst);
let is_verifying_sha = ds.is_verifying_sha.load(std::sync::atomic::Ordering::SeqCst);
let is_decompressing = ds.is_decompressing.load(std::sync::atomic::Ordering::SeqCst);
let downloaded = ds
.downloaded_bytes
.load(std::sync::atomic::Ordering::SeqCst);
let is_verifying_sha = ds
.is_verifying_sha
.load(std::sync::atomic::Ordering::SeqCst);
let is_decompressing = ds
.is_decompressing
.load(std::sync::atomic::Ordering::SeqCst);
let progress = if total > 0 {
(downloaded as f64 / total as f64) * 100.0
@@ -53,7 +53,10 @@ fn open_url_linux(url: &str) -> Result<(), String> {
if euid == 0 {
// Running as root - need to run xdg-open as the original user
log_info!(MODULE, "Running as root, attempting to open URL as original user");
log_info!(
MODULE,
"Running as root, attempting to open URL as original user"
);
// Try to get the original user from PKEXEC_UID or SUDO_UID
let target_uid = std::env::var("PKEXEC_UID")

@@ -121,7 +124,10 @@ fn open_url_linux(url: &str) -> Result<(), String> {
}
// Fallback: try xdg-open directly (might not work but worth trying)
log_info!(MODULE, "Could not determine original user, trying xdg-open directly");
log_info!(
MODULE,
"Could not determine original user, trying xdg-open directly"
);
}
// Not running as root, or fallback - use xdg-open directly
@@ -69,7 +69,10 @@ pub fn decompress_with_system_xz(
loop {
// Check for cancellation
if state.is_cancelled.load(Ordering::SeqCst) {
log_info!(MODULE, "Decompression cancelled by user, killing xz process");
log_info!(
MODULE,
"Decompression cancelled by user, killing xz process"
);
let _ = child.kill();
let _ = child.wait();
drop(output_file);

@@ -273,10 +276,7 @@ pub fn decompress_local_file(
log_info!(MODULE, "Decompressing ZSTD format");
decompress_with_zstd(input_path, &output_path, state)
} else {
return Err(format!(
"Unsupported compression format for: {}",
filename
));
return Err(format!("Unsupported compression format for: {}", filename));
};
result?;
@@ -4,8 +4,8 @@
use std::process::Command;
use crate::utils::format_size;
use crate::log_error;
use crate::utils::format_size;
use super::types::BlockDevice;
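The import shuffle above (and the matching ones in the macOS and Windows device modules below) looks like rustfmt's default import sorting (`reorder_imports`), which orders `use` items alphabetically within each group. A sketch of the same change, assuming default rustfmt settings:

// Before:
use crate::utils::format_size;
use crate::log_error;

// After `cargo fmt`:
use crate::log_error;
use crate::utils::format_size;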
@@ -21,7 +21,11 @@ pub fn get_block_devices() -> Result<Vec<BlockDevice>, String> {
})?;
if !output.status.success() {
log_error!("devices", "lsblk command failed with status: {:?}", output.status);
log_error!(
"devices",
"lsblk command failed with status: {:?}",
output.status
);
return Err("lsblk command failed".to_string());
}

@@ -30,10 +34,11 @@ pub fn get_block_devices() -> Result<Vec<BlockDevice>, String> {
let system_disks = get_system_disks();
// Parse JSON output
let json: serde_json::Value = serde_json::from_str(&stdout)
.map_err(|e| format!("Failed to parse lsblk JSON: {}", e))?;
let json: serde_json::Value =
serde_json::from_str(&stdout).map_err(|e| format!("Failed to parse lsblk JSON: {}", e))?;
let blockdevices = json["blockdevices"].as_array()
let blockdevices = json["blockdevices"]
.as_array()
.ok_or("Invalid lsblk JSON structure")?;
for dev in blockdevices {

@@ -57,9 +62,9 @@ pub fn get_block_devices() -> Result<Vec<BlockDevice>, String> {
let dev_name = path.strip_prefix("/dev/").unwrap_or(path);
// Mark as system disk instead of skipping (consistent with macOS behavior)
let is_system = system_disks.iter().any(|sys| {
sys.starts_with(dev_name) || dev_name.starts_with(sys)
});
let is_system = system_disks
.iter()
.any(|sys| sys.starts_with(dev_name) || dev_name.starts_with(sys));
// Parse size - can be string or number in JSON
let size: u64 = match &dev["size"] {

@@ -139,7 +144,7 @@ fn get_system_disks() -> Vec<String> {
system_disks.push(pkname);
}
}
if let Some(name) = source.split('/').last() {
if let Some(name) = source.split('/').next_back() {
system_disks.push(name.to_string());
}
}
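The `.last()` to `.next_back()` change above, repeated later in `extract_filename`, follows a Clippy suggestion: `str::Split` is a double-ended iterator, so the final path segment can be taken directly from the back instead of walking the whole iterator with `last()`. A self-contained illustration (the example path is made up, not from the repository):

fn main() {
    let source = "/dev/mapper/root";
    // `last()` walks the split iterator from the front; `next_back()` takes the
    // final segment directly. Both evaluate to Some("root") here.
    assert_eq!(source.split('/').last(), source.split('/').next_back());
}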
@@ -4,14 +4,13 @@
use std::process::Command;
use crate::utils::format_size;
use crate::log_error;
use crate::utils::format_size;
use super::types::BlockDevice;
/// Get list of block devices on macOS
pub fn get_block_devices() -> Result<Vec<BlockDevice>, String> {
let output = Command::new("diskutil")
.args(["list", "-plist", "external", "physical"])
.output()

@@ -94,10 +93,7 @@ fn parse_diskutil(_plist_data: &[u8]) -> Result<Vec<BlockDevice>, String> {
/// Get the system disk identifier
fn get_system_disk() -> Option<String> {
let output = Command::new("diskutil")
.args(["info", "/"])
.output()
.ok()?;
let output = Command::new("diskutil").args(["info", "/"]).output().ok()?;
let info = String::from_utf8_lossy(&output.stdout);
for line in info.lines() {

@@ -114,7 +110,12 @@ fn get_disk_info(disk_path: &str) -> Result<BlockDevice, String> {
.args(["info", disk_path])
.output()
.map_err(|e| {
log_error!("devices", "Failed to get disk info for {}: {}", disk_path, e);
log_error!(
"devices",
"Failed to get disk info for {}: {}",
disk_path,
e
);
format!("Failed to get disk info: {}", e)
})?;
@@ -2,14 +2,14 @@
//!
//! Uses PowerShell Get-Disk to enumerate block devices.
use std::process::Command;
use std::collections::HashMap;
use std::process::Command;
#[cfg(target_os = "windows")]
use std::os::windows::process::CommandExt;
use crate::utils::format_size;
use crate::log_error;
use crate::utils::format_size;
use super::types::BlockDevice;

@@ -52,7 +52,11 @@ pub fn get_block_devices() -> Result<Vec<BlockDevice>, String> {
})?;
if !output.status.success() {
log_error!("devices", "PowerShell command failed with status: {:?}", output.status);
log_error!(
"devices",
"PowerShell command failed with status: {:?}",
output.status
);
return Err("PowerShell command failed".to_string());
}

@@ -113,7 +117,9 @@ pub fn get_block_devices() -> Result<Vec<BlockDevice>, String> {
let is_removable = bus_type_str == "USB" || bus_type_str == "SD";
// Mark as system disk (consistent with macOS/Linux behavior)
let is_system = system_disk.map(|sys_num| number == sys_num).unwrap_or(false);
let is_system = system_disk
.map(|sys_num| number == sys_num)
.unwrap_or(false);
// Get drive letters from our pre-built map
let name = match drive_letters_map.get(&number) {
@@ -62,7 +62,7 @@ fn extract_filename(url: &str) -> Result<&str, String> {
let url_path = url.split('?').next().unwrap_or(url);
url_path
.split('/')
.last()
.next_back()
.filter(|s| !s.is_empty())
.ok_or_else(|| "Invalid URL: no filename".to_string())
}

@@ -78,7 +78,10 @@ async fn fetch_expected_sha(client: &Client, sha_url: &str) -> Result<String, St
.map_err(|e| format!("Failed to fetch SHA: {}", e))?;
if !response.status().is_success() {
return Err(format!("SHA fetch failed with status: {}", response.status()));
return Err(format!(
"SHA fetch failed with status: {}",
response.status()
));
}
let content = response

@@ -207,14 +210,10 @@ pub async fn download_image(
// Start download
log_info!(MODULE, "Starting download...");
let response = client
.get(url)
.send()
.await
.map_err(|e| {
log_error!(MODULE, "Failed to start download: {}", e);
format!("Failed to start download: {}", e)
})?;
let response = client.get(url).send().await.map_err(|e| {
log_error!(MODULE, "Failed to start download: {}", e);
format!("Failed to start download: {}", e)
})?;
if !response.status().is_success() {
log_error!(MODULE, "Download failed with status: {}", response.status());
@@ -124,11 +124,7 @@ pub async fn flash_image(
let mut device = match open_device_udisks2(device_path).await {
Ok(file) => file,
Err(e) => {
log_info!(
MODULE,
"UDisks2 open failed ({}), trying direct open...",
e
);
log_info!(MODULE, "UDisks2 open failed ({}), trying direct open...", e);
open_device_direct(device_path)?
}
};

@@ -55,7 +55,11 @@ pub fn request_authorization(device_path: &str) -> Result<bool, String> {
let mut auth_ref: AuthorizationRef = std::ptr::null_mut();
log_info!(MODULE, "Requesting authorization for device: {}", raw_device);
log_info!(
MODULE,
"Requesting authorization for device: {}",
raw_device
);
log_debug!(MODULE, "Right name: {}", right_name);
let status = AuthorizationCreate(

@@ -155,10 +155,7 @@ pub fn open_device_with_saved_auth(device_path: &str) -> Result<OpenDeviceResult
libc::close(sock_pair[0]);
if size <= 0 {
return Err(format!(
"Failed to receive file descriptor (size={})",
size
));
return Err(format!("Failed to receive file descriptor (size={})", size));
}
if libc::WIFEXITED(status) && libc::WEXITSTATUS(status) != 0 {

@@ -279,9 +276,16 @@ pub async fn flash_image(
}
// Use inner function to do the actual work, then always free auth at the end
let result =
do_flash_work(image_path, device_path, &mut device, device_fd, image_size, state, verify)
.await;
let result = do_flash_work(
image_path,
device_path,
&mut device,
device_fd,
image_size,
state,
verify,
)
.await;
drop(device);
@@ -7,10 +7,10 @@
mod verify;
#[cfg(target_os = "macos")]
mod macos;
#[cfg(target_os = "linux")]
mod linux;
#[cfg(target_os = "macos")]
mod macos;
#[cfg(target_os = "windows")]
mod windows;

@@ -51,18 +51,18 @@ impl FlashState {
}
// Re-export the platform-specific flash_image function
#[cfg(target_os = "macos")]
pub use macos::flash_image;
#[cfg(target_os = "linux")]
pub use linux::flash_image;
#[cfg(target_os = "macos")]
pub use macos::flash_image;
#[cfg(target_os = "windows")]
pub use windows::flash_image;
// Re-export authorization functions
#[cfg(target_os = "macos")]
pub use macos::request_authorization;
#[cfg(target_os = "linux")]
pub use linux::request_authorization;
#[cfg(target_os = "macos")]
pub use macos::request_authorization;
/// Request authorization before flashing (platform-specific)
/// On macOS: Shows Touch ID / password dialog
@@ -35,8 +35,8 @@ pub fn verify_data<R: Read>(
state.is_verifying.store(true, Ordering::SeqCst);
state.verified_bytes.store(0, Ordering::SeqCst);
let mut image_file =
File::open(image_path).map_err(|e| format!("Failed to open image for verification: {}", e))?;
let mut image_file = File::open(image_path)
.map_err(|e| format!("Failed to open image for verification: {}", e))?;
let chunk_size = config::flash::CHUNK_SIZE;
let mut image_buffer = vec![0u8; chunk_size];

@@ -95,7 +95,11 @@ pub fn verify_data<R: Read>(
}
if image_buffer[..image_read] != device_buffer[..device_read] {
log_error!(MODULE, "Verification failed: data mismatch at byte {}", verified);
log_error!(
MODULE,
"Verification failed: data mismatch at byte {}",
verified
);
return Err(format!(
"Verification failed: data mismatch at byte {}",
verified

@@ -106,7 +110,7 @@ pub fn verify_data<R: Read>(
state.verified_bytes.store(verified, Ordering::SeqCst);
// Log progress at configured interval
let current_percent = (verified * 100 / image_size) as u64;
let current_percent = verified * 100 / image_size;
if current_percent >= last_logged_percent + config::flash::LOG_INTERVAL_PERCENT {
log_info!(
MODULE,

@@ -123,9 +127,6 @@ pub fn verify_data<R: Read>(
#[cfg(test)]
mod tests {
use super::*;
use std::io::Cursor;
#[test]
fn test_verify_matching_data() {
// This test requires a temp file, which we'll skip for now
@@ -35,7 +35,12 @@ pub async fn flash_image(
) -> Result<(), String> {
state.reset();
log_info!(MODULE, "Starting flash: {} -> {}", image_path.display(), device_path);
log_info!(
MODULE,
"Starting flash: {} -> {}",
image_path.display(),
device_path
);
let image_size = std::fs::metadata(image_path)
.map_err(|e| format!("Failed to get image size: {}", e))?

@@ -191,8 +196,8 @@ fn lock_disk_volumes(disk_number: u32) -> Result<VolumeLocks, String> {
CloseHandle, GetLastError, GENERIC_READ, GENERIC_WRITE, INVALID_HANDLE_VALUE, MAX_PATH,
};
use windows_sys::Win32::Storage::FileSystem::{
CreateFileW, FILE_SHARE_READ, FILE_SHARE_WRITE, FindFirstVolumeW, FindNextVolumeW,
FindVolumeClose, OPEN_EXISTING,
CreateFileW, FindFirstVolumeW, FindNextVolumeW, FindVolumeClose, FILE_SHARE_READ,
FILE_SHARE_WRITE, OPEN_EXISTING,
};
use windows_sys::Win32::System::Ioctl::{FSCTL_DISMOUNT_VOLUME, FSCTL_LOCK_VOLUME};
use windows_sys::Win32::System::IO::DeviceIoControl;

@@ -221,18 +226,31 @@ fn lock_disk_volumes(disk_number: u32) -> Result<VolumeLocks, String> {
let find_handle = FindFirstVolumeW(volume_name.as_mut_ptr(), MAX_PATH);
if find_handle.is_null() {
log_warn!(MODULE, "FindFirstVolumeW failed: {}", GetLastError());
return Ok(VolumeLocks { handles: locked_handles });
return Ok(VolumeLocks {
handles: locked_handles,
});
}
loop {
let vol_len = volume_name.iter().position(|&c| c == 0).unwrap_or(volume_name.len());
let vol_len = volume_name
.iter()
.position(|&c| c == 0)
.unwrap_or(volume_name.len());
let vol_str = String::from_utf16_lossy(&volume_name[..vol_len]);
// Remove trailing backslash for CreateFile
let vol_path: Vec<u16> = if vol_len > 0 && volume_name[vol_len - 1] == b'\\' as u16 {
volume_name[..vol_len - 1].iter().copied().chain(std::iter::once(0)).collect()
volume_name[..vol_len - 1]
.iter()
.copied()
.chain(std::iter::once(0))
.collect()
} else {
volume_name[..vol_len].iter().copied().chain(std::iter::once(0)).collect()
volume_name[..vol_len]
.iter()
.copied()
.chain(std::iter::once(0))
.collect()
};
let vol_handle = CreateFileW(

@@ -322,7 +340,9 @@ fn lock_disk_volumes(disk_number: u32) -> Result<VolumeLocks, String> {
}
log_info!(MODULE, "Holding {} volume lock(s)", locked_handles.len());
Ok(VolumeLocks { handles: locked_handles })
Ok(VolumeLocks {
handles: locked_handles,
})
}
#[cfg(not(target_os = "windows"))]
@@ -368,8 +388,8 @@ fn verify_with_sector_alignment(
state.is_verifying.store(true, Ordering::SeqCst);
state.verified_bytes.store(0, Ordering::SeqCst);
let mut image_file = std::fs::File::open(image_path)
.map_err(|e| format!("Failed to open image: {}", e))?;
let mut image_file =
std::fs::File::open(image_path).map_err(|e| format!("Failed to open image: {}", e))?;
let image_size = state.total_bytes.load(Ordering::SeqCst);

@@ -384,7 +404,12 @@ fn verify_with_sector_alignment(
let chunk_size = config::flash::CHUNK_SIZE;
let aligned_chunk_size = (chunk_size / sector_size) * sector_size;
log_debug!(MODULE, "Sector size: {} bytes, chunk size: {} bytes", sector_size, aligned_chunk_size);
log_debug!(
MODULE,
"Sector size: {} bytes, chunk size: {} bytes",
sector_size,
aligned_chunk_size
);
let mut image_buffer = vec![0u8; aligned_chunk_size];
let mut device_buffer = vec![0u8; aligned_chunk_size];

@@ -413,7 +438,13 @@
while total_read < device_read_size {
let n = device
.read(&mut device_buffer[total_read..device_read_size])
.map_err(|e| format!("Failed to read device at byte {}: {}", verified + total_read as u64, e))?;
.map_err(|e| {
format!(
"Failed to read device at byte {}: {}",
verified + total_read as u64,
e
)
})?;
if n == 0 {
break;
}

@@ -495,7 +526,11 @@ fn get_device_sector_size(device: &std::fs::File) -> Result<usize, String> {
let sector_size = geometry.bytes_per_sector as usize;
if sector_size < 512 || sector_size > 8192 || (sector_size & (sector_size - 1)) != 0 {
log_warn!(MODULE, "Invalid sector size {}, using default 512", sector_size);
log_warn!(
MODULE,
"Invalid sector size {}, using default 512",
sector_size
);
return Ok(512);
}

@@ -506,7 +541,9 @@ fn get_device_sector_size(device: &std::fs::File) -> Result<usize, String> {
/// Opens device for writing with write-through caching.
#[cfg(target_os = "windows")]
fn open_device_for_write(device_path: &str) -> Result<std::fs::File, String> {
use windows_sys::Win32::Foundation::{GetLastError, GENERIC_READ, GENERIC_WRITE, INVALID_HANDLE_VALUE};
use windows_sys::Win32::Foundation::{
GetLastError, GENERIC_READ, GENERIC_WRITE, INVALID_HANDLE_VALUE,
};
use windows_sys::Win32::Storage::FileSystem::{
CreateFileW, FILE_SHARE_READ, FILE_SHARE_WRITE, OPEN_EXISTING,
};

@@ -582,7 +619,10 @@ fn open_device_for_read(device_path: &str) -> Result<std::fs::File, String> {
if handle == INVALID_HANDLE_VALUE || handle.is_null() {
let error_code = GetLastError();
return Err(format!("Failed to open {} for reading: error {}", device_path, error_code));
return Err(format!(
"Failed to open {} for reading: error {}",
device_path, error_code
));
}
log_debug!(MODULE, "Device opened for reading");
@@ -145,16 +145,22 @@ pub fn get_unique_boards(images: &[ArmbianImage]) -> Vec<BoardInfo> {
.map(|(slug, data)| {
let name = data.board_name.unwrap_or(data.original_slug);
let has_platinum_support = data.platinum_support_until
let has_platinum_support = data
.platinum_support_until
.as_ref()
.and_then(|until| chrono::NaiveDate::parse_from_str(until, "%Y-%m-%d").ok())
.map(|exp_date| exp_date >= today)
.unwrap_or(false);
let has_logo = data.vendor_logo.as_ref().map(|l| !l.is_empty()).unwrap_or(false);
let has_logo = data
.vendor_logo
.as_ref()
.map(|l| !l.is_empty())
.unwrap_or(false);
let (vendor_id, vendor_display, vendor_logo) = if has_logo {
let id = data.vendor.unwrap_or_else(|| "other".to_string());
let display = data.vendor_name
let display = data
.vendor_name
.filter(|n| !n.is_empty())
.unwrap_or_else(|| capitalize_vendor(&id));
(id, display, data.vendor_logo)

@@ -163,9 +169,8 @@ pub fn get_unique_boards(images: &[ArmbianImage]) -> Vec<BoardInfo> {
};
// Community is shown only if no standard or platinum support
let has_community_support = data.has_community_support
&& !data.has_standard_support
&& !has_platinum_support;
let has_community_support =
data.has_community_support && !data.has_standard_support && !has_platinum_support;
// EOS is shown only if no other support level
let has_eos_support = data.has_eos_support

@@ -302,7 +307,11 @@ pub fn filter_images_for_board(
promoted: img.promoted.as_deref() == Some("true"),
file_url: img.file_url.clone().unwrap_or_default(),
file_url_sha: img.file_url_sha.clone(),
file_size: img.file_size.as_ref().and_then(|s| s.parse().ok()).unwrap_or(0),
file_size: img
.file_size
.as_ref()
.and_then(|s| s.parse().ok())
.unwrap_or(0),
download_repository: img.download_repository.clone().unwrap_or_default(),
})
.collect();
@@ -15,22 +15,21 @@ use crate::{log_error, log_info};
/// Fetch the all-images.json from Armbian
pub async fn fetch_all_images() -> Result<serde_json::Value, String> {
log_info!("images", "Fetching all images from {}", config::urls::ALL_IMAGES);
log_info!(
"images",
"Fetching all images from {}",
config::urls::ALL_IMAGES
);
let response = reqwest::get(config::urls::ALL_IMAGES)
.await
.map_err(|e| {
log_error!("images", "Failed to fetch images: {}", e);
format!("Failed to fetch images: {}", e)
})?;
let response = reqwest::get(config::urls::ALL_IMAGES).await.map_err(|e| {
log_error!("images", "Failed to fetch images: {}", e);
format!("Failed to fetch images: {}", e)
})?;
let json: serde_json::Value = response
.json()
.await
.map_err(|e| {
log_error!("images", "Failed to parse JSON response: {}", e);
format!("Failed to parse JSON: {}", e)
})?;
let json: serde_json::Value = response.json().await.map_err(|e| {
log_error!("images", "Failed to parse JSON response: {}", e);
format!("Failed to parse JSON: {}", e)
})?;
log_info!("images", "Successfully fetched images data");
Ok(json)
@@ -34,7 +34,6 @@ impl LogLevel {
LogLevel::Error => "ERROR",
}
}
}

/// Logger configuration

@@ -92,12 +91,7 @@ impl Logger {
let log_filename = format!("armbian-imager_{}.log", timestamp);
let log_path = log_dir.join(&log_filename);
match OpenOptions::new()
.create(true)
.write(true)
.append(true)
.open(&log_path)
{
match OpenOptions::new().create(true).append(true).open(&log_path) {
Ok(file) => (Some(file), Some(log_path)),
Err(e) => {
eprintln!("Failed to create log file: {}", e);
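Dropping `.write(true)` from the `OpenOptions` chain above is a Clippy fix rather than a formatting one: `.append(true)` already implies write access, so the extra call is redundant (Clippy's `ineffective_open_options` lint). A minimal sketch with a placeholder file name:

use std::fs::OpenOptions;

fn main() -> std::io::Result<()> {
    // `.append(true)` implies write access, so `.write(true)` adds nothing.
    // "example.log" is a placeholder, not a path used by the project.
    let _file = OpenOptions::new().create(true).append(true).open("example.log")?;
    Ok(())
}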
@@ -266,9 +260,7 @@ pub fn cleanup_old_logs(keep_count: usize) -> Result<usize, String> {
let mut log_files: Vec<_> = fs::read_dir(&log_dir)
.map_err(|e| format!("Failed to read log directory: {}", e))?
.filter_map(|entry| entry.ok())
.filter(|entry| {
entry.path().extension().map_or(false, |ext| ext == "log")
})
.filter(|entry| entry.path().extension().is_some_and(|ext| ext == "log"))
.collect();
// Sort by modification time (newest first)
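Rewriting `map_or(false, |ext| ext == "log")` as `is_some_and(|ext| ext == "log")` above is likewise a Clippy suggestion: for an `Option`, `is_some_and` expresses "is Some and the predicate holds" directly, without routing through a `false` default. A standalone equivalence check (the values here are illustrative):

fn main() {
    let ext: Option<&str> = Some("log");
    // The two forms are equivalent; `is_some_and` states the intent directly.
    assert_eq!(ext.map_or(false, |e| e == "log"), ext.is_some_and(|e| e == "log"));
}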
Some files were not shown because too many files have changed in this diff.