Implement file browser & web UI components

This commit is contained in:
Mathias Wagner
2025-09-10 13:47:19 +02:00
parent 0a16e46372
commit 7ffd64049a
18 changed files with 2150 additions and 12 deletions

View File

@@ -0,0 +1,365 @@
use crate::sync::storage::Storage;
use crate::sync::meta::{MetaObj, EntryType};
use crate::sync::protocol::MetaType;
use crate::utils::{error::*, models::*, DbPool};
use serde::Serialize;
use axum::response::Response;
use axum::body::Body;
use axum::http::{HeaderMap, HeaderValue};
/// One entry in a directory listing, serialized for the web UI.
#[derive(Debug, Serialize)]
pub struct FileSystemEntry {
    /// Entry name within its parent directory.
    pub name: String,
    pub entry_type: String, // "file", "dir", "symlink"
    /// Size in bytes; `None` for non-files or when file metadata is unavailable.
    pub size_bytes: Option<u64>,
    /// Hex-encoded 32-byte metadata hash of the entry's target object.
    pub meta_hash: String,
}
/// A listed directory: its display path plus sorted child entries.
#[derive(Debug, Serialize)]
pub struct DirectoryListing {
    /// Display path of this directory ("/" for a partition root, otherwise
    /// the hex hash the caller navigated by).
    pub path: String,
    pub entries: Vec<FileSystemEntry>,
    /// Currently always `None` — parent tracking is not implemented.
    pub parent_hash: Option<String>,
}
/// File metadata returned without downloading the file's content.
#[derive(Debug, Serialize)]
pub struct FileMetadata {
    /// Synthesized name (no original name is stored), e.g. "file_ab12cd34.bin".
    pub name: String,
    pub size_bytes: u64,
    /// MIME type guessed from the file extension only.
    pub mime_type: String,
    /// Hex-encoded 32-byte metadata hash of the file object.
    pub meta_hash: String,
}
/// Stateless controller for browsing and downloading files from snapshots.
pub struct FilesController;
impl FilesController {
    /// List the root directory of a partition inside a snapshot.
    ///
    /// Resolves snapshot -> disk -> partition metadata, then lists the
    /// partition's root directory. Fails with a not-found error if the user
    /// does not own the machine or any metadata object is missing.
    pub async fn list_partition_root(
        pool: &DbPool,
        machine_id: i64,
        snapshot_id: String,
        partition_index: usize,
        user: &User,
    ) -> AppResult<DirectoryListing> {
        Self::verify_machine_access(pool, machine_id, user).await?;
        let storage = Storage::new("./data");
        let partition_hash =
            Self::get_partition_hash(&storage, machine_id, &snapshot_id, partition_index).await?;
        let partition_meta = storage
            .load_meta(MetaType::Partition, &partition_hash)
            .await
            .map_err(|_| AppError::NotFoundError("Partition metadata not found".to_string()))?
            .ok_or_else(|| AppError::NotFoundError("Partition metadata not found".to_string()))?;
        match partition_meta {
            MetaObj::Partition(partition_obj) => {
                Self::list_directory_by_hash(&storage, &partition_obj.root_dir_hash, "/".to_string())
                    .await
            }
            _ => Err(AppError::ValidationError("Invalid partition metadata".to_string())),
        }
    }

    /// List directory contents by hex-encoded 32-byte directory hash.
    ///
    /// `_snapshot_id` and `_partition_index` are accepted for route symmetry
    /// but unused: the directory hash alone addresses the content.
    pub async fn list_directory(
        pool: &DbPool,
        machine_id: i64,
        _snapshot_id: String,
        _partition_index: usize,
        dir_hash: String,
        user: &User,
    ) -> AppResult<DirectoryListing> {
        Self::verify_machine_access(pool, machine_id, user).await?;
        let storage = Storage::new("./data");
        let hash = Self::decode_hash(&dir_hash, "Directory")?;
        Self::list_directory_by_hash(&storage, &hash, dir_hash).await
    }

    /// Download a file's full content by its hex-encoded file hash.
    ///
    /// NOTE(review): the whole file is reassembled in memory before the
    /// response is built; consider a streaming body for large files.
    pub async fn download_file(
        pool: &DbPool,
        machine_id: i64,
        _snapshot_id: String,
        _partition_index: usize,
        file_hash: String,
        filename: Option<String>,
        user: &User,
    ) -> AppResult<Response<Body>> {
        Self::verify_machine_access(pool, machine_id, user).await?;
        let storage = Storage::new("./data");
        let hash = Self::decode_hash(&file_hash, "File")?;
        let file_meta = storage
            .load_meta(MetaType::File, &hash)
            .await
            .map_err(|_| AppError::NotFoundError("File metadata not found".to_string()))?
            .ok_or_else(|| AppError::NotFoundError("File metadata not found".to_string()))?;
        let file_obj = match file_meta {
            MetaObj::File(obj) => obj,
            _ => return Err(AppError::ValidationError("Invalid file metadata".to_string())),
        };
        // Reassemble the file from its content-addressed chunks, in order.
        let mut file_content = Vec::new();
        for chunk_hash in &file_obj.chunk_hashes {
            let chunk_data = storage
                .load_chunk(chunk_hash)
                .await
                .map_err(|_| {
                    AppError::NotFoundError(format!("Chunk {} not found", hex::encode(chunk_hash)))
                })?
                .ok_or_else(|| {
                    AppError::NotFoundError(format!("Chunk {} not found", hex::encode(chunk_hash)))
                })?;
            file_content.extend_from_slice(&chunk_data);
        }
        // Caller-provided name, or a generic one derived from the hash prefix.
        // `file_hash` was validated as 64 hex chars above, so [..8] cannot panic.
        let filename = filename.unwrap_or_else(|| format!("file_{}.bin", &file_hash[..8]));
        let mime_type = Self::detect_mime_type(&filename, &file_content);
        let mut headers = HeaderMap::new();
        headers.insert(
            "content-type",
            HeaderValue::from_str(&mime_type)
                .unwrap_or_else(|_| HeaderValue::from_static("application/octet-stream")),
        );
        // The filename originates from an untrusted query parameter: strip
        // characters that would break the quoted-string header syntax.
        let safe_name: String = filename
            .chars()
            .map(|c| if c == '"' || c == '\\' || c.is_control() { '_' } else { c })
            .collect();
        headers.insert(
            "content-disposition",
            HeaderValue::from_str(&format!("attachment; filename=\"{}\"", safe_name))
                .unwrap_or_else(|_| HeaderValue::from_static("attachment")),
        );
        // A usize is always a valid header value; no fallible conversion needed.
        headers.insert("content-length", HeaderValue::from(file_content.len()));
        let mut response = Response::new(Body::from(file_content));
        *response.headers_mut() = headers;
        Ok(response)
    }

    /// Fetch file metadata (size, guessed MIME type) without reading content.
    ///
    /// `_snapshot_id` and `_partition_index` are accepted for route symmetry
    /// but unused: the file hash alone addresses the metadata.
    pub async fn get_file_metadata(
        pool: &DbPool,
        machine_id: i64,
        _snapshot_id: String,
        _partition_index: usize,
        file_hash: String,
        user: &User,
    ) -> AppResult<FileMetadata> {
        Self::verify_machine_access(pool, machine_id, user).await?;
        let storage = Storage::new("./data");
        let hash = Self::decode_hash(&file_hash, "File")?;
        let file_meta = storage
            .load_meta(MetaType::File, &hash)
            .await
            .map_err(|_| AppError::NotFoundError("File metadata not found".to_string()))?
            .ok_or_else(|| AppError::NotFoundError("File metadata not found".to_string()))?;
        match file_meta {
            MetaObj::File(file_obj) => {
                // No original name is stored; synthesize one from the hash prefix.
                let filename = format!("file_{}.bin", &file_hash[..8]);
                let mime_type = Self::detect_mime_type(&filename, &[]);
                Ok(FileMetadata {
                    name: filename,
                    size_bytes: file_obj.size,
                    mime_type,
                    meta_hash: file_hash,
                })
            }
            _ => Err(AppError::ValidationError("Invalid file metadata".to_string())),
        }
    }

    // ----- Helper methods -----

    /// Decode a hex string into a fixed 32-byte hash with uniform errors.
    ///
    /// `what` is a capitalized noun used in error messages ("File",
    /// "Directory"); the "invalid format" message lowercases it to match the
    /// original wording.
    fn decode_hash(hash_hex: &str, what: &str) -> AppResult<[u8; 32]> {
        let bytes = hex::decode(hash_hex).map_err(|_| {
            AppError::ValidationError(format!("Invalid {} hash format", what.to_ascii_lowercase()))
        })?;
        <[u8; 32]>::try_from(bytes.as_slice())
            .map_err(|_| AppError::ValidationError(format!("{} hash must be 32 bytes", what)))
    }

    /// Ensure the machine exists and belongs to `user`.
    ///
    /// Deliberately returns the same not-found error for "no such machine"
    /// and "not your machine" so ownership cannot be probed.
    async fn verify_machine_access(pool: &DbPool, machine_id: i64, user: &User) -> AppResult<()> {
        let machine = sqlx::query!(
            "SELECT id, user_id FROM machines WHERE id = ? AND user_id = ?",
            machine_id,
            user.id
        )
        .fetch_optional(pool)
        .await
        .map_err(|e| AppError::DatabaseError(e.to_string()))?;
        if machine.is_none() {
            return Err(AppError::NotFoundError(
                "Machine not found or access denied".to_string(),
            ));
        }
        Ok(())
    }

    /// Resolve a partition hash: snapshot ref -> snapshot -> first disk ->
    /// partition at `partition_index`.
    ///
    /// NOTE(review): only the first disk is considered; multi-disk snapshots
    /// are not yet addressable here.
    async fn get_partition_hash(
        storage: &Storage,
        machine_id: i64,
        snapshot_id: &str,
        partition_index: usize,
    ) -> AppResult<[u8; 32]> {
        let (snapshot_hash, _) = storage
            .load_snapshot_ref(machine_id, snapshot_id)
            .await
            .map_err(|_| AppError::NotFoundError("Snapshot not found".to_string()))?
            .ok_or_else(|| AppError::NotFoundError("Snapshot not found".to_string()))?;
        let snapshot_meta = storage
            .load_meta(MetaType::Snapshot, &snapshot_hash)
            .await
            .map_err(|_| AppError::NotFoundError("Snapshot metadata not found".to_string()))?
            .ok_or_else(|| AppError::NotFoundError("Snapshot metadata not found".to_string()))?;
        let snapshot_obj = match snapshot_meta {
            MetaObj::Snapshot(obj) => obj,
            _ => return Err(AppError::ValidationError("Invalid snapshot metadata".to_string())),
        };
        if snapshot_obj.disk_hashes.is_empty() {
            return Err(AppError::NotFoundError("No disks in snapshot".to_string()));
        }
        let disk_hash = snapshot_obj.disk_hashes[0];
        let disk_meta = storage
            .load_meta(MetaType::Disk, &disk_hash)
            .await
            .map_err(|_| AppError::NotFoundError("Disk metadata not found".to_string()))?
            .ok_or_else(|| AppError::NotFoundError("Disk metadata not found".to_string()))?;
        match disk_meta {
            MetaObj::Disk(disk_obj) => {
                if partition_index >= disk_obj.partition_hashes.len() {
                    return Err(AppError::NotFoundError(
                        "Partition index out of range".to_string(),
                    ));
                }
                Ok(disk_obj.partition_hashes[partition_index])
            }
            _ => Err(AppError::ValidationError("Invalid disk metadata".to_string())),
        }
    }

    /// Load a directory object and convert its entries to API types,
    /// directories first, then files, each group alphabetical.
    async fn list_directory_by_hash(
        storage: &Storage,
        dir_hash: &[u8; 32],
        path: String,
    ) -> AppResult<DirectoryListing> {
        let dir_meta = storage
            .load_meta(MetaType::Dir, dir_hash)
            .await
            .map_err(|_| AppError::NotFoundError("Directory metadata not found".to_string()))?
            .ok_or_else(|| AppError::NotFoundError("Directory metadata not found".to_string()))?;
        let dir_obj = match dir_meta {
            MetaObj::Dir(obj) => obj,
            _ => return Err(AppError::ValidationError("Invalid directory metadata".to_string())),
        };
        let mut entries = Vec::with_capacity(dir_obj.entries.len());
        for entry in dir_obj.entries {
            let entry_type_str = match entry.entry_type {
                EntryType::File => "file",
                EntryType::Dir => "dir",
                EntryType::Symlink => "symlink",
            };
            // Sizes require a second metadata lookup; missing/invalid file
            // metadata degrades to `None` rather than failing the listing.
            let size_bytes = if entry.entry_type == EntryType::File {
                match storage.load_meta(MetaType::File, &entry.target_meta_hash).await {
                    Ok(Some(MetaObj::File(file_obj))) => Some(file_obj.size),
                    _ => None,
                }
            } else {
                None
            };
            entries.push(FileSystemEntry {
                name: entry.name,
                entry_type: entry_type_str.to_string(),
                size_bytes,
                meta_hash: hex::encode(entry.target_meta_hash),
            });
        }
        // Sort entries: directories first, then files, both alphabetically.
        entries.sort_by(|a, b| match (a.entry_type.as_str(), b.entry_type.as_str()) {
            ("dir", "file") => std::cmp::Ordering::Less,
            ("file", "dir") => std::cmp::Ordering::Greater,
            _ => a.name.cmp(&b.name),
        });
        Ok(DirectoryListing {
            path,
            entries,
            parent_hash: None, // TODO: Implement parent tracking if needed
        })
    }

    /// Guess a MIME type from the file extension only; `_content` is
    /// currently unused (no magic-byte sniffing).
    ///
    /// NOTE(review): "readme" only matches a literal ".readme" extension —
    /// extensionless README files fall through to octet-stream.
    fn detect_mime_type(filename: &str, _content: &[u8]) -> String {
        let extension = std::path::Path::new(filename)
            .extension()
            .and_then(|ext| ext.to_str())
            .unwrap_or("")
            .to_lowercase();
        match extension.as_str() {
            "txt" | "md" | "readme" => "text/plain",
            "html" | "htm" => "text/html",
            "css" => "text/css",
            "js" => "application/javascript",
            "json" => "application/json",
            "xml" => "application/xml",
            "pdf" => "application/pdf",
            "zip" => "application/zip",
            "tar" => "application/x-tar",
            "gz" => "application/gzip",
            "jpg" | "jpeg" => "image/jpeg",
            "png" => "image/png",
            "gif" => "image/gif",
            "svg" => "image/svg+xml",
            "mp4" => "video/mp4",
            "mp3" => "audio/mpeg",
            "wav" => "audio/wav",
            "exe" | "dll" => "application/x-msdownload",
            "so" => "application/x-sharedlib",
            "deb" => "application/vnd.debian.binary-package",
            "rpm" => "application/x-rpm",
            _ => "application/octet-stream",
        }
        .to_string()
    }
}

View File

@@ -2,3 +2,4 @@ pub mod auth;
// Controller modules, kept in alphabetical order.
pub mod files;
pub mod machines;
pub mod snapshots;
pub mod users;

View File

@@ -8,7 +8,7 @@ use axum::{
routing::{delete, get, post, put},
Router,
};
use routes::{accounts, admin, auth, config, machines, setup, snapshots};
use routes::{accounts, admin, auth, config, machines, setup, snapshots, files};
use std::path::Path;
use tokio::signal;
use tower_http::{
@@ -44,6 +44,10 @@ async fn main() -> Result<()> {
.route("/machines/{id}", delete(machines::delete_machine))
.route("/machines/{id}/snapshots", get(snapshots::get_machine_snapshots))
.route("/machines/{machine_id}/snapshots/{snapshot_id}", get(snapshots::get_snapshot_details))
.route("/machines/{machine_id}/snapshots/{snapshot_id}/partitions/{partition_index}/files", get(files::list_partition_root))
.route("/machines/{machine_id}/snapshots/{snapshot_id}/partitions/{partition_index}/files/{dir_hash}", get(files::list_directory))
.route("/machines/{machine_id}/snapshots/{snapshot_id}/partitions/{partition_index}/download/{file_hash}", get(files::download_file))
.route("/machines/{machine_id}/snapshots/{snapshot_id}/partitions/{partition_index}/metadata/{file_hash}", get(files::get_file_metadata))
.layer(CorsLayer::permissive())
.with_state(pool);

View File

@@ -0,0 +1,77 @@
use axum::{extract::{Path, Query, State}, Json, response::Response};
use axum::body::Body;
use serde::Deserialize;
use crate::controllers::files::{FilesController, DirectoryListing, FileMetadata};
use crate::utils::{auth::AuthUser, error::AppResult, DbPool};
/// Query parameters accepted by the download endpoint.
#[derive(Deserialize)]
pub struct DownloadQuery {
    // Optional client-suggested filename for the Content-Disposition header.
    filename: Option<String>,
}
pub async fn list_partition_root(
State(pool): State<DbPool>,
Path((machine_id, snapshot_id, partition_index)): Path<(i64, String, usize)>,
auth_user: AuthUser,
) -> AppResult<Json<DirectoryListing>> {
let listing = FilesController::list_partition_root(
&pool,
machine_id,
snapshot_id,
partition_index,
&auth_user.user,
).await?;
Ok(Json(listing))
}
pub async fn list_directory(
State(pool): State<DbPool>,
Path((machine_id, snapshot_id, partition_index, dir_hash)): Path<(i64, String, usize, String)>,
auth_user: AuthUser,
) -> AppResult<Json<DirectoryListing>> {
let listing = FilesController::list_directory(
&pool,
machine_id,
snapshot_id,
partition_index,
dir_hash,
&auth_user.user,
).await?;
Ok(Json(listing))
}
pub async fn download_file(
State(pool): State<DbPool>,
Path((machine_id, snapshot_id, partition_index, file_hash)): Path<(i64, String, usize, String)>,
Query(query): Query<DownloadQuery>,
auth_user: AuthUser,
) -> AppResult<Response<Body>> {
FilesController::download_file(
&pool,
machine_id,
snapshot_id,
partition_index,
file_hash,
query.filename,
&auth_user.user,
).await
}
pub async fn get_file_metadata(
State(pool): State<DbPool>,
Path((machine_id, snapshot_id, partition_index, file_hash)): Path<(i64, String, usize, String)>,
auth_user: AuthUser,
) -> AppResult<Json<FileMetadata>> {
let metadata = FilesController::get_file_metadata(
&pool,
machine_id,
snapshot_id,
partition_index,
file_hash,
&auth_user.user,
).await?;
Ok(Json(metadata))
}

View File

@@ -1,7 +1,8 @@
pub mod accounts;
pub mod admin;
pub mod auth;
pub mod config;
pub mod machines;
pub mod setup;
pub mod accounts;
pub mod snapshots;
pub mod files;

View File

@@ -141,9 +141,9 @@ impl ConnectionHandler {
// Read message header
let header = self.read_header().await?;
// Read payload
// Read payload with appropriate size limit based on command type
let payload = if header.payload_len > 0 {
self.read_payload(header.payload_len).await?
self.read_payload(header.cmd, header.payload_len).await?
} else {
Bytes::new()
};
@@ -184,9 +184,15 @@ impl ConnectionHandler {
.context("Failed to parse message header")
}
/// Read message payload
async fn read_payload(&mut self, len: u32) -> Result<Bytes> {
if len as usize > self.config.meta_size_limit {
/// Read message payload with appropriate size limit based on command type
async fn read_payload(&mut self, cmd: Command, len: u32) -> Result<Bytes> {
// Use different size limits based on command type
let size_limit = match cmd {
Command::SendChunk => self.config.chunk_size_limit,
_ => self.config.meta_size_limit,
};
if len as usize > size_limit {
return Err(anyhow::anyhow!("Payload too large: {} bytes", len));
}