Add working test

This commit is contained in:
2025-09-09 22:23:01 +02:00
parent 4e38b13faa
commit fa00747e80
27 changed files with 2373 additions and 46 deletions

View File

@@ -0,0 +1,56 @@
{
"db_name": "SQLite",
"query": "\n SELECT pc.id, pc.code, pc.expires_at, pc.used, m.id as machine_id, m.user_id, u.username\n FROM provisioning_codes pc\n JOIN machines m ON pc.machine_id = m.id\n JOIN users u ON m.user_id = u.id\n WHERE pc.code = ? AND pc.used = 0\n ",
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Integer"
},
{
"name": "code",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "expires_at",
"ordinal": 2,
"type_info": "Datetime"
},
{
"name": "used",
"ordinal": 3,
"type_info": "Bool"
},
{
"name": "machine_id",
"ordinal": 4,
"type_info": "Integer"
},
{
"name": "user_id",
"ordinal": 5,
"type_info": "Integer"
},
{
"name": "username",
"ordinal": 6,
"type_info": "Text"
}
],
"parameters": {
"Right": 1
},
"nullable": [
true,
false,
false,
true,
true,
false,
false
]
},
"hash": "2d6e5810f76e780a4a9b54c5ea39d707be614eb304dc6b4f32d8b6d28464c4b5"
}

View File

@@ -0,0 +1,26 @@
{
"db_name": "SQLite",
"query": "SELECT id, user_id FROM machines WHERE id = ?",
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Integer"
},
{
"name": "user_id",
"ordinal": 1,
"type_info": "Integer"
}
],
"parameters": {
"Right": 1
},
"nullable": [
false,
false
]
},
"hash": "43af0c22d05eca56b2a7b1f6eed873102d8e006330fd7d8063657d2df936b3fb"
}

View File

@@ -0,0 +1,12 @@
{
"db_name": "SQLite",
"query": "UPDATE provisioning_codes SET used = 1 WHERE id = ?",
"describe": {
"columns": [],
"parameters": {
"Right": 1
},
"nullable": []
},
"hash": "508e673540beae31730d323bbb52d91747bb405ef3d6f4a7f20776fdeb618688"
}

View File

@@ -0,0 +1,32 @@
{
"db_name": "SQLite",
"query": "SELECT id, username, password_hash FROM users WHERE username = ?",
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Integer"
},
{
"name": "username",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "password_hash",
"ordinal": 2,
"type_info": "Text"
}
],
"parameters": {
"Right": 1
},
"nullable": [
true,
false,
false
]
},
"hash": "9f9215a05f729db6f707c84967f4f11033d39d17ded98f4fe9fb48f3d1598596"
}

View File

@@ -0,0 +1,26 @@
{
"db_name": "SQLite",
"query": "SELECT id, user_id FROM machines WHERE id = ? AND user_id = ?",
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Integer"
},
{
"name": "user_id",
"ordinal": 1,
"type_info": "Integer"
}
],
"parameters": {
"Right": 2
},
"nullable": [
false,
false
]
},
"hash": "cc5f2e47cc53dd29682506ff84f07f7d0914e3141e62b470e84b3886b50764a1"
}

View File

@@ -87,6 +87,7 @@ impl MachinesController {
id: row.get("id"),
user_id: row.get("user_id"),
uuid: Uuid::parse_str(&row.get::<String, _>("uuid")).unwrap(),
machine_id: row.get::<String, _>("uuid"),
name: row.get("name"),
created_at: row.get("created_at"),
})
@@ -109,6 +110,7 @@ impl MachinesController {
id: row.get("id"),
user_id: row.get("user_id"),
uuid: Uuid::parse_str(&row.get::<String, _>("uuid")).unwrap(),
machine_id: row.get::<String, _>("uuid"),
name: row.get("name"),
created_at: row.get("created_at"),
});

View File

@@ -1,3 +1,4 @@
pub mod auth;
pub mod machines;
pub mod snapshots;
pub mod users;

View File

@@ -0,0 +1,217 @@
use crate::sync::storage::Storage;
use crate::sync::meta::{MetaObj, FsType};
use crate::sync::protocol::MetaType;
use crate::utils::{error::*, models::*, DbPool};
use serde::Serialize;
use chrono::{DateTime, Utc};
/// API-facing summary of one stored snapshot for a machine.
#[derive(Debug, Serialize)]
pub struct SnapshotInfo {
pub id: String, // Use UUID string instead of integer
pub snapshot_hash: String, // hex-encoded snapshot hash (see hex::encode at build site)
pub created_at: String, // "YYYY-MM-DD HH:MM:SS" in UTC, or "Unknown" if the timestamp is invalid
pub disks: Vec<DiskInfo>, // one entry per disk whose metadata could be loaded
}
/// API-facing description of a single disk inside a snapshot.
#[derive(Debug, Serialize)]
pub struct DiskInfo {
pub serial: String, // disk serial taken from the stored disk metadata object
pub size_bytes: u64, // total disk size in bytes (disk_size_bytes from metadata)
pub partitions: Vec<PartitionInfo>, // partitions whose metadata could be loaded
}
/// API-facing description of a single partition on a disk.
#[derive(Debug, Serialize)]
pub struct PartitionInfo {
pub fs_type: String, // one of "ext", "ntfs", "fat32", "unknown"
pub start_lba: u64, // first LBA of the partition
pub end_lba: u64, // last/end LBA — NOTE(review): inclusive vs. exclusive not established here; confirm
pub size_bytes: u64, // computed as (end_lba - start_lba) * 512 — assumes 512-byte sectors; TODO confirm
}
pub struct SnapshotsController;
impl SnapshotsController {
pub async fn get_machine_snapshots(
pool: &DbPool,
machine_id: i64,
user: &User,
) -> AppResult<Vec<SnapshotInfo>> {
// Verify machine access
let machine = sqlx::query!(
"SELECT id, user_id FROM machines WHERE id = ? AND user_id = ?",
machine_id,
user.id
)
.fetch_optional(pool)
.await
.map_err(|e| AppError::DatabaseError(e.to_string()))?;
if machine.is_none() {
return Err(AppError::NotFoundError("Machine not found or access denied".to_string()));
}
let _machine = machine.unwrap();
let storage = Storage::new("./data");
let mut snapshot_infos = Vec::new();
// List all snapshots for this machine from storage
match storage.list_snapshots(machine_id).await {
Ok(snapshot_ids) => {
for snapshot_id in snapshot_ids {
// Load snapshot reference to get hash and timestamp
if let Ok(Some((snapshot_hash, created_at_timestamp))) = storage.load_snapshot_ref(machine_id, &snapshot_id).await {
// Load snapshot metadata
if let Ok(Some(snapshot_meta)) = storage.load_meta(MetaType::Snapshot, &snapshot_hash).await {
if let MetaObj::Snapshot(snapshot_obj) = snapshot_meta {
let mut disks = Vec::new();
for disk_hash in snapshot_obj.disk_hashes {
if let Ok(Some(disk_meta)) = storage.load_meta(MetaType::Disk, &disk_hash).await {
if let MetaObj::Disk(disk_obj) = disk_meta {
let mut partitions = Vec::new();
for partition_hash in disk_obj.partition_hashes {
if let Ok(Some(partition_meta)) = storage.load_meta(MetaType::Partition, &partition_hash).await {
if let MetaObj::Partition(partition_obj) = partition_meta {
let fs_type_str = match partition_obj.fs_type_code {
FsType::Ext => "ext",
FsType::Ntfs => "ntfs",
FsType::Fat32 => "fat32",
FsType::Unknown => "unknown",
};
partitions.push(PartitionInfo {
fs_type: fs_type_str.to_string(),
start_lba: partition_obj.start_lba,
end_lba: partition_obj.end_lba,
size_bytes: (partition_obj.end_lba - partition_obj.start_lba) * 512,
});
}
}
}
disks.push(DiskInfo {
serial: disk_obj.serial,
size_bytes: disk_obj.disk_size_bytes,
partitions,
});
}
}
}
// Convert timestamp to readable format
let created_at_str = DateTime::<Utc>::from_timestamp(created_at_timestamp as i64, 0)
.map(|dt| dt.format("%Y-%m-%d %H:%M:%S").to_string())
.unwrap_or_else(|| "Unknown".to_string());
snapshot_infos.push(SnapshotInfo {
id: snapshot_id,
snapshot_hash: hex::encode(snapshot_hash),
created_at: created_at_str,
disks,
});
}
}
}
}
}
Err(_) => {
// If no snapshots directory exists, return empty list
return Ok(Vec::new());
}
}
// Sort snapshots by creation time (newest first)
snapshot_infos.sort_by(|a, b| b.created_at.cmp(&a.created_at));
Ok(snapshot_infos)
}
pub async fn get_snapshot_details(
pool: &DbPool,
machine_id: i64,
snapshot_id: String,
user: &User,
) -> AppResult<SnapshotInfo> {
// Verify machine access
let machine = sqlx::query!(
"SELECT id, user_id FROM machines WHERE id = ? AND user_id = ?",
machine_id,
user.id
)
.fetch_optional(pool)
.await
.map_err(|e| AppError::DatabaseError(e.to_string()))?;
if machine.is_none() {
return Err(AppError::NotFoundError("Machine not found or access denied".to_string()));
}
let _machine = machine.unwrap();
let storage = Storage::new("./data");
// Load snapshot reference to get hash and timestamp
let (snapshot_hash, created_at_timestamp) = storage.load_snapshot_ref(machine_id, &snapshot_id).await
.map_err(|_| AppError::NotFoundError("Snapshot not found".to_string()))?
.ok_or_else(|| AppError::NotFoundError("Snapshot not found".to_string()))?;
// Load snapshot metadata
let snapshot_meta = storage.load_meta(MetaType::Snapshot, &snapshot_hash).await
.map_err(|_| AppError::NotFoundError("Snapshot metadata not found".to_string()))?
.ok_or_else(|| AppError::NotFoundError("Snapshot metadata not found".to_string()))?;
if let MetaObj::Snapshot(snapshot_obj) = snapshot_meta {
let mut disks = Vec::new();
for disk_hash in snapshot_obj.disk_hashes {
if let Ok(Some(disk_meta)) = storage.load_meta(MetaType::Disk, &disk_hash).await {
if let MetaObj::Disk(disk_obj) = disk_meta {
let mut partitions = Vec::new();
for partition_hash in disk_obj.partition_hashes {
if let Ok(Some(partition_meta)) = storage.load_meta(MetaType::Partition, &partition_hash).await {
if let MetaObj::Partition(partition_obj) = partition_meta {
let fs_type_str = match partition_obj.fs_type_code {
FsType::Ext => "ext",
FsType::Ntfs => "ntfs",
FsType::Fat32 => "fat32",
FsType::Unknown => "unknown",
};
partitions.push(PartitionInfo {
fs_type: fs_type_str.to_string(),
start_lba: partition_obj.start_lba,
end_lba: partition_obj.end_lba,
size_bytes: (partition_obj.end_lba - partition_obj.start_lba) * 512,
});
}
}
}
disks.push(DiskInfo {
serial: disk_obj.serial,
size_bytes: disk_obj.disk_size_bytes,
partitions,
});
}
}
}
// Convert timestamp to readable format
let created_at_str = DateTime::<Utc>::from_timestamp(created_at_timestamp as i64, 0)
.map(|dt| dt.format("%Y-%m-%d %H:%M:%S").to_string())
.unwrap_or_else(|| "Unknown".to_string());
Ok(SnapshotInfo {
id: snapshot_id,
snapshot_hash: hex::encode(snapshot_hash),
created_at: created_at_str,
disks,
})
} else {
Err(AppError::ValidationError("Invalid snapshot metadata".to_string()))
}
}
}

View File

@@ -8,7 +8,7 @@ use axum::{
routing::{delete, get, post, put},
Router,
};
use routes::{accounts, admin, auth as auth_routes, config, machines, setup};
use routes::{accounts, admin, auth, config, machines, setup, snapshots};
use std::path::Path;
use tokio::signal;
use tower_http::{
@@ -27,8 +27,8 @@ async fn main() -> Result<()> {
let api_routes = Router::new()
.route("/setup/status", get(setup::get_setup_status))
.route("/setup/init", post(setup::init_setup))
.route("/auth/login", post(auth_routes::login))
.route("/auth/logout", post(auth_routes::logout))
.route("/auth/login", post(auth::login))
.route("/auth/logout", post(auth::logout))
.route("/accounts/me", get(accounts::me))
.route("/admin/users", get(admin::get_users))
.route("/admin/users", post(admin::create_user_handler))
@@ -40,7 +40,10 @@ async fn main() -> Result<()> {
.route("/machines/register", post(machines::register_machine))
.route("/machines/provisioning-code", post(machines::create_provisioning_code))
.route("/machines", get(machines::get_machines))
.route("/machines/{id}", get(machines::get_machine))
.route("/machines/{id}", delete(machines::delete_machine))
.route("/machines/{id}/snapshots", get(snapshots::get_machine_snapshots))
.route("/machines/{machine_id}/snapshots/{snapshot_id}", get(snapshots::get_snapshot_details))
.layer(CorsLayer::permissive())
.with_state(pool);

View File

@@ -43,6 +43,21 @@ pub async fn get_machines(
Ok(success_response(machines))
}
/// GET /machines/{id} — fetch a single machine by its database id.
///
/// Admins may fetch any machine; other users only their own. Unauthorized
/// access is reported as NotFound so machine ids are not leaked.
pub async fn get_machine(
auth_user: AuthUser,
State(pool): State<DbPool>,
Path(machine_id): Path<i64>,
) -> Result<Json<Machine>, AppError> {
let machine = MachinesController::get_machine_by_id(&pool, machine_id).await?;
// Check if user has access to this machine
if auth_user.user.role != UserRole::Admin && machine.user_id != auth_user.user.id {
return Err(AppError::NotFoundError("Machine not found or access denied".to_string()));
}
// success_response wraps the machine in the standard JSON envelope
// (defined elsewhere in this crate — behavior not visible here).
Ok(success_response(machine))
}
pub async fn delete_machine(
auth_user: AuthUser,
State(pool): State<DbPool>,

View File

@@ -4,3 +4,4 @@ pub mod config;
pub mod machines;
pub mod setup;
pub mod accounts;
pub mod snapshots;

View File

@@ -0,0 +1,32 @@
use axum::{extract::{Path, State}, Json};
use crate::controllers::snapshots::{SnapshotsController, SnapshotInfo};
use crate::utils::{auth::AuthUser, error::AppResult, DbPool};
pub async fn get_machine_snapshots(
State(pool): State<DbPool>,
Path(machine_id): Path<i64>,
auth_user: AuthUser,
) -> AppResult<Json<Vec<SnapshotInfo>>> {
let snapshots = SnapshotsController::get_machine_snapshots(
&pool,
machine_id,
&auth_user.user,
).await?;
Ok(Json(snapshots))
}
pub async fn get_snapshot_details(
State(pool): State<DbPool>,
Path((machine_id, snapshot_id)): Path<(i64, String)>,
auth_user: AuthUser,
) -> AppResult<Json<SnapshotInfo>> {
let snapshot = SnapshotsController::get_snapshot_details(
&pool,
machine_id,
snapshot_id,
&auth_user.user,
).await?;
Ok(Json(snapshot))
}

View File

@@ -354,37 +354,60 @@ impl DiskObj {
}
pub fn deserialize(mut data: Bytes) -> Result<Self> {
println!("DiskObj::deserialize: input data length = {}", data.len());
if data.remaining() < 15 {
println!("DiskObj::deserialize: data too short, remaining = {}", data.remaining());
return Err(Error::new(ErrorKind::UnexpectedEof, "DiskObj data too short"));
}
let version = data.get_u8();
println!("DiskObj::deserialize: version = {}", version);
if version != 1 {
println!("DiskObj::deserialize: unsupported version {}", version);
return Err(Error::new(ErrorKind::InvalidData, "Unsupported DiskObj version"));
}
let partition_count = data.get_u32_le() as usize;
println!("DiskObj::deserialize: partition_count = {}", partition_count);
if data.remaining() < partition_count * 32 + 10 {
println!("DiskObj::deserialize: not enough data for partitions, remaining = {}, needed = {}",
data.remaining(), partition_count * 32 + 10);
return Err(Error::new(ErrorKind::UnexpectedEof, "DiskObj partitions too short"));
}
let mut partition_hashes = Vec::with_capacity(partition_count);
for _ in 0..partition_count {
for i in 0..partition_count {
let mut hash = [0u8; 32];
data.copy_to_slice(&mut hash);
println!("DiskObj::deserialize: partition {} hash = {}", i, hex::encode(&hash));
partition_hashes.push(hash);
}
let disk_size_bytes = data.get_u64_le();
println!("DiskObj::deserialize: disk_size_bytes = {}", disk_size_bytes);
let serial_len = data.get_u16_le() as usize;
println!("DiskObj::deserialize: serial_len = {}", serial_len);
if data.remaining() < serial_len {
println!("DiskObj::deserialize: not enough data for serial, remaining = {}, needed = {}",
data.remaining(), serial_len);
return Err(Error::new(ErrorKind::UnexpectedEof, "DiskObj serial too short"));
}
let serial = String::from_utf8(data.copy_to_bytes(serial_len).to_vec())
.map_err(|_| Error::new(ErrorKind::InvalidData, "Invalid UTF-8 in serial"))?;
let serial_bytes = data.copy_to_bytes(serial_len).to_vec();
println!("DiskObj::deserialize: serial_bytes = {:?}", serial_bytes);
let serial = String::from_utf8(serial_bytes)
.map_err(|e| {
println!("DiskObj::deserialize: UTF-8 error: {}", e);
Error::new(ErrorKind::InvalidData, "Invalid UTF-8 in serial")
})?;
println!("DiskObj::deserialize: serial = '{}'", serial);
println!("DiskObj::deserialize: successfully deserialized");
Ok(Self {
version,

View File

@@ -113,7 +113,7 @@ struct ConnectionHandler {
validator: SnapshotValidator,
config: SyncServerConfig,
session_id: Option<[u8; 16]>,
machine_id: Option<String>,
machine_id: Option<i64>,
}
impl ConnectionHandler {
@@ -308,18 +308,27 @@ impl ConnectionHandler {
self.require_auth()?;
if body.len() > self.config.meta_size_limit {
println!("Snapshot rejected: size limit exceeded ({} > {})", body.len(), self.config.meta_size_limit);
return Ok(Some(Message::SnapshotFail {
missing_chunks: vec![],
missing_metas: vec![],
}));
}
println!("Validating snapshot hash: {}", hex::encode(&snapshot_hash));
// Validate snapshot
match self.validator.validate_snapshot(&snapshot_hash, &body).await {
Ok(validation_result) => {
println!("Validation result - is_valid: {}, missing_chunks: {}, missing_metas: {}",
validation_result.is_valid,
validation_result.missing_chunks.len(),
validation_result.missing_metas.len());
if validation_result.is_valid {
// Store snapshot meta
if let Err(_e) = self.storage.store_meta(MetaType::Snapshot, &snapshot_hash, &body).await {
if let Err(e) = self.storage.store_meta(MetaType::Snapshot, &snapshot_hash, &body).await {
println!("Failed to store snapshot meta: {}", e);
return Ok(Some(Message::SnapshotFail {
missing_chunks: vec![],
missing_metas: vec![],
@@ -328,46 +337,36 @@ impl ConnectionHandler {
// Create snapshot reference
let snapshot_id = Uuid::new_v4().to_string();
let machine_id = self.machine_id.as_ref().unwrap();
let machine_id = *self.machine_id.as_ref().unwrap();
let created_at = chrono::Utc::now().timestamp() as u64;
if let Err(_e) = self.storage.store_snapshot_ref(
println!("Creating snapshot reference: machine_id={}, snapshot_id={}", machine_id, snapshot_id);
if let Err(e) = self.storage.store_snapshot_ref(
machine_id,
&snapshot_id,
&snapshot_hash,
created_at
).await {
println!("Failed to store snapshot reference: {}", e);
return Ok(Some(Message::SnapshotFail {
missing_chunks: vec![],
missing_metas: vec![],
}));
}
// Store snapshot in database
let machine_id_num: i64 = machine_id.parse().unwrap_or(0);
let snapshot_hash_hex = hex::encode(snapshot_hash);
if let Err(_e) = sqlx::query!(
"INSERT INTO snapshots (machine_id, snapshot_hash) VALUES (?, ?)",
machine_id_num,
snapshot_hash_hex
)
.execute(self.session_manager.get_db_pool())
.await {
return Ok(Some(Message::SnapshotFail {
missing_chunks: vec![],
missing_metas: vec![],
}));
}
println!("Snapshot successfully stored with ID: {}", snapshot_id);
Ok(Some(Message::SnapshotOk { snapshot_id }))
} else {
println!("Snapshot validation failed - returning missing items");
Ok(Some(Message::SnapshotFail {
missing_chunks: validation_result.missing_chunks,
missing_metas: validation_result.missing_metas,
}))
}
}
Err(_e) => {
Err(e) => {
println!("Snapshot validation error: {}", e);
Ok(Some(Message::SnapshotFail {
missing_chunks: vec![],
missing_metas: vec![],

View File

@@ -4,13 +4,12 @@ use sqlx::SqlitePool;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
use uuid::Uuid;
/// Session information
#[derive(Debug, Clone)]
pub struct Session {
pub session_id: [u8; 16],
pub machine_id: String,
pub machine_id: i64,
pub user_id: i64,
pub created_at: chrono::DateTime<chrono::Utc>,
}
@@ -79,12 +78,12 @@ impl SessionManager {
return Err(anyhow::anyhow!("Machine does not belong to user"));
}
// Create session
// Create session with machine ID
let session_id = Self::generate_session_id();
let machine_id_str = machine_id.to_string();
let machine_id = machine.id; // Use database ID
let session = Session {
session_id,
machine_id: machine_id_str,
machine_id,
user_id,
created_at: chrono::Utc::now(),
};
@@ -101,7 +100,7 @@ impl SessionManager {
// Query provisioning code from database
let provisioning_code = sqlx::query!(
r#"
SELECT pc.id, pc.code, pc.expires_at, pc.used, m.user_id, u.username
SELECT pc.id, pc.code, pc.expires_at, pc.used, m.id as machine_id, m.user_id, u.username
FROM provisioning_codes pc
JOIN machines m ON pc.machine_id = m.id
JOIN users u ON m.user_id = u.id
@@ -137,7 +136,7 @@ impl SessionManager {
// Create session
let session_id = Self::generate_session_id();
let machine_id = format!("machine-{}", Uuid::new_v4());
let machine_id = provisioning_code.machine_id.expect("Machine ID should not be null"); // Use machine ID from database
let session = Session {
session_id,
machine_id,
@@ -159,7 +158,7 @@ impl SessionManager {
}
/// Validate session and return associated machine ID
pub async fn validate_session(&self, session_id: &[u8; 16]) -> Result<String> {
pub async fn validate_session(&self, session_id: &[u8; 16]) -> Result<i64> {
let session = self.get_session(session_id).await
.ok_or_else(|| anyhow::anyhow!("Invalid session"))?;

View File

@@ -6,7 +6,7 @@ use tokio::fs;
use crate::sync::protocol::{Hash, MetaType};
use crate::sync::meta::MetaObj;
/// Storage backend for chunks and meta objects
/// Storage backend for chunks and metadata objects
#[derive(Debug, Clone)]
pub struct Storage {
data_dir: PathBuf,
@@ -199,30 +199,37 @@ impl Storage {
let path = self.meta_path(meta_type, hash);
if !path.exists() {
println!("Meta file does not exist: {:?}", path);
return Ok(None);
}
println!("Reading meta file: {:?}", path);
let data = fs::read(&path).await
.context("Failed to read meta file")?;
println!("Read {} bytes from meta file", data.len());
// Verify hash
let computed_hash = blake3::hash(&data);
if computed_hash.as_bytes() != hash {
println!("Hash mismatch: expected {}, got {}", hex::encode(hash), hex::encode(computed_hash.as_bytes()));
return Err(anyhow::anyhow!("Stored meta object hash mismatch"));
}
println!("Hash verified, deserializing {:?} object", meta_type);
let meta_obj = MetaObj::deserialize(meta_type, Bytes::from(data))
.context("Failed to deserialize meta object")?;
println!("Successfully deserialized meta object");
Ok(Some(meta_obj))
}
/// Get snapshot storage path for a machine
fn snapshot_ref_path(&self, machine_id: &str, snapshot_id: &str) -> PathBuf {
fn snapshot_ref_path(&self, machine_id: i64, snapshot_id: &str) -> PathBuf {
self.data_dir
.join("sync")
.join("machines")
.join(machine_id)
.join(machine_id.to_string())
.join("snapshots")
.join(format!("{}.ref", snapshot_id))
}
@@ -230,7 +237,7 @@ impl Storage {
/// Store a snapshot reference
pub async fn store_snapshot_ref(
&self,
machine_id: &str,
machine_id: i64,
snapshot_id: &str,
snapshot_hash: &Hash,
created_at: u64
@@ -258,7 +265,7 @@ impl Storage {
}
/// Load a snapshot reference
pub async fn load_snapshot_ref(&self, machine_id: &str, snapshot_id: &str) -> Result<Option<(Hash, u64)>> {
pub async fn load_snapshot_ref(&self, machine_id: i64, snapshot_id: &str) -> Result<Option<(Hash, u64)>> {
let path = self.snapshot_ref_path(machine_id, snapshot_id);
if !path.exists() {
@@ -285,11 +292,11 @@ impl Storage {
}
/// List snapshots for a machine
pub async fn list_snapshots(&self, machine_id: &str) -> Result<Vec<String>> {
pub async fn list_snapshots(&self, machine_id: i64) -> Result<Vec<String>> {
let snapshots_dir = self.data_dir
.join("sync")
.join("machines")
.join(machine_id)
.join(machine_id.to_string())
.join("snapshots");
if !snapshots_dir.exists() {
@@ -316,7 +323,7 @@ impl Storage {
}
/// Delete old snapshots, keeping only the latest N
pub async fn cleanup_snapshots(&self, machine_id: &str, keep_count: usize) -> Result<()> {
pub async fn cleanup_snapshots(&self, machine_id: i64, keep_count: usize) -> Result<()> {
let mut snapshots = self.list_snapshots(machine_id).await?;
if snapshots.len() <= keep_count {
@@ -382,7 +389,7 @@ mod tests {
let storage = Storage::new(temp_dir.path());
storage.init().await.unwrap();
let machine_id = "test-machine";
let machine_id = 123i64;
let snapshot_id = "snapshot-001";
let snapshot_hash = [1u8; 32];
let created_at = 1234567890;

View File

@@ -110,11 +110,13 @@ impl SnapshotValidator {
// Check if meta exists
if !self.storage.meta_exists(meta_type, &hash).await {
println!("Missing metadata: {:?} hash {}", meta_type, hex::encode(&hash));
missing_metas.push((meta_type, hash));
continue; // Skip loading if missing
}
// Load and process meta object
println!("Loading metadata: {:?} hash {}", meta_type, hex::encode(&hash));
if let Some(meta_obj) = self.storage.load_meta(meta_type, &hash).await
.context("Failed to load meta object")? {

View File

@@ -83,6 +83,8 @@ pub struct Machine {
pub id: i64,
pub user_id: i64,
pub uuid: Uuid,
#[serde(rename = "machine_id")]
pub machine_id: String,
pub name: String,
pub created_at: DateTime<Utc>,
}