Add working test

This commit is contained in:
2025-09-09 22:23:01 +02:00
parent 4e38b13faa
commit fa00747e80
27 changed files with 2373 additions and 46 deletions

76
sync_client_test/Cargo.lock generated Normal file
View File

@@ -0,0 +1,76 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "arrayref"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb"
[[package]]
name = "arrayvec"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "blake3"
version = "1.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0"
dependencies = [
"arrayref",
"arrayvec",
"cc",
"cfg-if",
"constant_time_eq",
]
[[package]]
name = "cc"
version = "1.2.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5252b3d2648e5eedbc1a6f501e3c795e07025c1e93bbf8bbdd6eef7f447a6d54"
dependencies = [
"find-msvc-tools",
"shlex",
]
[[package]]
name = "cfg-if"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9"
[[package]]
name = "constant_time_eq"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6"
[[package]]
name = "find-msvc-tools"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fd99930f64d146689264c637b5af2f0233a933bef0d8570e2526bf9e083192d"
[[package]]
name = "hex"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
[[package]]
name = "sync_client_test"
version = "0.1.0"
dependencies = [
"blake3",
"hex",
]

View File

@@ -0,0 +1,8 @@
[package]
name = "sync_client_test"
version = "0.1.0"
edition = "2021"
[dependencies]
blake3 = "1.5"
hex = "0.4"

View File

@@ -0,0 +1,856 @@
// Mock sync client for testing the Arkendro sync server
// This implements the binary protocol specified in PROTOCOL.md
use std::io::{Read, Write, Result, Error, ErrorKind};
use std::net::TcpStream;
/// Command codes from the sync protocol (one byte on the wire).
///
/// The discriminant values match the byte codes defined in PROTOCOL.md,
/// grouped by phase: handshake (0x0x), auth (0x1x), chunks (0x2x),
/// metadata (0x3x), snapshots (0x4x) and teardown (0xFF).
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
enum Command {
    Hello = 0x01,
    HelloOk = 0x02,
    AuthUserPass = 0x10,
    AuthCode = 0x11,
    AuthOk = 0x12,
    AuthFail = 0x13,
    BatchCheckChunk = 0x20,
    CheckChunkResp = 0x21,
    SendChunk = 0x22,
    ChunkOk = 0x23,
    ChunkFail = 0x24,
    BatchCheckMeta = 0x30,
    CheckMetaResp = 0x31,
    SendMeta = 0x32,
    MetaOk = 0x33,
    MetaFail = 0x34,
    SendSnapshot = 0x40,
    SnapshotOk = 0x41,
    SnapshotFail = 0x42,
    Close = 0xFF,
}

impl Command {
    /// Decode a wire byte into a `Command`.
    ///
    /// Returns an `InvalidData` error for any byte that is not a known
    /// protocol command code.
    fn from_u8(value: u8) -> Result<Self> {
        use Command::*;
        let cmd = match value {
            0x01 => Hello,
            0x02 => HelloOk,
            0x10 => AuthUserPass,
            0x11 => AuthCode,
            0x12 => AuthOk,
            0x13 => AuthFail,
            0x20 => BatchCheckChunk,
            0x21 => CheckChunkResp,
            0x22 => SendChunk,
            0x23 => ChunkOk,
            0x24 => ChunkFail,
            0x30 => BatchCheckMeta,
            0x31 => CheckMetaResp,
            0x32 => SendMeta,
            0x33 => MetaOk,
            0x34 => MetaFail,
            0x40 => SendSnapshot,
            0x41 => SnapshotOk,
            0x42 => SnapshotFail,
            0xFF => Close,
            _ => return Err(Error::new(ErrorKind::InvalidData, "Unknown command")),
        };
        Ok(cmd)
    }
}
/// Fixed-size message header (24 bytes on the wire).
///
/// Layout: cmd (1) | flags (1) | reserved (2) | session_id (16) |
/// payload_len (4, little-endian).
#[derive(Debug)]
struct MessageHeader {
    cmd: Command,
    flags: u8,
    reserved: [u8; 2],
    session_id: [u8; 16],
    payload_len: u32,
}

impl MessageHeader {
    /// Build a header with zeroed flags and reserved bytes.
    fn new(cmd: Command, session_id: [u8; 16], payload_len: u32) -> Self {
        Self {
            cmd,
            flags: 0,
            reserved: [0; 2],
            session_id,
            payload_len,
        }
    }

    /// Serialize the header into its 24-byte wire representation.
    fn to_bytes(&self) -> [u8; 24] {
        let mut out = [0u8; 24];
        out[0] = self.cmd as u8;
        out[1] = self.flags;
        out[2..4].copy_from_slice(&self.reserved);
        out[4..20].copy_from_slice(&self.session_id);
        out[20..24].copy_from_slice(&self.payload_len.to_le_bytes());
        out
    }

    /// Parse a header from the first 24 bytes of `buf`.
    ///
    /// Fails with `UnexpectedEof` when fewer than 24 bytes are available,
    /// or with `InvalidData` when the command byte is unknown.
    fn from_bytes(buf: &[u8]) -> Result<Self> {
        if buf.len() < 24 {
            return Err(Error::new(ErrorKind::UnexpectedEof, "Header too short"));
        }
        let mut session_id = [0u8; 16];
        session_id.copy_from_slice(&buf[4..20]);
        Ok(Self {
            cmd: Command::from_u8(buf[0])?,
            flags: buf[1],
            reserved: [buf[2], buf[3]],
            session_id,
            payload_len: u32::from_le_bytes([buf[20], buf[21], buf[22], buf[23]]),
        })
    }
}
/// Metadata object kinds understood by the server.
/// Serialized as a single type byte; values match PROTOCOL.md.
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
enum MetaType {
    File = 1,
    Dir = 2,
    Partition = 3,
    Disk = 4,
    Snapshot = 5,
}

/// Filesystem types recorded in file/partition metadata.
/// Serialized as a little-endian u32 code.
#[derive(Debug, Clone, Copy)]
#[repr(u32)]
enum FsType {
    Unknown = 0,
    Ext = 1,
    Ntfs = 2,
    Fat32 = 3,
}

/// Directory entry types (one byte per entry in a serialized DirObj).
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
enum EntryType {
    File = 0,
    Dir = 1,
    Symlink = 2,
}

/// A single entry inside a directory object.
#[derive(Debug, Clone)]
struct DirEntry {
    entry_type: EntryType,      // file / dir / symlink tag
    name: String,               // entry name; length-prefixed (u16 LE) on the wire
    target_meta_hash: [u8; 32], // BLAKE3 hash of the entry's metadata object
}
/// File metadata object (MetaType::File).
///
/// Wire layout: version (1) | fs_type (u32 LE) | size (u64 LE) |
/// mode (u32 LE) | uid (u32 LE) | gid (u32 LE) | mtime (u64 LE) |
/// chunk_count (u32 LE) | chunk hashes (32 bytes each).
#[derive(Debug, Clone)]
struct FileObj {
    version: u8,
    fs_type_code: FsType,
    size: u64,
    mode: u32,
    uid: u32,
    gid: u32,
    mtime_unixsec: u64,
    chunk_hashes: Vec<[u8; 32]>,
}

impl FileObj {
    /// Create a v1 file object with test-friendly defaults
    /// (ext filesystem, mode 0644, uid/gid 1000, mtime = now).
    fn new(size: u64, chunk_hashes: Vec<[u8; 32]>) -> Self {
        Self {
            version: 1,
            fs_type_code: FsType::Ext,
            size,
            mode: 0o644,
            uid: 1000,
            gid: 1000,
            // Fall back to 0 instead of panicking if the system clock is
            // set before the Unix epoch (duration_since would return Err).
            mtime_unixsec: std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .map(|d| d.as_secs())
                .unwrap_or(0),
            chunk_hashes,
        }
    }

    /// Serialize to the wire format described on the struct.
    fn serialize(&self) -> Vec<u8> {
        // 37 fixed bytes + 32 per chunk hash; reserve up front to avoid
        // repeated reallocation.
        let mut buf = Vec::with_capacity(37 + 32 * self.chunk_hashes.len());
        buf.push(self.version);
        buf.extend_from_slice(&(self.fs_type_code as u32).to_le_bytes());
        buf.extend_from_slice(&self.size.to_le_bytes());
        buf.extend_from_slice(&self.mode.to_le_bytes());
        buf.extend_from_slice(&self.uid.to_le_bytes());
        buf.extend_from_slice(&self.gid.to_le_bytes());
        buf.extend_from_slice(&self.mtime_unixsec.to_le_bytes());
        buf.extend_from_slice(&(self.chunk_hashes.len() as u32).to_le_bytes());
        for hash in &self.chunk_hashes {
            buf.extend_from_slice(hash);
        }
        buf
    }
}
/// Directory metadata object (MetaType::Dir).
///
/// Wire layout: version (1) | entry_count (u32 LE) | per entry:
/// entry_type (1) | name_len (u16 LE) | name bytes | target hash (32).
#[derive(Debug, Clone)]
struct DirObj {
    version: u8,
    entries: Vec<DirEntry>,
}

impl DirObj {
    /// Wrap a list of entries in a version-1 directory object.
    fn new(entries: Vec<DirEntry>) -> Self {
        Self { version: 1, entries }
    }

    /// Serialize to the wire format described on the struct.
    fn serialize(&self) -> Vec<u8> {
        let mut out = vec![self.version];
        out.extend_from_slice(&(self.entries.len() as u32).to_le_bytes());
        for entry in &self.entries {
            let name = entry.name.as_bytes();
            out.push(entry.entry_type as u8);
            out.extend_from_slice(&(name.len() as u16).to_le_bytes());
            out.extend_from_slice(name);
            out.extend_from_slice(&entry.target_meta_hash);
        }
        out
    }
}
/// Partition metadata object (MetaType::Partition).
///
/// Wire layout: version (1) | fs_type (u32 LE) | root_dir_hash (32) |
/// start_lba (u64 LE) | end_lba (u64 LE) | type_guid (16).
#[derive(Debug, Clone)]
struct PartitionObj {
    version: u8,
    fs_type: FsType,
    root_dir_hash: [u8; 32],
    start_lba: u64,
    end_lba: u64,
    type_guid: [u8; 16],
}

impl PartitionObj {
    /// Create a v1 ext partition rooted at `root_dir_hash`.
    ///
    /// The GUID is derived deterministically from `label` (first 16 bytes,
    /// zero-padded) so test runs are reproducible.
    fn new(label: String, root_dir_hash: [u8; 32]) -> Self {
        let mut type_guid = [0u8; 16];
        let label_bytes = label.as_bytes();
        // Bulk copy instead of a manual per-byte loop.
        let n = label_bytes.len().min(16);
        type_guid[..n].copy_from_slice(&label_bytes[..n]);
        Self {
            version: 1,
            fs_type: FsType::Ext,
            root_dir_hash,
            start_lba: 2048,  // Common starting LBA
            end_lba: 2097152, // ~1GB partition
            type_guid,
        }
    }

    /// Serialize to the wire format described on the struct.
    fn serialize(&self) -> Vec<u8> {
        // Fixed-size record: 1 + 4 + 32 + 8 + 8 + 16 bytes.
        let mut buf = Vec::with_capacity(69);
        buf.push(self.version);
        buf.extend_from_slice(&(self.fs_type as u32).to_le_bytes());
        buf.extend_from_slice(&self.root_dir_hash);
        buf.extend_from_slice(&self.start_lba.to_le_bytes());
        buf.extend_from_slice(&self.end_lba.to_le_bytes());
        buf.extend_from_slice(&self.type_guid);
        buf
    }
}
/// Disk metadata object (MetaType::Disk).
///
/// Wire layout: version (1) | partition_count (u32 LE) | partition hashes
/// (32 bytes each) | disk_size_bytes (u64 LE) | serial_len (u16 LE) | serial.
#[derive(Debug, Clone)]
struct DiskObj {
    version: u8,
    partition_hashes: Vec<[u8; 32]>,
    disk_size_bytes: u64,
    serial: String,
}

impl DiskObj {
    /// Build a version-1 disk object with a fixed 1 GiB size.
    fn new(serial: String, partition_hashes: Vec<[u8; 32]>) -> Self {
        Self {
            version: 1,
            partition_hashes,
            disk_size_bytes: 1024 * 1024 * 1024, // 1GB default
            serial,
        }
    }

    /// Serialize to the wire format described on the struct.
    fn serialize(&self) -> Vec<u8> {
        let serial_bytes = self.serial.as_bytes();
        let mut out = vec![self.version];
        out.extend_from_slice(&(self.partition_hashes.len() as u32).to_le_bytes());
        for hash in &self.partition_hashes {
            out.extend_from_slice(hash);
        }
        out.extend_from_slice(&self.disk_size_bytes.to_le_bytes());
        out.extend_from_slice(&(serial_bytes.len() as u16).to_le_bytes());
        out.extend_from_slice(serial_bytes);
        out
    }
}
/// Snapshot metadata object (MetaType::Snapshot).
///
/// Wire layout: version (1) | created_at (u64 LE) | disk_count (u32 LE) |
/// disk hashes (32 bytes each).
#[derive(Debug, Clone)]
struct SnapshotObj {
    version: u8,
    created_at_unixsec: u64,
    disk_hashes: Vec<[u8; 32]>,
}

impl SnapshotObj {
    /// Create a v1 snapshot timestamped with the current wall-clock time.
    fn new(disk_hashes: Vec<[u8; 32]>) -> Self {
        Self {
            version: 1,
            // Fall back to 0 instead of panicking if the system clock is
            // set before the Unix epoch (duration_since would return Err).
            created_at_unixsec: std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .map(|d| d.as_secs())
                .unwrap_or(0),
            disk_hashes,
        }
    }

    /// Serialize to the wire format described on the struct.
    fn serialize(&self) -> Vec<u8> {
        // 13 fixed bytes + 32 per disk hash; reserve up front.
        let mut buf = Vec::with_capacity(13 + 32 * self.disk_hashes.len());
        buf.push(self.version);
        buf.extend_from_slice(&self.created_at_unixsec.to_le_bytes());
        buf.extend_from_slice(&(self.disk_hashes.len() as u32).to_le_bytes());
        for hash in &self.disk_hashes {
            buf.extend_from_slice(hash);
        }
        buf
    }
}
/// Simple sync client for testing.
///
/// Wraps a TCP connection to the sync server together with the 16-byte
/// session id handed out by a successful authentication (all zeroes
/// before auth; echoed in every subsequent message header).
struct SyncClient {
    stream: TcpStream,
    session_id: [u8; 16],
}
impl SyncClient {
    /// Open a TCP connection to the server at `addr`.
    /// The session id starts zeroed and is filled in by `authenticate`.
    fn connect(addr: &str) -> Result<Self> {
        let stream = TcpStream::connect(addr)?;
        Ok(Self {
            stream,
            session_id: [0; 16],
        })
    }

    /// Frame and send one message: 24-byte header followed by `payload`.
    /// Flushes the stream so the server sees the message immediately.
    fn send_message(&mut self, cmd: Command, payload: &[u8]) -> Result<()> {
        let header = MessageHeader::new(cmd, self.session_id, payload.len() as u32);
        self.stream.write_all(&header.to_bytes())?;
        if !payload.is_empty() {
            self.stream.write_all(payload)?;
        }
        self.stream.flush()?;
        Ok(())
    }

    /// Read one message: a 24-byte header, then exactly `payload_len` bytes.
    ///
    /// NOTE(review): the payload buffer is sized from the server-supplied
    /// `payload_len` field without an upper bound, so a misbehaving server
    /// could trigger a large allocation — acceptable for a test client.
    fn receive_message(&mut self) -> Result<(Command, Vec<u8>)> {
        // Read header
        let mut header_buf = [0u8; 24];
        self.stream.read_exact(&mut header_buf)?;
        let header = MessageHeader::from_bytes(&header_buf)?;
        // Read payload
        let mut payload = vec![0u8; header.payload_len as usize];
        if header.payload_len > 0 {
            self.stream.read_exact(&mut payload)?;
        }
        Ok((header.cmd, payload))
    }

    /// Perform the protocol handshake; expects HELLO_OK in response.
    fn hello(&mut self) -> Result<()> {
        println!("Sending HELLO...");
        // Hello message needs client_type (1 byte) and auth_type (1 byte)
        let payload = vec![0x01, 0x01]; // client_type=1, auth_type=1
        self.send_message(Command::Hello, &payload)?;
        let (cmd, _payload) = self.receive_message()?;
        match cmd {
            Command::HelloOk => {
                println!("✓ Received HELLO_OK");
                Ok(())
            }
            _ => Err(Error::new(ErrorKind::InvalidData, "Expected HELLO_OK")),
        }
    }

    /// Authenticate with username/password for a given machine.
    ///
    /// On AuthOk the first 16 payload bytes become this client's session
    /// id, which is echoed in every later message header.
    fn authenticate(&mut self, username: &str, password: &str, machine_id: i64) -> Result<()> {
        println!("Authenticating as {} with machine ID {}...", username, machine_id);
        // Build auth payload: username_len (u16_le) + username + password_len (u16_le) + password + machine_id (i64_le)
        let mut payload = Vec::new();
        payload.extend_from_slice(&(username.len() as u16).to_le_bytes());
        payload.extend_from_slice(username.as_bytes());
        payload.extend_from_slice(&(password.len() as u16).to_le_bytes());
        payload.extend_from_slice(password.as_bytes());
        payload.extend_from_slice(&machine_id.to_le_bytes());
        self.send_message(Command::AuthUserPass, &payload)?;
        let (cmd, payload) = self.receive_message()?;
        match cmd {
            Command::AuthOk => {
                // Extract session ID from payload
                if payload.len() >= 16 {
                    self.session_id.copy_from_slice(&payload[0..16]);
                    println!("✓ Authentication successful! Session ID: {:?}", self.session_id);
                    Ok(())
                } else {
                    Err(Error::new(ErrorKind::InvalidData, "Invalid session ID"))
                }
            }
            Command::AuthFail => Err(Error::new(ErrorKind::PermissionDenied, "Authentication failed")),
            _ => Err(Error::new(ErrorKind::InvalidData, "Unexpected response")),
        }
    }

    /// Ask the server which of `hashes` it does NOT have yet.
    ///
    /// Request: count (u32 LE) + 32-byte hashes. Response mirrors the
    /// shape and lists only the missing hashes.
    fn check_chunks(&mut self, hashes: &[[u8; 32]]) -> Result<Vec<[u8; 32]>> {
        println!("Checking {} chunks...", hashes.len());
        let mut payload = Vec::new();
        payload.extend_from_slice(&(hashes.len() as u32).to_le_bytes());
        for hash in hashes {
            payload.extend_from_slice(hash);
        }
        self.send_message(Command::BatchCheckChunk, &payload)?;
        let (cmd, payload) = self.receive_message()?;
        match cmd {
            Command::CheckChunkResp => {
                if payload.len() < 4 {
                    return Err(Error::new(ErrorKind::InvalidData, "Invalid response"));
                }
                let count = u32::from_le_bytes([payload[0], payload[1], payload[2], payload[3]]) as usize;
                let mut missing = Vec::new();
                for i in 0..count {
                    // Each entry is a raw 32-byte hash after the 4-byte count.
                    let start = 4 + i * 32;
                    if payload.len() < start + 32 {
                        return Err(Error::new(ErrorKind::InvalidData, "Invalid hash in response"));
                    }
                    let mut hash = [0u8; 32];
                    hash.copy_from_slice(&payload[start..start + 32]);
                    missing.push(hash);
                }
                println!("{} chunks missing out of {}", missing.len(), hashes.len());
                Ok(missing)
            }
            _ => Err(Error::new(ErrorKind::InvalidData, "Expected CheckChunkResp")),
        }
    }

    /// Upload one chunk: hash (32) + len (u32 LE) + data.
    ///
    /// Recomputes the BLAKE3 hash locally first so a corrupted buffer is
    /// caught before it ever reaches the server.
    fn send_chunk(&mut self, hash: &[u8; 32], data: &[u8]) -> Result<()> {
        println!("Sending chunk {} bytes...", data.len());
        println!("Chunk hash: {}", hex::encode(hash));
        // Verify hash matches data
        let computed_hash = blake3_hash(data);
        if computed_hash != *hash {
            return Err(Error::new(ErrorKind::InvalidData, "Hash mismatch"));
        }
        let mut payload = Vec::new();
        payload.extend_from_slice(hash);
        payload.extend_from_slice(&(data.len() as u32).to_le_bytes());
        payload.extend_from_slice(data);
        self.send_message(Command::SendChunk, &payload)?;
        let (cmd, payload) = self.receive_message()?;
        match cmd {
            Command::ChunkOk => {
                println!("✓ Chunk uploaded successfully");
                Ok(())
            }
            Command::ChunkFail => {
                // Failure payload, when present, is a human-readable reason.
                let reason = if !payload.is_empty() {
                    String::from_utf8_lossy(&payload).to_string()
                } else {
                    "Unknown error".to_string()
                };
                Err(Error::new(ErrorKind::Other, format!("Server rejected chunk: {}", reason)))
            }
            _ => Err(Error::new(ErrorKind::InvalidData, "Expected ChunkOk or ChunkFail")),
        }
    }

    /// Ask the server which of the (type, hash) metadata items are missing.
    ///
    /// Request/response entries are 33 bytes each: 1 type byte + 32-byte
    /// hash, after a u32 LE count.
    fn check_metadata(&mut self, items: &[(MetaType, [u8; 32])]) -> Result<Vec<(MetaType, [u8; 32])>> {
        println!("Checking {} metadata items...", items.len());
        let mut payload = Vec::new();
        payload.extend_from_slice(&(items.len() as u32).to_le_bytes());
        for (meta_type, hash) in items {
            payload.push(*meta_type as u8);
            payload.extend_from_slice(hash);
        }
        self.send_message(Command::BatchCheckMeta, &payload)?;
        let (cmd, payload) = self.receive_message()?;
        match cmd {
            Command::CheckMetaResp => {
                if payload.len() < 4 {
                    return Err(Error::new(ErrorKind::InvalidData, "Invalid response"));
                }
                let count = u32::from_le_bytes([payload[0], payload[1], payload[2], payload[3]]) as usize;
                let mut missing = Vec::new();
                for i in 0..count {
                    let start = 4 + i * 33; // 1 byte type + 32 bytes hash
                    if payload.len() < start + 33 {
                        return Err(Error::new(ErrorKind::InvalidData, "Invalid metadata in response"));
                    }
                    let meta_type = match payload[start] {
                        1 => MetaType::File,
                        2 => MetaType::Dir,
                        3 => MetaType::Partition,
                        4 => MetaType::Disk,
                        5 => MetaType::Snapshot,
                        _ => return Err(Error::new(ErrorKind::InvalidData, "Invalid metadata type")),
                    };
                    let mut hash = [0u8; 32];
                    hash.copy_from_slice(&payload[start + 1..start + 33]);
                    missing.push((meta_type, hash));
                }
                println!("{} metadata items missing out of {}", missing.len(), items.len());
                Ok(missing)
            }
            _ => Err(Error::new(ErrorKind::InvalidData, "Expected CheckMetaResp")),
        }
    }

    /// Upload one metadata object: type (1) + hash (32) + len (u32 LE) + body.
    /// The hash is verified against `body` locally before sending.
    fn send_metadata(&mut self, meta_type: MetaType, meta_hash: &[u8; 32], body: &[u8]) -> Result<()> {
        println!("Sending {:?} metadata {} bytes...", meta_type, body.len());
        println!("Metadata hash: {}", hex::encode(meta_hash));
        // Verify hash matches body
        let computed_hash = blake3_hash(body);
        if computed_hash != *meta_hash {
            return Err(Error::new(ErrorKind::InvalidData, "Metadata hash mismatch"));
        }
        let mut payload = Vec::new();
        payload.push(meta_type as u8);
        payload.extend_from_slice(meta_hash);
        payload.extend_from_slice(&(body.len() as u32).to_le_bytes());
        payload.extend_from_slice(body);
        self.send_message(Command::SendMeta, &payload)?;
        let (cmd, payload) = self.receive_message()?;
        match cmd {
            Command::MetaOk => {
                println!("✓ Metadata uploaded successfully");
                Ok(())
            }
            Command::MetaFail => {
                // Failure payload, when present, is a human-readable reason.
                let reason = if !payload.is_empty() {
                    String::from_utf8_lossy(&payload).to_string()
                } else {
                    "Unknown error".to_string()
                };
                Err(Error::new(ErrorKind::Other, format!("Server rejected metadata: {}", reason)))
            }
            _ => Err(Error::new(ErrorKind::InvalidData, "Expected MetaOk or MetaFail")),
        }
    }

    /// Upload the snapshot object: hash (32) + len (u32 LE) + data.
    ///
    /// A SnapshotFail response carries two u32 LE counts (missing chunks,
    /// missing metadata items); these are surfaced in the error message.
    fn send_snapshot(&mut self, snapshot_hash: &[u8; 32], snapshot_data: &[u8]) -> Result<()> {
        println!("Sending snapshot {} bytes...", snapshot_data.len());
        println!("Snapshot hash: {}", hex::encode(snapshot_hash));
        // Verify hash matches data
        let computed_hash = blake3_hash(snapshot_data);
        if computed_hash != *snapshot_hash {
            return Err(Error::new(ErrorKind::InvalidData, "Snapshot hash mismatch"));
        }
        let mut payload = Vec::new();
        payload.extend_from_slice(snapshot_hash);
        payload.extend_from_slice(&(snapshot_data.len() as u32).to_le_bytes());
        payload.extend_from_slice(snapshot_data);
        self.send_message(Command::SendSnapshot, &payload)?;
        let (cmd, payload) = self.receive_message()?;
        match cmd {
            Command::SnapshotOk => {
                println!("✓ Snapshot uploaded successfully");
                Ok(())
            }
            Command::SnapshotFail => {
                // Parse SnapshotFail payload: missing_chunks_count + chunks + missing_metas_count + metas
                if payload.len() < 8 {
                    return Err(Error::new(ErrorKind::Other, "Server rejected snapshot: Invalid response format"));
                }
                let missing_chunks_count = u32::from_le_bytes([payload[0], payload[1], payload[2], payload[3]]) as usize;
                let missing_metas_count = u32::from_le_bytes([payload[4], payload[5], payload[6], payload[7]]) as usize;
                let mut error_msg = format!("Server rejected snapshot: {} missing chunks, {} missing metadata items",
                    missing_chunks_count, missing_metas_count);
                // Optionally parse the actual missing items for more detailed error
                if missing_chunks_count > 0 || missing_metas_count > 0 {
                    error_msg.push_str(" (run with chunk/metadata verification to see details)");
                }
                Err(Error::new(ErrorKind::Other, error_msg))
            }
            _ => Err(Error::new(ErrorKind::InvalidData, "Expected SnapshotOk or SnapshotFail")),
        }
    }

    /// Send the Close command; no response is expected.
    fn close(&mut self) -> Result<()> {
        self.send_message(Command::Close, &[])?;
        Ok(())
    }
}
/// Convenience wrapper: the BLAKE3 digest of `data` as a raw 32-byte array.
fn blake3_hash(data: &[u8]) -> [u8; 32] {
    *blake3::hash(data).as_bytes()
}
/// Build a few (data, BLAKE3 hash) pairs used as mock chunks in tests.
fn generate_mock_data() -> Vec<(Vec<u8>, [u8; 32])> {
    // Some test data chunks
    let chunks = [
        b"Hello, Arkendro sync server! This is test chunk data.".to_vec(),
        b"Another test chunk with different content for variety.".to_vec(),
        b"Binary data test: \x00\x01\x02\x03\xFF\xFE\xFD\xFC".to_vec(),
    ];
    chunks
        .into_iter()
        .map(|chunk| {
            let hash = blake3_hash(&chunk);
            (chunk, hash)
        })
        .collect()
}
/// End-to-end exercise of the sync protocol against a locally running
/// server (127.0.0.1:8380): handshake, authentication, chunk upload,
/// metadata upload for a small filesystem tree (files → dirs → partition
/// → disk), snapshot creation, and final verification.
///
/// Requires a user 'admin'/'password123' and machine ID 1 to exist on the
/// server; otherwise the run ends early after printing setup hints.
fn main() -> Result<()> {
    println!("🚀 Arkendro Sync Client Extended Test");
    println!("====================================\n");
    // Connect to server
    let mut client = SyncClient::connect("127.0.0.1:8380")?;
    println!("Connected to sync server\n");
    // Test protocol flow
    client.hello()?;
    // Try to authenticate with hardcoded machine ID (you'll need to create a machine first via the web interface)
    let machine_id = 1; // Hardcoded machine ID for testing
    match client.authenticate("admin", "password123", machine_id) {
        Ok(()) => println!("Authentication successful!\n"),
        Err(e) => {
            // Auth failure is expected on a fresh server — explain the
            // required setup and exit cleanly rather than erroring out.
            println!("Authentication failed: {}", e);
            println!("Make sure you have:");
            println!("1. Created a user 'admin' with password 'password123' via the web interface");
            println!("2. Created a machine with ID {} that belongs to user 'admin'", machine_id);
            client.close()?;
            return Ok(());
        }
    }
    println!("📁 Creating test filesystem hierarchy...\n");
    // Step 1: Create test file data chunks
    let file1_data = b"Hello, this is the content of file1.txt in our test filesystem!";
    let file2_data = b"This is file2.log with some different content for testing purposes.";
    let file3_data = b"Binary data file: \x00\x01\x02\x03\xFF\xFE\xFD\xFC and some text after.";
    let file1_hash = blake3_hash(file1_data);
    let file2_hash = blake3_hash(file2_data);
    let file3_hash = blake3_hash(file3_data);
    // Upload chunks if needed
    println!("🔗 Uploading file chunks...");
    let chunk_hashes = vec![file1_hash, file2_hash, file3_hash];
    let missing_chunks = client.check_chunks(&chunk_hashes)?;
    if !missing_chunks.is_empty() {
        // Only upload the chunks the server reported as missing.
        for &missing_hash in &missing_chunks {
            if missing_hash == file1_hash {
                client.send_chunk(&file1_hash, file1_data)?;
            } else if missing_hash == file2_hash {
                client.send_chunk(&file2_hash, file2_data)?;
            } else if missing_hash == file3_hash {
                client.send_chunk(&file3_hash, file3_data)?;
            }
        }
    } else {
        println!("✓ All chunks already exist on server");
    }
    // Step 2: Create file metadata objects
    println!("\n📄 Creating file metadata objects...");
    let file1_obj = FileObj::new(file1_data.len() as u64, vec![file1_hash]);
    let file2_obj = FileObj::new(file2_data.len() as u64, vec![file2_hash]);
    let file3_obj = FileObj::new(file3_data.len() as u64, vec![file3_hash]);
    let file1_meta_data = file1_obj.serialize();
    let file2_meta_data = file2_obj.serialize();
    let file3_meta_data = file3_obj.serialize();
    let file1_meta_hash = blake3_hash(&file1_meta_data);
    let file2_meta_hash = blake3_hash(&file2_meta_data);
    let file3_meta_hash = blake3_hash(&file3_meta_data);
    // Upload file metadata
    client.send_metadata(MetaType::File, &file1_meta_hash, &file1_meta_data)?;
    client.send_metadata(MetaType::File, &file2_meta_hash, &file2_meta_data)?;
    client.send_metadata(MetaType::File, &file3_meta_hash, &file3_meta_data)?;
    // Step 3: Create directory structures
    println!("\n📁 Creating directory structures...");
    // Create /logs subdirectory with file2
    let logs_dir_entries = vec![
        DirEntry {
            entry_type: EntryType::File,
            name: "app.log".to_string(),
            target_meta_hash: file2_meta_hash,
        },
    ];
    let logs_dir_obj = DirObj::new(logs_dir_entries);
    let logs_dir_data = logs_dir_obj.serialize();
    let logs_dir_hash = blake3_hash(&logs_dir_data);
    client.send_metadata(MetaType::Dir, &logs_dir_hash, &logs_dir_data)?;
    // Create /data subdirectory with file3
    let data_dir_entries = vec![
        DirEntry {
            entry_type: EntryType::File,
            name: "binary.dat".to_string(),
            target_meta_hash: file3_meta_hash,
        },
    ];
    let data_dir_obj = DirObj::new(data_dir_entries);
    let data_dir_data = data_dir_obj.serialize();
    let data_dir_hash = blake3_hash(&data_dir_data);
    client.send_metadata(MetaType::Dir, &data_dir_hash, &data_dir_data)?;
    // Create root directory with file1 and subdirectories
    let root_dir_entries = vec![
        DirEntry {
            entry_type: EntryType::File,
            name: "readme.txt".to_string(),
            target_meta_hash: file1_meta_hash,
        },
        DirEntry {
            entry_type: EntryType::Dir,
            name: "logs".to_string(),
            target_meta_hash: logs_dir_hash,
        },
        DirEntry {
            entry_type: EntryType::Dir,
            name: "data".to_string(),
            target_meta_hash: data_dir_hash,
        },
    ];
    let root_dir_obj = DirObj::new(root_dir_entries);
    let root_dir_data = root_dir_obj.serialize();
    let root_dir_hash = blake3_hash(&root_dir_data);
    client.send_metadata(MetaType::Dir, &root_dir_hash, &root_dir_data)?;
    // Step 4: Create partition
    println!("\n💽 Creating partition metadata...");
    let partition_obj = PartitionObj::new("test-partition".to_string(), root_dir_hash);
    let partition_data = partition_obj.serialize();
    let partition_hash = blake3_hash(&partition_data);
    client.send_metadata(MetaType::Partition, &partition_hash, &partition_data)?;
    // Step 5: Create disk
    println!("\n🖥️ Creating disk metadata...");
    let disk_obj = DiskObj::new("test-disk-001".to_string(), vec![partition_hash]);
    let disk_data = disk_obj.serialize();
    let disk_hash = blake3_hash(&disk_data);
    client.send_metadata(MetaType::Disk, &disk_hash, &disk_data)?;
    // Step 6: Create snapshot
    println!("\n📸 Creating snapshot...");
    let snapshot_obj = SnapshotObj::new(vec![disk_hash]);
    let snapshot_data = snapshot_obj.serialize();
    let snapshot_hash = blake3_hash(&snapshot_data);
    // Upload snapshot using SendSnapshot command (not SendMeta)
    client.send_snapshot(&snapshot_hash, &snapshot_data)?;
    // Step 7: Verify everything is stored
    println!("\n🔍 Verifying stored objects...");
    // Check all metadata objects
    let all_metadata = vec![
        (MetaType::File, file1_meta_hash),
        (MetaType::File, file2_meta_hash),
        (MetaType::File, file3_meta_hash),
        (MetaType::Dir, logs_dir_hash),
        (MetaType::Dir, data_dir_hash),
        (MetaType::Dir, root_dir_hash),
        (MetaType::Partition, partition_hash),
        (MetaType::Disk, disk_hash),
        (MetaType::Snapshot, snapshot_hash),
    ];
    let missing_metadata = client.check_metadata(&all_metadata)?;
    if missing_metadata.is_empty() {
        println!("✓ All metadata objects verified as stored");
    } else {
        println!("⚠ Warning: {} metadata objects still missing", missing_metadata.len());
        for (meta_type, hash) in missing_metadata {
            println!("  - Missing {:?}: {}", meta_type, hex::encode(hash));
        }
    }
    // Check all chunks
    let all_chunks = vec![file1_hash, file2_hash, file3_hash];
    let missing_chunks_final = client.check_chunks(&all_chunks)?;
    if missing_chunks_final.is_empty() {
        println!("✓ All data chunks verified as stored");
    } else {
        println!("⚠ Warning: {} chunks still missing", missing_chunks_final.len());
    }
    println!("\n🎉 Complete filesystem hierarchy created!");
    println!("📊 Summary:");
    println!("  • 3 files (readme.txt, logs/app.log, data/binary.dat)");
    println!("  • 3 directories (/, /logs, /data)");
    println!("  • 1 partition (test-partition)");
    println!("  • 1 disk (test-disk-001)");
    println!("  • 1 snapshot");
    println!("  • Snapshot hash: {}", hex::encode(snapshot_hash));
    println!("\n✅ All tests completed successfully!");
    // Close connection
    client.close()?;
    Ok(())
}