Compare commits
faabefa928...main (53 commits)

SHA1: 08cf515d2a, 7ffd64049a, 0a16e46372, e595fcbdac, fa00747e80, 4e38b13faa, 8b1a9be8c2, 7b3ae6bb6e, 88e5f3d694, 7a7a909440, 2d2b1b9c00, 87efa1cf0e, 3bb2dcabaf, 8fe30668e0, 804b3e577d, 42a036a84c, 17bc9d3f0c, a5f3ed1634, 29b32ec317, 5908ee0f99, 54be320dc1, 7ef4d8b8b2, 0ddfc36eb8, 0e82a40d66, 19e0407dbd, 676a2ac869, 4d0722d282, 8d97de06fd, da6fe42d30, 16f5162541, 2f8b301a61, 61418fb072, d3d7a10351, e39a583e95, 12f9eebfad, 0ce3751d08, 0eb7e9d4ca, 439578434e, 5a9e1e2e2b, efe4549f82, 1936304e56, a95b1d7956, 756d96c8de, 6a104c38ec, 870be26ba2, a2bf22548c, a31b1db03e, 453ae9ceea, f03a6935d5, f31d10b6e1, 33e7cc2d59, 30a383d511, 36283bdc1e
35
.dockerignore
Normal file
@@ -0,0 +1,35 @@
# Git
.git
.gitignore
README.md
LICENSE

# Rust
server/target/
**/*.rs.bk

# Node.js
webui/node_modules/
webui/dist/
webui/.vite/

# IDE
.vscode/
.idea/
*.swp
*.swo

# OS
.DS_Store
Thumbs.db

# Logs
*.log
server/data/logs/

# Database (for development)
server/data/db/*.db
server/data/backups/

# Cache
.cache/
11
.gitignore
vendored
@@ -3,6 +3,7 @@
# will have compiled files and executables
debug/
target/
dist/

# These are backup files generated by rustfmt
**/*.rs.bk
@@ -10,9 +11,7 @@ target/
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb

# RustRover
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.idea/
.vscode

server/data
53
Dockerfile
Normal file
@@ -0,0 +1,53 @@
FROM node:20-alpine AS webui-builder

WORKDIR /app/webui

COPY webui/package.json webui/pnpm-lock.yaml ./
RUN npm install -g pnpm && pnpm install --frozen-lockfile

COPY webui/ .
RUN pnpm build

FROM rust:1.89-alpine AS rust-builder

RUN apk add --no-cache musl-dev sqlite-dev pkgconfig openssl-dev

WORKDIR /app/server

COPY server/Cargo.toml server/Cargo.lock ./

COPY server/src ./src
RUN cargo build --release

FROM alpine:latest

RUN apk add --no-cache sqlite

RUN addgroup -g 1001 -S appgroup && \
    adduser -u 1001 -S appuser -G appgroup

WORKDIR /app

RUN mkdir -p data/db data/logs data/backups && \
    chown -R appuser:appgroup /app

COPY --from=rust-builder /app/server/target/release/server ./server

COPY --from=webui-builder /app/webui/dist ./dist

RUN chown -R appuser:appgroup /app

USER appuser

EXPOSE 8379

ENV RUST_LOG=info

VOLUME ["/app/data/db"]
VOLUME ["/app/data/logs"]
VOLUME ["/app/data/backups"]

HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD wget --no-verbose --tries=1 --spider http://localhost:8379/ || exit 1

CMD ["./server"]
396
PROTOCOL.md
Normal file
@@ -0,0 +1,396 @@
Good insight — you are right: **disk hashes alone** are not enough; the sync client has to produce the **entire object graph** (Disk → Partition → RootDir → Dirs → Files → Chunks), and the server has to store and verify these meta objects.

I am updating the protocol so that it is complete, binary, and AI-implementable — with clear message formats, binary meta-object layouts, and pseudocode for the client **(what it must generate and compute)** and for the server **(how it must verify and persist)**.

I am keeping it pragmatic: no compression extensions, no encryption — everything simple, extensible later.

---

# Overview (what happens)

1. The client scans disks/partitions, chunks files (4 MiB), and computes chunk hashes (BLAKE3-256, 32 bytes).
2. The client builds binary meta objects: FileObj, DirObj, PartitionObj, DiskObj, SnapshotObj (for each body, `meta_hash = BLAKE3(body)`).
3. The client talks to the server over TCP (HELLO → AUTH), asks in batches which chunks/metas are missing, and sends only those.
4. Finally it sends the snapshot commit; the server validates it and writes the snapshot reference (the server owns the pointers).

---

# General message structure (envelopes)

Every message is a fixed 24-byte header plus payload:

```
struct MsgHeader {
    u8  cmd;             // command code (see table)
    u8  flags;           // reserved
    u8  reserved[2];
    u8  session_id[16];  // all zeros before AUTH_OK
    u32 payload_len;     // LE
}
```

Replies use the same envelope.
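As a concrete reference, here is a minimal Rust sketch of encoding and decoding this envelope (the type and method names are illustrative; only the byte layout above is normative):

```rust
// Minimal sketch of envelope (de)serialization for the 24-byte header:
// cmd(1) + flags(1) + reserved(2) + session_id(16) + payload_len(4, LE).
pub struct MsgHeader {
    pub cmd: u8,
    pub flags: u8,
    pub session_id: [u8; 16],
    pub payload_len: u32,
}

impl MsgHeader {
    pub const SIZE: usize = 24;

    pub fn encode(&self) -> [u8; Self::SIZE] {
        let mut buf = [0u8; Self::SIZE];
        buf[0] = self.cmd;
        buf[1] = self.flags;
        // buf[2..4] stays zero (reserved)
        buf[4..20].copy_from_slice(&self.session_id);
        buf[20..24].copy_from_slice(&self.payload_len.to_le_bytes());
        buf
    }

    pub fn decode(buf: &[u8; Self::SIZE]) -> Self {
        Self {
            cmd: buf[0],
            flags: buf[1],
            session_id: buf[4..20].try_into().unwrap(),
            payload_len: u32::from_le_bytes(buf[20..24].try_into().unwrap()),
        }
    }
}
```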
---

# Command codes (u8)

* 0x01 HELLO
* 0x02 HELLO_OK
* 0x10 AUTH_USERPASS
* 0x11 AUTH_CODE
* 0x12 AUTH_OK
* 0x13 AUTH_FAIL
* 0x20 BATCH_CHECK_CHUNK
* 0x21 CHECK_CHUNK_RESP
* 0x22 SEND_CHUNK
* 0x23 CHUNK_OK
* 0x24 CHUNK_FAIL
* 0x30 BATCH_CHECK_META
* 0x31 CHECK_META_RESP
* 0x32 SEND_META
* 0x33 META_OK
* 0x34 META_FAIL
* 0x40 SEND_SNAPSHOT (snapshot commit)
* 0x41 SNAPSHOT_OK
* 0x42 SNAPSHOT_FAIL
* 0xFF CLOSE

---

# Key design decisions (brief)

* **Hashes**: BLAKE3-256 (32 bytes). The client computes all hashes (chunks and meta bodies).
* **Chunks on the wire**: uncompressed (simple and reliable). Compression would be a later extension.
* **Meta object bodies**: compact binary structures (see below). `meta_hash = BLAKE3(body)`.
* **Batch checks**: the client asks in batches which chunks/metas are missing, and the server returns only the missing hashes. This minimizes round trips.
* **Server persistence**: `chunks/<ab>/<cd>/<hash>.chk`, `meta/<type>/<ab>/<cd>/<hash>.meta`. The server manages snapshot pointers (e.g. `machines/<client>/snapshots/<id>.ref`).
* **Snapshot commit**: the server validates the object graph before finalizing; if anything is missing, it returns the list (SNAPSHOT_FAIL with a missing list).

---

# Binary payload formats

All multi-byte counters and lengths are little-endian (`LE`).
## BATCH_CHECK_CHUNK (Client → Server)

```
payload:
  u32 count
  for i in 0..count:
    u8[32] chunk_hash
```

## CHECK_CHUNK_RESP (Server → Client)

```
payload:
  u32 missing_count
  for i in 0..missing_count:
    u8[32] missing_chunk_hash
```

## SEND_CHUNK (Client → Server)

```
payload:
  u8[32] chunk_hash
  u32 size
  u8[size] data // raw chunk bytes
```

The server computes BLAKE3(data) and compares it to chunk_hash; if they match, it stores the chunk.
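A minimal Rust sketch of the client side of these two messages — building a BATCH_CHECK_CHUNK payload and framing a complete SEND_CHUNK message (envelope plus payload). The function names are illustrative; only the wire layout is normative:

```rust
/// BATCH_CHECK_CHUNK payload: u32 count (LE), then `count` 32-byte hashes.
fn encode_batch_check_chunk(hashes: &[[u8; 32]]) -> Vec<u8> {
    let mut payload = Vec::with_capacity(4 + hashes.len() * 32);
    payload.extend_from_slice(&(hashes.len() as u32).to_le_bytes());
    for h in hashes {
        payload.extend_from_slice(h);
    }
    payload
}

/// A complete SEND_CHUNK message: 24-byte envelope followed by the payload.
fn frame_send_chunk(session_id: [u8; 16], chunk_hash: [u8; 32], data: &[u8]) -> Vec<u8> {
    let payload_len = (32 + 4 + data.len()) as u32;
    let mut msg = Vec::with_capacity(24 + payload_len as usize);
    msg.push(0x22);                 // cmd = SEND_CHUNK (command table above)
    msg.push(0);                    // flags (reserved)
    msg.extend_from_slice(&[0, 0]); // reserved
    msg.extend_from_slice(&session_id);
    msg.extend_from_slice(&payload_len.to_le_bytes());
    msg.extend_from_slice(&chunk_hash);
    msg.extend_from_slice(&(data.len() as u32).to_le_bytes());
    msg.extend_from_slice(data);
    msg
}
```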
## BATCH_CHECK_META

```
payload:
  u32 count
  for i in 0..count:
    u8 meta_type // 1=file, 2=dir, 3=partition, 4=disk, 5=snapshot
    u8[32] meta_hash
```

## CHECK_META_RESP

```
payload:
  u32 missing_count
  for i in 0..missing_count:
    u8 meta_type
    u8[32] meta_hash
```

## SEND_META

```
payload:
  u8 meta_type // 1..5
  u8[32] meta_hash
  u32 body_len
  u8[body_len] body_bytes // the canonical body; the server computes BLAKE3(body_bytes) and compares it to meta_hash
```

## SEND_SNAPSHOT (commit)

```
payload:
  u8[32] snapshot_hash
  u32 body_len
  u8[body_len] snapshot_body // same encoding as a meta body; the server validates BLAKE3(snapshot_body) == snapshot_hash
```

The server validates that snapshot_body references only existing meta objects (recursive and direct checks). If everything is present, it creates a persistent snapshot pointer and replies SNAPSHOT_OK; otherwise it replies SNAPSHOT_FAIL with a missing list (same format as CHECK_META_RESP).
---

# Meta object binary formats (bodies)

> The client produces `body_bytes` for each meta object; `meta_hash = BLAKE3(body_bytes)`.

### FileObj (meta_type = 1)

```
FileObjBody:
  u8  version (1)
  u32 fs_type_code // e.g. 1=ext*, 2=ntfs, 3=fat32 (enum)
  u64 size
  u32 mode // POSIX mode on Linux; 0 for filesystems without one
  u32 uid
  u32 gid
  u64 mtime_unixsec
  u32 chunk_count
  for i in 0..chunk_count:
    u8[32] chunk_hash
  // optional: xattrs/ACLs as TLV (not in v1)
```

### DirObj (meta_type = 2)

```
DirObjBody:
  u8  version (1)
  u32 entry_count
  for each entry:
    u8  entry_type // 0 = file, 1 = dir, 2 = symlink
    u16 name_len
    u8[name_len] name (UTF-8)
    u8[32] target_meta_hash
```

### PartitionObj (meta_type = 3)

```
PartitionObjBody:
  u8  version (1)
  u32 fs_type_code
  u8[32] root_dir_hash // DirObj hash for the root of this partition
  u64 start_lba
  u64 end_lba
  u8[16] type_guid // zeroed if unused
```

### DiskObj (meta_type = 4)

```
DiskObjBody:
  u8  version (1)
  u32 partition_count
  for i in 0..partition_count:
    u8[32] partition_hash
  u64 disk_size_bytes
  u16 serial_len
  u8[serial_len] serial_bytes
```

### SnapshotObj (meta_type = 5)

```
SnapshotObjBody:
  u8  version (1)
  u64 created_at_unixsec
  u32 disk_count
  for i in 0..disk_count:
    u8[32] disk_hash
  // optional: snapshot metadata (user, note) as a TLV extension later
```
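To make the body encoding concrete, here is a minimal Rust sketch that serializes a FileObjBody in the v1 field order and derives its meta hash. The `blake3` crate is already in the server's Cargo.toml; the function signature is illustrative:

```rust
// Sketch: build a FileObjBody exactly as laid out above and derive meta_hash.
// All integers are little-endian; the field order matches the v1 layout.
fn encode_file_obj_body(
    fs_type_code: u32, size: u64, mode: u32, uid: u32, gid: u32,
    mtime_unixsec: u64, chunk_hashes: &[[u8; 32]],
) -> (Vec<u8>, [u8; 32]) {
    let mut body = Vec::new();
    body.push(1u8); // version
    body.extend_from_slice(&fs_type_code.to_le_bytes());
    body.extend_from_slice(&size.to_le_bytes());
    body.extend_from_slice(&mode.to_le_bytes());
    body.extend_from_slice(&uid.to_le_bytes());
    body.extend_from_slice(&gid.to_le_bytes());
    body.extend_from_slice(&mtime_unixsec.to_le_bytes());
    body.extend_from_slice(&(chunk_hashes.len() as u32).to_le_bytes());
    for h in chunk_hashes {
        body.extend_from_slice(h);
    }
    let meta_hash = *blake3::hash(&body).as_bytes();
    (body, meta_hash)
}
```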
---

# Flow (pseudocode) — **client side (sync client)**

(The client computes all hashes and sends only what is missing, in batches.)

```text
FUNCTION client_backup(tcp_conn, computer_id, disks):
    send_msg(HELLO{client_type=0, auth_type=0})
    await HELLO_OK

    send_msg(AUTH_USERPASS{username, password})
    resp = await
    if resp != AUTH_OK: abort
    session_id = resp.session_id

    // traverse per partition to limit memory
    snapshot_disk_hashes = []
    FOR disk IN disks:
        partition_hashes = []
        FOR part IN disk.partitions:
            root_dir_hash = process_dir(part.root_path, tcp_conn)
            part_body = build_partition_body(part.fs_type, root_dir_hash, part.start, part.end, part.guid)
            part_hash = blake3(part_body)
            batch_check_and_send_meta_if_missing(tcp_conn, meta_type=3, [(part_hash, part_body)])
            partition_hashes.append(part_hash)

        disk_body = build_disk_body(partition_hashes, disk.size, disk.serial)
        disk_hash = blake3(disk_body)
        batch_check_and_send_meta_if_missing(tcp_conn, meta_type=4, [(disk_hash, disk_body)])
        snapshot_disk_hashes.append(disk_hash)

    snapshot_body = build_snapshot_body(now(), snapshot_disk_hashes)
    snapshot_hash = blake3(snapshot_body)
    // final try: ask the server whether the snapshot can be committed (the server verifies)
    send_msg(SEND_SNAPSHOT(snapshot_hash, snapshot_body))
    resp = await
    if resp == SNAPSHOT_OK: success
    else if resp == SNAPSHOT_FAIL: // server returns the missing meta list
        // receive the missing metas; the client sends the remaining missing metas/chunks (loop)
        handle_missing_and_retry()
```

Helper functions:

```text
FUNCTION process_dir(path, tcp_conn):
    entries_meta = [] // list of (name, entry_type, target_hash)
    FOR entry IN readdir(path):
        IF entry.is_file:
            file_hash = process_file(entry.path, tcp_conn) // below
            entries_meta.append((entry.name, 0, file_hash))
        ELSE IF entry.is_dir:
            subdir_hash = process_dir(entry.path, tcp_conn)
            entries_meta.append((entry.name, 1, subdir_hash))
        ELSE IF symlink:
            symlink_body = build_symlink_body(target)
            symlink_hash = blake3(symlink_body)
            batch_check_and_send_meta_if_missing(tcp_conn, meta_type=1, [(symlink_hash, symlink_body)])
            entries_meta.append((entry.name, 2, symlink_hash))

    dir_body = build_dir_body(entries_meta)
    dir_hash = blake3(dir_body)
    batch_check_and_send_meta_if_missing(tcp_conn, meta_type=2, [(dir_hash, dir_body)])
    RETURN dir_hash
```

```text
FUNCTION process_file(path, tcp_conn):
    chunk_hashes = []
    FOR each chunk IN read_in_chunks(path, 4*1024*1024):
        chunk_hash = blake3(chunk)
        chunk_hashes.append(chunk_hash)
    // batch-check the chunks for this file
    missing = batch_check_chunks(tcp_conn, chunk_hashes)
    FOR each missing_hash IN missing:
        chunk_bytes = read_chunk_by_hash_from_disk(path, missing_hash) // or buffer earlier
        send_msg(SEND_CHUNK {hash, size, data})
        await CHUNK_OK

    file_body = build_file_body(fs_type, size, mode, uid, gid, mtime, chunk_hashes)
    file_hash = blake3(file_body)
    batch_check_and_send_meta_if_missing(tcp_conn, meta_type=1, [(file_hash, file_body)])
    RETURN file_hash
```
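A minimal Rust sketch of the fixed-size chunking step from `process_file`, assuming straightforward whole-file streaming with the `blake3` crate (the function name is illustrative):

```rust
use std::fs::File;
use std::io::Read;

// Split a file into fixed 4 MiB chunks and hash each chunk with BLAKE3.
const CHUNK_SIZE: usize = 4 * 1024 * 1024;

fn chunk_hashes(path: &str) -> std::io::Result<Vec<[u8; 32]>> {
    let mut file = File::open(path)?;
    let mut buf = vec![0u8; CHUNK_SIZE];
    let mut hashes = Vec::new();
    loop {
        // Fill the buffer as far as possible; the final chunk may be short.
        let mut filled = 0;
        while filled < CHUNK_SIZE {
            let n = file.read(&mut buf[filled..])?;
            if n == 0 { break; }
            filled += n;
        }
        if filled == 0 { break; }
        hashes.push(*blake3::hash(&buf[..filled]).as_bytes());
    }
    Ok(hashes)
}
```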
`batch_check_and_send_meta_if_missing`:

* Send BATCH_CHECK_META for all items.
* The server returns the list of missing metas.
* For each missing one, send SEND_META(meta_type, meta_hash, body).
* Await META_OK.

Note: batching per directory or file group reduces round trips.
---

# Flow (pseudocode) — **server side (sync server)**

```text
ON connection:
    read HELLO -> verify allowed client type
    send HELLO_OK OR HELLO_FAIL

ON AUTH_USERPASS:
    validate credentials
    if ok: generate session_id (16B), send AUTH_OK{session_id}
    else send AUTH_FAIL

ON BATCH_CHECK_CHUNK:
    read list of hashes
    missing_list = []
    for hash in hashes:
        if not exists chunks/shard(hash): missing_list.append(hash)
    send CHECK_CHUNK_RESP {missing_list}

ON SEND_CHUNK:
    read chunk_hash, size, data
    computed = blake3(data)
    if computed != chunk_hash: send CHUNK_FAIL{reason} and drop
    else if chunk already exists: send CHUNK_OK
    else: write atomically to chunks/<ab>/<cd>/<hash>.chk and send CHUNK_OK

ON BATCH_CHECK_META:
    similar: check whether meta/<type>/<hash>.meta exists — return the missing list

ON SEND_META:
    verify blake3(body) == meta_hash; if ok, write meta/<type>/<ab>/<cd>/<hash>.meta atomically; respond META_OK

ON SEND_SNAPSHOT:
    verify blake3(snapshot_body) == snapshot_hash
    // validate the object graph:
    missing = validate_graph(snapshot_body) // DFS: disks -> partitions -> dirs -> files -> chunks
    if missing not empty:
        send SNAPSHOT_FAIL {missing (as meta list and/or chunk list)}
    else:
        store the snapshot file and create the pointer machines/<client_id>/snapshots/<id>.ref
        send SNAPSHOT_OK {snapshot_id}
```

`validate_graph`:

* Parse snapshot_body → disk_hashes.
* For each disk_hash, check that the meta exists; load the disk meta → for each partition_hash check that the meta exists … recursively for dir entries → file metas → chunk existence for each chunk_hash. Collect the missing set and return it (see the Rust sketch below).
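A minimal Rust sketch of this walk, assuming a small `MetaStore` abstraction over the meta/ and chunks/ trees. The trait and the pseudo-type 0 for chunk references are illustrative, not part of the protocol:

```rust
use std::collections::HashSet;

// Assumed helper abstraction: existence checks plus typed child references.
pub trait MetaStore {
    fn has_chunk(&self, hash: &[u8; 32]) -> bool;
    /// Returns the child references of a stored meta object, or None if the
    /// meta itself is missing. Children are (meta_type, hash) pairs; chunk
    /// references use the pseudo-type 0.
    fn children(&self, meta_type: u8, hash: &[u8; 32]) -> Option<Vec<(u8, [u8; 32])>>;
}

/// Depth-first walk from the snapshot's disk hashes, collecting anything
/// (meta or chunk) that is not yet present in storage.
pub fn validate_graph(store: &impl MetaStore, disk_hashes: &[[u8; 32]]) -> Vec<(u8, [u8; 32])> {
    let mut missing = Vec::new();
    let mut seen = HashSet::new();
    let mut stack: Vec<(u8, [u8; 32])> = disk_hashes.iter().map(|h| (4u8, *h)).collect();
    while let Some((ty, hash)) = stack.pop() {
        if !seen.insert((ty, hash)) { continue; } // deduplicated object graph
        if ty == 0 {
            if !store.has_chunk(&hash) { missing.push((ty, hash)); }
        } else {
            match store.children(ty, &hash) {
                None => missing.push((ty, hash)),
                Some(kids) => stack.extend(kids),
            }
        }
    }
    missing
}
```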
---

# Behavior on `SNAPSHOT_FAIL`

* The server returns the missing meta/chunk hashes.
* The client sends exactly those (batched) and retries `SEND_SNAPSHOT`.
* Alternatively, the client uploads all required metas/chunks incrementally on the first pass (that is the order this pseudocode follows — so nothing is missing at commit time).

---

# Storage / paths (server internal)

* `chunks/<ab>/<cd>/<hash>.chk` (ab = first 2 hex chars; cd = next 2)
* `meta/files/<ab>/<cd>/<hash>.meta`
* `meta/dirs/<...>`
* `meta/parts/...`
* `meta/disks/...`
* `meta/snapshots/<snapshot_hash>.meta`
* `machines/<client_id>/snapshots/<snapshot_id>.ref` (pointer -> snapshot_hash + timestamp)

Atomic writes: `tmp -> rename` (see the sketch below).
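A minimal Rust sketch of the shard layout and the tmp → rename write, using the `hex` crate already in the server's dependencies (paths and names are illustrative):

```rust
use std::path::{Path, PathBuf};

// Derive the sharded chunk path: ab = first two hex chars, cd = next two.
fn chunk_path(root: &Path, hash: &[u8; 32]) -> PathBuf {
    let hex = hex::encode(hash);
    root.join("chunks").join(&hex[0..2]).join(&hex[2..4]).join(format!("{hex}.chk"))
}

// Write to a temporary file first, then rename into place.
// The rename is atomic only on the same filesystem.
fn write_chunk_atomic(root: &Path, hash: &[u8; 32], data: &[u8]) -> std::io::Result<()> {
    let dest = chunk_path(root, hash);
    std::fs::create_dir_all(dest.parent().unwrap())?;
    let tmp = dest.with_extension("tmp");
    std::fs::write(&tmp, data)?;
    std::fs::rename(tmp, dest)
}
```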
---

# Key implementation notes for the AI/server implementation

* **Batching is mandatory**: implement `BATCH_CHECK_CHUNK` and `BATCH_CHECK_META` efficiently (bitsets, HashSet lookups).
* **Limits**: cap `count` per batch (e.g. 1000) — the client must split its chunk lists accordingly.
* **Validation**: the server must validate the graph on `SEND_SNAPSHOT` (otherwise consistency is lost).
* **Atomic snapshot commit**: persist only once the graph is completely present.
* **Session ID**: must be carried in the header of all subsequent messages.
* **Performance**: parallelize chunk uploads (multiple TCP tasks) and allow the server several concurrent handshakes.
* **Security**: in production, use TLS over TCP or a VPN; add rate limiting / brute-force protection; give provisioning codes a TTL.
24
client/.gitignore
vendored
Normal file
@@ -0,0 +1,24 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*

node_modules
dist
dist-ssr
*.local

# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
13
client/index.html
Normal file
@@ -0,0 +1,13 @@
<!doctype html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Arkendro Client</title>
  </head>

  <body>
    <div id="root"></div>
    <script type="module" src="/src/main.jsx"></script>
  </body>
</html>
23
client/package.json
Normal file
@@ -0,0 +1,23 @@
{
  "name": "client",
  "private": true,
  "version": "0.1.0",
  "type": "module",
  "scripts": {
    "dev": "vite",
    "build": "vite build",
    "preview": "vite preview",
    "tauri": "tauri"
  },
  "dependencies": {
    "react": "^19.1.0",
    "react-dom": "^19.1.0",
    "@tauri-apps/api": "^2",
    "@tauri-apps/plugin-opener": "^2"
  },
  "devDependencies": {
    "@vitejs/plugin-react": "^4.6.0",
    "vite": "^7.0.4",
    "@tauri-apps/cli": "^2"
  }
}
1179
client/pnpm-lock.yaml
generated
Normal file
2
client/src-tauri/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
/target/
/gen/schemas
5219
client/src-tauri/Cargo.lock
generated
Normal file
25
client/src-tauri/Cargo.toml
Normal file
@@ -0,0 +1,25 @@
[package]
name = "client"
version = "0.1.0"
description = "A Tauri App"
authors = ["you"]
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[lib]
# The `_lib` suffix may seem redundant, but it is necessary
# to make the lib name unique so it doesn't conflict with the bin name.
# This seems to be an issue only on Windows; see https://github.com/rust-lang/cargo/issues/8519
name = "client_lib"
crate-type = ["staticlib", "cdylib", "rlib"]

[build-dependencies]
tauri-build = { version = "2", features = [] }

[dependencies]
tauri = { version = "2.0.0", features = ["tray-icon"] }
tauri-plugin-opener = "2"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
3
client/src-tauri/build.rs
Normal file
@@ -0,0 +1,3 @@
fn main() {
    tauri_build::build()
}
10
client/src-tauri/capabilities/default.json
Normal file
@@ -0,0 +1,10 @@
{
  "$schema": "../gen/schemas/desktop-schema.json",
  "identifier": "default",
  "description": "Capability for the main window",
  "windows": ["main"],
  "permissions": [
    "core:default",
    "opener:default"
  ]
}
BIN  client/src-tauri/icons/128x128.png           Normal file  (3.4 KiB)
BIN  client/src-tauri/icons/128x128@2x.png        Normal file  (6.8 KiB)
BIN  client/src-tauri/icons/32x32.png             Normal file  (974 B)
BIN  client/src-tauri/icons/Square107x107Logo.png Normal file  (2.8 KiB)
BIN  client/src-tauri/icons/Square142x142Logo.png Normal file  (3.8 KiB)
BIN  client/src-tauri/icons/Square150x150Logo.png Normal file  (3.9 KiB)
BIN  client/src-tauri/icons/Square284x284Logo.png Normal file  (7.6 KiB)
BIN  client/src-tauri/icons/Square30x30Logo.png   Normal file  (903 B)
BIN  client/src-tauri/icons/Square310x310Logo.png Normal file  (8.4 KiB)
BIN  client/src-tauri/icons/Square44x44Logo.png   Normal file  (1.3 KiB)
BIN  client/src-tauri/icons/Square71x71Logo.png   Normal file  (2.0 KiB)
BIN  client/src-tauri/icons/Square89x89Logo.png   Normal file  (2.4 KiB)
BIN  client/src-tauri/icons/StoreLogo.png         Normal file  (1.5 KiB)
BIN  client/src-tauri/icons/icon.icns             Normal file
BIN  client/src-tauri/icons/icon.ico              Normal file  (85 KiB)
BIN  client/src-tauri/icons/icon.png              Normal file  (14 KiB)
29
client/src-tauri/src/lib.rs
Normal file
@@ -0,0 +1,29 @@
use tauri::{
    menu::{Menu, MenuItem},
    tray::TrayIconBuilder,
};

#[cfg_attr(mobile, tauri::mobile_entry_point)]
pub fn run() {
    tauri::Builder::default()
        .setup(|app| {
            let quit_i = MenuItem::with_id(app, "quit", "Quit", true, None::<&str>)?;
            let menu = Menu::with_items(app, &[&quit_i])?;

            TrayIconBuilder::new()
                .menu(&menu)
                .icon(app.default_window_icon().unwrap().clone())
                .on_menu_event(|app, event| match event.id.as_ref() {
                    "quit" => {
                        app.exit(0);
                    }
                    _ => {}
                })
                .build(app)?;
            Ok(())
        })
        .plugin(tauri_plugin_opener::init())
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}
6
client/src-tauri/src/main.rs
Normal file
@@ -0,0 +1,6 @@
// Prevents additional console window on Windows in release, DO NOT REMOVE!!
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]

fn main() {
    client_lib::run()
}
35
client/src-tauri/tauri.conf.json
Normal file
@@ -0,0 +1,35 @@
{
  "$schema": "https://schema.tauri.app/config/2",
  "productName": "client",
  "version": "0.1.0",
  "identifier": "dev.gnm.arkendro-client",
  "build": {
    "beforeDevCommand": "pnpm dev",
    "devUrl": "http://localhost:1420",
    "beforeBuildCommand": "pnpm build",
    "frontendDist": "../dist"
  },
  "app": {
    "windows": [
      {
        "title": "client",
        "width": 800,
        "height": 600
      }
    ],
    "security": {
      "csp": null
    }
  },
  "bundle": {
    "active": true,
    "targets": "all",
    "icon": [
      "icons/32x32.png",
      "icons/128x128.png",
      "icons/128x128@2x.png",
      "icons/icon.icns",
      "icons/icon.ico"
    ]
  }
}
10
client/src/App.jsx
Normal file
@@ -0,0 +1,10 @@
const App = () => {
  return (
    <main className="container">
      <h1>Arkendro client</h1>
    </main>
  );
};

export default App;
9
client/src/main.jsx
Normal file
@@ -0,0 +1,9 @@
import React from "react";
import ReactDOM from "react-dom/client";
import App from "./App";

ReactDOM.createRoot(document.getElementById("root")).render(
  <React.StrictMode>
    <App />
  </React.StrictMode>,
);
25
client/vite.config.js
Normal file
@@ -0,0 +1,25 @@
import { defineConfig } from "vite";
import react from "@vitejs/plugin-react";

const host = process.env.TAURI_DEV_HOST;

export default defineConfig(async () => ({
  plugins: [react()],

  clearScreen: false,
  server: {
    port: 1420,
    strictPort: true,
    host: host || false,
    hmr: host
      ? {
          protocol: "ws",
          host,
          port: 1421,
        }
      : undefined,
    watch: {
      ignored: ["**/src-tauri/**"],
    },
  },
}));
56
server/.sqlx/query-2d6e5810f76e780a4a9b54c5ea39d707be614eb304dc6b4f32d8b6d28464c4b5.json
generated
Normal file
@@ -0,0 +1,56 @@
{
  "db_name": "SQLite",
  "query": "\n SELECT pc.id, pc.code, pc.expires_at, pc.used, m.id as machine_id, m.user_id, u.username\n FROM provisioning_codes pc\n JOIN machines m ON pc.machine_id = m.id\n JOIN users u ON m.user_id = u.id\n WHERE pc.code = ? AND pc.used = 0\n ",
  "describe": {
    "columns": [
      {
        "name": "id",
        "ordinal": 0,
        "type_info": "Integer"
      },
      {
        "name": "code",
        "ordinal": 1,
        "type_info": "Text"
      },
      {
        "name": "expires_at",
        "ordinal": 2,
        "type_info": "Datetime"
      },
      {
        "name": "used",
        "ordinal": 3,
        "type_info": "Bool"
      },
      {
        "name": "machine_id",
        "ordinal": 4,
        "type_info": "Integer"
      },
      {
        "name": "user_id",
        "ordinal": 5,
        "type_info": "Integer"
      },
      {
        "name": "username",
        "ordinal": 6,
        "type_info": "Text"
      }
    ],
    "parameters": {
      "Right": 1
    },
    "nullable": [
      true,
      false,
      false,
      true,
      true,
      false,
      false
    ]
  },
  "hash": "2d6e5810f76e780a4a9b54c5ea39d707be614eb304dc6b4f32d8b6d28464c4b5"
}
26
server/.sqlx/query-43af0c22d05eca56b2a7b1f6eed873102d8e006330fd7d8063657d2df936b3fb.json
generated
Normal file
@@ -0,0 +1,26 @@
{
  "db_name": "SQLite",
  "query": "SELECT id, user_id FROM machines WHERE id = ?",
  "describe": {
    "columns": [
      {
        "name": "id",
        "ordinal": 0,
        "type_info": "Integer"
      },
      {
        "name": "user_id",
        "ordinal": 1,
        "type_info": "Integer"
      }
    ],
    "parameters": {
      "Right": 1
    },
    "nullable": [
      false,
      false
    ]
  },
  "hash": "43af0c22d05eca56b2a7b1f6eed873102d8e006330fd7d8063657d2df936b3fb"
}
12
server/.sqlx/query-508e673540beae31730d323bbb52d91747bb405ef3d6f4a7f20776fdeb618688.json
generated
Normal file
@@ -0,0 +1,12 @@
{
  "db_name": "SQLite",
  "query": "UPDATE provisioning_codes SET used = 1 WHERE id = ?",
  "describe": {
    "columns": [],
    "parameters": {
      "Right": 1
    },
    "nullable": []
  },
  "hash": "508e673540beae31730d323bbb52d91747bb405ef3d6f4a7f20776fdeb618688"
}
32
server/.sqlx/query-9f9215a05f729db6f707c84967f4f11033d39d17ded98f4fe9fb48f3d1598596.json
generated
Normal file
@@ -0,0 +1,32 @@
{
  "db_name": "SQLite",
  "query": "SELECT id, username, password_hash FROM users WHERE username = ?",
  "describe": {
    "columns": [
      {
        "name": "id",
        "ordinal": 0,
        "type_info": "Integer"
      },
      {
        "name": "username",
        "ordinal": 1,
        "type_info": "Text"
      },
      {
        "name": "password_hash",
        "ordinal": 2,
        "type_info": "Text"
      }
    ],
    "parameters": {
      "Right": 1
    },
    "nullable": [
      true,
      false,
      false
    ]
  },
  "hash": "9f9215a05f729db6f707c84967f4f11033d39d17ded98f4fe9fb48f3d1598596"
}
26
server/.sqlx/query-cc5f2e47cc53dd29682506ff84f07f7d0914e3141e62b470e84b3886b50764a1.json
generated
Normal file
@@ -0,0 +1,26 @@
{
  "db_name": "SQLite",
  "query": "SELECT id, user_id FROM machines WHERE id = ? AND user_id = ?",
  "describe": {
    "columns": [
      {
        "name": "id",
        "ordinal": 0,
        "type_info": "Integer"
      },
      {
        "name": "user_id",
        "ordinal": 1,
        "type_info": "Integer"
      }
    ],
    "parameters": {
      "Right": 2
    },
    "nullable": [
      false,
      false
    ]
  },
  "hash": "cc5f2e47cc53dd29682506ff84f07f7d0914e3141e62b470e84b3886b50764a1"
}
2516
server/Cargo.lock
generated
Normal file
24
server/Cargo.toml
Normal file
@@ -0,0 +1,24 @@
[package]
name = "server"
version = "0.1.0"
edition = "2021"

[dependencies]
axum = "0.8.4"
tokio = { version = "1.47.1", features = ["full", "signal"] }
sqlx = { version = "0.8.6", features = ["runtime-tokio-rustls", "sqlite", "chrono", "uuid"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
bcrypt = "0.17.1"
uuid = { version = "1.0", features = ["v4", "serde"] }
chrono = { version = "0.4", features = ["serde"] }
tower-http = { version = "0.6.6", features = ["cors", "fs"] }
anyhow = "1.0"
rand = "0.8"
blake3 = "1.5"
bytes = "1.0"
bincode = "1.3"
hex = "0.4"

[dev-dependencies]
tempfile = "3.0"
106
server/src/controllers/auth.rs
Normal file
@@ -0,0 +1,106 @@
use crate::controllers::users::UsersController;
use crate::utils::{error::*, models::*, DbPool};
use chrono::{Duration, Utc};
use sqlx::Row;
use uuid::Uuid;

pub struct AuthController;

impl AuthController {
    pub async fn login(pool: &DbPool, username: &str, password: &str) -> AppResult<LoginResponse> {
        let user = UsersController::verify_user_credentials(pool, username, password).await?;

        let session = Self::create_session(pool, user.id).await?;

        Ok(LoginResponse {
            token: session.token,
            role: user.role,
        })
    }

    pub async fn logout(pool: &DbPool, user_id: i64) -> AppResult<()> {
        sqlx::query("DELETE FROM sessions WHERE user_id = ?")
            .bind(user_id)
            .execute(pool)
            .await?;

        Ok(())
    }

    pub async fn create_session(pool: &DbPool, user_id: i64) -> AppResult<Session> {
        let token = Self::generate_session_token();
        let expires_at = Utc::now() + Duration::days(30);
        let result = sqlx::query(
            r#"
            INSERT INTO sessions (user_id, token, expires_at)
            VALUES (?, ?, ?)
            "#,
        )
        .bind(user_id)
        .bind(&token)
        .bind(expires_at)
        .execute(pool)
        .await?;

        Ok(Session {
            id: result.last_insert_rowid(),
            user_id,
            token,
            created_at: Utc::now(),
            expires_at,
        })
    }

    pub async fn get_session_by_token(pool: &DbPool, token: &str) -> AppResult<Option<Session>> {
        let row = sqlx::query(
            r#"
            SELECT id, user_id, token, created_at, expires_at
            FROM sessions WHERE token = ? AND expires_at > datetime('now')
            "#,
        )
        .bind(token)
        .fetch_optional(pool)
        .await?;

        if let Some(row) = row {
            Ok(Some(Session {
                id: row.get("id"),
                user_id: row.get("user_id"),
                token: row.get("token"),
                created_at: row.get("created_at"),
                expires_at: row.get("expires_at"),
            }))
        } else {
            Ok(None)
        }
    }

    pub async fn authenticate_user(pool: &DbPool, token: &str) -> AppResult<User> {
        let session = Self::get_session_by_token(pool, token)
            .await?
            .ok_or_else(|| auth_error("Invalid or expired session"))?;

        let user = UsersController::get_user_by_id(pool, session.user_id).await?;
        Ok(user)
    }

    pub async fn cleanup_expired_sessions(pool: &DbPool) -> AppResult<()> {
        sqlx::query("DELETE FROM sessions WHERE expires_at <= datetime('now')")
            .execute(pool)
            .await?;
        Ok(())
    }

    fn generate_session_token() -> String {
        Uuid::new_v4().to_string()
    }

    #[allow(dead_code)]
    pub async fn delete_session_by_token(pool: &DbPool, token: &str) -> AppResult<()> {
        sqlx::query("DELETE FROM sessions WHERE token = ?")
            .bind(token)
            .execute(pool)
            .await?;
        Ok(())
    }
}
365
server/src/controllers/files.rs
Normal file
@@ -0,0 +1,365 @@
use crate::sync::storage::Storage;
use crate::sync::meta::{MetaObj, EntryType};
use crate::sync::protocol::MetaType;
use crate::utils::{error::*, models::*, DbPool};
use serde::Serialize;
use axum::response::Response;
use axum::body::Body;
use axum::http::{HeaderMap, HeaderValue};

#[derive(Debug, Serialize)]
pub struct FileSystemEntry {
    pub name: String,
    pub entry_type: String, // "file", "dir", "symlink"
    pub size_bytes: Option<u64>,
    pub meta_hash: String,
}

#[derive(Debug, Serialize)]
pub struct DirectoryListing {
    pub path: String,
    pub entries: Vec<FileSystemEntry>,
    pub parent_hash: Option<String>,
}

#[derive(Debug, Serialize)]
pub struct FileMetadata {
    pub name: String,
    pub size_bytes: u64,
    pub mime_type: String,
    pub meta_hash: String,
}

pub struct FilesController;

impl FilesController {
    /// List directory contents for a partition
    pub async fn list_partition_root(
        pool: &DbPool,
        machine_id: i64,
        snapshot_id: String,
        partition_index: usize,
        user: &User,
    ) -> AppResult<DirectoryListing> {
        // Verify machine access
        Self::verify_machine_access(pool, machine_id, user).await?;

        let storage = Storage::new("./data");

        // Get partition hash from snapshot
        let partition_hash = Self::get_partition_hash(&storage, machine_id, &snapshot_id, partition_index).await?;

        // Load partition metadata to get root directory hash
        let partition_meta = storage.load_meta(MetaType::Partition, &partition_hash).await
            .map_err(|_| AppError::NotFoundError("Partition metadata not found".to_string()))?
            .ok_or_else(|| AppError::NotFoundError("Partition metadata not found".to_string()))?;

        if let MetaObj::Partition(partition_obj) = partition_meta {
            Self::list_directory_by_hash(&storage, &partition_obj.root_dir_hash, "/".to_string()).await
        } else {
            Err(AppError::ValidationError("Invalid partition metadata".to_string()))
        }
    }

    /// List directory contents by directory hash
    pub async fn list_directory(
        pool: &DbPool,
        machine_id: i64,
        snapshot_id: String,
        partition_index: usize,
        dir_hash: String,
        user: &User,
    ) -> AppResult<DirectoryListing> {
        // Verify machine access
        Self::verify_machine_access(pool, machine_id, user).await?;

        let storage = Storage::new("./data");

        // Decode directory hash
        let hash_bytes = hex::decode(&dir_hash)
            .map_err(|_| AppError::ValidationError("Invalid directory hash format".to_string()))?;

        if hash_bytes.len() != 32 {
            return Err(AppError::ValidationError("Directory hash must be 32 bytes".to_string()));
        }

        let mut hash = [0u8; 32];
        hash.copy_from_slice(&hash_bytes);

        Self::list_directory_by_hash(&storage, &hash, dir_hash).await
    }

    /// Download a file by file hash with filename
    pub async fn download_file(
        pool: &DbPool,
        machine_id: i64,
        _snapshot_id: String,
        _partition_index: usize,
        file_hash: String,
        filename: Option<String>,
        user: &User,
    ) -> AppResult<Response<Body>> {
        // Verify machine access
        Self::verify_machine_access(pool, machine_id, user).await?;

        let storage = Storage::new("./data");

        // Decode file hash
        let hash_bytes = hex::decode(&file_hash)
            .map_err(|_| AppError::ValidationError("Invalid file hash format".to_string()))?;

        if hash_bytes.len() != 32 {
            return Err(AppError::ValidationError("File hash must be 32 bytes".to_string()));
        }

        let mut hash = [0u8; 32];
        hash.copy_from_slice(&hash_bytes);

        // Load file metadata
        let file_meta = storage.load_meta(MetaType::File, &hash).await
            .map_err(|_| AppError::NotFoundError("File metadata not found".to_string()))?
            .ok_or_else(|| AppError::NotFoundError("File metadata not found".to_string()))?;

        if let MetaObj::File(file_obj) = file_meta {
            // Reconstruct file content from chunks
            let mut file_content = Vec::new();

            for chunk_hash in &file_obj.chunk_hashes {
                let chunk_data = storage.load_chunk(chunk_hash).await
                    .map_err(|_| AppError::NotFoundError(format!("Chunk {} not found", hex::encode(chunk_hash))))?
                    .ok_or_else(|| AppError::NotFoundError(format!("Chunk {} not found", hex::encode(chunk_hash))))?;

                file_content.extend_from_slice(&chunk_data);
            }

            // Use provided filename or generate a generic one
            let filename = filename.unwrap_or_else(|| format!("file_{}.bin", &file_hash[..8]));

            // Determine MIME type from file content
            let mime_type = Self::detect_mime_type(&filename, &file_content);

            // Create response headers
            let mut headers = HeaderMap::new();
            headers.insert(
                "content-type",
                HeaderValue::from_str(&mime_type).unwrap_or_else(|_| HeaderValue::from_static("application/octet-stream"))
            );
            headers.insert(
                "content-disposition",
                HeaderValue::from_str(&format!("attachment; filename=\"{}\"", filename))
                    .unwrap_or_else(|_| HeaderValue::from_static("attachment"))
            );
            headers.insert(
                "content-length",
                HeaderValue::from_str(&file_content.len().to_string()).unwrap()
            );

            let mut response = Response::new(Body::from(file_content));
            *response.headers_mut() = headers;

            Ok(response)
        } else {
            Err(AppError::ValidationError("Invalid file metadata".to_string()))
        }
    }

    /// Get file metadata without downloading content
    pub async fn get_file_metadata(
        pool: &DbPool,
        machine_id: i64,
        snapshot_id: String,
        partition_index: usize,
        file_hash: String,
        user: &User,
    ) -> AppResult<FileMetadata> {
        // Verify machine access
        Self::verify_machine_access(pool, machine_id, user).await?;

        let storage = Storage::new("./data");

        // Decode file hash
        let hash_bytes = hex::decode(&file_hash)
            .map_err(|_| AppError::ValidationError("Invalid file hash format".to_string()))?;

        if hash_bytes.len() != 32 {
            return Err(AppError::ValidationError("File hash must be 32 bytes".to_string()));
        }

        let mut hash = [0u8; 32];
        hash.copy_from_slice(&hash_bytes);

        // Load file metadata
        let file_meta = storage.load_meta(MetaType::File, &hash).await
            .map_err(|_| AppError::NotFoundError("File metadata not found".to_string()))?
            .ok_or_else(|| AppError::NotFoundError("File metadata not found".to_string()))?;

        if let MetaObj::File(file_obj) = file_meta {
            let filename = format!("file_{}.bin", &file_hash[..8]);
            let mime_type = Self::detect_mime_type(&filename, &[]);

            Ok(FileMetadata {
                name: filename,
                size_bytes: file_obj.size,
                mime_type,
                meta_hash: file_hash,
            })
        } else {
            Err(AppError::ValidationError("Invalid file metadata".to_string()))
        }
    }

    // Helper methods

    async fn verify_machine_access(pool: &DbPool, machine_id: i64, user: &User) -> AppResult<()> {
        let machine = sqlx::query!(
            "SELECT id, user_id FROM machines WHERE id = ? AND user_id = ?",
            machine_id,
            user.id
        )
        .fetch_optional(pool)
        .await
        .map_err(|e| AppError::DatabaseError(e.to_string()))?;

        if machine.is_none() {
            return Err(AppError::NotFoundError("Machine not found or access denied".to_string()));
        }

        Ok(())
    }

    async fn get_partition_hash(
        storage: &Storage,
        machine_id: i64,
        snapshot_id: &str,
        partition_index: usize,
    ) -> AppResult<[u8; 32]> {
        // Load snapshot reference to get hash
        let (snapshot_hash, _) = storage.load_snapshot_ref(machine_id, snapshot_id).await
            .map_err(|_| AppError::NotFoundError("Snapshot not found".to_string()))?
            .ok_or_else(|| AppError::NotFoundError("Snapshot not found".to_string()))?;

        // Load snapshot metadata
        let snapshot_meta = storage.load_meta(MetaType::Snapshot, &snapshot_hash).await
            .map_err(|_| AppError::NotFoundError("Snapshot metadata not found".to_string()))?
            .ok_or_else(|| AppError::NotFoundError("Snapshot metadata not found".to_string()))?;

        if let MetaObj::Snapshot(snapshot_obj) = snapshot_meta {
            // Get first disk (assuming single disk for now)
            if snapshot_obj.disk_hashes.is_empty() {
                return Err(AppError::NotFoundError("No disks in snapshot".to_string()));
            }

            let disk_hash = snapshot_obj.disk_hashes[0];

            // Load disk metadata
            let disk_meta = storage.load_meta(MetaType::Disk, &disk_hash).await
                .map_err(|_| AppError::NotFoundError("Disk metadata not found".to_string()))?
                .ok_or_else(|| AppError::NotFoundError("Disk metadata not found".to_string()))?;

            if let MetaObj::Disk(disk_obj) = disk_meta {
                if partition_index >= disk_obj.partition_hashes.len() {
                    return Err(AppError::NotFoundError("Partition index out of range".to_string()));
                }

                Ok(disk_obj.partition_hashes[partition_index])
            } else {
                Err(AppError::ValidationError("Invalid disk metadata".to_string()))
            }
        } else {
            Err(AppError::ValidationError("Invalid snapshot metadata".to_string()))
        }
    }

    async fn list_directory_by_hash(
        storage: &Storage,
        dir_hash: &[u8; 32],
        path: String,
    ) -> AppResult<DirectoryListing> {
        // Load directory metadata
        let dir_meta = storage.load_meta(MetaType::Dir, dir_hash).await
            .map_err(|_| AppError::NotFoundError("Directory metadata not found".to_string()))?
            .ok_or_else(|| AppError::NotFoundError("Directory metadata not found".to_string()))?;

        if let MetaObj::Dir(dir_obj) = dir_meta {
            let mut entries = Vec::new();

            for entry in dir_obj.entries {
                let entry_type_str = match entry.entry_type {
                    EntryType::File => "file",
                    EntryType::Dir => "dir",
                    EntryType::Symlink => "symlink",
                };

                let size_bytes = if entry.entry_type == EntryType::File {
                    // Load file metadata to get size
                    if let Ok(Some(MetaObj::File(file_obj))) = storage.load_meta(MetaType::File, &entry.target_meta_hash).await {
                        Some(file_obj.size)
                    } else {
                        None
                    }
                } else {
                    None
                };

                entries.push(FileSystemEntry {
                    name: entry.name,
                    entry_type: entry_type_str.to_string(),
                    size_bytes,
                    meta_hash: hex::encode(entry.target_meta_hash),
                });
            }

            // Sort entries: directories first, then files, both alphabetically
            entries.sort_by(|a, b| {
                match (a.entry_type.as_str(), b.entry_type.as_str()) {
                    ("dir", "file") => std::cmp::Ordering::Less,
                    ("file", "dir") => std::cmp::Ordering::Greater,
                    _ => a.name.cmp(&b.name),
                }
            });

            Ok(DirectoryListing {
                path,
                entries,
                parent_hash: None, // TODO: Implement parent tracking if needed
            })
        } else {
            Err(AppError::ValidationError("Invalid directory metadata".to_string()))
        }
    }

    fn detect_mime_type(filename: &str, _content: &[u8]) -> String {
        // Simple MIME type detection based on file extension
        let extension = std::path::Path::new(filename)
            .extension()
            .and_then(|ext| ext.to_str())
            .unwrap_or("")
            .to_lowercase();

        match extension.as_str() {
            "txt" | "md" | "readme" => "text/plain",
            "html" | "htm" => "text/html",
            "css" => "text/css",
            "js" => "application/javascript",
            "json" => "application/json",
            "xml" => "application/xml",
            "pdf" => "application/pdf",
            "zip" => "application/zip",
            "tar" => "application/x-tar",
            "gz" => "application/gzip",
            "jpg" | "jpeg" => "image/jpeg",
            "png" => "image/png",
            "gif" => "image/gif",
            "svg" => "image/svg+xml",
            "mp4" => "video/mp4",
            "mp3" => "audio/mpeg",
            "wav" => "audio/wav",
            "exe" => "application/x-msdownload",
            "dll" => "application/x-msdownload",
            "so" => "application/x-sharedlib",
            "deb" => "application/vnd.debian.binary-package",
            "rpm" => "application/x-rpm",
            _ => "application/octet-stream",
        }.to_string()
    }
}
203
server/src/controllers/machines.rs
Normal file
@@ -0,0 +1,203 @@
use crate::utils::{base62::Base62, config::ConfigManager, error::*, models::*, DbPool};
use chrono::{Duration, Utc};
use rand::{distributions::Alphanumeric, Rng};
use sqlx::Row;
use uuid::Uuid;

pub struct MachinesController;

impl MachinesController {
    pub async fn register_machine(pool: &DbPool, user: &User, name: &str) -> AppResult<Machine> {
        Self::validate_machine_input(name)?;

        let machine_uuid = Uuid::new_v4();

        let machine = Self::create_machine(pool, user.id, &machine_uuid, name).await?;

        Ok(machine)
    }

    pub async fn create_provisioning_code(
        pool: &DbPool,
        machine_id: i64,
        user: &User,
    ) -> AppResult<ProvisioningCodeResponse> {
        let machine = Self::get_machine_by_id(pool, machine_id).await?;

        if user.role != UserRole::Admin && machine.user_id != user.id {
            return Err(forbidden_error("Access denied"));
        }

        let code: String = rand::thread_rng()
            .sample_iter(&Alphanumeric)
            .take(5)
            .map(char::from)
            .collect();

        let external_url = ConfigManager::get_external_url(pool).await?;
        let provisioning_string = format!("52?#{}/{}", external_url, code);
        let encoded_code = Base62::encode(&provisioning_string);
        let expires_at = Utc::now() + Duration::hours(1);

        sqlx::query(
            r#"
            INSERT INTO provisioning_codes (machine_id, code, expires_at)
            VALUES (?, ?, ?)
            "#,
        )
        .bind(machine_id)
        .bind(&code)
        .bind(expires_at)
        .execute(pool)
        .await?;

        Ok(ProvisioningCodeResponse {
            code: encoded_code,
            raw_code: code,
            expires_at,
        })
    }

    pub async fn get_machines_for_user(pool: &DbPool, user: &User) -> AppResult<Vec<Machine>> {
        Self::get_machines_by_user_id(pool, user.id).await
    }

    pub async fn delete_machine(pool: &DbPool, machine_id: i64, user: &User) -> AppResult<()> {
        let machine = Self::get_machine_by_id(pool, machine_id).await?;

        if user.role != UserRole::Admin && machine.user_id != user.id {
            return Err(forbidden_error("Access denied"));
        }

        Self::delete_machine_by_id(pool, machine_id).await
    }

    pub async fn get_machine_by_id(pool: &DbPool, id: i64) -> AppResult<Machine> {
        let row = sqlx::query(
            r#"
            SELECT id, user_id, uuid, name, created_at
            FROM machines WHERE id = ?
            "#,
        )
        .bind(id)
        .fetch_one(pool)
        .await?;

        Ok(Machine {
            id: row.get("id"),
            user_id: row.get("user_id"),
            uuid: Uuid::parse_str(&row.get::<String, _>("uuid")).unwrap(),
            machine_id: row.get::<String, _>("uuid"),
            name: row.get("name"),
            created_at: row.get("created_at"),
        })
    }

    async fn get_machines_by_user_id(pool: &DbPool, user_id: i64) -> AppResult<Vec<Machine>> {
        let rows = sqlx::query(
            r#"
            SELECT id, user_id, uuid, name, created_at
            FROM machines WHERE user_id = ? ORDER BY created_at DESC
            "#,
        )
        .bind(user_id)
        .fetch_all(pool)
        .await?;

        let mut machines = Vec::new();
        for row in rows {
            machines.push(Machine {
                id: row.get("id"),
                user_id: row.get("user_id"),
                uuid: Uuid::parse_str(&row.get::<String, _>("uuid")).unwrap(),
                machine_id: row.get::<String, _>("uuid"),
                name: row.get("name"),
                created_at: row.get("created_at"),
            });
        }

        Ok(machines)
    }

    async fn create_machine(
        pool: &DbPool,
        user_id: i64,
        uuid: &Uuid,
        name: &str,
    ) -> AppResult<Machine> {
        let result = sqlx::query(
            r#"
            INSERT INTO machines (user_id, uuid, name)
            VALUES (?, ?, ?)
            "#,
        )
        .bind(user_id)
        .bind(uuid.to_string())
        .bind(name)
        .execute(pool)
        .await?;

        Self::get_machine_by_id(pool, result.last_insert_rowid()).await
    }

    async fn machine_exists_by_uuid(pool: &DbPool, uuid: &Uuid) -> AppResult<bool> {
        let row = sqlx::query("SELECT COUNT(*) as count FROM machines WHERE uuid = ?")
            .bind(uuid.to_string())
            .fetch_one(pool)
            .await?;

        let count: i64 = row.get("count");
        Ok(count > 0)
    }

    async fn delete_machine_by_id(pool: &DbPool, id: i64) -> AppResult<()> {
        sqlx::query("DELETE FROM machines WHERE id = ?")
            .bind(id)
            .execute(pool)
            .await?;
        Ok(())
    }

    async fn get_provisioning_code(
        pool: &DbPool,
        code: &str,
    ) -> AppResult<Option<ProvisioningCode>> {
        let row = sqlx::query(
            r#"
            SELECT id, machine_id, code, created_at, expires_at, used
            FROM provisioning_codes WHERE code = ?
            "#,
        )
        .bind(code)
        .fetch_optional(pool)
        .await?;

        if let Some(row) = row {
            Ok(Some(ProvisioningCode {
                id: row.get("id"),
                machine_id: row.get("machine_id"),
                code: row.get("code"),
                created_at: row.get("created_at"),
                expires_at: row.get("expires_at"),
                used: row.get("used"),
            }))
        } else {
            Ok(None)
        }
    }

    async fn mark_provisioning_code_used(pool: &DbPool, code: &str) -> AppResult<()> {
        sqlx::query("UPDATE provisioning_codes SET used = 1 WHERE code = ?")
            .bind(code)
            .execute(pool)
            .await?;
        Ok(())
    }

    fn validate_machine_input(name: &str) -> AppResult<()> {
        if name.trim().is_empty() {
            return Err(validation_error("Machine name cannot be empty"));
        }
        Ok(())
    }
}
5
server/src/controllers/mod.rs
Normal file
@@ -0,0 +1,5 @@
pub mod auth;
pub mod machines;
pub mod snapshots;
pub mod users;
pub mod files;
184
server/src/controllers/snapshots.rs
Normal file
@@ -0,0 +1,184 @@
use crate::sync::storage::Storage;
use crate::sync::meta::{MetaObj, FsType};
use crate::sync::protocol::MetaType;
use crate::utils::{error::*, models::*, DbPool};
use serde::Serialize;
use chrono::{DateTime, Utc};

// Basic snapshot info for listing
#[derive(Debug, Serialize)]
pub struct SnapshotSummary {
    pub id: String,
    pub snapshot_hash: String,
    pub created_at: String,
}

// Detailed snapshot info with disk/partition data
#[derive(Debug, Serialize)]
pub struct SnapshotDetails {
    pub id: String,
    pub snapshot_hash: String,
    pub created_at: String,
    pub disks: Vec<DiskInfo>,
}

#[derive(Debug, Serialize)]
pub struct DiskInfo {
    pub serial: String,
    pub size_bytes: u64,
    pub partitions: Vec<PartitionInfo>,
}

#[derive(Debug, Serialize)]
pub struct PartitionInfo {
    pub fs_type: String,
    pub start_lba: u64,
    pub end_lba: u64,
    pub size_bytes: u64,
}

pub struct SnapshotsController;

impl SnapshotsController {
    pub async fn get_machine_snapshots(
        pool: &DbPool,
        machine_id: i64,
        user: &User,
    ) -> AppResult<Vec<SnapshotSummary>> {
        // Verify machine access
        let machine = sqlx::query!(
            "SELECT id, user_id FROM machines WHERE id = ? AND user_id = ?",
            machine_id,
            user.id
        )
        .fetch_optional(pool)
        .await
        .map_err(|e| AppError::DatabaseError(e.to_string()))?;

        if machine.is_none() {
            return Err(AppError::NotFoundError("Machine not found or access denied".to_string()));
        }

        let _machine = machine.unwrap();

        let storage = Storage::new("./data");
        let mut snapshot_summaries = Vec::new();

        // List all snapshots for this machine from storage
        match storage.list_snapshots(machine_id).await {
            Ok(snapshot_ids) => {
                for snapshot_id in snapshot_ids {
                    // Load snapshot reference to get hash and timestamp
                    if let Ok(Some((snapshot_hash, created_at_timestamp))) = storage.load_snapshot_ref(machine_id, &snapshot_id).await {
                        let created_at = DateTime::from_timestamp(created_at_timestamp as i64, 0)
                            .unwrap_or_else(|| Utc::now())
                            .format("%Y-%m-%d %H:%M:%S UTC")
                            .to_string();

                        snapshot_summaries.push(SnapshotSummary {
                            id: snapshot_id,
                            snapshot_hash: hex::encode(snapshot_hash),
                            created_at,
                        });
                    }
                }
            },
            Err(_) => {
                // If no snapshots directory exists, return empty list
                return Ok(Vec::new());
            }
        }

        // Sort by creation time (newest first)
        snapshot_summaries.sort_by(|a, b| b.created_at.cmp(&a.created_at));

        Ok(snapshot_summaries)
    }

    pub async fn get_snapshot_details(
        pool: &DbPool,
        machine_id: i64,
        snapshot_id: String,
        user: &User,
    ) -> AppResult<SnapshotDetails> {
        // Verify machine access
        let machine = sqlx::query!(
            "SELECT id, user_id FROM machines WHERE id = ? AND user_id = ?",
            machine_id,
            user.id
        )
        .fetch_optional(pool)
        .await
        .map_err(|e| AppError::DatabaseError(e.to_string()))?;

        if machine.is_none() {
            return Err(AppError::NotFoundError("Machine not found or access denied".to_string()));
        }

        let _machine = machine.unwrap();

        let storage = Storage::new("./data");

        // Load snapshot reference to get hash and timestamp
        let (snapshot_hash, created_at_timestamp) = storage.load_snapshot_ref(machine_id, &snapshot_id).await
            .map_err(|_| AppError::NotFoundError("Snapshot not found".to_string()))?
            .ok_or_else(|| AppError::NotFoundError("Snapshot not found".to_string()))?;

        // Load snapshot metadata
        let snapshot_meta = storage.load_meta(MetaType::Snapshot, &snapshot_hash).await
            .map_err(|_| AppError::NotFoundError("Snapshot metadata not found".to_string()))?
            .ok_or_else(|| AppError::NotFoundError("Snapshot metadata not found".to_string()))?;

        if let MetaObj::Snapshot(snapshot_obj) = snapshot_meta {
            let mut disks = Vec::new();

            for disk_hash in snapshot_obj.disk_hashes {
                if let Ok(Some(disk_meta)) = storage.load_meta(MetaType::Disk, &disk_hash).await {
                    if let MetaObj::Disk(disk_obj) = disk_meta {
                        let mut partitions = Vec::new();

                        for partition_hash in disk_obj.partition_hashes {
                            if let Ok(Some(partition_meta)) = storage.load_meta(MetaType::Partition, &partition_hash).await {
                                if let MetaObj::Partition(partition_obj) = partition_meta {
                                    let fs_type_str = match partition_obj.fs_type_code {
                                        FsType::Ext => "ext",
                                        FsType::Ntfs => "ntfs",
                                        FsType::Fat32 => "fat32",
                                        FsType::Unknown => "unknown",
                                    };

                                    partitions.push(PartitionInfo {
                                        fs_type: fs_type_str.to_string(),
                                        start_lba: partition_obj.start_lba,
                                        end_lba: partition_obj.end_lba,
                                        size_bytes: (partition_obj.end_lba - partition_obj.start_lba) * 512,
                                    });
                                }
                            }
                        }

                        disks.push(DiskInfo {
                            serial: disk_obj.serial,
                            size_bytes: disk_obj.disk_size_bytes,
                            partitions,
                        });
                    }
                }
            }

            // Convert timestamp to readable format
            let created_at_str = DateTime::<Utc>::from_timestamp(created_at_timestamp as i64, 0)
                .map(|dt| dt.format("%Y-%m-%d %H:%M:%S").to_string())
                .unwrap_or_else(|| "Unknown".to_string());

            Ok(SnapshotDetails {
                id: snapshot_id,
                snapshot_hash: hex::encode(snapshot_hash),
                created_at: created_at_str,
                disks,
            })
        } else {
            Err(AppError::ValidationError("Invalid snapshot metadata".to_string()))
        }
    }
}
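An editor's sketch, not part of the diff: since these response structs only derive Serialize, their JSON shape can be checked directly with serde_json (assuming the structs above are in scope and serde_json is a dependency; the values are hypothetical).

    let details = SnapshotDetails {
        id: "snap-001".to_string(),
        snapshot_hash: "ab12cd34".to_string(),
        created_at: "2024-01-01 00:00:00".to_string(),
        disks: vec![DiskInfo {
            serial: "WD-0001".to_string(),
            size_bytes: 64 * 1024 * 1024,
            partitions: vec![PartitionInfo {
                fs_type: "ext".to_string(),
                start_lba: 2048,
                end_lba: 4096,
                size_bytes: (4096 - 2048) * 512, // same 512-byte-sector math as above
            }],
        }],
    };
    let v = serde_json::to_value(&details).unwrap();
    assert_eq!(v["disks"][0]["partitions"][0]["fs_type"], "ext");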
202
server/src/controllers/users.rs
Normal file
@@ -0,0 +1,202 @@
use crate::utils::{error::*, models::*, DbPool};
use bcrypt::{hash, verify, DEFAULT_COST};
use sqlx::Row;
use std::str::FromStr;

pub struct UsersController;

impl UsersController {
    pub async fn create_user(
        pool: &DbPool,
        username: &str,
        password: &str,
        role: UserRole,
        storage_limit_gb: i64,
    ) -> AppResult<User> {
        Self::validate_user_input(username, password)?;

        let password_hash = hash(password, DEFAULT_COST)?;

        let result = sqlx::query(
            r#"
            INSERT INTO users (username, password_hash, role, storage_limit_gb)
            VALUES (?, ?, ?, ?)
            "#,
        )
        .bind(username)
        .bind(&password_hash)
        .bind(role.to_string())
        .bind(storage_limit_gb)
        .execute(pool)
        .await?;

        Self::get_user_by_id(pool, result.last_insert_rowid()).await
    }

    pub async fn get_user_by_username(pool: &DbPool, username: &str) -> AppResult<Option<User>> {
        let row = sqlx::query(
            r#"
            SELECT id, username, password_hash, role, storage_limit_gb, created_at
            FROM users WHERE username = ?
            "#,
        )
        .bind(username)
        .fetch_optional(pool)
        .await?;

        if let Some(row) = row {
            Ok(Some(User {
                id: row.get("id"),
                username: row.get("username"),
                password_hash: row.get("password_hash"),
                role: UserRole::from_str(&row.get::<String, _>("role")).unwrap(),
                storage_limit_gb: row.get("storage_limit_gb"),
                created_at: row.get("created_at"),
            }))
        } else {
            Ok(None)
        }
    }

    pub async fn get_user_by_id(pool: &DbPool, id: i64) -> AppResult<User> {
        let row = sqlx::query(
            r#"
            SELECT id, username, password_hash, role, storage_limit_gb, created_at
            FROM users WHERE id = ?
            "#,
        )
        .bind(id)
        .fetch_one(pool)
        .await?;

        Ok(User {
            id: row.get("id"),
            username: row.get("username"),
            password_hash: row.get("password_hash"),
            role: UserRole::from_str(&row.get::<String, _>("role")).unwrap(),
            storage_limit_gb: row.get("storage_limit_gb"),
            created_at: row.get("created_at"),
        })
    }

    pub async fn get_all_users(pool: &DbPool) -> AppResult<Vec<User>> {
        let rows = sqlx::query(
            r#"
            SELECT id, username, password_hash, role, storage_limit_gb, created_at
            FROM users ORDER BY created_at DESC
            "#,
        )
        .fetch_all(pool)
        .await?;

        let mut users = Vec::new();
        for row in rows {
            users.push(User {
                id: row.get("id"),
                username: row.get("username"),
                password_hash: row.get("password_hash"),
                role: UserRole::from_str(&row.get::<String, _>("role")).unwrap(),
                storage_limit_gb: row.get("storage_limit_gb"),
                created_at: row.get("created_at"),
            });
        }

        Ok(users)
    }

    pub async fn update_user(
        pool: &DbPool,
        id: i64,
        request: UpdateUserRequest,
    ) -> AppResult<User> {
        let current_user = Self::get_user_by_id(pool, id).await?;

        let username = request.username.clone().unwrap_or(current_user.username);
        let role = request.role.unwrap_or(current_user.role);
        let storage_limit_gb = request
            .storage_limit_gb
            .unwrap_or(current_user.storage_limit_gb);

        if request.username.is_some() || request.password.is_some() {
            // The placeholder only satisfies the length check when no new
            // password was supplied; a real password is hashed below.
            Self::validate_user_input(
                &username,
                request.password.as_deref().unwrap_or("validpassword"),
            )?;
        }

        if let Some(password) = request.password {
            let password_hash = hash(&password, DEFAULT_COST)?;
            sqlx::query(
                r#"
                UPDATE users
                SET username = ?, password_hash = ?, role = ?, storage_limit_gb = ?
                WHERE id = ?
                "#,
            )
            .bind(&username)
            .bind(&password_hash)
            .bind(role.to_string())
            .bind(storage_limit_gb)
            .bind(id)
            .execute(pool)
            .await?;
        } else {
            sqlx::query(
                r#"
                UPDATE users
                SET username = ?, role = ?, storage_limit_gb = ?
                WHERE id = ?
                "#,
            )
            .bind(&username)
            .bind(role.to_string())
            .bind(storage_limit_gb)
            .bind(id)
            .execute(pool)
            .await?;
        }

        Self::get_user_by_id(pool, id).await
    }

    pub async fn delete_user(pool: &DbPool, id: i64) -> AppResult<()> {
        Self::get_user_by_id(pool, id).await?;

        sqlx::query("DELETE FROM users WHERE id = ?")
            .bind(id)
            .execute(pool)
            .await?;
        Ok(())
    }

    pub async fn verify_user_credentials(
        pool: &DbPool,
        username: &str,
        password: &str,
    ) -> AppResult<User> {
        let user = Self::get_user_by_username(pool, username)
            .await?
            .ok_or_else(|| auth_error("Invalid credentials"))?;

        let is_valid = verify(password, &user.password_hash)?;
        if !is_valid {
            return Err(auth_error("Invalid credentials"));
        }

        Ok(user)
    }

    fn validate_user_input(username: &str, password: &str) -> AppResult<()> {
        if username.trim().is_empty() {
            return Err(validation_error("Username cannot be empty"));
        }

        if password.len() < 8 {
            return Err(validation_error(
                "Password must be at least 8 characters long",
            ));
        }

        Ok(())
    }
}
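A minimal sketch (editor's addition, not from the diff) of the bcrypt behavior create_user and verify_user_credentials rely on: hash embeds a random salt, so equal passwords hash differently while both still verify.

    use bcrypt::{hash, verify, DEFAULT_COST};

    fn main() -> Result<(), bcrypt::BcryptError> {
        let h1 = hash("correct horse battery", DEFAULT_COST)?;
        let h2 = hash("correct horse battery", DEFAULT_COST)?;
        assert_ne!(h1, h2);                              // per-hash random salt
        assert!(verify("correct horse battery", &h1)?);  // both hashes verify
        assert!(verify("correct horse battery", &h2)?);
        assert!(!verify("wrong password", &h1)?);
        Ok(())
    }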
111
server/src/main.rs
Normal file
@@ -0,0 +1,111 @@
mod controllers;
mod routes;
mod utils;
mod sync;

use anyhow::Result;
use axum::{
    routing::{delete, get, post, put},
    Router,
};
use routes::{accounts, admin, auth, config, machines, setup, snapshots, files};
use std::path::Path;
use tokio::signal;
use tower_http::{
    cors::CorsLayer,
    services::{ServeDir, ServeFile},
};
use utils::init_database;
use sync::{SyncServer, server::SyncServerConfig};

#[tokio::main]
async fn main() -> Result<()> {
    let pool = init_database().await?;

    let sync_pool = pool.clone();

    let api_routes = Router::new()
        .route("/setup/status", get(setup::get_setup_status))
        .route("/setup/init", post(setup::init_setup))
        .route("/auth/login", post(auth::login))
        .route("/auth/logout", post(auth::logout))
        .route("/accounts/me", get(accounts::me))
        .route("/admin/users", get(admin::get_users))
        .route("/admin/users", post(admin::create_user_handler))
        .route("/admin/users/{id}", put(admin::update_user_handler))
        .route("/admin/users/{id}", delete(admin::delete_user_handler))
        .route("/admin/config", get(config::get_all_configs))
        .route("/admin/config", post(config::set_config))
        .route("/admin/config/{key}", get(config::get_config))
        .route("/machines/register", post(machines::register_machine))
        .route("/machines/provisioning-code", post(machines::create_provisioning_code))
        .route("/machines", get(machines::get_machines))
        .route("/machines/{id}", get(machines::get_machine))
        .route("/machines/{id}", delete(machines::delete_machine))
        .route("/machines/{id}/snapshots", get(snapshots::get_machine_snapshots))
        .route("/machines/{machine_id}/snapshots/{snapshot_id}", get(snapshots::get_snapshot_details))
        .route("/machines/{machine_id}/snapshots/{snapshot_id}/partitions/{partition_index}/files", get(files::list_partition_root))
        .route("/machines/{machine_id}/snapshots/{snapshot_id}/partitions/{partition_index}/files/{dir_hash}", get(files::list_directory))
        .route("/machines/{machine_id}/snapshots/{snapshot_id}/partitions/{partition_index}/download/{file_hash}", get(files::download_file))
        .route("/machines/{machine_id}/snapshots/{snapshot_id}/partitions/{partition_index}/metadata/{file_hash}", get(files::get_file_metadata))
        .layer(CorsLayer::permissive())
        .with_state(pool);

    let dist_path = "./dist";
    let app = Router::new()
        .nest("/api", api_routes)
        .nest_service("/assets", ServeDir::new(format!("{}/assets", dist_path)))
        .route_service("/", ServeFile::new(format!("{}/index.html", dist_path)))
        .fallback_service(ServeFile::new(format!("{}/index.html", dist_path)))
        .layer(CorsLayer::permissive());

    if !Path::new(dist_path).exists() {
        println!("Warning: dist directory not found at {}", dist_path);
    }

    let sync_config = SyncServerConfig::default();
    let sync_server = SyncServer::new(sync_config.clone(), sync_pool);

    tokio::spawn(async move {
        if let Err(e) = sync_server.start().await {
            eprintln!("Sync server error: {}", e);
        }
    });

    let listener = tokio::net::TcpListener::bind("0.0.0.0:8379").await?;
    println!("HTTP server running on http://0.0.0.0:8379");
    println!("Sync server running on {}:{}", sync_config.bind_address, sync_config.port);

    axum::serve(listener, app)
        .with_graceful_shutdown(shutdown_signal())
        .await?;
    Ok(())
}

async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {
            println!("\nShutting down due to Ctrl+C...");
        },
        _ = terminate => {
            println!("\nShutting down due to terminate signal...");
        },
    }
}
6
server/src/routes/accounts.rs
Normal file
@@ -0,0 +1,6 @@
use crate::utils::{auth::AuthUser, error::*, models::User};
use axum::response::Json;

pub async fn me(auth_user: AuthUser) -> Result<Json<User>, AppError> {
    Ok(success_response(auth_user.user))
}
53
server/src/routes/admin.rs
Normal file
@@ -0,0 +1,53 @@
use crate::controllers::users::UsersController;
use crate::utils::{auth::*, error::*, models::*, DbPool};
use axum::{
    extract::{Path, State},
    response::Json,
};

pub async fn get_users(
    _admin: AdminUser,
    State(pool): State<DbPool>,
) -> Result<Json<Vec<User>>, AppError> {
    let users = UsersController::get_all_users(&pool).await?;
    Ok(success_response(users))
}

pub async fn create_user_handler(
    _admin: AdminUser,
    State(pool): State<DbPool>,
    Json(request): Json<CreateUserRequest>,
) -> Result<Json<User>, AppError> {
    let role = request.role.unwrap_or(UserRole::User);
    let storage_limit_gb = request.storage_limit_gb.unwrap_or(0);

    let user = UsersController::create_user(
        &pool,
        &request.username,
        &request.password,
        role,
        storage_limit_gb,
    )
    .await?;

    Ok(success_response(user))
}

pub async fn update_user_handler(
    _admin: AdminUser,
    State(pool): State<DbPool>,
    Path(user_id): Path<i64>,
    Json(request): Json<UpdateUserRequest>,
) -> Result<Json<User>, AppError> {
    let updated_user = UsersController::update_user(&pool, user_id, request).await?;
    Ok(success_response(updated_user))
}

pub async fn delete_user_handler(
    _admin: AdminUser,
    State(pool): State<DbPool>,
    Path(user_id): Path<i64>,
) -> Result<Json<serde_json::Value>, AppError> {
    UsersController::delete_user(&pool, user_id).await?;
    Ok(success_message("User deleted successfully"))
}
19
server/src/routes/auth.rs
Normal file
@@ -0,0 +1,19 @@
use crate::utils::{auth::*, error::*, models::*, DbPool};
use crate::controllers::auth::AuthController;
use axum::{extract::State, response::Json};

pub async fn login(
    State(pool): State<DbPool>,
    Json(request): Json<LoginRequest>,
) -> Result<Json<LoginResponse>, AppError> {
    let response = AuthController::login(&pool, &request.username, &request.password).await?;
    Ok(success_response(response))
}

pub async fn logout(
    auth_user: AuthUser,
    State(pool): State<DbPool>,
) -> Result<Json<serde_json::Value>, AppError> {
    AuthController::logout(&pool, auth_user.user.id).await?;
    Ok(success_message("Logged out successfully"))
}
113
server/src/routes/config.rs
Normal file
@@ -0,0 +1,113 @@
use crate::utils::{auth::*, config::ConfigManager, error::*, DbPool};
use axum::{extract::State, response::Json};
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
pub struct ConfigRequest {
    pub key: String,
    pub value: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct ConfigResponse {
    pub key: String,
    pub value: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct ConfigDefinition {
    pub key: String,
    pub description: String,
    pub value: Option<String>,
    pub default_value: Option<String>,
    pub required: bool,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct ConfigListResponse {
    pub configs: Vec<ConfigDefinition>,
}

pub async fn get_all_configs(
    auth_user: AuthUser,
    State(pool): State<DbPool>,
) -> Result<Json<ConfigListResponse>, AppError> {
    if auth_user.user.role != crate::utils::models::UserRole::Admin {
        return Err(forbidden_error("Admin access required"));
    }

    let allowed_configs = vec![
        ConfigDefinition {
            key: "EXTERNAL_URL".to_string(),
            description: "The external URL used for provisioning codes. This should be the public URL where this server can be reached.".to_string(),
            value: ConfigManager::get_config(&pool, "EXTERNAL_URL").await?,
            default_value: Some("https://your-domain.com".to_string()),
            required: true,
        },
        ConfigDefinition {
            key: "SESSION_TIMEOUT_HOURS".to_string(),
            description: "Number of hours before user sessions expire and require re-authentication.".to_string(),
            value: ConfigManager::get_config(&pool, "SESSION_TIMEOUT_HOURS").await?,
            default_value: Some("24".to_string()),
            required: false,
        },
    ];

    Ok(success_response(ConfigListResponse {
        configs: allowed_configs,
    }))
}

pub async fn set_config(
    auth_user: AuthUser,
    State(pool): State<DbPool>,
    Json(request): Json<ConfigRequest>,
) -> Result<Json<serde_json::Value>, AppError> {
    if auth_user.user.role != crate::utils::models::UserRole::Admin {
        return Err(forbidden_error("Admin access required"));
    }

    let allowed_keys = vec!["EXTERNAL_URL", "SESSION_TIMEOUT_HOURS"];

    if !allowed_keys.contains(&request.key.as_str()) {
        return Err(validation_error("Invalid configuration key"));
    }

    match request.key.as_str() {
        "EXTERNAL_URL" => {
            if request.value.trim().is_empty() {
                return Err(validation_error("External URL cannot be empty"));
            }
            if !request.value.starts_with("http://") && !request.value.starts_with("https://") {
                return Err(validation_error(
                    "External URL must start with http:// or https://",
                ));
            }
        }
        "SESSION_TIMEOUT_HOURS" => {
            if request.value.parse::<i32>().is_err() || request.value.parse::<i32>().unwrap() <= 0 {
                return Err(validation_error("Value must be a positive number"));
            }
        }
        _ => {}
    }

    ConfigManager::set_config(&pool, &request.key, &request.value).await?;
    Ok(success_message("Configuration updated successfully"))
}

pub async fn get_config(
    auth_user: AuthUser,
    State(pool): State<DbPool>,
    axum::extract::Path(key): axum::extract::Path<String>,
) -> Result<Json<ConfigResponse>, AppError> {
    if auth_user.user.role != crate::utils::models::UserRole::Admin {
        return Err(forbidden_error("Admin access required"));
    }

    let value = ConfigManager::get_config(&pool, &key)
        .await?
        .ok_or_else(|| not_found_error("Configuration key not found"))?;

    Ok(success_response(ConfigResponse { key, value }))
}
77
server/src/routes/files.rs
Normal file
@@ -0,0 +1,77 @@
use axum::{extract::{Path, Query, State}, Json, response::Response};
use axum::body::Body;
use serde::Deserialize;
use crate::controllers::files::{FilesController, DirectoryListing, FileMetadata};
use crate::utils::{auth::AuthUser, error::AppResult, DbPool};

#[derive(Deserialize)]
pub struct DownloadQuery {
    filename: Option<String>,
}

pub async fn list_partition_root(
    State(pool): State<DbPool>,
    Path((machine_id, snapshot_id, partition_index)): Path<(i64, String, usize)>,
    auth_user: AuthUser,
) -> AppResult<Json<DirectoryListing>> {
    let listing = FilesController::list_partition_root(
        &pool,
        machine_id,
        snapshot_id,
        partition_index,
        &auth_user.user,
    ).await?;

    Ok(Json(listing))
}

pub async fn list_directory(
    State(pool): State<DbPool>,
    Path((machine_id, snapshot_id, partition_index, dir_hash)): Path<(i64, String, usize, String)>,
    auth_user: AuthUser,
) -> AppResult<Json<DirectoryListing>> {
    let listing = FilesController::list_directory(
        &pool,
        machine_id,
        snapshot_id,
        partition_index,
        dir_hash,
        &auth_user.user,
    ).await?;

    Ok(Json(listing))
}

pub async fn download_file(
    State(pool): State<DbPool>,
    Path((machine_id, snapshot_id, partition_index, file_hash)): Path<(i64, String, usize, String)>,
    Query(query): Query<DownloadQuery>,
    auth_user: AuthUser,
) -> AppResult<Response<Body>> {
    FilesController::download_file(
        &pool,
        machine_id,
        snapshot_id,
        partition_index,
        file_hash,
        query.filename,
        &auth_user.user,
    ).await
}

pub async fn get_file_metadata(
    State(pool): State<DbPool>,
    Path((machine_id, snapshot_id, partition_index, file_hash)): Path<(i64, String, usize, String)>,
    auth_user: AuthUser,
) -> AppResult<Json<FileMetadata>> {
    let metadata = FilesController::get_file_metadata(
        &pool,
        machine_id,
        snapshot_id,
        partition_index,
        file_hash,
        &auth_user.user,
    ).await?;

    Ok(Json(metadata))
}
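For illustration (editor's sketch, not in the diff): Query<DownloadQuery> makes the filename parameter optional, so the download URL works with or without it. The same deserialization can be exercised directly with serde_urlencoded, which, as an assumption worth checking against the pinned axum version, is what the Query extractor uses internally.

    // "?filename=report.pdf" -> Some("report.pdf"); empty query string -> None.
    let q: DownloadQuery = serde_urlencoded::from_str("filename=report.pdf").unwrap();
    assert_eq!(q.filename.as_deref(), Some("report.pdf"));
    let q: DownloadQuery = serde_urlencoded::from_str("").unwrap();
    assert!(q.filename.is_none());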
68
server/src/routes/machines.rs
Normal file
@@ -0,0 +1,68 @@
use crate::utils::{auth::*, error::*, models::*, DbPool};
use crate::controllers::machines::MachinesController;
use axum::{
    extract::{Path, State},
    response::Json,
};

pub async fn register_machine(
    auth_user: AuthUser,
    State(pool): State<DbPool>,
    Json(request): Json<RegisterMachineRequest>,
) -> Result<Json<Machine>, AppError> {
    let machine = MachinesController::register_machine(
        &pool,
        &auth_user.user,
        &request.name,
    )
    .await?;

    Ok(success_response(machine))
}

pub async fn create_provisioning_code(
    auth_user: AuthUser,
    State(pool): State<DbPool>,
    Json(request): Json<CreateProvisioningCodeRequest>,
) -> Result<Json<ProvisioningCodeResponse>, AppError> {
    let response = MachinesController::create_provisioning_code(
        &pool,
        request.machine_id,
        &auth_user.user,
    )
    .await?;

    Ok(success_response(response))
}

pub async fn get_machines(
    auth_user: AuthUser,
    State(pool): State<DbPool>,
) -> Result<Json<Vec<Machine>>, AppError> {
    let machines = MachinesController::get_machines_for_user(&pool, &auth_user.user).await?;
    Ok(success_response(machines))
}

pub async fn get_machine(
    auth_user: AuthUser,
    State(pool): State<DbPool>,
    Path(machine_id): Path<i64>,
) -> Result<Json<Machine>, AppError> {
    let machine = MachinesController::get_machine_by_id(&pool, machine_id).await?;

    // Check if user has access to this machine
    if auth_user.user.role != UserRole::Admin && machine.user_id != auth_user.user.id {
        return Err(AppError::NotFoundError("Machine not found or access denied".to_string()));
    }

    Ok(success_response(machine))
}

pub async fn delete_machine(
    auth_user: AuthUser,
    State(pool): State<DbPool>,
    Path(machine_id): Path<i64>,
) -> Result<Json<serde_json::Value>, AppError> {
    MachinesController::delete_machine(&pool, machine_id, &auth_user.user).await?;
    Ok(success_message("Machine deleted successfully"))
}
8
server/src/routes/mod.rs
Normal file
@@ -0,0 +1,8 @@
pub mod accounts;
pub mod admin;
pub mod auth;
pub mod config;
pub mod machines;
pub mod setup;
pub mod snapshots;
pub mod files;
32
server/src/routes/setup.rs
Normal file
@@ -0,0 +1,32 @@
use crate::controllers::users::UsersController;
use crate::utils::{database::*, error::*, models::*};
use axum::{extract::State, response::Json};

pub async fn get_setup_status(
    State(pool): State<DbPool>,
) -> Result<Json<SetupStatusResponse>, AppError> {
    let first_user_exists = check_first_user_exists(&pool).await?;
    Ok(success_response(SetupStatusResponse { first_user_exists }))
}

pub async fn init_setup(
    State(pool): State<DbPool>,
    Json(request): Json<InitSetupRequest>,
) -> Result<Json<serde_json::Value>, AppError> {
    let first_user_exists = check_first_user_exists(&pool).await?;

    if first_user_exists {
        return Err(validation_error("Setup already completed"));
    }

    UsersController::create_user(
        &pool,
        &request.username,
        &request.password,
        UserRole::Admin,
        0,
    )
    .await?;

    Ok(success_message("Setup completed successfully"))
}
32
server/src/routes/snapshots.rs
Normal file
@@ -0,0 +1,32 @@
use axum::{extract::{Path, State}, Json};
use crate::controllers::snapshots::{SnapshotsController, SnapshotSummary, SnapshotDetails};
use crate::utils::{auth::AuthUser, error::AppResult, DbPool};

pub async fn get_machine_snapshots(
    State(pool): State<DbPool>,
    Path(machine_id): Path<i64>,
    auth_user: AuthUser,
) -> AppResult<Json<Vec<SnapshotSummary>>> {
    let snapshots = SnapshotsController::get_machine_snapshots(
        &pool,
        machine_id,
        &auth_user.user,
    ).await?;

    Ok(Json(snapshots))
}

pub async fn get_snapshot_details(
    State(pool): State<DbPool>,
    Path((machine_id, snapshot_id)): Path<(i64, String)>,
    auth_user: AuthUser,
) -> AppResult<Json<SnapshotDetails>> {
    let snapshot = SnapshotsController::get_snapshot_details(
        &pool,
        machine_id,
        snapshot_id,
        &auth_user.user,
    ).await?;

    Ok(Json(snapshot))
}
605
server/src/sync/meta.rs
Normal file
@@ -0,0 +1,605 @@
use bytes::{Buf, BufMut, Bytes, BytesMut};
use std::io::{Error, ErrorKind, Result};
use crate::sync::protocol::{Hash, MetaType};

/// Filesystem type codes
#[repr(u32)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FsType {
    Ext = 1,
    Ntfs = 2,
    Fat32 = 3,
    Unknown = 0,
}

impl From<u32> for FsType {
    fn from(value: u32) -> Self {
        match value {
            1 => FsType::Ext,
            2 => FsType::Ntfs,
            3 => FsType::Fat32,
            _ => FsType::Unknown,
        }
    }
}

/// Directory entry types
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum EntryType {
    File = 0,
    Dir = 1,
    Symlink = 2,
}

impl TryFrom<u8> for EntryType {
    type Error = Error;

    fn try_from(value: u8) -> Result<Self> {
        match value {
            0 => Ok(EntryType::File),
            1 => Ok(EntryType::Dir),
            2 => Ok(EntryType::Symlink),
            _ => Err(Error::new(ErrorKind::InvalidData, "Unknown entry type")),
        }
    }
}

/// File metadata object
#[derive(Debug, Clone)]
pub struct FileObj {
    pub version: u8,
    pub fs_type_code: FsType,
    pub size: u64,
    pub mode: u32,
    pub uid: u32,
    pub gid: u32,
    pub mtime_unixsec: u64,
    pub chunk_hashes: Vec<Hash>,
}

impl FileObj {
    pub fn new(
        fs_type_code: FsType,
        size: u64,
        mode: u32,
        uid: u32,
        gid: u32,
        mtime_unixsec: u64,
        chunk_hashes: Vec<Hash>,
    ) -> Self {
        Self {
            version: 1,
            fs_type_code,
            size,
            mode,
            uid,
            gid,
            mtime_unixsec,
            chunk_hashes,
        }
    }

    pub fn serialize(&self) -> Result<Bytes> {
        let mut buf = BytesMut::new();

        buf.put_u8(self.version);
        buf.put_u32_le(self.fs_type_code as u32);
        buf.put_u64_le(self.size);
        buf.put_u32_le(self.mode);
        buf.put_u32_le(self.uid);
        buf.put_u32_le(self.gid);
        buf.put_u64_le(self.mtime_unixsec);
        buf.put_u32_le(self.chunk_hashes.len() as u32);

        for hash in &self.chunk_hashes {
            buf.put_slice(hash);
        }

        Ok(buf.freeze())
    }

    pub fn deserialize(mut data: Bytes) -> Result<Self> {
        // Fixed-size prefix: 1 + 4 + 8 + 4 + 4 + 4 + 8 + 4 = 37 bytes; a
        // chunkless FileObj serializes to exactly this length.
        if data.remaining() < 37 {
            return Err(Error::new(ErrorKind::UnexpectedEof, "FileObj data too short"));
        }

        let version = data.get_u8();
        if version != 1 {
            return Err(Error::new(ErrorKind::InvalidData, "Unsupported FileObj version"));
        }

        let fs_type_code = FsType::from(data.get_u32_le());
        let size = data.get_u64_le();
        let mode = data.get_u32_le();
        let uid = data.get_u32_le();
        let gid = data.get_u32_le();
        let mtime_unixsec = data.get_u64_le();
        let chunk_count = data.get_u32_le() as usize;

        if data.remaining() < chunk_count * 32 {
            return Err(Error::new(ErrorKind::UnexpectedEof, "FileObj chunk hashes too short"));
        }

        let mut chunk_hashes = Vec::with_capacity(chunk_count);
        for _ in 0..chunk_count {
            let mut hash = [0u8; 32];
            data.copy_to_slice(&mut hash);
            chunk_hashes.push(hash);
        }

        Ok(Self {
            version,
            fs_type_code,
            size,
            mode,
            uid,
            gid,
            mtime_unixsec,
            chunk_hashes,
        })
    }

    pub fn compute_hash(&self) -> Result<Hash> {
        let serialized = self.serialize()?;
        Ok(blake3::hash(&serialized).into())
    }
}
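The fixed prefix of a serialized FileObj is 1 + 4 + 8 + 4 + 4 + 4 + 8 + 4 = 37 bytes, which is the bound deserialize checks before reading, so an empty file round-trips at exactly that length. A quick check (editor's sketch, not in the diff):

    let obj = FileObj::new(FsType::Ext, 0, 0o644, 0, 0, 0, vec![]);
    let bytes = obj.serialize().unwrap();
    assert_eq!(bytes.len(), 37); // no chunk hashes, prefix only
    let back = FileObj::deserialize(bytes).unwrap();
    assert!(back.chunk_hashes.is_empty());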
/// Directory entry
#[derive(Debug, Clone)]
pub struct DirEntry {
    pub entry_type: EntryType,
    pub name: String,
    pub target_meta_hash: Hash,
}

/// Directory metadata object
#[derive(Debug, Clone)]
pub struct DirObj {
    pub version: u8,
    pub entries: Vec<DirEntry>,
}

impl DirObj {
    pub fn new(entries: Vec<DirEntry>) -> Self {
        Self {
            version: 1,
            entries,
        }
    }

    pub fn serialize(&self) -> Result<Bytes> {
        let mut buf = BytesMut::new();

        buf.put_u8(self.version);
        buf.put_u32_le(self.entries.len() as u32);

        for entry in &self.entries {
            buf.put_u8(entry.entry_type as u8);
            let name_bytes = entry.name.as_bytes();
            buf.put_u16_le(name_bytes.len() as u16);
            buf.put_slice(name_bytes);
            buf.put_slice(&entry.target_meta_hash);
        }

        Ok(buf.freeze())
    }

    pub fn deserialize(mut data: Bytes) -> Result<Self> {
        if data.remaining() < 5 {
            return Err(Error::new(ErrorKind::UnexpectedEof, "DirObj data too short"));
        }

        let version = data.get_u8();
        if version != 1 {
            return Err(Error::new(ErrorKind::InvalidData, "Unsupported DirObj version"));
        }

        let entry_count = data.get_u32_le() as usize;
        let mut entries = Vec::with_capacity(entry_count);

        for _ in 0..entry_count {
            if data.remaining() < 35 {
                return Err(Error::new(ErrorKind::UnexpectedEof, "DirObj entry too short"));
            }

            let entry_type = EntryType::try_from(data.get_u8())?;
            let name_len = data.get_u16_le() as usize;

            if data.remaining() < name_len + 32 {
                return Err(Error::new(ErrorKind::UnexpectedEof, "DirObj entry name/hash too short"));
            }

            let name = String::from_utf8(data.copy_to_bytes(name_len).to_vec())
                .map_err(|_| Error::new(ErrorKind::InvalidData, "Invalid UTF-8 in entry name"))?;

            let mut target_meta_hash = [0u8; 32];
            data.copy_to_slice(&mut target_meta_hash);

            entries.push(DirEntry {
                entry_type,
                name,
                target_meta_hash,
            });
        }

        Ok(Self {
            version,
            entries,
        })
    }

    pub fn compute_hash(&self) -> Result<Hash> {
        let serialized = self.serialize()?;
        Ok(blake3::hash(&serialized).into())
    }
}

/// Partition metadata object
#[derive(Debug, Clone)]
pub struct PartitionObj {
    pub version: u8,
    pub fs_type_code: FsType,
    pub root_dir_hash: Hash,
    pub start_lba: u64,
    pub end_lba: u64,
    pub type_guid: [u8; 16],
}

impl PartitionObj {
    pub fn new(
        fs_type_code: FsType,
        root_dir_hash: Hash,
        start_lba: u64,
        end_lba: u64,
        type_guid: [u8; 16],
    ) -> Self {
        Self {
            version: 1,
            fs_type_code,
            root_dir_hash,
            start_lba,
            end_lba,
            type_guid,
        }
    }

    pub fn serialize(&self) -> Result<Bytes> {
        let mut buf = BytesMut::new();

        buf.put_u8(self.version);
        buf.put_u32_le(self.fs_type_code as u32);
        buf.put_slice(&self.root_dir_hash);
        buf.put_u64_le(self.start_lba);
        buf.put_u64_le(self.end_lba);
        buf.put_slice(&self.type_guid);

        Ok(buf.freeze())
    }

    pub fn deserialize(mut data: Bytes) -> Result<Self> {
        if data.remaining() < 69 {
            return Err(Error::new(ErrorKind::UnexpectedEof, "PartitionObj data too short"));
        }

        let version = data.get_u8();
        if version != 1 {
            return Err(Error::new(ErrorKind::InvalidData, "Unsupported PartitionObj version"));
        }

        let fs_type_code = FsType::from(data.get_u32_le());

        let mut root_dir_hash = [0u8; 32];
        data.copy_to_slice(&mut root_dir_hash);

        let start_lba = data.get_u64_le();
        let end_lba = data.get_u64_le();

        let mut type_guid = [0u8; 16];
        data.copy_to_slice(&mut type_guid);

        Ok(Self {
            version,
            fs_type_code,
            root_dir_hash,
            start_lba,
            end_lba,
            type_guid,
        })
    }

    pub fn compute_hash(&self) -> Result<Hash> {
        let serialized = self.serialize()?;
        Ok(blake3::hash(&serialized).into())
    }
}

/// Disk metadata object
#[derive(Debug, Clone)]
pub struct DiskObj {
    pub version: u8,
    pub partition_hashes: Vec<Hash>,
    pub disk_size_bytes: u64,
    pub serial: String,
}

impl DiskObj {
    pub fn new(partition_hashes: Vec<Hash>, disk_size_bytes: u64, serial: String) -> Self {
        Self {
            version: 1,
            partition_hashes,
            disk_size_bytes,
            serial,
        }
    }

    pub fn serialize(&self) -> Result<Bytes> {
        let mut buf = BytesMut::new();

        buf.put_u8(self.version);
        buf.put_u32_le(self.partition_hashes.len() as u32);

        for hash in &self.partition_hashes {
            buf.put_slice(hash);
        }

        buf.put_u64_le(self.disk_size_bytes);

        let serial_bytes = self.serial.as_bytes();
        buf.put_u16_le(serial_bytes.len() as u16);
        buf.put_slice(serial_bytes);

        Ok(buf.freeze())
    }

    pub fn deserialize(mut data: Bytes) -> Result<Self> {
        println!("DiskObj::deserialize: input data length = {}", data.len());

        if data.remaining() < 15 {
            println!("DiskObj::deserialize: data too short, remaining = {}", data.remaining());
            return Err(Error::new(ErrorKind::UnexpectedEof, "DiskObj data too short"));
        }

        let version = data.get_u8();
        println!("DiskObj::deserialize: version = {}", version);
        if version != 1 {
            println!("DiskObj::deserialize: unsupported version {}", version);
            return Err(Error::new(ErrorKind::InvalidData, "Unsupported DiskObj version"));
        }

        let partition_count = data.get_u32_le() as usize;
        println!("DiskObj::deserialize: partition_count = {}", partition_count);

        if data.remaining() < partition_count * 32 + 10 {
            println!("DiskObj::deserialize: not enough data for partitions, remaining = {}, needed = {}",
                data.remaining(), partition_count * 32 + 10);
            return Err(Error::new(ErrorKind::UnexpectedEof, "DiskObj partitions too short"));
        }

        let mut partition_hashes = Vec::with_capacity(partition_count);
        for i in 0..partition_count {
            let mut hash = [0u8; 32];
            data.copy_to_slice(&mut hash);
            println!("DiskObj::deserialize: partition {} hash = {}", i, hex::encode(&hash));
            partition_hashes.push(hash);
        }

        let disk_size_bytes = data.get_u64_le();
        println!("DiskObj::deserialize: disk_size_bytes = {}", disk_size_bytes);

        let serial_len = data.get_u16_le() as usize;
        println!("DiskObj::deserialize: serial_len = {}", serial_len);

        if data.remaining() < serial_len {
            println!("DiskObj::deserialize: not enough data for serial, remaining = {}, needed = {}",
                data.remaining(), serial_len);
            return Err(Error::new(ErrorKind::UnexpectedEof, "DiskObj serial too short"));
        }

        let serial_bytes = data.copy_to_bytes(serial_len).to_vec();
        println!("DiskObj::deserialize: serial_bytes = {:?}", serial_bytes);

        let serial = String::from_utf8(serial_bytes)
            .map_err(|e| {
                println!("DiskObj::deserialize: UTF-8 error: {}", e);
                Error::new(ErrorKind::InvalidData, "Invalid UTF-8 in serial")
            })?;

        println!("DiskObj::deserialize: serial = '{}'", serial);
        println!("DiskObj::deserialize: successfully deserialized");

        Ok(Self {
            version,
            partition_hashes,
            disk_size_bytes,
            serial,
        })
    }

    pub fn compute_hash(&self) -> Result<Hash> {
        let serialized = self.serialize()?;
        Ok(blake3::hash(&serialized).into())
    }
}
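For reference, the DiskObj wire layout implied by serialize above (editor's summary, little-endian throughout); the 15-byte minimum checked in deserialize is the empty case N = 0, L = 0:

    // offset     size  field
    // 0          1     version (currently 1)
    // 1          4     partition_count N (u32)
    // 5          32*N  partition hashes (BLAKE3, 32 bytes each)
    // 5 + 32N    8     disk_size_bytes (u64)
    // 13 + 32N   2     serial_len L (u16)
    // 15 + 32N   L     serial (UTF-8)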
/// Snapshot metadata object
#[derive(Debug, Clone)]
pub struct SnapshotObj {
    pub version: u8,
    pub created_at_unixsec: u64,
    pub disk_hashes: Vec<Hash>,
}

impl SnapshotObj {
    pub fn new(created_at_unixsec: u64, disk_hashes: Vec<Hash>) -> Self {
        Self {
            version: 1,
            created_at_unixsec,
            disk_hashes,
        }
    }

    pub fn serialize(&self) -> Result<Bytes> {
        let mut buf = BytesMut::new();

        buf.put_u8(self.version);
        buf.put_u64_le(self.created_at_unixsec);
        buf.put_u32_le(self.disk_hashes.len() as u32);

        for hash in &self.disk_hashes {
            buf.put_slice(hash);
        }

        Ok(buf.freeze())
    }

    pub fn deserialize(mut data: Bytes) -> Result<Self> {
        if data.remaining() < 13 {
            return Err(Error::new(ErrorKind::UnexpectedEof, "SnapshotObj data too short"));
        }

        let version = data.get_u8();
        if version != 1 {
            return Err(Error::new(ErrorKind::InvalidData, "Unsupported SnapshotObj version"));
        }

        let created_at_unixsec = data.get_u64_le();
        let disk_count = data.get_u32_le() as usize;

        if data.remaining() < disk_count * 32 {
            return Err(Error::new(ErrorKind::UnexpectedEof, "SnapshotObj disk hashes too short"));
        }

        let mut disk_hashes = Vec::with_capacity(disk_count);
        for _ in 0..disk_count {
            let mut hash = [0u8; 32];
            data.copy_to_slice(&mut hash);
            disk_hashes.push(hash);
        }

        Ok(Self {
            version,
            created_at_unixsec,
            disk_hashes,
        })
    }

    pub fn compute_hash(&self) -> Result<Hash> {
        let serialized = self.serialize()?;
        Ok(blake3::hash(&serialized).into())
    }
}

/// Meta object wrapper
#[derive(Debug, Clone)]
pub enum MetaObj {
    File(FileObj),
    Dir(DirObj),
    Partition(PartitionObj),
    Disk(DiskObj),
    Snapshot(SnapshotObj),
}

impl MetaObj {
    pub fn meta_type(&self) -> MetaType {
        match self {
            MetaObj::File(_) => MetaType::File,
            MetaObj::Dir(_) => MetaType::Dir,
            MetaObj::Partition(_) => MetaType::Partition,
            MetaObj::Disk(_) => MetaType::Disk,
            MetaObj::Snapshot(_) => MetaType::Snapshot,
        }
    }

    pub fn serialize(&self) -> Result<Bytes> {
        match self {
            MetaObj::File(obj) => obj.serialize(),
            MetaObj::Dir(obj) => obj.serialize(),
            MetaObj::Partition(obj) => obj.serialize(),
            MetaObj::Disk(obj) => obj.serialize(),
            MetaObj::Snapshot(obj) => obj.serialize(),
        }
    }

    pub fn deserialize(meta_type: MetaType, data: Bytes) -> Result<Self> {
        match meta_type {
            MetaType::File => Ok(MetaObj::File(FileObj::deserialize(data)?)),
            MetaType::Dir => Ok(MetaObj::Dir(DirObj::deserialize(data)?)),
            MetaType::Partition => Ok(MetaObj::Partition(PartitionObj::deserialize(data)?)),
            MetaType::Disk => Ok(MetaObj::Disk(DiskObj::deserialize(data)?)),
            MetaType::Snapshot => Ok(MetaObj::Snapshot(SnapshotObj::deserialize(data)?)),
        }
    }

    pub fn compute_hash(&self) -> Result<Hash> {
        match self {
            MetaObj::File(obj) => obj.compute_hash(),
            MetaObj::Dir(obj) => obj.compute_hash(),
            MetaObj::Partition(obj) => obj.compute_hash(),
            MetaObj::Disk(obj) => obj.compute_hash(),
            MetaObj::Snapshot(obj) => obj.compute_hash(),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_file_obj_serialization() {
        let obj = FileObj::new(
            FsType::Ext,
            1024,
            0o644,
            1000,
            1000,
            1234567890,
            vec![[1; 32], [2; 32]],
        );

        let serialized = obj.serialize().unwrap();
        let deserialized = FileObj::deserialize(serialized).unwrap();

        assert_eq!(obj.fs_type_code, deserialized.fs_type_code);
        assert_eq!(obj.size, deserialized.size);
        assert_eq!(obj.chunk_hashes, deserialized.chunk_hashes);
    }

    #[test]
    fn test_dir_obj_serialization() {
        let entries = vec![
            DirEntry {
                entry_type: EntryType::File,
                name: "test.txt".to_string(),
                target_meta_hash: [1; 32],
            },
            DirEntry {
                entry_type: EntryType::Dir,
                name: "subdir".to_string(),
                target_meta_hash: [2; 32],
            },
        ];

        let obj = DirObj::new(entries);
        let serialized = obj.serialize().unwrap();
        let deserialized = DirObj::deserialize(serialized).unwrap();

        assert_eq!(obj.entries.len(), deserialized.entries.len());
        assert_eq!(obj.entries[0].name, deserialized.entries[0].name);
        assert_eq!(obj.entries[1].entry_type, deserialized.entries[1].entry_type);
    }

    #[test]
    fn test_hash_computation() {
        let obj = FileObj::new(FsType::Ext, 1024, 0o644, 1000, 1000, 1234567890, vec![]);
        let hash1 = obj.compute_hash().unwrap();
        let hash2 = obj.compute_hash().unwrap();
        assert_eq!(hash1, hash2);

        let obj2 = FileObj::new(FsType::Ext, 1025, 0o644, 1000, 1000, 1234567890, vec![]);
        let hash3 = obj2.compute_hash().unwrap();
        assert_ne!(hash1, hash3);
    }
}
8
server/src/sync/mod.rs
Normal file
@@ -0,0 +1,8 @@
pub mod protocol;
pub mod server;
pub mod storage;
pub mod session;
pub mod meta;
pub mod validation;

pub use server::SyncServer;
620
server/src/sync/protocol.rs
Normal file
@@ -0,0 +1,620 @@
use bytes::{Buf, BufMut, Bytes, BytesMut};
use std::io::{Error, ErrorKind, Result};

/// Command codes for the sync protocol
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Command {
    Hello = 0x01,
    HelloOk = 0x02,
    AuthUserPass = 0x10,
    AuthCode = 0x11,
    AuthOk = 0x12,
    AuthFail = 0x13,
    BatchCheckChunk = 0x20,
    CheckChunkResp = 0x21,
    SendChunk = 0x22,
    ChunkOk = 0x23,
    ChunkFail = 0x24,
    BatchCheckMeta = 0x30,
    CheckMetaResp = 0x31,
    SendMeta = 0x32,
    MetaOk = 0x33,
    MetaFail = 0x34,
    SendSnapshot = 0x40,
    SnapshotOk = 0x41,
    SnapshotFail = 0x42,
    Close = 0xFF,
}

impl TryFrom<u8> for Command {
    type Error = Error;

    fn try_from(value: u8) -> Result<Self> {
        match value {
            0x01 => Ok(Command::Hello),
            0x02 => Ok(Command::HelloOk),
            0x10 => Ok(Command::AuthUserPass),
            0x11 => Ok(Command::AuthCode),
            0x12 => Ok(Command::AuthOk),
            0x13 => Ok(Command::AuthFail),
            0x20 => Ok(Command::BatchCheckChunk),
            0x21 => Ok(Command::CheckChunkResp),
            0x22 => Ok(Command::SendChunk),
            0x23 => Ok(Command::ChunkOk),
            0x24 => Ok(Command::ChunkFail),
            0x30 => Ok(Command::BatchCheckMeta),
            0x31 => Ok(Command::CheckMetaResp),
            0x32 => Ok(Command::SendMeta),
            0x33 => Ok(Command::MetaOk),
            0x34 => Ok(Command::MetaFail),
            0x40 => Ok(Command::SendSnapshot),
            0x41 => Ok(Command::SnapshotOk),
            0x42 => Ok(Command::SnapshotFail),
            0xFF => Ok(Command::Close),
            _ => Err(Error::new(ErrorKind::InvalidData, "Unknown command code")),
        }
    }
}

/// Message header structure (24 bytes fixed)
#[derive(Debug, Clone)]
pub struct MessageHeader {
    pub cmd: Command,
    pub flags: u8,
    pub reserved: [u8; 2],
    pub session_id: [u8; 16],
    pub payload_len: u32,
}

impl MessageHeader {
    pub const SIZE: usize = 24;

    pub fn new(cmd: Command, session_id: [u8; 16], payload_len: u32) -> Self {
        Self {
            cmd,
            flags: 0,
            reserved: [0; 2],
            session_id,
            payload_len,
        }
    }

    pub fn serialize(&self) -> [u8; Self::SIZE] {
        let mut buf = [0u8; Self::SIZE];
        buf[0] = self.cmd as u8;
        buf[1] = self.flags;
        buf[2..4].copy_from_slice(&self.reserved);
        buf[4..20].copy_from_slice(&self.session_id);
        buf[20..24].copy_from_slice(&self.payload_len.to_le_bytes());
        buf
    }

    pub fn deserialize(buf: &[u8]) -> Result<Self> {
        if buf.len() < Self::SIZE {
            return Err(Error::new(ErrorKind::UnexpectedEof, "Header too short"));
        }

        let cmd = Command::try_from(buf[0])?;
        let flags = buf[1];
        let reserved = [buf[2], buf[3]];
        let mut session_id = [0u8; 16];
        session_id.copy_from_slice(&buf[4..20]);
        let payload_len = u32::from_le_bytes([buf[20], buf[21], buf[22], buf[23]]);

        Ok(Self {
            cmd,
            flags,
            reserved,
            session_id,
            payload_len,
        })
    }
}
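A minimal round-trip sketch (editor's addition, not in the diff) of the 24-byte header: command byte, flags, two reserved bytes, a 16-byte session id, then the payload length in little-endian.

    let hdr = MessageHeader::new(Command::Hello, [0xAB; 16], 2);
    let bytes = hdr.serialize();
    assert_eq!(bytes.len(), MessageHeader::SIZE);
    assert_eq!(bytes[0], 0x01);                      // Command::Hello
    assert_eq!(&bytes[4..20], &[0xAB; 16]);          // session id
    assert_eq!(&bytes[20..24], &2u32.to_le_bytes()); // payload_len, LE
    let back = MessageHeader::deserialize(&bytes).unwrap();
    assert_eq!(back.payload_len, 2);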
/// A 32-byte BLAKE3 hash
pub type Hash = [u8; 32];

/// Meta object types
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum MetaType {
    File = 1,
    Dir = 2,
    Partition = 3,
    Disk = 4,
    Snapshot = 5,
}

impl TryFrom<u8> for MetaType {
    type Error = Error;

    fn try_from(value: u8) -> Result<Self> {
        match value {
            1 => Ok(MetaType::File),
            2 => Ok(MetaType::Dir),
            3 => Ok(MetaType::Partition),
            4 => Ok(MetaType::Disk),
            5 => Ok(MetaType::Snapshot),
            _ => Err(Error::new(ErrorKind::InvalidData, "Unknown meta type")),
        }
    }
}

/// Protocol message types
#[derive(Debug, Clone)]
pub enum Message {
    Hello {
        client_type: u8,
        auth_type: u8,
    },
    HelloOk,
    AuthUserPass {
        username: String,
        password: String,
        machine_id: i64,
    },
    AuthCode {
        code: String,
    },
    AuthOk {
        session_id: [u8; 16],
    },
    AuthFail {
        reason: String,
    },
    BatchCheckChunk {
        hashes: Vec<Hash>,
    },
    CheckChunkResp {
        missing_hashes: Vec<Hash>,
    },
    SendChunk {
        hash: Hash,
        data: Bytes,
    },
    ChunkOk,
    ChunkFail {
        reason: String,
    },
    BatchCheckMeta {
        items: Vec<(MetaType, Hash)>,
    },
    CheckMetaResp {
        missing_items: Vec<(MetaType, Hash)>,
    },
    SendMeta {
        meta_type: MetaType,
        meta_hash: Hash,
        body: Bytes,
    },
    MetaOk,
    MetaFail {
        reason: String,
    },
    SendSnapshot {
        snapshot_hash: Hash,
        body: Bytes,
    },
    SnapshotOk {
        snapshot_id: String,
    },
    SnapshotFail {
        missing_chunks: Vec<Hash>,
        missing_metas: Vec<(MetaType, Hash)>,
    },
    Close,
}

impl Message {
    /// Serialize message payload to bytes
    pub fn serialize_payload(&self) -> Result<Bytes> {
        let mut buf = BytesMut::new();

        match self {
            Message::Hello { client_type, auth_type } => {
                buf.put_u8(*client_type);
                buf.put_u8(*auth_type);
            }
            Message::HelloOk => {
                // No payload
            }
            Message::AuthUserPass { username, password, machine_id } => {
                let username_bytes = username.as_bytes();
                let password_bytes = password.as_bytes();
                buf.put_u16_le(username_bytes.len() as u16);
                buf.put_slice(username_bytes);
                buf.put_u16_le(password_bytes.len() as u16);
                buf.put_slice(password_bytes);
                buf.put_i64_le(*machine_id);
            }
            Message::AuthCode { code } => {
                let code_bytes = code.as_bytes();
                buf.put_u16_le(code_bytes.len() as u16);
                buf.put_slice(code_bytes);
            }
            Message::AuthOk { session_id } => {
                buf.put_slice(session_id);
            }
            Message::AuthFail { reason } => {
                let reason_bytes = reason.as_bytes();
                buf.put_u16_le(reason_bytes.len() as u16);
                buf.put_slice(reason_bytes);
            }
            Message::BatchCheckChunk { hashes } => {
                buf.put_u32_le(hashes.len() as u32);
                for hash in hashes {
                    buf.put_slice(hash);
                }
            }
            Message::CheckChunkResp { missing_hashes } => {
                buf.put_u32_le(missing_hashes.len() as u32);
                for hash in missing_hashes {
                    buf.put_slice(hash);
                }
            }
            Message::SendChunk { hash, data } => {
                buf.put_slice(hash);
                buf.put_u32_le(data.len() as u32);
                buf.put_slice(data);
            }
            Message::ChunkOk => {
                // No payload
            }
            Message::ChunkFail { reason } => {
                let reason_bytes = reason.as_bytes();
                buf.put_u16_le(reason_bytes.len() as u16);
                buf.put_slice(reason_bytes);
            }
            Message::BatchCheckMeta { items } => {
                buf.put_u32_le(items.len() as u32);
                for (meta_type, hash) in items {
                    buf.put_u8(*meta_type as u8);
                    buf.put_slice(hash);
                }
            }
            Message::CheckMetaResp { missing_items } => {
                buf.put_u32_le(missing_items.len() as u32);
                for (meta_type, hash) in missing_items {
                    buf.put_u8(*meta_type as u8);
                    buf.put_slice(hash);
                }
            }
            Message::SendMeta { meta_type, meta_hash, body } => {
                buf.put_u8(*meta_type as u8);
                buf.put_slice(meta_hash);
                buf.put_u32_le(body.len() as u32);
                buf.put_slice(body);
            }
            Message::MetaOk => {
                // No payload
            }
            Message::MetaFail { reason } => {
                let reason_bytes = reason.as_bytes();
                buf.put_u16_le(reason_bytes.len() as u16);
                buf.put_slice(reason_bytes);
            }
            Message::SendSnapshot { snapshot_hash, body } => {
                buf.put_slice(snapshot_hash);
                buf.put_u32_le(body.len() as u32);
                buf.put_slice(body);
            }
            Message::SnapshotOk { snapshot_id } => {
                let id_bytes = snapshot_id.as_bytes();
                buf.put_u16_le(id_bytes.len() as u16);
                buf.put_slice(id_bytes);
            }
            Message::SnapshotFail { missing_chunks, missing_metas } => {
                buf.put_u32_le(missing_chunks.len() as u32);
                for hash in missing_chunks {
                    buf.put_slice(hash);
                }
                buf.put_u32_le(missing_metas.len() as u32);
                for (meta_type, hash) in missing_metas {
                    buf.put_u8(*meta_type as u8);
                    buf.put_slice(hash);
                }
            }
            Message::Close => {
                // No payload
            }
        }

        Ok(buf.freeze())
    }

    /// Deserialize message payload from bytes
    pub fn deserialize_payload(cmd: Command, mut payload: Bytes) -> Result<Self> {
        match cmd {
            Command::Hello => {
                if payload.remaining() < 2 {
                    return Err(Error::new(ErrorKind::UnexpectedEof, "Hello payload too short"));
                }
                let client_type = payload.get_u8();
                let auth_type = payload.get_u8();
                Ok(Message::Hello { client_type, auth_type })
            }
            Command::HelloOk => Ok(Message::HelloOk),
            Command::AuthUserPass => {
                if payload.remaining() < 12 { // 4 bytes for lengths + at least 8 bytes for machine_id
                    return Err(Error::new(ErrorKind::UnexpectedEof, "AuthUserPass payload too short"));
                }
                let username_len = payload.get_u16_le() as usize;
                if payload.remaining() < username_len + 10 { // 2 bytes for password len + 8 bytes for machine_id
                    return Err(Error::new(ErrorKind::UnexpectedEof, "AuthUserPass username too short"));
                }
                let username = String::from_utf8(payload.copy_to_bytes(username_len).to_vec())
                    .map_err(|_| Error::new(ErrorKind::InvalidData, "Invalid UTF-8 in username"))?;
                let password_len = payload.get_u16_le() as usize;
                if payload.remaining() < password_len + 8 { // 8 bytes for machine_id
                    return Err(Error::new(ErrorKind::UnexpectedEof, "AuthUserPass password too short"));
                }
                let password = String::from_utf8(payload.copy_to_bytes(password_len).to_vec())
                    .map_err(|_| Error::new(ErrorKind::InvalidData, "Invalid UTF-8 in password"))?;
                let machine_id = payload.get_i64_le();
                Ok(Message::AuthUserPass { username, password, machine_id })
            }
            Command::AuthCode => {
                if payload.remaining() < 2 {
                    return Err(Error::new(ErrorKind::UnexpectedEof, "AuthCode payload too short"));
                }
                let code_len = payload.get_u16_le() as usize;
                if payload.remaining() < code_len {
                    return Err(Error::new(ErrorKind::UnexpectedEof, "AuthCode code too short"));
                }
                let code = String::from_utf8(payload.copy_to_bytes(code_len).to_vec())
                    .map_err(|_| Error::new(ErrorKind::InvalidData, "Invalid UTF-8 in code"))?;
                Ok(Message::AuthCode { code })
            }
            Command::AuthOk => {
                if payload.remaining() < 16 {
                    return Err(Error::new(ErrorKind::UnexpectedEof, "AuthOk payload too short"));
                }
                let mut session_id = [0u8; 16];
                payload.copy_to_slice(&mut session_id);
                Ok(Message::AuthOk { session_id })
            }
            Command::AuthFail => {
                if payload.remaining() < 2 {
                    return Err(Error::new(ErrorKind::UnexpectedEof, "AuthFail payload too short"));
                }
                let reason_len = payload.get_u16_le() as usize;
                if payload.remaining() < reason_len {
                    return Err(Error::new(ErrorKind::UnexpectedEof, "AuthFail reason too short"));
                }
                let reason = String::from_utf8(payload.copy_to_bytes(reason_len).to_vec())
                    .map_err(|_| Error::new(ErrorKind::InvalidData, "Invalid UTF-8 in reason"))?;
                Ok(Message::AuthFail { reason })
            }
            Command::BatchCheckChunk => {
                if payload.remaining() < 4 {
                    return Err(Error::new(ErrorKind::UnexpectedEof, "BatchCheckChunk payload too short"));
                }
                let count = payload.get_u32_le() as usize;
                if payload.remaining() < count * 32 {
                    return Err(Error::new(ErrorKind::UnexpectedEof, "BatchCheckChunk hashes too short"));
                }
                let mut hashes = Vec::with_capacity(count);
                for _ in 0..count {
                    let mut hash = [0u8; 32];
                    payload.copy_to_slice(&mut hash);
                    hashes.push(hash);
                }
                Ok(Message::BatchCheckChunk { hashes })
            }
            Command::CheckChunkResp => {
                if payload.remaining() < 4 {
                    return Err(Error::new(ErrorKind::UnexpectedEof, "CheckChunkResp payload too short"));
                }
                let count = payload.get_u32_le() as usize;
                if payload.remaining() < count * 32 {
                    return Err(Error::new(ErrorKind::UnexpectedEof, "CheckChunkResp hashes too short"));
                }
                let mut missing_hashes = Vec::with_capacity(count);
                for _ in 0..count {
                    let mut hash = [0u8; 32];
                    payload.copy_to_slice(&mut hash);
                    missing_hashes.push(hash);
                }
                Ok(Message::CheckChunkResp { missing_hashes })
            }
            Command::SendChunk => {
                if payload.remaining() < 36 {
                    return Err(Error::new(ErrorKind::UnexpectedEof, "SendChunk payload too short"));
                }
                let mut hash = [0u8; 32];
                payload.copy_to_slice(&mut hash);
                let size = payload.get_u32_le() as usize;
                if payload.remaining() < size {
                    return Err(Error::new(ErrorKind::UnexpectedEof, "SendChunk data too short"));
                }
                let data = payload.copy_to_bytes(size);
                Ok(Message::SendChunk { hash, data })
            }
            Command::ChunkOk => Ok(Message::ChunkOk),
            Command::ChunkFail => {
                if payload.remaining() < 2 {
                    return Err(Error::new(ErrorKind::UnexpectedEof, "ChunkFail payload too short"));
                }
                let reason_len = payload.get_u16_le() as usize;
                if payload.remaining() < reason_len {
                    return Err(Error::new(ErrorKind::UnexpectedEof, "ChunkFail reason too short"));
                }
                let reason = String::from_utf8(payload.copy_to_bytes(reason_len).to_vec())
                    .map_err(|_| Error::new(ErrorKind::InvalidData, "Invalid UTF-8 in reason"))?;
                Ok(Message::ChunkFail { reason })
            }
            Command::BatchCheckMeta => {
                if payload.remaining() < 4 {
                    return Err(Error::new(ErrorKind::UnexpectedEof, "BatchCheckMeta payload too short"));
                }
                let count = payload.get_u32_le() as usize;
                if payload.remaining() < count * 33 {
                    return Err(Error::new(ErrorKind::UnexpectedEof, "BatchCheckMeta items too short"));
                }
                let mut items = Vec::with_capacity(count);
                for _ in 0..count {
|
||||
let meta_type = MetaType::try_from(payload.get_u8())?;
|
||||
let mut hash = [0u8; 32];
|
||||
payload.copy_to_slice(&mut hash);
|
||||
items.push((meta_type, hash));
|
||||
}
|
||||
Ok(Message::BatchCheckMeta { items })
|
||||
}
|
||||
Command::CheckMetaResp => {
|
||||
if payload.remaining() < 4 {
|
||||
return Err(Error::new(ErrorKind::UnexpectedEof, "CheckMetaResp payload too short"));
|
||||
}
|
||||
let count = payload.get_u32_le() as usize;
|
||||
if payload.remaining() < count * 33 {
|
||||
return Err(Error::new(ErrorKind::UnexpectedEof, "CheckMetaResp items too short"));
|
||||
}
|
||||
let mut missing_items = Vec::with_capacity(count);
|
||||
for _ in 0..count {
|
||||
let meta_type = MetaType::try_from(payload.get_u8())?;
|
||||
let mut hash = [0u8; 32];
|
||||
payload.copy_to_slice(&mut hash);
|
||||
missing_items.push((meta_type, hash));
|
||||
}
|
||||
Ok(Message::CheckMetaResp { missing_items })
|
||||
}
|
||||
Command::SendMeta => {
|
||||
if payload.remaining() < 37 {
|
||||
return Err(Error::new(ErrorKind::UnexpectedEof, "SendMeta payload too short"));
|
||||
}
|
||||
let meta_type = MetaType::try_from(payload.get_u8())?;
|
||||
let mut meta_hash = [0u8; 32];
|
||||
payload.copy_to_slice(&mut meta_hash);
|
||||
let body_len = payload.get_u32_le() as usize;
|
||||
if payload.remaining() < body_len {
|
||||
return Err(Error::new(ErrorKind::UnexpectedEof, "SendMeta body too short"));
|
||||
}
|
||||
let body = payload.copy_to_bytes(body_len);
|
||||
Ok(Message::SendMeta { meta_type, meta_hash, body })
|
||||
}
|
||||
Command::MetaOk => Ok(Message::MetaOk),
|
||||
Command::MetaFail => {
|
||||
if payload.remaining() < 2 {
|
||||
return Err(Error::new(ErrorKind::UnexpectedEof, "MetaFail payload too short"));
|
||||
}
|
||||
let reason_len = payload.get_u16_le() as usize;
|
||||
if payload.remaining() < reason_len {
|
||||
return Err(Error::new(ErrorKind::UnexpectedEof, "MetaFail reason too short"));
|
||||
}
|
||||
let reason = String::from_utf8(payload.copy_to_bytes(reason_len).to_vec())
|
||||
.map_err(|_| Error::new(ErrorKind::InvalidData, "Invalid UTF-8 in reason"))?;
|
||||
Ok(Message::MetaFail { reason })
|
||||
}
|
||||
Command::SendSnapshot => {
|
||||
if payload.remaining() < 36 {
|
||||
return Err(Error::new(ErrorKind::UnexpectedEof, "SendSnapshot payload too short"));
|
||||
}
|
||||
let mut snapshot_hash = [0u8; 32];
|
||||
payload.copy_to_slice(&mut snapshot_hash);
|
||||
let body_len = payload.get_u32_le() as usize;
|
||||
if payload.remaining() < body_len {
|
||||
return Err(Error::new(ErrorKind::UnexpectedEof, "SendSnapshot body too short"));
|
||||
}
|
||||
let body = payload.copy_to_bytes(body_len);
|
||||
Ok(Message::SendSnapshot { snapshot_hash, body })
|
||||
}
|
||||
Command::SnapshotOk => {
|
||||
if payload.remaining() < 2 {
|
||||
return Err(Error::new(ErrorKind::UnexpectedEof, "SnapshotOk payload too short"));
|
||||
}
|
||||
let id_len = payload.get_u16_le() as usize;
|
||||
if payload.remaining() < id_len {
|
||||
return Err(Error::new(ErrorKind::UnexpectedEof, "SnapshotOk id too short"));
|
||||
}
|
||||
let snapshot_id = String::from_utf8(payload.copy_to_bytes(id_len).to_vec())
|
||||
.map_err(|_| Error::new(ErrorKind::InvalidData, "Invalid UTF-8 in snapshot_id"))?;
|
||||
Ok(Message::SnapshotOk { snapshot_id })
|
||||
}
|
||||
Command::SnapshotFail => {
|
||||
if payload.remaining() < 8 {
|
||||
return Err(Error::new(ErrorKind::UnexpectedEof, "SnapshotFail payload too short"));
|
||||
}
|
||||
let chunk_count = payload.get_u32_le() as usize;
|
||||
if payload.remaining() < chunk_count * 32 + 4 {
|
||||
return Err(Error::new(ErrorKind::UnexpectedEof, "SnapshotFail chunks too short"));
|
||||
}
|
||||
let mut missing_chunks = Vec::with_capacity(chunk_count);
|
||||
for _ in 0..chunk_count {
|
||||
let mut hash = [0u8; 32];
|
||||
payload.copy_to_slice(&mut hash);
|
||||
missing_chunks.push(hash);
|
||||
}
|
||||
let meta_count = payload.get_u32_le() as usize;
|
||||
if payload.remaining() < meta_count * 33 {
|
||||
return Err(Error::new(ErrorKind::UnexpectedEof, "SnapshotFail metas too short"));
|
||||
}
|
||||
let mut missing_metas = Vec::with_capacity(meta_count);
|
||||
for _ in 0..meta_count {
|
||||
let meta_type = MetaType::try_from(payload.get_u8())?;
|
||||
let mut hash = [0u8; 32];
|
||||
payload.copy_to_slice(&mut hash);
|
||||
missing_metas.push((meta_type, hash));
|
||||
}
|
||||
Ok(Message::SnapshotFail { missing_chunks, missing_metas })
|
||||
}
|
||||
Command::Close => Ok(Message::Close),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the command for this message
|
||||
pub fn command(&self) -> Command {
|
||||
match self {
|
||||
Message::Hello { .. } => Command::Hello,
|
||||
Message::HelloOk => Command::HelloOk,
|
||||
Message::AuthUserPass { .. } => Command::AuthUserPass,
|
||||
Message::AuthCode { .. } => Command::AuthCode,
|
||||
Message::AuthOk { .. } => Command::AuthOk,
|
||||
Message::AuthFail { .. } => Command::AuthFail,
|
||||
Message::BatchCheckChunk { .. } => Command::BatchCheckChunk,
|
||||
Message::CheckChunkResp { .. } => Command::CheckChunkResp,
|
||||
Message::SendChunk { .. } => Command::SendChunk,
|
||||
Message::ChunkOk => Command::ChunkOk,
|
||||
Message::ChunkFail { .. } => Command::ChunkFail,
|
||||
Message::BatchCheckMeta { .. } => Command::BatchCheckMeta,
|
||||
Message::CheckMetaResp { .. } => Command::CheckMetaResp,
|
||||
Message::SendMeta { .. } => Command::SendMeta,
|
||||
Message::MetaOk => Command::MetaOk,
|
||||
Message::MetaFail { .. } => Command::MetaFail,
|
||||
Message::SendSnapshot { .. } => Command::SendSnapshot,
|
||||
Message::SnapshotOk { .. } => Command::SnapshotOk,
|
||||
Message::SnapshotFail { .. } => Command::SnapshotFail,
|
||||
Message::Close => Command::Close,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_header_serialization() {
|
||||
let header = MessageHeader::new(Command::Hello, [1; 16], 42);
|
||||
let serialized = header.serialize();
|
||||
let deserialized = MessageHeader::deserialize(&serialized).unwrap();
|
||||
|
||||
assert_eq!(deserialized.cmd, Command::Hello);
|
||||
assert_eq!(deserialized.session_id, [1; 16]);
|
||||
assert_eq!(deserialized.payload_len, 42);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hello_message() {
|
||||
let msg = Message::Hello { client_type: 1, auth_type: 2 };
|
||||
let payload = msg.serialize_payload().unwrap();
|
||||
let deserialized = Message::deserialize_payload(Command::Hello, payload).unwrap();
|
||||
|
||||
match deserialized {
|
||||
Message::Hello { client_type, auth_type } => {
|
||||
assert_eq!(client_type, 1);
|
||||
assert_eq!(auth_type, 2);
|
||||
}
|
||||
_ => panic!("Wrong message type"),
|
||||
}
|
||||
}
|
||||
}
|
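The frame layout above is a fixed-size header followed by the payload, so a complete round trip needs only the APIs in this file. A minimal sketch, assuming the all-zero session ID used before AuthOk and the io-flavored `Result` alias the error construction implies:

use bytes::Bytes;

use crate::sync::protocol::{Command, Message, MessageHeader};

// Hypothetical helper: build one wire frame (header + payload) for a message.
fn frame(msg: &Message, session_id: [u8; 16]) -> std::io::Result<Vec<u8>> {
    let payload = msg.serialize_payload()?;
    let header = MessageHeader::new(msg.command(), session_id, payload.len() as u32);
    let mut out = header.serialize().to_vec();
    out.extend_from_slice(&payload);
    Ok(out)
}

// Round-trip check mirroring the unit tests above.
fn round_trip() -> std::io::Result<()> {
    let bytes = frame(&Message::Hello { client_type: 1, auth_type: 0 }, [0u8; 16])?;
    let header = MessageHeader::deserialize(&bytes[..MessageHeader::SIZE])?;
    let payload = Bytes::copy_from_slice(&bytes[MessageHeader::SIZE..]);
    let msg = Message::deserialize_payload(header.cmd, payload)?;
    assert_eq!(msg.command(), Command::Hello);
    Ok(())
}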
468
server/src/sync/server.rs
Normal file
@@ -0,0 +1,468 @@
use anyhow::{Context, Result};
use bytes::Bytes;
use sqlx::SqlitePool;
use std::sync::Arc;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use uuid::Uuid;

use crate::sync::protocol::{Command, Message, MessageHeader, MetaType};
use crate::sync::session::{SessionManager, session_cleanup_task};
use crate::sync::storage::Storage;
use crate::sync::validation::SnapshotValidator;

/// Configuration for the sync server
#[derive(Debug, Clone)]
pub struct SyncServerConfig {
    pub bind_address: String,
    pub port: u16,
    pub data_dir: String,
    pub max_connections: usize,
    pub chunk_size_limit: usize,
    pub meta_size_limit: usize,
    pub batch_limit: usize,
}

impl Default for SyncServerConfig {
    fn default() -> Self {
        Self {
            bind_address: "0.0.0.0".to_string(),
            port: 8380,
            data_dir: "./data".to_string(),
            max_connections: 100,
            chunk_size_limit: 4 * 1024 * 1024, // 4 MiB
            meta_size_limit: 1024 * 1024,      // 1 MiB
            batch_limit: 1000,
        }
    }
}

/// Main sync server
pub struct SyncServer {
    config: SyncServerConfig,
    storage: Storage,
    session_manager: Arc<SessionManager>,
    validator: SnapshotValidator,
}

impl SyncServer {
    pub fn new(config: SyncServerConfig, db_pool: SqlitePool) -> Self {
        let storage = Storage::new(&config.data_dir);
        let session_manager = Arc::new(SessionManager::new(db_pool));
        let validator = SnapshotValidator::new(storage.clone());

        Self {
            config,
            storage,
            session_manager,
            validator,
        }
    }

    /// Start the sync server
    pub async fn start(&self) -> Result<()> {
        // Initialize storage
        self.storage.init().await
            .context("Failed to initialize storage")?;

        let bind_addr = format!("{}:{}", self.config.bind_address, self.config.port);
        let listener = TcpListener::bind(&bind_addr).await
            .with_context(|| format!("Failed to bind to {}", bind_addr))?;

        println!("Sync server listening on {}", bind_addr);

        // Start session cleanup task
        let session_manager_clone = Arc::clone(&self.session_manager);
        tokio::spawn(async move {
            session_cleanup_task(session_manager_clone).await;
        });

        // Accept connections
        loop {
            match listener.accept().await {
                Ok((stream, addr)) => {
                    println!("New sync connection from {}", addr);

                    let handler = ConnectionHandler::new(
                        stream,
                        self.storage.clone(),
                        Arc::clone(&self.session_manager),
                        self.validator.clone(),
                        self.config.clone(),
                    );

                    tokio::spawn(async move {
                        if let Err(e) = handler.handle().await {
                            eprintln!("Connection error from {}: {}", addr, e);
                        }
                    });
                }
                Err(e) => {
                    eprintln!("Failed to accept connection: {}", e);
                }
            }
        }
    }
}

/// Connection handler for individual sync clients
struct ConnectionHandler {
    stream: TcpStream,
    storage: Storage,
    session_manager: Arc<SessionManager>,
    validator: SnapshotValidator,
    config: SyncServerConfig,
    session_id: Option<[u8; 16]>,
    machine_id: Option<i64>,
}

impl ConnectionHandler {
    fn new(
        stream: TcpStream,
        storage: Storage,
        session_manager: Arc<SessionManager>,
        validator: SnapshotValidator,
        config: SyncServerConfig,
    ) -> Self {
        Self {
            stream,
            storage,
            session_manager,
            validator,
            config,
            session_id: None,
            machine_id: None,
        }
    }

    /// Handle the connection
    async fn handle(mut self) -> Result<()> {
        loop {
            // Read message header
            let header = self.read_header().await?;

            // Read payload with appropriate size limit based on command type
            let payload = if header.payload_len > 0 {
                self.read_payload(header.cmd, header.payload_len).await?
            } else {
                Bytes::new()
            };

            // Parse message
            let message = Message::deserialize_payload(header.cmd, payload)
                .context("Failed to deserialize message")?;

            // Handle message
            let response = self.handle_message(message).await?;

            // Send response
            if let Some(response_msg) = response {
                self.send_message(response_msg).await?;
            }

            // Close connection if requested
            if header.cmd == Command::Close {
                break;
            }
        }

        // Clean up session
        if let Some(session_id) = self.session_id {
            self.session_manager.remove_session(&session_id).await;
        }

        Ok(())
    }

    /// Read message header
    async fn read_header(&mut self) -> Result<MessageHeader> {
        let mut header_buf = [0u8; MessageHeader::SIZE];
        self.stream.read_exact(&mut header_buf).await
            .context("Failed to read message header")?;

        MessageHeader::deserialize(&header_buf)
            .context("Failed to parse message header")
    }

    /// Read message payload with appropriate size limit based on command type
    async fn read_payload(&mut self, cmd: Command, len: u32) -> Result<Bytes> {
        // Use different size limits based on command type
        let size_limit = match cmd {
            Command::SendChunk => self.config.chunk_size_limit,
            _ => self.config.meta_size_limit,
        };

        if len as usize > size_limit {
            return Err(anyhow::anyhow!("Payload too large: {} bytes", len));
        }

        let mut payload_buf = vec![0u8; len as usize];
        self.stream.read_exact(&mut payload_buf).await
            .context("Failed to read message payload")?;

        Ok(Bytes::from(payload_buf))
    }

    /// Send a message
    async fn send_message(&mut self, message: Message) -> Result<()> {
        let session_id = self.session_id.unwrap_or([0u8; 16]);
        let payload = message.serialize_payload()?;

        let header = MessageHeader::new(message.command(), session_id, payload.len() as u32);
        let header_bytes = header.serialize();

        self.stream.write_all(&header_bytes).await
            .context("Failed to write message header")?;

        if !payload.is_empty() {
            self.stream.write_all(&payload).await
                .context("Failed to write message payload")?;
        }

        self.stream.flush().await
            .context("Failed to flush stream")?;

        Ok(())
    }

    /// Handle a received message
    async fn handle_message(&mut self, message: Message) -> Result<Option<Message>> {
        match message {
            Message::Hello { client_type: _, auth_type: _ } => {
                Ok(Some(Message::HelloOk))
            }

            Message::AuthUserPass { username, password, machine_id } => {
                match self.session_manager.authenticate_userpass(&username, &password, machine_id).await {
                    Ok(session) => {
                        self.session_id = Some(session.session_id);
                        self.machine_id = Some(session.machine_id);
                        Ok(Some(Message::AuthOk { session_id: session.session_id }))
                    }
                    Err(e) => {
                        Ok(Some(Message::AuthFail { reason: e.to_string() }))
                    }
                }
            }

            Message::AuthCode { code } => {
                match self.session_manager.authenticate_code(&code).await {
                    Ok(session) => {
                        self.session_id = Some(session.session_id);
                        self.machine_id = Some(session.machine_id);
                        Ok(Some(Message::AuthOk { session_id: session.session_id }))
                    }
                    Err(e) => {
                        Ok(Some(Message::AuthFail { reason: e.to_string() }))
                    }
                }
            }

            Message::BatchCheckChunk { hashes } => {
                self.require_auth()?;

                if hashes.len() > self.config.batch_limit {
                    return Err(anyhow::anyhow!("Batch size exceeds limit: {}", hashes.len()));
                }

                let missing_hashes = self.validator.validate_chunk_batch(&hashes).await?;
                Ok(Some(Message::CheckChunkResp { missing_hashes }))
            }

            Message::SendChunk { hash, data } => {
                self.require_auth()?;

                if data.len() > self.config.chunk_size_limit {
                    return Ok(Some(Message::ChunkFail {
                        reason: format!("Chunk too large: {} bytes", data.len())
                    }));
                }

                match self.storage.store_chunk(&hash, &data).await {
                    Ok(()) => Ok(Some(Message::ChunkOk)),
                    Err(e) => Ok(Some(Message::ChunkFail { reason: e.to_string() })),
                }
            }

            Message::BatchCheckMeta { items } => {
                self.require_auth()?;

                if items.len() > self.config.batch_limit {
                    return Err(anyhow::anyhow!("Batch size exceeds limit: {}", items.len()));
                }

                let missing_items = self.validator.validate_meta_batch(&items).await?;
                Ok(Some(Message::CheckMetaResp { missing_items }))
            }

            Message::SendMeta { meta_type, meta_hash, body } => {
                self.require_auth()?;

                if body.len() > self.config.meta_size_limit {
                    return Ok(Some(Message::MetaFail {
                        reason: format!("Meta object too large: {} bytes", body.len())
                    }));
                }

                match self.storage.store_meta(meta_type, &meta_hash, &body).await {
                    Ok(()) => Ok(Some(Message::MetaOk)),
                    Err(e) => Ok(Some(Message::MetaFail { reason: e.to_string() })),
                }
            }

            Message::SendSnapshot { snapshot_hash, body } => {
                self.require_auth()?;

                if body.len() > self.config.meta_size_limit {
                    println!("Snapshot rejected: size limit exceeded ({} > {})", body.len(), self.config.meta_size_limit);
                    return Ok(Some(Message::SnapshotFail {
                        missing_chunks: vec![],
                        missing_metas: vec![],
                    }));
                }

                println!("Validating snapshot hash: {}", hex::encode(&snapshot_hash));

                // Validate snapshot
                match self.validator.validate_snapshot(&snapshot_hash, &body).await {
                    Ok(validation_result) => {
                        println!("Validation result - is_valid: {}, missing_chunks: {}, missing_metas: {}",
                            validation_result.is_valid,
                            validation_result.missing_chunks.len(),
                            validation_result.missing_metas.len());

                        if validation_result.is_valid {
                            // Store snapshot meta
                            if let Err(e) = self.storage.store_meta(MetaType::Snapshot, &snapshot_hash, &body).await {
                                println!("Failed to store snapshot meta: {}", e);
                                return Ok(Some(Message::SnapshotFail {
                                    missing_chunks: vec![],
                                    missing_metas: vec![],
                                }));
                            }

                            // Create snapshot reference
                            let snapshot_id = Uuid::new_v4().to_string();
                            let machine_id = self.machine_id
                                .expect("require_auth guarantees an authenticated machine");
                            let created_at = chrono::Utc::now().timestamp() as u64;

                            println!("Creating snapshot reference: machine_id={}, snapshot_id={}", machine_id, snapshot_id);

                            if let Err(e) = self.storage.store_snapshot_ref(
                                machine_id,
                                &snapshot_id,
                                &snapshot_hash,
                                created_at
                            ).await {
                                println!("Failed to store snapshot reference: {}", e);
                                return Ok(Some(Message::SnapshotFail {
                                    missing_chunks: vec![],
                                    missing_metas: vec![],
                                }));
                            }

                            println!("Snapshot successfully stored with ID: {}", snapshot_id);
                            Ok(Some(Message::SnapshotOk { snapshot_id }))
                        } else {
                            println!("Snapshot validation failed - returning missing items");
                            Ok(Some(Message::SnapshotFail {
                                missing_chunks: validation_result.missing_chunks,
                                missing_metas: validation_result.missing_metas,
                            }))
                        }
                    }
                    Err(e) => {
                        println!("Snapshot validation error: {}", e);
                        Ok(Some(Message::SnapshotFail {
                            missing_chunks: vec![],
                            missing_metas: vec![],
                        }))
                    }
                }
            }

            Message::Close => {
                Ok(None) // No response needed
            }

            // These are response messages that shouldn't be received by the server
            Message::HelloOk | Message::AuthOk { .. } | Message::AuthFail { .. } |
            Message::CheckChunkResp { .. } | Message::ChunkOk | Message::ChunkFail { .. } |
            Message::CheckMetaResp { .. } | Message::MetaOk | Message::MetaFail { .. } |
            Message::SnapshotOk { .. } | Message::SnapshotFail { .. } => {
                Err(anyhow::anyhow!("Unexpected response message from client"))
            }
        }
    }

    /// Require authentication for protected operations
    fn require_auth(&self) -> Result<()> {
        if self.session_id.is_none() {
            return Err(anyhow::anyhow!("Authentication required"));
        }
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;
    use sqlx::sqlite::SqlitePoolOptions;

    async fn setup_test_server() -> (SyncServer, TempDir) {
        let temp_dir = TempDir::new().unwrap();

        let pool = SqlitePoolOptions::new()
            .connect(":memory:")
            .await
            .unwrap();

        // Create required tables; plain `sqlx::query` keeps test DDL out of
        // the compile-time checked macros
        sqlx::query(
            r#"
            CREATE TABLE users (
                id INTEGER PRIMARY KEY,
                username TEXT UNIQUE NOT NULL,
                password_hash TEXT NOT NULL,
                active INTEGER DEFAULT 1
            )
            "#,
        )
        .execute(&pool)
        .await
        .unwrap();

        sqlx::query(
            r#"
            CREATE TABLE provisioning_codes (
                id INTEGER PRIMARY KEY,
                code TEXT UNIQUE NOT NULL,
                machine_id INTEGER,
                created_by INTEGER NOT NULL,
                expires_at TEXT NOT NULL,
                used INTEGER DEFAULT 0,
                used_at TEXT,
                FOREIGN KEY (created_by) REFERENCES users (id)
            )
            "#,
        )
        .execute(&pool)
        .await
        .unwrap();

        let config = SyncServerConfig {
            data_dir: temp_dir.path().to_string_lossy().to_string(),
            ..Default::default()
        };

        (SyncServer::new(config, pool), temp_dir)
    }

    #[tokio::test]
    async fn test_server_creation() {
        let (server, _temp_dir) = setup_test_server().await;

        // Initialize storage to verify everything works
        server.storage.init().await.unwrap();
    }
}
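Seen from the client side, the handler's loop implies a strict request/response rhythm: write one frame, read one frame. A hedged sketch of the matching client handshake; the client_type/auth_type values are placeholders and this is not a confirmed client implementation:

use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;

use crate::sync::protocol::{Message, MessageHeader};

// Hypothetical client: say hello, then authenticate with a provisioning code.
async fn client_handshake(addr: &str, code: &str) -> anyhow::Result<[u8; 16]> {
    let mut stream = TcpStream::connect(addr).await?;

    for msg in [
        Message::Hello { client_type: 1, auth_type: 1 }, // placeholder values
        Message::AuthCode { code: code.to_string() },
    ] {
        // Write one frame: header, then payload.
        let payload = msg.serialize_payload()?;
        let header = MessageHeader::new(msg.command(), [0u8; 16], payload.len() as u32);
        stream.write_all(&header.serialize()).await?;
        stream.write_all(&payload).await?;

        // Read the server's response frame.
        let mut buf = [0u8; MessageHeader::SIZE];
        stream.read_exact(&mut buf).await?;
        let resp_header = MessageHeader::deserialize(&buf)?;
        let mut resp_payload = vec![0u8; resp_header.payload_len as usize];
        stream.read_exact(&mut resp_payload).await?;

        if let Message::AuthOk { session_id } =
            Message::deserialize_payload(resp_header.cmd, resp_payload.into())?
        {
            return Ok(session_id);
        }
    }
    anyhow::bail!("handshake did not produce AuthOk")
}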
343
server/src/sync/session.rs
Normal file
@@ -0,0 +1,343 @@
use anyhow::{Context, Result};
use rand::RngCore;
use sqlx::SqlitePool;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;

/// Session information
#[derive(Debug, Clone)]
pub struct Session {
    pub session_id: [u8; 16],
    pub machine_id: i64,
    pub user_id: i64,
    pub created_at: chrono::DateTime<chrono::Utc>,
}

/// Session manager for sync connections
#[derive(Debug)]
pub struct SessionManager {
    sessions: Arc<RwLock<HashMap<[u8; 16], Session>>>,
    db_pool: SqlitePool,
}

impl SessionManager {
    pub fn new(db_pool: SqlitePool) -> Self {
        Self {
            sessions: Arc::new(RwLock::new(HashMap::new())),
            db_pool,
        }
    }

    /// Get database pool reference
    pub fn get_db_pool(&self) -> &SqlitePool {
        &self.db_pool
    }

    /// Generate a new session ID
    fn generate_session_id() -> [u8; 16] {
        let mut session_id = [0u8; 16];
        rand::thread_rng().fill_bytes(&mut session_id);
        session_id
    }

    /// Authenticate with username/password and validate machine ownership
    pub async fn authenticate_userpass(&self, username: &str, password: &str, machine_id: i64) -> Result<Session> {
        // Query user from database
        let user = sqlx::query!(
            "SELECT id, username, password_hash FROM users WHERE username = ?",
            username
        )
        .fetch_optional(&self.db_pool)
        .await
        .context("Failed to query user")?;

        let user = user.ok_or_else(|| anyhow::anyhow!("Invalid credentials"))?;

        // Verify password
        if !bcrypt::verify(password, &user.password_hash)
            .context("Failed to verify password")? {
            return Err(anyhow::anyhow!("Invalid credentials"));
        }

        let user_id = user.id.unwrap_or(0) as i64;

        // Validate machine ownership
        let machine = sqlx::query!(
            "SELECT id, user_id FROM machines WHERE id = ?",
            machine_id
        )
        .fetch_optional(&self.db_pool)
        .await
        .context("Failed to query machine")?;

        let machine = machine.ok_or_else(|| anyhow::anyhow!("Machine not found"))?;

        let machine_user_id = machine.user_id;
        if machine_user_id != user_id {
            return Err(anyhow::anyhow!("Machine does not belong to user"));
        }

        // Create session with machine ID
        let session_id = Self::generate_session_id();
        let machine_id = machine.id; // Use database ID
        let session = Session {
            session_id,
            machine_id,
            user_id,
            created_at: chrono::Utc::now(),
        };

        // Store session
        let mut sessions = self.sessions.write().await;
        sessions.insert(session_id, session.clone());

        Ok(session)
    }

    /// Authenticate with provisioning code
    pub async fn authenticate_code(&self, code: &str) -> Result<Session> {
        // Query provisioning code from database
        let provisioning_code = sqlx::query!(
            r#"
            SELECT pc.id, pc.code, pc.expires_at, pc.used, m.id as machine_id, m.user_id, u.username
            FROM provisioning_codes pc
            JOIN machines m ON pc.machine_id = m.id
            JOIN users u ON m.user_id = u.id
            WHERE pc.code = ? AND pc.used = 0
            "#,
            code
        )
        .fetch_optional(&self.db_pool)
        .await
        .context("Failed to query provisioning code")?;

        let provisioning_code = provisioning_code
            .ok_or_else(|| anyhow::anyhow!("Invalid or used provisioning code"))?;

        // Check if code is expired
        let expires_at: chrono::DateTime<chrono::Utc> = chrono::DateTime::from_naive_utc_and_offset(
            provisioning_code.expires_at,
            chrono::Utc
        );

        if chrono::Utc::now() > expires_at {
            return Err(anyhow::anyhow!("Provisioning code expired"));
        }

        // Mark code as used
        sqlx::query!(
            "UPDATE provisioning_codes SET used = 1 WHERE id = ?",
            provisioning_code.id
        )
        .execute(&self.db_pool)
        .await
        .context("Failed to mark provisioning code as used")?;

        // Create session
        let session_id = Self::generate_session_id();
        let machine_id = provisioning_code.machine_id.expect("Machine ID should not be null"); // Use machine ID from database
        let session = Session {
            session_id,
            machine_id,
            user_id: provisioning_code.user_id as i64,
            created_at: chrono::Utc::now(),
        };

        // Store session
        let mut sessions = self.sessions.write().await;
        sessions.insert(session_id, session.clone());

        Ok(session)
    }

    /// Get session by session ID
    pub async fn get_session(&self, session_id: &[u8; 16]) -> Option<Session> {
        let sessions = self.sessions.read().await;
        sessions.get(session_id).cloned()
    }

    /// Validate session and return associated machine ID
    pub async fn validate_session(&self, session_id: &[u8; 16]) -> Result<i64> {
        let session = self.get_session(session_id).await
            .ok_or_else(|| anyhow::anyhow!("Invalid session"))?;

        // Check if session is too old (24 hours)
        let session_age = chrono::Utc::now() - session.created_at;
        if session_age > chrono::Duration::hours(24) {
            // Remove expired session
            let mut sessions = self.sessions.write().await;
            sessions.remove(session_id);
            return Err(anyhow::anyhow!("Session expired"));
        }

        Ok(session.machine_id)
    }

    /// Remove session
    pub async fn remove_session(&self, session_id: &[u8; 16]) {
        let mut sessions = self.sessions.write().await;
        sessions.remove(session_id);
    }

    /// Clean up expired sessions
    pub async fn cleanup_expired_sessions(&self) {
        let mut sessions = self.sessions.write().await;
        let now = chrono::Utc::now();

        sessions.retain(|_, session| {
            let age = now - session.created_at;
            age <= chrono::Duration::hours(24)
        });
    }

    /// Get active session count
    pub async fn active_session_count(&self) -> usize {
        let sessions = self.sessions.read().await;
        sessions.len()
    }

    /// List active sessions
    pub async fn list_active_sessions(&self) -> Vec<Session> {
        let sessions = self.sessions.read().await;
        sessions.values().cloned().collect()
    }
}

/// Periodic cleanup task for expired sessions
pub async fn session_cleanup_task(session_manager: Arc<SessionManager>) {
    let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(3600)); // Every hour

    loop {
        interval.tick().await;
        session_manager.cleanup_expired_sessions().await;
        println!("Cleaned up expired sync sessions. Active sessions: {}",
            session_manager.active_session_count().await);
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use sqlx::sqlite::SqlitePoolOptions;

    async fn setup_test_db() -> SqlitePool {
        let pool = SqlitePoolOptions::new()
            .connect(":memory:")
            .await
            .unwrap();

        // Create tables; plain `sqlx::query` keeps test DDL out of the
        // compile-time checked macros
        sqlx::query(
            r#"
            CREATE TABLE users (
                id INTEGER PRIMARY KEY,
                username TEXT UNIQUE NOT NULL,
                password_hash TEXT NOT NULL,
                active INTEGER DEFAULT 1
            )
            "#,
        )
        .execute(&pool)
        .await
        .unwrap();

        // Minimal machines table covering the columns the session queries use
        sqlx::query(
            r#"
            CREATE TABLE machines (
                id INTEGER PRIMARY KEY,
                user_id INTEGER NOT NULL,
                FOREIGN KEY (user_id) REFERENCES users (id)
            )
            "#,
        )
        .execute(&pool)
        .await
        .unwrap();

        sqlx::query(
            r#"
            CREATE TABLE provisioning_codes (
                id INTEGER PRIMARY KEY,
                code TEXT UNIQUE NOT NULL,
                machine_id INTEGER,
                created_by INTEGER NOT NULL,
                expires_at TEXT NOT NULL,
                used INTEGER DEFAULT 0,
                used_at TEXT,
                FOREIGN KEY (created_by) REFERENCES users (id)
            )
            "#,
        )
        .execute(&pool)
        .await
        .unwrap();

        // Insert test user and a machine owned by that user
        let password_hash = bcrypt::hash("password123", bcrypt::DEFAULT_COST).unwrap();
        sqlx::query!(
            "INSERT INTO users (username, password_hash) VALUES (?, ?)",
            "testuser",
            password_hash
        )
        .execute(&pool)
        .await
        .unwrap();

        sqlx::query("INSERT INTO machines (id, user_id) VALUES (1, 1)")
            .execute(&pool)
            .await
            .unwrap();

        pool
    }

    #[tokio::test]
    async fn test_authenticate_userpass() {
        let pool = setup_test_db().await;
        let session_manager = SessionManager::new(pool);

        let session = session_manager
            .authenticate_userpass("testuser", "password123", 1)
            .await
            .unwrap();

        assert_eq!(session.user_id, 1);
        assert_eq!(session.machine_id, 1);
    }

    #[tokio::test]
    async fn test_authenticate_userpass_invalid() {
        let pool = setup_test_db().await;
        let session_manager = SessionManager::new(pool);

        let result = session_manager
            .authenticate_userpass("testuser", "wrongpassword", 1)
            .await;

        assert!(result.is_err());
    }

    #[tokio::test]
    async fn test_session_validation() {
        let pool = setup_test_db().await;
        let session_manager = SessionManager::new(pool);

        let session = session_manager
            .authenticate_userpass("testuser", "password123", 1)
            .await
            .unwrap();

        let machine_id = session_manager
            .validate_session(&session.session_id)
            .await
            .unwrap();

        assert_eq!(machine_id, session.machine_id);
    }

    #[tokio::test]
    async fn test_session_cleanup() {
        let pool = setup_test_db().await;
        let session_manager = SessionManager::new(pool);

        let session = session_manager
            .authenticate_userpass("testuser", "password123", 1)
            .await
            .unwrap();

        assert_eq!(session_manager.active_session_count().await, 1);

        // Manually expire the session
        {
            let mut sessions = session_manager.sessions.write().await;
            if let Some(session) = sessions.get_mut(&session.session_id) {
                session.created_at = chrono::Utc::now() - chrono::Duration::hours(25);
            }
        }

        session_manager.cleanup_expired_sessions().await;
        assert_eq!(session_manager.active_session_count().await, 0);
    }
}
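Because sessions live only in the in-process map, the lifecycle is: authenticate to mint an entry, validate on each protected message, remove on Close, and let the hourly sweep catch whatever is left. A small sketch of that flow, reusing the test fixtures above (machine_id 1 is an assumption from that schema):

use crate::sync::session::SessionManager;

// Sketch of the expected session lifecycle.
async fn lifecycle(session_manager: &SessionManager) -> anyhow::Result<()> {
    let session = session_manager
        .authenticate_userpass("testuser", "password123", 1)
        .await?;

    // Every protected operation maps the session ID back to a machine,
    // re-checking the 24-hour expiry as a side effect.
    let machine_id = session_manager.validate_session(&session.session_id).await?;
    assert_eq!(machine_id, session.machine_id);

    // On Close (or socket drop) the connection handler evicts the entry.
    session_manager.remove_session(&session.session_id).await;
    Ok(())
}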
406
server/src/sync/storage.rs
Normal file
@@ -0,0 +1,406 @@
use anyhow::{Context, Result};
use bytes::Bytes;
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use tokio::fs;

use crate::sync::protocol::{Hash, MetaType};
use crate::sync::meta::MetaObj;

/// Storage backend for chunks and metadata objects
#[derive(Debug, Clone)]
pub struct Storage {
    data_dir: PathBuf,
}

impl Storage {
    pub fn new<P: AsRef<Path>>(data_dir: P) -> Self {
        Self {
            data_dir: data_dir.as_ref().to_path_buf(),
        }
    }

    /// Initialize storage directories
    pub async fn init(&self) -> Result<()> {
        let chunks_dir = self.data_dir.join("sync").join("chunks");
        let meta_dir = self.data_dir.join("sync").join("meta");
        let machines_dir = self.data_dir.join("sync").join("machines");

        fs::create_dir_all(&chunks_dir).await
            .context("Failed to create chunks directory")?;

        fs::create_dir_all(&meta_dir).await
            .context("Failed to create meta directory")?;

        fs::create_dir_all(&machines_dir).await
            .context("Failed to create machines directory")?;

        // Create subdirectories for each meta type
        for meta_type in &["files", "dirs", "partitions", "disks", "snapshots"] {
            fs::create_dir_all(meta_dir.join(meta_type)).await
                .with_context(|| format!("Failed to create meta/{} directory", meta_type))?;
        }

        Ok(())
    }

    /// Get chunk storage path for a hash
    fn chunk_path(&self, hash: &Hash) -> PathBuf {
        let hex = hex::encode(hash);
        let ab = &hex[0..2];
        let cd = &hex[2..4];
        let filename = format!("{}.chk", hex);

        self.data_dir
            .join("sync")
            .join("chunks")
            .join(ab)
            .join(cd)
            .join(filename)
    }

    /// Get meta storage path for a hash and type
    fn meta_path(&self, meta_type: MetaType, hash: &Hash) -> PathBuf {
        let hex = hex::encode(hash);
        let ab = &hex[0..2];
        let cd = &hex[2..4];
        let filename = format!("{}.meta", hex);

        let type_dir = match meta_type {
            MetaType::File => "files",
            MetaType::Dir => "dirs",
            MetaType::Partition => "partitions",
            MetaType::Disk => "disks",
            MetaType::Snapshot => "snapshots",
        };

        self.data_dir
            .join("sync")
            .join("meta")
            .join(type_dir)
            .join(ab)
            .join(cd)
            .join(filename)
    }

    /// Check if a chunk exists
    pub async fn chunk_exists(&self, hash: &Hash) -> bool {
        let path = self.chunk_path(hash);
        path.exists()
    }

    /// Check if multiple chunks exist
    pub async fn chunks_exist(&self, hashes: &[Hash]) -> Result<HashSet<Hash>> {
        let mut existing = HashSet::new();

        for hash in hashes {
            if self.chunk_exists(hash).await {
                existing.insert(*hash);
            }
        }

        Ok(existing)
    }

    /// Store a chunk
    pub async fn store_chunk(&self, hash: &Hash, data: &[u8]) -> Result<()> {
        // Verify hash
        let computed_hash = blake3::hash(data);
        if computed_hash.as_bytes() != hash {
            return Err(anyhow::anyhow!("Chunk hash mismatch"));
        }

        let path = self.chunk_path(hash);

        // Create parent directories
        if let Some(parent) = path.parent() {
            fs::create_dir_all(parent).await
                .context("Failed to create chunk directory")?;
        }

        // Write to temporary file first, then rename (atomic write)
        let temp_path = path.with_extension("tmp");
        fs::write(&temp_path, data).await
            .context("Failed to write chunk to temporary file")?;

        fs::rename(&temp_path, &path).await
            .context("Failed to rename chunk file")?;

        Ok(())
    }

    /// Load a chunk
    pub async fn load_chunk(&self, hash: &Hash) -> Result<Option<Bytes>> {
        let path = self.chunk_path(hash);

        if !path.exists() {
            return Ok(None);
        }

        let data = fs::read(&path).await
            .context("Failed to read chunk file")?;

        // Verify hash
        let computed_hash = blake3::hash(&data);
        if computed_hash.as_bytes() != hash {
            return Err(anyhow::anyhow!("Stored chunk hash mismatch"));
        }

        Ok(Some(Bytes::from(data)))
    }

    /// Check if a meta object exists
    pub async fn meta_exists(&self, meta_type: MetaType, hash: &Hash) -> bool {
        let path = self.meta_path(meta_type, hash);
        path.exists()
    }

    /// Check if multiple meta objects exist
    pub async fn metas_exist(&self, items: &[(MetaType, Hash)]) -> Result<HashSet<(MetaType, Hash)>> {
        let mut existing = HashSet::new();

        for &(meta_type, hash) in items {
            if self.meta_exists(meta_type, &hash).await {
                existing.insert((meta_type, hash));
            }
        }

        Ok(existing)
    }

    /// Store a meta object
    pub async fn store_meta(&self, meta_type: MetaType, hash: &Hash, body: &[u8]) -> Result<()> {
        // Verify hash
        let computed_hash = blake3::hash(body);
        if computed_hash.as_bytes() != hash {
            return Err(anyhow::anyhow!("Meta object hash mismatch"));
        }

        let path = self.meta_path(meta_type, hash);

        // Create parent directories
        if let Some(parent) = path.parent() {
            fs::create_dir_all(parent).await
                .context("Failed to create meta directory")?;
        }

        // Write to temporary file first, then rename (atomic write)
        let temp_path = path.with_extension("tmp");
        fs::write(&temp_path, body).await
            .context("Failed to write meta to temporary file")?;

        fs::rename(&temp_path, &path).await
            .context("Failed to rename meta file")?;

        Ok(())
    }

    /// Load a meta object
    pub async fn load_meta(&self, meta_type: MetaType, hash: &Hash) -> Result<Option<MetaObj>> {
        let path = self.meta_path(meta_type, hash);

        if !path.exists() {
            println!("Meta file does not exist: {:?}", path);
            return Ok(None);
        }

        println!("Reading meta file: {:?}", path);
        let data = fs::read(&path).await
            .context("Failed to read meta file")?;

        println!("Read {} bytes from meta file", data.len());

        // Verify hash
        let computed_hash = blake3::hash(&data);
        if computed_hash.as_bytes() != hash {
            println!("Hash mismatch: expected {}, got {}", hex::encode(hash), hex::encode(computed_hash.as_bytes()));
            return Err(anyhow::anyhow!("Stored meta object hash mismatch"));
        }

        println!("Hash verified, deserializing {:?} object", meta_type);
        let meta_obj = MetaObj::deserialize(meta_type, Bytes::from(data))
            .context("Failed to deserialize meta object")?;

        println!("Successfully deserialized meta object");
        Ok(Some(meta_obj))
    }

    /// Get snapshot storage path for a machine
    fn snapshot_ref_path(&self, machine_id: i64, snapshot_id: &str) -> PathBuf {
        self.data_dir
            .join("sync")
            .join("machines")
            .join(machine_id.to_string())
            .join("snapshots")
            .join(format!("{}.ref", snapshot_id))
    }

    /// Store a snapshot reference
    pub async fn store_snapshot_ref(
        &self,
        machine_id: i64,
        snapshot_id: &str,
        snapshot_hash: &Hash,
        created_at: u64
    ) -> Result<()> {
        let path = self.snapshot_ref_path(machine_id, snapshot_id);

        // Create parent directories
        if let Some(parent) = path.parent() {
            fs::create_dir_all(parent).await
                .context("Failed to create snapshot reference directory")?;
        }

        // Create snapshot reference content
        let content = format!("{}:{}", hex::encode(snapshot_hash), created_at);

        // Write to temporary file first, then rename (atomic write)
        let temp_path = path.with_extension("tmp");
        fs::write(&temp_path, content).await
            .context("Failed to write snapshot reference to temporary file")?;

        fs::rename(&temp_path, &path).await
            .context("Failed to rename snapshot reference file")?;

        Ok(())
    }

    /// Load a snapshot reference
    pub async fn load_snapshot_ref(&self, machine_id: i64, snapshot_id: &str) -> Result<Option<(Hash, u64)>> {
        let path = self.snapshot_ref_path(machine_id, snapshot_id);

        if !path.exists() {
            return Ok(None);
        }

        let content = fs::read_to_string(&path).await
            .context("Failed to read snapshot reference file")?;

        let parts: Vec<&str> = content.trim().split(':').collect();
        if parts.len() != 2 {
            return Err(anyhow::anyhow!("Invalid snapshot reference format"));
        }

        let snapshot_hash: Hash = hex::decode(parts[0])
            .context("Failed to decode snapshot hash")?
            .try_into()
            .map_err(|_| anyhow::anyhow!("Invalid snapshot hash length"))?;

        let created_at: u64 = parts[1].parse()
            .context("Failed to parse snapshot timestamp")?;

        Ok(Some((snapshot_hash, created_at)))
    }

    /// List snapshots for a machine
    pub async fn list_snapshots(&self, machine_id: i64) -> Result<Vec<String>> {
        let snapshots_dir = self.data_dir
            .join("sync")
            .join("machines")
            .join(machine_id.to_string())
            .join("snapshots");

        if !snapshots_dir.exists() {
            return Ok(Vec::new());
        }

        let mut entries = fs::read_dir(&snapshots_dir).await
            .context("Failed to read snapshots directory")?;

        let mut snapshots = Vec::new();
        while let Some(entry) = entries.next_entry().await
            .context("Failed to read snapshot entry")? {

            if let Some(file_name) = entry.file_name().to_str() {
                if file_name.ends_with(".ref") {
                    let snapshot_id = file_name.trim_end_matches(".ref");
                    snapshots.push(snapshot_id.to_string());
                }
            }
        }

        snapshots.sort();
        Ok(snapshots)
    }

    /// Delete old snapshots, keeping only the latest N
    pub async fn cleanup_snapshots(&self, machine_id: i64, keep_count: usize) -> Result<()> {
        let mut snapshots = self.list_snapshots(machine_id).await?;

        if snapshots.len() <= keep_count {
            return Ok(());
        }

        snapshots.sort();
        snapshots.reverse(); // Most recent first

        // Delete older snapshots
        for snapshot_id in snapshots.iter().skip(keep_count) {
            let path = self.snapshot_ref_path(machine_id, snapshot_id);
            if path.exists() {
                fs::remove_file(&path).await
                    .with_context(|| format!("Failed to delete snapshot {}", snapshot_id))?;
            }
        }

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    #[tokio::test]
    async fn test_storage_init() {
        let temp_dir = TempDir::new().unwrap();
        let storage = Storage::new(temp_dir.path());
        storage.init().await.unwrap();

        assert!(temp_dir.path().join("sync/chunks").exists());
        assert!(temp_dir.path().join("sync/meta/files").exists());
        assert!(temp_dir.path().join("sync/machines").exists());
    }

    #[tokio::test]
    async fn test_chunk_storage() {
        let temp_dir = TempDir::new().unwrap();
        let storage = Storage::new(temp_dir.path());
        storage.init().await.unwrap();

        let data = b"test chunk data";
        let hash = blake3::hash(data).into();

        // Store chunk
        storage.store_chunk(&hash, data).await.unwrap();
        assert!(storage.chunk_exists(&hash).await);

        // Load chunk
        let loaded = storage.load_chunk(&hash).await.unwrap().unwrap();
        assert_eq!(loaded.as_ref(), data);
    }

    #[tokio::test]
    async fn test_snapshot_ref_storage() {
        let temp_dir = TempDir::new().unwrap();
        let storage = Storage::new(temp_dir.path());
        storage.init().await.unwrap();

        let machine_id = 123i64;
        let snapshot_id = "snapshot-001";
        let snapshot_hash = [1u8; 32];
        let created_at = 1234567890;

        storage.store_snapshot_ref(machine_id, snapshot_id, &snapshot_hash, created_at)
            .await.unwrap();

        let loaded = storage.load_snapshot_ref(machine_id, snapshot_id)
            .await.unwrap().unwrap();

        assert_eq!(loaded.0, snapshot_hash);
        assert_eq!(loaded.1, created_at);
    }
}
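The store is content-addressed: a chunk's key is always blake3 of its bytes, and the two-level `ab/cd` fan-out keeps any one directory small. A short usage sketch built only from the APIs above:

use crate::sync::storage::Storage;

// Sketch: store and re-read one content-addressed chunk.
async fn put_and_get(storage: &Storage) -> anyhow::Result<()> {
    let data = b"example chunk";
    let hash: [u8; 32] = blake3::hash(data).into();

    storage.store_chunk(&hash, data).await?;       // verifies hash, writes tmp, renames
    let loaded = storage.load_chunk(&hash).await?; // re-verifies hash on read
    assert_eq!(loaded.as_deref(), Some(&data[..]));
    Ok(())
}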
235
server/src/sync/validation.rs
Normal file
@@ -0,0 +1,235 @@
|
||||
use anyhow::{Context, Result};
|
||||
use std::collections::{HashSet, VecDeque};
|
||||
use crate::sync::protocol::{Hash, MetaType};
|
||||
use crate::sync::storage::Storage;
|
||||
use crate::sync::meta::{MetaObj, SnapshotObj, EntryType};
|
||||
|
||||
/// Validation result for snapshot commits
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ValidationResult {
|
||||
pub is_valid: bool,
|
||||
pub missing_chunks: Vec<Hash>,
|
||||
pub missing_metas: Vec<(MetaType, Hash)>,
|
||||
}
|
||||
|
||||
impl ValidationResult {
|
||||
pub fn valid() -> Self {
|
||||
Self {
|
||||
is_valid: true,
|
||||
missing_chunks: Vec::new(),
|
||||
missing_metas: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn invalid(missing_chunks: Vec<Hash>, missing_metas: Vec<(MetaType, Hash)>) -> Self {
|
||||
Self {
|
||||
is_valid: false,
|
||||
missing_chunks,
|
||||
missing_metas,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn has_missing(&self) -> bool {
|
||||
!self.missing_chunks.is_empty() || !self.missing_metas.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
/// Validator for snapshot object graphs
|
||||
#[derive(Clone)]
|
||||
pub struct SnapshotValidator {
|
||||
storage: Storage,
|
||||
}
|
||||
|
||||
impl SnapshotValidator {
|
||||
pub fn new(storage: Storage) -> Self {
|
||||
Self { storage }
|
||||
}
|
||||
|
||||
/// Validate a complete snapshot object graph using BFS only
|
||||
pub async fn validate_snapshot(&self, snapshot_hash: &Hash, snapshot_body: &[u8]) -> Result<ValidationResult> {
|
||||
// Use the BFS implementation
|
||||
self.validate_snapshot_bfs(snapshot_hash, snapshot_body).await
|
||||
}
|
||||
|
||||
/// Validate a batch of meta objects (for incremental validation)
|
||||
pub async fn validate_meta_batch(&self, metas: &[(MetaType, Hash)]) -> Result<Vec<(MetaType, Hash)>> {
|
||||
let mut missing = Vec::new();
|
||||
|
||||
for &(meta_type, hash) in metas {
|
||||
if !self.storage.meta_exists(meta_type, &hash).await {
|
||||
missing.push((meta_type, hash));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(missing)
|
||||
}
|
||||
|
||||
/// Validate a batch of chunks (for incremental validation)
|
||||
pub async fn validate_chunk_batch(&self, chunks: &[Hash]) -> Result<Vec<Hash>> {
|
||||
let mut missing = Vec::new();
|
||||
|
||||
for &hash in chunks {
|
||||
if !self.storage.chunk_exists(&hash).await {
|
||||
missing.push(hash);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(missing)
|
||||
}
|
||||
|
||||
/// Perform a breadth-first validation (useful for large snapshots)
|
||||
pub async fn validate_snapshot_bfs(&self, snapshot_hash: &Hash, snapshot_body: &[u8]) -> Result<ValidationResult> {
|
||||
// Verify snapshot hash
|
||||
let computed_hash = blake3::hash(snapshot_body);
|
||||
if computed_hash.as_bytes() != snapshot_hash {
|
||||
return Err(anyhow::anyhow!("Snapshot hash mismatch"));
|
||||
}
|
||||
|
||||
// Parse snapshot object
|
||||
let snapshot_obj = SnapshotObj::deserialize(bytes::Bytes::from(snapshot_body.to_vec()))
|
||||
.context("Failed to deserialize snapshot object")?;
|
||||
|
||||
let mut missing_chunks = Vec::new();
|
||||
let mut missing_metas = Vec::new();
|
||||
let mut visited_metas = HashSet::new();
|
||||
let mut queue = VecDeque::new();
|
||||
|
||||
// Initialize queue with disk hashes
|
||||
for disk_hash in &snapshot_obj.disk_hashes {
|
||||
queue.push_back((MetaType::Disk, *disk_hash));
|
||||
}
|
||||
|
||||
// BFS traversal
|
||||
while let Some((meta_type, hash)) = queue.pop_front() {
|
||||
let meta_key = (meta_type, hash);
|
||||
|
||||
if visited_metas.contains(&meta_key) {
|
||||
continue;
|
||||
}
|
||||
visited_metas.insert(meta_key);
|
||||
|
||||
// Check if meta exists
|
||||
if !self.storage.meta_exists(meta_type, &hash).await {
|
||||
println!("Missing metadata: {:?} hash {}", meta_type, hex::encode(&hash));
|
||||
missing_metas.push((meta_type, hash));
|
||||
continue; // Skip loading if missing
|
||||
}
|
||||
|
||||
// Load and process meta object
|
||||
println!("Loading metadata: {:?} hash {}", meta_type, hex::encode(&hash));
|
||||
if let Some(meta_obj) = self.storage.load_meta(meta_type, &hash).await
|
||||
.context("Failed to load meta object")? {
|
||||
|
||||
match meta_obj {
|
||||
MetaObj::Disk(disk) => {
|
||||
for partition_hash in &disk.partition_hashes {
|
||||
                            queue.push_back((MetaType::Partition, *partition_hash));
                        }
                    }
                    MetaObj::Partition(partition) => {
                        queue.push_back((MetaType::Dir, partition.root_dir_hash));
                    }
                    MetaObj::Dir(dir) => {
                        for entry in &dir.entries {
                            match entry.entry_type {
                                EntryType::File | EntryType::Symlink => {
                                    queue.push_back((MetaType::File, entry.target_meta_hash));
                                }
                                EntryType::Dir => {
                                    queue.push_back((MetaType::Dir, entry.target_meta_hash));
                                }
                            }
                        }
                    }
                    MetaObj::File(file) => {
                        // Check chunk dependencies
                        for chunk_hash in &file.chunk_hashes {
                            if !self.storage.chunk_exists(chunk_hash).await {
                                missing_chunks.push(*chunk_hash);
                            }
                        }
                    }
                    MetaObj::Snapshot(_) => {
                        // Snapshots shouldn't be nested
                        return Err(anyhow::anyhow!("Unexpected nested snapshot object"));
                    }
                }
            }
        }

        if missing_chunks.is_empty() && missing_metas.is_empty() {
            Ok(ValidationResult::valid())
        } else {
            Ok(ValidationResult::invalid(missing_chunks, missing_metas))
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::sync::meta::*;
    use tempfile::TempDir;

    // Return the TempDir guard alongside the storage so the temporary
    // directory is not deleted while a test is still using it.
    async fn setup_test_storage() -> (Storage, TempDir) {
        let temp_dir = TempDir::new().unwrap();
        let storage = Storage::new(temp_dir.path());
        storage.init().await.unwrap();
        (storage, temp_dir)
    }

    #[tokio::test]
    async fn test_validate_empty_snapshot() {
        let (storage, _guard) = setup_test_storage().await;
        let validator = SnapshotValidator::new(storage);

        let snapshot = SnapshotObj::new(1234567890, vec![]);
        let snapshot_body = snapshot.serialize().unwrap();
        let snapshot_hash = snapshot.compute_hash().unwrap();

        let result = validator
            .validate_snapshot(&snapshot_hash, &snapshot_body)
            .await
            .unwrap();

        assert!(result.is_valid);
        assert!(result.missing_chunks.is_empty());
        assert!(result.missing_metas.is_empty());
    }

    #[tokio::test]
    async fn test_validate_missing_disk() {
        let (storage, _guard) = setup_test_storage().await;
        let validator = SnapshotValidator::new(storage);

        let missing_disk_hash = [1u8; 32];
        let snapshot = SnapshotObj::new(1234567890, vec![missing_disk_hash]);
        let snapshot_body = snapshot.serialize().unwrap();
        let snapshot_hash = snapshot.compute_hash().unwrap();

        let result = validator
            .validate_snapshot(&snapshot_hash, &snapshot_body)
            .await
            .unwrap();

        assert!(!result.is_valid);
        assert!(result.missing_chunks.is_empty());
        assert_eq!(result.missing_metas.len(), 1);
        assert_eq!(result.missing_metas[0], (MetaType::Disk, missing_disk_hash));
    }

    #[tokio::test]
    async fn test_validate_chunk_batch() {
        let (storage, _guard) = setup_test_storage().await;

        let chunk_data = b"test chunk";
        let chunk_hash: [u8; 32] = blake3::hash(chunk_data).into();
        let missing_hash = [1u8; 32];

        // Store one chunk before handing the storage to the validator,
        // since the constructor takes ownership of it.
        storage.store_chunk(&chunk_hash, chunk_data).await.unwrap();

        let validator = SnapshotValidator::new(storage);

        let chunks = vec![chunk_hash, missing_hash];
        let missing = validator.validate_chunk_batch(&chunks).await.unwrap();

        assert_eq!(missing.len(), 1);
        assert_eq!(missing[0], missing_hash);
    }
}

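Note: the validator above walks the snapshot's metadata graph breadth-first and reports whatever is absent from storage. A minimal sketch of how a commit path might consume the result; the CommitOutcome type and the function itself are illustrative, not part of this change:

// Hypothetical consumer: refuse to finalize a snapshot until the client
// has uploaded everything the validator reported as missing.
enum CommitOutcome {
    Committed,
    NeedsObjects {
        chunks: Vec<[u8; 32]>,
        metas: Vec<(MetaType, [u8; 32])>,
    },
}

async fn try_commit_snapshot(
    validator: &SnapshotValidator,
    hash: &[u8; 32],
    body: &[u8],
) -> anyhow::Result<CommitOutcome> {
    let result = validator.validate_snapshot(hash, body).await?;
    if result.is_valid {
        Ok(CommitOutcome::Committed)
    } else {
        // The client uploads these objects and then retries the commit.
        Ok(CommitOutcome::NeedsObjects {
            chunks: result.missing_chunks,
            metas: result.missing_metas,
        })
    }
}
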
63
server/src/utils/auth.rs
Normal file
@@ -0,0 +1,63 @@
use crate::controllers::auth::AuthController;
use crate::utils::{models::*, DbPool};
use axum::{
    extract::FromRequestParts,
    http::{header::AUTHORIZATION, request::Parts, StatusCode},
};

#[derive(Clone)]
pub struct AuthUser {
    pub user: User,
}

impl FromRequestParts<DbPool> for AuthUser {
    type Rejection = StatusCode;

    async fn from_request_parts(
        parts: &mut Parts,
        state: &DbPool,
    ) -> Result<Self, Self::Rejection> {
        let auth_header = parts
            .headers
            .get(AUTHORIZATION)
            .and_then(|header| header.to_str().ok())
            .ok_or(StatusCode::UNAUTHORIZED)?;

        if !auth_header.starts_with("Bearer ") {
            return Err(StatusCode::UNAUTHORIZED);
        }

        let token = &auth_header[7..];

        let user = AuthController::authenticate_user(state, token)
            .await
            .map_err(|_| StatusCode::UNAUTHORIZED)?;

        Ok(AuthUser { user })
    }
}

#[derive(Clone)]
pub struct AdminUser {
    #[allow(dead_code)]
    pub user: User,
}

impl FromRequestParts<DbPool> for AdminUser {
    type Rejection = StatusCode;

    async fn from_request_parts(
        parts: &mut Parts,
        state: &DbPool,
    ) -> Result<Self, Self::Rejection> {
        let auth_user = AuthUser::from_request_parts(parts, state).await?;

        if auth_user.user.role != UserRole::Admin {
            return Err(StatusCode::FORBIDDEN);
        }

        Ok(AdminUser {
            user: auth_user.user,
        })
    }
}

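Since both extractors implement FromRequestParts<DbPool>, guarding a route only requires adding a parameter. A sketch, assuming the Router carries the DbPool as state; the handlers and paths here are hypothetical:

use axum::{routing::get, Json, Router};

// Any route taking AuthUser requires a valid Bearer token;
// AdminUser additionally requires the admin role (else 403).
async fn whoami(AuthUser { user }: AuthUser) -> Json<User> {
    // password_hash is skipped during serialization (see models.rs).
    Json(user)
}

async fn admin_only(_admin: AdminUser) -> &'static str {
    "admin area"
}

fn routes(pool: DbPool) -> Router {
    Router::new()
        .route("/api/me", get(whoami))
        .route("/api/admin/ping", get(admin_only))
        .with_state(pool)
}
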
108
server/src/utils/base62.rs
Normal file
@@ -0,0 +1,108 @@
const CHARS: &str = "rYTSJ96O2ntiEBkuwQq0vdslyfI8Ph51bpae3LgHoFZAxj7WmzUNCGXcR4MDKV";

pub struct Base62;

impl Base62 {
    pub fn encode(input: &str) -> String {
        if input.is_empty() {
            return String::new();
        }

        let bytes = input.as_bytes();
        let alphabet_chars: Vec<char> = CHARS.chars().collect();

        // Interpret the input as one big base-256 integer, carried around
        // as a decimal string so arbitrarily long inputs work.
        let mut number = bytes.iter().fold(String::from("0"), |acc, &byte| {
            Self::multiply_and_add(&acc, 256, byte as u32)
        });

        if number == "0" {
            return "0".to_string();
        }

        let mut result = String::new();
        while number != "0" {
            let (new_number, remainder) = Self::divide_by(&number, 62);
            result.push(alphabet_chars[remainder as usize]);
            number = new_number;
        }

        result.chars().rev().collect()
    }

    pub fn decode(encoded: &str) -> Option<String> {
        if encoded.is_empty() {
            return Some(String::new());
        }

        let char_to_value: std::collections::HashMap<char, u32> = CHARS
            .chars()
            .enumerate()
            .map(|(i, c)| (c, i as u32))
            .collect();

        let mut number = String::from("0");
        for c in encoded.chars() {
            // Any character outside the alphabet makes the input invalid.
            let value = *char_to_value.get(&c)?;
            number = Self::multiply_and_add(&number, 62, value);
        }

        if number == "0" {
            return Some(String::new());
        }

        let mut bytes = Vec::new();
        while number != "0" {
            let (new_number, remainder) = Self::divide_by(&number, 256);
            bytes.push(remainder as u8);
            number = new_number;
        }

        bytes.reverse();
        String::from_utf8(bytes).ok()
    }

    // Computes num_str * base + add, where num_str is a non-negative
    // integer held as a decimal string (schoolbook long multiplication).
    fn multiply_and_add(num_str: &str, base: u32, add: u32) -> String {
        let mut result = Vec::new();
        let mut carry = add;

        for c in num_str.chars().rev() {
            let digit = c.to_digit(10).unwrap_or(0);
            let product = digit * base + carry;
            result.push((product % 10).to_string());
            carry = product / 10;
        }

        while carry > 0 {
            result.push((carry % 10).to_string());
            carry /= 10;
        }

        if result.is_empty() {
            "0".to_string()
        } else {
            result.into_iter().rev().collect()
        }
    }

    // Divides the decimal string by base, returning (quotient, remainder)
    // via digit-by-digit long division.
    fn divide_by(num_str: &str, base: u32) -> (String, u32) {
        let mut quotient = String::new();
        let mut remainder = 0u32;

        for c in num_str.chars() {
            let digit = c.to_digit(10).unwrap_or(0);
            let current = remainder * 10 + digit;
            let q = current / base;
            remainder = current % base;

            if !quotient.is_empty() || q > 0 {
                quotient.push_str(&q.to_string());
            }
        }

        if quotient.is_empty() {
            quotient = "0".to_string();
        }

        (quotient, remainder)
    }
}

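A quick roundtrip sketch for the shuffled-alphabet codec. The property that matters is decode(encode(x)) == x; note that leading zero bytes are not preserved by pure base conversion, so callers should stick to ordinary text. Test names and inputs here are illustrative:

#[cfg(test)]
mod base62_roundtrip {
    use super::Base62;

    #[test]
    fn roundtrip() {
        let original = "machine-42:provision";
        let encoded = Base62::encode(original);
        // Every character of the encoding comes from the custom alphabet.
        assert!(encoded.chars().all(|c| super::CHARS.contains(c)));
        assert_eq!(Base62::decode(&encoded), Some(original.to_string()));
    }

    #[test]
    fn rejects_foreign_characters() {
        // '-' is not in CHARS, so decode must return None.
        assert_eq!(Base62::decode("abc-def"), None);
    }
}
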
44
server/src/utils/config.rs
Normal file
@@ -0,0 +1,44 @@
use crate::utils::{error::*, DbPool};
use sqlx::Row;

pub struct ConfigManager;

impl ConfigManager {
    pub async fn get_config(pool: &DbPool, key: &str) -> AppResult<Option<String>> {
        let row = sqlx::query("SELECT value FROM config WHERE key = ?")
            .bind(key)
            .fetch_optional(pool)
            .await?;

        if let Some(row) = row {
            Ok(Some(row.get("value")))
        } else {
            Ok(None)
        }
    }

    pub async fn set_config(pool: &DbPool, key: &str, value: &str) -> AppResult<()> {
        sqlx::query(
            r#"
            INSERT INTO config (key, value, updated_at)
            VALUES (?, ?, CURRENT_TIMESTAMP)
            ON CONFLICT(key) DO UPDATE SET
                value = excluded.value,
                updated_at = CURRENT_TIMESTAMP
            "#,
        )
        .bind(key)
        .bind(value)
        .execute(pool)
        .await?;

        Ok(())
    }

    pub async fn get_external_url(pool: &DbPool) -> AppResult<String> {
        match Self::get_config(pool, "EXTERNAL_URL").await? {
            Some(url) => Ok(url),
            None => Err(internal_error("EXTERNAL_URL not configured")),
        }
    }
}

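Because set_config is an upsert, it is safe to call whether or not the key already exists. A small usage sketch (the URL value is made up):

// Hypothetical startup snippet: persist the public URL once, read it later.
async fn configure(pool: &DbPool) -> AppResult<()> {
    ConfigManager::set_config(pool, "EXTERNAL_URL", "https://backup.example.com").await?;
    let url = ConfigManager::get_external_url(pool).await?;
    assert_eq!(url, "https://backup.example.com");
    Ok(())
}
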
124
server/src/utils/database.rs
Normal file
@@ -0,0 +1,124 @@
use crate::utils::{ensure_data_directories, get_database_path, AppResult};
use sqlx::{sqlite::SqlitePool, Pool, Row, Sqlite};
use std::path::Path;

pub type DbPool = Pool<Sqlite>;

pub async fn init_database() -> AppResult<DbPool> {
    ensure_data_directories()?;

    let db_path = get_database_path()?;

    if !Path::new(&db_path).exists() {
        std::fs::File::create(&db_path)?;
    }

    let database_url = format!("sqlite://{}", db_path);

    let pool = SqlitePool::connect(&database_url).await?;

    run_migrations(&pool).await?;

    Ok(pool)
}

async fn run_migrations(pool: &DbPool) -> AppResult<()> {
    sqlx::query(
        r#"
        CREATE TABLE IF NOT EXISTS users (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            username TEXT UNIQUE NOT NULL,
            password_hash TEXT NOT NULL,
            role TEXT CHECK(role IN ('admin','user')) NOT NULL,
            storage_limit_gb INTEGER NOT NULL DEFAULT 0,
            created_at DATETIME DEFAULT CURRENT_TIMESTAMP
        )
        "#,
    )
    .execute(pool)
    .await?;

    sqlx::query(
        r#"
        CREATE TABLE IF NOT EXISTS sessions (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            user_id INTEGER NOT NULL,
            token TEXT UNIQUE NOT NULL,
            created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
            expires_at DATETIME NOT NULL,
            FOREIGN KEY(user_id) REFERENCES users(id) ON DELETE CASCADE
        )
        "#,
    )
    .execute(pool)
    .await?;

    sqlx::query(
        r#"
        CREATE TABLE IF NOT EXISTS machines (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            user_id INTEGER NOT NULL,
            uuid TEXT UNIQUE NOT NULL,
            name TEXT NOT NULL,
            created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
            FOREIGN KEY(user_id) REFERENCES users(id) ON DELETE CASCADE
        )
        "#,
    )
    .execute(pool)
    .await?;

    sqlx::query(
        r#"
        CREATE TABLE IF NOT EXISTS provisioning_codes (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            machine_id INTEGER NOT NULL,
            code TEXT UNIQUE NOT NULL,
            created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
            expires_at DATETIME NOT NULL,
            used BOOLEAN DEFAULT 0,
            FOREIGN KEY(machine_id) REFERENCES machines(id) ON DELETE CASCADE
        )
        "#,
    )
    .execute(pool)
    .await?;

    sqlx::query(
        r#"
        CREATE TABLE IF NOT EXISTS snapshots (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            machine_id INTEGER NOT NULL,
            snapshot_hash TEXT NOT NULL,
            created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
            FOREIGN KEY(machine_id) REFERENCES machines(id) ON DELETE CASCADE
        )
        "#,
    )
    .execute(pool)
    .await?;

    sqlx::query(
        r#"
        CREATE TABLE IF NOT EXISTS config (
            key TEXT PRIMARY KEY,
            value TEXT NOT NULL,
            created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
            updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
        )
        "#,
    )
    .execute(pool)
    .await?;

    Ok(())
}

pub async fn check_first_user_exists(pool: &DbPool) -> AppResult<bool> {
    let row = sqlx::query("SELECT COUNT(*) as count FROM users")
        .fetch_one(pool)
        .await?;

    let count: i64 = row.get("count");
    Ok(count > 0)
}

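One caveat: SQLite keeps foreign-key enforcement off unless the connection sets PRAGMA foreign_keys, so the ON DELETE CASCADE clauses above depend on the driver enabling it. A hedged alternative using sqlx's options-based connector, which also covers file creation without the manual File::create:

use sqlx::sqlite::{SqliteConnectOptions, SqlitePoolOptions};
use std::str::FromStr;

// Sketch: create the database file if missing and enforce foreign keys,
// so the CASCADE rules in the schema actually fire on deletes.
async fn connect(db_path: &str) -> Result<DbPool, sqlx::Error> {
    let options = SqliteConnectOptions::from_str(&format!("sqlite://{}", db_path))?
        .create_if_missing(true)
        .foreign_keys(true);
    SqlitePoolOptions::new().connect_with(options).await
}
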
76
server/src/utils/db_path.rs
Normal file
@@ -0,0 +1,76 @@
use crate::utils::error::{internal_error, AppResult};
use std::fs;

pub fn get_database_path() -> AppResult<String> {
    let db_dir = "data/db";
    let db_path = format!("{}/arkendro.db", db_dir);

    if let Err(e) = fs::create_dir_all(db_dir) {
        return Err(internal_error(&format!(
            "Failed to create database directory: {}",
            e
        )));
    }

    Ok(db_path)
}

pub fn ensure_data_directories() -> AppResult<()> {
    let directories = ["data", "data/db", "data/backups", "data/logs"];

    for dir in directories.iter() {
        if let Err(e) = fs::create_dir_all(dir) {
            return Err(internal_error(&format!(
                "Failed to create directory '{}': {}",
                dir, e
            )));
        }
    }

    Ok(())
}

pub fn get_data_path(filename: &str) -> String {
    format!("data/{}", filename)
}

pub fn get_backup_path(filename: &str) -> String {
    format!("data/backups/{}", filename)
}

pub fn get_log_path(filename: &str) -> String {
    format!("data/logs/{}", filename)
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::fs;
    use std::path::Path;

    // Note: both tests create and delete the shared ./data directory in the
    // working directory, so they can interfere when run in parallel.
    #[test]
    fn test_database_path_creation() {
        let _ = fs::remove_dir_all("data");

        let db_path = get_database_path().expect("Should create database path");
        assert_eq!(db_path, "data/db/arkendro.db");

        assert!(Path::new("data/db").exists());

        let _ = fs::remove_dir_all("data");
    }

    #[test]
    fn test_ensure_data_directories() {
        let _ = fs::remove_dir_all("data");

        ensure_data_directories().expect("Should create all directories");

        assert!(Path::new("data").exists());
        assert!(Path::new("data/db").exists());
        assert!(Path::new("data/backups").exists());
        assert!(Path::new("data/logs").exists());

        let _ = fs::remove_dir_all("data");
    }
}

142
server/src/utils/error.rs
Normal file
@@ -0,0 +1,142 @@
use axum::{
    http::StatusCode,
    response::{IntoResponse, Json, Response},
};
use serde_json::json;
use std::fmt;

#[derive(Debug)]
pub enum AppError {
    DatabaseError(String),
    ValidationError(String),
    AuthenticationError(String),
    AuthorizationError(String),
    NotFoundError(String),
    ConflictError(String),
    InternalError(String),
}

impl fmt::Display for AppError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            AppError::DatabaseError(msg) => write!(f, "Database error: {}", msg),
            AppError::ValidationError(msg) => write!(f, "Validation error: {}", msg),
            AppError::AuthenticationError(msg) => write!(f, "Authentication error: {}", msg),
            AppError::AuthorizationError(msg) => write!(f, "Authorization error: {}", msg),
            AppError::NotFoundError(msg) => write!(f, "Not found: {}", msg),
            AppError::ConflictError(msg) => write!(f, "Conflict: {}", msg),
            AppError::InternalError(msg) => write!(f, "Internal error: {}", msg),
        }
    }
}

impl std::error::Error for AppError {}

impl IntoResponse for AppError {
    fn into_response(self) -> Response {
        let (status, error_message) = match self {
            AppError::DatabaseError(_) => (StatusCode::INTERNAL_SERVER_ERROR, "Database error"),
            AppError::ValidationError(ref msg) => (StatusCode::BAD_REQUEST, msg.as_str()),
            AppError::AuthenticationError(ref msg) => (StatusCode::UNAUTHORIZED, msg.as_str()),
            AppError::AuthorizationError(ref msg) => (StatusCode::FORBIDDEN, msg.as_str()),
            AppError::NotFoundError(ref msg) => (StatusCode::NOT_FOUND, msg.as_str()),
            AppError::ConflictError(ref msg) => (StatusCode::CONFLICT, msg.as_str()),
            AppError::InternalError(_) => {
                (StatusCode::INTERNAL_SERVER_ERROR, "Internal server error")
            }
        };

        let body = Json(json!({
            "error": error_message
        }));

        (status, body).into_response()
    }
}

impl From<anyhow::Error> for AppError {
    fn from(err: anyhow::Error) -> Self {
        if let Some(sqlx_err) = err.downcast_ref::<sqlx::Error>() {
            match sqlx_err {
                sqlx::Error::RowNotFound => {
                    AppError::NotFoundError("Resource not found".to_string())
                }
                sqlx::Error::Database(db_err) => {
                    if db_err.message().contains("UNIQUE constraint failed") {
                        AppError::ConflictError("Resource already exists".to_string())
                    } else {
                        AppError::DatabaseError(db_err.message().to_string())
                    }
                }
                _ => AppError::DatabaseError("Database operation failed".to_string()),
            }
        } else {
            AppError::InternalError(err.to_string())
        }
    }
}

impl From<bcrypt::BcryptError> for AppError {
    fn from(_: bcrypt::BcryptError) -> Self {
        AppError::InternalError("Password hashing error".to_string())
    }
}

impl From<sqlx::Error> for AppError {
    fn from(err: sqlx::Error) -> Self {
        match err {
            sqlx::Error::RowNotFound => AppError::NotFoundError("Resource not found".to_string()),
            sqlx::Error::Database(db_err) => {
                if db_err.message().contains("UNIQUE constraint failed") {
                    AppError::ConflictError("Resource already exists".to_string())
                } else {
                    AppError::DatabaseError(db_err.message().to_string())
                }
            }
            _ => AppError::DatabaseError("Database operation failed".to_string()),
        }
    }
}

impl From<std::io::Error> for AppError {
    fn from(err: std::io::Error) -> Self {
        AppError::InternalError(format!("IO error: {}", err))
    }
}

pub type AppResult<T> = Result<T, AppError>;

pub fn validation_error(msg: &str) -> AppError {
    AppError::ValidationError(msg.to_string())
}

pub fn auth_error(msg: &str) -> AppError {
    AppError::AuthenticationError(msg.to_string())
}

pub fn forbidden_error(msg: &str) -> AppError {
    AppError::AuthorizationError(msg.to_string())
}

pub fn not_found_error(msg: &str) -> AppError {
    AppError::NotFoundError(msg.to_string())
}

pub fn conflict_error(msg: &str) -> AppError {
    AppError::ConflictError(msg.to_string())
}

pub fn internal_error(msg: &str) -> AppError {
    AppError::InternalError(msg.to_string())
}

pub fn success_response<T>(data: T) -> Json<T>
where
    T: serde::Serialize,
{
    Json(data)
}

pub fn success_message(msg: &str) -> Json<serde_json::Value> {
    Json(json!({ "message": msg }))
}

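With IntoResponse in place, handlers can return AppResult<T> and let the ? operator do the mapping; a sqlx RowNotFound, for example, surfaces as a 404 JSON body via the From impl above. A sketch with a hypothetical handler:

use axum::extract::{Path, State};

// Hypothetical handler: a missing row bubbles up through `?` and is
// rendered as 404 {"error": "Resource not found"}.
async fn get_username(
    State(pool): State<crate::utils::DbPool>,
    Path(id): Path<i64>,
) -> AppResult<Json<serde_json::Value>> {
    use sqlx::Row;
    let row = sqlx::query("SELECT username FROM users WHERE id = ?")
        .bind(id)
        .fetch_one(&pool)
        .await?;
    let username: String = row.get("username");
    Ok(success_response(serde_json::json!({ "username": username })))
}
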
11
server/src/utils/mod.rs
Normal file
@@ -0,0 +1,11 @@
pub mod auth;
pub mod base62;
pub mod config;
pub mod database;
pub mod db_path;
pub mod error;
pub mod models;

pub use database::*;
pub use db_path::*;
pub use error::*;

156
server/src/utils/models.rs
Normal file
@@ -0,0 +1,156 @@
use serde::{Deserialize, Serialize};
use chrono::{DateTime, Utc};
use uuid::Uuid;

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct User {
    pub id: i64,
    pub username: String,
    #[serde(skip_serializing)]
    pub password_hash: String,
    pub role: UserRole,
    pub storage_limit_gb: i64,
    pub created_at: DateTime<Utc>,
}

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum UserRole {
    Admin,
    User,
}

impl std::fmt::Display for UserRole {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            UserRole::Admin => write!(f, "admin"),
            UserRole::User => write!(f, "user"),
        }
    }
}

impl std::str::FromStr for UserRole {
    type Err = &'static str;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "admin" => Ok(UserRole::Admin),
            "user" => Ok(UserRole::User),
            _ => Err("Invalid role"),
        }
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub struct CreateUserRequest {
    pub username: String,
    pub password: String,
    pub role: Option<UserRole>,
    pub storage_limit_gb: Option<i64>,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct UpdateUserRequest {
    pub username: Option<String>,
    pub password: Option<String>,
    pub role: Option<UserRole>,
    pub storage_limit_gb: Option<i64>,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Session {
    pub id: i64,
    pub user_id: i64,
    pub token: String,
    pub created_at: DateTime<Utc>,
    pub expires_at: DateTime<Utc>,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct LoginRequest {
    pub username: String,
    pub password: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct LoginResponse {
    pub token: String,
    pub role: UserRole,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Machine {
    pub id: i64,
    pub user_id: i64,
    pub uuid: Uuid,
    #[serde(rename = "machine_id")]
    pub machine_id: String,
    pub name: String,
    pub created_at: DateTime<Utc>,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct RegisterMachineRequest {
    pub name: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct UseProvisioningCodeRequest {
    pub code: String,
    pub uuid: Uuid,
    pub name: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct CreateProvisioningCodeRequest {
    pub machine_id: i64,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct ProvisioningCodeResponse {
    pub code: String,
    pub raw_code: String,
    pub expires_at: DateTime<Utc>,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct ProvisioningCode {
    pub id: i64,
    pub machine_id: i64,
    pub code: String,
    pub created_at: DateTime<Utc>,
    pub expires_at: DateTime<Utc>,
    pub used: bool,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Snapshot {
    pub id: i64,
    pub machine_id: i64,
    pub snapshot_hash: String,
    pub created_at: DateTime<Utc>,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct SetupStatusResponse {
    pub first_user_exists: bool,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct InitSetupRequest {
    pub username: String,
    pub password: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct ErrorResponse {
    pub error: String,
}

impl ErrorResponse {
    pub fn new(message: &str) -> Self {
        Self {
            error: message.to_string(),
        }
    }
}

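The serde attributes define the wire format: password_hash never serializes, and roles travel as the lowercase strings the users table's CHECK constraint expects. A small illustration (not part of the diff):

// Sketch: lowercase role strings in both directions.
fn serialization_shape() {
    let role = UserRole::Admin;
    assert_eq!(serde_json::to_string(&role).unwrap(), "\"admin\"");
    assert_eq!("user".parse::<UserRole>().unwrap(), UserRole::User);
}
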
76
sync_client_test/Cargo.lock
generated
Normal file
@@ -0,0 +1,76 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4

[[package]]
name = "arrayref"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb"

[[package]]
name = "arrayvec"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"

[[package]]
name = "blake3"
version = "1.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0"
dependencies = [
 "arrayref",
 "arrayvec",
 "cc",
 "cfg-if",
 "constant_time_eq",
]

[[package]]
name = "cc"
version = "1.2.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5252b3d2648e5eedbc1a6f501e3c795e07025c1e93bbf8bbdd6eef7f447a6d54"
dependencies = [
 "find-msvc-tools",
 "shlex",
]

[[package]]
name = "cfg-if"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9"

[[package]]
name = "constant_time_eq"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6"

[[package]]
name = "find-msvc-tools"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fd99930f64d146689264c637b5af2f0233a933bef0d8570e2526bf9e083192d"

[[package]]
name = "hex"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"

[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"

[[package]]
name = "sync_client_test"
version = "0.1.0"
dependencies = [
 "blake3",
 "hex",
]

8
sync_client_test/Cargo.toml
Normal file
@@ -0,0 +1,8 @@
[package]
name = "sync_client_test"
version = "0.1.0"
edition = "2021"

[dependencies]
blake3 = "1.5"
hex = "0.4"

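The test client pulls in only blake3 and hex, matching the 32-byte blake3 digests the server stores chunks under. A minimal sketch of the kind of hashing main.rs presumably exercises; the chunk size and payload here are illustrative:

// Hypothetical excerpt: hash fixed-size chunks into hex-encoded
// blake3 digests, the addressing scheme the server side uses.
fn chunk_hashes(data: &[u8], chunk_size: usize) -> Vec<String> {
    data.chunks(chunk_size)
        .map(|chunk| hex::encode(blake3::hash(chunk).as_bytes()))
        .collect()
}

fn main() {
    for h in chunk_hashes(b"example payload", 8) {
        println!("{h}");
    }
}
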
1051
sync_client_test/src/main.rs
Normal file
24
webui/.gitignore
vendored
Normal file
@@ -0,0 +1,24 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*

node_modules
dist
dist-ssr
*.local

# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?

29
webui/eslint.config.js
Normal file
@@ -0,0 +1,29 @@
import js from '@eslint/js'
import globals from 'globals'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'
import { defineConfig, globalIgnores } from 'eslint/config'

export default defineConfig([
  globalIgnores(['dist']),
  {
    files: ['**/*.{js,jsx}'],
    extends: [
      js.configs.recommended,
      reactHooks.configs['recommended-latest'],
      reactRefresh.configs.vite,
    ],
    languageOptions: {
      ecmaVersion: 2020,
      globals: globals.browser,
      parserOptions: {
        ecmaVersion: 'latest',
        ecmaFeatures: { jsx: true },
        sourceType: 'module',
      },
    },
    rules: {
      'no-unused-vars': ['error', { varsIgnorePattern: '^[A-Z_]' }],
    },
  },
])

12
webui/index.html
Normal file
@@ -0,0 +1,12 @@
<!doctype html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Arkendro</title>
  </head>
  <body>
    <div id="root"></div>
    <script type="module" src="/src/main.jsx"></script>
  </body>
</html>

8
webui/jsconfig.json
Normal file
@@ -0,0 +1,8 @@
{
  "compilerOptions": {
    "baseUrl": ".",
    "paths": {
      "@/*": ["./src/*"]
    }
  }
}

32
webui/package.json
Normal file
@@ -0,0 +1,32 @@
{
  "name": "webui",
  "private": true,
  "version": "0.0.0",
  "type": "module",
  "scripts": {
    "dev": "vite",
    "build": "vite build",
    "lint": "eslint .",
    "preview": "vite preview"
  },
  "dependencies": {
    "@fontsource/plus-jakarta-sans": "^5.2.6",
    "@phosphor-icons/react": "^2.1.10",
    "classnames": "^2.5.1",
    "react": "^19.1.1",
    "react-dom": "^19.1.1",
    "react-router-dom": "^7.8.2",
    "sass-embedded": "^1.92.1"
  },
  "devDependencies": {
    "@eslint/js": "^9.33.0",
    "@types/react": "^19.1.10",
    "@types/react-dom": "^19.1.7",
    "@vitejs/plugin-react": "^5.0.0",
    "eslint": "^9.33.0",
    "eslint-plugin-react-hooks": "^5.2.0",
    "eslint-plugin-react-refresh": "^0.4.20",
    "globals": "^16.3.0",
    "vite": "^7.1.2"
  }
}

2311
webui/pnpm-lock.yaml
generated
Normal file
39
webui/src/App.jsx
Normal file
@@ -0,0 +1,39 @@
import {createBrowserRouter, Navigate, RouterProvider} from "react-router-dom";
import {UserProvider} from '@/common/contexts/UserContext.jsx';
import {ToastProvider} from '@/common/contexts/ToastContext.jsx';
import "@/common/styles/main.sass";
import Root from "@/common/layouts/Root.jsx";
import UserManagement from "@/pages/UserManagement";
import SystemSettings from "@/pages/SystemSettings";
import Machines, {MachineDetails} from "@/pages/Machines";
import "@fontsource/plus-jakarta-sans/300.css";
import "@fontsource/plus-jakarta-sans/400.css";
import "@fontsource/plus-jakarta-sans/600.css";
import "@fontsource/plus-jakarta-sans/700.css";
import "@fontsource/plus-jakarta-sans/800.css";

const Placeholder = ({title}) => (
    <div className="content">
        <h2 style={{fontSize: '1rem'}}>{title}</h2>
        <p className="muted">Content coming soon.</p>
    </div>
);

const App = () => {
    const router = createBrowserRouter([
        {
            path: "/",
            element: <Root/>,
            children: [
                {path: "/", element: <Navigate to="/dashboard"/>},
                {path: "/dashboard", element: <Placeholder title="Dashboard"/>},
                {path: "/machines", element: <Machines/>},
                {path: "/machines/:id", element: <MachineDetails/>},
                {path: "/servers", element: <Placeholder title="Servers"/>},
                {path: "/settings", element: <Placeholder title="Settings"/>},
                {path: "/admin/users", element: <UserManagement/>},
                {path: "/admin/settings", element: <SystemSettings/>},
            ],
        },
    ]);

    return (
        <UserProvider>
            <ToastProvider>
                <RouterProvider router={router}/>
            </ToastProvider>
        </UserProvider>
    );
};

export default App;

23
webui/src/common/components/Avatar/Avatar.jsx
Normal file
@@ -0,0 +1,23 @@
import React from 'react';
import './styles.sass';

export const Avatar = ({
    children,
    size = 'md',
    variant = 'default',
    className = '',
    ...rest
}) => {
    const avatarClasses = [
        'avatar',
        `avatar--${size}`,
        `avatar--${variant}`,
        className
    ].filter(Boolean).join(' ');

    return (
        <div className={avatarClasses} {...rest}>
            {children}
        </div>
    );
};

1
webui/src/common/components/Avatar/index.js
Normal file
@@ -0,0 +1 @@
export { Avatar as default } from './Avatar.jsx';

35
webui/src/common/components/Avatar/styles.sass
Normal file
@@ -0,0 +1,35 @@
.avatar
  background: var(--bg-elev)
  border: 1px solid var(--border)
  border-radius: 50%
  display: flex
  align-items: center
  justify-content: center
  color: var(--text-dim)
  flex-shrink: 0

  &--sm
    width: 32px
    height: 32px

  &--md
    width: 48px
    height: 48px

  &--lg
    width: 64px
    height: 64px

  &--xl
    width: 80px
    height: 80px

  &--primary
    background: var(--accent)
    color: white
    border-color: var(--accent)

  &--success
    background: #16a34a
    color: white
    border-color: #16a34a

23
webui/src/common/components/Badge/Badge.jsx
Normal file
@@ -0,0 +1,23 @@
import React from 'react';
import './styles.sass';

export const Badge = ({
    children,
    variant = 'default',
    size = 'md',
    className = '',
    ...rest
}) => {
    const badgeClasses = [
        'badge',
        `badge--${variant}`,
        `badge--${size}`,
        className
    ].filter(Boolean).join(' ');

    return (
        <span className={badgeClasses} {...rest}>
            {children}
        </span>
    );
};

1
webui/src/common/components/Badge/index.js
Normal file
@@ -0,0 +1 @@
export { Badge as default } from './Badge.jsx';

53
webui/src/common/components/Badge/styles.sass
Normal file
@@ -0,0 +1,53 @@
.badge
  display: inline-flex
  align-items: center
  justify-content: center
  border-radius: 12px
  font-weight: 600
  text-transform: uppercase
  letter-spacing: 0.5px
  white-space: nowrap

  &--sm
    padding: 0.125rem 0.5rem
    font-size: 0.65rem

  &--md
    padding: 0.25rem 0.75rem
    font-size: 0.75rem

  &--lg
    padding: 0.375rem 1rem
    font-size: 0.85rem

  &--default
    background: var(--bg-elev)
    color: var(--text-dim)

  &--primary
    background: rgba(15, 98, 254, 0.1)
    color: #0f62fe

  &--success
    background: rgba(22, 163, 74, 0.1)
    color: #16a34a

  &--warning
    background: rgba(245, 158, 11, 0.1)
    color: #f59e0b

  &--danger
    background: rgba(217, 48, 37, 0.1)
    color: #d93025

  &--admin
    background: #e3f2fd
    color: #1976d2

  &--user
    background: var(--bg-elev)
    color: var(--text-dim)

  &--subtle
    background: var(--bg-elev)
    color: var(--text-dim)

41
webui/src/common/components/Button/Button.jsx
Normal file
@@ -0,0 +1,41 @@
import React from "react";
import cn from "classnames";
import "./styles.sass";

export const Button = ({
    as: Component = "button",
    variant = "primary",
    size = "md",
    full = false,
    icon,
    iconRight,
    loading = false,
    disabled,
    className,
    children,
    ...rest
}) => {
    const isDisabled = disabled || loading;
    const isIconOnly = (icon || iconRight) && !children;

    return (
        <Component
            className={cn(
                "btn",
                `btn--${variant}`,
                `btn--${size}`,
                full && "btn--full",
                loading && "is-loading",
                isIconOnly && "btn--icon-only",
                className
            )}
            disabled={isDisabled}
            {...rest}
        >
            {loading && <span className="btn-spinner" aria-hidden />}
            {icon && <span className="btn-icon btn-icon--left">{icon}</span>}
            {children && <span className="btn-label">{children}</span>}
            {iconRight && <span className="btn-icon btn-icon--right">{iconRight}</span>}
        </Component>
    );
};

1
webui/src/common/components/Button/index.js
Normal file
@@ -0,0 +1 @@
export { Button as default } from "./Button.jsx";

105
webui/src/common/components/Button/styles.sass
Normal file
@@ -0,0 +1,105 @@
.btn
  --c-bg: #ffffff
  --c-bg-hover: #f2f5f8
  --c-bg-active: #e6ebf0
  --c-border: #dfe3e8
  --c-border-hover: #c7ced6
  --c-text: #1f2429
  --c-accent: #0f62fe
  --c-danger: #d93025
  position: relative
  display: inline-flex
  align-items: center
  justify-content: center
  gap: .6rem
  font-family: inherit
  font-weight: 600
  line-height: 1.2
  cursor: pointer
  border: 1px solid var(--c-border)
  background: var(--c-bg)
  color: var(--c-text)
  border-radius: 12px
  transition: all .2s ease
  user-select: none
  text-decoration: none
  &:hover:not(:disabled)
    background: var(--c-bg-hover)
    border-color: var(--c-border-hover)
  &:active:not(:disabled)
    background: var(--c-bg-active)
    transform: translateY(1px)
  &:focus-visible
    outline: 2px solid var(--c-accent)
    outline-offset: 2px
  &:disabled
    opacity: .55
    cursor: not-allowed
  &.btn--full
    width: 100%
  &.btn--sm
    font-size: .85rem
    padding: .7rem 1rem
  &.btn--md
    font-size: .95rem
    padding: .85rem 1.25rem
  &.btn--lg
    font-size: 1.05rem
    padding: 1rem 1.5rem
  &.btn--primary
    --c-bg: #1f2429
    --c-bg-hover: #374048
    --c-bg-active: #2a3038
    --c-border: #1f2429
    --c-text: #ffffff
    background: var(--c-bg)
    border-color: var(--c-border)
    &:hover:not(:disabled)
      background: var(--c-bg-hover)
  &.btn--subtle
    --c-bg: #f0f3f6
    --c-bg-hover: #e6ebf0
    --c-bg-active: #dfe3e8
    --c-border: #dfe3e8
  &.btn--danger
    --c-bg: #d93025
    --c-bg-hover: #c22b21
    --c-bg-active: #a9241b
    --c-border: #d93025
    --c-text: #ffffff
    background: var(--c-bg)
    border-color: var(--c-border)

  &.btn--icon-only
    padding: 0.75rem
    aspect-ratio: 1
    justify-content: center

    &.btn--sm
      padding: 0.6rem

    &.btn--lg
      padding: 0.9rem

    .btn-icon
      margin: 0

.btn-icon
  display: inline-flex
  align-items: center
  &--left
    margin-right: .25rem
  &--right
    margin-left: .25rem

.btn-spinner
  width: 14px
  height: 14px
  border: 2px solid rgba(0,0,0,.15)
  border-top-color: var(--c-text)
  border-radius: 50%
  animation: spin .7s linear infinite

@keyframes spin
  to
    transform: rotate(360deg)

43
webui/src/common/components/Card/Card.jsx
Normal file
@@ -0,0 +1,43 @@
import React from 'react';
import './styles.sass';

export const Card = ({
    children,
    className = '',
    hover = false,
    padding = 'md',
    variant = 'default',
    ...rest
}) => {
    const cardClasses = [
        'card',
        // Prefixed to match the .card--variant-* modifiers in styles.sass
        // (a bare `card--${variant}` would never match them).
        `card--variant-${variant}`,
        `card--padding-${padding}`,
        hover && 'card--hover',
        className
    ].filter(Boolean).join(' ');

    return (
        <div className={cardClasses} {...rest}>
            {children}
        </div>
    );
};

export const CardHeader = ({ children, className = '' }) => (
    <div className={`card-header ${className}`}>
        {children}
    </div>
);

export const CardBody = ({ children, className = '' }) => (
    <div className={`card-body ${className}`}>
        {children}
    </div>
);

export const CardFooter = ({ children, className = '' }) => (
    <div className={`card-footer ${className}`}>
        {children}
    </div>
);

1
webui/src/common/components/Card/index.js
Normal file
@@ -0,0 +1 @@
export { Card as default, CardHeader, CardBody, CardFooter } from './Card.jsx';

43
webui/src/common/components/Card/styles.sass
Normal file
@@ -0,0 +1,43 @@
.card
  background: var(--bg-alt)
  border: 1px solid var(--border)
  border-radius: var(--radius-lg)
  transition: all 0.2s ease

  &--hover:hover
    border-color: var(--border-strong)
    transform: translateY(-2px)
    box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1)

  &--padding-none
    padding: 0

  &--padding-sm
    padding: 1rem

  &--padding-md
    padding: 1.5rem

  &--padding-lg
    padding: 2rem

  &--variant-elevated
    box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1)

  &--variant-outlined
    border-width: 2px

.card-header
  margin-bottom: 1rem

  &:last-child
    margin-bottom: 0

.card-body
  flex: 1

.card-footer
  margin-top: 1rem

  &:first-child
    margin-top: 0

28
webui/src/common/components/DetailItem/DetailItem.jsx
Normal file
@@ -0,0 +1,28 @@
import React from 'react';
import './styles.sass';

export const DetailItem = ({
    icon,
    children,
    className = '',
    ...rest
}) => {
    return (
        <div className={`detail-item ${className}`} {...rest}>
            {icon && <span className="detail-item-icon">{icon}</span>}
            <span className="detail-item-content">{children}</span>
        </div>
    );
};

export const DetailList = ({
    children,
    className = '',
    ...rest
}) => {
    return (
        <div className={`detail-list ${className}`} {...rest}>
            {children}
        </div>
    );
};

1
webui/src/common/components/DetailItem/index.js
Normal file
@@ -0,0 +1 @@
export { DetailItem as default, DetailList } from './DetailItem.jsx';

23
webui/src/common/components/DetailItem/styles.sass
Normal file
@@ -0,0 +1,23 @@
.detail-list
  display: flex
  flex-direction: column
  gap: 0.75rem

.detail-item
  display: flex
  align-items: center
  gap: 0.75rem
  color: var(--text-dim)
  font-size: 0.9rem

.detail-item-icon
  color: var(--text-dim)
  display: inline-flex
  flex-shrink: 0

  svg
    color: inherit

.detail-item-content
  flex: 1
  min-width: 0

28
webui/src/common/components/EmptyState/EmptyState.jsx
Normal file
@@ -0,0 +1,28 @@
import React from 'react';
import './styles.sass';

export const EmptyState = ({
    icon,
    title,
    description,
    action,
    size = 'md',
    variant = 'default',
    className = ''
}) => {
    const emptyStateClasses = [
        'empty-state',
        `empty-state--${size}`,
        `empty-state--${variant}`,
        className
    ].filter(Boolean).join(' ');

    return (
        <div className={emptyStateClasses}>
            {icon && <div className="empty-state-icon">{icon}</div>}
            {title && <h3 className="empty-state-title">{title}</h3>}
            {description && <p className="empty-state-description">{description}</p>}
            {action && <div className="empty-state-action">{action}</div>}
        </div>
    );
};

1
webui/src/common/components/EmptyState/index.js
Normal file
@@ -0,0 +1 @@
export { EmptyState as default } from './EmptyState.jsx';