Compare commits

..

29 Commits

Author SHA1 Message Date
5f79a9f0d2 Added timeouts and a struct for empty response 2024-08-17 12:04:53 +03:00
9e3b9527d3 Applying the DefaultBodyLimit layer only to file uploads and modifications 2024-08-15 22:43:00 +03:00
a3e4ac2b2e Final preparation 2024-08-15 20:41:59 +03:00
ab138e8536 Updated deps 2024-08-15 16:03:11 +03:00
62f55043a5 Small optimization for non unix targets 2024-08-11 11:37:51 +03:00
ec7fbc07a0 Removed file size limit 2024-08-11 10:25:36 +03:00
1c9bd104e0 Tweaks for the desktop client 2024-08-10 09:01:06 +03:00
8eb5be96b3 Small change 2024-08-06 16:44:49 +03:00
2b12996453 Removed E in ErrorHandlingExt 2024-08-06 16:39:43 +03:00
75afab933d More error handling improvements 2024-08-06 16:02:44 +03:00
eba30d1e9d Permission guard simplification 2024-08-05 23:45:00 +03:00
9f76228ebe Error handling 2024-08-05 23:32:16 +03:00
8a4e2dc467 Timezone and folder creation fixes 2024-08-05 21:06:25 +03:00
8d297fffdf Prepared queries 2024-08-04 13:51:19 +03:00
ea5c65b6e5 Moved login and register to users 2024-08-04 12:38:50 +03:00
7669a02a95 Cleanup 2024-08-04 12:34:46 +03:00
bac5584b46 Search changes 2024-08-04 10:03:35 +03:00
b6c71ee35b Registration and fixes 2024-08-04 09:48:41 +03:00
94bb1371fa Switched auth_post to accept a form instead of a json 2024-08-03 21:05:28 +03:00
0614c4cad0 Expanded token lifespan to 30 days 2024-08-03 20:48:43 +03:00
c4ff602ec7 Now checking that user_id from claims exists 2024-08-03 20:15:08 +03:00
9f36d8e663 Removed utoipa 2024-08-03 19:41:29 +03:00
40f0526500 Added ability to get the info of the current user 2024-08-03 16:45:54 +03:00
cd3ab9b6bc Disabled lto for release build for now 2024-08-03 16:16:42 +03:00
9217ae46cb Auth 2024-08-03 16:16:31 +03:00
f6ed06de48 get_structure endpoint 2024-08-02 12:32:23 +03:00
32d207a991 Changes 2024-07-31 19:17:59 +03:00
ea718be066 Changes 2024-07-31 12:30:18 +03:00
d4c1cdb582 User endpoints 2024-07-30 20:26:31 +03:00
71 changed files with 2060 additions and 1104 deletions

10
.dockerignore Normal file
View File

@ -0,0 +1,10 @@
**/target/
**/.vscode/
**/.env
**/.git/
**/.dockerignore
**/Dockerfile
**/compose.yaml
**/LICENSE
**/README.md
files/

View File

@ -0,0 +1,39 @@
{
"db_name": "PostgreSQL",
"query": "SELECT\n users.user_id,\n permission_type as \"permission_type: PermissionRaw\"\nFROM\n permissions\n INNER JOIN users ON permissions.user_id = users.user_id\nWHERE\n folder_id = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "user_id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "permission_type: PermissionRaw",
"type_info": {
"Custom": {
"name": "permission",
"kind": {
"Enum": [
"read",
"write",
"manage"
]
}
}
}
}
],
"parameters": {
"Left": [
"Uuid"
]
},
"nullable": [
false,
false
]
},
"hash": "003349bc951a935fdfb285f99a726c221e3d1d02cb9e47b4c385545298b27217"
}

View File

@ -1,6 +1,6 @@
{ {
"db_name": "PostgreSQL", "db_name": "PostgreSQL",
"query": "SELECT folder_id FROM folders WHERE folder_id = $1 AND owner_id = $2", "query": "SELECT folder_id FROM files WHERE file_id = $1",
"describe": { "describe": {
"columns": [ "columns": [
{ {
@ -11,13 +11,12 @@
], ],
"parameters": { "parameters": {
"Left": [ "Left": [
"Uuid", "Uuid"
"Int4"
] ]
}, },
"nullable": [ "nullable": [
false false
] ]
}, },
"hash": "dc98f1a609e67b642aed635b26239328e6456e69c12dc8561c474fff3dcd14d5" "hash": "09299172474d10a07387b74f4d714bf389b5422334bd1aa2a0e6f2d63ebdd623"
} }

View File

@ -0,0 +1,40 @@
{
"db_name": "PostgreSQL",
"query": "WITH\n permitted as (\n SELECT\n folder_id\n FROM\n permissions\n WHERE\n user_id = $1\n )\nSELECT\n folder_id, owner_id, folder_name, created_at\nFROM\n folders\nWHERE\n folder_id IN (\n SELECT\n folder_id\n FROM\n permitted\n )\n AND parent_folder_id NOT IN (\n SELECT\n folder_id\n FROM\n permitted\n )",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "folder_id",
"type_info": "Uuid"
},
{
"ordinal": 1,
"name": "owner_id",
"type_info": "Int4"
},
{
"ordinal": 2,
"name": "folder_name",
"type_info": "Varchar"
},
{
"ordinal": 3,
"name": "created_at",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": [
false,
false,
false,
false
]
},
"hash": "1c5dda0e613ee57819d4c9534f3bcd8809f313026a187a2eff66fa4f7ba888a5"
}

View File

@ -0,0 +1,28 @@
{
"db_name": "PostgreSQL",
"query": "SELECT user_id, hashed_password FROM users WHERE username = $1 OR email = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "user_id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "hashed_password",
"type_info": "Bytea"
}
],
"parameters": {
"Left": [
"Text"
]
},
"nullable": [
false,
false
]
},
"hash": "20af817890cb184e17d193e18132796e02e5e7352542f507acda25e9cd6cfc61"
}

View File

@ -1,6 +1,6 @@
{ {
"db_name": "PostgreSQL", "db_name": "PostgreSQL",
"query": "SELECT folder_id, owner_id, folder_name, created_at FROM folders WHERE parent_folder_id = $1", "query": "SELECT folder_id, owner_id, folder_name, created_at FROM folders WHERE folder_id = $1",
"describe": { "describe": {
"columns": [ "columns": [
{ {
@ -21,7 +21,7 @@
{ {
"ordinal": 3, "ordinal": 3,
"name": "created_at", "name": "created_at",
"type_info": "Timestamp" "type_info": "Timestamptz"
} }
], ],
"parameters": { "parameters": {
@ -36,5 +36,5 @@
false false
] ]
}, },
"hash": "9cc887509746b773ebbc8c130331b768f9a1deeab34d56aa3b0a833d718114fe" "hash": "3028a7c8ec616933e490ed267967b1406552c2b7c69f4f1f02a147df5411e692"
} }

View File

@ -1,24 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO folders(parent_folder_id, owner_id, folder_name) VALUES ($1, $2, $3) RETURNING folder_id",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "folder_id",
"type_info": "Uuid"
}
],
"parameters": {
"Left": [
"Uuid",
"Int4",
"Varchar"
]
},
"nullable": [
false
]
},
"hash": "3dd4a65d3106d742c2221c0589ac68d4621c6e351f9fbb7aa58629ff2d829234"
}

View File

@ -0,0 +1,17 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO folders(parent_folder_id, owner_id, folder_name, folder_id) VALUES ($1, $2, $3, $4)",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Uuid",
"Int4",
"Varchar",
"Uuid"
]
},
"nullable": []
},
"hash": "3faa32dd95822ae8687784817f68e48e726eedd2b7af7e52712974b4f04a8f80"
}

View File

@ -0,0 +1,23 @@
{
"db_name": "PostgreSQL",
"query": "SELECT\n EXISTS (\n SELECT\n file_name as name\n FROM\n files\n WHERE\n folder_id = $1\n AND file_name = $2\n UNION\n SELECT\n folder_name as name\n FROM\n folders\n WHERE\n parent_folder_id = $1\n AND folder_name = $2\n )",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "exists",
"type_info": "Bool"
}
],
"parameters": {
"Left": [
"Uuid",
"Text"
]
},
"nullable": [
null
]
},
"hash": "443854b9fb234840588e0100774d3ab51c4532b14d50e5ca3578fe6311e2017a"
}

View File

@ -0,0 +1,16 @@
{
"db_name": "PostgreSQL",
"query": "UPDATE files SET (sha512, file_size, updated_at) = ($2, $3, NOW()) WHERE file_id = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Uuid",
"Bytea",
"Int8"
]
},
"nullable": []
},
"hash": "4c7c701a22c49eebb4ba31a21dab15a5dbe2eaba99036932790c7c09599d4cd7"
}

View File

@ -0,0 +1,27 @@
{
"db_name": "PostgreSQL",
"query": "WITH RECURSIVE folder_hierarchy AS (\n -- Start with the given directory\n SELECT \n folder_id \n FROM \n folders \n WHERE \n folder_id = $1\n\n UNION ALL\n\n -- Recursively find all subdirectories\n SELECT \n f.folder_id\n FROM \n folders f\n INNER JOIN \n folder_hierarchy fh ON f.parent_folder_id = fh.folder_id\n)\nINSERT INTO permissions(user_id, folder_id, permission_type)\nSELECT $2::integer as user_id, fh.folder_id::UUID as folder_id, $3\nFROM folder_hierarchy fh\nON CONFLICT (user_id, folder_id) DO UPDATE\nSET permission_type = $3",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Uuid",
"Int4",
{
"Custom": {
"name": "permission",
"kind": {
"Enum": [
"read",
"write",
"manage"
]
}
}
}
]
},
"nullable": []
},
"hash": "595739c03acbe706107e34a3ebebec8c8f21f70ccc51b5366eff962d9af391d7"
}

View File

@ -1,13 +1,12 @@
{ {
"db_name": "PostgreSQL", "db_name": "PostgreSQL",
"query": "INSERT INTO files(file_id, folder_id, owner_id, file_name, file_size, sha512) VALUES ($1, $2, $3, $4, $5, $6)", "query": "INSERT INTO files(file_id, folder_id, file_name, file_size, sha512) VALUES ($1, $2, $3, $4, $5)",
"describe": { "describe": {
"columns": [], "columns": [],
"parameters": { "parameters": {
"Left": [ "Left": [
"Uuid", "Uuid",
"Uuid", "Uuid",
"Int4",
"Varchar", "Varchar",
"Int8", "Int8",
"Bytea" "Bytea"
@ -15,5 +14,5 @@
}, },
"nullable": [] "nullable": []
}, },
"hash": "9a70e24a3de68f4a66718124bd3ca959bd0a992e5e0dda3baae52b8cb545ce66" "hash": "6b58c84cdc19cea97ef025211a98879bb5cc80a934490125a19c960133f6d93d"
} }

View File

@ -0,0 +1,36 @@
{
"db_name": "PostgreSQL",
"query": "UPDATE users SET username = $2, email = $3 WHERE user_id = $1 RETURNING user_id, username, email",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "user_id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "username",
"type_info": "Varchar"
},
{
"ordinal": 2,
"name": "email",
"type_info": "Varchar"
}
],
"parameters": {
"Left": [
"Int4",
"Varchar",
"Varchar"
]
},
"nullable": [
false,
false,
false
]
},
"hash": "70a68acb301745ef393185c2bef92627648a6e419303adb40f56c09d55291cbd"
}

View File

@ -0,0 +1,34 @@
{
"db_name": "PostgreSQL",
"query": "SELECT\n permission_type as \"permission_type: PermissionRaw\"\nFROM\n permissions\nWHERE\n folder_id = $1\n AND user_id = $2\nUNION\nSELECT\n 'manage' as \"permission_type: PermissionRaw\"\nFROM\n folders\nWHERE\n folder_id = $1\n AND owner_id = $2",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "permission_type: PermissionRaw",
"type_info": {
"Custom": {
"name": "permission",
"kind": {
"Enum": [
"read",
"write",
"manage"
]
}
}
}
}
],
"parameters": {
"Left": [
"Uuid",
"Int4"
]
},
"nullable": [
null
]
},
"hash": "87f7df91208438a35516604f57f0443e0f12db718e23acd374f6f7ace65f467d"
}

View File

@ -0,0 +1,15 @@
{
"db_name": "PostgreSQL",
"query": "WITH RECURSIVE folder_hierarchy AS (\n -- Start with the given directory\n SELECT \n folder_id \n FROM \n folders \n WHERE \n folder_id = $1\n\n UNION ALL\n\n -- Recursively find all subdirectories\n SELECT \n f.folder_id\n FROM \n folders f\n INNER JOIN \n folder_hierarchy fh ON f.parent_folder_id = fh.folder_id\n)\nDELETE FROM permissions WHERE user_id = $2 AND folder_id IN (SELECT folder_id FROM folder_hierarchy)",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Uuid",
"Int4"
]
},
"nullable": []
},
"hash": "948f13b631bcc7df1919a9639443f0ed932c4cb37f2ba5bf6f000eb84b265ae2"
}

View File

@ -1,23 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "SELECT file_id FROM files WHERE file_id = $1 AND owner_id = $2",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "file_id",
"type_info": "Uuid"
}
],
"parameters": {
"Left": [
"Uuid",
"Int4"
]
},
"nullable": [
false
]
},
"hash": "9a26dab9efbbbb92b7be27792b581a0156210fdc0aadd3756f7003186f428374"
}

View File

@ -1,6 +1,6 @@
{ {
"db_name": "PostgreSQL", "db_name": "PostgreSQL",
"query": "SELECT EXISTS(SELECT folder_id FROM folders WHERE parent_folder_id = $1 AND folder_name = $2)", "query": "SELECT EXISTS(SELECT user_id FROM users WHERE user_id = $1)",
"describe": { "describe": {
"columns": [ "columns": [
{ {
@ -11,13 +11,12 @@
], ],
"parameters": { "parameters": {
"Left": [ "Left": [
"Uuid", "Int4"
"Text"
] ]
}, },
"nullable": [ "nullable": [
null null
] ]
}, },
"hash": "cd3591c61f3cc036158d8d55ec22a04adaf62ec4b05ba73da9253128b7bbb5b1" "hash": "a04a4e8d3a394883a2f1052074bd43fcadafa0c1ba66f36ac49fc54b5c4150b3"
} }

View File

@ -0,0 +1,22 @@
{
"db_name": "PostgreSQL",
"query": "WITH\n deleted_files AS (\n DELETE FROM files USING folders\n WHERE\n files.folder_id = folders.folder_id\n AND folders.owner_id = $1 RETURNING files.file_id\n ),\n deleted_users AS (\n DELETE FROM users\n WHERE\n user_id = $1\n )\nSELECT\n *\nFROM\n deleted_files;",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "file_id",
"type_info": "Uuid"
}
],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": [
false
]
},
"hash": "a54829e9cd90e55022c2f6dd413b797efaafd1c4793b60886140bfe9ea6df592"
}

View File

@ -0,0 +1,40 @@
{
"db_name": "PostgreSQL",
"query": "SELECT\n user_id, username, email, \n GREATEST (\n similarity (email, $1),\n similarity (username, $1)\n ) as \"similarity!\"\nFROM\n users\nORDER BY\n \"similarity!\" DESC\nLIMIT 20",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "user_id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "username",
"type_info": "Varchar"
},
{
"ordinal": 2,
"name": "email",
"type_info": "Varchar"
},
{
"ordinal": 3,
"name": "similarity!",
"type_info": "Float4"
}
],
"parameters": {
"Left": [
"Text"
]
},
"nullable": [
false,
false,
false,
null
]
},
"hash": "e0d415b13ccf7aa865558395eb6997bfff50762d36cf3742470a897f4588c802"
}

View File

@ -1,6 +1,6 @@
{ {
"db_name": "PostgreSQL", "db_name": "PostgreSQL",
"query": "SELECT file_id, owner_id, file_name, file_size, sha512, created_at, updated_at FROM files WHERE folder_id = $1", "query": "SELECT file_id, file_name, file_size, encode(sha512, 'base64') as \"sha512!\", created_at, updated_at FROM files WHERE folder_id = $1",
"describe": { "describe": {
"columns": [ "columns": [
{ {
@ -10,33 +10,28 @@
}, },
{ {
"ordinal": 1, "ordinal": 1,
"name": "owner_id",
"type_info": "Int4"
},
{
"ordinal": 2,
"name": "file_name", "name": "file_name",
"type_info": "Varchar" "type_info": "Varchar"
}, },
{ {
"ordinal": 3, "ordinal": 2,
"name": "file_size", "name": "file_size",
"type_info": "Int8" "type_info": "Int8"
}, },
{
"ordinal": 3,
"name": "sha512!",
"type_info": "Text"
},
{ {
"ordinal": 4, "ordinal": 4,
"name": "sha512", "name": "created_at",
"type_info": "Bytea" "type_info": "Timestamptz"
}, },
{ {
"ordinal": 5, "ordinal": 5,
"name": "created_at",
"type_info": "Timestamp"
},
{
"ordinal": 6,
"name": "updated_at", "name": "updated_at",
"type_info": "Timestamp" "type_info": "Timestamptz"
} }
], ],
"parameters": { "parameters": {
@ -48,11 +43,10 @@
false, false,
false, false,
false, false,
false, null,
false,
false, false,
false false
] ]
}, },
"hash": "5a51ab540453327bdd75f49991f402fac6b1d8fb0a760d420236e2b41d3e7fcf" "hash": "e125c9f06cb89c6ddd2653ed45c576da3aecfb9fb74aabf202e83406fc8c8fff"
} }

View File

@ -0,0 +1,41 @@
{
"db_name": "PostgreSQL",
"query": "SELECT\n f.folder_id,\n owner_id,\n folder_name,\n created_at\nFROM\n folders f\n LEFT JOIN permissions p ON f.folder_id = p.folder_id\nWHERE\n parent_folder_id = $1\n AND (p.user_id = $2 OR f.owner_id = $2)",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "folder_id",
"type_info": "Uuid"
},
{
"ordinal": 1,
"name": "owner_id",
"type_info": "Int4"
},
{
"ordinal": 2,
"name": "folder_name",
"type_info": "Varchar"
},
{
"ordinal": 3,
"name": "created_at",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Uuid",
"Int4"
]
},
"nullable": [
false,
false,
false,
false
]
},
"hash": "ef707c0f6d2ef0d66e71929167b5c82bb8bf923736e6c797711bc3124f0693bc"
}

View File

@ -1,6 +1,6 @@
{ {
"db_name": "PostgreSQL", "db_name": "PostgreSQL",
"query": "INSERT INTO users(username, email) VALUES ($1, $2) RETURNING user_id", "query": "INSERT INTO users(username, email, hashed_password) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING RETURNING user_id",
"describe": { "describe": {
"columns": [ "columns": [
{ {
@ -12,12 +12,13 @@
"parameters": { "parameters": {
"Left": [ "Left": [
"Varchar", "Varchar",
"Varchar" "Varchar",
"Bytea"
] ]
}, },
"nullable": [ "nullable": [
false false
] ]
}, },
"hash": "9602875e192fd321f3a773aa7eb5145cb0d1e7f31def733fd11394e9ad6c0d21" "hash": "fb94ebf44aff9c5c56cc43ef47f571b4dc1fcdcbc595aef4d245ee2454b0a458"
} }

View File

@ -0,0 +1,34 @@
{
"db_name": "PostgreSQL",
"query": "SELECT user_id, username, email FROM users WHERE user_id = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "user_id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "username",
"type_info": "Varchar"
},
{
"ordinal": 2,
"name": "email",
"type_info": "Varchar"
}
],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": [
false,
false,
false
]
},
"hash": "fd53e5f2c7e8aa87f3cf4e430a6ec3a632ce125fdb092dbd17630e952d4e0d9e"
}

962
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -5,8 +5,6 @@ edition = "2021"
[profile.release] [profile.release]
debug = 1 debug = 1
lto = true
codegen-units = 1
[lints.clippy] [lints.clippy]
pedantic = "warn" pedantic = "warn"
@ -24,10 +22,12 @@ axum-extra = { version = "0.9", features = ["typed-header"] }
chrono = { version = "0.4", features = ["serde"] } chrono = { version = "0.4", features = ["serde"] }
dotenvy = "0.15" dotenvy = "0.15"
futures = "0.3" futures = "0.3"
itertools = "0.13"
jsonwebtoken = "9" jsonwebtoken = "9"
oauth2 = "4" rand = "0.8"
reqwest = { version = "0.12", features = ["http2", "rustls-tls", "json"] } scrypt = { version = "0.11", default-features = false, features = ["std"] }
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
serde_json = "1"
sha2 = "0.10" sha2 = "0.10"
sqlx = { version = "0.8", features = [ sqlx = { version = "0.8", features = [
"postgres", "postgres",
@ -37,12 +37,18 @@ sqlx = { version = "0.8", features = [
"chrono", "chrono",
"uuid", "uuid",
] } ] }
tokio = { version = "1", features = ["rt-multi-thread"] } subtle = "2"
tokio = { version = "1", features = [
"parking_lot",
"rt-multi-thread",
"signal",
] }
tokio-util = { version = "0.7" } tokio-util = { version = "0.7" }
tower = { version = "0.4" } tower = { version = "0.4" }
tower-http = { version = "0.5", features = [ tower-http = { version = "0.5", features = [
"compression-full", "compression-full",
"sensitive-headers", "sensitive-headers",
"timeout",
"trace", "trace",
"util", "util",
] } ] }
@ -51,4 +57,5 @@ tracing-subscriber = { version = "0.3", features = [
"parking_lot", "parking_lot",
"env-filter", "env-filter",
] } ] }
uuid = { version = "1", features = ["serde", "v4"] } uuid = { version = "1", features = ["serde", "v7"] }
validator = { version = "0.18", features = ["derive"] }

19
Dockerfile Normal file
View File

@ -0,0 +1,19 @@
FROM rust:slim AS chef
RUN cargo install cargo-chef
WORKDIR /app
FROM chef AS planner
COPY . .
RUN cargo chef prepare
FROM chef AS builder
COPY --from=planner /app/recipe.json recipe.json
RUN cargo chef cook --release
COPY . .
RUN cargo b -r
FROM debian:stable-slim
EXPOSE 3000
WORKDIR /app
COPY --from=builder /app/target/release/project .
CMD [ "./project" ]

25
compose-dev.yaml Normal file
View File

@ -0,0 +1,25 @@
services:
backend:
build: .
ports:
- 3000:3000
environment:
JWT_SECRET: ${JWT_SECRET}
DATABASE_URL: 'postgresql://tester:testing123!@backend_db/backend'
depends_on:
- backend_db
backend_db:
image: ghcr.io/fboulnois/pg_uuidv7:1.5.0
environment:
- POSTGRES_USER=tester
- POSTGRES_PASSWORD=testing123!
- POSTGRES_DB=backend
ports:
- 5432:5432
volumes:
- backend_db_data:/var/lib/postgresql/data
restart: unless-stopped
volumes:
backend_db_data:

View File

@ -1,14 +1,22 @@
services: services:
db: backend:
image: postgres:alpine build: .
environment:
JWT_SECRET: ${JWT_SECRET}
DATABASE_URL: 'postgresql://tester:testing123!@backend_db/backend'
depends_on:
- backend_db
restart: unless-stopped
backend_db:
image: ghcr.io/fboulnois/pg_uuidv7:1.5.0
environment: environment:
- POSTGRES_USER=tester - POSTGRES_USER=tester
- POSTGRES_PASSWORD=testing123! - POSTGRES_PASSWORD=testing123!
- POSTGRES_DB=testing - POSTGRES_DB=backend
ports:
- 5432:5432
volumes: volumes:
- postgres_data:/var/lib/postgresql/data - backend_db_data:/var/lib/postgresql/data
restart: unless-stopped
volumes: volumes:
postgres_data: backend_db_data:

View File

@ -7,3 +7,7 @@ DROP TABLE folders;
DROP TABLE users; DROP TABLE users;
DROP TYPE permission; DROP TYPE permission;
DROP EXTENSION IF EXISTS pg_trgm;
DROP EXTENSION IF EXISTS pg_uuidv7;

View File

@ -1,28 +1,33 @@
CREATE EXTENSION IF NOT EXISTS pg_trgm;
CREATE EXTENSION IF NOT EXISTS pg_uuidv7;
CREATE TABLE CREATE TABLE
users ( users (
user_id SERIAL PRIMARY KEY, user_id SERIAL PRIMARY KEY,
username VARCHAR(50) NOT NULL UNIQUE, username VARCHAR(50) NOT NULL UNIQUE,
email VARCHAR(100) NOT NULL UNIQUE email VARCHAR(100) NOT NULL UNIQUE,
hashed_password BYTEA NOT NULL
); );
CREATE TABLE CREATE TABLE
folders ( folders (
folder_id UUID PRIMARY KEY DEFAULT gen_random_uuid (), folder_id UUID PRIMARY KEY DEFAULT uuid_generate_v7 (),
parent_folder_id UUID REFERENCES folders (folder_id) ON DELETE CASCADE DEFAULT null, parent_folder_id UUID REFERENCES folders (folder_id) ON DELETE CASCADE DEFAULT null,
owner_id INT REFERENCES users (user_id) ON DELETE CASCADE NOT NULL, owner_id INT REFERENCES users (user_id) ON DELETE CASCADE NOT NULL,
folder_name VARCHAR(255) NOT NULL, folder_name VARCHAR(255) NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP NOT NULL
); );
CREATE TABLE CREATE TABLE
files ( files (
file_id UUID PRIMARY KEY DEFAULT gen_random_uuid (), file_id UUID PRIMARY KEY DEFAULT uuid_generate_v7 (),
folder_id UUID REFERENCES folders (folder_id) ON DELETE CASCADE NOT NULL, folder_id UUID REFERENCES folders (folder_id) ON DELETE CASCADE NOT NULL,
file_name VARCHAR(255) NOT NULL, file_name VARCHAR(255) NOT NULL,
file_size BIGINT NOT NULL, file_size BIGINT NOT NULL,
sha512 BYTEA NOT NULL, sha512 BYTEA NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP NOT NULL,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP NOT NULL
); );
CREATE TYPE permission AS ENUM ('read', 'write', 'manage'); CREATE TYPE permission AS ENUM ('read', 'write', 'manage');

1
sql/create_folder.sql Normal file
View File

@ -0,0 +1 @@
INSERT INTO folders(parent_folder_id, owner_id, folder_name, folder_id) VALUES ($1, $2, $3, $4)

16
sql/delete_user.sql Normal file
View File

@ -0,0 +1,16 @@
WITH
deleted_files AS (
DELETE FROM files USING folders
WHERE
files.folder_id = folders.folder_id
AND folders.owner_id = $1 RETURNING files.file_id
),
deleted_users AS (
DELETE FROM users
WHERE
user_id = $1
)
SELECT
*
FROM
deleted_files;

View File

@ -1,5 +1,5 @@
SELECT SELECT
username, users.user_id,
permission_type as "permission_type: PermissionRaw" permission_type as "permission_type: PermissionRaw"
FROM FROM
permissions permissions

11
sql/get_folders.sql Normal file
View File

@ -0,0 +1,11 @@
SELECT
f.folder_id,
owner_id,
folder_name,
created_at
FROM
folders f
LEFT JOIN permissions p ON f.folder_id = p.folder_id
WHERE
parent_folder_id = $1
AND (p.user_id = $2 OR f.owner_id = $2)

View File

@ -8,7 +8,7 @@ WITH
user_id = $1 user_id = $1
) )
SELECT SELECT
folder_id folder_id, owner_id, folder_name, created_at
FROM FROM
folders folders
WHERE WHERE

18
sql/name_exists.sql Normal file
View File

@ -0,0 +1,18 @@
SELECT
EXISTS (
SELECT
file_name as name
FROM
files
WHERE
folder_id = $1
AND file_name = $2
UNION
SELECT
folder_name as name
FROM
folders
WHERE
parent_folder_id = $1
AND folder_name = $2
)

11
sql/search_for_user.sql Normal file
View File

@ -0,0 +1,11 @@
SELECT
user_id, username, email,
GREATEST (
similarity (email, $1),
similarity (username, $1)
) as "similarity!"
FROM
users
ORDER BY
"similarity!" DESC
LIMIT 20

View File

@ -1,31 +1,204 @@
use std::{array::TryFromSliceError, sync::LazyLock};
use axum::{ use axum::{
extract::{FromRequestParts, Query}, extract::{FromRef, FromRequestParts},
http::{request::Parts, StatusCode}, http::request::Parts,
RequestPartsExt, RequestPartsExt,
}; };
use serde::Deserialize; use axum_extra::{
headers::{authorization::Bearer, Authorization},
TypedHeader,
};
use chrono::{TimeDelta, Utc};
use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, Validation};
use rand::{rngs::OsRng, RngCore};
use serde::{Deserialize, Serialize};
use subtle::ConstantTimeEq;
use crate::AppState; use crate::prelude::*;
#[derive(Deserialize, Debug)] pub const HASH_LENGTH: usize = 64;
pub const SALT_LENGTH: usize = 64;
static PARAMS: LazyLock<scrypt::Params> =
LazyLock::new(|| scrypt::Params::new(14, 8, 1, HASH_LENGTH).unwrap());
static KEYS: LazyLock<Keys> = LazyLock::new(|| {
let secret = std::env::var("JWT_SECRET").expect("JWT_SECRET must be set");
Keys::from_secret(secret.as_bytes())
});
struct Keys {
encoding_key: EncodingKey,
decoding_key: DecodingKey,
}
impl Keys {
fn from_secret(secret: &[u8]) -> Self {
Self {
encoding_key: EncodingKey::from_secret(secret),
decoding_key: DecodingKey::from_secret(secret),
}
}
}
/// Forces the evaluation of the keys. They will be created upon first use otherwise
pub fn force_init_keys() {
LazyLock::force(&KEYS);
}
/// Hashes the bytes using Scrypt with the given salt
#[must_use]
fn hash_scrypt(bytes: &[u8], salt: &[u8]) -> [u8; HASH_LENGTH] {
let mut hash = [0; HASH_LENGTH];
scrypt::scrypt(bytes, salt, &PARAMS, &mut hash).unwrap();
hash
}
/// Verifiable scrypt hashed bytes
#[cfg_attr(test, derive(PartialEq))] // == OPERATOR MUSTN'T BE USED OUTSIDE OF TESTS
pub struct HashedBytes {
pub hash: [u8; HASH_LENGTH],
pub salt: [u8; SALT_LENGTH],
}
impl HashedBytes {
/// Hashes the bytes
#[must_use]
pub fn hash_bytes(bytes: &[u8]) -> Self {
let mut salt = [0; SALT_LENGTH];
OsRng.fill_bytes(&mut salt);
Self {
hash: hash_scrypt(bytes, &salt),
salt,
}
}
/// Parses the bytes where the first `HASH_LENGTH` bytes are the hash and the latter `SALT_LENGTH` bytes are the salt
pub fn from_bytes(bytes: &[u8]) -> Result<Self, TryFromSliceError> {
let (hash, salt) = bytes.split_at(HASH_LENGTH);
let result = Self {
hash: hash.try_into()?,
salt: salt.try_into()?,
};
Ok(result)
}
#[must_use]
pub fn verify(&self, bytes: &[u8]) -> bool {
let hash = hash_scrypt(bytes, self.salt.as_ref());
hash.ct_eq(self.hash.as_ref()).into()
}
pub fn as_bytes(&self) -> Vec<u8> {
let mut result = Vec::with_capacity(self.hash.len() + self.salt.len());
result.extend_from_slice(&self.hash);
result.extend_from_slice(&self.salt);
result
}
}
pub async fn authenticate_user(
username: &str,
password: &str,
pool: &Pool,
) -> anyhow::Result<Option<i32>> {
let Some((user_id, hash)) = db::users::get_hash(username, pool).await? else {
return Ok(None);
};
let hash = HashedBytes::from_bytes(&hash)?;
Ok(hash.verify(password.as_bytes()).then_some(user_id))
}
#[derive(Debug, Serialize)]
pub struct Token {
access_token: String,
token_type: &'static str,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Claims { pub struct Claims {
pub user_id: i32, pub user_id: i32,
pub exp: i64,
}
const JWT_ALGORITHM: jsonwebtoken::Algorithm = jsonwebtoken::Algorithm::HS256;
impl Claims {
pub fn new(user_id: i32) -> Self {
Self {
user_id,
exp: (Utc::now() + TimeDelta::days(30)).timestamp(),
}
}
pub fn encode(self) -> Result<Token, GeneralError> {
let access_token = encode(&Header::new(JWT_ALGORITHM), &self, &KEYS.encoding_key)
.handle_internal("Token creation error")?;
let token = Token {
access_token,
token_type: "Bearer",
};
Ok(token)
}
} }
#[axum::async_trait] #[axum::async_trait]
impl FromRequestParts<AppState> for Claims { impl<T> FromRequestParts<T> for Claims
type Rejection = StatusCode; where
Pool: FromRef<T>,
T: Sync,
{
type Rejection = GeneralError;
async fn from_request_parts( async fn from_request_parts(parts: &mut Parts, state: &T) -> Result<Self, Self::Rejection> {
parts: &mut Parts, const INVALID_TOKEN: GeneralError =
_state: &AppState, GeneralError::const_message(StatusCode::UNAUTHORIZED, "Invalid token");
) -> Result<Self, Self::Rejection> {
match parts.extract().await { let pool = Pool::from_ref(state);
Ok(Query(claims)) => Ok(claims), let TypedHeader(Authorization(bearer)) = parts
Err(err) => { .extract::<TypedHeader<Authorization<Bearer>>>()
tracing::debug!(%err, "Autharization failed"); .await
Err(StatusCode::UNAUTHORIZED) .map_err(|_| INVALID_TOKEN)?;
let claims: Claims = decode(
bearer.token(),
&KEYS.decoding_key,
&Validation::new(JWT_ALGORITHM),
)
.map_err(|_| INVALID_TOKEN)?
.claims;
db::users::exists(claims.user_id, &pool)
.await
.handle_internal("Token validation error")?
.then_some(claims)
.ok_or(GeneralError::const_message(
StatusCode::UNAUTHORIZED,
"Wrong credentials",
))
} }
} }
#[cfg(test)]
mod tests {
use super::HashedBytes;
const PASSWORD: &str = "Password12313#!#4)$*!#";
#[test]
fn test_hash_conversion() {
let bytes = HashedBytes::hash_bytes(PASSWORD.as_bytes());
let bytes2 = HashedBytes::from_bytes(&bytes.as_bytes()).unwrap();
assert!(bytes == bytes2);
}
#[test]
fn test_hash() {
assert!(HashedBytes::hash_bytes(PASSWORD.as_bytes()).verify(PASSWORD.as_bytes()));
}
#[test]
fn test_different_hash() {
assert!(!HashedBytes::hash_bytes(PASSWORD.as_bytes()).verify(b"Different Password"));
} }
} }

View File

@ -1,8 +1,6 @@
use uuid::Uuid; use db::permissions::PermissionType;
use crate::Pool; use crate::prelude::*;
use super::permissions::PermissionType;
pub async fn insert( pub async fn insert(
file_id: Uuid, file_id: Uuid,
@ -33,18 +31,20 @@ pub async fn update(file_id: Uuid, size: i64, hash: Vec<u8>, pool: &Pool) -> sql
#[derive(Debug, serde::Serialize)] #[derive(Debug, serde::Serialize)]
#[allow(clippy::struct_field_names, clippy::module_name_repetitions)] #[allow(clippy::struct_field_names, clippy::module_name_repetitions)]
pub struct FileWithoutParentId { pub struct FileWithoutParentId {
file_id: Uuid, pub file_id: Uuid,
file_name: String, pub file_name: String,
file_size: i64, pub file_size: i64,
sha512: String, pub sha512: String,
created_at: chrono::NaiveDateTime, pub created_at: chrono::DateTime<chrono::Utc>,
updated_at: chrono::NaiveDateTime, pub updated_at: chrono::DateTime<chrono::Utc>,
} }
pub async fn get_files(folder_id: Uuid, pool: &Pool) -> sqlx::Result<Vec<FileWithoutParentId>> { pub fn get_files(
folder_id: Uuid,
pool: &Pool,
) -> impl Stream<Item = sqlx::Result<FileWithoutParentId>> + '_ {
sqlx::query_as!(FileWithoutParentId, r#"SELECT file_id, file_name, file_size, encode(sha512, 'base64') as "sha512!", created_at, updated_at FROM files WHERE folder_id = $1"#, folder_id) sqlx::query_as!(FileWithoutParentId, r#"SELECT file_id, file_name, file_size, encode(sha512, 'base64') as "sha512!", created_at, updated_at FROM files WHERE folder_id = $1"#, folder_id)
.fetch_all(pool) .fetch(pool)
.await
} }
async fn get_folder_id(file_id: Uuid, pool: &Pool) -> sqlx::Result<Option<Uuid>> { async fn get_folder_id(file_id: Uuid, pool: &Pool) -> sqlx::Result<Option<Uuid>> {
@ -67,10 +67,11 @@ pub async fn get_permissions(
} }
pub async fn get_name(file_id: Uuid, pool: &Pool) -> sqlx::Result<Option<String>> { pub async fn get_name(file_id: Uuid, pool: &Pool) -> sqlx::Result<Option<String>> {
let record = sqlx::query!("SELECT file_name FROM files WHERE file_id = $1", file_id) let name = sqlx::query!("SELECT file_name FROM files WHERE file_id = $1", file_id)
.fetch_optional(pool) .fetch_optional(pool)
.await?; .await?
Ok(record.map(|record| record.file_name)) .map(|record| record.file_name);
Ok(name)
} }
pub async fn delete(file_id: Uuid, pool: &Pool) -> sqlx::Result<bool> { pub async fn delete(file_id: Uuid, pool: &Pool) -> sqlx::Result<bool> {

View File

@ -1,9 +1,4 @@
use std::collections::HashSet; use crate::{db::permissions::PermissionRaw, prelude::*};
use futures::TryStreamExt;
use uuid::Uuid;
use crate::{db::permissions::PermissionRaw, Pool};
use super::permissions::PermissionType; use super::permissions::PermissionType;
@ -19,12 +14,10 @@ pub async fn get_permissions(
Ok(permission.into()) Ok(permission.into())
} }
pub async fn get_names(folder_id: Uuid, pool: &Pool) -> sqlx::Result<HashSet<String>> { pub fn get_names(folder_id: Uuid, pool: &Pool) -> impl Stream<Item = sqlx::Result<String>> + '_ {
sqlx::query!("SELECT folder_name as name FROM folders WHERE parent_folder_id = $1 UNION SELECT file_name as name FROM files WHERE folder_id = $1", folder_id) sqlx::query!("SELECT folder_name as name FROM folders WHERE parent_folder_id = $1 UNION SELECT file_name as name FROM files WHERE folder_id = $1", folder_id)
.fetch(pool) .fetch(pool)
.map_ok(|record| record.name.unwrap()) .map_ok(|record| record.name.unwrap())
.try_collect::<HashSet<String>>()
.await
} }
pub async fn get_root(user_id: i32, pool: &Pool) -> sqlx::Result<Uuid> { pub async fn get_root(user_id: i32, pool: &Pool) -> sqlx::Result<Uuid> {
@ -37,7 +30,7 @@ pub async fn get_root(user_id: i32, pool: &Pool) -> sqlx::Result<Uuid> {
.map(|row| row.folder_id) .map(|row| row.folder_id)
} }
pub async fn get_by_id(id: Option<Uuid>, user_id: i32, pool: &Pool) -> sqlx::Result<Option<Uuid>> { pub async fn process_id(id: Option<Uuid>, user_id: i32, pool: &Pool) -> sqlx::Result<Option<Uuid>> {
match id { match id {
Some(id) => get_permissions(id, user_id, pool) Some(id) => get_permissions(id, user_id, pool)
.await .await
@ -49,62 +42,75 @@ pub async fn get_by_id(id: Option<Uuid>, user_id: i32, pool: &Pool) -> sqlx::Res
#[derive(Debug, serde::Serialize)] #[derive(Debug, serde::Serialize)]
#[allow(clippy::struct_field_names, clippy::module_name_repetitions)] #[allow(clippy::struct_field_names, clippy::module_name_repetitions)]
pub struct FolderWithoutParentId { pub struct FolderWithoutParentId {
folder_id: Uuid, pub folder_id: Uuid,
owner_id: i32, pub owner_id: i32,
folder_name: String, pub folder_name: String,
created_at: chrono::NaiveDateTime, pub created_at: chrono::DateTime<chrono::Utc>,
} }
pub async fn get_folders( pub async fn get_by_id(
parent_folder_id: Uuid, folder_id: Uuid,
pool: &Pool, pool: &Pool,
) -> sqlx::Result<Vec<FolderWithoutParentId>> { ) -> sqlx::Result<Option<FolderWithoutParentId>> {
sqlx::query_as!( sqlx::query_as!(
FolderWithoutParentId, FolderWithoutParentId,
"SELECT folder_id, owner_id, folder_name, created_at FROM folders WHERE parent_folder_id = $1", "SELECT folder_id, owner_id, folder_name, created_at FROM folders WHERE folder_id = $1",
parent_folder_id, folder_id
) )
.fetch_all(pool) .fetch_optional(pool)
.await .await
} }
pub async fn exists_by_name( /// Get folders that user can read
parent_folder_id: Uuid, ///
folder_name: &str, /// # Warning
pool: &Pool, ///
) -> sqlx::Result<bool> { /// This function doesn't check that the user can read the parent folder itself
sqlx::query!( pub fn get_folders(
"SELECT EXISTS(SELECT folder_id FROM folders WHERE parent_folder_id = $1 AND folder_name = $2)",
parent_folder_id,
folder_name
)
.fetch_one(pool)
.await
.and_then(|row| {
row.exists.ok_or(sqlx::Error::RowNotFound)
})
}
pub async fn insert(
parent_folder_id: Uuid, parent_folder_id: Uuid,
user_id: i32, user_id: i32,
folder_name: &str,
pool: &Pool, pool: &Pool,
) -> sqlx::Result<Uuid> { ) -> impl Stream<Item = sqlx::Result<FolderWithoutParentId>> + '_ {
sqlx::query!("INSERT INTO folders(parent_folder_id, owner_id, folder_name) VALUES ($1, $2, $3) RETURNING folder_id", sqlx::query_file_as!(
FolderWithoutParentId,
"sql/get_folders.sql",
parent_folder_id, parent_folder_id,
user_id, user_id
folder_name
) )
.fetch_one(pool) .fetch(pool)
.await
.map(|record| record.folder_id)
} }
pub async fn delete(folder_id: Uuid, pool: &Pool) -> sqlx::Result<Vec<Uuid>> { pub async fn name_exists(parent_folder_id: Uuid, name: &str, pool: &Pool) -> sqlx::Result<bool> {
sqlx::query_file!("sql/name_exists.sql", parent_folder_id, name)
.fetch_one(pool)
.await
.map(|row| row.exists.unwrap_or(false))
}
/// Creates a folder in the database. Do not use this function to create the ROOT folder
pub async fn insert(parent_folder_id: Uuid, folder_name: &str, pool: &Pool) -> sqlx::Result<Uuid> {
let folder_id = Uuid::now_v7();
let owner_id = get_by_id(parent_folder_id, pool)
.await?
.ok_or(sqlx::Error::RowNotFound)?
.owner_id;
let result = sqlx::query_file!(
"sql/create_folder.sql",
parent_folder_id,
owner_id,
folder_name,
folder_id
)
.execute(pool)
.await?;
if result.rows_affected() == 0 {
return Err(sqlx::Error::RowNotFound);
}
Ok(folder_id)
}
pub fn delete(folder_id: Uuid, pool: &Pool) -> impl Stream<Item = sqlx::Result<Uuid>> + '_ {
sqlx::query_file!("sql/delete_folder.sql", folder_id) sqlx::query_file!("sql/delete_folder.sql", folder_id)
.fetch(pool) .fetch(pool)
.map_ok(|row| row.file_id) .map_ok(|row| row.file_id)
.try_collect()
.await
} }

View File

@ -1,3 +1,4 @@
pub mod file; pub mod file;
pub mod folder; pub mod folder;
pub mod permissions; pub mod permissions;
pub mod users;

View File

@ -1,13 +1,11 @@
use std::collections::HashMap; use std::{borrow::Cow, collections::HashMap};
use axum::http::StatusCode; use db::folder::FolderWithoutParentId;
use futures::TryStreamExt as _;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::Pool; use crate::prelude::*;
#[derive(sqlx::Type, Debug, Serialize, Deserialize)] #[derive(sqlx::Type, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
#[sqlx(type_name = "permission")] #[sqlx(type_name = "permission")]
#[sqlx(rename_all = "lowercase")] #[sqlx(rename_all = "lowercase")]
pub enum PermissionRaw { pub enum PermissionRaw {
@ -37,46 +35,74 @@ impl From<Option<PermissionRaw>> for PermissionType {
} }
} }
impl From<PermissionType> for PermissionRaw {
fn from(value: PermissionType) -> Self {
match value {
PermissionType::Manage => Self::Manage,
PermissionType::Write => Self::Write,
PermissionType::Read => Self::Read,
PermissionType::NoPermission => unreachable!(),
}
}
}
impl PermissionType { impl PermissionType {
pub fn can_read(self) -> bool { pub fn can_read(self) -> bool {
self >= PermissionType::Read self >= PermissionType::Read
} }
pub fn can_read_guard(self) -> Result<(), StatusCode> { fn can_read_guard(self) -> GeneralResult<()> {
if !self.can_read() { self.can_read().then_some(()).item_not_found()?;
return Err(StatusCode::NOT_FOUND);
}
Ok(()) Ok(())
} }
pub fn can_write_guard(self) -> Result<(), StatusCode> { fn can_write_guard(self) -> GeneralResult<()> {
self.can_read_guard()?; self.can_read_guard()?;
if self < PermissionType::Write { if self < PermissionType::Write {
return Err(StatusCode::FORBIDDEN); return Err(GeneralError::message(
StatusCode::FORBIDDEN,
"Cannot write to the folder",
));
} }
Ok(()) Ok(())
} }
pub fn can_manage_guard(self) -> Result<(), StatusCode> { fn can_manage_guard(self) -> GeneralResult<()> {
self.can_read_guard()?; self.can_read_guard()?;
if self < PermissionType::Manage { if self < PermissionType::Manage {
return Err(StatusCode::FORBIDDEN); return Err(GeneralError::message(
StatusCode::FORBIDDEN,
"Cannot manage the folder",
));
} }
Ok(()) Ok(())
} }
} }
pub trait PermissionExt {
fn can_read_guard(self) -> GeneralResult<()>;
fn can_write_guard(self) -> GeneralResult<()>;
fn can_manage_guard(self) -> GeneralResult<()>;
}
fn permissions_error(error: sqlx::Error) -> GeneralError {
GeneralError {
status_code: StatusCode::INTERNAL_SERVER_ERROR,
message: Cow::Borrowed("Error getting permissions"),
error: Some(error.into()),
}
}
fn apply_guard(
result: sqlx::Result<PermissionType>,
func: impl FnOnce(PermissionType) -> GeneralResult<()>,
) -> GeneralResult<()> {
result.map_err(permissions_error).and_then(func)
}
impl PermissionExt for sqlx::Result<PermissionType> {
fn can_read_guard(self) -> GeneralResult<()> {
apply_guard(self, PermissionType::can_read_guard)
}
fn can_write_guard(self) -> GeneralResult<()> {
apply_guard(self, PermissionType::can_write_guard)
}
fn can_manage_guard(self) -> GeneralResult<()> {
apply_guard(self, PermissionType::can_manage_guard)
}
}
pub async fn insert( pub async fn insert(
user_id: i32, user_id: i32,
folder_id: Uuid, folder_id: Uuid,
@ -97,10 +123,10 @@ pub async fn insert(
pub async fn get_all_for_folder( pub async fn get_all_for_folder(
folder_id: Uuid, folder_id: Uuid,
pool: &Pool, pool: &Pool,
) -> sqlx::Result<HashMap<String, PermissionRaw>> { ) -> sqlx::Result<HashMap<i32, PermissionRaw>> {
sqlx::query_file!("sql/get_all_permissions_for_folder.sql", folder_id) sqlx::query_file!("sql/get_all_permissions_for_folder.sql", folder_id)
.fetch(pool) .fetch(pool)
.map_ok(|record| (record.username, record.permission_type)) .map_ok(|record| (record.user_id, record.permission_type))
.try_collect() .try_collect()
.await .await
} }
@ -112,10 +138,16 @@ pub async fn delete_for_folder(folder_id: Uuid, user_id: i32, pool: &Pool) -> sq
.map(|_| ()) .map(|_| ())
} }
pub async fn get_top_level_permitted_folders(user_id: i32, pool: &Pool) -> sqlx::Result<Vec<Uuid>> { pub async fn get_top_level_permitted_folders(
sqlx::query_file!("sql/get_top_level_folder.sql", user_id) user_id: i32,
pool: &Pool,
) -> sqlx::Result<Vec<FolderWithoutParentId>> {
sqlx::query_file_as!(
FolderWithoutParentId,
"sql/get_top_level_folder.sql",
user_id
)
.fetch(pool) .fetch(pool)
.map_ok(|record| record.folder_id)
.try_collect() .try_collect()
.await .await
} }

107
src/db/users.rs Normal file
View File

@ -0,0 +1,107 @@
use crate::prelude::*;
/// Creates user and returns its id
///
/// Inserts the user row and its ROOT folder in a single transaction so a
/// failure between the two statements cannot leave a user without a root
/// folder. Returns `Ok(None)` when the username or email is already taken
/// (the `ON CONFLICT DO NOTHING` path).
pub async fn create_user(
    user_name: &str,
    user_email: &str,
    hashed_password: &[u8],
    pool: &Pool,
) -> sqlx::Result<Option<i32>> {
    let mut tx = pool.begin().await?;
    let Some(record) = sqlx::query!(
        "INSERT INTO users(username, email, hashed_password) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING RETURNING user_id",
        user_name,
        user_email,
        hashed_password
    )
    .fetch_optional(&mut *tx)
    .await?
    else {
        // Conflict: nothing was inserted, so the implicit rollback is a no-op.
        return Ok(None);
    };
    let id = record.user_id;
    // Every user owns exactly one ROOT folder, created together with the user.
    sqlx::query!(
        "INSERT INTO folders(owner_id, folder_name) VALUES ($1, $2)",
        id,
        "ROOT"
    )
    .execute(&mut *tx)
    .await?;
    tx.commit().await?;
    Ok(Some(id))
}
/// Deletes the user and returns the files that must be deleted
///
/// Streams the `file_id`s produced by `sql/delete_user.sql` so the caller can
/// remove the corresponding blobs from storage after the database rows are gone.
/// The stream borrows `pool` for its lifetime.
pub fn delete_user(user_id: i32, pool: &Pool) -> impl Stream<Item = sqlx::Result<Uuid>> + '_ {
    sqlx::query_file!("sql/delete_user.sql", user_id)
        .fetch(pool)
        .map_ok(|record| record.file_id)
}
/// Public profile fields of a user, serialized in API responses.
/// Deliberately excludes the password hash.
#[derive(Serialize, Debug)]
pub struct UserInfo {
    user_id: i32,
    username: String,
    email: String,
}
/// Overwrites the username and email of an existing user and returns the
/// updated profile. Fails with `RowNotFound` if the user does not exist.
pub async fn update(
    user_id: i32,
    username: &str,
    email: &str,
    pool: &Pool,
) -> sqlx::Result<UserInfo> {
    let query = sqlx::query_as!(
        UserInfo,
        "UPDATE users SET username = $2, email = $3 WHERE user_id = $1 RETURNING user_id, username, email",
        user_id,
        username,
        email
    );
    query.fetch_one(pool).await
}
/// Returns whether a user with the given id exists.
pub async fn exists(user_id: i32, pool: &Pool) -> sqlx::Result<bool> {
    let record = sqlx::query!(
        "SELECT EXISTS(SELECT user_id FROM users WHERE user_id = $1)",
        user_id
    )
    .fetch_one(pool)
    .await?;
    // sqlx types `EXISTS(...)` as nullable; treat NULL as "does not exist".
    Ok(record.exists.unwrap_or(false))
}
/// Fetches the public info of a single user; `None` if no such user.
pub async fn get(user_id: i32, pool: &Pool) -> sqlx::Result<Option<UserInfo>> {
    let user = sqlx::query_as!(
        UserInfo,
        "SELECT user_id, username, email FROM users WHERE user_id = $1",
        user_id
    )
    .fetch_optional(pool)
    .await?;
    Ok(user)
}
/// Gets the hashed password field by either the email or the username
///
/// Returns `(user_id, hashed_password)` for the first matching row, or `None`
/// when no user matches the search string.
pub async fn get_hash(search_string: &str, pool: &Pool) -> sqlx::Result<Option<(i32, Vec<u8>)>> {
    sqlx::query!(
        "SELECT user_id, hashed_password FROM users WHERE username = $1 OR email = $1",
        search_string
    )
    .fetch_optional(pool)
    .await
    .map(|record| record.map(|record| (record.user_id, record.hashed_password)))
}
/// A single user-search hit returned to the client.
#[derive(Serialize, Debug)]
pub struct UserSearch {
    pub user_id: i32,
    pub username: String,
    pub email: String,
    // Match quality score — presumably from the SQL similarity function in
    // sql/search_for_user.sql; higher means a closer match. TODO confirm.
    pub similarity: f32,
}
/// Searches users by name or email, streaming matches with a similarity score.
///
/// NOTE(review): the returned stream's lifetime is tied only to `pool` (`'a`),
/// not to `search_string` — the query macro presumably binds the argument by
/// value before the stream is created; confirm this compiles as intended.
pub fn search_for_user<'a>(
    search_string: &str,
    pool: &'a Pool,
) -> BoxStream<'a, sqlx::Result<UserSearch>> {
    sqlx::query_file_as!(UserSearch, "sql/search_for_user.sql", search_string).fetch(pool)
}

View File

@ -1,4 +1,4 @@
pub use crate::prelude::*; use crate::prelude::*;
#[derive(Deserialize, Debug)] #[derive(Deserialize, Debug)]
pub struct Params { pub struct Params {
@ -9,24 +9,20 @@ pub async fn delete(
Query(params): Query<Params>, Query(params): Query<Params>,
State(state): State<AppState>, State(state): State<AppState>,
claims: Claims, claims: Claims,
) -> Result<StatusCode, StatusCode> { ) -> GeneralResult<EmptyResponse> {
db::file::get_permissions(params.file_id, claims.user_id, &state.pool) db::file::get_permissions(params.file_id, claims.user_id, &state.pool)
.await .await
.handle_internal()?
.can_write_guard()?; .can_write_guard()?;
let deleted = db::file::delete(params.file_id, &state.pool) db::file::delete(params.file_id, &state.pool)
.await .await
.handle_internal()?; .handle_internal("Error deleting the file")?;
if !deleted {
return Err(StatusCode::NOT_FOUND); // Will not happen most of the time due to can write guard
}
state state
.storage .storage
.delete(params.file_id) .delete(params.file_id)
.await .await
.handle_internal()?; .handle_internal("Error deleting the file")?;
Ok(StatusCode::NO_CONTENT) Ok(EmptyResponse)
} }

View File

@ -12,16 +12,15 @@ pub async fn download(
Query(params): Query<Params>, Query(params): Query<Params>,
State(state): State<AppState>, State(state): State<AppState>,
claims: Claims, claims: Claims,
) -> Result<impl IntoResponse, StatusCode> { ) -> GeneralResult<impl IntoResponse> {
db::file::get_permissions(params.file_id, claims.user_id, &state.pool) db::file::get_permissions(params.file_id, claims.user_id, &state.pool)
.await .await
.handle_internal()?
.can_read_guard()?; .can_read_guard()?;
let mut name = db::file::get_name(params.file_id, &state.pool) let mut name = db::file::get_name(params.file_id, &state.pool)
.await .await
.handle_internal()? .handle_internal("Error getting file info")?
.ok_or(StatusCode::NOT_FOUND)?; .item_not_found()?;
name = name name = name
.chars() .chars()
.fold(String::with_capacity(name.len()), |mut result, char| { .fold(String::with_capacity(name.len()), |mut result, char| {
@ -32,7 +31,11 @@ pub async fn download(
result result
}); });
let file = state.storage.read(params.file_id).await.handle_internal()?; let file = state
.storage
.read(params.file_id)
.await
.handle_internal("Error reading the file")?;
let body = Body::from_stream(ReaderStream::new(file)); let body = Body::from_stream(ReaderStream::new(file));
let disposition = format!("attachment; filename=\"{name}\""); let disposition = format!("attachment; filename=\"{name}\"");
let headers = [(header::CONTENT_DISPOSITION, disposition)]; let headers = [(header::CONTENT_DISPOSITION, disposition)];

View File

@ -12,18 +12,22 @@ pub async fn modify(
State(state): State<AppState>, State(state): State<AppState>,
claims: Claims, claims: Claims,
mut multipart: Multipart, mut multipart: Multipart,
) -> Result<StatusCode, StatusCode> { ) -> GeneralResult<EmptyResponse> {
db::file::get_permissions(params.file_id, claims.user_id, &state.pool) db::file::get_permissions(params.file_id, claims.user_id, &state.pool)
.await .await
.handle_internal()?
.can_write_guard()?; .can_write_guard()?;
// Very weird work around // Very weird work around to get the first file in multipart
let mut field = loop { let mut field = loop {
match multipart.next_field().await { match multipart.next_field().await {
Ok(Some(field)) if field.file_name().is_some() => break field, Ok(Some(field)) if field.file_name().is_some() => break field,
Ok(Some(_)) => continue, Ok(Some(_)) => continue,
_ => return Err(StatusCode::BAD_REQUEST), _ => {
return Err(GeneralError::message(
StatusCode::BAD_REQUEST,
"No file in the multipart",
))
}
} }
}; };
@ -31,19 +35,22 @@ pub async fn modify(
.storage .storage
.write(params.file_id) .write(params.file_id)
.await .await
.handle_internal()? .handle_internal("Error writing to the file")?
.ok_or(StatusCode::NOT_FOUND)?; .item_not_found()?;
let (hash, size) = match crate::FileStorage::write_to_file(&mut file, &mut field).await { let (hash, size) = crate::FileStorage::write_to_file(&mut file, &mut field)
Ok(values) => values, .await
Err(err) => { .map_err(|err| {
tracing::warn!(%err); tracing::warn!(%err);
return Err(StatusCode::INTERNAL_SERVER_ERROR); GeneralError::message(
} StatusCode::INTERNAL_SERVER_ERROR,
}; "Error writing to the file",
)
})?;
db::file::update(params.file_id, size, hash, &state.pool) db::file::update(params.file_id, size, hash, &state.pool)
.await .await
.handle_internal()?; .handle_internal("Error updating the file")?;
Ok(StatusCode::NO_CONTENT) Ok(EmptyResponse)
} }

View File

@ -1,7 +1,9 @@
use std::collections::HashMap; use std::{
collections::{HashMap, HashSet},
fmt::Write as _,
};
use axum::extract::multipart::{self, Multipart}; use axum::extract::multipart::{self, Multipart};
use tokio::io::AsyncWrite;
use crate::prelude::*; use crate::prelude::*;
@ -10,25 +12,60 @@ pub struct Params {
parent_folder: Uuid, parent_folder: Uuid,
} }
#[derive(Serialize, Debug, Default)]
pub struct Response {
success: HashMap<Box<str>, Uuid>,
error: HashMap<Box<str>, &'static str>,
}
fn validate_name(name: &str, existing_names: &HashSet<String>) -> Result<(), &'static str> {
if name.len() > 255 {
return Err("Name too long");
}
if existing_names.contains(name) {
return Err("Item with that name already exists");
}
Ok(())
}
async fn create_file( async fn create_file(
file_id: Uuid, storage: &crate::FileStorage,
file: impl AsyncWrite + Unpin,
file_name: &str, file_name: &str,
field: &mut multipart::Field<'_>, field: &mut multipart::Field<'_>,
parent_folder: Uuid, parent_folder: Uuid,
pool: &Pool, pool: &Pool,
) -> bool { ) -> anyhow::Result<Uuid> {
let (hash, size) = match crate::FileStorage::write_to_file(file, field).await { let (file_id, file) = storage.create().await?;
Ok(values) => values, let result = async {
Err(err) => { let (hash, size) = crate::FileStorage::write_to_file(file, field).await?;
tracing::warn!(%err); db::file::insert(file_id, parent_folder, file_name, size, hash, pool).await?;
return false; anyhow::Result::Ok(())
} }
}; .await;
db::file::insert(file_id, parent_folder, file_name, size, hash, pool) match result {
Ok(()) => Ok(file_id),
Err(err) => {
let _ = storage.delete(file_id).await;
Err(err)
}
}
}
async fn parse_field(
field: &mut multipart::Field<'_>,
name: &str,
storage: &crate::FileStorage,
parent_folder: Uuid,
pool: &Pool,
existing_names: &HashSet<String>,
) -> Result<Uuid, &'static str> {
validate_name(name, existing_names)?;
create_file(storage, name, field, parent_folder, pool)
.await .await
.inspect_err(|err| tracing::warn!(%err)) .map_err(|err| {
.is_ok() tracing::warn!(%err, "Error creating the file");
"Error creating the file"
})
} }
pub async fn upload( pub async fn upload(
@ -36,42 +73,57 @@ pub async fn upload(
State(state): State<AppState>, State(state): State<AppState>,
claims: Claims, claims: Claims,
mut multi: Multipart, mut multi: Multipart,
) -> Result<Json<HashMap<String, Uuid>>, StatusCode> { ) -> GeneralResult<Json<Response>> {
db::folder::get_permissions(params.parent_folder, claims.user_id, &state.pool) db::folder::get_permissions(params.parent_folder, claims.user_id, &state.pool)
.await .await
.handle_internal()?
.can_write_guard()?; .can_write_guard()?;
let existing_names = db::folder::get_names(params.parent_folder, &state.pool) let existing_names: HashSet<String> = db::folder::get_names(params.parent_folder, &state.pool)
.try_collect()
.await .await
.handle_internal()?; .handle_internal("Error getting existing names")?;
let mut result = HashMap::new();
let mut response = Response::default();
while let Ok(Some(mut field)) = multi.next_field().await { while let Ok(Some(mut field)) = multi.next_field().await {
let Some(file_name) = field.file_name().map(ToOwned::to_owned) else { let Some(file_name) = field.file_name().map(Box::<str>::from) else {
continue; continue;
}; };
if existing_names.contains(&file_name) {
continue; let parse_result = parse_field(
}
let Ok((file_id, mut file)) = state.storage.create().await else {
tracing::warn!("Couldn't create uuid for new file");
continue;
};
let is_success = create_file(
file_id,
&mut file,
&file_name,
&mut field, &mut field,
&file_name,
&state.storage,
params.parent_folder, params.parent_folder,
&state.pool, &state.pool,
&existing_names,
) )
.await; .await;
if !is_success {
let _ = state.storage.delete(file_id).await; match parse_result {
continue; Ok(uuid) => {
response.success.insert(file_name, uuid);
}
Err(err) => {
response.error.insert(file_name, err);
}
} }
result.insert(file_name, file_id);
} }
Ok(Json(result)) if !response.success.is_empty() {
return Ok(Json(response));
}
if response.error.is_empty() {
return Err(GeneralError::message(
StatusCode::BAD_REQUEST,
"No files sent",
));
}
let mut message = "No file successfully uploaded:".to_owned();
for (key, val) in response.error {
write!(message, "\n{key}: {val}").unwrap();
}
Err(GeneralError::message(StatusCode::BAD_REQUEST, message))
} }

View File

@ -7,31 +7,33 @@ pub struct Params {
} }
pub async fn create( pub async fn create(
State(state): State<AppState>, State(pool): State<Pool>,
claims: Claims, claims: Claims,
Json(params): Json<Params>, Json(params): Json<Params>,
) -> Result<Json<Uuid>, StatusCode> { ) -> GeneralResult<Json<Uuid>> {
db::folder::get_permissions(params.parent_folder_id, claims.user_id, &state.pool) db::folder::get_permissions(params.parent_folder_id, claims.user_id, &pool)
.await .await
.handle_internal()?
.can_write_guard()?; .can_write_guard()?;
let exists = if params.folder_name.len() > 255 {
db::folder::exists_by_name(params.parent_folder_id, &params.folder_name, &state.pool) return Err(GeneralError::message(
StatusCode::BAD_REQUEST,
"Folder name too long",
));
}
let exists = db::folder::name_exists(params.parent_folder_id, &params.folder_name, &pool)
.await .await
.handle_internal()?; .handle_internal("Error getting existing names")?;
if exists { if exists {
return Err(StatusCode::CONFLICT); return Err(GeneralError::message(
StatusCode::CONFLICT,
"Name already taken",
));
} }
let id = db::folder::insert( db::folder::insert(params.parent_folder_id, &params.folder_name, &pool)
params.parent_folder_id,
claims.user_id,
&params.folder_name,
&state.pool,
)
.await .await
.handle_internal()?; .handle_internal("Error creating the folder")
.map(Json)
Ok(Json(id))
} }

View File

@ -8,28 +8,30 @@ pub struct Params {
pub async fn delete( pub async fn delete(
State(state): State<AppState>, State(state): State<AppState>,
claims: Claims, claims: Claims,
Json(params): Json<Params>, Query(params): Query<Params>,
) -> Result<(), StatusCode> { ) -> GeneralResult<EmptyResponse> {
let root = db::folder::get_root(claims.user_id, &state.pool) let root = db::folder::get_root(claims.user_id, &state.pool)
.await .await
.handle_internal()?; .handle_internal("Error getting the root folder")?;
if params.folder_id == root { if params.folder_id == root {
return Err(StatusCode::BAD_REQUEST); return Err(GeneralError::message(
StatusCode::BAD_REQUEST,
"Cannot delete the root folder",
));
} }
db::folder::get_permissions(params.folder_id, claims.user_id, &state.pool) db::folder::get_permissions(params.folder_id, claims.user_id, &state.pool)
.await .await
.handle_internal()?
.can_write_guard()?; .can_write_guard()?;
let files_to_delete = db::folder::delete(params.folder_id, &state.pool)
.await
.handle_internal()?;
let storage = &state.storage; let storage = &state.storage;
futures::stream::iter(files_to_delete) db::folder::delete(params.folder_id, &state.pool)
.for_each_concurrent(5, |file| async move { .try_for_each_concurrent(5, |file| async move {
let _ = storage.delete(file).await; let _ = storage.delete(file).await;
})
.await;
Ok(()) Ok(())
})
.await
.handle_internal("Error deleting the fodler")?;
Ok(EmptyResponse)
} }

View File

@ -0,0 +1,56 @@
use db::{file::FileWithoutParentId, folder::FolderWithoutParentId};
use tokio::try_join;
use super::list::Params;
use crate::prelude::*;
/// A folder subtree: the folder's own fields plus its child folders and files.
#[derive(Serialize, Debug)]
pub struct FolderStructure {
    // Flattened so the JSON object reads as a folder with extra `folders`/`files` keys.
    #[serde(flatten)]
    folder_base: FolderWithoutParentId,
    folders: Vec<FolderStructure>,
    files: Vec<FileWithoutParentId>,
}
impl From<FolderWithoutParentId> for FolderStructure {
fn from(value: FolderWithoutParentId) -> Self {
FolderStructure {
folder_base: value,
folders: Vec::new(),
files: Vec::new(),
}
}
}
/// Returns the whole folder tree rooted at `params.folder_id` (or the user's
/// root when the id is absent), with files and subfolders filled in at every
/// level.
pub async fn structure(
    Query(params): Query<Params>,
    State(pool): State<Pool>,
    claims: Claims,
) -> GeneralResult<Json<FolderStructure>> {
    // Resolves the optional id and checks the user may read it.
    let folder_id = db::folder::process_id(params.folder_id, claims.user_id, &pool)
        .await
        .handle_internal("Error processing id")?
        .item_not_found()?;
    let folder = db::folder::get_by_id(folder_id, &pool)
        .await
        .handle_internal("Error getting folder info")?
        .item_not_found()?;
    let mut response: FolderStructure = folder.into();
    // Iterative depth-first fill: the stack holds &mut nodes whose children
    // have not been fetched yet, avoiding async recursion.
    let mut stack = vec![&mut response];
    while let Some(folder) = stack.pop() {
        // Fetch this node's files and subfolders concurrently.
        let (files, folders) = try_join!(
            db::file::get_files(folder.folder_base.folder_id, &pool).try_collect(),
            db::folder::get_folders(folder.folder_base.folder_id, claims.user_id, &pool)
                .map_ok(Into::into)
                .try_collect()
        )
        .handle_internal("Error getting folder contents")?;
        folder.folders = folders;
        folder.files = files;
        // Newly attached subfolders still need their own children fetched.
        stack.extend(folder.folders.iter_mut());
    }
    Ok(Json(response))
}

View File

@ -4,7 +4,7 @@ use crate::prelude::*;
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
pub struct Params { pub struct Params {
folder_id: Option<Uuid>, pub(super) folder_id: Option<Uuid>,
} }
#[derive(Debug, Serialize)] #[derive(Debug, Serialize)]
@ -16,19 +16,19 @@ pub struct Response {
pub async fn list( pub async fn list(
Query(params): Query<Params>, Query(params): Query<Params>,
State(state): State<AppState>, State(pool): State<Pool>,
claims: Claims, claims: Claims,
) -> Result<Json<Response>, StatusCode> { ) -> GeneralResult<Json<Response>> {
let folder_id = db::folder::get_by_id(params.folder_id, claims.user_id, &state.pool) let folder_id = db::folder::process_id(params.folder_id, claims.user_id, &pool)
.await .await
.handle_internal()? .handle_internal("Error processing id")?
.ok_or(StatusCode::NOT_FOUND)?; .handle(StatusCode::NOT_FOUND, "Item not found")?;
let (files, folders) = try_join!( let (files, folders) = try_join!(
db::file::get_files(folder_id, &state.pool), db::file::get_files(folder_id, &pool).try_collect(),
db::folder::get_folders(folder_id, &state.pool) db::folder::get_folders(folder_id, claims.user_id, &pool).try_collect()
) )
.handle_internal()?; .handle_internal("Error getting folder contents")?;
Ok(Json(Response { Ok(Json(Response {
folder_id, folder_id,

View File

@ -1,3 +1,4 @@
pub mod create; pub mod create;
pub mod delete; pub mod delete;
pub mod get_structure;
pub mod list; pub mod list;

View File

@ -1,3 +1,4 @@
pub mod file; pub mod file;
pub mod folder; pub mod folder;
pub mod permissions; pub mod permissions;
pub mod users;

View File

@ -7,20 +7,19 @@ pub struct Params {
} }
pub async fn delete( pub async fn delete(
State(pool): State<Pool>,
claims: Claims, claims: Claims,
State(state): State<AppState>, Query(params): Query<Params>,
Json(params): Json<Params>, ) -> GeneralResult<EmptyResponse> {
) -> Result<StatusCode, StatusCode> {
if params.user_id != claims.user_id { if params.user_id != claims.user_id {
db::folder::get_permissions(params.folder_id, claims.user_id, &state.pool) db::folder::get_permissions(params.folder_id, claims.user_id, &pool)
.await .await
.handle_internal()?
.can_manage_guard()?; .can_manage_guard()?;
} }
db::permissions::delete_for_folder(params.folder_id, params.user_id, &state.pool) db::permissions::delete_for_folder(params.folder_id, params.user_id, &pool)
.await .await
.handle_internal()?; .handle_internal("Error deleting the permissions")?;
Ok(StatusCode::NO_CONTENT) Ok(EmptyResponse)
} }

View File

@ -10,17 +10,16 @@ pub struct Params {
} }
pub async fn get( pub async fn get(
State(pool): State<Pool>,
Query(params): Query<Params>, Query(params): Query<Params>,
claims: Claims, claims: Claims,
State(state): State<AppState>, ) -> GeneralResult<Json<HashMap<i32, PermissionRaw>>> {
) -> Result<Json<HashMap<String, PermissionRaw>>, StatusCode> { db::folder::get_permissions(params.folder_id, claims.user_id, &pool)
db::folder::get_permissions(params.folder_id, claims.user_id, &state.pool)
.await .await
.handle_internal()? .can_read_guard()?;
.can_manage_guard()?;
let permissions = db::permissions::get_all_for_folder(params.folder_id, &state.pool) db::permissions::get_all_for_folder(params.folder_id, &pool)
.await .await
.handle_internal()?; .handle_internal("Error getting permissions")
Ok(Json(permissions)) .map(Json)
} }

View File

@ -0,0 +1,13 @@
use db::folder::FolderWithoutParentId;
use crate::prelude::*;
pub async fn get_top_level(
State(pool): State<Pool>,
claims: Claims,
) -> GeneralResult<Json<Vec<FolderWithoutParentId>>> {
db::permissions::get_top_level_permitted_folders(claims.user_id, &pool)
.await
.handle_internal("Error reading from the database")
.map(Json)
}

View File

@ -1,11 +0,0 @@
use crate::prelude::*;
pub async fn get_top_level(
State(state): State<AppState>,
claims: Claims,
) -> Result<Json<Vec<Uuid>>, StatusCode> {
let folders = db::permissions::get_top_level_permitted_folders(claims.user_id, &state.pool)
.await
.handle_internal()?;
Ok(Json(folders))
}

View File

@ -1,4 +1,4 @@
pub mod delete; pub mod delete;
pub mod get; pub mod get;
pub mod get_top_level_permitted_folders; pub mod get_top_level;
pub mod set; pub mod set;

View File

@ -1,6 +1,4 @@
use db::permissions::PermissionRaw; use crate::{db::permissions::PermissionRaw, prelude::*};
use crate::prelude::*;
#[derive(Deserialize, Debug)] #[derive(Deserialize, Debug)]
pub struct Params { pub struct Params {
@ -11,29 +9,49 @@ pub struct Params {
pub async fn set( pub async fn set(
claims: Claims, claims: Claims,
State(state): State<AppState>, State(pool): State<Pool>,
Json(params): Json<Params>, Json(params): Json<Params>,
) -> Result<StatusCode, StatusCode> { ) -> GeneralResult<EmptyResponse> {
let root = db::folder::get_root(claims.user_id, &state.pool) let root = db::folder::get_root(claims.user_id, &pool)
.await .await
.handle_internal()?; .handle_internal("Error getting the root folder")?;
if params.folder_id == root { if params.folder_id == root {
return Err(StatusCode::BAD_REQUEST); return Err(GeneralError::message(
StatusCode::BAD_REQUEST,
"Cannot set permissions for the root folder",
));
} }
db::folder::get_permissions(params.folder_id, claims.user_id, &state.pool) db::folder::get_permissions(params.folder_id, claims.user_id, &pool)
.await .await
.handle_internal()?
.can_manage_guard()?; .can_manage_guard()?;
if params.user_id == claims.user_id {
return Err(GeneralError::message(
StatusCode::BAD_REQUEST,
"Cannot set your own permissions",
));
}
let folder_info = db::folder::get_by_id(params.folder_id, &pool)
.await
.handle_internal("Error getting folder info")?
.item_not_found()?;
if folder_info.owner_id == params.user_id {
return Err(GeneralError::message(
StatusCode::BAD_REQUEST,
"Cannot set permissions of the folder's owner",
));
}
db::permissions::insert( db::permissions::insert(
params.user_id, params.user_id,
params.folder_id, params.folder_id,
params.permission_type, params.permission_type,
&state.pool, &pool,
) )
.await .await
.handle_internal()?; .handle_internal("Error writing to the database")?;
Ok(StatusCode::NO_CONTENT) Ok(EmptyResponse)
} }

View File

@ -0,0 +1,19 @@
use std::time::Duration;
use crate::prelude::*;
/// Deletes the authenticated user's account.
///
/// Removes the user's database rows and best-effort deletes the returned
/// files from storage with bounded concurrency; storage failures are ignored
/// so an orphaned blob cannot block account deletion.
///
/// Fix: removed a leftover debug `tokio::time::sleep(Duration::from_secs(100))`
/// that delayed every account deletion by 100 seconds.
pub async fn delete(
    State(AppState { pool, ref storage }): State<AppState>,
    claims: Claims,
) -> GeneralResult<EmptyResponse> {
    db::users::delete_user(claims.user_id, &pool)
        .try_for_each_concurrent(5, |file_id| async move {
            // Intentionally ignore storage errors: the DB row is already gone.
            let _ = storage.delete(file_id).await;
            Ok(())
        })
        .await
        .handle_internal("Error deleting the user")?;
    Ok(EmptyResponse)
}

View File

@ -0,0 +1,26 @@
use crate::prelude::*;
/// Query parameters for the user-info endpoint.
#[derive(Deserialize, Debug)]
pub struct Params {
    user_id: i32,
}
type Response = GeneralResult<Json<db::users::UserInfo>>;
/// Returns the public info of the user named in the query parameters.
pub async fn get(State(pool): State<Pool>, Query(params): Query<Params>) -> Response {
    let info = db::users::get(params.user_id, &pool)
        .await
        .handle_internal("Error getting the user")?;
    info.handle(StatusCode::NOT_FOUND, "User not found").map(Json)
}
/// Shortcut for [`get`] with the id taken from the caller's auth token.
pub async fn current(state: State<Pool>, claims: Claims) -> Response {
    let params = Params {
        user_id: claims.user_id,
    };
    get(state, Query(params)).await
}

View File

@ -0,0 +1,26 @@
use axum::Form;
use crate::{
auth::{authenticate_user, Token},
prelude::*,
};
/// Credentials submitted via the login form.
#[derive(Deserialize, Debug)]
pub struct Params {
    username: String,
    password: String,
}
pub async fn login(
State(pool): State<Pool>,
Form(payload): Form<Params>,
) -> GeneralResult<Json<Token>> {
let user_id = authenticate_user(&payload.username, &payload.password, &pool)
.await
.handle_internal("Error getting user from database")?
.handle(
StatusCode::NOT_FOUND,
"User with this name and password doesn't exist",
)?;
Claims::new(user_id).encode().map(Json)
}

View File

@ -0,0 +1,6 @@
pub mod delete;
pub mod get;
pub mod login;
pub mod put;
pub mod register;
pub mod search;

View File

@ -0,0 +1,23 @@
use validator::Validate;
use crate::prelude::*;
/// JSON body for `PUT /users` (profile update).
#[derive(Deserialize, Debug, Validate)]
pub struct Params {
    // New username, 3 to 10 characters.
    #[validate(length(min = 3, max = 10))]
    username: String,
    // New email; must parse as a valid email address.
    #[validate(email)]
    email: String,
}
pub async fn put(
State(pool): State<Pool>,
claims: Claims,
Json(params): Json<Params>,
) -> GeneralResult<Json<db::users::UserInfo>> {
params.validate()?;
db::users::update(claims.user_id, &params.username, &params.email, &pool)
.await
.handle_internal("Error updating the user")
.map(Json)
}

View File

@ -0,0 +1,64 @@
use axum::Form;
use itertools::Itertools;
use validator::{Validate, ValidationError};
use crate::{
auth::{HashedBytes, Token},
prelude::*,
};
/// Registration form fields (submitted as a form body).
#[derive(Deserialize, Debug, Validate)]
pub struct Params {
    // 3 to 10 characters.
    #[validate(length(min = 3, max = 10))]
    username: String,
    // Must parse as a valid email address.
    #[validate(email)]
    email: String,
    // At least 6 characters and must satisfy `validate_password` below.
    #[validate(length(min = 6), custom(function = "validate_password"))]
    password: String,
}
/// Custom validator for the registration password.
///
/// Requires at least one lowercase letter, one uppercase letter, one ASCII
/// digit and one "special" character (anything that is none of the former
/// three). On failure the error message lists every missing category, e.g.
/// "No upper No numbers".
fn validate_password(password: &str) -> Result<(), ValidationError> {
    let (mut lower, mut upper, mut digit, mut special) = (false, false, false, false);
    for c in password.chars() {
        if c.is_lowercase() {
            lower = true;
        } else if c.is_uppercase() {
            upper = true;
        } else if c.is_ascii_digit() {
            digit = true;
        } else {
            // Anything that is not a letter or an ASCII digit counts as
            // a special character.
            special = true;
        }
    }
    let missing: Vec<&str> = [
        (lower, "No lower"),
        (upper, "No upper"),
        (digit, "No numbers"),
        (special, "No special"),
    ]
    .into_iter()
    .filter(|(present, _)| !present)
    .map(|(_, label)| label)
    .collect();
    if missing.is_empty() {
        Ok(())
    } else {
        // Same space-separated message the previous itertools-based
        // formatting produced.
        Err(ValidationError::new("invalid_password").with_message(missing.join(" ").into()))
    }
}
pub async fn register(
State(pool): State<Pool>,
Form(params): Form<Params>,
) -> GeneralResult<Json<Token>> {
params.validate()?;
let password = HashedBytes::hash_bytes(params.password.as_bytes()).as_bytes();
let id = db::users::create_user(&params.username, &params.email, &password, &pool)
.await
.handle_internal("Error creating the user")?
.handle(
StatusCode::BAD_REQUEST,
"The username or the email are taken",
)?;
Claims::new(id).encode().map(Json)
}

View File

@ -0,0 +1,19 @@
use crate::prelude::*;
/// Query parameters for `GET /users/search`.
#[derive(Deserialize, Debug)]
pub struct Params {
    // Free-text search string; matched against users by the database's
    // similarity search.
    search_string: String,
}
pub async fn search(
State(pool): State<Pool>,
Query(params): Query<Params>,
) -> GeneralResult<Json<Vec<db::users::UserSearch>>> {
db::users::search_for_user(&params.search_string, &pool)
.take(20)
.try_filter(|user| future::ready(user.similarity > 0.1))
.try_collect()
.await
.handle_internal("Error getting users from the database")
.map(Json)
}

View File

@ -1,28 +1,103 @@
use axum::http::StatusCode; use std::borrow::Cow;
use axum::{http::StatusCode, response::IntoResponse};
type BoxError = Box<dyn std::error::Error>; type BoxError = Box<dyn std::error::Error>;
pub fn handle_error(error: impl Into<BoxError>) { /// Common error type for the project
let error: BoxError = error.into(); pub struct GeneralError {
tracing::error!(error); /// Response status code
pub status_code: StatusCode,
/// Message to send to the user
pub message: Cow<'static, str>,
/// Error to log
pub error: Option<BoxError>,
} }
pub trait ErrorHandlingExt<T, E> impl GeneralError {
pub fn message(status_code: StatusCode, message: impl Into<Cow<'static, str>>) -> Self {
Self {
status_code,
message: message.into(),
error: None,
}
}
pub const fn const_message(status_code: StatusCode, message: &'static str) -> Self {
Self {
status_code,
message: Cow::Borrowed(message),
error: None,
}
}
}
impl IntoResponse for GeneralError {
fn into_response(self) -> axum::response::Response {
if let Some(err) = self.error {
tracing::error!(err, message = %self.message, status_code = ?self.status_code);
}
(self.status_code, self.message).into_response()
}
}
impl From<validator::ValidationErrors> for GeneralError {
fn from(value: validator::ValidationErrors) -> Self {
GeneralError::message(StatusCode::BAD_REQUEST, value.to_string())
}
}
pub type GeneralResult<T> = Result<T, GeneralError>;
pub trait ErrorHandlingExt<T>
where where
Self: Sized, Self: Sized,
{ {
fn handle(self, code: StatusCode) -> Result<T, StatusCode>; fn handle(
self,
status_code: StatusCode,
message: impl Into<Cow<'static, str>>,
) -> GeneralResult<T>;
fn handle_internal(self) -> Result<T, StatusCode> { fn handle_internal(self, message: impl Into<Cow<'static, str>>) -> GeneralResult<T> {
self.handle(StatusCode::INTERNAL_SERVER_ERROR) self.handle(StatusCode::INTERNAL_SERVER_ERROR, message)
} }
} }
impl<T, E: Into<BoxError>> ErrorHandlingExt<T, E> for Result<T, E> { impl<T, E: Into<BoxError>> ErrorHandlingExt<T> for Result<T, E> {
fn handle(self, code: StatusCode) -> Result<T, StatusCode> { fn handle(
self.map_err(|err| { self,
handle_error(err); status_code: StatusCode,
code message: impl Into<Cow<'static, str>>,
) -> GeneralResult<T> {
self.map_err(|err| GeneralError {
status_code,
message: message.into(),
error: Some(err.into()),
}) })
} }
} }
impl<T> ErrorHandlingExt<T> for Option<T> {
fn handle(
self,
status_code: StatusCode,
message: impl Into<Cow<'static, str>>,
) -> GeneralResult<T> {
self.ok_or_else(|| GeneralError {
status_code,
message: message.into(),
error: None,
})
}
}
pub trait ItemNotFoundExt<T> {
fn item_not_found(self) -> Result<T, GeneralError>;
}
impl<T> ItemNotFoundExt<T> for Option<T> {
fn item_not_found(self) -> GeneralResult<T> {
self.handle(StatusCode::NOT_FOUND, "Item not found")
}
}

View File

@ -5,14 +5,14 @@ use std::{
}; };
use axum::body::Bytes; use axum::body::Bytes;
use futures::{Stream, StreamExt};
use sha2::Digest as _; use sha2::Digest as _;
use tokio::{ use tokio::{
fs, fs,
io::{AsyncWrite, AsyncWriteExt, BufWriter}, io::{AsyncWrite, AsyncWriteExt, BufWriter},
}; };
use tokio_util::io::StreamReader; use tokio_util::io::StreamReader;
use uuid::Uuid;
use crate::prelude::*;
#[derive(Clone)] #[derive(Clone)]
pub struct FileStorage(Arc<Path>); pub struct FileStorage(Arc<Path>);
@ -21,7 +21,7 @@ impl FileStorage {
pub fn new() -> anyhow::Result<Self> { pub fn new() -> anyhow::Result<Self> {
let var = env::var("DRIVE_STORAGE_PATH"); let var = env::var("DRIVE_STORAGE_PATH");
let path_str = match var { let path_str = match var {
Ok(ref string) => string, Ok(ref string) => string.trim(),
Err(err) => { Err(err) => {
tracing::info!( tracing::info!(
%err, %err,
@ -55,7 +55,7 @@ impl FileStorage {
pub async fn create(&self) -> anyhow::Result<(Uuid, impl tokio::io::AsyncWrite)> { pub async fn create(&self) -> anyhow::Result<(Uuid, impl tokio::io::AsyncWrite)> {
let mut error = anyhow::anyhow!("Error creating a file"); let mut error = anyhow::anyhow!("Error creating a file");
for _ in 0..3 { for _ in 0..3 {
let file_id = Uuid::new_v4(); let file_id = Uuid::now_v7();
match self.create_inner(file_id).await { match self.create_inner(file_id).await {
Ok(file) => return Ok((file_id, file)), Ok(file) => return Ok((file_id, file)),
Err(err) => error = error.context(err), Err(err) => error = error.context(err),
@ -95,6 +95,7 @@ impl FileStorage {
const BUF_CAP: usize = 64 * 1024 * 1024; // 64 MiB const BUF_CAP: usize = 64 * 1024 * 1024; // 64 MiB
let mut hash = sha2::Sha512::new(); let mut hash = sha2::Sha512::new();
let mut size: i64 = 0; let mut size: i64 = 0;
let stream = stream.map(|value| { let stream = stream.map(|value| {
let bytes = value.map_err(io::Error::other)?; let bytes = value.map_err(io::Error::other)?;
hash.update(&bytes); hash.update(&bytes);
@ -104,10 +105,12 @@ impl FileStorage {
.ok_or_else(|| io::Error::other(anyhow::anyhow!("Size calculation overflow")))?; .ok_or_else(|| io::Error::other(anyhow::anyhow!("Size calculation overflow")))?;
io::Result::Ok(bytes) io::Result::Ok(bytes)
}); });
let mut reader = StreamReader::new(stream); let mut reader = StreamReader::new(stream);
let mut writer = BufWriter::with_capacity(BUF_CAP, file); let mut writer = BufWriter::with_capacity(BUF_CAP, file);
tokio::io::copy_buf(&mut reader, &mut writer).await?; tokio::io::copy_buf(&mut reader, &mut writer).await?;
writer.flush().await?; writer.flush().await?;
let hash = hash.finalize().to_vec(); let hash = hash.finalize().to_vec();
Ok((hash, size)) Ok((hash, size))
} }

View File

@ -4,41 +4,19 @@ mod endpoints;
mod errors; mod errors;
mod file_storage; mod file_storage;
mod prelude; mod prelude;
mod util;
use std::{env, net::Ipv4Addr};
use axum::Router;
use file_storage::FileStorage; use file_storage::FileStorage;
use tokio::net::TcpListener;
type Pool = sqlx::postgres::PgPool; type Pool = sqlx::postgres::PgPool;
#[derive(Clone)] #[derive(Clone, axum::extract::FromRef)]
struct AppState { struct AppState {
pool: Pool, pool: Pool,
storage: FileStorage, storage: FileStorage,
} }
async fn create_user(user_name: &str, user_email: &str, pool: &Pool) -> anyhow::Result<i32> { async fn create_test_users(pool: &Pool) -> anyhow::Result<()> {
let id = sqlx::query!(
"INSERT INTO users(username, email) VALUES ($1, $2) RETURNING user_id",
user_name,
user_email
)
.fetch_one(pool)
.await?
.user_id;
sqlx::query!(
"INSERT INTO folders(owner_id, folder_name) VALUES ($1, $2)",
id,
"ROOT"
)
.execute(pool)
.await?;
Ok(id)
}
async fn create_debug_users(pool: &Pool) -> anyhow::Result<()> {
let count = sqlx::query!("SELECT count(user_id) FROM users") let count = sqlx::query!("SELECT count(user_id) FROM users")
.fetch_one(pool) .fetch_one(pool)
.await? .await?
@ -47,72 +25,165 @@ async fn create_debug_users(pool: &Pool) -> anyhow::Result<()> {
if count > 0 { if count > 0 {
return Ok(()); return Ok(());
} }
let hash1 = auth::HashedBytes::hash_bytes(b"Password1").as_bytes();
let hash2 = auth::HashedBytes::hash_bytes(b"Password2").as_bytes();
tokio::try_join!( tokio::try_join!(
create_user("Test1", "test1@example.com", pool), db::users::create_user("Test1", "test1@example.com", &hash1, pool),
create_user("Test2", "test2@example.com", pool) db::users::create_user("Test2", "test2@example.com", &hash2, pool)
)?; )?;
Ok(()) Ok(())
} }
fn init_tracing() {
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
let mut err = None;
tracing_subscriber::registry()
.with(
tracing_subscriber::EnvFilter::try_from_default_env().unwrap_or_else(|inner_err| {
err = Some(inner_err);
"debug,sqlx=info,axum::rejection=trace".parse().unwrap()
}),
)
.with(tracing_subscriber::fmt::layer())
.init();
if let Some(err) = err {
tracing::info!(
%err,
"Error constructing EnvFilter, falling back to using the default"
);
}
}
#[tokio::main] #[tokio::main]
async fn main() -> anyhow::Result<()> { async fn main() -> anyhow::Result<()> {
// TODO: add utoipa and utoipauto for swagger use std::{env, net::Ipv4Addr};
if env::var("RUST_BACKTRACE").is_err() {
env::set_var("RUST_BACKTRACE", "1");
}
let _ = dotenvy::dotenv(); let _ = dotenvy::dotenv();
tracing_subscriber::fmt::init(); init_tracing();
auth::force_init_keys();
let pool = match env::var("DATABASE_URL") { let pool = match env::var("DATABASE_URL") {
Ok(url) => Pool::connect(&url).await?, Ok(url) => Pool::connect(&url).await?,
Err(err) => anyhow::bail!("Error getting database url: {err}"), Err(err) => anyhow::bail!("Error getting database url: {err}"),
}; };
sqlx::migrate!().run(&pool).await?; sqlx::migrate!().run(&pool).await?;
create_debug_users(&pool).await?; if let Ok("1") = env::var("DEVELOPMENT").as_deref().map(str::trim_ascii) {
create_test_users(&pool).await?;
}
let storage = file_storage::FileStorage::new()?; let state = AppState {
pool,
let state = AppState { pool, storage }; storage: FileStorage::new()?,
};
let router = app(state); let router = app(state);
let addr = (Ipv4Addr::UNSPECIFIED, 3000); let addr = (Ipv4Addr::UNSPECIFIED, 3000);
let listener = TcpListener::bind(addr).await?; let listener = tokio::net::TcpListener::bind(addr).await?;
axum::serve(listener, router).await?; axum::serve(listener, router)
.with_graceful_shutdown(shutdown_signal())
.await?;
Ok(()) Ok(())
} }
fn app(state: AppState) -> Router { async fn shutdown_signal() {
use axum::{http::header, routing::get}; use tokio::signal;
let ctrl_c = async {
signal::ctrl_c()
.await
.expect("failed to install Ctrl+C handler");
};
#[cfg(unix)]
{
let terminate = async {
signal::unix::signal(signal::unix::SignalKind::terminate())
.expect("failed to install signal handler")
.recv()
.await;
};
tokio::select! {
() = ctrl_c => {}
() = terminate => {}
}
}
#[cfg(not(unix))]
ctrl_c.await;
}
fn app(state: AppState) -> axum::Router {
use axum::{
extract::DefaultBodyLimit,
handler::Handler as _,
http::header,
routing::{get, post},
};
use endpoints::{ use endpoints::{
file, folder, file, folder,
permissions::{self, get_top_level_permitted_folders::get_top_level}, permissions::{self, get_top_level::get_top_level},
users,
};
use tower_http::{
timeout::TimeoutLayer,
trace::{MakeSpan, TraceLayer},
ServiceBuilderExt as _,
}; };
use tower_http::ServiceBuilderExt as _;
let sensitive_headers = [header::AUTHORIZATION, header::COOKIE]; #[derive(Clone, Copy)]
struct SpanMaker;
let middleware = tower::ServiceBuilder::new() impl<B> MakeSpan<B> for SpanMaker {
.sensitive_headers(sensitive_headers) fn make_span(&mut self, request: &axum::http::Request<B>) -> tracing::Span {
.trace_for_http() tracing::debug_span!(
"request",
method = %request.method(),
uri = %request.uri(),
version = ?request.version(),
headers = ?request.headers(),
request_id = %uuid::Uuid::now_v7()
)
}
}
const TEN_GIBIBYTES: usize = 10 * 1024 * 1024 * 1024;
let body_limit = DefaultBodyLimit::max(TEN_GIBIBYTES);
let timeout = TimeoutLayer::new(std::time::Duration::from_secs(10));
let common_middleware = tower::ServiceBuilder::new()
.sensitive_headers([header::AUTHORIZATION, header::COOKIE])
.layer(TraceLayer::new_for_http().make_span_with(SpanMaker))
.compression(); .compression();
// Build route service let file_router = axum::Router::new().route(
Router::new() "/",
.route(
"/files",
get(file::download::download) get(file::download::download)
.post(file::upload::upload) .post(file::upload::upload.layer(body_limit.clone()))
.delete(file::delete::delete) .delete(file::delete::delete.layer(timeout))
.patch(file::modify::modify), .patch(file::modify::modify.layer(body_limit.clone())),
) );
let general_router = axum::Router::new()
.route( .route(
"/folders", "/folders",
get(folder::list::list) get(folder::list::list)
.post(folder::create::create) .post(folder::create::create)
.delete(folder::delete::delete), .delete(folder::delete::delete),
) )
.route("/folders/structure", get(folder::get_structure::structure))
.route( .route(
"/permissions", "/permissions",
get(permissions::get::get) get(permissions::get::get)
@ -123,6 +194,21 @@ fn app(state: AppState) -> Router {
"/permissions/get_top_level_permitted_folders", "/permissions/get_top_level_permitted_folders",
get(get_top_level), get(get_top_level),
) )
.layer(middleware) .route(
"/users",
get(users::get::get)
.delete(users::delete::delete)
.put(users::put::put),
)
.route("/users/current", get(users::get::current))
.route("/users/search", get(users::search::search))
.route("/users/register", post(users::register::register))
.route("/users/authorize", post(users::login::login))
.layer(timeout);
axum::Router::new()
.nest("/files", file_router)
.nest("/", general_router)
.layer(common_middleware)
.with_state(state) .with_state(state)
} }

View File

@ -1,8 +1,14 @@
pub(crate) use crate::{auth::Claims, db, errors::ErrorHandlingExt as _, AppState, Pool}; pub(crate) use crate::{
auth::Claims,
db::{self, permissions::PermissionExt as _},
errors::{ErrorHandlingExt as _, GeneralError, GeneralResult, ItemNotFoundExt as _},
util::EmptyResponse,
AppState, Pool,
};
pub use axum::{ pub use axum::{
extract::{Json, Query, State}, extract::{Json, Query, State},
http::StatusCode, http::StatusCode,
}; };
pub use futures::StreamExt as _; pub use futures::{future, stream::BoxStream, Stream, StreamExt as _, TryStreamExt as _};
pub use serde::{Deserialize, Serialize}; pub use serde::{Deserialize, Serialize};
pub use uuid::Uuid; pub use uuid::Uuid;

11
src/util.rs Normal file
View File

@ -0,0 +1,11 @@
use axum::response::IntoResponse;
use crate::prelude::*;
/// Marker type for handlers that succeed without a response body.
pub struct EmptyResponse;

impl IntoResponse for EmptyResponse {
    fn into_response(self) -> axum::response::Response {
        // An empty success is expressed as a bare 204 No Content status.
        let status = StatusCode::NO_CONTENT;
        status.into_response()
    }
}