Compare commits

..

26 Commits

Author SHA1 Message Date
5f79a9f0d2 Added timeouts and a struct for empty response 2024-08-17 12:04:53 +03:00
9e3b9527d3 Applying the DefaultBodyLimit layer only to file uploads and modifications 2024-08-15 22:43:00 +03:00
a3e4ac2b2e Final preparation 2024-08-15 20:41:59 +03:00
ab138e8536 Updated deps 2024-08-15 16:03:11 +03:00
62f55043a5 Small optimization for non unix targets 2024-08-11 11:37:51 +03:00
ec7fbc07a0 Removed file size limit 2024-08-11 10:25:36 +03:00
1c9bd104e0 Tweaks for the desktop client 2024-08-10 09:01:06 +03:00
8eb5be96b3 Small change 2024-08-06 16:44:49 +03:00
2b12996453 Removed E in ErrorHandlingExt 2024-08-06 16:39:43 +03:00
75afab933d More error handling improvements 2024-08-06 16:02:44 +03:00
eba30d1e9d Permission guard simplification 2024-08-05 23:45:00 +03:00
9f76228ebe Error handling 2024-08-05 23:32:16 +03:00
8a4e2dc467 Timezone and folder creation fixes 2024-08-05 21:06:25 +03:00
8d297fffdf Prepared queries 2024-08-04 13:51:19 +03:00
ea5c65b6e5 Moved login and register to users 2024-08-04 12:38:50 +03:00
7669a02a95 Cleanup 2024-08-04 12:34:46 +03:00
bac5584b46 Search changes 2024-08-04 10:03:35 +03:00
b6c71ee35b Registration and fixes 2024-08-04 09:48:41 +03:00
94bb1371fa Switched auth_post to accept a form instead of a json 2024-08-03 21:05:28 +03:00
0614c4cad0 Expanded token lifespan to 30 days 2024-08-03 20:48:43 +03:00
c4ff602ec7 Now checking that user_id from claims exists 2024-08-03 20:15:08 +03:00
9f36d8e663 Removed utoipa 2024-08-03 19:41:29 +03:00
40f0526500 Added ability to get the info of the current user 2024-08-03 16:45:54 +03:00
cd3ab9b6bc Disabled lto for release build for now 2024-08-03 16:16:42 +03:00
9217ae46cb Auth 2024-08-03 16:16:31 +03:00
f6ed06de48 get_structure endpoint 2024-08-02 12:32:23 +03:00
57 changed files with 1552 additions and 1027 deletions

10
.dockerignore Normal file
View File

@ -0,0 +1,10 @@
**/target/
**/.vscode/
**/.env
**/.git/
**/.dockerignore
**/Dockerfile
**/compose.yaml
**/LICENSE
**/README.md
files/

View File

@ -1,12 +1,12 @@
{ {
"db_name": "PostgreSQL", "db_name": "PostgreSQL",
"query": "SELECT\n username,\n permission_type as \"permission_type: PermissionRaw\"\nFROM\n permissions\n INNER JOIN users ON permissions.user_id = users.user_id\nWHERE\n folder_id = $1", "query": "SELECT\n users.user_id,\n permission_type as \"permission_type: PermissionRaw\"\nFROM\n permissions\n INNER JOIN users ON permissions.user_id = users.user_id\nWHERE\n folder_id = $1",
"describe": { "describe": {
"columns": [ "columns": [
{ {
"ordinal": 0, "ordinal": 0,
"name": "username", "name": "user_id",
"type_info": "Varchar" "type_info": "Int4"
}, },
{ {
"ordinal": 1, "ordinal": 1,
@ -35,5 +35,5 @@
false false
] ]
}, },
"hash": "39b78c7f3266bea5e3e44aa372574319cb74dea6b3d0bc16d25e29ca28803317" "hash": "003349bc951a935fdfb285f99a726c221e3d1d02cb9e47b4c385545298b27217"
} }

View File

@ -0,0 +1,40 @@
{
"db_name": "PostgreSQL",
"query": "WITH\n permitted as (\n SELECT\n folder_id\n FROM\n permissions\n WHERE\n user_id = $1\n )\nSELECT\n folder_id, owner_id, folder_name, created_at\nFROM\n folders\nWHERE\n folder_id IN (\n SELECT\n folder_id\n FROM\n permitted\n )\n AND parent_folder_id NOT IN (\n SELECT\n folder_id\n FROM\n permitted\n )",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "folder_id",
"type_info": "Uuid"
},
{
"ordinal": 1,
"name": "owner_id",
"type_info": "Int4"
},
{
"ordinal": 2,
"name": "folder_name",
"type_info": "Varchar"
},
{
"ordinal": 3,
"name": "created_at",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": [
false,
false,
false,
false
]
},
"hash": "1c5dda0e613ee57819d4c9534f3bcd8809f313026a187a2eff66fa4f7ba888a5"
}

View File

@ -0,0 +1,28 @@
{
"db_name": "PostgreSQL",
"query": "SELECT user_id, hashed_password FROM users WHERE username = $1 OR email = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "user_id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "hashed_password",
"type_info": "Bytea"
}
],
"parameters": {
"Left": [
"Text"
]
},
"nullable": [
false,
false
]
},
"hash": "20af817890cb184e17d193e18132796e02e5e7352542f507acda25e9cd6cfc61"
}

View File

@ -1,6 +1,6 @@
{ {
"db_name": "PostgreSQL", "db_name": "PostgreSQL",
"query": "SELECT folder_id, owner_id, folder_name, created_at FROM folders WHERE parent_folder_id = $1", "query": "SELECT folder_id, owner_id, folder_name, created_at FROM folders WHERE folder_id = $1",
"describe": { "describe": {
"columns": [ "columns": [
{ {
@ -21,7 +21,7 @@
{ {
"ordinal": 3, "ordinal": 3,
"name": "created_at", "name": "created_at",
"type_info": "Timestamp" "type_info": "Timestamptz"
} }
], ],
"parameters": { "parameters": {
@ -36,5 +36,5 @@
false false
] ]
}, },
"hash": "9cc887509746b773ebbc8c130331b768f9a1deeab34d56aa3b0a833d718114fe" "hash": "3028a7c8ec616933e490ed267967b1406552c2b7c69f4f1f02a147df5411e692"
} }

View File

@ -1,24 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO folders(parent_folder_id, owner_id, folder_name) VALUES ($1, $2, $3) RETURNING folder_id",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "folder_id",
"type_info": "Uuid"
}
],
"parameters": {
"Left": [
"Uuid",
"Int4",
"Varchar"
]
},
"nullable": [
false
]
},
"hash": "3dd4a65d3106d742c2221c0589ac68d4621c6e351f9fbb7aa58629ff2d829234"
}

View File

@ -0,0 +1,17 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO folders(parent_folder_id, owner_id, folder_name, folder_id) VALUES ($1, $2, $3, $4)",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Uuid",
"Int4",
"Varchar",
"Uuid"
]
},
"nullable": []
},
"hash": "3faa32dd95822ae8687784817f68e48e726eedd2b7af7e52712974b4f04a8f80"
}

View File

@ -1,6 +1,6 @@
{ {
"db_name": "PostgreSQL", "db_name": "PostgreSQL",
"query": "UPDATE users SET username = $2, email = $3 WHERE user_id = $1 RETURNING *", "query": "UPDATE users SET username = $2, email = $3 WHERE user_id = $1 RETURNING user_id, username, email",
"describe": { "describe": {
"columns": [ "columns": [
{ {
@ -32,5 +32,5 @@
false false
] ]
}, },
"hash": "347a486f9ea5183b1c4c16234a1833ea61970ea7f901dd57c0715ae3dbddd164" "hash": "70a68acb301745ef393185c2bef92627648a6e419303adb40f56c09d55291cbd"
} }

View File

@ -0,0 +1,22 @@
{
"db_name": "PostgreSQL",
"query": "SELECT EXISTS(SELECT user_id FROM users WHERE user_id = $1)",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "exists",
"type_info": "Bool"
}
],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": [
null
]
},
"hash": "a04a4e8d3a394883a2f1052074bd43fcadafa0c1ba66f36ac49fc54b5c4150b3"
}

View File

@ -1,6 +1,6 @@
{ {
"db_name": "PostgreSQL", "db_name": "PostgreSQL",
"query": "SELECT\n user_id, username, email\nFROM\n users\nORDER BY\n GREATEST (\n similarity (email, $1),\n similarity (username, $1)\n ) DESC", "query": "SELECT\n user_id, username, email, \n GREATEST (\n similarity (email, $1),\n similarity (username, $1)\n ) as \"similarity!\"\nFROM\n users\nORDER BY\n \"similarity!\" DESC\nLIMIT 20",
"describe": { "describe": {
"columns": [ "columns": [
{ {
@ -17,6 +17,11 @@
"ordinal": 2, "ordinal": 2,
"name": "email", "name": "email",
"type_info": "Varchar" "type_info": "Varchar"
},
{
"ordinal": 3,
"name": "similarity!",
"type_info": "Float4"
} }
], ],
"parameters": { "parameters": {
@ -27,8 +32,9 @@
"nullable": [ "nullable": [
false, false,
false, false,
false false,
null
] ]
}, },
"hash": "61a26b3321bb5b58a0b90e61b2cdcacfb46a03eb0c0a89839c9b3eff53cb7e56" "hash": "e0d415b13ccf7aa865558395eb6997bfff50762d36cf3742470a897f4588c802"
} }

View File

@ -26,12 +26,12 @@
{ {
"ordinal": 4, "ordinal": 4,
"name": "created_at", "name": "created_at",
"type_info": "Timestamp" "type_info": "Timestamptz"
}, },
{ {
"ordinal": 5, "ordinal": 5,
"name": "updated_at", "name": "updated_at",
"type_info": "Timestamp" "type_info": "Timestamptz"
} }
], ],
"parameters": { "parameters": {

View File

@ -0,0 +1,41 @@
{
"db_name": "PostgreSQL",
"query": "SELECT\n f.folder_id,\n owner_id,\n folder_name,\n created_at\nFROM\n folders f\n LEFT JOIN permissions p ON f.folder_id = p.folder_id\nWHERE\n parent_folder_id = $1\n AND (p.user_id = $2 OR f.owner_id = $2)",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "folder_id",
"type_info": "Uuid"
},
{
"ordinal": 1,
"name": "owner_id",
"type_info": "Int4"
},
{
"ordinal": 2,
"name": "folder_name",
"type_info": "Varchar"
},
{
"ordinal": 3,
"name": "created_at",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Uuid",
"Int4"
]
},
"nullable": [
false,
false,
false,
false
]
},
"hash": "ef707c0f6d2ef0d66e71929167b5c82bb8bf923736e6c797711bc3124f0693bc"
}

View File

@ -1,22 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "WITH\n permitted as (\n SELECT\n folder_id\n FROM\n permissions\n WHERE\n user_id = $1\n )\nSELECT\n folder_id\nFROM\n folders\nWHERE\n folder_id IN (\n SELECT\n folder_id\n FROM\n permitted\n )\n AND parent_folder_id NOT IN (\n SELECT\n folder_id\n FROM\n permitted\n )",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "folder_id",
"type_info": "Uuid"
}
],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": [
false
]
},
"hash": "f9e36f45f25dd2439a7a0b16b6df356a0a2a47e70b6e031ea5a0442adc86725b"
}

View File

@ -1,6 +1,6 @@
{ {
"db_name": "PostgreSQL", "db_name": "PostgreSQL",
"query": "INSERT INTO users(username, email) VALUES ($1, $2) RETURNING user_id", "query": "INSERT INTO users(username, email, hashed_password) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING RETURNING user_id",
"describe": { "describe": {
"columns": [ "columns": [
{ {
@ -12,12 +12,13 @@
"parameters": { "parameters": {
"Left": [ "Left": [
"Varchar", "Varchar",
"Varchar" "Varchar",
"Bytea"
] ]
}, },
"nullable": [ "nullable": [
false false
] ]
}, },
"hash": "9602875e192fd321f3a773aa7eb5145cb0d1e7f31def733fd11394e9ad6c0d21" "hash": "fb94ebf44aff9c5c56cc43ef47f571b4dc1fcdcbc595aef4d245ee2454b0a458"
} }

887
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -5,8 +5,6 @@ edition = "2021"
[profile.release] [profile.release]
debug = 1 debug = 1
lto = true
codegen-units = 1
[lints.clippy] [lints.clippy]
pedantic = "warn" pedantic = "warn"
@ -24,14 +22,12 @@ axum-extra = { version = "0.9", features = ["typed-header"] }
chrono = { version = "0.4", features = ["serde"] } chrono = { version = "0.4", features = ["serde"] }
dotenvy = "0.15" dotenvy = "0.15"
futures = "0.3" futures = "0.3"
itertools = "0.13"
jsonwebtoken = "9" jsonwebtoken = "9"
oauth2 = "4" rand = "0.8"
reqwest = { version = "0.12", features = [ scrypt = { version = "0.11", default-features = false, features = ["std"] }
"http2",
"rustls-tls",
"json",
], default-features = false }
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
serde_json = "1"
sha2 = "0.10" sha2 = "0.10"
sqlx = { version = "0.8", features = [ sqlx = { version = "0.8", features = [
"postgres", "postgres",
@ -41,12 +37,18 @@ sqlx = { version = "0.8", features = [
"chrono", "chrono",
"uuid", "uuid",
] } ] }
tokio = { version = "1", features = ["rt-multi-thread"] } subtle = "2"
tokio = { version = "1", features = [
"parking_lot",
"rt-multi-thread",
"signal",
] }
tokio-util = { version = "0.7" } tokio-util = { version = "0.7" }
tower = { version = "0.4" } tower = { version = "0.4" }
tower-http = { version = "0.5", features = [ tower-http = { version = "0.5", features = [
"compression-full", "compression-full",
"sensitive-headers", "sensitive-headers",
"timeout",
"trace", "trace",
"util", "util",
] } ] }
@ -55,6 +57,5 @@ tracing-subscriber = { version = "0.3", features = [
"parking_lot", "parking_lot",
"env-filter", "env-filter",
] } ] }
utoipa = { version = "4", features = ["axum_extras", "uuid", "chrono"] }
utoipauto = "0.1"
uuid = { version = "1", features = ["serde", "v7"] } uuid = { version = "1", features = ["serde", "v7"] }
validator = { version = "0.18", features = ["derive"] }

19
Dockerfile Normal file
View File

@ -0,0 +1,19 @@
FROM rust:slim AS chef
RUN cargo install cargo-chef
WORKDIR /app
FROM chef AS planner
COPY . .
RUN cargo chef prepare
FROM chef AS builder
COPY --from=planner /app/recipe.json recipe.json
RUN cargo chef cook --release
COPY . .
RUN cargo b -r
FROM debian:stable-slim
EXPOSE 3000
WORKDIR /app
COPY --from=builder /app/target/release/project .
CMD [ "./project" ]

25
compose-dev.yaml Normal file
View File

@ -0,0 +1,25 @@
services:
backend:
build: .
ports:
- 3000:3000
environment:
JWT_SECRET: ${JWT_SECRET}
DATABASE_URL: 'postgresql://tester:testing123!@backend_db/backend'
depends_on:
- backend_db
backend_db:
image: ghcr.io/fboulnois/pg_uuidv7:1.5.0
environment:
- POSTGRES_USER=tester
- POSTGRES_PASSWORD=testing123!
- POSTGRES_DB=backend
ports:
- 5432:5432
volumes:
- backend_db_data:/var/lib/postgresql/data
restart: unless-stopped
volumes:
backend_db_data:

View File

@ -1,14 +1,22 @@
services: services:
db: backend:
build: .
environment:
JWT_SECRET: ${JWT_SECRET}
DATABASE_URL: 'postgresql://tester:testing123!@backend_db/backend'
depends_on:
- backend_db
restart: unless-stopped
backend_db:
image: ghcr.io/fboulnois/pg_uuidv7:1.5.0 image: ghcr.io/fboulnois/pg_uuidv7:1.5.0
environment: environment:
- POSTGRES_USER=tester - POSTGRES_USER=tester
- POSTGRES_PASSWORD=testing123! - POSTGRES_PASSWORD=testing123!
- POSTGRES_DB=testing - POSTGRES_DB=backend
ports:
- 5432:5432
volumes: volumes:
- postgres_data:/var/lib/postgresql/data - backend_db_data:/var/lib/postgresql/data
restart: unless-stopped
volumes: volumes:
postgres_data: backend_db_data:

View File

@ -1,7 +1,3 @@
DROP EXTENSION IF EXISTS pg_trgm;
DROP EXTENSION IF EXISTS pg_uuidv7;
DROP TABLE permissions; DROP TABLE permissions;
DROP TABLE files; DROP TABLE files;
@ -11,3 +7,7 @@ DROP TABLE folders;
DROP TABLE users; DROP TABLE users;
DROP TYPE permission; DROP TYPE permission;
DROP EXTENSION IF EXISTS pg_trgm;
DROP EXTENSION IF EXISTS pg_uuidv7;

View File

@ -6,7 +6,8 @@ CREATE TABLE
users ( users (
user_id SERIAL PRIMARY KEY, user_id SERIAL PRIMARY KEY,
username VARCHAR(50) NOT NULL UNIQUE, username VARCHAR(50) NOT NULL UNIQUE,
email VARCHAR(100) NOT NULL UNIQUE email VARCHAR(100) NOT NULL UNIQUE,
hashed_password BYTEA NOT NULL
); );
CREATE TABLE CREATE TABLE
@ -15,7 +16,7 @@ CREATE TABLE
parent_folder_id UUID REFERENCES folders (folder_id) ON DELETE CASCADE DEFAULT null, parent_folder_id UUID REFERENCES folders (folder_id) ON DELETE CASCADE DEFAULT null,
owner_id INT REFERENCES users (user_id) ON DELETE CASCADE NOT NULL, owner_id INT REFERENCES users (user_id) ON DELETE CASCADE NOT NULL,
folder_name VARCHAR(255) NOT NULL, folder_name VARCHAR(255) NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP NOT NULL
); );
CREATE TABLE CREATE TABLE
@ -25,8 +26,8 @@ CREATE TABLE
file_name VARCHAR(255) NOT NULL, file_name VARCHAR(255) NOT NULL,
file_size BIGINT NOT NULL, file_size BIGINT NOT NULL,
sha512 BYTEA NOT NULL, sha512 BYTEA NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP NOT NULL,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP NOT NULL
); );
CREATE TYPE permission AS ENUM ('read', 'write', 'manage'); CREATE TYPE permission AS ENUM ('read', 'write', 'manage');

1
sql/create_folder.sql Normal file
View File

@ -0,0 +1 @@
INSERT INTO folders(parent_folder_id, owner_id, folder_name, folder_id) VALUES ($1, $2, $3, $4)

View File

@ -1,5 +1,5 @@
SELECT SELECT
username, users.user_id,
permission_type as "permission_type: PermissionRaw" permission_type as "permission_type: PermissionRaw"
FROM FROM
permissions permissions

11
sql/get_folders.sql Normal file
View File

@ -0,0 +1,11 @@
SELECT
f.folder_id,
owner_id,
folder_name,
created_at
FROM
folders f
LEFT JOIN permissions p ON f.folder_id = p.folder_id
WHERE
parent_folder_id = $1
AND (p.user_id = $2 OR f.owner_id = $2)

View File

@ -8,7 +8,7 @@ WITH
user_id = $1 user_id = $1
) )
SELECT SELECT
folder_id folder_id, owner_id, folder_name, created_at
FROM FROM
folders folders
WHERE WHERE

View File

@ -1,9 +1,11 @@
SELECT SELECT
user_id, username, email user_id, username, email,
FROM
users
ORDER BY
GREATEST ( GREATEST (
similarity (email, $1), similarity (email, $1),
similarity (username, $1) similarity (username, $1)
) DESC ) as "similarity!"
FROM
users
ORDER BY
"similarity!" DESC
LIMIT 20

View File

@ -1,31 +1,204 @@
use std::{array::TryFromSliceError, sync::LazyLock};
use axum::{ use axum::{
extract::{FromRequestParts, Query}, extract::{FromRef, FromRequestParts},
http::{request::Parts, StatusCode}, http::request::Parts,
RequestPartsExt, RequestPartsExt,
}; };
use serde::Deserialize; use axum_extra::{
headers::{authorization::Bearer, Authorization},
TypedHeader,
};
use chrono::{TimeDelta, Utc};
use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, Validation};
use rand::{rngs::OsRng, RngCore};
use serde::{Deserialize, Serialize};
use subtle::ConstantTimeEq;
use crate::AppState; use crate::prelude::*;
#[derive(Deserialize, Debug)] pub const HASH_LENGTH: usize = 64;
pub struct Claims { pub const SALT_LENGTH: usize = 64;
pub user_id: i32,
static PARAMS: LazyLock<scrypt::Params> =
LazyLock::new(|| scrypt::Params::new(14, 8, 1, HASH_LENGTH).unwrap());
static KEYS: LazyLock<Keys> = LazyLock::new(|| {
let secret = std::env::var("JWT_SECRET").expect("JWT_SECRET must be set");
Keys::from_secret(secret.as_bytes())
});
struct Keys {
encoding_key: EncodingKey,
decoding_key: DecodingKey,
} }
#[axum::async_trait] impl Keys {
impl FromRequestParts<AppState> for Claims { fn from_secret(secret: &[u8]) -> Self {
type Rejection = StatusCode; Self {
encoding_key: EncodingKey::from_secret(secret),
async fn from_request_parts( decoding_key: DecodingKey::from_secret(secret),
parts: &mut Parts,
_state: &AppState,
) -> Result<Self, Self::Rejection> {
match parts.extract().await {
Ok(Query(claims)) => Ok(claims),
Err(err) => {
tracing::debug!(%err, "Autharization failed");
Err(StatusCode::UNAUTHORIZED)
}
} }
} }
} }
/// Forces the evaluation of the keys. They will be created upon first use otherwise
pub fn force_init_keys() {
LazyLock::force(&KEYS);
}
/// Hashes the bytes using Scrypt with the given salt
#[must_use]
fn hash_scrypt(bytes: &[u8], salt: &[u8]) -> [u8; HASH_LENGTH] {
let mut hash = [0; HASH_LENGTH];
scrypt::scrypt(bytes, salt, &PARAMS, &mut hash).unwrap();
hash
}
/// Verifiable scrypt hashed bytes
#[cfg_attr(test, derive(PartialEq))] // == OPERATOR MUSTN'T BE USED OUTSIDE OF TESTS
pub struct HashedBytes {
pub hash: [u8; HASH_LENGTH],
pub salt: [u8; SALT_LENGTH],
}
impl HashedBytes {
/// Hashes the bytes
#[must_use]
pub fn hash_bytes(bytes: &[u8]) -> Self {
let mut salt = [0; SALT_LENGTH];
OsRng.fill_bytes(&mut salt);
Self {
hash: hash_scrypt(bytes, &salt),
salt,
}
}
/// Parses the bytes where the first `HASH_LENGTH` bytes are the hash and the latter `SALT_LENGTH` bytes are the salt
pub fn from_bytes(bytes: &[u8]) -> Result<Self, TryFromSliceError> {
let (hash, salt) = bytes.split_at(HASH_LENGTH);
let result = Self {
hash: hash.try_into()?,
salt: salt.try_into()?,
};
Ok(result)
}
#[must_use]
pub fn verify(&self, bytes: &[u8]) -> bool {
let hash = hash_scrypt(bytes, self.salt.as_ref());
hash.ct_eq(self.hash.as_ref()).into()
}
pub fn as_bytes(&self) -> Vec<u8> {
let mut result = Vec::with_capacity(self.hash.len() + self.salt.len());
result.extend_from_slice(&self.hash);
result.extend_from_slice(&self.salt);
result
}
}
pub async fn authenticate_user(
username: &str,
password: &str,
pool: &Pool,
) -> anyhow::Result<Option<i32>> {
let Some((user_id, hash)) = db::users::get_hash(username, pool).await? else {
return Ok(None);
};
let hash = HashedBytes::from_bytes(&hash)?;
Ok(hash.verify(password.as_bytes()).then_some(user_id))
}
#[derive(Debug, Serialize)]
pub struct Token {
access_token: String,
token_type: &'static str,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Claims {
pub user_id: i32,
pub exp: i64,
}
const JWT_ALGORITHM: jsonwebtoken::Algorithm = jsonwebtoken::Algorithm::HS256;
impl Claims {
pub fn new(user_id: i32) -> Self {
Self {
user_id,
exp: (Utc::now() + TimeDelta::days(30)).timestamp(),
}
}
pub fn encode(self) -> Result<Token, GeneralError> {
let access_token = encode(&Header::new(JWT_ALGORITHM), &self, &KEYS.encoding_key)
.handle_internal("Token creation error")?;
let token = Token {
access_token,
token_type: "Bearer",
};
Ok(token)
}
}
#[axum::async_trait]
impl<T> FromRequestParts<T> for Claims
where
Pool: FromRef<T>,
T: Sync,
{
type Rejection = GeneralError;
async fn from_request_parts(parts: &mut Parts, state: &T) -> Result<Self, Self::Rejection> {
const INVALID_TOKEN: GeneralError =
GeneralError::const_message(StatusCode::UNAUTHORIZED, "Invalid token");
let pool = Pool::from_ref(state);
let TypedHeader(Authorization(bearer)) = parts
.extract::<TypedHeader<Authorization<Bearer>>>()
.await
.map_err(|_| INVALID_TOKEN)?;
let claims: Claims = decode(
bearer.token(),
&KEYS.decoding_key,
&Validation::new(JWT_ALGORITHM),
)
.map_err(|_| INVALID_TOKEN)?
.claims;
db::users::exists(claims.user_id, &pool)
.await
.handle_internal("Token validation error")?
.then_some(claims)
.ok_or(GeneralError::const_message(
StatusCode::UNAUTHORIZED,
"Wrong credentials",
))
}
}
#[cfg(test)]
mod tests {
use super::HashedBytes;
const PASSWORD: &str = "Password12313#!#4)$*!#";
#[test]
fn test_hash_conversion() {
let bytes = HashedBytes::hash_bytes(PASSWORD.as_bytes());
let bytes2 = HashedBytes::from_bytes(&bytes.as_bytes()).unwrap();
assert!(bytes == bytes2);
}
#[test]
fn test_hash() {
assert!(HashedBytes::hash_bytes(PASSWORD.as_bytes()).verify(PASSWORD.as_bytes()));
}
#[test]
fn test_different_hash() {
assert!(!HashedBytes::hash_bytes(PASSWORD.as_bytes()).verify(b"Different Password"));
}
}

View File

@ -1,8 +1,6 @@
use uuid::Uuid; use db::permissions::PermissionType;
use crate::Pool; use crate::prelude::*;
use super::permissions::PermissionType;
pub async fn insert( pub async fn insert(
file_id: Uuid, file_id: Uuid,
@ -33,18 +31,20 @@ pub async fn update(file_id: Uuid, size: i64, hash: Vec<u8>, pool: &Pool) -> sql
#[derive(Debug, serde::Serialize)] #[derive(Debug, serde::Serialize)]
#[allow(clippy::struct_field_names, clippy::module_name_repetitions)] #[allow(clippy::struct_field_names, clippy::module_name_repetitions)]
pub struct FileWithoutParentId { pub struct FileWithoutParentId {
file_id: Uuid, pub file_id: Uuid,
file_name: String, pub file_name: String,
file_size: i64, pub file_size: i64,
sha512: String, pub sha512: String,
created_at: chrono::NaiveDateTime, pub created_at: chrono::DateTime<chrono::Utc>,
updated_at: chrono::NaiveDateTime, pub updated_at: chrono::DateTime<chrono::Utc>,
} }
pub async fn get_files(folder_id: Uuid, pool: &Pool) -> sqlx::Result<Vec<FileWithoutParentId>> { pub fn get_files(
folder_id: Uuid,
pool: &Pool,
) -> impl Stream<Item = sqlx::Result<FileWithoutParentId>> + '_ {
sqlx::query_as!(FileWithoutParentId, r#"SELECT file_id, file_name, file_size, encode(sha512, 'base64') as "sha512!", created_at, updated_at FROM files WHERE folder_id = $1"#, folder_id) sqlx::query_as!(FileWithoutParentId, r#"SELECT file_id, file_name, file_size, encode(sha512, 'base64') as "sha512!", created_at, updated_at FROM files WHERE folder_id = $1"#, folder_id)
.fetch_all(pool) .fetch(pool)
.await
} }
async fn get_folder_id(file_id: Uuid, pool: &Pool) -> sqlx::Result<Option<Uuid>> { async fn get_folder_id(file_id: Uuid, pool: &Pool) -> sqlx::Result<Option<Uuid>> {
@ -67,10 +67,11 @@ pub async fn get_permissions(
} }
pub async fn get_name(file_id: Uuid, pool: &Pool) -> sqlx::Result<Option<String>> { pub async fn get_name(file_id: Uuid, pool: &Pool) -> sqlx::Result<Option<String>> {
let record = sqlx::query!("SELECT file_name FROM files WHERE file_id = $1", file_id) let name = sqlx::query!("SELECT file_name FROM files WHERE file_id = $1", file_id)
.fetch_optional(pool) .fetch_optional(pool)
.await?; .await?
Ok(record.map(|record| record.file_name)) .map(|record| record.file_name);
Ok(name)
} }
pub async fn delete(file_id: Uuid, pool: &Pool) -> sqlx::Result<bool> { pub async fn delete(file_id: Uuid, pool: &Pool) -> sqlx::Result<bool> {

View File

@ -1,7 +1,4 @@
use futures::{Stream, TryStreamExt}; use crate::{db::permissions::PermissionRaw, prelude::*};
use uuid::Uuid;
use crate::{db::permissions::PermissionRaw, Pool};
use super::permissions::PermissionType; use super::permissions::PermissionType;
@ -33,7 +30,7 @@ pub async fn get_root(user_id: i32, pool: &Pool) -> sqlx::Result<Uuid> {
.map(|row| row.folder_id) .map(|row| row.folder_id)
} }
pub async fn get_by_id(id: Option<Uuid>, user_id: i32, pool: &Pool) -> sqlx::Result<Option<Uuid>> { pub async fn process_id(id: Option<Uuid>, user_id: i32, pool: &Pool) -> sqlx::Result<Option<Uuid>> {
match id { match id {
Some(id) => get_permissions(id, user_id, pool) Some(id) => get_permissions(id, user_id, pool)
.await .await
@ -45,25 +42,44 @@ pub async fn get_by_id(id: Option<Uuid>, user_id: i32, pool: &Pool) -> sqlx::Res
#[derive(Debug, serde::Serialize)] #[derive(Debug, serde::Serialize)]
#[allow(clippy::struct_field_names, clippy::module_name_repetitions)] #[allow(clippy::struct_field_names, clippy::module_name_repetitions)]
pub struct FolderWithoutParentId { pub struct FolderWithoutParentId {
folder_id: Uuid, pub folder_id: Uuid,
owner_id: i32, pub owner_id: i32,
folder_name: String, pub folder_name: String,
created_at: chrono::NaiveDateTime, pub created_at: chrono::DateTime<chrono::Utc>,
} }
pub async fn get_folders( pub async fn get_by_id(
parent_folder_id: Uuid, folder_id: Uuid,
pool: &Pool, pool: &Pool,
) -> sqlx::Result<Vec<FolderWithoutParentId>> { ) -> sqlx::Result<Option<FolderWithoutParentId>> {
sqlx::query_as!( sqlx::query_as!(
FolderWithoutParentId, FolderWithoutParentId,
"SELECT folder_id, owner_id, folder_name, created_at FROM folders WHERE parent_folder_id = $1", "SELECT folder_id, owner_id, folder_name, created_at FROM folders WHERE folder_id = $1",
parent_folder_id, folder_id
) )
.fetch_all(pool) .fetch_optional(pool)
.await .await
} }
/// Get folders that user can read
///
/// # Warning
///
/// This function doesn't check that the user can read the parent folder itself
pub fn get_folders(
parent_folder_id: Uuid,
user_id: i32,
pool: &Pool,
) -> impl Stream<Item = sqlx::Result<FolderWithoutParentId>> + '_ {
sqlx::query_file_as!(
FolderWithoutParentId,
"sql/get_folders.sql",
parent_folder_id,
user_id
)
.fetch(pool)
}
pub async fn name_exists(parent_folder_id: Uuid, name: &str, pool: &Pool) -> sqlx::Result<bool> { pub async fn name_exists(parent_folder_id: Uuid, name: &str, pool: &Pool) -> sqlx::Result<bool> {
sqlx::query_file!("sql/name_exists.sql", parent_folder_id, name) sqlx::query_file!("sql/name_exists.sql", parent_folder_id, name)
.fetch_one(pool) .fetch_one(pool)
@ -71,20 +87,26 @@ pub async fn name_exists(parent_folder_id: Uuid, name: &str, pool: &Pool) -> sql
.map(|row| row.exists.unwrap_or(false)) .map(|row| row.exists.unwrap_or(false))
} }
pub async fn insert( /// Creates a folder in the database. Do not use this function to create the ROOT folder
parent_folder_id: Uuid, pub async fn insert(parent_folder_id: Uuid, folder_name: &str, pool: &Pool) -> sqlx::Result<Uuid> {
user_id: i32, let folder_id = Uuid::now_v7();
folder_name: &str, let owner_id = get_by_id(parent_folder_id, pool)
pool: &Pool, .await?
) -> sqlx::Result<Uuid> { .ok_or(sqlx::Error::RowNotFound)?
sqlx::query!("INSERT INTO folders(parent_folder_id, owner_id, folder_name) VALUES ($1, $2, $3) RETURNING folder_id", .owner_id;
parent_folder_id, let result = sqlx::query_file!(
user_id, "sql/create_folder.sql",
folder_name parent_folder_id,
) owner_id,
.fetch_one(pool) folder_name,
.await folder_id
.map(|record| record.folder_id) )
.execute(pool)
.await?;
if result.rows_affected() == 0 {
return Err(sqlx::Error::RowNotFound);
}
Ok(folder_id)
} }
pub fn delete(folder_id: Uuid, pool: &Pool) -> impl Stream<Item = sqlx::Result<Uuid>> + '_ { pub fn delete(folder_id: Uuid, pool: &Pool) -> impl Stream<Item = sqlx::Result<Uuid>> + '_ {

View File

@ -1,13 +1,11 @@
use std::collections::HashMap; use std::{borrow::Cow, collections::HashMap};
use axum::http::StatusCode; use db::folder::FolderWithoutParentId;
use futures::TryStreamExt as _;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::Pool; use crate::prelude::*;
#[derive(sqlx::Type, Debug, Serialize, Deserialize)] #[derive(sqlx::Type, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
#[sqlx(type_name = "permission")] #[sqlx(type_name = "permission")]
#[sqlx(rename_all = "lowercase")] #[sqlx(rename_all = "lowercase")]
pub enum PermissionRaw { pub enum PermissionRaw {
@ -37,46 +35,74 @@ impl From<Option<PermissionRaw>> for PermissionType {
} }
} }
impl From<PermissionType> for PermissionRaw {
fn from(value: PermissionType) -> Self {
match value {
PermissionType::Manage => Self::Manage,
PermissionType::Write => Self::Write,
PermissionType::Read => Self::Read,
PermissionType::NoPermission => unreachable!(),
}
}
}
impl PermissionType { impl PermissionType {
pub fn can_read(self) -> bool { pub fn can_read(self) -> bool {
self >= PermissionType::Read self >= PermissionType::Read
} }
pub fn can_read_guard(self) -> Result<(), StatusCode> { fn can_read_guard(self) -> GeneralResult<()> {
if !self.can_read() { self.can_read().then_some(()).item_not_found()?;
return Err(StatusCode::NOT_FOUND);
}
Ok(()) Ok(())
} }
pub fn can_write_guard(self) -> Result<(), StatusCode> { fn can_write_guard(self) -> GeneralResult<()> {
self.can_read_guard()?; self.can_read_guard()?;
if self < PermissionType::Write { if self < PermissionType::Write {
return Err(StatusCode::FORBIDDEN); return Err(GeneralError::message(
StatusCode::FORBIDDEN,
"Cannot write to the folder",
));
} }
Ok(()) Ok(())
} }
pub fn can_manage_guard(self) -> Result<(), StatusCode> { fn can_manage_guard(self) -> GeneralResult<()> {
self.can_read_guard()?; self.can_read_guard()?;
if self < PermissionType::Manage { if self < PermissionType::Manage {
return Err(StatusCode::FORBIDDEN); return Err(GeneralError::message(
StatusCode::FORBIDDEN,
"Cannot manage the folder",
));
} }
Ok(()) Ok(())
} }
} }
pub trait PermissionExt {
fn can_read_guard(self) -> GeneralResult<()>;
fn can_write_guard(self) -> GeneralResult<()>;
fn can_manage_guard(self) -> GeneralResult<()>;
}
fn permissions_error(error: sqlx::Error) -> GeneralError {
GeneralError {
status_code: StatusCode::INTERNAL_SERVER_ERROR,
message: Cow::Borrowed("Error getting permissions"),
error: Some(error.into()),
}
}
fn apply_guard(
result: sqlx::Result<PermissionType>,
func: impl FnOnce(PermissionType) -> GeneralResult<()>,
) -> GeneralResult<()> {
result.map_err(permissions_error).and_then(func)
}
impl PermissionExt for sqlx::Result<PermissionType> {
fn can_read_guard(self) -> GeneralResult<()> {
apply_guard(self, PermissionType::can_read_guard)
}
fn can_write_guard(self) -> GeneralResult<()> {
apply_guard(self, PermissionType::can_write_guard)
}
fn can_manage_guard(self) -> GeneralResult<()> {
apply_guard(self, PermissionType::can_manage_guard)
}
}
pub async fn insert( pub async fn insert(
user_id: i32, user_id: i32,
folder_id: Uuid, folder_id: Uuid,
@ -97,10 +123,10 @@ pub async fn insert(
pub async fn get_all_for_folder( pub async fn get_all_for_folder(
folder_id: Uuid, folder_id: Uuid,
pool: &Pool, pool: &Pool,
) -> sqlx::Result<HashMap<String, PermissionRaw>> { ) -> sqlx::Result<HashMap<i32, PermissionRaw>> {
sqlx::query_file!("sql/get_all_permissions_for_folder.sql", folder_id) sqlx::query_file!("sql/get_all_permissions_for_folder.sql", folder_id)
.fetch(pool) .fetch(pool)
.map_ok(|record| (record.username, record.permission_type)) .map_ok(|record| (record.user_id, record.permission_type))
.try_collect() .try_collect()
.await .await
} }
@ -112,10 +138,16 @@ pub async fn delete_for_folder(folder_id: Uuid, user_id: i32, pool: &Pool) -> sq
.map(|_| ()) .map(|_| ())
} }
pub async fn get_top_level_permitted_folders(user_id: i32, pool: &Pool) -> sqlx::Result<Vec<Uuid>> { pub async fn get_top_level_permitted_folders(
sqlx::query_file!("sql/get_top_level_folder.sql", user_id) user_id: i32,
.fetch(pool) pool: &Pool,
.map_ok(|record| record.folder_id) ) -> sqlx::Result<Vec<FolderWithoutParentId>> {
.try_collect() sqlx::query_file_as!(
.await FolderWithoutParentId,
"sql/get_top_level_folder.sql",
user_id
)
.fetch(pool)
.try_collect()
.await
} }

View File

@ -1,19 +1,24 @@
use futures::{stream::BoxStream, Stream, TryStreamExt}; use crate::prelude::*;
use serde::Serialize;
use uuid::Uuid;
use crate::Pool;
/// Creates user and returns its id /// Creates user and returns its id
pub async fn create_user(user_name: &str, user_email: &str, pool: &Pool) -> sqlx::Result<i32> { pub async fn create_user(
let id = sqlx::query!( user_name: &str,
"INSERT INTO users(username, email) VALUES ($1, $2) RETURNING user_id", user_email: &str,
hashed_password: &[u8],
pool: &Pool,
) -> sqlx::Result<Option<i32>> {
let Some(record) = sqlx::query!(
"INSERT INTO users(username, email, hashed_password) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING RETURNING user_id",
user_name, user_name,
user_email user_email,
hashed_password
) )
.fetch_one(pool) .fetch_optional(pool)
.await? .await?
.user_id; else {
return Ok(None);
};
let id = record.user_id;
sqlx::query!( sqlx::query!(
"INSERT INTO folders(owner_id, folder_name) VALUES ($1, $2)", "INSERT INTO folders(owner_id, folder_name) VALUES ($1, $2)",
id, id,
@ -21,7 +26,7 @@ pub async fn create_user(user_name: &str, user_email: &str, pool: &Pool) -> sqlx
) )
.execute(pool) .execute(pool)
.await?; .await?;
Ok(id) Ok(Some(id))
} }
/// Deletes the user and returns the files that must be deleted /// Deletes the user and returns the files that must be deleted
@ -46,7 +51,7 @@ pub async fn update(
) -> sqlx::Result<UserInfo> { ) -> sqlx::Result<UserInfo> {
sqlx::query_as!( sqlx::query_as!(
UserInfo, UserInfo,
"UPDATE users SET username = $2, email = $3 WHERE user_id = $1 RETURNING *", "UPDATE users SET username = $2, email = $3 WHERE user_id = $1 RETURNING user_id, username, email",
user_id, user_id,
username, username,
email email
@ -55,19 +60,48 @@ pub async fn update(
.await .await
} }
pub async fn get(user_id: i32, pool: &Pool) -> sqlx::Result<UserInfo> { pub async fn exists(user_id: i32, pool: &Pool) -> sqlx::Result<bool> {
sqlx::query!(
"SELECT EXISTS(SELECT user_id FROM users WHERE user_id = $1)",
user_id
)
.fetch_one(pool)
.await
.map(|record| record.exists.unwrap_or(false))
}
pub async fn get(user_id: i32, pool: &Pool) -> sqlx::Result<Option<UserInfo>> {
sqlx::query_as!( sqlx::query_as!(
UserInfo, UserInfo,
"SELECT user_id, username, email FROM users WHERE user_id = $1", "SELECT user_id, username, email FROM users WHERE user_id = $1",
user_id user_id
) )
.fetch_one(pool) .fetch_optional(pool)
.await .await
} }
/// Gets the hashed password field by either the email or the username
pub async fn get_hash(search_string: &str, pool: &Pool) -> sqlx::Result<Option<(i32, Vec<u8>)>> {
let record = sqlx::query!(
"SELECT user_id, hashed_password FROM users WHERE username = $1 OR email = $1",
search_string
)
.fetch_optional(pool)
.await?;
Ok(record.map(|record| (record.user_id, record.hashed_password)))
}
#[derive(Serialize, Debug)]
pub struct UserSearch {
pub user_id: i32,
pub username: String,
pub email: String,
pub similarity: f32,
}
pub fn search_for_user<'a>( pub fn search_for_user<'a>(
search_string: &str, search_string: &str,
pool: &'a Pool, pool: &'a Pool,
) -> BoxStream<'a, sqlx::Result<UserInfo>> { ) -> BoxStream<'a, sqlx::Result<UserSearch>> {
sqlx::query_file_as!(UserInfo, "sql/search_for_user.sql", search_string).fetch(pool) sqlx::query_file_as!(UserSearch, "sql/search_for_user.sql", search_string).fetch(pool)
} }

View File

@ -1,4 +1,4 @@
pub use crate::prelude::*; use crate::prelude::*;
#[derive(Deserialize, Debug)] #[derive(Deserialize, Debug)]
pub struct Params { pub struct Params {
@ -9,24 +9,20 @@ pub async fn delete(
Query(params): Query<Params>, Query(params): Query<Params>,
State(state): State<AppState>, State(state): State<AppState>,
claims: Claims, claims: Claims,
) -> Result<StatusCode, StatusCode> { ) -> GeneralResult<EmptyResponse> {
db::file::get_permissions(params.file_id, claims.user_id, &state.pool) db::file::get_permissions(params.file_id, claims.user_id, &state.pool)
.await .await
.handle_internal()?
.can_write_guard()?; .can_write_guard()?;
let deleted = db::file::delete(params.file_id, &state.pool) db::file::delete(params.file_id, &state.pool)
.await .await
.handle_internal()?; .handle_internal("Error deleting the file")?;
if !deleted {
return Err(StatusCode::NOT_FOUND); // Will not happen most of the time due to can write guard
}
state state
.storage .storage
.delete(params.file_id) .delete(params.file_id)
.await .await
.handle_internal()?; .handle_internal("Error deleting the file")?;
Ok(StatusCode::NO_CONTENT) Ok(EmptyResponse)
} }

View File

@ -12,16 +12,15 @@ pub async fn download(
Query(params): Query<Params>, Query(params): Query<Params>,
State(state): State<AppState>, State(state): State<AppState>,
claims: Claims, claims: Claims,
) -> Result<impl IntoResponse, StatusCode> { ) -> GeneralResult<impl IntoResponse> {
db::file::get_permissions(params.file_id, claims.user_id, &state.pool) db::file::get_permissions(params.file_id, claims.user_id, &state.pool)
.await .await
.handle_internal()?
.can_read_guard()?; .can_read_guard()?;
let mut name = db::file::get_name(params.file_id, &state.pool) let mut name = db::file::get_name(params.file_id, &state.pool)
.await .await
.handle_internal()? .handle_internal("Error getting file info")?
.ok_or(StatusCode::NOT_FOUND)?; .item_not_found()?;
name = name name = name
.chars() .chars()
.fold(String::with_capacity(name.len()), |mut result, char| { .fold(String::with_capacity(name.len()), |mut result, char| {
@ -32,7 +31,11 @@ pub async fn download(
result result
}); });
let file = state.storage.read(params.file_id).await.handle_internal()?; let file = state
.storage
.read(params.file_id)
.await
.handle_internal("Error reading the file")?;
let body = Body::from_stream(ReaderStream::new(file)); let body = Body::from_stream(ReaderStream::new(file));
let disposition = format!("attachment; filename=\"{name}\""); let disposition = format!("attachment; filename=\"{name}\"");
let headers = [(header::CONTENT_DISPOSITION, disposition)]; let headers = [(header::CONTENT_DISPOSITION, disposition)];

View File

@ -12,10 +12,9 @@ pub async fn modify(
State(state): State<AppState>, State(state): State<AppState>,
claims: Claims, claims: Claims,
mut multipart: Multipart, mut multipart: Multipart,
) -> Result<StatusCode, StatusCode> { ) -> GeneralResult<EmptyResponse> {
db::file::get_permissions(params.file_id, claims.user_id, &state.pool) db::file::get_permissions(params.file_id, claims.user_id, &state.pool)
.await .await
.handle_internal()?
.can_write_guard()?; .can_write_guard()?;
// Very weird work around to get the first file in multipart // Very weird work around to get the first file in multipart
@ -23,7 +22,12 @@ pub async fn modify(
match multipart.next_field().await { match multipart.next_field().await {
Ok(Some(field)) if field.file_name().is_some() => break field, Ok(Some(field)) if field.file_name().is_some() => break field,
Ok(Some(_)) => continue, Ok(Some(_)) => continue,
_ => return Err(StatusCode::BAD_REQUEST), _ => {
return Err(GeneralError::message(
StatusCode::BAD_REQUEST,
"No file in the multipart",
))
}
} }
}; };
@ -31,19 +35,22 @@ pub async fn modify(
.storage .storage
.write(params.file_id) .write(params.file_id)
.await .await
.handle_internal()? .handle_internal("Error writing to the file")?
.ok_or(StatusCode::NOT_FOUND)?; .item_not_found()?;
let (hash, size) = crate::FileStorage::write_to_file(&mut file, &mut field) let (hash, size) = crate::FileStorage::write_to_file(&mut file, &mut field)
.await .await
.map_err(|err| { .map_err(|err| {
tracing::warn!(%err); tracing::warn!(%err);
StatusCode::INTERNAL_SERVER_ERROR GeneralError::message(
StatusCode::INTERNAL_SERVER_ERROR,
"Error writing to the file",
)
})?; })?;
db::file::update(params.file_id, size, hash, &state.pool) db::file::update(params.file_id, size, hash, &state.pool)
.await .await
.handle_internal()?; .handle_internal("Error updating the file")?;
Ok(StatusCode::NO_CONTENT) Ok(EmptyResponse)
} }

View File

@ -1,8 +1,9 @@
use std::collections::{HashMap, HashSet}; use std::{
collections::{HashMap, HashSet},
fmt::Write as _,
};
use axum::extract::multipart::{self, Multipart}; use axum::extract::multipart::{self, Multipart};
use futures::TryStreamExt;
use tokio::io::AsyncWrite;
use crate::prelude::*; use crate::prelude::*;
@ -11,25 +12,60 @@ pub struct Params {
parent_folder: Uuid, parent_folder: Uuid,
} }
#[derive(Serialize, Debug, Default)]
pub struct Response {
success: HashMap<Box<str>, Uuid>,
error: HashMap<Box<str>, &'static str>,
}
fn validate_name(name: &str, existing_names: &HashSet<String>) -> Result<(), &'static str> {
if name.len() > 255 {
return Err("Name too long");
}
if existing_names.contains(name) {
return Err("Item with that name already exists");
}
Ok(())
}
async fn create_file( async fn create_file(
file_id: Uuid, storage: &crate::FileStorage,
file: impl AsyncWrite + Unpin,
file_name: &str, file_name: &str,
field: &mut multipart::Field<'_>, field: &mut multipart::Field<'_>,
parent_folder: Uuid, parent_folder: Uuid,
pool: &Pool, pool: &Pool,
) -> bool { ) -> anyhow::Result<Uuid> {
let (hash, size) = match crate::FileStorage::write_to_file(file, field).await { let (file_id, file) = storage.create().await?;
Ok(values) => values, let result = async {
let (hash, size) = crate::FileStorage::write_to_file(file, field).await?;
db::file::insert(file_id, parent_folder, file_name, size, hash, pool).await?;
anyhow::Result::Ok(())
}
.await;
match result {
Ok(()) => Ok(file_id),
Err(err) => { Err(err) => {
tracing::warn!(%err); let _ = storage.delete(file_id).await;
return false; Err(err)
} }
}; }
db::file::insert(file_id, parent_folder, file_name, size, hash, pool) }
async fn parse_field(
field: &mut multipart::Field<'_>,
name: &str,
storage: &crate::FileStorage,
parent_folder: Uuid,
pool: &Pool,
existing_names: &HashSet<String>,
) -> Result<Uuid, &'static str> {
validate_name(name, existing_names)?;
create_file(storage, name, field, parent_folder, pool)
.await .await
.inspect_err(|err| tracing::warn!(%err)) .map_err(|err| {
.is_ok() tracing::warn!(%err, "Error creating the file");
"Error creating the file"
})
} }
pub async fn upload( pub async fn upload(
@ -37,43 +73,57 @@ pub async fn upload(
State(state): State<AppState>, State(state): State<AppState>,
claims: Claims, claims: Claims,
mut multi: Multipart, mut multi: Multipart,
) -> Result<Json<HashMap<String, Uuid>>, StatusCode> { ) -> GeneralResult<Json<Response>> {
db::folder::get_permissions(params.parent_folder, claims.user_id, &state.pool) db::folder::get_permissions(params.parent_folder, claims.user_id, &state.pool)
.await .await
.handle_internal()?
.can_write_guard()?; .can_write_guard()?;
let existing_names: HashSet<String> = db::folder::get_names(params.parent_folder, &state.pool) let existing_names: HashSet<String> = db::folder::get_names(params.parent_folder, &state.pool)
.try_collect() .try_collect()
.await .await
.handle_internal()?; .handle_internal("Error getting existing names")?;
let mut result = HashMap::new();
let mut response = Response::default();
while let Ok(Some(mut field)) = multi.next_field().await { while let Ok(Some(mut field)) = multi.next_field().await {
let Some(file_name) = field.file_name().map(ToOwned::to_owned) else { let Some(file_name) = field.file_name().map(Box::<str>::from) else {
continue; continue;
}; };
if existing_names.contains(&file_name) {
continue; let parse_result = parse_field(
}
let Ok((file_id, mut file)) = state.storage.create().await else {
tracing::warn!("Couldn't create uuid for new file");
continue;
};
let is_success = create_file(
file_id,
&mut file,
&file_name,
&mut field, &mut field,
&file_name,
&state.storage,
params.parent_folder, params.parent_folder,
&state.pool, &state.pool,
&existing_names,
) )
.await; .await;
if !is_success {
let _ = state.storage.delete(file_id).await; match parse_result {
continue; Ok(uuid) => {
response.success.insert(file_name, uuid);
}
Err(err) => {
response.error.insert(file_name, err);
}
} }
result.insert(file_name, file_id);
} }
Ok(Json(result)) if !response.success.is_empty() {
return Ok(Json(response));
}
if response.error.is_empty() {
return Err(GeneralError::message(
StatusCode::BAD_REQUEST,
"No files sent",
));
}
let mut message = "No file successfully uploaded:".to_owned();
for (key, val) in response.error {
write!(message, "\n{key}: {val}").unwrap();
}
Err(GeneralError::message(StatusCode::BAD_REQUEST, message))
} }

View File

@ -10,27 +10,30 @@ pub async fn create(
State(pool): State<Pool>, State(pool): State<Pool>,
claims: Claims, claims: Claims,
Json(params): Json<Params>, Json(params): Json<Params>,
) -> Result<Json<Uuid>, StatusCode> { ) -> GeneralResult<Json<Uuid>> {
db::folder::get_permissions(params.parent_folder_id, claims.user_id, &pool) db::folder::get_permissions(params.parent_folder_id, claims.user_id, &pool)
.await .await
.handle_internal()?
.can_write_guard()?; .can_write_guard()?;
if params.folder_name.len() > 255 {
return Err(GeneralError::message(
StatusCode::BAD_REQUEST,
"Folder name too long",
));
}
let exists = db::folder::name_exists(params.parent_folder_id, &params.folder_name, &pool) let exists = db::folder::name_exists(params.parent_folder_id, &params.folder_name, &pool)
.await .await
.handle_internal()?; .handle_internal("Error getting existing names")?;
if exists { if exists {
return Err(StatusCode::CONFLICT); return Err(GeneralError::message(
StatusCode::CONFLICT,
"Name already taken",
));
} }
let id = db::folder::insert( db::folder::insert(params.parent_folder_id, &params.folder_name, &pool)
params.parent_folder_id, .await
claims.user_id, .handle_internal("Error creating the folder")
&params.folder_name, .map(Json)
&pool,
)
.await
.handle_internal()?;
Ok(Json(id))
} }

View File

@ -1,5 +1,3 @@
use futures::TryStreamExt;
use crate::prelude::*; use crate::prelude::*;
#[derive(Deserialize, Debug)] #[derive(Deserialize, Debug)]
@ -10,18 +8,20 @@ pub struct Params {
pub async fn delete( pub async fn delete(
State(state): State<AppState>, State(state): State<AppState>,
claims: Claims, claims: Claims,
Json(params): Json<Params>, Query(params): Query<Params>,
) -> Result<(), StatusCode> { ) -> GeneralResult<EmptyResponse> {
let root = db::folder::get_root(claims.user_id, &state.pool) let root = db::folder::get_root(claims.user_id, &state.pool)
.await .await
.handle_internal()?; .handle_internal("Error getting the root folder")?;
if params.folder_id == root { if params.folder_id == root {
return Err(StatusCode::BAD_REQUEST); return Err(GeneralError::message(
StatusCode::BAD_REQUEST,
"Cannot delete the root folder",
));
} }
db::folder::get_permissions(params.folder_id, claims.user_id, &state.pool) db::folder::get_permissions(params.folder_id, claims.user_id, &state.pool)
.await .await
.handle_internal()?
.can_write_guard()?; .can_write_guard()?;
let storage = &state.storage; let storage = &state.storage;
@ -31,5 +31,7 @@ pub async fn delete(
Ok(()) Ok(())
}) })
.await .await
.handle_internal() .handle_internal("Error deleting the fodler")?;
Ok(EmptyResponse)
} }

View File

@ -0,0 +1,56 @@
use db::{file::FileWithoutParentId, folder::FolderWithoutParentId};
use tokio::try_join;
use super::list::Params;
use crate::prelude::*;
#[derive(Serialize, Debug)]
pub struct FolderStructure {
#[serde(flatten)]
folder_base: FolderWithoutParentId,
folders: Vec<FolderStructure>,
files: Vec<FileWithoutParentId>,
}
impl From<FolderWithoutParentId> for FolderStructure {
fn from(value: FolderWithoutParentId) -> Self {
FolderStructure {
folder_base: value,
folders: Vec::new(),
files: Vec::new(),
}
}
}
pub async fn structure(
Query(params): Query<Params>,
State(pool): State<Pool>,
claims: Claims,
) -> GeneralResult<Json<FolderStructure>> {
let folder_id = db::folder::process_id(params.folder_id, claims.user_id, &pool)
.await
.handle_internal("Error processing id")?
.item_not_found()?;
let folder = db::folder::get_by_id(folder_id, &pool)
.await
.handle_internal("Error getting folder info")?
.item_not_found()?;
let mut response: FolderStructure = folder.into();
let mut stack = vec![&mut response];
while let Some(folder) = stack.pop() {
let (files, folders) = try_join!(
db::file::get_files(folder.folder_base.folder_id, &pool).try_collect(),
db::folder::get_folders(folder.folder_base.folder_id, claims.user_id, &pool)
.map_ok(Into::into)
.try_collect()
)
.handle_internal("Error getting folder contents")?;
folder.folders = folders;
folder.files = files;
stack.extend(folder.folders.iter_mut());
}
Ok(Json(response))
}

View File

@ -4,7 +4,7 @@ use crate::prelude::*;
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
pub struct Params { pub struct Params {
folder_id: Option<Uuid>, pub(super) folder_id: Option<Uuid>,
} }
#[derive(Debug, Serialize)] #[derive(Debug, Serialize)]
@ -18,17 +18,17 @@ pub async fn list(
Query(params): Query<Params>, Query(params): Query<Params>,
State(pool): State<Pool>, State(pool): State<Pool>,
claims: Claims, claims: Claims,
) -> Result<Json<Response>, StatusCode> { ) -> GeneralResult<Json<Response>> {
let folder_id = db::folder::get_by_id(params.folder_id, claims.user_id, &pool) let folder_id = db::folder::process_id(params.folder_id, claims.user_id, &pool)
.await .await
.handle_internal()? .handle_internal("Error processing id")?
.ok_or(StatusCode::NOT_FOUND)?; .handle(StatusCode::NOT_FOUND, "Item not found")?;
let (files, folders) = try_join!( let (files, folders) = try_join!(
db::file::get_files(folder_id, &pool), db::file::get_files(folder_id, &pool).try_collect(),
db::folder::get_folders(folder_id, &pool) db::folder::get_folders(folder_id, claims.user_id, &pool).try_collect()
) )
.handle_internal()?; .handle_internal("Error getting folder contents")?;
Ok(Json(Response { Ok(Json(Response {
folder_id, folder_id,

View File

@ -1,3 +1,4 @@
pub mod create; pub mod create;
pub mod delete; pub mod delete;
pub mod get_structure;
pub mod list; pub mod list;

View File

@ -9,18 +9,17 @@ pub struct Params {
pub async fn delete( pub async fn delete(
State(pool): State<Pool>, State(pool): State<Pool>,
claims: Claims, claims: Claims,
Json(params): Json<Params>, Query(params): Query<Params>,
) -> Result<StatusCode, StatusCode> { ) -> GeneralResult<EmptyResponse> {
if params.user_id != claims.user_id { if params.user_id != claims.user_id {
db::folder::get_permissions(params.folder_id, claims.user_id, &pool) db::folder::get_permissions(params.folder_id, claims.user_id, &pool)
.await .await
.handle_internal()?
.can_manage_guard()?; .can_manage_guard()?;
} }
db::permissions::delete_for_folder(params.folder_id, params.user_id, &pool) db::permissions::delete_for_folder(params.folder_id, params.user_id, &pool)
.await .await
.handle_internal()?; .handle_internal("Error deleting the permissions")?;
Ok(StatusCode::NO_CONTENT) Ok(EmptyResponse)
} }

View File

@ -13,14 +13,13 @@ pub async fn get(
State(pool): State<Pool>, State(pool): State<Pool>,
Query(params): Query<Params>, Query(params): Query<Params>,
claims: Claims, claims: Claims,
) -> Result<Json<HashMap<String, PermissionRaw>>, StatusCode> { ) -> GeneralResult<Json<HashMap<i32, PermissionRaw>>> {
db::folder::get_permissions(params.folder_id, claims.user_id, &pool) db::folder::get_permissions(params.folder_id, claims.user_id, &pool)
.await .await
.handle_internal()? .can_read_guard()?;
.can_manage_guard()?;
let permissions = db::permissions::get_all_for_folder(params.folder_id, &pool) db::permissions::get_all_for_folder(params.folder_id, &pool)
.await .await
.handle_internal()?; .handle_internal("Error getting permissions")
Ok(Json(permissions)) .map(Json)
} }

View File

@ -1,11 +1,13 @@
use db::folder::FolderWithoutParentId;
use crate::prelude::*; use crate::prelude::*;
pub async fn get_top_level( pub async fn get_top_level(
State(pool): State<Pool>, State(pool): State<Pool>,
claims: Claims, claims: Claims,
) -> Result<Json<Vec<Uuid>>, StatusCode> { ) -> GeneralResult<Json<Vec<FolderWithoutParentId>>> {
let folders = db::permissions::get_top_level_permitted_folders(claims.user_id, &pool) db::permissions::get_top_level_permitted_folders(claims.user_id, &pool)
.await .await
.handle_internal()?; .handle_internal("Error reading from the database")
Ok(Json(folders)) .map(Json)
} }

View File

@ -1,6 +1,4 @@
use db::permissions::PermissionRaw; use crate::{db::permissions::PermissionRaw, prelude::*};
use crate::prelude::*;
#[derive(Deserialize, Debug)] #[derive(Deserialize, Debug)]
pub struct Params { pub struct Params {
@ -13,19 +11,39 @@ pub async fn set(
claims: Claims, claims: Claims,
State(pool): State<Pool>, State(pool): State<Pool>,
Json(params): Json<Params>, Json(params): Json<Params>,
) -> Result<StatusCode, StatusCode> { ) -> GeneralResult<EmptyResponse> {
let root = db::folder::get_root(claims.user_id, &pool) let root = db::folder::get_root(claims.user_id, &pool)
.await .await
.handle_internal()?; .handle_internal("Error getting the root folder")?;
if params.folder_id == root { if params.folder_id == root {
return Err(StatusCode::BAD_REQUEST); return Err(GeneralError::message(
StatusCode::BAD_REQUEST,
"Cannot set permissions for the root folder",
));
} }
db::folder::get_permissions(params.folder_id, claims.user_id, &pool) db::folder::get_permissions(params.folder_id, claims.user_id, &pool)
.await .await
.handle_internal()?
.can_manage_guard()?; .can_manage_guard()?;
if params.user_id == claims.user_id {
return Err(GeneralError::message(
StatusCode::BAD_REQUEST,
"Cannot set your own permissions",
));
}
let folder_info = db::folder::get_by_id(params.folder_id, &pool)
.await
.handle_internal("Error getting folder info")?
.item_not_found()?;
if folder_info.owner_id == params.user_id {
return Err(GeneralError::message(
StatusCode::BAD_REQUEST,
"Cannot set permissions of the folder's owner",
));
}
db::permissions::insert( db::permissions::insert(
params.user_id, params.user_id,
params.folder_id, params.folder_id,
@ -33,7 +51,7 @@ pub async fn set(
&pool, &pool,
) )
.await .await
.handle_internal()?; .handle_internal("Error writing to the database")?;
Ok(StatusCode::NO_CONTENT) Ok(EmptyResponse)
} }

View File

@ -1,17 +0,0 @@
use crate::prelude::*;
#[derive(Deserialize, Debug)]
pub struct Params {
username: String,
email: String,
}
pub async fn create(
State(pool): State<Pool>,
Json(params): Json<Params>,
) -> Result<Json<i32>, StatusCode> {
let id = db::users::create_user(&params.username, &params.email, &pool)
.await
.handle_internal()?;
Ok(Json(id))
}

View File

@ -1,16 +1,19 @@
use futures::TryStreamExt; use std::time::Duration;
use crate::prelude::*; use crate::prelude::*;
pub async fn delete( pub async fn delete(
State(AppState { pool, ref storage }): State<AppState>, State(AppState { pool, ref storage }): State<AppState>,
claims: Claims, claims: Claims,
) -> Result<(), StatusCode> { ) -> GeneralResult<EmptyResponse> {
tokio::time::sleep(Duration::from_secs(100)).await;
db::users::delete_user(claims.user_id, &pool) db::users::delete_user(claims.user_id, &pool)
.try_for_each_concurrent(5, |file_id| async move { .try_for_each_concurrent(5, |file_id| async move {
let _ = storage.delete(file_id).await; let _ = storage.delete(file_id).await;
Ok(()) Ok(())
}) })
.await .await
.handle_internal() .handle_internal("Error deleting the user")?;
Ok(EmptyResponse)
} }

View File

@ -5,12 +5,22 @@ pub struct Params {
user_id: i32, user_id: i32,
} }
pub async fn get( type Response = GeneralResult<Json<db::users::UserInfo>>;
State(pool): State<Pool>,
Query(params): Query<Params>, pub async fn get(State(pool): State<Pool>, Query(params): Query<Params>) -> Response {
) -> Result<Json<db::users::UserInfo>, StatusCode> { db::users::get(params.user_id, &pool)
let info = db::users::get(params.user_id, &pool)
.await .await
.handle_internal()?; .handle_internal("Error getting the user")?
Ok(Json(info)) .handle(StatusCode::NOT_FOUND, "User not found")
.map(Json)
}
pub async fn current(state: State<Pool>, claims: Claims) -> Response {
get(
state,
Query(Params {
user_id: claims.user_id,
}),
)
.await
} }

View File

@ -0,0 +1,26 @@
use axum::Form;
use crate::{
auth::{authenticate_user, Token},
prelude::*,
};
#[derive(Deserialize, Debug)]
pub struct Params {
username: String,
password: String,
}
pub async fn login(
State(pool): State<Pool>,
Form(payload): Form<Params>,
) -> GeneralResult<Json<Token>> {
let user_id = authenticate_user(&payload.username, &payload.password, &pool)
.await
.handle_internal("Error getting user from database")?
.handle(
StatusCode::NOT_FOUND,
"User with this name and password doesn't exist",
)?;
Claims::new(user_id).encode().map(Json)
}

View File

@ -1,5 +1,6 @@
pub mod create;
pub mod delete; pub mod delete;
pub mod get; pub mod get;
pub mod login;
pub mod put; pub mod put;
pub mod register;
pub mod search; pub mod search;

View File

@ -1,8 +1,12 @@
use validator::Validate;
use crate::prelude::*; use crate::prelude::*;
#[derive(Deserialize, Debug)] #[derive(Deserialize, Debug, Validate)]
pub struct Params { pub struct Params {
#[validate(length(min = 3, max = 10))]
username: String, username: String,
#[validate(email)]
email: String, email: String,
} }
@ -10,9 +14,10 @@ pub async fn put(
State(pool): State<Pool>, State(pool): State<Pool>,
claims: Claims, claims: Claims,
Json(params): Json<Params>, Json(params): Json<Params>,
) -> Result<Json<db::users::UserInfo>, StatusCode> { ) -> GeneralResult<Json<db::users::UserInfo>> {
let info = db::users::update(claims.user_id, &params.username, &params.email, &pool) params.validate()?;
db::users::update(claims.user_id, &params.username, &params.email, &pool)
.await .await
.handle_internal()?; .handle_internal("Error updating the user")
Ok(Json(info)) .map(Json)
} }

View File

@ -0,0 +1,64 @@
use axum::Form;
use itertools::Itertools;
use validator::{Validate, ValidationError};
use crate::{
auth::{HashedBytes, Token},
prelude::*,
};
#[derive(Deserialize, Debug, Validate)]
pub struct Params {
#[validate(length(min = 3, max = 10))]
username: String,
#[validate(email)]
email: String,
#[validate(length(min = 6), custom(function = "validate_password"))]
password: String,
}
fn validate_password(password: &str) -> Result<(), ValidationError> {
let mut has_lower = false;
let mut has_upper = false;
let mut has_number = false;
let mut has_special = false;
for char in password.chars() {
if char.is_lowercase() {
has_lower = true;
} else if char.is_uppercase() {
has_upper = true;
} else if char.is_ascii_digit() {
has_number = true;
} else {
has_special = true;
}
}
let msg = [has_lower, has_upper, has_number, has_special]
.into_iter()
.zip(["No lower", "No upper", "No numbers", "No special"])
.filter_map(|(param, msg)| (!param).then_some(msg))
.format(" ")
.to_string();
if !msg.is_empty() {
return Err(ValidationError::new("invalid_password").with_message(msg.into()));
}
Ok(())
}
pub async fn register(
State(pool): State<Pool>,
Form(params): Form<Params>,
) -> GeneralResult<Json<Token>> {
params.validate()?;
let password = HashedBytes::hash_bytes(params.password.as_bytes()).as_bytes();
let id = db::users::create_user(&params.username, &params.email, &password, &pool)
.await
.handle_internal("Error creating the user")?
.handle(
StatusCode::BAD_REQUEST,
"The username or the email are taken",
)?;
Claims::new(id).encode().map(Json)
}

View File

@ -1,5 +1,3 @@
use futures::TryStreamExt;
use crate::prelude::*; use crate::prelude::*;
#[derive(Deserialize, Debug)] #[derive(Deserialize, Debug)]
@ -10,11 +8,12 @@ pub struct Params {
pub async fn search( pub async fn search(
State(pool): State<Pool>, State(pool): State<Pool>,
Query(params): Query<Params>, Query(params): Query<Params>,
) -> sqlx::Result<Json<Vec<db::users::UserInfo>>, StatusCode> { ) -> GeneralResult<Json<Vec<db::users::UserSearch>>> {
let users = db::users::search_for_user(&params.search_string, &pool) db::users::search_for_user(&params.search_string, &pool)
.take(20) .take(20)
.try_filter(|user| future::ready(user.similarity > 0.1))
.try_collect() .try_collect()
.await .await
.handle_internal()?; .handle_internal("Error getting users from the database")
Ok(Json(users)) .map(Json)
} }

View File

@ -1,28 +1,103 @@
use axum::http::StatusCode; use std::borrow::Cow;
use axum::{http::StatusCode, response::IntoResponse};
type BoxError = Box<dyn std::error::Error>; type BoxError = Box<dyn std::error::Error>;
pub fn handle_error(error: impl Into<BoxError>) { /// Common error type for the project
let error: BoxError = error.into(); pub struct GeneralError {
tracing::error!(error); /// Response status code
pub status_code: StatusCode,
/// Message to send to the user
pub message: Cow<'static, str>,
/// Error to log
pub error: Option<BoxError>,
} }
pub trait ErrorHandlingExt<T, E> impl GeneralError {
pub fn message(status_code: StatusCode, message: impl Into<Cow<'static, str>>) -> Self {
Self {
status_code,
message: message.into(),
error: None,
}
}
pub const fn const_message(status_code: StatusCode, message: &'static str) -> Self {
Self {
status_code,
message: Cow::Borrowed(message),
error: None,
}
}
}
impl IntoResponse for GeneralError {
fn into_response(self) -> axum::response::Response {
if let Some(err) = self.error {
tracing::error!(err, message = %self.message, status_code = ?self.status_code);
}
(self.status_code, self.message).into_response()
}
}
impl From<validator::ValidationErrors> for GeneralError {
fn from(value: validator::ValidationErrors) -> Self {
GeneralError::message(StatusCode::BAD_REQUEST, value.to_string())
}
}
pub type GeneralResult<T> = Result<T, GeneralError>;
pub trait ErrorHandlingExt<T>
where where
Self: Sized, Self: Sized,
{ {
fn handle(self, code: StatusCode) -> Result<T, StatusCode>; fn handle(
self,
status_code: StatusCode,
message: impl Into<Cow<'static, str>>,
) -> GeneralResult<T>;
fn handle_internal(self) -> Result<T, StatusCode> { fn handle_internal(self, message: impl Into<Cow<'static, str>>) -> GeneralResult<T> {
self.handle(StatusCode::INTERNAL_SERVER_ERROR) self.handle(StatusCode::INTERNAL_SERVER_ERROR, message)
} }
} }
impl<T, E: Into<BoxError>> ErrorHandlingExt<T, E> for Result<T, E> { impl<T, E: Into<BoxError>> ErrorHandlingExt<T> for Result<T, E> {
fn handle(self, code: StatusCode) -> Result<T, StatusCode> { fn handle(
self.map_err(|err| { self,
handle_error(err); status_code: StatusCode,
code message: impl Into<Cow<'static, str>>,
) -> GeneralResult<T> {
self.map_err(|err| GeneralError {
status_code,
message: message.into(),
error: Some(err.into()),
}) })
} }
} }
impl<T> ErrorHandlingExt<T> for Option<T> {
fn handle(
self,
status_code: StatusCode,
message: impl Into<Cow<'static, str>>,
) -> GeneralResult<T> {
self.ok_or_else(|| GeneralError {
status_code,
message: message.into(),
error: None,
})
}
}
pub trait ItemNotFoundExt<T> {
fn item_not_found(self) -> Result<T, GeneralError>;
}
impl<T> ItemNotFoundExt<T> for Option<T> {
fn item_not_found(self) -> GeneralResult<T> {
self.handle(StatusCode::NOT_FOUND, "Item not found")
}
}

View File

@ -5,14 +5,14 @@ use std::{
}; };
use axum::body::Bytes; use axum::body::Bytes;
use futures::{Stream, StreamExt};
use sha2::Digest as _; use sha2::Digest as _;
use tokio::{ use tokio::{
fs, fs,
io::{AsyncWrite, AsyncWriteExt, BufWriter}, io::{AsyncWrite, AsyncWriteExt, BufWriter},
}; };
use tokio_util::io::StreamReader; use tokio_util::io::StreamReader;
use uuid::Uuid;
use crate::prelude::*;
#[derive(Clone)] #[derive(Clone)]
pub struct FileStorage(Arc<Path>); pub struct FileStorage(Arc<Path>);
@ -95,6 +95,7 @@ impl FileStorage {
const BUF_CAP: usize = 64 * 1024 * 1024; // 64 MiB const BUF_CAP: usize = 64 * 1024 * 1024; // 64 MiB
let mut hash = sha2::Sha512::new(); let mut hash = sha2::Sha512::new();
let mut size: i64 = 0; let mut size: i64 = 0;
let stream = stream.map(|value| { let stream = stream.map(|value| {
let bytes = value.map_err(io::Error::other)?; let bytes = value.map_err(io::Error::other)?;
hash.update(&bytes); hash.update(&bytes);
@ -104,10 +105,12 @@ impl FileStorage {
.ok_or_else(|| io::Error::other(anyhow::anyhow!("Size calculation overflow")))?; .ok_or_else(|| io::Error::other(anyhow::anyhow!("Size calculation overflow")))?;
io::Result::Ok(bytes) io::Result::Ok(bytes)
}); });
let mut reader = StreamReader::new(stream); let mut reader = StreamReader::new(stream);
let mut writer = BufWriter::with_capacity(BUF_CAP, file); let mut writer = BufWriter::with_capacity(BUF_CAP, file);
tokio::io::copy_buf(&mut reader, &mut writer).await?; tokio::io::copy_buf(&mut reader, &mut writer).await?;
writer.flush().await?; writer.flush().await?;
let hash = hash.finalize().to_vec(); let hash = hash.finalize().to_vec();
Ok((hash, size)) Ok((hash, size))
} }

View File

@ -4,27 +4,18 @@ mod endpoints;
mod errors; mod errors;
mod file_storage; mod file_storage;
mod prelude; mod prelude;
mod util;
use std::{env, net::Ipv4Addr};
use axum::{extract::FromRef, Router};
use file_storage::FileStorage; use file_storage::FileStorage;
use tokio::net::TcpListener;
type Pool = sqlx::postgres::PgPool; type Pool = sqlx::postgres::PgPool;
#[derive(Clone)] #[derive(Clone, axum::extract::FromRef)]
struct AppState { struct AppState {
pool: Pool, pool: Pool,
storage: FileStorage, storage: FileStorage,
} }
impl FromRef<AppState> for Pool {
fn from_ref(input: &AppState) -> Self {
input.pool.clone()
}
}
async fn create_test_users(pool: &Pool) -> anyhow::Result<()> { async fn create_test_users(pool: &Pool) -> anyhow::Result<()> {
let count = sqlx::query!("SELECT count(user_id) FROM users") let count = sqlx::query!("SELECT count(user_id) FROM users")
.fetch_one(pool) .fetch_one(pool)
@ -34,21 +25,52 @@ async fn create_test_users(pool: &Pool) -> anyhow::Result<()> {
if count > 0 { if count > 0 {
return Ok(()); return Ok(());
} }
let hash1 = auth::HashedBytes::hash_bytes(b"Password1").as_bytes();
let hash2 = auth::HashedBytes::hash_bytes(b"Password2").as_bytes();
tokio::try_join!( tokio::try_join!(
db::users::create_user("Test1", "test1@example.com", pool), db::users::create_user("Test1", "test1@example.com", &hash1, pool),
db::users::create_user("Test2", "test2@example.com", pool) db::users::create_user("Test2", "test2@example.com", &hash2, pool)
)?; )?;
Ok(()) Ok(())
} }
/// Installs the global tracing subscriber.
///
/// The filter comes from the `RUST_LOG` environment variable when it parses;
/// otherwise a hard-coded default is used and the parse error is reported
/// once the subscriber is live (logging before `init` would be lost).
fn init_tracing() {
    use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};

    // Resolve the filter up front, remembering any parse failure so it can be
    // logged after initialization.
    let (filter, parse_error) = match tracing_subscriber::EnvFilter::try_from_default_env() {
        Ok(filter) => (filter, None),
        Err(err) => (
            "debug,sqlx=info,axum::rejection=trace".parse().unwrap(),
            Some(err),
        ),
    };

    tracing_subscriber::registry()
        .with(filter)
        .with(tracing_subscriber::fmt::layer())
        .init();

    if let Some(err) = parse_error {
        tracing::info!(
            %err,
            "Error constructing EnvFilter, falling back to using the default"
        );
    }
}
#[tokio::main] #[tokio::main]
async fn main() -> anyhow::Result<()> { async fn main() -> anyhow::Result<()> {
// TODO: add utoipa and utoipauto for swagger use std::{env, net::Ipv4Addr};
if env::var("RUST_BACKTRACE").is_err() {
env::set_var("RUST_BACKTRACE", "1");
}
let _ = dotenvy::dotenv(); let _ = dotenvy::dotenv();
tracing_subscriber::fmt::init(); init_tracing();
auth::force_init_keys();
let pool = match env::var("DATABASE_URL") { let pool = match env::var("DATABASE_URL") {
Ok(url) => Pool::connect(&url).await?, Ok(url) => Pool::connect(&url).await?,
@ -66,44 +88,102 @@ async fn main() -> anyhow::Result<()> {
let router = app(state); let router = app(state);
let addr = (Ipv4Addr::UNSPECIFIED, 3000); let addr = (Ipv4Addr::UNSPECIFIED, 3000);
let listener = TcpListener::bind(addr).await?; let listener = tokio::net::TcpListener::bind(addr).await?;
axum::serve(listener, router).await?; axum::serve(listener, router)
.with_graceful_shutdown(shutdown_signal())
.await?;
Ok(()) Ok(())
} }
/// Resolves when the process receives a shutdown request, enabling axum's
/// graceful shutdown.
///
/// Listens for Ctrl+C on all platforms; on Unix it additionally listens for
/// SIGTERM (the signal sent by `docker stop` and most service managers) and
/// completes on whichever arrives first.
async fn shutdown_signal() {
    use tokio::signal;

    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    {
        let terminate = async {
            signal::unix::signal(signal::unix::SignalKind::terminate())
                .expect("failed to install signal handler")
                .recv()
                .await;
        };

        // Race the two signal sources; either one triggers shutdown.
        tokio::select! {
            () = ctrl_c => {}
            () = terminate => {}
        }
    }

    // Non-Unix targets only have Ctrl+C available.
    #[cfg(not(unix))]
    ctrl_c.await;
}
fn app(state: AppState) -> axum::Router {
use axum::{
extract::DefaultBodyLimit,
handler::Handler as _,
http::header,
routing::{get, post},
};
use endpoints::{ use endpoints::{
file, folder, file, folder,
permissions::{self, get_top_level::get_top_level}, permissions::{self, get_top_level::get_top_level},
users, users,
}; };
use tower_http::ServiceBuilderExt as _; use tower_http::{
timeout::TimeoutLayer,
trace::{MakeSpan, TraceLayer},
ServiceBuilderExt as _,
};
let sensitive_headers = [header::AUTHORIZATION, header::COOKIE]; #[derive(Clone, Copy)]
struct SpanMaker;
let middleware = tower::ServiceBuilder::new() impl<B> MakeSpan<B> for SpanMaker {
.sensitive_headers(sensitive_headers) fn make_span(&mut self, request: &axum::http::Request<B>) -> tracing::Span {
.trace_for_http() tracing::debug_span!(
"request",
method = %request.method(),
uri = %request.uri(),
version = ?request.version(),
headers = ?request.headers(),
request_id = %uuid::Uuid::now_v7()
)
}
}
const TEN_GIBIBYTES: usize = 10 * 1024 * 1024 * 1024;
let body_limit = DefaultBodyLimit::max(TEN_GIBIBYTES);
let timeout = TimeoutLayer::new(std::time::Duration::from_secs(10));
let common_middleware = tower::ServiceBuilder::new()
.sensitive_headers([header::AUTHORIZATION, header::COOKIE])
.layer(TraceLayer::new_for_http().make_span_with(SpanMaker))
.compression(); .compression();
// Build route service let file_router = axum::Router::new().route(
Router::new() "/",
.route( get(file::download::download)
"/files", .post(file::upload::upload.layer(body_limit.clone()))
get(file::download::download) .delete(file::delete::delete.layer(timeout))
.post(file::upload::upload) .patch(file::modify::modify.layer(body_limit.clone())),
.delete(file::delete::delete) );
.patch(file::modify::modify),
) let general_router = axum::Router::new()
.route( .route(
"/folders", "/folders",
get(folder::list::list) get(folder::list::list)
.post(folder::create::create) .post(folder::create::create)
.delete(folder::delete::delete), .delete(folder::delete::delete),
) )
.route("/folders/structure", get(folder::get_structure::structure))
.route( .route(
"/permissions", "/permissions",
get(permissions::get::get) get(permissions::get::get)
@ -117,11 +197,18 @@ fn app(state: AppState) -> Router {
.route( .route(
"/users", "/users",
get(users::get::get) get(users::get::get)
.post(users::create::create)
.delete(users::delete::delete) .delete(users::delete::delete)
.put(users::put::put), .put(users::put::put),
) )
.route("/users/current", get(users::get::current))
.route("/users/search", get(users::search::search)) .route("/users/search", get(users::search::search))
.layer(middleware) .route("/users/register", post(users::register::register))
.route("/users/authorize", post(users::login::login))
.layer(timeout);
axum::Router::new()
.nest("/files", file_router)
.nest("/", general_router)
.layer(common_middleware)
.with_state(state) .with_state(state)
} }

View File

@ -1,8 +1,14 @@
pub(crate) use crate::{auth::Claims, db, errors::ErrorHandlingExt as _, AppState, Pool}; pub(crate) use crate::{
auth::Claims,
db::{self, permissions::PermissionExt as _},
errors::{ErrorHandlingExt as _, GeneralError, GeneralResult, ItemNotFoundExt as _},
util::EmptyResponse,
AppState, Pool,
};
pub use axum::{ pub use axum::{
extract::{Json, Query, State}, extract::{Json, Query, State},
http::StatusCode, http::StatusCode,
}; };
pub use futures::StreamExt as _; pub use futures::{future, stream::BoxStream, Stream, StreamExt as _, TryStreamExt as _};
pub use serde::{Deserialize, Serialize}; pub use serde::{Deserialize, Serialize};
pub use uuid::Uuid; pub use uuid::Uuid;

11
src/util.rs Normal file
View File

@ -0,0 +1,11 @@
use axum::response::IntoResponse;
use crate::prelude::*;
/// Response type for handlers that succeed without producing a body.
pub struct EmptyResponse;

impl IntoResponse for EmptyResponse {
    /// Renders as `204 No Content` with an empty body.
    fn into_response(self) -> axum::response::Response {
        let status = StatusCode::NO_CONTENT;
        status.into_response()
    }
}