Initial commit

StNicolay 2024-06-27 15:04:57 +03:00
commit e8114c515d
Signed by: StNicolay
GPG Key ID: 9693D04DCD962B0D
40 changed files with 4180 additions and 0 deletions

4
.gitignore vendored Normal file

@@ -0,0 +1,4 @@
target/
.env
.vscode/
files/

20
.sqlx/query-079619fa3c92d73b0d35c5f52c95380c74aa890967d0af21ce7b3b28034d346a.json Normal file

@@ -0,0 +1,20 @@
{
"db_name": "PostgreSQL",
"query": "SELECT count(user_id) FROM users",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "count",
"type_info": "Int8"
}
],
"parameters": {
"Left": []
},
"nullable": [
null
]
},
"hash": "079619fa3c92d73b0d35c5f52c95380c74aa890967d0af21ce7b3b28034d346a"
}

22
.sqlx/query-13785c2084644fe5a0054c67c16e117dcbfae4f15cd09d364706394989739dc3.json Normal file

@@ -0,0 +1,22 @@
{
"db_name": "PostgreSQL",
"query": "WITH RECURSIVE folder_hierarchy AS (\n -- Start with the given directory\n SELECT \n folder_id \n FROM \n folders \n WHERE \n folder_id = $1\n\n UNION ALL\n\n -- Recursively find all subdirectories\n SELECT \n f.folder_id\n FROM \n folders f\n INNER JOIN \n folder_hierarchy fh ON f.parent_folder_id = fh.folder_id\n),\ndeleted_files AS (\n -- Delete the files and return their IDs\n DELETE FROM \n files \n WHERE \n folder_id IN (SELECT folder_id FROM folder_hierarchy)\n RETURNING file_id\n),\ndeleted_folders AS (\n -- Delete the directories\n DELETE FROM \n folders \n WHERE \n folder_id IN (SELECT folder_id FROM folder_hierarchy)\n)\n-- Return the IDs of deleted files\nSELECT \n file_id \nFROM \n deleted_files;\n",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "file_id",
"type_info": "Uuid"
}
],
"parameters": {
"Left": [
"Uuid"
]
},
"nullable": [
false
]
},
"hash": "13785c2084644fe5a0054c67c16e117dcbfae4f15cd09d364706394989739dc3"
}

24
.sqlx/query-3dd4a65d3106d742c2221c0589ac68d4621c6e351f9fbb7aa58629ff2d829234.json Normal file

@@ -0,0 +1,24 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO folders(parent_folder_id, owner_id, folder_name) VALUES ($1, $2, $3) RETURNING folder_id",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "folder_id",
"type_info": "Uuid"
}
],
"parameters": {
"Left": [
"Uuid",
"Int4",
"Varchar"
]
},
"nullable": [
false
]
},
"hash": "3dd4a65d3106d742c2221c0589ac68d4621c6e351f9fbb7aa58629ff2d829234"
}

14
.sqlx/query-480948d23f23a77581e939f274075a1544b314a31910298363f00bd299652502.json Normal file

@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM files WHERE file_id = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Uuid"
]
},
"nullable": []
},
"hash": "480948d23f23a77581e939f274075a1544b314a31910298363f00bd299652502"
}

22
.sqlx/query-548c3b5a2e8967ff1b045673778868303c3c85661504181b32366e8992f7cbb2.json Normal file

@@ -0,0 +1,22 @@
{
"db_name": "PostgreSQL",
"query": "SELECT folder_id FROM folders WHERE owner_id = $1 AND parent_folder_id IS null",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "folder_id",
"type_info": "Uuid"
}
],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": [
false
]
},
"hash": "548c3b5a2e8967ff1b045673778868303c3c85661504181b32366e8992f7cbb2"
}

58
.sqlx/query-5a51ab540453327bdd75f49991f402fac6b1d8fb0a760d420236e2b41d3e7fcf.json Normal file

@@ -0,0 +1,58 @@
{
"db_name": "PostgreSQL",
"query": "SELECT file_id, owner_id, file_name, file_size, sha512, created_at, updated_at FROM files WHERE folder_id = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "file_id",
"type_info": "Uuid"
},
{
"ordinal": 1,
"name": "owner_id",
"type_info": "Int4"
},
{
"ordinal": 2,
"name": "file_name",
"type_info": "Varchar"
},
{
"ordinal": 3,
"name": "file_size",
"type_info": "Int8"
},
{
"ordinal": 4,
"name": "sha512",
"type_info": "Bytea"
},
{
"ordinal": 5,
"name": "created_at",
"type_info": "Timestamp"
},
{
"ordinal": 6,
"name": "updated_at",
"type_info": "Timestamp"
}
],
"parameters": {
"Left": [
"Uuid"
]
},
"nullable": [
false,
false,
false,
false,
false,
false,
false
]
},
"hash": "5a51ab540453327bdd75f49991f402fac6b1d8fb0a760d420236e2b41d3e7fcf"
}

22
.sqlx/query-6c28322e53dd262c72ed9dddf2e224a2d1c74628ce18aaf3a6a94ddbdcafbb12.json Normal file

@@ -0,0 +1,22 @@
{
"db_name": "PostgreSQL",
"query": "SELECT file_name FROM files WHERE file_id = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "file_name",
"type_info": "Varchar"
}
],
"parameters": {
"Left": [
"Uuid"
]
},
"nullable": [
false
]
},
"hash": "6c28322e53dd262c72ed9dddf2e224a2d1c74628ce18aaf3a6a94ddbdcafbb12"
}

22
.sqlx/query-78ae96baca92fb148e2fc82809dea2d5027d778e0387965790a16a83df0b2c81.json Normal file

@@ -0,0 +1,22 @@
{
"db_name": "PostgreSQL",
"query": "SELECT folder_name as name FROM folders WHERE parent_folder_id = $1 UNION SELECT file_name as name FROM files WHERE folder_id = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "name",
"type_info": "Varchar"
}
],
"parameters": {
"Left": [
"Uuid"
]
},
"nullable": [
null
]
},
"hash": "78ae96baca92fb148e2fc82809dea2d5027d778e0387965790a16a83df0b2c81"
}

23
.sqlx/query-9602875e192fd321f3a773aa7eb5145cb0d1e7f31def733fd11394e9ad6c0d21.json Normal file

@@ -0,0 +1,23 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO users(username, email) VALUES ($1, $2) RETURNING user_id",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "user_id",
"type_info": "Int4"
}
],
"parameters": {
"Left": [
"Varchar",
"Varchar"
]
},
"nullable": [
false
]
},
"hash": "9602875e192fd321f3a773aa7eb5145cb0d1e7f31def733fd11394e9ad6c0d21"
}

23
.sqlx/query-9a26dab9efbbbb92b7be27792b581a0156210fdc0aadd3756f7003186f428374.json Normal file

@@ -0,0 +1,23 @@
{
"db_name": "PostgreSQL",
"query": "SELECT file_id FROM files WHERE file_id = $1 AND owner_id = $2",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "file_id",
"type_info": "Uuid"
}
],
"parameters": {
"Left": [
"Uuid",
"Int4"
]
},
"nullable": [
false
]
},
"hash": "9a26dab9efbbbb92b7be27792b581a0156210fdc0aadd3756f7003186f428374"
}

19
.sqlx/query-9a70e24a3de68f4a66718124bd3ca959bd0a992e5e0dda3baae52b8cb545ce66.json Normal file

@@ -0,0 +1,19 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO files(file_id, folder_id, owner_id, file_name, file_size, sha512) VALUES ($1, $2, $3, $4, $5, $6)",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Uuid",
"Uuid",
"Int4",
"Varchar",
"Int8",
"Bytea"
]
},
"nullable": []
},
"hash": "9a70e24a3de68f4a66718124bd3ca959bd0a992e5e0dda3baae52b8cb545ce66"
}

40
.sqlx/query-9cc887509746b773ebbc8c130331b768f9a1deeab34d56aa3b0a833d718114fe.json Normal file

@@ -0,0 +1,40 @@
{
"db_name": "PostgreSQL",
"query": "SELECT folder_id, owner_id, folder_name, created_at FROM folders WHERE parent_folder_id = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "folder_id",
"type_info": "Uuid"
},
{
"ordinal": 1,
"name": "owner_id",
"type_info": "Int4"
},
{
"ordinal": 2,
"name": "folder_name",
"type_info": "Varchar"
},
{
"ordinal": 3,
"name": "created_at",
"type_info": "Timestamp"
}
],
"parameters": {
"Left": [
"Uuid"
]
},
"nullable": [
false,
false,
false,
false
]
},
"hash": "9cc887509746b773ebbc8c130331b768f9a1deeab34d56aa3b0a833d718114fe"
}

23
.sqlx/query-cd3591c61f3cc036158d8d55ec22a04adaf62ec4b05ba73da9253128b7bbb5b1.json Normal file

@@ -0,0 +1,23 @@
{
"db_name": "PostgreSQL",
"query": "SELECT EXISTS(SELECT folder_id FROM folders WHERE parent_folder_id = $1 AND folder_name = $2)",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "exists",
"type_info": "Bool"
}
],
"parameters": {
"Left": [
"Uuid",
"Text"
]
},
"nullable": [
null
]
},
"hash": "cd3591c61f3cc036158d8d55ec22a04adaf62ec4b05ba73da9253128b7bbb5b1"
}

23
.sqlx/query-dc98f1a609e67b642aed635b26239328e6456e69c12dc8561c474fff3dcd14d5.json Normal file

@@ -0,0 +1,23 @@
{
"db_name": "PostgreSQL",
"query": "SELECT folder_id FROM folders WHERE folder_id = $1 AND owner_id = $2",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "folder_id",
"type_info": "Uuid"
}
],
"parameters": {
"Left": [
"Uuid",
"Int4"
]
},
"nullable": [
false
]
},
"hash": "dc98f1a609e67b642aed635b26239328e6456e69c12dc8561c474fff3dcd14d5"
}

15
.sqlx/query-f6518f4378226650a10e32212c5065fbae00d5fa0a5470b8ad718422e88315a9.json Normal file

@@ -0,0 +1,15 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO folders(owner_id, folder_name) VALUES ($1, $2)",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Int4",
"Varchar"
]
},
"nullable": []
},
"hash": "f6518f4378226650a10e32212c5065fbae00d5fa0a5470b8ad718422e88315a9"
}

2879
Cargo.lock generated Normal file

File diff suppressed because it is too large

48
Cargo.toml Normal file

@@ -0,0 +1,48 @@
[package]
name = "project"
version = "0.1.0"
edition = "2021"

[profile.release]
debug = 1
lto = true
codegen-units = 1

[lints.clippy]
pedantic = "warn"
all = "warn"

[dependencies]
anyhow = { version = "1", features = ["backtrace"] }
axum = { version = "0.7", features = ["http2", "macros", "multipart"] }
axum-extra = { version = "0.9", features = ["typed-header"] }
chrono = { version = "0.4", features = ["serde"] }
dotenvy = "0.15"
futures = "0.3"
jsonwebtoken = "9"
oauth2 = "4"
serde = { version = "1", features = ["derive"] }
sha2 = "0.10"
sqlx = { version = "0.8", features = [
"postgres",
"runtime-tokio-rustls",
"macros",
"migrate",
"chrono",
"uuid",
] }
tokio = { version = "1", features = ["rt-multi-thread"] }
tokio-util = { version = "0.7" }
tower = { version = "0.4" }
tower-http = { version = "0.5", features = [
"compression-full",
"sensitive-headers",
"trace",
"util",
] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = [
"parking_lot",
"env-filter",
] }
uuid = { version = "1", features = ["serde", "v4"] }

14
compose.yaml Normal file

@@ -0,0 +1,14 @@
services:
  db:
    image: postgres:alpine
    environment:
      - POSTGRES_USER=tester
      - POSTGRES_PASSWORD=testing123!
      - POSTGRES_DB=testing
    ports:
      - 5432:5432
    volumes:
      - postgres_data:/var/lib/postgresql/data

volumes:
  postgres_data:
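
A matching .env (gitignored above) only needs the two variables the code reads; a minimal sketch, assuming the compose mapping above and the default storage path from src/file_storage.rs (the ! in the password may need percent-encoding as %21 depending on the client):

DATABASE_URL=postgres://tester:testing123!@localhost:5432/testing
DRIVE_STORAGE_PATH=./files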


@@ -0,0 +1,9 @@
DROP TABLE permissions;
DROP TABLE files;
DROP TABLE folders;
DROP TABLE users;
DROP TYPE permission;


@@ -0,0 +1,36 @@
CREATE TABLE
users (
user_id SERIAL PRIMARY KEY,
username VARCHAR(50) NOT NULL UNIQUE,
email VARCHAR(100) NOT NULL UNIQUE
);
CREATE TABLE
folders (
folder_id UUID PRIMARY KEY DEFAULT gen_random_uuid (),
parent_folder_id UUID REFERENCES folders (folder_id) ON DELETE CASCADE DEFAULT null,
owner_id INT REFERENCES users (user_id) ON DELETE CASCADE NOT NULL,
folder_name VARCHAR(255) NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE TABLE
files (
file_id UUID PRIMARY KEY DEFAULT gen_random_uuid (),
folder_id UUID REFERENCES folders (folder_id) ON DELETE CASCADE NOT NULL,
file_name VARCHAR(255) NOT NULL,
file_size BIGINT NOT NULL,
sha512 BYTEA NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE TYPE permission AS ENUM ('read', 'write', 'manage');
CREATE TABLE
permissions (
permission_id SERIAL PRIMARY KEY,
user_id INT REFERENCES users (user_id) ON DELETE CASCADE NOT NULL,
folder_id UUID REFERENCES folders (folder_id) ON DELETE CASCADE,
permission_type permission NOT NULL
);
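
A folder whose parent_folder_id is left NULL acts as a user's root (see get_root in src/db/folder.rs). Seeding a user by hand mirrors create_user in src/main.rs; a sketch, where the 1 stands in for the returned user_id:

INSERT INTO users(username, email) VALUES ('Test1', 'test1@example.com') RETURNING user_id;
INSERT INTO folders(owner_id, folder_name) VALUES (1, 'ROOT');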

39
sql/delete_folder.sql Normal file

@@ -0,0 +1,39 @@
WITH RECURSIVE folder_hierarchy AS (
-- Start with the given directory
SELECT
folder_id
FROM
folders
WHERE
folder_id = $1
UNION ALL
-- Recursively find all subdirectories
SELECT
f.folder_id
FROM
folders f
INNER JOIN
folder_hierarchy fh ON f.parent_folder_id = fh.folder_id
),
deleted_files AS (
-- Delete the files and return their IDs
DELETE FROM
files
WHERE
folder_id IN (SELECT folder_id FROM folder_hierarchy)
RETURNING file_id
),
deleted_folders AS (
-- Delete the directories
DELETE FROM
folders
WHERE
folder_id IN (SELECT folder_id FROM folder_hierarchy)
)
-- Return the IDs of deleted files
SELECT
file_id
FROM
deleted_files;
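
Since this statement both walks the subtree and deletes in one go, a read-only variant of the same recursion is handy for previewing what would be removed; a sketch reusing the CTE above:

WITH RECURSIVE folder_hierarchy AS (
    SELECT folder_id FROM folders WHERE folder_id = $1
    UNION ALL
    SELECT f.folder_id
    FROM folders f
    INNER JOIN folder_hierarchy fh ON f.parent_folder_id = fh.folder_id
)
SELECT folder_id FROM folder_hierarchy;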

31
src/auth.rs Normal file

@@ -0,0 +1,31 @@
use axum::{
extract::{FromRequestParts, Query},
http::{request::Parts, StatusCode},
RequestPartsExt,
};
use serde::Deserialize;
use crate::AppState;
#[derive(Deserialize, Debug)]
pub struct Claims {
pub user_id: i32,
}
#[axum::async_trait]
impl FromRequestParts<AppState> for Claims {
type Rejection = StatusCode;
async fn from_request_parts(
parts: &mut Parts,
_state: &AppState,
) -> Result<Self, Self::Rejection> {
match parts.extract().await {
Ok(Query(claims)) => Ok(claims),
Err(err) => {
tracing::debug!(%err, "Authorization failed");
Err(StatusCode::UNAUTHORIZED)
}
}
}
}
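
Claims is a stub extractor for now: it deserializes user_id straight from the query string rather than validating a token (the jsonwebtoken and oauth2 dependencies in Cargo.toml are not wired in yet). In practice every endpoint is addressed like this, with any caller free to claim any id:

GET /folders?user_id=1
DELETE /files?user_id=1&file_id=<uuid>

A missing or non-integer user_id fails extraction, is logged at debug level, and maps to 401 UNAUTHORIZED.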

65
src/db/file.rs Normal file

@@ -0,0 +1,65 @@
use uuid::Uuid;
use crate::Pool;
use super::permissions::PermissionType;
pub async fn insert(
file_id: Uuid,
parent_folder: Uuid,
name: &str,
size: i64,
hash: Vec<u8>,
pool: &Pool,
) -> sqlx::Result<()> {
sqlx::query!("INSERT INTO files(file_id, folder_id, file_name, file_size, sha512) VALUES ($1, $2, $3, $4, $5)", file_id, parent_folder, name, size, hash)
.execute(pool)
.await
.map(|_| ())
}
#[derive(Debug, serde::Serialize)]
#[allow(clippy::struct_field_names, clippy::module_name_repetitions)]
pub struct FileWithoutParentId {
file_id: Uuid,
file_name: String,
file_size: i64,
sha512: Vec<u8>,
created_at: chrono::NaiveDateTime,
updated_at: chrono::NaiveDateTime,
}
pub async fn get_files(folder_id: Uuid, pool: &Pool) -> sqlx::Result<Vec<FileWithoutParentId>> {
sqlx::query_as!(FileWithoutParentId, "SELECT file_id, file_name, file_size, sha512, created_at, updated_at FROM files WHERE folder_id = $1", folder_id)
.fetch_all(pool)
.await
}
pub async fn get_permissions(
file_id: Uuid,
user_id: i32,
pool: &Pool,
) -> sqlx::Result<PermissionType> {
let record = sqlx::query!(
"SELECT file_id FROM files JOIN folders ON files.folder_id = folders.folder_id WHERE file_id = $1 AND owner_id = $2",
file_id,
user_id
)
.fetch_optional(pool)
.await?;
Ok(record.map(|_| PermissionType::Write).unwrap_or_default())
}
pub async fn get_name(file_id: Uuid, pool: &Pool) -> sqlx::Result<Option<String>> {
let record = sqlx::query!("SELECT file_name FROM files WHERE file_id = $1", file_id)
.fetch_optional(pool)
.await?;
Ok(record.map(|record| record.file_name))
}
pub async fn delete(file_id: Uuid, pool: &Pool) -> sqlx::Result<bool> {
sqlx::query!("DELETE FROM files WHERE file_id = $1", file_id)
.execute(pool)
.await
.map(|result| result.rows_affected() > 0)
}

115
src/db/folder.rs Normal file

@@ -0,0 +1,115 @@
use std::collections::HashSet;
use futures::TryStreamExt;
use uuid::Uuid;
use crate::Pool;
use super::permissions::PermissionType;
pub async fn get_permissions(
folder_id: Uuid,
user_id: i32,
pool: &Pool,
) -> sqlx::Result<PermissionType> {
let permission = sqlx::query!(
"SELECT folder_id FROM folders WHERE folder_id = $1 AND owner_id = $2",
folder_id,
user_id
)
.fetch_optional(pool)
.await?
.map(|_| PermissionType::Write)
.unwrap_or_default();
Ok(permission)
}
pub async fn get_names(folder_id: Uuid, pool: &Pool) -> sqlx::Result<HashSet<String>> {
sqlx::query!("SELECT folder_name as name FROM folders WHERE parent_folder_id = $1 UNION SELECT file_name as name FROM files WHERE folder_id = $1", folder_id)
.fetch(pool)
.map_ok(|record| record.name.unwrap())
.try_collect::<HashSet<String>>()
.await
}
pub async fn get_root(user_id: i32, pool: &Pool) -> sqlx::Result<Uuid> {
sqlx::query!(
"SELECT folder_id FROM folders WHERE owner_id = $1 AND parent_folder_id IS null",
user_id
)
.fetch_one(pool)
.await
.map(|row| row.folder_id)
}
pub async fn get_by_id(id: Option<Uuid>, user_id: i32, pool: &Pool) -> sqlx::Result<Option<Uuid>> {
match id {
Some(id) => get_permissions(id, user_id, pool)
.await
.map(|permissions| permissions.can_read().then_some(id)),
None => get_root(user_id, pool).await.map(Some),
}
}
#[derive(Debug, serde::Serialize)]
#[allow(clippy::struct_field_names, clippy::module_name_repetitions)]
pub struct FolderWithoutParentId {
folder_id: Uuid,
owner_id: i32,
folder_name: String,
created_at: chrono::NaiveDateTime,
}
pub async fn get_folders(
parent_folder_id: Uuid,
pool: &Pool,
) -> sqlx::Result<Vec<FolderWithoutParentId>> {
sqlx::query_as!(
FolderWithoutParentId,
"SELECT folder_id, owner_id, folder_name, created_at FROM folders WHERE parent_folder_id = $1",
parent_folder_id,
)
.fetch_all(pool)
.await
}
pub async fn exists_by_name(
parent_folder_id: Uuid,
folder_name: &str,
pool: &Pool,
) -> sqlx::Result<bool> {
sqlx::query!(
"SELECT EXISTS(SELECT folder_id FROM folders WHERE parent_folder_id = $1 AND folder_name = $2)",
parent_folder_id,
folder_name
)
.fetch_one(pool)
.await
.and_then(|row| {
row.exists.ok_or(sqlx::Error::RowNotFound)
})
}
pub async fn insert(
parent_folder_id: Uuid,
user_id: i32,
folder_name: &str,
pool: &Pool,
) -> sqlx::Result<Uuid> {
sqlx::query!("INSERT INTO folders(parent_folder_id, owner_id, folder_name) VALUES ($1, $2, $3) RETURNING folder_id",
parent_folder_id,
user_id,
folder_name
)
.fetch_one(pool)
.await
.map(|record| record.folder_id)
}
pub async fn delete(folder_id: Uuid, pool: &Pool) -> sqlx::Result<Vec<Uuid>> {
sqlx::query_file!("sql/delete_folder.sql", folder_id)
.fetch(pool)
.map_ok(|row| row.file_id)
.try_collect()
.await
}

3
src/db/mod.rs Normal file

@@ -0,0 +1,3 @@
pub mod file;
pub mod folder;
pub mod permissions;

59
src/db/permissions.rs Normal file

@@ -0,0 +1,59 @@
use axum::http::StatusCode;
#[derive(sqlx::Type)]
#[sqlx(type_name = "permission", rename_all = "lowercase")]
pub(super) enum PermissionRaw {
Read,
Write,
Manage,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default)]
pub enum PermissionType {
#[default]
NoPermission = 1,
Read,
Write,
Manage,
}
impl From<Option<PermissionRaw>> for PermissionType {
fn from(value: Option<PermissionRaw>) -> PermissionType {
use PermissionRaw as PR;
match value {
Some(PR::Read) => PermissionType::Read,
Some(PR::Write) => PermissionType::Write,
Some(PR::Manage) => PermissionType::Manage,
None => PermissionType::NoPermission,
}
}
}
impl PermissionType {
pub fn can_read(self) -> bool {
self >= PermissionType::Read
}
pub fn can_read_guard(self) -> Result<(), StatusCode> {
if !self.can_read() {
return Err(StatusCode::NOT_FOUND);
}
Ok(())
}
pub fn can_write_guard(self) -> Result<(), StatusCode> {
self.can_read_guard()?;
if self < PermissionType::Write {
return Err(StatusCode::FORBIDDEN);
}
Ok(())
}
pub fn can_manage_guard(self) -> Result<(), StatusCode> {
self.can_read_guard()?;
if self < PermissionType::Manage {
return Err(StatusCode::FORBIDDEN);
}
Ok(())
}
}
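
The guards lean on the derived Ord, which orders variants by declaration: NoPermission < Read < Write < Manage. A minimal sketch of the intended semantics (hypothetical test, not part of this commit):

#[cfg(test)]
mod tests {
    use super::PermissionType;

    #[test]
    fn ordering_drives_guards() {
        assert!(PermissionType::Write > PermissionType::Read);
        assert_eq!(PermissionType::default(), PermissionType::NoPermission);
        // Read passes the read guard but fails the write guard (403),
        // while NoPermission already fails the read guard (404).
        assert!(PermissionType::Read.can_read_guard().is_ok());
        assert!(PermissionType::Read.can_write_guard().is_err());
        assert!(PermissionType::Manage.can_manage_guard().is_ok());
    }
}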

32
src/endpoints/file/delete.rs Normal file

@@ -0,0 +1,32 @@
pub use crate::prelude::*;
#[derive(Deserialize, Debug)]
pub struct Params {
file_id: Uuid,
}
pub async fn delete(
Query(params): Query<Params>,
State(state): State<AppState>,
claims: Claims,
) -> Result<StatusCode, StatusCode> {
db::file::get_permissions(params.file_id, claims.user_id, &state.pool)
.await
.handle_internal()?
.can_write_guard()?;
let deleted = db::file::delete(params.file_id, &state.pool)
.await
.handle_internal()?;
if !deleted {
return Err(StatusCode::NOT_FOUND); // Normally unreachable: the can_write guard above already found the file
}
state
.storage
.delete(params.file_id)
.await
.handle_internal()?;
Ok(StatusCode::NO_CONTENT)
}

41
src/endpoints/file/download.rs Normal file

@@ -0,0 +1,41 @@
use axum::{body::Body, http::header, response::IntoResponse};
use tokio_util::io::ReaderStream;
use crate::prelude::*;
#[derive(Deserialize, Debug)]
pub struct Params {
file_id: Uuid,
}
pub async fn download(
Query(params): Query<Params>,
State(state): State<AppState>,
claims: Claims,
) -> Result<impl IntoResponse, StatusCode> {
db::file::get_permissions(params.file_id, claims.user_id, &state.pool)
.await
.handle_internal()?
.can_read_guard()?;
let mut name = db::file::get_name(params.file_id, &state.pool)
.await
.handle_internal()?
.ok_or(StatusCode::NOT_FOUND)?;
name = name
.chars()
.fold(String::with_capacity(name.len()), |mut result, char| {
if ['\\', '"'].contains(&char) {
result.push('\\');
}
result.push(char);
result
});
let file = state.storage.read(params.file_id).await.handle_internal()?;
let body = Body::from_stream(ReaderStream::new(file));
let disposition = format!("attachment; filename=\"{name}\"");
let headers = [(header::CONTENT_DISPOSITION, disposition)];
Ok((headers, body))
}

3
src/endpoints/file/mod.rs Normal file

@@ -0,0 +1,3 @@
pub mod delete;
pub mod download;
pub mod upload;

94
src/endpoints/file/upload.rs Normal file

@@ -0,0 +1,94 @@
use std::collections::HashMap;
use std::io;
use axum::extract::multipart::{self, Multipart};
use sha2::Digest as _;
use tokio::io::{AsyncWrite, BufWriter};
use tokio_util::io::StreamReader;
use crate::prelude::*;
#[derive(Debug, Deserialize)]
pub struct Params {
parent_folder: Uuid,
}
async fn write_file(
file_id: Uuid,
file: impl AsyncWrite + Unpin,
file_name: &str,
field: &mut multipart::Field<'_>,
parent_folder: Uuid,
pool: &Pool,
) -> bool {
const BUF_CAP: usize = 64 * 1024 * 1024; // 64 MiB
let mut hash = sha2::Sha512::new();
let mut size: i64 = 0;
let stream = field.map(|value| match value {
Ok(bytes) => {
hash.update(&bytes);
size = i64::try_from(bytes.len())
.ok()
.and_then(|part_size| size.checked_add(part_size))
.ok_or_else(|| io::Error::other(anyhow::anyhow!("Size calculation overflow")))?;
Ok(bytes)
}
Err(err) => Err(io::Error::other(err)),
});
let mut reader = StreamReader::new(stream);
let mut writer = BufWriter::with_capacity(BUF_CAP, file);
if let Err(err) = tokio::io::copy(&mut reader, &mut writer).await {
tracing::warn!(%err);
return false;
}
let hash = hash.finalize().to_vec();
db::file::insert(file_id, parent_folder, file_name, size, hash, pool)
.await
.inspect_err(|err| tracing::warn!(%err))
.is_ok()
}
pub async fn upload(
Query(params): Query<Params>,
State(state): State<AppState>,
claims: Claims,
mut multi: Multipart,
) -> Result<Json<HashMap<String, Uuid>>, StatusCode> {
db::folder::get_permissions(params.parent_folder, claims.user_id, &state.pool)
.await
.handle_internal()?
.can_write_guard()?;
let existing_names = db::folder::get_names(params.parent_folder, &state.pool)
.await
.handle_internal()?;
let mut result = HashMap::new();
while let Ok(Some(mut field)) = multi.next_field().await {
let Some(file_name) = field.file_name().map(ToOwned::to_owned) else {
continue;
};
if existing_names.contains(&file_name) {
continue;
}
let Ok((file_id, mut file)) = state.storage.create().await else {
tracing::warn!("Couldn't create a storage file for the upload");
continue;
};
let is_success = write_file(
file_id,
&mut file,
&file_name,
&mut field,
params.parent_folder,
&state.pool,
)
.await;
if !is_success {
let _ = state.storage.delete(file_id).await;
continue;
}
result.insert(file_name, file_id);
}
Ok(Json(result))
}
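
Assuming the server from src/main.rs on port 3000 and the stub query-string auth, an upload might look like this (the folder UUID is a placeholder):

curl -X POST 'http://localhost:3000/files?user_id=1&parent_folder=<folder-uuid>' -F 'file=@photo.png'

The response maps each stored file name to its new file_id; parts whose names already exist in the folder, or that carry no file name at all, are skipped silently.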

37
src/endpoints/folder/create.rs Normal file

@@ -0,0 +1,37 @@
use crate::prelude::*;
#[derive(Deserialize, Debug)]
pub struct Params {
folder_name: String,
parent_folder_id: Uuid,
}
pub async fn create(
State(state): State<AppState>,
claims: Claims,
Json(params): Json<Params>,
) -> Result<Json<Uuid>, StatusCode> {
db::folder::get_permissions(params.parent_folder_id, claims.user_id, &state.pool)
.await
.handle_internal()?
.can_write_guard()?;
let exists =
db::folder::exists_by_name(params.parent_folder_id, &params.folder_name, &state.pool)
.await
.handle_internal()?;
if exists {
return Err(StatusCode::CONFLICT);
}
let id = db::folder::insert(
params.parent_folder_id,
claims.user_id,
&params.folder_name,
&state.pool,
)
.await
.handle_internal()?;
Ok(Json(id))
}

35
src/endpoints/folder/delete.rs Normal file

@@ -0,0 +1,35 @@
use crate::prelude::*;
#[derive(Deserialize, Debug)]
pub struct Params {
folder_id: Uuid,
}
pub async fn delete(
State(state): State<AppState>,
claims: Claims,
Json(params): Json<Params>,
) -> Result<(), StatusCode> {
let root = db::folder::get_root(claims.user_id, &state.pool)
.await
.handle_internal()?;
if params.folder_id == root {
return Err(StatusCode::BAD_REQUEST);
}
db::folder::get_permissions(params.folder_id, claims.user_id, &state.pool)
.await
.handle_internal()?
.can_write_guard()?;
let files_to_delete = db::folder::delete(params.folder_id, &state.pool)
.await
.handle_internal()?;
let storage = &state.storage;
futures::stream::iter(files_to_delete)
.for_each_concurrent(5, |file| async move {
let _ = storage.delete(file).await;
})
.await;
Ok(())
}

38
src/endpoints/folder/list.rs Normal file

@@ -0,0 +1,38 @@
use tokio::try_join;
use crate::prelude::*;
#[derive(Debug, Deserialize)]
pub struct Params {
folder_id: Option<Uuid>,
}
#[derive(Debug, Serialize)]
pub struct Response {
folder_id: Uuid,
files: Vec<db::file::FileWithoutParentId>,
folders: Vec<db::folder::FolderWithoutParentId>,
}
pub async fn list(
Query(params): Query<Params>,
State(state): State<AppState>,
claims: Claims,
) -> Result<Json<Response>, StatusCode> {
let folder_id = db::folder::get_by_id(params.folder_id, claims.user_id, &state.pool)
.await
.handle_internal()?
.ok_or(StatusCode::NOT_FOUND)?;
let (files, folders) = try_join!(
db::file::get_files(folder_id, &state.pool),
db::folder::get_folders(folder_id, &state.pool)
)
.handle_internal()?;
Ok(Json(Response {
folder_id,
files,
folders,
}))
}

3
src/endpoints/folder/mod.rs Normal file

@@ -0,0 +1,3 @@
pub mod create;
pub mod delete;
pub mod list;

2
src/endpoints/mod.rs Normal file

@@ -0,0 +1,2 @@
pub mod file;
pub mod folder;

28
src/errors.rs Normal file

@@ -0,0 +1,28 @@
use axum::http::StatusCode;
type BoxError = Box<dyn std::error::Error>;
pub fn handle_error(error: impl Into<BoxError>) {
let error: BoxError = error.into();
tracing::error!(error);
}
pub trait ErrorHandlingExt<T, E>
where
Self: Sized,
{
fn handle(self, code: StatusCode) -> Result<T, StatusCode>;
fn handle_internal(self) -> Result<T, StatusCode> {
self.handle(StatusCode::INTERNAL_SERVER_ERROR)
}
}
impl<T, E: Into<BoxError>> ErrorHandlingExt<T, E> for Result<T, E> {
fn handle(self, code: StatusCode) -> Result<T, StatusCode> {
self.map_err(|err| {
handle_error(err);
code
})
}
}
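
At call sites this keeps handlers terse: the error is logged via handle_error and swapped for a status code. The pattern throughout src/endpoints reads:

let root = db::folder::get_root(claims.user_id, &state.pool)
    .await
    .handle_internal()?; // logs the error, responds 500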

73
src/file_storage.rs Normal file

@@ -0,0 +1,73 @@
use std::{
env, io,
path::{Path, PathBuf},
sync::Arc,
};
use tokio::fs;
use uuid::Uuid;
#[derive(Clone)]
pub struct FileStorage(Arc<Path>);
impl FileStorage {
pub fn new() -> anyhow::Result<Self> {
let var = env::var("DRIVE_STORAGE_PATH");
let path_str = match var {
Ok(ref string) => string,
Err(err) => {
tracing::info!(
%err,
"Error getting DRIVE_STORAGE_PATH variable. Defaulting to ./files"
);
"./files"
}
};
let path = Path::new(path_str);
match path.metadata() {
Ok(meta) => anyhow::ensure!(meta.is_dir(), "Expected path to a directory"),
Err(err) if err.kind() == io::ErrorKind::NotFound => {
std::fs::create_dir_all(path)?;
}
Err(err) => return Err(err.into()),
};
Ok(FileStorage(path.into()))
}
fn path_for_file(&self, file_id: Uuid) -> PathBuf {
let file_name = file_id.as_hyphenated().to_string();
self.0.join(file_name)
}
async fn create_inner(&self, file_id: Uuid) -> anyhow::Result<impl tokio::io::AsyncWrite> {
fs::File::create_new(self.path_for_file(file_id))
.await
.map_err(Into::into)
}
pub async fn create(&self) -> anyhow::Result<(Uuid, impl tokio::io::AsyncWrite)> {
let mut error = anyhow::anyhow!("Error creating a file");
for _ in 0..3 {
let file_id = Uuid::new_v4();
match self.create_inner(file_id).await {
Ok(file) => return Ok((file_id, file)),
Err(err) => error = error.context(err),
}
}
Err(error)
}
pub async fn read(&self, file_id: Uuid) -> anyhow::Result<impl tokio::io::AsyncRead> {
fs::File::open(self.path_for_file(file_id))
.await
.map_err(Into::into)
}
pub async fn delete(&self, file_id: Uuid) -> anyhow::Result<bool> {
match fs::remove_file(self.path_for_file(file_id)).await {
Ok(()) => Ok(true),
Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(false),
Err(err) => Err(err.into()),
}
}
}
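
Stored objects end up as flat files named by their hyphenated UUID directly under the storage root, e.g. (illustrative id): ./files/550e8400-e29b-41d4-a716-446655440000.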

114
src/main.rs Normal file

@@ -0,0 +1,114 @@
mod auth;
mod db;
mod endpoints;
mod errors;
mod file_storage;
mod prelude;
use std::{env, net::Ipv4Addr};
use axum::{routing::get, Router};
use file_storage::FileStorage;
use tokio::net::TcpListener;
type Pool = sqlx::postgres::PgPool;
#[derive(Clone)]
struct AppState {
pool: Pool,
storage: FileStorage,
}
async fn create_user(user_name: &str, user_email: &str, pool: &Pool) -> anyhow::Result<i32> {
let id = sqlx::query!(
"INSERT INTO users(username, email) VALUES ($1, $2) RETURNING user_id",
user_name,
user_email
)
.fetch_one(pool)
.await?
.user_id;
sqlx::query!(
"INSERT INTO folders(owner_id, folder_name) VALUES ($1, $2)",
id,
"ROOT"
)
.execute(pool)
.await?;
Ok(id)
}
async fn create_debug_users(pool: &Pool) -> anyhow::Result<()> {
let count = sqlx::query!("SELECT count(user_id) FROM users")
.fetch_one(pool)
.await?
.count
.unwrap_or(0);
if count > 0 {
return Ok(());
}
tokio::try_join!(
create_user("Test1", "test1@example.com", pool),
create_user("Test2", "test2@example.com", pool)
)?;
Ok(())
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
// TODO: add utoipa and utoipauto for swagger
let _ = dotenvy::dotenv();
tracing_subscriber::fmt::init();
let pool = match env::var("DATABASE_URL") {
Ok(url) => Pool::connect(&url).await?,
Err(err) => anyhow::bail!("Error getting database url: {err}"),
};
sqlx::migrate!().run(&pool).await?;
create_debug_users(&pool).await?;
let storage = file_storage::FileStorage::new()?;
let state = AppState { pool, storage };
let router = app(state);
let addr = (Ipv4Addr::UNSPECIFIED, 3000);
let listener = TcpListener::bind(addr).await?;
axum::serve(listener, router).await?;
Ok(())
}
fn app(state: AppState) -> Router {
use axum::http::header;
use endpoints::{file, folder};
use tower_http::ServiceBuilderExt as _;
let sensitive_headers = [header::AUTHORIZATION, header::COOKIE];
let middleware = tower::ServiceBuilder::new()
.sensitive_headers(sensitive_headers)
.trace_for_http()
.compression();
// Build route service
Router::new()
.route(
"/files",
get(file::download::download)
.post(file::upload::upload)
.delete(file::delete::delete),
)
.route(
"/folders",
get(folder::list::list)
.post(folder::create::create)
.delete(folder::delete::delete),
)
.layer(middleware)
.with_state(state)
}
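
With the stub auth, the resulting surface can be exercised roughly as follows (a sketch; UUIDs are placeholders):

curl 'http://localhost:3000/folders?user_id=1'
curl -X POST 'http://localhost:3000/folders?user_id=1' -H 'Content-Type: application/json' -d '{"parent_folder_id":"<uuid>","folder_name":"docs"}'
curl -X DELETE 'http://localhost:3000/files?user_id=1&file_id=<uuid>'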

8
src/prelude.rs Normal file

@@ -0,0 +1,8 @@
pub(crate) use crate::{auth::Claims, db, errors::ErrorHandlingExt as _, AppState, Pool};
pub use axum::{
extract::{Json, Query, State},
http::StatusCode,
};
pub use futures::StreamExt as _;
pub use serde::{Deserialize, Serialize};
pub use uuid::Uuid;