forked from gorb/backend

Merge branch 'main' into wip/images

Radical 2025-05-23 13:45:17 +02:00
commit 149b81973d
54 changed files with 1201 additions and 1691 deletions


@@ -21,7 +21,6 @@ regex = "1.11"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 simple_logger = "5.0.0"
-sqlx = { version = "0.8", features = ["runtime-tokio", "tls-native-tls", "postgres"] }
 redis = { version = "0.31.0", features= ["tokio-comp"] }
 tokio-tungstenite = { version = "0.26", features = ["native-tls", "url"] }
 toml = "0.8"
@@ -32,6 +31,11 @@ actix-ws = "0.3.0"
 futures-util = "0.3.31"
 bunny-api-tokio = "0.2.1"
 bindet = "0.3.2"
+deadpool = "0.12"
+diesel = { version = "2.2", features = ["uuid"] }
+diesel-async = { version = "0.5", features = ["deadpool", "postgres", "async-connection-wrapper"] }
+diesel_migrations = { version = "2.2.0", features = ["postgres"] }
+thiserror = "2.0.12"

 [dependencies.tokio]
 version = "1.44"
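The removed sqlx dependency is replaced by Diesel's async stack: diesel-async on top of a deadpool connection pool, plus diesel_migrations and thiserror. As a rough sketch of how such a pool is typically constructed with these crates (the `database_url` parameter, function name, and error handling are assumptions for illustration, not taken from this diff):

```rust
use diesel_async::AsyncPgConnection;
use diesel_async::pooled_connection::AsyncDieselConnectionManager;
use diesel_async::pooled_connection::deadpool::Pool;

// Hypothetical setup helper; the actual project presumably stores the pool in its `Data` struct.
fn build_pool(database_url: &str) -> Pool<AsyncPgConnection> {
    // Manager that knows how to open AsyncPgConnection instances.
    let manager = AsyncDieselConnectionManager::<AsyncPgConnection>::new(database_url);
    // Deadpool-backed pool; `pool.get().await` yields a pooled connection,
    // which is what the handlers in this diff pass around as `conn`.
    Pool::builder(manager)
        .build()
        .expect("failed to build connection pool")
}
```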

build.rs Normal file

@@ -0,0 +1,3 @@
fn main() {
    println!("cargo:rerun-if-changed=migrations");
}
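build.rs only tells Cargo to rebuild when the migrations directory changes; applying the migrations is the job of the diesel_migrations crate added above. A minimal sketch of embedding and running them at startup (shown with a synchronous PgConnection for brevity; since the project enables diesel-async's async-connection-wrapper feature, its real setup may wrap an async connection instead):

```rust
use diesel::Connection;
use diesel::pg::PgConnection;
use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};

// Compile the SQL files under ./migrations into the binary.
const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");

// Hypothetical helper; connection handling in the real service may differ.
fn run_migrations(database_url: &str) {
    let mut conn = PgConnection::establish(database_url).expect("failed to connect");
    conn.run_pending_migrations(MIGRATIONS)
        .expect("failed to run migrations");
}
```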

diesel.toml Normal file

@@ -0,0 +1,9 @@
# For documentation on how to configure this file,
# see https://diesel.rs/guides/configuring-diesel-cli

[print_schema]
file = "src/schema.rs"
custom_type_derives = ["diesel::query_builder::QueryId", "Clone"]

[migrations_directory]
dir = "migrations"

migrations/.keep Normal file


@@ -0,0 +1,6 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.
DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass);
DROP FUNCTION IF EXISTS diesel_set_updated_at();


@@ -0,0 +1,36 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.
-- Sets up a trigger for the given table to automatically set a column called
-- `updated_at` whenever the row is modified (unless `updated_at` was included
-- in the modified columns)
--
-- # Example
--
-- ```sql
-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW());
--
-- SELECT diesel_manage_updated_at('users');
-- ```
CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$
BEGIN
EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s
FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl);
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$
BEGIN
IF (
NEW IS DISTINCT FROM OLD AND
NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at
) THEN
NEW.updated_at := current_timestamp;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;


@@ -0,0 +1,4 @@
-- This file should undo anything in `up.sql`
DROP INDEX idx_unique_username_active;
DROP INDEX idx_unique_email_active;
DROP TABLE users;


@@ -0,0 +1,20 @@
-- Your SQL goes here
CREATE TABLE users (
uuid uuid PRIMARY KEY NOT NULL,
username varchar(32) NOT NULL,
display_name varchar(64) DEFAULT NULL,
password varchar(512) NOT NULL,
email varchar(100) NOT NULL,
email_verified boolean NOT NULL DEFAULT FALSE,
is_deleted boolean NOT NULL DEFAULT FALSE,
deleted_at int8 DEFAULT NULL,
CONSTRAINT unique_username_active UNIQUE NULLS NOT DISTINCT (username, is_deleted),
CONSTRAINT unique_email_active UNIQUE NULLS NOT DISTINCT (email, is_deleted)
);
CREATE UNIQUE INDEX idx_unique_username_active
ON users(username)
WHERE is_deleted = FALSE;
CREATE UNIQUE INDEX idx_unique_email_active
ON users(email)
WHERE is_deleted = FALSE;


@@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
DROP TABLE instance_permissions;


@@ -0,0 +1,5 @@
-- Your SQL goes here
CREATE TABLE instance_permissions (
uuid uuid PRIMARY KEY NOT NULL REFERENCES users(uuid),
administrator boolean NOT NULL DEFAULT FALSE
);


@@ -0,0 +1,3 @@
-- This file should undo anything in `up.sql`
DROP TABLE access_tokens;
DROP TABLE refresh_tokens;


@@ -0,0 +1,13 @@
-- Your SQL goes here
CREATE TABLE refresh_tokens (
token varchar(64) PRIMARY KEY UNIQUE NOT NULL,
uuid uuid NOT NULL REFERENCES users(uuid),
created_at int8 NOT NULL,
device_name varchar(16) NOT NULL
);
CREATE TABLE access_tokens (
token varchar(32) PRIMARY KEY UNIQUE NOT NULL,
refresh_token varchar(64) UNIQUE NOT NULL REFERENCES refresh_tokens(token) ON UPDATE CASCADE ON DELETE CASCADE,
uuid uuid NOT NULL REFERENCES users(uuid),
created_at int8 NOT NULL
);


@@ -0,0 +1,3 @@
-- This file should undo anything in `up.sql`
DROP TABLE guild_members;
DROP TABLE guilds;


@@ -0,0 +1,13 @@
-- Your SQL goes here
CREATE TABLE guilds (
uuid uuid PRIMARY KEY NOT NULL,
owner_uuid uuid NOT NULL REFERENCES users(uuid),
name VARCHAR(100) NOT NULL,
description VARCHAR(300)
);
CREATE TABLE guild_members (
uuid uuid PRIMARY KEY NOT NULL,
guild_uuid uuid NOT NULL REFERENCES guilds(uuid) ON DELETE CASCADE,
user_uuid uuid NOT NULL REFERENCES users(uuid),
nickname VARCHAR(100) DEFAULT NULL
);


@@ -0,0 +1,3 @@
-- This file should undo anything in `up.sql`
DROP TABLE role_members;
DROP TABLE roles;


@@ -0,0 +1,15 @@
-- Your SQL goes here
CREATE TABLE roles (
uuid uuid UNIQUE NOT NULL,
guild_uuid uuid NOT NULL REFERENCES guilds(uuid) ON DELETE CASCADE,
name VARCHAR(50) NOT NULL,
color int NOT NULL DEFAULT 16777215,
position int NOT NULL,
permissions int8 NOT NULL DEFAULT 0,
PRIMARY KEY (uuid, guild_uuid)
);
CREATE TABLE role_members (
role_uuid uuid NOT NULL REFERENCES roles(uuid) ON DELETE CASCADE,
member_uuid uuid NOT NULL REFERENCES guild_members(uuid) ON DELETE CASCADE,
PRIMARY KEY (role_uuid, member_uuid)
);


@@ -0,0 +1,3 @@
-- This file should undo anything in `up.sql`
DROP TABLE channel_permissions;
DROP TABLE channels;


@@ -0,0 +1,13 @@
-- Your SQL goes here
CREATE TABLE channels (
uuid uuid PRIMARY KEY NOT NULL,
guild_uuid uuid NOT NULL REFERENCES guilds(uuid) ON DELETE CASCADE,
name varchar(32) NOT NULL,
description varchar(500) NOT NULL
);
CREATE TABLE channel_permissions (
channel_uuid uuid NOT NULL REFERENCES channels(uuid) ON DELETE CASCADE,
role_uuid uuid NOT NULL REFERENCES roles(uuid) ON DELETE CASCADE,
permissions int8 NOT NULL DEFAULT 0,
PRIMARY KEY (channel_uuid, role_uuid)
);


@@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
DROP TABLE messages;


@@ -0,0 +1,7 @@
-- Your SQL goes here
CREATE TABLE messages (
uuid uuid PRIMARY KEY NOT NULL,
channel_uuid uuid NOT NULL REFERENCES channels(uuid) ON DELETE CASCADE,
user_uuid uuid NOT NULL REFERENCES users(uuid),
message varchar(4000) NOT NULL
);


@@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
DROP TABLE invites;


@@ -0,0 +1,6 @@
-- Your SQL goes here
CREATE TABLE invites (
id varchar(32) PRIMARY KEY NOT NULL,
guild_uuid uuid NOT NULL REFERENCES guilds(uuid) ON DELETE CASCADE,
user_uuid uuid NOT NULL REFERENCES users(uuid)
);


@@ -0,0 +1,4 @@
-- This file should undo anything in `up.sql`
UPDATE channels SET description = '' WHERE description IS NULL;
ALTER TABLE ONLY channels ALTER COLUMN description SET NOT NULL;
ALTER TABLE ONLY channels ALTER COLUMN description DROP DEFAULT;


@@ -0,0 +1,3 @@
-- Your SQL goes here
ALTER TABLE ONLY channels ALTER COLUMN description DROP NOT NULL;
ALTER TABLE ONLY channels ALTER COLUMN description SET DEFAULT NULL;


@@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
ALTER TABLE guilds DROP COLUMN icon;


@@ -0,0 +1,2 @@
-- Your SQL goes here
ALTER TABLE guilds ADD COLUMN icon VARCHAR(100) DEFAULT NULL;


@@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
ALTER TABLE users DROP COLUMN avatar;


@@ -0,0 +1,2 @@
-- Your SQL goes here
ALTER TABLE users ADD COLUMN avatar varchar(100) DEFAULT NULL;


@@ -1,14 +1,14 @@
 use std::time::{SystemTime, UNIX_EPOCH};

-use actix_web::{Error, HttpResponse, post, web};
+use actix_web::{HttpResponse, post, web};
 use argon2::{PasswordHash, PasswordVerifier};
-use log::error;
+use diesel::{dsl::insert_into, ExpressionMethods, QueryDsl};
+use diesel_async::RunQueryDsl;
 use serde::Deserialize;
+use uuid::Uuid;

 use crate::{
-    Data,
-    api::v1::auth::{EMAIL_REGEX, PASSWORD_REGEX, USERNAME_REGEX},
-    utils::{generate_access_token, generate_refresh_token, refresh_token_cookie},
+    error::Error, api::v1::auth::{EMAIL_REGEX, PASSWORD_REGEX, USERNAME_REGEX}, schema::*, utils::{generate_access_token, generate_refresh_token, refresh_token_cookie}, Data
 };

 use super::Response;
@@ -29,66 +29,42 @@ pub async fn response(
         return Ok(HttpResponse::Forbidden().json(r#"{ "password_hashed": false }"#));
     }

+    use users::dsl;
+    let mut conn = data.pool.get().await?;
+
     if EMAIL_REGEX.is_match(&login_information.username) {
-        let row =
-            sqlx::query_as("SELECT CAST(uuid as VARCHAR), password FROM users WHERE email = $1")
-                .bind(&login_information.username)
-                .fetch_one(&data.pool)
-                .await;
-
-        if let Err(error) = row {
-            if error.to_string()
-                == "no rows returned by a query that expected to return at least one row"
-            {
-                return Ok(HttpResponse::Unauthorized().finish());
-            }
-
-            error!("{}", error);
-            return Ok(HttpResponse::InternalServerError().json(
-                r#"{ "error": "Unhandled exception occured, contact the server administrator" }"#,
-            ));
-        }
-
-        let (uuid, password): (String, String) = row.unwrap();
-
-        return Ok(login(
+        // FIXME: error handling, right now i just want this to work
+        let (uuid, password): (Uuid, String) = dsl::users
+            .filter(dsl::email.eq(&login_information.username))
+            .select((dsl::uuid, dsl::password))
+            .get_result(&mut conn)
+            .await?;
+
+        return login(
             data.clone(),
             uuid,
             login_information.password.clone(),
             password,
             login_information.device_name.clone(),
         )
-        .await);
+        .await;
     } else if USERNAME_REGEX.is_match(&login_information.username) {
-        let row =
-            sqlx::query_as("SELECT CAST(uuid as VARCHAR), password FROM users WHERE username = $1")
-                .bind(&login_information.username)
-                .fetch_one(&data.pool)
-                .await;
-
-        if let Err(error) = row {
-            if error.to_string()
-                == "no rows returned by a query that expected to return at least one row"
-            {
-                return Ok(HttpResponse::Unauthorized().finish());
-            }
-
-            error!("{}", error);
-            return Ok(HttpResponse::InternalServerError().json(
-                r#"{ "error": "Unhandled exception occured, contact the server administrator" }"#,
-            ));
-        }
-
-        let (uuid, password): (String, String) = row.unwrap();
-
-        return Ok(login(
+        // FIXME: error handling, right now i just want this to work
+        let (uuid, password): (Uuid, String) = dsl::users
+            .filter(dsl::username.eq(&login_information.username))
+            .select((dsl::uuid, dsl::password))
+            .get_result(&mut conn)
+            .await?;
+
+        return login(
            data.clone(),
            uuid,
            login_information.password.clone(),
            password,
            login_information.device_name.clone(),
        )
-        .await);
+        .await;
     }

     Ok(HttpResponse::Unauthorized().finish())
@@ -96,79 +72,45 @@ pub async fn response(

 async fn login(
     data: actix_web::web::Data<Data>,
-    uuid: String,
+    uuid: Uuid,
     request_password: String,
     database_password: String,
     device_name: String,
-) -> HttpResponse {
-    let parsed_hash_raw = PasswordHash::new(&database_password);
-
-    if let Err(error) = parsed_hash_raw {
-        error!("{}", error);
-        return HttpResponse::InternalServerError().finish();
-    }
-
-    let parsed_hash = parsed_hash_raw.unwrap();
+) -> Result<HttpResponse, Error> {
+    let mut conn = data.pool.get().await?;
+
+    let parsed_hash = PasswordHash::new(&database_password).map_err(|e| Error::PasswordHashError(e.to_string()))?;

     if data
         .argon2
         .verify_password(request_password.as_bytes(), &parsed_hash)
         .is_err()
     {
-        return HttpResponse::Unauthorized().finish();
+        return Err(Error::Unauthorized("Wrong username or password".to_string()));
     }

-    let refresh_token_raw = generate_refresh_token();
-    let access_token_raw = generate_access_token();
-
-    if let Err(error) = refresh_token_raw {
-        error!("{}", error);
-        return HttpResponse::InternalServerError().finish();
-    }
-
-    let refresh_token = refresh_token_raw.unwrap();
-
-    if let Err(error) = access_token_raw {
-        error!("{}", error);
-        return HttpResponse::InternalServerError().finish();
-    }
-
-    let access_token = access_token_raw.unwrap();
+    let refresh_token = generate_refresh_token()?;
+    let access_token = generate_access_token()?;

     let current_time = SystemTime::now()
-        .duration_since(UNIX_EPOCH)
-        .unwrap()
+        .duration_since(UNIX_EPOCH)?
         .as_secs() as i64;

-    if let Err(error) = sqlx::query(&format!(
-        "INSERT INTO refresh_tokens (token, uuid, created_at, device_name) VALUES ($1, '{}', $2, $3 )",
-        uuid
-    ))
-    .bind(&refresh_token)
-    .bind(current_time)
-    .bind(device_name)
-    .execute(&data.pool)
-    .await
-    {
-        error!("{}", error);
-        return HttpResponse::InternalServerError().finish();
-    }
-
-    if let Err(error) = sqlx::query(&format!(
-        "INSERT INTO access_tokens (token, refresh_token, uuid, created_at) VALUES ($1, $2, '{}', $3 )",
-        uuid
-    ))
-    .bind(&access_token)
-    .bind(&refresh_token)
-    .bind(current_time)
-    .execute(&data.pool)
-    .await
-    {
-        error!("{}", error);
-        return HttpResponse::InternalServerError().finish()
-    }
-
-    HttpResponse::Ok()
+    use refresh_tokens::dsl as rdsl;
+
+    insert_into(refresh_tokens::table)
+        .values((rdsl::token.eq(&refresh_token), rdsl::uuid.eq(uuid), rdsl::created_at.eq(current_time), rdsl::device_name.eq(device_name)))
+        .execute(&mut conn)
+        .await?;
+
+    use access_tokens::dsl as adsl;
+
+    insert_into(access_tokens::table)
+        .values((adsl::token.eq(&access_token), adsl::refresh_token.eq(&refresh_token), adsl::uuid.eq(uuid), adsl::created_at.eq(current_time)))
+        .execute(&mut conn)
+        .await?;
+
+    Ok(HttpResponse::Ok()
         .cookie(refresh_token_cookie(refresh_token))
-        .json(Response { access_token })
+        .json(Response { access_token }))
 }
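The handlers in this diff now bubble failures up with `?` into a shared `crate::error::Error` type (built on the newly added thiserror crate) instead of logging and returning ad-hoc responses. src/error.rs itself is not part of this excerpt, so the following is only a sketch of the pattern the call sites imply, with the variant names taken from their usage above and the conversions assumed:

```rust
use actix_web::{http::StatusCode, HttpResponse, ResponseError};
use thiserror::Error;

#[derive(Debug, Error)]
pub enum Error {
    #[error("{0}")]
    Unauthorized(String),
    #[error("{0}")]
    PasswordHashError(String),
    // `?` on Diesel queries and on SystemTime math relies on From impls like these.
    #[error(transparent)]
    Database(#[from] diesel::result::Error),
    #[error(transparent)]
    Time(#[from] std::time::SystemTimeError),
}

impl ResponseError for Error {
    fn status_code(&self) -> StatusCode {
        match self {
            Error::Unauthorized(_) => StatusCode::UNAUTHORIZED,
            _ => StatusCode::INTERNAL_SERVER_ERROR,
        }
    }

    fn error_response(&self) -> HttpResponse {
        HttpResponse::build(self.status_code()).body(self.to_string())
    }
}
```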


@@ -1,16 +1,17 @@
 use std::{
-    str::FromStr,
     sync::LazyLock,
     time::{SystemTime, UNIX_EPOCH},
 };

-use actix_web::{HttpResponse, Scope, web};
-use log::error;
+use actix_web::{Scope, web};
+use diesel::{ExpressionMethods, QueryDsl};
+use diesel_async::RunQueryDsl;
 use regex::Regex;
 use serde::Serialize;
-use sqlx::Postgres;
 use uuid::Uuid;

+use crate::{error::Error, Conn, schema::access_tokens::dsl};
+
 mod login;
 mod refresh;
 mod register;
@@ -40,40 +41,30 @@ pub fn web() -> Scope {

 pub async fn check_access_token(
     access_token: &str,
-    pool: &sqlx::Pool<Postgres>,
-) -> Result<Uuid, HttpResponse> {
-    let row = sqlx::query_as(
-        "SELECT CAST(uuid as VARCHAR), created_at FROM access_tokens WHERE token = $1",
-    )
-    .bind(access_token)
-    .fetch_one(pool)
-    .await;
-
-    if let Err(error) = row {
-        if error.to_string()
-            == "no rows returned by a query that expected to return at least one row"
-        {
-            return Err(HttpResponse::Unauthorized().finish());
-        }
-
-        error!("{}", error);
-        return Err(HttpResponse::InternalServerError().json(
-            r#"{ "error": "Unhandled exception occured, contact the server administrator" }"#,
-        ));
-    }
-
-    let (uuid, created_at): (String, i64) = row.unwrap();
+    conn: &mut Conn,
+) -> Result<Uuid, Error> {
+    let (uuid, created_at): (Uuid, i64) = dsl::access_tokens
+        .filter(dsl::token.eq(access_token))
+        .select((dsl::uuid, dsl::created_at))
+        .get_result(conn)
+        .await
+        .map_err(|error| {
+            if error == diesel::result::Error::NotFound {
+                Error::Unauthorized("Invalid access token".to_string())
+            } else {
+                Error::from(error)
+            }
+        })?;

     let current_time = SystemTime::now()
-        .duration_since(UNIX_EPOCH)
-        .unwrap()
+        .duration_since(UNIX_EPOCH)?
         .as_secs() as i64;

     let lifetime = current_time - created_at;

     if lifetime > 3600 {
-        return Err(HttpResponse::Unauthorized().finish());
+        return Err(Error::Unauthorized("Invalid access token".to_string()));
     }

-    Ok(Uuid::from_str(&uuid).unwrap())
+    Ok(uuid)
 }


@@ -1,10 +1,11 @@
-use actix_web::{Error, HttpRequest, HttpResponse, post, web};
+use actix_web::{HttpRequest, HttpResponse, post, web};
+use diesel::{delete, update, ExpressionMethods, QueryDsl};
+use diesel_async::RunQueryDsl;
 use log::error;
 use std::time::{SystemTime, UNIX_EPOCH};

 use crate::{
-    Data,
-    utils::{generate_access_token, generate_refresh_token, refresh_token_cookie},
+    error::Error, schema::{access_tokens::{self, dsl}, refresh_tokens::{self, dsl as rdsl}}, utils::{generate_access_token, generate_refresh_token, refresh_token_cookie}, Data
 };

 use super::Response;
@@ -20,23 +21,23 @@ pub async fn res(req: HttpRequest, data: web::Data<Data>) -> Result<HttpResponse
     let mut refresh_token = String::from(recv_refresh_token_cookie.unwrap().value());

     let current_time = SystemTime::now()
-        .duration_since(UNIX_EPOCH)
-        .unwrap()
+        .duration_since(UNIX_EPOCH)?
         .as_secs() as i64;

-    if let Ok(row) = sqlx::query_scalar("SELECT created_at FROM refresh_tokens WHERE token = $1")
-        .bind(&refresh_token)
-        .fetch_one(&data.pool)
+    let mut conn = data.pool.get().await?;
+
+    if let Ok(created_at) = rdsl::refresh_tokens
+        .filter(rdsl::token.eq(&refresh_token))
+        .select(rdsl::created_at)
+        .get_result::<i64>(&mut conn)
         .await
     {
-        let created_at: i64 = row;
-
         let lifetime = current_time - created_at;

         if lifetime > 2592000 {
-            if let Err(error) = sqlx::query("DELETE FROM refresh_tokens WHERE token = $1")
-                .bind(&refresh_token)
-                .execute(&data.pool)
+            if let Err(error) = delete(refresh_tokens::table)
+                .filter(rdsl::token.eq(&refresh_token))
+                .execute(&mut conn)
                 .await
             {
                 error!("{}", error);
@@ -52,8 +53,7 @@ pub async fn res(req: HttpRequest, data: web::Data<Data>) -> Result<HttpResponse
         }

         let current_time = SystemTime::now()
-            .duration_since(UNIX_EPOCH)
-            .unwrap()
+            .duration_since(UNIX_EPOCH)?
             .as_secs() as i64;

         if lifetime > 1987200 {
@@ -66,13 +66,13 @@ pub async fn res(req: HttpRequest, data: web::Data<Data>) -> Result<HttpResponse
             let new_refresh_token = new_refresh_token.unwrap();

-            match sqlx::query(
-                "UPDATE refresh_tokens SET token = $1, created_at = $2 WHERE token = $3",
-            )
-            .bind(&new_refresh_token)
-            .bind(current_time)
-            .bind(&refresh_token)
-            .execute(&data.pool)
+            match update(refresh_tokens::table)
+                .filter(rdsl::token.eq(&refresh_token))
+                .set((
+                    rdsl::token.eq(&new_refresh_token),
+                    rdsl::created_at.eq(current_time),
+                ))
+                .execute(&mut conn)
             .await
             {
                 Ok(_) => {
@@ -84,27 +84,16 @@ pub async fn res(req: HttpRequest, data: web::Data<Data>) -> Result<HttpResponse
             }
         }

-        let access_token = generate_access_token();
-
-        if access_token.is_err() {
-            error!("{}", access_token.unwrap_err());
-            return Ok(HttpResponse::InternalServerError().finish());
-        }
-
-        let access_token = access_token.unwrap();
-
-        if let Err(error) = sqlx::query(
-            "UPDATE access_tokens SET token = $1, created_at = $2 WHERE refresh_token = $3",
-        )
-        .bind(&access_token)
-        .bind(current_time)
-        .bind(&refresh_token)
-        .execute(&data.pool)
-        .await
-        {
-            error!("{}", error);
-            return Ok(HttpResponse::InternalServerError().finish());
-        }
+        let access_token = generate_access_token()?;
+
+        update(access_tokens::table)
+            .filter(dsl::refresh_token.eq(&refresh_token))
+            .set((
+                dsl::token.eq(&access_token),
+                dsl::created_at.eq(current_time),
+            ))
+            .execute(&mut conn)
+            .await?;

         return Ok(HttpResponse::Ok()
             .cookie(refresh_token_cookie(refresh_token))


@@ -1,19 +1,18 @@
 use std::time::{SystemTime, UNIX_EPOCH};

-use actix_web::{Error, HttpResponse, post, web};
+use actix_web::{HttpResponse, post, web};
 use argon2::{
     PasswordHasher,
     password_hash::{SaltString, rand_core::OsRng},
 };
-use log::error;
+use diesel::{dsl::insert_into, ExpressionMethods};
+use diesel_async::RunQueryDsl;
 use serde::{Deserialize, Serialize};
 use uuid::Uuid;

 use super::Response;
 use crate::{
-    Data,
-    api::v1::auth::{EMAIL_REGEX, PASSWORD_REGEX, USERNAME_REGEX},
-    utils::{generate_access_token, generate_refresh_token, refresh_token_cookie},
+    api::v1::auth::{EMAIL_REGEX, PASSWORD_REGEX, USERNAME_REGEX}, error::Error, schema::{access_tokens::{self, dsl as adsl}, refresh_tokens::{self, dsl as rdsl}, users::{self, dsl as udsl}}, utils::{generate_access_token, generate_refresh_token, refresh_token_cookie}, Data
 };

 #[derive(Deserialize)]
@@ -92,91 +91,49 @@ pub async fn res(
         .argon2
         .hash_password(account_information.password.as_bytes(), &salt)
     {
+        let mut conn = data.pool.get().await?;
+
         // TODO: Check security of this implementation
-        return Ok(
-            match sqlx::query(&format!(
-                "INSERT INTO users (uuid, username, password, email) VALUES ( '{}', $1, $2, $3 )",
-                uuid
-            ))
-            .bind(&account_information.identifier)
-            .bind(hashed_password.to_string())
-            .bind(&account_information.email)
-            .execute(&data.pool)
-            .await
-            {
-                Ok(_out) => {
-                    let refresh_token = generate_refresh_token();
-                    let access_token = generate_access_token();
-
-                    if refresh_token.is_err() {
-                        error!("{}", refresh_token.unwrap_err());
-                        return Ok(HttpResponse::InternalServerError().finish());
-                    }
-
-                    let refresh_token = refresh_token.unwrap();
-
-                    if access_token.is_err() {
-                        error!("{}", access_token.unwrap_err());
-                        return Ok(HttpResponse::InternalServerError().finish());
-                    }
-
-                    let access_token = access_token.unwrap();
-
-                    let current_time = SystemTime::now()
-                        .duration_since(UNIX_EPOCH)
-                        .unwrap()
-                        .as_secs() as i64;
-
-                    if let Err(error) = sqlx::query(&format!("INSERT INTO refresh_tokens (token, uuid, created_at, device_name) VALUES ($1, '{}', $2, $3 )", uuid))
-                        .bind(&refresh_token)
-                        .bind(current_time)
-                        .bind(&account_information.device_name)
-                        .execute(&data.pool)
-                        .await {
-                            error!("{}", error);
-                            return Ok(HttpResponse::InternalServerError().finish())
-                    }
-
-                    if let Err(error) = sqlx::query(&format!("INSERT INTO access_tokens (token, refresh_token, uuid, created_at) VALUES ($1, $2, '{}', $3 )", uuid))
-                        .bind(&access_token)
-                        .bind(&refresh_token)
-                        .bind(current_time)
-                        .execute(&data.pool)
-                        .await {
-                            error!("{}", error);
-                            return Ok(HttpResponse::InternalServerError().finish())
-                    }
-
-                    HttpResponse::Ok()
-                        .cookie(refresh_token_cookie(refresh_token))
-                        .json(Response { access_token })
-                }
-                Err(error) => {
-                    let err_msg = error.as_database_error().unwrap().message();
-
-                    match err_msg {
-                        err_msg
-                            if err_msg.contains("unique") && err_msg.contains("username_key") =>
-                        {
-                            HttpResponse::Forbidden().json(ResponseError {
-                                gorb_id_available: false,
-                                ..Default::default()
-                            })
-                        }
-                        err_msg if err_msg.contains("unique") && err_msg.contains("email_key") => {
-                            HttpResponse::Forbidden().json(ResponseError {
-                                email_available: false,
-                                ..Default::default()
-                            })
-                        }
-                        _ => {
-                            error!("{}", err_msg);
-                            HttpResponse::InternalServerError().finish()
-                        }
-                    }
-                }
-            },
-        );
+        insert_into(users::table)
+            .values((
+                udsl::uuid.eq(uuid),
+                udsl::username.eq(&account_information.identifier),
+                udsl::password.eq(hashed_password.to_string()),
+                udsl::email.eq(&account_information.email),
+            ))
+            .execute(&mut conn)
+            .await?;
+
+        let refresh_token = generate_refresh_token()?;
+        let access_token = generate_access_token()?;
+
+        let current_time = SystemTime::now()
+            .duration_since(UNIX_EPOCH)?
+            .as_secs() as i64;
+
+        insert_into(refresh_tokens::table)
+            .values((
+                rdsl::token.eq(&refresh_token),
+                rdsl::uuid.eq(uuid),
+                rdsl::created_at.eq(current_time),
+                rdsl::device_name.eq(&account_information.device_name),
+            ))
+            .execute(&mut conn)
+            .await?;
+
+        insert_into(access_tokens::table)
+            .values((
+                adsl::token.eq(&access_token),
+                adsl::refresh_token.eq(&refresh_token),
+                adsl::uuid.eq(uuid),
+                adsl::created_at.eq(current_time),
+            ))
+            .execute(&mut conn)
+            .await?;
+
+        return Ok(HttpResponse::Ok()
+            .cookie(refresh_token_cookie(refresh_token))
+            .json(Response { access_token }))
     }

     Ok(HttpResponse::InternalServerError().finish())


@@ -1,10 +1,10 @@
-use actix_web::{Error, HttpRequest, HttpResponse, post, web};
+use actix_web::{HttpRequest, HttpResponse, post, web};
 use argon2::{PasswordHash, PasswordVerifier};
-use futures::future;
-use log::error;
-use serde::{Deserialize, Serialize};
+use diesel::{delete, ExpressionMethods, QueryDsl};
+use diesel_async::RunQueryDsl;
+use serde::Deserialize;

-use crate::{Data, api::v1::auth::check_access_token, utils::get_auth_header};
+use crate::{api::v1::auth::check_access_token, error::Error, schema::users::dsl as udsl, schema::refresh_tokens::{self, dsl as rdsl}, utils::get_auth_header, Data};

 #[derive(Deserialize)]
 struct RevokeRequest {
@@ -12,17 +12,6 @@ struct RevokeRequest {
     device_name: String,
 }

-#[derive(Serialize)]
-struct Response {
-    deleted: bool,
-}
-
-impl Response {
-    fn new(deleted: bool) -> Self {
-        Self { deleted }
-    }
-}
-
 // TODO: Should maybe be a delete request?
 #[post("/revoke")]
 pub async fn res(
@@ -32,85 +21,33 @@ pub async fn res(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();

-    let auth_header = get_auth_header(headers);
+    let auth_header = get_auth_header(headers)?;

-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
+    let mut conn = data.pool.get().await?;

-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
+    let uuid = check_access_token(auth_header, &mut conn).await?;

-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-
-    let uuid = authorized.unwrap();
-
-    let database_password_raw = sqlx::query_scalar(&format!(
-        "SELECT password FROM users WHERE uuid = '{}'",
-        uuid
-    ))
-    .fetch_one(&data.pool)
-    .await;
-
-    if let Err(error) = database_password_raw {
-        error!("{}", error);
-        return Ok(HttpResponse::InternalServerError().json(Response::new(false)));
-    }
-
-    let database_password: String = database_password_raw.unwrap();
-
-    let hashed_password_raw = PasswordHash::new(&database_password);
-
-    if let Err(error) = hashed_password_raw {
-        error!("{}", error);
-        return Ok(HttpResponse::InternalServerError().json(Response::new(false)));
-    }
-
-    let hashed_password = hashed_password_raw.unwrap();
+    let database_password: String = udsl::users
+        .filter(udsl::uuid.eq(uuid))
+        .select(udsl::password)
+        .get_result(&mut conn)
+        .await?;
+
+    let hashed_password = PasswordHash::new(&database_password).map_err(|e| Error::PasswordHashError(e.to_string()))?;

     if data
         .argon2
         .verify_password(revoke_request.password.as_bytes(), &hashed_password)
         .is_err()
     {
-        return Ok(HttpResponse::Unauthorized().finish());
+        return Err(Error::Unauthorized("Wrong username or password".to_string()));
     }

-    let tokens_raw = sqlx::query_scalar(&format!(
-        "SELECT token FROM refresh_tokens WHERE uuid = '{}' AND device_name = $1",
-        uuid
-    ))
-    .bind(&revoke_request.device_name)
-    .fetch_all(&data.pool)
-    .await;
-
-    if tokens_raw.is_err() {
-        error!("{:?}", tokens_raw);
-        return Ok(HttpResponse::InternalServerError().json(Response::new(false)));
-    }
-
-    let tokens: Vec<String> = tokens_raw.unwrap();
-
-    let mut refresh_tokens_delete = vec![];
-
-    for token in tokens {
-        refresh_tokens_delete.push(
-            sqlx::query("DELETE FROM refresh_tokens WHERE token = $1")
-                .bind(token.clone())
-                .execute(&data.pool),
-        );
-    }
-
-    let results = future::join_all(refresh_tokens_delete).await;
-
-    let errors: Vec<&Result<sqlx::postgres::PgQueryResult, sqlx::Error>> =
-        results.iter().filter(|r| r.is_err()).collect();
-
-    if !errors.is_empty() {
-        error!("{:?}", errors);
-        return Ok(HttpResponse::InternalServerError().finish());
-    }
-
-    Ok(HttpResponse::Ok().json(Response::new(true)))
+    delete(refresh_tokens::table)
+        .filter(rdsl::uuid.eq(uuid))
+        .filter(rdsl::device_name.eq(&revoke_request.device_name))
+        .execute(&mut conn)
+        .await?;
+
+    Ok(HttpResponse::Ok().finish())
 }


@@ -1,6 +1,7 @@
-use actix_web::{Error, HttpRequest, HttpResponse, get, post, web};
+use actix_web::{HttpRequest, HttpResponse, get, post, web};

 use crate::{
+    error::Error,
     Data,
     api::v1::auth::check_access_token,
     structs::{Guild, Invite, Member},
@@ -15,29 +16,17 @@ pub async fn get(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();

-    let auth_header = get_auth_header(headers);
+    let auth_header = get_auth_header(headers)?;

-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
+    let mut conn = data.pool.get().await?;
+
+    check_access_token(auth_header, &mut conn).await?;

     let invite_id = path.into_inner().0;

-    let result = Invite::fetch_one(&data.pool, invite_id).await;
-
-    if let Err(error) = result {
-        return Ok(error);
-    }
-
-    let invite = result.unwrap();
-
-    let guild_result = Guild::fetch_one(&data.pool, invite.guild_uuid).await;
-
-    if let Err(error) = guild_result {
-        return Ok(error);
-    }
-
-    let guild = guild_result.unwrap();
+    let invite = Invite::fetch_one(&mut conn, invite_id).await?;
+    let guild = Guild::fetch_one(&mut conn, invite.guild_uuid).await?;

     Ok(HttpResponse::Ok().json(guild))
 }
@@ -50,43 +39,19 @@ pub async fn join(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();

-    let auth_header = get_auth_header(headers);
-
-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
+    let auth_header = get_auth_header(headers)?;

     let invite_id = path.into_inner().0;

-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-
-    let uuid = authorized.unwrap();
-
-    let result = Invite::fetch_one(&data.pool, invite_id).await;
-
-    if let Err(error) = result {
-        return Ok(error);
-    }
-
-    let invite = result.unwrap();
-
-    let guild_result = Guild::fetch_one(&data.pool, invite.guild_uuid).await;
-
-    if let Err(error) = guild_result {
-        return Ok(error);
-    }
-
-    let guild = guild_result.unwrap();
-
-    let member = Member::new(&data.pool, uuid, guild.uuid).await;
-
-    if let Err(error) = member {
-        return Ok(error);
-    }
+    let mut conn = data.pool.get().await?;
+    let uuid = check_access_token(auth_header, &mut conn).await?;
+
+    let invite = Invite::fetch_one(&mut conn, invite_id).await?;
+    let guild = Guild::fetch_one(&mut conn, invite.guild_uuid).await?;
+
+    Member::new(&mut conn, uuid, guild.uuid).await?;

     Ok(HttpResponse::Ok().json(guild))
 }


@@ -1,9 +1,9 @@
-use actix_web::{get, post, web, Error, HttpRequest, HttpResponse, Scope};
+use actix_web::{get, post, web, HttpRequest, HttpResponse, Scope};
 use serde::Deserialize;

 mod uuid;

-use crate::{api::v1::auth::check_access_token, structs::{Guild, StartAmountQuery}, utils::get_auth_header, Data};
+use crate::{error::Error, api::v1::auth::check_access_token, structs::{Guild, StartAmountQuery}, utils::get_auth_header, Data};

 #[derive(Deserialize)]
 struct GuildInfo {
@@ -26,33 +26,21 @@ pub async fn create(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();

-    let auth_header = get_auth_header(headers);
+    let auth_header = get_auth_header(headers)?;

-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
+    let mut conn = data.pool.get().await?;

-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-
-    let uuid = authorized.unwrap();
+    let uuid = check_access_token(auth_header, &mut conn).await?;

     let guild = Guild::new(
-        &data.pool,
+        &mut conn,
         guild_info.name.clone(),
         guild_info.description.clone(),
         uuid,
     )
-    .await;
-
-    if let Err(error) = guild {
-        return Ok(error);
-    }
-
-    Ok(HttpResponse::Ok().json(guild.unwrap()))
+    .await?;
+
+    Ok(HttpResponse::Ok().json(guild))
 }

 #[get("")]
@@ -63,28 +51,16 @@ pub async fn get(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();

-    let auth_header = get_auth_header(headers);
+    let auth_header = get_auth_header(headers)?;

     let start = request_query.start.unwrap_or(0);

     let amount = request_query.amount.unwrap_or(10);

-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
-
-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-
-    let guilds = Guild::fetch_amount(&data.pool, start, amount).await;
-
-    if let Err(error) = guilds {
-        return Ok(error);
-    }
-
-    Ok(HttpResponse::Ok().json(guilds.unwrap()))
+    check_access_token(auth_header, &mut data.pool.get().await.unwrap()).await?;
+
+    let guilds = Guild::fetch_amount(&data.pool, start, amount).await?;
+
+    Ok(HttpResponse::Ok().json(guilds))
 }


@@ -1,12 +1,12 @@
 use crate::{
+    error::Error,
     Data,
     api::v1::auth::check_access_token,
     structs::{Channel, Member},
     utils::get_auth_header,
 };
 use ::uuid::Uuid;
-use actix_web::{Error, HttpRequest, HttpResponse, get, post, web};
-use log::error;
+use actix_web::{HttpRequest, HttpResponse, get, post, web};
 use serde::Deserialize;

 pub mod uuid;
@@ -25,52 +25,27 @@ pub async fn get(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();

-    let auth_header = get_auth_header(headers);
-
-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
+    let auth_header = get_auth_header(headers)?;

     let guild_uuid = path.into_inner().0;

-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-
-    let uuid = authorized.unwrap();
-
-    let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
-
-    if let Err(error) = member {
-        return Ok(error);
-    }
-
-    let cache_result = data.get_cache_key(format!("{}_channels", guild_uuid)).await;
-
-    if let Ok(cache_hit) = cache_result {
+    let mut conn = data.pool.get().await?;
+    let uuid = check_access_token(auth_header, &mut conn).await?;
+
+    Member::fetch_one(&mut conn, uuid, guild_uuid).await?;
+
+    if let Ok(cache_hit) = data.get_cache_key(format!("{}_channels", guild_uuid)).await {
         return Ok(HttpResponse::Ok()
             .content_type("application/json")
             .body(cache_hit));
     }

-    let channels_result = Channel::fetch_all(&data.pool, guild_uuid).await;
-
-    if let Err(error) = channels_result {
-        return Ok(error);
-    }
-
-    let channels = channels_result.unwrap();
-
-    let cache_result = data
+    let channels = Channel::fetch_all(&data.pool, guild_uuid).await?;
+
+    data
         .set_cache_key(format!("{}_channels", guild_uuid), channels.clone(), 1800)
-        .await;
-
-    if let Err(error) = cache_result {
-        error!("{}", error);
-        return Ok(HttpResponse::InternalServerError().finish());
-    }
+        .await?;

     Ok(HttpResponse::Ok().json(channels))
 }
@@ -84,27 +59,15 @@ pub async fn create(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();

-    let auth_header = get_auth_header(headers);
-
-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
+    let auth_header = get_auth_header(headers)?;

     let guild_uuid = path.into_inner().0;

-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-
-    let uuid = authorized.unwrap();
-
-    let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
-
-    if let Err(error) = member {
-        return Ok(error);
-    }
+    let mut conn = data.pool.get().await?;
+    let uuid = check_access_token(auth_header, &mut conn).await?;
+
+    Member::fetch_one(&mut conn, uuid, guild_uuid).await?;

     // FIXME: Logic to check permissions, should probably be done in utils.rs
@@ -116,9 +79,5 @@ pub async fn create(
     )
     .await;

-    if let Err(error) = channel {
-        return Ok(error);
-    }
-
     Ok(HttpResponse::Ok().json(channel.unwrap()))
 }


@@ -1,12 +1,12 @@
 use crate::{
+    error::Error,
     Data,
     api::v1::auth::check_access_token,
     structs::{Channel, Member},
     utils::get_auth_header,
 };
 use ::uuid::Uuid;
-use actix_web::{Error, HttpRequest, HttpResponse, get, web};
-use log::error;
+use actix_web::{HttpRequest, HttpResponse, get, web};
 use serde::Deserialize;

 #[derive(Deserialize)]
@@ -24,60 +24,31 @@ pub async fn get(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();

-    let auth_header = get_auth_header(headers);
-
-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
+    let auth_header = get_auth_header(headers)?;

     let (guild_uuid, channel_uuid) = path.into_inner();

-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-
-    let uuid = authorized.unwrap();
-
-    let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
-
-    if let Err(error) = member {
-        return Ok(error);
-    }
-
-    let cache_result = data.get_cache_key(format!("{}", channel_uuid)).await;
+    let mut conn = data.pool.get().await?;
+    let uuid = check_access_token(auth_header, &mut conn).await?;
+
+    Member::fetch_one(&mut conn, uuid, guild_uuid).await?;

     let channel: Channel;

-    if let Ok(cache_hit) = cache_result {
-        channel = serde_json::from_str(&cache_hit).unwrap()
+    if let Ok(cache_hit) = data.get_cache_key(format!("{}", channel_uuid)).await {
+        channel = serde_json::from_str(&cache_hit)?
     } else {
-        let channel_result = Channel::fetch_one(&data.pool, guild_uuid, channel_uuid).await;
-
-        if let Err(error) = channel_result {
-            return Ok(error);
-        }
-
-        channel = channel_result.unwrap();
-
-        let cache_result = data
+        channel = Channel::fetch_one(&mut conn, channel_uuid).await?;
+
+        data
             .set_cache_key(format!("{}", channel_uuid), channel.clone(), 60)
-            .await;
-
-        if let Err(error) = cache_result {
-            error!("{}", error);
-            return Ok(HttpResponse::InternalServerError().finish());
-        }
+            .await?;
     }

     let messages = channel
-        .fetch_messages(&data.pool, message_request.amount, message_request.offset)
-        .await;
+        .fetch_messages(&mut conn, message_request.amount, message_request.offset)
+        .await?;

-    if let Err(error) = messages {
-        return Ok(error);
-    }
-
-    Ok(HttpResponse::Ok().json(messages.unwrap()))
+    Ok(HttpResponse::Ok().json(messages))
 }


@@ -2,14 +2,14 @@ pub mod messages;
 pub mod socket;

 use crate::{
+    error::Error,
     Data,
     api::v1::auth::check_access_token,
     structs::{Channel, Member},
     utils::get_auth_header,
 };
-use ::uuid::Uuid;
-use actix_web::{Error, HttpRequest, HttpResponse, delete, get, web};
-use log::error;
+use uuid::Uuid;
+use actix_web::{HttpRequest, HttpResponse, delete, get, web};

 #[get("{uuid}/channels/{channel_uuid}")]
 pub async fn get(
@@ -19,52 +19,27 @@ pub async fn get(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();

-    let auth_header = get_auth_header(headers);
-
-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
+    let auth_header = get_auth_header(headers)?;

     let (guild_uuid, channel_uuid) = path.into_inner();

-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-
-    let uuid = authorized.unwrap();
-
-    let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
-
-    if let Err(error) = member {
-        return Ok(error);
-    }
-
-    let cache_result = data.get_cache_key(format!("{}", channel_uuid)).await;
-
-    if let Ok(cache_hit) = cache_result {
+    let mut conn = data.pool.get().await?;
+    let uuid = check_access_token(auth_header, &mut conn).await?;
+
+    Member::fetch_one(&mut conn, uuid, guild_uuid).await?;
+
+    if let Ok(cache_hit) = data.get_cache_key(format!("{}", channel_uuid)).await {
         return Ok(HttpResponse::Ok()
             .content_type("application/json")
             .body(cache_hit));
     }

-    let channel_result = Channel::fetch_one(&data.pool, guild_uuid, channel_uuid).await;
-
-    if let Err(error) = channel_result {
-        return Ok(error);
-    }
-
-    let channel = channel_result.unwrap();
-
-    let cache_result = data
+    let channel = Channel::fetch_one(&mut conn, channel_uuid).await?;
+
+    data
         .set_cache_key(format!("{}", channel_uuid), channel.clone(), 60)
-        .await;
-
-    if let Err(error) = cache_result {
-        error!("{}", error);
-        return Ok(HttpResponse::InternalServerError().finish());
-    }
+        .await?;

     Ok(HttpResponse::Ok().json(channel))
 }
@@ -77,55 +52,27 @@ pub async fn delete(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();

-    let auth_header = get_auth_header(headers);
-
-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
+    let auth_header = get_auth_header(headers)?;

     let (guild_uuid, channel_uuid) = path.into_inner();

-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-
-    let uuid = authorized.unwrap();
-
-    let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
-
-    if let Err(error) = member {
-        return Ok(error);
-    }
-
-    let cache_result = data.get_cache_key(format!("{}", channel_uuid)).await;
+    let mut conn = data.pool.get().await?;
+    let uuid = check_access_token(auth_header, &mut conn).await?;
+
+    Member::fetch_one(&mut conn, uuid, guild_uuid).await?;

     let channel: Channel;

-    if let Ok(cache_hit) = cache_result {
+    if let Ok(cache_hit) = data.get_cache_key(format!("{}", channel_uuid)).await {
         channel = serde_json::from_str(&cache_hit).unwrap();

-        let result = data.del_cache_key(format!("{}", channel_uuid)).await;
-
-        if let Err(error) = result {
-            error!("{}", error)
-        }
+        data.del_cache_key(format!("{}", channel_uuid)).await?;
     } else {
-        let channel_result = Channel::fetch_one(&data.pool, guild_uuid, channel_uuid).await;
-
-        if let Err(error) = channel_result {
-            return Ok(error);
-        }
-
-        channel = channel_result.unwrap();
+        channel = Channel::fetch_one(&mut conn, channel_uuid).await?;
     }

-    let delete_result = channel.delete(&data.pool).await;
-
-    if let Err(error) = delete_result {
-        return Ok(error);
-    }
+    channel.delete(&mut conn).await?;

     Ok(HttpResponse::Ok().finish())
 }


@@ -1,7 +1,6 @@
 use actix_web::{Error, HttpRequest, HttpResponse, get, rt, web};
 use actix_ws::AggregatedMessage;
 use futures_util::StreamExt as _;
-use log::error;
 use uuid::Uuid;

 use crate::{
@@ -22,57 +21,30 @@ pub async fn echo(
     let headers = req.headers();

     // Retrieve auth header
-    let auth_header = get_auth_header(headers);
-
-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
+    let auth_header = get_auth_header(headers)?;

     // Get uuids from path
     let (guild_uuid, channel_uuid) = path.into_inner();

+    let mut conn = data.pool.get().await.map_err(|e| crate::error::Error::from(e))?;
+
     // Authorize client using auth header
-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-
-    // Unwrap user uuid from authorization
-    let uuid = authorized.unwrap();
+    let uuid = check_access_token(auth_header, &mut conn).await?;

     // Get server member from psql
-    let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
-
-    if let Err(error) = member {
-        return Ok(error);
-    }
-
-    // Get cache for channel
-    let cache_result = data.get_cache_key(format!("{}", channel_uuid)).await;
+    Member::fetch_one(&mut conn, uuid, guild_uuid).await?;

     let channel: Channel;

     // Return channel cache or result from psql as `channel` variable
-    if let Ok(cache_hit) = cache_result {
+    if let Ok(cache_hit) = data.get_cache_key(format!("{}", channel_uuid)).await {
         channel = serde_json::from_str(&cache_hit).unwrap()
     } else {
-        let channel_result = Channel::fetch_one(&data.pool, guild_uuid, channel_uuid).await;
-
-        if let Err(error) = channel_result {
-            return Ok(error);
-        }
-
-        channel = channel_result.unwrap();
-
-        let cache_result = data
+        channel = Channel::fetch_one(&mut conn, channel_uuid).await?;
+
+        data
             .set_cache_key(format!("{}", channel_uuid), channel.clone(), 60)
-            .await;
-
-        if let Err(error) = cache_result {
-            error!("{}", error);
-            return Ok(HttpResponse::InternalServerError().finish());
-        }
+            .await?;
     }

     let (res, mut session_1, stream) = actix_ws::handle(&req, stream)?;
@@ -82,17 +54,11 @@ pub async fn echo(
         // aggregate continuation frames up to 1MiB
         .max_continuation_size(2_usize.pow(20));

-    let pubsub_result = data.cache_pool.get_async_pubsub().await;
-
-    if let Err(error) = pubsub_result {
-        error!("{}", error);
-        return Ok(HttpResponse::InternalServerError().finish());
-    }
+    let mut pubsub = data.cache_pool.get_async_pubsub().await.map_err(|e| crate::error::Error::from(e))?;

     let mut session_2 = session_1.clone();

     rt::spawn(async move {
-        let mut pubsub = pubsub_result.unwrap();
         pubsub.subscribe(channel_uuid.to_string()).await.unwrap();
         while let Some(msg) = pubsub.on_message().next().await {
             let payload: String = msg.get_payload().unwrap();
@@ -118,7 +84,7 @@ pub async fn echo(
                         .await
                         .unwrap();
                     channel
-                        .new_message(&data.pool, uuid, text.to_string())
+                        .new_message(&mut data.pool.get().await.unwrap(), uuid, text.to_string())
                         .await
                         .unwrap();
                 }


@@ -1,8 +1,8 @@
-use actix_web::{put, web, Error, HttpRequest, HttpResponse};
+use actix_web::{put, web, HttpRequest, HttpResponse};
 use uuid::Uuid;
 use futures_util::StreamExt as _;

-use crate::{api::v1::auth::check_access_token, structs::{Guild, Member}, utils::get_auth_header, Data};
+use crate::{error::Error, api::v1::auth::check_access_token, structs::{Guild, Member}, utils::get_auth_header, Data};

 #[put("{uuid}/icon")]
 pub async fn upload(
@@ -13,44 +13,24 @@ pub async fn upload(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();

-    let auth_header = get_auth_header(headers);
-
-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
+    let auth_header = get_auth_header(headers)?;

     let guild_uuid = path.into_inner().0;

-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-
-    let uuid = authorized.unwrap();
-
-    let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
-
-    if let Err(error) = member {
-        return Ok(error);
-    }
-
-    let guild_result = Guild::fetch_one(&data.pool, guild_uuid).await;
-
-    if let Err(error) = guild_result {
-        return Ok(error);
-    }
-
-    let mut guild = guild_result.unwrap();
+    let mut conn = data.pool.get().await?;
+    let uuid = check_access_token(auth_header, &mut conn).await?;
+
+    Member::fetch_one(&mut conn, uuid, guild_uuid).await?;
+
+    let mut guild = Guild::fetch_one(&mut conn, guild_uuid).await?;

     let mut bytes = web::BytesMut::new();
     while let Some(item) = payload.next().await {
         bytes.extend_from_slice(&item?);
     }

-    if let Err(error) = guild.set_icon(&data.bunny_cdn, &data.pool, data.config.bunny.cdn_url.clone(), bytes).await {
-        return Ok(error)
-    }
+    guild.set_icon(&data.bunny_cdn, &mut conn, data.config.bunny.cdn_url.clone(), bytes).await?;

     Ok(HttpResponse::Ok().finish())
 }


@@ -1,8 +1,9 @@
-use actix_web::{Error, HttpRequest, HttpResponse, get, post, web};
+use actix_web::{HttpRequest, HttpResponse, get, post, web};
 use serde::Deserialize;
 use uuid::Uuid;

 use crate::{
+    error::Error,
     Data,
     api::v1::auth::check_access_token,
     structs::{Guild, Member},
@@ -22,43 +23,21 @@ pub async fn get(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();

-    let auth_header = get_auth_header(headers);
-
-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
+    let auth_header = get_auth_header(headers)?;

     let guild_uuid = path.into_inner().0;

-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-
-    let uuid = authorized.unwrap();
-
-    let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
-
-    if let Err(error) = member {
-        return Ok(error);
-    }
-
-    let guild_result = Guild::fetch_one(&data.pool, guild_uuid).await;
-
-    if let Err(error) = guild_result {
-        return Ok(error);
-    }
-
-    let guild = guild_result.unwrap();
-
-    let invites = guild.get_invites(&data.pool).await;
-
-    if let Err(error) = invites {
-        return Ok(error);
-    }
-
-    Ok(HttpResponse::Ok().json(invites.unwrap()))
+    let mut conn = data.pool.get().await?;
+    let uuid = check_access_token(auth_header, &mut conn).await?;
+
+    Member::fetch_one(&mut conn, uuid, guild_uuid).await?;
+
+    let guild = Guild::fetch_one(&mut conn, guild_uuid).await?;
+    let invites = guild.get_invites(&mut conn).await?;
+
+    Ok(HttpResponse::Ok().json(invites))
 }

 #[post("{uuid}/invites")]
@@ -70,45 +49,21 @@ pub async fn create(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();

-    let auth_header = get_auth_header(headers);
-
-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
+    let auth_header = get_auth_header(headers)?;

     let guild_uuid = path.into_inner().0;

-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-
-    let uuid = authorized.unwrap();
-
-    let member_result = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
-
-    if let Err(error) = member_result {
-        return Ok(error);
-    }
-
-    let member = member_result.unwrap();
-
-    let guild_result = Guild::fetch_one(&data.pool, guild_uuid).await;
-
-    if let Err(error) = guild_result {
-        return Ok(error);
-    }
-
-    let guild = guild_result.unwrap();
+    let mut conn = data.pool.get().await?;
+    let uuid = check_access_token(auth_header, &mut conn).await?;
+
+    let member = Member::fetch_one(&mut conn, uuid, guild_uuid).await?;
+    let guild = Guild::fetch_one(&mut conn, guild_uuid).await?;

     let custom_id = invite_request.as_ref().map(|ir| ir.custom_id.clone());

-    let invite = guild.create_invite(&data.pool, &member, custom_id).await;
-
-    if let Err(error) = invite {
-        return Ok(error);
-    }
-
-    Ok(HttpResponse::Ok().json(invite.unwrap()))
+    let invite = guild.create_invite(&mut conn, &member, custom_id).await?;
+
+    Ok(HttpResponse::Ok().json(invite))
 }


@@ -1,4 +1,4 @@
-use actix_web::{Error, HttpRequest, HttpResponse, Scope, get, web};
+use actix_web::{HttpRequest, HttpResponse, Scope, get, web};
 use uuid::Uuid;

 mod channels;
@@ -7,6 +7,7 @@ mod roles;
 mod icon;

 use crate::{
+    error::Error,
     Data,
     api::v1::auth::check_access_token,
     structs::{Guild, Member},
@@ -43,33 +44,17 @@ pub async fn res(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();

-    let auth_header = get_auth_header(headers);
-
-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
+    let auth_header = get_auth_header(headers)?;

     let guild_uuid = path.into_inner().0;

-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-
-    let uuid = authorized.unwrap();
-
-    let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
-
-    if let Err(error) = member {
-        return Ok(error);
-    }
-
-    let guild = Guild::fetch_one(&data.pool, guild_uuid).await;
-
-    if let Err(error) = guild {
-        return Ok(error);
-    }
-
-    Ok(HttpResponse::Ok().json(guild.unwrap()))
+    let mut conn = data.pool.get().await?;
+    let uuid = check_access_token(auth_header, &mut conn).await?;
+
+    Member::fetch_one(&mut conn, uuid, guild_uuid).await?;
+
+    let guild = Guild::fetch_one(&mut conn, guild_uuid).await?;
+
+    Ok(HttpResponse::Ok().json(guild))
 }


@@ -1,13 +1,14 @@
+use ::uuid::Uuid;
+use actix_web::{HttpRequest, HttpResponse, get, post, web};
+use serde::Deserialize;
+
 use crate::{
+    error::Error,
     Data,
     api::v1::auth::check_access_token,
     structs::{Member, Role},
     utils::get_auth_header,
 };
-use ::uuid::Uuid;
-use actix_web::{Error, HttpRequest, HttpResponse, get, post, web};
-use log::error;
-use serde::Deserialize;
 pub mod uuid;
@@ -24,52 +25,27 @@ pub async fn get(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();
-    let auth_header = get_auth_header(headers);
-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
+    let auth_header = get_auth_header(headers)?;
     let guild_uuid = path.into_inner().0;
-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-    let uuid = authorized.unwrap();
-    let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
-    if let Err(error) = member {
-        return Ok(error);
-    }
-    let cache_result = data.get_cache_key(format!("{}_roles", guild_uuid)).await;
-    if let Ok(cache_hit) = cache_result {
+    let mut conn = data.pool.get().await?;
+    let uuid = check_access_token(auth_header, &mut conn).await?;
+    Member::fetch_one(&mut conn, uuid, guild_uuid).await?;
+    if let Ok(cache_hit) = data.get_cache_key(format!("{}_roles", guild_uuid)).await {
         return Ok(HttpResponse::Ok()
             .content_type("application/json")
            .body(cache_hit));
     }
-    let roles_result = Role::fetch_all(&data.pool, guild_uuid).await;
-    if let Err(error) = roles_result {
-        return Ok(error);
-    }
-    let roles = roles_result.unwrap();
-    let cache_result = data
+    let roles = Role::fetch_all(&mut conn, guild_uuid).await?;
+    data
         .set_cache_key(format!("{}_roles", guild_uuid), roles.clone(), 1800)
-        .await;
-    if let Err(error) = cache_result {
-        error!("{}", error);
-        return Ok(HttpResponse::InternalServerError().finish());
-    }
+        .await?;
     Ok(HttpResponse::Ok().json(roles))
 }
@@ -83,35 +59,19 @@ pub async fn create(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();
-    let auth_header = get_auth_header(headers);
-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
+    let auth_header = get_auth_header(headers)?;
     let guild_uuid = path.into_inner().0;
-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-    let uuid = authorized.unwrap();
-    let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
-    if let Err(error) = member {
-        return Ok(error);
-    }
+    let mut conn = data.pool.get().await?;
+    let uuid = check_access_token(auth_header, &mut conn).await?;
+    Member::fetch_one(&mut conn, uuid, guild_uuid).await?;
     // FIXME: Logic to check permissions, should probably be done in utils.rs
-    let role = Role::new(&data.pool, guild_uuid, role_info.name.clone()).await;
-    if let Err(error) = role {
-        return Ok(error);
-    }
-    Ok(HttpResponse::Ok().json(role.unwrap()))
+    let role = Role::new(&mut conn, guild_uuid, role_info.name.clone()).await?;
+    Ok(HttpResponse::Ok().json(role))
 }


@@ -1,12 +1,12 @@
 use crate::{
+    error::Error,
     Data,
     api::v1::auth::check_access_token,
     structs::{Member, Role},
     utils::get_auth_header,
 };
 use ::uuid::Uuid;
-use actix_web::{Error, HttpRequest, HttpResponse, get, web};
-use log::error;
+use actix_web::{HttpRequest, HttpResponse, get, web};
 #[get("{uuid}/roles/{role_uuid}")]
 pub async fn get(
@@ -16,52 +16,27 @@ pub async fn get(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();
-    let auth_header = get_auth_header(headers);
-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
+    let auth_header = get_auth_header(headers)?;
     let (guild_uuid, role_uuid) = path.into_inner();
-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-    let uuid = authorized.unwrap();
-    let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
-    if let Err(error) = member {
-        return Ok(error);
-    }
-    let cache_result = data.get_cache_key(format!("{}", role_uuid)).await;
-    if let Ok(cache_hit) = cache_result {
+    let mut conn = data.pool.get().await?;
+    let uuid = check_access_token(auth_header, &mut conn).await?;
+    Member::fetch_one(&mut conn, uuid, guild_uuid).await?;
+    if let Ok(cache_hit) = data.get_cache_key(format!("{}", role_uuid)).await {
         return Ok(HttpResponse::Ok()
             .content_type("application/json")
            .body(cache_hit));
     }
-    let role_result = Role::fetch_one(&data.pool, guild_uuid, role_uuid).await;
-    if let Err(error) = role_result {
-        return Ok(error);
-    }
-    let role = role_result.unwrap();
-    let cache_result = data
+    let role = Role::fetch_one(&mut conn, role_uuid).await?;
+    data
         .set_cache_key(format!("{}", role_uuid), role.clone(), 60)
-        .await;
-    if let Err(error) = cache_result {
-        error!("{}", error);
-        return Ok(HttpResponse::InternalServerError().finish());
-    }
+        .await?;
     Ok(HttpResponse::Ok().json(role))
 }


@@ -1,31 +1,31 @@
 use std::time::SystemTime;
-use actix_web::{HttpResponse, Responder, get, web};
+use actix_web::{HttpResponse, get, web};
+use diesel::QueryDsl;
+use diesel_async::RunQueryDsl;
 use serde::Serialize;
+use crate::error::Error;
 use crate::Data;
+use crate::schema::users::dsl::{users, uuid};
 const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
 #[derive(Serialize)]
 struct Response {
-    accounts: usize,
+    accounts: i64,
     uptime: u64,
     version: String,
     build_number: String,
 }
 #[get("/stats")]
-pub async fn res(data: web::Data<Data>) -> impl Responder {
-    let accounts;
-    if let Ok(users) = sqlx::query("SELECT uuid FROM users")
-        .fetch_all(&data.pool)
-        .await
-    {
-        accounts = users.len();
-    } else {
-        return HttpResponse::InternalServerError().finish();
-    }
+pub async fn res(data: web::Data<Data>) -> Result<HttpResponse, Error> {
+    let accounts: i64 = users
+        .select(uuid)
+        .count()
+        .get_result(&mut data.pool.get().await?)
+        .await?;
     let response = Response {
         // TODO: Get number of accounts from db
@@ -39,5 +39,5 @@ pub async fn res(data: web::Data<Data>) -> impl Responder {
         build_number: String::from("how do i implement this?"),
     };
-    HttpResponse::Ok().json(response)
+    Ok(HttpResponse::Ok().json(response))
 }
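For reference, a small sketch (not part of the commit) that prints the statement diesel builds for the count above; `debug_query` is diesel's own helper, while the `print_count_sql` function name is made up here:

use diesel::{QueryDsl, debug_query, pg::Pg};

use crate::schema::users::dsl::{users, uuid};

// Hypothetical: render the SQL behind `.select(uuid).count()` when checking
// what the /stats handler actually sends to Postgres.
fn print_count_sql() {
    let query = users.select(uuid).count();
    println!("{}", debug_query::<Pg, _>(&query));
}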


@@ -1,33 +1,21 @@
-use actix_web::{get, patch, web, Error, HttpRequest, HttpResponse};
+use actix_web::{get, patch, web, HttpRequest, HttpResponse};
 use serde::Deserialize;
-use crate::{api::v1::auth::check_access_token, structs::Me, utils::get_auth_header, Data};
+use crate::{error::Error, structs::Me, api::v1::auth::check_access_token, utils::get_auth_header, Data};
 #[get("/me")]
 pub async fn res(req: HttpRequest, data: web::Data<Data>) -> Result<HttpResponse, Error> {
     let headers = req.headers();
-    let auth_header = get_auth_header(headers);
-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-    let uuid = authorized.unwrap();
-    let me = Me::get(&data.pool, uuid).await;
-    if let Err(error) = me {
-        return Ok(error);
-    }
-    Ok(HttpResponse::Ok().json(me.unwrap()))
+    let auth_header = get_auth_header(headers)?;
+    let mut conn = data.pool.get().await?;
+    let uuid = check_access_token(auth_header, &mut conn).await?;
+    let me = Me::get(&mut conn, uuid).await?;
+    Ok(HttpResponse::Ok().json(me))
 }
 #[derive(Deserialize)]
@@ -42,27 +30,13 @@ struct NewInfo {
 pub async fn update(req: HttpRequest, new_info: web::Json<NewInfo>, data: web::Data<Data>) -> Result<HttpResponse, Error> {
     let headers = req.headers();
-    let auth_header = get_auth_header(headers);
-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-    let uuid = authorized.unwrap();
-    let me_result = Me::get(&data.pool, uuid).await;
-    if let Err(error) = me_result {
-        return Ok(error);
-    }
-    let me = me_result.unwrap();
+    let auth_header = get_auth_header(headers)?;
+    let mut conn = data.pool.get().await?;
+    let uuid = check_access_token(auth_header, &mut conn).await?;
+    let me = Me::get(&mut conn, uuid).await?;
     if let Some(username) = &new_info.username {
         todo!();


@@ -1,5 +1,6 @@
-use crate::{api::v1::auth::check_access_token, structs::{StartAmountQuery, User}, utils::get_auth_header, Data};
-use actix_web::{Error, HttpRequest, HttpResponse, Scope, get, web};
+use actix_web::{HttpRequest, HttpResponse, Scope, get, web};
+
+use crate::{api::v1::auth::check_access_token, error::Error, structs::{StartAmountQuery, User}, utils::get_auth_header, Data};
 mod me;
 mod uuid;
@@ -19,7 +20,7 @@ pub async fn res(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();
-    let auth_header = get_auth_header(headers);
+    let auth_header = get_auth_header(headers)?;
     let start = request_query.start.unwrap_or(0);
@@ -29,17 +30,11 @@ pub async fn res(
         return Ok(HttpResponse::BadRequest().finish());
     }
-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-    let accounts = User::fetch_amount(&data.pool, start, amount).await;
-    if let Err(error) = accounts {
-        return Ok(error);
-    }
-    Ok(HttpResponse::Ok().json(accounts.unwrap()))
+    let mut conn = data.pool.get().await?;
+    check_access_token(auth_header, &mut conn).await?;
+    let users = User::fetch_amount(&mut conn, start, amount).await?;
+    Ok(HttpResponse::Ok().json(users))
 }


@@ -1,8 +1,8 @@
-use actix_web::{Error, HttpRequest, HttpResponse, get, web};
-use log::error;
+use actix_web::{HttpRequest, HttpResponse, get, web};
 use uuid::Uuid;
-use crate::{api::v1::auth::check_access_token, structs::User, utils::get_auth_header, Data};
+use crate::{error::Error, api::v1::auth::check_access_token, structs::User, utils::get_auth_header, Data};
 #[get("/{uuid}")]
 pub async fn res(
@@ -14,42 +14,23 @@ pub async fn res(
     let uuid = path.into_inner().0;
-    let auth_header = get_auth_header(headers);
-    if let Err(error) = auth_header {
-        return Ok(error);
-    }
-    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
-    if let Err(error) = authorized {
-        return Ok(error);
-    }
-    let cache_result = data.get_cache_key(uuid.to_string()).await;
-    if let Ok(cache_hit) = cache_result {
+    let auth_header = get_auth_header(headers)?;
+    let mut conn = data.pool.get().await?;
+    check_access_token(auth_header, &mut conn).await?;
+    if let Ok(cache_hit) = data.get_cache_key(uuid.to_string()).await {
         return Ok(HttpResponse::Ok()
             .content_type("application/json")
            .body(cache_hit));
     }
-    let user_result = User::fetch_one(&data.pool, uuid).await;
-    if let Err(error) = user_result {
-        return Ok(error);
-    }
-    let user = user_result.unwrap();
-    let cache_result = data
+    let user = User::fetch_one(&mut conn, uuid).await?;
+    data
         .set_cache_key(uuid.to_string(), user.clone(), 1800)
-        .await;
-    if let Err(error) = cache_result {
-        error!("{}", error);
-        return Ok(HttpResponse::InternalServerError().finish());
-    }
+        .await?;
     Ok(HttpResponse::Ok().json(user))
 }


@@ -1,8 +1,7 @@
-use crate::Error;
 use bunny_api_tokio::edge_storage::Endpoint;
+use crate::error::Error;
 use log::debug;
 use serde::Deserialize;
-use sqlx::postgres::PgConnectOptions;
 use tokio::fs::read_to_string;
 use url::Url;
@@ -122,13 +121,24 @@ pub struct Bunny {
 }
 impl Database {
-    pub fn connect_options(&self) -> PgConnectOptions {
-        PgConnectOptions::new()
-            .database(&self.database)
-            .host(&self.host)
-            .username(&self.username)
-            .password(&self.password)
-            .port(self.port)
+    pub fn url(&self) -> String {
+        let mut url = String::from("postgres://");
+
+        url += &self.username;
+        url += ":";
+        url += &self.password;
+        url += "@";
+        url += &self.host;
+        url += ":";
+        url += &self.port.to_string();
+        url += "/";
+        url += &self.database;
+
+        url
     }
 }
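The new `url()` concatenates the connection string piece by piece; a minimal sketch of the same result with `format!` (not part of the commit, and note that a password containing `@`, `:` or `/` would still need percent-encoding before being embedded in the URL):

// Same fields as the config's Database struct, repeated here only to keep the
// sketch self-contained; the real struct lives in config.rs.
struct Database {
    username: String,
    password: String,
    host: String,
    port: u16,
    database: String,
}

impl Database {
    fn url(&self) -> String {
        format!(
            "postgres://{}:{}@{}:{}/{}",
            self.username, self.password, self.host, self.port, self.database
        )
    }
}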

src/error.rs (new file)

@@ -0,0 +1,85 @@
use std::{io, time::SystemTimeError};
use actix_web::{error::{PayloadError, ResponseError}, http::{header::{ContentType, ToStrError}, StatusCode}, HttpResponse};
use deadpool::managed::{BuildError, PoolError};
use redis::RedisError;
use serde::Serialize;
use thiserror::Error;
use diesel::{result::Error as DieselError, ConnectionError};
use diesel_async::pooled_connection::PoolError as DieselPoolError;
use tokio::task::JoinError;
use serde_json::Error as JsonError;
use toml::de::Error as TomlError;
use log::error;
#[derive(Debug, Error)]
pub enum Error {
#[error(transparent)]
SqlError(#[from] DieselError),
#[error(transparent)]
PoolError(#[from] PoolError<DieselPoolError>),
#[error(transparent)]
BuildError(#[from] BuildError),
#[error(transparent)]
RedisError(#[from] RedisError),
#[error(transparent)]
ConnectionError(#[from] ConnectionError),
#[error(transparent)]
JoinError(#[from] JoinError),
#[error(transparent)]
IoError(#[from] io::Error),
#[error(transparent)]
TomlError(#[from] TomlError),
#[error(transparent)]
JsonError(#[from] JsonError),
#[error(transparent)]
SystemTimeError(#[from] SystemTimeError),
#[error(transparent)]
ToStrError(#[from] ToStrError),
#[error(transparent)]
RandomError(#[from] getrandom::Error),
#[error(transparent)]
BunnyError(#[from] bunny_api_tokio::error::Error),
#[error(transparent)]
UrlParseError(#[from] url::ParseError),
#[error(transparent)]
PayloadError(#[from] PayloadError),
#[error("{0}")]
PasswordHashError(String),
#[error("{0}")]
BadRequest(String),
#[error("{0}")]
Unauthorized(String),
}
impl ResponseError for Error {
fn error_response(&self) -> HttpResponse {
error!("{}: {}", self.status_code(), self.to_string());
HttpResponse::build(self.status_code())
.insert_header(ContentType::json())
.json(WebError::new(self.to_string()))
}
fn status_code(&self) -> StatusCode {
match *self {
Error::SqlError(DieselError::NotFound) => StatusCode::NOT_FOUND,
Error::BadRequest(_) => StatusCode::BAD_REQUEST,
Error::Unauthorized(_) => StatusCode::UNAUTHORIZED,
_ => StatusCode::INTERNAL_SERVER_ERROR,
}
}
}
#[derive(Serialize)]
struct WebError {
message: String,
}
impl WebError {
fn new(message: String) -> Self {
Self {
message,
}
}
}
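A sketch (not from the commit) of what this type buys the handlers: because `Error` implements `ResponseError`, a route can return `Result<HttpResponse, Error>` and lean on `?`; anything covered by the `#[from]` conversions above is logged and rendered as the JSON `{ "message": … }` body with the status chosen in `status_code()`. The `/healthcheck` route below is hypothetical:

use actix_web::{HttpResponse, get, web};

use crate::{Data, error::Error};

// Hypothetical route: each `?` can surface a pool or redis error, which Actix
// then turns into the JSON error response defined by `error_response()`.
#[get("/healthcheck")]
async fn healthcheck(data: web::Data<Data>) -> Result<HttpResponse, Error> {
    let _conn = data.pool.get().await?; // PoolError<DieselPoolError> -> Error
    data.set_cache_key("healthcheck".to_string(), (), 60).await?; // RedisError / JsonError -> Error
    Ok(HttpResponse::Ok().finish())
}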


@@ -2,17 +2,24 @@ use actix_cors::Cors;
 use actix_web::{App, HttpServer, web};
 use argon2::Argon2;
 use clap::Parser;
+use error::Error;
 use simple_logger::SimpleLogger;
-use sqlx::{PgPool, Pool, Postgres};
+use diesel_async::pooled_connection::AsyncDieselConnectionManager;
+use diesel_async::pooled_connection::deadpool::Pool;
 use std::time::SystemTime;
 mod config;
 use config::{Config, ConfigBuilder};
+use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
+
+pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!();
+
+type Conn = deadpool::managed::Object<AsyncDieselConnectionManager<diesel_async::AsyncPgConnection>>;
+
 mod api;
 pub mod structs;
 pub mod utils;
-type Error = Box<dyn std::error::Error>;
+pub mod schema;
+pub mod error;
 #[derive(Parser, Debug)]
 #[command(version, about, long_about = None)]
@@ -23,7 +30,7 @@ struct Args {
 #[derive(Clone)]
 pub struct Data {
-    pub pool: Pool<Postgres>,
+    pub pool: deadpool::managed::Pool<AsyncDieselConnectionManager<diesel_async::AsyncPgConnection>, Conn>,
     pub cache_pool: redis::Client,
     pub config: Config,
     pub argon2: Argon2<'static>,
@@ -45,7 +52,9 @@ async fn main() -> Result<(), Error> {
     let web = config.web.clone();
-    let pool = PgPool::connect_with(config.database.connect_options()).await?;
+    // create a new connection pool with the default config
+    let pool_config = AsyncDieselConnectionManager::<diesel_async::AsyncPgConnection>::new(config.database.url());
+    let pool = Pool::builder(pool_config).build()?;
     let cache_pool = redis::Client::open(config.cache_database.url())?;
@@ -53,103 +62,18 @@ async fn main() -> Result<(), Error> {
     bunny_cdn.storage.init(config.bunny.endpoint.clone(), config.bunny.storage_zone.clone())?;
+    let database_url = config.database.url();
+
+    tokio::task::spawn_blocking(move || {
+        use diesel::prelude::Connection;
+        use diesel_async::async_connection_wrapper::AsyncConnectionWrapper;
+
+        let mut conn = AsyncConnectionWrapper::<diesel_async::AsyncPgConnection>::establish(&database_url)?;
+
+        conn.run_pending_migrations(MIGRATIONS)?;
+
+        Ok::<_, Box<dyn std::error::Error + Send + Sync>>(())
+    }).await?.unwrap();
-    /*
-    TODO: Figure out if a table should be used here and if not then what.
-    Also figure out if these should be different types from what they currently are and if we should add more "constraints"
-    TODO: References to time should be removed in favor of using the timestamp built in to UUIDv7 (apart from deleted_at in users)
-    */
-    sqlx::raw_sql(
-        r#"
CREATE TABLE IF NOT EXISTS users (
uuid uuid PRIMARY KEY NOT NULL,
username varchar(32) NOT NULL,
display_name varchar(64) DEFAULT NULL,
password varchar(512) NOT NULL,
email varchar(100) NOT NULL,
email_verified boolean NOT NULL DEFAULT FALSE,
avatar varchar(100) DEFAULT NULL,
is_deleted boolean NOT NULL DEFAULT FALSE,
deleted_at int8 DEFAULT NULL,
CONSTRAINT unique_username_active UNIQUE NULLS NOT DISTINCT (username, is_deleted),
CONSTRAINT unique_email_active UNIQUE NULLS NOT DISTINCT (email, is_deleted)
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_unique_username_active
ON users(username)
WHERE is_deleted = FALSE;
CREATE UNIQUE INDEX IF NOT EXISTS idx_unique_email_active
ON users(email)
WHERE is_deleted = FALSE;
CREATE TABLE IF NOT EXISTS instance_permissions (
uuid uuid NOT NULL REFERENCES users(uuid),
administrator boolean NOT NULL DEFAULT FALSE
);
CREATE TABLE IF NOT EXISTS refresh_tokens (
token varchar(64) PRIMARY KEY UNIQUE NOT NULL,
uuid uuid NOT NULL REFERENCES users(uuid),
created_at int8 NOT NULL,
device_name varchar(16) NOT NULL
);
CREATE TABLE IF NOT EXISTS access_tokens (
token varchar(32) PRIMARY KEY UNIQUE NOT NULL,
refresh_token varchar(64) UNIQUE NOT NULL REFERENCES refresh_tokens(token) ON UPDATE CASCADE ON DELETE CASCADE,
uuid uuid NOT NULL REFERENCES users(uuid),
created_at int8 NOT NULL
);
CREATE TABLE IF NOT EXISTS guilds (
uuid uuid PRIMARY KEY NOT NULL,
owner_uuid uuid NOT NULL REFERENCES users(uuid),
name VARCHAR(100) NOT NULL,
description VARCHAR(300),
icon VARCHAR(100) DEFAULT NULL
);
CREATE TABLE IF NOT EXISTS guild_members (
uuid uuid PRIMARY KEY NOT NULL,
guild_uuid uuid NOT NULL REFERENCES guilds(uuid) ON DELETE CASCADE,
user_uuid uuid NOT NULL REFERENCES users(uuid),
nickname VARCHAR(100) DEFAULT NULL
);
CREATE TABLE IF NOT EXISTS roles (
uuid uuid UNIQUE NOT NULL,
guild_uuid uuid NOT NULL REFERENCES guilds(uuid) ON DELETE CASCADE,
name VARCHAR(50) NOT NULL,
color int NOT NULL DEFAULT 16777215,
position int NOT NULL,
permissions int8 NOT NULL DEFAULT 0,
PRIMARY KEY (uuid, guild_uuid)
);
CREATE TABLE IF NOT EXISTS role_members (
role_uuid uuid NOT NULL REFERENCES roles(uuid) ON DELETE CASCADE,
member_uuid uuid NOT NULL REFERENCES guild_members(uuid) ON DELETE CASCADE,
PRIMARY KEY (role_uuid, member_uuid)
);
CREATE TABLE IF NOT EXISTS channels (
uuid uuid PRIMARY KEY NOT NULL,
guild_uuid uuid NOT NULL REFERENCES guilds(uuid) ON DELETE CASCADE,
name varchar(32) NOT NULL,
description varchar(500) NOT NULL
);
CREATE TABLE IF NOT EXISTS channel_permissions (
channel_uuid uuid NOT NULL REFERENCES channels(uuid) ON DELETE CASCADE,
role_uuid uuid NOT NULL REFERENCES roles(uuid) ON DELETE CASCADE,
permissions int8 NOT NULL DEFAULT 0,
PRIMARY KEY (channel_uuid, role_uuid)
);
CREATE TABLE IF NOT EXISTS messages (
uuid uuid PRIMARY KEY NOT NULL,
channel_uuid uuid NOT NULL REFERENCES channels(uuid) ON DELETE CASCADE,
user_uuid uuid NOT NULL REFERENCES users(uuid),
message varchar(4000) NOT NULL
);
CREATE TABLE IF NOT EXISTS invites (
id varchar(32) PRIMARY KEY NOT NULL,
guild_uuid uuid NOT NULL REFERENCES guilds(uuid) ON DELETE CASCADE,
user_uuid uuid NOT NULL REFERENCES users(uuid)
);
"#,
)
.execute(&pool)
.await?;
/*
**Stored for later possible use**
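The detour through `spawn_blocking` and `AsyncConnectionWrapper` above exists because the `MigrationHarness` from diesel_migrations is synchronous: the async connection is wrapped to expose the blocking `Connection` interface and the harness runs on a blocking thread. Roughly the same idea as a standalone helper (a sketch, not the commit's code; the `run_migrations` name is made up):

use diesel::prelude::Connection;
use diesel_async::async_connection_wrapper::AsyncConnectionWrapper;
use diesel_migrations::MigrationHarness;

// Hypothetical helper: run the embedded migrations from main.rs against the
// given database URL on a blocking thread, propagating errors instead of
// panicking.
async fn run_migrations(database_url: String) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    tokio::task::spawn_blocking(move || {
        let mut conn =
            AsyncConnectionWrapper::<diesel_async::AsyncPgConnection>::establish(&database_url)?;
        conn.run_pending_migrations(crate::MIGRATIONS)?;
        Ok::<_, Box<dyn std::error::Error + Send + Sync>>(())
    })
    .await??;
    Ok(())
}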

src/schema.rs (new file)

@@ -0,0 +1,160 @@
// @generated automatically by Diesel CLI.
diesel::table! {
access_tokens (token) {
#[max_length = 32]
token -> Varchar,
#[max_length = 64]
refresh_token -> Varchar,
uuid -> Uuid,
created_at -> Int8,
}
}
diesel::table! {
channel_permissions (channel_uuid, role_uuid) {
channel_uuid -> Uuid,
role_uuid -> Uuid,
permissions -> Int8,
}
}
diesel::table! {
channels (uuid) {
uuid -> Uuid,
guild_uuid -> Uuid,
#[max_length = 32]
name -> Varchar,
#[max_length = 500]
description -> Nullable<Varchar>,
}
}
diesel::table! {
guild_members (uuid) {
uuid -> Uuid,
guild_uuid -> Uuid,
user_uuid -> Uuid,
#[max_length = 100]
nickname -> Nullable<Varchar>,
}
}
diesel::table! {
guilds (uuid) {
uuid -> Uuid,
owner_uuid -> Uuid,
#[max_length = 100]
name -> Varchar,
#[max_length = 300]
description -> Nullable<Varchar>,
#[max_length = 100]
icon -> Nullable<Varchar>,
}
}
diesel::table! {
instance_permissions (uuid) {
uuid -> Uuid,
administrator -> Bool,
}
}
diesel::table! {
invites (id) {
#[max_length = 32]
id -> Varchar,
guild_uuid -> Uuid,
user_uuid -> Uuid,
}
}
diesel::table! {
messages (uuid) {
uuid -> Uuid,
channel_uuid -> Uuid,
user_uuid -> Uuid,
#[max_length = 4000]
message -> Varchar,
}
}
diesel::table! {
refresh_tokens (token) {
#[max_length = 64]
token -> Varchar,
uuid -> Uuid,
created_at -> Int8,
#[max_length = 16]
device_name -> Varchar,
}
}
diesel::table! {
role_members (role_uuid, member_uuid) {
role_uuid -> Uuid,
member_uuid -> Uuid,
}
}
diesel::table! {
roles (uuid, guild_uuid) {
uuid -> Uuid,
guild_uuid -> Uuid,
#[max_length = 50]
name -> Varchar,
color -> Int4,
position -> Int4,
permissions -> Int8,
}
}
diesel::table! {
users (uuid) {
uuid -> Uuid,
#[max_length = 32]
username -> Varchar,
#[max_length = 64]
display_name -> Nullable<Varchar>,
#[max_length = 512]
password -> Varchar,
#[max_length = 100]
email -> Varchar,
email_verified -> Bool,
is_deleted -> Bool,
deleted_at -> Nullable<Int8>,
#[max_length = 100]
avatar -> Nullable<Varchar>,
}
}
diesel::joinable!(access_tokens -> refresh_tokens (refresh_token));
diesel::joinable!(access_tokens -> users (uuid));
diesel::joinable!(channel_permissions -> channels (channel_uuid));
diesel::joinable!(channels -> guilds (guild_uuid));
diesel::joinable!(guild_members -> guilds (guild_uuid));
diesel::joinable!(guild_members -> users (user_uuid));
diesel::joinable!(guilds -> users (owner_uuid));
diesel::joinable!(instance_permissions -> users (uuid));
diesel::joinable!(invites -> guilds (guild_uuid));
diesel::joinable!(invites -> users (user_uuid));
diesel::joinable!(messages -> channels (channel_uuid));
diesel::joinable!(messages -> users (user_uuid));
diesel::joinable!(refresh_tokens -> users (uuid));
diesel::joinable!(role_members -> guild_members (member_uuid));
diesel::joinable!(roles -> guilds (guild_uuid));
diesel::allow_tables_to_appear_in_same_query!(
access_tokens,
channel_permissions,
channels,
guild_members,
guilds,
instance_permissions,
invites,
messages,
refresh_tokens,
role_members,
roles,
users,
);
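A sketch (not from the commit) of how the generated DSL above is typically consumed with diesel-async; the `user_exists` helper is hypothetical, and a pooled connection obtained from `data.pool.get().await?` deref-coerces to `AsyncPgConnection`:

use ::uuid::Uuid;
use diesel::prelude::*;
use diesel_async::{AsyncPgConnection, RunQueryDsl};

use crate::schema::users::dsl::{users, uuid};

// Hypothetical helper: count rows through the generated schema and report
// whether a user with this uuid exists.
async fn user_exists(conn: &mut AsyncPgConnection, user_uuid: Uuid) -> Result<bool, diesel::result::Error> {
    let matches: i64 = users
        .filter(uuid.eq(user_uuid))
        .count()
        .get_result(conn)
        .await?;

    Ok(matches > 0)
}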

File diff suppressed because it is too large


@@ -1,5 +1,4 @@
 use actix_web::{
-    HttpResponse,
     cookie::{Cookie, SameSite, time::Duration},
     http::header::HeaderMap,
 };
@@ -8,25 +7,31 @@ use hex::encode;
 use redis::RedisError;
 use serde::Serialize;
-use crate::Data;
+use crate::{error::Error, Data};
-pub fn get_auth_header(headers: &HeaderMap) -> Result<&str, HttpResponse> {
+pub fn get_auth_header(headers: &HeaderMap) -> Result<&str, Error> {
     let auth_token = headers.get(actix_web::http::header::AUTHORIZATION);
     if auth_token.is_none() {
-        return Err(HttpResponse::Unauthorized().finish());
+        return Err(Error::Unauthorized("No authorization header provided".to_string()));
     }
-    let auth = auth_token.unwrap().to_str();
-    if let Err(error) = auth {
-        return Err(HttpResponse::Unauthorized().json(format!(r#" {{ "error": "{}" }} "#, error)));
+    let auth_raw = auth_token.unwrap().to_str()?;
+
+    let mut auth = auth_raw.split_whitespace();
+
+    let auth_type = auth.nth(0);
+    let auth_value = auth.nth(0);
+
+    if auth_type.is_none() {
+        return Err(Error::BadRequest("Authorization header is empty".to_string()));
+    } else if auth_type.is_some_and(|at| at != "Bearer") {
+        return Err(Error::BadRequest("Only token auth is supported".to_string()));
     }
-    let auth_value = auth.unwrap().split_whitespace().nth(1);
     if auth_value.is_none() {
-        return Err(HttpResponse::BadRequest().finish());
+        return Err(Error::BadRequest("No token provided".to_string()));
     }
     Ok(auth_value.unwrap())
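For illustration, a sketch of a unit test for the parser above (not part of the commit); it feeds an actix `HeaderMap` with a Bearer token and checks that the raw token comes back:

#[cfg(test)]
mod tests {
    use super::*;
    use actix_web::http::header::{AUTHORIZATION, HeaderMap, HeaderValue};

    // Hypothetical test: "Bearer abc123" should yield "abc123"; a missing or
    // non-Bearer header would instead hit the Unauthorized / BadRequest arms.
    #[test]
    fn parses_bearer_token() {
        let mut headers = HeaderMap::new();
        headers.insert(AUTHORIZATION, HeaderValue::from_static("Bearer abc123"));
        assert_eq!(get_auth_header(&headers).unwrap(), "abc123");
    }
}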
@@ -60,12 +65,12 @@ impl Data {
         key: String,
         value: impl Serialize,
         expire: u32,
-    ) -> Result<(), RedisError> {
+    ) -> Result<(), Error> {
         let mut conn = self.cache_pool.get_multiplexed_tokio_connection().await?;
         let key_encoded = encode(key);
-        let value_json = serde_json::to_string(&value).unwrap();
+        let value_json = serde_json::to_string(&value)?;
         redis::cmd("SET")
             .arg(&[key_encoded.clone(), value_json])
@@ -75,7 +80,9 @@ impl Data {
         redis::cmd("EXPIRE")
             .arg(&[key_encoded, expire.to_string()])
             .exec_async(&mut conn)
-            .await
+            .await?;
+
+        Ok(())
     }
     pub async fn get_cache_key(&self, key: String) -> Result<String, RedisError> {