Compare commits


No commits in common. "82ac501519ed4493ebcaec9edbaecabc18bbb4fa" and "fd8d8234048c08d90aef2885c0c0d725b8ccb83f" have entirely different histories.

49 changed files with 1546 additions and 1158 deletions

View file

@@ -21,6 +21,7 @@ regex = "1.11"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 simple_logger = "5.0.0"
+sqlx = { version = "0.8", features = ["runtime-tokio", "tls-native-tls", "postgres"] }
 redis = { version = "0.31.0", features= ["tokio-comp"] }
 tokio-tungstenite = { version = "0.26", features = ["native-tls", "url"] }
 toml = "0.8"
@@ -29,11 +30,6 @@ uuid = { version = "1.16", features = ["serde", "v7"] }
 random-string = "1.1"
 actix-ws = "0.3.0"
 futures-util = "0.3.31"
-deadpool = "0.12"
-diesel = { version = "2.2", features = ["uuid"] }
-diesel-async = { version = "0.5", features = ["deadpool", "postgres", "async-connection-wrapper"] }
-diesel_migrations = { version = "2.2.0", features = ["postgres"] }
-thiserror = "2.0.12"
 
 [dependencies.tokio]
 version = "1.44"
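
With deadpool and the Diesel crates dropped in favour of sqlx, the pool the handlers reach through data.pool would be an sqlx Postgres pool. A minimal sketch, not part of this diff; the connection string and pool size are placeholders:

use sqlx::postgres::{PgPool, PgPoolOptions};

// Sketch only: builds the Postgres pool the handlers below use as `data.pool`.
async fn make_pool() -> Result<PgPool, sqlx::Error> {
    PgPoolOptions::new()
        .max_connections(5) // assumed pool size
        .connect("postgres://gorb:password@localhost/gorb") // assumed connection string
        .await
}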

View file

@ -1,3 +0,0 @@
fn main() {
println!("cargo:rerun-if-changed=migrations");
}

View file

@ -1,9 +0,0 @@
# For documentation on how to configure this file,
# see https://diesel.rs/guides/configuring-diesel-cli
[print_schema]
file = "src/schema.rs"
custom_type_derives = ["diesel::query_builder::QueryId", "Clone"]
[migrations_directory]
dir = "migrations"

View file

View file

@ -1,6 +0,0 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.
DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass);
DROP FUNCTION IF EXISTS diesel_set_updated_at();

View file

@ -1,36 +0,0 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.
-- Sets up a trigger for the given table to automatically set a column called
-- `updated_at` whenever the row is modified (unless `updated_at` was included
-- in the modified columns)
--
-- # Example
--
-- ```sql
-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW());
--
-- SELECT diesel_manage_updated_at('users');
-- ```
CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$
BEGIN
EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s
FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl);
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$
BEGIN
IF (
NEW IS DISTINCT FROM OLD AND
NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at
) THEN
NEW.updated_at := current_timestamp;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;

View file

@ -1,4 +0,0 @@
-- This file should undo anything in `up.sql`
DROP INDEX idx_unique_username_active;
DROP INDEX idx_unique_email_active;
DROP TABLE users;

View file

@ -1,20 +0,0 @@
-- Your SQL goes here
CREATE TABLE users (
uuid uuid PRIMARY KEY NOT NULL,
username varchar(32) NOT NULL,
display_name varchar(64) DEFAULT NULL,
password varchar(512) NOT NULL,
email varchar(100) NOT NULL,
email_verified boolean NOT NULL DEFAULT FALSE,
is_deleted boolean NOT NULL DEFAULT FALSE,
deleted_at int8 DEFAULT NULL,
CONSTRAINT unique_username_active UNIQUE NULLS NOT DISTINCT (username, is_deleted),
CONSTRAINT unique_email_active UNIQUE NULLS NOT DISTINCT (email, is_deleted)
);
CREATE UNIQUE INDEX idx_unique_username_active
ON users(username)
WHERE is_deleted = FALSE;
CREATE UNIQUE INDEX idx_unique_email_active
ON users(email)
WHERE is_deleted = FALSE;

View file

@ -1,2 +0,0 @@
-- This file should undo anything in `up.sql`
DROP TABLE instance_permissions;

View file

@ -1,5 +0,0 @@
-- Your SQL goes here
CREATE TABLE instance_permissions (
uuid uuid PRIMARY KEY NOT NULL REFERENCES users(uuid),
administrator boolean NOT NULL DEFAULT FALSE
);

View file

@ -1,3 +0,0 @@
-- This file should undo anything in `up.sql`
DROP TABLE access_tokens;
DROP TABLE refresh_tokens;

View file

@ -1,13 +0,0 @@
-- Your SQL goes here
CREATE TABLE refresh_tokens (
token varchar(64) PRIMARY KEY UNIQUE NOT NULL,
uuid uuid NOT NULL REFERENCES users(uuid),
created_at int8 NOT NULL,
device_name varchar(16) NOT NULL
);
CREATE TABLE access_tokens (
token varchar(32) PRIMARY KEY UNIQUE NOT NULL,
refresh_token varchar(64) UNIQUE NOT NULL REFERENCES refresh_tokens(token) ON UPDATE CASCADE ON DELETE CASCADE,
uuid uuid NOT NULL REFERENCES users(uuid),
created_at int8 NOT NULL
);

View file

@ -1,3 +0,0 @@
-- This file should undo anything in `up.sql`
DROP TABLE guild_members;
DROP TABLE guilds;

View file

@ -1,13 +0,0 @@
-- Your SQL goes here
CREATE TABLE guilds (
uuid uuid PRIMARY KEY NOT NULL,
owner_uuid uuid NOT NULL REFERENCES users(uuid),
name VARCHAR(100) NOT NULL,
description VARCHAR(300)
);
CREATE TABLE guild_members (
uuid uuid PRIMARY KEY NOT NULL,
guild_uuid uuid NOT NULL REFERENCES guilds(uuid) ON DELETE CASCADE,
user_uuid uuid NOT NULL REFERENCES users(uuid),
nickname VARCHAR(100) DEFAULT NULL
);

View file

@ -1,3 +0,0 @@
-- This file should undo anything in `up.sql`
DROP TABLE role_members;
DROP TABLE roles;

View file

@ -1,15 +0,0 @@
-- Your SQL goes here
CREATE TABLE roles (
uuid uuid UNIQUE NOT NULL,
guild_uuid uuid NOT NULL REFERENCES guilds(uuid) ON DELETE CASCADE,
name VARCHAR(50) NOT NULL,
color int NOT NULL DEFAULT 16777215,
position int NOT NULL,
permissions int8 NOT NULL DEFAULT 0,
PRIMARY KEY (uuid, guild_uuid)
);
CREATE TABLE role_members (
role_uuid uuid NOT NULL REFERENCES roles(uuid) ON DELETE CASCADE,
member_uuid uuid NOT NULL REFERENCES guild_members(uuid) ON DELETE CASCADE,
PRIMARY KEY (role_uuid, member_uuid)
);

View file

@ -1,3 +0,0 @@
-- This file should undo anything in `up.sql`
DROP TABLE channel_permissions;
DROP TABLE channels;

View file

@ -1,13 +0,0 @@
-- Your SQL goes here
CREATE TABLE channels (
uuid uuid PRIMARY KEY NOT NULL,
guild_uuid uuid NOT NULL REFERENCES guilds(uuid) ON DELETE CASCADE,
name varchar(32) NOT NULL,
description varchar(500) NOT NULL
);
CREATE TABLE channel_permissions (
channel_uuid uuid NOT NULL REFERENCES channels(uuid) ON DELETE CASCADE,
role_uuid uuid NOT NULL REFERENCES roles(uuid) ON DELETE CASCADE,
permissions int8 NOT NULL DEFAULT 0,
PRIMARY KEY (channel_uuid, role_uuid)
);

View file

@ -1,2 +0,0 @@
-- This file should undo anything in `up.sql`
DROP TABLE messages;

View file

@ -1,7 +0,0 @@
-- Your SQL goes here
CREATE TABLE messages (
uuid uuid PRIMARY KEY NOT NULL,
channel_uuid uuid NOT NULL REFERENCES channels(uuid) ON DELETE CASCADE,
user_uuid uuid NOT NULL REFERENCES users(uuid),
message varchar(4000) NOT NULL
);

View file

@ -1,2 +0,0 @@
-- This file should undo anything in `up.sql`
DROP TABLE invites;

View file

@ -1,6 +0,0 @@
-- Your SQL goes here
CREATE TABLE invites (
id varchar(32) PRIMARY KEY NOT NULL,
guild_uuid uuid NOT NULL REFERENCES guilds(uuid) ON DELETE CASCADE,
user_uuid uuid NOT NULL REFERENCES users(uuid)
);

View file

@ -1,4 +0,0 @@
-- This file should undo anything in `up.sql`
UPDATE channels SET description = '' WHERE description IS NULL;
ALTER TABLE ONLY channels ALTER COLUMN description SET NOT NULL;
ALTER TABLE ONLY channels ALTER COLUMN description DROP DEFAULT;

View file

@ -1,3 +0,0 @@
-- Your SQL goes here
ALTER TABLE ONLY channels ALTER COLUMN description DROP NOT NULL;
ALTER TABLE ONLY channels ALTER COLUMN description SET DEFAULT NULL;

View file

@ -1,14 +1,14 @@
use std::time::{SystemTime, UNIX_EPOCH}; use std::time::{SystemTime, UNIX_EPOCH};
use actix_web::{HttpResponse, post, web}; use actix_web::{Error, HttpResponse, post, web};
use argon2::{PasswordHash, PasswordVerifier}; use argon2::{PasswordHash, PasswordVerifier};
use diesel::{dsl::insert_into, ExpressionMethods, QueryDsl}; use log::error;
use diesel_async::RunQueryDsl;
use serde::Deserialize; use serde::Deserialize;
use uuid::Uuid;
use crate::{ use crate::{
error::Error, api::v1::auth::{EMAIL_REGEX, PASSWORD_REGEX, USERNAME_REGEX}, schema::*, utils::{generate_access_token, generate_refresh_token, refresh_token_cookie}, Data Data,
api::v1::auth::{EMAIL_REGEX, PASSWORD_REGEX, USERNAME_REGEX},
utils::{generate_access_token, generate_refresh_token, refresh_token_cookie},
}; };
use super::Response; use super::Response;
@ -29,42 +29,66 @@ pub async fn response(
return Ok(HttpResponse::Forbidden().json(r#"{ "password_hashed": false }"#)); return Ok(HttpResponse::Forbidden().json(r#"{ "password_hashed": false }"#));
} }
use users::dsl;
let mut conn = data.pool.get().await?;
if EMAIL_REGEX.is_match(&login_information.username) { if EMAIL_REGEX.is_match(&login_information.username) {
// FIXME: error handling, right now i just want this to work let row =
let (uuid, password): (Uuid, String) = dsl::users sqlx::query_as("SELECT CAST(uuid as VARCHAR), password FROM users WHERE email = $1")
.filter(dsl::email.eq(&login_information.username)) .bind(&login_information.username)
.select((dsl::uuid, dsl::password)) .fetch_one(&data.pool)
.get_result(&mut conn) .await;
.await?;
return login( if let Err(error) = row {
if error.to_string()
== "no rows returned by a query that expected to return at least one row"
{
return Ok(HttpResponse::Unauthorized().finish());
}
error!("{}", error);
return Ok(HttpResponse::InternalServerError().json(
r#"{ "error": "Unhandled exception occured, contact the server administrator" }"#,
));
}
let (uuid, password): (String, String) = row.unwrap();
return Ok(login(
data.clone(), data.clone(),
uuid, uuid,
login_information.password.clone(), login_information.password.clone(),
password, password,
login_information.device_name.clone(), login_information.device_name.clone(),
) )
.await; .await);
} else if USERNAME_REGEX.is_match(&login_information.username) { } else if USERNAME_REGEX.is_match(&login_information.username) {
// FIXME: error handling, right now i just want this to work let row =
let (uuid, password): (Uuid, String) = dsl::users sqlx::query_as("SELECT CAST(uuid as VARCHAR), password FROM users WHERE username = $1")
.filter(dsl::username.eq(&login_information.username)) .bind(&login_information.username)
.select((dsl::uuid, dsl::password)) .fetch_one(&data.pool)
.get_result(&mut conn) .await;
.await?;
return login( if let Err(error) = row {
if error.to_string()
== "no rows returned by a query that expected to return at least one row"
{
return Ok(HttpResponse::Unauthorized().finish());
}
error!("{}", error);
return Ok(HttpResponse::InternalServerError().json(
r#"{ "error": "Unhandled exception occured, contact the server administrator" }"#,
));
}
let (uuid, password): (String, String) = row.unwrap();
return Ok(login(
data.clone(), data.clone(),
uuid, uuid,
login_information.password.clone(), login_information.password.clone(),
password, password,
login_information.device_name.clone(), login_information.device_name.clone(),
) )
.await; .await);
} }
Ok(HttpResponse::Unauthorized().finish()) Ok(HttpResponse::Unauthorized().finish())
@ -72,45 +96,79 @@ pub async fn response(
async fn login( async fn login(
data: actix_web::web::Data<Data>, data: actix_web::web::Data<Data>,
uuid: Uuid, uuid: String,
request_password: String, request_password: String,
database_password: String, database_password: String,
device_name: String, device_name: String,
) -> Result<HttpResponse, Error> { ) -> HttpResponse {
let mut conn = data.pool.get().await?; let parsed_hash_raw = PasswordHash::new(&database_password);
let parsed_hash = PasswordHash::new(&database_password).map_err(|e| Error::PasswordHashError(e.to_string()))?; if let Err(error) = parsed_hash_raw {
error!("{}", error);
return HttpResponse::InternalServerError().finish();
}
let parsed_hash = parsed_hash_raw.unwrap();
if data if data
.argon2 .argon2
.verify_password(request_password.as_bytes(), &parsed_hash) .verify_password(request_password.as_bytes(), &parsed_hash)
.is_err() .is_err()
{ {
return Err(Error::Unauthorized("Wrong username or password".to_string())); return HttpResponse::Unauthorized().finish();
} }
let refresh_token = generate_refresh_token()?; let refresh_token_raw = generate_refresh_token();
let access_token = generate_access_token()?; let access_token_raw = generate_access_token();
if let Err(error) = refresh_token_raw {
error!("{}", error);
return HttpResponse::InternalServerError().finish();
}
let refresh_token = refresh_token_raw.unwrap();
if let Err(error) = access_token_raw {
error!("{}", error);
return HttpResponse::InternalServerError().finish();
}
let access_token = access_token_raw.unwrap();
let current_time = SystemTime::now() let current_time = SystemTime::now()
.duration_since(UNIX_EPOCH)? .duration_since(UNIX_EPOCH)
.unwrap()
.as_secs() as i64; .as_secs() as i64;
use refresh_tokens::dsl as rdsl; if let Err(error) = sqlx::query(&format!(
"INSERT INTO refresh_tokens (token, uuid, created_at, device_name) VALUES ($1, '{}', $2, $3 )",
insert_into(refresh_tokens::table) uuid
.values((rdsl::token.eq(&refresh_token), rdsl::uuid.eq(uuid), rdsl::created_at.eq(current_time), rdsl::device_name.eq(device_name))) ))
.execute(&mut conn) .bind(&refresh_token)
.await?; .bind(current_time)
.bind(device_name)
use access_tokens::dsl as adsl; .execute(&data.pool)
.await
insert_into(access_tokens::table) {
.values((adsl::token.eq(&access_token), adsl::refresh_token.eq(&refresh_token), adsl::uuid.eq(uuid), adsl::created_at.eq(current_time))) error!("{}", error);
.execute(&mut conn) return HttpResponse::InternalServerError().finish();
.await?; }
Ok(HttpResponse::Ok() if let Err(error) = sqlx::query(&format!(
.cookie(refresh_token_cookie(refresh_token)) "INSERT INTO access_tokens (token, refresh_token, uuid, created_at) VALUES ($1, $2, '{}', $3 )",
.json(Response { access_token })) uuid
))
.bind(&access_token)
.bind(&refresh_token)
.bind(current_time)
.execute(&data.pool)
.await
{
error!("{}", error);
return HttpResponse::InternalServerError().finish()
}
HttpResponse::Ok()
.cookie(refresh_token_cookie(refresh_token))
.json(Response { access_token })
} }
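
The new login path recognises the no-matching-row case by comparing the error's display string. A sketch of the same lookup keyed on sqlx's typed error instead; the helper name is hypothetical and the query mirrors the one above:

use actix_web::HttpResponse;
use sqlx::PgPool;

// Sketch: fetch_one returns sqlx::Error::RowNotFound when the query matches no user,
// so the string comparison can be replaced with a match on the error variant.
async fn find_user_credentials(
    pool: &PgPool,
    email: &str,
) -> Result<(String, String), HttpResponse> {
    sqlx::query_as("SELECT CAST(uuid AS VARCHAR), password FROM users WHERE email = $1")
        .bind(email)
        .fetch_one(pool)
        .await
        .map_err(|error| match error {
            sqlx::Error::RowNotFound => HttpResponse::Unauthorized().finish(),
            other => {
                log::error!("{}", other);
                HttpResponse::InternalServerError().finish()
            }
        })
}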

View file

@ -1,17 +1,16 @@
use std::{ use std::{
str::FromStr,
sync::LazyLock, sync::LazyLock,
time::{SystemTime, UNIX_EPOCH}, time::{SystemTime, UNIX_EPOCH},
}; };
use actix_web::{Scope, web}; use actix_web::{HttpResponse, Scope, web};
use diesel::{ExpressionMethods, QueryDsl}; use log::error;
use diesel_async::RunQueryDsl;
use regex::Regex; use regex::Regex;
use serde::Serialize; use serde::Serialize;
use sqlx::Postgres;
use uuid::Uuid; use uuid::Uuid;
use crate::{error::Error, Conn, schema::access_tokens::dsl};
mod login; mod login;
mod refresh; mod refresh;
mod register; mod register;
@ -41,30 +40,40 @@ pub fn web() -> Scope {
pub async fn check_access_token( pub async fn check_access_token(
access_token: &str, access_token: &str,
conn: &mut Conn, pool: &sqlx::Pool<Postgres>,
) -> Result<Uuid, Error> { ) -> Result<Uuid, HttpResponse> {
let (uuid, created_at): (Uuid, i64) = dsl::access_tokens let row = sqlx::query_as(
.filter(dsl::token.eq(access_token)) "SELECT CAST(uuid as VARCHAR), created_at FROM access_tokens WHERE token = $1",
.select((dsl::uuid, dsl::created_at)) )
.get_result(conn) .bind(access_token)
.await .fetch_one(pool)
.map_err(|error| { .await;
if error == diesel::result::Error::NotFound {
Error::Unauthorized("Invalid access token".to_string()) if let Err(error) = row {
} else { if error.to_string()
Error::from(error) == "no rows returned by a query that expected to return at least one row"
{
return Err(HttpResponse::Unauthorized().finish());
} }
})?;
error!("{}", error);
return Err(HttpResponse::InternalServerError().json(
r#"{ "error": "Unhandled exception occured, contact the server administrator" }"#,
));
}
let (uuid, created_at): (String, i64) = row.unwrap();
let current_time = SystemTime::now() let current_time = SystemTime::now()
.duration_since(UNIX_EPOCH)? .duration_since(UNIX_EPOCH)
.unwrap()
.as_secs() as i64; .as_secs() as i64;
let lifetime = current_time - created_at; let lifetime = current_time - created_at;
if lifetime > 3600 { if lifetime > 3600 {
return Err(Error::Unauthorized("Invalid access token".to_string())); return Err(HttpResponse::Unauthorized().finish());
} }
Ok(uuid) Ok(Uuid::from_str(&uuid).unwrap())
} }
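
check_access_token now casts the uuid column to VARCHAR and re-parses it with Uuid::from_str(...).unwrap(). A sketch assuming sqlx's "uuid" feature were enabled (it is not listed in the Cargo.toml hunk above), which lets the column decode directly:

use sqlx::PgPool;
use uuid::Uuid;

// Sketch only: with sqlx's "uuid" feature, the Postgres uuid column decodes
// straight into uuid::Uuid, so no CAST or from_str round trip is needed.
async fn token_owner(pool: &PgPool, access_token: &str) -> Result<(Uuid, i64), sqlx::Error> {
    sqlx::query_as("SELECT uuid, created_at FROM access_tokens WHERE token = $1")
        .bind(access_token)
        .fetch_one(pool)
        .await
}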

View file

@ -1,11 +1,10 @@
use actix_web::{HttpRequest, HttpResponse, post, web}; use actix_web::{Error, HttpRequest, HttpResponse, post, web};
use diesel::{delete, update, ExpressionMethods, QueryDsl};
use diesel_async::RunQueryDsl;
use log::error; use log::error;
use std::time::{SystemTime, UNIX_EPOCH}; use std::time::{SystemTime, UNIX_EPOCH};
use crate::{ use crate::{
error::Error, schema::{access_tokens::{self, dsl}, refresh_tokens::{self, dsl as rdsl}}, utils::{generate_access_token, generate_refresh_token, refresh_token_cookie}, Data Data,
utils::{generate_access_token, generate_refresh_token, refresh_token_cookie},
}; };
use super::Response; use super::Response;
@ -21,23 +20,23 @@ pub async fn res(req: HttpRequest, data: web::Data<Data>) -> Result<HttpResponse
let mut refresh_token = String::from(recv_refresh_token_cookie.unwrap().value()); let mut refresh_token = String::from(recv_refresh_token_cookie.unwrap().value());
let current_time = SystemTime::now() let current_time = SystemTime::now()
.duration_since(UNIX_EPOCH)? .duration_since(UNIX_EPOCH)
.unwrap()
.as_secs() as i64; .as_secs() as i64;
let mut conn = data.pool.get().await?; if let Ok(row) = sqlx::query_scalar("SELECT created_at FROM refresh_tokens WHERE token = $1")
.bind(&refresh_token)
if let Ok(created_at) = rdsl::refresh_tokens .fetch_one(&data.pool)
.filter(rdsl::token.eq(&refresh_token))
.select(rdsl::created_at)
.get_result::<i64>(&mut conn)
.await .await
{ {
let created_at: i64 = row;
let lifetime = current_time - created_at; let lifetime = current_time - created_at;
if lifetime > 2592000 { if lifetime > 2592000 {
if let Err(error) = delete(refresh_tokens::table) if let Err(error) = sqlx::query("DELETE FROM refresh_tokens WHERE token = $1")
.filter(rdsl::token.eq(&refresh_token)) .bind(&refresh_token)
.execute(&mut conn) .execute(&data.pool)
.await .await
{ {
error!("{}", error); error!("{}", error);
@ -53,7 +52,8 @@ pub async fn res(req: HttpRequest, data: web::Data<Data>) -> Result<HttpResponse
} }
let current_time = SystemTime::now() let current_time = SystemTime::now()
.duration_since(UNIX_EPOCH)? .duration_since(UNIX_EPOCH)
.unwrap()
.as_secs() as i64; .as_secs() as i64;
if lifetime > 1987200 { if lifetime > 1987200 {
@ -66,13 +66,13 @@ pub async fn res(req: HttpRequest, data: web::Data<Data>) -> Result<HttpResponse
let new_refresh_token = new_refresh_token.unwrap(); let new_refresh_token = new_refresh_token.unwrap();
match update(refresh_tokens::table) match sqlx::query(
.filter(rdsl::token.eq(&refresh_token)) "UPDATE refresh_tokens SET token = $1, created_at = $2 WHERE token = $3",
.set(( )
rdsl::token.eq(&new_refresh_token), .bind(&new_refresh_token)
rdsl::created_at.eq(current_time), .bind(current_time)
)) .bind(&refresh_token)
.execute(&mut conn) .execute(&data.pool)
.await .await
{ {
Ok(_) => { Ok(_) => {
@ -84,16 +84,27 @@ pub async fn res(req: HttpRequest, data: web::Data<Data>) -> Result<HttpResponse
} }
} }
let access_token = generate_access_token()?; let access_token = generate_access_token();
update(access_tokens::table) if access_token.is_err() {
.filter(dsl::refresh_token.eq(&refresh_token)) error!("{}", access_token.unwrap_err());
.set(( return Ok(HttpResponse::InternalServerError().finish());
dsl::token.eq(&access_token), }
dsl::created_at.eq(current_time),
)) let access_token = access_token.unwrap();
.execute(&mut conn)
.await?; if let Err(error) = sqlx::query(
"UPDATE access_tokens SET token = $1, created_at = $2 WHERE refresh_token = $3",
)
.bind(&access_token)
.bind(current_time)
.bind(&refresh_token)
.execute(&data.pool)
.await
{
error!("{}", error);
return Ok(HttpResponse::InternalServerError().finish());
}
return Ok(HttpResponse::Ok() return Ok(HttpResponse::Ok()
.cookie(refresh_token_cookie(refresh_token)) .cookie(refresh_token_cookie(refresh_token))

View file

@ -1,18 +1,19 @@
use std::time::{SystemTime, UNIX_EPOCH}; use std::time::{SystemTime, UNIX_EPOCH};
use actix_web::{HttpResponse, post, web}; use actix_web::{Error, HttpResponse, post, web};
use argon2::{ use argon2::{
PasswordHasher, PasswordHasher,
password_hash::{SaltString, rand_core::OsRng}, password_hash::{SaltString, rand_core::OsRng},
}; };
use diesel::{dsl::insert_into, ExpressionMethods}; use log::error;
use diesel_async::RunQueryDsl;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use uuid::Uuid; use uuid::Uuid;
use super::Response; use super::Response;
use crate::{ use crate::{
api::v1::auth::{EMAIL_REGEX, PASSWORD_REGEX, USERNAME_REGEX}, error::Error, schema::{access_tokens::{self, dsl as adsl}, refresh_tokens::{self, dsl as rdsl}, users::{self, dsl as udsl}}, utils::{generate_access_token, generate_refresh_token, refresh_token_cookie}, Data Data,
api::v1::auth::{EMAIL_REGEX, PASSWORD_REGEX, USERNAME_REGEX},
utils::{generate_access_token, generate_refresh_token, refresh_token_cookie},
}; };
#[derive(Deserialize)] #[derive(Deserialize)]
@ -91,49 +92,91 @@ pub async fn res(
.argon2 .argon2
.hash_password(account_information.password.as_bytes(), &salt) .hash_password(account_information.password.as_bytes(), &salt)
{ {
let mut conn = data.pool.get().await?;
// TODO: Check security of this implementation // TODO: Check security of this implementation
insert_into(users::table) return Ok(
.values(( match sqlx::query(&format!(
udsl::uuid.eq(uuid), "INSERT INTO users (uuid, username, password, email) VALUES ( '{}', $1, $2, $3 )",
udsl::username.eq(&account_information.identifier), uuid
udsl::password.eq(hashed_password.to_string()),
udsl::email.eq(&account_information.email),
)) ))
.execute(&mut conn) .bind(&account_information.identifier)
.await?; .bind(hashed_password.to_string())
.bind(&account_information.email)
.execute(&data.pool)
.await
{
Ok(_out) => {
let refresh_token = generate_refresh_token();
let access_token = generate_access_token();
let refresh_token = generate_refresh_token()?; if refresh_token.is_err() {
let access_token = generate_access_token()?; error!("{}", refresh_token.unwrap_err());
return Ok(HttpResponse::InternalServerError().finish());
}
let refresh_token = refresh_token.unwrap();
if access_token.is_err() {
error!("{}", access_token.unwrap_err());
return Ok(HttpResponse::InternalServerError().finish());
}
let access_token = access_token.unwrap();
let current_time = SystemTime::now() let current_time = SystemTime::now()
.duration_since(UNIX_EPOCH)? .duration_since(UNIX_EPOCH)
.unwrap()
.as_secs() as i64; .as_secs() as i64;
insert_into(refresh_tokens::table) if let Err(error) = sqlx::query(&format!("INSERT INTO refresh_tokens (token, uuid, created_at, device_name) VALUES ($1, '{}', $2, $3 )", uuid))
.values(( .bind(&refresh_token)
rdsl::token.eq(&refresh_token), .bind(current_time)
rdsl::uuid.eq(uuid), .bind(&account_information.device_name)
rdsl::created_at.eq(current_time), .execute(&data.pool)
rdsl::device_name.eq(&account_information.device_name), .await {
)) error!("{}", error);
.execute(&mut conn) return Ok(HttpResponse::InternalServerError().finish())
.await?; }
insert_into(access_tokens::table) if let Err(error) = sqlx::query(&format!("INSERT INTO access_tokens (token, refresh_token, uuid, created_at) VALUES ($1, $2, '{}', $3 )", uuid))
.values(( .bind(&access_token)
adsl::token.eq(&access_token), .bind(&refresh_token)
adsl::refresh_token.eq(&refresh_token), .bind(current_time)
adsl::uuid.eq(uuid), .execute(&data.pool)
adsl::created_at.eq(current_time), .await {
)) error!("{}", error);
.execute(&mut conn) return Ok(HttpResponse::InternalServerError().finish())
.await?; }
return Ok(HttpResponse::Ok() HttpResponse::Ok()
.cookie(refresh_token_cookie(refresh_token)) .cookie(refresh_token_cookie(refresh_token))
.json(Response { access_token })) .json(Response { access_token })
}
Err(error) => {
let err_msg = error.as_database_error().unwrap().message();
match err_msg {
err_msg
if err_msg.contains("unique") && err_msg.contains("username_key") =>
{
HttpResponse::Forbidden().json(ResponseError {
gorb_id_available: false,
..Default::default()
})
}
err_msg if err_msg.contains("unique") && err_msg.contains("email_key") => {
HttpResponse::Forbidden().json(ResponseError {
email_available: false,
..Default::default()
})
}
_ => {
error!("{}", err_msg);
HttpResponse::InternalServerError().finish()
}
}
}
},
);
} }
Ok(HttpResponse::InternalServerError().finish()) Ok(HttpResponse::InternalServerError().finish())
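
Several of the new INSERT statements splice the uuid into the SQL text with format! while binding the other values. A sketch of the refresh-token insert with the uuid passed as a bound parameter instead; the helper name is hypothetical and the uuid stays a string to match the handler code:

use sqlx::PgPool;

// Sketch: bind the uuid like the other values and let Postgres cast it,
// instead of interpolating it into the query string with format!.
async fn store_refresh_token(
    pool: &PgPool,
    token: &str,
    uuid: &str,
    created_at: i64,
    device_name: &str,
) -> Result<(), sqlx::Error> {
    sqlx::query("INSERT INTO refresh_tokens (token, uuid, created_at, device_name) VALUES ($1, $2::uuid, $3, $4)")
        .bind(token)
        .bind(uuid)
        .bind(created_at)
        .bind(device_name)
        .execute(pool)
        .await?;
    Ok(())
}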

View file

@ -1,10 +1,10 @@
use actix_web::{HttpRequest, HttpResponse, post, web}; use actix_web::{Error, HttpRequest, HttpResponse, post, web};
use argon2::{PasswordHash, PasswordVerifier}; use argon2::{PasswordHash, PasswordVerifier};
use diesel::{delete, ExpressionMethods, QueryDsl}; use futures::future;
use diesel_async::RunQueryDsl; use log::error;
use serde::Deserialize; use serde::{Deserialize, Serialize};
use crate::{api::v1::auth::check_access_token, error::Error, schema::users::dsl as udsl, schema::refresh_tokens::{self, dsl as rdsl}, utils::get_auth_header, Data}; use crate::{Data, api::v1::auth::check_access_token, utils::get_auth_header};
#[derive(Deserialize)] #[derive(Deserialize)]
struct RevokeRequest { struct RevokeRequest {
@ -12,6 +12,17 @@ struct RevokeRequest {
device_name: String, device_name: String,
} }
#[derive(Serialize)]
struct Response {
deleted: bool,
}
impl Response {
fn new(deleted: bool) -> Self {
Self { deleted }
}
}
// TODO: Should maybe be a delete request? // TODO: Should maybe be a delete request?
#[post("/revoke")] #[post("/revoke")]
pub async fn res( pub async fn res(
@ -21,33 +32,85 @@ pub async fn res(
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let headers = req.headers(); let headers = req.headers();
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
let mut conn = data.pool.get().await?; if let Err(error) = auth_header {
return Ok(error);
}
let uuid = check_access_token(auth_header, &mut conn).await?; let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
let database_password: String = udsl::users if let Err(error) = authorized {
.filter(udsl::uuid.eq(uuid)) return Ok(error);
.select(udsl::password) }
.get_result(&mut conn)
.await?;
let hashed_password = PasswordHash::new(&database_password).map_err(|e| Error::PasswordHashError(e.to_string()))?; let uuid = authorized.unwrap();
let database_password_raw = sqlx::query_scalar(&format!(
"SELECT password FROM users WHERE uuid = '{}'",
uuid
))
.fetch_one(&data.pool)
.await;
if let Err(error) = database_password_raw {
error!("{}", error);
return Ok(HttpResponse::InternalServerError().json(Response::new(false)));
}
let database_password: String = database_password_raw.unwrap();
let hashed_password_raw = PasswordHash::new(&database_password);
if let Err(error) = hashed_password_raw {
error!("{}", error);
return Ok(HttpResponse::InternalServerError().json(Response::new(false)));
}
let hashed_password = hashed_password_raw.unwrap();
if data if data
.argon2 .argon2
.verify_password(revoke_request.password.as_bytes(), &hashed_password) .verify_password(revoke_request.password.as_bytes(), &hashed_password)
.is_err() .is_err()
{ {
return Err(Error::Unauthorized("Wrong username or password".to_string())); return Ok(HttpResponse::Unauthorized().finish());
} }
delete(refresh_tokens::table) let tokens_raw = sqlx::query_scalar(&format!(
.filter(rdsl::uuid.eq(uuid)) "SELECT token FROM refresh_tokens WHERE uuid = '{}' AND device_name = $1",
.filter(rdsl::device_name.eq(&revoke_request.device_name)) uuid
.execute(&mut conn) ))
.await?; .bind(&revoke_request.device_name)
.fetch_all(&data.pool)
.await;
Ok(HttpResponse::Ok().finish()) if tokens_raw.is_err() {
error!("{:?}", tokens_raw);
return Ok(HttpResponse::InternalServerError().json(Response::new(false)));
}
let tokens: Vec<String> = tokens_raw.unwrap();
let mut refresh_tokens_delete = vec![];
for token in tokens {
refresh_tokens_delete.push(
sqlx::query("DELETE FROM refresh_tokens WHERE token = $1")
.bind(token.clone())
.execute(&data.pool),
);
}
let results = future::join_all(refresh_tokens_delete).await;
let errors: Vec<&Result<sqlx::postgres::PgQueryResult, sqlx::Error>> =
results.iter().filter(|r| r.is_err()).collect();
if !errors.is_empty() {
error!("{:?}", errors);
return Ok(HttpResponse::InternalServerError().finish());
}
Ok(HttpResponse::Ok().json(Response::new(true)))
} }
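
The revoke handler fetches the matching refresh tokens and then deletes them one by one through future::join_all. Every deleted row matches the same uuid and device_name, so a single DELETE would cover the same case; a sketch with a hypothetical helper, again binding the uuid as text and casting:

use sqlx::PgPool;

// Sketch: one DELETE removes every refresh token for this user and device,
// replacing the fetch-then-delete-each loop above.
async fn revoke_device_tokens(
    pool: &PgPool,
    uuid: &str,
    device_name: &str,
) -> Result<u64, sqlx::Error> {
    let result = sqlx::query("DELETE FROM refresh_tokens WHERE uuid = $1::uuid AND device_name = $2")
        .bind(uuid)
        .bind(device_name)
        .execute(pool)
        .await?;
    Ok(result.rows_affected())
}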

View file

@ -1,7 +1,6 @@
use actix_web::{HttpRequest, HttpResponse, get, post, web}; use actix_web::{Error, HttpRequest, HttpResponse, get, post, web};
use crate::{ use crate::{
error::Error,
Data, Data,
api::v1::auth::check_access_token, api::v1::auth::check_access_token,
structs::{Guild, Invite, Member}, structs::{Guild, Invite, Member},
@ -16,17 +15,29 @@ pub async fn get(
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let headers = req.headers(); let headers = req.headers();
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
let mut conn = data.pool.get().await?; if let Err(error) = auth_header {
return Ok(error);
check_access_token(auth_header, &mut conn).await?; }
let invite_id = path.into_inner().0; let invite_id = path.into_inner().0;
let invite = Invite::fetch_one(&mut conn, invite_id).await?; let result = Invite::fetch_one(&data.pool, invite_id).await;
let guild = Guild::fetch_one(&mut conn, invite.guild_uuid).await?; if let Err(error) = result {
return Ok(error);
}
let invite = result.unwrap();
let guild_result = Guild::fetch_one(&data.pool, invite.guild_uuid).await;
if let Err(error) = guild_result {
return Ok(error);
}
let guild = guild_result.unwrap();
Ok(HttpResponse::Ok().json(guild)) Ok(HttpResponse::Ok().json(guild))
} }
@ -39,19 +50,43 @@ pub async fn join(
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let headers = req.headers(); let headers = req.headers();
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
if let Err(error) = auth_header {
return Ok(error);
}
let invite_id = path.into_inner().0; let invite_id = path.into_inner().0;
let mut conn = data.pool.get().await?; let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
let uuid = check_access_token(auth_header, &mut conn).await?; if let Err(error) = authorized {
return Ok(error);
}
let invite = Invite::fetch_one(&mut conn, invite_id).await?; let uuid = authorized.unwrap();
let guild = Guild::fetch_one(&mut conn, invite.guild_uuid).await?; let result = Invite::fetch_one(&data.pool, invite_id).await;
Member::new(&mut conn, uuid, guild.uuid).await?; if let Err(error) = result {
return Ok(error);
}
let invite = result.unwrap();
let guild_result = Guild::fetch_one(&data.pool, invite.guild_uuid).await;
if let Err(error) = guild_result {
return Ok(error);
}
let guild = guild_result.unwrap();
let member = Member::new(&data.pool, uuid, guild.uuid).await;
if let Err(error) = member {
return Ok(error);
}
Ok(HttpResponse::Ok().json(guild)) Ok(HttpResponse::Ok().json(guild))
} }

View file

@ -1,9 +1,9 @@
use actix_web::{get, post, web, HttpRequest, HttpResponse, Scope}; use actix_web::{get, post, web, Error, HttpRequest, HttpResponse, Scope};
use serde::Deserialize; use serde::Deserialize;
mod uuid; mod uuid;
use crate::{error::Error, api::v1::auth::check_access_token, structs::{Guild, StartAmountQuery}, utils::get_auth_header, Data}; use crate::{api::v1::auth::check_access_token, structs::{Guild, StartAmountQuery}, utils::get_auth_header, Data};
#[derive(Deserialize)] #[derive(Deserialize)]
struct GuildInfo { struct GuildInfo {
@ -26,21 +26,33 @@ pub async fn create(
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let headers = req.headers(); let headers = req.headers();
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
let mut conn = data.pool.get().await?; if let Err(error) = auth_header {
return Ok(error);
}
let uuid = check_access_token(auth_header, &mut conn).await?; let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
if let Err(error) = authorized {
return Ok(error);
}
let uuid = authorized.unwrap();
let guild = Guild::new( let guild = Guild::new(
&mut conn, &data.pool,
guild_info.name.clone(), guild_info.name.clone(),
guild_info.description.clone(), guild_info.description.clone(),
uuid, uuid,
) )
.await?; .await;
Ok(HttpResponse::Ok().json(guild)) if let Err(error) = guild {
return Ok(error);
}
Ok(HttpResponse::Ok().json(guild.unwrap()))
} }
#[get("")] #[get("")]
@ -51,16 +63,28 @@ pub async fn get(
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let headers = req.headers(); let headers = req.headers();
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
let start = request_query.start.unwrap_or(0); let start = request_query.start.unwrap_or(0);
let amount = request_query.amount.unwrap_or(10); let amount = request_query.amount.unwrap_or(10);
check_access_token(auth_header, &mut data.pool.get().await.unwrap()).await?; if let Err(error) = auth_header {
return Ok(error);
let guilds = Guild::fetch_amount(&data.pool, start, amount).await?; }
Ok(HttpResponse::Ok().json(guilds)) let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
if let Err(error) = authorized {
return Ok(error);
}
let guilds = Guild::fetch_amount(&data.pool, start, amount).await;
if let Err(error) = guilds {
return Ok(error);
}
Ok(HttpResponse::Ok().json(guilds.unwrap()))
} }

View file

@ -1,12 +1,12 @@
use crate::{ use crate::{
error::Error,
Data, Data,
api::v1::auth::check_access_token, api::v1::auth::check_access_token,
structs::{Channel, Member}, structs::{Channel, Member},
utils::get_auth_header, utils::get_auth_header,
}; };
use ::uuid::Uuid; use ::uuid::Uuid;
use actix_web::{HttpRequest, HttpResponse, get, post, web}; use actix_web::{Error, HttpRequest, HttpResponse, get, post, web};
use log::error;
use serde::Deserialize; use serde::Deserialize;
pub mod uuid; pub mod uuid;
@ -25,27 +25,52 @@ pub async fn get(
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let headers = req.headers(); let headers = req.headers();
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
if let Err(error) = auth_header {
return Ok(error);
}
let guild_uuid = path.into_inner().0; let guild_uuid = path.into_inner().0;
let mut conn = data.pool.get().await?; let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
let uuid = check_access_token(auth_header, &mut conn).await?; if let Err(error) = authorized {
return Ok(error);
}
Member::fetch_one(&mut conn, uuid, guild_uuid).await?; let uuid = authorized.unwrap();
if let Ok(cache_hit) = data.get_cache_key(format!("{}_channels", guild_uuid)).await { let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
if let Err(error) = member {
return Ok(error);
}
let cache_result = data.get_cache_key(format!("{}_channels", guild_uuid)).await;
if let Ok(cache_hit) = cache_result {
return Ok(HttpResponse::Ok() return Ok(HttpResponse::Ok()
.content_type("application/json") .content_type("application/json")
.body(cache_hit)); .body(cache_hit));
} }
let channels = Channel::fetch_all(&data.pool, guild_uuid).await?; let channels_result = Channel::fetch_all(&data.pool, guild_uuid).await;
data if let Err(error) = channels_result {
return Ok(error);
}
let channels = channels_result.unwrap();
let cache_result = data
.set_cache_key(format!("{}_channels", guild_uuid), channels.clone(), 1800) .set_cache_key(format!("{}_channels", guild_uuid), channels.clone(), 1800)
.await?; .await;
if let Err(error) = cache_result {
error!("{}", error);
return Ok(HttpResponse::InternalServerError().finish());
}
Ok(HttpResponse::Ok().json(channels)) Ok(HttpResponse::Ok().json(channels))
} }
@ -59,15 +84,27 @@ pub async fn create(
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let headers = req.headers(); let headers = req.headers();
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
if let Err(error) = auth_header {
return Ok(error);
}
let guild_uuid = path.into_inner().0; let guild_uuid = path.into_inner().0;
let mut conn = data.pool.get().await?; let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
let uuid = check_access_token(auth_header, &mut conn).await?; if let Err(error) = authorized {
return Ok(error);
}
Member::fetch_one(&mut conn, uuid, guild_uuid).await?; let uuid = authorized.unwrap();
let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
if let Err(error) = member {
return Ok(error);
}
// FIXME: Logic to check permissions, should probably be done in utils.rs // FIXME: Logic to check permissions, should probably be done in utils.rs
@ -79,5 +116,9 @@ pub async fn create(
) )
.await; .await;
if let Err(error) = channel {
return Ok(error);
}
Ok(HttpResponse::Ok().json(channel.unwrap())) Ok(HttpResponse::Ok().json(channel.unwrap()))
} }

View file

@ -1,12 +1,12 @@
use crate::{ use crate::{
error::Error,
Data, Data,
api::v1::auth::check_access_token, api::v1::auth::check_access_token,
structs::{Channel, Member}, structs::{Channel, Member},
utils::get_auth_header, utils::get_auth_header,
}; };
use ::uuid::Uuid; use ::uuid::Uuid;
use actix_web::{HttpRequest, HttpResponse, get, web}; use actix_web::{Error, HttpRequest, HttpResponse, get, web};
use log::error;
use serde::Deserialize; use serde::Deserialize;
#[derive(Deserialize)] #[derive(Deserialize)]
@ -24,31 +24,60 @@ pub async fn get(
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let headers = req.headers(); let headers = req.headers();
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
if let Err(error) = auth_header {
return Ok(error);
}
let (guild_uuid, channel_uuid) = path.into_inner(); let (guild_uuid, channel_uuid) = path.into_inner();
let mut conn = data.pool.get().await?; let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
let uuid = check_access_token(auth_header, &mut conn).await?; if let Err(error) = authorized {
return Ok(error);
}
Member::fetch_one(&mut conn, uuid, guild_uuid).await?; let uuid = authorized.unwrap();
let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
if let Err(error) = member {
return Ok(error);
}
let cache_result = data.get_cache_key(format!("{}", channel_uuid)).await;
let channel: Channel; let channel: Channel;
if let Ok(cache_hit) = data.get_cache_key(format!("{}", channel_uuid)).await { if let Ok(cache_hit) = cache_result {
channel = serde_json::from_str(&cache_hit)? channel = serde_json::from_str(&cache_hit).unwrap()
} else { } else {
channel = Channel::fetch_one(&mut conn, channel_uuid).await?; let channel_result = Channel::fetch_one(&data.pool, guild_uuid, channel_uuid).await;
data if let Err(error) = channel_result {
return Ok(error);
}
channel = channel_result.unwrap();
let cache_result = data
.set_cache_key(format!("{}", channel_uuid), channel.clone(), 60) .set_cache_key(format!("{}", channel_uuid), channel.clone(), 60)
.await?; .await;
if let Err(error) = cache_result {
error!("{}", error);
return Ok(HttpResponse::InternalServerError().finish());
}
} }
let messages = channel let messages = channel
.fetch_messages(&mut conn, message_request.amount, message_request.offset) .fetch_messages(&data.pool, message_request.amount, message_request.offset)
.await?; .await;
Ok(HttpResponse::Ok().json(messages)) if let Err(error) = messages {
return Ok(error);
}
Ok(HttpResponse::Ok().json(messages.unwrap()))
} }

View file

@ -2,14 +2,14 @@ pub mod messages;
pub mod socket; pub mod socket;
use crate::{ use crate::{
error::Error,
Data, Data,
api::v1::auth::check_access_token, api::v1::auth::check_access_token,
structs::{Channel, Member}, structs::{Channel, Member},
utils::get_auth_header, utils::get_auth_header,
}; };
use uuid::Uuid; use ::uuid::Uuid;
use actix_web::{HttpRequest, HttpResponse, delete, get, web}; use actix_web::{Error, HttpRequest, HttpResponse, delete, get, web};
use log::error;
#[get("{uuid}/channels/{channel_uuid}")] #[get("{uuid}/channels/{channel_uuid}")]
pub async fn get( pub async fn get(
@ -19,27 +19,52 @@ pub async fn get(
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let headers = req.headers(); let headers = req.headers();
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
if let Err(error) = auth_header {
return Ok(error);
}
let (guild_uuid, channel_uuid) = path.into_inner(); let (guild_uuid, channel_uuid) = path.into_inner();
let mut conn = data.pool.get().await?; let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
let uuid = check_access_token(auth_header, &mut conn).await?; if let Err(error) = authorized {
return Ok(error);
}
Member::fetch_one(&mut conn, uuid, guild_uuid).await?; let uuid = authorized.unwrap();
if let Ok(cache_hit) = data.get_cache_key(format!("{}", channel_uuid)).await { let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
if let Err(error) = member {
return Ok(error);
}
let cache_result = data.get_cache_key(format!("{}", channel_uuid)).await;
if let Ok(cache_hit) = cache_result {
return Ok(HttpResponse::Ok() return Ok(HttpResponse::Ok()
.content_type("application/json") .content_type("application/json")
.body(cache_hit)); .body(cache_hit));
} }
let channel = Channel::fetch_one(&mut conn, channel_uuid).await?; let channel_result = Channel::fetch_one(&data.pool, guild_uuid, channel_uuid).await;
data if let Err(error) = channel_result {
return Ok(error);
}
let channel = channel_result.unwrap();
let cache_result = data
.set_cache_key(format!("{}", channel_uuid), channel.clone(), 60) .set_cache_key(format!("{}", channel_uuid), channel.clone(), 60)
.await?; .await;
if let Err(error) = cache_result {
error!("{}", error);
return Ok(HttpResponse::InternalServerError().finish());
}
Ok(HttpResponse::Ok().json(channel)) Ok(HttpResponse::Ok().json(channel))
} }
@ -52,27 +77,55 @@ pub async fn delete(
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let headers = req.headers(); let headers = req.headers();
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
if let Err(error) = auth_header {
return Ok(error);
}
let (guild_uuid, channel_uuid) = path.into_inner(); let (guild_uuid, channel_uuid) = path.into_inner();
let mut conn = data.pool.get().await?; let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
let uuid = check_access_token(auth_header, &mut conn).await?; if let Err(error) = authorized {
return Ok(error);
}
Member::fetch_one(&mut conn, uuid, guild_uuid).await?; let uuid = authorized.unwrap();
let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
if let Err(error) = member {
return Ok(error);
}
let cache_result = data.get_cache_key(format!("{}", channel_uuid)).await;
let channel: Channel; let channel: Channel;
if let Ok(cache_hit) = data.get_cache_key(format!("{}", channel_uuid)).await { if let Ok(cache_hit) = cache_result {
channel = serde_json::from_str(&cache_hit).unwrap(); channel = serde_json::from_str(&cache_hit).unwrap();
data.del_cache_key(format!("{}", channel_uuid)).await?; let result = data.del_cache_key(format!("{}", channel_uuid)).await;
if let Err(error) = result {
error!("{}", error)
}
} else { } else {
channel = Channel::fetch_one(&mut conn, channel_uuid).await?; let channel_result = Channel::fetch_one(&data.pool, guild_uuid, channel_uuid).await;
if let Err(error) = channel_result {
return Ok(error);
} }
channel.delete(&mut conn).await?; channel = channel_result.unwrap();
}
let delete_result = channel.delete(&data.pool).await;
if let Err(error) = delete_result {
return Ok(error);
}
Ok(HttpResponse::Ok().finish()) Ok(HttpResponse::Ok().finish())
} }

View file

@ -1,6 +1,7 @@
use actix_web::{Error, HttpRequest, HttpResponse, get, rt, web}; use actix_web::{Error, HttpRequest, HttpResponse, get, rt, web};
use actix_ws::AggregatedMessage; use actix_ws::AggregatedMessage;
use futures_util::StreamExt as _; use futures_util::StreamExt as _;
use log::error;
use uuid::Uuid; use uuid::Uuid;
use crate::{ use crate::{
@ -21,30 +22,57 @@ pub async fn echo(
let headers = req.headers(); let headers = req.headers();
// Retrieve auth header // Retrieve auth header
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
if let Err(error) = auth_header {
return Ok(error);
}
// Get uuids from path // Get uuids from path
let (guild_uuid, channel_uuid) = path.into_inner(); let (guild_uuid, channel_uuid) = path.into_inner();
let mut conn = data.pool.get().await.map_err(|e| crate::error::Error::from(e))?;
// Authorize client using auth header // Authorize client using auth header
let uuid = check_access_token(auth_header, &mut conn).await?; let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
if let Err(error) = authorized {
return Ok(error);
}
// Unwrap user uuid from authorization
let uuid = authorized.unwrap();
// Get server member from psql // Get server member from psql
Member::fetch_one(&mut conn, uuid, guild_uuid).await?; let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
if let Err(error) = member {
return Ok(error);
}
// Get cache for channel
let cache_result = data.get_cache_key(format!("{}", channel_uuid)).await;
let channel: Channel; let channel: Channel;
// Return channel cache or result from psql as `channel` variable // Return channel cache or result from psql as `channel` variable
if let Ok(cache_hit) = data.get_cache_key(format!("{}", channel_uuid)).await { if let Ok(cache_hit) = cache_result {
channel = serde_json::from_str(&cache_hit).unwrap() channel = serde_json::from_str(&cache_hit).unwrap()
} else { } else {
channel = Channel::fetch_one(&mut conn, channel_uuid).await?; let channel_result = Channel::fetch_one(&data.pool, guild_uuid, channel_uuid).await;
data if let Err(error) = channel_result {
return Ok(error);
}
channel = channel_result.unwrap();
let cache_result = data
.set_cache_key(format!("{}", channel_uuid), channel.clone(), 60) .set_cache_key(format!("{}", channel_uuid), channel.clone(), 60)
.await?; .await;
if let Err(error) = cache_result {
error!("{}", error);
return Ok(HttpResponse::InternalServerError().finish());
}
} }
let (res, mut session_1, stream) = actix_ws::handle(&req, stream)?; let (res, mut session_1, stream) = actix_ws::handle(&req, stream)?;
@ -54,11 +82,17 @@ pub async fn echo(
// aggregate continuation frames up to 1MiB // aggregate continuation frames up to 1MiB
.max_continuation_size(2_usize.pow(20)); .max_continuation_size(2_usize.pow(20));
let mut pubsub = data.cache_pool.get_async_pubsub().await.map_err(|e| crate::error::Error::from(e))?; let pubsub_result = data.cache_pool.get_async_pubsub().await;
if let Err(error) = pubsub_result {
error!("{}", error);
return Ok(HttpResponse::InternalServerError().finish());
}
let mut session_2 = session_1.clone(); let mut session_2 = session_1.clone();
rt::spawn(async move { rt::spawn(async move {
let mut pubsub = pubsub_result.unwrap();
pubsub.subscribe(channel_uuid.to_string()).await.unwrap(); pubsub.subscribe(channel_uuid.to_string()).await.unwrap();
while let Some(msg) = pubsub.on_message().next().await { while let Some(msg) = pubsub.on_message().next().await {
let payload: String = msg.get_payload().unwrap(); let payload: String = msg.get_payload().unwrap();
@ -84,7 +118,7 @@ pub async fn echo(
.await .await
.unwrap(); .unwrap();
channel channel
.new_message(&mut data.pool.get().await.unwrap(), uuid, text.to_string()) .new_message(&data.pool, uuid, text.to_string())
.await .await
.unwrap(); .unwrap();
} }

View file

@ -1,9 +1,8 @@
use actix_web::{HttpRequest, HttpResponse, get, post, web}; use actix_web::{Error, HttpRequest, HttpResponse, get, post, web};
use serde::Deserialize; use serde::Deserialize;
use uuid::Uuid; use uuid::Uuid;
use crate::{ use crate::{
error::Error,
Data, Data,
api::v1::auth::check_access_token, api::v1::auth::check_access_token,
structs::{Guild, Member}, structs::{Guild, Member},
@ -23,21 +22,43 @@ pub async fn get(
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let headers = req.headers(); let headers = req.headers();
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
if let Err(error) = auth_header {
return Ok(error);
}
let guild_uuid = path.into_inner().0; let guild_uuid = path.into_inner().0;
let mut conn = data.pool.get().await?; let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
let uuid = check_access_token(auth_header, &mut conn).await?; if let Err(error) = authorized {
return Ok(error);
}
Member::fetch_one(&mut conn, uuid, guild_uuid).await?; let uuid = authorized.unwrap();
let guild = Guild::fetch_one(&mut conn, guild_uuid).await?; let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
let invites = guild.get_invites(&mut conn).await?; if let Err(error) = member {
return Ok(error);
}
Ok(HttpResponse::Ok().json(invites)) let guild_result = Guild::fetch_one(&data.pool, guild_uuid).await;
if let Err(error) = guild_result {
return Ok(error);
}
let guild = guild_result.unwrap();
let invites = guild.get_invites(&data.pool).await;
if let Err(error) = invites {
return Ok(error);
}
Ok(HttpResponse::Ok().json(invites.unwrap()))
} }
#[post("{uuid}/invites")] #[post("{uuid}/invites")]
@ -49,21 +70,45 @@ pub async fn create(
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let headers = req.headers(); let headers = req.headers();
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
if let Err(error) = auth_header {
return Ok(error);
}
let guild_uuid = path.into_inner().0; let guild_uuid = path.into_inner().0;
let mut conn = data.pool.get().await?; let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
let uuid = check_access_token(auth_header, &mut conn).await?; if let Err(error) = authorized {
return Ok(error);
}
let member = Member::fetch_one(&mut conn, uuid, guild_uuid).await?; let uuid = authorized.unwrap();
let guild = Guild::fetch_one(&mut conn, guild_uuid).await?; let member_result = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
if let Err(error) = member_result {
return Ok(error);
}
let member = member_result.unwrap();
let guild_result = Guild::fetch_one(&data.pool, guild_uuid).await;
if let Err(error) = guild_result {
return Ok(error);
}
let guild = guild_result.unwrap();
let custom_id = invite_request.as_ref().map(|ir| ir.custom_id.clone()); let custom_id = invite_request.as_ref().map(|ir| ir.custom_id.clone());
let invite = guild.create_invite(&mut conn, &member, custom_id).await?; let invite = guild.create_invite(&data.pool, &member, custom_id).await;
Ok(HttpResponse::Ok().json(invite)) if let Err(error) = invite {
return Ok(error);
}
Ok(HttpResponse::Ok().json(invite.unwrap()))
} }

View file

@@ -1,4 +1,4 @@
-use actix_web::{HttpRequest, HttpResponse, Scope, get, web};
+use actix_web::{Error, HttpRequest, HttpResponse, Scope, get, web};
 use uuid::Uuid;
 
 mod channels;
@@ -6,7 +6,6 @@ mod invites;
 mod roles;
 
 use crate::{
-    error::Error,
     Data,
     api::v1::auth::check_access_token,
     structs::{Guild, Member},
@@ -41,17 +40,33 @@ pub async fn res(
 ) -> Result<HttpResponse, Error> {
     let headers = req.headers();
 
-    let auth_header = get_auth_header(headers)?;
+    let auth_header = get_auth_header(headers);
+
+    if let Err(error) = auth_header {
+        return Ok(error);
+    }
 
     let guild_uuid = path.into_inner().0;
 
-    let mut conn = data.pool.get().await?;
-
-    let uuid = check_access_token(auth_header, &mut conn).await?;
-
-    Member::fetch_one(&mut conn, uuid, guild_uuid).await?;
-
-    let guild = Guild::fetch_one(&mut conn, guild_uuid).await?;
-
-    Ok(HttpResponse::Ok().json(guild))
+    let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
+
+    if let Err(error) = authorized {
+        return Ok(error);
+    }
+
+    let uuid = authorized.unwrap();
+
+    let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
+
+    if let Err(error) = member {
+        return Ok(error);
+    }
+
+    let guild = Guild::fetch_one(&data.pool, guild_uuid).await;
+
+    if let Err(error) = guild {
+        return Ok(error);
+    }
+
+    Ok(HttpResponse::Ok().json(guild.unwrap()))
 }

View file

@ -1,14 +1,13 @@
use ::uuid::Uuid;
use actix_web::{HttpRequest, HttpResponse, get, post, web};
use serde::Deserialize;
use crate::{ use crate::{
error::Error,
Data, Data,
api::v1::auth::check_access_token, api::v1::auth::check_access_token,
structs::{Member, Role}, structs::{Member, Role},
utils::get_auth_header, utils::get_auth_header,
}; };
use ::uuid::Uuid;
use actix_web::{Error, HttpRequest, HttpResponse, get, post, web};
use log::error;
use serde::Deserialize;
pub mod uuid; pub mod uuid;
@ -25,27 +24,52 @@ pub async fn get(
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let headers = req.headers(); let headers = req.headers();
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
if let Err(error) = auth_header {
return Ok(error);
}
let guild_uuid = path.into_inner().0; let guild_uuid = path.into_inner().0;
let mut conn = data.pool.get().await?; let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
let uuid = check_access_token(auth_header, &mut conn).await?; if let Err(error) = authorized {
return Ok(error);
}
Member::fetch_one(&mut conn, uuid, guild_uuid).await?; let uuid = authorized.unwrap();
if let Ok(cache_hit) = data.get_cache_key(format!("{}_roles", guild_uuid)).await { let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
if let Err(error) = member {
return Ok(error);
}
let cache_result = data.get_cache_key(format!("{}_roles", guild_uuid)).await;
if let Ok(cache_hit) = cache_result {
return Ok(HttpResponse::Ok() return Ok(HttpResponse::Ok()
.content_type("application/json") .content_type("application/json")
.body(cache_hit)); .body(cache_hit));
} }
let roles = Role::fetch_all(&mut conn, guild_uuid).await?; let roles_result = Role::fetch_all(&data.pool, guild_uuid).await;
data if let Err(error) = roles_result {
return Ok(error);
}
let roles = roles_result.unwrap();
let cache_result = data
.set_cache_key(format!("{}_roles", guild_uuid), roles.clone(), 1800) .set_cache_key(format!("{}_roles", guild_uuid), roles.clone(), 1800)
.await?; .await;
if let Err(error) = cache_result {
error!("{}", error);
return Ok(HttpResponse::InternalServerError().finish());
}
Ok(HttpResponse::Ok().json(roles)) Ok(HttpResponse::Ok().json(roles))
} }
@ -59,19 +83,35 @@ pub async fn create(
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let headers = req.headers(); let headers = req.headers();
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
if let Err(error) = auth_header {
return Ok(error);
}
let guild_uuid = path.into_inner().0; let guild_uuid = path.into_inner().0;
let mut conn = data.pool.get().await.unwrap(); let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
let uuid = check_access_token(auth_header, &mut conn).await?; if let Err(error) = authorized {
return Ok(error);
}
Member::fetch_one(&mut conn, uuid, guild_uuid).await?; let uuid = authorized.unwrap();
let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
if let Err(error) = member {
return Ok(error);
}
// FIXME: Logic to check permissions, should probably be done in utils.rs // FIXME: Logic to check permissions, should probably be done in utils.rs
let role = Role::new(&mut conn, guild_uuid, role_info.name.clone()).await?; let role = Role::new(&data.pool, guild_uuid, role_info.name.clone()).await;
Ok(HttpResponse::Ok().json(role)) if let Err(error) = role {
return Ok(error);
}
Ok(HttpResponse::Ok().json(role.unwrap()))
} }

View file

@ -1,12 +1,12 @@
use crate::{ use crate::{
error::Error,
Data, Data,
api::v1::auth::check_access_token, api::v1::auth::check_access_token,
structs::{Member, Role}, structs::{Member, Role},
utils::get_auth_header, utils::get_auth_header,
}; };
use ::uuid::Uuid; use ::uuid::Uuid;
use actix_web::{HttpRequest, HttpResponse, get, web}; use actix_web::{Error, HttpRequest, HttpResponse, get, web};
use log::error;
#[get("{uuid}/roles/{role_uuid}")] #[get("{uuid}/roles/{role_uuid}")]
pub async fn get( pub async fn get(
@ -16,27 +16,52 @@ pub async fn get(
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let headers = req.headers(); let headers = req.headers();
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
if let Err(error) = auth_header {
return Ok(error);
}
let (guild_uuid, role_uuid) = path.into_inner(); let (guild_uuid, role_uuid) = path.into_inner();
let mut conn = data.pool.get().await?; let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
let uuid = check_access_token(auth_header, &mut conn).await?; if let Err(error) = authorized {
return Ok(error);
}
Member::fetch_one(&mut conn, uuid, guild_uuid).await?; let uuid = authorized.unwrap();
if let Ok(cache_hit) = data.get_cache_key(format!("{}", role_uuid)).await { let member = Member::fetch_one(&data.pool, uuid, guild_uuid).await;
if let Err(error) = member {
return Ok(error);
}
let cache_result = data.get_cache_key(format!("{}", role_uuid)).await;
if let Ok(cache_hit) = cache_result {
return Ok(HttpResponse::Ok() return Ok(HttpResponse::Ok()
.content_type("application/json") .content_type("application/json")
.body(cache_hit)); .body(cache_hit));
} }
let role = Role::fetch_one(&mut conn, role_uuid).await?; let role_result = Role::fetch_one(&data.pool, guild_uuid, role_uuid).await;
data if let Err(error) = role_result {
return Ok(error);
}
let role = role_result.unwrap();
let cache_result = data
.set_cache_key(format!("{}", role_uuid), role.clone(), 60) .set_cache_key(format!("{}", role_uuid), role.clone(), 60)
.await?; .await;
if let Err(error) = cache_result {
error!("{}", error);
return Ok(HttpResponse::InternalServerError().finish());
}
Ok(HttpResponse::Ok().json(role)) Ok(HttpResponse::Ok().json(role))
} }

@ -1,31 +1,31 @@
use std::time::SystemTime; use std::time::SystemTime;
use actix_web::{HttpResponse, get, web}; use actix_web::{HttpResponse, Responder, get, web};
use diesel::QueryDsl;
use diesel_async::RunQueryDsl;
use serde::Serialize; use serde::Serialize;
use crate::error::Error;
use crate::Data; use crate::Data;
use crate::schema::users::dsl::{users, uuid};
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION"); const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
#[derive(Serialize)] #[derive(Serialize)]
struct Response { struct Response {
accounts: i64, accounts: usize,
uptime: u64, uptime: u64,
version: String, version: String,
build_number: String, build_number: String,
} }
#[get("/stats")] #[get("/stats")]
pub async fn res(data: web::Data<Data>) -> Result<HttpResponse, Error> { pub async fn res(data: web::Data<Data>) -> impl Responder {
let accounts: i64 = users let accounts;
.select(uuid) if let Ok(users) = sqlx::query("SELECT uuid FROM users")
.count() .fetch_all(&data.pool)
.get_result(&mut data.pool.get().await?) .await
.await?; {
accounts = users.len();
} else {
return HttpResponse::InternalServerError().finish();
}
let response = Response { let response = Response {
// TODO: Get number of accounts from db // TODO: Get number of accounts from db
@ -39,5 +39,5 @@ pub async fn res(data: web::Data<Data>) -> Result<HttpResponse, Error> {
build_number: String::from("how do i implement this?"), build_number: String::from("how do i implement this?"),
}; };
Ok(HttpResponse::Ok().json(response)) HttpResponse::Ok().json(response)
} }
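Note: the sqlx version of /stats fetches every `uuid` row just to call `.len()` on the result, where the diesel version used `.count()`. A sketch that keeps the aggregation in Postgres, assuming sqlx 0.8's `query_scalar`; the function name is illustrative:

```rust
// COUNT(*) comes back as int8, which query_scalar decodes into i64.
async fn account_count(pool: &sqlx::PgPool) -> Result<i64, sqlx::Error> {
    sqlx::query_scalar("SELECT COUNT(*) FROM users")
        .fetch_one(pool)
        .await
}
```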

@ -1,41 +1,51 @@
use actix_web::{HttpRequest, HttpResponse, get, web}; use actix_web::{Error, HttpRequest, HttpResponse, get, web};
use diesel::{prelude::Queryable, ExpressionMethods, QueryDsl, Selectable, SelectableHelper};
use diesel_async::RunQueryDsl;
use log::error; use log::error;
use serde::Serialize; use serde::Serialize;
use uuid::Uuid;
use crate::{error::Error, api::v1::auth::check_access_token, schema::users::{self, dsl}, utils::get_auth_header, Data}; use crate::{Data, api::v1::auth::check_access_token, utils::get_auth_header};
#[derive(Serialize, Queryable, Selectable)] #[derive(Serialize)]
#[diesel(table_name = users)]
#[diesel(check_for_backend(diesel::pg::Pg))]
struct Response { struct Response {
uuid: Uuid, uuid: String,
username: String, username: String,
display_name: Option<String>, display_name: String,
} }
#[get("/me")] #[get("/me")]
pub async fn res(req: HttpRequest, data: web::Data<Data>) -> Result<HttpResponse, Error> { pub async fn res(req: HttpRequest, data: web::Data<Data>) -> Result<HttpResponse, Error> {
let headers = req.headers(); let headers = req.headers();
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
let mut conn = data.pool.get().await?; if let Err(error) = auth_header {
return Ok(error);
}
let uuid = check_access_token(auth_header, &mut conn).await?; let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
let user: Result<Response, diesel::result::Error> = dsl::users if let Err(error) = authorized {
.filter(dsl::uuid.eq(uuid)) return Ok(error);
.select(Response::as_select()) }
.get_result(&mut conn)
let uuid = authorized.unwrap();
let row = sqlx::query_as(&format!(
"SELECT username, display_name FROM users WHERE uuid = '{}'",
uuid
))
.fetch_one(&data.pool)
.await; .await;
if let Err(error) = user { if let Err(error) = row {
error!("{}", error); error!("{}", error);
return Ok(HttpResponse::InternalServerError().finish()) return Ok(HttpResponse::InternalServerError().finish());
} }
Ok(HttpResponse::Ok().json(user.unwrap())) let (username, display_name): (String, Option<String>) = row.unwrap();
Ok(HttpResponse::Ok().json(Response {
uuid: uuid.to_string(),
username,
display_name: display_name.unwrap_or_default(),
}))
} }
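Note: the sqlx-side /me (and the /{uuid} handler below) interpolates the uuid into the SQL string with `format!`. A bound parameter avoids quoting concerns and lets Postgres reuse the plan; this sketch binds the uuid as text and casts in SQL so it works without sqlx's `uuid` feature (the helper name is illustrative):

```rust
async fn fetch_profile(
    pool: &sqlx::PgPool,
    uuid: uuid::Uuid,
) -> Result<(String, Option<String>), sqlx::Error> {
    // Same lookup as above, but with a placeholder instead of string interpolation.
    sqlx::query_as("SELECT username, display_name FROM users WHERE uuid = $1::uuid")
        .bind(uuid.to_string())
        .fetch_one(pool)
        .await
}
```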

@ -1,19 +1,15 @@
use actix_web::{HttpRequest, HttpResponse, Scope, get, web}; use crate::{api::v1::auth::check_access_token, structs::StartAmountQuery, utils::get_auth_header, Data};
use diesel::{prelude::Queryable, QueryDsl, Selectable, SelectableHelper}; use actix_web::{Error, HttpRequest, HttpResponse, Scope, get, web};
use diesel_async::RunQueryDsl; use log::error;
use serde::Serialize; use serde::Serialize;
use ::uuid::Uuid; use sqlx::prelude::FromRow;
use crate::{error::Error,api::v1::auth::check_access_token, schema::users::{self, dsl}, structs::StartAmountQuery, utils::get_auth_header, Data};
mod me; mod me;
mod uuid; mod uuid;
#[derive(Serialize, Queryable, Selectable)] #[derive(Serialize, FromRow)]
#[diesel(table_name = users)]
#[diesel(check_for_backend(diesel::pg::Pg))]
struct Response { struct Response {
uuid: Uuid, uuid: String,
username: String, username: String,
display_name: Option<String>, display_name: Option<String>,
email: String, email: String,
@ -34,7 +30,7 @@ pub async fn res(
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let headers = req.headers(); let headers = req.headers();
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
let start = request_query.start.unwrap_or(0); let start = request_query.start.unwrap_or(0);
@ -44,17 +40,24 @@ pub async fn res(
return Ok(HttpResponse::BadRequest().finish()); return Ok(HttpResponse::BadRequest().finish());
} }
let mut conn = data.pool.get().await?; let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
check_access_token(auth_header, &mut conn).await?; if let Err(error) = authorized {
return Ok(error);
let users: Vec<Response> = dsl::users }
.order_by(dsl::username)
.offset(start) let row = sqlx::query_as("SELECT CAST(uuid AS VARCHAR), username, display_name, email FROM users ORDER BY username LIMIT $1 OFFSET $2")
.limit(amount) .bind(amount)
.select(Response::as_select()) .bind(start)
.load(&mut conn) .fetch_all(&data.pool)
.await?; .await;
Ok(HttpResponse::Ok().json(users)) if let Err(error) = row {
error!("{}", error);
return Ok(HttpResponse::InternalServerError().finish());
}
let accounts: Vec<Response> = row.unwrap();
Ok(HttpResponse::Ok().json(accounts))
} }
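Note: the `CAST(uuid AS VARCHAR)` in the listing query exists only because `Response.uuid` is a `String`. sqlx has an optional `uuid` feature (not in the Cargo.toml shown here) that decodes the column directly; a sketch of what the row struct would then look like, assuming that feature were enabled:

```rust
// With sqlx's `uuid` feature, uuid::Uuid implements Decode for Postgres,
// so the SELECT no longer needs the CAST.
#[derive(serde::Serialize, sqlx::FromRow)]
struct ResponseRow {
    uuid: uuid::Uuid,
    username: String,
    display_name: Option<String>,
    email: String,
}
```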

@ -1,19 +1,15 @@
use actix_web::{HttpRequest, HttpResponse, get, web}; use actix_web::{Error, HttpRequest, HttpResponse, get, web};
use diesel::{ExpressionMethods, QueryDsl, Queryable, Selectable, SelectableHelper};
use diesel_async::RunQueryDsl;
use log::error; use log::error;
use serde::Serialize; use serde::Serialize;
use uuid::Uuid; use uuid::Uuid;
use crate::{error::Error, api::v1::auth::check_access_token, schema::users::{self, dsl}, utils::get_auth_header, Data}; use crate::{Data, api::v1::auth::check_access_token, utils::get_auth_header};
#[derive(Serialize, Queryable, Selectable, Clone)] #[derive(Serialize, Clone)]
#[diesel(table_name = users)]
#[diesel(check_for_backend(diesel::pg::Pg))]
struct Response { struct Response {
uuid: Uuid, uuid: String,
username: String, username: String,
display_name: Option<String>, display_name: String,
} }
#[get("/{uuid}")] #[get("/{uuid}")]
@ -26,11 +22,17 @@ pub async fn res(
let uuid = path.into_inner().0; let uuid = path.into_inner().0;
let auth_header = get_auth_header(headers)?; let auth_header = get_auth_header(headers);
let mut conn = data.pool.get().await?; if let Err(error) = auth_header {
return Ok(error);
}
check_access_token(auth_header, &mut conn).await?; let authorized = check_access_token(auth_header.unwrap(), &data.pool).await;
if let Err(error) = authorized {
return Ok(error);
}
let cache_result = data.get_cache_key(uuid.to_string()).await; let cache_result = data.get_cache_key(uuid.to_string()).await;
@ -40,11 +42,25 @@ pub async fn res(
.body(cache_hit)); .body(cache_hit));
} }
let user: Response = dsl::users let row = sqlx::query_as(&format!(
.filter(dsl::uuid.eq(uuid)) "SELECT username, display_name FROM users WHERE uuid = '{}'",
.select(Response::as_select()) uuid
.get_result(&mut conn) ))
.await?; .fetch_one(&data.pool)
.await;
if let Err(error) = row {
error!("{}", error);
return Ok(HttpResponse::InternalServerError().finish());
}
let (username, display_name): (String, Option<String>) = row.unwrap();
let user = Response {
uuid: uuid.to_string(),
username,
display_name: display_name.unwrap_or_default(),
};
let cache_result = data let cache_result = data
.set_cache_key(uuid.to_string(), user.clone(), 1800) .set_cache_key(uuid.to_string(), user.clone(), 1800)

@ -1,6 +1,7 @@
use crate::error::Error; use crate::Error;
use log::debug; use log::debug;
use serde::Deserialize; use serde::Deserialize;
use sqlx::postgres::PgConnectOptions;
use tokio::fs::read_to_string; use tokio::fs::read_to_string;
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
@ -80,24 +81,13 @@ pub struct Web {
} }
impl Database { impl Database {
pub fn url(&self) -> String { pub fn connect_options(&self) -> PgConnectOptions {
let mut url = String::from("postgres://"); PgConnectOptions::new()
.database(&self.database)
url += &self.username; .host(&self.host)
.username(&self.username)
url += ":"; .password(&self.password)
url += &self.password; .port(self.port)
url += "@";
url += &self.host;
url += ":";
url += &self.port.to_string();
url += "/";
url += &self.database;
url
} }
} }
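Note: the hand-built `postgres://` URL on the left breaks as soon as the password contains characters like `@`, `:` or `/`; `PgConnectOptions` passes each field separately, so nothing needs escaping. A standalone usage sketch mirroring what main.rs does with `connect_with` (field values are placeholders):

```rust
use sqlx::postgres::PgConnectOptions;
use sqlx::PgPool;

async fn connect() -> Result<PgPool, sqlx::Error> {
    let options = PgConnectOptions::new()
        .host("localhost")
        .port(5432)
        .username("backend")
        .password("p@ss/word") // would corrupt a naively concatenated URL
        .database("backend");
    PgPool::connect_with(options).await
}
```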

@ -1,79 +0,0 @@
use std::{io, time::SystemTimeError};
use actix_web::{error::ResponseError, http::{header::{ContentType, ToStrError}, StatusCode}, HttpResponse};
use deadpool::managed::{BuildError, PoolError};
use redis::RedisError;
use serde::Serialize;
use thiserror::Error;
use diesel::{result::Error as DieselError, ConnectionError};
use diesel_async::pooled_connection::PoolError as DieselPoolError;
use tokio::task::JoinError;
use serde_json::Error as JsonError;
use toml::de::Error as TomlError;
use log::error;
#[derive(Debug, Error)]
pub enum Error {
#[error(transparent)]
SqlError(#[from] DieselError),
#[error(transparent)]
PoolError(#[from] PoolError<DieselPoolError>),
#[error(transparent)]
BuildError(#[from] BuildError),
#[error(transparent)]
RedisError(#[from] RedisError),
#[error(transparent)]
ConnectionError(#[from] ConnectionError),
#[error(transparent)]
JoinError(#[from] JoinError),
#[error(transparent)]
IoError(#[from] io::Error),
#[error(transparent)]
TomlError(#[from] TomlError),
#[error(transparent)]
JsonError(#[from] JsonError),
#[error(transparent)]
SystemTimeError(#[from] SystemTimeError),
#[error(transparent)]
ToStrError(#[from] ToStrError),
#[error(transparent)]
RandomError(#[from] getrandom::Error),
#[error("{0}")]
PasswordHashError(String),
#[error("{0}")]
BadRequest(String),
#[error("{0}")]
Unauthorized(String),
}
impl ResponseError for Error {
fn error_response(&self) -> HttpResponse {
error!("{}: {}", self.status_code(), self.to_string());
HttpResponse::build(self.status_code())
.insert_header(ContentType::json())
.json(WebError::new(self.to_string()))
}
fn status_code(&self) -> StatusCode {
match *self {
Error::SqlError(DieselError::NotFound) => StatusCode::NOT_FOUND,
Error::BadRequest(_) => StatusCode::BAD_REQUEST,
Error::Unauthorized(_) => StatusCode::UNAUTHORIZED,
_ => StatusCode::INTERNAL_SERVER_ERROR,
}
}
}
#[derive(Serialize)]
struct WebError {
message: String,
}
impl WebError {
fn new(message: String) -> Self {
Self {
message,
}
}
}
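Note: this removed `Error` type implements actix's `ResponseError`, which is what lets the diesel-era handlers end fallible calls with `?` instead of matching on errors by hand. A minimal sketch of how a handler reads under that pattern; `do_lookup` is a stand-in for any call whose error type has a `#[from]` conversion into `Error`:

```rust
use actix_web::{get, HttpResponse};
use crate::error::Error;

// Stand-in for a fallible operation; io::Error converts into Error::IoError.
async fn do_lookup() -> Result<u64, std::io::Error> {
    Ok(42)
}

#[get("/example")]
async fn example() -> Result<HttpResponse, Error> {
    // On failure, `?` converts the error and actix calls error_response(),
    // which logs it and renders the JSON body defined above.
    let value = do_lookup().await?;
    Ok(HttpResponse::Ok().json(value))
}
```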

@ -2,24 +2,17 @@ use actix_cors::Cors;
use actix_web::{App, HttpServer, web}; use actix_web::{App, HttpServer, web};
use argon2::Argon2; use argon2::Argon2;
use clap::Parser; use clap::Parser;
use error::Error;
use simple_logger::SimpleLogger; use simple_logger::SimpleLogger;
use diesel_async::pooled_connection::AsyncDieselConnectionManager; use sqlx::{PgPool, Pool, Postgres};
use diesel_async::pooled_connection::deadpool::Pool;
use std::time::SystemTime; use std::time::SystemTime;
mod config; mod config;
use config::{Config, ConfigBuilder}; use config::{Config, ConfigBuilder};
use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!();
type Conn = deadpool::managed::Object<AsyncDieselConnectionManager<diesel_async::AsyncPgConnection>>;
mod api; mod api;
pub mod structs; pub mod structs;
pub mod utils; pub mod utils;
pub mod schema;
pub mod error; type Error = Box<dyn std::error::Error>;
#[derive(Parser, Debug)] #[derive(Parser, Debug)]
#[command(version, about, long_about = None)] #[command(version, about, long_about = None)]
@ -30,7 +23,7 @@ struct Args {
#[derive(Clone)] #[derive(Clone)]
pub struct Data { pub struct Data {
pub pool: deadpool::managed::Pool<AsyncDieselConnectionManager<diesel_async::AsyncPgConnection>, Conn>, pub pool: Pool<Postgres>,
pub cache_pool: redis::Client, pub cache_pool: redis::Client,
pub _config: Config, pub _config: Config,
pub argon2: Argon2<'static>, pub argon2: Argon2<'static>,
@ -51,24 +44,105 @@ async fn main() -> Result<(), Error> {
let web = config.web.clone(); let web = config.web.clone();
// create a new connection pool with the default config let pool = PgPool::connect_with(config.database.connect_options()).await?;
let pool_config = AsyncDieselConnectionManager::<diesel_async::AsyncPgConnection>::new(config.database.url());
let pool = Pool::builder(pool_config).build()?;
let cache_pool = redis::Client::open(config.cache_database.url())?; let cache_pool = redis::Client::open(config.cache_database.url())?;
let database_url = config.database.url(); /*
TODO: Figure out if a table should be used here and if not then what.
Also figure out if these should be different types from what they currently are and if we should add more "constraints"
tokio::task::spawn_blocking(move || { TODO: References to time should be removed in favor of using the timestamp built in to UUIDv7 (apart from deleted_at in users)
use diesel::prelude::Connection; */
use diesel_async::async_connection_wrapper::AsyncConnectionWrapper; sqlx::raw_sql(
r#"
CREATE TABLE IF NOT EXISTS users (
let mut conn = AsyncConnectionWrapper::<diesel_async::AsyncPgConnection>::establish(&database_url)?; uuid uuid PRIMARY KEY NOT NULL,
username varchar(32) NOT NULL,
conn.run_pending_migrations(MIGRATIONS)?; display_name varchar(64) DEFAULT NULL,
Ok::<_, Box<dyn std::error::Error + Send + Sync>>(()) password varchar(512) NOT NULL,
}).await?.unwrap(); email varchar(100) NOT NULL,
email_verified boolean NOT NULL DEFAULT FALSE,
is_deleted boolean NOT NULL DEFAULT FALSE,
deleted_at int8 DEFAULT NULL,
CONSTRAINT unique_username_active UNIQUE NULLS NOT DISTINCT (username, is_deleted),
CONSTRAINT unique_email_active UNIQUE NULLS NOT DISTINCT (email, is_deleted)
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_unique_username_active
ON users(username)
WHERE is_deleted = FALSE;
CREATE UNIQUE INDEX IF NOT EXISTS idx_unique_email_active
ON users(email)
WHERE is_deleted = FALSE;
CREATE TABLE IF NOT EXISTS instance_permissions (
uuid uuid NOT NULL REFERENCES users(uuid),
administrator boolean NOT NULL DEFAULT FALSE
);
CREATE TABLE IF NOT EXISTS refresh_tokens (
token varchar(64) PRIMARY KEY UNIQUE NOT NULL,
uuid uuid NOT NULL REFERENCES users(uuid),
created_at int8 NOT NULL,
device_name varchar(16) NOT NULL
);
CREATE TABLE IF NOT EXISTS access_tokens (
token varchar(32) PRIMARY KEY UNIQUE NOT NULL,
refresh_token varchar(64) UNIQUE NOT NULL REFERENCES refresh_tokens(token) ON UPDATE CASCADE ON DELETE CASCADE,
uuid uuid NOT NULL REFERENCES users(uuid),
created_at int8 NOT NULL
);
CREATE TABLE IF NOT EXISTS guilds (
uuid uuid PRIMARY KEY NOT NULL,
owner_uuid uuid NOT NULL REFERENCES users(uuid),
name VARCHAR(100) NOT NULL,
description VARCHAR(300)
);
CREATE TABLE IF NOT EXISTS guild_members (
uuid uuid PRIMARY KEY NOT NULL,
guild_uuid uuid NOT NULL REFERENCES guilds(uuid) ON DELETE CASCADE,
user_uuid uuid NOT NULL REFERENCES users(uuid),
nickname VARCHAR(100) DEFAULT NULL
);
CREATE TABLE IF NOT EXISTS roles (
uuid uuid UNIQUE NOT NULL,
guild_uuid uuid NOT NULL REFERENCES guilds(uuid) ON DELETE CASCADE,
name VARCHAR(50) NOT NULL,
color int NOT NULL DEFAULT 16777215,
position int NOT NULL,
permissions int8 NOT NULL DEFAULT 0,
PRIMARY KEY (uuid, guild_uuid)
);
CREATE TABLE IF NOT EXISTS role_members (
role_uuid uuid NOT NULL REFERENCES roles(uuid) ON DELETE CASCADE,
member_uuid uuid NOT NULL REFERENCES guild_members(uuid) ON DELETE CASCADE,
PRIMARY KEY (role_uuid, member_uuid)
);
CREATE TABLE IF NOT EXISTS channels (
uuid uuid PRIMARY KEY NOT NULL,
guild_uuid uuid NOT NULL REFERENCES guilds(uuid) ON DELETE CASCADE,
name varchar(32) NOT NULL,
description varchar(500) NOT NULL
);
CREATE TABLE IF NOT EXISTS channel_permissions (
channel_uuid uuid NOT NULL REFERENCES channels(uuid) ON DELETE CASCADE,
role_uuid uuid NOT NULL REFERENCES roles(uuid) ON DELETE CASCADE,
permissions int8 NOT NULL DEFAULT 0,
PRIMARY KEY (channel_uuid, role_uuid)
);
CREATE TABLE IF NOT EXISTS messages (
uuid uuid PRIMARY KEY NOT NULL,
channel_uuid uuid NOT NULL REFERENCES channels(uuid) ON DELETE CASCADE,
user_uuid uuid NOT NULL REFERENCES users(uuid),
message varchar(4000) NOT NULL
);
CREATE TABLE IF NOT EXISTS invites (
id varchar(32) PRIMARY KEY NOT NULL,
guild_uuid uuid NOT NULL REFERENCES guilds(uuid) ON DELETE CASCADE,
user_uuid uuid NOT NULL REFERENCES users(uuid)
);
"#,
)
.execute(&pool)
.await?;
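Note on the TODO in the schema block above: a v7 UUID already encodes its creation time, so `created_at` columns can in principle be derived rather than stored. A sketch using the uuid 1.x API with the `v7` feature already enabled in Cargo.toml (the function names are illustrative):

```rust
use uuid::{NoContext, Timestamp, Uuid};

fn created_at_seconds(id: Uuid) -> Option<u64> {
    // get_timestamp() is Some for v1/v6/v7 UUIDs; to_unix() yields (secs, nanos).
    id.get_timestamp().map(|ts| ts.to_unix().0)
}

fn example() {
    let id = Uuid::new_v7(Timestamp::now(NoContext));
    assert!(created_at_seconds(id).is_some());
}
```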
/* /*
**Stored for later possible use** **Stored for later possible use**

@ -1,156 +0,0 @@
// @generated automatically by Diesel CLI.
diesel::table! {
access_tokens (token) {
#[max_length = 32]
token -> Varchar,
#[max_length = 64]
refresh_token -> Varchar,
uuid -> Uuid,
created_at -> Int8,
}
}
diesel::table! {
channel_permissions (channel_uuid, role_uuid) {
channel_uuid -> Uuid,
role_uuid -> Uuid,
permissions -> Int8,
}
}
diesel::table! {
channels (uuid) {
uuid -> Uuid,
guild_uuid -> Uuid,
#[max_length = 32]
name -> Varchar,
#[max_length = 500]
description -> Nullable<Varchar>,
}
}
diesel::table! {
guild_members (uuid) {
uuid -> Uuid,
guild_uuid -> Uuid,
user_uuid -> Uuid,
#[max_length = 100]
nickname -> Nullable<Varchar>,
}
}
diesel::table! {
guilds (uuid) {
uuid -> Uuid,
owner_uuid -> Uuid,
#[max_length = 100]
name -> Varchar,
#[max_length = 300]
description -> Nullable<Varchar>,
}
}
diesel::table! {
instance_permissions (uuid) {
uuid -> Uuid,
administrator -> Bool,
}
}
diesel::table! {
invites (id) {
#[max_length = 32]
id -> Varchar,
guild_uuid -> Uuid,
user_uuid -> Uuid,
}
}
diesel::table! {
messages (uuid) {
uuid -> Uuid,
channel_uuid -> Uuid,
user_uuid -> Uuid,
#[max_length = 4000]
message -> Varchar,
}
}
diesel::table! {
refresh_tokens (token) {
#[max_length = 64]
token -> Varchar,
uuid -> Uuid,
created_at -> Int8,
#[max_length = 16]
device_name -> Varchar,
}
}
diesel::table! {
role_members (role_uuid, member_uuid) {
role_uuid -> Uuid,
member_uuid -> Uuid,
}
}
diesel::table! {
roles (uuid, guild_uuid) {
uuid -> Uuid,
guild_uuid -> Uuid,
#[max_length = 50]
name -> Varchar,
color -> Int4,
position -> Int4,
permissions -> Int8,
}
}
diesel::table! {
users (uuid) {
uuid -> Uuid,
#[max_length = 32]
username -> Varchar,
#[max_length = 64]
display_name -> Nullable<Varchar>,
#[max_length = 512]
password -> Varchar,
#[max_length = 100]
email -> Varchar,
email_verified -> Bool,
is_deleted -> Bool,
deleted_at -> Nullable<Int8>,
}
}
diesel::joinable!(access_tokens -> refresh_tokens (refresh_token));
diesel::joinable!(access_tokens -> users (uuid));
diesel::joinable!(channel_permissions -> channels (channel_uuid));
diesel::joinable!(channels -> guilds (guild_uuid));
diesel::joinable!(guild_members -> guilds (guild_uuid));
diesel::joinable!(guild_members -> users (user_uuid));
diesel::joinable!(guilds -> users (owner_uuid));
diesel::joinable!(instance_permissions -> users (uuid));
diesel::joinable!(invites -> guilds (guild_uuid));
diesel::joinable!(invites -> users (user_uuid));
diesel::joinable!(messages -> channels (channel_uuid));
diesel::joinable!(messages -> users (user_uuid));
diesel::joinable!(refresh_tokens -> users (uuid));
diesel::joinable!(role_members -> guild_members (member_uuid));
diesel::joinable!(roles -> guilds (guild_uuid));
diesel::allow_tables_to_appear_in_same_query!(
access_tokens,
channel_permissions,
channels,
guild_members,
guilds,
instance_permissions,
invites,
messages,
refresh_tokens,
role_members,
roles,
users,
);
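Note: this removed, Diesel-generated schema is what gives the handlers their typed DSL; the `joinable!` / `allow_tables_to_appear_in_same_query!` declarations are what permit typed joins. A small sketch of the kind of query they enable, using diesel + diesel-async as in the removed code (the function itself is illustrative):

```rust
use diesel::{ExpressionMethods, QueryDsl};
use diesel_async::{AsyncPgConnection, RunQueryDsl};
use crate::schema::{guild_members, users};

async fn member_names(
    conn: &mut AsyncPgConnection,
    guild: uuid::Uuid,
) -> Result<Vec<(String, Option<String>)>, diesel::result::Error> {
    // joinable!(guild_members -> users (user_uuid)) is what allows inner_join here.
    guild_members::table
        .inner_join(users::table)
        .filter(guild_members::guild_uuid.eq(guild))
        .select((users::username, guild_members::nickname))
        .load(conn)
        .await
}
```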

File diff suppressed because it is too large

@ -1,4 +1,5 @@
use actix_web::{ use actix_web::{
HttpResponse,
cookie::{Cookie, SameSite, time::Duration}, cookie::{Cookie, SameSite, time::Duration},
http::header::HeaderMap, http::header::HeaderMap,
}; };
@ -7,31 +8,25 @@ use hex::encode;
use redis::RedisError; use redis::RedisError;
use serde::Serialize; use serde::Serialize;
use crate::{error::Error, Data}; use crate::Data;
pub fn get_auth_header(headers: &HeaderMap) -> Result<&str, Error> { pub fn get_auth_header(headers: &HeaderMap) -> Result<&str, HttpResponse> {
let auth_token = headers.get(actix_web::http::header::AUTHORIZATION); let auth_token = headers.get(actix_web::http::header::AUTHORIZATION);
if auth_token.is_none() { if auth_token.is_none() {
return Err(Error::Unauthorized("No authorization header provided".to_string())); return Err(HttpResponse::Unauthorized().finish());
} }
let auth_raw = auth_token.unwrap().to_str()?; let auth = auth_token.unwrap().to_str();
let mut auth = auth_raw.split_whitespace(); if let Err(error) = auth {
return Err(HttpResponse::Unauthorized().json(format!(r#" {{ "error": "{}" }} "#, error)));
let auth_type = auth.nth(0);
let auth_value = auth.nth(0);
if auth_type.is_none() {
return Err(Error::BadRequest("Authorization header is empty".to_string()));
} else if auth_type.is_some_and(|at| at != "Bearer") {
return Err(Error::BadRequest("Only token auth is supported".to_string()));
} }
let auth_value = auth.unwrap().split_whitespace().nth(1);
if auth_value.is_none() { if auth_value.is_none() {
return Err(Error::BadRequest("No token provided".to_string())); return Err(HttpResponse::BadRequest().finish());
} }
Ok(auth_value.unwrap()) Ok(auth_value.unwrap())
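Note: both variants of `get_auth_header` expect an `Authorization: Bearer <token>` header and hand back the token part. A quick illustrative check, assuming the function is in scope from utils.rs (the demo itself is not part of either commit):

```rust
use actix_web::http::header::{AUTHORIZATION, HeaderMap, HeaderValue};
use crate::utils::get_auth_header;

fn demo() {
    let mut headers = HeaderMap::new();
    headers.insert(AUTHORIZATION, HeaderValue::from_static("Bearer abc123"));
    // Either error type (Error or HttpResponse) is discarded by .ok() here.
    assert_eq!(get_auth_header(&headers).ok(), Some("abc123"));
}
```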
@ -65,12 +60,12 @@ impl Data {
key: String, key: String,
value: impl Serialize, value: impl Serialize,
expire: u32, expire: u32,
) -> Result<(), Error> { ) -> Result<(), RedisError> {
let mut conn = self.cache_pool.get_multiplexed_tokio_connection().await?; let mut conn = self.cache_pool.get_multiplexed_tokio_connection().await?;
let key_encoded = encode(key); let key_encoded = encode(key);
let value_json = serde_json::to_string(&value)?; let value_json = serde_json::to_string(&value).unwrap();
redis::cmd("SET") redis::cmd("SET")
.arg(&[key_encoded.clone(), value_json]) .arg(&[key_encoded.clone(), value_json])
@ -80,9 +75,7 @@ impl Data {
redis::cmd("EXPIRE") redis::cmd("EXPIRE")
.arg(&[key_encoded, expire.to_string()]) .arg(&[key_encoded, expire.to_string()])
.exec_async(&mut conn) .exec_async(&mut conn)
.await?; .await
Ok(())
} }
pub async fn get_cache_key(&self, key: String) -> Result<String, RedisError> { pub async fn get_cache_key(&self, key: String) -> Result<String, RedisError> {