fmt
@@ -1,9 +1,9 @@
use crate::s3_utils::get_s3_filelist;
use actix_web::error::ErrorInternalServerError;
use aws_config::BehaviorVersion;
use aws_sdk_s3::{config::Credentials, Client as S3Client};
use aws_sdk_s3::{Client as S3Client, config::Credentials};
use log::warn;
use redis::{aio::MultiplexedConnection, AsyncCommands, Client as RedisClient};
use redis::{AsyncCommands, Client as RedisClient, aio::MultiplexedConnection};
use std::env;

#[derive(Clone)]
@@ -15,7 +15,7 @@ pub struct AppState {
}

const PATH_MAPPING_KEY: &str = "filepath_mapping"; // Key for storing the path mapping
// Remove the TTL for the quota - it must be permanent per user
// Remove the TTL for the quota - it must be permanent per user

impl AppState {
/// Initialize a new application state.

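Note: the hunk above only touches imports and comments; the actual AppState::get_path/set_path bodies are not shown in this diff. As a rough, hypothetical sketch of how a permanent (no-TTL) path mapping can live in one Redis hash under PATH_MAPPING_KEY — the helper names and hash layout are assumptions, not the project's code:

    use redis::AsyncCommands;
    use redis::aio::MultiplexedConnection;

    const PATH_MAPPING_KEY: &str = "filepath_mapping";

    // Hypothetical helpers: keep the file-key -> storage-path mapping in one
    // Redis hash, deliberately without a TTL (like the per-user quota).
    async fn set_path(redis: &mut MultiplexedConnection, filekey: &str, stored_path: &str) -> redis::RedisResult<()> {
        redis.hset(PATH_MAPPING_KEY, filekey, stored_path).await
    }

    async fn get_path(redis: &mut MultiplexedConnection, filekey: &str) -> redis::RedisResult<Option<String>> {
        redis.hget(PATH_MAPPING_KEY, filekey).await
    }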
src/auth.rs (43 changed lines)
@@ -1,12 +1,12 @@
use actix_web::error::ErrorInternalServerError;
use redis::{aio::MultiplexedConnection, AsyncCommands};
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, env, error::Error};
use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation};
use jsonwebtoken::{Algorithm, DecodingKey, Validation, decode};
use log::{info, warn};
use reqwest::header::{HeaderMap, HeaderValue, CONTENT_TYPE};
use redis::{AsyncCommands, aio::MultiplexedConnection};
use reqwest::Client as HTTPClient;
use reqwest::header::{CONTENT_TYPE, HeaderMap, HeaderValue};
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::{collections::HashMap, env, error::Error};

// Old structures kept for compatibility with get_id_by_token
#[derive(Deserialize)]
@@ -106,30 +106,33 @@ fn decode_jwt_token(token: &str) -> Result<TokenClaims, Box<dyn Error>> {
// In a real application a proper secret key must be used here
let secret = std::env::var("JWT_SECRET").unwrap_or_else(|_| "your-secret-key".to_string());
let key = DecodingKey::from_secret(secret.as_ref());

let mut validation = Validation::new(Algorithm::HS256);
validation.validate_exp = true; // Enable expiration checking

match decode::<TokenClaims>(token, &key, &validation) {
Ok(token_data) => {
let claims = token_data.claims;

// Additional exp check if the field is present
if let Some(exp) = claims.exp {
let current_time = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_secs() as usize;

if exp < current_time {
warn!("JWT token expired: exp={}, current={}", exp, current_time);
return Err(Box::new(std::io::Error::other("Token expired")));
}

info!("JWT token valid until: {} (current: {})", exp, current_time);
}

info!("Successfully decoded and validated JWT token for user: {}", claims.user_id);

info!(
"Successfully decoded and validated JWT token for user: {}",
claims.user_id
);
Ok(claims)
}
Err(e) => {
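For readers following along: the hunk above is only reformatted by rustfmt, and its logic is a standard jsonwebtoken HS256 decode with expiry validation. A self-contained sketch of the same flow, with the TokenClaims shape assumed from the two fields visible in this diff (user_id plus an optional exp):

    use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation};
    use serde::Deserialize;

    // Assumed claim shape; only user_id and exp are visible in this hunk.
    #[derive(Deserialize)]
    struct TokenClaims {
        user_id: String,
        exp: Option<usize>,
    }

    fn decode_claims(token: &str, secret: &str) -> Result<TokenClaims, jsonwebtoken::errors::Error> {
        let key = DecodingKey::from_secret(secret.as_ref());
        let mut validation = Validation::new(Algorithm::HS256);
        validation.validate_exp = true; // reject tokens whose exp lies in the past
        decode::<TokenClaims>(token, &key, &validation).map(|data| data.claims)
    }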
@@ -164,9 +167,9 @@ pub async fn get_user_by_token(
// Decode the JWT token to get the user_id
let claims = decode_jwt_token(token)?;
let user_id = &claims.user_id;

info!("Extracted user_id from JWT token: {}", user_id);

// Check the token's validity via the session in Redis (optional)
let token_key = format!("session:{}:{}", user_id, token);
let session_exists: bool = redis
@@ -177,14 +180,14 @@ pub async fn get_user_by_token(
// Non-critical error, continue with basic data
})
.unwrap_or(false);

if session_exists {
// Update last_activity if the session exists
let current_time = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_secs();

let _: () = redis
.hset(&token_key, "last_activity", current_time.to_string())
.await
@@ -192,12 +195,12 @@ pub async fn get_user_by_token(
warn!("Failed to update last_activity: {}", e);
})
.unwrap_or(());

info!("Updated last_activity for session: {}", token_key);
} else {
info!("Session not found in Redis, proceeding with JWT-only data");
}

// Create a basic Author object with data from the JWT
let author = Author {
user_id: user_id.clone(),
@@ -209,12 +212,12 @@ pub async fn get_user_by_token(
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_secs()
.to_string()
.to_string(),
),
auth_data: None,
device_info: None,
};

info!("Successfully created author data for user_id: {}", user_id);
Ok(author)
}

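The session handling above is best-effort. A condensed, illustrative sketch of the same idea (check for a `session:{user_id}:{token}` hash and refresh its last_activity field), written as a free function purely for illustration:

    use redis::AsyncCommands;
    use redis::aio::MultiplexedConnection;
    use std::time::{SystemTime, UNIX_EPOCH};

    // Illustrative only: refresh last_activity on the session hash if it exists.
    async fn touch_session(
        redis: &mut MultiplexedConnection,
        user_id: &str,
        token: &str,
    ) -> redis::RedisResult<bool> {
        let token_key = format!("session:{}:{}", user_id, token);
        let exists: bool = redis.exists(&token_key).await?;
        if exists {
            let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
            let _: () = redis.hset(&token_key, "last_activity", now.to_string()).await?;
        }
        Ok(exists)
    }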
@@ -10,4 +10,4 @@ pub use upload::upload_handler;
pub use user::get_current_user_handler;

// Total quota limit per user: 5 GB
pub const MAX_USER_QUOTA_BYTES: u64 = 5 * 1024 * 1024 * 1024;
pub const MAX_USER_QUOTA_BYTES: u64 = 5 * 1024 * 1024 * 1024;

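The constant works out to 5 * 1024 * 1024 * 1024 = 5,368,709,120 bytes, i.e. 5 GiB. A tiny helper of the kind the upload handler later inlines when logging usage (the function itself is illustrative, not part of the codebase; it uses the constant defined just above):

    // Illustrative helper: percentage of the per-user quota already consumed.
    pub fn quota_used_percent(used_bytes: u64) -> f64 {
        (used_bytes as f64 / MAX_USER_QUOTA_BYTES as f64) * 100.0
    }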
@@ -1,6 +1,6 @@
use actix_web::error::ErrorNotFound;
use actix_web::{error::ErrorInternalServerError, web, HttpRequest, HttpResponse, Result};
use log::{info, error, warn};
use actix_web::{HttpRequest, HttpResponse, Result, error::ErrorInternalServerError, web};
use log::{error, info, warn};

use crate::app_state::AppState;
use crate::handlers::serve_file::serve_file;
@@ -26,7 +26,7 @@ pub async fn proxy_handler(
) -> Result<HttpResponse, actix_web::Error> {
let start_time = std::time::Instant::now();
info!("GET {} [START]", requested_res);

let normalized_path = if requested_res.ends_with("/webp") {
info!("Converting to WebP format: {}", requested_res);
requested_res.replace("/webp", "")
@@ -35,16 +35,21 @@ pub async fn proxy_handler(
};

// Check the If-None-Match header for caching
let client_etag = req.headers().get("if-none-match")
let client_etag = req
.headers()
.get("if-none-match")
.and_then(|h| h.to_str().ok());

// parse the GET request
let (base_filename, requested_width, extension) = parse_file_path(&normalized_path);
let ext = extension.as_str().to_lowercase();
let filekey = format!("{}.{}", base_filename, &ext);

info!("Parsed request - base: {}, width: {}, ext: {}", base_filename, requested_width, ext);

info!(
"Parsed request - base: {}, width: {}, ext: {}",
base_filename, requested_width, ext
);

// Generate an ETag for caching
let file_etag = format!("\"{}\"", &filekey);
if let Some(etag) = client_etag {
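The hunk cuts off before the actual comparison, so the following is only a sketch of the usual conditional-GET pattern this code appears to implement (answer 304 Not Modified when the client's If-None-Match equals the ETag we would send), not the project's exact branch:

    use actix_web::{HttpRequest, HttpResponse};

    // Sketch: short-circuit with 304 when the client already has this version.
    fn maybe_not_modified(req: &HttpRequest, file_etag: &str) -> Option<HttpResponse> {
        let client_etag = req
            .headers()
            .get("if-none-match")
            .and_then(|h| h.to_str().ok());
        if client_etag == Some(file_etag) {
            Some(HttpResponse::NotModified().finish())
        } else {
            None
        }
    }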
@@ -77,7 +82,6 @@ pub async fn proxy_handler(

info!("Content-Type: {}", content_type);

return match state.get_path(&filekey).await {
Ok(Some(stored_path)) => {
warn!("Found stored path in DB: {}", stored_path);
@@ -111,8 +115,7 @@ pub async fn proxy_handler(
}
Ok(false) => {
// The thumbnail does not exist; return the original and start thumbnail generation
let original_file =
serve_file(&stored_path, &state).await?;
let original_file = serve_file(&stored_path, &state).await?;

// Spawn an async task to generate the thumbnail
let state_clone = state.clone();

@@ -1,4 +1,4 @@
use actix_web::{web, HttpRequest, HttpResponse, Result};
use actix_web::{HttpRequest, HttpResponse, Result, web};
use log::warn;
use serde::{Deserialize, Serialize};

@@ -34,16 +34,14 @@ pub async fn get_quota_handler(
return Err(actix_web::error::ErrorUnauthorized("Unauthorized"));
}

let _admin_id = get_id_by_token(token.unwrap())
.await
.map_err(|e| {
let error_msg = if e.to_string().contains("expired") {
"Admin token has expired"
} else {
"Invalid admin token"
};
actix_web::error::ErrorUnauthorized(error_msg)
})?;
let _admin_id = get_id_by_token(token.unwrap()).await.map_err(|e| {
let error_msg = if e.to_string().contains("expired") {
"Admin token has expired"
} else {
"Invalid admin token"
};
actix_web::error::ErrorUnauthorized(error_msg)
})?;

// Get the user_id from the query parameters
let user_id = req
@@ -81,16 +79,14 @@ pub async fn increase_quota_handler(
return Err(actix_web::error::ErrorUnauthorized("Unauthorized"));
}

let _admin_id = get_id_by_token(token.unwrap())
.await
.map_err(|e| {
let error_msg = if e.to_string().contains("expired") {
"Admin token has expired"
} else {
"Invalid admin token"
};
actix_web::error::ErrorUnauthorized(error_msg)
})?;
let _admin_id = get_id_by_token(token.unwrap()).await.map_err(|e| {
let error_msg = if e.to_string().contains("expired") {
"Admin token has expired"
} else {
"Invalid admin token"
};
actix_web::error::ErrorUnauthorized(error_msg)
})?;

let additional_bytes = quota_data
.additional_bytes
@@ -137,16 +133,14 @@ pub async fn set_quota_handler(
return Err(actix_web::error::ErrorUnauthorized("Unauthorized"));
}

let _admin_id = get_id_by_token(token.unwrap())
.await
.map_err(|e| {
let error_msg = if e.to_string().contains("expired") {
"Admin token has expired"
} else {
"Invalid admin token"
};
actix_web::error::ErrorUnauthorized(error_msg)
})?;
let _admin_id = get_id_by_token(token.unwrap()).await.map_err(|e| {
let error_msg = if e.to_string().contains("expired") {
"Admin token has expired"
} else {
"Invalid admin token"
};
actix_web::error::ErrorUnauthorized(error_msg)
})?;

let new_quota_bytes = quota_data
.new_quota_bytes

@@ -1,4 +1,4 @@
use actix_web::{error::ErrorInternalServerError, HttpResponse, Result};
use actix_web::{HttpResponse, Result, error::ErrorInternalServerError};
use mime_guess::MimeGuess;

use crate::app_state::AppState;
@@ -43,7 +43,7 @@ pub async fn serve_file(
let data_bytes = data.into_bytes();

let mime_type = MimeGuess::from_path(filepath).first_or_octet_stream();

// Generate an ETag for caching based on the file path
let etag = format!("\"{}\"", filepath);

@@ -1,5 +1,5 @@
use actix_multipart::Multipart;
use actix_web::{web, HttpRequest, HttpResponse, Result};
use actix_web::{HttpRequest, HttpResponse, Result, web};
use log::{error, info, warn};

use crate::app_state::AppState;
@@ -24,26 +24,29 @@ pub async fn upload_handler(
.headers()
.get("Authorization")
.and_then(|header_value| header_value.to_str().ok());

if token.is_none() {
warn!("Upload attempt without authorization token");
return Err(actix_web::error::ErrorUnauthorized("Authorization token required"));
return Err(actix_web::error::ErrorUnauthorized(
"Authorization token required",
));
}

let token = token.unwrap();

// Validate the token first
if !validate_token(token).unwrap_or(false) {
warn!("Token validation failed");
return Err(actix_web::error::ErrorUnauthorized("Invalid or expired token"));
return Err(actix_web::error::ErrorUnauthorized(
"Invalid or expired token",
));
}

// Then extract the user_id
let user_id = extract_user_id_from_token(token)
.map_err(|e| {
warn!("Failed to extract user_id from token: {}", e);
actix_web::error::ErrorUnauthorized("Invalid authorization token")
})?;
let user_id = extract_user_id_from_token(token).map_err(|e| {
warn!("Failed to extract user_id from token: {}", e);
actix_web::error::ErrorUnauthorized("Invalid authorization token")
})?;

// Get the user's current quota
let current_quota: u64 = state.get_or_create_quota(&user_id).await.unwrap_or(0);
@@ -51,12 +54,17 @@ pub async fn upload_handler(

// Preliminary check: is there any room for files at all
if current_quota >= MAX_USER_QUOTA_BYTES {
warn!("Author {} quota already at maximum: {}", user_id, current_quota);
return Err(actix_web::error::ErrorPayloadTooLarge("Author quota limit exceeded"));
warn!(
"Author {} quota already at maximum: {}",
user_id, current_quota
);
return Err(actix_web::error::ErrorPayloadTooLarge(
"Author quota limit exceeded",
));
}

let mut uploaded_files = Vec::new();

while let Ok(Some(field)) = payload.try_next().await {
let mut field = field;
let mut file_bytes = Vec::new();
@@ -65,21 +73,32 @@ pub async fn upload_handler(
// Read the file data, checking its size
while let Ok(Some(chunk)) = field.try_next().await {
let chunk_size = chunk.len() as u64;

// Check the single-file limit
if file_size + chunk_size > MAX_SINGLE_FILE_BYTES {
warn!("File size exceeds single file limit: {} > {}",
file_size + chunk_size, MAX_SINGLE_FILE_BYTES);
return Err(actix_web::error::ErrorPayloadTooLarge("Single file size limit exceeded"));
warn!(
"File size exceeds single file limit: {} > {}",
file_size + chunk_size,
MAX_SINGLE_FILE_BYTES
);
return Err(actix_web::error::ErrorPayloadTooLarge(
"Single file size limit exceeded",
));
}

// Check the user's total quota
if current_quota + file_size + chunk_size > MAX_USER_QUOTA_BYTES {
warn!("Upload would exceed user quota: current={}, adding={}, limit={}",
current_quota, file_size + chunk_size, MAX_USER_QUOTA_BYTES);
return Err(actix_web::error::ErrorPayloadTooLarge("Author quota limit would be exceeded"));
warn!(
"Upload would exceed user quota: current={}, adding={}, limit={}",
current_quota,
file_size + chunk_size,
MAX_USER_QUOTA_BYTES
);
return Err(actix_web::error::ErrorPayloadTooLarge(
"Author quota limit would be exceeded",
));
}

file_size += chunk_size;
file_bytes.extend_from_slice(&chunk);
}
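The per-chunk checks above guard both the single-file limit and the running user quota while the multipart body is streamed. A self-contained sketch of the same streaming pattern with actix-multipart (the limit parameter stands in for MAX_SINGLE_FILE_BYTES; the helper itself is illustrative, not the project's code):

    use actix_web::error::ErrorPayloadTooLarge;
    use futures_util::TryStreamExt;

    // Illustrative: buffer one multipart field while enforcing a byte budget.
    async fn read_field_limited(
        field: &mut actix_multipart::Field,
        limit: u64,
    ) -> Result<Vec<u8>, actix_web::Error> {
        let mut buf = Vec::new();
        let mut size: u64 = 0;
        while let Some(chunk) = field.try_next().await? {
            size += chunk.len() as u64;
            if size > limit {
                return Err(ErrorPayloadTooLarge("Single file size limit exceeded"));
            }
            buf.extend_from_slice(&chunk);
        }
        Ok(buf)
    }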
@@ -140,13 +159,16 @@ pub async fn upload_handler(
.await
{
Ok(_) => {
info!("File {} successfully uploaded to S3 ({} bytes)", filename, file_size);

info!(
"File {} successfully uploaded to S3 ({} bytes)",
filename, file_size
);

// Update the user's quota
if let Err(e) = state.increment_uploaded_bytes(&user_id, file_size).await {
error!("Failed to increment quota for user {}: {}", user_id, e);
return Err(actix_web::error::ErrorInternalServerError(
"Failed to update user quota"
"Failed to update user quota",
));
}

@@ -156,21 +178,25 @@ pub async fn upload_handler(
error!("Failed to store file info in Redis: {}", e);
// Do not abort the process, the file has already been uploaded to S3
}

if let Err(e) = user_added_file(&mut redis, &user_id, &filename).await {
error!("Failed to record user file association: {}", e);
// Do not abort the process
}

// Save the path mapping
let generated_key = generate_key_with_extension(filename.clone(), content_type.clone());
let generated_key =
generate_key_with_extension(filename.clone(), content_type.clone());
state.set_path(&filename, &generated_key).await;

// Log the new quota
if let Ok(new_quota) = state.get_or_create_quota(&user_id).await {
info!("Updated quota for user {}: {} bytes ({:.1}% used)",
user_id, new_quota,
(new_quota as f64 / MAX_USER_QUOTA_BYTES as f64) * 100.0);
info!(
"Updated quota for user {}: {} bytes ({:.1}% used)",
user_id,
new_quota,
(new_quota as f64 / MAX_USER_QUOTA_BYTES as f64) * 100.0
);
}

uploaded_files.push(filename);
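increment_uploaded_bytes and get_or_create_quota are AppState methods whose bodies are not in this diff. If the quota is a plain Redis counter, the increment is a single atomic INCRBY; the sketch below is hypothetical, and in particular the user_quota:{user_id} key name is invented for illustration:

    use redis::AsyncCommands;
    use redis::aio::MultiplexedConnection;

    // Hypothetical: atomically add the uploaded size to the user's counter
    // and return the new total. Key name invented for illustration.
    async fn increment_uploaded_bytes(
        redis: &mut MultiplexedConnection,
        user_id: &str,
        bytes: u64,
    ) -> redis::RedisResult<u64> {
        let key = format!("user_quota:{}", user_id);
        redis.incr(&key, bytes).await
    }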
@@ -178,7 +204,7 @@ pub async fn upload_handler(
Err(e) => {
error!("Failed to upload file to S3: {}", e);
return Err(actix_web::error::ErrorInternalServerError(
"File upload failed"
"File upload failed",
));
}
}
@@ -188,7 +214,9 @@ pub async fn upload_handler(
match uploaded_files.len() {
0 => {
warn!("No files were uploaded");
Err(actix_web::error::ErrorBadRequest("No files provided or all files were empty"))
Err(actix_web::error::ErrorBadRequest(
"No files provided or all files were empty",
))
}
1 => {
info!("Successfully uploaded 1 file: {}", uploaded_files[0]);

@@ -1,9 +1,9 @@
use actix_web::{web, HttpRequest, HttpResponse, Result};
use actix_web::{HttpRequest, HttpResponse, Result, web};
use log::{error, info, warn};
use serde::Serialize;

use crate::app_state::AppState;
use crate::auth::{get_user_by_token, Author, validate_token};
use crate::auth::{Author, get_user_by_token, validate_token};

#[derive(Serialize)]
pub struct UserWithQuotaResponse {
@@ -40,25 +40,31 @@ pub async fn get_current_user_handler(

if token.is_none() {
warn!("Request for current user without authorization token");
return Err(actix_web::error::ErrorUnauthorized("Authorization token required"));
return Err(actix_web::error::ErrorUnauthorized(
"Authorization token required",
));
}

let token = token.unwrap();

// Validate the token first
if !validate_token(token).unwrap_or(false) {
warn!("Token validation failed in user endpoint");
return Err(actix_web::error::ErrorUnauthorized("Invalid or expired token"));
return Err(actix_web::error::ErrorUnauthorized(
"Invalid or expired token",
));
}

info!("Getting user info for valid token");

// Get user info from the Redis session
let mut redis = state.redis.clone();
let user = match get_user_by_token(token, &mut redis).await {
Ok(user) => {
info!("Successfully retrieved user info: user_id={}, username={:?}",
user.user_id, user.username);
info!(
"Successfully retrieved user info: user_id={}, username={:?}",
user.user_id, user.username
);
user
}
Err(e) => {

@@ -1,7 +1,7 @@
use actix_web::error::ErrorInternalServerError;
use once_cell::sync::Lazy;
use redis::aio::MultiplexedConnection;
use redis::AsyncCommands;
use redis::aio::MultiplexedConnection;
use std::collections::HashMap;

pub static MIME_TYPES: Lazy<HashMap<&'static str, &'static str>> = Lazy::new(|| {

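The contents of the MIME_TYPES table are outside this hunk; the two entries below are placeholders that simply show how the Lazy-initialised map is built:

    use once_cell::sync::Lazy;
    use std::collections::HashMap;

    pub static MIME_TYPES: Lazy<HashMap<&'static str, &'static str>> = Lazy::new(|| {
        let mut m = HashMap::new();
        m.insert("jpg", "image/jpeg"); // placeholder entry
        m.insert("png", "image/png"); // placeholder entry
        m
    });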
@@ -7,15 +7,15 @@ mod thumbnail;

use actix_cors::Cors;
use actix_web::{
App, HttpServer,
http::header::{self, HeaderName},
middleware::Logger,
web, App, HttpServer,
web,
};
use app_state::AppState;

use handlers::{
get_current_user_handler, get_quota_handler,
increase_quota_handler, proxy_handler,
get_current_user_handler, get_quota_handler, increase_quota_handler, proxy_handler,
set_quota_handler, upload_handler,
};
use log::warn;

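For orientation, these imports typically come together roughly as below; the allowed origins, headers and bind address are placeholders, and the real route table is not part of this diff:

    use actix_cors::Cors;
    use actix_web::{http::header, middleware::Logger, App, HttpServer};

    #[actix_web::main]
    async fn main() -> std::io::Result<()> {
        HttpServer::new(|| {
            // Placeholder CORS policy; the project's real policy is not shown here.
            let cors = Cors::default()
                .allow_any_origin()
                .allowed_methods(vec!["GET", "POST"])
                .allowed_headers(vec![header::AUTHORIZATION, header::CONTENT_TYPE]);
            App::new().wrap(cors).wrap(Logger::default())
            // .route(...) handlers omitted; see the handlers::* imports above.
        })
        .bind(("0.0.0.0", 8080))?
        .run()
        .await
    }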
@@ -1,5 +1,5 @@
use actix_web::error::ErrorInternalServerError;
use aws_sdk_s3::{error::SdkError, primitives::ByteStream, Client as S3Client};
use aws_sdk_s3::{Client as S3Client, error::SdkError, primitives::ByteStream};
use infer::get;
use mime_guess::mime;
use std::str::FromStr;

@@ -1,5 +1,5 @@
use actix_web::error::ErrorInternalServerError;
use image::{imageops::FilterType, DynamicImage, ImageFormat};
use image::{DynamicImage, ImageFormat, imageops::FilterType};
use log::warn;
use std::{collections::HashMap, io::Cursor};
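Finally, the thumbnail module only has its imports touched here. Built from just those imports, a minimal resize routine might look like the following sketch (the target format and filter choice are assumptions; PNG is used for the re-encode to keep the example simple):

    use image::{imageops::FilterType, ImageFormat};
    use std::io::Cursor;

    // Illustrative: decode, fit into a width x width box preserving aspect ratio,
    // and re-encode the result into an in-memory buffer.
    fn make_thumbnail(original: &[u8], width: u32) -> Result<Vec<u8>, image::ImageError> {
        let img = image::load_from_memory(original)?;
        let thumb = img.resize(width, width, FilterType::Lanczos3);
        let mut out = Cursor::new(Vec::new());
        thumb.write_to(&mut out, ImageFormat::Png)?;
        Ok(out.into_inner())
    }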