s3-creds+sizes+postgen

Untone 2024-08-30 22:11:21 +03:00
parent 70081f8bc0
commit 6791299fa4


@@ -3,11 +3,10 @@ use actix_web::{
     middleware::Logger,
     web, App, HttpRequest, HttpResponse, HttpServer, Result,
 };
-use aws_config::{load_defaults, BehaviorVersion};
-use aws_sdk_s3::{error::SdkError, operation::head_object::HeadObjectError, Client as S3Client};
+use aws_config::BehaviorVersion;
+use aws_sdk_s3::{config::Credentials, error::SdkError, Client as S3Client};
 use aws_sdk_s3::primitives::ByteStream;
-use image::DynamicImage;
-use image::imageops::FilterType;
+use image::{DynamicImage, imageops::FilterType};
 use mime_guess::MimeGuess;
 use redis::{aio::MultiplexedConnection, AsyncCommands};
 use redis::Client as RedisClient;
@@ -15,29 +14,63 @@ use std::env;
 use std::io::Cursor;
 use std::path::Path;
 
-const MAX_QUOTA_BYTES: u64 = 1024 * 1024 * 1024; // 1 GB per week
+const MAX_QUOTA_BYTES: u64 = 2 * 1024 * 1024 * 1024; // 2 GB per week
 
 #[derive(Clone)]
 struct AppState {
-    redis: MultiplexedConnection, // Redis connection for managing quotas and file names
-    s3_client: S3Client, // S3 client for uploading files
-    s3_bucket: String, // S3 bucket name for storing files
-    cdn_domain: String, // CDN domain for generating URLs
+    redis: MultiplexedConnection,
+    s3_client: S3Client,
+    s3_bucket: String,
+    cdn_domain: String,
 }
 
-// Generate a thumbnail for the image
-fn generate_thumbnail(image: &DynamicImage) -> Result<Vec<u8>, actix_web::Error> {
-    let thumbnail = image.resize(320, 320, FilterType::Lanczos3); // 320x320 thumbnail size
+impl AppState {
+    async fn new() -> Self {
+        let redis_url = env::var("REDIS_URL").expect("REDIS_URL must be set");
+        let redis_client = RedisClient::open(redis_url).expect("Invalid Redis URL");
+        let redis_connection = redis_client.get_multiplexed_async_connection().await.unwrap();
+
+        let s3_access_key = env::var("STORJ_ACCESS_KEY").expect("STORJ_ACCESS_KEY must be set");
+        let s3_secret_key = env::var("STORJ_SECRET_KEY").expect("STORJ_SECRET_KEY must be set");
+        let s3_endpoint = env::var("STORJ_END_POINT").expect("STORJ_END_POINT must be set");
+        let s3_bucket = env::var("STORJ_BUCKET_NAME").expect("STORJ_BUCKET_NAME must be set");
+        let cdn_domain = env::var("CDN_DOMAIN").expect("CDN_DOMAIN must be set");
+
+        let config = aws_config::defaults(BehaviorVersion::latest())
+            .region("eu-west-1")
+            .endpoint_url(s3_endpoint)
+            .credentials_provider(Credentials::new(
+                s3_access_key,
+                s3_secret_key,
+                None,
+                None,
+                "rust-s3-client",
+            ))
+            .load()
+            .await;
+        let s3_client = S3Client::new(&config);
+
+        AppState {
+            redis: redis_connection,
+            s3_client,
+            s3_bucket,
+            cdn_domain,
+        }
+    }
+}
+
+async fn generate_thumbnail(image: &DynamicImage, width: u32) -> Result<Vec<u8>, actix_web::Error> {
+    let k = image.width() / width;
+    let height = image.height() / k;
+    let thumbnail = image.resize(width, height, FilterType::Lanczos3);
     let mut buffer = Vec::new();
     thumbnail
         .write_to(&mut Cursor::new(&mut buffer), image::ImageFormat::Jpeg)
         .map_err(|_| ErrorInternalServerError("Failed to generate thumbnail"))?;
     Ok(buffer)
 }
 
-// Upload the file to S3 and return the URL
 async fn upload_to_s3(
     s3_client: &S3Client,
     bucket: &str,
@@ -47,7 +80,6 @@ async fn upload_to_s3(
     cdn_domain: &str,
 ) -> Result<String, actix_web::Error> {
     let body_stream = ByteStream::from(body);
-
     s3_client.put_object()
         .bucket(bucket)
         .key(key)
@@ -60,83 +92,65 @@ async fn upload_to_s3(
     Ok(format!("{}/{}", cdn_domain, key))
 }
 
-// Check if the original file exists in S3
-async fn check_file_exists(s3_client: &S3Client, bucket: &str, key: &str) -> Result<bool, SdkError<HeadObjectError>> {
+async fn check_file_exists(s3_client: &S3Client, bucket: &str, key: &str) -> Result<bool, actix_web::Error> {
     match s3_client.head_object().bucket(bucket).key(key).send().await {
         Ok(_) => Ok(true),
         Err(SdkError::ServiceError(service_error)) if service_error.err().is_not_found() => Ok(false),
-        Err(e) => Err(e),
+        Err(e) => Err(ErrorInternalServerError(e.to_string())),
     }
 }
 
-// Check and update the user's quota
 async fn check_and_update_quota(
     redis: &mut MultiplexedConnection,
     user_id: &str,
     file_size: u64,
 ) -> Result<(), actix_web::Error> {
     let current_quota: u64 = redis.get(user_id).await.unwrap_or(0);
 
     if current_quota + file_size > MAX_QUOTA_BYTES {
         return Err(ErrorUnauthorized("Quota exceeded"));
     }
 
-    redis.incr(user_id, file_size).await.map_err(|_| ErrorInternalServerError("Failed to update quota in Redis"))
+    redis.incr(user_id, file_size).await.map_err(|_| ErrorInternalServerError("Failed to update quota in Redis"))?;
+    Ok(())
+}
+
+async fn save_filename_in_redis(
+    redis: &mut MultiplexedConnection,
+    user_id: &str,
+    filename: &str,
+) -> Result<(), actix_web::Error> {
+    redis.sadd(user_id, filename).await.map_err(|_| ErrorInternalServerError("Failed to save filename in Redis"))?;
+    Ok(())
 }
 
-// Proxy handler for serving static files and uploading them to S3
 async fn proxy_handler(
     req: HttpRequest,
     path: web::Path<String>,
     state: web::Data<AppState>,
 ) -> Result<HttpResponse, actix_web::Error> {
     let token = req.headers().get("Authorization").and_then(|header_value| header_value.to_str().ok());
-    // Validate token (implementation needed)
     if token.is_none() {
         return Err(ErrorUnauthorized("Unauthorized"));
     }
-    let user_id = token.unwrap(); // Assuming the token is the user ID, adjust as necessary
+    let user_id = token.unwrap(); // Assuming the token is the user ID
 
-    // Load the file (implement your file loading logic)
     let file_path = path.into_inner();
     let mime_type = MimeGuess::from_path(&file_path).first_or_octet_stream();
-    let extension = Path::new(&file_path)
-        .extension()
-        .and_then(|ext| ext.to_str())
-        .unwrap_or("bin");
+    let extension = Path::new(&file_path).extension().and_then(|ext| ext.to_str()).unwrap_or("bin");
 
-    // Handle image files: generate thumbnail and upload both
     if mime_type.type_() == "image" {
         let image = image::open(&file_path).map_err(|_| ErrorInternalServerError("Failed to open image"))?;
 
-        // Generate thumbnail
-        let thumbnail_data = generate_thumbnail(&image)?;
-        let thumbnail_key = format!("thumbnail_{}.{}", file_path, "jpg");
-
-        // Upload the thumbnail
-        if let Err(_) = upload_to_s3(
-            &state.s3_client,
-            &state.s3_bucket,
-            &thumbnail_key,
-            thumbnail_data.clone(),
-            "image/jpeg",
-            &state.cdn_domain,
-        ).await {
-            // If thumbnail upload fails, check if original exists
-            let original_key = format!("{}.{}", file_path, extension);
-            if check_file_exists(&state.s3_client, &state.s3_bucket, &original_key).await.unwrap_or_default() {
-                // Generate and upload the thumbnail again
-                let thumbnail_data = generate_thumbnail(&image)?;
-                upload_to_s3(
-                    &state.s3_client,
-                    &state.s3_bucket,
-                    &thumbnail_key,
-                    thumbnail_data,
-                    "image/jpeg",
-                    &state.cdn_domain,
-                ).await?;
+        // Define thumbnail sizes
+        let thumbnail_sizes = vec![40, 110, 300, 600, 800];
+
+        for width in thumbnail_sizes {
+            let thumbnail_key = format!("{}_{}.jpg", file_path, width);
+            let thumbnail_data = generate_thumbnail(&image, width).await?;
+
+            // Check if thumbnail already exists
+            if !check_file_exists(&state.s3_client, &state.s3_bucket, &thumbnail_key).await? {
+                upload_to_s3(&state.s3_client, &state.s3_bucket, &thumbnail_key, thumbnail_data, "image/jpeg", &state.cdn_domain).await?;
             }
         }
@@ -144,24 +158,16 @@ async fn proxy_handler(
         let mut original_buffer = Vec::new();
         image.write_to(&mut Cursor::new(&mut original_buffer), image::ImageFormat::Jpeg)
             .map_err(|_| ErrorInternalServerError("Failed to read image data"))?;
 
         // Upload the original image
         let image_key = format!("{}.{}", file_path, extension);
-        let image_url = upload_to_s3(
-            &state.s3_client,
-            &state.s3_bucket,
-            &image_key,
-            original_buffer.clone(),
-            mime_type.essence_str(),
-            &state.cdn_domain,
-        )
-        .await?;
+        let image_url = upload_to_s3(&state.s3_client, &state.s3_bucket, &image_key, original_buffer.clone(), mime_type.essence_str(), &state.cdn_domain).await?;
 
         // Update quota and save filename
         check_and_update_quota(&mut state.redis.clone(), user_id, original_buffer.len() as u64).await?;
         save_filename_in_redis(&mut state.redis.clone(), user_id, &image_key).await?;
 
-        return Ok(HttpResponse::Ok().body(format!("Image and thumbnail uploaded to: {}", image_url)));
+        return Ok(HttpResponse::Ok().body(format!("Image and thumbnails uploaded to: {}", image_url)));
     }
 
     // Handle non-image files
@@ -173,60 +179,24 @@ async fn proxy_handler(
     // Upload the file
     let file_key = format!("{}.{}", file_path, extension);
-    let file_url = upload_to_s3(
-        &state.s3_client,
-        &state.s3_bucket,
-        &file_key,
-        file_data,
-        mime_type.essence_str(),
-        &state.cdn_domain,
-    )
-    .await?;
+    let file_url = upload_to_s3(&state.s3_client, &state.s3_bucket, &file_key, file_data, mime_type.essence_str(), &state.cdn_domain).await?;
 
     // Save the filename in Redis for this user
     save_filename_in_redis(&mut state.redis.clone(), user_id, &file_key).await?;
 
     Ok(HttpResponse::Ok().body(format!("File uploaded to: {}", file_url)))
 }
 
-// Save filename in Redis for a specific user
-async fn save_filename_in_redis(
-    redis: &mut MultiplexedConnection,
-    user_id: &str,
-    filename: &str,
-) -> Result<(), actix_web::Error> {
-    redis.sadd(user_id, filename).await.map_err(|_| ErrorInternalServerError("Failed to save filename in Redis"))
-}
-
-// Main function to start the server
 #[actix_web::main]
 async fn main() -> std::io::Result<()> {
-    let redis_url = env::var("REDIS_URL").expect("REDIS_URL must be set");
-    let redis_client = RedisClient::open(redis_url).expect("Invalid Redis URL");
-    let redis_connection = redis_client.get_multiplexed_async_connection().await.ok().unwrap();
-
-    // Initialize AWS S3 client
-    let s3_bucket = env::var("S3_BUCKET").expect("S3_BUCKET must be set");
-    let cdn_domain = env::var("CDN_DOMAIN").expect("CDN_DOMAIN must be set");
-    let config = load_defaults(BehaviorVersion::latest()).await;
-    let s3_client = S3Client::new(&config);
-
-    // Create application state
-    let app_state = web::Data::new(AppState {
-        redis: redis_connection,
-        s3_client,
-        s3_bucket,
-        cdn_domain,
-    });
-
-    // Start HTTP server
+    let app_state = AppState::new().await;
+
     HttpServer::new(move || {
         App::new()
-            .app_data(app_state.clone())
+            .app_data(web::Data::new(app_state.clone()))
             .wrap(Logger::default())
             .route("/{path:.*}", web::get().to(proxy_handler))
     })
     .bind("127.0.0.1:8080")?
     .run()
     .await
 }