From ead3e0e8e703937dbd817fd20798769b55b165ce Mon Sep 17 00:00:00 2001
From: Zachary Corvidae
Date: Wed, 29 Jan 2025 16:56:31 -0800
Subject: [PATCH] Cloudflare R2 feature polish (#43)

* Remove unused s3 mod

* Disable delete all videos route

* Add k8s configurations
---
 k8s/base/api/configmap.yaml        |   2 +
 k8s/base/api/deployment.yaml       |  15 +++
 k8s/base/queue/configmap.yaml      |   1 +
 k8s/base/ui/configmap.yaml         |   1 +
 services/silo-api/src/app_state.rs |   3 +-
 services/silo-api/src/main.rs      | 175 +++++++++++++++--------------
 services/silo-api/src/s3.rs        |  17 ---
 7 files changed, 109 insertions(+), 105 deletions(-)
 delete mode 100644 services/silo-api/src/s3.rs

diff --git a/k8s/base/api/configmap.yaml b/k8s/base/api/configmap.yaml
index b846f77..7791c48 100644
--- a/k8s/base/api/configmap.yaml
+++ b/k8s/base/api/configmap.yaml
@@ -8,3 +8,5 @@ data:
   FFMPEG_LOCATION: "/opt/homebrew/bin/ffmpeg" # TODO: Make this is the right path for docker context
   TWITCH_REDIRECT_URI: "https://staging.api.farmhand.witchscrow.com/auth/twitch/callback"
   LOG_LEVEL: "api=debug"
+  STORAGE: "videos/staging" # TODO: Make this dynamic per environment
+  UPLOAD_BUCKET: "farmhand"
diff --git a/k8s/base/api/deployment.yaml b/k8s/base/api/deployment.yaml
index e26fe25..38728c9 100644
--- a/k8s/base/api/deployment.yaml
+++ b/k8s/base/api/deployment.yaml
@@ -35,6 +35,21 @@ spec:
                 secretKeyRef:
                   name: farmhand-twitch-credentials
                   key: client-secret
+            - name: R2_ACCOUNT_ID
+              valueFrom:
+                secretKeyRef:
+                  name: farmhand-cloudflare-credentials
+                  key: account-id
+            - name: AWS_ACCESS_KEY_ID
+              valueFrom:
+                secretKeyRef:
+                  name: farmhand-cloudflare-credentials
+                  key: access-key-id
+            - name: AWS_SECRET_ACCESS_KEY
+              valueFrom:
+                secretKeyRef:
+                  name: farmhand-cloudflare-credentials
+                  key: secret-access-key
             - name: JWT_SECRET
               valueFrom:
                 secretKeyRef:
diff --git a/k8s/base/queue/configmap.yaml b/k8s/base/queue/configmap.yaml
index 95a5825..5824502 100644
--- a/k8s/base/queue/configmap.yaml
+++ b/k8s/base/queue/configmap.yaml
@@ -4,3 +4,4 @@ metadata:
   name: farmhand-queue-config
 data:
   LOG_LEVEL: "queue=debug"
+  STORAGE: "videos/staging"
diff --git a/k8s/base/ui/configmap.yaml b/k8s/base/ui/configmap.yaml
index 781f08f..30a2d73 100644
--- a/k8s/base/ui/configmap.yaml
+++ b/k8s/base/ui/configmap.yaml
@@ -4,3 +4,4 @@ metadata:
   name: farmhand-ui-config
 data:
   API_URL: "https://staging.api.farmhand.witchscrow.com"
+  ASSET_URL: "https://pub-ce3c4568826847bb9fbaaab457f5a1c6.r2.dev"
diff --git a/services/silo-api/src/app_state.rs b/services/silo-api/src/app_state.rs
index aca0271..71dbb9d 100644
--- a/services/silo-api/src/app_state.rs
+++ b/services/silo-api/src/app_state.rs
@@ -3,7 +3,8 @@ use std::sync::Arc;
 use queue::{PostgresQueue, Queue};
 use sqlx::PgPool;
 
-use crate::{config::Config, s3::create_s3_client};
+use crate::config::Config;
+use common::s3::create_s3_client;
 
 /// Shared state available to the API
 pub struct AppState {
diff --git a/services/silo-api/src/main.rs b/services/silo-api/src/main.rs
index cb8a18c..348a784 100644
--- a/services/silo-api/src/main.rs
+++ b/services/silo-api/src/main.rs
@@ -3,7 +3,6 @@ mod config;
 mod jwt;
 mod middleware;
 mod routes;
-mod s3;
 
 pub use app_state::AppState;
 use axum::{
@@ -15,6 +14,7 @@ use axum::{
 };
 use config::Config;
 use reqwest::StatusCode;
+
 use serde_json::json;
 use std::sync::Arc;
 
@@ -51,7 +51,6 @@ async fn main() {
     // Initialize our router with the shared state and required routes
     let app = Router::new()
         .route("/", get(index))
-        .route("/delete-videos", get(delete_all_files))
         .nest(
             "/auth",
             Router::new()
@@ -142,94 +141,96 @@ async fn index() -> impl IntoResponse {
     "Welcome to the farmhand api"
 }
 
-pub async fn delete_all_files(
-    State(state): State<Arc<AppState>>,
-) -> Result<Json<serde_json::Value>, (StatusCode, Json<serde_json::Value>)> {
-    let client = &state.s3_client;
-    let bucket = std::env::var("UPLOAD_BUCKET").expect("UPLOAD_BUCKET required");
+// Deletes all files from cloudflare
+// TODO: Move to a script and make environment specific
+// pub async fn delete_all_files(
+//     State(state): State<Arc<AppState>>,
+// ) -> Result<Json<serde_json::Value>, (StatusCode, Json<serde_json::Value>)> {
+//     let client = &state.s3_client;
+//     let bucket = std::env::var("UPLOAD_BUCKET").expect("UPLOAD_BUCKET required");
 
-    // First, abort all multipart uploads
-    let multipart_uploads = client
-        .list_multipart_uploads()
-        .bucket(&bucket)
-        .send()
-        .await
-        .map_err(|e| {
-            (
-                StatusCode::INTERNAL_SERVER_ERROR,
-                Json(json!({ "error": format!("Failed to list multipart uploads: {}", e) })),
-            )
-        })?;
+//     // First, abort all multipart uploads
+//     let multipart_uploads = client
+//         .list_multipart_uploads()
+//         .bucket(&bucket)
+//         .send()
+//         .await
+//         .map_err(|e| {
+//             (
+//                 StatusCode::INTERNAL_SERVER_ERROR,
+//                 Json(json!({ "error": format!("Failed to list multipart uploads: {}", e) })),
+//             )
+//         })?;
 
-    let uploads = multipart_uploads.uploads();
-    for upload in uploads {
-        if let (Some(key), Some(upload_id)) = (upload.key(), upload.upload_id()) {
-            client
-                .abort_multipart_upload()
-                .bucket(&bucket)
-                .key(key)
-                .upload_id(upload_id)
-                .send()
-                .await
-                .map_err(|e| {
-                    (
-                        StatusCode::INTERNAL_SERVER_ERROR,
-                        Json(
-                            json!({ "error": format!("Failed to abort multipart upload: {}", e) }),
-                        ),
-                    )
-                })?;
-        }
-    }
+//     let uploads = multipart_uploads.uploads();
+//     for upload in uploads {
+//         if let (Some(key), Some(upload_id)) = (upload.key(), upload.upload_id()) {
+//             client
+//                 .abort_multipart_upload()
+//                 .bucket(&bucket)
+//                 .key(key)
+//                 .upload_id(upload_id)
+//                 .send()
+//                 .await
+//                 .map_err(|e| {
+//                     (
+//                         StatusCode::INTERNAL_SERVER_ERROR,
+//                         Json(
+//                             json!({ "error": format!("Failed to abort multipart upload: {}", e) }),
+//                         ),
+//                     )
+//                 })?;
+//         }
+//     }
 
-    // Then delete all complete objects
-    let objects = client
-        .list_objects_v2()
-        .bucket(&bucket)
-        .send()
-        .await
-        .map_err(|e| {
-            (
-                StatusCode::INTERNAL_SERVER_ERROR,
-                Json(json!({ "error": format!("Failed to list objects: {}", e) })),
-            )
-        })?;
+//     // Then delete all complete objects
+//     let objects = client
+//         .list_objects_v2()
+//         .bucket(&bucket)
+//         .send()
+//         .await
+//         .map_err(|e| {
+//             (
+//                 StatusCode::INTERNAL_SERVER_ERROR,
+//                 Json(json!({ "error": format!("Failed to list objects: {}", e) })),
+//             )
+//         })?;
 
-    // If there are objects to delete
-    if !objects.contents().is_empty() {
-        // Prepare objects for deletion
-        let objects_to_delete: Vec<_> = objects
-            .contents()
-            .iter()
-            .filter_map(|obj| {
-                obj.key().map(|k| {
-                    aws_sdk_s3::types::ObjectIdentifier::builder()
-                        .key(k)
-                        .build()
-                        .expect("Could not build object identifier")
-                })
-            })
-            .collect();
+//     // If there are objects to delete
+//     if !objects.contents().is_empty() {
+//         // Prepare objects for deletion
+//         let objects_to_delete: Vec<_> = objects
+//             .contents()
+//             .iter()
+//             .filter_map(|obj| {
+//                 obj.key().map(|k| {
+//                     aws_sdk_s3::types::ObjectIdentifier::builder()
+//                         .key(k)
+//                         .build()
+//                         .expect("Could not build object identifier")
+//                 })
+//             })
+//             .collect();
 
-        // Delete the objects
-        client
-            .delete_objects()
-            .bucket(&bucket)
-            .delete(
-                aws_sdk_s3::types::Delete::builder()
-                    .set_objects(Some(objects_to_delete))
-                    .build()
-                    .expect("Could not build deleter"),
-            )
-            .send()
-            .await
-            .map_err(|e| {
-                (
-                    StatusCode::INTERNAL_SERVER_ERROR,
-                    Json(json!({ "error": format!("Failed to delete objects: {}", e) })),
-                )
-            })?;
-    }
+//         // Delete the objects
+//         client
+//             .delete_objects()
+//             .bucket(&bucket)
+//             .delete(
+//                 aws_sdk_s3::types::Delete::builder()
+//                     .set_objects(Some(objects_to_delete))
+//                     .build()
+//                     .expect("Could not build deleter"),
+//             )
+//             .send()
+//             .await
+//             .map_err(|e| {
+//                 (
+//                     StatusCode::INTERNAL_SERVER_ERROR,
+//                     Json(json!({ "error": format!("Failed to delete objects: {}", e) })),
+//                 )
+//             })?;
+//     }
 
-    Ok(Json(json!({ "message": "All files deleted successfully" })))
-}
+//     Ok(Json(json!({ "message": "All files deleted successfully" })))
+// }
diff --git a/services/silo-api/src/s3.rs b/services/silo-api/src/s3.rs
deleted file mode 100644
index 12dc56d..0000000
--- a/services/silo-api/src/s3.rs
+++ /dev/null
@@ -1,17 +0,0 @@
-use aws_config::Region;
-use aws_sdk_s3::Client;
-
-/// Create an S3 Client configured against Cloudflare R2
-pub async fn create_s3_client() -> Client {
-    let region = Region::new("auto");
-    let r2_account_id = std::env::var("R2_ACCOUNT_ID").expect("R2_ACCOUNT_ID required");
-    let endpoint_url = format!("https://{}.r2.cloudflarestorage.com", r2_account_id);
-
-    let config = aws_config::from_env()
-        .region(region)
-        .endpoint_url(endpoint_url)
-        .load()
-        .await;
-
-    Client::new(&config)
-}
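
Note: the patch deletes services/silo-api/src/s3.rs and switches app_state.rs to use common::s3::create_s3_client, but the relocated helper itself is not part of this diff. Presumably common/src/s3.rs now carries the same implementation that was removed above; a minimal sketch under that assumption, with comments tying it back to the new Kubernetes configuration:

// common/src/s3.rs (assumed location; the moved module is not shown in this patch)
use aws_config::Region;
use aws_sdk_s3::Client;

/// Create an S3 client configured against Cloudflare R2.
///
/// R2 speaks the S3 API at https://<account-id>.r2.cloudflarestorage.com and
/// authenticates with the standard AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
/// pair, which is why the api deployment above injects those variables (plus
/// R2_ACCOUNT_ID) from the farmhand-cloudflare-credentials secret.
pub async fn create_s3_client() -> Client {
    // R2 ignores the region, but the SDK requires one; "auto" is the value R2 expects.
    let region = Region::new("auto");
    let r2_account_id = std::env::var("R2_ACCOUNT_ID").expect("R2_ACCOUNT_ID required");
    let endpoint_url = format!("https://{}.r2.cloudflarestorage.com", r2_account_id);

    let config = aws_config::from_env()
        .region(region)
        .endpoint_url(endpoint_url)
        .load()
        .await;

    Client::new(&config)
}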
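
The TODO on the disabled handler suggests replacing it with a standalone, environment-specific cleanup script. A rough sketch of what such a binary could look like (hypothetical, not part of this patch): it reuses create_s3_client and scopes deletion to the STORAGE prefix introduced in the configmaps ("videos/staging"), so a staging run cannot touch other environments' objects. Aborting in-flight multipart uploads, which the disabled handler also did, is left out of this sketch.

// Hypothetical cleanup binary, e.g. a separate bin target in the workspace; not part of this patch.
use common::s3::create_s3_client;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let bucket = std::env::var("UPLOAD_BUCKET").expect("UPLOAD_BUCKET required");
    // "videos/staging" in the staging configmaps; scopes the wipe to one environment.
    let prefix = std::env::var("STORAGE").expect("STORAGE required");
    let client = create_s3_client().await;

    // List the objects under the environment prefix.
    // Note: like the disabled handler, this only handles the first page of results.
    let objects = client
        .list_objects_v2()
        .bucket(&bucket)
        .prefix(&prefix)
        .send()
        .await?;

    // Build the object identifiers and delete them in a single batch call.
    let objects_to_delete: Vec<_> = objects
        .contents()
        .iter()
        .filter_map(|obj| {
            obj.key().map(|k| {
                aws_sdk_s3::types::ObjectIdentifier::builder()
                    .key(k)
                    .build()
                    .expect("Could not build object identifier")
            })
        })
        .collect();

    if !objects_to_delete.is_empty() {
        client
            .delete_objects()
            .bucket(&bucket)
            .delete(
                aws_sdk_s3::types::Delete::builder()
                    .set_objects(Some(objects_to_delete))
                    .build()?,
            )
            .send()
            .await?;
    }

    Ok(())
}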