Cloudflare R2 feature polish (#43)
* Remove unused s3 mod

* Disable delete all videos route

* Add k8s configurations
sneakycrow authored Jan 30, 2025
1 parent d4a4922 commit ead3e0e
Showing 7 changed files with 109 additions and 105 deletions.
2 changes: 2 additions & 0 deletions k8s/base/api/configmap.yaml
@@ -8,3 +8,5 @@ data:
FFMPEG_LOCATION: "/opt/homebrew/bin/ffmpeg" # TODO: Make sure this is the right path for the Docker context
TWITCH_REDIRECT_URI: "https://staging.api.farmhand.witchscrow.com/auth/twitch/callback"
LOG_LEVEL: "api=debug"
STORAGE: "videos/staging" # TODO: Make this dynamic per environment
UPLOAD_BUCKET: "farmhand"
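
The new STORAGE prefix and UPLOAD_BUCKET name imply that each environment's uploads live under their own key prefix inside a shared R2 bucket. A minimal sketch of how the API might combine the two when writing an object; the helper name, key layout, and file extension are illustrative assumptions, not taken from this commit:

```rust
use aws_sdk_s3::{primitives::ByteStream, Client};

// Hypothetical helper: store a video under the environment-scoped prefix,
// e.g. key "videos/staging/<video_id>.mp4" in the "farmhand" bucket.
pub async fn put_video(
    client: &Client,
    video_id: &str,
    bytes: Vec<u8>,
) -> Result<(), aws_sdk_s3::Error> {
    let bucket = std::env::var("UPLOAD_BUCKET").expect("UPLOAD_BUCKET required");
    let prefix = std::env::var("STORAGE").expect("STORAGE required");

    client
        .put_object()
        .bucket(bucket)
        .key(format!("{prefix}/{video_id}.mp4"))
        .body(ByteStream::from(bytes))
        .send()
        .await?;

    Ok(())
}
```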
15 changes: 15 additions & 0 deletions k8s/base/api/deployment.yaml
@@ -35,6 +35,21 @@ spec:
secretKeyRef:
name: farmhand-twitch-credentials
key: client-secret
- name: R2_ACCOUNT_ID
valueFrom:
secretKeyRef:
name: farmhand-cloudflare-credentials
key: account-id
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: farmhand-cloudflare-credentials
key: access-key-id
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: farmhand-cloudflare-credentials
key: secret-access-key
- name: JWT_SECRET
valueFrom:
secretKeyRef:
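
The three secretKeyRef entries above assume a farmhand-cloudflare-credentials Secret carrying the R2 account ID and an S3-compatible key pair. A hedged sketch of that Secret with placeholder values; the real manifest is not part of this commit:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: farmhand-cloudflare-credentials
type: Opaque
stringData:
  # Placeholders only; supply real values out of band.
  account-id: "<cloudflare-account-id>"
  access-key-id: "<r2-access-key-id>"
  secret-access-key: "<r2-secret-access-key>"
```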
1 change: 1 addition & 0 deletions k8s/base/queue/configmap.yaml
@@ -4,3 +4,4 @@ metadata:
name: farmhand-queue-config
data:
LOG_LEVEL: "queue=debug"
STORAGE: "videos/staging"
1 change: 1 addition & 0 deletions k8s/base/ui/configmap.yaml
@@ -4,3 +4,4 @@ metadata:
name: farmhand-ui-config
data:
API_URL: "https://staging.api.farmhand.witchscrow.com"
ASSET_URL: "https://pub-ce3c4568826847bb9fbaaab457f5a1c6.r2.dev"
3 changes: 2 additions & 1 deletion services/silo-api/src/app_state.rs
@@ -3,7 +3,8 @@ use std::sync::Arc;
use queue::{PostgresQueue, Queue};
use sqlx::PgPool;

use crate::{config::Config, s3::create_s3_client};
use crate::config::Config;
use common::s3::create_s3_client;

/// Shared state available to the API
pub struct AppState {
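
With the silo-api s3 module removed, create_s3_client is now imported from the shared common::s3 crate. A minimal sketch of what an R2-flavoured constructor could look like, assuming an async, zero-argument signature and the env vars wired in by the deployment above; the actual implementation is not shown in this commit:

```rust
use aws_config::{BehaviorVersion, Region};
use aws_sdk_s3::Client;

/// Build an S3 client pointed at Cloudflare R2's S3-compatible endpoint.
pub async fn create_s3_client() -> Client {
    // R2 endpoints are scoped to the Cloudflare account.
    let account_id = std::env::var("R2_ACCOUNT_ID").expect("R2_ACCOUNT_ID required");
    let endpoint = format!("https://{account_id}.r2.cloudflarestorage.com");

    // AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY from the deployment's secretKeyRefs
    // are picked up by the default credentials provider chain.
    let config = aws_config::defaults(BehaviorVersion::latest())
        .endpoint_url(endpoint)
        .region(Region::new("auto")) // R2 uses the "auto" region
        .load()
        .await;

    Client::new(&config)
}
```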
175 changes: 88 additions & 87 deletions services/silo-api/src/main.rs
@@ -3,7 +3,6 @@ mod config;
mod jwt;
mod middleware;
mod routes;
mod s3;

pub use app_state::AppState;
use axum::{
@@ -15,6 +14,7 @@ use axum::{
};
use config::Config;
use reqwest::StatusCode;

use serde_json::json;

use std::sync::Arc;
@@ -51,7 +51,6 @@ async fn main() {
// Initialize our router with the shared state and required routes
let app = Router::new()
.route("/", get(index))
.route("/delete-videos", get(delete_all_files))
.nest(
"/auth",
Router::new()
@@ -142,94 +141,96 @@ async fn index() -> impl IntoResponse {
"Welcome to the farmhand api"
}

pub async fn delete_all_files(
State(state): State<Arc<AppState>>,
) -> Result<impl IntoResponse, (StatusCode, Json<serde_json::Value>)> {
let client = &state.s3_client;
let bucket = std::env::var("UPLOAD_BUCKET").expect("UPLOAD_BUCKET required");
// Deletes all files from cloudflare
// TODO: Move to a script and make environment specific
// pub async fn delete_all_files(
// State(state): State<Arc<AppState>>,
// ) -> Result<impl IntoResponse, (StatusCode, Json<serde_json::Value>)> {
// let client = &state.s3_client;
// let bucket = std::env::var("UPLOAD_BUCKET").expect("UPLOAD_BUCKET required");

// First, abort all multipart uploads
let multipart_uploads = client
.list_multipart_uploads()
.bucket(&bucket)
.send()
.await
.map_err(|e| {
(
StatusCode::INTERNAL_SERVER_ERROR,
Json(json!({ "error": format!("Failed to list multipart uploads: {}", e) })),
)
})?;
// // First, abort all multipart uploads
// let multipart_uploads = client
// .list_multipart_uploads()
// .bucket(&bucket)
// .send()
// .await
// .map_err(|e| {
// (
// StatusCode::INTERNAL_SERVER_ERROR,
// Json(json!({ "error": format!("Failed to list multipart uploads: {}", e) })),
// )
// })?;

let uploads = multipart_uploads.uploads();
for upload in uploads {
if let (Some(key), Some(upload_id)) = (upload.key(), upload.upload_id()) {
client
.abort_multipart_upload()
.bucket(&bucket)
.key(key)
.upload_id(upload_id)
.send()
.await
.map_err(|e| {
(
StatusCode::INTERNAL_SERVER_ERROR,
Json(
json!({ "error": format!("Failed to abort multipart upload: {}", e) }),
),
)
})?;
}
}
// let uploads = multipart_uploads.uploads();
// for upload in uploads {
// if let (Some(key), Some(upload_id)) = (upload.key(), upload.upload_id()) {
// client
// .abort_multipart_upload()
// .bucket(&bucket)
// .key(key)
// .upload_id(upload_id)
// .send()
// .await
// .map_err(|e| {
// (
// StatusCode::INTERNAL_SERVER_ERROR,
// Json(
// json!({ "error": format!("Failed to abort multipart upload: {}", e) }),
// ),
// )
// })?;
// }
// }

// Then delete all complete objects
let objects = client
.list_objects_v2()
.bucket(&bucket)
.send()
.await
.map_err(|e| {
(
StatusCode::INTERNAL_SERVER_ERROR,
Json(json!({ "error": format!("Failed to list objects: {}", e) })),
)
})?;
// // Then delete all complete objects
// let objects = client
// .list_objects_v2()
// .bucket(&bucket)
// .send()
// .await
// .map_err(|e| {
// (
// StatusCode::INTERNAL_SERVER_ERROR,
// Json(json!({ "error": format!("Failed to list objects: {}", e) })),
// )
// })?;

// If there are objects to delete
if !objects.contents().is_empty() {
// Prepare objects for deletion
let objects_to_delete: Vec<_> = objects
.contents()
.iter()
.filter_map(|obj| {
obj.key().map(|k| {
aws_sdk_s3::types::ObjectIdentifier::builder()
.key(k)
.build()
.expect("Could not build object identifier")
})
})
.collect();
// // If there are objects to delete
// if !objects.contents().is_empty() {
// // Prepare objects for deletion
// let objects_to_delete: Vec<_> = objects
// .contents()
// .iter()
// .filter_map(|obj| {
// obj.key().map(|k| {
// aws_sdk_s3::types::ObjectIdentifier::builder()
// .key(k)
// .build()
// .expect("Could not build object identifier")
// })
// })
// .collect();

// Delete the objects
client
.delete_objects()
.bucket(&bucket)
.delete(
aws_sdk_s3::types::Delete::builder()
.set_objects(Some(objects_to_delete))
.build()
.expect("Could not build deleter"),
)
.send()
.await
.map_err(|e| {
(
StatusCode::INTERNAL_SERVER_ERROR,
Json(json!({ "error": format!("Failed to delete objects: {}", e) })),
)
})?;
}
// // Delete the objects
// client
// .delete_objects()
// .bucket(&bucket)
// .delete(
// aws_sdk_s3::types::Delete::builder()
// .set_objects(Some(objects_to_delete))
// .build()
// .expect("Could not build deleter"),
// )
// .send()
// .await
// .map_err(|e| {
// (
// StatusCode::INTERNAL_SERVER_ERROR,
// Json(json!({ "error": format!("Failed to delete objects: {}", e) })),
// )
// })?;
// }

Ok(Json(json!({ "message": "All files deleted successfully" })))
}
// Ok(Json(json!({ "message": "All files deleted successfully" })))
// }
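
The TODO above suggests moving the bulk delete into a standalone, environment-specific script. A rough sketch of that idea as a small binary guarded by an environment check; the ENVIRONMENT variable, the common::s3::create_s3_client signature, and the per-object delete loop are assumptions rather than code from this commit:

```rust
use aws_sdk_s3::Client;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Refuse to run anywhere but the explicitly allowed environment.
    let environment = std::env::var("ENVIRONMENT").unwrap_or_default();
    if environment != "staging" {
        eprintln!("refusing to delete objects outside staging (ENVIRONMENT={environment})");
        std::process::exit(1);
    }

    let bucket = std::env::var("UPLOAD_BUCKET")?;
    let client: Client = common::s3::create_s3_client().await; // assumed async, no-arg helper

    // Walk the bucket page by page and delete each object individually.
    let mut pages = client
        .list_objects_v2()
        .bucket(&bucket)
        .into_paginator()
        .send();
    while let Some(page) = pages.next().await {
        for object in page?.contents() {
            if let Some(key) = object.key() {
                client.delete_object().bucket(&bucket).key(key).send().await?;
                println!("deleted {key}");
            }
        }
    }

    Ok(())
}
```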
17 changes: 0 additions & 17 deletions services/silo-api/src/s3.rs

This file was deleted.
