diff --git a/.env.sample b/.env.sample new file mode 100644 index 0000000..5f91da7 --- /dev/null +++ b/.env.sample @@ -0,0 +1,4 @@ +DATABASE_URL=postgres://localhost/opensecret +OPENAI_API_KEY= +ENCLAVE_SECRET_MOCK= +RESEND_API_KEY= diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000..a5b907c --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,134 @@ +# This workflow handles Nix-based reproducible builds for opensecret +# It requires a custom ARM64 runner for AWS Nitro Enclave compatibility +name: "Nix Reproducible Builds" + +on: + push: + branches: + - master + pull_request: + branches: + - master + workflow_dispatch: + +jobs: + dev: + name: "Development Reproducible Build" + # Run on all PRs and master pushes + if: github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request' || github.event_name == 'push' + # Custom runner required: ARM64 architecture needed for AWS Nitro Enclaves + # 4 cores needed for efficient builds and PCR verification + runs-on: ubuntu-22.04-arm64-4core + steps: + - name: Check out repository + uses: actions/checkout@v4 + + # Setup Nix with caching for faster builds + - name: Install Nix + uses: DeterminateSystems/nix-installer-action@v16 + + - name: Enable Magic Nix Cache + uses: DeterminateSystems/magic-nix-cache-action@v8 + + - name: Check flake.lock health + uses: DeterminateSystems/flake-checker-action@v9 + with: + flake-lock-path: flake.lock + + # Build development EIF directly using Nix package + - name: Build dev EIF + id: build-dev + run: | + set -euo pipefail + nix build .?submodules=1#eif-dev + echo "Build completed successfully" + + # Verify PCR values match the reference + - name: Verify dev PCR + id: verify-dev + run: | + set -euo pipefail + if [ ! -f "./pcrDev.json" ]; then + echo "❌ No pcrDev.json found. This file must be checked into version control." 
+ exit 1 + fi + + if diff -q "./pcrDev.json" result/pcr.json > /dev/null; then + echo "✅ Dev PCR values match!" + else + echo "❌ Dev PCR values do not match!" + echo "Expected (./pcrDev.json):" + cat "./pcrDev.json" + echo "Got (result/pcr.json):" + cat result/pcr.json + exit 1 + fi + + # Store artifacts for 7 days + - name: Upload dev artifacts + if: always() + uses: actions/upload-artifact@v4 + with: + name: dev-artifacts + path: result/ + retention-days: 7 + + prod: + name: "Production Reproducible Build" + # Only run on master pushes or manual triggers for production safety + if: github.event_name == 'workflow_dispatch' || (github.event_name == 'push' && github.ref == 'refs/heads/master') + # Same custom runner requirements as dev build + runs-on: ubuntu-22.04-arm64-4core + steps: + - name: Check out repository + uses: actions/checkout@v4 + + # Setup Nix with caching for faster builds + - name: Install Nix + uses: DeterminateSystems/nix-installer-action@v16 + + - name: Enable Magic Nix Cache + uses: DeterminateSystems/magic-nix-cache-action@v8 + + - name: Check flake.lock health + uses: DeterminateSystems/flake-checker-action@v9 + with: + flake-lock-path: flake.lock + + # Build production EIF directly using Nix package + - name: Build prod EIF + id: build-prod + run: | + set -euo pipefail + nix build .?submodules=1#eif-prod + echo "Build completed successfully" + + # Verify PCR values match the reference + - name: Verify prod PCR + id: verify-prod + run: | + set -euo pipefail + if [ ! -f "./pcrProd.json" ]; then + echo "❌ No pcrProd.json found. This file must be checked into version control." + exit 1 + fi + + if diff -q "./pcrProd.json" result/pcr.json > /dev/null; then + echo "✅ Production PCR values match!" + else + echo "❌ Production PCR values do not match!" 
+ echo "Expected (./pcrProd.json):" + cat "./pcrProd.json" + echo "Got (result/pcr.json):" + cat result/pcr.json + exit 1 + fi + + # Store production artifacts for 30 days + - name: Upload prod artifacts + if: always() + uses: actions/upload-artifact@v4 + with: + name: prod-artifacts + path: result/ + retention-days: 30 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..128a233 --- /dev/null +++ b/.gitignore @@ -0,0 +1,28 @@ +/debug +/target +/build +/result +/*.tar +/*/*.tar + +# Environment variables +.env +.env.local +.env.*.local + +# IDE files +.vscode/ +.idea/ + +# Operating system files +.DS_Store +Thumbs.db + +# Rust +**/*.rs.bk + +# Clippy compile errors keep generating this +/rustc-ice-* + +# Continuum local files +continuum-manifests/ diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..d42b830 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "nitro-toolkit"] + path = nitro-toolkit + url = git@github.com:OpenSecretCloud/nitro-toolkit.git diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000..b478de9 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,3836 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "addr2line" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "aead" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" +dependencies = [ + "crypto-common", + "generic-array", +] + +[[package]] +name = "aes" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures", +] + +[[package]] +name = "aes-gcm" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" +dependencies = [ + "aead", + "aes", + "cipher", + "ctr", + "ghash", + "subtle", +] + +[[package]] +name = "aes-siv" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e08d0cdb774acd1e4dac11478b1a0c0d203134b2aab0ba25eb430de9b18f8b9" +dependencies = [ + "aead", + "aes", + "cipher", + "cmac", + "ctr", + "dbl", + "digest", + "zeroize", +] + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = 
"android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anyhow" +version = "1.0.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" + +[[package]] +name = "argon2" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c3610892ee6e0cbce8ae2700349fcf8f98adb0dbfbee85aec3c9179d29cc072" +dependencies = [ + "base64ct", + "blake2", + "cpufeatures", + "password-hash", +] + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + +[[package]] +name = "asn1-rs" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror", + "time", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "async-trait" +version = "0.1.81" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" 
+dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.74", +] + +[[package]] +name = "autocfg" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" + +[[package]] +name = "aws-config" +version = "1.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b49afaa341e8dd8577e1a2200468f98956d6eda50bcf4a53246cc00174ba924" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-sdk-sso", + "aws-sdk-ssooidc", + "aws-sdk-sts", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "hex", + "http 0.2.12", + "ring", + "time", + "tokio", + "tracing", + "url", + "zeroize", +] + +[[package]] +name = "aws-credential-types" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60e8f6b615cb5fc60a98132268508ad104310f0cfb25a1c22eee76efdf9154da" +dependencies = [ + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "zeroize", +] + +[[package]] +name = "aws-nitro-enclaves-nsm-api" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36097332580c65ddaac1ad9686ffb58ea531bf3b2d4b3cef7ccb9b7271045d4b" +dependencies = [ + "libc", + "log", + "nix 0.26.4", + "serde", + "serde_bytes", + "serde_cbor", +] + +[[package]] +name = "aws-runtime" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a10d5c055aa540164d9561a0e2e74ad30f0dcf7393c3a92f6733ddf9c5762468" +dependencies = [ + "aws-credential-types", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "http 0.2.12", + "http-body 0.4.6", + "once_cell", + "percent-encoding", + "pin-project-lite", + 
"tracing", + "uuid", +] + +[[package]] +name = "aws-sdk-sqs" +version = "1.49.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "073df10a6d1dbbfdb06c5a6a6d1ebf5bf799afe64586e0688bae08a3b1be553f" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "http 0.2.12", + "once_cell", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-sso" +version = "1.49.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09677244a9da92172c8dc60109b4a9658597d4d298b188dd0018b6a66b410ca4" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "http 0.2.12", + "once_cell", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-ssooidc" +version = "1.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fea2f3a8bb3bd10932ae7ad59cc59f65f270fc9183a7e91f501dc5efbef7ee" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "http 0.2.12", + "once_cell", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-sts" +version = "1.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ada54e5f26ac246dc79727def52f7f8ed38915cb47781e2a72213957dc3a7d5" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-query", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "http 0.2.12", + "once_cell", + 
"regex-lite", + "tracing", +] + +[[package]] +name = "aws-sigv4" +version = "1.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5619742a0d8f253be760bfbb8e8e8368c69e3587e4637af5754e488a611499b1" +dependencies = [ + "aws-credential-types", + "aws-smithy-http", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "form_urlencoded", + "hex", + "hmac", + "http 0.2.12", + "http 1.1.0", + "once_cell", + "percent-encoding", + "sha2", + "time", + "tracing", +] + +[[package]] +name = "aws-smithy-async" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62220bc6e97f946ddd51b5f1361f78996e704677afc518a4ff66b7a72ea1378c" +dependencies = [ + "futures-util", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "aws-smithy-http" +version = "0.60.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c8bc3e8fdc6b8d07d976e301c02fe553f72a39b7a9fea820e023268467d7ab6" +dependencies = [ + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "bytes-utils", + "futures-core", + "http 0.2.12", + "http-body 0.4.6", + "once_cell", + "percent-encoding", + "pin-project-lite", + "pin-utils", + "tracing", +] + +[[package]] +name = "aws-smithy-json" +version = "0.60.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4683df9469ef09468dad3473d129960119a0d3593617542b7d52086c8486f2d6" +dependencies = [ + "aws-smithy-types", +] + +[[package]] +name = "aws-smithy-query" +version = "0.60.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2fbd61ceb3fe8a1cb7352e42689cec5335833cd9f94103a61e98f9bb61c64bb" +dependencies = [ + "aws-smithy-types", + "urlencoding", +] + +[[package]] +name = "aws-smithy-runtime" +version = "1.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f20685047ca9d6f17b994a07f629c813f08b5bce65523e47124879e60103d45" +dependencies = [ + "aws-smithy-async", + 
"aws-smithy-http", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "fastrand", + "h2", + "http 0.2.12", + "http-body 0.4.6", + "http-body 1.0.1", + "httparse", + "hyper 0.14.30", + "hyper-rustls", + "once_cell", + "pin-project-lite", + "pin-utils", + "rustls", + "tokio", + "tracing", +] + +[[package]] +name = "aws-smithy-runtime-api" +version = "1.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92165296a47a812b267b4f41032ff8069ab7ff783696d217f0994a0d7ab585cd" +dependencies = [ + "aws-smithy-async", + "aws-smithy-types", + "bytes", + "http 0.2.12", + "http 1.1.0", + "pin-project-lite", + "tokio", + "tracing", + "zeroize", +] + +[[package]] +name = "aws-smithy-types" +version = "1.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fbd94a32b3a7d55d3806fe27d98d3ad393050439dd05eb53ece36ec5e3d3510" +dependencies = [ + "base64-simd", + "bytes", + "bytes-utils", + "futures-core", + "http 0.2.12", + "http 1.1.0", + "http-body 0.4.6", + "http-body 1.0.1", + "http-body-util", + "itoa", + "num-integer", + "pin-project-lite", + "pin-utils", + "ryu", + "serde", + "time", + "tokio", + "tokio-util", +] + +[[package]] +name = "aws-smithy-xml" +version = "0.60.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab0b0166827aa700d3dc519f72f8b3a91c35d0b8d042dc5d643a91e6f80648fc" +dependencies = [ + "xmlparser", +] + +[[package]] +name = "aws-types" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5221b91b3e441e6675310829fd8984801b772cb1546ef6c0e54dec9f1ac13fef" +dependencies = [ + "aws-credential-types", + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "rustc_version", + "tracing", +] + +[[package]] +name = "axum" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +dependencies = [ + 
"async-trait", + "axum-core", + "axum-macros", + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper 1.0.1", + "tokio", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-core" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper 0.1.2", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-macros" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00c055ee2d014ae5981ce1016374e8213682aa14d9bf40e48ab48b5f3ef20eaa" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "syn 2.0.74", +] + +[[package]] +name = "backoff" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" +dependencies = [ + "futures-core", + "getrandom", + "instant", + "pin-project-lite", + "rand", + "tokio", +] + +[[package]] +name = "backtrace" +version = "0.3.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base58ck" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c8d66485a3a2ea485c1913c4572ce0256067a5377ac8c75c4960e1cda98605f" 
+dependencies = [ + "bitcoin-internals", + "bitcoin_hashes 0.14.0", +] + +[[package]] +name = "base64" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "base64-simd" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "339abbe78e73178762e23bea9dfd08e697eb3f3301cd4be981c0f78ba5859195" +dependencies = [ + "outref", + "vsimd", +] + +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + +[[package]] +name = "bech32" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d965446196e3b7decd44aa7ee49e31d630118f90ef12f97900f262eb915c951d" + +[[package]] +name = "bigdecimal" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d712318a27c7150326677b321a5fa91b55f6d9034ffd67f20319e147d40cee" +dependencies = [ + "autocfg", + "libm", + "num-bigint", + "num-integer", + "num-traits", + "serde", +] + +[[package]] +name = "bip39" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" +dependencies = [ + "bitcoin_hashes 0.11.0", + "serde", + "unicode-normalization", +] + +[[package]] +name = "bitcoin" +version = "0.32.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea507acc1cd80fc084ace38544bbcf7ced7c2aa65b653b102de0ce718df668f6" +dependencies = [ + "base58ck", + "bech32", + "bitcoin-internals", + "bitcoin-io", + "bitcoin-units", + "bitcoin_hashes 0.14.0", + "hex-conservative", + "hex_lit", + "secp256k1", + "serde", +] + +[[package]] +name = "bitcoin-internals" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30bdbe14aa07b06e6cfeffc529a1f099e5fbe249524f8125358604df99a4bed2" +dependencies = [ + "serde", +] + +[[package]] +name = "bitcoin-io" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "340e09e8399c7bd8912f495af6aa58bea0c9214773417ffaa8f6460f93aaee56" + +[[package]] +name = "bitcoin-units" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5285c8bcaa25876d07f37e3d30c303f2609179716e11d688f51e8f1fe70063e2" +dependencies = [ + "bitcoin-internals", + "serde", +] + +[[package]] +name = "bitcoin_hashes" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" + +[[package]] +name = "bitcoin_hashes" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16" +dependencies = [ + "bitcoin-io", + "hex-conservative", + "serde", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" + +[[package]] +name = "blake2" +version = "0.10.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "block-padding" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8894febbff9f758034a5b8e12d87918f56dfc64a8e1fe757d65e29041538d93" +dependencies = [ + "generic-array", +] + +[[package]] +name = "bumpalo" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" + +[[package]] +name = "bytes-utils" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dafe3a8757b027e2be6e4e5601ed563c55989fcf1546e933c66c8eb3a058d35" +dependencies = [ + "bytes", + "either", +] + +[[package]] +name = "cbc" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" +dependencies = [ + "cipher", +] + +[[package]] +name = "cc" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9e8aabfac534be767c909e0690571677d49f41bd8465ae876fe043d52ba5292" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "chacha20" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures", +] + +[[package]] +name = "chacha20poly1305" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" +dependencies = [ + "aead", + "chacha20", + "cipher", + "poly1305", + "zeroize", +] + +[[package]] +name = "chrono" +version = "0.4.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "serde", + "wasm-bindgen", + "windows-targets 0.52.6", +] + +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half 2.4.1", +] + +[[package]] +name = "cipher" +version = 
"0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", + "zeroize", +] + +[[package]] +name = "cmac" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8543454e3c3f5126effff9cd44d562af4e31fb8ce1cc0d3dcd8f084515dbc1aa" +dependencies = [ + "cipher", + "dbl", + "digest", +] + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "cpufeatures" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad" +dependencies = [ + "libc", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "rand_core", + "typenum", +] + +[[package]] +name = "ctr" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" +dependencies = [ + "cipher", +] + +[[package]] +name = "curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "fiat-crypto", + "rustc_version", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.74", +] + +[[package]] +name = "darling" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.74", +] + +[[package]] +name = "darling_macro" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +dependencies = [ + "darling_core", + "quote", + "syn 2.0.74", +] + +[[package]] +name = "dashmap" +version = "5.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +dependencies = [ + "cfg-if", + "hashbrown", + "lock_api", + "once_cell", + "parking_lot_core", +] + +[[package]] +name = "data-encoding" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" + +[[package]] +name = "dbl" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd2735a791158376708f9347fe8faba9667589d82427ef3aed6794a8981de3d9" +dependencies = [ + "generic-array", +] + +[[package]] +name = "der-parser" +version = "8.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + +[[package]] +name = "deranged" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", +] + +[[package]] +name = "diesel" +version = "2.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf97ee7261bb708fa3402fa9c17a54b70e90e3cb98afb3dc8999d5512cb03f94" +dependencies = [ + "bigdecimal", + "bitflags 2.6.0", + "byteorder", + "chrono", + "diesel_derives", + "itoa", + "num-bigint", + "num-integer", + "num-traits", + "pq-sys", + "r2d2", + "uuid", +] + +[[package]] +name = "diesel_derives" +version = "2.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6ff2be1e7312c858b2ef974f5c7089833ae57b5311b334b30923af58e5718d8" +dependencies = [ + "diesel_table_macro_syntax", + "dsl_auto_type", + "proc-macro2", + "quote", + "syn 2.0.74", +] + +[[package]] +name = "diesel_table_macro_syntax" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25" +dependencies = [ + "syn 2.0.74", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", + "subtle", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.74", +] + +[[package]] +name = "dotenv" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f" + +[[package]] +name = "dsl_auto_type" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5d9abe6314103864cc2d8901b7ae224e0ab1a103a0a416661b4097b0779b607" +dependencies = [ + "darling", + "either", + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.74", +] + +[[package]] +name = "ecow" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54bfbb1708988623190a6c4dbedaeaf0f53c20c6395abd6a01feb327b3146f4b" +dependencies = [ + "serde", +] + +[[package]] +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + +[[package]] +name = "encoding_rs" +version = "0.8.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +dependencies = [ + "libc", + 
"windows-sys 0.52.0", +] + +[[package]] +name = "fastrand" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" + +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.30" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" + +[[package]] +name = "futures-executor" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + +[[package]] +name = "futures-macro" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.74", +] + +[[package]] +name = "futures-sink" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" + +[[package]] +name = "futures-task" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" + +[[package]] +name = "futures-timer" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" + +[[package]] +name = "futures-util" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi", + "wasm-bindgen", +] + +[[package]] +name = "ghash" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" +dependencies = [ + "opaque-debug", + "polyval", +] + +[[package]] +name = "gimli" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" + +[[package]] +name = "governor" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68a7f542ee6b35af73b06abc0dad1c1bae89964e4e253bc4b587b91c9637867b" +dependencies = [ + "cfg-if", + "dashmap", + "futures", + "futures-timer", + "no-std-compat", + "nonzero_ext", + "parking_lot", + "portable-atomic", + "quanta", + "rand", + "smallvec", + "spinning_top", +] + +[[package]] +name = "h2" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.12", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "half" +version = "1.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" + +[[package]] +name = "half" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hex-conservative" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd" +dependencies = [ + "arrayvec", +] + +[[package]] +name = "hex_lit" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3011d1213f159867b13cfd6ac92d2cd5f1345762c63be3554e84092d85a50bbd" + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest", +] + +[[package]] +name = "http" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +dependencies = [ + "bytes", + 
"fnv", + "itoa", +] + +[[package]] +name = "http" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http 0.2.12", + "pin-project-lite", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http 1.1.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +dependencies = [ + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hyper" +version = "0.14.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http 0.2.12", + "http-body 0.4.6", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper" +version = "1.4.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http 0.2.12", + "hyper 0.14.30", + "log", + "rustls", + "rustls-native-certs", + "tokio", + "tokio-rustls", +] + +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper 0.14.30", + "native-tls", + "tokio", + "tokio-native-tls", +] + +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper 1.4.1", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "hyper 1.4.1", + "pin-project-lite", + "socket2", + "tokio", + "tower", + "tower-service", + "tracing", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +dependencies = [ + 
"android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "indexmap" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de3fc2e30ba82dd1b3911c8de1ffc143c74a914a14e99514d7637e3099df5ea0" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "inout" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +dependencies = [ + "block-padding", + "generic-array", +] + +[[package]] +name = "instant" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "ipnet" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" + +[[package]] +name = "itoa" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" + +[[package]] +name = "js-sys" +version = 
"0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "jsonwebtoken" +version = "9.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" +dependencies = [ + "base64 0.21.7", + "js-sys", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + +[[package]] +name = "jwt-compact" +version = "0.9.0-beta.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7472e210ef577f9e04c74ea05afc3542643cd9a8c13ce896592ccc99db2a5eae" +dependencies = [ + "anyhow", + "base64ct", + "chrono", + "ciborium", + "hmac", + "lazy_static", + "rand_core", + "secp256k1", + "serde", + "serde_json", + "sha2", + "smallvec", + "subtle", + "zeroize", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.158" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" + +[[package]] +name = "libm" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" + +[[package]] +name = "linux-raw-sys" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" + +[[package]] +name = "lock_api" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" 
+version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + +[[package]] +name = "maybe-async" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cf92c10c7e361d6b99666ec1c6f9805b0bea2c3bd8c78dc6fe98ac5bd78db11" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.74", +] + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "memoffset" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +dependencies = [ + "autocfg", +] + +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.7.4" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +dependencies = [ + "hermit-abi", + "libc", + "wasi", + "windows-sys 0.52.0", +] + +[[package]] +name = "native-tls" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "nix" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +dependencies = [ + "bitflags 1.3.2", + "cfg-if", + "libc", + "memoffset 0.7.1", + "pin-utils", +] + +[[package]] +name = "nix" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags 2.6.0", + "cfg-if", + "cfg_aliases", + "libc", + "memoffset 0.9.1", +] + +[[package]] +name = "no-std-compat" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nonzero_ext" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "oauth2" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c38841cdd844847e3e7c8d29cef9dcfed8877f8f56f9071f77843ecf3baf937f" +dependencies = [ + "base64 0.13.1", + "chrono", + "getrandom", + "http 0.2.12", + "rand", + "reqwest 0.11.27", + "serde", + "serde_json", + "serde_path_to_error", + "sha2", + "thiserror", + "url", +] + +[[package]] +name = "object" +version = "0.36.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" +dependencies = [ + "memchr", +] + +[[package]] +name = "oid-registry" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" +dependencies = [ + "asn1-rs", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "opaque-debug" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" + +[[package]] +name = "opensecret" +version = "0.1.0" +dependencies = [ + "aes-gcm", + "aes-siv", + "async-trait", + "aws-config", + "aws-nitro-enclaves-nsm-api", + "aws-sdk-sqs", + "aws-smithy-runtime", + "aws-types", + "axum", + "axum-macros", + "backoff", + "base64 0.22.1", + "bigdecimal", + "bip39", + "bitcoin", + "bytes", + "cbc", + "chacha20poly1305", + "chrono", + "diesel", + "dotenv", + "futures", + "generic-array", + "getrandom", + "hex", + "hmac", + "hyper 0.14.30", + "hyper-tls 0.5.0", + "jsonwebtoken", + "jwt-compact", + "oauth2", + "password-auth", + "rand_core", + "rcgen", + "reqwest 0.11.27", + "resend-rs", + "secp256k1", + "serde", + "serde_bytes", + "serde_cbor", + "serde_json", + "sha2", + "thiserror", + "tokio", + "tokio-stream", + "tower-http", + "tracing", + "tracing-subscriber", + "url", + "uuid", + "vsock", + "x25519-dalek", + "x509-parser", + "yasna", +] + +[[package]] +name = "openssl" +version = "0.10.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +dependencies = [ + "bitflags 2.6.0", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 
2.0.74", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "openssl-sys" +version = "0.9.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "outref" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4030760ffd992bef45b0ae3f10ce1aba99e33464c90d14dd7c039884963ddc7a" + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "parking_lot" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets 0.52.6", +] + +[[package]] +name = "password-auth" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a2a4764cc1f8d961d802af27193c6f4f0124bd0e76e8393cf818e18880f0524" +dependencies = [ + "argon2", + "getrandom", + "password-hash", + "rand_core", +] + +[[package]] +name = "password-hash" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" +dependencies = [ + "base64ct", + "rand_core", + "subtle", +] + 
+[[package]] +name = "pem" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" +dependencies = [ + "base64 0.22.1", + "serde", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pin-project" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.74", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkg-config" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" + +[[package]] +name = "poly1305" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" +dependencies = [ + "cpufeatures", + "opaque-debug", + "universal-hash", +] + +[[package]] +name = "polyval" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" +dependencies = [ + "cfg-if", + "cpufeatures", + "opaque-debug", + "universal-hash", +] + +[[package]] +name = "portable-atomic" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "pq-sys" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31c0052426df997c0cbd30789eb44ca097e3541717a7b8fa36b1c464ee7edebd" +dependencies = [ + "vcpkg", +] + +[[package]] +name = "proc-macro2" +version = "1.0.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quanta" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5" +dependencies = [ + "crossbeam-utils", + "libc", + "once_cell", + "raw-cpuid", + "wasi", + "web-sys", + "winapi", +] + +[[package]] +name = "quote" +version = "1.0.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r2d2" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" +dependencies = [ + "log", + "parking_lot", + "scheduled-thread-pool", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "raw-cpuid" +version = "11.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb9ee317cfe3fbd54b36a511efc1edd42e216903c9cd575e686dd68a2ba90d8d" +dependencies = [ + "bitflags 2.6.0", +] + +[[package]] +name = "rcgen" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54077e1872c46788540de1ea3d7f4ccb1983d12f9aa909b234468676c1a36779" +dependencies = [ + "pem", + "ring", + "rustls-pki-types", + "time", + "yasna", +] + +[[package]] +name = "redox_syscall" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +dependencies = [ + "bitflags 2.6.0", +] + +[[package]] +name = "regex" +version = "1.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.7", + "regex-syntax 0.8.4", +] + +[[package]] +name = "regex-automata" +version = 
"0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.4", +] + +[[package]] +name = "regex-lite" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" + +[[package]] +name = "reqwest" +version = "0.11.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" +dependencies = [ + "base64 0.21.7", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.30", + "hyper-rustls", + "hyper-tls 0.5.0", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls", + "rustls-pemfile 1.0.4", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 0.1.2", + "system-configuration", + "tokio", + "tokio-native-tls", + "tokio-rustls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots", + "winreg", +] + +[[package]] +name = "reqwest" +version = "0.12.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-core", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", + "hyper-tls 0.6.0", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls-pemfile 2.2.0", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 1.0.1", + "tokio", + "tokio-native-tls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "windows-registry", +] + +[[package]] +name = "resend-rs" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54524581cce8b45f854c9add73cbe9449448a50ab8a655c2c3a477abf0ddf7e6" +dependencies = [ + "ecow", + "governor", + "maybe-async", + "rand", + "reqwest 0.12.7", + "serde", + "thiserror", +] + +[[package]] +name = "ring" +version = "0.17.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +dependencies = [ + "cc", + "cfg-if", + "getrandom", + "libc", + "spin", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom", +] + 
+[[package]] +name = "rustix" +version = "0.38.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +dependencies = [ + "bitflags 2.6.0", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "log", + "ring", + "rustls-webpki", + "sct", +] + +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile 1.0.4", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", +] + +[[package]] +name = "rustls-pemfile" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" + +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" + +[[package]] +name = "ryu" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" + +[[package]] +name = "schannel" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "scheduled-thread-pool" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" +dependencies = [ + "parking_lot", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "secp256k1" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e0cc0f1cf93f4969faf3ea1c7d8a9faed25918d96affa959720823dfe86d4f3" +dependencies = [ + "bitcoin_hashes 0.14.0", + "rand", + "secp256k1-sys", + "serde", +] + +[[package]] +name = "secp256k1-sys" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1433bd67156263443f14d603720b082dd3121779323fce20cba2aa07b874bc1b" +dependencies = [ + "cc", +] + +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags 2.6.0", + "core-foundation", + 
"core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" + +[[package]] +name = "serde" +version = "1.0.207" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5665e14a49a4ea1b91029ba7d3bca9f299e1f7cfa194388ccc20f14743e784f2" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_bytes" +version = "0.11.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_cbor" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" +dependencies = [ + "half 1.8.3", + "serde", +] + +[[package]] +name = "serde_derive" +version = "1.0.207" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6aea2634c86b0e8ef2cfdc0c340baede54ec27b1e46febd7f80dffb2aa44a00e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.74", +] + +[[package]] +name = "serde_json" +version = "1.0.124" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66ad62847a56b3dba58cc891acd13884b9c61138d330c0d7b6181713d4fce38d" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_path_to_error" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" 
+dependencies = [ + "itoa", + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +dependencies = [ + "libc", +] + +[[package]] +name = "simple_asn1" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" +dependencies = [ + "num-bigint", + "num-traits", + "thiserror", + "time", +] + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" + +[[package]] +name = "socket2" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +dependencies = [ + "libc", + 
"windows-sys 0.52.0", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "spinning_top" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d96d2d1d716fb500937168cc09353ffdc7a012be8475ac7308e1bdf0e3923300" +dependencies = [ + "lock_api", +] + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fceb41e3d546d0bd83421d3409b1460cc7444cd389341a4c880fe7a042cb3d7" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "unicode-xid", +] + +[[package]] +name = "system-configuration" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "tempfile" +version = "3.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +dependencies = [ + "cfg-if", + "fastrand", + "once_cell", + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "thiserror" +version = "1.0.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.74", +] + +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", +] + +[[package]] +name = "time" +version = "0.3.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +dependencies = [ + 
"deranged", + "itoa", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" + +[[package]] +name = "time-macros" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +dependencies = [ + "num-conv", + "time-core", +] + +[[package]] +name = "tinyvec" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.39.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.52.0", +] + +[[package]] +name = "tokio-macros" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.74", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +dependencies = [ + "bitflags 2.6.0", + "bytes", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "pin-project-lite", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + +[[package]] +name = "tracing" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.74", +] + +[[package]] +name = "tracing-core" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "unicode-bidi" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" + +[[package]] +name = "unicode-ident" +version = 
"1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "unicode-normalization" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "universal-hash" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" +dependencies = [ + "crypto-common", + "subtle", +] + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + +[[package]] +name = "uuid" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +dependencies = [ + "getrandom", + "serde", +] + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = 
"vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "vsimd" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64" + +[[package]] +name = "vsock" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e8b4d00e672f147fc86a09738fadb1445bd1c0a40542378dfb82909deeee688" +dependencies = [ + "libc", + "nix 0.29.0", +] + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.74", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.74", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" + +[[package]] +name = "web-sys" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-roots" +version = "0.25.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = 
"windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = 
"windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + +[[package]] +name = "x25519-dalek" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" +dependencies = [ + "curve25519-dalek", + "rand_core", + "serde", + "zeroize", +] + +[[package]] +name = "x509-parser" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + "rusticata-macros", + "thiserror", + "time", +] + +[[package]] +name = "xmlparser" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" + +[[package]] +name = "yasna" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" +dependencies = [ + "time", +] + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "byteorder", + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.74", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.74", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..c592ec5 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,67 @@ +[package] +name = "opensecret" +version = "0.1.0" +edition = "2021" + +[dependencies] +axum = { version = "0.7.5", features = ["macros"] } +axum-macros = { version = "0.4.1" } +password-auth = "1.0.0" +bip39 = { version = "2.0.0" } +bitcoin = { version = "0.32.2", default-features = false, features = ["std", "serde", "secp-recovery", "rand"] } +aws-nitro-enclaves-nsm-api = "0.3.0" +aws-smithy-runtime = "1.7.4" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +serde_bytes = "0.11" +tokio = { version = "1.0", features = ["full"] } +tracing = "0.1" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } +tower-http = { version = "0.5.2", features = ["cors"] } +thiserror = "1.0.63" +async-trait = "0.1.81" +jsonwebtoken = "9.3.0" +jwt-compact = { version = "0.9.0-beta.1", features = ["es256k"] } +diesel = { version = "=2.2.2", features = [ + "postgres", + "postgres_backend", + "r2d2", + "chrono", + "numeric", + "uuid", +] } +chrono = { version = "0.4.26", features = ["serde"] } +dotenv = "0.15.0" +aes-gcm = "0.10.1" +aes-siv = "0.7" +hmac = "0.12.1" +generic-array = "0.14" +cbc = "0.1.2" +secp256k1 = { version = "0.29.0", features = ["rand"] } +hyper = { version = "0.14", features = ["full"] } +hyper-tls = "0.5.0" +reqwest = { version = "0.11", features = ["json"] } +futures = "0.3.30" +uuid = { version = "1.10.0", features = ["v4", "serde"] } +tokio-stream = "0.1" 
+bytes = "1.0" +sha2 = { version = "0.10", default-features = false } +hex = "0.4.3" +base64 = "0.22.1" +vsock = "0.5.1" +resend-rs = "0.9.1" +x25519-dalek = "2.0.1" +rand_core = "0.6" +serde_cbor = "0.11" +x509-parser = "0.15" +yasna = "0.5" +rcgen = { version = "0.13.1", features = ["crypto"] } +chacha20poly1305 = "0.10.1" +getrandom = "0.2.15" +oauth2 = "4.4.2" +url = "2.5.2" +bigdecimal = { version = "0.4.5", features = ["serde"] } +aws-config = "1.5.10" +aws-sdk-sqs = "1.49.0" +aws-types = "1.3.3" +backoff = { version = "0.4.0", features = ["tokio"] } diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..217115a --- /dev/null +++ b/Dockerfile @@ -0,0 +1,64 @@ +#################################################################################################### +## Builder +#################################################################################################### +FROM docker.io/library/rust:latest AS builder + +RUN update-ca-certificates + +WORKDIR /app + +COPY ./ . 
+ +# Build for the default target +RUN cargo build --release + +#################################################################################################### +## Final image +#################################################################################################### +FROM enclave_base + +# Install required packages including PostgreSQL client +RUN dnf update -y && \ + dnf install -y socat ca-certificates iproute python3 postgresql-libs jq && \ + dnf clean all + +ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/app +ENV RUST_LOG_STYLE=never +ENV RUST_LOG=info + +WORKDIR /app + +# Copy kmstool_enclave_cli to bin directory +COPY --from=enclave_base /app/kmstool_enclave_cli /bin/kmstool_enclave_cli + +# Copy our build +COPY --from=builder /app/target/release/opensecret /app/opensecret + +# Copy the entrypoint script, traffic forwarder, and vsock helper programs +COPY entrypoint.sh /app/entrypoint.sh +COPY nitro-toolkit/traffic_forwarder.py /app/traffic_forwarder.py +COPY nitro-toolkit/vsock_helper.py /app/vsock_helper.py +RUN chmod +x /app/opensecret /app/entrypoint.sh + +# Copy migrations folder +COPY --from=builder /app/migrations /app/migrations + +# Add environment variables +ARG DATABASE_URL +ENV DATABASE_URL=${DATABASE_URL} +ARG OPENAI_API_KEY +ENV OPENAI_API_KEY=${OPENAI_API_KEY} +ARG APP_MODE +ENV APP_MODE=${APP_MODE:-dev} +ARG ENV_NAME +ENV ENV_NAME=${ENV_NAME} +ARG RESEND_API_KEY +ENV RESEND_API_KEY=${RESEND_API_KEY} + +# Copy continuum-proxy +COPY continuum-proxy /app/continuum-proxy + +# Expose the ports the app runs on +EXPOSE 3000 5432 8092 + +ENTRYPOINT ["/app/entrypoint.sh"] diff --git a/README.md b/README.md new file mode 100644 index 0000000..9215981 --- /dev/null +++ b/README.md @@ -0,0 +1,273 @@ +# OpenSecret + +This is the platform code for running OpenSecret's backend. This is intended to run on AWS Nitro inside an enclave. 
+ +## AWS Nitro Deployment + +When deploying to AWS Nitro, you'll need to choose the appropriate environment: +- `dev` for development environment +- `preview` for preview/staging environment +- `prod` for production environment +- `custom` for custom environment (requires `ENV_NAME` to be set) + +Each environment has its own configuration, secrets, and infrastructure. Make sure to use the correct environment variables and AWS resources for your target environment. + +### New Nix-based Deployment + +The new deployment process uses Nix to create reproducible builds: + +1. First, build the required Nitro binaries (only needed once): +```bash +just build-nitro-bins +``` + +2. Build the EIF for your target environment: +```bash +# For development +nix build .#eif-dev + +# For production +nix build .#eif-prod + +# For preview +nix build .#eif-preview + +# For custom environments +ENV_NAME=your_env_name nix build .#eif +``` + +This will create a symlink `result` pointing to the built EIF file. + +3. Copy the EIF to your AWS parent instance: +```bash +# For development +just scp-eif-to-aws-dev + +# For production +just scp-eif-to-aws-prod + +# For preview +just scp-eif-to-aws-preview +``` + +4. Deploy the EIF: +```bash +# For development +just deploy-dev-nix + +# For production +just deploy-prod-nix + +# For preview +just deploy-preview-nix +``` + +The deployment process will: +1. Build the EIF +2. Copy it to the AWS parent instance +3. Prompt you to review the PCR values +4. After confirmation, terminate any existing enclave +5. Run the new enclave +6. Restart the socat proxy + +### PCR Value Management + +The Nix build process generates PCR (Platform Configuration Register) values that are used by AWS KMS for attestation. You can: + +1. Copy PCR values to a reference file: +```bash +just copy-pcr-dev # For development +just copy-pcr-prod # For production +just copy-pcr-preview # For preview +``` + +2. 
Verify PCR values match the reference: +```bash +just verify-pcr-dev # For development +just verify-pcr-prod # For production +just verify-pcr-preview # For preview +``` + +This ensures the build is reproducible and matches the expected configuration. + +### Deprecated Docker-based Deployment + +This method is deprecated as it does not provide reproducible builds. Here are the raw commands for reference: + +```sh +# Build the Docker image +docker build -t opensecret --build-arg APP_MODE=dev . + +# Save the image to a tar file +docker save -o opensecret.tar opensecret + +# Copy to AWS parent instance +scp opensecret.tar ec2-user@[aws-parent-instance-ip]:~/ + +# Load the image on the parent instance +ssh ec2-user@[aws-parent-instance-ip] +docker load -i opensecret.tar && docker tag localhost/opensecret:latest opensecret:latest + +# Build the EIF file +nitro-cli build-enclave --docker-uri opensecret:latest --output-file opensecret.eif + +# Run the EIF file +nitro-cli run-enclave --eif-path opensecret.eif --memory 16384 --cpu-count 4 + +# Or run in debug mode +nitro-cli run-enclave --eif-path opensecret.eif --memory 16384 --cpu-count 4 --debug-mode +``` + +## Nitro Enclaves Setup + +The project uses AWS Nitro Enclaves and requires two pre-built binaries: +- `libnsm.so` - NSM (Nitro Security Module) library +- `kmstool_enclave_cli` - KMS tool for key operations + +These binaries are built from the official AWS repositories: +- [aws-nitro-enclaves-nsm-api](https://github.com/aws/aws-nitro-enclaves-nsm-api) +- [aws-nitro-enclaves-sdk-c](https://github.com/aws/aws-nitro-enclaves-sdk-c) + +### Building Nitro Binaries + +The binaries are built using Docker to ensure a consistent build environment. To build them: + +```bash +just build-nitro-bins +``` + +This will: +1. Create a `nitro-bins` directory +2. Build the binaries in an Amazon Linux 2 container +3. 
Extract them to the `nitro-bins` directory + +You only need to do this once, or when you want to update the binaries to a new version. +The built binaries are used by the Nix build process to create the EIF (Enclave Image Format). + +## Building and Deploying with Nix + +### Building the EIF + +1. First, build the required Nitro binaries (only needed once): +```bash +just build-nitro-bins +``` + +2. Build the EIF using Nix: +```bash +nix build .#eif +``` + +This will create a symlink `result` pointing to the built EIF file. + +### Differences from Docker-based Build + +The Nix-based build: +- Creates a more reproducible build environment +- Uses pre-built Nitro binaries for consistency +- Integrates with the Monzo aws-nitro-util for EIF creation +- Produces the same functionality as the Docker-based build + +The resulting EIF can be deployed and managed exactly like the Docker-built version. + +## CI/CD Requirements + +### GitHub Actions Runner + +This project requires a custom GitHub Actions runner with the following specifications: + +- Label: `ubuntu-22.04-arm64-4core` +- Architecture: ARM64 +- Operating System: Ubuntu 22.04 +- Resources: 4 CPU cores + +The workflow uses this custom runner for both development and production builds. For more information about setting up custom GitHub Actions runners, see [GitHub's documentation](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/adding-self-hosted-runners). + + +## Development + +This project can be built and run using Docker. Follow these steps to build and run the Docker container: + +### Building the Docker Image + +1. Ensure you have Docker installed on your system. +2. Navigate to the project root directory in your terminal. + +3. Build the enclave base image: + +```sh +docker build ./nitro-toolkit/enclave-base-image/ -t enclave_base +``` + +4. 
Build the main Docker image using the following command: + +DEV: + +```sh +docker build -t opensecret \ +--build-arg DATABASE_URL={PROD_DB_CONNECTION} \ +--build-arg OPENAI_API_KEY={YOUR_OPENAI_API_KEY} \ +--build-arg APP_MODE=local \ +. +``` + +If building for the nitro image (use `dev` [default], `preview`, `prod`, or `custom` depending on the env): + +```sh +docker rmi opensecret:latest && docker build -t opensecret \ +--build-arg APP_MODE=dev \ +. +``` + +```sh +docker rmi opensecret:latest && docker build -t opensecret \ +--build-arg APP_MODE=preview \ +. +``` + +```sh +docker rmi opensecret:latest && docker build -t opensecret \ +--build-arg APP_MODE=prod \ +. +``` + +For custom environments, you must also provide an `ENV_NAME`: +```sh +docker rmi opensecret:latest && docker build -t opensecret \ +--build-arg APP_MODE=custom \ +--build-arg ENV_NAME=your_env_name \ +. +``` + +This command builds the Docker image and tags it as `opensecret`. The `--build-arg` flags are used to pass the environment variables to the Docker build process: +- `DATABASE_URL`: Your production database connection string +- `OPENAI_API_KEY`: Your OpenAI API key +- `APP_MODE`: The deployment environment (`dev`, `preview`, `prod`, or `custom`) +- `ENV_NAME`: Required when `APP_MODE` is `custom`, specifies the custom environment name + +### Running the Docker Container + +After building the image, you can run the container using: + +```sh +docker run -p 3000:3000 -p 5000:5000 --name opensecret-container opensecret +``` + +This command starts a new container from the `opensecret` image and maps port 3000 on the host machine to port 3000 in the container. 
+ +```sh +docker run -p 3000:3000 -p 5000:5000 --name opensecret-container opensecret +``` + +To stop the container, use: + +```sh +docker stop opensecret-container +``` + +To remove the container, use: + +```sh +docker rm opensecret-container +``` diff --git a/continuum-proxy b/continuum-proxy new file mode 100755 index 0000000..08b9b20 Binary files /dev/null and b/continuum-proxy differ diff --git a/diesel.toml b/diesel.toml new file mode 100644 index 0000000..dc07365 --- /dev/null +++ b/diesel.toml @@ -0,0 +1,8 @@ +# For documentation on how to configure this file, +# see https://diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "src/models/schema.rs" + +[migrations_directory] +dir = "migrations" diff --git a/docs/nitro-deploy.md b/docs/nitro-deploy.md new file mode 100644 index 0000000..aaa1441 --- /dev/null +++ b/docs/nitro-deploy.md @@ -0,0 +1,1316 @@ +# Deploy on Nitro + +## Log into AWS CLI + +This should be after creating an IAM profile with admin access. + +For the first time: +``` +aws configure sso +``` + +For logging in to an existing profile (replace with your own): +``` +aws sso login --profile AdministratorAccess-1111 +``` + +## Create an SSH Keypair for logging into the machine: + +Read up on the [docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-key-pairs.html). Either import existing or create a new one. + +To get the public key of an existing `.pem` file: + +``` +ssh-keygen -y -f ~/.ssh/your_ssh.pem +``` + +## Create a container that is nitro compatible + +Read up on the [docs](https://docs.aws.amazon.com/enclaves/latest/user/getting-started.html) if needed. + +Use this ec2 command: + + +Replace `AWS_PROFILE` with your CLI access name. Ex. `AdministratorAccess-1111` +Replace `KEY_NAME` with the uploaded ssh key. Ex. 
`tony_dev_ssh` + +``` +aws ec2 run-instances \ +--image-id ami-067df2907035c28c2 \ +--count 1 \ +--instance-type m6g.2xlarge \ +--enclave-options 'Enabled=true' \ +--block-device-mappings "[{\"DeviceName\":\"/dev/xvda\",\"Ebs\":{\"VolumeSize\":20,\"DeleteOnTermination\":true}}]" \ +--key-name $KEY_NAME \ +--profile $AWS_PROFILE +``` + +These are the image types, ARM vs x86: + +``` +ami-05c3dc660cb6907f0 (64-bit (x86), uefi-preferred) - m6a.xlarge +ami-067df2907035c28c2 (64-bit (Arm), uefi) - m6g.xlarge +``` + +Log into the AWS console and get the IP address of the EC2 instance. + +Add a new security group rule for allowing SSH access from IPv4 and IPv6. Also allow 80 and 443 while you are here. + +## Basic server configurations after creation: + +Get the current IP address and ssh in: + +``` +ssh ec2-user@ec2-[your-ip].us-east-2.compute.amazonaws.com -i ~/.ssh/your_ssh_key.pem +``` + +Upgrade if needed: + +``` +/usr/bin/dnf check-release-update +``` + +## Install packages + + +Much of this comes from the nitro workshop: https://catalog.workshops.aws/nitro-enclaves/en-US/0-getting-started + +Install nitro CLI things: + +``` +sudo dnf install aws-nitro-enclaves-cli -y +``` + +``` +sudo dnf install socat -y +``` + +``` +sudo dnf install aws-nitro-enclaves-cli-devel -y +``` + +``` +sudo usermod -aG ne ec2-user +``` + +``` +sudo usermod -aG docker ec2-user +``` + +``` +sudo systemctl enable --now docker +``` + +Verify: + +``` +nitro-cli --version +``` + +Configure nitro enclaves: + +``` +sudo vim /etc/nitro_enclaves/allocator.yaml +``` + +Basic recommendation: +``` +# How much memory to allocate for enclaves (in MiB). +memory_mib: 21504 +# +# How many CPUs to reserve for enclaves. 
+cpu_count: 6 +``` + +Enable them: + +``` +sudo systemctl enable --now nitro-enclaves-allocator.service +``` + +If you need to reconfig and then restart: + +``` +sudo systemctl restart nitro-enclaves-allocator.service +``` + +## App deployment + +When deploying the app in the Nitro enclave, make sure to set the `APP_MODE` to one of: +- `dev` for development environment +- `preview` for preview/staging environment +- `prod` for production environment +- `custom` for custom environment (requires `ENV_NAME` to be set) + +For custom environments, you must also set the `ENV_NAME` environment variable. This name will be used to: +- Form the KMS key alias (`alias/open-secret-{env_name}-enclave`) +- Form the database URL secret name (`opensecret_{env_name}_database_url`) +- Form the Continuum proxy API key secret name (`continuum_proxy_{env_name}_api_key`) + +For example, to deploy a custom environment named "staging": +```sh +docker build -t opensecret \ +--build-arg APP_MODE=custom \ +--build-arg ENV_NAME=staging \ +. +``` + +### Building and Deploying with Nix (Recommended) + +The recommended way to build and deploy the enclave is using Nix, which provides reproducible builds: + +1. First, build the required Nitro binaries (only needed once): +```bash +just build-nitro-bins +``` + +2. Build the EIF for your target environment: +```bash +# For development +nix build .#eif-dev + +# For production +nix build .#eif-prod + +# For preview +nix build .#eif-preview + +# For custom environments +ENV_NAME=your_env_name nix build .#eif +``` + +This will create a symlink `result` pointing to the built EIF file. + +3. Copy the EIF to your AWS parent instance: +```bash +# For development +just scp-eif-to-aws-dev + +# For production +just scp-eif-to-aws-prod + +# For preview +just scp-eif-to-aws-preview +``` + +4. 
Deploy the EIF: +```bash +# For development +just deploy-dev-nix + +# For production +just deploy-prod-nix + +# For preview +just deploy-preview-nix +``` + +The deployment process will: +1. Build the EIF +2. Copy it to the AWS parent instance +3. Prompt you to review the PCR values +4. After confirmation, terminate any existing enclave +5. Run the new enclave +6. Restart the socat proxy + +### PCR Value Management + +The Nix build process generates PCR (Platform Configuration Register) values that are used by AWS KMS for attestation. You can: + +1. Copy PCR values to a reference file: +```bash +just copy-pcr-dev # For development +just copy-pcr-prod # For production +just copy-pcr-preview # For preview +``` + +2. Verify PCR values match the reference: +```bash +just verify-pcr-dev # For development +just verify-pcr-prod # For production +just verify-pcr-preview # For preview +``` + +This ensures the build is reproducible and matches the expected configuration. + +## Setup SSL + +Install nginx: + +``` +sudo dnf install nginx -y +``` + +Install acm: + +``` +sudo dnf install aws-nitro-enclaves-acm -y +``` + + +Follow instructions for configuring [nginx](https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-refapp.html) in the enclave. + +After following the instructions, reboot the machine. And then start up this enclave program again. 
+ + +## Socat proxy for SSL +Create a socat proxy service so that HTTP program on port 8080 can talk to the enclave: + +``` +sudo vim /etc/systemd/system/socat-proxy.service +``` + +Put in this info: + +``` +[Unit] +Description=Socat Proxy for Nitro Enclave +After=network.target + +[Service] +Type=simple +User=ec2-user +ExecStart=/bin/bash -c 'ENCLAVES=$(nitro-cli describe-enclaves); echo "Enclaves: $ENCLAVES"; ENCLAVE_CID=$(echo "$ENCLAVES" | jq -r '\''.[] | select(.EnclaveName == "opensecret") | .EnclaveCID'\''); echo "ENCLAVE_CID: $ENCLAVE_CID"; if [ -n "$ENCLAVE_CID" ]; then socat TCP-LISTEN:8080,reuseaddr,fork VSOCK-CONNECT:$ENCLAVE_CID:5000; else echo "Enclave not found" >&2; exit 1; fi' +Restart=always + +[Install] +WantedBy=multi-user.target +``` + +Activate service: +``` +sudo systemctl daemon-reload +sudo systemctl enable socat-proxy.service +sudo systemctl start socat-proxy.service +sudo systemctl status socat-proxy.service +``` + +Restart socat proxy anytime there is a change to the enclave program: +``` +sudo systemctl restart socat-proxy.service +``` + +## Vsock DB proxy +Create a vsock proxy service so that enclave program can talk to the database: + +First configure the endpoint into it's allowlist: + +``` +sudo vim /etc/nitro_enclaves/vsock-proxy.yaml +``` + +``` +- {address: [YOUR-DB-ENDPOINT].us-east-2.aws.neon.tech, port: 5432} +``` + +Now create a service that spins this up automatically: + +``` +sudo vim /etc/systemd/system/vsock-db-proxy.service +``` + +``` +[Unit] +Description=Vsock DB Proxy Service +After=network.target + +[Service] +User=root +ExecStart=/usr/bin/vsock-proxy 8001 [YOUR-DB-ENDPOINT].us-east-2.aws.neon.tech 5432 +Restart=always + +[Install] +WantedBy=multi-user.target +``` + +Activate service: + +``` +sudo systemctl daemon-reload +sudo systemctl enable vsock-db-proxy.service +sudo systemctl start vsock-db-proxy.service +sudo systemctl status vsock-db-proxy.service +``` + +A restart of this should not be needed but if you need 
to +``` +sudo systemctl restart vsock-db-proxy.service +``` + +## Vsock GitHub OAuth proxy +Create a vsock proxy service so that enclave program can talk to GitHub: + +First configure the endpoints into their allowlist: + +``` +sudo vim /etc/nitro_enclaves/vsock-proxy.yaml +``` + +Add these lines: +``` +- {address: github.com, port: 443} +- {address: api.github.com, port: 443} +``` + +Now create services that spin these up automatically: + +``` +sudo vim /etc/systemd/system/vsock-github-proxy.service +``` + +``` +[Unit] +Description=Vsock GitHub Proxy Service +After=network.target + +[Service] +User=root +ExecStart=/usr/bin/vsock-proxy 8012 github.com 443 +Restart=always + +[Install] +WantedBy=multi-user.target +``` + +``` +sudo vim /etc/systemd/system/vsock-github-api-proxy.service +``` + +``` +[Unit] +Description=Vsock GitHub API Proxy Service +After=network.target + +[Service] +User=root +ExecStart=/usr/bin/vsock-proxy 8013 api.github.com 443 +Restart=always + +[Install] +WantedBy=multi-user.target +``` + +Activate services: + +``` +sudo systemctl daemon-reload +sudo systemctl enable vsock-github-proxy.service +sudo systemctl start vsock-github-proxy.service +sudo systemctl status vsock-github-proxy.service +sudo systemctl enable vsock-github-api-proxy.service +sudo systemctl start vsock-github-api-proxy.service +sudo systemctl status vsock-github-api-proxy.service +``` + +A restart of these should not be needed but if you need to: +``` +sudo systemctl restart vsock-github-proxy.service +sudo systemctl restart vsock-github-api-proxy.service +``` + +## Vsock Google OAuth proxy +Create vsock proxy services so that enclave program can talk to Google OAuth: + +First configure the endpoints into their allowlist: + +``` +sudo vim /etc/nitro_enclaves/vsock-proxy.yaml +``` + +Add these lines: +``` +- {address: oauth2.googleapis.com, port: 443} +- {address: www.googleapis.com, port: 443} +``` + +Now create services that spin these up automatically: + +``` +sudo vim 
/etc/systemd/system/vsock-google-oauth-proxy.service +``` + +``` +[Unit] +Description=Vsock Google OAuth Proxy Service +After=network.target + +[Service] +User=root +ExecStart=/usr/bin/vsock-proxy 8014 oauth2.googleapis.com 443 +Restart=always + +[Install] +WantedBy=multi-user.target +``` + +``` +sudo vim /etc/systemd/system/vsock-google-api-proxy.service +``` + +``` +[Unit] +Description=Vsock Google API Proxy Service +After=network.target + +[Service] +User=root +ExecStart=/usr/bin/vsock-proxy 8015 www.googleapis.com 443 +Restart=always + +[Install] +WantedBy=multi-user.target +``` + +Activate services: + +``` +sudo systemctl daemon-reload +sudo systemctl enable vsock-google-oauth-proxy.service +sudo systemctl start vsock-google-oauth-proxy.service +sudo systemctl status vsock-google-oauth-proxy.service +sudo systemctl enable vsock-google-api-proxy.service +sudo systemctl start vsock-google-api-proxy.service +sudo systemctl status vsock-google-api-proxy.service +``` + +A restart of these should not be needed but if you need to: +``` +sudo systemctl restart vsock-google-oauth-proxy.service +sudo systemctl restart vsock-google-api-proxy.service +``` + +## Vsock Resend proxy +Create a vsock proxy service so that enclave program can talk to resend: + +First configure the endpoint into it's allowlist: + +``` +sudo vim /etc/nitro_enclaves/vsock-proxy.yaml +``` + +``` +- {address: api.resend.com, port: 443} +``` + +Now create a service that spins this up automatically: + +``` +sudo vim /etc/systemd/system/vsock-resend-proxy.service +``` + +``` +[Unit] +Description=Vsock Resend Proxy Service +After=network.target + +[Service] +User=root +ExecStart=/usr/bin/vsock-proxy 8010 api.resend.com 443 +Restart=always + +[Install] +WantedBy=multi-user.target +``` + +Activate service: + +``` +sudo systemctl daemon-reload +sudo systemctl enable vsock-resend-proxy.service +sudo systemctl start vsock-resend-proxy.service +sudo systemctl status vsock-resend-proxy.service +``` + +A 
restart of this should not be needed but if you need to +``` +sudo systemctl restart vsock-resend-proxy.service +``` + + +## Vsock Continuum API proxy +Create a vsock proxy service so that the continuum-proxy can talk to the Continuum API: + +First configure the endpoint into its allowlist: + +``` +sudo vim /etc/nitro_enclaves/vsock-proxy.yaml +``` + +Add this line: +``` +- {address: api.ai.confidential.cloud, port: 443} +- {address: cdn.confidential.cloud, port: 443} +- {address: attestation.ai.confidential.cloud, port: 3000} +- {address: weu.service.attest.azure.net, port: 443} +- {address: kdsintf.amd.com, port: 443} +- {address: continuumd50111e7.weu.attest.azure.net, port: 443} +``` + +Restart the nitro vsock proxy service: +``` +sudo systemctl restart nitro-enclaves-vsock-proxy.service +``` + +#### Continuum API +Now create a service that spins this up automatically: + +``` +sudo vim /etc/systemd/system/vsock-continuum-proxy.service +``` + +Add the following content: +``` +[Unit] +Description=Vsock Continuum API Proxy Service +After=network.target + +[Service] +User=root +ExecStart=/usr/bin/vsock-proxy 8004 api.ai.confidential.cloud 443 +Restart=always + +[Install] +WantedBy=multi-user.target +``` + +#### Continuum CDN +``` +sudo vim /etc/systemd/system/vsock-continuum-cdn.service +``` + +Add the following content: +``` +[Unit] +Description=Vsock Continuum CDN Proxy Service +After=network.target + +[Service] +User=root +ExecStart=/usr/bin/vsock-proxy 8005 cdn.confidential.cloud 443 +Restart=always + +[Install] +WantedBy=multi-user.target +``` + +#### Continuum Attestation +``` +sudo vim /etc/systemd/system/vsock-continuum-attestation.service +``` + +Add the following content: +``` +[Unit] +Description=Vsock Continuum Attestation Proxy Service +After=network.target + +[Service] +User=root +ExecStart=/usr/bin/vsock-proxy 8006 attestation.ai.confidential.cloud 3000 +Restart=always + +[Install] +WantedBy=multi-user.target +``` + +#### Azure Attestation +``` +sudo 
vim /etc/systemd/system/vsock-azure-attestation.service +``` + +Add the following content: +``` +[Unit] +Description=Vsock Azure Attestation Proxy Service +After=network.target + +[Service] +User=root +ExecStart=/usr/bin/vsock-proxy 8007 weu.service.attest.azure.net 443 +Restart=always + +[Install] +WantedBy=multi-user.target +``` + +#### AMD KDS Interface +``` +sudo vim /etc/systemd/system/vsock-amd-kds.service +``` + +Add the following content: +``` +[Unit] +Description=Vsock AMD KDS Interface Proxy Service +After=network.target + +[Service] +User=root +ExecStart=/usr/bin/vsock-proxy 8008 kdsintf.amd.com 443 +Restart=always + +[Install] +WantedBy=multi-user.target +``` + +#### Continuum Azure Attestation +``` +sudo vim /etc/systemd/system/vsock-azure-continuum.service +``` + +Add the following content: +``` +[Unit] +Description=Vsock Azure Continuum Attestation Proxy Service +After=network.target + +[Service] +User=root +ExecStart=/usr/bin/vsock-proxy 8009 continuumd50111e7.weu.attest.azure.net 443 +Restart=always + +[Install] +WantedBy=multi-user.target +``` + + +Activate the services: + +``` +sudo systemctl daemon-reload +sudo systemctl enable vsock-continuum-proxy.service +sudo systemctl start vsock-continuum-proxy.service +sudo systemctl status vsock-continuum-proxy.service +sudo systemctl enable vsock-continuum-cdn.service +sudo systemctl start vsock-continuum-cdn.service +sudo systemctl status vsock-continuum-cdn.service +sudo systemctl enable vsock-continuum-attestation.service +sudo systemctl start vsock-continuum-attestation.service +sudo systemctl status vsock-continuum-attestation.service +sudo systemctl enable vsock-azure-attestation.service +sudo systemctl start vsock-azure-attestation.service +sudo systemctl status vsock-azure-attestation.service +sudo systemctl enable vsock-amd-kds.service +sudo systemctl start vsock-amd-kds.service +sudo systemctl status vsock-amd-kds.service +sudo systemctl enable vsock-azure-continuum.service +sudo systemctl 
start vsock-azure-continuum.service +sudo systemctl status vsock-azure-continuum.service +``` + +If you need to restart these services: +``` +sudo systemctl restart vsock-continuum-proxy.service +sudo systemctl restart vsock-continuum-cdn.service +sudo systemctl restart vsock-continuum-attestation.service +sudo systemctl restart vsock-azure-attestation.service +sudo systemctl restart vsock-amd-kds.service +sudo systemctl restart vsock-azure-continuum.service +``` + +#### Vsock AWS SQS proxy +Create a vsock proxy service so that enclave program can talk to AWS SQS: + +First configure the endpoint into its allowlist: + +```sh +sudo vim /etc/nitro_enclaves/vsock-proxy.yaml +``` + +Add this line: +``` +- {address: sqs.us-east-2.amazonaws.com, port: 443} +``` + +Now create a service that spins this up automatically: + +```sh +sudo vim /etc/systemd/system/vsock-sqs-proxy.service +``` + +``` +[Unit] +Description=Vsock AWS SQS Proxy Service +After=network.target + +[Service] +User=root +ExecStart=/usr/bin/vsock-proxy 8016 sqs.us-east-2.amazonaws.com 443 +Restart=always + +[Install] +WantedBy=multi-user.target +``` + +Activate the service: + +```sh +sudo systemctl daemon-reload +sudo systemctl enable vsock-sqs-proxy.service +sudo systemctl start vsock-sqs-proxy.service +sudo systemctl status vsock-sqs-proxy.service +``` + +A restart should not be needed but if you need to: +```sh +sudo systemctl restart vsock-sqs-proxy.service +``` + +## Vsock Billing proxy +Create a vsock proxy service so that enclave program can talk to the billing service: + +First configure the endpoints into their allowlist: + +```sh +sudo vim /etc/nitro_enclaves/vsock-proxy.yaml +``` + +Add one of these lines depending on your environment: +``` +- {address: billing-dev.opensecret.cloud, port: 443} # for dev environment +- {address: billing.opensecret.cloud, port: 443} # for prod environment +``` + +Now create a service that spins this up automatically: + +```sh +sudo vim 
/etc/systemd/system/vsock-billing-proxy.service +``` + +``` +[Unit] +Description=Vsock Billing Proxy Service +After=network.target + +[Service] +User=root +ExecStart=/usr/bin/vsock-proxy 8017 billing-dev.opensecret.cloud 443 # Change to billing.opensecret.cloud for prod +Restart=always + +[Install] +WantedBy=multi-user.target +``` + +Activate the service: + +```sh +sudo systemctl daemon-reload +sudo systemctl enable vsock-billing-proxy.service +sudo systemctl start vsock-billing-proxy.service +sudo systemctl status vsock-billing-proxy.service +``` + +A restart should not be needed but if you need to: +```sh +sudo systemctl restart vsock-billing-proxy.service +``` + +### Continuum Attestation Updator + +We need to run a script on the parent that updates the URL for the continuum azure attestation endpoint. + +On the parent: + +``` +scp update_continuum_url.sh ec2-user@[aws-parent-instance-ip]:~/ +``` + +``` +sudo vim /etc/systemd/system/update-continuum-url.service +``` + +``` +[Unit] +Description=Update Continuum URL Service +After=network-online.target +Wants=network-online.target + +[Service] +ExecStart=/home/ec2-user/update_continuum_url.sh +User=ec2-user +Group=ec2-user +Type=simple +Restart=on-failure +RestartSec=30s + +[Install] +WantedBy=multi-user.target +``` + +``` +sudo systemctl daemon-reload +sudo systemctl enable update-continuum-url.service +sudo systemctl start update-continuum-url.service +sudo systemctl status update-continuum-url.service +``` + +## KMS Key + +You need to create an AWS KMS key that the enclave can encrypt/decrypt things to. 
Name it according to your environment: +- `open-secret-dev-enclave` for dev environment +- `open-secret-preview1-enclave` for preview environment +- `open-secret-prod-enclave` for prod environment +- `open-secret-{env_name}-enclave` for custom environments (replace `{env_name}` with your ENV_NAME) + +Here is an example policy, replace with your values: + +```json +{ + "Version": "2012-10-17", + "Id": "key-consolepolicy-3", + "Statement": [ + { + "Sid": "Limited Root Account Permissions", + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::{ACCOUNT}:root" + }, + "Action": [ + "kms:Create*", + "kms:Describe*", + "kms:Enable*", + "kms:List*", + "kms:Put*", + "kms:Update*", + "kms:Revoke*", + "kms:Disable*", + "kms:Get*", + "kms:Delete*", + "kms:TagResource", + "kms:UntagResource", + "kms:ScheduleKeyDeletion", + "kms:CancelKeyDeletion" + ], + "Resource": "*" + }, + { + "Sid": "Enable decrypt from enclave", + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:sts::{ACCOUNT}:assumed-role/acm-role/i-{INSTANCE}" + }, + "Action": [ + "kms:Decrypt", + "kms:GenerateDataKey*" + ], + "Resource": "*", + "Condition": { + "StringEqualsIgnoreCase": { + "kms:RecipientAttestation:ImageSha384": "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + } + } + }, + { + "Sid": "Enable encrypt from instance", + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:sts::{ACCOUNT}:assumed-role/acm-role/i-{INSTANCE}" + }, + "Action": "kms:Encrypt", + "Resource": "*" + } + ] +} +``` + +Add a policy to your EC2's IAM role with this info: + +``` +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "kms:Decrypt", + "kms:GenerateDataKey", + "kms:GenerateDataKeyWithoutPlaintext", + "kms:CreateAlias", + "kms:CreateKey", + "kms:DeleteAlias", + "kms:Describe*", + "kms:GenerateRandom", + "kms:Get*", + "kms:List*", + "kms:TagResource", + "kms:UntagResource" + ], + "Resource": "*" + } + ] +} +``` + +## Resend key + 
+After the DB is initialized, we need to store the resend api key encrypted to the enclave KMS key. + +```sh +echo -n "API_KEY" | base64 -w 0 +``` + +Take that output and encrypt to the KMS key, from a machine that has encrypt access to the key: + +```sh +aws kms encrypt --key-id "KEY_ARN" --plaintext "BASE64_KEY" --query CiphertextBlob --output text +``` + +Take that encrypted base64 and insert it into the `enclave_secrets` table with key as `resend_api_key` and value as the base64. + +```sql +INSERT INTO enclave_secrets (key, value) +VALUES ('resend_api_key', decode('your_base64_string', 'base64')); +``` + +## Github oauth info + +After the DB is initialized, we need to store the github secret key encrypted to the enclave KMS key. + +### Github secret + +```sh +echo -n "GITHUB_SECRET" | base64 -w 0 +``` + +Take that output and encrypt to the KMS key, from a machine that has encrypt access to the key: + +```sh +aws kms encrypt --key-id "KEY_ARN" --plaintext "BASE64_KEY" --query CiphertextBlob --output text +``` + +Take that encrypted base64 and insert it into the `enclave_secrets` table with key as `github_client_secret` and value as the base64. + +```sql +INSERT INTO enclave_secrets (key, value) +VALUES ('github_client_secret', decode('your_base64_string', 'base64')); +``` + +### Github client id + +```sh +echo -n "GITHUB_CLIENT_ID" | base64 -w 0 +``` + +Take that output and encrypt to the KMS key, from a machine that has encrypt access to the key: + +```sh +aws kms encrypt --key-id "KEY_ARN" --plaintext "BASE64_KEY" --query CiphertextBlob --output text +``` + +Take that encrypted base64 and insert it into the `enclave_secrets` table with key as `github_client_id` and value as the base64. + +```sql +INSERT INTO enclave_secrets (key, value) +VALUES ('github_client_id', decode('your_base64_string', 'base64')); +``` + +## Google oauth info + +After the DB is initialized, we need to store the google secret key encrypted to the enclave KMS key. 
+ +### Google secret + +```sh +echo -n "GOOGLE_SECRET" | base64 -w 0 +``` + +Take that output and encrypt to the KMS key, from a machine that has encrypt access to the key: + +```sh +aws kms encrypt --key-id "KEY_ARN" --plaintext "BASE64_KEY" --query CiphertextBlob --output text +``` + +Take that encrypted base64 and insert it into the `enclave_secrets` table with key as `google_client_secret` and value as the base64. + +```sql +INSERT INTO enclave_secrets (key, value) +VALUES ('google_client_secret', decode('your_base64_string', 'base64')); +``` + +### Google client id + +```sh +echo -n "GOOGLE_CLIENT_ID" | base64 -w 0 +``` + +Take that output and encrypt to the KMS key, from a machine that has encrypt access to the key: + +```sh +aws kms encrypt --key-id "KEY_ARN" --plaintext "BASE64_KEY" --query CiphertextBlob --output text +``` + +Take that encrypted base64 and insert it into the `enclave_secrets` table with key as `google_client_id` and value as the base64. + +```sql +INSERT INTO enclave_secrets (key, value) +VALUES ('google_client_id', decode('your_base64_string', 'base64')); +``` + +### SQS Queue URL + +After the DB is initialized, we need to store the SQS queue URL encrypted to the enclave KMS key. + +```sh +echo -n "SQS_QUEUE_URL" | base64 -w 0 +``` + +Take that output and encrypt to the KMS key, from a machine that has encrypt access to the key: + +```sh +aws kms encrypt --key-id "KEY_ARN" --plaintext "BASE64_URL" --query CiphertextBlob --output text +``` + +Take that encrypted base64 and insert it into the `enclave_secrets` table with key as `sqs_queue_ai_events_url` and value as the base64. 
+ +```sql +INSERT INTO enclave_secrets (key, value) +VALUES ('sqs_queue_ai_events_url', decode('your_base64_string', 'base64')); +``` + +#### SQS Permissions + +Add this policy to your EC2's IAM role to allow SQS access: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "sqs:SendMessage", + "sqs:GetQueueUrl" + ], + "Resource": [ + "arn:aws:sqs:us-east-2:YOUR_ACCOUNT_ID:ai-events*" + ] + } + ] +} +``` + +Replace `YOUR_ACCOUNT_ID` with your AWS account ID and adjust the queue name pattern if needed. + +#### Billing API Key + +```sh +echo -n "BILLING_API_KEY" | base64 -w 0 +``` + +Take that output and encrypt to the KMS key, from a machine that has encrypt access to the key: + +```sh +aws kms encrypt --key-id "KEY_ARN" --plaintext "BASE64_KEY" --query CiphertextBlob --output text +``` + +Take that encrypted base64 and insert it into the `enclave_secrets` table with key as `billing_api_key` and value as the base64. + +```sql +INSERT INTO enclave_secrets (key, value) +VALUES ('billing_api_key', decode('your_base64_string', 'base64')); +``` + +#### Billing Server URL + +```sh +echo -n "BILLING_SERVER_URL" | base64 -w 0 +``` + +Take that output and encrypt to the KMS key, from a machine that has encrypt access to the key: + +```sh +aws kms encrypt --key-id "KEY_ARN" --plaintext "BASE64_KEY" --query CiphertextBlob --output text +``` + +Take that encrypted base64 and insert it into the `enclave_secrets` table with key as `billing_server_url` and value as the base64. + +```sql +INSERT INTO enclave_secrets (key, value) +VALUES ('billing_server_url', decode('your_base64_string', 'base64')); +``` + + +## Secrets Manager + +### Postgresql +Need to store the postgresql string encrypted to the enclave. 
+ +```sh +echo -n "DB_URL" | base64 -w 0 +``` + +Take that output and encrypt to the KMS key, from a machine that has encrypt access to the key: + +```sh +aws kms encrypt --key-id "KEY_ARN" --plaintext "BASE64_KEY" --query CiphertextBlob --output text +``` + +Take that value and insert into SecretsManager with the appropriate name: +- `opensecret_dev_database_url` for dev environment +- `opensecret_preview1_database_url` for preview environment +- `opensecret_prod_database_url` for prod environment + +#### Continuum API Key +Need to store the continuum api string encrypted to the enclave. + +```sh +echo -n "API_KEY" | base64 -w 0 +``` + +Take that output and encrypt to the KMS key, from a machine that has encrypt access to the key: + +```sh +aws kms encrypt --key-id "KEY_ARN" --plaintext "BASE64_KEY" --query CiphertextBlob --output text +``` + +Take that value and insert into SecretsManager with the appropriate name: +- `continuum_proxy_dev_api_key` for dev environment +- `continuum_proxy_preview1_api_key` for preview environment +- `continuum_proxy_prod_api_key` for prod environment + +## Credential Requester + +This setup will run the credential requester on port 8003 of the parent instance, making it available for the enclave to request aws credentials. + +The ec2 role will need a new inline policy to request secrets from Secrets Manager. 
Add the appropriate ARNs for your environment: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "secretsmanager:GetSecretValue", + "Resource": [ + "arn:aws:secretsmanager:us-east-2:XXX:secret:continuum_proxy_dev_api_key-XXX", + "arn:aws:secretsmanager:us-east-2:XXX:secret:opensecret_dev_database_url-XXX", + "arn:aws:secretsmanager:us-east-2:XXX:secret:continuum_proxy_preview1_api_key-XXX", + "arn:aws:secretsmanager:us-east-2:XXX:secret:opensecret_preview1_database_url-XXX", + "arn:aws:secretsmanager:us-east-2:XXX:secret:continuum_proxy_prod_api_key-XXX", + "arn:aws:secretsmanager:us-east-2:XXX:secret:opensecret_prod_database_url-XXX" + ] + } + ] +} +``` + +Replace with the correct ARNs for those keys. + +Build the docker image. + +```sh +cd nitro-toolkit/credential_requester +docker build -t credential-requester . +``` + +Store it for transfer to the parent: + +```sh +rm credential-requester.tar && docker save -o credential-requester.tar credential-requester +``` + +Now SCP into the AWS Parent instance: + +```sh +scp credential-requester.tar ec2-user@[aws-parent-instance-ip]:~/ +``` + +Load the docker image and tag it: + +```sh +ssh ec2-user@[aws-parent-instance-ip] +docker load -i credential-requester.tar +docker tag localhost/credential-requester:latest credential-requester:latest +``` + +Now run it: + +```sh +docker run -d --restart always --name credential-requester --device=/dev/vsock:/dev/vsock -v /var/run/vsock:/var/run/vsock --privileged -e PORT=8003 credential-requester:latest +``` + +## Logging Setup + +To set up logging from the enclave to CloudWatch: + +1. Build the logging Docker image: + +```sh +cd nitro-toolkit/logging +docker build -t enclave-logging . +``` + +2. Save the Docker image: + +```sh +docker save -o enclave-logging.tar enclave-logging +``` + +3. SCP the Docker image to the AWS parent instance: + +```sh +scp enclave-logging.tar ec2-user@[aws-parent-instance-ip]:~/ +``` + +4. 
SSH into the AWS parent instance and load the Docker image: + +```sh +ssh ec2-user@[aws-parent-instance-ip] +docker load -i enclave-logging.tar +docker tag localhost/enclave-logging:latest enclave-logging:latest +``` + +5. Run the logging container: + +```sh +docker run -d --restart always --name enclave-logging \ + --device=/dev/vsock:/dev/vsock \ + -v /var/run/vsock:/var/run/vsock \ + --privileged \ + -e VSOCK_PORT=8011 \ + -e LOG_GROUP=/aws/nitro-enclaves/enclave-dev \ + -e LOG_STREAM=enclave-logs-dev \ + -e AWS_REGION=us-east-2 \ + enclave-logging:latest +``` + +Replace `enclave-dev` and `enclave-logs-dev` with appropriate names for your development environment. For preview, use `enclave-preview` and `enclave-logs-preview`. For production, use `enclave-prod` and `enclave-logs-prod`. + +### Setting up CloudWatch in AWS Console + +Before running the logging container, you need to set up the necessary permissions and log groups in AWS CloudWatch. Follow these steps: + +1. Log in to the AWS Management Console. + +2. Navigate to the IAM (Identity and Access Management) service. + +3. In the left sidebar, click on "Roles". + +4. Find and click on the IAM role associated with your EC2 instance running the Nitro Enclave. + +5. Click the "Add permissions" button and choose "Attach policies". + +6. Search for and attach the "CloudWatchLogsFullAccess" policy. Note: In a production environment, you should create a more restrictive custom policy. + +7. Navigate to the CloudWatch service in the AWS Console. + +8. In the left sidebar, under "Logs", click on "Log groups". + +9. Click the "Create log group" button. + +10. Enter the name of your log group (e.g., `/aws/nitro-enclaves/enclave-dev` for development or `/aws/nitro-enclaves/enclave-prod` for production). + +11. Click "Create" to finalize the log group creation. 
+ +After completing these steps, your EC2 instance will have the necessary permissions to write logs to CloudWatch, and the log group will be ready to receive logs from your Nitro Enclave. + +Remember to repeat steps 9-11 if you need separate log groups for different environments (e.g., development and production). + +Once CloudWatch is set up and the logging container is running, you can view your enclave logs by: + +1. Going to the CloudWatch service in the AWS Console. +2. Clicking on "Log groups" in the left sidebar. +3. Selecting your log group (e.g., `/aws/nitro-enclaves/enclave-dev`). +4. Clicking on the log stream (e.g., `enclave-logs-dev`) to view the logs. + +This setup allows you to monitor your Nitro Enclave's logs in real-time through the AWS CloudWatch console. diff --git a/entrypoint.sh b/entrypoint.sh new file mode 100644 index 0000000..fb455ce --- /dev/null +++ b/entrypoint.sh @@ -0,0 +1,514 @@ +#!/bin/bash + +set -e + +# Function for logging +log() { + echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" +} + +log "Starting entrypoint script" + +# Start the logging script +log "Starting log exports" + +# Redirect all output to the logging script via VSOCK +exec > >(socat - VSOCK-CONNECT:3:8011) 2>&1 + +# Read and set APP_MODE from file +log "Reading /app/APP_MODE" +if [ -f /app/APP_MODE ]; then + APP_MODE="" + APP_MODE="$(cat /app/APP_MODE)" || { log "Failed to read /app/APP_MODE"; exit 1; } + export APP_MODE + log "Set APP_MODE=$APP_MODE from /app/APP_MODE" +else + log "ERROR: /app/APP_MODE is missing. 
Please ensure the file exists and contains a valid mode (dev/preview/prod/custom)" + exit 1 +fi + +log "Starting entrypoint script" +log "APP_MODE=$APP_MODE" + +# Configure loopback interface +log "Configuring loopback interface" +ip addr add 127.0.0.1/8 dev lo +ip link set dev lo up + +# Function to send request and receive response via VSOCK +vsock_request() { + local cid=$1 + local port=$2 + local request=$3 + + response=$(python3 /app/vsock_helper.py "$cid" "$port" "$request") + + # Check if the response contains an error + if echo "$response" | jq -e 'has("error")' > /dev/null; then + error_message=$(echo "$response" | jq -r '.error') + log "VSOCK request failed: $error_message" + return 1 + fi + + echo "$response" +} + +# Function to get AWS credentials +get_aws_credentials() { + local cid=3 + local port=8003 + local request='{"request_type":"credentials","key_name":null}' + + vsock_request $cid $port "$request" +} + +# Function to get secret from Secrets Manager +get_database_url_secret() { + local cid=3 + local port=8003 + + # Determine the correct secret name based on APP_MODE + local secret_name + if [ "$APP_MODE" = "prod" ]; then + secret_name="opensecret_prod_database_url" + elif [ "$APP_MODE" = "preview" ]; then + secret_name="opensecret_preview1_database_url" + elif [ "$APP_MODE" = "custom" ]; then + if [ -z "$ENV_NAME" ]; then + log "Error: ENV_NAME must be set when using custom mode" + exit 1 + fi + secret_name="opensecret_${ENV_NAME}_database_url" + else + secret_name="opensecret_dev_database_url" + fi + + local request="{\"request_type\":\"SecretsManager\",\"key_name\":\"$secret_name\"}" + + vsock_request $cid $port "$request" +} + +# Function to get secret from Secrets Manager +get_continuum_proxy_api_key_secret() { + local cid=3 + local port=8003 + + # Determine the correct secret name based on APP_MODE + local secret_name + if [ "$APP_MODE" = "prod" ]; then + secret_name="continuum_proxy_prod_api_key" + elif [ "$APP_MODE" = "preview" ]; then + 
secret_name="continuum_proxy_preview1_api_key" + elif [ "$APP_MODE" = "custom" ]; then + if [ -z "$ENV_NAME" ]; then + log "Error: ENV_NAME must be set when using custom mode" + exit 1 + fi + secret_name="continuum_proxy_${ENV_NAME}_api_key" + else + secret_name="continuum_proxy_dev_api_key" + fi + + local request="{\"request_type\":\"SecretsManager\",\"key_name\":\"$secret_name\"}" + + vsock_request $cid $port "$request" +} + +# Get AWS credentials +log "Fetching AWS credentials" +aws_creds=$(get_aws_credentials) +if [ -z "$aws_creds" ]; then + log "Error: Failed to get AWS credentials" + exit 1 +fi + +# Add error checking for jq parsing +if ! access_key_id=$(echo "$aws_creds" | jq -r '.response_value.AccessKeyId'); then + log "Error: Failed to parse AccessKeyId from AWS credentials" + log "AWS credentials response: $aws_creds" + exit 1 +fi +if ! secret_access_key=$(echo "$aws_creds" | jq -r '.response_value.SecretAccessKey'); then + log "Error: Failed to parse SecretAccessKey from AWS credentials" + exit 1 +fi +if ! session_token=$(echo "$aws_creds" | jq -r '.response_value.Token'); then + log "Error: Failed to parse Token from AWS credentials" + exit 1 +fi +if ! 
region=$(echo "$aws_creds" | jq -r '.response_value.Region'); then + log "Error: Failed to parse Region from AWS credentials" + exit 1 +fi + +log "AWS credentials retrieved and parsed successfully" + +# Get encrypted database URL from Secrets Manager +log "Fetching encrypted database URL" +secret_response=$(get_database_url_secret) +log "Retrieved raw secret response" + +# Extract the database_url value from the JSON structure +encrypted_db_url=$(echo "$secret_response" | jq -r '.response_value | fromjson | .database_url') +if [ -z "$encrypted_db_url" ]; then + log "Error: Failed to get encrypted database URL" + log "Secret response: $secret_response" + exit 1 +fi + +log "Encrypted database URL retrieved successfully" + +# Decrypt the database URL using kmstool_enclave_cli +log "Decrypting database URL" +decryption_output=$(kmstool_enclave_cli decrypt \ + --region "$region" \ + --proxy-port 8000 \ + --aws-access-key-id "$access_key_id" \ + --aws-secret-access-key "$secret_access_key" \ + --aws-session-token "$session_token" \ + --ciphertext "$encrypted_db_url" 2>&1) + +log "Got decryption output, parsing URL" + +decrypted_db_url=$(echo "$decryption_output" | sed -n 's/PLAINTEXT: //p') + +if [ -z "$decrypted_db_url" ]; then + log "Error: Failed to decrypt database URL" + log "Decryption output: $decryption_output" + exit 1 +fi + +log "Database URL decrypted successfully" + +# Decode the base64 decrypted URL +decoded_db_url=$(echo "$decrypted_db_url" | base64 -d) + +if [ -z "$decoded_db_url" ]; then + log "Error: Failed to decode base64 database URL" + exit 1 +fi + +# Extract the hostname from the decoded DATABASE_URL and add it to /etc/hosts +DB_HOSTNAME=$(echo "$decoded_db_url" | sed -n 's/.*@\(.*\)\/.*/\1/p') +if [ -z "$DB_HOSTNAME" ]; then + log "Error: Failed to extract DB_HOSTNAME from decoded URL" + exit 1 +fi + +echo "127.0.0.1 $DB_HOSTNAME" >> /etc/hosts +log "Added $DB_HOSTNAME to /etc/hosts" + +# Add OpenAI API hostname to /etc/hosts +echo "127.0.0.1 
api.openai.com" >> /etc/hosts +log "Added api.openai.com to /etc/hosts" + +# Add Resend API hostname to /etc/hosts +echo "127.0.0.8 api.resend.com" >> /etc/hosts +log "Added api.resend.com to /etc/hosts" + +# Add continuum hostnames to /etc/hosts +echo "127.0.0.2 api.ai.confidential.cloud" >> /etc/hosts +echo "127.0.0.3 cdn.confidential.cloud" >> /etc/hosts +echo "127.0.0.4 attestation.ai.confidential.cloud" >> /etc/hosts +echo "127.0.0.5 weu.service.attest.azure.net" >> /etc/hosts +echo "127.0.0.6 kdsintf.amd.com" >> /etc/hosts + +log "Added confidential.cloud, Azure, and AMD domains to /etc/hosts" + +# Add GitHub OAuth hostnames to /etc/hosts +echo "127.0.0.9 github.com" >> /etc/hosts +echo "127.0.0.10 api.github.com" >> /etc/hosts +log "Added GitHub OAuth domains to /etc/hosts" + +# Add Google OAuth hostnames to /etc/hosts +echo "127.0.0.11 oauth2.googleapis.com" >> /etc/hosts +echo "127.0.0.12 www.googleapis.com" >> /etc/hosts +log "Added Google OAuth domains to /etc/hosts" + +# Add AWS SQS hostname to /etc/hosts +echo "127.0.0.13 sqs.us-east-2.amazonaws.com" >> /etc/hosts +log "Added AWS SQS domain to /etc/hosts" + +# Add billing hostname to /etc/hosts based on APP_MODE +if [ "$APP_MODE" = "prod" ]; then + echo "127.0.0.14 billing.opensecret.cloud" >> /etc/hosts + log "Added production billing domain to /etc/hosts" +else + echo "127.0.0.14 billing-dev.opensecret.cloud" >> /etc/hosts + log "Added development billing domain to /etc/hosts" +fi + +touch /app/libnsm.so +log "Created /app/libnsm.so" + +# Print network information for debugging +log "Network configuration:" +ip addr show +ip route +cat /etc/hosts + +# Start the traffic forwarder for the database in the background +log "Starting database traffic forwarder" +python3 /app/traffic_forwarder.py 127.0.0.1 5432 3 8001 & + +# Start the traffic forwarder for OpenAI API in the background +log "Starting OpenAI API traffic forwarder" +python3 /app/traffic_forwarder.py 127.0.0.1 443 3 8002 & + +# Start the 
traffic forwarder for Resend API in the background +log "Starting Resend API traffic forwarder" +python3 /app/traffic_forwarder.py 127.0.0.8 443 3 8010 & + +# Start the traffic forwarder for Continuum API in the background +log "Starting Continuum API traffic forwarder" +python3 /app/traffic_forwarder.py 127.0.0.2 443 3 8004 & + +# Start the traffic forwarder for Continuum CDN in the background +log "Starting Continuum CDN traffic forwarder" +python3 /app/traffic_forwarder.py 127.0.0.3 443 3 8005 & + +# Start the traffic forwarder for Continuum Attestation in the background +log "Starting Continuum Attestation traffic forwarder" +python3 /app/traffic_forwarder.py 127.0.0.4 3000 3 8006 & + +# Start the traffic forwarder for Azure Attestation in the background +log "Starting Azure Attestation traffic forwarder" +python3 /app/traffic_forwarder.py 127.0.0.5 443 3 8007 & + +# Start the traffic forwarder for AMD KDS Interface in the background +log "Starting AMD KDS Interface traffic forwarder" +python3 /app/traffic_forwarder.py 127.0.0.6 443 3 8008 & + +# Start the traffic forwarder for GitHub in the background +log "Starting GitHub traffic forwarder" +python3 /app/traffic_forwarder.py 127.0.0.9 443 3 8012 & + +# Start the traffic forwarder for GitHub API in the background +log "Starting GitHub API traffic forwarder" +python3 /app/traffic_forwarder.py 127.0.0.10 443 3 8013 & + +# Start the traffic forwarder for Google OAuth in the background +log "Starting Google OAuth traffic forwarder" +python3 /app/traffic_forwarder.py 127.0.0.11 443 3 8014 & + +# Start the traffic forwarder for Google APIs in the background +log "Starting Google APIs traffic forwarder" +python3 /app/traffic_forwarder.py 127.0.0.12 443 3 8015 & + +# Start the traffic forwarder for AWS SQS in the background +log "Starting AWS SQS traffic forwarder" +python3 /app/traffic_forwarder.py 127.0.0.13 443 3 8016 & + +# Start the traffic forwarder for billing service in the background +log "Starting billing 
service traffic forwarder" +python3 /app/traffic_forwarder.py 127.0.0.14 443 3 8017 & + +# Wait for the forwarders to start +log "Waiting for forwarders to start" +sleep 5 + +# Fetch the current Azure Attestation subdomain +log "Fetching current Azure Attestation subdomain" +manifest=$(curl -s https://cdn.confidential.cloud/continuum/v1/manifest.toml) +azure_subdomain=$(echo "$manifest" | awk '/^\[attestationService\.cpu\.azureSEVSNP\]/{flag=1; next} /^\[/{flag=0} flag && /^maaURL *=/{print $3; exit}' | tr -d '"' | awk -F[/:/.] '{print $4}') + +if [ -z "$azure_subdomain" ]; then + log "Error: Failed to extract Azure Attestation subdomain from manifest" + exit 1 +fi + +log "Extracted Azure Attestation subdomain: $azure_subdomain" + +echo "127.0.0.7 ${azure_subdomain}.weu.attest.azure.net" >> /etc/hosts + +log "Added continuum azure subdomain to host" + +# Start the traffic forwarder for Azure Continuum Attestation in the background +log "Starting Azure Continuum Attestation traffic forwarder" +python3 /app/traffic_forwarder.py 127.0.0.7 443 3 8009 & + +# Wait for the forwarders to start +log "Waiting for forwarders to start" +sleep 2 + +# Test the connection to PostgreSQL +log "Testing connection to PostgreSQL:" +if timeout 5 bash -c ' /dev/null; then + error_message=$(echo "$continuum_proxy_api_key_response" | jq -r '.response_value') + log "Error: Failed to get Continuum Proxy API key. 
Error message: $error_message" + exit 1 + fi + + # Extract the encrypted API key value from the JSON structure + continuum_proxy_api_key_encrypted=$(echo "$continuum_proxy_api_key_response" | jq -r '.response_value | fromjson | .api_key') + if [ -z "$continuum_proxy_api_key_encrypted" ]; then + log "Error: Failed to extract Continuum Proxy API key from the response" + log "Secret response: $continuum_proxy_api_key_response" + exit 1 + fi + + # Decrypt the API key using kmstool_enclave_cli + log "Decrypting Continuum Proxy API key" + decryption_output=$(kmstool_enclave_cli decrypt \ + --region "$region" \ + --proxy-port 8000 \ + --aws-access-key-id "$access_key_id" \ + --aws-secret-access-key "$secret_access_key" \ + --aws-session-token "$session_token" \ + --ciphertext "$continuum_proxy_api_key_encrypted" 2>&1) + + decrypted_api_key=$(echo "$decryption_output" | sed -n 's/PLAINTEXT: //p') + + if [ -z "$decrypted_api_key" ]; then + log "Error: Failed to decrypt Continuum Proxy API key" + log "Decryption output: $decryption_output" + exit 1 + fi + + # Base64 decode the decrypted API key + continuum_proxy_api_key=$(echo "$decrypted_api_key" | base64 -d) + + if [ -z "$continuum_proxy_api_key" ]; then + log "Error: Failed to base64 decode Continuum Proxy API key" + exit 1 + fi + + log "Continuum Proxy API key retrieved, decrypted, and decoded successfully" + + log "Starting continuum-proxy on port 8092" + /app/continuum-proxy --port 8092 --apiKey "$continuum_proxy_api_key" & + + # Wait for the proxy to start + sleep 5 + + # Set OPENAI_API_BASE to point to the local proxy + export OPENAI_API_BASE="http://127.0.0.1:8092" +else + # For local mode, use the default OpenAI API base or the one set in the environment + export OPENAI_API_BASE=${OPENAI_API_BASE:-"https://api.openai.com"} +fi + +# Start the opensecret +log "Starting opensecret..." 
+RUST_LOG_STYLE=never RUST_LOG=debug APP_MODE="$APP_MODE" OPENAI_API_BASE="$OPENAI_API_BASE" /app/opensecret & + +# Wait for the opensecret to start +log "Waiting for opensecret to start" +sleep 5 + +# Start socat to forward from vsock to the opensecret +log "Starting socat..." +socat VSOCK-LISTEN:5000,reuseaddr,fork TCP:0.0.0.0:3000 diff --git a/flake.lock b/flake.lock new file mode 100644 index 0000000..ffc0811 --- /dev/null +++ b/flake.lock @@ -0,0 +1,137 @@ +{ + "nodes": { + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { + "inputs": { + "systems": "systems_2" + }, + "locked": { + "lastModified": 1705309234, + "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "nitro-util": { + "inputs": { + "flake-utils": "flake-utils_2", + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1727353784, + "narHash": "sha256-OpxNvShY8MMlFFGg1cZZsEupph+zncXgfh3SImFCjH4=", + "owner": "monzo", + "repo": "aws-nitro-util", + "rev": "7d755578b0b0b9850c0d7c4738a6c8daf3ff55c0", + "type": "github" + }, + "original": { + "owner": "monzo", + "repo": "aws-nitro-util", + "rev": "7d755578b0b0b9850c0d7c4738a6c8daf3ff55c0", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1723637854, + "narHash": "sha256-med8+5DSWa2UnOqtdICndjDAEjxr5D7zaIiK4pn0Q7c=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "c3aa7b8938b17aebd2deecf7be0636000d62a2b9", + "type": "github" + }, + 
"original": { + "id": "nixpkgs", + "ref": "nixos-unstable", + "type": "indirect" + } + }, + "root": { + "inputs": { + "flake-utils": "flake-utils", + "nitro-util": "nitro-util", + "nixpkgs": "nixpkgs", + "rust-overlay": "rust-overlay" + } + }, + "rust-overlay": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1723688259, + "narHash": "sha256-WzeUR1MG9MnJnh9T7qcVe/v12qHvJvzdc3Z5HCeE2ns=", + "owner": "oxalica", + "repo": "rust-overlay", + "rev": "6e75319846684326d900daff1e2e11338cc80d2b", + "type": "github" + }, + "original": { + "owner": "oxalica", + "repo": "rust-overlay", + "type": "github" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 0000000..8341766 --- /dev/null +++ b/flake.nix @@ -0,0 +1,286 @@ +{ + description = "Rust project"; + + inputs = { + flake-utils.url = "github:numtide/flake-utils"; + rust-overlay = { + url = "github:oxalica/rust-overlay"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + nixpkgs.url = "nixpkgs/nixos-unstable"; + nitro-util = { + url = "github:monzo/aws-nitro-util/7d755578b0b0b9850c0d7c4738a6c8daf3ff55c0"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + }; + + outputs = { self, nixpkgs, flake-utils, rust-overlay, nitro-util }: + flake-utils.lib.eachDefaultSystem 
(system: + let + overlays = [ rust-overlay.overlays.default ]; + pkgs = import nixpkgs { inherit system overlays; }; + rust = pkgs.rust-bin.fromRustupToolchainFile ./rust-toolchain.toml; + nitro = nitro-util.lib.${system}; + + # Development environment setup + commonInputs = [ + rust + pkgs.rust-analyzer + pkgs.pkg-config + pkgs.openssl + pkgs.zlib + pkgs.gcc + pkgs.clang + pkgs.jq + pkgs.just + pkgs.postgresql + pkgs.diesel-cli + ]; + linuxOnlyInputs = [ + pkgs.podman + pkgs.conmon + pkgs.slirp4netns + pkgs.fuse-overlayfs + ]; + darwinOnlyInputs = [ + pkgs.libiconv + pkgs.darwin.apple_sdk.frameworks.Security + pkgs.darwin.apple_sdk.frameworks.SystemConfiguration + ]; + inputs = commonInputs + ++ pkgs.lib.optionals pkgs.stdenv.isLinux linuxOnlyInputs + ++ pkgs.lib.optionals pkgs.stdenv.isDarwin darwinOnlyInputs; + + setupPostgresScript = pkgs.writeShellScript "setup-postgres" '' + export PGDATA=$(mktemp -d) + export PGSOCKETS=$(mktemp -d) + ${pkgs.postgresql}/bin/initdb -D $PGDATA + ${pkgs.postgresql}/bin/pg_ctl start -D $PGDATA -o "-h localhost -p 5432 -k $PGSOCKETS" + until ${pkgs.postgresql}/bin/pg_isready -h localhost -p 5432; do sleep 1; done + ${pkgs.postgresql}/bin/createuser -h localhost -p 5432 -s postgres + ${pkgs.postgresql}/bin/psql -h localhost -p 5432 -c "CREATE USER \"opensecret_user\" WITH PASSWORD 'password';" -U postgres + ${pkgs.postgresql}/bin/psql -h localhost -p 5432 -c "CREATE DATABASE \"opensecret\" OWNER \"opensecret_user\";" -U postgres + exit + ''; + + setupEnvScript = pkgs.writeShellScript "setup-env" '' + if [ ! 
-f .env ]; then + cp .env.sample .env + sed -i 's|DATABASE_URL=postgres://localhost/opensecret|DATABASE_URL=postgres://opensecret_user:password@localhost:5432/opensecret|g' .env + + # Get a new ENCLAVE_SECRET_MOCK value using openssl + export enclaveSecret=$(openssl rand -hex 32) + sed -i "s|ENCLAVE_SECRET_MOCK=|ENCLAVE_SECRET_MOCK=$enclaveSecret|g" .env + + # Get a new JWT_SECRET value using openssl + export jwtSecret=$(openssl rand -base64 32) + sed -i "s|JWT_SECRET=|JWT_SECRET=$jwtSecret|g" .env + fi + ''; + + # Function to create rootfs with specific APP_MODE + mkRootfs = appMode: pkgs.buildEnv { + name = "opensecret-rootfs-${appMode}"; + paths = [ + opensecret + (pkgs.writeScriptBin "entrypoint" '' + #!${pkgs.bash}/bin/bash + + # Set up busybox commands and other tools + export PATH="/bin:${pkgs.busybox}/bin:${pkgs.python3}/bin:${pkgs.jq}/bin:${pkgs.socat}/bin:${nitro-bins}/bin:$PATH" + + # Create symlinks for busybox commands + mkdir -p /bin + ln -sf ${pkgs.busybox}/bin/busybox /bin/date + ln -sf ${pkgs.busybox}/bin/busybox /bin/ip + ln -sf ${pkgs.python3}/bin/python3 /bin/python3 + ln -sf ${pkgs.jq}/bin/jq /bin/jq + ln -sf ${pkgs.socat}/bin/socat /bin/socat + ln -sf ${pkgs.curl}/bin/curl /bin/curl + + # Set up CA certificates + mkdir -p /etc/ssl/certs + ln -sf ${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt /etc/ssl/certs/ca-bundle.crt + export SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.crt + export AWS_CA_BUNDLE=/etc/ssl/certs/ca-bundle.crt + + # Copy required libraries and tools + mkdir -p /lib + export LD_LIBRARY_PATH="/lib:$LD_LIBRARY_PATH" + install -m 755 ${nitro-bins}/lib/libnsm.so /lib/ + + install -m 755 ${nitro-bins}/bin/kmstool_enclave_cli /bin/ + + # Copy required C libraries + cp -P ${pkgs.glibc}/lib/ld-linux*.so* /lib/ + cp -P ${pkgs.glibc}/lib/libc.so* /lib/ + cp -P ${pkgs.glibc}/lib/libdl.so* /lib/ + cp -P ${pkgs.glibc}/lib/libpthread.so* /lib/ + cp -P ${pkgs.glibc}/lib/librt.so* /lib/ + cp -P ${pkgs.glibc}/lib/libm.so* /lib/ + + # Set up Python 
environment + export PYTHONPATH="$(find ${pkgs.python3}/lib -name site-packages):$PYTHONPATH" + + # Copy opensecret and continuum-proxy to their locations + mkdir -p /app + install -m 755 ${opensecret}/bin/opensecret /app/ + install -m 755 ${continuum-proxy}/bin/continuum-proxy /app/ + + ${builtins.readFile ./entrypoint.sh} + '') + (pkgs.writeTextFile { + name = "app-mode"; + text = builtins.trace "Creating APP_MODE file with value: ${appMode}" appMode; + destination = "/app/APP_MODE"; + }) + (pkgs.writeTextFile { + name = "traffic_forwarder"; + text = builtins.readFile ./nitro-toolkit/traffic_forwarder.py; + destination = "/app/traffic_forwarder.py"; + }) + (pkgs.writeTextFile { + name = "vsock_helper"; + text = builtins.readFile ./nitro-toolkit/vsock_helper.py; + destination = "/app/vsock_helper.py"; + }) + pkgs.bash + pkgs.busybox + pkgs.openssl + pkgs.postgresql + pkgs.socat + pkgs.python3 + pkgs.jq + pkgs.iproute2 + pkgs.coreutils + pkgs.cacert + pkgs.curl + nitro-bins + continuum-proxy + ]; + pathsToLink = [ "/bin" "/lib" "/app" "/usr/bin" "/usr/sbin" "/sbin" ]; + }; + + # Function to create EIF with specific APP_MODE + mkEif = appMode: nitro.buildEif { + name = "opensecret-eif-${appMode}"; + kernel = nitro.blobs.${arch}.kernel; + kernelConfig = nitro.blobs.${arch}.kernelConfig; + nsmKo = nitro.blobs.${arch}.nsmKo; + copyToRoot = mkRootfs appMode; + entrypoint = "/bin/entrypoint"; + }; + + # Build the main Rust package + opensecret = pkgs.rustPlatform.buildRustPackage { + pname = "opensecret"; + version = "0.1.0"; + src = pkgs.lib.cleanSourceWith { + src = ./.; + filter = path: type: + let + baseName = baseNameOf path; + parts = pkgs.lib.splitString "/" path; + in + # Explicitly exclude .env files + (baseName != ".env" && baseName != ".env.sample") && + ( + (builtins.elem "src" parts) || + (type == "regular" && ( + baseName == "Cargo.toml" || + baseName == "Cargo.lock" || + baseName == "rust-toolchain.toml" + )) + ); + }; + cargoLock = { + lockFile = 
./Cargo.lock; + }; + nativeBuildInputs = [ + pkgs.pkg-config + pkgs.rust-analyzer + pkgs.gcc + pkgs.clang + ]; + buildInputs = [ + pkgs.openssl + pkgs.zlib + pkgs.postgresql + pkgs.diesel-cli + ]; + LIBPQ_LIB_DIR = "${pkgs.postgresql.lib}/lib"; + }; + + # Use pre-built NSM library and KMS tools from nitro-bins directory + nitro-bins = pkgs.stdenv.mkDerivation { + name = "nitro-bins"; + version = "1.0"; + src = ./nitro-bins; + dontUnpack = true; + installPhase = '' + mkdir -p $out/{lib,bin} + # Use install to copy files and set permissions + install -m 755 $src/libnsm.so $out/lib/ + install -m 755 $src/kmstool_enclave_cli $out/bin/ + ''; + }; + + # Copy continuum-proxy from local filesystem + continuum-proxy = pkgs.runCommand "continuum-proxy" {} '' + mkdir -p $out/bin + cp ${./continuum-proxy} $out/bin/continuum-proxy + chmod +x $out/bin/continuum-proxy + ''; + + arch = pkgs.stdenv.hostPlatform.uname.processor; + in + { + packages = { + default = opensecret; + eif-dev = mkEif "dev"; + eif-prod = mkEif "prod"; + eif-preview = mkEif "preview"; + }; + + devShell = pkgs.mkShell { + packages = inputs; + shellHook = '' + export LIBCLANG_PATH=${pkgs.libclang.lib}/lib/ + export LD_LIBRARY_PATH=${pkgs.openssl}/lib:$LD_LIBRARY_PATH + export CC_wasm32_unknown_unknown=${pkgs.llvmPackages_14.clang-unwrapped}/bin/clang-14 + export CFLAGS_wasm32_unknown_unknown="-I ${pkgs.llvmPackages_14.libclang.lib}/lib/clang/14.0.6/include/" + export PKG_CONFIG_PATH=${pkgs.openssl.dev}/lib/pkgconfig + + ${pkgs.lib.optionalString pkgs.stdenv.isLinux '' + alias docker='podman' + echo "Using 'podman' as an alias for 'docker'" + echo "You can now use 'docker' commands, which will be executed by podman" + + # Podman configuration + export CONTAINERS_CONF=$HOME/.config/containers/containers.conf + export CONTAINERS_POLICY=$HOME/.config/containers/policy.json + mkdir -p $HOME/.config/containers + echo '{"default":[{"type":"insecureAcceptAnything"}]}' > $CONTAINERS_POLICY + + # Create a basic 
containers.conf if it doesn't exist + if [ ! -f $CONTAINERS_CONF ]; then + echo "[engine] + cgroup_manager = \"cgroupfs\" + events_logger = \"file\" + runtime = \"crun\" + + [storage] + driver = \"vfs\"" > $CONTAINERS_CONF + fi + + # Ensure correct permissions + chmod 600 $CONTAINERS_POLICY $CONTAINERS_CONF + ''} + + ${setupPostgresScript} + ${setupEnvScript} + ''; + }; + } + ); +} diff --git a/justfile b/justfile new file mode 100644 index 0000000..e2db64b --- /dev/null +++ b/justfile @@ -0,0 +1,443 @@ +# Load environment variables from .env file +set dotenv-load + +# Set the container runtime (docker or podman) +container := "podman" + +# Set the default recipe to list all available commands +default: + @just --list + +# Build the enclave base image +build-enclave-base: + {{container}} build ./nitro-toolkit/enclave-base-image/ -t enclave_base + +# Build Nitro binaries from enclave base image (NSM and KMS tools) +build-nitro-bins: + mkdir -p nitro-bins + {{container}} build -t nitro-bins -f nitro-toolkit/enclave-base-image/Dockerfile --target enclave_base . + {{container}} create --name temp-nitro nitro-bins sh + {{container}} cp temp-nitro:/app/libnsm.so nitro-bins/ + {{container}} cp temp-nitro:/app/kmstool_enclave_cli nitro-bins/ + {{container}} rm temp-nitro + chmod +x nitro-bins/kmstool_enclave_cli + +# Build the main Docker image for local +build-docker-local: + {{container}} rmi opensecret:latest || true + {{container}} build -t opensecret \ + --build-arg APP_MODE=local \ + . + +### Credential Requester Commands ### + +# Build the Credential Requester Docker image for development +build-credential-requester-docker: + {{container}} rmi credential-requester:latest || true + cd nitro-toolkit/credential_requester && \ + {{container}} build -t credential-requester . 
+ +# Save Credential Requester Docker image to a tar file for dev mode +save-credential-requester-docker-image-dev: + rm -f build/credential-requester/dev/credential-requester.tar && \ + {{container}} save -o build/credential-requester/dev/credential-requester.tar credential-requester + +# Save Credential Requester Docker image to a tar file for prod +save-credential-requester-docker-image-prod: + rm -f build/credential-requester/prod/credential-requester.tar && \ + {{container}} save -o build/credential-requester/prod/credential-requester.tar credential-requester + +# Save Credential Requester Docker image to a tar file for preview mode +save-credential-requester-docker-image-preview: + rm -f build/credential-requester/preview/credential-requester.tar && \ + {{container}} save -o build/credential-requester/preview/credential-requester.tar credential-requester + +# SCP the Credential Requester Docker image to the AWS parent instance (dev) +scp-credential-requester-to-aws-dev: + scp -i $DEV_SSH_KEY build/credential-requester/dev/credential-requester.tar $DEV_SERVER:~/ + +# SCP the Docker image to the AWS parent instance (prod) +scp-credential-requester-to-aws-prod: + scp -i $PROD_SSH_KEY build/credential-requester/prod/credential-requester.tar $PROD_SERVER:~/ + +# SCP the Credential Requester Docker image to the AWS parent instance (preview) +scp-credential-requester-to-aws-preview: + scp -i $PREVIEW_SSH_KEY build/credential-requester/preview/credential-requester.tar $PREVIEW_SERVER:~/ + +# Load Credential Requester Docker image on AWS instance (dev) +load-credential-requester-docker-on-aws-dev: + ssh -i $DEV_SSH_KEY $DEV_SERVER "docker load -i credential-requester.tar && docker tag localhost/credential-requester:latest credential-requester:latest" + +# Load Credential Requester Docker image on AWS instance (prod) +load-credential-requester-docker-on-aws-prod: + ssh -i $PROD_SSH_KEY $PROD_SERVER "docker load -i credential-requester.tar && docker tag 
localhost/credential-requester:latest credential-requester:latest" + +# Load Credential Requester Docker image on AWS instance (preview) +load-credential-requester-docker-on-aws-preview: + ssh -i $PREVIEW_SSH_KEY $PREVIEW_SERVER "docker load -i credential-requester.tar && docker tag localhost/credential-requester:latest credential-requester:latest" + +# Run Credential Requester Docker image on AWS instance (dev) +run-credential-requester-docker-on-aws-dev: + ssh -i $DEV_SSH_KEY $DEV_SERVER "docker run -d --restart always --name credential-requester --device=/dev/vsock:/dev/vsock -v /var/run/vsock:/var/run/vsock --privileged -e PORT=8003 credential-requester:latest" + +# Run Credential Requester Docker image on AWS instance (prod) +run-credential-requester-docker-on-aws-prod: + ssh -i $PROD_SSH_KEY $PROD_SERVER "docker run -d --restart always --name credential-requester --device=/dev/vsock:/dev/vsock -v /var/run/vsock:/var/run/vsock --privileged -e PORT=8003 credential-requester:latest" + +# Run Credential Requester Docker image on AWS instance (preview) +run-credential-requester-docker-on-aws-preview: + ssh -i $PREVIEW_SSH_KEY $PREVIEW_SERVER "docker run -d --restart always --name credential-requester --device=/dev/vsock:/dev/vsock -v /var/run/vsock:/var/run/vsock --privileged -e PORT=8003 credential-requester:latest" + +### Logging Commands ### + +# Build the Logging Docker image +build-logging-docker: + {{container}} rmi enclave-logging:latest || true + cd nitro-toolkit/logging && {{container}} build -t enclave-logging . 
+ +# Save Logging Docker image to a tar file (Dev) +save-logging-docker-image-dev: + rm -f build/dev/logging/enclave-logging.tar && {{container}} save -o build/dev/logging/enclave-logging.tar enclave-logging + +# Save Logging Docker image to a tar file (Prod) +save-logging-docker-image-prod: + rm -f build/prod/logging/enclave-logging.tar && {{container}} save -o build/prod/logging/enclave-logging.tar enclave-logging + +# Save Logging Docker image to a tar file (Preview) +save-logging-docker-image-preview: + rm -f build/preview/logging/enclave-logging.tar && {{container}} save -o build/preview/logging/enclave-logging.tar enclave-logging + +# SCP the Logging Docker image to the AWS parent instance (dev) +scp-logging-to-aws-dev: + scp -i $DEV_SSH_KEY build/dev/logging/enclave-logging.tar $DEV_SERVER:~/ + +# SCP the Logging Docker image to the AWS parent instance (prod) +scp-logging-to-aws-prod: + scp -i $PROD_SSH_KEY build/prod/logging/enclave-logging.tar $PROD_SERVER:~/ + +# SCP the Logging Docker image to the AWS parent instance (preview) +scp-logging-to-aws-preview: + scp -i $PREVIEW_SSH_KEY build/preview/logging/enclave-logging.tar $PREVIEW_SERVER:~/ + +# Load Logging Docker image on AWS instance (dev) +load-logging-docker-on-aws-dev: + ssh -i $DEV_SSH_KEY $DEV_SERVER "docker load -i enclave-logging.tar && docker tag localhost/enclave-logging:latest enclave-logging:latest" + +# Load Logging Docker image on AWS instance (prod) +load-logging-docker-on-aws-prod: + ssh -i $PROD_SSH_KEY $PROD_SERVER "docker load -i enclave-logging.tar && docker tag localhost/enclave-logging:latest enclave-logging:latest" + +# Load Logging Docker image on AWS instance (preview) +load-logging-docker-on-aws-preview: + ssh -i $PREVIEW_SSH_KEY $PREVIEW_SERVER "docker load -i enclave-logging.tar && docker tag localhost/enclave-logging:latest enclave-logging:latest" + +# Run Logging Docker image on AWS instance (dev) +run-logging-docker-on-aws-dev: + ssh -i $DEV_SSH_KEY $DEV_SERVER "docker 
run -d --restart always --name enclave-logging --device=/dev/vsock:/dev/vsock -v /var/run/vsock:/var/run/vsock --privileged -e VSOCK_PORT=8011 -e LOG_GROUP=/aws/nitro-enclaves/maple-enclave-dev -e LOG_STREAM=enclave-logs-dev -e AWS_REGION=us-east-2 enclave-logging:latest" + +# Run Logging Docker image on AWS instance (prod) +run-logging-docker-on-aws-prod: + ssh -i $PROD_SSH_KEY $PROD_SERVER "docker run -d --restart always --name enclave-logging --device=/dev/vsock:/dev/vsock -v /var/run/vsock:/var/run/vsock --privileged -e VSOCK_PORT=8011 -e LOG_GROUP=/aws/nitro-enclaves/maple-enclave-prod -e LOG_STREAM=enclave-logs-prod -e AWS_REGION=us-east-2 enclave-logging:latest" + +# Run Logging Docker image on AWS instance (preview) +run-logging-docker-on-aws-preview: + ssh -i $PREVIEW_SSH_KEY $PREVIEW_SERVER "docker run -d --restart always --name enclave-logging --device=/dev/vsock:/dev/vsock -v /var/run/vsock:/var/run/vsock --privileged -e VSOCK_PORT=8011 -e LOG_GROUP=/aws/nitro-enclaves/maple-enclave-preview -e LOG_STREAM=enclave-logs-preview -e AWS_REGION=us-east-2 enclave-logging:latest" + +# Build and deploy logging for dev +build-and-deploy-logging-dev: build-logging-docker save-logging-docker-image-dev scp-logging-to-aws-dev load-logging-docker-on-aws-dev run-logging-docker-on-aws-dev + +# Build and deploy logging for prod +build-and-deploy-logging-prod: build-logging-docker save-logging-docker-image-prod scp-logging-to-aws-prod load-logging-docker-on-aws-prod run-logging-docker-on-aws-prod + +# Build and deploy logging for preview +build-and-deploy-logging-preview: build-logging-docker save-logging-docker-image-preview scp-logging-to-aws-preview load-logging-docker-on-aws-preview run-logging-docker-on-aws-preview + +### Database Commands ### + +# Setup diesel CLI (first-time setup) +diesel-setup: + diesel setup + +# Generate a new migration +diesel-migration-generate name: + diesel migration generate {{name}} + +# Run migrations locally +diesel-migration-run-local: 
+ diesel migration run + +# Run migrations on development +diesel-migration-run-dev: + diesel migration run --database-url $DEV_DATABASE_URL + +# Run migrations on production +diesel-migration-run-prod: + diesel migration run --database-url $PROD_DATABASE_URL + +# Run migrations on preview +diesel-migration-run-preview: + diesel migration run --database-url $PREVIEW_DATABASE_URL + + +### Continuum Proxy Commands ### + +# Update continuum-proxy +update-continuum-proxy: + containerID=$({{container}} create --platform linux/arm64 ghcr.io/edgelesssys/continuum/continuum-proxy:v1.5.1-0.20250115175902-e274a3d28b59@sha256:c0e41ce62f9fdd210b40a6fd2e7d6f194d03e6bec3b34081b039b2f0a3eb67e4) && \ + {{container}} cp "${containerID}":/bin/continuum-proxy ./continuum-proxy && \ + {{container}} rm "${containerID}" + +# SCP the update_continuum_url.sh script to the AWS parent instance (dev) +scp-update-continuum-url-dev: + scp -i $DEV_SSH_KEY update_continuum_url.sh $DEV_SERVER:~/ + +# SCP the update_continuum_url.sh script to the AWS parent instance (prod) +scp-update-continuum-url-prod: + scp -i $PROD_SSH_KEY update_continuum_url.sh $PROD_SERVER:~/ + +# SCP the update_continuum_url.sh script to the AWS parent instance (preview) +scp-update-continuum-url-preview: + scp -i $PREVIEW_SSH_KEY update_continuum_url.sh $PREVIEW_SERVER:~/ + +### Enclave Management ### + +# Terminate the running enclave (dev) +terminate-enclave-dev: + ssh -i $DEV_SSH_KEY $DEV_SERVER 'bash -c "\ + ENCLAVE_ID=\$(nitro-cli describe-enclaves | jq -r \".[0].EnclaveID\") && \ + if [ ! -z \"\$ENCLAVE_ID\" ]; then \ + echo \"Terminating enclave with ID: \$ENCLAVE_ID\" && \ + nitro-cli terminate-enclave --enclave-id \$ENCLAVE_ID; \ + else \ + echo \"No running enclave found.\"; \ + fi"' + +# Terminate the running enclave (prod) +terminate-enclave-prod: + ssh -i $PROD_SSH_KEY $PROD_SERVER 'bash -c "\ + ENCLAVE_ID=\$(nitro-cli describe-enclaves | jq -r \".[0].EnclaveID\") && \ + if [ ! 
-z \"\$ENCLAVE_ID\" ]; then \ + echo \"Terminating enclave with ID: \$ENCLAVE_ID\" && \ + nitro-cli terminate-enclave --enclave-id \$ENCLAVE_ID; \ + else \ + echo \"No running enclave found.\"; \ + fi"' + +# Terminate the running enclave (preview) +terminate-enclave-preview: + ssh -i $PREVIEW_SSH_KEY $PREVIEW_SERVER 'bash -c "\ + ENCLAVE_ID=\$(nitro-cli describe-enclaves | jq -r \".[0].EnclaveID\") && \ + if [ ! -z \"\$ENCLAVE_ID\" ]; then \ + echo \"Terminating enclave with ID: \$ENCLAVE_ID\" && \ + nitro-cli terminate-enclave --enclave-id \$ENCLAVE_ID; \ + else \ + echo \"No running enclave found.\"; \ + fi"' + +# Restart socat-proxy service (dev) +restart-socat-dev: + ssh -i $DEV_SSH_KEY $DEV_SERVER "sudo systemctl restart socat-proxy.service" + +# Restart socat-proxy service (prod) +restart-socat-prod: + ssh -i $PROD_SSH_KEY $PROD_SERVER "sudo systemctl restart socat-proxy.service" +# +# Restart socat-proxy service (preview) +restart-socat-preview: + ssh -i $PREVIEW_SSH_KEY $PREVIEW_SERVER "sudo systemctl restart socat-proxy.service" + +# Run the staged dev environment +run-stage-dev: terminate-enclave-dev run-eif-dev restart-socat-dev + +# Run the staged prod environment +run-stage-prod: terminate-enclave-prod run-eif-prod restart-socat-prod + +# Run the staged preview environment +run-stage-preview: terminate-enclave-preview run-eif-preview restart-socat-preview + +### EIF Building ### + +# Build the EIF using Nix +build-eif: + nix build .?submodules=1#eif + echo "EIF build completed. PCR:" + cat result/pcr.json + +# Build EIF for development environment +build-eif-dev: + nix build .?submodules=1#eif-dev + echo "EIF build completed. PCR:" + cat result/pcr.json + +# Build EIF for production environment +build-eif-prod: + nix build .?submodules=1#eif-prod + echo "EIF build completed. PCR:" + cat result/pcr.json + +# Build EIF for preview environment +build-eif-preview: + nix build .?submodules=1#eif-preview + echo "EIF build completed. 
PCR:" + cat result/pcr.json + +# Build EIF with custom environment variables +build-eif-custom env_vars: + #!/usr/bin/env bash + eval "{{env_vars}}" nix build .?submodules=1#eif + echo "EIF build completed. PCR:" + cat result/pcr.json + +# Build EIF for development environment +copy-pcr-dev: + nix build .?submodules=1#eif-dev + echo "EIF build completed. PCR:" + cat result/pcr.json + cp -f result/pcr.json ./pcrDev.json + +# Build EIF for production environment +copy-pcr-prod: + nix build .?submodules=1#eif-prod + echo "EIF build completed. PCR:" + cat result/pcr.json + cp -f result/pcr.json ./pcrProd.json + +# Internal function for PCR verification +_verify-pcr-internal env pcr_file: + #!/usr/bin/env bash + if [ ! -f "./{{pcr_file}}" ]; then + echo "No {{pcr_file}} found. Building {{env}} EIF first..." + just build-eif-{{env}} + exit 0 + fi + + if [ ! -f result/pcr.json ]; then + echo "No result/pcr.json found. Building {{env}} EIF first..." + just build-eif-{{env}} + fi + + if diff -q "./{{pcr_file}}" result/pcr.json > /dev/null; then + echo "✅ {{env}} PCR values match!" + else + echo "❌ {{env}} PCR values do not match!" + echo "Expected (./{{pcr_file}}):" + cat "./{{pcr_file}}" + echo "Got (result/pcr.json):" + cat result/pcr.json + exit 1 + fi + +# Verify PCR values for dev environment +verify-pcr-dev: + just _verify-pcr-internal dev pcrDev.json + +# Verify PCR values for prod environment +verify-pcr-prod: + just _verify-pcr-internal prod pcrProd.json + +# Verify PCR values for preview environment +verify-pcr-preview: + just _verify-pcr-internal preview pcrPreview.json + +# Verify PCR values for custom environment +verify-pcr-custom: + #!/usr/bin/env bash + if [ ! -f ./pcrCustom.json ]; then + echo "No pcrCustom.json found. Please run build-eif-custom first" + exit 1 + fi + + if [ ! -f result/pcr.json ]; then + echo "No result/pcr.json found. 
Please rebuild with the same environment variables" + exit 1 + fi + + if diff -q ./pcrCustom.json result/pcr.json > /dev/null; then + echo "✅ Custom PCR values match!" + else + echo "❌ Custom PCR values do not match!" + echo "Expected (./pcrCustom.json):" + cat ./pcrCustom.json + echo "Got (result/pcr.json):" + cat result/pcr.json + exit 1 + fi + +# SCP the Nix-built EIF to AWS parent instance (dev) +scp-eif-to-aws-dev: + scp -i $DEV_SSH_KEY result/image.eif $DEV_SERVER:~/opensecret.eif + +# SCP the Nix-built EIF to AWS parent instance (prod) +scp-eif-to-aws-prod: + scp -i $PROD_SSH_KEY result/image.eif $PROD_SERVER:~/opensecret.eif + +# SCP the Nix-built EIF to AWS parent instance (preview) +scp-eif-to-aws-preview: + scp -i $PREVIEW_SSH_KEY result/image.eif $PREVIEW_SERVER:~/opensecret.eif + +# Stage to dev environment without debug mode (using Nix-built EIF) +stage-dev-nix: build-eif-dev scp-eif-to-aws-dev + +# Stage to prod environment without debug mode (using Nix-built EIF) +stage-prod-nix: build-eif-prod scp-eif-to-aws-prod + +# Stage to preview environment without debug mode (using Nix-built EIF) +stage-preview-nix: build-eif-preview scp-eif-to-aws-preview + +# Run EIF file on AWS (dev) +run-eif-dev: + ssh -i $DEV_SSH_KEY $DEV_SERVER "nitro-cli run-enclave --eif-path opensecret.eif --memory 16384 --cpu-count 4" + +# Run EIF file on AWS (prod) +run-eif-prod: + ssh -i $PROD_SSH_KEY $PROD_SERVER "nitro-cli run-enclave --eif-path opensecret.eif --memory 16384 --cpu-count 4" + +# Run EIF file on AWS (preview) +run-eif-preview: + ssh -i $PREVIEW_SSH_KEY $PREVIEW_SERVER "nitro-cli run-enclave --eif-path opensecret.eif --memory 16384 --cpu-count 4" + +# Run EIF file in debug mode (preview) +run-eif-debug-preview: + ssh -i $PREVIEW_SSH_KEY $PREVIEW_SERVER "nitro-cli run-enclave --eif-path opensecret.eif --memory 16384 --cpu-count 4 --debug-mode" + +# Run EIF file in debug mode (dev) +run-eif-debug-dev: + ssh -i $DEV_SSH_KEY $DEV_SERVER "nitro-cli run-enclave 
--eif-path opensecret.eif --memory 16384 --cpu-count 4 --debug-mode" + +# Run EIF file in debug mode (prod) +run-eif-debug-prod: + ssh -i $PROD_SSH_KEY $PROD_SERVER "nitro-cli run-enclave --eif-path opensecret.eif --memory 16384 --cpu-count 4 --debug-mode" + +# View console logs in debug mode (dev) +view-console-logs-dev: + ssh -i $DEV_SSH_KEY $DEV_SERVER "export ENCLAVE_ID=\$(nitro-cli describe-enclaves | jq -r '.[0].EnclaveID') && nitro-cli console --enclave-id \$ENCLAVE_ID" + +# View console logs in debug mode (prod) +view-console-logs-prod: + ssh -i $PROD_SSH_KEY $PROD_SERVER "export ENCLAVE_ID=\$(nitro-cli describe-enclaves | jq -r '.[0].EnclaveID') && nitro-cli console --enclave-id \$ENCLAVE_ID" + +# View console logs in debug mode (preview) +view-console-logs-preview: + ssh -i $PREVIEW_SSH_KEY $PREVIEW_SERVER "export ENCLAVE_ID=\$(nitro-cli describe-enclaves | jq -r '.[0].EnclaveID') && nitro-cli console --enclave-id \$ENCLAVE_ID" + +# Deploy to dev environment without debug mode (using Nix-built EIF) +deploy-dev-nix: build-eif-dev verify-pcr-dev scp-eif-to-aws-dev + @echo "EIF copied to server. Please review the PCR values and press Enter to continue with termination and deployment..." + @read -p "" + just terminate-enclave-dev run-eif-dev restart-socat-dev + +# Deploy to prod environment without debug mode (using Nix-built EIF) +deploy-prod-nix: build-eif-prod verify-pcr-prod scp-eif-to-aws-prod + @echo "EIF copied to production server. Please review the PCR values and press Enter to continue with termination and deployment..." + @read -p "" + just terminate-enclave-prod run-eif-prod restart-socat-prod + +# Deploy to preview environment without debug mode (using Nix-built EIF) +deploy-preview-nix: build-eif-preview verify-pcr-preview scp-eif-to-aws-preview + @echo "EIF copied to preview server. Please review the PCR values and press Enter to continue with termination and deployment..."
+ @read -p "" + just terminate-enclave-preview run-eif-preview restart-socat-preview + +# Clean EIF build artifacts +clean-eif: + rm -f result diff --git a/migrations/00000000000000_diesel_initial_setup/down.sql b/migrations/00000000000000_diesel_initial_setup/down.sql new file mode 100644 index 0000000..a9f5260 --- /dev/null +++ b/migrations/00000000000000_diesel_initial_setup/down.sql @@ -0,0 +1,6 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. + +DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass); +DROP FUNCTION IF EXISTS diesel_set_updated_at(); diff --git a/migrations/00000000000000_diesel_initial_setup/up.sql b/migrations/00000000000000_diesel_initial_setup/up.sql new file mode 100644 index 0000000..d68895b --- /dev/null +++ b/migrations/00000000000000_diesel_initial_setup/up.sql @@ -0,0 +1,36 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. 
+ + + + +-- Sets up a trigger for the given table to automatically set a column called +-- `updated_at` whenever the row is modified (unless `updated_at` was included +-- in the modified columns) +-- +-- # Example +-- +-- ```sql +-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW()); +-- +-- SELECT diesel_manage_updated_at('users'); +-- ``` +CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$ +BEGIN + EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl); +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$ +BEGIN + IF ( + NEW IS DISTINCT FROM OLD AND + NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at + ) THEN + NEW.updated_at := current_timestamp; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; diff --git a/migrations/2024-08-20-163446_users_table/down.sql b/migrations/2024-08-20-163446_users_table/down.sql new file mode 100644 index 0000000..8882a47 --- /dev/null +++ b/migrations/2024-08-20-163446_users_table/down.sql @@ -0,0 +1,2 @@ +DROP INDEX IF EXISTS idx_users_uuid; +DROP TABLE IF EXISTS users; diff --git a/migrations/2024-08-20-163446_users_table/up.sql b/migrations/2024-08-20-163446_users_table/up.sql new file mode 100644 index 0000000..7fff9ab --- /dev/null +++ b/migrations/2024-08-20-163446_users_table/up.sql @@ -0,0 +1,13 @@ +CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; + +CREATE TABLE IF NOT EXISTS users ( + id SERIAL PRIMARY KEY, + uuid UUID NOT NULL DEFAULT uuid_generate_v4() UNIQUE, + name TEXT, + email TEXT NOT NULL UNIQUE, + password_enc BYTEA NOT NULL, + seed_enc BYTEA +); + +-- Add an index on the uuid column +CREATE INDEX idx_users_uuid ON users(uuid); diff --git a/migrations/2024-08-26-211803_user_kv_table/down.sql b/migrations/2024-08-26-211803_user_kv_table/down.sql new file mode 100644 index 0000000..5c2bf36 --- /dev/null +++ 
b/migrations/2024-08-26-211803_user_kv_table/down.sql @@ -0,0 +1,5 @@ +DROP TRIGGER IF EXISTS trigger_update_user_kv_updated_at ON user_kv; +DROP FUNCTION IF EXISTS update_user_kv_updated_at(); +DROP INDEX IF EXISTS idx_user_kv_user_id; +DROP INDEX IF EXISTS idx_user_kv_user_id_key_enc; +DROP TABLE IF EXISTS user_kv; diff --git a/migrations/2024-08-26-211803_user_kv_table/up.sql b/migrations/2024-08-26-211803_user_kv_table/up.sql new file mode 100644 index 0000000..eac0447 --- /dev/null +++ b/migrations/2024-08-26-211803_user_kv_table/up.sql @@ -0,0 +1,27 @@ +CREATE TABLE user_kv ( + id BIGSERIAL PRIMARY KEY, + user_id UUID NOT NULL REFERENCES users(uuid), + key_enc BYTEA NOT NULL, + value_enc BYTEA NOT NULL, + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, + UNIQUE (user_id, key_enc) +); + +-- Create an index on user_id for faster lookups +CREATE INDEX idx_user_kv_user_id ON user_kv(user_id); +CREATE INDEX idx_user_kv_user_id_key_enc ON user_kv(user_id, key_enc); + +-- Create a trigger to automatically update the updated_at column +CREATE OR REPLACE FUNCTION update_user_kv_updated_at() +RETURNS TRIGGER AS $$ +BEGIN + NEW.updated_at = CURRENT_TIMESTAMP; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER trigger_update_user_kv_updated_at +BEFORE UPDATE ON user_kv +FOR EACH ROW +EXECUTE FUNCTION update_user_kv_updated_at(); diff --git a/migrations/2024-09-16-172351_enclave_secrets/down.sql b/migrations/2024-09-16-172351_enclave_secrets/down.sql new file mode 100644 index 0000000..4f4b1a4 --- /dev/null +++ b/migrations/2024-09-16-172351_enclave_secrets/down.sql @@ -0,0 +1,2 @@ +DROP INDEX IF EXISTS idx_enclave_secrets_key; +DROP TABLE IF EXISTS enclave_secrets; diff --git a/migrations/2024-09-16-172351_enclave_secrets/up.sql b/migrations/2024-09-16-172351_enclave_secrets/up.sql new file mode 100644 index 0000000..c88a3e7 --- /dev/null +++ 
b/migrations/2024-09-16-172351_enclave_secrets/up.sql @@ -0,0 +1,8 @@ +CREATE TABLE enclave_secrets ( + id SERIAL PRIMARY KEY, + key TEXT NOT NULL UNIQUE, + value BYTEA NOT NULL +); + +-- Create an index on key for faster lookups +CREATE INDEX idx_enclave_secrets_key ON enclave_secrets(key); diff --git a/migrations/2024-09-27-181732_user_created_updated/down.sql b/migrations/2024-09-27-181732_user_created_updated/down.sql new file mode 100644 index 0000000..789a640 --- /dev/null +++ b/migrations/2024-09-27-181732_user_created_updated/down.sql @@ -0,0 +1,12 @@ +-- This file should undo anything in `up.sql` + +-- Remove the trigger +DROP TRIGGER IF EXISTS trigger_update_users_updated_at ON users; + +-- Remove the trigger function +DROP FUNCTION IF EXISTS update_users_updated_at(); + +-- Remove the columns +ALTER TABLE users +DROP COLUMN IF EXISTS created_at, +DROP COLUMN IF EXISTS updated_at; diff --git a/migrations/2024-09-27-181732_user_created_updated/up.sql b/migrations/2024-09-27-181732_user_created_updated/up.sql new file mode 100644 index 0000000..945f696 --- /dev/null +++ b/migrations/2024-09-27-181732_user_created_updated/up.sql @@ -0,0 +1,23 @@ +-- Add created_at and updated_at columns +ALTER TABLE users +ADD COLUMN created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, +ADD COLUMN updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP; + +-- Create a trigger to automatically update the updated_at column +CREATE OR REPLACE FUNCTION update_users_updated_at() +RETURNS TRIGGER AS $$ +BEGIN + NEW.updated_at = CURRENT_TIMESTAMP; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER trigger_update_users_updated_at +BEFORE UPDATE ON users +FOR EACH ROW +EXECUTE FUNCTION update_users_updated_at(); + +-- Backfill existing records +UPDATE users +SET created_at = CURRENT_TIMESTAMP, + updated_at = CURRENT_TIMESTAMP; diff --git a/migrations/2024-09-30-200112_email_verification/down.sql 
b/migrations/2024-09-30-200112_email_verification/down.sql new file mode 100644 index 0000000..422405d --- /dev/null +++ b/migrations/2024-09-30-200112_email_verification/down.sql @@ -0,0 +1,12 @@ +-- Remove the trigger +DROP TRIGGER IF EXISTS trigger_update_email_verifications_updated_at ON email_verifications; + +-- Remove the trigger function +DROP FUNCTION IF EXISTS update_email_verifications_updated_at(); + +-- Remove the indexes +DROP INDEX IF EXISTS idx_email_verifications_verification_code; +DROP INDEX IF EXISTS idx_email_verifications_user_id; + +-- Drop the email_verifications table +DROP TABLE IF EXISTS email_verifications; diff --git a/migrations/2024-09-30-200112_email_verification/up.sql b/migrations/2024-09-30-200112_email_verification/up.sql new file mode 100644 index 0000000..21ec8cb --- /dev/null +++ b/migrations/2024-09-30-200112_email_verification/up.sql @@ -0,0 +1,28 @@ +-- Create the email_verifications table +CREATE TABLE email_verifications ( + id SERIAL PRIMARY KEY, + user_id UUID NOT NULL REFERENCES users(uuid), + verification_code UUID NOT NULL DEFAULT uuid_generate_v4(), + is_verified BOOLEAN NOT NULL DEFAULT FALSE, + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, + expires_at TIMESTAMP WITH TIME ZONE NOT NULL +); + +-- Create indexes for faster lookups +CREATE INDEX idx_email_verifications_user_id ON email_verifications(user_id); +CREATE INDEX idx_email_verifications_verification_code ON email_verifications(verification_code); + +-- Create a trigger to automatically update the updated_at column +CREATE OR REPLACE FUNCTION update_email_verifications_updated_at() +RETURNS TRIGGER AS $$ +BEGIN + NEW.updated_at = CURRENT_TIMESTAMP; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER trigger_update_email_verifications_updated_at +BEFORE UPDATE ON email_verifications +FOR EACH ROW +EXECUTE FUNCTION 
update_email_verifications_updated_at(); diff --git a/migrations/2024-10-14-200004_password_reset/down.sql b/migrations/2024-10-14-200004_password_reset/down.sql new file mode 100644 index 0000000..8631036 --- /dev/null +++ b/migrations/2024-10-14-200004_password_reset/down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS password_reset_requests; diff --git a/migrations/2024-10-14-200004_password_reset/up.sql b/migrations/2024-10-14-200004_password_reset/up.sql new file mode 100644 index 0000000..1feff46 --- /dev/null +++ b/migrations/2024-10-14-200004_password_reset/up.sql @@ -0,0 +1,12 @@ +CREATE TABLE password_reset_requests ( + id SERIAL PRIMARY KEY, + user_id UUID NOT NULL REFERENCES users(uuid), + hashed_secret VARCHAR(255) NOT NULL, + encrypted_code BYTEA NOT NULL, + expiration_time TIMESTAMP WITH TIME ZONE NOT NULL, + created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP NOT NULL, + is_reset BOOLEAN NOT NULL DEFAULT FALSE +); + +CREATE INDEX idx_password_reset_requests_user_id ON password_reset_requests(user_id); +CREATE INDEX idx_password_reset_requests_encrypted_code ON password_reset_requests(encrypted_code); diff --git a/migrations/2024-10-17-200717_cascade_deletes/down.sql b/migrations/2024-10-17-200717_cascade_deletes/down.sql new file mode 100644 index 0000000..7e30f70 --- /dev/null +++ b/migrations/2024-10-17-200717_cascade_deletes/down.sql @@ -0,0 +1,22 @@ +-- Revert cascading deletes from all tables referencing users + +-- Revert the foreign key constraint change on user_kv table +ALTER TABLE user_kv +DROP CONSTRAINT user_kv_user_id_fkey, +ADD CONSTRAINT user_kv_user_id_fkey + FOREIGN KEY (user_id) + REFERENCES users(uuid); + +-- Revert the foreign key constraint change on email_verifications table +ALTER TABLE email_verifications +DROP CONSTRAINT email_verifications_user_id_fkey, +ADD CONSTRAINT email_verifications_user_id_fkey + FOREIGN KEY (user_id) + REFERENCES users(uuid); + +-- Revert the foreign key constraint change on 
password_reset_requests table +ALTER TABLE password_reset_requests +DROP CONSTRAINT password_reset_requests_user_id_fkey, +ADD CONSTRAINT password_reset_requests_user_id_fkey + FOREIGN KEY (user_id) + REFERENCES users(uuid); diff --git a/migrations/2024-10-17-200717_cascade_deletes/up.sql b/migrations/2024-10-17-200717_cascade_deletes/up.sql new file mode 100644 index 0000000..1fa70c6 --- /dev/null +++ b/migrations/2024-10-17-200717_cascade_deletes/up.sql @@ -0,0 +1,25 @@ +-- Add cascading deletes to all tables referencing users + +-- Modify the foreign key constraint on user_kv table +ALTER TABLE user_kv +DROP CONSTRAINT user_kv_user_id_fkey, +ADD CONSTRAINT user_kv_user_id_fkey + FOREIGN KEY (user_id) + REFERENCES users(uuid) + ON DELETE CASCADE; + +-- Modify the foreign key constraint on email_verifications table +ALTER TABLE email_verifications +DROP CONSTRAINT email_verifications_user_id_fkey, +ADD CONSTRAINT email_verifications_user_id_fkey + FOREIGN KEY (user_id) + REFERENCES users(uuid) + ON DELETE CASCADE; + +-- Modify the foreign key constraint on password_reset_requests table +ALTER TABLE password_reset_requests +DROP CONSTRAINT password_reset_requests_user_id_fkey, +ADD CONSTRAINT password_reset_requests_user_id_fkey + FOREIGN KEY (user_id) + REFERENCES users(uuid) + ON DELETE CASCADE; diff --git a/migrations/2024-10-17-200718_oauth/down.sql b/migrations/2024-10-17-200718_oauth/down.sql new file mode 100644 index 0000000..d9c14ba --- /dev/null +++ b/migrations/2024-10-17-200718_oauth/down.sql @@ -0,0 +1,21 @@ +-- Drop triggers +DROP TRIGGER IF EXISTS trigger_update_user_oauth_connections_updated_at ON user_oauth_connections; +DROP TRIGGER IF EXISTS trigger_update_oauth_providers_updated_at ON oauth_providers; + +-- Drop functions +DROP FUNCTION IF EXISTS update_user_oauth_connections_updated_at(); +DROP FUNCTION IF EXISTS update_oauth_providers_updated_at(); + +-- Drop indexes +DROP INDEX IF EXISTS idx_user_oauth_connections_provider_id; +DROP INDEX IF 
EXISTS idx_user_oauth_connections_user_id; + +-- Drop tables +DROP TABLE IF EXISTS user_oauth_connections; +DROP TABLE IF EXISTS oauth_providers; + +-- Remove users without passwords since this was a requirement before this migration +DELETE FROM users WHERE password_enc IS NULL; + +-- Revert users table change +ALTER TABLE users ALTER COLUMN password_enc SET NOT NULL; diff --git a/migrations/2024-10-17-200718_oauth/up.sql b/migrations/2024-10-17-200718_oauth/up.sql new file mode 100644 index 0000000..1346e8c --- /dev/null +++ b/migrations/2024-10-17-200718_oauth/up.sql @@ -0,0 +1,56 @@ +-- Create oauth_providers table +CREATE TABLE oauth_providers ( + id SERIAL PRIMARY KEY, + name VARCHAR(255) NOT NULL UNIQUE, + auth_url TEXT NOT NULL, + token_url TEXT NOT NULL, + user_info_url TEXT NOT NULL, + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +-- Create user_oauth_connections table +CREATE TABLE user_oauth_connections ( + id SERIAL PRIMARY KEY, + user_id UUID NOT NULL REFERENCES users(uuid) ON DELETE CASCADE, + provider_id INTEGER NOT NULL REFERENCES oauth_providers(id), + provider_user_id VARCHAR(255) NOT NULL, + access_token_enc BYTEA NOT NULL, + refresh_token_enc BYTEA, + expires_at TIMESTAMP WITH TIME ZONE, + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, + UNIQUE (user_id, provider_id, provider_user_id) +); + +-- Alter users table to allow NULL passwords for OAuth-only users +ALTER TABLE users ALTER COLUMN password_enc DROP NOT NULL; + +-- Create indexes +CREATE INDEX idx_user_oauth_connections_user_id ON user_oauth_connections(user_id); +CREATE INDEX idx_user_oauth_connections_provider_id ON user_oauth_connections(provider_id); + +-- Create triggers for updating timestamps +CREATE OR REPLACE FUNCTION update_oauth_providers_updated_at() +RETURNS TRIGGER AS $$ 
+BEGIN + NEW.updated_at = CURRENT_TIMESTAMP; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER trigger_update_oauth_providers_updated_at +BEFORE UPDATE ON oauth_providers +FOR EACH ROW EXECUTE FUNCTION update_oauth_providers_updated_at(); + +CREATE OR REPLACE FUNCTION update_user_oauth_connections_updated_at() +RETURNS TRIGGER AS $$ +BEGIN + NEW.updated_at = CURRENT_TIMESTAMP; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER trigger_update_user_oauth_connections_updated_at +BEFORE UPDATE ON user_oauth_connections +FOR EACH ROW EXECUTE FUNCTION update_user_oauth_connections_updated_at(); diff --git a/migrations/2024-10-26-175809_token_usage/down.sql b/migrations/2024-10-26-175809_token_usage/down.sql new file mode 100644 index 0000000..da035b3 --- /dev/null +++ b/migrations/2024-10-26-175809_token_usage/down.sql @@ -0,0 +1,2 @@ +-- Drop token_usage table and related objects +DROP TABLE IF EXISTS token_usage; diff --git a/migrations/2024-10-26-175809_token_usage/up.sql b/migrations/2024-10-26-175809_token_usage/up.sql new file mode 100644 index 0000000..918fba1 --- /dev/null +++ b/migrations/2024-10-26-175809_token_usage/up.sql @@ -0,0 +1,15 @@ +-- Create token_usage table +CREATE TABLE token_usage ( + id BIGSERIAL PRIMARY KEY, + user_id UUID NOT NULL REFERENCES users(uuid) ON DELETE CASCADE, + input_tokens INTEGER NOT NULL CHECK (input_tokens >= 0), + output_tokens INTEGER NOT NULL CHECK (output_tokens >= 0), + estimated_cost DECIMAL(12, 6) NOT NULL CHECK (estimated_cost >= 0), + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +-- Create an index on user_id for faster lookups +CREATE INDEX idx_token_usage_user_id ON token_usage(user_id); + +-- Create an index on created_at for time-based queries +CREATE INDEX idx_token_usage_created_at ON token_usage(created_at); diff --git a/migrations/2024-12-02-194751_guest_user/down.sql b/migrations/2024-12-02-194751_guest_user/down.sql new file mode 100644 index 0000000..9f97a76 
--- /dev/null +++ b/migrations/2024-12-02-194751_guest_user/down.sql @@ -0,0 +1,11 @@ +-- Remove the partial unique index +DROP INDEX IF EXISTS users_email_unique; + +-- Remove guest users since email was required before this migration +DELETE FROM users WHERE email IS NULL; + +-- Make email non-null again +ALTER TABLE users ALTER COLUMN email SET NOT NULL; + +-- Restore the original unique constraint +ALTER TABLE users ADD CONSTRAINT users_email_key UNIQUE (email); diff --git a/migrations/2024-12-02-194751_guest_user/up.sql b/migrations/2024-12-02-194751_guest_user/up.sql new file mode 100644 index 0000000..39c7016 --- /dev/null +++ b/migrations/2024-12-02-194751_guest_user/up.sql @@ -0,0 +1,8 @@ +-- First drop the existing unique constraint and index on email +ALTER TABLE users DROP CONSTRAINT IF EXISTS users_email_key; + +-- Make email nullable +ALTER TABLE users ALTER COLUMN email DROP NOT NULL; + +-- Add a partial unique index that only applies to non-null emails +CREATE UNIQUE INDEX users_email_unique ON users (email) WHERE email IS NOT NULL; diff --git a/nitro-bins/kmstool_enclave_cli b/nitro-bins/kmstool_enclave_cli new file mode 100755 index 0000000..b96a8f2 Binary files /dev/null and b/nitro-bins/kmstool_enclave_cli differ diff --git a/nitro-bins/libnsm.so b/nitro-bins/libnsm.so new file mode 100755 index 0000000..97a6773 Binary files /dev/null and b/nitro-bins/libnsm.so differ diff --git a/nitro-toolkit b/nitro-toolkit new file mode 160000 index 0000000..db7398b --- /dev/null +++ b/nitro-toolkit @@ -0,0 +1 @@ +Subproject commit db7398b31593f51f7bb17973718ade5a88957c1d diff --git a/pcrDev.json b/pcrDev.json new file mode 100644 index 0000000..1597a80 --- /dev/null +++ b/pcrDev.json @@ -0,0 +1,6 @@ +{ + "HashAlgorithm": "Sha384 { ... 
}", + "PCR0": "f58409ae1bc8600c887fef5cc4055149c88c94b41c2b3e268826af7b43a1cdbacffdb2c96bf5972120c6460ab83fe89e", + "PCR1": "5039fa3d13b95dded883deed58d2a0ac63bee4f05f16e05eda0dd21e54bcd01f5e700505998b5674616ea8346ce94b29", + "PCR2": "1c3dc614330f50cd17f219abb7473d8fea736259aa550de114401b90094d751855fce279b2891c3c978023a5376aafa0" +} diff --git a/pcrProd.json b/pcrProd.json new file mode 100644 index 0000000..0925acd --- /dev/null +++ b/pcrProd.json @@ -0,0 +1,6 @@ +{ + "HashAlgorithm": "Sha384 { ... }", + "PCR0": "33ffe5cae0f72cfe904bde8019ad98efa0ce5db2800f37c5d4149461023d1f70ea77e4f58ae1327ff46ed6a34045d6e2", + "PCR1": "5039fa3d13b95dded883deed58d2a0ac63bee4f05f16e05eda0dd21e54bcd01f5e700505998b5674616ea8346ce94b29", + "PCR2": "b594414f4ea52bb0985a41442e85f72996373ec7f12898820277b5e822fa9b3c76ecfffc7068410c0eec3dbdf3072465" +} diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 0000000..ea1fe84 --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,4 @@ +[toolchain] +channel = "nightly-2024-07-25" +components = ["rustfmt", "clippy"] +profile = "default" diff --git a/src/aws_credentials.rs b/src/aws_credentials.rs new file mode 100644 index 0000000..368649e --- /dev/null +++ b/src/aws_credentials.rs @@ -0,0 +1,138 @@ +use serde::{Deserialize, Serialize}; +use std::io::{Read, Write}; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::RwLock; +use tracing::error; +use vsock::{VsockAddr, VsockStream}; + +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct AwsCredentials { + #[serde(rename = "AccessKeyId")] + pub access_key_id: String, + #[serde(rename = "SecretAccessKey")] + pub secret_access_key: String, + #[serde(rename = "Token")] + pub token: String, + #[serde(rename = "Region")] + pub region: String, +} + +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct EnclaveRequest { + pub request_type: String, + pub key_name: Option, +} + +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct ParentResponse { 
+ pub response_type: String, + pub response_value: serde_json::Value, +} + +#[derive(Debug, thiserror::Error)] +pub enum AwsCredentialError { + #[error("IO error: {0}")] + Io(#[from] std::io::Error), + + #[error("JSON error: {0}")] + Json(#[from] serde_json::Error), + + #[error("Authentication error")] + Authentication, + + #[error("Timed out waiting for credentials")] + Timeout, +} + +#[derive(Clone, Default)] +pub struct AwsCredentialManager { + credentials: Arc>>, +} + +impl AwsCredentialManager { + pub fn new() -> Self { + Self { + credentials: Arc::new(RwLock::new(None)), + } + } + + pub async fn get_credentials(&self) -> Option { + let creds = self.credentials.read().await; + creds.clone() + } + + pub async fn set_credentials(&self, credentials: AwsCredentials) { + let mut creds = self.credentials.write().await; + *creds = Some(credentials); + } + + pub async fn fetch_credentials(&self) -> Result { + tracing::debug!("Entering fetch_credentials"); + + let creds = Self::fetch_credentials_from_vsock().await?; + self.set_credentials(creds.clone()).await; + + tracing::debug!("Exiting fetch_credentials"); + Ok(creds) + } + + async fn fetch_credentials_from_vsock() -> Result { + let cid = 3; + let port = 8003; + + let sock_addr = VsockAddr::new(cid, port); + let mut stream = VsockStream::connect(&sock_addr)?; + + let request = EnclaveRequest { + request_type: "credentials".to_string(), + key_name: None, + }; + let request_json = serde_json::to_string(&request)?; + stream.write_all(request_json.as_bytes())?; + + let mut response = String::new(); + stream.read_to_string(&mut response)?; + + let parent_response: ParentResponse = serde_json::from_str(&response)?; + if parent_response.response_type == "credentials" { + let creds: AwsCredentials = serde_json::from_value(parent_response.response_value)?; + Ok(creds) + } else { + tracing::error!( + "Failed to refresh AWS credentials: {:?}", + AwsCredentialError::Authentication + ); + Err(AwsCredentialError::Authentication) 
+ } + } + + pub async fn wait_for_credentials(&self) -> AwsCredentials { + tracing::info!("Waiting for initial AWS credentials"); + let max_retries = 12; // 1 minute total with 5s delay + let mut attempts = 0; + + loop { + match self.fetch_credentials().await { + Ok(c) => return c, + Err(e) => { + attempts += 1; + if attempts >= max_retries { + tracing::error!( + "Failed to get credentials after {} attempts, giving up", + max_retries + ); + panic!("Could not obtain AWS credentials after maximum retries"); + } + tracing::error!("Failed to refresh AWS credentials: {:?}", e); + tracing::info!( + "Retrying in 5 seconds... (attempt {}/{})", + attempts, + max_retries + ); + tokio::time::sleep(Duration::from_secs(5)).await; + } + } + } + } +} diff --git a/src/billing.rs b/src/billing.rs new file mode 100644 index 0000000..0b21eae --- /dev/null +++ b/src/billing.rs @@ -0,0 +1,63 @@ +use reqwest::Client; +use serde::Deserialize; +use uuid::Uuid; + +#[derive(Debug, Deserialize)] +pub struct UsageResponse { + pub can_use: bool, +} + +#[derive(Debug, thiserror::Error)] +pub enum BillingError { + #[error("Request failed: {0}")] + RequestFailed(#[from] reqwest::Error), + #[error("Failed to parse response: {0}")] + ParseError(String), + #[error("Service error: {0}")] + ServiceError(String), +} + +#[derive(Clone)] +pub struct BillingClient { + client: Client, + api_key: String, + base_url: String, +} + +impl BillingClient { + pub fn new(api_key: String, base_url: String) -> Self { + Self { + client: Client::new(), + api_key, + base_url, + } + } + + pub async fn can_user_chat(&self, user_id: Uuid) -> Result { + let url = format!( + "{}/v1/admin/check-usage?user_id={}&product=maple", + self.base_url, user_id + ); + + let response = self + .client + .get(&url) + .header("x-api-key", &self.api_key) + .send() + .await?; + + if response.status().is_success() { + response + .json::() + .await + .map(|usage| usage.can_use) + .map_err(|e| BillingError::ParseError(e.to_string())) + } else { 
+ let error = response + .text() + .await + .unwrap_or_else(|_| "Unknown error".to_string()); + Err(BillingError::ServiceError(error)) + } + } +} diff --git a/src/db.rs b/src/db.rs new file mode 100644 index 0000000..f80a7a6 --- /dev/null +++ b/src/db.rs @@ -0,0 +1,427 @@ +use crate::models::email_verification::{ + EmailVerification, EmailVerificationError, NewEmailVerification, +}; +use crate::models::enclave_secrets::{EnclaveSecret, EnclaveSecretError, NewEnclaveSecret}; +use crate::models::oauth::{ + NewOAuthProvider, NewUserOAuthConnection, OAuthError, OAuthProvider, UserOAuthConnection, +}; +use crate::models::password_reset::{ + NewPasswordResetRequest, PasswordResetError, PasswordResetRequest, +}; +use crate::models::token_usage::{NewTokenUsage, TokenUsage, TokenUsageError}; +use crate::models::users::{NewUser, User, UserError}; +use diesel::{ + pg::PgConnection, + r2d2::{ConnectionManager, Pool}, +}; +use std::sync::Arc; +use tracing::{debug, error, info}; +use uuid::Uuid; + +#[derive(Debug, thiserror::Error)] +pub enum DBError { + #[error("Database connection error")] + ConnectionError, + #[error("Database query error: {0}")] + QueryError(#[from] diesel::result::Error), + #[error("User error: {0}")] + UserError(#[from] UserError), + #[error("User not found")] + UserNotFound, + #[error("Enclave secret error: {0}")] + EnclaveSecretError(#[from] EnclaveSecretError), + #[error("Email verification error: {0}")] + EmailVerificationError(#[from] EmailVerificationError), + #[error("Email verification not found")] + EmailVerificationNotFound, + #[error("Password reset error: {0}")] + PasswordResetError(#[from] PasswordResetError), + #[error("Password reset request not found")] + PasswordResetRequestNotFound, + #[error("Encryption error: {0}")] + EncryptionError(#[from] crate::encrypt::EncryptError), + #[error("OAuth error: {0}")] + OAuthError(#[from] OAuthError), + #[error("Token usage error: {0}")] + TokenUsageError(#[from] TokenUsageError), +} + 
+#[allow(dead_code)] +pub trait DBConnection { + fn create_user(&self, new_user: NewUser) -> Result; + fn get_user_by_uuid(&self, uuid: Uuid) -> Result; + fn get_user_by_email(&self, email: String) -> Result; + fn set_user_key(&self, user: User, private_key: Vec) -> Result<(), DBError>; + fn get_pool(&self) -> &diesel::r2d2::Pool>; + fn create_enclave_secret(&self, new_secret: NewEnclaveSecret) + -> Result; + fn get_enclave_secret_by_id(&self, id: i32) -> Result, DBError>; + fn get_enclave_secret_by_key(&self, key: &str) -> Result, DBError>; + fn get_all_enclave_secrets(&self) -> Result, DBError>; + fn update_enclave_secret(&self, secret: &EnclaveSecret) -> Result<(), DBError>; + fn delete_enclave_secret(&self, secret: &EnclaveSecret) -> Result<(), DBError>; + fn create_email_verification( + &self, + new_verification: NewEmailVerification, + ) -> Result; + fn get_email_verification_by_id(&self, id: i32) -> Result; + fn get_email_verification_by_user_id( + &self, + user_id: Uuid, + ) -> Result; + fn get_email_verification_by_code(&self, code: Uuid) -> Result; + fn update_email_verification(&self, verification: &EmailVerification) -> Result<(), DBError>; + fn delete_email_verification(&self, verification: &EmailVerification) -> Result<(), DBError>; + fn verify_email(&self, verification: &mut EmailVerification) -> Result<(), DBError>; + fn create_password_reset_request( + &self, + new_request: NewPasswordResetRequest, + ) -> Result; + fn get_password_reset_request_by_user_id_and_code( + &self, + user_id: Uuid, + encrypted_code: Vec, + ) -> Result, DBError>; + fn update_user_password( + &self, + user: &User, + new_password_enc: Option>, + ) -> Result<(), DBError>; + fn mark_password_reset_as_complete( + &self, + request: &PasswordResetRequest, + ) -> Result<(), DBError>; + + // OAuth Provider methods + fn create_oauth_provider( + &self, + new_provider: NewOAuthProvider, + ) -> Result; + fn get_oauth_provider_by_id(&self, id: i32) -> Result, DBError>; + fn 
get_oauth_provider_by_name(&self, name: &str) -> Result, DBError>; + fn get_all_oauth_providers(&self) -> Result, DBError>; + fn update_oauth_provider(&self, provider: &OAuthProvider) -> Result<(), DBError>; + fn delete_oauth_provider(&self, provider: &OAuthProvider) -> Result<(), DBError>; + + // User OAuth Connection methods + fn create_user_oauth_connection( + &self, + new_connection: NewUserOAuthConnection, + ) -> Result; + fn get_user_oauth_connection_by_id( + &self, + id: i32, + ) -> Result, DBError>; + fn get_user_oauth_connection_by_user_and_provider( + &self, + user_id: Uuid, + provider_id: i32, + ) -> Result, DBError>; + fn get_all_user_oauth_connections_for_user( + &self, + user_id: Uuid, + ) -> Result, DBError>; + fn update_user_oauth_connection(&self, connection: &UserOAuthConnection) + -> Result<(), DBError>; + fn delete_user_oauth_connection(&self, connection: &UserOAuthConnection) + -> Result<(), DBError>; + + fn create_token_usage(&self, new_usage: NewTokenUsage) -> Result; + + fn update_user(&self, user: &User) -> Result<(), DBError>; +} + +pub(crate) struct PostgresConnection { + db: Pool>, +} + +impl DBConnection for PostgresConnection { + fn create_user(&self, new_user: NewUser) -> Result { + debug!("Creating new user"); + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + let result = new_user.insert(conn).map_err(DBError::from); + if let Err(ref e) = result { + error!("Failed to create user: {:?}", e); + } + result + } + + fn get_user_by_uuid(&self, uuid: Uuid) -> Result { + debug!("Getting user by UUID"); + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + let result = User::get_by_uuid(conn, uuid)?.ok_or(DBError::UserNotFound); + if let Err(ref e) = result { + error!("Failed to get user by UUID: {:?}", e); + } + result + } + + fn get_user_by_email(&self, email: String) -> Result { + debug!("Getting user by email"); + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + let result 
= User::get_by_email(conn, email)?.ok_or(DBError::UserNotFound); + if let Err(ref e) = result { + error!("Failed to get user by email: {:?}", e); + } + result + } + + fn set_user_key(&self, user: User, private_key: Vec) -> Result<(), DBError> { + debug!("Setting user key"); + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + let result = user.set_key(conn, private_key).map_err(DBError::from); + if let Err(ref e) = result { + error!("Failed to set user key: {:?}", e); + } + result + } + + fn get_pool(&self) -> &diesel::r2d2::Pool> { + &self.db + } + + fn create_enclave_secret( + &self, + new_secret: NewEnclaveSecret, + ) -> Result { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + new_secret.insert(conn).map_err(DBError::from) + } + + fn get_enclave_secret_by_id(&self, id: i32) -> Result, DBError> { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + EnclaveSecret::get_by_id(conn, id).map_err(DBError::from) + } + + fn get_enclave_secret_by_key(&self, key: &str) -> Result, DBError> { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + EnclaveSecret::get_by_key(conn, key).map_err(DBError::from) + } + + fn get_all_enclave_secrets(&self) -> Result, DBError> { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + EnclaveSecret::get_all(conn).map_err(DBError::from) + } + + fn update_enclave_secret(&self, secret: &EnclaveSecret) -> Result<(), DBError> { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + secret.update(conn).map_err(DBError::from) + } + + fn delete_enclave_secret(&self, secret: &EnclaveSecret) -> Result<(), DBError> { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + secret.delete(conn).map_err(DBError::from) + } + + fn create_email_verification( + &self, + new_verification: NewEmailVerification, + ) -> Result { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + 
new_verification.insert(conn).map_err(DBError::from) + } + + fn get_email_verification_by_id(&self, id: i32) -> Result { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + EmailVerification::get_by_id(conn, id)?.ok_or(DBError::EmailVerificationNotFound) + } + + fn get_email_verification_by_user_id( + &self, + user_id: Uuid, + ) -> Result { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + EmailVerification::get_by_user_id(conn, user_id)?.ok_or(DBError::EmailVerificationNotFound) + } + + fn get_email_verification_by_code(&self, code: Uuid) -> Result { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + EmailVerification::get_by_verification_code(conn, code)? + .ok_or(DBError::EmailVerificationNotFound) + } + + fn update_email_verification(&self, verification: &EmailVerification) -> Result<(), DBError> { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + verification.update(conn).map_err(DBError::from) + } + + fn delete_email_verification(&self, verification: &EmailVerification) -> Result<(), DBError> { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + verification.delete(conn).map_err(DBError::from) + } + + fn verify_email(&self, verification: &mut EmailVerification) -> Result<(), DBError> { + debug!("Verifying email"); + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + let result = verification.verify(conn).map_err(DBError::from); + if let Err(ref e) = result { + error!("Failed to verify email: {:?}", e); + } + result + } + + fn create_password_reset_request( + &self, + new_request: NewPasswordResetRequest, + ) -> Result { + debug!("Creating new password reset request"); + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + let result = new_request.insert(conn).map_err(DBError::from); + if let Err(ref e) = result { + error!("Failed to create password reset request: {:?}", e); + } + result + } + + fn 
get_password_reset_request_by_user_id_and_code( + &self, + user_id: Uuid, + encrypted_code: Vec, + ) -> Result, DBError> { + debug!("Getting password reset request by user_id and encrypted code"); + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + let result = PasswordResetRequest::get_by_user_id_and_code(conn, user_id, &encrypted_code) + .map_err(DBError::from); + if let Err(ref e) = result { + error!("Failed to get password reset request: {:?}", e); + } + result + } + + fn update_user_password( + &self, + user: &User, + new_password_enc: Option>, + ) -> Result<(), DBError> { + debug!("Updating user password"); + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + let result = user + .update_password(conn, new_password_enc) + .map_err(DBError::from); + if let Err(ref e) = result { + error!("Failed to update user password: {:?}", e); + } + result + } + + fn mark_password_reset_as_complete( + &self, + request: &PasswordResetRequest, + ) -> Result<(), DBError> { + debug!("Marking password reset request as complete"); + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + let result = request.mark_as_reset(conn).map_err(DBError::from); + if let Err(ref e) = result { + error!("Failed to mark password reset request as complete: {:?}", e); + } + result + } + + // OAuth Provider method implementations + fn create_oauth_provider( + &self, + new_provider: NewOAuthProvider, + ) -> Result { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + new_provider.insert(conn).map_err(DBError::from) + } + + fn get_oauth_provider_by_id(&self, id: i32) -> Result, DBError> { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + OAuthProvider::get_by_id(conn, id).map_err(DBError::from) + } + + fn get_oauth_provider_by_name(&self, name: &str) -> Result, DBError> { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + OAuthProvider::get_by_name(conn, 
name).map_err(DBError::from) + } + + fn get_all_oauth_providers(&self) -> Result, DBError> { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + OAuthProvider::get_all(conn).map_err(DBError::from) + } + + fn update_oauth_provider(&self, provider: &OAuthProvider) -> Result<(), DBError> { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + provider.update(conn).map_err(DBError::from) + } + + fn delete_oauth_provider(&self, provider: &OAuthProvider) -> Result<(), DBError> { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + provider.delete(conn).map_err(DBError::from) + } + + // User OAuth Connection method implementations + fn create_user_oauth_connection( + &self, + new_connection: NewUserOAuthConnection, + ) -> Result { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + new_connection.insert(conn).map_err(DBError::from) + } + + fn get_user_oauth_connection_by_id( + &self, + id: i32, + ) -> Result, DBError> { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + UserOAuthConnection::get_by_id(conn, id).map_err(DBError::from) + } + + fn get_user_oauth_connection_by_user_and_provider( + &self, + user_id: Uuid, + provider_id: i32, + ) -> Result, DBError> { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + UserOAuthConnection::get_by_user_and_provider(conn, user_id, provider_id) + .map_err(DBError::from) + } + + fn get_all_user_oauth_connections_for_user( + &self, + user_id: Uuid, + ) -> Result, DBError> { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + UserOAuthConnection::get_all_for_user(conn, user_id).map_err(DBError::from) + } + + fn update_user_oauth_connection( + &self, + connection: &UserOAuthConnection, + ) -> Result<(), DBError> { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + connection.update(conn).map_err(DBError::from) + } + + fn delete_user_oauth_connection( + &self, + 
connection: &UserOAuthConnection, + ) -> Result<(), DBError> { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + connection.delete(conn).map_err(DBError::from) + } + + fn create_token_usage(&self, new_usage: NewTokenUsage) -> Result { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + new_usage.insert(conn).map_err(DBError::from) + } + + fn update_user(&self, user: &User) -> Result<(), DBError> { + let conn = &mut self.db.get().map_err(|_| DBError::ConnectionError)?; + user.update(conn).map_err(DBError::from) + } +} + +pub(crate) fn setup_db(url: String) -> Arc { + info!("Connecting to database..."); + let manager = ConnectionManager::::new(url); + // TODO make pool size bigger, just for testing connection issues + let pool = Pool::builder() + .max_size(1) // should be a multiple of 100, our database connection limit + .test_on_check_out(true) + .build(manager) + .expect("Unable to build DB connection pool"); + info!("Connected to database"); + Arc::new(PostgresConnection { db: pool }) +} diff --git a/src/email.rs b/src/email.rs new file mode 100644 index 0000000..ecdf38c --- /dev/null +++ b/src/email.rs @@ -0,0 +1,304 @@ +use crate::AppMode; +use chrono::{Duration, Utc}; +use resend_rs::types::CreateEmailBaseOptions; +use resend_rs::{Resend, Result}; +use tracing::error; + +#[derive(Debug, thiserror::Error)] +pub enum EmailError { + #[error("Unknown Email error")] + UnknownError, + #[error("Resend API key not found")] + ApiKeyNotFound, +} + +const WELCOME_EMAIL_HTML: &str = r#" + + + + + + Welcome to Maple AI + + + +
+

Welcome to Maple AI!

+

We're thrilled to have you join us during our private beta.

+ +

Just as Maple trees thrive through their discreet underground communication network of fungal hyphae, Maple AI empowers you to flourish in the digital world while maintaining your privacy.

+ +

Your Secure, AI-Powered Second Brain

+

Maple AI is designed with privacy and security at its core, helping you:

+
    +
  • Organize your thoughts
  • +
  • Enhance your creativity
  • +
  • Boost your productivity
  • +
+

With Maple AI, you can chat with your notes, create new ideas, and connect concepts effortlessly, all while maintaining complete control over your data.

+ +
+

Privacy: Our Core Value

+
    +
  • Confidential Compute: Secure enclaves ensure we can't see your requests.
  • +
  • End-to-End Encryption: Your chat history is synced with a private key we can't access.
  • +
  • Encrypted GPU: Enjoy a private 1:1 conversation with your AI companion.
  • +
+
+ +

At OpenSecret, we believe privacy is essential for trusted conversations – not just with people, but also with AI. By prioritizing your privacy, we're creating a more secure world that puts individual needs and values at its core.

+ +

We hope you enjoy using Maple AI, knowing that your sensitive discussions and data are protected at every step. Your privacy is not just a feature – it's our mission.

+ +

As we're in private beta, your feedback is incredibly valuable. If you encounter any issues or have suggestions, please reach out to us at team@opensecret.cloud.

+ +

Thank you for being an early adopter and helping us shape the future of secure, AI-powered productivity!

+ +

Best regards,
The OpenSecret Team

+
+ + +"#; + +pub async fn send_hello_email( + app_mode: AppMode, + resend_api_key: Option, + to_email: String, +) -> Result<(), EmailError> { + tracing::debug!("Entering send_hello_email"); + if resend_api_key.is_none() { + return Err(EmailError::ApiKeyNotFound); + } + let api_key = resend_api_key.expect("just checked"); + + let resend = Resend::new(&api_key); + + let from = from_email(app_mode); + let to = [to_email]; + let subject = "Welcome to Maple!"; + + // Schedule the email to be sent 5 minutes from now + let scheduled_time = Utc::now() + Duration::minutes(5); + let scheduled_at = scheduled_time.to_rfc3339(); + + let email = CreateEmailBaseOptions::new(from, to, subject) + .with_html(WELCOME_EMAIL_HTML) + .with_scheduled_at(&scheduled_at); + + let _email = resend.emails.send(email).await.map_err(|e| { + tracing::error!("Failed to send email: {}", e); + EmailError::UnknownError + }); + + tracing::debug!("Exiting send_hello_email"); + Ok(()) +} + +pub async fn send_verification_email( + app_mode: AppMode, + resend_api_key: Option, + to_email: String, + verification_code: uuid::Uuid, +) -> Result<(), EmailError> { + tracing::debug!("Entering send_verification_email"); + if resend_api_key.is_none() { + return Err(EmailError::ApiKeyNotFound); + } + let api_key = resend_api_key.expect("just checked"); + + let resend = Resend::new(&api_key); + + let from = from_email(app_mode.clone()); + let to = [to_email]; + let subject = "Verify Your Maple AI Account"; + + let base_url = match app_mode { + AppMode::Local => "http://localhost:5173", + AppMode::Dev => "https://dev.secretgpt.ai", + AppMode::Preview => "https://opensecret.cloud", + AppMode::Prod => "https://trymaple.ai", + AppMode::Custom(_) => "https://preview.opensecret.cloud", + }; + + let verification_url = format!("{}/verify/{}", base_url, verification_code); + + let html_content = format!( + r#" + + + + + + Verify Your Maple AI Account + + + +
+

Welcome to Maple AI!

+

Thank you for registering. To complete your account setup, please verify your email address by clicking the button below:

+

+ Verify Your Email +

+

If the button doesn't work, you can copy and paste the following link into your browser:

+

{}

+

Alternatively, you can use the following verification code:

+

{}

+

This verification link and code will expire in 24 hours.

+

If you didn't create an account with Maple AI, please ignore this email.

+

Best regards,
The OpenSecret Team

+
+ + + "#, + verification_url, verification_url, verification_code + ); + + let email = CreateEmailBaseOptions::new(from, to, subject).with_html(&html_content); + + let _email = resend.emails.send(email).await.map_err(|e| { + tracing::error!("Failed to send email: {}", e); + EmailError::UnknownError + }); + + tracing::debug!("Exiting send_verification_email"); + Ok(()) +} + +pub async fn send_password_reset_email( + app_mode: AppMode, + resend_api_key: Option, + to_email: String, + alphanumeric_code: String, +) -> Result<(), EmailError> { + tracing::debug!("Entering send_password_reset_email"); + if resend_api_key.is_none() { + return Err(EmailError::ApiKeyNotFound); + } + let api_key = resend_api_key.expect("just checked"); + + let resend = Resend::new(&api_key); + + let from = from_email(app_mode); + let to = [to_email]; + let subject = "Reset Your Maple AI Password"; + + let html_content = format!( + r#" + + + + + + Reset Your Maple AI Password + + + +
+

Reset Your Maple AI Password

+

We received a request to reset your Maple AI account password. If you didn't make this request, you can ignore this email.

+

To reset your password, use the following code:

+

{}

+

This code will expire in 24 hours.

+

If you have any issues, please contact our support team.

+

Best regards,
The OpenSecret Team

+
+ + + "#, + alphanumeric_code + ); + + let email = CreateEmailBaseOptions::new(from, to, subject).with_html(&html_content); + + let _email = resend.emails.send(email).await.map_err(|e| { + tracing::error!("Failed to send email: {}", e); + EmailError::UnknownError + }); + + tracing::debug!("Exiting send_password_reset_email"); + Ok(()) +} + +pub async fn send_password_reset_confirmation_email( + app_mode: AppMode, + resend_api_key: Option, + to_email: String, +) -> Result<(), EmailError> { + tracing::debug!("Entering send_password_reset_confirmation_email"); + if resend_api_key.is_none() { + return Err(EmailError::ApiKeyNotFound); + } + let api_key = resend_api_key.expect("just checked"); + + let resend = Resend::new(&api_key); + + let from = from_email(app_mode); + let to = [to_email]; + let subject = "Your Maple AI Password Has Been Reset"; + + let html_content = r#" + + + + + + Password Reset Confirmation + + + +
+

Password Reset Confirmation

+

Your Maple AI account password has been successfully reset.

+

If you did not initiate this password reset, please contact us immediately at support@trymaple.ai.

+

For security reasons, we recommend that you:

+
    +
  • Change your password again if you suspect any unauthorized access.
  • +
  • Review your account activity for any suspicious actions.
  • +
+

If you have any questions or concerns, please don't hesitate to reach out to our support team.

+

Best regards,
The Maple AI Team

+
+ + + "#; + + let email = CreateEmailBaseOptions::new(from, to, subject).with_html(html_content); + + let _email = resend.emails.send(email).await.map_err(|e| { + tracing::error!("Failed to send email: {}", e); + EmailError::UnknownError + }); + + tracing::debug!("Exiting send_password_reset_confirmation_email"); + Ok(()) +} + +fn from_email(app_mode: AppMode) -> String { + match app_mode { + AppMode::Local => "local@email.trymaple.ai".to_string(), + AppMode::Dev => "dev@email.trymaple.ai".to_string(), + AppMode::Preview => "preview@email.trymaple.ai".to_string(), + AppMode::Prod => "hello@email.trymaple.ai".to_string(), + AppMode::Custom(_) => "preview@email.trymaple.ai".to_string(), + } +} diff --git a/src/encrypt.rs b/src/encrypt.rs new file mode 100644 index 0000000..b2a7d82 --- /dev/null +++ b/src/encrypt.rs @@ -0,0 +1,369 @@ +use aes_gcm::{ + aead::{Aead as GcmAead, KeyInit as GcmKeyInit}, + Aes256Gcm, Nonce as GcmNonce, +}; +use aes_siv::{Aes256SivAead, Nonce as SivNonce}; +use base64::{engine::general_purpose::STANDARD, Engine as _}; +use generic_array::typenum; +use generic_array::GenericArray; +use rand_core::RngCore; +use secp256k1::rand::rngs::OsRng; +use secp256k1::SecretKey; +use sha2::{Digest, Sha512}; +use std::{process::Command, sync::Arc}; +use tokio::sync::Mutex; +use tracing::error; + +use crate::aws_credentials::AwsCredentialManager; + +#[derive(Debug, thiserror::Error)] +pub enum EncryptError { + #[error("Failed to decrypt")] + FailedToDecrypt, + #[error("Bad data")] + BadData, + #[error("KMS encryption failed: {0}")] + KmsError(String), +} + +pub async fn encrypt_with_key(encryption_key: &SecretKey, bytes: &[u8]) -> Vec { + tracing::debug!("Entering encrypt_with_key"); + let cipher = Aes256Gcm::new_from_slice(&encryption_key.secret_bytes()).expect("should convert"); + + // Generate a random 96-bit nonce + let nonce: [u8; 12] = generate_random::<12>(); + + let nonce = GcmNonce::from_slice(&nonce); + + let ciphertext = cipher.encrypt(nonce, 
bytes).expect("should encrypt"); + + // Combine nonce and ciphertext + let mut encrypted = nonce.to_vec(); + encrypted.extend(ciphertext); + + tracing::debug!("Exiting encrypt_with_key"); + encrypted +} + +pub fn decrypt_with_key(encryption_key: &SecretKey, bytes: &[u8]) -> Result, EncryptError> { + tracing::debug!("Entering decrypt_with_key"); + if bytes.len() < 12 { + return Err(EncryptError::BadData); + } + + // The first 12 bytes are the nonce + let nonce = GcmNonce::from_slice(&bytes[..12]); + + // The rest is the ciphertext + let ciphertext = &bytes[12..]; + + let cipher = Aes256Gcm::new_from_slice(&encryption_key.secret_bytes()) + .map_err(|_| EncryptError::FailedToDecrypt)?; + + tracing::debug!("Exiting decrypt_with_key"); + cipher + .decrypt(nonce, ciphertext) + .map_err(|_| EncryptError::FailedToDecrypt) +} + +pub fn encrypt_key_deterministic(encryption_key: &SecretKey, key: &[u8]) -> Vec { + let key_bytes: [u8; 32] = encryption_key.secret_bytes(); + let extended_key = extend_key(&key_bytes); + let cipher = Aes256SivAead::new(&extended_key); + let nonce = SivNonce::default(); + cipher.encrypt(&nonce, key).expect("encryption failure!") +} + +pub fn decrypt_key_deterministic( + encryption_key: &SecretKey, + encrypted: &[u8], +) -> Result, EncryptError> { + let key_bytes: [u8; 32] = encryption_key.secret_bytes(); + let extended_key = extend_key(&key_bytes); + let cipher = Aes256SivAead::new(&extended_key); + let nonce = SivNonce::default(); + cipher + .decrypt(&nonce, encrypted) + .map_err(|_| EncryptError::FailedToDecrypt) +} + +fn extend_key(key: &[u8; 32]) -> GenericArray { + let mut hasher = Sha512::new(); + hasher.update(key); + GenericArray::clone_from_slice(&hasher.finalize()) +} + +pub fn decrypt_with_kms( + aws_region: &str, + aws_key_id: &str, + aws_secret_key: &str, + aws_session_token: &str, + ciphertext: &str, +) -> Result, EncryptError> { + tracing::debug!("Attempting KMS decryption"); + let output = Command::new("/bin/kmstool_enclave_cli") + 
.arg("decrypt") + .arg("--region") + .arg(aws_region) + .arg("--proxy-port") + .arg("8000") + .arg("--aws-access-key-id") + .arg(aws_key_id) + .arg("--aws-secret-access-key") + .arg(aws_secret_key) + .arg("--aws-session-token") + .arg(aws_session_token) + .arg("--ciphertext") + .arg(ciphertext) + .output() + .map_err(|e| { + tracing::error!( + "Failed to execute kmstool_enclave_cli for decryption: {}", + e + ); + EncryptError::KmsError(e.to_string()) + })?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + tracing::error!("kmstool_enclave_cli decryption failed: {}", stderr); + return Err(EncryptError::KmsError(stderr.to_string())); + } + + let output_str = + String::from_utf8(output.stdout).map_err(|e| EncryptError::KmsError(e.to_string()))?; + + let plaintext_b64 = output_str + .strip_prefix("PLAINTEXT: ") + .ok_or_else(|| EncryptError::KmsError("Failed to parse plaintext".to_string()))? + .trim(); + + STANDARD + .decode(plaintext_b64) + .map_err(|e| EncryptError::KmsError(format!("Failed to decode base64: {}", e))) +} + +#[derive(Debug)] +pub struct GenKeyResult { + pub key: Vec, + pub encrypted_key: Vec, +} + +pub fn create_new_encryption_key( + aws_region: &str, + aws_key_id: &str, + aws_secret_key: &str, + aws_session_token: &str, + aws_kms_key_id: &str, +) -> Result { + tracing::info!("Creating new encryption key"); + tracing::debug!("Attempting to run kmstool_enclave_cli"); + let output = Command::new("/bin/kmstool_enclave_cli") + .arg("genkey") + .arg("--region") + .arg(aws_region) + .arg("--proxy-port") + .arg("8000") + .arg("--aws-access-key-id") + .arg(aws_key_id) + .arg("--aws-secret-access-key") + .arg(aws_secret_key) + .arg("--aws-session-token") + .arg(aws_session_token) + .arg("--key-id") + .arg(aws_kms_key_id) + .arg("--key-spec") + .arg("AES-256") + .output() + .map_err(|e| { + tracing::error!("Failed to execute kmstool_enclave_cli: {}", e); + EncryptError::KmsError(e.to_string()) + })?; + + if 
!output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + tracing::error!("kmstool_enclave_cli failed: {}", stderr); + return Err(EncryptError::KmsError(stderr.to_string())); + } + + let output_str = + String::from_utf8(output.stdout).map_err(|e| EncryptError::KmsError(e.to_string()))?; + let lines: Vec<&str> = output_str.lines().collect(); + + let encrypted_key_b64 = lines[0] + .split(": ") + .nth(1) + .ok_or_else(|| EncryptError::KmsError("Failed to parse encrypted key".to_string()))?; + let plaintext_key_b64 = lines[1] + .split(": ") + .nth(1) + .ok_or_else(|| EncryptError::KmsError("Failed to parse plaintext key".to_string()))?; + + let encrypted_key = STANDARD + .decode(encrypted_key_b64) + .map_err(|e| EncryptError::KmsError(format!("Failed to decode encrypted key: {}", e)))?; + + let plaintext_key = STANDARD + .decode(plaintext_key_b64) + .map_err(|e| EncryptError::KmsError(e.to_string()))?; + + Ok(GenKeyResult { + encrypted_key, + key: plaintext_key, + }) +} + +pub fn generate_random() -> [u8; LENGTH] { + let mut buffer = [0u8; LENGTH]; + getrandom::getrandom(&mut buffer).expect("Failed to generate random bytes"); + buffer +} + +pub async fn generate_random_enclave( + aws_credential_manager: Arc>>, +) -> [u8; LENGTH] { + let nonce = if let Some(cred_manager) = aws_credential_manager.read().await.as_ref().cloned() { + let aws_creds = cred_manager + .get_credentials() + .await + .expect("should have creds"); + + generate_random_bytes_from_enclave( + &aws_creds.region, + &aws_creds.access_key_id, + &aws_creds.secret_access_key, + &aws_creds.token, + LENGTH, + ) + .await + .expect("should generate random bytes") + } else { + // Use OS random if aws_credential_manager is None + let mut nonce = [0u8; LENGTH]; + OsRng.fill_bytes(&mut nonce); + nonce.to_vec() + }; + nonce.try_into().expect("Length mismatch") +} + +pub async fn generate_random_bytes_from_enclave( + aws_region: &str, + aws_key_id: &str, + aws_secret_key: &str, + 
aws_session_token: &str, + length: usize, +) -> Result, EncryptError> { + tracing::debug!("Attempting to run kmstool_enclave_cli for random byte generation"); + let output = Command::new("/bin/kmstool_enclave_cli") + .arg("genrandom") + .arg("--region") + .arg(aws_region) + .arg("--proxy-port") + .arg("8000") + .arg("--aws-access-key-id") + .arg(aws_key_id) + .arg("--aws-secret-access-key") + .arg(aws_secret_key) + .arg("--aws-session-token") + .arg(aws_session_token) + .arg("--length") + .arg(length.to_string()) + .output() + .map_err(|e| { + tracing::error!( + "Failed to execute kmstool_enclave_cli for random byte generation: {}", + e + ); + EncryptError::KmsError(e.to_string()) + })?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + tracing::error!( + "kmstool_enclave_cli random byte generation failed: {}", + stderr + ); + return Err(EncryptError::KmsError(stderr.to_string())); + } + + let output_str = + String::from_utf8(output.stdout).map_err(|e| EncryptError::KmsError(e.to_string()))?; + + let plaintext_b64 = output_str + .strip_prefix("PLAINTEXT: ") + .ok_or_else(|| EncryptError::KmsError("Failed to parse plaintext".to_string()))? 
+ .trim(); + + STANDARD + .decode(plaintext_b64) + .map_err(|e| EncryptError::KmsError(format!("Failed to decode base64: {}", e))) +} + +pub struct CustomRng { + buffer: Mutex>, +} + +impl CustomRng { + pub fn new() -> Self { + CustomRng { + buffer: Mutex::new(Vec::new()), + } + } + + async fn fill_buffer(&self) { + let bytes: [u8; 1024] = generate_random(); + let mut buffer = self.buffer.lock().await; + buffer.extend_from_slice(&bytes); + } + + pub async fn fill_bytes(&self, dest: &mut [u8]) { + let mut buffer = self.buffer.lock().await; + while buffer.len() < dest.len() { + drop(buffer); // Release the lock before filling the buffer + self.fill_buffer().await; + buffer = self.buffer.lock().await; + } + + let n = dest.len(); + dest.copy_from_slice(&buffer[..n]); + *buffer = buffer[n..].to_vec(); + } + + pub async fn next_u32(&self) -> u32 { + let mut bytes = [0u8; 4]; + self.fill_bytes(&mut bytes).await; + u32::from_le_bytes(bytes) + } + + pub async fn next_u64(&self) -> u64 { + let mut bytes = [0u8; 8]; + self.fill_bytes(&mut bytes).await; + u64::from_le_bytes(bytes) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_encryption_with_key() { + let key = SecretKey::from_slice(&[1u8; 32]).unwrap(); + let content = [6u8; 32].to_vec(); + + let encrypted = encrypt_with_key(&key, &content).await; + + let decrypted = decrypt_with_key(&key, &encrypted).unwrap(); + assert_eq!(content, decrypted); + } + + #[test] + fn test_deterministic_encryption() { + let key = SecretKey::from_slice(&[1u8; 32]).unwrap(); + let content = b"test_key"; + + let encrypted = encrypt_key_deterministic(&key, content); + let decrypted = decrypt_key_deterministic(&key, &encrypted).unwrap(); + assert_eq!(content.to_vec(), decrypted); + } +} diff --git a/src/jwt.rs b/src/jwt.rs new file mode 100644 index 0000000..16350c1 --- /dev/null +++ b/src/jwt.rs @@ -0,0 +1,297 @@ +use crate::aws_credentials::AwsCredentialManager; +use 
crate::encrypt::generate_random_bytes_from_enclave; +use crate::Error; +use std::sync::Arc; + +use axum::{ + body::Body, + extract::{Request, State}, + http::header, + middleware::Next, + response::IntoResponse, +}; +use chrono::Duration; +use jwt_compact::{alg::Es256k, prelude::*, AlgorithmExt}; +use secp256k1::{All, PublicKey, Secp256k1, SecretKey}; +use serde::{Deserialize, Serialize}; +use sha2::Sha256; +use uuid::Uuid; + +use crate::AppMode; +use crate::{ApiError, AppState, User}; +use url::Url; + +pub enum TokenType { + Access, + Refresh, + ThirdParty { aud: String, azp: String }, +} + +#[derive(Debug, Clone)] +pub struct NewToken { + pub token: String, +} + +#[derive(Debug, Clone)] +pub struct JwtKeys { + pub(crate) legacy_secret: Vec, // For old HMAC tokens + signing_key: SecretKey, // For ES256K + secp: Secp256k1, +} + +impl JwtKeys { + pub fn new(secret_bytes: Vec) -> Result { + let secp = Secp256k1::new(); // Creates All context + let signing_key = SecretKey::from_slice(&secret_bytes[..32]) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + + Ok(Self { + legacy_secret: secret_bytes, + signing_key, + secp, + }) + } + + pub fn public_key(&self) -> PublicKey { + PublicKey::from_secret_key(&self.secp, &self.signing_key) + } +} + +#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] +pub struct CustomClaims { + pub sub: String, + pub aud: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub azp: Option, +} + +impl TokenType { + pub fn validate_third_party_audience(aud: &str, app_mode: &AppMode) -> Result<(), ApiError> { + // Parse the URL first + let url = Url::parse(aud).map_err(|_| { + tracing::error!("Invalid audience URL format: {}", aud); + ApiError::BadRequest + })?; + + // Allow localhost/127.0.0.1/0.0.0.0 in local mode + if matches!(app_mode, AppMode::Local) { + let host = url.host_str().unwrap_or_default(); + if host == "localhost" || host == "127.0.0.1" || host == "0.0.0.0" { + return Ok(()); + } + } + + // Define allowed 
production/staging domains + const ALLOWED_DOMAINS: [&str; 2] = + ["billing.opensecret.cloud", "billing-dev.opensecret.cloud"]; + + if ALLOWED_DOMAINS.contains(&url.host_str().unwrap_or_default()) { + Ok(()) + } else { + tracing::error!( + "Unauthorized audience domain: {}", + url.host_str().unwrap_or_default() + ); + Err(ApiError::BadRequest) + } + } +} + +impl NewToken { + pub fn new(user: &User, token_type: TokenType, app_state: &AppState) -> Result { + let (aud, azp, duration) = match token_type { + TokenType::Access => ( + "access".to_string(), + None, + Duration::minutes(app_state.config.access_token_maxage), + ), + TokenType::Refresh => ( + "refresh".to_string(), + None, + Duration::days(app_state.config.refresh_token_maxage), + ), + TokenType::ThirdParty { aud, azp } => { + // Validate the audience URL against allowed domains + TokenType::validate_third_party_audience(&aud, &app_state.app_mode)?; + + // For now, enforce that azp must be "maple" + if azp != "maple" { + return Err(ApiError::BadRequest); + } + (aud, Some(azp), Duration::hours(1)) + } + }; + + let custom_claims = CustomClaims { + sub: user.get_id().to_string(), + aud, + azp, + }; + + tracing::debug!("Creating new token with claims: {:?}", custom_claims); + + let time_options = TimeOptions::default(); + let claims = Claims::new(custom_claims).set_duration_and_issuance(&time_options, duration); + + // Create header with typ field + let header = Header::empty().with_token_type("JWT"); + + let es256k = Es256k::::new(app_state.config.jwt_keys.secp.clone()); + + let token_string = es256k + .token(&header, &claims, &app_state.config.jwt_keys.signing_key) + .map_err(|e| { + tracing::error!("Error creating token: {:?}", e); + ApiError::InternalServerError + })?; + + tracing::debug!("Successfully created token"); + + Ok(Self { + token: token_string, + }) + } +} + +pub async fn generate_jwt_secret( + aws_credential_manager: Arc>>, +) -> Result, Error> { + tracing::info!("Generating new JWT secret"); + if 
let Some(cred_manager) = aws_credential_manager.read().await.as_ref().cloned() { + let aws_creds = cred_manager + .get_credentials() + .await + .expect("should have creds"); + + generate_random_bytes_from_enclave( + &aws_creds.region, + &aws_creds.access_key_id, + &aws_creds.secret_access_key, + &aws_creds.token, + 32, + ) + .await + .map_err(|e| Error::EncryptionError(e.to_string())) + } else { + Ok(crate::encrypt::generate_random::<32>().to_vec()) + } +} + +pub async fn validate_jwt( + State(data): State>, + mut req: Request, + next: Next, +) -> impl IntoResponse { + tracing::debug!("Entering validate_jwt"); + let token = match req + .headers() + .get(header::AUTHORIZATION) + .and_then(|auth_header| auth_header.to_str().ok()) + .and_then(|auth_value| auth_value.strip_prefix("Bearer ").map(ToString::to_string)) + { + Some(token) => token, + None => return ApiError::InvalidJwt.into_response(), + }; + + tracing::trace!("Validating JWT"); + + let claims = match validate_token(&token, &data, "access") { + Ok(claims) => claims, + Err(_) => return ApiError::InvalidJwt.into_response(), + }; + + let user_uuid: Uuid = match Uuid::parse_str(&claims.sub) { + Ok(uuid) => uuid, + Err(e) => { + tracing::error!("Error parsing user uuid: {:?}", e); + return ApiError::InvalidJwt.into_response(); + } + }; + + let user = match data.get_user(user_uuid).await { + Ok(user) => user, + Err(e) => { + tracing::error!("Error getting user: {:?}", e); + return ApiError::InternalServerError.into_response(); + } + }; + + req.extensions_mut().insert(user); + tracing::debug!("Exiting validate_jwt"); + next.run(req).await +} + +pub(crate) fn validate_token( + original_token: &str, + data: &AppState, + expected_audience: &str, +) -> Result { + // Try ES256K first + let es256k = Es256k::::new(data.config.jwt_keys.secp.clone()); + let public_key = data.config.jwt_keys.public_key(); + + tracing::trace!("Attempting to validate ES256K token"); + + // First parse the token with the correct type + let 
parsed_token = match UntrustedToken::new(original_token) { + Ok(token) => token, + Err(e) => { + tracing::error!("Failed to parse token: {:?}", e); + return Err(ApiError::InvalidJwt); + } + }; + + // Deserialize claims first + let token: Token = match es256k.validator(&public_key).validate(&parsed_token) { + Ok(token) => { + tracing::trace!("ES256K signature validation successful"); + + // Only validate expiration, not maturity + let time_options = TimeOptions::default(); + if let Err(e) = token.claims().validate_expiration(&time_options) { + tracing::error!("Token expired: {:?}", e); + return Err(ApiError::InvalidJwt); + } + + // Validate audience with proper type annotation + let claims: &Claims = token.claims(); + if claims.custom.aud != expected_audience { + tracing::error!( + "Invalid audience: got {}, expected {}", + claims.custom.aud, + expected_audience + ); + return Err(ApiError::InvalidJwt); + } + + token + } + Err(e) => { + tracing::debug!("ES256K validation failed: {:?}, trying legacy HMAC", e); + + // Try legacy HMAC validation + use jsonwebtoken::{decode, DecodingKey, Validation}; + let mut hmac_validation = Validation::default(); + hmac_validation.validate_exp = true; + hmac_validation.set_audience(&[expected_audience]); // Only accept expected audience + + match decode::( + original_token, + &DecodingKey::from_secret(&data.config.jwt_keys.legacy_secret), + &hmac_validation, + ) { + Ok(token_data) => { + tracing::debug!("Legacy HMAC validation successful"); + return Ok(token_data.claims); + } + Err(e) => { + tracing::error!("Legacy HMAC validation failed: {:?}", e); + return Err(ApiError::InvalidJwt); + } + } + } + }; + + // Return the claims + Ok(token.claims().custom.clone()) +} diff --git a/src/kv.rs b/src/kv.rs new file mode 100644 index 0000000..e3a590d --- /dev/null +++ b/src/kv.rs @@ -0,0 +1,152 @@ +use crate::encrypt::{ + decrypt_key_deterministic, decrypt_with_key, encrypt_key_deterministic, encrypt_with_key, +}; +use crate::{ + 
aws_credentials::AwsCredentialManager, + models::user_kv::{NewUserKV, UserKV, UserKVError}, +}; +use diesel::prelude::*; +use secp256k1::SecretKey; +use serde::Serialize; +use std::sync::Arc; +use thiserror::Error; +use tracing::{debug, error}; +use uuid::Uuid; + +#[derive(Error, Debug)] +pub enum StoreError { + #[error("Key not found: {0}")] + KeyNotFound(String), + #[error("Unauthorized access")] + Unauthorized, + #[error("Decryption error")] + DecryptionError, + #[error("Database error: {0}")] + DatabaseError(#[from] UserKVError), +} + +pub type StoreResult = Result; + +#[derive(Debug, Clone, Serialize)] +pub struct KVPair { + pub key: String, + pub value: String, + pub created_at: i64, + pub updated_at: i64, +} + +// Update the get function +pub fn get( + pool: &diesel::r2d2::Pool>, + user_id: Uuid, + key: &str, + user_secret_key: &SecretKey, +) -> StoreResult> { + debug!("Getting KV pair"); + let mut conn = pool.get().map_err(|e| { + error!("Failed to get database connection: {:?}", e); + StoreError::DatabaseError(UserKVError::DatabaseError(diesel::result::Error::NotFound)) + })?; + + let encrypted_key = encrypt_key_deterministic(user_secret_key, key.as_bytes()); + + let user_kv = UserKV::get_by_user_and_key(&mut conn, user_id, &encrypted_key).map_err(|e| { + error!("Failed to get KV pair: {:?}", e); + e + })?; + + if let Some(user_kv) = user_kv { + let decrypted_value = + decrypt_with_key(user_secret_key, &user_kv.value_enc).map_err(|e| { + error!("Failed to decrypt value: {:?}", e); + StoreError::DecryptionError + })?; + let value_str = String::from_utf8(decrypted_value).map_err(|e| { + error!("Failed to convert decrypted value to string: {:?}", e); + StoreError::DecryptionError + })?; + Ok(Some(value_str)) + } else { + Ok(None) + } +} + +pub async fn put( + pool: &diesel::r2d2::Pool>, + user_id: Uuid, + key: String, + value: String, + encryption_key: &SecretKey, + _aws_credential_manager: Arc>>, +) -> StoreResult<()> { + let mut conn = 
pool.get().map_err(|_| { + StoreError::DatabaseError(UserKVError::DatabaseError(diesel::result::Error::NotFound)) + })?; + + let encrypted_key = encrypt_key_deterministic(encryption_key, key.as_bytes()); + let encrypted_value = encrypt_with_key(encryption_key, value.as_bytes()).await; + + let new_user_kv = NewUserKV { + user_id, + key_enc: encrypted_key, + value_enc: encrypted_value, + }; + + new_user_kv.insert(&mut conn)?; + + Ok(()) +} + +pub fn delete( + pool: &diesel::r2d2::Pool>, + user_id: Uuid, + key: &str, + user_secret_key: &SecretKey, +) -> StoreResult<()> { + let mut conn = pool.get().map_err(|_| { + StoreError::DatabaseError(UserKVError::DatabaseError(diesel::result::Error::NotFound)) + })?; + + let encrypted_key = encrypt_key_deterministic(user_secret_key, key.as_bytes()); + + let user_kv = UserKV::get_by_user_and_key(&mut conn, user_id, &encrypted_key)?; + + if let Some(user_kv) = user_kv { + user_kv.delete(&mut conn)?; + Ok(()) + } else { + Err(StoreError::KeyNotFound(key.to_string())) + } +} + +pub fn list( + pool: &diesel::r2d2::Pool>, + user_id: Uuid, + user_secret_key: &SecretKey, +) -> StoreResult> { + let mut conn = pool.get().map_err(|_| { + StoreError::DatabaseError(UserKVError::DatabaseError(diesel::result::Error::NotFound)) + })?; + let user_kvs = UserKV::get_all_for_user(&mut conn, user_id)?; + let mut pairs = Vec::new(); + for user_kv in user_kvs { + let decrypted_key = decrypt_key_deterministic(user_secret_key, &user_kv.key_enc) + .map_err(|_| StoreError::DecryptionError)?; + let key = String::from_utf8(decrypted_key).map_err(|_| StoreError::DecryptionError)?; + + let decrypted_value = decrypt_with_key(user_secret_key, &user_kv.value_enc) + .map_err(|_| StoreError::DecryptionError)?; + let value = String::from_utf8(decrypted_value).map_err(|_| StoreError::DecryptionError)?; + + let created_at = user_kv.created_at.timestamp_millis(); + let updated_at = user_kv.updated_at.timestamp_millis(); + + pairs.push(KVPair { + key, + value, + 
created_at, + updated_at, + }); + } + Ok(pairs) +} diff --git a/src/main.rs b/src/main.rs new file mode 100644 index 0000000..2e44152 --- /dev/null +++ b/src/main.rs @@ -0,0 +1,1873 @@ +use crate::billing::BillingClient; +use crate::email::send_password_reset_confirmation_email; +use crate::email::send_password_reset_email; +use crate::encrypt::encrypt_key_deterministic; +use crate::encrypt::generate_random; +use crate::encrypt::{ + decrypt_with_key, decrypt_with_kms, encrypt_with_key, CustomRng, GenKeyResult, +}; +use crate::models::password_reset::NewPasswordResetRequest; +use crate::sqs::SqsEventPublisher; +use crate::{attestation_routes::SessionState, web::oauth_routes}; +use crate::{ + aws_credentials::AwsCredentialError, + models::enclave_secrets::NewEnclaveSecret, + private_key::{decrypt_user_seed_to_key, generate_twelve_word_seed}, +}; +use crate::{ + db::{setup_db, DBConnection, DBError}, + models::users::{NewUser, User}, + web::openai_routes, +}; +use crate::{encrypt::create_new_encryption_key, jwt::validate_jwt}; +use aws_credentials::{AwsCredentialManager, AwsCredentials}; +use axum::{ + http::{Method, StatusCode}, + middleware::from_fn_with_state, + response::IntoResponse, + Json, +}; +use base64::engine::general_purpose; +use base64::Engine as _; +use chacha20poly1305::aead::Aead; +use chacha20poly1305::KeyInit; +use chacha20poly1305::{ChaCha20Poly1305, Key, Nonce}; +use kv::{KVPair, StoreError, StoreResult}; +use password_auth::{generate_hash, verify_password, VerifyError}; +use rand_core::{CryptoRng, RngCore}; +use secp256k1::SecretKey; +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use sha2::{Digest, Sha256}; +use std::collections::HashMap; +use std::env; +use std::fmt; +use std::io::{Read, Write}; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::RwLock; +use tokio::task::{self}; +use tower_http::cors::{Any, CorsLayer}; +use tracing::{debug, error, trace, warn}; +use 
tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; +use url::Url; +use uuid::Uuid; +use vsock::{VsockAddr, VsockStream}; +use web::{attestation_routes, health_routes, login_routes, protected_routes}; +use x25519_dalek::{EphemeralSecret, PublicKey}; + +mod aws_credentials; +mod billing; +mod db; +mod email; +mod encrypt; +mod jwt; +mod kv; +mod message_signing; +mod models; +mod oauth; +mod private_key; +mod sqs; +mod web; + +use oauth::{GithubProvider, GoogleProvider, OAuthManager}; + +const ENCLAVE_KEY_NAME: &str = "enclave_key"; +const OPENAI_API_KEY_NAME: &str = "openai_api_key"; +const JWT_SECRET_KEY_NAME: &str = "jwt_secret"; +const GITHUB_CLIENT_ID_NAME: &str = "github_client_id"; +const GITHUB_CLIENT_SECRET_NAME: &str = "github_client_secret"; +const GOOGLE_CLIENT_ID_NAME: &str = "google_client_id"; +const GOOGLE_CLIENT_SECRET_NAME: &str = "google_client_secret"; +const BILLING_API_KEY_NAME: &str = "billing_api_key"; +const BILLING_SERVER_URL_NAME: &str = "billing_server_url"; + +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct EnclaveRequest { + pub request_type: String, + pub key_name: Option, +} + +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct ParentResponse { + pub response_type: String, + pub response_value: serde_json::Value, +} + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error(transparent)] + TaskJoin(#[from] task::JoinError), + + #[error(transparent)] + StdIo(#[from] std::io::Error), + + #[error(transparent)] + TryInit(#[from] tracing_subscriber::util::TryInitError), + + #[error("Database error: {0}")] + DatabaseError(#[from] DBError), + + #[error("Private key not found")] + PrivateKeyNotFound, + + #[error("Private key could not be generated")] + PrivateKeyGenerationFailure, + + #[error("Private key already exists")] + PrivateKeyAlreadyExists, + + #[error("User not found")] + UserNotFound, + + #[error("JSON error: {0}")] + JsonError(#[from] serde_json::Error), + + 
#[error("Encryption error: {0}")] + EncryptionError(String), + + #[error("Authentication error")] + AuthenticationError, + + #[error("Failed to parse secret")] + SecretParsingError, + + #[error("AWS Credential error: {0}")] + AwsCredentialError(#[from] aws_credentials::AwsCredentialError), + + #[error("User is already verified")] + UserAlreadyVerified, + + #[error("Builder error: {0}")] + BuilderError(String), + + #[error("Password reset request expired")] + PasswordResetExpired, + + #[error("Invalid password reset secret")] + InvalidPasswordResetSecret, + + #[error("Invalid password reset request")] + InvalidPasswordResetRequest, + + #[error("Password verification error: {0}")] + PasswordVerificationError(#[from] VerifyError), + + #[error("Password is required for registration")] + PasswordRequired, + + #[error("OAuth error: {0}")] + OAuthError(String), + + #[error("User with this email already exists")] + UserAlreadyExists, + + #[error("Signing error: {0}")] + SigningError(String), + + #[error("Invalid derivation path: {0}")] + InvalidDerivationPath(String), + + #[error("Key derivation failed: {0}")] + KeyDerivationError(String), +} + +#[derive(Debug, thiserror::Error)] +pub enum ApiError { + #[error("Invalid email, password, or login method")] + InvalidUsernameOrPassword, + + #[error("Invalid JWT")] + InvalidJwt, + + #[error("Internal server error")] + InternalServerError, + + #[error("Bad Request")] + BadRequest, + + #[error("Encryption error")] + EncryptionError, + + #[error("Unauthorized")] + Unauthorized, + + #[error("Invalid invite code")] + InvalidInviteCode, + + #[error("Token refresh failed")] + RefreshFailed, + + #[error("User is already verified")] + UserAlreadyVerified, + + #[error("No valid email found for the Oauth account")] + NoEmailFound, + + #[error("User exists but Oauth not linked")] + UserExistsNotLinked, + + #[error("User not found")] + UserNotFound, + + #[error("Email already registered")] + EmailAlreadyExists, + + #[error("Usage limit 
reached")] + UsageLimitReached, +} + +impl IntoResponse for ApiError { + fn into_response(self) -> axum::response::Response { + let status = match self { + ApiError::InvalidUsernameOrPassword => StatusCode::UNAUTHORIZED, + ApiError::InvalidJwt => StatusCode::UNAUTHORIZED, + ApiError::Unauthorized => StatusCode::UNAUTHORIZED, + ApiError::InternalServerError => StatusCode::INTERNAL_SERVER_ERROR, + ApiError::BadRequest => StatusCode::BAD_REQUEST, + ApiError::InvalidInviteCode => StatusCode::UNAUTHORIZED, + ApiError::RefreshFailed => StatusCode::UNAUTHORIZED, + ApiError::UserAlreadyVerified => StatusCode::BAD_REQUEST, + ApiError::EncryptionError => StatusCode::BAD_REQUEST, + ApiError::NoEmailFound => StatusCode::BAD_REQUEST, + ApiError::UserExistsNotLinked => StatusCode::CONFLICT, + ApiError::UserNotFound => StatusCode::NOT_FOUND, + ApiError::EmailAlreadyExists => StatusCode::CONFLICT, + ApiError::UsageLimitReached => StatusCode::FORBIDDEN, + }; + ( + status, + Json(ErrorResponse { + status: status.as_u16(), + message: self.to_string(), + }), + ) + .into_response() + } +} + +#[derive(Debug, Serialize)] +pub struct ErrorResponse { + status: u16, + message: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TokenClaims { + // Subject (whom token refers to) + pub sub: String, // This will now be the UUID + // Expiration time (as UTC timestamp) + pub exp: i64, + // Issued at (as UTC timestamp) + pub iat: i64, + // Audience + pub aud: String, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct Credentials { + pub email: Option, + pub id: Option, + pub password: String, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RegisterCredentials { + pub name: Option, + pub email: Option, + pub password: String, + pub invite_code: String, +} + +#[derive(Debug, Clone)] +pub struct Config { + jwt_keys: jwt::JwtKeys, + access_token_maxage: i64, + refresh_token_maxage: i64, +} + +#[derive(Debug, Clone, PartialEq)] +pub 
enum AppMode { + Local, + Dev, + Preview, + Prod, + Custom(String), +} + +impl AppMode { + fn frontend_url(&self) -> &str { + match self { + AppMode::Local => "http://localhost:5173", + AppMode::Dev => "https://dev.secretgpt.ai", + AppMode::Preview => "https://preview.opensecret.cloud", + AppMode::Prod => "https://trymaple.ai", + AppMode::Custom(_) => "https://preview.opensecret.cloud", + } + } +} + +impl fmt::Display for AppMode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + AppMode::Local => write!(f, "local"), + AppMode::Dev => write!(f, "dev"), + AppMode::Preview => write!(f, "preview"), + AppMode::Prod => write!(f, "prod"), + AppMode::Custom(_) => write!(f, "custom"), + } + } +} + +impl FromStr for AppMode { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "local" => Ok(AppMode::Local), + "dev" => Ok(AppMode::Dev), + "preview" => Ok(AppMode::Preview), + "prod" => Ok(AppMode::Prod), + "custom" => { + // For custom mode, get the ENV_NAME + match std::env::var("ENV_NAME") { + Ok(env_name) => Ok(AppMode::Custom(env_name)), + Err(_) => Err("ENV_NAME must be set when using custom mode".to_string()), + } + } + _ => Err(format!("Invalid app mode: {}", s)), + } + } +} + +#[derive(Clone)] +pub struct AppState { + app_mode: AppMode, + db: Arc, + config: Config, + aws_credential_manager: Arc>>, + enclave_key: Vec, + openai_api_key: Option, + openai_api_base: String, + resend_api_key: Option, + ephemeral_keys: Arc>>, + session_states: Arc>>, + oauth_manager: Arc, + sqs_publisher: Option>, + billing_client: Option, +} + +#[derive(Default)] +pub struct AppStateBuilder { + app_mode: Option, + db: Option>, + enclave_key: Option>, + aws_credential_manager: Option>>>, + openai_api_key: Option, + openai_api_base: Option, + jwt_secret: Option>, + resend_api_key: Option, + github_client_secret: Option, + github_client_id: Option, + google_client_secret: Option, + google_client_id: Option, + 
sqs_queue_maple_events_url: Option, + sqs_publisher: Option>, + billing_api_key: Option, + billing_server_url: Option, +} + +impl AppStateBuilder { + pub fn app_mode(mut self, app_mode: AppMode) -> Self { + self.app_mode = Some(app_mode); + self + } + + pub fn db(mut self, db: Arc) -> Self { + self.db = Some(db); + self + } + + pub fn enclave_key(mut self, enclave_key: Vec) -> Self { + self.enclave_key = Some(enclave_key); + self + } + + pub fn aws_credential_manager( + mut self, + aws_credential_manager: Arc>>, + ) -> Self { + self.aws_credential_manager = Some(aws_credential_manager); + self + } + + pub fn openai_api_key(mut self, openai_api_key: Option) -> Self { + self.openai_api_key = openai_api_key; + self + } + + pub fn openai_api_base(mut self, openai_api_base: String) -> Self { + self.openai_api_base = Some(openai_api_base); + self + } + + pub fn jwt_secret(mut self, jwt_secret: Vec) -> Self { + self.jwt_secret = Some(jwt_secret); + self + } + + pub fn resend_api_key(mut self, resend_api_key: Option) -> Self { + self.resend_api_key = resend_api_key; + self + } + + pub fn github_client_secret(mut self, github_client_secret: Option) -> Self { + self.github_client_secret = github_client_secret; + self + } + + pub fn github_client_id(mut self, github_client_id: Option) -> Self { + self.github_client_id = github_client_id; + self + } + + pub fn google_client_secret(mut self, google_client_secret: Option) -> Self { + self.google_client_secret = google_client_secret; + self + } + + pub fn google_client_id(mut self, google_client_id: Option) -> Self { + self.google_client_id = google_client_id; + self + } + + pub fn sqs_queue_maple_events_url( + mut self, + sqs_queue_maple_events_url: Option, + ) -> Self { + self.sqs_queue_maple_events_url = sqs_queue_maple_events_url; + self + } + + pub fn sqs_publisher(mut self, sqs_publisher: Option>) -> Self { + self.sqs_publisher = sqs_publisher; + self + } + + pub fn billing_api_key(mut self, billing_api_key: Option) -> Self 
{ + self.billing_api_key = billing_api_key; + self + } + + pub fn billing_server_url(mut self, billing_server_url: Option) -> Self { + self.billing_server_url = billing_server_url; + self + } + + pub async fn build(self) -> Result { + let app_mode = self + .app_mode + .ok_or(Error::BuilderError("app_mode is required".to_string()))?; + let db = self + .db + .ok_or(Error::BuilderError("db is required".to_string()))?; + let enclave_key = self + .enclave_key + .ok_or(Error::BuilderError("enclave_key is required".to_string()))?; + let aws_credential_manager = self.aws_credential_manager.ok_or(Error::BuilderError( + "aws_credential_manager is required".to_string(), + ))?; + let openai_api_base = self.openai_api_base.ok_or(Error::BuilderError( + "openai_api_base is required".to_string(), + ))?; + let jwt_secret = self + .jwt_secret + .ok_or(Error::BuilderError("jwt_secret is required".to_string()))?; + + let config = Config { + jwt_keys: jwt::JwtKeys::new(jwt_secret)?, + access_token_maxage: 60, // 60 minutes + refresh_token_maxage: 30, // 30 days + }; + + // Log the public key in hex format + tracing::info!( + "JWT ES256K public key (hex): {}", + hex::encode(config.jwt_keys.public_key().serialize()) + ); + + let mut oauth_manager = OAuthManager::new(); + + if let (Some(client_id), Some(client_secret)) = ( + self.github_client_id.clone(), + self.github_client_secret.clone(), + ) { + let callback_url = format!("{}/auth/github/callback", app_mode.frontend_url()); + let github_provider = + GithubProvider::new(db.clone(), client_id, client_secret, callback_url).await?; + oauth_manager.add_provider("github".to_string(), Box::new(github_provider)); + } + + if let (Some(client_id), Some(client_secret)) = ( + self.google_client_id.clone(), + self.google_client_secret.clone(), + ) { + let callback_url = format!("{}/auth/google/callback", app_mode.frontend_url()); + let google_provider = + GoogleProvider::new(db.clone(), client_id, client_secret, callback_url).await?; + 
oauth_manager.add_provider("google".to_string(), Box::new(google_provider)); + } + + let oauth_manager = Arc::new(oauth_manager); + + // Initialize SQS publisher if URL is provided + let sqs_publisher = if let Some(ref queue_url) = self.sqs_queue_maple_events_url { + // Use the same region as AWS credentials if available + let region = if let Some(creds) = aws_credential_manager.read().await.as_ref() { + creds.get_credentials().await.map(|c| c.region) + } else { + None + }; + + Some(Arc::new( + SqsEventPublisher::new(queue_url.clone(), region, aws_credential_manager.clone()) + .await, + )) + } else { + None + }; + + let billing_client = if let (Some(api_key), Some(base_url)) = + (self.billing_api_key, self.billing_server_url) + { + tracing::debug!("Billing client is configured."); + Some(BillingClient::new(api_key, base_url)) + } else { + tracing::debug!("Billing client not configured"); + None + }; + + Ok(AppState { + app_mode, + db, + config, + aws_credential_manager, + enclave_key, + openai_api_key: self.openai_api_key, + openai_api_base, + resend_api_key: self.resend_api_key, + ephemeral_keys: Arc::new(RwLock::new(HashMap::new())), + session_states: Arc::new(tokio::sync::RwLock::new(HashMap::new())), + oauth_manager, + sqs_publisher, + billing_client, + }) + } +} + +impl AppState { + async fn register_user(&self, creds: RegisterCredentials) -> Result { + // First check if user exists - only if email is provided + if let Some(email) = &creds.email { + match self.db.get_user_by_email(email.clone()) { + Ok(_) => { + // User already exists + return Err(Error::UserAlreadyExists); + } + Err(DBError::UserNotFound) => { + // This is what we want - user doesn't exist + } + Err(e) => { + // Some other database error + return Err(Error::DatabaseError(e)); + } + } + } + + let password = creds.password; + + // hash then encrypt with enclave key + let password_hash = generate_hash(password); + + let secret_key = SecretKey::from_slice(&self.enclave_key.clone()) + .map_err(|e| 
Error::EncryptionError(e.to_string()))?; + + let encrypted_data = encrypt_with_key(&secret_key, password_hash.as_bytes()).await; + + tracing::debug!("registering new user: {:?}", creds.email); + + let new_user = NewUser::new(creds.email, Some(encrypted_data)).with_name_option(creds.name); + + let user = self.db.create_user(new_user)?; + + tracing::info!("registered new user: {:?} {:?}", user.email, user.uuid); + + Ok(user) + } + + async fn authenticate_user(&self, creds: Credentials) -> Result, Error> { + // Ensure at least one identifier is provided + if creds.email.is_none() && creds.id.is_none() { + return Err(Error::AuthenticationError); + } + + // Try email first if provided, fall back to UUID + let user = if let Some(email) = creds.email { + self.db.get_user_by_email(email)? + } else { + // We can safely unwrap id here because we checked above that at least one exists + self.db.get_user_by_uuid(creds.id.unwrap())? + }; + + if let Some(ref password_enc) = user.password_enc { + // Decrypt the stored password with the enclave key + let secret_key = SecretKey::from_slice(&self.enclave_key.clone()) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + + let decrypted_password_bytes = decrypt_with_key(&secret_key, password_enc) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + + let decrypted_password_hash = String::from_utf8(decrypted_password_bytes) + .map_err(|e| Error::EncryptionError(format!("Failed to decode UTF-8: {}", e)))?; + + // Verifying the password is blocking and potentially slow, so we'll do so via + // `spawn_blocking`. 
+ let res = task::spawn_blocking(move || { + verify_password(creds.password, &decrypted_password_hash) + }) + .await?; + + match res { + Ok(_) => Ok(Some(user)), + Err(_) => Ok(None), + } + } else { + // If the user doesn't have a password (OAuth-only user), authentication fails + Ok(None) + } + } + + async fn get_user(&self, user_uuid: Uuid) -> Result { + let user = self + .db + .get_user_by_uuid(user_uuid) + .map_err(|_| Error::UserNotFound)?; + Ok(user) + } + + /// Returns the user's private key, optionally derived using the provided derivation path. + /// + /// # Arguments + /// * `user_uuid` - The UUID of the user + /// * `derivation_path` - Optional BIP32 derivation path (e.g., "m/44'/0'/0'/0/0") + /// + /// # Returns + /// * `Result` - The user's private key or a derived key if a path is provided + async fn get_user_key( + &self, + user_uuid: Uuid, + derivation_path: Option<&str>, + ) -> Result { + let user = self.get_user(user_uuid).await?; + + let encrypted_seed = match user.get_seed_encrypted().await { + Some(es) => es, + None => { + // create seed if not already exists + let updated_user = self.generate_private_key(user_uuid).await?; + updated_user + .get_seed_encrypted() + .await + .expect("seed should have been created") + } + }; + + let user_secret_key = + decrypt_user_seed_to_key(self.enclave_key.clone(), encrypted_seed, derivation_path)?; + + Ok(user_secret_key) + } + + /// Sign a message with the user's private key, using the specified algorithm + async fn sign_message( + &self, + user_uuid: Uuid, + message_bytes: &[u8], + algorithm: message_signing::SigningAlgorithm, + derivation_path: Option<&str>, + ) -> Result { + let user_secret_key = self.get_user_key(user_uuid, derivation_path).await?; + message_signing::sign_message(&user_secret_key, message_bytes, algorithm) + } + + async fn generate_private_key(&self, user_uuid: Uuid) -> Result { + let user = self.get_user(user_uuid).await?; + + if user.get_seed_encrypted().await.is_none() { + let 
user_seed_words = generate_twelve_word_seed(self.aws_credential_manager.clone()) + .await? + .to_string(); + + let secret_key = SecretKey::from_slice(&self.enclave_key.clone()) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + + let encrypted_key = encrypt_with_key(&secret_key, user_seed_words.as_bytes()).await; + + self.db.set_user_key(user, encrypted_key)?; + + self.get_user(user_uuid).await + } else { + Err(Error::PrivateKeyAlreadyExists) + } + } + + async fn get(&self, user_id: Uuid, key: String) -> StoreResult> { + let user_key = self + .get_user_key(user_id, None) + .await + .map_err(|_| StoreError::Unauthorized)?; + kv::get(self.db.get_pool(), user_id, &key, &user_key) + } + + async fn put(&self, user_id: Uuid, key: String, value: String) -> StoreResult<()> { + let user_key = self + .get_user_key(user_id, None) + .await + .map_err(|_| StoreError::Unauthorized)?; + kv::put( + self.db.get_pool(), + user_id, + key, + value, + &user_key, + self.aws_credential_manager.clone(), + ) + .await + } + + async fn delete(&self, user_id: Uuid, key: String) -> StoreResult<()> { + let user_key = self + .get_user_key(user_id, None) + .await + .map_err(|_| StoreError::Unauthorized)?; + kv::delete(self.db.get_pool(), user_id, &key, &user_key) + } + + async fn list(&self, user_id: Uuid) -> StoreResult> { + let user_key = self + .get_user_key(user_id, None) + .await + .map_err(|_| StoreError::Unauthorized)?; + kv::list(self.db.get_pool(), user_id, &user_key) + } + + pub async fn get_aws_credentials(&self) -> Option { + if let Some(manager) = self.aws_credential_manager.read().await.as_ref() { + manager.get_credentials().await + } else { + None + } + } + + pub async fn get_enclave_key(&self) -> Vec { + self.enclave_key.clone() + } + + pub async fn create_ephemeral_key(&self, nonce: String) -> PublicKey { + let custom_rng = CustomRng::new(); + + // Use a wrapper that implements RngCore and CryptoRng + let mut rng_wrapper = AsyncRngWrapper::new(custom_rng); + + // Create 
the EphemeralSecret using the RngCore implementation + let ephemeral_secret = EphemeralSecret::random_from_rng(&mut rng_wrapper); + let public_key = PublicKey::from(&ephemeral_secret); + + self.ephemeral_keys + .write() + .await + .insert(nonce, ephemeral_secret); + + public_key + } + + pub async fn get_and_remove_ephemeral_secret(&self, nonce: &str) -> Option { + self.ephemeral_keys.write().await.remove(nonce) + } + + pub async fn decrypt_session_data( + &self, + session_id: &Uuid, + encrypted_data: &str, + ) -> Result, ApiError> { + tracing::trace!("decrypting session data for session_id: {}", session_id); + + let decoded_data = general_purpose::STANDARD + .decode(encrypted_data) + .map_err(|e| { + tracing::error!("Failed to decode base64 data: {:?}", e); + ApiError::BadRequest + })?; + + tracing::trace!("decoded session data length: {}", decoded_data.len()); + + if decoded_data.len() < 12 { + tracing::error!("Decoded data is too short"); + return Err(ApiError::BadRequest); + } + + let (nonce, ciphertext) = decoded_data.split_at(12); + let nonce_array: [u8; 12] = nonce.try_into().map_err(|e| { + tracing::error!("Failed to convert nonce: {:?}", e); + ApiError::BadRequest + })?; + + tracing::trace!("nonce: {:?}", nonce_array); + tracing::trace!("ciphertext length: {}", ciphertext.len()); + + self.session_states + .read() + .await + .get(session_id) + .ok_or_else(|| { + tracing::error!("Session not found: {}", session_id); + ApiError::Unauthorized + }) + .and_then(|state| { + state.decrypt(ciphertext, &nonce_array).map_err(|e| { + tracing::error!("Decryption failed: {:?}", e); + e + }) + }) + } + + pub async fn encrypt_session_data( + &self, + session_id: &Uuid, + data: &[u8], + ) -> Result, ApiError> { + let session_states = self.session_states.read().await; + let session_state = session_states + .get(session_id) + .ok_or(ApiError::Unauthorized)?; + + let session_key = session_state.get_session_key(); + let key = Key::from_slice(session_key.as_ref()); + + let 
nonce_bytes: [u8; 12] = crate::encrypt::generate_random(); + let nonce = Nonce::from_slice(&nonce_bytes); + + let cipher = ChaCha20Poly1305::new(key); + + let mut encrypted_data = nonce_bytes.to_vec(); + encrypted_data.extend_from_slice( + &cipher + .encrypt(nonce, data) + .map_err(|_| ApiError::InternalServerError)?, + ); + + Ok(encrypted_data) + } + + pub async fn create_password_reset_request( + &self, + email: String, + hashed_secret: String, + ) -> Result { + let alphanumeric_code = self.generate_alphanumeric_code(); + + // Check if the user exists + match self.db.get_user_by_email(email.clone()) { + Ok(user) => { + // Only proceed with email if user has one + if user.get_email().is_some() { + // User exists, proceed with the actual reset request + let secret_key = SecretKey::from_slice(&self.enclave_key) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + let encrypted_code = + encrypt_key_deterministic(&secret_key, alphanumeric_code.as_bytes()); + + let new_request = NewPasswordResetRequest::new( + user.uuid, + hashed_secret, + encrypted_code, + 24, // 24 hours expiration + ); + + self.db.create_password_reset_request(new_request)?; + + // Send the actual email in the background + let app_mode = self.app_mode.clone(); + let resend_api_key = self.resend_api_key.clone(); + let user_email = email.clone(); + let code = alphanumeric_code.clone(); + tokio::spawn(async move { + if let Err(e) = + send_password_reset_email(app_mode, resend_api_key, user_email, code) + .await + { + error!("Failed to send password reset email: {:?}", e); + } + }); + } + } + Err(DBError::UserNotFound) => { + // User doesn't exist, but we don't want to reveal this information + // So we'll just log it and return as if everything was successful + debug!("Password reset requested for non-existent email: {}", email); + } + Err(e) => { + // For other errors, we should still log them but not expose them to the user + error!("Error during password reset request: {:?}", e); + } + } + + // 
Always return the generated code, even if we didn't actually create a request + Ok(alphanumeric_code) + } + + pub async fn confirm_password_reset( + &self, + email: String, + alphanumeric_code: String, + plaintext_secret: String, + new_password: String, + ) -> Result<(), Error> { + let user = self.db.get_user_by_email(email.clone())?; + + // Verify user has an email + if user.get_email().is_none() { + return Err(Error::UserNotFound); + } + + // Deterministically encrypt the provided alphanumeric code for lookup + let secret_key = SecretKey::from_slice(&self.enclave_key) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + let encrypted_code = encrypt_key_deterministic(&secret_key, alphanumeric_code.as_bytes()); + + let reset_request = self + .db + .get_password_reset_request_by_user_id_and_code(user.uuid, encrypted_code)?; + + if let Some(reset_request) = reset_request { + if reset_request.is_expired() { + warn!("Password reset expired for user: {}", user.uuid); + return Err(Error::PasswordResetExpired); + } + + trace!("Stored hashed secret: {}", reset_request.hashed_secret); + + // Hash the plaintext secret again for comparison + let hashed_plaintext = generate_reset_hash(plaintext_secret.clone()); + + trace!("Newly hashed plaintext secret: {}", hashed_plaintext); + + // Compare the hashed values directly + if hashed_plaintext == reset_request.hashed_secret { + // Password verification succeeded, continue with reset + self.update_user_password(&user, new_password).await?; + self.db.mark_password_reset_as_complete(&reset_request)?; + + // Send confirmation email in the background + let app_mode = self.app_mode.clone(); + let resend_api_key = self.resend_api_key.clone(); + let user_email = user.email.clone(); + tokio::spawn(async move { + if let Err(e) = send_password_reset_confirmation_email( + app_mode, + resend_api_key, + user_email.expect("We checked email had to exist above"), + ) + .await + { + error!("Failed to send password reset confirmation email: 
{:?}", e); + } + }); + + Ok(()) + } else { + warn!( + "Password verification failed for user {}. Hashes do not match.", + user.uuid + ); + Err(Error::InvalidPasswordResetSecret) + } + } else { + Err(Error::InvalidPasswordResetRequest) + } + } + + fn generate_alphanumeric_code(&self) -> String { + const CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; + const CODE_LEN: usize = 8; + + let random_bytes: [u8; CODE_LEN] = generate_random(); + + random_bytes + .iter() + .map(|&b| CHARSET[b as usize % CHARSET.len()] as char) + .collect() + } + + pub async fn update_user_password( + &self, + user: &User, + new_password: String, + ) -> Result<(), Error> { + // Hash the new password + let password_hash = password_auth::generate_hash(new_password); + + // Encrypt the hashed password + let secret_key = SecretKey::from_slice(&self.enclave_key) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + let encrypted_password = encrypt_with_key(&secret_key, password_hash.as_bytes()).await; + + // Update the user's password + self.db + .update_user_password(user, Some(encrypted_password)) + .map_err(Error::from) + } + + pub fn frontend_url(&self) -> String { + self.app_mode.frontend_url().to_string() + } + + pub fn oauth_callback_url(&self, provider: &str) -> Result { + let base_url = Url::parse(self.frontend_url().as_str())?; + Ok(base_url + .join(&format!("/auth/{}/callback", provider))? 
+ .to_string()) + } +} + +async fn get_secret(key_name: &str) -> Result { + let cid = 3; + let port = 8003; + + let sock_addr = VsockAddr::new(cid, port); + let mut stream = VsockStream::connect(&sock_addr)?; + + let request = EnclaveRequest { + request_type: "SecretsManager".to_string(), + key_name: Some(key_name.to_string()), + }; + let request_json = serde_json::to_string(&request)?; + stream.write_all(request_json.as_bytes())?; + + let mut response = String::new(); + stream.read_to_string(&mut response)?; + + let parent_response: ParentResponse = serde_json::from_str(&response)?; + if parent_response.response_type == "secret" { + let secret_json: Value = + serde_json::from_str(parent_response.response_value.as_str().unwrap())?; + + // Assuming the secret is always a JSON object with a single key-value pair + if let Some((_, value)) = secret_json.as_object().and_then(|obj| obj.iter().next()) { + Ok(value.as_str().unwrap_or_default().to_string()) + } else { + Err(Error::SecretParsingError) + } + } else { + Err(Error::AuthenticationError) + } +} + +async fn get_or_create_enclave_key( + app_mode: &AppMode, + aws_credential_manager: Arc>>, + db: Arc, +) -> Result { + let creds = aws_credential_manager + .read() + .await + .clone() + .expect("non-local mode should have creds") + .get_credentials() + .await + .expect("non-local mode should have creds"); + + let aws_kms_key_id = get_kms_key_id(app_mode); + + // Check if the key has been initialized before + let existing_key = db.get_enclave_secret_by_key(ENCLAVE_KEY_NAME)?; + + let key_res = if let Some(ref encrypted_key) = existing_key { + // Convert the stored bytes back to base64 + let base64_encrypted_key = general_purpose::STANDARD.encode(&encrypted_key.value); + + // Decrypt the existing key + let decrypted_key = decrypt_with_kms( + &creds.region, + &creds.access_key_id, + &creds.secret_access_key, + &creds.token, + &base64_encrypted_key, + ) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + + 
GenKeyResult { + key: decrypted_key, + encrypted_key: encrypted_key.value.clone(), + } + } else { + // Create a new encryption key + create_new_encryption_key( + &creds.region, + &creds.access_key_id, + &creds.secret_access_key, + &creds.token, + &aws_kms_key_id, + ) + .map_err(|e| Error::EncryptionError(e.to_string()))? + }; + + // Store the encrypted version of the key if it's new + if existing_key.is_none() { + let new_secret = + NewEnclaveSecret::new(ENCLAVE_KEY_NAME.to_string(), key_res.encrypted_key.clone()); + db.create_enclave_secret(new_secret)?; + } + + Ok(key_res) +} + +async fn retrieve_openai_api_key( + aws_credential_manager: Arc>>, + db: Arc, +) -> Result { + let creds = aws_credential_manager + .read() + .await + .clone() + .expect("non-local mode should have creds") + .get_credentials() + .await + .expect("non-local mode should have creds"); + + // check if the key already exists in the db + let existing_key = db.get_enclave_secret_by_key(OPENAI_API_KEY_NAME)?; + + if let Some(ref encrypted_key) = existing_key { + // Convert the stored bytes back to base64 + let base64_encrypted_key = general_purpose::STANDARD.encode(&encrypted_key.value); + + debug!("trying to decrypt base64 encrypted api key"); + + // Decrypt the existing key + let decrypted_bytes = decrypt_with_kms( + &creds.region, + &creds.access_key_id, + &creds.secret_access_key, + &creds.token, + &base64_encrypted_key, + ) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + + // Convert the decrypted bytes to a UTF-8 string + String::from_utf8(decrypted_bytes) + .map_err(|e| Error::EncryptionError(format!("Failed to decode UTF-8: {}", e))) + } else { + Err(Error::EncryptionError( + "OpenAI API key not found in the database".to_string(), + )) + } +} + +async fn get_or_create_jwt_secret( + app_mode: &AppMode, + aws_credential_manager: Arc>>, + db: Arc, + enclave_key: &[u8], +) -> Result, Error> { + match app_mode { + AppMode::Local => { + // For local mode, use environment variable + 
Ok(std::env::var("JWT_SECRET") + .expect("JWT_SECRET must be set in local mode") + .into_bytes()) + } + _ => { + // Check if JWT secret exists in enclave_secrets + if let Some(encrypted_jwt_secret) = db.get_enclave_secret_by_key(JWT_SECRET_KEY_NAME)? { + // Decrypt existing JWT secret + let secret_key = SecretKey::from_slice(enclave_key) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + decrypt_with_key(&secret_key, &encrypted_jwt_secret.value) + .map_err(|e| Error::EncryptionError(e.to_string())) + } else { + // Generate new JWT secret + let jwt_secret = jwt::generate_jwt_secret(aws_credential_manager.clone()).await?; + + // Encrypt and store the new JWT secret + let secret_key = SecretKey::from_slice(enclave_key) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + let encrypted_jwt_secret = encrypt_with_key(&secret_key, &jwt_secret).await; + + let new_secret = + NewEnclaveSecret::new(JWT_SECRET_KEY_NAME.to_string(), encrypted_jwt_secret); + db.create_enclave_secret(new_secret)?; + + Ok(jwt_secret) + } + } + } +} + +fn get_kms_key_id(app_mode: &AppMode) -> String { + match app_mode { + AppMode::Prod => "alias/open-secret-prod-enclave".to_string(), + AppMode::Preview => "alias/open-secret-preview1-enclave".to_string(), + AppMode::Dev => "alias/open-secret-dev-enclave".to_string(), + AppMode::Custom(env_name) => format!("alias/open-secret-{}-enclave", env_name), + AppMode::Local => unreachable!("shouldn't use kms in local mode"), + } +} + +fn is_default_openai_domain(domain: &str) -> bool { + domain.contains("openai.com") +} + +async fn retrieve_resend_api_key( + aws_credential_manager: Arc>>, + db: Arc, +) -> Result, Error> { + let creds = aws_credential_manager + .read() + .await + .clone() + .expect("non-local mode should have creds") + .get_credentials() + .await + .expect("non-local mode should have creds"); + + // check if the key already exists in the db + let existing_key = db.get_enclave_secret_by_key("resend_api_key")?; + + if let Some(ref 
encrypted_key) = existing_key { + // Convert the stored bytes back to base64 + let base64_encrypted_key = general_purpose::STANDARD.encode(&encrypted_key.value); + + debug!("trying to decrypt base64 encrypted Resend API key"); + + // Decrypt the existing key + let decrypted_bytes = decrypt_with_kms( + &creds.region, + &creds.access_key_id, + &creds.secret_access_key, + &creds.token, + &base64_encrypted_key, + ) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + + // Convert the decrypted bytes to a UTF-8 string + String::from_utf8(decrypted_bytes) + .map_err(|e| Error::EncryptionError(format!("Failed to decode UTF-8: {}", e))) + .map(Some) + } else { + tracing::info!("Resend API key not found in the database"); + Ok(None) + } +} + +async fn retrieve_github_client_id( + aws_credential_manager: Arc>>, + db: Arc, +) -> Result, Error> { + let creds = aws_credential_manager + .read() + .await + .clone() + .expect("non-local mode should have creds") + .get_credentials() + .await + .expect("non-local mode should have creds"); + + // check if the key already exists in the db + let existing_key = db.get_enclave_secret_by_key(GITHUB_CLIENT_ID_NAME)?; + + if let Some(ref encrypted_key) = existing_key { + // Convert the stored bytes back to base64 + let base64_encrypted_key = general_purpose::STANDARD.encode(&encrypted_key.value); + + debug!("trying to decrypt base64 encrypted GitHub client ID"); + + // Decrypt the existing key + let decrypted_bytes = decrypt_with_kms( + &creds.region, + &creds.access_key_id, + &creds.secret_access_key, + &creds.token, + &base64_encrypted_key, + ) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + + // Convert the decrypted bytes to a UTF-8 string + String::from_utf8(decrypted_bytes) + .map_err(|e| Error::EncryptionError(format!("Failed to decode UTF-8: {}", e))) + .map(Some) + } else { + tracing::info!("GitHub client ID not found in the database"); + Ok(None) + } +} + +async fn retrieve_github_client_secret( + 
aws_credential_manager: Arc>>, + db: Arc, +) -> Result, Error> { + let creds = aws_credential_manager + .read() + .await + .clone() + .expect("non-local mode should have creds") + .get_credentials() + .await + .expect("non-local mode should have creds"); + + // check if the key already exists in the db + let existing_key = db.get_enclave_secret_by_key(GITHUB_CLIENT_SECRET_NAME)?; + + if let Some(ref encrypted_key) = existing_key { + // Convert the stored bytes back to base64 + let base64_encrypted_key = general_purpose::STANDARD.encode(&encrypted_key.value); + + debug!("trying to decrypt base64 encrypted GitHub client secret"); + + // Decrypt the existing key + let decrypted_bytes = decrypt_with_kms( + &creds.region, + &creds.access_key_id, + &creds.secret_access_key, + &creds.token, + &base64_encrypted_key, + ) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + + // Convert the decrypted bytes to a UTF-8 string + String::from_utf8(decrypted_bytes) + .map_err(|e| Error::EncryptionError(format!("Failed to decode UTF-8: {}", e))) + .map(Some) + } else { + tracing::info!("GitHub client secret not found in the database"); + Ok(None) + } +} + +async fn retrieve_google_client_secret( + aws_credential_manager: Arc>>, + db: Arc, +) -> Result, Error> { + let creds = aws_credential_manager + .read() + .await + .clone() + .expect("non-local mode should have creds") + .get_credentials() + .await + .expect("non-local mode should have creds"); + + // check if the key already exists in the db + let existing_key = db.get_enclave_secret_by_key(GOOGLE_CLIENT_SECRET_NAME)?; + + if let Some(ref encrypted_key) = existing_key { + // Convert the stored bytes back to base64 + let base64_encrypted_key = general_purpose::STANDARD.encode(&encrypted_key.value); + + debug!("trying to decrypt base64 encrypted Google client secret"); + + // Decrypt the existing key + let decrypted_bytes = decrypt_with_kms( + &creds.region, + &creds.access_key_id, + &creds.secret_access_key, + 
&creds.token, + &base64_encrypted_key, + ) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + + // Convert the decrypted bytes to a UTF-8 string + String::from_utf8(decrypted_bytes) + .map_err(|e| Error::EncryptionError(format!("Failed to decode UTF-8: {}", e))) + .map(Some) + } else { + tracing::info!("Google client secret not found in the database"); + Ok(None) + } +} + +async fn retrieve_google_client_id( + aws_credential_manager: Arc>>, + db: Arc, +) -> Result, Error> { + let creds = aws_credential_manager + .read() + .await + .clone() + .expect("non-local mode should have creds") + .get_credentials() + .await + .expect("non-local mode should have creds"); + + // check if the key already exists in the db + let existing_key = db.get_enclave_secret_by_key(GOOGLE_CLIENT_ID_NAME)?; + + if let Some(ref encrypted_key) = existing_key { + // Convert the stored bytes back to base64 + let base64_encrypted_key = general_purpose::STANDARD.encode(&encrypted_key.value); + + debug!("trying to decrypt base64 encrypted Google client ID"); + + // Decrypt the existing key + let decrypted_bytes = decrypt_with_kms( + &creds.region, + &creds.access_key_id, + &creds.secret_access_key, + &creds.token, + &base64_encrypted_key, + ) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + + // Convert the decrypted bytes to a UTF-8 string + String::from_utf8(decrypted_bytes) + .map_err(|e| Error::EncryptionError(format!("Failed to decode UTF-8: {}", e))) + .map(Some) + } else { + tracing::info!("Google client ID not found in the database"); + Ok(None) + } +} + +struct AsyncRngWrapper { + inner: CustomRng, +} + +impl AsyncRngWrapper { + fn new(inner: CustomRng) -> Self { + AsyncRngWrapper { inner } + } +} + +impl RngCore for AsyncRngWrapper { + fn next_u32(&mut self) -> u32 { + futures::executor::block_on(self.inner.next_u32()) + } + + fn next_u64(&mut self) -> u64 { + futures::executor::block_on(self.inner.next_u64()) + } + + fn fill_bytes(&mut self, dest: &mut [u8]) { + 
futures::executor::block_on(self.inner.fill_bytes(dest)) + } + + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand_core::Error> { + futures::executor::block_on(self.inner.fill_bytes(dest)); + Ok(()) + } +} + +// Implement CryptoRng for AsyncRngWrapper +impl CryptoRng for AsyncRngWrapper {} + +pub fn generate_reset_hash(password: String) -> String { + let mut hasher = Sha256::new(); + hasher.update(password.as_bytes()); + format!("{:x}", hasher.finalize()) +} + +async fn retrieve_billing_api_key( + aws_credential_manager: Arc>>, + db: Arc, +) -> Result, Error> { + let creds = aws_credential_manager + .read() + .await + .clone() + .expect("non-local mode should have creds") + .get_credentials() + .await + .expect("non-local mode should have creds"); + + // check if the key already exists in the db + let existing_key = db.get_enclave_secret_by_key(BILLING_API_KEY_NAME)?; + + if let Some(ref encrypted_key) = existing_key { + // Convert the stored bytes back to base64 + let base64_encrypted_key = general_purpose::STANDARD.encode(&encrypted_key.value); + + debug!("trying to decrypt base64 encrypted billing API key"); + + // Decrypt the existing key + let decrypted_bytes = decrypt_with_kms( + &creds.region, + &creds.access_key_id, + &creds.secret_access_key, + &creds.token, + &base64_encrypted_key, + ) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + + // Convert the decrypted bytes to a UTF-8 string + String::from_utf8(decrypted_bytes) + .map_err(|e| Error::EncryptionError(format!("Failed to decode UTF-8: {}", e))) + .map(Some) + } else { + tracing::info!("Billing API key not found in the database"); + Ok(None) + } +} + +async fn retrieve_billing_server_url( + aws_credential_manager: Arc>>, + db: Arc, +) -> Result, Error> { + let creds = aws_credential_manager + .read() + .await + .clone() + .expect("non-local mode should have creds") + .get_credentials() + .await + .expect("non-local mode should have creds"); + + // check if the url already 
exists in the db + let existing_url = db.get_enclave_secret_by_key(BILLING_SERVER_URL_NAME)?; + + if let Some(ref encrypted_url) = existing_url { + // Convert the stored bytes back to base64 + let base64_encrypted_url = general_purpose::STANDARD.encode(&encrypted_url.value); + + debug!("trying to decrypt base64 encrypted billing server URL"); + + // Decrypt the existing url + let decrypted_bytes = decrypt_with_kms( + &creds.region, + &creds.access_key_id, + &creds.secret_access_key, + &creds.token, + &base64_encrypted_url, + ) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + + // Convert the decrypted bytes to a UTF-8 string + String::from_utf8(decrypted_bytes) + .map_err(|e| Error::EncryptionError(format!("Failed to decode UTF-8: {}", e))) + .map(Some) + } else { + tracing::info!("Billing server URL not found in the database"); + Ok(None) + } +} + +#[tokio::main] +async fn main() -> Result<(), Error> { + // Add debug logs for entrypoints and exit points + tracing::debug!("Starting application"); + + // Load .env file + dotenv::dotenv().ok(); + + let app_mode = std::env::var("APP_MODE") + .unwrap_or_else(|_| "local".to_string()) + .parse::() + .expect("Invalid APP_MODE"); + + tracing_subscriber::registry() + .with(EnvFilter::new(std::env::var("RUST_LOG").unwrap_or_else( + |_| { + "sg_backend=debug,axum_login=debug,tower_sessions=debug,sqlx=warn,tower_http=debug" + .into() + }, + ))) + .with(tracing_subscriber::fmt::layer().with_ansi(false)) + .try_init()?; + + let aws_credential_manager = if app_mode != AppMode::Local { + Arc::new(RwLock::new(Some(AwsCredentialManager::new()))) + } else { + Arc::new(RwLock::new(None)) + }; + + if app_mode != AppMode::Local { + // Wait for initial credentials with a timeout + let timeout = Duration::from_secs(60); // 1 minute timeout + match tokio::time::timeout( + timeout, + aws_credential_manager + .read() + .await + .as_ref() + .expect("non-local mode should have creds") + .wait_for_credentials(), + ) + .await + { + 
Ok(_) => tracing::info!("Initial AWS credentials fetched successfully"), + Err(_) => { + tracing::error!("Timed out waiting for initial AWS credentials"); + return Err(Error::AwsCredentialError(AwsCredentialError::Timeout)); + } + } + + // Spawn a task to refresh AWS credentials + let refresh_manager = aws_credential_manager.clone(); + tokio::spawn(async move { + let refresh_interval = Duration::from_secs(5 * 60 * 60); // 5 hours + let retry_interval = Duration::from_secs(5); // 5 seconds + + loop { + tracing::info!("Refreshing AWS credentials"); + + let fetch_res = refresh_manager + .read() + .await + .as_ref() + .expect("non-local mode should have creds") + .fetch_credentials() + .await; + + match fetch_res { + Ok(_creds) => { + tracing::info!("AWS credentials refreshed successfully"); + tokio::time::sleep(refresh_interval).await; + } + Err(e) => { + tracing::error!("Failed to refresh AWS credentials: {:?}", e); + tracing::info!("Retrying in 5 seconds..."); + tokio::time::sleep(retry_interval).await; + } + } + } + }); + } + + let pg_url = if app_mode != AppMode::Local { + // Fetch database URL from Secrets Manager using the new AwsCredentialManager + let secret_name = match app_mode { + AppMode::Prod => "opensecret_prod_database_url", + AppMode::Preview => "opensecret_preview1_database_url", + AppMode::Dev => "opensecret_dev_database_url", + AppMode::Custom(ref env_name) => { + let name = format!("opensecret_{}_database_url", env_name); + Box::leak(name.into_boxed_str()) + } + AppMode::Local => unreachable!("just checked"), + }; + match get_secret(secret_name).await { + Ok(encrypted_url) => { + let creds = aws_credential_manager + .read() + .await + .clone() + .expect("non-local mode should have creds") + .get_credentials() + .await + .expect("should have just waited for credentials"); + + tracing::info!("Retrieved and decrypting database URL from Secrets Manager"); + let url_vec = decrypt_with_kms( + &creds.region, + &creds.access_key_id, + 
&creds.secret_access_key, + &creds.token, + &encrypted_url, + ) + .map_err(|e| { + tracing::error!("Failed to decrypt database URL: {:?}", e); + Error::EncryptionError(e.to_string()) + })?; + + String::from_utf8(url_vec).expect("should parse url") + } + Err(e) => { + tracing::error!( + "Failed to retrieve database URL from Secrets Manager: {:?}", + e + ); + return Err(e); + } + } + } else { + std::env::var("DATABASE_URL").expect("DATABASE_URL must be set") + }; + + let db = setup_db(pg_url); + + // enclave secret retrieval + let enclave_key = if app_mode != AppMode::Local { + let enclave_key = + get_or_create_enclave_key(&app_mode, aws_credential_manager.clone(), db.clone()) + .await?; + enclave_key.key + } else { + let enclave_key = + std::env::var("ENCLAVE_SECRET_MOCK").expect("needs ENCLAVE_SECRET_MOCK in local mode"); + let enclave_key: [u8; 32] = hex::decode(enclave_key) + .unwrap() + .try_into() + .expect("ENCLAVE_SECRET_MOCK must be 32 bytes"); + enclave_key.to_vec() + }; + + let openai_api_base = + env::var("OPENAI_API_BASE").unwrap_or_else(|_| "https://api.openai.com".to_string()); + + let openai_api_key = if is_default_openai_domain(&openai_api_base) { + if app_mode != AppMode::Local { + Some( + retrieve_openai_api_key(aws_credential_manager.clone(), db.clone()) + .await + .expect("OpenAI API key should be retrieved correctly"), + ) + } else { + Some( + std::env::var("OPENAI_API_KEY") + .expect("OPENAI_API_KEY must be set for OpenAI domain"), + ) + } + } else { + None // No API key needed if not using OpenAI's domain + }; + + let jwt_secret = get_or_create_jwt_secret( + &app_mode, + aws_credential_manager.clone(), + db.clone(), + &enclave_key, + ) + .await?; + + let resend_api_key = if app_mode != AppMode::Local { + retrieve_resend_api_key(aws_credential_manager.clone(), db.clone()).await? 
+ } else { + std::env::var("RESEND_API_KEY").ok() + }; + + let github_client_secret = if app_mode != AppMode::Local { + retrieve_github_client_secret(aws_credential_manager.clone(), db.clone()).await? + } else { + std::env::var("GITHUB_CLIENT_SECRET").ok() + }; + + let github_client_id = if app_mode != AppMode::Local { + retrieve_github_client_id(aws_credential_manager.clone(), db.clone()).await? + } else { + std::env::var("GITHUB_CLIENT_ID").ok() + }; + + let google_client_secret = if app_mode != AppMode::Local { + retrieve_google_client_secret(aws_credential_manager.clone(), db.clone()).await? + } else { + std::env::var("GOOGLE_CLIENT_SECRET").ok() + }; + + let google_client_id = if app_mode != AppMode::Local { + retrieve_google_client_id(aws_credential_manager.clone(), db.clone()).await? + } else { + std::env::var("GOOGLE_CLIENT_ID").ok() + }; + + let sqs_queue_maple_events_url = if app_mode != AppMode::Local { + // Get from database if in enclave mode + if let Some(ref encrypted_url) = + db.get_enclave_secret_by_key("sqs_queue_maple_events_url")? 
+ { + let creds = aws_credential_manager + .read() + .await + .clone() + .expect("non-local mode should have creds") + .get_credentials() + .await + .expect("should have just waited for credentials"); + + // Decrypt the URL + let url_vec = decrypt_with_kms( + &creds.region, + &creds.access_key_id, + &creds.secret_access_key, + &creds.token, + &general_purpose::STANDARD.encode(&encrypted_url.value), + ) + .map_err(|e| { + tracing::error!("Failed to decrypt SQS queue URL: {:?}", e); + Error::EncryptionError(e.to_string()) + })?; + + Some(String::from_utf8(url_vec).expect("should parse url")) + } else { + // URL not found in database - this is optional so we'll return None + None + } + } else { + // In local mode, get from environment variable + std::env::var("SQS_QUEUE_MAPLE_EVENTS_URL").ok() + }; + + let billing_api_key = if app_mode != AppMode::Local { + // Get from database if in enclave mode + retrieve_billing_api_key(aws_credential_manager.clone(), db.clone()).await? + } else { + std::env::var("BILLING_API_KEY").ok() + }; + + let billing_server_url = if app_mode != AppMode::Local { + // Get from database if in enclave mode + retrieve_billing_server_url(aws_credential_manager.clone(), db.clone()).await? 
+ } else { + std::env::var("BILLING_SERVER_URL").ok() + }; + + let app_state = AppStateBuilder::default() + .app_mode(app_mode.clone()) + .db(db) + .enclave_key(enclave_key) + .aws_credential_manager(aws_credential_manager) + .openai_api_key(openai_api_key) + .openai_api_base(openai_api_base) + .jwt_secret(jwt_secret) + .resend_api_key(resend_api_key) + .github_client_secret(github_client_secret) + .github_client_id(github_client_id) + .google_client_secret(google_client_secret) + .google_client_id(google_client_id) + .sqs_queue_maple_events_url(sqs_queue_maple_events_url) + .billing_api_key(billing_api_key) + .billing_server_url(billing_server_url) + .build() + .await?; + tracing::info!("App state created, app_mode: {:?}", app_mode); + + let app_state = Arc::new(app_state); + + let cors = CorsLayer::new() + // allow `GET` and `POST` when accessing the resource + .allow_methods([Method::GET, Method::POST, Method::PUT, Method::DELETE]) + // allow all headers + .allow_headers(Any) + // allow requests from any origin + .allow_origin(Any); + + let app = protected_routes(app_state.clone()) + .route_layer(from_fn_with_state(app_state.clone(), validate_jwt)) + .merge(health_routes()) + .merge(login_routes(app_state.clone())) + .merge( + openai_routes(app_state.clone()) + .route_layer(from_fn_with_state(app_state.clone(), validate_jwt)), + ) + .merge(attestation_routes::router(app_state.clone())) + .merge(oauth_routes(app_state.clone())) + .layer(cors); + + let listener = tokio::net::TcpListener::bind("127.0.0.1:3000") + .await + .unwrap(); + + tracing::info!("Listening on http://localhost:3000"); + + Ok(axum::serve(listener, app.into_make_service()).await?) 
+} diff --git a/src/message_signing.rs b/src/message_signing.rs new file mode 100644 index 0000000..85582da --- /dev/null +++ b/src/message_signing.rs @@ -0,0 +1,66 @@ +use secp256k1::{Message, Secp256k1, SecretKey}; +use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; + +use crate::encrypt::generate_random; +use crate::Error; + +#[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(rename_all = "lowercase")] +pub enum SigningAlgorithm { + Schnorr, + Ecdsa, +} + +#[derive(Debug)] +pub struct SignMessageResponse { + pub signature: SignatureType, + pub message_hash: [u8; 32], +} + +#[derive(Debug)] +pub enum SignatureType { + Schnorr(secp256k1::schnorr::Signature), + Ecdsa(secp256k1::ecdsa::Signature), +} + +impl std::fmt::Display for SignatureType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + SignatureType::Schnorr(sig) => write!(f, "{}", sig), + SignatureType::Ecdsa(sig) => write!(f, "{}", sig), + } + } +} + +pub fn sign_message(secret_key: &SecretKey, message_bytes: &[u8], algorithm: SigningAlgorithm) -> Result { + let secp = Secp256k1::new(); + + // Hash the message + let mut hasher = Sha256::new(); + hasher.update(message_bytes); + let message_hash = hasher.finalize(); + let message_hash_array: [u8; 32] = message_hash.into(); + + // Create secp256k1 message from hash + let message = Message::from_digest_slice(&message_hash).map_err(|e| { + Error::SigningError(format!("Failed to create message from digest: {}", e)) + })?; + + // Sign with the specified algorithm + let signature = match algorithm { + SigningAlgorithm::Schnorr => { + let keypair = secret_key.keypair(&secp); + let random_bytes = generate_random::<32>(); + SignatureType::Schnorr(secp.sign_schnorr_with_aux_rand(&message, &keypair, &random_bytes)) + }, + SigningAlgorithm::Ecdsa => { + SignatureType::Ecdsa(secp.sign_ecdsa(&message, secret_key)) + }, + }; + + Ok(SignMessageResponse { + signature, + message_hash: message_hash_array, + }) +} \ No 
newline at end of file diff --git a/src/models/email_verification.rs b/src/models/email_verification.rs new file mode 100644 index 0000000..e4dff5b --- /dev/null +++ b/src/models/email_verification.rs @@ -0,0 +1,115 @@ +use crate::models::schema::email_verifications; +use chrono::{DateTime, Duration, Utc}; +use diesel::prelude::*; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use uuid::Uuid; + +#[derive(Error, Debug)] +pub enum EmailVerificationError { + #[error("Database error: {0}")] + DatabaseError(#[from] diesel::result::Error), +} + +#[derive(Queryable, Identifiable, AsChangeset, Serialize, Deserialize, Clone, Debug)] +#[diesel(table_name = email_verifications)] +pub struct EmailVerification { + pub id: i32, + pub user_id: Uuid, + pub verification_code: Uuid, + pub is_verified: bool, + pub created_at: DateTime, + pub updated_at: DateTime, + pub expires_at: DateTime, +} + +impl EmailVerification { + pub fn get_by_id( + conn: &mut PgConnection, + lookup_id: i32, + ) -> Result, EmailVerificationError> { + email_verifications::table + .filter(email_verifications::id.eq(lookup_id)) + .first::(conn) + .optional() + .map_err(EmailVerificationError::DatabaseError) + } + + pub fn get_by_user_id( + conn: &mut PgConnection, + lookup_user_id: Uuid, + ) -> Result, EmailVerificationError> { + email_verifications::table + .filter(email_verifications::user_id.eq(lookup_user_id)) + .first::(conn) + .optional() + .map_err(EmailVerificationError::DatabaseError) + } + + pub fn get_by_verification_code( + conn: &mut PgConnection, + lookup_code: Uuid, + ) -> Result, EmailVerificationError> { + email_verifications::table + .filter(email_verifications::verification_code.eq(lookup_code)) + .first::(conn) + .optional() + .map_err(EmailVerificationError::DatabaseError) + } + + pub fn update(&self, conn: &mut PgConnection) -> Result<(), EmailVerificationError> { + diesel::update(email_verifications::table) + .filter(email_verifications::id.eq(self.id)) + .set(self) + 
.execute(conn) + .map(|_| ()) + .map_err(EmailVerificationError::DatabaseError) + } + + pub fn delete(&self, conn: &mut PgConnection) -> Result<(), EmailVerificationError> { + diesel::delete(email_verifications::table) + .filter(email_verifications::id.eq(self.id)) + .execute(conn) + .map(|_| ()) + .map_err(EmailVerificationError::DatabaseError) + } + + pub fn verify(&mut self, conn: &mut PgConnection) -> Result<(), EmailVerificationError> { + self.is_verified = true; + self.update(conn) + } + + pub fn is_expired(&self) -> bool { + Utc::now() > self.expires_at + } +} + +#[derive(Insertable)] +#[diesel(table_name = email_verifications)] +pub struct NewEmailVerification { + pub user_id: Uuid, + pub verification_code: Uuid, + pub expires_at: DateTime, + pub is_verified: bool, +} + +impl NewEmailVerification { + pub fn new(user_id: Uuid, expiration_hours: i64, is_verified: bool) -> Self { + NewEmailVerification { + user_id, + verification_code: Uuid::new_v4(), + expires_at: Utc::now() + Duration::hours(expiration_hours), + is_verified, + } + } + + pub fn insert( + &self, + conn: &mut PgConnection, + ) -> Result { + diesel::insert_into(email_verifications::table) + .values(self) + .get_result::(conn) + .map_err(EmailVerificationError::DatabaseError) + } +} diff --git a/src/models/enclave_secrets.rs b/src/models/enclave_secrets.rs new file mode 100644 index 0000000..994183d --- /dev/null +++ b/src/models/enclave_secrets.rs @@ -0,0 +1,88 @@ +use crate::models::schema::enclave_secrets; +use diesel::prelude::*; +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum EnclaveSecretError { + #[error("Database error: {0}")] + DatabaseError(#[from] diesel::result::Error), +} + +#[derive(Queryable, Identifiable, AsChangeset, Serialize, Deserialize, Clone, Debug)] +#[diesel(table_name = enclave_secrets)] +pub struct EnclaveSecret { + pub id: i32, + pub key: String, + pub value: Vec, +} + +impl EnclaveSecret { + pub fn get_by_id( + conn: 
&mut PgConnection, + lookup_id: i32, + ) -> Result, EnclaveSecretError> { + enclave_secrets::table + .filter(enclave_secrets::id.eq(lookup_id)) + .first::(conn) + .optional() + .map_err(EnclaveSecretError::DatabaseError) + } + + pub fn get_by_key( + conn: &mut PgConnection, + lookup_key: &str, + ) -> Result, EnclaveSecretError> { + enclave_secrets::table + .filter(enclave_secrets::key.eq(lookup_key)) + .first::(conn) + .optional() + .map_err(EnclaveSecretError::DatabaseError) + } + + pub fn get_all(conn: &mut PgConnection) -> Result, EnclaveSecretError> { + enclave_secrets::table + .load::(conn) + .map_err(EnclaveSecretError::DatabaseError) + } + + pub fn update(&self, conn: &mut PgConnection) -> Result<(), EnclaveSecretError> { + diesel::update(enclave_secrets::table) + .filter(enclave_secrets::id.eq(self.id)) + .set(self) + .execute(conn) + .map(|_| ()) + .map_err(EnclaveSecretError::DatabaseError) + } + + pub fn delete(&self, conn: &mut PgConnection) -> Result<(), EnclaveSecretError> { + diesel::delete(enclave_secrets::table) + .filter(enclave_secrets::id.eq(self.id)) + .execute(conn) + .map(|_| ()) + .map_err(EnclaveSecretError::DatabaseError) + } +} + +#[derive(Insertable)] +#[diesel(table_name = enclave_secrets)] +pub struct NewEnclaveSecret { + pub key: String, + pub value: Vec, +} + +impl NewEnclaveSecret { + pub fn new(key: String, value: Vec) -> Self { + NewEnclaveSecret { key, value } + } + + pub fn insert(&self, conn: &mut PgConnection) -> Result { + diesel::insert_into(enclave_secrets::table) + .values(self) + .on_conflict(enclave_secrets::key) + .do_update() + .set(enclave_secrets::value.eq(self.value.clone())) + .get_result::(conn) + .map_err(EnclaveSecretError::DatabaseError) + } +} diff --git a/src/models/mod.rs b/src/models/mod.rs new file mode 100644 index 0000000..ff58eaa --- /dev/null +++ b/src/models/mod.rs @@ -0,0 +1,8 @@ +pub mod email_verification; +pub mod enclave_secrets; +pub mod oauth; +pub mod password_reset; +mod schema; +pub mod 
token_usage; +pub mod user_kv; +pub mod users; diff --git a/src/models/oauth.rs b/src/models/oauth.rs new file mode 100644 index 0000000..903d9e7 --- /dev/null +++ b/src/models/oauth.rs @@ -0,0 +1,210 @@ +use crate::models::schema::{oauth_providers, user_oauth_connections}; +use chrono::{DateTime, Utc}; +use diesel::prelude::*; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use uuid::Uuid; + +#[derive(Error, Debug)] +pub enum OAuthError { + #[error("Database error: {0}")] + DatabaseError(#[from] diesel::result::Error), +} + +// OAuthProvider model +#[derive(Queryable, Identifiable, AsChangeset, Serialize, Deserialize, Clone, Debug)] +#[diesel(table_name = oauth_providers)] +pub struct OAuthProvider { + pub id: i32, + pub name: String, + pub auth_url: String, + pub token_url: String, + pub user_info_url: String, + pub created_at: DateTime, + pub updated_at: DateTime, +} + +impl OAuthProvider { + pub fn get_by_id( + conn: &mut PgConnection, + lookup_id: i32, + ) -> Result, OAuthError> { + oauth_providers::table + .filter(oauth_providers::id.eq(lookup_id)) + .first::(conn) + .optional() + .map_err(OAuthError::DatabaseError) + } + + pub fn get_by_name( + conn: &mut PgConnection, + lookup_name: &str, + ) -> Result, OAuthError> { + oauth_providers::table + .filter(oauth_providers::name.eq(lookup_name)) + .first::(conn) + .optional() + .map_err(OAuthError::DatabaseError) + } + + pub fn get_all(conn: &mut PgConnection) -> Result, OAuthError> { + oauth_providers::table + .load::(conn) + .map_err(OAuthError::DatabaseError) + } + + pub fn update(&self, conn: &mut PgConnection) -> Result<(), OAuthError> { + diesel::update(oauth_providers::table) + .filter(oauth_providers::id.eq(self.id)) + .set(self) + .execute(conn) + .map(|_| ()) + .map_err(OAuthError::DatabaseError) + } + + pub fn delete(&self, conn: &mut PgConnection) -> Result<(), OAuthError> { + diesel::delete(oauth_providers::table) + .filter(oauth_providers::id.eq(self.id)) + .execute(conn) + .map(|_| 
()) + .map_err(OAuthError::DatabaseError) + } +} + +#[derive(Insertable)] +#[diesel(table_name = oauth_providers)] +pub struct NewOAuthProvider { + pub name: String, + pub auth_url: String, + pub token_url: String, + pub user_info_url: String, +} + +impl NewOAuthProvider { + pub fn new(name: String, auth_url: String, token_url: String, user_info_url: String) -> Self { + NewOAuthProvider { + name, + auth_url, + token_url, + user_info_url, + } + } + + pub fn insert(&self, conn: &mut PgConnection) -> Result { + diesel::insert_into(oauth_providers::table) + .values(self) + .get_result::(conn) + .map_err(OAuthError::DatabaseError) + } +} + +// UserOAuthConnection model +#[derive(Queryable, Identifiable, AsChangeset, Serialize, Deserialize, Clone, Debug)] +#[diesel(table_name = user_oauth_connections)] +pub struct UserOAuthConnection { + pub id: i32, + pub user_id: Uuid, + pub provider_id: i32, + pub provider_user_id: String, + pub access_token_enc: Vec, + pub refresh_token_enc: Option>, + pub expires_at: Option>, + pub created_at: DateTime, + pub updated_at: DateTime, +} + +impl UserOAuthConnection { + pub fn get_by_id( + conn: &mut PgConnection, + lookup_id: i32, + ) -> Result, OAuthError> { + user_oauth_connections::table + .filter(user_oauth_connections::id.eq(lookup_id)) + .first::(conn) + .optional() + .map_err(OAuthError::DatabaseError) + } + + pub fn get_by_user_and_provider( + conn: &mut PgConnection, + lookup_user_id: Uuid, + lookup_provider_id: i32, + ) -> Result, OAuthError> { + user_oauth_connections::table + .filter(user_oauth_connections::user_id.eq(lookup_user_id)) + .filter(user_oauth_connections::provider_id.eq(lookup_provider_id)) + .first::(conn) + .optional() + .map_err(OAuthError::DatabaseError) + } + + pub fn get_all_for_user( + conn: &mut PgConnection, + lookup_user_id: Uuid, + ) -> Result, OAuthError> { + user_oauth_connections::table + .filter(user_oauth_connections::user_id.eq(lookup_user_id)) + .load::(conn) + 
.map_err(OAuthError::DatabaseError) + } + + pub fn update(&self, conn: &mut PgConnection) -> Result<(), OAuthError> { + diesel::update(user_oauth_connections::table) + .filter(user_oauth_connections::id.eq(self.id)) + .set(( + user_oauth_connections::access_token_enc.eq(&self.access_token_enc), + user_oauth_connections::refresh_token_enc.eq(&self.refresh_token_enc), + user_oauth_connections::expires_at.eq(self.expires_at), + user_oauth_connections::updated_at.eq(diesel::dsl::now), + )) + .execute(conn) + .map(|_| ()) + .map_err(OAuthError::DatabaseError) + } + + pub fn delete(&self, conn: &mut PgConnection) -> Result<(), OAuthError> { + diesel::delete(user_oauth_connections::table) + .filter(user_oauth_connections::id.eq(self.id)) + .execute(conn) + .map(|_| ()) + .map_err(OAuthError::DatabaseError) + } +} + +#[derive(Insertable)] +#[diesel(table_name = user_oauth_connections)] +pub struct NewUserOAuthConnection { + pub user_id: Uuid, + pub provider_id: i32, + pub provider_user_id: String, + pub access_token_enc: Vec, + pub refresh_token_enc: Option>, + pub expires_at: Option>, +} + +impl NewUserOAuthConnection { + pub fn new( + user_id: Uuid, + provider_id: i32, + provider_user_id: String, + access_token_enc: Vec, + refresh_token_enc: Option>, + expires_at: Option>, + ) -> Self { + NewUserOAuthConnection { + user_id, + provider_id, + provider_user_id, + access_token_enc, + refresh_token_enc, + expires_at, + } + } + + pub fn insert(&self, conn: &mut PgConnection) -> Result { + diesel::insert_into(user_oauth_connections::table) + .values(self) + .get_result::(conn) + .map_err(OAuthError::DatabaseError) + } +} diff --git a/src/models/password_reset.rs b/src/models/password_reset.rs new file mode 100644 index 0000000..14f60ab --- /dev/null +++ b/src/models/password_reset.rs @@ -0,0 +1,88 @@ +use crate::models::schema::password_reset_requests; +use chrono::{DateTime, Utc}; +use diesel::prelude::*; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use 
uuid::Uuid; + +#[derive(Error, Debug)] +pub enum PasswordResetError { + #[error("Database error: {0}")] + DatabaseError(#[from] diesel::result::Error), +} + +#[derive(Queryable, Identifiable, AsChangeset, Serialize, Deserialize, Clone, Debug)] +#[diesel(table_name = password_reset_requests)] +pub struct PasswordResetRequest { + pub id: i32, + pub user_id: Uuid, + pub hashed_secret: String, + pub encrypted_code: Vec, + pub expiration_time: DateTime, + pub created_at: DateTime, + pub is_reset: bool, +} + +impl PasswordResetRequest { + pub fn get_by_user_id_and_code( + conn: &mut PgConnection, + lookup_user_id: Uuid, + lookup_encrypted_code: &[u8], + ) -> Result, PasswordResetError> { + password_reset_requests::table + .filter(password_reset_requests::user_id.eq(lookup_user_id)) + .filter(password_reset_requests::encrypted_code.eq(lookup_encrypted_code)) + .filter(password_reset_requests::is_reset.eq(false)) + .first::(conn) + .optional() + .map_err(PasswordResetError::DatabaseError) + } + + pub fn mark_as_reset(&self, conn: &mut PgConnection) -> Result<(), PasswordResetError> { + diesel::update(password_reset_requests::table) + .filter(password_reset_requests::id.eq(self.id)) + .set(password_reset_requests::is_reset.eq(true)) + .execute(conn) + .map(|_| ()) + .map_err(PasswordResetError::DatabaseError) + } + + pub fn is_expired(&self) -> bool { + Utc::now() > self.expiration_time + } +} + +#[derive(Insertable)] +#[diesel(table_name = password_reset_requests)] +pub struct NewPasswordResetRequest { + pub user_id: Uuid, + pub hashed_secret: String, + pub encrypted_code: Vec, + pub expiration_time: DateTime, +} + +impl NewPasswordResetRequest { + pub fn new( + user_id: Uuid, + hashed_secret: String, + encrypted_code: Vec, + expiration_hours: i64, + ) -> Self { + NewPasswordResetRequest { + user_id, + hashed_secret, + encrypted_code, + expiration_time: Utc::now() + chrono::Duration::hours(expiration_hours), + } + } + + pub fn insert( + &self, + conn: &mut PgConnection, + 
) -> Result { + diesel::insert_into(password_reset_requests::table) + .values(self) + .get_result::(conn) + .map_err(PasswordResetError::DatabaseError) + } +} diff --git a/src/models/schema.rs b/src/models/schema.rs new file mode 100644 index 0000000..73162dd --- /dev/null +++ b/src/models/schema.rs @@ -0,0 +1,110 @@ +// @generated automatically by Diesel CLI. + +diesel::table! { + email_verifications (id) { + id -> Int4, + user_id -> Uuid, + verification_code -> Uuid, + is_verified -> Bool, + created_at -> Timestamptz, + updated_at -> Timestamptz, + expires_at -> Timestamptz, + } +} + +diesel::table! { + enclave_secrets (id) { + id -> Int4, + key -> Text, + value -> Bytea, + } +} + +diesel::table! { + oauth_providers (id) { + id -> Int4, + #[max_length = 255] + name -> Varchar, + auth_url -> Text, + token_url -> Text, + user_info_url -> Text, + created_at -> Timestamptz, + updated_at -> Timestamptz, + } +} + +diesel::table! { + password_reset_requests (id) { + id -> Int4, + user_id -> Uuid, + #[max_length = 255] + hashed_secret -> Varchar, + encrypted_code -> Bytea, + expiration_time -> Timestamptz, + created_at -> Timestamptz, + is_reset -> Bool, + } +} + +diesel::table! { + token_usage (id) { + id -> Int8, + user_id -> Uuid, + input_tokens -> Int4, + output_tokens -> Int4, + estimated_cost -> Numeric, + created_at -> Timestamptz, + } +} + +diesel::table! { + user_kv (id) { + id -> Int8, + user_id -> Uuid, + key_enc -> Bytea, + value_enc -> Bytea, + created_at -> Timestamptz, + updated_at -> Timestamptz, + } +} + +diesel::table! { + user_oauth_connections (id) { + id -> Int4, + user_id -> Uuid, + provider_id -> Int4, + #[max_length = 255] + provider_user_id -> Varchar, + access_token_enc -> Bytea, + refresh_token_enc -> Nullable, + expires_at -> Nullable, + created_at -> Timestamptz, + updated_at -> Timestamptz, + } +} + +diesel::table! 
{ + users (id) { + id -> Int4, + uuid -> Uuid, + name -> Nullable, + email -> Nullable, + password_enc -> Nullable, + seed_enc -> Nullable, + created_at -> Timestamptz, + updated_at -> Timestamptz, + } +} + +diesel::joinable!(user_oauth_connections -> oauth_providers (provider_id)); + +diesel::allow_tables_to_appear_in_same_query!( + email_verifications, + enclave_secrets, + oauth_providers, + password_reset_requests, + token_usage, + user_kv, + user_oauth_connections, + users, +); diff --git a/src/models/token_usage.rs b/src/models/token_usage.rs new file mode 100644 index 0000000..eb157c1 --- /dev/null +++ b/src/models/token_usage.rs @@ -0,0 +1,56 @@ +use crate::models::schema::token_usage; +use bigdecimal::BigDecimal; +use chrono::{DateTime, Utc}; +use diesel::prelude::*; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use uuid::Uuid; + +#[derive(Error, Debug)] +pub enum TokenUsageError { + #[error("Database error: {0}")] + DatabaseError(#[from] diesel::result::Error), +} + +#[derive(Queryable, Identifiable, Serialize, Deserialize, Clone, Debug)] +#[diesel(table_name = token_usage)] +pub struct TokenUsage { + pub id: i64, + pub user_id: Uuid, + pub input_tokens: i32, + pub output_tokens: i32, + pub estimated_cost: BigDecimal, + pub created_at: DateTime, +} + +#[derive(Insertable)] +#[diesel(table_name = token_usage)] +pub struct NewTokenUsage { + pub user_id: Uuid, + pub input_tokens: i32, + pub output_tokens: i32, + pub estimated_cost: BigDecimal, +} + +impl NewTokenUsage { + pub fn new( + user_id: Uuid, + input_tokens: i32, + output_tokens: i32, + estimated_cost: BigDecimal, + ) -> Self { + NewTokenUsage { + user_id, + input_tokens, + output_tokens, + estimated_cost, + } + } + + pub fn insert(&self, conn: &mut PgConnection) -> Result { + diesel::insert_into(token_usage::table) + .values(self) + .get_result::(conn) + .map_err(TokenUsageError::DatabaseError) + } +} diff --git a/src/models/user_kv.rs b/src/models/user_kv.rs new file mode 100644 
index 0000000..41319bf --- /dev/null +++ b/src/models/user_kv.rs @@ -0,0 +1,104 @@ +use crate::models::schema::user_kv; +use chrono::{DateTime, Utc}; +use diesel::prelude::*; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use uuid::Uuid; + +#[derive(Error, Debug)] +pub enum UserKVError { + #[error("Database error: {0}")] + DatabaseError(#[from] diesel::result::Error), +} + +#[derive(Queryable, Identifiable, AsChangeset, Serialize, Deserialize, Clone, Debug)] +#[diesel(table_name = user_kv)] +pub struct UserKV { + pub id: i64, + pub user_id: Uuid, + pub key_enc: Vec, + pub value_enc: Vec, + pub created_at: DateTime, + pub updated_at: DateTime, +} + +impl UserKV { + pub fn get_by_id( + conn: &mut PgConnection, + lookup_id: i64, + ) -> Result, UserKVError> { + user_kv::table + .filter(user_kv::id.eq(lookup_id)) + .first::(conn) + .optional() + .map_err(UserKVError::DatabaseError) + } + + pub fn get_by_user_and_key( + conn: &mut PgConnection, + lookup_user_id: Uuid, + lookup_key: &Vec, + ) -> Result, UserKVError> { + user_kv::table + .filter(user_kv::user_id.eq(lookup_user_id)) + .filter(user_kv::key_enc.eq(lookup_key)) + .first::(conn) + .optional() + .map_err(UserKVError::DatabaseError) + } + + pub fn get_all_for_user( + conn: &mut PgConnection, + lookup_user_id: Uuid, + ) -> Result, UserKVError> { + user_kv::table + .filter(user_kv::user_id.eq(lookup_user_id)) + .load::(conn) + .map_err(UserKVError::DatabaseError) + } + + pub fn update(&self, conn: &mut PgConnection) -> Result<(), UserKVError> { + diesel::update(user_kv::table) + .filter(user_kv::id.eq(self.id)) + .set(self) + .execute(conn) + .map(|_| ()) + .map_err(UserKVError::DatabaseError) + } + + pub fn delete(&self, conn: &mut PgConnection) -> Result<(), UserKVError> { + diesel::delete(user_kv::table) + .filter(user_kv::id.eq(self.id)) + .execute(conn) + .map(|_| ()) + .map_err(UserKVError::DatabaseError) + } +} + +#[derive(Insertable)] +#[diesel(table_name = user_kv)] +pub struct NewUserKV { + 
pub user_id: Uuid, + pub key_enc: Vec, + pub value_enc: Vec, +} + +impl NewUserKV { + pub fn new(user_id: Uuid, key_enc: Vec, value_enc: Vec) -> Self { + NewUserKV { + user_id, + key_enc, + value_enc, + } + } + + pub fn insert(&self, conn: &mut PgConnection) -> Result { + diesel::insert_into(user_kv::table) + .values(self) + .on_conflict((user_kv::user_id, user_kv::key_enc)) + .do_update() + .set(user_kv::value_enc.eq(self.value_enc.clone())) + .get_result::(conn) + .map_err(UserKVError::DatabaseError) + } +} diff --git a/src/models/users.rs b/src/models/users.rs new file mode 100644 index 0000000..a20ecf8 --- /dev/null +++ b/src/models/users.rs @@ -0,0 +1,179 @@ +use crate::models::schema::users; +use chrono::{DateTime, Utc}; +use diesel::prelude::*; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use uuid::Uuid; + +#[derive(Error, Debug)] +pub enum UserError { + #[error("Database error: {0}")] + DatabaseError(#[from] diesel::result::Error), +} + +#[derive(QueryableByName, Queryable, AsChangeset, Serialize, Deserialize, Clone, PartialEq)] +#[diesel(check_for_backend(diesel::pg::Pg))] +#[diesel(table_name = users)] +pub struct User { + id: i32, + pub uuid: Uuid, + pub name: Option, + pub email: Option, + pub password_enc: Option>, + seed_enc: Option>, + pub created_at: DateTime, + pub updated_at: DateTime, +} + +impl User { + pub async fn get_seed_encrypted(&self) -> Option> { + self.seed_enc.clone() + } + + pub fn get_by_id(conn: &mut PgConnection, lookup_id: i32) -> Result, UserError> { + users::table + .filter(users::id.eq(lookup_id)) + .first::(conn) + .optional() + .map_err(UserError::DatabaseError) + } + + pub fn get_by_uuid( + conn: &mut PgConnection, + lookup_uuid: Uuid, + ) -> Result, UserError> { + users::table + .filter(users::uuid.eq(lookup_uuid)) + .first::(conn) + .optional() + .map_err(UserError::DatabaseError) + } + + pub fn get_by_email( + conn: &mut PgConnection, + lookup_email: String, + ) -> Result, UserError> { + users::table + 
.filter(users::email.eq(lookup_email)) + .first::(conn) + .optional() + .map_err(UserError::DatabaseError) + } + + pub fn set_key( + &self, + conn: &mut PgConnection, + new_seed_encrypted: Vec, + ) -> Result<(), UserError> { + diesel::update(users::table) + .filter(users::id.eq(self.id)) + .set(users::seed_enc.eq(new_seed_encrypted)) + .execute(conn) + .map(|_| ()) + .map_err(UserError::DatabaseError) + } + + pub fn get_id(&self) -> Uuid { + self.uuid + } + + pub fn get_email(&self) -> Option<&str> { + self.email.as_deref() + } + + pub fn update_password( + &self, + conn: &mut PgConnection, + new_password_enc: Option>, + ) -> Result<(), UserError> { + diesel::update(users::table) + .filter(users::id.eq(self.id)) + .set(users::password_enc.eq(new_password_enc)) + .execute(conn) + .map(|_| ()) + .map_err(UserError::DatabaseError) + } + + pub fn is_guest(&self) -> bool { + self.email.is_none() + } + + pub fn update(&self, conn: &mut PgConnection) -> Result<(), UserError> { + diesel::update(users::table) + .filter(users::id.eq(self.id)) + .set(( + users::email.eq(&self.email), + users::password_enc.eq(&self.password_enc), + users::name.eq(&self.name), + users::updated_at.eq(diesel::dsl::now), + )) + .execute(conn) + .map(|_| ()) + .map_err(UserError::DatabaseError) + } +} + +// Here we've implemented `Debug` manually to avoid accidentally logging the +// password hash. 
+impl std::fmt::Debug for User { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("User") + .field("id", &self.id) + .field("uuid", &self.uuid) + .field("name", &self.name) + .field("email", &self.email) + .field("password", &"[redacted]") + .field("private_key", &"[redacted]") + .finish() + } +} + +#[derive(Insertable)] +#[diesel(table_name = users)] +pub struct NewUser { + pub name: Option, + pub email: Option, + pub password_enc: Option>, + pub seed_enc: Option>, +} + +impl NewUser { + pub fn new(email: Option, password_enc: Option>) -> Self { + NewUser { + name: None, + email, + password_enc, + seed_enc: None, + } + } + + pub fn insert(&self, conn: &mut PgConnection) -> Result { + diesel::insert_into(users::table) + .values(self) + .get_result::(conn) + .map_err(UserError::DatabaseError) + } + + pub fn with_name(mut self, name: String) -> Self { + self.name = Some(name); + self + } + + pub fn with_name_option(mut self, name: Option) -> Self { + self.name = name; + self + } +} + +// Here we've implemented `Debug` manually to avoid accidentally logging the +// password hash. 
+impl std::fmt::Debug for NewUser { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("User") + .field("name", &self.name) + .field("email", &self.email) + .field("password_enc", &"[redacted]") + .field("seed_enc", &"[redacted]") + .finish() + } +} diff --git a/src/oauth.rs b/src/oauth.rs new file mode 100644 index 0000000..6d612ee --- /dev/null +++ b/src/oauth.rs @@ -0,0 +1,294 @@ +use crate::db::DBConnection; +use crate::models::oauth::NewOAuthProvider; +use crate::Error; +use async_trait::async_trait; +use oauth2::{ + basic::BasicClient, AuthUrl, AuthorizationCode, ClientId, ClientSecret, CsrfToken, RedirectUrl, + Scope, TokenResponse, TokenUrl, +}; +use std::collections::HashMap; +use std::sync::Arc; +use tokio::sync::RwLock; +use tracing::{debug, error, info}; + +#[derive(Debug, Clone)] +pub struct GithubProvider { + pub auth_url: String, + pub token_url: String, + pub user_info_url: String, + pub client: BasicClient, + pub state_store: Arc>>, +} + +impl GithubProvider { + pub async fn new( + db: Arc, + client_id: String, + client_secret: String, + redirect_url: String, + ) -> Result { + let auth_url = AuthUrl::new("https://github.com/login/oauth/authorize".to_string()) + .map_err(|e| Error::OAuthError(format!("Invalid auth URL: {}", e)))?; + let token_url = TokenUrl::new("https://github.com/login/oauth/access_token".to_string()) + .map_err(|e| Error::OAuthError(format!("Invalid token URL: {}", e)))?; + + let client = BasicClient::new( + ClientId::new(client_id), + Some(ClientSecret::new(client_secret)), + auth_url.clone(), + Some(token_url.clone()), + ) + .set_redirect_uri( + RedirectUrl::new(redirect_url) + .map_err(|e| Error::OAuthError(format!("Invalid redirect URL: {}", e)))?, + ); + + let provider = Self { + auth_url: auth_url.to_string(), + token_url: token_url.to_string(), + user_info_url: "https://api.github.com/user".to_string(), + client, + state_store: Arc::new(RwLock::new(HashMap::new())), + }; + + // Ensure 
the provider exists in the database + provider.ensure_provider_exists(db).await?; + + info!("GitHub OAuth provider initialized successfully"); + Ok(provider) + } + + pub async fn generate_authorize_url(&self) -> (String, CsrfToken) { + let (auth_url, csrf_token) = self + .client + .authorize_url(CsrfToken::new_random) + .add_scope(Scope::new("user:email".to_string())) + .url(); + + // Store the CSRF token + self.state_store + .write() + .await + .insert(csrf_token.secret().clone(), csrf_token.clone()); + + (auth_url.to_string(), csrf_token) + } + + pub async fn validate_state(&self, state: &str) -> bool { + self.state_store.read().await.contains_key(state) + } + + pub async fn exchange_code(&self, code: String) -> Result { + let token_result = self + .client + .exchange_code(AuthorizationCode::new(code)) + .request_async(oauth2::reqwest::async_http_client) + .await + .map_err(|e| Error::OAuthError(format!("Failed to exchange code: {}", e)))?; + + Ok(token_result.access_token().clone()) + } + + async fn ensure_provider_exists( + &self, + db: Arc, + ) -> Result<(), Error> { + debug!("Checking if GitHub OAuth provider exists in the database"); + let existing_provider = db.get_oauth_provider_by_name("github")?; + + if existing_provider.is_none() { + info!("GitHub OAuth provider not found in database, creating new entry"); + let new_provider = NewOAuthProvider { + name: "github".to_string(), + auth_url: self.auth_url.clone(), + token_url: self.token_url.clone(), + user_info_url: self.user_info_url.clone(), + }; + + match db.create_oauth_provider(new_provider) { + Ok(_) => info!("GitHub OAuth provider successfully added to database"), + Err(e) => { + error!( + "Failed to create GitHub OAuth provider in database: {:?}", + e + ); + return Err(e.into()); + } + } + } else { + debug!("GitHub OAuth provider already exists in database"); + } + + Ok(()) + } +} + +#[derive(Debug, Clone)] +pub struct GoogleProvider { + pub auth_url: String, + pub token_url: String, + pub 
user_info_url: String, + pub client: BasicClient, + pub state_store: Arc>>, +} + +impl GoogleProvider { + pub async fn new( + db: Arc, + client_id: String, + client_secret: String, + redirect_url: String, + ) -> Result { + let auth_url = AuthUrl::new("https://accounts.google.com/o/oauth2/v2/auth".to_string()) + .map_err(|e| Error::OAuthError(format!("Invalid auth URL: {}", e)))?; + let token_url = TokenUrl::new("https://oauth2.googleapis.com/token".to_string()) + .map_err(|e| Error::OAuthError(format!("Invalid token URL: {}", e)))?; + + let client = BasicClient::new( + ClientId::new(client_id), + Some(ClientSecret::new(client_secret)), + auth_url.clone(), + Some(token_url.clone()), + ) + .set_redirect_uri( + RedirectUrl::new(redirect_url) + .map_err(|e| Error::OAuthError(format!("Invalid redirect URL: {}", e)))?, + ); + + let provider = Self { + auth_url: auth_url.to_string(), + token_url: token_url.to_string(), + user_info_url: "https://www.googleapis.com/oauth2/v3/userinfo".to_string(), + client, + state_store: Arc::new(RwLock::new(HashMap::new())), + }; + + // Ensure the provider exists in the database + provider.ensure_provider_exists(db).await?; + + info!("Google OAuth provider initialized successfully"); + Ok(provider) + } + + pub async fn generate_authorize_url(&self) -> (String, CsrfToken) { + let (auth_url, csrf_token) = self + .client + .authorize_url(CsrfToken::new_random) + .add_scope(Scope::new("email".to_string())) + .add_scope(Scope::new("profile".to_string())) + .url(); + + // Store the CSRF token + self.state_store + .write() + .await + .insert(csrf_token.secret().clone(), csrf_token.clone()); + + (auth_url.to_string(), csrf_token) + } + + pub async fn validate_state(&self, state: &str) -> bool { + self.state_store.read().await.contains_key(state) + } + + pub async fn exchange_code(&self, code: String) -> Result { + let token_result = self + .client + .exchange_code(AuthorizationCode::new(code)) + .request_async(oauth2::reqwest::async_http_client) 
+ .await + .map_err(|e| Error::OAuthError(format!("Failed to exchange code: {}", e)))?; + + Ok(token_result.access_token().clone()) + } + + async fn ensure_provider_exists( + &self, + db: Arc, + ) -> Result<(), Error> { + debug!("Checking if Google OAuth provider exists in the database"); + let existing_provider = db.get_oauth_provider_by_name("google")?; + + if existing_provider.is_none() { + info!("Google OAuth provider not found in database, creating new entry"); + let new_provider = NewOAuthProvider { + name: "google".to_string(), + auth_url: self.auth_url.clone(), + token_url: self.token_url.clone(), + user_info_url: self.user_info_url.clone(), + }; + + match db.create_oauth_provider(new_provider) { + Ok(_) => info!("Google OAuth provider successfully added to database"), + Err(e) => { + error!( + "Failed to create Google OAuth provider in database: {:?}", + e + ); + return Err(e.into()); + } + } + } else { + debug!("Google OAuth provider already exists in database"); + } + + Ok(()) + } +} + +#[async_trait] +pub trait OAuthProvider: Send + Sync { + async fn generate_authorize_url(&self) -> (String, CsrfToken); + async fn validate_state(&self, state: &str) -> bool; + async fn exchange_code(&self, code: String) -> Result; +} + +#[async_trait] +impl OAuthProvider for GithubProvider { + async fn generate_authorize_url(&self) -> (String, CsrfToken) { + self.generate_authorize_url().await + } + + async fn validate_state(&self, state: &str) -> bool { + self.validate_state(state).await + } + + async fn exchange_code(&self, code: String) -> Result { + self.exchange_code(code).await + } +} + +#[async_trait] +impl OAuthProvider for GoogleProvider { + async fn generate_authorize_url(&self) -> (String, CsrfToken) { + self.generate_authorize_url().await + } + + async fn validate_state(&self, state: &str) -> bool { + self.validate_state(state).await + } + + async fn exchange_code(&self, code: String) -> Result { + self.exchange_code(code).await + } +} + +pub struct 
OAuthManager { + providers: HashMap>, +} + +impl OAuthManager { + pub fn new() -> Self { + Self { + providers: HashMap::new(), + } + } + + pub fn add_provider(&mut self, name: String, provider: Box) { + self.providers.insert(name, provider); + } + + pub fn get_provider(&self, name: &str) -> Option<&(dyn OAuthProvider + Send + Sync)> { + self.providers.get(name).map(|p| p.as_ref()) + } +} diff --git a/src/private_key.rs b/src/private_key.rs new file mode 100644 index 0000000..31a7837 --- /dev/null +++ b/src/private_key.rs @@ -0,0 +1,63 @@ +use bip39::Mnemonic; +use bitcoin::{ + bip32::{DerivationPath, Xpriv}, + Network, +}; +use secp256k1::SecretKey; +use std::{str::FromStr, sync::Arc}; + +use crate::{ + aws_credentials::AwsCredentialManager, + encrypt::{decrypt_with_key, generate_random_enclave}, + Error, +}; + +pub async fn generate_twelve_word_seed( + aws_credential_manager: Arc>>, +) -> Result { + // the bip39 library supports 12. 15, 18, 21, and 24 word mnemonics + // we only support 12 words, which is 16 bytes of entropy + let random_bytes: [u8; 16] = generate_random_enclave::<16>(aws_credential_manager).await; + + let mnemonic = + Mnemonic::from_entropy(&random_bytes).map_err(|_| Error::PrivateKeyGenerationFailure)?; + Ok(mnemonic) +} + +pub fn decrypt_user_seed_to_key( + enclave_key: Vec, + encrypted_seed: Vec, + derivation_path: Option<&str>, +) -> Result { + let user_mnemonic = decrypt_user_seed_to_mnemonic(enclave_key, encrypted_seed)?; + let user_seed = user_mnemonic.to_seed(""); + let xprivkey = Xpriv::new_master(Network::Bitcoin, &user_seed) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + + // If a derivation path is provided, derive the child key + if let Some(path) = derivation_path { + let path = DerivationPath::from_str(path) + .map_err(|e| Error::InvalidDerivationPath(e.to_string()))?; + let derived_key = xprivkey + .derive_priv(&secp256k1::Secp256k1::new(), &path) + .map_err(|e| Error::KeyDerivationError(e.to_string()))?; + 
Ok(derived_key.private_key) + } else { + Ok(xprivkey.private_key) + } +} + +pub fn decrypt_user_seed_to_mnemonic( + enclave_key: Vec, + encrypted_seed: Vec, +) -> Result { + let enclave_secret_key = + SecretKey::from_slice(&enclave_key).map_err(|e| Error::EncryptionError(e.to_string()))?; + let decrypted_user_seed_bytes = decrypt_with_key(&enclave_secret_key, &encrypted_seed) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + let decrypted_user_seed_str = String::from_utf8(decrypted_user_seed_bytes) + .map_err(|e| Error::EncryptionError(format!("Failed to decode UTF-8: {}", e)))?; + let user_mnemonic = Mnemonic::from_str(&decrypted_user_seed_str) + .map_err(|e| Error::EncryptionError(e.to_string()))?; + Ok(user_mnemonic) +} diff --git a/src/sqs.rs b/src/sqs.rs new file mode 100644 index 0000000..c801a51 --- /dev/null +++ b/src/sqs.rs @@ -0,0 +1,149 @@ +use crate::aws_credentials::AwsCredentialManager; +use aws_sdk_sqs::{config::Credentials, Client as SqsClient}; +use backoff::SystemClock; +use backoff::{exponential::ExponentialBackoff, future::retry, Error as BackoffError}; +use bigdecimal::BigDecimal; +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use std::{sync::Arc, time::Duration}; +use tokio::sync::RwLock; +use tracing::{debug, error, info}; +use uuid::Uuid; + +const DEFAULT_REGION: &str = "us-east-2"; +const INITIAL_INTERVAL_MS: u64 = 100; +const MAX_INTERVAL_MS: u64 = 10_000; // 10 seconds +const MAX_ELAPSED_TIME_SECS: u64 = 120; // 2 minutes + +#[derive(Clone)] +pub struct SqsEventPublisher { + queue_url: String, + aws_credential_manager: Arc>>, + region: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UsageEvent { + pub event_id: Uuid, + pub user_id: Uuid, + pub input_tokens: i32, + pub output_tokens: i32, + pub estimated_cost: BigDecimal, + pub chat_time: DateTime, +} + +#[derive(Debug, thiserror::Error)] +pub enum SqsError { + #[error("AWS SDK error: {0}")] + AwsSdk(String), + #[error("Serialization 
error: {0}")] + Serialization(#[from] serde_json::Error), + #[error("No credentials available")] + NoCredentials, +} + +impl SqsEventPublisher { + pub async fn new( + queue_url: String, + region: Option, + aws_credential_manager: Arc>>, + ) -> Self { + let region = region.unwrap_or_else(|| DEFAULT_REGION.to_string()); + Self { + queue_url, + aws_credential_manager, + region, + } + } + + async fn create_client(&self) -> Result { + let creds = if let Some(manager) = self.aws_credential_manager.read().await.as_ref() { + manager + .get_credentials() + .await + .ok_or(SqsError::NoCredentials)? + } else { + debug!("Using default AWS credential chain"); + return Ok(SqsClient::new( + &aws_config::defaults(aws_config::BehaviorVersion::latest()) + .region(aws_types::region::Region::new(self.region.clone())) + .load() + .await, + )); + }; + + let aws_creds = Credentials::new( + creds.access_key_id, + creds.secret_access_key, + Some(creds.token), + None, + "sqs-publisher", + ); + + let config = aws_config::defaults(aws_config::BehaviorVersion::latest()) + .region(aws_types::region::Region::new(self.region.clone())) + .credentials_provider(aws_creds) + .load() + .await; + + Ok(SqsClient::new(&config)) + } + + pub async fn publish_event(&self, event: UsageEvent) -> Result<(), SqsError> { + let event_id = event.event_id; + let user_id = event.user_id; + + info!("Publishing event {} for user {}", event_id, user_id); + + let backoff = ExponentialBackoff:: { + initial_interval: Duration::from_millis(INITIAL_INTERVAL_MS), + max_interval: Duration::from_millis(MAX_INTERVAL_MS), + multiplier: 2.0, + max_elapsed_time: Some(Duration::from_secs(MAX_ELAPSED_TIME_SECS)), + ..ExponentialBackoff::default() + }; + + let result = retry(backoff, || async { + let client = match self.create_client().await { + Ok(client) => client, + Err(e) => return Err(BackoffError::transient(e)), + }; + + let message_body = serde_json::to_string(&event) + .map_err(|e| 
BackoffError::permanent(SqsError::Serialization(e)))?; + + debug!("sending message to SQS: {:?}", event); + + match client + .send_message() + .queue_url(&self.queue_url) + .message_body(&message_body) + .send() + .await + { + Ok(_) => Ok(()), + Err(e) => Err(BackoffError::transient(SqsError::AwsSdk(e.to_string()))), + } + }) + .await; + + match result { + Ok(_) => { + info!( + "Successfully published event {} for user {} to SQS", + event_id, user_id + ); + Ok(()) + } + Err(_) => { + error!( + "Failed to publish event after retries. Event data: {:?}", + event + ); + Err(SqsError::AwsSdk( + "Failed to publish after retries".to_string(), + )) + } + } + } +} diff --git a/src/web.rs b/src/web.rs new file mode 100644 index 0000000..47e1f59 --- /dev/null +++ b/src/web.rs @@ -0,0 +1,13 @@ +pub mod attestation_routes; +pub mod encryption_middleware; +mod health_routes; +mod login_routes; +mod oauth_routes; +mod openai; +mod protected_routes; + +pub use health_routes::router as health_routes; +pub use login_routes::router as login_routes; +pub use oauth_routes::router as oauth_routes; +pub use openai::router as openai_routes; +pub use protected_routes::router as protected_routes; diff --git a/src/web/attestation_routes.rs b/src/web/attestation_routes.rs new file mode 100644 index 0000000..374fec1 --- /dev/null +++ b/src/web/attestation_routes.rs @@ -0,0 +1,466 @@ +use crate::{encrypt::generate_random, ApiError, AppMode, AppState}; +use aws_nitro_enclaves_nsm_api::{ + api::{Request, Response}, + driver::{nsm_exit, nsm_init, nsm_process_request}, +}; +use axum::routing::post; +use axum::{extract::State, routing::get, Json}; +use axum::{http::StatusCode, Router}; +use base64::{engine::general_purpose, Engine as _}; +use chacha20poly1305::{aead::Aead, ChaCha20Poly1305, Key, KeyInit, Nonce}; +use chrono::{Duration, Utc}; +use secp256k1::{PublicKey, Secp256k1, SecretKey}; +use serde::{Deserialize, Serialize}; +use serde_bytes::ByteBuf; +use serde_cbor::Value; +use sha2::{Digest, 
Sha256}; +use std::collections::BTreeMap; +use std::sync::Arc; +use tracing::{debug, error, trace}; +use uuid::Uuid; +use x25519_dalek::SharedSecret; +use yasna::models::ObjectIdentifier; +use yasna::{construct_der, Tag}; + +pub struct SessionState { + session_key: [u8; 32], + shared_secret: SharedSecret, +} + +impl SessionState { + pub fn new(shared_secret: SharedSecret, session_key: [u8; 32]) -> Self { + Self { + shared_secret, + session_key, + } + } + + pub fn get_session_key(&self) -> [u8; 32] { + self.session_key + } + + pub fn decrypt(&self, encrypted_data: &[u8], nonce: &[u8; 12]) -> Result, ApiError> { + tracing::trace!("decrypting encrypted data"); + tracing::trace!("session key: {:?}", self.session_key); + tracing::trace!("nonce: {:?}", nonce); + tracing::trace!("encrypted data length: {}", encrypted_data.len()); + + let key = Key::from_slice(self.session_key.as_ref()); + let cipher = ChaCha20Poly1305::new(key); + let nonce = Nonce::from_slice(nonce); + + cipher.decrypt(nonce, encrypted_data).map_err(|e| { + tracing::error!("could not decrypt data: {e}"); + ApiError::InternalServerError + }) + } + + pub fn encrypt(&self, data: &[u8], nonce: &[u8; 12]) -> Result, ApiError> { + let key = Key::from_slice(self.shared_secret.as_bytes()); + let cipher = ChaCha20Poly1305::new(key); + let nonce = Nonce::from_slice(nonce); + cipher + .encrypt(nonce, data) + .map_err(|_| ApiError::InternalServerError) + } +} + +#[derive(Deserialize)] +struct KeyExchangeRequest { + nonce: String, + client_public_key: String, +} + +#[derive(Serialize)] +struct KeyExchangeResponse { + session_id: Uuid, + encrypted_session_key: String, +} + +#[derive(Serialize)] +struct AttestationResponse { + attestation_document: String, +} + +pub fn router(app_state: Arc) -> Router<()> { + Router::new() + .route("/attestation/:nonce", get(get_attestation)) + .route("/key_exchange", post(key_exchange)) + .with_state(app_state) +} + +async fn get_attestation( + State(data): State>, + 
axum::extract::Path(nonce): axum::extract::Path, +) -> Result<(StatusCode, Json), ApiError> { + debug!("Entering get_attestation function"); + trace!("Entering get_attestation"); + + // Create an ephemeral key pair for this request + trace!("Creating ephemeral key"); + let enclave_public_key = data.create_ephemeral_key(nonce.clone()).await; + trace!("Ephemeral key created"); + + // Create a request for the attestation document + let request = Request::Attestation { + user_data: None, + public_key: Some(ByteBuf::from(enclave_public_key.as_bytes().to_vec())), + nonce: Some(ByteBuf::from(nonce.into_bytes())), + }; + + trace!("Generating attestation based on app mode"); + let result = match data.app_mode { + AppMode::Local => generate_mock_attestation(data.clone(), request).await, + _ => generate_real_attestation(data, request).await, + }; + + trace!("Exiting get_attestation"); + debug!("Exiting get_attestation function"); + result +} + +async fn generate_mock_attestation( + data: Arc, + request: Request, +) -> Result<(StatusCode, Json), ApiError> { + debug!("Entering generate_mock_attestation function"); + trace!("Entering generate_mock_attestation"); + + let (user_data, nonce, public_key) = match request { + Request::Attestation { + user_data, + nonce, + public_key, + } => (user_data, nonce, public_key), + _ => unreachable!(), + }; + + // Create a mock attestation document + trace!("Creating mock attestation document"); + let mock_document = + create_mock_attestation_document(data.clone(), user_data, nonce, public_key).await; + trace!("Mock attestation document created"); + + // Encode the mock document + trace!("Encoding mock document"); + let encoded_document = serde_cbor::to_vec(&mock_document).map_err(|e| { + error!("Failed to encode mock document: {}", e); + ApiError::InternalServerError + })?; + trace!("Mock document encoded"); + + // Sign the mock document + trace!("Signing mock document"); + let (signature, _) = 
sign_mock_document(&encoded_document).map_err(|e| { + error!("Failed to sign mock document: {}", e); + ApiError::InternalServerError + })?; + trace!("Mock document signed"); + + // Create the COSE_Sign1 structure + trace!("Creating COSE_Sign1 structure"); + let cose_sign1 = create_cose_sign1(encoded_document, signature); + trace!("COSE_Sign1 structure created"); + + // Encode the COSE_Sign1 structure + trace!("Encoding COSE_Sign1 structure"); + let final_document = serde_cbor::to_vec(&cose_sign1).map_err(|e| { + error!("Failed to encode COSE_Sign1 structure: {}", e); + ApiError::InternalServerError + })?; + trace!("COSE_Sign1 structure encoded"); + + // Convert to base64 + trace!("Converting to base64"); + let attestation_doc_base64 = general_purpose::STANDARD.encode(&final_document); + trace!("Converted to base64"); + + trace!("Exiting generate_mock_attestation"); + debug!("Exiting generate_mock_attestation function"); + Ok(( + StatusCode::OK, + Json(AttestationResponse { + attestation_document: attestation_doc_base64, + }), + )) +} + +async fn create_mock_attestation_document( + data: Arc, + user_data: Option, + nonce: Option, + public_key: Option, +) -> Value { + trace!("Entering create_mock_attestation_document"); + + let mut pcrs = BTreeMap::new(); + for i in 0..3 { + trace!("Generating random bytes for PCR {}", i); + let random_bytes = generate_random::<48>(); + pcrs.insert( + Value::Integer(i.into()), + Value::Bytes(random_bytes.to_vec()), + ); + } + + trace!("Generating module_id"); + let module_id = format!("i-{}", hex::encode(generate_random::<8>())); + + // Create a mock certificate + trace!("Creating mock certificate"); + let mock_cert = create_mock_certificate(data.clone()).await; + trace!("Creating cabundle"); + let cabundle = vec![ + create_mock_certificate(data.clone()).await, + create_mock_certificate(data.clone()).await, + ]; + + trace!("Building attestation document"); + let mut document = BTreeMap::new(); + 
document.insert(Value::Text("module_id".into()), Value::Text(module_id)); + document.insert(Value::Text("digest".into()), Value::Text("SHA384".into())); + document.insert( + Value::Text("timestamp".into()), + Value::Integer(chrono::Utc::now().timestamp().into()), + ); + document.insert(Value::Text("pcrs".into()), Value::Map(pcrs)); + document.insert(Value::Text("certificate".into()), Value::Bytes(mock_cert)); + document.insert( + Value::Text("cabundle".into()), + Value::Array(cabundle.into_iter().map(Value::Bytes).collect()), + ); + if let Some(p) = public_key { + document.insert(Value::Text("public_key".into()), Value::Bytes(p.to_vec())); + } + if let Some(u) = user_data { + document.insert(Value::Text("user_data".into()), Value::Bytes(u.to_vec())); + } + if let Some(n) = nonce { + document.insert(Value::Text("nonce".into()), Value::Bytes(n.to_vec())); + } + + trace!("Exiting create_mock_attestation_document"); + Value::Map(document) +} + +async fn create_mock_certificate(_data: Arc) -> Vec { + trace!("Entering create_mock_certificate"); + + trace!("Generating random bytes"); + let random_8_bytes = generate_random::<8>(); + let random_32_bytes = generate_random::<32>(); + + trace!("Constructing DER"); + let result = construct_der(|writer| { + writer.write_sequence(|writer| { + // TBSCertificate + writer.next().write_sequence(|writer| { + // Version + writer.next().write_tagged(Tag::context(0), |writer| { + writer.write_i32(2) // v3 + }); + // SerialNumber + writer.next().write_u64(u64::from_be_bytes(random_8_bytes)); + // Signature Algorithm + writer.next().write_sequence(|writer| { + writer + .next() + .write_oid(&ObjectIdentifier::from_slice(&[1, 2, 840, 10045, 4, 3, 2])); + // ecdsa-with-SHA256 + }); + // Issuer + writer.next().write_sequence(|writer| { + writer.next().write_set(|writer| { + writer.next().write_sequence(|writer| { + writer + .next() + .write_oid(&ObjectIdentifier::from_slice(&[2, 5, 4, 3])); // commonName + writer.next().write_utf8_string("Mock 
CA"); + }); + }); + }); + // Validity + writer.next().write_sequence(|writer| { + let now = Utc::now(); + let not_after = now + Duration::days(365); + + // Write dates as bytes + writer + .next() + .write_bytes(&now.format("%y%m%d%H%M%SZ").to_string().into_bytes()); + writer + .next() + .write_bytes(&not_after.format("%y%m%d%H%M%SZ").to_string().into_bytes()); + }); + // Subject + writer.next().write_sequence(|writer| { + writer.next().write_set(|writer| { + writer.next().write_sequence(|writer| { + writer + .next() + .write_oid(&ObjectIdentifier::from_slice(&[2, 5, 4, 3])); // commonName + writer.next().write_utf8_string("Mock Enclave"); + }); + }); + }); + // SubjectPublicKeyInfo + writer.next().write_sequence(|writer| { + writer.next().write_sequence(|writer| { + writer + .next() + .write_oid(&ObjectIdentifier::from_slice(&[1, 2, 840, 10045, 2, 1])); // ecPublicKey + writer + .next() + .write_oid(&ObjectIdentifier::from_slice(&[1, 3, 132, 0, 34])); + // secp384r1 + }); + writer + .next() + .write_bitvec_bytes(&random_32_bytes, random_32_bytes.len() * 8); + }); + }); + // SignatureAlgorithm + writer.next().write_sequence(|writer| { + writer + .next() + .write_oid(&ObjectIdentifier::from_slice(&[1, 2, 840, 10045, 4, 3, 2])); + // ecdsa-with-SHA256 + }); + // SignatureValue + writer + .next() + .write_bitvec_bytes(&random_32_bytes, random_32_bytes.len() * 8); + }) + }); + + trace!("Exiting create_mock_certificate"); + result +} + +fn sign_mock_document(document: &[u8]) -> Result<(Vec<u8>, PublicKey), String> { + let secp = Secp256k1::new(); + let secret_key = SecretKey::from_slice(&[0x42; 32]) + .map_err(|e| format!("Failed to create secret key: {}", e))?; + let public_key = PublicKey::from_secret_key(&secp, &secret_key); + + let mut hasher = Sha256::new(); + hasher.update(document); + let message_hash = hasher.finalize(); + + let message = secp256k1::Message::from_digest_slice(&message_hash) + .map_err(|e| format!("Failed to create message from digest: {}", e))?; + + let 
signature = secp.sign_ecdsa(&message, &secret_key); + + Ok((signature.serialize_compact().to_vec(), public_key)) +} + +fn create_cose_sign1(payload: Vec, signature: Vec) -> Value { + Value::Array(vec![ + Value::Bytes(vec![]), // Protected header (empty) + Value::Map(BTreeMap::new()), // Unprotected header (empty) + Value::Bytes(payload), + Value::Bytes(signature), + ]) +} + +async fn generate_real_attestation( + _data: Arc, + request: Request, +) -> Result<(StatusCode, Json), ApiError> { + debug!("Entering generate_real_attestation function"); + // Initialize the Nitro Secure Module (NSM) driver + let nsm_fd = nsm_init(); + if nsm_fd < 0 { + return Err(ApiError::InternalServerError); + } + + // Process the request and get the response + let response = nsm_process_request(nsm_fd, request); + + // Close the NSM file descriptor + nsm_exit(nsm_fd); + + // Handle the response + match response { + Response::Attestation { document } => { + // Convert the attestation document to a base64 encoded string + let attestation_doc_base64 = general_purpose::STANDARD.encode(&document); + + Ok(( + StatusCode::OK, + Json(AttestationResponse { + attestation_document: attestation_doc_base64, + }), + )) + } + Response::Error(_) => { + error!("NSM returned an error response"); + Err(ApiError::InternalServerError) + } + _ => { + error!("Unexpected response from NSM"); + Err(ApiError::InternalServerError) + } + } +} + +async fn key_exchange( + State(data): State>, + Json(payload): Json, +) -> Result, ApiError> { + debug!("Entering key_exchange function"); + trace!("Starting key exchange"); + + let client_public_key_bytes = general_purpose::STANDARD + .decode(&payload.client_public_key) + .map_err(|_| ApiError::BadRequest)?; + + let client_public_key = x25519_dalek::PublicKey::from( + <[u8; 32]>::try_from(client_public_key_bytes.as_slice()) + .map_err(|_| ApiError::BadRequest)?, + ); + + let ephemeral_secret = data + .get_and_remove_ephemeral_secret(&payload.nonce) + .await + 
.ok_or(ApiError::BadRequest)?; + + let shared_secret = ephemeral_secret.diffie_hellman(&client_public_key); + + // Generate a random session key using your secure random function + let session_key: [u8; 32] = crate::encrypt::generate_random(); + + // Encrypt the session key using the shared secret + let nonce_bytes: [u8; 12] = crate::encrypt::generate_random(); + let nonce = Nonce::from_slice(&nonce_bytes); + let cipher = ChaCha20Poly1305::new(shared_secret.as_bytes().into()); + let mut encrypted_session_key = nonce_bytes.to_vec(); + encrypted_session_key.extend_from_slice( + &cipher + .encrypt(nonce, session_key.as_ref()) + .map_err(|_| ApiError::InternalServerError)?, + ); + + // Generate a new UUID for the session + let session_id = Uuid::new_v4(); + + trace!( + "Generated session key {:?} for nonce {:?}", + session_key, + nonce + ); + + // Store the session state + data.session_states + .write() + .await + .insert(session_id, SessionState::new(shared_secret, session_key)); + + debug!("Exiting key_exchange function"); + Ok(Json(KeyExchangeResponse { + session_id, + encrypted_session_key: general_purpose::STANDARD.encode(&encrypted_session_key), + })) +} + diff --git a/src/web/encryption_middleware.rs b/src/web/encryption_middleware.rs new file mode 100644 index 0000000..e8152ac --- /dev/null +++ b/src/web/encryption_middleware.rs @@ -0,0 +1,105 @@ +use axum::{ + body::Body, + extract::State, + http::{HeaderMap, Method, Request}, + middleware::Next, + response::Response, + Json, +}; +use base64::Engine; +use serde::de::DeserializeOwned; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; +use uuid::Uuid; + +use crate::{ApiError, AppState}; + +#[derive(Deserialize)] +pub struct EncryptedRequest { + pub encrypted: String, +} + +#[derive(Serialize)] +pub struct EncryptedResponse { + pub encrypted: String, + #[serde(skip)] + _phantom: std::marker::PhantomData, +} + +impl EncryptedResponse { + pub fn new(encrypted: String) -> Self { + Self { + encrypted, + 
_phantom: std::marker::PhantomData, + } + } +} + +pub async fn decrypt_request( + State(state): State>, + headers: HeaderMap, + mut request: Request, + next: Next, +) -> Result +where + T: DeserializeOwned + Send + Sync + Clone + 'static, +{ + tracing::debug!("Entering decrypt_request"); + let session_id = headers + .get("x-session-id") + .and_then(|v| v.to_str().ok()) + .and_then(|v| Uuid::parse_str(v).ok()) + .ok_or(ApiError::BadRequest)?; + + // Skip body processing for GET, DELETE, or when T is () + if request.method() == Method::GET + || request.method() == Method::DELETE + || std::any::TypeId::of::() == std::any::TypeId::of::<()>() + { + if std::any::TypeId::of::() == std::any::TypeId::of::<()>() { + request.extensions_mut().insert(()); + } + request.extensions_mut().insert(session_id); + return Ok(next.run(request).await); + } + + let body = std::mem::replace(request.body_mut(), Body::empty()); + let body_bytes = axum::body::to_bytes(body, usize::MAX) + .await + .map_err(|_| ApiError::BadRequest)?; + + let encrypted_request: EncryptedRequest = + serde_json::from_slice(&body_bytes).map_err(|_| ApiError::BadRequest)?; + + let decrypted_data = state + .decrypt_session_data(&session_id, &encrypted_request.encrypted) + .await + .map_err(|_| ApiError::BadRequest)?; + + let decrypted: T = serde_json::from_slice(&decrypted_data).map_err(|e| { + tracing::error!("Failed to deserialize decrypted data: {:?}", e); + ApiError::BadRequest + })?; + + request.extensions_mut().insert(decrypted); + request.extensions_mut().insert(session_id); + + tracing::debug!("Exiting decrypt_request"); + Ok(next.run(request).await) +} + +pub async fn encrypt_response( + state: &AppState, + session_id: &Uuid, + response: &T, +) -> Result>, ApiError> { + tracing::debug!("Entering encrypt_response"); + let response_json = serde_json::to_vec(response).map_err(|_| ApiError::InternalServerError)?; + let encrypted_response = state + .encrypt_session_data(session_id, &response_json) + .await?; + 
tracing::debug!("Exiting encrypt_response"); + Ok(Json(EncryptedResponse::new( + base64::engine::general_purpose::STANDARD.encode(encrypted_response), + ))) +} diff --git a/src/web/health_routes.rs b/src/web/health_routes.rs new file mode 100644 index 0000000..4d74137 --- /dev/null +++ b/src/web/health_routes.rs @@ -0,0 +1,31 @@ +use axum::{http::StatusCode, Router}; +use axum::{routing::get, Json}; +use serde::Serialize; + +const API_VERSION: &str = "v1"; + +pub fn router() -> Router<()> { + Router::new().route("/health-check", get(health_check)) +} + +#[derive(Serialize)] +pub struct HealthResponse { + pub status: String, + pub version: String, +} + +impl HealthResponse { + /// Fabricate a status: pass response without checking database connectivity + pub fn new_ok() -> Self { + Self { + status: String::from("pass"), + version: String::from(API_VERSION), + } + } +} + +/// IETF draft RFC for HTTP API Health Checks: +/// https://datatracker.ietf.org/doc/html/draft-inadarei-api-health-check +pub async fn health_check() -> Result, (StatusCode, String)> { + Ok(Json(HealthResponse::new_ok())) +} diff --git a/src/web/login_routes.rs b/src/web/login_routes.rs new file mode 100644 index 0000000..0417b10 --- /dev/null +++ b/src/web/login_routes.rs @@ -0,0 +1,491 @@ +use crate::AppMode; +use crate::User; +use crate::{ + db::DBError, + email::{send_hello_email, send_verification_email}, + jwt::{validate_token, NewToken, TokenType}, + models::email_verification::NewEmailVerification, +}; +use crate::{web::encryption_middleware::EncryptedResponse, Credentials}; +use crate::{ + web::encryption_middleware::{decrypt_request, encrypt_response}, + Error, +}; +use crate::{ApiError, AppState, RegisterCredentials}; +use axum::{ + extract::{Path, State}, + middleware::from_fn_with_state, + routing::{get, post}, + Extension, Json, Router, +}; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use std::sync::Arc; +use tokio::spawn; +use tracing::{debug, error, info}; +use 
uuid::Uuid; + +pub const VALID_INVITE_CODES: [&str; 3] = ["bearclaw24", "friends24", "hivemind24"]; + +#[derive(Deserialize, Clone)] +pub struct PasswordResetRequestPayload { + email: String, + hashed_secret: String, +} + +#[derive(Deserialize, Clone)] +pub struct PasswordResetConfirmPayload { + email: String, + alphanumeric_code: String, + plaintext_secret: String, + new_password: String, +} + +pub fn router(app_state: Arc) -> Router<()> { + Router::new() + .route( + "/login", + post(login).layer(from_fn_with_state( + app_state.clone(), + decrypt_request::, + )), + ) + .route( + "/register", + post(register).layer(from_fn_with_state( + app_state.clone(), + decrypt_request::, + )), + ) + .route( + "/logout", + post(logout).layer(from_fn_with_state( + app_state.clone(), + decrypt_request::, + )), + ) + .route( + "/refresh", + post(refresh_token).layer(from_fn_with_state( + app_state.clone(), + decrypt_request::, + )), + ) + .route( + "/verify-email/:code", + get(verify_email).layer(from_fn_with_state(app_state.clone(), decrypt_request::<()>)), + ) + .route( + "/password-reset/request", + post(password_reset_request).layer(from_fn_with_state( + app_state.clone(), + decrypt_request::, + )), + ) + .route( + "/password-reset/confirm", + post(password_reset_confirm).layer(from_fn_with_state( + app_state.clone(), + decrypt_request::, + )), + ) + .with_state(app_state) +} + +#[derive(Serialize)] +pub struct AuthResponse { + pub id: Uuid, + pub email: Option, + pub access_token: String, + pub refresh_token: String, +} + +#[derive(Deserialize, Debug, Clone)] +pub struct RefreshRequest { + refresh_token: String, +} + +#[derive(Serialize)] +pub struct RefreshResponse { + access_token: String, + refresh_token: String, +} + +#[derive(Deserialize, Clone)] +pub struct LogoutRequest { + refresh_token: String, +} + +pub async fn login( + State(data): State>, + Extension(creds): Extension, + Extension(session_id): Extension, +) -> Result>, ApiError> { + debug!("Entering login 
function"); + tracing::trace!("call login"); + + let auth_response = login_internal(data.clone(), creds).await?; + let result = encrypt_response(&data, &session_id, &auth_response).await; + debug!("Exiting login function"); + result +} + +async fn login_internal(data: Arc, creds: Credentials) -> Result { + // Get user based on provided credentials + let user = match (&creds.email, &creds.id) { + (Some(email), _) => { + // Try email first if provided + match data.db.get_user_by_email(email.clone()) { + Ok(user) => user, + Err(DBError::UserNotFound) => { + error!("User not found by email: {email}"); + return Err(ApiError::InvalidUsernameOrPassword); + } + Err(e) => { + error!("Error fetching user by email: {:?}", e); + return Err(ApiError::InternalServerError); + } + } + } + (None, Some(id)) => { + // Only allow ID-based login for guest users + match data.db.get_user_by_uuid(*id) { + Ok(user) => { + if !user.is_guest() { + error!("ID-based login not allowed for users with email addresses"); + return Err(ApiError::InvalidUsernameOrPassword); + } + user + } + Err(DBError::UserNotFound) => { + error!("User not found by ID: {id}"); + return Err(ApiError::InvalidUsernameOrPassword); + } + Err(e) => { + error!("Error fetching user by ID: {:?}", e); + return Err(ApiError::InternalServerError); + } + } + } + (None, None) => { + error!("Neither email nor ID provided for login"); + return Err(ApiError::InvalidUsernameOrPassword); + } + }; + + // Check if the user is an OAuth-only user + if user.password_enc.is_none() { + error!("Attempted password login for OAuth-only user"); + return Err(ApiError::InvalidUsernameOrPassword); + } + + // Proceed with password authentication + match data.authenticate_user(creds).await { + Ok(Some(authenticated_user)) => { + let access_token = NewToken::new(&authenticated_user, TokenType::Access, &data)?; + let refresh_token = NewToken::new(&authenticated_user, TokenType::Refresh, &data)?; + let auth_response = AuthResponse { + id: 
authenticated_user.get_id(), + email: authenticated_user.get_email().map(|s| s.to_string()), + access_token: access_token.token, + refresh_token: refresh_token.token, + }; + Ok(auth_response) + } + Ok(None) => { + error!("Invalid password attempt"); + Err(ApiError::InvalidUsernameOrPassword) + } + Err(e) => { + error!("Error authenticating user: {:?}", e); + Err(ApiError::InternalServerError) + } + } +} + +pub async fn logout( + State(data): State>, + Extension(logout_request): Extension, + Extension(session_id): Extension, +) -> Result>, ApiError> { + debug!("Entering logout function"); + info!("Logout request received"); + // TODO actually delete the refresh token + tracing::info!( + "Logout request for refresh token: {}", + logout_request.refresh_token + ); + let response = json!({ "message": "Logged out successfully" }); + let result = encrypt_response(&data, &session_id, &response).await; + debug!("Exiting logout function"); + result +} + +pub async fn register( + State(data): State>, + Extension(creds): Extension, + Extension(session_id): Extension, +) -> Result>, ApiError> { + debug!("Entering register function"); + tracing::trace!("call register"); + + // Skip invite code check for preview mode + if data.app_mode != AppMode::Preview { + // Check the invite code (case-insensitive) + let lowercase_invite_code = creds.invite_code.to_lowercase(); + if !VALID_INVITE_CODES.contains(&lowercase_invite_code.as_str()) { + return Err(ApiError::InvalidInviteCode); + } + } + + let user = match data.register_user(creds.clone()).await { + Ok(user) => user, + Err(Error::UserAlreadyExists) => { + tracing::warn!("Cannot register user that already exists"); + return Err(ApiError::EmailAlreadyExists); + } + Err(e) => { + tracing::error!("Error registering user: {:?}", e); + return Err(ApiError::InternalServerError); + } + }; + + // Handle new user registration + handle_new_user_registration(&data, &user, true).await?; + + // After registration, proceed with login + let 
login_result = login_internal( + data.clone(), + Credentials { + email: creds.email, + id: Some(user.uuid), + password: creds.password, + }, + ) + .await?; + + let result = encrypt_response(&data, &session_id, &login_result).await; + debug!("Exiting register function"); + result +} + +pub async fn handle_new_user_registration( + data: &AppState, + user: &User, + requires_email_verification: bool, +) -> Result<(), ApiError> { + // Only handle email verification if user has an email + if requires_email_verification && !user.is_guest() { + // Create email verification entry + let new_verification = NewEmailVerification::new(user.uuid, 24, false); + let verification = match data.db.create_email_verification(new_verification) { + Ok(v) => v, + Err(e) => { + tracing::error!("Error creating email verification: {:?}", e); + return Err(ApiError::InternalServerError); + } + }; + + // Send verification email in the background + if let Some(email) = user.get_email() { + let email = email.to_string(); + let verification_code = verification.verification_code; + let app_mode = data.app_mode.clone(); + let resend_api_key = data.resend_api_key.clone(); + spawn(async move { + match send_verification_email(app_mode, resend_api_key, email, verification_code) + .await + { + Ok(_) => { + tracing::debug!("Sent verification email"); + } + Err(e) => { + tracing::error!("Could not send verification email: {e}"); + } + } + }); + } + } + + // Only send welcome email if user has an email + if !user.is_guest() { + let welcome_email = user.get_email().unwrap().to_string(); // Safe to unwrap since we checked is_guest() + let app_mode = data.app_mode.clone(); + let resend_api_key = data.resend_api_key.clone(); + spawn(async move { + match send_hello_email(app_mode, resend_api_key, welcome_email).await { + Ok(_) => { + tracing::debug!("Scheduled welcome email"); + } + Err(e) => { + tracing::error!("Could not schedule welcome email: {e}"); + } + } + }); + } + + Ok(()) +} + +pub async fn 
refresh_token( + State(data): State>, + Extension(refresh_request): Extension, + Extension(session_id): Extension, +) -> Result>, ApiError> { + debug!("Entering refresh_token function"); + info!("Refresh token request received"); + + let claims = validate_token(&refresh_request.refresh_token, &data, "refresh")?; + + // Audience check is now handled by validate_token + let user_id = Uuid::parse_str(&claims.sub).map_err(|_| ApiError::InvalidJwt)?; + + let user = data + .get_user(user_id) + .await + .map_err(|_| ApiError::Unauthorized)?; + + let new_access_token = NewToken::new(&user, TokenType::Access, &data)?; + let new_refresh_token = NewToken::new(&user, TokenType::Refresh, &data)?; + + let response = RefreshResponse { + access_token: new_access_token.token, + refresh_token: new_refresh_token.token, + }; + let result = encrypt_response(&data, &session_id, &response).await; + debug!("Exiting refresh_token function"); + result +} + +pub async fn verify_email( + State(data): State>, + Path(code): Path, + Extension(session_id): Extension, +) -> Result>, ApiError> { + debug!("Entering verify_email function"); + let verification = match data.db.get_email_verification_by_code(code) { + Ok(v) => v, + Err(DBError::EmailVerificationNotFound) => return Err(ApiError::BadRequest), + Err(_) => return Err(ApiError::InternalServerError), + }; + + if verification.is_expired() { + return Err(ApiError::BadRequest); + } + + if verification.is_verified { + let response = json!({ + "message": "Email already verified" + }); + return encrypt_response(&data, &session_id, &response).await; + } + + let mut verification = verification; + if data.db.verify_email(&mut verification).is_err() { + return Err(ApiError::InternalServerError); + } + + let response = json!({ + "message": "Email verified successfully" + }); + let result = encrypt_response(&data, &session_id, &response).await; + debug!("Exiting verify_email function"); + result +} + +pub async fn password_reset_request( + State(data): 
State>, + Extension(payload): Extension, + Extension(session_id): Extension, +) -> Result>, ApiError> { + debug!("Entering password_reset_request function"); + + // Check if user exists and is not an OAuth-only user + match data.db.get_user_by_email(payload.email.clone()) { + Ok(user) => { + if user.password_enc.is_none() { + error!("OAuth-only user attempted to reset password"); + // Still return success to not leak information about the account + let response = json!({ + "message": "If an account with that email exists, we have sent a password reset link." + }); + return encrypt_response(&data, &session_id, &response).await; + } + } + Err(DBError::UserNotFound) => { + // User doesn't exist, but we don't want to leak this information + let response = json!({ + "message": "If an account with that email exists, we have sent a password reset link." + }); + return encrypt_response(&data, &session_id, &response).await; + } + Err(e) => { + error!("Error in password reset request: {:?}", e); + return Err(ApiError::InternalServerError); + } + } + + // Proceed with password reset request + let _ = data + .create_password_reset_request(payload.email.clone(), payload.hashed_secret) + .await + .map_err(|e| { + error!("Error in create_password_reset_request: {:?}", e); + // We don't expose this error to the user + }); + + let response = json!({ + "message": "If an account with that email exists, we have sent a password reset link." 
+ }); + let result = encrypt_response(&data, &session_id, &response).await; + debug!("Exiting password_reset_request function"); + result +} + +pub async fn password_reset_confirm( + State(data): State>, + Extension(payload): Extension, + Extension(session_id): Extension, +) -> Result>, ApiError> { + debug!("Entering password_reset_confirm function"); + + // Check if user exists and is not an OAuth-only user + match data.db.get_user_by_email(payload.email.clone()) { + Ok(user) => { + if user.password_enc.is_none() { + error!("OAuth-only user attempted to reset password"); + return Err(ApiError::InvalidUsernameOrPassword); + } + } + Err(DBError::UserNotFound) => { + error!("User not found in password reset confirm"); + return Err(ApiError::InvalidUsernameOrPassword); + } + Err(e) => { + error!("Error in password reset confirm: {:?}", e); + return Err(ApiError::InternalServerError); + } + } + + // Proceed with password reset confirmation + data.confirm_password_reset( + payload.email, + payload.alphanumeric_code, + payload.plaintext_secret, + payload.new_password, + ) + .await + .map_err(|e| match e { + crate::Error::PasswordResetExpired => ApiError::BadRequest, + crate::Error::InvalidPasswordResetSecret => ApiError::BadRequest, + crate::Error::InvalidPasswordResetRequest => ApiError::BadRequest, + _ => ApiError::InternalServerError, + })?; + + let response = json!({ + "message": "Password reset successful. You can now log in with your new password." 
+ }); + let result = encrypt_response(&data, &session_id, &response).await; + debug!("Exiting password_reset_confirm function"); + result +} diff --git a/src/web/oauth_routes.rs b/src/web/oauth_routes.rs new file mode 100644 index 0000000..eff4cf7 --- /dev/null +++ b/src/web/oauth_routes.rs @@ -0,0 +1,589 @@ +use crate::models::email_verification::NewEmailVerification; +use crate::models::oauth::NewUserOAuthConnection; +use crate::web::encryption_middleware::{decrypt_request, encrypt_response, EncryptedResponse}; +use crate::web::login_routes::{handle_new_user_registration, VALID_INVITE_CODES}; +use crate::AppMode; +use crate::{encrypt, DBError}; +use crate::{ + jwt::{NewToken, TokenType}, + models::users::{NewUser, User}, + ApiError, AppState, +}; +use axum::{ + extract::{Extension, State}, + routing::post, + Json, Router, +}; +use reqwest::header::AUTHORIZATION; +use secp256k1::SecretKey; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; +use tracing::{debug, error, trace}; +use uuid::Uuid; + +pub fn router(app_state: Arc) -> Router { + Router::new() + .route( + "/auth/github", + post(|state, ext1, ext2| initiate_oauth(state, ext1, ext2, "github")).layer( + axum::middleware::from_fn_with_state( + app_state.clone(), + decrypt_request::, + ), + ), + ) + .route( + "/auth/github/callback", + post(|state, ext1, ext2| oauth_callback(state, ext1, ext2, "github")).layer( + axum::middleware::from_fn_with_state( + app_state.clone(), + decrypt_request::, + ), + ), + ) + .route( + "/auth/google", + post(|state, ext1, ext2| initiate_oauth(state, ext1, ext2, "google")).layer( + axum::middleware::from_fn_with_state( + app_state.clone(), + decrypt_request::, + ), + ), + ) + .route( + "/auth/google/callback", + post(|state, ext1, ext2| oauth_callback(state, ext1, ext2, "google")).layer( + axum::middleware::from_fn_with_state( + app_state.clone(), + decrypt_request::, + ), + ), + ) + .with_state(app_state) +} + +#[derive(Serialize)] +struct OAuthOAuthCallbackResponse { + 
auth_url: String, + csrf_token: String, +} + +#[derive(Deserialize, Clone)] +struct OAuthAuthRequest { + invite_code: Option, +} + +#[derive(Deserialize, Clone)] +struct OAuthCallbackRequest { + code: String, + state: String, + invite_code: String, +} + +#[derive(Serialize)] +struct OAuthCallbackResponse { + id: Uuid, + email: String, + access_token: String, + refresh_token: String, +} + +#[derive(Deserialize, Clone, Debug)] +struct GithubUser { + id: i64, + login: String, + name: Option, + email: Option, +} + +#[derive(Deserialize)] +struct GithubEmail { + email: String, + primary: bool, + verified: bool, +} + +#[derive(Deserialize, Clone, Debug)] +struct GoogleUser { + sub: String, + email: String, + email_verified: bool, + name: Option, +} + +async fn initiate_oauth( + State(app_state): State>, + Extension(auth_request): Extension, + Extension(session_id): Extension, + provider_name: &str, +) -> Result>, ApiError> { + debug!("Entering init {} auth function", provider_name); + + // Check the invite code if it's provided (for sign-ups) + if let Some(invite_code) = &auth_request.invite_code { + let lowercase_invite_code = invite_code.to_lowercase(); + if !VALID_INVITE_CODES.contains(&lowercase_invite_code.as_str()) { + error!("Invalid invite code: {}", lowercase_invite_code); + return Err(ApiError::InvalidInviteCode); + } + } + + let oauth_client = app_state + .oauth_manager + .get_provider(provider_name) + .ok_or(ApiError::InternalServerError)?; + + let (auth_url, csrf_token) = oauth_client.generate_authorize_url().await; + + let response = OAuthOAuthCallbackResponse { + auth_url, + csrf_token: csrf_token.secret().clone(), + }; + + debug!("Exiting init {} auth function", provider_name); + encrypt_response(&app_state, &session_id, &response).await +} + +async fn oauth_callback( + State(app_state): State>, + Extension(callback_request): Extension, + Extension(session_id): Extension, + provider_name: &str, +) -> Result>, ApiError> { + debug!("Entering {} callback 
function", provider_name); + trace!("Received code: {}", callback_request.code); + trace!("Received state: {}", callback_request.state); + trace!("Received invite code: {}", callback_request.invite_code); + + let oauth_client = app_state + .oauth_manager + .get_provider(provider_name) + .ok_or_else(|| { + error!("{} client not initialized", provider_name); + ApiError::InternalServerError + })?; + + // Validate the state + if !oauth_client.validate_state(&callback_request.state).await { + error!("Invalid state in {} callback", provider_name); + return Err(ApiError::BadRequest); + } + + // Exchange the code for an access token + let token = match oauth_client + .exchange_code(callback_request.code.clone()) + .await + { + Ok(token) => { + debug!("Successfully exchanged code for token"); + token + } + Err(e) => { + error!("Failed to exchange code for access token: {:?}", e); + return Err(ApiError::InternalServerError); + } + }; + + // Fetch user information and find or create the user + let user = match provider_name { + "github" => { + debug!("Access token obtained, fetching GitHub user"); + let github_user = match fetch_github_user(token.secret()).await { + Ok(user) => { + debug!("Successfully fetched GitHub user"); + user + } + Err(e) => { + error!("Failed to fetch GitHub user: {:?}", e); + return Err(e); + } + }; + + find_or_create_user_from_oauth( + &app_state, + github_user.email.clone().unwrap_or_default(), + github_user.id.to_string(), + "github", + token.secret().to_string(), + &callback_request.invite_code, + github_user.name.clone().or(Some(github_user.login.clone())), + ) + .await? 
+ } + "google" => { + debug!("Access token obtained, fetching Google user"); + let google_user = match fetch_google_user(token.secret()).await { + Ok(user) => { + debug!("Successfully fetched Google user"); + user + } + Err(e) => { + error!("Failed to fetch Google user: {:?}", e); + return Err(e); + } + }; + + find_or_create_user_from_oauth( + &app_state, + google_user.email.clone(), + google_user.sub.clone(), + "google", + token.secret().to_string(), + &callback_request.invite_code, + google_user.name.clone(), + ) + .await? + } + _ => { + error!("Unsupported provider: {}", provider_name); + return Err(ApiError::InternalServerError); + } + }; + + // Generate JWT tokens + let access_token = NewToken::new(&user, TokenType::Access, &app_state).map_err(|e| { + error!("Failed to generate access token: {:?}", e); + ApiError::InternalServerError + })?; + let refresh_token = NewToken::new(&user, TokenType::Refresh, &app_state).map_err(|e| { + error!("Failed to generate refresh token: {:?}", e); + ApiError::InternalServerError + })?; + + let auth_response = OAuthCallbackResponse { + id: user.get_id(), + email: user.get_email() + .expect("OAuth user must have email") + .to_string(), + access_token: access_token.token, + refresh_token: refresh_token.token, + }; + + debug!("Exiting {} callback function", provider_name); + encrypt_response(&app_state, &session_id, &auth_response).await +} + +async fn fetch_github_user(access_token: &str) -> Result { + let client = reqwest::Client::new(); + let user_url = "https://api.github.com/user"; + + debug!("Sending request to GitHub API: {}", user_url); + let response = client + .get(user_url) + .header("Authorization", format!("token {}", access_token)) + .header("User-Agent", "OpenSecret") + .send() + .await + .map_err(|e| { + error!("Failed to send request to GitHub API: {:?}", e); + ApiError::InternalServerError + })?; + + // Get status and headers before consuming the response + let status = response.status(); + let headers = 
response.headers().clone(); + debug!("GitHub API response status: {}", status); + trace!("GitHub API response headers: {:?}", headers); + + if !status.is_success() { + let error_body = response + .text() + .await + .unwrap_or_else(|_| "Unable to read error body".to_string()); + error!( + "GitHub API returned non-success status: {} {}", + status, + status.canonical_reason().unwrap_or("") + ); + error!("Error response body: {}", error_body); + return Err(ApiError::InternalServerError); + } + + let user_body = response.text().await.map_err(|e| { + error!("Failed to read GitHub user response body: {:?}", e); + ApiError::InternalServerError + })?; + + trace!("GitHub user response body: {}", user_body); + + let mut github_user: GithubUser = serde_json::from_str(&user_body).map_err(|e| { + error!("Failed to parse GitHub user JSON: {:?}", e); + error!("GitHub user response body: {}", user_body); + ApiError::InternalServerError + })?; + + // If the email is not public, fetch the email separately + if github_user.email.is_none() { + let emails_url = "https://api.github.com/user/emails"; + debug!("Fetching GitHub user emails: {}", emails_url); + let emails_response = client + .get(emails_url) + .header("Authorization", format!("token {}", access_token)) + .header("User-Agent", "OpenSecret") + .send() + .await + .map_err(|e| { + error!("Failed to send request for GitHub user emails: {:?}", e); + ApiError::InternalServerError + })?; + + let emails_status = emails_response.status(); + let emails_headers = emails_response.headers().clone(); + trace!("GitHub emails API response status: {}", emails_status); + trace!("GitHub emails API response headers: {:?}", emails_headers); + + if !emails_status.is_success() { + let error_body = emails_response + .text() + .await + .unwrap_or_else(|_| "Unable to read error body".to_string()); + error!( + "GitHub API returned non-success status for emails: {} {}", + emails_status, + emails_status.canonical_reason().unwrap_or("") + ); + 
error!("Error response body for emails: {}", error_body); + return Err(ApiError::InternalServerError); + } + + let emails_body = emails_response.text().await.map_err(|e| { + error!("Failed to read GitHub emails response body: {:?}", e); + ApiError::InternalServerError + })?; + + trace!("GitHub emails response body: {}", emails_body); + + let emails: Vec = serde_json::from_str(&emails_body).map_err(|e| { + error!("Failed to parse GitHub emails JSON: {:?}", e); + error!("GitHub emails response body: {}", emails_body); + ApiError::InternalServerError + })?; + + github_user.email = emails + .into_iter() + .find(|e| e.primary && e.verified) + .map(|e| e.email); + } + + // If we still don't have an email, return an error + if github_user.email.is_none() { + error!("No valid email found for GitHub user"); + return Err(ApiError::NoEmailFound); + } + + Ok(github_user) +} + +async fn fetch_google_user(access_token: &str) -> Result { + let client = reqwest::Client::new(); + let user_url = "https://www.googleapis.com/oauth2/v3/userinfo"; + + debug!("Sending request to Google API: {}", user_url); + let response = client + .get(user_url) + .header(AUTHORIZATION, format!("Bearer {}", access_token)) + .send() + .await + .map_err(|e| { + error!("Failed to send request to Google API: {:?}", e); + ApiError::InternalServerError + })?; + + let status = response.status(); + if !status.is_success() { + let error_body = response + .text() + .await + .unwrap_or_else(|_| "Unable to read error body".to_string()); + error!( + "Google API returned non-success status: {} {}", + status, error_body + ); + return Err(ApiError::InternalServerError); + } + + let google_user: GoogleUser = response.json().await.map_err(|e| { + error!("Failed to parse Google user JSON: {:?}", e); + ApiError::InternalServerError + })?; + + // Ensure email is present and verified + if google_user.email.is_empty() || !google_user.email_verified { + error!("Google user email is not present or not verified"); + return 
Err(ApiError::BadRequest); + } + + Ok(google_user) +} + +async fn find_or_create_user_from_oauth( + app_state: &AppState, + email: String, + provider_user_id: String, + provider_name: &str, + access_token: String, + invite_code: &str, + user_name: Option, +) -> Result { + let provider = app_state + .db + .get_oauth_provider_by_name(provider_name) + .map_err(|e| { + error!("Failed to get {} OAuth provider: {:?}", provider_name, e); + ApiError::InternalServerError + })? + .ok_or_else(|| { + error!("{} OAuth provider not found", provider_name); + ApiError::InternalServerError + })?; + + // Try to find the user by email + match app_state.db.get_user_by_email(email.clone()) { + Ok(existing_user) => { + // User exists, check if they have a connection with the provider + let existing_connection = app_state + .db + .get_user_oauth_connection_by_user_and_provider(existing_user.uuid, provider.id) + .map_err(|e| { + error!("Failed to get existing OAuth connection: {:?}", e); + ApiError::InternalServerError + })?; + + if existing_connection.is_some() { + // User has already linked their account, update the token + update_provider_connection(app_state, &existing_user, provider.id, &access_token) + .await?; + Ok(existing_user) + } else { + // User exists but hasn't linked the provider before + error!("User exists but hasn't linked {} before", provider_name); + Err(ApiError::UserExistsNotLinked) + } + } + Err(DBError::UserNotFound) => { + // If invite code is empty and not in preview mode, return UserNotFound error + if invite_code.is_empty() && app_state.app_mode != AppMode::Preview { + return Err(ApiError::UserNotFound); + } + + // Check the invite code for new sign-ups, but skip for preview mode + if app_state.app_mode != AppMode::Preview { + let lowercase_invite_code = invite_code.to_lowercase(); + if !VALID_INVITE_CODES.contains(&lowercase_invite_code.as_str()) { + error!( + "Invalid invite code for new user: {}", + lowercase_invite_code + ); + return 
Err(ApiError::InvalidInviteCode); + } + } + + // Create new user + let new_user = NewUser::new(Some(email.clone()), None) + .with_name(user_name.unwrap_or_default()); + + let user = app_state.db.create_user(new_user).map_err(|e| { + error!("Failed to create new user: {:?}", e); + ApiError::InternalServerError + })?; + + // Create connection for the new user + create_provider_connection( + app_state, + &user, + provider.id, + provider_user_id, + &access_token, + ) + .await?; + + // Create email verification entry as already verified + let new_verification = NewEmailVerification::new(user.uuid, 24, true); + app_state + .db + .create_email_verification(new_verification) + .map_err(|e| { + error!("Error creating email verification: {:?}", e); + ApiError::InternalServerError + })?; + + // Handle new user registration + handle_new_user_registration(app_state, &user, false).await?; + + Ok(user) + } + Err(e) => { + error!("Database error when fetching user: {:?}", e); + Err(ApiError::InternalServerError) + } + } +} + +async fn update_provider_connection( + app_state: &AppState, + user: &User, + provider_id: i32, + access_token: &str, +) -> Result<(), ApiError> { + let encrypted_access_token = encrypt_access_token(app_state, access_token).await?; + + let mut connection = app_state + .db + .get_user_oauth_connection_by_user_and_provider(user.uuid, provider_id) + .map_err(|e| { + error!("Failed to get existing OAuth connection: {:?}", e); + ApiError::InternalServerError + })? 
+ .expect("Connection should exist"); + + connection.access_token_enc = encrypted_access_token; + app_state + .db + .update_user_oauth_connection(&connection) + .map_err(|e| { + error!("Failed to update OAuth connection: {:?}", e); + ApiError::InternalServerError + })?; + + Ok(()) +} + +async fn create_provider_connection( + app_state: &AppState, + user: &User, + provider_id: i32, + provider_user_id: String, + access_token: &str, +) -> Result<(), ApiError> { + let encrypted_access_token = encrypt_access_token(app_state, access_token).await?; + + let new_connection = NewUserOAuthConnection { + user_id: user.uuid, + provider_id, + provider_user_id, + access_token_enc: encrypted_access_token, + refresh_token_enc: None, // Assuming no refresh tokens for both providers + expires_at: None, // Assuming tokens don't expire unless revoked + }; + + app_state + .db + .create_user_oauth_connection(new_connection) + .map_err(|e| { + error!("Failed to create new OAuth connection: {:?}", e); + ApiError::InternalServerError + })?; + + Ok(()) +} + +async fn encrypt_access_token( + app_state: &AppState, + access_token: &str, +) -> Result, ApiError> { + let secret_key = SecretKey::from_slice(&app_state.enclave_key).map_err(|e| { + error!("Failed to create SecretKey from enclave key: {:?}", e); + ApiError::InternalServerError + })?; + Ok(encrypt::encrypt_with_key(&secret_key, access_token.as_bytes()).await) +} diff --git a/src/web/openai.rs b/src/web/openai.rs new file mode 100644 index 0000000..78919d6 --- /dev/null +++ b/src/web/openai.rs @@ -0,0 +1,315 @@ +use crate::is_default_openai_domain; +use crate::models::token_usage::NewTokenUsage; +use crate::models::users::User; +use crate::sqs::UsageEvent; +use crate::web::encryption_middleware::decrypt_request; +use crate::{ApiError, AppState}; +use axum::http::{header, HeaderMap}; +use axum::{ + extract::State, + response::sse::{Event, Sse}, + routing::post, + Router, +}; +use base64::{engine::general_purpose, Engine as _}; +use 
bigdecimal::BigDecimal; +use chrono::Utc; +use futures::stream::{self, Stream, StreamExt}; +use futures::TryStreamExt; +use hyper::body::to_bytes; +use hyper::header::{HeaderName, HeaderValue}; +use hyper::{Body, Client, Request}; +use hyper_tls::HttpsConnector; +use serde_json::{json, Value}; +use std::convert::Infallible; +use std::str::FromStr; +use std::sync::{Arc, Mutex}; +use std::time::Duration; +use tracing::{debug, error, info, trace}; +use uuid::Uuid; + +pub fn router(app_state: Arc) -> Router<()> { + Router::new() + .route("/v1/chat/completions", post(proxy_openai)) + .layer(axum::middleware::from_fn_with_state( + app_state.clone(), + decrypt_request::, + )) + .with_state(app_state) +} + +async fn proxy_openai( + State(state): State>, + headers: HeaderMap, + axum::Extension(session_id): axum::Extension, + axum::Extension(user): axum::Extension, + axum::Extension(body): axum::Extension, +) -> Result>>, ApiError> { + debug!("Entering proxy_openai function"); + + // Check billing if client exists + if let Some(billing_client) = &state.billing_client { + debug!("Checking billing server for user {}", user.uuid); + match billing_client.can_user_chat(user.uuid).await { + Ok(true) => { + // User can chat, proceed with existing logic + debug!("Billing service passed for user {}", user.uuid); + } + Ok(false) => { + error!("Usage limit reached for user: {}", user.uuid); + return Err(ApiError::UsageLimitReached); + } + Err(e) => { + // Log the error but allow the request + error!("Billing service error, allowing request: {}", e); + } + } + } + + if body.is_null() || body.as_object().map_or(true, |obj| obj.is_empty()) { + error!("Request body is empty or invalid"); + return Err(ApiError::BadRequest); + } + + // We already verified it's a valid object above, so this expect should never trigger + let mut modified_body = body.as_object().expect("body was just checked").clone(); + modified_body.insert("stream_options".to_string(), json!({"include_usage": true})); + let 
modified_body = Value::Object(modified_body); + + // Use the OpenAI API key and base URL from AppState + let openai_api_key = match &state.openai_api_key { + Some(key) if !key.is_empty() => key, + _ => { + if is_default_openai_domain(&state.openai_api_base) { + error!("OpenAI API key is required for OpenAI domain"); + return Err(ApiError::InternalServerError); + } + "" // Empty string if not using OpenAI's domain + } + }; + let openai_api_base = &state.openai_api_base; + + // Create a new hyper client + let https = HttpsConnector::new(); + let client = Client::builder() + .pool_idle_timeout(Duration::from_secs(15)) + .build::<_, Body>(https); + + // Prepare the request to OpenAI + let body_json = serde_json::to_string(&modified_body).map_err(|e| { + error!("Failed to serialize request body: {:?}", e); + ApiError::InternalServerError + })?; + + let mut req = Request::builder() + .method("POST") + .uri(format!("{}/v1/chat/completions", openai_api_base)) + .header("Content-Type", "application/json"); + + if !openai_api_key.is_empty() { + req = req.header("Authorization", format!("Bearer {}", openai_api_key)); + } + + // Forward relevant headers from the original request + for (key, value) in headers.iter() { + if key != header::HOST && key != header::AUTHORIZATION && key != header::CONTENT_LENGTH { + if let (Ok(name), Ok(val)) = ( + HeaderName::from_bytes(key.as_ref()), + HeaderValue::from_str(value.to_str().unwrap_or_default()), + ) { + req = req.header(name, val); + } + } + } + + let req = req.body(Body::from(body_json)).map_err(|e| { + error!("Failed to create request body: {:?}", e); + ApiError::InternalServerError + })?; + + debug!("Sending request to OpenAI"); + // Send the request to OpenAI + let res = client.request(req).await.map_err(|e| { + error!("Failed to send request to OpenAI: {:?}", e); + ApiError::InternalServerError + })?; + + // Check if the response is successful + if !res.status().is_success() { + error!("OpenAI API returned non-success status: 
{}", res.status()); + + // Log headers + debug!("Response headers: {:?}", res.headers()); + + // Read and log the response body + let body_bytes = to_bytes(res.into_body()).await.map_err(|e| { + error!("Failed to read response body: {:?}", e); + ApiError::InternalServerError + })?; + + let body_str = String::from_utf8_lossy(&body_bytes); + error!("Response body: {}", body_str); + + return Err(ApiError::InternalServerError); + } + + debug!("Successfully received response from OpenAI"); + + let stream = res.into_body().into_stream(); + let buffer = Arc::new(Mutex::new(String::new())); + let stream = stream + .map(move |chunk| { + let state = state.clone(); + let session_id = session_id; + let user = user.clone(); + let buffer = buffer.clone(); + async move { + match chunk { + Ok(chunk) => { + let chunk_str = String::from_utf8_lossy(&chunk); + let mut events = Vec::new(); + { + let mut buffer = buffer.lock().unwrap(); + buffer.push_str(&chunk_str); + while let Some(event_end) = buffer.find("\n\n") { + let event = buffer[..event_end].to_string(); + *buffer = buffer[event_end + 2..].to_string(); + events.push(event); + } + if events.is_empty() { + trace!("No complete events in buffer. Current buffer: {}", buffer); + } + } + + let mut processed_events = Vec::new(); + for event in events { + if let Some(processed_event) = + encrypt_and_process_event(&state, &session_id, &user, &event).await + { + processed_events.push(Ok(processed_event)); + } + } + processed_events + } + Err(e) => { + error!( + "Error reading response body: {:?}. 
Current buffer: {}", + e, + buffer.lock().unwrap() + ); + vec![Ok(Event::default().data("Error reading response"))] + } + } + } + }) + .flat_map(stream::once) + .flat_map(stream::iter); + + debug!("Exiting proxy_openai function"); + Ok(Sse::new(stream)) +} + +async fn encrypt_and_process_event( + state: &AppState, + session_id: &Uuid, + user: &User, + event: &str, +) -> Option { + if event.trim() == "data: [DONE]" { + return Some(Event::default().data("[DONE]")); + } + + if let Some(data) = event.strip_prefix("data: ") { + match serde_json::from_str::(data) { + Ok(json) => { + // Handle usage statistics if available + if let Some(usage) = json.get("usage") { + if !usage.is_null() && usage.is_object() { + let input_tokens = usage + .get("prompt_tokens") + .and_then(|v| v.as_i64()) + .unwrap_or(0) as i32; + let output_tokens = usage + .get("completion_tokens") + .and_then(|v| v.as_i64()) + .unwrap_or(0) as i32; + + // Calculate estimated cost with correct pricing + let input_cost = BigDecimal::from_str("0.0000054").unwrap() + * BigDecimal::from(input_tokens); + let output_cost = BigDecimal::from_str("0.0000162").unwrap() + * BigDecimal::from(output_tokens); + let total_cost = input_cost + output_cost; + + info!( + "OpenAI API usage for user {}: prompt_tokens={}, completion_tokens={}, total_tokens={}, estimated_cost={}", + user.uuid, input_tokens, output_tokens, + input_tokens + output_tokens, + total_cost + ); + + // Create token usage record and post to SQS in the background + let state = state.clone(); + let user_id = user.uuid; + tokio::spawn(async move { + // Create and store token usage record + let new_usage = NewTokenUsage::new( + user_id, + input_tokens, + output_tokens, + total_cost.clone(), + ); + + if let Err(e) = state.db.create_token_usage(new_usage) { + error!("Failed to save token usage: {:?}", e); + } + + // Post event to SQS if configured + if let Some(publisher) = &state.sqs_publisher { + let event = UsageEvent { + event_id: Uuid::new_v4(), // 
Generate new UUID for idempotency + user_id, + input_tokens, + output_tokens, + estimated_cost: total_cost, + chat_time: Utc::now(), + }; + + match publisher.publish_event(event).await { + Ok(_) => debug!("published usage event successfully"), + Err(e) => error!("error publishing usage event: {e}"), + } + } + }); + } + } + + let json_str = json.to_string(); + match state + .encrypt_session_data(session_id, json_str.as_bytes()) + .await + { + Ok(encrypted_data) => { + let base64_encrypted = general_purpose::STANDARD.encode(&encrypted_data); + Some(process_event(&base64_encrypted)) + } + Err(e) => { + error!("Failed to encrypt event data: {:?}", e); + Some(Event::default().data("Error: Encryption failed")) + } + } + } + Err(e) => { + error!("Received non-JSON data event. Error: {:?}", e); + Some(Event::default().data("Error: Invalid JSON")) + } + } + } else { + error!("Received non-data event"); + Some(Event::default().data("Error: Invalid event format")) + } +} + +fn process_event(data: &str) -> Event { + Event::default().data(data) +} diff --git a/src/web/protected_routes.rs b/src/web/protected_routes.rs new file mode 100644 index 0000000..61ab1c1 --- /dev/null +++ b/src/web/protected_routes.rs @@ -0,0 +1,834 @@ +use crate::encrypt; +use crate::jwt::{NewToken, TokenType}; +use crate::message_signing::SigningAlgorithm; +use crate::private_key::decrypt_user_seed_to_mnemonic; +use crate::web::encryption_middleware::{decrypt_request, encrypt_response, EncryptedResponse}; +use crate::web::login_routes::handle_new_user_registration; +use crate::Credentials; +use crate::Error; +use crate::KVPair; +use crate::{ + db::DBError, email::send_verification_email, models::email_verification::NewEmailVerification, + models::users::User, ApiError, AppState, +}; +use axum::middleware::from_fn_with_state; +use axum::{ + extract::{Path, Query, State}, + routing::{delete, get, post, put}, + Router, +}; +use axum::{Extension, Json}; +use base64::{engine::general_purpose, Engine}; +use 
bitcoin::bip32::DerivationPath; +use chrono::{DateTime, Utc}; +use secp256k1::Secp256k1; +use secp256k1::SecretKey; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use std::str::FromStr; +use std::sync::Arc; +use tracing::{debug, error, info}; +use uuid::Uuid; + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum LoginMethod { + Email, + Github, + Google, + Guest, +} + +// Update AppUser struct to include login_method +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AppUser { + pub id: Uuid, + pub name: Option, + pub email: Option, + pub email_verified: bool, + pub login_method: LoginMethod, + pub created_at: DateTime, + pub updated_at: DateTime, +} + +impl From<&User> for AppUser { + fn from(user: &User) -> Self { + AppUser { + id: user.uuid, + name: user.name.clone(), + email: user.email.clone(), + // This will be set separately + email_verified: false, + // This will be updated for oauth + login_method: if user.is_guest() { + LoginMethod::Guest + } else { + LoginMethod::Email + }, + created_at: user.created_at, + updated_at: user.updated_at, + } + } +} + +#[derive(Serialize)] +pub struct ProtectedUserData { + pub user: AppUser, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct ChangePasswordRequest { + pub current_password: String, + pub new_password: String, +} + +#[derive(Debug, Serialize)] +pub struct PrivateKeyResponse { + mnemonic: String, +} + +/// Response struct for the private key bytes endpoint. +/// Contains the private key encoded as a hexadecimal string. +#[derive(Debug, Serialize)] +pub struct PrivateKeyBytesResponse { + /// The private key as a 64-character hexadecimal string (32 bytes). + /// This is the standard secp256k1 private key format. + private_key: String, +} + +/// Query parameters for endpoints that accept a derivation path. +/// The derivation path should follow BIP32 format (e.g., "m/44'/0'/0'/0/0"). 
+#[derive(Debug, Clone, Deserialize)] +pub struct DerivationPathQuery { + derivation_path: Option, +} + +impl DerivationPathQuery { + /// Validates that the derivation path follows BIP32 format if present. + /// Both absolute (starting with "m/") and relative paths are valid. + pub fn validate(&self) -> Result<(), ApiError> { + if let Some(ref path) = self.derivation_path { + // Allow empty path or "m" alone + if path.is_empty() || path == "m" { + return Ok(()); + } + + // For non-empty paths, validate using bitcoin library's DerivationPath + DerivationPath::from_str(path).map_err(|e| { + error!("Invalid derivation path format: {}", e); + ApiError::BadRequest + })?; + } + Ok(()) + } +} + +#[derive(Debug, Clone, Deserialize)] +pub struct SignMessageRequest { + pub message_base64: String, + pub algorithm: SigningAlgorithm, + pub derivation_path: Option, +} + +#[derive(Debug, Serialize)] +pub struct SignMessageResponseJson { + pub signature: String, + pub message_hash: String, +} + +#[derive(Debug, Serialize)] +pub struct PublicKeyResponseJson { + pub public_key: String, + pub algorithm: SigningAlgorithm, +} + +#[derive(Debug, Deserialize)] +pub struct PublicKeyQuery { + algorithm: SigningAlgorithm, + derivation_path: Option, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct ConvertGuestRequest { + pub email: String, + pub password: String, + pub name: Option, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct ThirdPartyTokenRequest { + pub audience: String, +} + +#[derive(Debug, Serialize)] +pub struct ThirdPartyTokenResponse { + pub token: String, +} + +pub fn router(app_state: Arc) -> Router<()> { + Router::new() + .route( + "/protected/user", + get(user_protected).layer(from_fn_with_state(app_state.clone(), decrypt_request::<()>)), + ) + .route( + "/protected/kv/:key", + get(get_kv).layer(from_fn_with_state(app_state.clone(), decrypt_request::<()>)), + ) + .route( + "/protected/kv/:key", + put(put_kv).layer(from_fn_with_state( + app_state.clone(), + 
decrypt_request::, + )), + ) + .route( + "/protected/kv/:key", + delete(delete_kv).layer(from_fn_with_state(app_state.clone(), decrypt_request::<()>)), + ) + .route( + "/protected/kv", + get(list_kv).layer(from_fn_with_state(app_state.clone(), decrypt_request::<()>)), + ) + .route( + "/protected/request_verification", + post(request_new_verification_code) + .layer(from_fn_with_state(app_state.clone(), decrypt_request::<()>)), + ) + .route( + "/protected/change_password", + post(change_password).layer(from_fn_with_state( + app_state.clone(), + decrypt_request::, + )), + ) + .route( + "/protected/private_key", + get(get_private_key) + .layer(from_fn_with_state(app_state.clone(), decrypt_request::<()>)), + ) + .route( + "/protected/private_key_bytes", + get(get_private_key_bytes).layer(from_fn_with_state( + app_state.clone(), + decrypt_request::, + )), + ) + .route( + "/protected/sign_message", + post(sign_message).layer(from_fn_with_state( + app_state.clone(), + decrypt_request::, + )), + ) + .route( + "/protected/public_key", + get(get_public_key).layer(from_fn_with_state(app_state.clone(), decrypt_request::<()>)), + ) + .route( + "/protected/convert_guest", + post(convert_guest_to_email).layer(from_fn_with_state( + app_state.clone(), + decrypt_request::, + )), + ) + .route( + "/protected/third_party_token", + post(generate_third_party_token).layer(from_fn_with_state( + app_state.clone(), + decrypt_request::, + )), + ) + .with_state(app_state.clone()) +} + +pub async fn user_protected( + State(data): State>, + Extension(user): Extension, + Extension(session_id): Extension, +) -> Result>, ApiError> { + debug!("Entering user_protected function"); + tracing::info!("user_protected request"); + let mut app_user: AppUser = AppUser::from(&user); + + // Set email verification status - only if not a guest user + app_user.email_verified = if user.is_guest() { + false + } else { + match data.db.get_email_verification_by_user_id(user.uuid) { + Ok(verification) => 
verification.is_verified, + Err(DBError::EmailVerificationNotFound) => false, + Err(e) => { + tracing::error!("Error checking email verification: {:?}", e); + return Err(ApiError::InternalServerError); + } + } + }; + + // Determine login method + if user.password_enc.is_none() { + // This is an OAuth user - find out which provider + match data.db.get_all_user_oauth_connections_for_user(user.uuid) { + Ok(connections) => { + if let Some(connection) = connections.first() { + // Get the provider details + match data.db.get_oauth_provider_by_id(connection.provider_id) { + Ok(Some(provider)) => { + // Set the login method based on the provider name + app_user.login_method = match provider.name.as_str() { + "github" => LoginMethod::Github, + "google" => LoginMethod::Google, + // Add other providers here as they're supported + _ => { + tracing::error!("Unknown OAuth provider: {}", provider.name); + return Err(ApiError::InternalServerError); + } + }; + } + Ok(None) => { + tracing::error!( + "OAuth provider not found for id: {}", + connection.provider_id + ); + return Err(ApiError::InternalServerError); + } + Err(e) => { + tracing::error!("Error fetching OAuth provider: {:?}", e); + return Err(ApiError::InternalServerError); + } + } + } + } + Err(e) => { + tracing::error!("Error fetching OAuth connections: {:?}", e); + return Err(ApiError::InternalServerError); + } + } + } + + let response = ProtectedUserData { user: app_user }; + + debug!("Exiting user_protected function"); + encrypt_response(&data, &session_id, &response).await +} + +pub async fn get_kv( + State(data): State>, + Extension(user): Extension, + Extension(session_id): Extension, + Path(key): Path, +) -> Result>>, ApiError> { + debug!("Entering get_kv function"); + let value = match data.get(user.uuid, key).await { + Ok(kv) => kv, + Err(e) => { + tracing::error!("Error getting key-value pair: {:?}", e); + return Err(ApiError::InternalServerError); + } + }; + debug!("Exiting get_kv function"); + 
encrypt_response(&data, &session_id, &value).await +} + +pub async fn put_kv( + State(data): State>, + Extension(user): Extension, + Extension(session_id): Extension, + Path(key): Path, + Extension(value): Extension, +) -> Result>, ApiError> { + debug!("Entering put_kv function"); + info!("Putting key-value pair for user"); + tracing::trace!("putting key-value pair: {} = {}", key, value); + + match data.put(user.uuid, key, value.clone()).await { + Ok(kv) => kv, + Err(e) => { + tracing::error!("Error putting key-value pair: {:?}", e); + return Err(ApiError::InternalServerError); + } + }; + + debug!("Exiting put_kv function"); + encrypt_response(&data, &session_id, &value).await +} + +pub async fn delete_kv( + State(data): State>, + Extension(user): Extension, + Extension(session_id): Extension, + Path(key): Path, +) -> Result>, ApiError> { + debug!("Entering delete_kv function"); + match data.delete(user.uuid, key).await { + Ok(_) => { + let response = json!({ "message": "Resource deleted successfully" }); + debug!("Exiting delete_kv function"); + encrypt_response(&data, &session_id, &response).await + } + Err(e) => { + tracing::error!("Error deleting key-value pair: {:?}", e); + Err(ApiError::InternalServerError) + } + } +} + +pub async fn list_kv( + State(data): State>, + Extension(user): Extension, + Extension(session_id): Extension, +) -> Result>>, ApiError> { + debug!("Entering list_kv function"); + let kvs = match data.list(user.uuid).await { + Ok(kvs) => kvs, + Err(e) => { + tracing::error!("Error listing key-value pairs: {:?}", e); + return Err(ApiError::InternalServerError); + } + }; + debug!("Exiting list_kv function"); + encrypt_response(&data, &session_id, &kvs).await +} + +pub async fn request_new_verification_code( + State(data): State>, + Extension(user): Extension, + Extension(session_id): Extension, +) -> Result>, ApiError> { + debug!("Entering request_new_verification_code function"); + + // First check if user has an email + let email = match 
user.get_email() { + Some(email) => email.to_string(), + None => { + let response = json!({ "error": "No email associated with account" }); + return encrypt_response(&data, &session_id, &response).await; + } + }; + + // Check if the user is already verified + match data.db.get_email_verification_by_user_id(user.uuid) { + Ok(verification) => { + if verification.is_verified { + let response = json!({ "error": "User is already verified" }); + return encrypt_response(&data, &session_id, &response).await; + } + // Delete the old verification + if let Err(e) = data.db.delete_email_verification(&verification) { + tracing::error!("Error deleting old verification: {:?}", e); + return Err(ApiError::InternalServerError); + } + } + Err(DBError::EmailVerificationNotFound) => { + // This is fine, we'll create a new verification + } + Err(e) => { + tracing::error!("Error checking email verification: {:?}", e); + return Err(ApiError::InternalServerError); + } + } + + // Create a new verification entry + let new_verification = NewEmailVerification::new(user.uuid, 24, false); // 24 hours expiration + let verification = match data.db.create_email_verification(new_verification) { + Ok(v) => v, + Err(e) => { + tracing::error!("Error creating email verification: {:?}", e); + return Err(ApiError::InternalServerError); + } + }; + + // Send the new verification email + if let Err(e) = send_verification_email( + data.app_mode.clone(), + data.resend_api_key.clone(), + email, + verification.verification_code, + ) + .await + { + tracing::error!("Error sending verification email: {:?}", e); + return Err(ApiError::InternalServerError); + } + + let response = json!({ "message": "New verification code sent successfully" }); + debug!("Exiting request_new_verification_code function"); + encrypt_response(&data, &session_id, &response).await +} + +pub async fn change_password( + State(data): State>, + Extension(user): Extension, + Extension(change_request): Extension, + Extension(session_id): 
Extension, +) -> Result>, ApiError> { + debug!("Entering change_password function"); + + // Check if user is an OAuth-only user + if user.password_enc.is_none() { + error!("OAuth-only user attempted to change password"); + return Err(ApiError::InvalidUsernameOrPassword); + } + + // Get email if it exists + let email = user.get_email().map(|e| e.to_string()); + + // Verify the current password + let credentials = Credentials { + email, + id: Some(user.uuid), + password: change_request.current_password, + }; + + match data.authenticate_user(credentials).await { + Ok(Some(authenticated_user)) if authenticated_user.uuid == user.uuid => { + // Current password is correct, proceed with password change + match data + .update_user_password(&user, change_request.new_password) + .await + { + Ok(()) => { + let response = json!({ "message": "Password changed successfully" }); + debug!("Exiting change_password function"); + encrypt_response(&data, &session_id, &response).await + } + Err(e) => { + error!("Error changing password: {:?}", e); + Err(ApiError::InternalServerError) + } + } + } + _ => { + // Current password is incorrect + Err(ApiError::InvalidUsernameOrPassword) + } + } +} + +pub async fn get_private_key( + State(data): State>, + Extension(user): Extension, + Extension(session_id): Extension, +) -> Result>, ApiError> { + debug!("Entering get_private_key function"); + + // First try to get the existing encrypted seed + let encrypted_seed = match user.get_seed_encrypted().await { + Some(seed) => seed, + None => { + // Only generate a new key if one doesn't exist + debug!("No existing key found, generating new key"); + data.generate_private_key(user.uuid) + .await + .map_err(|e| { + error!("Failed to generate private key: {:?}", e); + ApiError::InternalServerError + })? + .get_seed_encrypted() + .await + .ok_or_else(|| { + error!("Private key not found after generation: {}", user.uuid); + ApiError::InternalServerError + })? 
+ } + }; + + // Decrypt the seed to get the mnemonic + let mnemonic = decrypt_user_seed_to_mnemonic(data.enclave_key.clone(), encrypted_seed) + .map_err(|e| { + error!("Failed to decrypt user seed: {:?}", e); + ApiError::InternalServerError + })?; + + let response = PrivateKeyResponse { + mnemonic: mnemonic.to_string(), + }; + + debug!("Exiting get_private_key function"); + encrypt_response(&data, &session_id, &response).await +} + +pub async fn get_private_key_bytes( + State(data): State>, + Extension(user): Extension, + Extension(session_id): Extension, + Query(query): Query, +) -> Result>, ApiError> { + debug!("Entering get_private_key_bytes function"); + + // Validate derivation path if present + query.validate()?; + + let secret_key = data + .get_user_key(user.uuid, query.derivation_path.as_deref()) + .await + .map_err(|e| match e { + Error::InvalidDerivationPath(msg) => { + error!("Invalid derivation path: {}", msg); + ApiError::BadRequest + } + Error::KeyDerivationError(msg) => { + error!("Failed to derive key: {}", msg); + ApiError::BadRequest + } + _ => { + error!("Failed to get user key: {:?}", e); + ApiError::InternalServerError + } + })?; + + let response = PrivateKeyBytesResponse { + private_key: secret_key.display_secret().to_string(), + }; + + debug!("Exiting get_private_key_bytes function"); + encrypt_response(&data, &session_id, &response).await +} + +pub async fn sign_message( + State(data): State>, + Extension(user): Extension, + Extension(sign_request): Extension, + Extension(session_id): Extension, +) -> Result>, ApiError> { + debug!("Entering sign_message function"); + + let message_bytes = general_purpose::STANDARD + .decode(&sign_request.message_base64) + .map_err(|e| { + error!("Failed to decode base64 message: {:?}", e); + ApiError::BadRequest + })?; + + let response = data + .sign_message( + user.uuid, + &message_bytes, + sign_request.algorithm, + sign_request.derivation_path.as_deref(), + ) + .await + .map_err(|e| { + error!("Error 
signing message: {:?}", e); + ApiError::InternalServerError + })?; + + let json_response = SignMessageResponseJson { + signature: response.signature.to_string(), + message_hash: hex::encode(response.message_hash), + }; + + debug!("Exiting sign_message function"); + encrypt_response(&data, &session_id, &json_response).await +} + +pub async fn get_public_key( + State(data): State>, + Extension(user): Extension, + Extension(session_id): Extension, + Query(query): Query, +) -> Result>, ApiError> { + debug!("Entering get_public_key function"); + + let user_secret_key = data + .get_user_key(user.uuid, query.derivation_path.as_deref()) + .await + .map_err(|e| { + error!("Error getting user key: {:?}", e); + ApiError::InternalServerError + })?; + + let secp = Secp256k1::new(); + let public_key = user_secret_key.public_key(&secp); + + // Format public key according to algorithm + let public_key_str = match query.algorithm { + SigningAlgorithm::Schnorr => { + let (xonly_pubkey, _parity) = public_key.x_only_public_key(); + xonly_pubkey.to_string() + } + SigningAlgorithm::Ecdsa => { + // For ECDSA, use the compressed format + public_key.to_string() + } + }; + + let response = PublicKeyResponseJson { + public_key: public_key_str, + algorithm: query.algorithm, + }; + + debug!("Exiting get_public_key function"); + encrypt_response(&data, &session_id, &response).await +} + +pub async fn convert_guest_to_email( + State(data): State>, + Extension(user): Extension, + Extension(convert_request): Extension, + Extension(session_id): Extension, +) -> Result>, ApiError> { + debug!("Entering convert_guest_to_email function"); + + // Check if user is eligible for conversion + if !user.is_guest() { + error!("User already has an email address"); + return Err(ApiError::BadRequest); + } + + if user.password_enc.is_none() { + error!("OAuth users cannot be converted"); + return Err(ApiError::BadRequest); + } + + // Check if email is already taken + if data + .db + 
.get_user_by_email(convert_request.email.clone()) + .is_ok() + { + error!("Email address already in use"); + return Err(ApiError::EmailAlreadyExists); + } + + // Hash and encrypt the new password + let password_hash = password_auth::generate_hash(convert_request.password); + let secret_key = + SecretKey::from_slice(&data.enclave_key).map_err(|_| ApiError::InternalServerError)?; + let encrypted_password = encrypt::encrypt_with_key(&secret_key, password_hash.as_bytes()).await; + + // Update the user with new email, password, and optional name + let mut updated_user = user.clone(); + updated_user.email = Some(convert_request.email); + updated_user.password_enc = Some(encrypted_password); + updated_user.name = convert_request.name; + + // Save the changes + if let Err(e) = data.db.update_user(&updated_user) { + error!("Failed to update user: {:?}", e); + return Err(ApiError::InternalServerError); + } + + // Handle email verification and welcome emails + if let Err(e) = handle_new_user_registration(&data, &updated_user, true).await { + error!("Failed to handle registration tasks: {:?}", e); + return Err(e); + } + + let response = json!({ + "message": "Successfully converted guest account to email account. Please check your email for verification." 
+ }); + + debug!("Exiting convert_guest_to_email function"); + encrypt_response(&data, &session_id, &response).await +} + +pub async fn generate_third_party_token( + State(data): State>, + Extension(user): Extension, + Extension(request): Extension, + Extension(session_id): Extension, +) -> Result>, ApiError> { + debug!("Entering generate_third_party_token function"); + info!( + "Generating third party token for user {} with audience {}", + user.uuid, request.audience + ); + + // Validate the audience URL + if url::Url::parse(&request.audience).is_err() { + error!("Invalid audience URL provided: {}", request.audience); + return Err(ApiError::BadRequest); + } + + debug!("Audience URL validation successful"); + + let token = match NewToken::new( + &user, + TokenType::ThirdParty { + aud: request.audience.clone(), + azp: "maple".to_string(), + }, + &data, + ) { + Ok(token) => { + info!( + "Successfully generated third party token for user {}", + user.uuid + ); + token + } + Err(e) => { + error!("Failed to generate third party token: {:?}", e); + return Err(e); + } + }; + + let response = ThirdPartyTokenResponse { token: token.token }; + + debug!("Exiting generate_third_party_token function"); + encrypt_response(&data, &session_id, &response).await +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_derivation_path_validation() { + // Test valid paths + let valid_paths = vec![ + "m/44'/0'/0'/0/0", // Standard BIP44 absolute + "44'/0'/0'/0/0", // Standard BIP44 relative + "m/84'/0'/0'/0/0", // Standard BIP84 absolute + "84'/0'/0'/0/0", // Standard BIP84 relative + "m/49'/0'/0'/0/0", // Standard BIP49 absolute + "0/0", // Simple relative path + "m/0/0", // Simple absolute path + "0", // Single level relative + "m/0", // Single level absolute + "m", // Master key + "", // Empty path + "0'", // Hardened child + "m/0'", // Hardened child absolute + "0h", // Hardened child (alternate notation) + "m/0h", // Hardened child absolute (alternate notation) + ]; + 
+ for path in valid_paths { + let query = DerivationPathQuery { + derivation_path: Some(path.to_string()), + }; + assert!(query.validate().is_ok(), "Path should be valid: {}", path); + } + + // Test invalid paths + let invalid_paths = vec![ + "invalid", // Random string + "m//0", // Double slash + "m/x/0", // Invalid character + "m/0'/x/0", // Invalid character after hardened + "M/0/0", // Wrong case for master key + "m/2147483648", // Exceeds u32::MAX + "n/0/0", // Wrong master key letter + "/0/0", // Slash without master key + ]; + + for path in invalid_paths { + let query = DerivationPathQuery { + derivation_path: Some(path.to_string()), + }; + assert!( + query.validate().is_err(), + "Path should be invalid: {}", + path + ); + } + + // Test None path + let query = DerivationPathQuery { + derivation_path: None, + }; + assert!(query.validate().is_ok(), "None path should be valid"); + } +} diff --git a/update_continuum_url.sh b/update_continuum_url.sh new file mode 100755 index 0000000..0714245 --- /dev/null +++ b/update_continuum_url.sh @@ -0,0 +1,150 @@ +#!/bin/bash + +set -e + +# Function for logging +log() { + echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" +} + +log "Starting Continuum URL update script" + +while true; do + log "Running update check" + + # Fetch the manifest.toml + manifest=$(curl -s https://cdn.confidential.cloud/continuum/v1/manifest.toml) + + # Extract the maaURL specifically from the attestationService.cpu.azureSEVSNP section + maa_url=$(echo "$manifest" | awk '/^\[attestationService\.cpu\.azureSEVSNP\]/{flag=1; next} /^\[/{flag=0} flag && /^maaURL *=/{print $3; exit}' | tr -d '"') + + if [ -z "$maa_url" ]; then + log "Error: Failed to extract maaURL from manifest" + sleep 300 # Sleep for 5 minutes before trying again + continue + fi + + log "Extracted maaURL: $maa_url" + + # Extract the subdomain + new_subdomain=$(echo "$maa_url" | awk -F[/:/.] 
'{print $4}') + + if [ -z "$new_subdomain" ]; then + log "Error: Failed to extract subdomain from maaURL" + sleep 300 # Sleep for 5 minutes before trying again + continue + fi + + log "Extracted subdomain: $new_subdomain" + + # Debug: Print the exact content of new_subdomain + log "Debug: new_subdomain content: '${new_subdomain}'" + + # Ensure new_subdomain doesn't contain any unexpected characters + new_subdomain=$(echo "$new_subdomain" | tr -cd '[:alnum:]') + + log "Debug: Cleaned new_subdomain: '${new_subdomain}'" + + # Check if the subdomain has changed + vsock_proxy_file="/etc/nitro_enclaves/vsock-proxy.yaml" + service_file="/etc/systemd/system/vsock-azure-continuum.service" + current_subdomain=$(sudo grep -oP '(?<=address: )[^.]+(?=\.weu\.attest\.azure\.net)' "$vsock_proxy_file" | head -n1) + + log "Debug: Current subdomain: '${current_subdomain}'" + + if [ "$current_subdomain" != "$new_subdomain" ]; then + # Update the vsock-proxy.yaml + sed_command="s/\\(address: \\)[^.]*\\(\\.weu\\.attest\\.azure\\.net\\)/\\1${new_subdomain}\\2/" + log "Debug: sed command for vsock_proxy_file: $sed_command" + sudo sed -i "$sed_command" "$vsock_proxy_file" + log "Updated $vsock_proxy_file" + + # Update the systemd service file + sed_command="s/\\(ExecStart=\\/usr\\/bin\\/vsock-proxy 8009 \\)[^.]*\\(\\.weu\\.attest\\.azure\\.net\\)/\\1${new_subdomain}\\2/" + log "Debug: sed command for service_file: $sed_command" + sudo sed -i "$sed_command" "$service_file" + log "Updated $service_file" + + # Reload systemd daemon + sudo systemctl daemon-reload + log "Reloaded systemd daemon" + + # Restart the nitro proxy service + sudo systemctl restart nitro-enclaves-vsock-proxy.service + log "Restarted nitro-enclaves-vsock-proxy.service" + + # Restart the Azure Continuum service + sudo systemctl restart vsock-azure-continuum.service + log "Restarted vsock-azure-continuum.service" + + # Find the EnclaveID of the previous running enclave + ENCLAVES=$(nitro-cli describe-enclaves) + log 
"Current enclaves: $ENCLAVES" + OLD_ENCLAVE_ID=$(echo "$ENCLAVES" | jq -r '.[] | select(.EnclaveName == "opensecret") | .EnclaveID') + + if [ -n "$OLD_ENCLAVE_ID" ]; then + log "Found old enclave ID: $OLD_ENCLAVE_ID" + + # Add a small delay before terminating the old enclave + log "Waiting for 10 seconds before terminating old enclave" + sleep 10 + + # Attempt to terminate the old enclave + log "Attempting to terminate old enclave with ID $OLD_ENCLAVE_ID" + if nitro-cli terminate-enclave --enclave-id $OLD_ENCLAVE_ID; then + log "Successfully terminated old enclave with ID $OLD_ENCLAVE_ID" + else + log "Failed to terminate old enclave with ID $OLD_ENCLAVE_ID. Please investigate." + sleep 300 # Sleep for 5 minutes before trying again + continue + fi + else + log "No old enclave found running" + fi + + # Wait for resources to be freed + log "Waiting for 10 seconds before starting new enclave" + sleep 10 + + # Run the new enclave + log "Starting new enclave" + if nitro-cli run-enclave --eif-path ~/opensecret.eif --memory 16384 --cpu-count 4; then + log "Enclave start command executed successfully" + + # Wait for the enclave to fully initialize (increase this if needed) + log "Waiting for 30 seconds for the enclave to initialize" + sleep 30 + + # Check if the enclave is actually running + if nitro-cli describe-enclaves | jq -e '.[] | select(.EnclaveName == "opensecret")' > /dev/null; then + log "Enclave is running successfully" + else + log "Enclave failed to start properly. Please investigate." + sleep 300 # Sleep for 5 minutes before trying again + continue + fi + else + log "Failed to start new enclave. Please investigate." 
+ sleep 300 # Sleep for 5 minutes before trying again + continue + fi + + # Wait for 10 seconds + log "Waiting for 10 seconds before restarting socat proxy" + sleep 10 + + # Restart the socat proxy + sudo systemctl restart socat-proxy.service + log "Restarted socat-proxy.service" + + log "Enclave status after socat-proxy restart:" + nitro-cli describe-enclaves + + log "Continuum URL update and enclave restart completed successfully" + else + log "No update needed. Current subdomain matches the new subdomain." + fi + + log "Sleeping for 5 minutes before next check" + sleep 300 # Sleep for 5 minutes (300 seconds) +done