diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..946e08c --- /dev/null +++ b/.dockerignore @@ -0,0 +1,3 @@ +target +*/target +*/*/target diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index bd9afa3..0105818 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -11,6 +11,15 @@ jobs: steps: - uses: actions/checkout@v3 + - uses: actions/cache@v4 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} - run: cargo build --release - run: cargo test --release - run: cargo clippy -- -Dwarnings diff --git a/examples/sql-integration/Cargo.toml b/examples/sql-integration/Cargo.toml index 107beff..02ca334 100644 --- a/examples/sql-integration/Cargo.toml +++ b/examples/sql-integration/Cargo.toml @@ -20,6 +20,7 @@ sqlx = { version = "0.7.4", features = [ "sqlite", ] } tokio = { version = "1.35.1", features = ["macros", "rt", "rt-multi-thread"] } +tower = { version = "0.5.1", features = ["util"] } tower-http = { version = "0.5.1", features = ["fs", "trace"] } tracing = "0.1.40" tracing-subscriber = "0.3.18" diff --git a/examples/sql-integration/README.md b/examples/sql-integration/README.md index 7de2d3f..fa4c86f 100644 --- a/examples/sql-integration/README.md +++ b/examples/sql-integration/README.md @@ -17,3 +17,60 @@ The easiest way to run the example is as a test that orchestrates the two partie ``` cargo test --release -- --nocapture ``` + +## How to Deploy the Engine + +The following example shows how to deploy the MPC engine for two parties, based on the SQL integration example (but without showing how and where to deploy the databases). If you want to deploy the engine with more parties or a different Garble program, the same principles apply. 
+ +Two Dockerfiles are provided as examples of how to run the MPC engine inside a docker container, `party0.Dockerfile` and `party1.Dockerfile`. They are identical except for the ports that they use; you could of course just use a single Dockerfile in case all of your parties listen on the same port. These Dockerfiles do not contain any DB configuration; it is up to you to either bundle a database into the docker container (similar to how databases are set up using Docker Compose for the tests, see `docker-compose.yml`) or to change the database URLs in the configuration files (`policy0.json` and `policy1.json`) so that DBs that are hosted somewhere else can be accessed. + +Assuming that the databases are hosted somewhere else, most of `party0.Dockerfile` (or `party1.Dockerfile`) can stay as it is. Let's take a look at the last three lines to see what you might want to change: + +``` +EXPOSE 8000 +WORKDIR /usr/src/parlay/examples/sql-integration +CMD ["parlay-sql-integration", "--addr=0.0.0.0", "--port=8000", "--config=./policy0.json"] +``` + +The above Dockerfile exposes the MPC engine on port 8000 and reads its configuration from `policy0.json` (contained here in this repository). + +To build and run the container, use the following commands and **make sure to run them from the top level directory of the repository**: + +``` +docker build -f examples/sql-integration/party0.Dockerfile --tag 'parlay0' . 
+docker run -t -p 8000:8000 parlay0 +``` + +You will notice that running this docker container will fail, because party 0 is configured to be the leader (in `policy0.json`) and thus expects all other parties to be listening already: + +``` +2024-11-18T21:59:17.244221Z INFO parlay_sql_integration: listening on 0.0.0.0:8000 +2024-11-18T21:59:17.244366Z INFO parlay_sql_integration: Acting as leader (party 0) +2024-11-18T21:59:17.270663Z INFO parlay_sql_integration: Waiting for confirmation from party http://localhost:8001/ +2024-11-18T21:59:17.274310Z ERROR parlay_sql_integration: Could not reach http://localhost:8001/run: error sending request for url (http://localhost:8001/run): error trying to connect: tcp connect error: Cannot assign requested address (os error 99) +Error: Some participants are missing, aborting... +``` + +To solve this, make sure to deploy and run the contributors first (in this example only party 1, but you could deploy more than two parties, in which case all contributing parties need to be started before the leader starts running), for example: + +``` +docker build -f examples/sql-integration/party1.Dockerfile --tag 'parlay1' . && docker run -t -p 8001:8001 parlay1 +[+] Building 279.4s (20/20) FINISHED +2024-11-18T22:52:32.213120Z INFO parlay_sql_integration: listening on 0.0.0.0:8001 +2024-11-18T22:52:32.213365Z INFO parlay_sql_integration: Listening for connection attempts from other parties +2024-11-18T22:52:42.214689Z INFO parlay_sql_integration: Listening for connection attempts from other parties +2024-11-18T22:52:52.216829Z INFO parlay_sql_integration: Listening for connection attempts from other parties +``` + +You can check that the party is running and listening by making a GET request to its `/ping` route (in this example thus `localhost:8001/ping`), which should respond with a `pong` message. 
+ +Make sure to change the `"participants"` key in the configuration files (in our example case `policy0.json` and `policy1.json`) to the addresses used by the parties. The first address in the array is always the first party, the second address in the array the second party and so on. As a result, the configuration files of the different parties must all use the same `"participants"` array if they want to be able to communicate with each other. + +Let's assume that party 0 is listening at `http://1.2.3.4:8000` and party 1 at `http://5.6.7.8:9000`. The configuration files `policy0.json` and `policy1.json` would then both need to contain: + +```json +{ + "participants": ["http://1.2.3.4:8000", "http://5.6.7.8:9000"], + ... +} +``` diff --git a/examples/sql-integration/party0.Dockerfile b/examples/sql-integration/party0.Dockerfile new file mode 100644 index 0000000..3162dad --- /dev/null +++ b/examples/sql-integration/party0.Dockerfile @@ -0,0 +1,15 @@ +# syntax = docker/dockerfile-upstream:master-labs +FROM rust:1.82 AS builder +WORKDIR /usr/src/parlay +COPY . . +RUN cargo install --path ./examples/sql-integration + +FROM debian:bookworm-slim +COPY --from=builder /usr/local/cargo/bin/parlay-sql-integration /usr/local/bin/parlay-sql-integration +COPY --from=builder /usr/src/parlay/examples/sql-integration /usr/src/parlay/examples/sql-integration +RUN apt update && apt install -y --no-install-recommends ca-certificates && rm -rf /var/lib/apt/lists/* +RUN update-ca-certificates + +EXPOSE 8000 +WORKDIR /usr/src/parlay/examples/sql-integration +CMD ["parlay-sql-integration", "--addr=0.0.0.0", "--port=8000", "--config=./policy0.json"] diff --git a/examples/sql-integration/party1.Dockerfile b/examples/sql-integration/party1.Dockerfile new file mode 100644 index 0000000..c18b8be --- /dev/null +++ b/examples/sql-integration/party1.Dockerfile @@ -0,0 +1,15 @@ +# syntax = docker/dockerfile-upstream:master-labs +FROM rust:1.82 AS builder +WORKDIR /usr/src/parlay +COPY . . 
+RUN cargo install --path ./examples/sql-integration + +FROM debian:bookworm-slim +COPY --from=builder /usr/local/cargo/bin/parlay-sql-integration /usr/local/bin/parlay-sql-integration +COPY --from=builder /usr/src/parlay/examples/sql-integration /usr/src/parlay/examples/sql-integration +RUN apt update && apt install -y --no-install-recommends ca-certificates && rm -rf /var/lib/apt/lists/* +RUN update-ca-certificates + +EXPOSE 8001 +WORKDIR /usr/src/parlay/examples/sql-integration +CMD ["parlay-sql-integration", "--addr=0.0.0.0", "--port=8001", "--config=./policy1.json"] diff --git a/examples/sql-integration/src/main.rs b/examples/sql-integration/src/main.rs index 8bf58f8..a6dd042 100644 --- a/examples/sql-integration/src/main.rs +++ b/examples/sql-integration/src/main.rs @@ -2,7 +2,8 @@ use anyhow::{anyhow, bail, Context, Error}; use axum::{ body::Bytes, extract::{DefaultBodyLimit, Path, State}, - routing::post, + http::{Request, Response}, + routing::{get, post}, Json, Router, }; use clap::Parser; @@ -25,10 +26,12 @@ use sqlx::{ use std::{ borrow::BorrowMut, collections::HashMap, - net::SocketAddr, + env, + net::{IpAddr, SocketAddr}, path::PathBuf, process::exit, result::Result, + str::FromStr, sync::Arc, time::{Duration, Instant}, }; @@ -40,17 +43,21 @@ use tokio::{ }, time::{sleep, timeout}, }; +use tower::ServiceBuilder; use tower_http::trace::TraceLayer; -use tracing::{debug, error, info, warn}; +use tracing::{debug, error, info, warn, Span}; use url::Url; /// A CLI for Multi-Party Computation using the Parlay engine. #[derive(Debug, Parser)] #[command(name = "parlay")] struct Cli { + /// The IP address to listen on for connection attempts from other parties. + #[arg(long, short)] + addr: Option, /// The port to listen on for connection attempts from other parties. - #[arg(required = true, long, short)] - port: u16, + #[arg(long, short)] + port: Option, /// The location of the file with the policy configuration. 
#[arg(long, short)] config: PathBuf, @@ -101,7 +108,7 @@ type MpcState = Arc>; async fn main() -> Result<(), Error> { tracing_subscriber::fmt::init(); install_default_drivers(); - let Cli { port, config } = Cli::parse(); + let Cli { addr, port, config } = Cli::parse(); let Ok(policy) = fs::read_to_string(&config).await else { error!("Could not find '{}', exiting...", config.display()); exit(-1); @@ -118,7 +125,19 @@ async fn main() -> Result<(), Error> { senders: vec![], })); + let log_layer = TraceLayer::new_for_http() + .on_request(|r: &Request<_>, _: &Span| tracing::info!("{} {}", r.method(), r.uri().path())) + .on_response( + |r: &Response<_>, latency: Duration, _: &Span| match r.status().as_u16() { + 400..=499 => tracing::warn!("{} (in {:?})", r.status(), latency), + 500..=599 => tracing::error!("{} (in {:?})", r.status(), latency), + _ => tracing::info!("{} (in {:?})", r.status(), latency), + }, + ); + let app = Router::new() + // to check whether a server is running: + .route("/ping", get(ping)) // to kick off an MPC session: .route("/run", post(run)) // to receive constants from other parties: @@ -127,9 +146,22 @@ async fn main() -> Result<(), Error> { .route("/msg/:from", post(msg)) .with_state((policy.clone(), Arc::clone(&state))) .layer(DefaultBodyLimit::disable()) - .layer(TraceLayer::new_for_http()); + .layer(ServiceBuilder::new().layer(log_layer)); - let addr = SocketAddr::from(([127, 0, 0, 1], port)); + let addr = if let Ok(socket_addr) = env::var("SOCKET_ADDRESS") { + SocketAddr::from_str(&socket_addr) + .unwrap_or_else(|_| panic!("Invalid socket address: {socket_addr}")) + } else { + let addr = addr.unwrap_or_else(|| "127.0.0.1".into()); + let port = port.unwrap_or(8000); + match addr.parse::() { + Ok(addr) => SocketAddr::new(addr, port), + Err(_) => { + error!("Invalid IP address: {addr}, using 127.0.0.1 instead"); + SocketAddr::from(([127, 0, 0, 1], port)) + } + } + }; info!("listening on {}", addr); let listener = 
tokio::net::TcpListener::bind(&addr).await?; tokio::spawn(async move { axum::serve(listener, app).await.unwrap() }); @@ -158,19 +190,21 @@ async fn main() -> Result<(), Error> { if party != &policy.participants[policy.party] { info!("Waiting for confirmation from party {party}"); let url = format!("{party}run"); - let Ok(res) = client.post(&url).json(&policy_request).send().await else { - error!("Could not reach {url}"); - participant_missing = true; - continue; - }; - match res.status() { - StatusCode::OK => {} - code => { - error!( - "Unexpected response while trying to trigger execution for {url}: {code}" - ); + match client.post(&url).json(&policy_request).send().await { + Err(err) => { + error!("Could not reach {url}: {err}"); participant_missing = true; + continue; } + Ok(res) => match res.status() { + StatusCode::OK => {} + code => { + error!( + "Unexpected response while trying to start execution for {url}: {code}" + ); + participant_missing = true; + } + }, } } } @@ -583,6 +617,10 @@ fn decode_literal(l: Literal) -> Result>, String> { Ok(records) } +async fn ping() -> &'static str { + "pong" +} + async fn run(State((policy, state)): State<(Policy, MpcState)>, Json(body): Json) { if policy.participants != body.participants || policy.leader != body.leader { error!("Policy not accepted: {body:?}"); diff --git a/src/protocol.rs b/src/protocol.rs index dd2873f..d0e2ea6 100644 --- a/src/protocol.rs +++ b/src/protocol.rs @@ -508,7 +508,6 @@ pub async fn mpc( return Err(MpcError::MissingSharesForInput(*i).into()); }; if *mac != Mac(0) { - // only needed for the trusted dealer version wire_shares_for_others[*i][w] = Some((bit, *mac)); } } @@ -536,13 +535,11 @@ pub async fn mpc( for p in 0..p_max { if let Some((_, key)) = own_macs_and_keys.get(p).copied() { if key != Key(0) { - //only needed for the trusted dealer version let Some(other_shares) = wire_shares_from_others.get(p) else { return Err(MpcError::InvalidInputMacOnWire(w).into()); }; let Some((other_share, 
mac)) = other_shares.get(w).copied().flatten() else { - println!("Problem 2"); return Err(MpcError::InvalidInputMacOnWire(w).into()); }; if mac != key ^ (other_share & delta) { @@ -656,7 +653,6 @@ pub async fn mpc( for (p, mac_s_key_r) in mac_s_key_r.iter().enumerate() { let (_, key_r) = mac_s_key_r; if *key_r == Key(0) { - //only needed for the trusted dealer version continue; } let Some(GarbledGate(garbled_gate)) = &garbled_gates[p][w] else {