diff --git a/.github/workflows/test-gpu.yaml b/.github/workflows/test-gpu.yaml
new file mode 100644
index 000000000..baf66263f
--- /dev/null
+++ b/.github/workflows/test-gpu.yaml
@@ -0,0 +1,133 @@
+name: Rust GPU Tests
+
+on:
+  push:
+
+concurrency:
+  group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}"
+  cancel-in-progress: true
+
+jobs:
+  e2e:
+    runs-on: gpu
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Validate presence of GPU devices
+        run: nvidia-smi
+
+      - name: Check shared memory size
+        run: df -h
+
+      - name: Install OpenSSL && pkg-config
+        run: sudo apt-get update && sudo apt-get install -y pkg-config libssl-dev
+
+      - name: Install CUDA and NCCL dependencies
+        if: steps.cache-cuda-nccl.outputs.cache-hit != 'true'
+        env:
+          DEBIAN_FRONTEND: noninteractive
+        run: |
+          wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-keyring_1.1-1_all.deb
+          sudo dpkg -i cuda-keyring_1.1-1_all.deb
+          sudo apt update
+          sudo apt install -y cuda-toolkit-12-2 libnccl2 libnccl-dev
+
+      - name: Find libs
+        run: find /usr -name "libnvrtc*" && find /usr -name libcuda.so
+
+      - name: Cache Rust build
+        uses: actions/cache@v3
+        id: cache-rust
+        with:
+          path: |
+            ~/.cargo/registry
+            ~/.cargo/git
+            target
+          key: rust-build-${{ runner.os }}-${{ hashFiles('**/Cargo.lock') }}
+          restore-keys: |
+            rust-build-${{ runner.os }}-
+
+      - name: Find libs
+        run: find /usr -name "libnvrtc*" && find /usr -name libcuda.so
+
+      - name: Install Rust nightly
+        uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: nightly
+
+      - name: E2E Tests
+        run: cargo test --release e2e
+        shell: bash
+        env:
+          NCCL_P2P_LEVEL: LOC
+          NCCL_NET: Socket
+          NCCL_P2P_DIRECT_DISABLE: 1
+          NCCL_SHM_DISABLE: 1
+
+  e2e-sanitizer:
+    runs-on: gpu
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Validate presence of GPU devices
+        run: nvidia-smi
+
+      - name: Check shared memory size
+        run: df -h
+
+      - name: Install OpenSSL && pkg-config
+        run: sudo apt-get update && sudo apt-get install -y pkg-config libssl-dev
+
+      - name: Install CUDA and NCCL dependencies
+        if: steps.cache-cuda-nccl.outputs.cache-hit != 'true'
+        env:
+          DEBIAN_FRONTEND: noninteractive
+        run: |
+          wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-keyring_1.1-1_all.deb
+          sudo dpkg -i cuda-keyring_1.1-1_all.deb
+          sudo apt update
+          sudo apt install -y cuda-toolkit-12-2 cuda-command-line-tools-12-2 libnccl2 libnccl-dev
+
+      - name: Find libs
+        run: find /usr -name "libnvrtc*" && find /usr -name libcuda.so
+
+      - name: Cache Rust build
+        uses: actions/cache@v3
+        id: cache-rust
+        with:
+          path: |
+            ~/.cargo/registry
+            ~/.cargo/git
+            target
+          key: rust-build-${{ runner.os }}-${{ hashFiles('**/Cargo.lock') }}
+          restore-keys: |
+            rust-build-${{ runner.os }}-
+
+      - name: Find libs
+        run: find /usr -name "libnvrtc*" && find /usr -name libcuda.so
+
+      - name: Find compute-sanitizer
+        run: find /usr -name "compute-sanitizer"
+
+      - name: Install Rust nightly
+        uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: nightly
+
+      - name: Build e2e test
+        run: cargo test --release e2e --no-run
+
+      - name: Build e2e test and grab executable name
+        run: echo TEST_NAME=$(cargo --color=never test --release e2e --no-run 2>&1 | grep "Executable tests/e2e.rs" | sed "s/.*(\(.*\))/\1/") >> $GITHUB_OUTPUT
+        id: build-e2e
+
+      - name: E2E Tests w/ compute-sanitizer
+        run: /usr/local/cuda-12.2/bin/compute-sanitizer --tool=memcheck ${{ steps.build-e2e.outputs.TEST_NAME }} --nocapture
+        env:
+          NCCL_DEBUG: info
+          NCCL_P2P_LEVEL: LOC
+          NCCL_NET: Socket
+          NCCL_P2P_DIRECT_DISABLE: 1
+          NCCL_SHM_DISABLE: 1
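
The "grab executable name" step scrapes cargo's human-readable output with grep and sed, which is fragile across cargo versions. A sturdier route (hypothetical, not part of this patch) is cargo's machine-readable `--message-format=json` output; sketched here in Rust to match the rest of the repo, assuming only std and a `cargo` binary on PATH:

    use std::process::Command;

    // Sketch: locate the compiled `e2e` test binary via cargo's JSON output
    // instead of scraping human-readable text. The substring matching below
    // is deliberately naive; a real version would parse the JSON properly.
    fn main() {
        let out = Command::new("cargo")
            .args(["test", "--release", "--no-run", "--message-format=json"])
            .output()
            .expect("failed to spawn cargo");
        for line in String::from_utf8_lossy(&out.stdout).lines() {
            if !line.contains("e2e") {
                continue;
            }
            // Compiler-artifact messages carry an "executable" field for
            // test binaries (it is null for plain library targets).
            if let Some(rest) = line.split("\"executable\":\"").nth(1) {
                if let Some(path) = rest.split('"').next() {
                    println!("{path}");
                }
            }
        }
    }

The workflow could keep its one-liner; the point is only that the JSON stream exposes the executable path as a stable field rather than as free-form text.
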
diff --git a/src/bin/client.rs b/src/bin/client.rs
index 87014cffb..9dabe72bd 100644
--- a/src/bin/client.rs
+++ b/src/bin/client.rs
@@ -24,7 +24,7 @@ use std::{collections::HashMap, sync::Arc, time::Duration};
 use tokio::{spawn, sync::Mutex, time::sleep};
 use uuid::Uuid;
 
-const N_QUERIES: usize = 32 * 20;
+const N_QUERIES: usize = 64 * 20;
 const REGION: &str = "eu-north-1";
 const RNG_SEED_SERVER: u64 = 42;
 const DB_SIZE: usize = 8 * 1_000;
diff --git a/src/bin/server.rs b/src/bin/server.rs
index 20321b102..7a53d2b0c 100644
--- a/src/bin/server.rs
+++ b/src/bin/server.rs
@@ -46,7 +46,7 @@ use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
 
 const REGION: &str = "eu-north-1";
 const DB_SIZE: usize = 8 * 1_000;
-const N_QUERIES: usize = 32;
+const N_QUERIES: usize = 64;
 const N_BATCHES: usize = 100;
 const RNG_SEED: u64 = 42;
 const SYNC_RESULTS: usize = N_QUERIES * 2;
diff --git a/src/dot/distance_comparator.rs b/src/dot/distance_comparator.rs
index 319e1bab5..e365d9149 100644
--- a/src/dot/distance_comparator.rs
+++ b/src/dot/distance_comparator.rs
@@ -73,6 +73,7 @@ impl DistanceComparator {
         results3: &[CudaView<u64>],
         results_ptrs: &[CudaSlice<u32>],
         db_sizes: &[usize],
+        real_db_sizes: &[usize],
         offset: usize,
         streams: &[CudaStream],
     ) {
@@ -102,6 +103,7 @@ impl DistanceComparator {
                         self.query_length,
                         offset,
                         num_elements,
+                        real_db_sizes[i],
                     ),
                 )
                 .unwrap();
diff --git a/src/dot/kernel.cu b/src/dot/kernel.cu
index 882447e60..c471578a6 100644
--- a/src/dot/kernel.cu
+++ b/src/dot/kernel.cu
@@ -23,7 +23,7 @@ extern "C" __global__ void matmul_correct_and_reduce(int *c, unsigned short *out
     }
 }
 
-extern "C" __global__ void openResults(unsigned long long *result1, unsigned long long *result2, unsigned long long *result3, unsigned int *output, size_t dbLength, size_t queryLength, size_t offset, size_t numElements)
+extern "C" __global__ void openResults(unsigned long long *result1, unsigned long long *result2, unsigned long long *result3, unsigned int *output, size_t dbLength, size_t queryLength, size_t offset, size_t numElements, size_t realDbLen)
 {
     size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
     if (idx < numElements)
@@ -36,7 +36,7 @@ extern "C" __global__ void openResults(unsigned long long *result1, unsigned lon
             bool match = (result & (1ULL << i));
 
             // Check if we are out of bounds for the query or db
-            if (queryIdx >= queryLength || dbIdx >= dbLength) {
+            if (queryIdx >= queryLength || dbIdx >= realDbLen) {
                 continue;
             }
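
The new `realDbLen` parameter exists because the database side of each chunk is padded up to a multiple of 4 before the GEMM (see `dot_chunk_size` in actor.rs below), so `dbLength` can exceed the number of valid entries and the kernel would otherwise report matches against padding garbage. A plain-Rust sketch of the index math the kernel applies (names are illustrative; `db_length` is the padded value passed as `dbLength`):

    /// Sketch: map a packed result-bit position back to (query_idx, db_idx)
    /// the way `openResults` does, and decide whether the bit is a real match.
    fn is_valid_match(
        bit_pos: usize,
        db_length: usize,    // padded chunk length handed to the kernel
        query_length: usize, // number of valid query rows
        real_db_len: usize,  // number of valid DB entries in this chunk
    ) -> bool {
        let query_idx = bit_pos / db_length;
        let db_idx = bit_pos % db_length;
        // Columns with db_idx >= real_db_len exist only because of the
        // padding; they are "phantom matches" and must be skipped.
        query_idx < query_length && db_idx < real_db_len
    }

    fn main() {
        // Padded chunk of 8 with 5 real entries: bit 6 of the first query
        // row lands in the padding, bit 4 does not.
        assert!(!is_valid_match(6, 8, 1, 5));
        assert!(is_valid_match(4, 8, 1, 5));
    }
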
"matmul_correct_and_reduce"; @@ -64,6 +73,13 @@ pub fn gemm( alpha: i32, beta: i32, ) { + // https://docs.nvidia.com/cuda/cublas/#cublasgemmex: + // "CUBLAS_COMPUTE_32I and CUBLAS_COMPUTE_32I_PEDANTIC compute types are only supported with A, B being 4-byte aligned and lda, ldb being multiples of 4." + assert!(m % 4 == 0, "m must be a multiple of 4"); + // We don't enforce the following, since we use it for n=1 and emperial testing + // shows that it works. assert!(n % 4 == 0, "n must be a multiple of 4"); + assert!(a % 4 == 0, "a must be aligned to 4 bytes"); + assert!(b % 4 == 0, "b must be aligned to 4 bytes"); unsafe { let status = gemm_ex( *handle.handle(), @@ -87,14 +103,10 @@ pub fn gemm( sys::cublasGemmAlgo_t::CUBLAS_GEMM_DEFAULT, ); - match status { - Ok(_) => { - println!("GEMM success"); - } - Err(e) => { - // Handle error - eprintln!("CUBLAS error: {:?}", e); - } + // Try to fetch more information in case of an error + if let Err(e) = status { + let c_str = CStr::from_ptr(lib().cublasGetStatusString(e.0)); + panic!("CUBLAS error: {:?}", c_str.to_str()); } } } @@ -825,7 +837,7 @@ mod tests { use std::sync::Arc; const WIDTH: usize = 12_800; - const QUERY_SIZE: usize = 31; + const QUERY_SIZE: usize = 32; const DB_SIZE: usize = 8 * 1000; const RNG_SEED: u64 = 42; @@ -859,6 +871,7 @@ mod tests { let query = random_vec(QUERY_SIZE, WIDTH, u16::MAX as u32); let device_manager = Arc::new(DeviceManager::init()); let n_devices = device_manager.device_count(); + let mut gpu_result = vec![0u16; DB_SIZE / n_devices * QUERY_SIZE]; let db_sizes = vec![DB_SIZE / n_devices; n_devices]; diff --git a/src/helpers/device_manager.rs b/src/helpers/device_manager.rs index 8de746d52..c533ec509 100644 --- a/src/helpers/device_manager.rs +++ b/src/helpers/device_manager.rs @@ -71,7 +71,6 @@ impl DeviceManager { pub fn await_streams(&self, streams: &[CudaStream]) { for i in 0..self.devices.len() { - self.devices[i].bind_to_thread().unwrap(); unsafe { synchronize(streams[i].stream).unwrap() } } } diff --git a/src/server/actor.rs b/src/server/actor.rs index f76d55cc5..e20cbd59e 100644 --- a/src/server/actor.rs +++ b/src/server/actor.rs @@ -16,11 +16,7 @@ use cudarc::{ }; use futures::{Future, FutureExt}; use ring::hkdf::{Algorithm, Okm, Salt, HKDF_SHA256}; -use std::{ - mem, - sync::{Arc, Mutex}, - time::Instant, -}; +use std::{mem, sync::Arc, time::Instant}; use tokio::sync::{mpsc, oneshot}; #[allow(unused)] @@ -67,27 +63,27 @@ const N_QUERIES: usize = 64; const QUERIES: usize = ROTATIONS * N_QUERIES; pub struct ServerActor { - job_queue: mpsc::Receiver, - device_manager: Arc, - party_id: usize, + job_queue: mpsc::Receiver, + device_manager: Arc, + party_id: usize, // engines - codes_engine: ShareDB, - masks_engine: ShareDB, - batch_codes_engine: ShareDB, - batch_masks_engine: ShareDB, - phase2: Circuits, - phase2_batch: Circuits, - distance_comparator: DistanceComparator, + codes_engine: ShareDB, + masks_engine: ShareDB, + batch_codes_engine: ShareDB, + batch_masks_engine: ShareDB, + phase2: Circuits, + phase2_batch: Circuits, + distance_comparator: DistanceComparator, // DB slices - code_db_slices: SlicedProcessedDatabase, - mask_db_slices: SlicedProcessedDatabase, - streams: Vec>, - cublas_handles: Vec>, - results: Vec>, - batch_results: Vec>, - final_results: Vec>, - current_db_size_mutex: Vec>>, - query_db_size: Vec, + code_db_slices: SlicedProcessedDatabase, + mask_db_slices: SlicedProcessedDatabase, + streams: Vec>, + cublas_handles: Vec>, + results: Vec>, + batch_results: Vec>, + final_results: Vec>, 
diff --git a/src/helpers/device_manager.rs b/src/helpers/device_manager.rs
index 8de746d52..c533ec509 100644
--- a/src/helpers/device_manager.rs
+++ b/src/helpers/device_manager.rs
@@ -71,7 +71,6 @@ impl DeviceManager {
 
     pub fn await_streams(&self, streams: &[CudaStream]) {
         for i in 0..self.devices.len() {
-            self.devices[i].bind_to_thread().unwrap();
             unsafe { synchronize(streams[i].stream).unwrap() }
         }
     }
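
Dropping `bind_to_thread` here is consistent with stream-level synchronization: the wait is addressed by the stream handle itself, so a thread-local device binding is not needed just to block on it. For context, the actor below drives two alternating stream sets and uses `await_streams` as its only host-side fence; a minimal sketch of that ping-pong schedule (`enqueue_chunk` and `await_set` are hypothetical stand-ins, not repo APIs):

    // Sketch: double-buffered chunk processing over two stream sets, as done
    // with `db_chunk_idx % 2` in src/server/actor.rs.
    fn process_chunks(n_chunks: usize) {
        for chunk_idx in 0..n_chunks {
            let current = chunk_idx % 2;
            // Enqueue all async work for this chunk (GEMMs, reshare, phase 2).
            enqueue_chunk(current, chunk_idx);
            // Before the next iteration reuses the *other* set, wait for the
            // previous chunk that ran on it to drain.
            await_set((chunk_idx + 1) % 2);
        }
        // Drain whatever is still in flight on both sets.
        await_set(0);
        await_set(1);
    }

    fn enqueue_chunk(_stream_set: usize, _chunk_idx: usize) {}
    fn await_set(_stream_set: usize) {}

    fn main() {
        process_chunks(5);
    }
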
diff --git a/src/server/actor.rs b/src/server/actor.rs
index f76d55cc5..e20cbd59e 100644
--- a/src/server/actor.rs
+++ b/src/server/actor.rs
@@ -16,11 +16,7 @@ use cudarc::{
 };
 use futures::{Future, FutureExt};
 use ring::hkdf::{Algorithm, Okm, Salt, HKDF_SHA256};
-use std::{
-    mem,
-    sync::{Arc, Mutex},
-    time::Instant,
-};
+use std::{mem, sync::Arc, time::Instant};
 use tokio::sync::{mpsc, oneshot};
 
 #[allow(unused)]
@@ -67,27 +63,27 @@ const N_QUERIES: usize = 64;
 const QUERIES: usize = ROTATIONS * N_QUERIES;
 
 pub struct ServerActor {
-    job_queue:             mpsc::Receiver<ServerJob>,
-    device_manager:        Arc<DeviceManager>,
-    party_id:              usize,
+    job_queue:           mpsc::Receiver<ServerJob>,
+    device_manager:      Arc<DeviceManager>,
+    party_id:            usize,
     // engines
-    codes_engine:          ShareDB,
-    masks_engine:          ShareDB,
-    batch_codes_engine:    ShareDB,
-    batch_masks_engine:    ShareDB,
-    phase2:                Circuits,
-    phase2_batch:          Circuits,
-    distance_comparator:   DistanceComparator,
+    codes_engine:        ShareDB,
+    masks_engine:        ShareDB,
+    batch_codes_engine:  ShareDB,
+    batch_masks_engine:  ShareDB,
+    phase2:              Circuits,
+    phase2_batch:        Circuits,
+    distance_comparator: DistanceComparator,
     // DB slices
-    code_db_slices:        SlicedProcessedDatabase,
-    mask_db_slices:        SlicedProcessedDatabase,
-    streams:               Vec<Vec<CudaStream>>,
-    cublas_handles:        Vec<Vec<CudaBlas>>,
-    results:               Vec<CudaSlice<u32>>,
-    batch_results:         Vec<CudaSlice<u32>>,
-    final_results:         Vec<CudaSlice<u32>>,
-    current_db_size_mutex: Vec<Arc<Mutex<usize>>>,
-    query_db_size:         Vec<usize>,
+    code_db_slices:      SlicedProcessedDatabase,
+    mask_db_slices:      SlicedProcessedDatabase,
+    streams:             Vec<Vec<CudaStream>>,
+    cublas_handles:      Vec<Vec<CudaBlas>>,
+    results:             Vec<CudaSlice<u32>>,
+    batch_results:       Vec<CudaSlice<u32>>,
+    final_results:       Vec<CudaSlice<u32>>,
+    current_db_sizes:    Vec<usize>,
+    query_db_size:       Vec<usize>,
 }
 
 const RESULTS_INIT_HOST: [u32; N_QUERIES * ROTATIONS] = [u32::MAX; N_QUERIES * ROTATIONS];
@@ -273,13 +269,9 @@ impl ServerActor {
         let results = distance_comparator.prepare_results();
         let batch_results = distance_comparator.prepare_results();
 
-        let current_db_size: Vec<usize> =
+        let current_db_sizes: Vec<usize> =
             vec![DB_SIZE / device_manager.device_count(); device_manager.device_count()];
         let query_db_size = vec![QUERIES; device_manager.device_count()];
-        let current_db_size_mutex = current_db_size
-            .iter()
-            .map(|&s| Arc::new(Mutex::new(s)))
-            .collect::<Vec<_>>();
 
         for dev in device_manager.devices() {
             dev.synchronize().unwrap();
@@ -303,7 +295,7 @@ impl ServerActor {
             results,
             batch_results,
             final_results,
-            current_db_size_mutex,
+            current_db_sizes,
             query_db_size,
         })
     }
@@ -355,12 +347,6 @@ impl ServerActor {
             batch_cublas,
         )?;
 
-        let mut current_db_sizes = self
-            .current_db_size_mutex
-            .iter()
-            .map(|e| *e.lock().unwrap())
-            .collect::<Vec<_>>();
-
         // ---- START BATCH DEDUP ----
         tracing::debug!(party_id = self.party_id, "Starting batch deduplication");
         compact_device_queries.compute_dot_products(
@@ -403,6 +389,7 @@ impl ServerActor {
             &self.batch_results,
             chunk_size,
             &db_sizes_batch,
+            &db_sizes_batch,
             0,
             batch_streams,
         );
@@ -430,16 +417,24 @@ impl ServerActor {
                 chunk = db_chunk_idx,
                 "starting chunk"
             );
+
             let request_streams = &self.streams[db_chunk_idx % 2];
             let request_cublas_handles = &self.cublas_handles[db_chunk_idx % 2];
 
             let offset = db_chunk_idx * DB_CHUNK_SIZE;
-            let chunk_size = current_db_sizes
+            let chunk_size = self
+                .current_db_sizes
                 .iter()
                 .map(|s| (s - DB_CHUNK_SIZE * db_chunk_idx).clamp(0, DB_CHUNK_SIZE))
                 .collect::<Vec<_>>();
-            tracing::debug!("chunks: {:?}, offset: {}", chunk_size, offset);
+            // We need to pad the chunk size to be a multiple of 4, because the underlying
+            // `gemm_ex` expects this. We filter out potential "phantom matches"
+            // for the padded data in the `open` later.
+            let dot_chunk_size = chunk_size
+                .iter()
+                .map(|s| s.div_ceil(4) * 4)
+                .collect::<Vec<_>>();
 
             // First stream doesn't need to wait
             if db_chunk_idx == 0 {
@@ -465,7 +460,7 @@ impl ServerActor {
                 &mut self.masks_engine,
                 &self.code_db_slices,
                 &self.mask_db_slices,
-                &chunk_size,
+                &dot_chunk_size,
                 offset,
                 request_streams,
                 request_cublas_handles,
@@ -485,7 +480,7 @@ impl ServerActor {
                 &mut self.masks_engine,
                 &self.code_db_slices,
                 &self.mask_db_slices,
-                &chunk_size,
+                &dot_chunk_size,
                 offset,
                 request_streams,
             );
@@ -499,9 +494,9 @@ impl ServerActor {
                 .record_event(request_streams, &next_dot_event);
 
             self.codes_engine
-                .reshare_results(&chunk_size, request_streams);
+                .reshare_results(&dot_chunk_size, request_streams);
             self.masks_engine
-                .reshare_results(&chunk_size, request_streams);
+                .reshare_results(&dot_chunk_size, request_streams);
 
             // ---- END PHASE 1 ----
@@ -515,7 +510,7 @@ impl ServerActor {
 
             // ---- START PHASE 2 ----
             // TODO: remove
-            let max_chunk_size = chunk_size.iter().max().copied().unwrap();
+            let max_chunk_size = dot_chunk_size.iter().max().copied().unwrap();
             let phase_2_chunk_sizes = vec![max_chunk_size; self.device_manager.device_count()];
             let mut code_dots = self.codes_engine.result_chunk_shares(&phase_2_chunk_sizes);
             let mut mask_dots = self.masks_engine.result_chunk_shares(&phase_2_chunk_sizes);
@@ -546,6 +541,7 @@ impl ServerActor {
                     &self.distance_comparator,
                     &self.results,
                     max_chunk_size * QUERIES / 64,
+                    &dot_chunk_size,
                     &chunk_size,
                     offset,
                     request_streams,
@@ -580,11 +576,12 @@ impl ServerActor {
                 chunk = db_chunk_idx,
                 "finished chunk"
             );
+
             self.device_manager
                 .await_streams(&self.streams[(db_chunk_idx + 1) % 2]); // await other stream
 
             // Break if we reached the end of the database
-            if db_chunk_idx * DB_CHUNK_SIZE >= *current_db_sizes.iter().max().unwrap() {
+            if db_chunk_idx * DB_CHUNK_SIZE >= *self.current_db_sizes.iter().max().unwrap() {
                 break;
             }
         }
@@ -640,11 +637,14 @@ impl ServerActor {
             .collect::<Vec<_>>();
 
         // Spread the insertions across devices.
-        let insertion_list = distribute_insertions(&insertion_list, &current_db_sizes);
+        let insertion_list = distribute_insertions(&insertion_list, &self.current_db_sizes);
 
         // Calculate the new indices for the inserted queries
-        let matches =
-            calculate_insertion_indices(&mut merged_results, &insertion_list, &current_db_sizes);
+        let matches = calculate_insertion_indices(
+            &mut merged_results,
+            &insertion_list,
+            &self.current_db_sizes,
+        );
 
         for i in 0..self.device_manager.device_count() {
             self.device_manager.device(i).bind_to_thread().unwrap();
@@ -665,7 +665,7 @@ impl ServerActor {
                 unsafe {
                     helpers::dtod_at_offset(
                         *db.code_gr.limb_0[i].device_ptr(),
-                        current_db_sizes[i] * IRIS_CODE_LENGTH,
+                        self.current_db_sizes[i] * IRIS_CODE_LENGTH,
                         *query.limb_0[i].device_ptr(),
                         IRIS_CODE_LENGTH * 15 + insertion_idx * IRIS_CODE_LENGTH * ROTATIONS,
                         IRIS_CODE_LENGTH,
@@ -674,7 +674,7 @@ impl ServerActor {
 
                     helpers::dtod_at_offset(
                         *db.code_gr.limb_1[i].device_ptr(),
-                        current_db_sizes[i] * IRIS_CODE_LENGTH,
+                        self.current_db_sizes[i] * IRIS_CODE_LENGTH,
                         *query.limb_1[i].device_ptr(),
                         IRIS_CODE_LENGTH * 15 + insertion_idx * IRIS_CODE_LENGTH * ROTATIONS,
                         IRIS_CODE_LENGTH,
@@ -683,7 +683,7 @@ impl ServerActor {
 
                     helpers::dtod_at_offset(
                         *db.code_sums_gr.limb_0[i].device_ptr(),
-                        current_db_sizes[i] * mem::size_of::<u32>(),
+                        self.current_db_sizes[i] * mem::size_of::<u32>(),
                         *sums.limb_0[i].device_ptr(),
                         mem::size_of::<u32>() * 15
                             + insertion_idx * mem::size_of::<u32>() * ROTATIONS,
@@ -693,7 +693,7 @@ impl ServerActor {
 
                     helpers::dtod_at_offset(
                         *db.code_sums_gr.limb_1[i].device_ptr(),
-                        current_db_sizes[i] * mem::size_of::<u32>(),
+                        self.current_db_sizes[i] * mem::size_of::<u32>(),
                         *sums.limb_1[i].device_ptr(),
                         mem::size_of::<u32>() * 15
                             + insertion_idx * mem::size_of::<u32>() * ROTATIONS,
@@ -702,17 +702,14 @@ impl ServerActor {
                     );
                 }
             }
-            current_db_sizes[i] += 1;
+            self.current_db_sizes[i] += 1;
         }
 
-        // Write new db sizes to device
-        *self.current_db_size_mutex[i].lock().unwrap() += insertion_list[i].len() as usize;
-
         // DEBUG
         tracing::debug!(
             "Updating DB size on device {}: {:?}",
             i,
-            *self.current_db_size_mutex[i].lock().unwrap()
+            self.current_db_sizes[i]
         );
     }
@@ -777,6 +774,7 @@ fn open(
     results_ptrs: &[CudaSlice<u32>],
     chunk_size: usize,
     db_sizes: &[usize],
+    real_db_sizes: &[usize],
    offset: usize,
     streams: &[CudaStream],
 ) {
@@ -804,7 +802,16 @@ fn open(
     }
     cudarc::nccl::result::group_end().unwrap();
 
-    distance_comparator.open_results(&a, &b, &c, results_ptrs, db_sizes, offset, streams);
+    distance_comparator.open_results(
+        &a,
+        &b,
+        &c,
+        results_ptrs,
+        db_sizes,
+        real_db_sizes,
+        offset,
+        streams,
+    );
 }
 
 fn get_merged_results(host_results: &[Vec<u32>], n_devices: usize) -> Vec<u32> {
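
The chunk accounting above in one place: `chunk_size` counts real entries per device, `dot_chunk_size` is the GEMM-friendly padded length, and `open` now receives both so padded columns can be masked. A self-contained sketch (the chunk constant is illustrative, and `saturating_sub` replaces the patch's plain subtraction only to keep the sketch panic-proof):

    const DB_CHUNK_SIZE: usize = 512; // illustrative, not the repo's value

    /// Sketch: per-device sizes for chunk `db_chunk_idx`, returning
    /// (real entries, padded length fed to the GEMM).
    fn chunk_sizes(current_db_size: usize, db_chunk_idx: usize) -> (usize, usize) {
        let real = current_db_size
            .saturating_sub(DB_CHUNK_SIZE * db_chunk_idx)
            .clamp(0, DB_CHUNK_SIZE);
        let padded = real.div_ceil(4) * 4; // `dot_chunk_size` in the diff
        (real, padded)
    }

    fn main() {
        // A device holding 1,027 entries: its third chunk has 3 real entries,
        // padded to 4. The phantom column is filtered in `open` via
        // `real_db_sizes`, which is why the call passes both size vectors.
        assert_eq!(chunk_sizes(1027, 2), (3, 4));
    }

Note that the batch-dedup path passes `&db_sizes_batch` twice: query-against-query comparisons are never padded, so real and padded sizes coincide there.
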
diff --git a/src/store/sync.rs b/src/store/sync.rs
index c008cd38b..853f50136 100644
--- a/src/store/sync.rs
+++ b/src/store/sync.rs
@@ -59,7 +59,7 @@ pub fn sync(comm: &Comm, state: &SyncState) -> Result {
 }
 
 impl SyncState {
-    pub const MAX_REQUESTS: usize = 64;
+    pub const MAX_REQUESTS: usize = 128;
     const SERIAL_SIZE: usize = 8192;
 
     /// Serialize the state to a fixed-size buffer.
diff --git a/src/threshold_ring/protocol.rs b/src/threshold_ring/protocol.rs
index 5f67fad76..ec82f9b72 100644
--- a/src/threshold_ring/protocol.rs
+++ b/src/threshold_ring/protocol.rs
@@ -281,6 +281,7 @@ impl Buffers {
             let a = dev.alloc_zeros::<u64>(size).unwrap();
             let b = dev.alloc_zeros::<u64>(size).unwrap();
             res.push(ChunkShare::new(a, b));
+            dev.synchronize().unwrap();
         }
         res
     }
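
The one-line `dev.synchronize()` added to `Buffers` is an ordering fix: `alloc_zeros` enqueues its zero-fill asynchronously on the device's default stream, and nothing orders that memset against work submitted on other streams (such as the comm streams the protocol uses). A minimal cudarc illustration of the pattern (assumes a CUDA-capable machine; the buffer size is arbitrary):

    use cudarc::driver::CudaDevice;

    fn main() {
        let dev = CudaDevice::new(0).unwrap();
        // Async alloc + memset on the device's default stream; the host is
        // not blocked, and other streams are not ordered against it.
        let buf = dev.alloc_zeros::<u64>(1 << 20).unwrap();
        // Fence: after this, any stream may safely assume `buf` is zeroed.
        dev.synchronize().unwrap();
        drop(buf);
    }
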
diff --git a/tests/e2e.rs b/tests/e2e.rs
index e3542f44a..591371c1c 100644
--- a/tests/e2e.rs
+++ b/tests/e2e.rs
@@ -8,19 +8,20 @@ use gpu_iris_mpc::{
         iris_db::{db::IrisDB, iris::IrisCode},
     },
 };
-use rand::{rngs::StdRng, thread_rng, Rng, SeedableRng};
+use rand::{rngs::StdRng, Rng, SeedableRng};
 use std::{collections::HashMap, env, sync::Arc};
 use tokio::sync::oneshot;
 use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
 use uuid::Uuid;
 
 const DB_SIZE: usize = 8 * 1000;
-const RNG_SEED: u64 = 0xdeadbeef;
-const NUM_BATCHES: usize = 5;
+const DB_RNG_SEED: u64 = 0xdeadbeef;
+const INTERNAL_RNG_SEED: u64 = 0xdeadbeef;
+const NUM_BATCHES: usize = 10;
 const BATCH_SIZE: usize = 64;
 
 fn generate_db(party_id: usize) -> Result<(Vec<u16>, Vec<u16>)> {
-    let mut rng = StdRng::seed_from_u64(RNG_SEED);
+    let mut rng = StdRng::seed_from_u64(DB_RNG_SEED);
     let db = IrisDB::new_random_par(DB_SIZE, &mut rng);
 
     let codes_db = db
@@ -30,7 +31,7 @@ fn generate_db(party_id: usize) -> Result<(Vec<u16>, Vec<u16>)> {
             GaloisRingIrisCodeShare::encode_iris_code(
                 &iris.code,
                 &iris.mask,
-                &mut StdRng::seed_from_u64(RNG_SEED),
+                &mut StdRng::seed_from_u64(DB_RNG_SEED),
             )[party_id]
                 .coefs
         })
@@ -42,7 +43,7 @@ fn generate_db(party_id: usize) -> Result<(Vec<u16>, Vec<u16>)> {
         .flat_map(|iris| {
             GaloisRingIrisCodeShare::encode_mask_code(
                 &iris.mask,
-                &mut StdRng::seed_from_u64(RNG_SEED),
+                &mut StdRng::seed_from_u64(DB_RNG_SEED),
             )[party_id]
                 .coefs
         })
@@ -167,10 +168,9 @@ async fn e2e_test() -> Result<()> {
 
     // make a test query and send it to server
 
-    let db = IrisDB::new_random_par(DB_SIZE, &mut StdRng::seed_from_u64(RNG_SEED));
+    let db = IrisDB::new_random_par(DB_SIZE, &mut StdRng::seed_from_u64(DB_RNG_SEED));
 
-    let mut choice_rng = thread_rng();
-    let mut rng = thread_rng();
+    let mut rng = StdRng::seed_from_u64(INTERNAL_RNG_SEED);
 
     let mut expected_results: HashMap<String, Option<u32>> = HashMap::new();
     let mut requests: HashMap<String, IrisCode> = HashMap::new();
@@ -184,7 +184,7 @@ async fn e2e_test() -> Result<()> {
             let request_id = Uuid::new_v4();
             // Automatic random tests
             let options = if responses.is_empty() { 2 } else { 3 };
-            let template = match choice_rng.gen_range(0..options) {
+            let template = match rng.gen_range(0..options) {
                 0 => {
                     println!("Sending new iris code");
                     expected_results.insert(request_id.to_string(), None);
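
Replacing the two `thread_rng` handles with one `StdRng` seeded from `INTERNAL_RNG_SEED` makes the query schedule deterministic, so a failure seen in the plain e2e CI job can be replayed bit-for-bit under the compute-sanitizer job. The property being relied on, in miniature (the seed value is taken from the diff; the rest is illustrative):

    use rand::{rngs::StdRng, Rng, SeedableRng};

    fn main() {
        // Two runs (or two CI jobs) seeded identically draw identical
        // request choices, batch after batch.
        let mut a = StdRng::seed_from_u64(0xdeadbeef);
        let mut b = StdRng::seed_from_u64(0xdeadbeef);
        for _ in 0..5 {
            assert_eq!(a.gen_range(0..3u32), b.gen_range(0..3u32));
        }
        println!("deterministic");
    }

Splitting the old single `RNG_SEED` into `DB_RNG_SEED` and `INTERNAL_RNG_SEED` keeps the database contents and the query schedule independently tunable, even though both currently use the same value.
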