From e8f10cd46f6ca08b6e9699373f0f72acf314f7bc Mon Sep 17 00:00:00 2001
From: FiveMovesAhead
Date: Mon, 17 Jun 2024 13:23:40 +1000
Subject: [PATCH 1/3] Use exponential moving average to adjust solution
 signature threshold.

---
 tig-protocol/src/add_block.rs | 38 +++++++++++++++++------------------
 tig-structs/src/config.rs     |  5 +++--
 2 files changed, 21 insertions(+), 22 deletions(-)

diff --git a/tig-protocol/src/add_block.rs b/tig-protocol/src/add_block.rs
index b10f7652..e4d69fd0 100644
--- a/tig-protocol/src/add_block.rs
+++ b/tig-protocol/src/add_block.rs
@@ -1,10 +1,7 @@
 use crate::context::*;
 use logging_timer::time;
 use rand::{prelude::SliceRandom, rngs::StdRng, SeedableRng};
-use std::{
-    collections::{HashMap, HashSet},
-    ops::Mul,
-};
+use std::collections::{HashMap, HashSet};
 use tig_structs::{config::*, core::*};
 use tig_utils::*;
 
@@ -455,21 +452,8 @@ async fn update_solution_signature_thresholds<T: Context>(ctx: &mut T, block: &B
     }
 
     for challenge_id in block.data().active_challenge_ids.iter() {
-        let num_new_solutions = *num_new_solutions_by_challenge
-            .get(challenge_id)
-            .unwrap_or(&0) as f64;
-        let equilibrium_rate = config.qualifiers.total_qualifiers_threshold as f64
-            / config.benchmark_submissions.lifespan_period as f64;
-        let percentage_error = 1f64
-            - num_new_solutions
-                / (config.solution_signature.equilibrium_rate_multiplier * equilibrium_rate);
         let max_threshold = u32::MAX as f64;
-        let percent_delta = (percentage_error * config.solution_signature.percent_error_multiplier)
-            .abs()
-            .clamp(0f64, config.solution_signature.max_percent_delta)
-            .mul(if percentage_error < 0f64 { -1f64 } else { 1f64 });
-
-        let prev_solution_signature_threshold =
+        let current_threshold =
             match get_challenge_by_id(ctx, challenge_id, Some(&block.details.prev_block_id))
                 .await
                 .unwrap_or_else(|e| panic!("get_challenge_by_id error: {:?}", e))
@@ -478,14 +462,28 @@
                 Some(data) => *data.solution_signature_threshold() as f64,
                 None => max_threshold,
             };
+        let current_rate = *num_new_solutions_by_challenge
+            .get(challenge_id)
+            .unwrap_or(&0) as f64;
+
+        let equilibrium_rate = config.qualifiers.total_qualifiers_threshold as f64
+            / config.benchmark_submissions.lifespan_period as f64;
+        let target_rate = config.solution_signature.equilibrium_rate_multiplier * equilibrium_rate;
+        let target_threshold = if current_rate == 0.0 {
+            max_threshold
+        } else {
+            (current_threshold * target_rate / current_rate).clamp(0.0, max_threshold)
+        };
+
+        let threshold_decay = config.solution_signature.threshold_decay.unwrap_or(0.99);
         let mut block_data = get_challenge_by_id(ctx, challenge_id, Some(&block.id))
             .await
             .unwrap_or_else(|e| panic!("get_challenge_by_id error: {:?}", e))
             .block_data()
             .clone();
         block_data.solution_signature_threshold = Some(
-            (prev_solution_signature_threshold + percent_delta * max_threshold)
-                .clamp(0f64, max_threshold) as u32,
+            (current_threshold * threshold_decay + target_threshold * (1.0 - threshold_decay))
+                .clamp(0.0, max_threshold) as u32,
         );
 
         ctx.update_challenge_block_data(challenge_id, &block.id, &block_data)
diff --git a/tig-structs/src/config.rs b/tig-structs/src/config.rs
index 0aa03263..5a4088b9 100644
--- a/tig-structs/src/config.rs
+++ b/tig-structs/src/config.rs
@@ -42,9 +42,10 @@ serializable_struct_with_getters! {
 }
 serializable_struct_with_getters! {
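The update introduced above is a plain exponential moving average: each block keeps `threshold_decay` of the previous threshold and moves the remaining `1 - threshold_decay` toward a target, where the target rescales the current threshold by `target_rate / current_rate`. A minimal standalone sketch of that rule (the free function `next_threshold` and the numbers are illustrative, not part of the patch, which runs this logic inline in `update_solution_signature_thresholds`):

```rust
/// Sketch of the EMA rule from the hunk above.
fn next_threshold(current_threshold: f64, current_rate: f64, target_rate: f64, decay: f64) -> f64 {
    let max_threshold = u32::MAX as f64;
    // No observed solutions: open the signature check fully.
    let target_threshold = if current_rate == 0.0 {
        max_threshold
    } else {
        // Solutions arriving at k times the target rate shrink the target by k.
        (current_threshold * target_rate / current_rate).clamp(0.0, max_threshold)
    };
    // Keep `decay` of the old value, move the rest toward the target.
    (current_threshold * decay + target_threshold * (1.0 - decay)).clamp(0.0, max_threshold)
}

fn main() {
    // Twice the target rate halves the target threshold; decay = 0.99 closes
    // 1% of the gap per block: 0.99 * 1_000_000 + 0.01 * 500_000 = 995_000.
    println!("{:.0}", next_threshold(1_000_000.0, 20.0, 10.0, 0.99));
}
```

Unlike the removed percent-delta controller, each step here is proportional to the remaining gap, so the threshold should settle at the equilibrium rate rather than oscillate around it.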
     SolutionSignatureConfig {
-        max_percent_delta: f64,
+        max_percent_delta: Option<f64>,
+        threshold_decay: Option<f64>,
         equilibrium_rate_multiplier: f64,
-        percent_error_multiplier: f64,
+        percent_error_multiplier: Option<f64>,
     }
 }
 serializable_struct_with_getters! {

From a29143fc337e54ed476230e3b7317371938d34e1 Mon Sep 17 00:00:00 2001
From: FiveMovesAhead
Date: Tue, 18 Jun 2024 02:29:44 +1000
Subject: [PATCH 2/3] Store cutoff_frontier.

---
 tig-protocol/src/add_block.rs | 10 ++++++----
 tig-structs/src/core.rs       |  1 +
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/tig-protocol/src/add_block.rs b/tig-protocol/src/add_block.rs
index e4d69fd0..1ddc9915 100644
--- a/tig-protocol/src/add_block.rs
+++ b/tig-protocol/src/add_block.rs
@@ -226,6 +226,7 @@ async fn create_block<T: Context>(ctx: &mut T) -> Block {
         solution_signature_threshold: None,
         scaled_frontier: None,
         base_frontier: None,
+        cutoff_frontier: None,
         scaling_factor: None,
         qualifier_difficulties: None,
     };
@@ -659,7 +660,7 @@ async fn update_frontiers<T: Context>(ctx: &mut T, block: &Block) {
     let min_difficulty = difficulty_parameters.min_difficulty();
     let max_difficulty = difficulty_parameters.max_difficulty();
 
-    let lowest_frontier = block_data
+    let cutoff_frontier = block_data
         .qualifier_difficulties()
         .iter()
         .map(|d| d.iter().map(|x| -x).collect()) // mirror the points so easiest difficulties are first
@@ -679,23 +680,24 @@
                 (
                     (scaling_factor / (1.0 - min_gap))
                         .min(config.difficulty.max_scaling_factor),
-                    lowest_frontier
+                    cutoff_frontier
                         .scale(&min_difficulty, &max_difficulty, 1.0 - min_gap)
                         .extend(&min_difficulty, &max_difficulty),
                 )
             } else {
-                (scaling_factor.min(1.0 - min_gap), lowest_frontier.clone())
+                (scaling_factor.min(1.0 - min_gap), cutoff_frontier.clone())
             }
         }
         None => (
             scaling_factor.min(config.difficulty.max_scaling_factor),
-            lowest_frontier,
+            cutoff_frontier.clone(),
         ),
     };
 
     let scaled_frontier = base_frontier
         .scale(&min_difficulty, &max_difficulty, scaling_factor)
         .extend(&min_difficulty, &max_difficulty);
+    block_data.cutoff_frontier = Some(cutoff_frontier);
     block_data.base_frontier = Some(base_frontier);
     block_data.scaled_frontier = Some(scaled_frontier);
     block_data.scaling_factor = Some(scaling_factor);
diff --git a/tig-structs/src/core.rs b/tig-structs/src/core.rs
index d0c252fb..746ae3fc 100644
--- a/tig-structs/src/core.rs
+++ b/tig-structs/src/core.rs
@@ -174,6 +174,7 @@ serializable_struct_with_getters! {
         num_qualifiers: Option<u32>,
         qualifier_difficulties: Option<HashSet<Point>>,
         base_frontier: Option<Frontier>,
+        cutoff_frontier: Option<Frontier>,
         scaled_frontier: Option<Frontier>,
         scaling_factor: Option<f64>,
     }
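The `cutoff_frontier` stored by this patch is the Pareto frontier of the easiest qualifier difficulties, and the `-x` mirroring in the hunk above is what turns "hardest non-dominated" into "easiest non-dominated". A self-contained sketch of that trick; `pareto_frontier` here is a naive stand-in for the `tig_utils` frontier ops, written out only to make the mirroring visible:

```rust
// Keep the points not dominated by any other point ("larger is better").
// O(n^2) illustration only; tig_utils::FrontierOps is the real implementation.
fn pareto_frontier(points: &[Vec<i32>]) -> Vec<Vec<i32>> {
    points
        .iter()
        .filter(|p| {
            !points.iter().any(|q| {
                q.iter().zip(p.iter()).all(|(a, b)| a >= b)
                    && q.iter().zip(p.iter()).any(|(a, b)| a > b)
            })
        })
        .cloned()
        .collect()
}

fn main() {
    let difficulties = vec![vec![50, 60], vec![40, 80], vec![45, 90], vec![70, 55]];
    // Mirror, take the frontier, mirror back: [45, 90] drops out because
    // [40, 80] is easier in both parameters.
    let mirrored: Vec<Vec<i32>> = difficulties
        .iter()
        .map(|d| d.iter().map(|x| -x).collect())
        .collect();
    let cutoff: Vec<Vec<i32>> = pareto_frontier(&mirrored)
        .iter()
        .map(|d| d.iter().map(|x| -x).collect())
        .collect();
    println!("{:?}", cutoff); // [[50, 60], [40, 80], [70, 55]] in some order
}
```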
From 8e102697497520798951b7d4968d16687494f4f8 Mon Sep 17 00:00:00 2001
From: FiveMovesAhead
Date: Wed, 19 Jun 2024 05:35:42 +1000
Subject: [PATCH 3/3] Smart difficulty selector.

---
 .../src/benchmarker/difficulty_sampler.rs     | 296 ++++++++++++++++++
 tig-benchmarker/src/benchmarker/mod.rs        | 107 +++++--
 .../src/benchmarker/run_benchmark.rs          |  16 +-
 tig-benchmarker/src/benchmarker/setup_job.rs  |  38 +--
 tig-benchmarker/src/main.rs                   |   2 +-
 5 files changed, 400 insertions(+), 59 deletions(-)
 create mode 100644 tig-benchmarker/src/benchmarker/difficulty_sampler.rs

diff --git a/tig-benchmarker/src/benchmarker/difficulty_sampler.rs b/tig-benchmarker/src/benchmarker/difficulty_sampler.rs
new file mode 100644
index 00000000..148dc624
--- /dev/null
+++ b/tig-benchmarker/src/benchmarker/difficulty_sampler.rs
@@ -0,0 +1,296 @@
+use rand::{
+    distributions::{Distribution, WeightedIndex},
+    rngs::StdRng,
+};
+use tig_structs::core::*;
+
+const PADDING_FACTOR: f32 = 0.2;
+const DECAY: f32 = 0.7;
+const INITIAL_SOLUTIONS_WEIGHT: f32 = 500.0;
+
+#[derive(Debug, Clone)]
+pub struct Weights {
+    pub qualifier: f32,
+    pub solutions: f32,
+    pub within_range: bool,
+}
+
+impl Weights {
+    pub fn new() -> Self {
+        Self {
+            qualifier: 1.0,
+            solutions: INITIAL_SOLUTIONS_WEIGHT,
+            within_range: false,
+        }
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct DifficultySampler {
+    pub min_difficulty: Vec<i32>,
+    pub padding: Vec<usize>,
+    pub dimensions: Vec<usize>,
+    pub weights: Vec<Vec<Weights>>,
+    pub distribution: Option<WeightedIndex<f32>>,
+}
+
+impl DifficultySampler {
+    pub fn new() -> Self {
+        Self {
+            min_difficulty: Vec::new(),
+            padding: Vec::new(),
+            dimensions: Vec::new(),
+            weights: Vec::new(),
+            distribution: None,
+        }
+    }
+
+    pub fn sample(&self, rng: &mut StdRng) -> Vec<i32> {
+        // samples an index from the distribution
+        let idx = self
+            .distribution
+            .clone()
+            .expect("You must update the sampler first")
+            .sample(rng);
+
+        // convert index into difficulty
+        let num_cols = self.dimensions[1] + self.padding[1];
+        let x = idx / num_cols;
+        let y = idx % num_cols;
+        vec![
+            x as i32 + self.min_difficulty[0],
+            y as i32 + self.min_difficulty[1],
+        ]
+    }
+
+    pub fn update_with_block_data(
+        &mut self,
+        min_difficulty: &Vec<i32>,
+        block_data: &ChallengeBlockData,
+    ) {
+        assert_eq!(
+            min_difficulty.len(),
+            2,
+            "Only difficulties with 2 parameters are supported"
+        );
+
+        let left_pad = (0..2)
+            .into_iter()
+            .map(|i| match self.min_difficulty.get(i) {
+                Some(x) => x - min_difficulty[i],
+                None => 0,
+            })
+            .collect();
+        self.min_difficulty = min_difficulty.clone();
+        self.update_dimensions_and_padding(block_data);
+        let size = (0..2)
+            .into_iter()
+            .map(|i| self.dimensions[i] + self.padding[i])
+            .collect();
+        self.resize_weights(&left_pad, &size);
+
+        self.update_qualifier_weights(block_data);
+        self.update_valid_range(block_data);
+        self.update_distributions();
+    }
+
+    pub fn update_with_solutions(&mut self, difficulty: &Vec<i32>, num_solutions: u32) {
+        let (x, y) = (
+            (difficulty[0] - self.min_difficulty[0]) as usize,
+            (difficulty[1] - self.min_difficulty[1]) as usize,
+        );
+        for x_offset in 0..self.padding[0] {
+            for y_offset in 0..self.padding[1] {
+                let dist = ((x_offset as f32 / self.padding[0] as f32).powf(2.0)
+                    + (y_offset as f32 / self.padding[1] as f32).powf(2.0))
+                .sqrt();
+                if dist > 1.0 {
+                    break;
+                }
+                let decay = dist * (1.0 - DECAY) + DECAY;
+                let delta = (1.0 - decay) * num_solutions as f32;
+                self.weights[x + x_offset][y + y_offset].solutions *= decay;
+                self.weights[x + x_offset][y + y_offset].solutions += delta;
+                if x_offset != 0 && x >= x_offset {
+                    self.weights[x - x_offset][y + y_offset].solutions *= decay;
+                    self.weights[x - x_offset][y + y_offset].solutions += delta;
+                }
+                if y_offset != 0 && y >= y_offset {
+                    self.weights[x + x_offset][y - y_offset].solutions *= decay;
+                    self.weights[x + x_offset][y - y_offset].solutions += delta;
+                }
+                if x_offset != 0 && y_offset != 0 && x >= x_offset && y >= y_offset {
+                    self.weights[x - x_offset][y - y_offset].solutions *= decay;
+                    self.weights[x - x_offset][y - y_offset].solutions += delta;
+                }
+            }
+        }
+    }
+
+    fn update_valid_range(&mut self, block_data: &ChallengeBlockData) {
+        let mut lower_cutoff_points: Vec<Vec<usize>> = block_data
+            .base_frontier()
+            .iter()
+            .map(|x| {
+                vec![
+                    (x[0] - self.min_difficulty[0]) as usize,
+                    (x[1] - self.min_difficulty[1]) as usize,
+                ]
+            })
+            .collect();
+        let mut upper_cutoff_points: Vec<Vec<usize>> = block_data
+            .scaled_frontier()
+            .iter()
+            .map(|x| {
+                vec![
+                    (x[0] - self.min_difficulty[0]) as usize,
+                    (x[1] - self.min_difficulty[1]) as usize,
+                ]
+            })
+            .collect();
+        lower_cutoff_points.sort_by(|a, b| a[0].cmp(&b[0]));
+        upper_cutoff_points.sort_by(|a, b| a[0].cmp(&b[0]));
+        if *block_data.scaling_factor() < 1.0 {
+            (lower_cutoff_points, upper_cutoff_points) = (upper_cutoff_points, lower_cutoff_points);
+        }
+        let mut lower_cutoff_idx = 0;
+        let mut lower_cutoff = lower_cutoff_points.get(0).unwrap().clone();
+        let mut upper_cutoff_idx = 0;
+        let mut upper_cutoff1 = upper_cutoff_points.get(0).unwrap().clone();
+        let mut upper_cutoff2 = upper_cutoff_points.get(1).unwrap_or(&upper_cutoff1).clone();
+        for (i, row) in self.weights.iter_mut().enumerate() {
+            if lower_cutoff_idx + 1 < lower_cutoff_points.len()
+                && i == lower_cutoff_points[lower_cutoff_idx + 1][0]
+            {
+                lower_cutoff = lower_cutoff_points[lower_cutoff_idx + 1].clone();
+                lower_cutoff_idx += 1;
+            }
+            if upper_cutoff_idx + 1 < upper_cutoff_points.len()
+                && i == upper_cutoff_points[upper_cutoff_idx + 1][0]
+            {
+                upper_cutoff1 = upper_cutoff_points[upper_cutoff_idx + 1].clone();
+                upper_cutoff2 = upper_cutoff_points
+                    .get(upper_cutoff_idx + 2)
+                    .unwrap_or(&upper_cutoff1)
+                    .clone();
+                upper_cutoff_idx += 1;
+            }
+            for (j, w) in row.iter_mut().enumerate() {
+                let within_lower =
+                    j > lower_cutoff[1] || (j == lower_cutoff[1] && i >= lower_cutoff[0]);
+                let within_upper = (j <= upper_cutoff2[1] && i <= upper_cutoff2[0])
+                    || (j < upper_cutoff1[1] && i < upper_cutoff2[0])
+                    || (j == upper_cutoff1[1] && i == upper_cutoff1[0]);
+                w.within_range = within_lower && within_upper;
+            }
+        }
+    }
+
+    fn update_distributions(&mut self) {
+        let mut distribution = Vec::<f32>::new();
+        for row in self.weights.iter() {
+            for w in row.iter() {
+                distribution.push(if w.within_range {
+                    w.qualifier * w.solutions
+                } else {
+                    0.0
+                });
+            }
+        }
+        self.distribution = Some(WeightedIndex::new(&distribution).unwrap());
+    }
+
+    fn update_qualifier_weights(&mut self, block_data: &ChallengeBlockData) {
+        let mut cutoff_points: Vec<Vec<usize>> = block_data
+            .cutoff_frontier()
+            .iter()
+            .map(|x| {
+                vec![
+                    (x[0] - self.min_difficulty[0]) as usize,
+                    (x[1] - self.min_difficulty[1]) as usize,
+                ]
+            })
+            .collect();
+        cutoff_points.sort_by(|a, b| a[0].cmp(&b[0]));
+
+        let mut cutoff_idx = 0;
+        let mut cutoff = cutoff_points.get(0).unwrap_or(&vec![0, 0]).clone(); // every point is a qualifier if there is no cutoff
+        for (i, row) in self.weights.iter_mut().enumerate() {
+            if cutoff_idx + 1 < cutoff_points.len() && i == cutoff_points[cutoff_idx + 1][0] {
+                cutoff = cutoff_points[cutoff_idx + 1].clone();
+                cutoff_idx += 1;
+            }
+            for (j, w) in row.iter_mut().enumerate() {
+                w.qualifier *= 0.9;
+                if j > cutoff[1] || (j == cutoff[1] && i >= cutoff[0]) {
+                    w.qualifier += 0.1;
+                }
+            }
+        }
+    }
+
+    fn resize_weights(&mut self, left_pad: &Vec<i32>, size: &Vec<usize>) {
+        if left_pad[0] > 0 {
+            self.weights
+                .splice(0..0, vec![Vec::new(); left_pad[0] as usize]);
+        } else if left_pad[0] < 0 {
+            self.weights.drain(0..(left_pad[0].abs() as usize));
+        }
+
+        if left_pad[1] > 0 {
+            let padding_vec = vec![Weights::new(); left_pad[1] as usize];
+            for row in self.weights.iter_mut() {
+                row.splice(0..0, padding_vec.clone());
+            }
+        } else if left_pad[1] < 0 {
+            for row in self.weights.iter_mut() {
+                row.drain(0..(left_pad[1].abs() as usize));
+            }
+        }
+
+        if self.weights.len() != size[0] {
+            self.weights.resize_with(size[0], || Vec::new());
+        }
+        for row in self.weights.iter_mut() {
+            if row.len() != size[1] {
+                row.resize(size[1], Weights::new());
+            }
+        }
+    }
+
+    fn update_dimensions_and_padding(&mut self, block_data: &ChallengeBlockData) {
+        let hardest_difficulty: Vec<i32> = (0..2)
+            .into_iter()
+            .map(|i| {
+                let v1 = block_data
+                    .qualifier_difficulties()
+                    .iter()
+                    .map(|x| x[i])
+                    .max()
+                    .unwrap();
+                let v2 = block_data
+                    .scaled_frontier()
+                    .iter()
+                    .map(|x| x[i])
+                    .max()
+                    .unwrap();
+                let v3 = block_data
+                    .base_frontier()
+                    .iter()
+                    .map(|x| x[i])
+                    .max()
+                    .unwrap();
+                v1.max(v2).max(v3)
+            })
+            .collect();
+        self.dimensions = (0..2)
+            .into_iter()
+            .map(|i| (hardest_difficulty[i] - self.min_difficulty[i] + 1) as usize)
+            .collect();
+        self.padding = self
+            .dimensions
+            .iter()
+            .map(|x| (*x as f32 * PADDING_FACTOR).ceil() as usize)
+            .collect();
+    }
+}
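`DifficultySampler::sample` treats the weight grid as one row-major array: `WeightedIndex` draws a flat cell index, and division/modulo by the padded column count recover the `(x, y)` offsets from `min_difficulty`. A runnable sketch of just that mapping, with made-up grid sizes and weights:

```rust
use rand::{
    distributions::{Distribution, WeightedIndex},
    rngs::StdRng,
    SeedableRng,
};

fn main() {
    let min_difficulty = [20i32, 50];
    let (num_rows, num_cols) = (3usize, 4usize); // dimensions + padding per axis
    // One weight per cell, row-major; cells outside the valid range get 0.0
    // in the real sampler so they are never drawn.
    let weights: Vec<f32> = (0..num_rows * num_cols).map(|i| (i + 1) as f32).collect();
    let dist = WeightedIndex::new(&weights).unwrap();
    let mut rng = StdRng::seed_from_u64(1337);
    let idx = dist.sample(&mut rng);
    // Same index -> difficulty conversion as DifficultySampler::sample.
    let difficulty = vec![
        (idx / num_cols) as i32 + min_difficulty[0],
        (idx % num_cols) as i32 + min_difficulty[1],
    ];
    println!("sampled difficulty: {:?}", difficulty);
}
```

The padding cells beyond the current frontier start at `INITIAL_SOLUTIONS_WEIGHT`, which, until real solution counts decay them, biases sampling toward in-range difficulties that have not been tried yet.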
diff --git a/tig-benchmarker/src/benchmarker/mod.rs b/tig-benchmarker/src/benchmarker/mod.rs
index aacc81d8..75c0b43c 100644
--- a/tig-benchmarker/src/benchmarker/mod.rs
+++ b/tig-benchmarker/src/benchmarker/mod.rs
@@ -1,3 +1,4 @@
+mod difficulty_sampler;
 mod download_wasm;
 mod find_proof_to_submit;
 mod query_data;
@@ -7,11 +8,15 @@ mod submit_benchmark;
 mod submit_proof;
 
 use crate::future_utils::{sleep, spawn, time, Mutex};
+use difficulty_sampler::DifficultySampler;
 use once_cell::sync::OnceCell;
 use serde::Serialize;
 use std::{collections::HashMap, sync::Arc};
 use tig_api::Api;
-use tig_structs::{config::WasmVMConfig, core::*};
+use tig_structs::{
+    config::{MinMaxDifficulty, WasmVMConfig},
+    core::*,
+};
 
 pub type Result<T> = std::result::Result<T, String>;
 
@@ -130,6 +135,8 @@ pub struct State {
     pub selected_algorithms: HashMap<String, String>,
     pub job: Option<Job>,
     pub submission_errors: HashMap<String, String>,
+    #[serde(skip_serializing)]
+    pub difficulty_samplers: HashMap<String, DifficultySampler>,
 }
 
 static BLOBS: OnceCell<Mutex<HashMap<String, Vec<u8>>>> = OnceCell::new();
@@ -173,34 +180,58 @@ async fn run_once(num_workers: u32, ms_per_benchmark: u32) -> Result<()> {
     // retain only benchmarks that are within the lifespan period
     // preserves solution_meta_data and solution_data
     let mut new_query_data = query_data::execute().await?;
-    {
-        let mut state = (*state()).lock().await;
-        let block_started_cutoff = new_query_data.latest_block.details.height.saturating_sub(
-            new_query_data
-                .latest_block
-                .config()
-                .benchmark_submissions
-                .lifespan_period,
-        );
-        let mut latest_benchmarks = state.query_data.benchmarks.clone();
-        latest_benchmarks.retain(|_, x| x.details.block_started >= block_started_cutoff);
-        latest_benchmarks.extend(new_query_data.benchmarks.drain());
+    if {
+        let state = (*state()).lock().await;
+        state.query_data.latest_block.id != new_query_data.latest_block.id
+    } {
+        {
+            let mut state = (*state()).lock().await;
+            let block_started_cutoff = new_query_data.latest_block.details.height.saturating_sub(
+                new_query_data
+                    .latest_block
+                    .config()
+                    .benchmark_submissions
+                    .lifespan_period,
+            );
+            let mut latest_benchmarks = state.query_data.benchmarks.clone();
+            latest_benchmarks.retain(|_, x| x.details.block_started >= block_started_cutoff);
+            latest_benchmarks.extend(new_query_data.benchmarks.drain());
 
-        let mut latest_proofs = state.query_data.proofs.clone();
-        latest_proofs.retain(|id, _| latest_benchmarks.contains_key(id));
-        latest_proofs.extend(new_query_data.proofs.drain());
+            let mut latest_proofs = state.query_data.proofs.clone();
+            latest_proofs.retain(|id, _| latest_benchmarks.contains_key(id));
+            latest_proofs.extend(new_query_data.proofs.drain());
 
-        let mut latest_frauds = state.query_data.frauds.clone();
-        latest_frauds.retain(|id, _| latest_benchmarks.contains_key(id));
-        latest_frauds.extend(new_query_data.frauds.drain());
+            let mut latest_frauds = state.query_data.frauds.clone();
+            latest_frauds.retain(|id, _| latest_benchmarks.contains_key(id));
+            latest_frauds.extend(new_query_data.frauds.drain());
 
-        (*state)
-            .submission_errors
-            .retain(|id, _| latest_benchmarks.contains_key(id));
-        new_query_data.benchmarks = latest_benchmarks;
-        new_query_data.proofs = latest_proofs;
-        new_query_data.frauds = latest_frauds;
-        (*state).query_data = new_query_data;
+            (*state)
+                .submission_errors
+                .retain(|id, _| latest_benchmarks.contains_key(id));
+            new_query_data.benchmarks = latest_benchmarks;
+            new_query_data.proofs = latest_proofs;
+            new_query_data.frauds = latest_frauds;
+            (*state).query_data = new_query_data;
+        }
+
+        update_status("Updating difficulty sampler with query data").await;
+        {
+            let mut state = state().lock().await;
+            let State {
+                query_data,
+                difficulty_samplers,
+                ..
+            } = &mut (*state);
+            for challenge in query_data.challenges.iter() {
+                let difficulty_sampler = difficulty_samplers
+                    .entry(challenge.id.clone())
+                    .or_insert_with(|| DifficultySampler::new());
+                let min_difficulty = query_data.latest_block.config().difficulty.parameters
+                    [&challenge.id]
+                    .min_difficulty();
+                difficulty_sampler.update_with_block_data(&min_difficulty, challenge.block_data());
+            }
+        }
     }
 
     update_status("Finding proof to submit").await;
@@ -218,7 +249,6 @@ async fn run_once(num_workers: u32, ms_per_benchmark: u32) -> Result<()> {
             update_status("No proof to submit").await;
         }
     }
-
     // creates a benchmark & proof with job.benchmark_id
     update_status("Selecting settings to benchmark").await;
     setup_job::execute().await?;
@@ -250,12 +280,14 @@ async fn run_once(num_workers: u32, ms_per_benchmark: u32) -> Result<()> {
             .collect(),
     };
     let solutions_data = Arc::new(Mutex::new(Vec::<SolutionData>::new()));
+    let solutions_count = Arc::new(Mutex::new(0u32));
     update_status("Starting benchmark").await;
     run_benchmark::execute(
         nonce_iters.iter().cloned().collect(),
         &job,
         &wasm,
         solutions_data.clone(),
+        solutions_count.clone(),
     )
     .await;
     {
@@ -321,6 +353,17 @@ async fn run_once(num_workers: u32, ms_per_benchmark: u32) -> Result<()> {
             .await;
         }
     } else {
+        update_status("Updating difficulty sampler with solutions").await;
+        {
+            let num_solutions = *solutions_count.lock().await;
+            let mut state = state().lock().await;
+            state
+                .difficulty_samplers
+                .get_mut(&job.settings.challenge_id)
+                .unwrap()
+                .update_with_solutions(&job.settings.difficulty, num_solutions);
+        }
+
         if num_solutions == 0 {
             update_status("Finished. No solutions to submit").await;
         } else {
@@ -422,11 +465,21 @@ pub async fn setup(api_url: String, api_key: String, player_id: String) {
     API.get_or_init(|| Api::new(api_url, api_key));
     PLAYER_ID.get_or_init(|| player_id);
     let query_data = query_data::execute().await.expect("Failed to query data");
+    let mut difficulty_samplers = HashMap::new();
+    for challenge in query_data.challenges.iter() {
+        let difficulty_sampler = difficulty_samplers
+            .entry(challenge.id.clone())
+            .or_insert_with(|| DifficultySampler::new());
+        let min_difficulty =
+            query_data.latest_block.config().difficulty.parameters[&challenge.id].min_difficulty();
+        difficulty_sampler.update_with_block_data(&min_difficulty, challenge.block_data());
+    }
     STATE.get_or_init(|| {
         Mutex::new(State {
            status: Status::Stopped,
            timer: None,
            query_data,
+           difficulty_samplers,
            selected_algorithms: HashMap::new(),
            job: None,
            submission_errors: HashMap::new(),
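The `solutions_count` plumbed through `run_once` above (and incremented in the run_benchmark.rs hunk that follows) deliberately counts every valid solution, not just those whose signature clears the threshold, so the sampler measures the true solution rate of a difficulty. A sketch of that split, using `std::sync::Mutex` in place of the benchmarker's async `future_utils::Mutex` and a hypothetical `record` helper:

```rust
use std::sync::{Arc, Mutex};

fn record(
    signature: u32,
    threshold: u32,
    solutions_count: &Arc<Mutex<u32>>,
    solutions_data: &Arc<Mutex<Vec<u32>>>,
) {
    // Every valid solution feeds the difficulty sampler...
    *solutions_count.lock().unwrap() += 1;
    // ...but only those clearing the signature threshold are submittable.
    if signature <= threshold {
        solutions_data.lock().unwrap().push(signature);
    }
}

fn main() {
    let count = Arc::new(Mutex::new(0u32));
    let data = Arc::new(Mutex::new(Vec::new()));
    record(10, 100, &count, &data); // counted and kept for submission
    record(200, 100, &count, &data); // counted only
    println!(
        "{} found, {} submittable",
        count.lock().unwrap(),
        data.lock().unwrap().len()
    );
}
```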
diff --git a/tig-benchmarker/src/benchmarker/run_benchmark.rs b/tig-benchmarker/src/benchmarker/run_benchmark.rs
index 8678326e..1027a635 100644
--- a/tig-benchmarker/src/benchmarker/run_benchmark.rs
+++ b/tig-benchmarker/src/benchmarker/run_benchmark.rs
@@ -9,11 +9,13 @@ pub async fn execute(
     job: &Job,
     wasm: &Vec<u8>,
     solutions_data: Arc<Mutex<Vec<SolutionData>>>,
+    solutions_count: Arc<Mutex<u32>>,
 ) {
     for nonce_iter in nonce_iters {
         let job = job.clone();
         let wasm = wasm.clone();
         let solutions_data = solutions_data.clone();
+        let solutions_count = solutions_count.clone();
         spawn(async move {
             let mut last_yield = time();
             loop {
@@ -23,6 +25,11 @@ pub async fn execute(
                 } {
                     None => break,
                     Some(nonce) => {
+                        let now = time();
+                        if now - last_yield > 25 {
+                            yield_now().await;
+                            last_yield = now;
+                        }
                         if let Ok(ComputeResult::ValidSolution(solution_data)) = compute_solution(
                             &job.settings,
                             nonce,
@@ -30,6 +37,10 @@ pub async fn execute(
                             job.wasm_vm_config.max_memory,
                             job.wasm_vm_config.max_fuel,
                         ) {
+                            {
+                                let mut solutions_count = (*solutions_count).lock().await;
+                                *solutions_count += 1;
+                            }
                             if solution_data.calc_solution_signature()
                                 <= job.solution_signature_threshold
                             {
@@ -39,11 +50,6 @@ pub async fn execute(
                                 }
                             }
                         }
-                        let now = time();
-                        if now - last_yield > 25 {
-                            yield_now().await;
-                            last_yield = now;
-                        }
                     }
                 }
             });
     }
diff --git a/tig-benchmarker/src/benchmarker/setup_job.rs b/tig-benchmarker/src/benchmarker/setup_job.rs
index ff365651..0afac0ac 100644
--- a/tig-benchmarker/src/benchmarker/setup_job.rs
+++ b/tig-benchmarker/src/benchmarker/setup_job.rs
@@ -1,14 +1,13 @@
 use super::{player_id, state, Job, QueryData, Result, State};
 use crate::future_utils::time;
 use rand::{
-    distributions::{Alphanumeric, DistString, Uniform, WeightedIndex},
+    distributions::{Alphanumeric, DistString, WeightedIndex},
     rngs::StdRng,
-    Rng, SeedableRng,
+    SeedableRng,
 };
 use rand_distr::Distribution;
 use std::collections::HashMap;
-use tig_structs::{config::*, core::*};
-use tig_utils::{FrontierOps, PointOps};
+use tig_structs::core::*;
 
 pub async fn execute() -> Result<()> {
     let job = if let Some(x) = find_settings_to_recompute().await? {
@@ -88,6 +87,7 @@ async fn pick_settings_to_benchmark() -> Result<Job> {
     let State {
         query_data,
         selected_algorithms,
+        difficulty_samplers,
         ..
     } = &(*state().lock().await);
     let QueryData {
@@ -106,7 +106,7 @@ async fn pick_settings_to_benchmark() -> Result<Job> {
         download_urls,
         &selected_algorithms[&challenge.details.name],
     )?;
-    let difficulty = pick_difficulty(&mut rng, latest_block, challenge)?;
+    let difficulty = difficulty_samplers[&challenge.id].sample(&mut rng);
     Ok(Job {
         benchmark_id: Alphanumeric.sample_string(&mut rng, 32),
         download_url: get_download_url(&selected_algorithm_id, download_urls)?,
@@ -155,7 +155,7 @@ fn pick_challenge<'a>(
         .collect();
     if selected_algorithms.len() == 0 {
         return Err("Your .json is empty".to_string());
-    }
+    };
     let mut challenge_weights = Vec::<(String, f64)>::new();
     for challenge_name in selected_algorithms.keys() {
         let challenge_id = challenge_name_2_id.get(challenge_name).ok_or_else(|| {
@@ -164,9 +164,14 @@ fn pick_challenge<'a>(
             format!(
                 "Algorithm selection for unknown challenge: {}",
                 challenge_name
            )
        })?;
+        let max_percent_qualifiers = *percent_qualifiers_by_challenge
+            .values()
+            .max_by(|a, b| a.partial_cmp(b).unwrap())
+            .unwrap();
         challenge_weights.push((
             challenge_id.clone(),
-            1f64 - percent_qualifiers_by_challenge[challenge_id] + 1e-10f64,
+            4.0 * max_percent_qualifiers / 3.0 - percent_qualifiers_by_challenge[challenge_id]
+                + 1e-10f64,
         ));
     }
     let dist = WeightedIndex::new(
@@ -185,25 +190,6 @@ fn pick_challenge<'a>(
     Ok(challenge)
 }
 
-fn pick_difficulty(rng: &mut StdRng, block: &Block, challenge: &Challenge) -> Result<Vec<i32>> {
-    let difficulty_parameters = &block.config().difficulty.parameters[&challenge.id];
-    let min_difficulty = difficulty_parameters.min_difficulty();
-    let max_difficulty = difficulty_parameters.max_difficulty();
-    let block_data = challenge.block_data();
-    let scaling_factor = *block_data.scaling_factor();
-    let distribution = if scaling_factor >= 1.0 {
-        Uniform::new(1.0, scaling_factor)
-    } else {
-        Uniform::new(scaling_factor, 1.0)
-    };
-    let random_difficulty = block_data.base_frontier().sample(rng).scale(
-        &min_difficulty,
-        &max_difficulty,
-        rng.sample(&distribution),
-    );
-    Ok(random_difficulty)
-}
-
 fn get_algorithm_id(
     algorithms_by_challenge: &HashMap<String, Vec<Algorithm>>,
     challenge: &Challenge,
diff --git a/tig-benchmarker/src/main.rs b/tig-benchmarker/src/main.rs
index 29ee4060..84dbfa3e 100644
--- a/tig-benchmarker/src/main.rs
+++ b/tig-benchmarker/src/main.rs
@@ -39,7 +39,7 @@ fn cli() -> Command {
                 Arg::new("duration")
                     .long("duration")
                     .help("(Optional) Set duration of a benchmark in milliseconds")
-                    .default_value("15000")
+                    .default_value("7500")
                     .value_parser(value_parser!(u32)),
             )
             .arg(
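The reweighting in setup_job.rs replaces `1f64 - percent_qualifiers` with `4/3 * max_percent_qualifiers - percent_qualifiers + 1e-10`: challenge weights now depend on coverage relative to the best-covered challenge, which always retains `max / 3` weight rather than tending toward zero. A worked example with made-up qualifier percentages:

```rust
fn main() {
    let percent_qualifiers = [("c001", 0.6), ("c002", 0.3), ("c003", 0.1)];
    let max = percent_qualifiers
        .iter()
        .map(|(_, p)| *p)
        .fold(f64::MIN, f64::max);
    // 4/3 * 0.6 = 0.8, so the weights come out as 0.2, 0.5 and 0.7: the
    // best-covered challenge keeps max / 3 = 0.2 instead of being driven
    // toward zero as its qualifier share grows.
    for (id, p) in percent_qualifiers {
        println!("{id}: weight = {:.2}", 4.0 * max / 3.0 - p + 1e-10);
    }
}
```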