
Commit

Merge pull request #743 from reilabs/piohei/offchain
Refactor code and introduce offchain mode.
0xKitsune authored Jun 20, 2024
2 parents 356bc07 + 66d1306 commit dd3d8ff
Showing 44 changed files with 1,080 additions and 722 deletions.
91 changes: 47 additions & 44 deletions src/app.rs
@@ -12,15 +12,16 @@ use crate::contracts::IdentityManager;
use crate::database::query::DatabaseQuery as _;
use crate::database::Database;
use crate::ethereum::Ethereum;
use crate::identity::transaction_manager::{
IdentityTransactionManager, OnChainIdentityTransactionManager,
use crate::identity::processor::{
IdentityProcessor, OffChainIdentityProcessor, OnChainIdentityProcessor,
};
use crate::identity::validator::IdentityValidator;
use crate::identity_tree::initializer::TreeInitializer;
use crate::identity_tree::{
Hash, InclusionProof, ProcessedStatus, RootItem, TreeState, TreeVersionReadOps,
};
use crate::prover::map::initialize_prover_maps;
use crate::prover::repository::ProverRepository;
use crate::prover::{ProverConfig, ProverType};
use crate::server::data::{
InclusionProofResponse, ListBatchSizesResponse, VerifySemaphoreProofQuery,
@@ -30,11 +31,11 @@ use crate::server::error::Error as ServerError;
use crate::utils::retry_tx;

pub struct App {
pub database: Arc<Database>,
pub identity_manager: Arc<IdentityManager>,
pub transaction_manager: Arc<dyn IdentityTransactionManager>,
tree_state: OnceLock<TreeState>,
pub config: Config,
pub database: Arc<Database>,
pub identity_processor: Arc<dyn IdentityProcessor>,
pub prover_repository: Arc<ProverRepository>,
tree_state: OnceLock<TreeState>,
pub config: Config,

pub identity_validator: IdentityValidator,
}
@@ -50,11 +51,7 @@ impl App {
/// on the tree state will also error.
#[instrument(name = "App::new", level = "debug", skip_all)]
pub async fn new(config: Config) -> anyhow::Result<Arc<Self>> {
let ethereum = Ethereum::new(&config);
let db = Database::new(&config.database);

let (ethereum, db) = tokio::try_join!(ethereum, db)?;

let db = Database::new(&config.database).await?;
let database = Arc::new(db);
let mut provers: HashSet<ProverConfig> = database.get_provers().await?;

@@ -65,29 +62,36 @@

let (insertion_prover_map, deletion_prover_map) = initialize_prover_maps(provers)?;

let identity_manager = Arc::new(
IdentityManager::new(
&config,
ethereum.clone(),
insertion_prover_map,
deletion_prover_map,
)
.await?,
);
let prover_repository = Arc::new(ProverRepository::new(
insertion_prover_map,
deletion_prover_map,
));

let transaction_manager = Arc::new(OnChainIdentityTransactionManager::new(
ethereum.clone(),
config.clone(),
database.clone(),
identity_manager.clone(),
)?);
let identity_processor: Arc<dyn IdentityProcessor> = if config.offchain_mode.enabled {
Arc::new(OffChainIdentityProcessor::new(database.clone()).await?)
} else {
let ethereum = Ethereum::new(&config).await?;

let identity_manager = Arc::new(IdentityManager::new(&config, ethereum.clone()).await?);

Arc::new(
OnChainIdentityProcessor::new(
ethereum.clone(),
config.clone(),
database.clone(),
identity_manager.clone(),
prover_repository.clone(),
)
.await?,
)
};

let identity_validator = Default::default();
let identity_validator = IdentityValidator::new(&config);

let app = Arc::new(Self {
database,
identity_manager,
transaction_manager,
identity_processor,
prover_repository,
tree_state: OnceLock::new(),
config,
identity_validator,
@@ -101,8 +105,7 @@
pub async fn init_tree(self: Arc<Self>) -> anyhow::Result<()> {
let tree_state = TreeInitializer::new(
self.database.clone(),
self.identity_manager.clone(),
self.transaction_manager.clone(),
self.identity_processor.clone(),
self.config.tree.clone(),
)
.run()
@@ -132,12 +135,12 @@ impl App {
/// queue malfunctions.
#[instrument(level = "debug", skip(self))]
pub async fn insert_identity(&self, commitment: Hash) -> Result<(), ServerError> {
if commitment == self.identity_manager.initial_leaf_value() {
if self.identity_validator.is_initial_leaf(&commitment) {
warn!(?commitment, "Attempt to insert initial leaf.");
return Err(ServerError::InvalidCommitment);
}

if !self.identity_manager.has_insertion_provers().await {
if !self.prover_repository.has_insertion_provers().await {
warn!(
?commitment,
"Identity Manager has no insertion provers. Add provers with /addBatchSize \
Expand All @@ -146,7 +149,7 @@ impl App {
return Err(ServerError::NoProversOnIdInsert);
}

if !self.identity_validator.identity_is_reduced(commitment) {
if !self.identity_validator.is_reduced(commitment) {
warn!(
?commitment,
"The provided commitment is not an element of the field."
@@ -188,7 +191,7 @@
commitment: &Hash,
) -> Result<(), ServerError> {
// Ensure that deletion provers exist
if !self.identity_manager.has_deletion_provers().await {
if !self.prover_repository.has_deletion_provers().await {
warn!(
?commitment,
"Identity Manager has no deletion provers. Add provers with /addBatchSize request."
@@ -245,23 +248,23 @@
new_commitment: &Hash,
) -> Result<(), ServerError> {
retry_tx!(self.database.pool, tx, {
if *new_commitment == self.identity_manager.initial_leaf_value() {
if self.identity_validator.is_initial_leaf(new_commitment) {
warn!(
?new_commitment,
"Attempt to insert initial leaf in recovery."
);
return Err(ServerError::InvalidCommitment);
}

if !self.identity_manager.has_insertion_provers().await {
if !self.prover_repository.has_insertion_provers().await {
warn!(
?new_commitment,
"Identity Manager has no provers. Add provers with /addBatchSize request."
);
return Err(ServerError::NoProversOnIdInsert);
}

if !self.identity_validator.identity_is_reduced(*new_commitment) {
if !self.identity_validator.is_reduced(*new_commitment) {
warn!(
?new_commitment,
"The new identity commitment is not reduced."
@@ -320,7 +323,7 @@ impl App {
timeout_seconds: u64,
prover_type: ProverType,
) -> Result<(), ServerError> {
self.identity_manager
self.prover_repository
.add_batch_size(&url, batch_size, timeout_seconds, prover_type)
.await?;

@@ -341,7 +344,7 @@
batch_size: usize,
prover_type: ProverType,
) -> Result<(), ServerError> {
self.identity_manager
self.prover_repository
.remove_batch_size(batch_size, prover_type)
.await?;

@@ -355,7 +358,7 @@
/// Will return `Err` if something unknown went wrong.
#[instrument(level = "debug", skip(self))]
pub async fn list_batch_sizes(&self) -> Result<ListBatchSizesResponse, ServerError> {
let batches = self.identity_manager.list_batch_sizes().await?;
let batches = self.prover_repository.list_batch_sizes().await?;

Ok(ListBatchSizesResponse::from(batches))
}
@@ -368,7 +371,7 @@
&self,
commitment: &Hash,
) -> Result<InclusionProofResponse, ServerError> {
if commitment == &self.identity_manager.initial_leaf_value() {
if self.identity_validator.is_initial_leaf(commitment) {
return Err(ServerError::InvalidCommitment);
}

@@ -424,7 +427,7 @@
request.signal_hash,
request.external_nullifier_hash,
&request.proof,
self.identity_manager.tree_depth(),
self.config.tree.tree_depth,
);

match checked {
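The core of the src/app.rs change is that App no longer owns a chain-bound transaction manager; it owns an Arc<dyn IdentityProcessor> chosen once at startup from config.offchain_mode.enabled, with OffChainIdentityProcessor needing only the database and OnChainIdentityProcessor needing the Ethereum client, IdentityManager, and prover repository. Below is a minimal, self-contained sketch of that selection pattern: the trait and implementor names follow the diff, but the single synchronous process_batch method is an illustrative stand-in, not the crate's actual (async) API.

use std::sync::Arc;

// Illustrative only: the real IdentityProcessor trait in this repository is
// async and has more methods; one synchronous method keeps the sketch
// dependency-free.
trait IdentityProcessor: Send + Sync {
    fn process_batch(&self) -> Result<(), String>;
}

// In the diff, this variant is built from the Ethereum client, IdentityManager,
// database, and ProverRepository.
struct OnChainIdentityProcessor;

// In the diff, this variant is built from the database handle alone.
struct OffChainIdentityProcessor;

impl IdentityProcessor for OnChainIdentityProcessor {
    fn process_batch(&self) -> Result<(), String> {
        // Would submit the batch through the on-chain identity manager contract.
        Ok(())
    }
}

impl IdentityProcessor for OffChainIdentityProcessor {
    fn process_batch(&self) -> Result<(), String> {
        // Would persist the batch locally with no chain interaction.
        Ok(())
    }
}

fn select_processor(offchain_enabled: bool) -> Arc<dyn IdentityProcessor> {
    // Mirrors the branch on config.offchain_mode.enabled in App::new.
    if offchain_enabled {
        Arc::new(OffChainIdentityProcessor)
    } else {
        Arc::new(OnChainIdentityProcessor)
    }
}

fn main() {
    let processor = select_processor(true);
    processor.process_batch().unwrap();
}

Because the rest of App only talks to the trait object (plus ProverRepository and IdentityValidator), the request handlers above stay identical in both modes.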
45 changes: 26 additions & 19 deletions src/config.rs
@@ -12,15 +12,17 @@ use crate::utils::serde_utils::JsonStrWrapper;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
pub app: AppConfig,
pub tree: TreeConfig,
pub network: NetworkConfig,
pub providers: ProvidersConfig,
pub relayer: RelayerConfig,
pub database: DatabaseConfig,
pub server: ServerConfig,
pub app: AppConfig,
pub tree: TreeConfig,
pub network: Option<NetworkConfig>,
pub providers: Option<ProvidersConfig>,
pub relayer: Option<RelayerConfig>,
pub database: DatabaseConfig,
pub server: ServerConfig,
#[serde(default)]
pub service: ServiceConfig,
pub service: ServiceConfig,
#[serde(default)]
pub offchain_mode: OffchainModeConfig,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -219,6 +221,12 @@ pub struct DatadogConfig {
pub traces_endpoint: Option<String>,
}

#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct OffchainModeConfig {
#[serde(default = "default::offchain_mode_enabled")]
pub enabled: bool,
}

pub mod default {
use std::time::Duration;

@@ -311,6 +319,10 @@ pub mod default {
"0000000000000000000000000000000000000000000000000000000000000000"
))
}

pub fn offchain_mode_enabled() -> bool {
false
}
}

#[cfg(test)]
@@ -323,22 +335,14 @@ mod tests {
[tree]
[network]
identity_manager_address = "0x0000000000000000000000000000000000000000"
[providers]
primary_network_provider = "http://localhost:8545"
[relayer]
kind = "tx_sitter"
tx_sitter_url = "http://localhost:3000"
tx_sitter_address = "0x0000000000000000000000000000000000000000"
[database]
database = "postgres://user:password@localhost:5432/database"
[server]
address = "0.0.0.0:3001"
[offchain_mode]
enabled = false
"#};

#[test]
@@ -394,6 +398,9 @@
[service.datadog]
traces_endpoint = "http://localhost:8126"
[offchain_mode]
enabled = false
"#};

#[test]
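In src/config.rs, the chain-specific sections (network, providers, relayer) become Option, and a new offchain_mode section is added that defaults to enabled = false via #[serde(default)]. A hypothetical off-chain configuration, written in the same raw-string style as the test fixtures above, is shown below; it assumes the optional sections can simply be omitted (serde deserializes missing Option fields to None) and elides any sections not visible in this hunk, such as [app].

// Hypothetical sketch only: section names mirror the Config struct in this
// diff; sections outside the visible hunk (e.g. [app]) are elided, and
// dropping [network]/[providers]/[relayer] assumes they deserialize to None.
const OFFCHAIN_CONFIG: &str = r#"
    [tree]

    [database]
    database = "postgres://user:password@localhost:5432/database"

    [server]
    address = "0.0.0.0:3001"

    [offchain_mode]
    enabled = true
"#;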