diff --git a/das_api/Cargo.toml b/das_api/Cargo.toml
new file mode 100644
index 000000000..5a2c93658
--- /dev/null
+++ b/das_api/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+name = "das_api"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+digital_asset_types = { path = "../digital_asset_types", features = ["json_types", "sql_types"] }
+jsonrpsee = { version = "0.14.0", features = ["server", "macros"] }
+hyper = "0.14.19"
+tracing = "0.1.35"
+metrics = "0.19.0"
+figment = { version = "0.10.6", features = ["env"] }
+serde = "1.0.137"
+thiserror = "1.0.31"
+tokio = { version = "1.19.2" }
+async-trait = "0.1.56"
+serde_json = "1.0.81"
+cadence = "0.29.0"
+cadence-macros = "0.29.0"
+sqlx = { version = "0.6.0", features = ["macros", "runtime-tokio-rustls", "postgres", "uuid", "offline", "json"] }
+sea-orm = { version = "0.9.0", features = ["macros", "runtime-tokio-rustls", "sqlx-postgres"] }
+tokio-postgres = "0.7.5"
+solana-sdk = { version = "=1.10.10" }
+bs58 = "0.4.0"
\ No newline at end of file
diff --git a/das_api/src/api.rs b/das_api/src/api.rs
new file mode 100644
index 000000000..d9acec48c
--- /dev/null
+++ b/das_api/src/api.rs
@@ -0,0 +1,152 @@
+use crate::{DasApiError, RpcModule};
+use async_trait::async_trait;
+use digital_asset_types::rpc::filter::{AssetSorting, ListingSorting, OfferSorting};
+use digital_asset_types::rpc::response::{AssetList, ListingsList, OfferList};
+use digital_asset_types::rpc::{Asset, AssetProof};
+
+#[async_trait]
+pub trait ApiContract: Send + Sync + 'static {
+    async fn check_health(&self) -> Result<(), DasApiError>;
+    async fn get_asset_proof(&self, asset_id: String) -> Result<AssetProof, DasApiError>;
+    async fn get_asset(&self, asset_id: String) -> Result<Asset, DasApiError>;
+    async fn get_assets_by_owner(
+        &self,
+        owner_address: String,
+        sort_by: AssetSorting,
+        limit: u32,
+        page: u32,
+        before: String,
+        after: String,
+    ) -> Result<AssetList, DasApiError>;
+    async fn get_listed_assets_by_owner(
+        &self,
+        owner_address: String,
+        sort_by: ListingSorting,
+        limit: u32,
+        page: u32,
+        before: String,
+        after: String,
+    ) -> Result<ListingsList, DasApiError>;
+    async fn get_offers_by_owner(
+        &self,
+        owner_address: String,
+        sort_by: OfferSorting,
+        limit: u32,
+        page: u32,
+        before: String,
+        after: String,
+    ) -> Result<OfferList, DasApiError>;
+    async fn get_assets_by_group(
+        &self,
+        group_expression: Vec<String>,
+        sort_by: AssetSorting,
+        limit: u32,
+        page: u32,
+        before: String,
+        after: String,
+    ) -> Result<AssetList, DasApiError>;
+    async fn get_assets_by_creator(
+        &self,
+        creator_expression: Vec<String>,
+        sort_by: AssetSorting,
+        limit: u32,
+        page: u32,
+        before: String,
+        after: String,
+    ) -> Result<AssetList, DasApiError>;
+    async fn search_assets(
+        &self,
+        search_expression: String,
+        sort_by: AssetSorting,
+        limit: u32,
+        page: u32,
+        before: String,
+        after: String,
+    ) -> Result<AssetList, DasApiError>;
+}
+
+pub struct RpcApiBuilder;
+
+impl RpcApiBuilder {
+    pub fn build(
+        contract: Box<dyn ApiContract>,
+    ) -> Result<RpcModule<Box<dyn ApiContract>>, DasApiError> {
+        let mut module = RpcModule::new(contract);
+        module.register_async_method("healthz", |_rpc_params, rpc_context| async move {
+            tracing::debug!("Checking Health");
+            rpc_context.check_health().await.map_err(Into::into)
+        })?;
+        module.register_async_method("get_asset_proof", |rpc_params, rpc_context| async move {
+            let asset_id = rpc_params.one::<String>()?;
+            tracing::debug!("Asset Id {}", asset_id);
+            rpc_context
+                .get_asset_proof(asset_id)
+                .await
+                .map_err(Into::into)
+        })?;
+        module.register_async_method("get_asset", |rpc_params, rpc_context| async move {
+            let asset_id = rpc_params.one::<String>()?;
+            tracing::debug!("Asset Id {}", asset_id);
+            rpc_context.get_asset(asset_id).await.map_err(Into::into)
+        })?;
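+        // The remaining handlers all take positional params: (key or expression, sort_by, limit, page, before, after).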
{}", asset_id); + rpc_context.get_asset(asset_id).await.map_err(Into::into) + })?; + module.register_async_method( + "get_assets_by_owner", + |rpc_params, rpc_context| async move { + let (owner_address, sort_by, limit, page, before, after) = + rpc_params.parse().unwrap(); + rpc_context + .get_assets_by_owner(owner_address, sort_by, limit, page, before, after) + .await + .map_err(Into::into) + }, + )?; + module.register_async_method( + "get_assets_by_creator", + |rpc_params, rpc_context| async move { + let (creator_expression, sort_by, limit, page, before, after) = + rpc_params.parse().unwrap(); + rpc_context + .get_assets_by_creator(creator_expression, sort_by, limit, page, before, after) + .await + .map_err(Into::into) + }, + )?; + module.register_async_method( + "get_assets_by_group", + |rpc_params, rpc_context| async move { + let (group_expression, sort_by, limit, page, before, after) = + rpc_params.parse().unwrap(); + rpc_context + .get_assets_by_group(group_expression, sort_by, limit, page, before, after) + .await + .map_err(Into::into) + }, + )?; + module.register_async_method( + "get_listed_assets_by_owner", + |rpc_params, rpc_context| async move { + let (owner_address, sort_by, limit, page, before, after) = + rpc_params.parse().unwrap(); + rpc_context + .get_listed_assets_by_owner(owner_address, sort_by, limit, page, before, after) + .await + .map_err(Into::into) + }, + )?; + module.register_async_method( + "get_offers_by_owner", + |rpc_params, rpc_context| async move { + let (owner_address, sort_by, limit, page, before, after) = + rpc_params.parse().unwrap(); + rpc_context + .get_offers_by_owner(owner_address, sort_by, limit, page, before, after) + .await + .map_err(Into::into) + }, + )?; + + Ok(module) + } +} diff --git a/das_api/src/api_impl.rs b/das_api/src/api_impl.rs new file mode 100644 index 000000000..3169ddfc9 --- /dev/null +++ b/das_api/src/api_impl.rs @@ -0,0 +1,317 @@ +use sea_orm::{ConnectionTrait, DbBackend, Statement}; +use tokio_postgres::types::ToSql; +use { + crate::api::ApiContract, + crate::config::Config, + crate::validation::validate_pubkey, + crate::DasApiError, + async_trait::async_trait, + digital_asset_types::{ + dapi::{ + asset::*, assets_by_creator::*, assets_by_group::*, assets_by_owner::*, change_logs::*, + listed_assets_by_owner::*, offers_by_owner::*, + }, + rpc::{ + filter::{AssetSorting, ListingSorting, OfferSorting}, + response::{AssetList, ListingsList, OfferList}, + Asset, AssetProof, + }, + }, + sea_orm::{DatabaseConnection, DbErr, SqlxPostgresConnector}, + sqlx::postgres::PgPoolOptions, +}; + +pub struct DasApi { + db_connection: DatabaseConnection, +} + +impl DasApi { + pub async fn from_config(config: Config) -> Result { + let pool = PgPoolOptions::new() + .max_connections(5) + .connect(&*config.database_url) + .await?; + + let conn = SqlxPostgresConnector::from_sqlx_postgres_pool(pool); + Ok(DasApi { + db_connection: conn, + }) + } +} + +pub fn not_found(asset_id: &String) -> DbErr { + DbErr::RecordNotFound(format!("Asset Proof for {} Not Found", asset_id)) +} + +#[async_trait] +impl ApiContract for DasApi { + async fn check_health(self: &DasApi) -> Result<(), DasApiError> { + &self + .db_connection + .execute(Statement::from_string( + DbBackend::Postgres, + "SELECT 1".to_string(), + )) + .await?; + Ok(()) + } + + async fn get_asset_proof(self: &DasApi, asset_id: String) -> Result { + let id = validate_pubkey(asset_id.clone())?; + let id_bytes = id.to_bytes().to_vec(); + get_proof_for_asset(&self.db_connection, id_bytes) + .await + 
+#[async_trait]
+impl ApiContract for DasApi {
+    async fn check_health(&self) -> Result<(), DasApiError> {
+        self.db_connection
+            .execute(Statement::from_string(
+                DbBackend::Postgres,
+                "SELECT 1".to_string(),
+            ))
+            .await?;
+        Ok(())
+    }
+
+    async fn get_asset_proof(&self, asset_id: String) -> Result<AssetProof, DasApiError> {
+        let id = validate_pubkey(asset_id.clone())?;
+        let id_bytes = id.to_bytes().to_vec();
+        get_proof_for_asset(&self.db_connection, id_bytes)
+            .await
+            .and_then(|p| {
+                if p.proof.is_empty() {
+                    return Err(not_found(&asset_id));
+                }
+                Ok(p)
+            })
+            .map_err(Into::into)
+    }
+
+    async fn get_asset(&self, asset_id: String) -> Result<Asset, DasApiError> {
+        let id = validate_pubkey(asset_id)?;
+        let id_bytes = id.to_bytes().to_vec();
+        get_asset(&self.db_connection, id_bytes)
+            .await
+            .map_err(Into::into)
+    }
+
+    async fn get_assets_by_owner(
+        &self,
+        owner_address: String,
+        sort_by: AssetSorting,
+        limit: u32,
+        page: u32,
+        before: String,
+        after: String,
+    ) -> Result<AssetList, DasApiError> {
+        let owner_address = validate_pubkey(owner_address)?;
+        let owner_address_bytes = owner_address.to_bytes().to_vec();
+        if page > 0 && (!before.is_empty() || !after.is_empty()) {
+            return Err(DasApiError::PaginationError);
+        }
+        if !before.is_empty() && !after.is_empty() {
+            return Err(DasApiError::PaginationError);
+        }
+
+        let before = if !before.is_empty() {
+            validate_pubkey(before)?.to_bytes().to_vec()
+        } else {
+            before.as_bytes().to_vec()
+        };
+
+        let after = if !after.is_empty() {
+            validate_pubkey(after)?.to_bytes().to_vec()
+        } else {
+            after.as_bytes().to_vec()
+        };
+
+        get_assets_by_owner(
+            &self.db_connection,
+            owner_address_bytes,
+            sort_by,
+            limit,
+            page,
+            before,
+            after,
+        )
+        .await
+        .map_err(Into::into)
+    }
+
+    async fn get_listed_assets_by_owner(
+        &self,
+        owner_address: String,
+        sort_by: ListingSorting,
+        limit: u32,
+        page: u32,
+        before: String,
+        after: String,
+    ) -> Result<ListingsList, DasApiError> {
+        let owner_address = validate_pubkey(owner_address)?;
+        let owner_address_bytes = owner_address.to_bytes().to_vec();
+        if page > 0 && (!before.is_empty() || !after.is_empty()) {
+            return Err(DasApiError::PaginationError);
+        }
+        if !before.is_empty() && !after.is_empty() {
+            return Err(DasApiError::PaginationError);
+        }
+
+        let before = if !before.is_empty() {
+            validate_pubkey(before)?.to_bytes().to_vec()
+        } else {
+            before.as_bytes().to_vec()
+        };
+
+        let after = if !after.is_empty() {
+            validate_pubkey(after)?.to_bytes().to_vec()
+        } else {
+            after.as_bytes().to_vec()
+        };
+
+        get_listed_assets_by_owner(
+            &self.db_connection,
+            owner_address_bytes,
+            sort_by,
+            limit,
+            page,
+            before,
+            after,
+        )
+        .await
+        .map_err(Into::into)
+    }
+
+    async fn get_offers_by_owner(
+        &self,
+        owner_address: String,
+        sort_by: OfferSorting,
+        limit: u32,
+        page: u32,
+        before: String,
+        after: String,
+    ) -> Result<OfferList, DasApiError> {
+        let owner_address = validate_pubkey(owner_address)?;
+        let owner_address_bytes = owner_address.to_bytes().to_vec();
+        if page > 0 && (!before.is_empty() || !after.is_empty()) {
+            return Err(DasApiError::PaginationError);
+        }
+        if !before.is_empty() && !after.is_empty() {
+            return Err(DasApiError::PaginationError);
+        }
+
+        let before = if !before.is_empty() {
+            validate_pubkey(before)?.to_bytes().to_vec()
+        } else {
+            before.as_bytes().to_vec()
+        };
+
+        let after = if !after.is_empty() {
+            validate_pubkey(after)?.to_bytes().to_vec()
+        } else {
+            after.as_bytes().to_vec()
+        };
+
+        get_offers_by_owner(
+            &self.db_connection,
+            owner_address_bytes,
+            sort_by,
+            limit,
+            page,
+            before,
+            after,
+        )
+        .await
+        .map_err(Into::into)
+    }
+
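+    // Group and creator filters validate every supplied key up front, failing fast on the first invalid pubkey.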
+    async fn get_assets_by_group(
+        &self,
+        group_expression: Vec<String>,
+        sort_by: AssetSorting,
+        limit: u32,
+        page: u32,
+        before: String,
+        after: String,
+    ) -> Result<AssetList, DasApiError> {
+        let group_values = group_expression
+            .into_iter()
+            .map(|x| validate_pubkey(x).map(|k| k.to_string()))
+            .collect::<Result<Vec<String>, _>>()?;
+
+        if page > 0 && (!before.is_empty() || !after.is_empty()) {
+            return Err(DasApiError::PaginationError);
+        }
+        if !before.is_empty() && !after.is_empty() {
+            return Err(DasApiError::PaginationError);
+        }
+
+        let before = if !before.is_empty() {
+            validate_pubkey(before)?.to_bytes().to_vec()
+        } else {
+            before.as_bytes().to_vec()
+        };
+
+        let after = if !after.is_empty() {
+            validate_pubkey(after)?.to_bytes().to_vec()
+        } else {
+            after.as_bytes().to_vec()
+        };
+
+        get_assets_by_group(
+            &self.db_connection,
+            group_values,
+            sort_by,
+            limit,
+            page,
+            before,
+            after,
+        )
+        .await
+        .map_err(Into::into)
+    }
+
+    async fn get_assets_by_creator(
+        &self,
+        creator_expression: Vec<String>,
+        sort_by: AssetSorting,
+        limit: u32,
+        page: u32,
+        before: String,
+        after: String,
+    ) -> Result<AssetList, DasApiError> {
+        let creator_addresses = creator_expression
+            .into_iter()
+            .map(|x| validate_pubkey(x).map(|k| k.to_bytes().to_vec()))
+            .collect::<Result<Vec<Vec<u8>>, _>>()?;
+
+        if page > 0 && (!before.is_empty() || !after.is_empty()) {
+            return Err(DasApiError::PaginationError);
+        }
+        if !before.is_empty() && !after.is_empty() {
+            return Err(DasApiError::PaginationError);
+        }
+
+        let before = if !before.is_empty() {
+            validate_pubkey(before)?.to_bytes().to_vec()
+        } else {
+            before.as_bytes().to_vec()
+        };
+
+        let after = if !after.is_empty() {
+            validate_pubkey(after)?.to_bytes().to_vec()
+        } else {
+            after.as_bytes().to_vec()
+        };
+
+        get_assets_by_creator(
+            &self.db_connection,
+            creator_addresses,
+            sort_by,
+            limit,
+            page,
+            before,
+            after,
+        )
+        .await
+        .map_err(Into::into)
+    }
+
+    async fn search_assets(
+        &self,
+        _search_expression: String,
+        _sort_by: AssetSorting,
+        _limit: u32,
+        _page: u32,
+        _before: String,
+        _after: String,
+    ) -> Result<AssetList, DasApiError> {
+        todo!()
+    }
+}
diff --git a/das_api/src/config.rs b/das_api/src/config.rs
new file mode 100644
index 000000000..1ea45c761
--- /dev/null
+++ b/das_api/src/config.rs
@@ -0,0 +1,20 @@
+use crate::error::DasApiError;
+use {
+    figment::{providers::Env, Figment},
+    serde::Deserialize,
+};
+
+#[derive(Deserialize)]
+pub struct Config {
+    pub database_url: String,
+    pub metrics_port: u16,
+    pub metrics_host: String,
+    pub server_port: u16,
+}
+
+pub fn load_config() -> Result<Config, DasApiError> {
+    Figment::new()
+        .join(Env::prefixed("APP_"))
+        .extract()
+        .map_err(|config_error| DasApiError::ConfigurationError(config_error.to_string()))
+}
diff --git a/das_api/src/error.rs b/das_api/src/error.rs
new file mode 100644
index 000000000..42aad30ef
--- /dev/null
+++ b/das_api/src/error.rs
@@ -0,0 +1,26 @@
+use {jsonrpsee::core::Error as RpcError, jsonrpsee::types::error::CallError, thiserror::Error};
+
+#[derive(Error, Debug)]
+pub enum DasApiError {
+    #[error("Config Missing or Error {0}")]
+    ConfigurationError(String),
+    #[error("Server Failed to Start")]
+    ServerStartError(#[from] RpcError),
+    #[error("Database Connection Failed")]
+    DatabaseConnectionError(#[from] sqlx::Error),
+    #[error("Pubkey Validation Error: {0} is invalid")]
+    PubkeyValidationError(String),
+    #[error("Validation Error {0}")]
+    ValidationError(String),
+    #[error("Database Error {0}")]
+    DatabaseError(#[from] sea_orm::DbErr),
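+    // Raised when `page` is combined with a cursor, or when both `before` and `after` are supplied.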
+    #[error("Pagination Error. Only one pagination parameter supported per query.")]
+    PaginationError,
+}
+
+impl From<DasApiError> for RpcError {
+    fn from(err: DasApiError) -> Self {
+        tracing::error!("{}", err);
+        RpcError::Call(CallError::from_std_error(err))
+    }
+}
diff --git a/das_api/src/main.rs b/das_api/src/main.rs
new file mode 100644
index 000000000..e22851434
--- /dev/null
+++ b/das_api/src/main.rs
@@ -0,0 +1,48 @@
+mod api;
+mod api_impl;
+mod config;
+mod error;
+mod validation;
+
+use {
+    crate::api::RpcApiBuilder,
+    crate::api_impl::DasApi,
+    crate::config::load_config,
+    crate::config::Config,
+    crate::error::DasApiError,
+    cadence::{BufferedUdpMetricSink, QueuingMetricSink, StatsdClient},
+    cadence_macros::set_global_default,
+    jsonrpsee::http_server::{HttpServerBuilder, RpcModule},
+    std::net::SocketAddr,
+    std::net::UdpSocket,
+};
+
+fn setup_metrics(config: &Config) {
+    let uri = config.metrics_host.clone();
+    let port = config.metrics_port;
+    let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
+    socket.set_nonblocking(true).unwrap();
+    let host = (uri, port);
+    let udp_sink = BufferedUdpMetricSink::from(host, socket).unwrap();
+    let queuing_sink = QueuingMetricSink::from(udp_sink);
+    let client = StatsdClient::from_sink("das_api", queuing_sink);
+    set_global_default(client);
+}
+
+#[tokio::main]
+async fn main() -> Result<(), DasApiError> {
+    let config = load_config()?;
+    let addr = SocketAddr::from(([0, 0, 0, 0], config.server_port));
+    let server = HttpServerBuilder::default()
+        .health_api("/healthz", "healthz")?
+        .build(addr)
+        .await?;
+    setup_metrics(&config);
+    let api = DasApi::from_config(config).await?;
+    let rpc = RpcApiBuilder::build(Box::new(api))?;
+    println!("Server Started");
+    server.start(rpc)?.await;
+    println!("Server ended");
+    Ok(())
+}
diff --git a/das_api/src/rpc-tests.http b/das_api/src/rpc-tests.http
new file mode 100644
index 000000000..66df95f33
--- /dev/null
+++ b/das_api/src/rpc-tests.http
@@ -0,0 +1,78 @@
+POST http://localhost:9090 HTTP/1.1
+content-type: application/json
+
+{
+    "jsonrpc": "2.0",
+    "method": "get_assets_by_owner",
+    "params": [
+        "FWXys3VXUQhAS7BkpBqnAYuJhz1dNUsexVGuJgaRio8v",
+        "created",
+        50,
+        1,
+        "",
+        ""
+    ],
+    "id": 0
+}
+
+###
+POST http://localhost:9090 HTTP/1.1
+content-type: application/json
+
+{
+    "jsonrpc": "2.0",
+    "method": "get_assets_by_creator",
+    "params": [
+        ["FWXys3VXUQhAS7BkpBqnAYuJhz1dNUsexVGuJgaRio8v"],
+        "created",
+        50,
+        1,
+        "",
+        ""
+    ],
+    "id": 0
+}
+
+###
+POST http://localhost:9090 HTTP/1.1
+content-type: application/json
+
+{
+    "jsonrpc": "2.0",
+    "method": "get_assets_by_group",
+    "params": [
+        ["FWXys3VXUQhAS7BkpBqnAYuJhz1dNUsexVGuJgaRio8v"],
+        "created",
+        50,
+        1,
+        "",
+        ""
+    ],
+    "id": 0
+}
+
+###
+POST http://localhost:9090 HTTP/1.1
+content-type: application/json
+
+{
+    "jsonrpc": "2.0",
+    "id": "0",
+    "method": "get_asset_proof",
+    "params": [
+        "BEczha2EVgjJMqDLrqv2z19C3t5YVjpER9AHBYeh5zjG"
+    ]
+}
+
+###
+POST http://localhost:9090 HTTP/1.1
+content-type: application/json
+
+{
+    "jsonrpc": "2.0",
+    "id": "0",
+    "method": "get_asset",
+    "params": [
+        "BEczha2EVgjJMqDLrqv2z19C3t5YVjpER9AHBYeh5zjG"
+    ]
+}
\ No newline at end of file
diff --git a/das_api/src/validation.rs b/das_api/src/validation.rs
new file mode 100644
index 000000000..f6224eb82
--- /dev/null
+++ b/das_api/src/validation.rs
@@ -0,0 +1,7 @@
+use crate::DasApiError;
+use solana_sdk::pubkey::Pubkey;
+use std::str::FromStr;
+
+pub fn validate_pubkey(str_pubkey: String) -> Result<Pubkey, DasApiError> {
+    Pubkey::from_str(&str_pubkey).map_err(|_| 
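+        // Keep the offending input in the error so clients can see exactly what failed to parse.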
DasApiError::PubkeyValidationError(str_pubkey)) +} diff --git a/digital_asset_types/Cargo.toml b/digital_asset_types/Cargo.toml index ab989d634..ee593e4a8 100644 --- a/digital_asset_types/Cargo.toml +++ b/digital_asset_types/Cargo.toml @@ -8,9 +8,10 @@ edition = "2021" default=["json_types", "sql_types"] json_types = ["serde", "serde_json"] sql_types = ["sea-orm"] +mock = [] [dependencies] -sea-orm = { optional = true, git = "https://github.com/liberwang1013/sea-orm", branch = "insert-on-conflict", features = [ "macros", "runtime-tokio-rustls", "sqlx-postgres", "with-chrono"] } +sea-orm = { optional = true, version="0.9.0", features = [ "macros", "runtime-tokio-rustls", "sqlx-postgres", "with-chrono", "mock"] } sea-query = {version="0.25.0", features = ["postgres-array"]} serde = { version = "1.0.137", optional = true } serde_json = { version = "1.0.81", optional = true } @@ -25,4 +26,7 @@ concurrent-merkle-tree = {path = "../lib/concurrent-merkle-tree" } mpl-token-metadata = { git = "https://github.com/jarry-xiao/metaplex-program-library", rev = "7e2810a", features = ["no-entrypoint"] } jsonpath_lib = "0.3.0" mime_guess = "2.0.4" -url = "2.2.2" \ No newline at end of file +url = "2.2.2" +anchor-client = { path="../deps/anchor/client" } +futures= "0.3.21" +tokio = { version="1.19.2" } \ No newline at end of file diff --git a/digital_asset_types/src/dapi/asset.rs b/digital_asset_types/src/dapi/asset.rs index 02e5cb8e6..7a1d0605d 100644 --- a/digital_asset_types/src/dapi/asset.rs +++ b/digital_asset_types/src/dapi/asset.rs @@ -1,14 +1,17 @@ -use std::collections::HashMap; -use std::path::Path; +use crate::dao::prelude::{Asset, AssetData}; +use crate::dao::{asset, asset_authority, asset_creators, asset_data, asset_grouping}; +use crate::rpc::{ + Asset as RpcAsset, Authority, Compression, Content, Creator, File, Group, Interface, Links, + Ownership, Royalty, Scope, +}; use jsonpath_lib::JsonPathError; use mime_guess::Mime; -use sea_orm::{DatabaseConnection}; +use sea_orm::DatabaseConnection; use sea_orm::{entity::*, query::*, DbErr}; -use url::Url; -use crate::dao::{asset, asset_authority, asset_creators, asset_data, asset_grouping}; -use crate::dao::prelude::{Asset, AssetData}; -use crate::rpc::{Asset as RpcAsset, Authority, Compression, Content, Creator, File, Group, Interface, Links, Ownership, Royalty, Scope}; use serde_json::Value; +use std::collections::HashMap; +use std::path::Path; +use url::Url; pub fn to_uri(uri: String) -> Option { Url::parse(&*uri).ok() @@ -19,9 +22,7 @@ pub fn get_mime(url: Url) -> Option { } pub fn get_mime_type_from_uri(uri: String) -> Option { - to_uri(uri) - .and_then(get_mime) - .map(|m| { m.to_string() }) + to_uri(uri).and_then(get_mime).map(|m| m.to_string()) } pub fn file_from_str(str: String) -> File { @@ -34,7 +35,10 @@ pub fn file_from_str(str: String) -> File { } } -pub fn track_top_level_file(file_map: &mut HashMap, top_level_file: Option<&serde_json::Value>) { +pub fn track_top_level_file( + file_map: &mut HashMap, + top_level_file: Option<&serde_json::Value>, +) { if top_level_file.is_some() { let img = top_level_file.and_then(|x| x.as_str()).unwrap(); let entry = file_map.get(img); @@ -44,7 +48,10 @@ pub fn track_top_level_file(file_map: &mut HashMap, top_level_file } } -pub fn safe_select<'a>(selector: &mut impl FnMut(&str) -> Result, JsonPathError>, expr: &str) -> Option<&'a Value> { +pub fn safe_select<'a>( + selector: &mut impl FnMut(&str) -> Result, JsonPathError>, + expr: &str, +) -> Option<&'a Value> { selector(expr) .ok() .filter(|d| 
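+ // An empty JSONPath match set is treated as "not found" so callers get None instead of an empty slice.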
!Vec::is_empty(d)) @@ -59,15 +66,15 @@ fn v1_content_from_json(metadata: &serde_json::Value) -> Result println!("{}", metadata.to_string()); let image = safe_select(selector, "$.image"); let animation = safe_select(selector, "$.animation_url"); - let external_url = safe_select(selector,"$.external_url") - .map(|val| { - let mut links = HashMap::new(); - links.insert("external_url".to_string(), val[0].to_owned()); - links - }); + let external_url = safe_select(selector, "$.external_url").map(|val| { + let mut links = HashMap::new(); + links.insert("external_url".to_string(), val[0].to_owned()); + links + }); let metadata = safe_select(selector, "description"); let mut actual_files: HashMap = HashMap::new(); - selector("$.properties.files[*]").ok() + selector("$.properties.files[*]") + .ok() .filter(|d| !Vec::is_empty(d)) .map(|files| { for v in files.iter() { @@ -78,12 +85,15 @@ fn v1_content_from_json(metadata: &serde_json::Value) -> Result (Some(u), Some(m)) => { let str_uri = u.as_str().unwrap().to_string(); let str_mime = m.as_str().unwrap().to_string(); - actual_files.insert(str_uri.clone(), File { - uri: Some(str_uri), - mime: Some(str_mime), - quality: None, - contexts: None, - }); + actual_files.insert( + str_uri.clone(), + File { + uri: Some(str_uri), + mime: Some(str_mime), + quality: None, + contexts: None, + }, + ); } (Some(u), None) => { let str_uri = serde_json::to_string(u).unwrap(); @@ -109,61 +119,59 @@ fn v1_content_from_json(metadata: &serde_json::Value) -> Result }) } -fn get_content(asset: &asset::Model, data: &asset_data::Model) -> Result { +pub fn get_content(asset: &asset::Model, data: &asset_data::Model) -> Result { match data.schema_version { - 1 => { - v1_content_from_json(&data.metadata) - } - _ => Err(DbErr::Custom("Version Not Implemented".to_string())) + 1 => v1_content_from_json(&data.metadata), + _ => Err(DbErr::Custom("Version Not Implemented".to_string())), } } pub fn to_authority(authority: Vec) -> Vec { - authority.iter().map(|a| { - Authority { + authority + .iter() + .map(|a| Authority { address: bs58::encode(&a.authority).into_string(), scopes: vec![Scope::Full], - } - }).collect() + }) + .collect() } pub fn to_creators(creators: Vec) -> Vec { - creators.iter().map(|a| { - Creator { + creators + .iter() + .map(|a| Creator { address: bs58::encode(&a.creator).into_string(), share: a.share, verified: a.verified, - } - }).collect() + }) + .collect() } pub fn to_grouping(groups: Vec) -> Vec { - groups.iter().map(|a| { - Group { + groups + .iter() + .map(|a| Group { group_key: a.group_key.clone(), group_value: a.group_value.clone(), - } - }).collect() + }) + .collect() } pub async fn get_asset(db: &DatabaseConnection, asset_id: Vec) -> Result { - let asset_data: (asset::Model, - asset_data::Model) = Asset::find_by_id(asset_id) + let asset_data: (asset::Model, asset_data::Model) = Asset::find_by_id(asset_id) .find_also_related(AssetData) .one(db) .await - .and_then(|o| { - match o { - Some((a, Some(d))) => Ok((a, d)), - _ => Err(DbErr::RecordNotFound("Asset Not Found".to_string())) - } + .and_then(|o| match o { + Some((a, Some(d))) => Ok((a, d)), + _ => Err(DbErr::RecordNotFound("Asset Not Found".to_string())), })?; let (asset, data) = asset_data; let interface = match asset.specification_version { 1 => Interface::NftOneZero, - _ => Interface::Nft + _ => Interface::Nft, }; let content = get_content(&asset, &data)?; @@ -184,27 +192,22 @@ pub async fn get_asset(db: &DatabaseConnection, asset_id: Vec) -> Result>, + sort_by: AssetSorting, + limit: u32, + page: 
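+ // 1-based page number; when 0, the cursor-based (before/after) path below is used instead.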
u32, + before: Vec, + after: Vec, +) -> Result { + let sort_column = match sort_by { + AssetSorting::Created => asset::Column::CreatedAt, + AssetSorting::Updated => todo!(), + AssetSorting::RecentAction => todo!(), + }; + + let mut conditions = Condition::any(); + for creator in creator_expression { + conditions = conditions.add(asset_creators::Column::Creator.eq(creator.clone())); + } + + let assets = if page > 0 { + let paginator = asset::Entity::find() + .join( + JoinType::LeftJoin, + asset::Entity::has_many(asset_creators::Entity).into(), + ) + .filter(conditions) + .find_also_related(AssetData) + .order_by_asc(sort_column) + .paginate(db, limit.try_into().unwrap()); + + paginator.fetch_page((page - 1).try_into().unwrap()).await? + } else if !before.is_empty() { + let rows = asset::Entity::find() + .order_by_asc(sort_column) + .join( + JoinType::LeftJoin, + asset::Entity::has_many(asset_creators::Entity).into(), + ) + .filter(conditions) + .cursor_by(asset_creators::Column::AssetId) + .before(before.clone()) + .first(limit.into()) + .all(db) + .await? + .into_iter() + .map(|x| async move { + let asset_data = x.find_related(AssetData).one(db).await.unwrap(); + + (x, asset_data) + }); + + let assets = futures::future::join_all(rows).await; + assets + } else { + let rows = asset::Entity::find() + .order_by_asc(sort_column) + .join( + JoinType::LeftJoin, + asset::Entity::has_many(asset_creators::Entity).into(), + ) + .filter(conditions) + .cursor_by(asset_creators::Column::AssetId) + .after(after.clone()) + .first(limit.into()) + .all(db) + .await? + .into_iter() + .map(|x| async move { + let asset_data = x.find_related(AssetData).one(db).await.unwrap(); + + (x, asset_data) + }); + + let assets = futures::future::join_all(rows).await; + assets + }; + + let filter_assets: Result, _> = assets + .into_iter() + .map(|(asset, asset_data)| match asset_data { + Some(asset_data) => Ok((asset, asset_data)), + _ => Err(DbErr::RecordNotFound("Asset Not Found".to_string())), + }) + .collect(); + + let build_asset_list = filter_assets? 
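+ // Convert each (asset, asset_data) pair into the RPC shape, loading authorities, creators, and groupings per row.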
+ .into_iter() + .map(|(asset, asset_data)| async move { + let interface = match asset.specification_version { + 1 => Interface::NftOneZero, + _ => Interface::Nft, + }; + + let content = get_content(&asset, &asset_data).unwrap(); + + let authorities = asset_authority::Entity::find() + .filter(asset_authority::Column::AssetId.eq(asset.id.clone())) + .all(db) + .await + .unwrap(); + + let creators = asset_creators::Entity::find() + .filter(asset_creators::Column::AssetId.eq(asset.id.clone())) + .all(db) + .await + .unwrap(); + + let grouping = asset_grouping::Entity::find() + .filter(asset_grouping::Column::AssetId.eq(asset.id.clone())) + .all(db) + .await + .unwrap(); + + let rpc_authorities = to_authority(authorities); + let rpc_creators = to_creators(creators); + let rpc_groups = to_grouping(grouping); + + RpcAsset { + interface, + id: bs58::encode(asset.id).into_string(), + content: Some(content), + authorities: Some(rpc_authorities), + compression: Some(Compression { + eligible: asset.compressible, + compressed: asset.compressed, + }), + grouping: Some(rpc_groups), + royalty: Some(Royalty { + royalty_model: asset.royalty_target_type.into(), + target: asset.royalty_target.map(|s| bs58::encode(s).into_string()), + percent: (asset.royalty_amount as f64) * 0.0001, + locked: false, + }), + creators: Some(rpc_creators), + ownership: Ownership { + frozen: asset.frozen, + delegated: asset.delegate.is_some(), + delegate: asset.delegate.map(|s| bs58::encode(s).into_string()), + ownership_model: asset.owner_type.into(), + owner: bs58::encode(asset.owner).into_string(), + }, + } + }); + + let built_assets = futures::future::join_all(build_asset_list).await; + + let total = built_assets.len() as u32; + + let page = if page > 0 { Some(page) } else { None }; + let before = if !before.is_empty() { + Some(String::from_utf8(before).unwrap()) + } else { + None + }; + let after = if !after.is_empty() { + Some(String::from_utf8(after).unwrap()) + } else { + None + }; + + Ok(AssetList { + total, + limit, + page, + before, + after, + items: built_assets, + }) +} diff --git a/digital_asset_types/src/dapi/assets_by_group.rs b/digital_asset_types/src/dapi/assets_by_group.rs new file mode 100644 index 000000000..24b2bc301 --- /dev/null +++ b/digital_asset_types/src/dapi/assets_by_group.rs @@ -0,0 +1,179 @@ +use crate::dao::prelude::AssetData; +use crate::dao::{asset, asset_authority, asset_creators, asset_grouping}; +use crate::dapi::asset::{get_content, to_authority, to_creators, to_grouping}; +use crate::rpc::filter::AssetSorting; +use crate::rpc::response::AssetList; +use crate::rpc::{Asset as RpcAsset, Compression, Interface, Ownership, Royalty}; +use sea_orm::DatabaseConnection; +use sea_orm::{entity::*, query::*, DbErr}; + +pub async fn get_assets_by_group( + db: &DatabaseConnection, + creator_expression: Vec, + sort_by: AssetSorting, + limit: u32, + page: u32, + before: Vec, + after: Vec, +) -> Result { + let sort_column = match sort_by { + AssetSorting::Created => asset::Column::CreatedAt, + AssetSorting::Updated => todo!(), + AssetSorting::RecentAction => todo!(), + }; + + let mut conditions = Condition::any(); + for creator in creator_expression { + conditions = conditions.add(asset_creators::Column::Creator.eq(creator.clone())); + } + + let assets = if page > 0 { + let paginator = asset::Entity::find() + .join( + JoinType::LeftJoin, + asset::Entity::has_many(asset_grouping::Entity).into(), + ) + .filter(conditions) + .find_also_related(AssetData) + .order_by_asc(sort_column) + .paginate(db, 
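+ // Page size is the caller-supplied limit.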
limit.try_into().unwrap()); + + paginator.fetch_page((page - 1).try_into().unwrap()).await? + } else if !before.is_empty() { + let rows = asset::Entity::find() + .order_by_asc(sort_column) + .join( + JoinType::LeftJoin, + asset::Entity::has_many(asset_grouping::Entity).into(), + ) + .filter(conditions) + .cursor_by(asset_creators::Column::AssetId) + .before(before.clone()) + .first(limit.into()) + .all(db) + .await? + .into_iter() + .map(|x| async move { + let asset_data = x.find_related(AssetData).one(db).await.unwrap(); + + (x, asset_data) + }); + + let assets = futures::future::join_all(rows).await; + assets + } else { + let rows = asset::Entity::find() + .order_by_asc(sort_column) + .join( + JoinType::LeftJoin, + asset::Entity::has_many(asset_grouping::Entity).into(), + ) + .filter(conditions) + .cursor_by(asset_creators::Column::AssetId) + .after(after.clone()) + .first(limit.into()) + .all(db) + .await? + .into_iter() + .map(|x| async move { + let asset_data = x.find_related(AssetData).one(db).await.unwrap(); + + (x, asset_data) + }); + + let assets = futures::future::join_all(rows).await; + assets + }; + + let filter_assets: Result, _> = assets + .into_iter() + .map(|(asset, asset_data)| match asset_data { + Some(asset_data) => Ok((asset, asset_data)), + _ => Err(DbErr::RecordNotFound("Asset Not Found".to_string())), + }) + .collect(); + + let build_asset_list = filter_assets? + .into_iter() + .map(|(asset, asset_data)| async move { + let interface = match asset.specification_version { + 1 => Interface::NftOneZero, + _ => Interface::Nft, + }; + + let content = get_content(&asset, &asset_data).unwrap(); + + let authorities = asset_authority::Entity::find() + .filter(asset_authority::Column::AssetId.eq(asset.id.clone())) + .all(db) + .await + .unwrap(); + + let creators = asset_creators::Entity::find() + .filter(asset_creators::Column::AssetId.eq(asset.id.clone())) + .all(db) + .await + .unwrap(); + + let grouping = asset_grouping::Entity::find() + .filter(asset_grouping::Column::AssetId.eq(asset.id.clone())) + .all(db) + .await + .unwrap(); + + let rpc_authorities = to_authority(authorities); + let rpc_creators = to_creators(creators); + let rpc_groups = to_grouping(grouping); + + RpcAsset { + interface, + id: bs58::encode(asset.id).into_string(), + content: Some(content), + authorities: Some(rpc_authorities), + compression: Some(Compression { + eligible: asset.compressible, + compressed: asset.compressed, + }), + grouping: Some(rpc_groups), + royalty: Some(Royalty { + royalty_model: asset.royalty_target_type.into(), + target: asset.royalty_target.map(|s| bs58::encode(s).into_string()), + percent: (asset.royalty_amount as f64) * 0.0001, + locked: false, + }), + creators: Some(rpc_creators), + ownership: Ownership { + frozen: asset.frozen, + delegated: asset.delegate.is_some(), + delegate: asset.delegate.map(|s| bs58::encode(s).into_string()), + ownership_model: asset.owner_type.into(), + owner: bs58::encode(asset.owner).into_string(), + }, + } + }); + + let built_assets = futures::future::join_all(build_asset_list).await; + + let total = built_assets.len() as u32; + + let page = if page > 0 { Some(page) } else { None }; + let before = if !before.is_empty() { + Some(String::from_utf8(before).unwrap()) + } else { + None + }; + let after = if !after.is_empty() { + Some(String::from_utf8(after).unwrap()) + } else { + None + }; + + Ok(AssetList { + total, + limit, + page, + before, + after, + items: built_assets, + }) +} diff --git a/digital_asset_types/src/dapi/assets_by_owner.rs 
b/digital_asset_types/src/dapi/assets_by_owner.rs new file mode 100644 index 000000000..906dbf8fb --- /dev/null +++ b/digital_asset_types/src/dapi/assets_by_owner.rs @@ -0,0 +1,156 @@ +use crate::dao::prelude::AssetData; +use crate::dao::{asset, asset_authority, asset_creators, asset_grouping}; +use crate::rpc::filter::AssetSorting; +use crate::rpc::response::AssetList; +use crate::rpc::{Asset as RpcAsset, Compression, Interface, Ownership, Royalty}; +use sea_orm::DatabaseConnection; +use sea_orm::{entity::*, query::*, DbErr}; + +use super::asset::{get_content, to_authority, to_creators, to_grouping}; + +pub async fn get_assets_by_owner( + db: &DatabaseConnection, + owner_address: Vec, + sort_by: AssetSorting, + limit: u32, + page: u32, + before: Vec, + after: Vec, +) -> Result { + let sort_column = match sort_by { + AssetSorting::Created => asset::Column::CreatedAt, + AssetSorting::Updated => todo!(), + AssetSorting::RecentAction => todo!(), + }; + + let assets = if page > 0 { + let paginator = asset::Entity::find() + .filter(asset::Column::Owner.eq(owner_address.clone())) + .find_also_related(AssetData) + .order_by_asc(sort_column) + .paginate(db, limit.try_into().unwrap()); + + paginator.fetch_page((page - 1).try_into().unwrap()).await? + } else if !before.is_empty() { + let rows = asset::Entity::find() + .order_by_asc(sort_column) + .filter(asset::Column::Owner.eq(owner_address.clone())) + .cursor_by(asset::Column::Id) + .before(before.clone()) + .first(limit.into()) + .all(db) + .await? + .into_iter() + .map(|x| async move { + let asset_data = x.find_related(AssetData).one(db).await.unwrap(); + + (x, asset_data) + }); + + let assets = futures::future::join_all(rows).await; + assets + } else { + let rows = asset::Entity::find() + .order_by_asc(sort_column) + .filter(asset::Column::Owner.eq(owner_address.clone())) + .cursor_by(asset::Column::Id) + .after(after.clone()) + .first(limit.into()) + .all(db) + .await? + .into_iter() + .map(|x| async move { + let asset_data = x.find_related(AssetData).one(db).await.unwrap(); + + (x, asset_data) + }); + + let assets = futures::future::join_all(rows).await; + assets + }; + + let filter_assets: Result, _> = assets + .into_iter() + .map(|(asset, asset_data)| match asset_data { + Some(asset_data) => Ok((asset, asset_data)), + _ => Err(DbErr::RecordNotFound("Asset Not Found".to_string())), + }) + .collect(); + let build_asset_list = filter_assets? 
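+ // Build the RPC view of each owned asset, fetching its authorities, creators, and groupings.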
+ .into_iter() + .map(|(asset, asset_data)| async move { + let interface = match asset.specification_version { + 1 => Interface::NftOneZero, + _ => Interface::Nft, + }; + let content = get_content(&asset, &asset_data).unwrap(); + let authorities = asset_authority::Entity::find() + .filter(asset_authority::Column::AssetId.eq(asset.id.clone())) + .all(db) + .await + .unwrap(); + let creators = asset_creators::Entity::find() + .filter(asset_creators::Column::AssetId.eq(asset.id.clone())) + .all(db) + .await + .unwrap(); + let grouping = asset_grouping::Entity::find() + .filter(asset_grouping::Column::AssetId.eq(asset.id.clone())) + .all(db) + .await + .unwrap(); + let rpc_authorities = to_authority(authorities); + let rpc_creators = to_creators(creators); + let rpc_groups = to_grouping(grouping); + RpcAsset { + interface, + id: bs58::encode(asset.id).into_string(), + content: Some(content), + authorities: Some(rpc_authorities), + compression: Some(Compression { + eligible: asset.compressible, + compressed: asset.compressed, + }), + grouping: Some(rpc_groups), + royalty: Some(Royalty { + royalty_model: asset.royalty_target_type.into(), + target: asset.royalty_target.map(|s| bs58::encode(s).into_string()), + percent: (asset.royalty_amount as f64) * 0.0001, + locked: false, + }), + creators: Some(rpc_creators), + ownership: Ownership { + frozen: asset.frozen, + delegated: asset.delegate.is_some(), + delegate: asset.delegate.map(|s| bs58::encode(s).into_string()), + ownership_model: asset.owner_type.into(), + owner: bs58::encode(asset.owner).into_string(), + }, + } + }); + + let built_assets = futures::future::join_all(build_asset_list).await; + + let total = built_assets.len() as u32; + + let page = if page > 0 { Some(page) } else { None }; + let before = if !before.is_empty() { + Some(String::from_utf8(before).unwrap()) + } else { + None + }; + let after = if !after.is_empty() { + Some(String::from_utf8(after).unwrap()) + } else { + None + }; + + Ok(AssetList { + total, + limit, + page, + before, + after, + items: built_assets, + }) +} diff --git a/digital_asset_types/src/dapi/change_logs.rs b/digital_asset_types/src/dapi/change_logs.rs index bf87f6527..6797a8bf4 100644 --- a/digital_asset_types/src/dapi/change_logs.rs +++ b/digital_asset_types/src/dapi/change_logs.rs @@ -1,21 +1,19 @@ +use sea_orm::sea_query::Expr; use sea_orm::{DatabaseConnection, DbBackend}; -use std::fmt::format; -use sea_orm::sea_query::{Expr, PostgresQueryBuilder, Query}; use { crate::dao::asset, crate::dao::cl_items, - sea_orm::{entity::*, FromQueryResult, query::*, DbErr}, - concurrent_merkle_tree::utils::empty_node, crate::rpc::AssetProof, + concurrent_merkle_tree::utils::empty_node, + sea_orm::{entity::*, query::*, DbErr, FromQueryResult}, }; - #[derive(FromQueryResult, Debug, Default, Clone, Eq, PartialEq)] struct SimpleChangeLog { hash: Vec, level: i64, node_idx: i64, - seq: i64 + seq: i64, } pub async fn get_proof_for_asset( @@ -32,16 +30,21 @@ pub async fn get_proof_for_asset( ) .order_by_desc(cl_items::Column::Seq) .filter(Expr::cust("asset.tree_id = cl_items.tree")) - .filter(Expr::cust_with_values("asset.id = ?::bytea", vec![asset_id])) + .filter(Expr::cust_with_values( + "asset.id = ?::bytea", + vec![asset_id], + )) .filter(cl_items::Column::Level.eq(0i64)) - .one(db).await?; + .one(db) + .await?; if leaf.is_none() { return Err(DbErr::RecordNotFound("Asset Proof Not Found".to_string())); } let leaf = leaf.unwrap(); let req_indexes = get_required_nodes_for_proof(leaf.node_idx); let expected_proof_size = 
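+ // One proof node per tree level on the path from the leaf to the root.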
req_indexes.len(); - let mut final_node_list: Vec = vec![SimpleChangeLog::default(); expected_proof_size]; + let mut final_node_list: Vec = + vec![SimpleChangeLog::default(); expected_proof_size]; let mut query = cl_items::Entity::find() .select_only() .column(cl_items::Column::NodeIdx) @@ -55,12 +58,15 @@ pub async fn get_proof_for_asset( .order_by_desc(cl_items::Column::Id) .order_by_desc(cl_items::Column::Seq) .build(DbBackend::Postgres); - query.sql = query.sql.replace("SELECT", "SELECT DISTINCT ON (cl_items.node_idx)"); + query.sql = query + .sql + .replace("SELECT", "SELECT DISTINCT ON (cl_items.node_idx)"); println!("sql {} ", query.sql); - let nodes: Vec = db.query_all(query).await - .map(|qr| { - qr.iter().map(|q| SimpleChangeLog::from_query_result(q, "").unwrap() ).collect() - })?; + let nodes: Vec = db.query_all(query).await.map(|qr| { + qr.iter() + .map(|q| SimpleChangeLog::from_query_result(q, "").unwrap()) + .collect() + })?; if nodes.len() != expected_proof_size { for node in nodes.iter() { if node.level < final_node_list.len().try_into().unwrap() { @@ -74,7 +80,13 @@ pub async fn get_proof_for_asset( } } for n in final_node_list.iter() { - println!("level {} index {} seq {} hash {}", n.level, n.node_idx, n.seq, bs58::encode(&n.hash).into_string()); + println!( + "level {} index {} seq {} hash {}", + n.level, + n.node_idx, + n.seq, + bs58::encode(&n.hash).into_string() + ); } Ok(AssetProof { root: bs58::encode(final_node_list.pop().unwrap().hash).into_string(), @@ -88,7 +100,6 @@ pub async fn get_proof_for_asset( }) } - fn make_empty_node(lvl: i64, node_index: i64) -> SimpleChangeLog { SimpleChangeLog { node_idx: node_index, diff --git a/digital_asset_types/src/dapi/listed_assets_by_owner.rs b/digital_asset_types/src/dapi/listed_assets_by_owner.rs new file mode 100644 index 000000000..c1fc1ee8b --- /dev/null +++ b/digital_asset_types/src/dapi/listed_assets_by_owner.rs @@ -0,0 +1,118 @@ +use crate::dao::asset; +use crate::dao::prelude::AssetData; +use crate::rpc::filter::ListingSorting; +use crate::rpc::response::ListingsList; +use crate::rpc::AssetSale; +use sea_orm::DatabaseConnection; +use sea_orm::{entity::*, query::*, DbErr}; + +pub async fn get_listed_assets_by_owner( + db: &DatabaseConnection, + owner_address: Vec, + sort_by: ListingSorting, + limit: u32, + page: u32, + before: Vec, + after: Vec, +) -> Result { + let assets = if page > 0 { + let paginator = asset::Entity::find() + .filter( + Condition::all() + .add(asset::Column::Owner.eq(owner_address.clone())) + .add(asset::Column::Delegate.is_not_null()), + ) + .find_also_related(AssetData) + // .order_by_asc(sort_column) + .paginate(db, limit.try_into().unwrap()); + + paginator.fetch_page((page - 1).try_into().unwrap()).await? + } else if !before.is_empty() { + let rows = asset::Entity::find() + // .order_by_asc(sort_column) + .filter( + Condition::all() + .add(asset::Column::Owner.eq(owner_address.clone())) + .add(asset::Column::Delegate.is_not_null()), + ) + .cursor_by(asset::Column::Id) + .before(before.clone()) + .first(limit.into()) + .all(db) + .await? 
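+ // Pair each matching asset row with its related AssetData before building the response.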
+ .into_iter() + .map(|x| async move { + let asset_data = x.find_related(AssetData).one(db).await.unwrap(); + + (x, asset_data) + }); + + let assets = futures::future::join_all(rows).await; + assets + } else { + let rows = asset::Entity::find() + // .order_by_asc(sort_column) + .filter( + Condition::all() + .add(asset::Column::Owner.eq(owner_address.clone())) + .add(asset::Column::Delegate.is_not_null()), + ) + .cursor_by(asset::Column::Id) + .after(after.clone()) + .first(limit.into()) + .all(db) + .await? + .into_iter() + .map(|x| async move { + let asset_data = x.find_related(AssetData).one(db).await.unwrap(); + + (x, asset_data) + }); + + let assets = futures::future::join_all(rows).await; + assets + }; + + let filter_assets: Result, _> = assets + .into_iter() + .map(|(asset, asset_data)| match asset_data { + Some(asset_data) => Ok((asset, asset_data)), + _ => Err(DbErr::RecordNotFound("Asset Not Found".to_string())), + }) + .collect(); + let build_listings_list = filter_assets?.into_iter().map(|(asset)| async move { + AssetSale { + listing_id: todo!(), + asset_id: todo!(), + amount: todo!(), + price: todo!(), + market_id: todo!(), + highest_offers: todo!(), + } + }); + + let built_assets = futures::future::join_all(build_listings_list).await; + + let total = built_assets.len() as u32; + + let page = if page > 0 { Some(page) } else { None }; + let before = if !before.is_empty() { + Some(String::from_utf8(before).unwrap()) + } else { + None + }; + let after = if !after.is_empty() { + Some(String::from_utf8(after).unwrap()) + } else { + None + }; + + Ok(ListingsList { + total, + limit, + page, + before, + after, + items: built_assets, + }) +} diff --git a/digital_asset_types/src/dapi/mod.rs b/digital_asset_types/src/dapi/mod.rs index ac915c54f..22660779a 100644 --- a/digital_asset_types/src/dapi/mod.rs +++ b/digital_asset_types/src/dapi/mod.rs @@ -1,2 +1,7 @@ +pub mod asset; +pub mod assets_by_creator; +pub mod assets_by_group; +pub mod assets_by_owner; pub mod change_logs; -pub mod asset; \ No newline at end of file +pub mod listed_assets_by_owner; +pub mod offers_by_owner; diff --git a/digital_asset_types/src/dapi/offers_by_owner.rs b/digital_asset_types/src/dapi/offers_by_owner.rs new file mode 100644 index 000000000..e95c813da --- /dev/null +++ b/digital_asset_types/src/dapi/offers_by_owner.rs @@ -0,0 +1,104 @@ +use crate::dao::asset; +use crate::dao::prelude::AssetData; +use crate::rpc::filter::OfferSorting; +use crate::rpc::response::OfferList; +use crate::rpc::Offer; +use sea_orm::DatabaseConnection; +use sea_orm::{entity::*, query::*, DbErr}; + +pub async fn get_offers_by_owner( + db: &DatabaseConnection, + owner_address: Vec, + sort_by: OfferSorting, + limit: u32, + page: u32, + before: Vec, + after: Vec, +) -> Result { + let assets = if page > 0 { + let paginator = asset::Entity::find() + .filter(Condition::all().add(asset::Column::Owner.eq(owner_address.clone()))) + .find_also_related(AssetData) + // .order_by_asc(sort_column) + .paginate(db, limit.try_into().unwrap()); + + paginator.fetch_page((page - 1).try_into().unwrap()).await? + } else if !before.is_empty() { + let rows = asset::Entity::find() + // .order_by_asc(sort_column) + .filter(asset::Column::Owner.eq(owner_address.clone())) + .cursor_by(asset::Column::Id) + .before(before.clone()) + .first(limit.into()) + .all(db) + .await? 
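+ // As above: attach the related AssetData to every asset row returned by the cursor query.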
+ .into_iter() + .map(|x| async move { + let asset_data = x.find_related(AssetData).one(db).await.unwrap(); + + (x, asset_data) + }); + + let assets = futures::future::join_all(rows).await; + assets + } else { + let rows = asset::Entity::find() + // .order_by_asc(sort_column) + .filter(asset::Column::Owner.eq(owner_address.clone())) + .cursor_by(asset::Column::Id) + .after(after.clone()) + .first(limit.into()) + .all(db) + .await? + .into_iter() + .map(|x| async move { + let asset_data = x.find_related(AssetData).one(db).await.unwrap(); + + (x, asset_data) + }); + + let assets = futures::future::join_all(rows).await; + assets + }; + + let filter_assets: Result, _> = assets + .into_iter() + .map(|(asset, asset_data)| match asset_data { + Some(asset_data) => Ok((asset, asset_data)), + _ => Err(DbErr::RecordNotFound("Asset Not Found".to_string())), + }) + .collect(); + let build_listings_list = filter_assets?.into_iter().map(|(asset)| async move { + Offer { + from: todo!(), + amount: todo!(), + price: todo!(), + market_id: todo!(), + } + }); + + let built_assets = futures::future::join_all(build_listings_list).await; + + let total = built_assets.len() as u32; + + let page = if page > 0 { Some(page) } else { None }; + let before = if !before.is_empty() { + Some(String::from_utf8(before).unwrap()) + } else { + None + }; + let after = if !after.is_empty() { + Some(String::from_utf8(after).unwrap()) + } else { + None + }; + + Ok(OfferList { + total, + limit, + page, + before, + after, + items: built_assets, + }) +} diff --git a/digital_asset_types/src/lib.rs b/digital_asset_types/src/lib.rs index 79096fe9f..84286da77 100644 --- a/digital_asset_types/src/lib.rs +++ b/digital_asset_types/src/lib.rs @@ -8,3 +8,5 @@ pub mod dapi; pub mod json; #[cfg(feature = "json_types")] pub mod rpc; +#[cfg(feature = "mock")] +pub mod tests; \ No newline at end of file diff --git a/digital_asset_types/src/rpc/asset.rs b/digital_asset_types/src/rpc/asset.rs index f52542d3d..5448601bc 100644 --- a/digital_asset_types/src/rpc/asset.rs +++ b/digital_asset_types/src/rpc/asset.rs @@ -1,13 +1,10 @@ +#[cfg(feature = "sql_types")] +use crate::dao::sea_orm_active_enums::{ChainMutability, Mutability, OwnerType, RoyaltyTargetType}; use std::str::FromStr; use { serde::{Deserialize, Serialize}, std::collections::HashMap, }; -#[cfg(feature = "sql_types")] -use crate::dao::{ - sea_orm_active_enums::{Mutability,ChainMutability,OwnerType,RoyaltyTargetType} -}; - #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct AssetProof { @@ -107,12 +104,11 @@ impl From for Scope { "royalty" => Scope::Royalty, "metadata" => Scope::Metadata, "extension" => Scope::Extension, - _ => Scope::Full + _ => Scope::Full, } } } - #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct Authority { pub address: String, @@ -150,7 +146,7 @@ impl From for RoyaltyModel { "creators" => RoyaltyModel::Creators, "fanout" => RoyaltyModel::Fanout, "single" => RoyaltyModel::Single, - _ => RoyaltyModel::Creators + _ => RoyaltyModel::Creators, } } } @@ -162,13 +158,11 @@ impl From for RoyaltyModel { RoyaltyTargetType::Creators => RoyaltyModel::Creators, RoyaltyTargetType::Fanout => RoyaltyModel::Fanout, RoyaltyTargetType::Single => RoyaltyModel::Single, - _ => RoyaltyModel::Creators + _ => RoyaltyModel::Creators, } } } - - #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct Royalty { pub royalty_model: RoyaltyModel, @@ -196,13 +190,12 @@ pub enum OwnershipModel { Token, } - impl From for OwnershipModel { fn from(s: 
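+ // Matches the lowercase model names used by the RPC layer; unknown values fall back to Single.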
String) -> Self { match &*s { "single" => OwnershipModel::Single, "token" => OwnershipModel::Token, - _ => OwnershipModel::Single + _ => OwnershipModel::Single, } } } @@ -213,7 +206,7 @@ impl From for OwnershipModel { match s { OwnerType::Token => OwnershipModel::Token, OwnerType::Single => OwnershipModel::Single, - _ => OwnershipModel::Single + _ => OwnershipModel::Single, } } } diff --git a/digital_asset_types/src/tests/get_asset_by_id.rs b/digital_asset_types/src/tests/get_asset_by_id.rs new file mode 100644 index 000000000..7302c78c0 --- /dev/null +++ b/digital_asset_types/src/tests/get_asset_by_id.rs @@ -0,0 +1,122 @@ +#[cfg(test)] +mod get_asset_by_id { + use sea_orm::{entity::prelude::*, DatabaseBackend, MockDatabase}; + use solana_sdk::{signature::Keypair, signer::Signer}; + + use crate::{ + adapter::{Creator, TokenProgramVersion, TokenStandard}, + dao::{ + asset, asset_authority, asset_creators, asset_data, + prelude::AssetData, + sea_orm_active_enums::{OwnerType, RoyaltyTargetType}, + }, + tests::{ + create_asset, create_asset_authority, create_asset_creator, create_asset_data, + MetadataArgs, + }, + }; + + #[cfg(feature = "mock")] + #[tokio::test] + async fn get_asset_by_id() -> Result<(), DbErr> { + let id = Keypair::new().pubkey(); + let owner = Keypair::new().pubkey(); + let update_authority = Keypair::new().pubkey(); + let creator_1 = Keypair::new().pubkey(); + let uri = Keypair::new().pubkey(); + + let metadata_1 = MetadataArgs { + name: String::from("Test #1"), + symbol: String::from("BUBBLE"), + uri: uri.to_string(), + primary_sale_happened: true, + is_mutable: true, + edition_nonce: None, + token_standard: Some(TokenStandard::NonFungible), + collection: None, + uses: None, + token_program_version: TokenProgramVersion::Original, + creators: vec![Creator { + address: creator_1, + share: 100, + verified: true, + }] + .to_vec(), + seller_fee_basis_points: 100, + }; + + let asset_data_1 = create_asset_data(metadata_1.clone(), 1); + let asset_1 = create_asset( + id.to_bytes().to_vec(), + owner.to_bytes().to_vec(), + OwnerType::Single, + None, + false, + 1, + None, + true, + false, + None, + 1, + 0 as i64, + None, + RoyaltyTargetType::Creators, + None, + metadata_1.seller_fee_basis_points as i32, + Some(1), + ); + + let asset_creator_1_1 = create_asset_creator( + id.to_bytes().to_vec(), + metadata_1.creators[0].address.to_bytes().to_vec(), + 100, + true, + 1, + ); + + let asset_authority_1 = create_asset_authority( + id.to_bytes().to_vec(), + update_authority.to_bytes().to_vec(), + 1, + ); + + let db = MockDatabase::new(DatabaseBackend::Postgres) + .append_query_results(vec![vec![asset_data_1.1.clone()]]) + .append_query_results(vec![vec![asset_1.1.clone()]]) + .append_query_results(vec![vec![asset_creator_1_1.1]]) + .append_query_results(vec![vec![asset_authority_1.1]]) + .append_query_results(vec![vec![(asset_1.1.clone(), asset_data_1.1.clone())]]) + .into_connection(); + + let insert_result = asset_data::Entity::insert(asset_data_1.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 1); + + let insert_result = asset::Entity::insert(asset_1.0).exec(&db).await.unwrap(); + assert_eq!(insert_result.last_insert_id, id.to_bytes().to_vec()); + + let insert_result = asset_creators::Entity::insert(asset_creator_1_1.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 1); + + let insert_result = asset_authority::Entity::insert(asset_authority_1.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 1); 
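+ // MockDatabase pops appended query results in order, one result set per executed statement.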
+ + assert_eq!( + asset::Entity::find_by_id(id.to_bytes().to_vec()) + .find_also_related(AssetData) + .one(&db) + .await?, + Some((asset_1.1.clone(), Some(asset_data_1.1.clone()))) + ); + + Ok(()) + } +} diff --git a/digital_asset_types/src/tests/get_assets_by_creator.rs b/digital_asset_types/src/tests/get_assets_by_creator.rs new file mode 100644 index 000000000..fa1161cda --- /dev/null +++ b/digital_asset_types/src/tests/get_assets_by_creator.rs @@ -0,0 +1,331 @@ +#[cfg(test)] +mod get_assets_by_creator { + use sea_orm::{ + entity::prelude::*, Condition, DatabaseBackend, JoinType, MockDatabase, QuerySelect, + }; + use solana_sdk::{signature::Keypair, signer::Signer}; + + use crate::{ + adapter::{Creator, TokenProgramVersion, TokenStandard}, + dao::{ + asset, asset_authority, asset_creators, asset_data, + prelude::AssetData, + sea_orm_active_enums::{OwnerType, RoyaltyTargetType}, + }, + tests::{ + create_asset, create_asset_authority, create_asset_creator, create_asset_data, + MetadataArgs, + }, + }; + + #[cfg(feature = "mock")] + #[tokio::test] + async fn get_assets_by_creator() -> Result<(), DbErr> { + let id_1 = Keypair::new().pubkey(); + let owner_1 = Keypair::new().pubkey(); + let update_authority_1 = Keypair::new().pubkey(); + let creator_1 = Keypair::new().pubkey(); + let uri_1 = Keypair::new().pubkey(); + + let id_2 = Keypair::new().pubkey(); + let owner_2 = Keypair::new().pubkey(); + let update_authority_2 = Keypair::new().pubkey(); + let creator_2 = Keypair::new().pubkey(); + let uri_2 = Keypair::new().pubkey(); + + let id_3 = Keypair::new().pubkey(); + let update_authority_3 = Keypair::new().pubkey(); + let creator_3 = Keypair::new().pubkey(); + let uri_3 = Keypair::new().pubkey(); + + let metadata_1 = MetadataArgs { + name: String::from("Test #1"), + symbol: String::from("BUBBLE"), + uri: uri_1.to_string(), + primary_sale_happened: true, + is_mutable: true, + edition_nonce: None, + token_standard: Some(TokenStandard::NonFungible), + collection: None, + uses: None, + token_program_version: TokenProgramVersion::Original, + creators: vec![Creator { + address: creator_1, + share: 100, + verified: true, + }] + .to_vec(), + seller_fee_basis_points: 100, + }; + + let asset_data_1 = create_asset_data(metadata_1.clone(), 1); + let asset_1 = create_asset( + id_1.to_bytes().to_vec(), + owner_1.to_bytes().to_vec(), + OwnerType::Single, + None, + false, + 1, + None, + true, + false, + None, + 1, + 0 as i64, + None, + RoyaltyTargetType::Creators, + None, + metadata_1.seller_fee_basis_points as i32, + Some(1), + ); + + let asset_creator_1_1 = create_asset_creator( + id_1.to_bytes().to_vec(), + metadata_1.creators[0].address.to_bytes().to_vec(), + 100, + true, + 1, + ); + + let asset_authority_1 = create_asset_authority( + id_1.to_bytes().to_vec(), + update_authority_1.to_bytes().to_vec(), + 1, + ); + + let metadata_2 = MetadataArgs { + name: String::from("Test #2"), + symbol: String::from("BUBBLE"), + uri: uri_2.to_string(), + primary_sale_happened: true, + is_mutable: true, + edition_nonce: None, + token_standard: Some(TokenStandard::NonFungible), + collection: None, + uses: None, + token_program_version: TokenProgramVersion::Original, + creators: vec![Creator { + address: creator_2, + share: 100, + verified: true, + }] + .to_vec(), + seller_fee_basis_points: 100, + }; + + let asset_data_2 = create_asset_data(metadata_2.clone(), 2); + let asset_2 = create_asset( + id_2.to_bytes().to_vec(), + owner_2.to_bytes().to_vec(), + OwnerType::Single, + None, + false, + 1, + None, + true, + false, + 
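+ // Remaining columns use the same defaults as asset_1; only ids, owner, and seq differ.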
None, + 1, + 0 as i64, + None, + RoyaltyTargetType::Creators, + None, + metadata_2.seller_fee_basis_points as i32, + Some(2), + ); + + let asset_creator_2_1 = create_asset_creator( + id_2.to_bytes().to_vec(), + metadata_2.creators[0].address.to_bytes().to_vec(), + 100, + true, + 2, + ); + + let asset_authority_2 = create_asset_authority( + id_2.to_bytes().to_vec(), + update_authority_2.to_bytes().to_vec(), + 2, + ); + + let metadata_3 = MetadataArgs { + name: String::from("Test #3"), + symbol: String::from("BUBBLE"), + uri: uri_3.to_string(), + primary_sale_happened: true, + is_mutable: true, + edition_nonce: None, + token_standard: Some(TokenStandard::NonFungible), + collection: None, + uses: None, + token_program_version: TokenProgramVersion::Original, + creators: vec![ + Creator { + address: creator_2, + share: 10, + verified: true, + }, + Creator { + address: creator_3, + share: 90, + verified: true, + }, + ] + .to_vec(), + seller_fee_basis_points: 100, + }; + + let asset_data_3 = create_asset_data(metadata_3.clone(), 3); + let asset_3 = create_asset( + id_3.to_bytes().to_vec(), + owner_2.to_bytes().to_vec(), + OwnerType::Single, + None, + false, + 1, + None, + true, + false, + None, + 1, + 0 as i64, + None, + RoyaltyTargetType::Creators, + None, + metadata_3.seller_fee_basis_points as i32, + Some(3), + ); + + let asset_creator_3_1 = create_asset_creator( + id_3.to_bytes().to_vec(), + metadata_3.creators[0].address.to_bytes().to_vec(), + 10, + true, + 3, + ); + + let asset_creator_3_2 = create_asset_creator( + id_3.to_bytes().to_vec(), + metadata_3.creators[1].address.to_bytes().to_vec(), + 90, + true, + 4, + ); + + let asset_authority_3 = create_asset_authority( + id_3.to_bytes().to_vec(), + update_authority_3.to_bytes().to_vec(), + 3, + ); + + let db = MockDatabase::new(DatabaseBackend::Postgres) + .append_query_results(vec![vec![asset_data_1.1]]) + .append_query_results(vec![vec![asset_1.1]]) + .append_query_results(vec![vec![asset_creator_1_1.1]]) + .append_query_results(vec![vec![asset_authority_1.1]]) + .append_query_results(vec![vec![asset_data_2.1.clone()]]) + .append_query_results(vec![vec![asset_2.1.clone()]]) + .append_query_results(vec![vec![asset_creator_2_1.1]]) + .append_query_results(vec![vec![asset_authority_2.1]]) + .append_query_results(vec![vec![asset_data_3.1.clone()]]) + .append_query_results(vec![vec![asset_3.1.clone()]]) + .append_query_results(vec![vec![asset_creator_3_1.1]]) + .append_query_results(vec![vec![asset_creator_3_2.1]]) + .append_query_results(vec![vec![asset_authority_3.1]]) + .append_query_results(vec![vec![ + (asset_2.1.clone(), asset_data_2.1.clone()), + (asset_3.1.clone(), asset_data_3.1.clone()), + ]]) + .into_connection(); + + let insert_result = asset_data::Entity::insert(asset_data_1.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 1); + + let insert_result = asset::Entity::insert(asset_1.0).exec(&db).await.unwrap(); + assert_eq!(insert_result.last_insert_id, id_1.to_bytes().to_vec()); + + let insert_result = asset_creators::Entity::insert(asset_creator_1_1.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 1); + + let insert_result = asset_authority::Entity::insert(asset_authority_1.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 1); + + let insert_result = asset_data::Entity::insert(asset_data_2.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 2); + + let insert_result = 
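+ // Each insert consumes the next mocked execution result in order.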
asset::Entity::insert(asset_2.0).exec(&db).await.unwrap(); + assert_eq!(insert_result.last_insert_id, id_2.to_bytes().to_vec()); + + let insert_result = asset_creators::Entity::insert(asset_creator_2_1.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 2); + + let insert_result = asset_authority::Entity::insert(asset_authority_2.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 2); + + let insert_result = asset_data::Entity::insert(asset_data_3.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 3); + + let insert_result = asset::Entity::insert(asset_3.0).exec(&db).await.unwrap(); + assert_eq!(insert_result.last_insert_id, id_3.to_bytes().to_vec()); + + let insert_result = asset_creators::Entity::insert(asset_creator_3_1.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 3); + + let insert_result = asset_creators::Entity::insert(asset_creator_3_2.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 4); + + let insert_result = asset_authority::Entity::insert(asset_authority_3.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 3); + + assert_eq!( + asset::Entity::find() + .join( + JoinType::LeftJoin, + asset::Entity::has_many(asset_creators::Entity).into(), + ) + .filter( + Condition::any() + .add(asset_creators::Column::Creator.eq(creator_2.to_bytes().to_vec())), + ) + .find_also_related(AssetData) + .all(&db) + .await?, + vec![ + (asset_2.1.clone(), Some(asset_data_2.1.clone())), + (asset_3.1.clone(), Some(asset_data_3.1.clone())) + ] + ); + + Ok(()) + } +} diff --git a/digital_asset_types/src/tests/get_assets_by_group.rs b/digital_asset_types/src/tests/get_assets_by_group.rs new file mode 100644 index 000000000..e3acbd59e --- /dev/null +++ b/digital_asset_types/src/tests/get_assets_by_group.rs @@ -0,0 +1,352 @@ +#[cfg(test)] +mod get_assets_by_group { + use sea_orm::{ + entity::prelude::*, Condition, DatabaseBackend, JoinType, MockDatabase, QuerySelect, + }; + use solana_sdk::{signature::Keypair, signer::Signer}; + + use crate::{ + adapter::{Creator, TokenProgramVersion, TokenStandard}, + dao::{ + asset, asset_authority, asset_creators, asset_data, + prelude::AssetData, + sea_orm_active_enums::{OwnerType, RoyaltyTargetType}, + }, + tests::{ + create_asset, create_asset_authority, create_asset_creator, create_asset_data, + create_asset_grouping, MetadataArgs, + }, + }; + + #[cfg(feature = "mock")] + #[tokio::test] + async fn get_assets_by_group() -> Result<(), DbErr> { + use crate::dao::asset_grouping; + + let id_1 = Keypair::new().pubkey(); + let owner_1 = Keypair::new().pubkey(); + let update_authority_1 = Keypair::new().pubkey(); + let creator_1 = Keypair::new().pubkey(); + let uri_1 = Keypair::new().pubkey(); + + let id_2 = Keypair::new().pubkey(); + let owner_2 = Keypair::new().pubkey(); + let update_authority_2 = Keypair::new().pubkey(); + let creator_2 = Keypair::new().pubkey(); + let uri_2 = Keypair::new().pubkey(); + + let id_3 = Keypair::new().pubkey(); + let update_authority_3 = Keypair::new().pubkey(); + let creator_3 = Keypair::new().pubkey(); + let uri_3 = Keypair::new().pubkey(); + + let collection = Keypair::new().pubkey(); + + let metadata_1 = MetadataArgs { + name: String::from("Test #1"), + symbol: String::from("BUBBLE"), + uri: uri_1.to_string(), + primary_sale_happened: true, + is_mutable: true, + edition_nonce: None, + token_standard: Some(TokenStandard::NonFungible), + collection: None, + 
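+            // Collection membership in this test is modeled via the
+            // asset_grouping rows created below with create_asset_grouping,
+            // not via this metadata field, so it stays None here.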
uses: None, + token_program_version: TokenProgramVersion::Original, + creators: vec![Creator { + address: creator_1, + share: 100, + verified: true, + }] + .to_vec(), + seller_fee_basis_points: 100, + }; + + let asset_data_1 = create_asset_data(metadata_1.clone(), 1); + let asset_1 = create_asset( + id_1.to_bytes().to_vec(), + owner_1.to_bytes().to_vec(), + OwnerType::Single, + None, + false, + 1, + None, + true, + false, + None, + 1, + 0 as i64, + None, + RoyaltyTargetType::Creators, + None, + metadata_1.seller_fee_basis_points as i32, + Some(1), + ); + + let asset_creator_1_1 = create_asset_creator( + id_1.to_bytes().to_vec(), + metadata_1.creators[0].address.to_bytes().to_vec(), + 100, + true, + 1, + ); + + let asset_authority_1 = create_asset_authority( + id_1.to_bytes().to_vec(), + update_authority_1.to_bytes().to_vec(), + 1, + ); + + let metadata_2 = MetadataArgs { + name: String::from("Test #2"), + symbol: String::from("BUBBLE"), + uri: uri_2.to_string(), + primary_sale_happened: true, + is_mutable: true, + edition_nonce: None, + token_standard: Some(TokenStandard::NonFungible), + collection: None, + uses: None, + token_program_version: TokenProgramVersion::Original, + creators: vec![Creator { + address: creator_2, + share: 100, + verified: true, + }] + .to_vec(), + seller_fee_basis_points: 100, + }; + + let asset_data_2 = create_asset_data(metadata_2.clone(), 2); + let asset_2 = create_asset( + id_2.to_bytes().to_vec(), + owner_2.to_bytes().to_vec(), + OwnerType::Single, + None, + false, + 1, + None, + true, + false, + None, + 1, + 0 as i64, + None, + RoyaltyTargetType::Creators, + None, + metadata_2.seller_fee_basis_points as i32, + Some(2), + ); + + let asset_creator_2_1 = create_asset_creator( + id_2.to_bytes().to_vec(), + metadata_2.creators[0].address.to_bytes().to_vec(), + 100, + true, + 2, + ); + + let asset_authority_2 = create_asset_authority( + id_2.to_bytes().to_vec(), + update_authority_2.to_bytes().to_vec(), + 2, + ); + + let asset_grouping_2 = + create_asset_grouping(id_2.to_bytes().to_vec(), collection.clone(), 1); + + let metadata_3 = MetadataArgs { + name: String::from("Test #3"), + symbol: String::from("BUBBLE"), + uri: uri_3.to_string(), + primary_sale_happened: true, + is_mutable: true, + edition_nonce: None, + token_standard: Some(TokenStandard::NonFungible), + collection: None, + uses: None, + token_program_version: TokenProgramVersion::Original, + creators: vec![ + Creator { + address: creator_2, + share: 10, + verified: true, + }, + Creator { + address: creator_3, + share: 90, + verified: true, + }, + ] + .to_vec(), + seller_fee_basis_points: 100, + }; + + let asset_data_3 = create_asset_data(metadata_3.clone(), 3); + let asset_3 = create_asset( + id_3.to_bytes().to_vec(), + owner_2.to_bytes().to_vec(), + OwnerType::Single, + None, + false, + 1, + None, + true, + false, + None, + 1, + 0 as i64, + None, + RoyaltyTargetType::Creators, + None, + metadata_3.seller_fee_basis_points as i32, + Some(3), + ); + + let asset_creator_3_1 = create_asset_creator( + id_3.to_bytes().to_vec(), + metadata_3.creators[0].address.to_bytes().to_vec(), + 10, + true, + 3, + ); + + let asset_creator_3_2 = create_asset_creator( + id_3.to_bytes().to_vec(), + metadata_3.creators[1].address.to_bytes().to_vec(), + 90, + true, + 4, + ); + + let asset_authority_3 = create_asset_authority( + id_3.to_bytes().to_vec(), + update_authority_3.to_bytes().to_vec(), + 3, + ); + + let asset_grouping_3 = + create_asset_grouping(id_3.to_bytes().to_vec(), collection.clone(), 2); + + let db = 
MockDatabase::new(DatabaseBackend::Postgres) + .append_query_results(vec![vec![asset_data_1.1]]) + .append_query_results(vec![vec![asset_1.1]]) + .append_query_results(vec![vec![asset_creator_1_1.1]]) + .append_query_results(vec![vec![asset_authority_1.1]]) + .append_query_results(vec![vec![asset_data_2.1.clone()]]) + .append_query_results(vec![vec![asset_2.1.clone()]]) + .append_query_results(vec![vec![asset_creator_2_1.1]]) + .append_query_results(vec![vec![asset_authority_2.1]]) + .append_query_results(vec![vec![asset_grouping_2.1]]) + .append_query_results(vec![vec![asset_data_3.1.clone()]]) + .append_query_results(vec![vec![asset_3.1.clone()]]) + .append_query_results(vec![vec![asset_creator_3_1.1]]) + .append_query_results(vec![vec![asset_creator_3_2.1]]) + .append_query_results(vec![vec![asset_authority_3.1]]) + .append_query_results(vec![vec![asset_grouping_3.1]]) + .append_query_results(vec![vec![ + (asset_2.1.clone(), asset_data_2.1.clone()), + (asset_3.1.clone(), asset_data_3.1.clone()), + ]]) + .into_connection(); + + let insert_result = asset_data::Entity::insert(asset_data_1.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 1); + + let insert_result = asset::Entity::insert(asset_1.0).exec(&db).await.unwrap(); + assert_eq!(insert_result.last_insert_id, id_1.to_bytes().to_vec()); + + let insert_result = asset_creators::Entity::insert(asset_creator_1_1.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 1); + + let insert_result = asset_authority::Entity::insert(asset_authority_1.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 1); + + let insert_result = asset_data::Entity::insert(asset_data_2.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 2); + + let insert_result = asset::Entity::insert(asset_2.0).exec(&db).await.unwrap(); + assert_eq!(insert_result.last_insert_id, id_2.to_bytes().to_vec()); + + let insert_result = asset_creators::Entity::insert(asset_creator_2_1.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 2); + + let insert_result = asset_authority::Entity::insert(asset_authority_2.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 2); + + let insert_result = asset_grouping::Entity::insert(asset_grouping_2.0) + .exec(&db) + .await?; + assert_eq!(insert_result.last_insert_id, 1); + + let insert_result = asset_data::Entity::insert(asset_data_3.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 3); + + let insert_result = asset::Entity::insert(asset_3.0).exec(&db).await.unwrap(); + assert_eq!(insert_result.last_insert_id, id_3.to_bytes().to_vec()); + + let insert_result = asset_creators::Entity::insert(asset_creator_3_1.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 3); + + let insert_result = asset_creators::Entity::insert(asset_creator_3_2.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 4); + + let insert_result = asset_authority::Entity::insert(asset_authority_3.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 3); + + let insert_result = asset_grouping::Entity::insert(asset_grouping_3.0) + .exec(&db) + .await?; + assert_eq!(insert_result.last_insert_id, 2); + + assert_eq!( + asset::Entity::find() + .join( + JoinType::LeftJoin, + asset::Entity::has_many(asset_grouping::Entity).into(), + ) + .filter(Condition::any().add( + 
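+                    // group_value is stored bs58-encoded (see
+                    // create_asset_grouping in tests/mod.rs), so the
+                    // collection key must be encoded the same way here.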
asset_grouping::Column::GroupValue.eq(bs58::encode(collection).into_string()), + )) + .find_also_related(AssetData) + .all(&db) + .await?, + vec![ + (asset_2.1.clone(), Some(asset_data_2.1.clone())), + (asset_3.1.clone(), Some(asset_data_3.1.clone())) + ] + ); + + Ok(()) + } +} diff --git a/digital_asset_types/src/tests/get_assets_by_owner.rs b/digital_asset_types/src/tests/get_assets_by_owner.rs new file mode 100644 index 000000000..b3909c5f5 --- /dev/null +++ b/digital_asset_types/src/tests/get_assets_by_owner.rs @@ -0,0 +1,322 @@ +#[cfg(test)] +mod get_assets_by_owner { + use sea_orm::{ + entity::prelude::*, Condition, DatabaseBackend, JoinType, MockDatabase, QuerySelect, + }; + use solana_sdk::{signature::Keypair, signer::Signer}; + + use crate::{ + adapter::{Creator, TokenProgramVersion, TokenStandard}, + dao::{ + asset, asset_authority, asset_creators, asset_data, + prelude::AssetData, + sea_orm_active_enums::{OwnerType, RoyaltyTargetType}, + }, + tests::{ + create_asset, create_asset_authority, create_asset_creator, create_asset_data, + MetadataArgs, + }, + }; + + #[cfg(feature = "mock")] + #[tokio::test] + async fn get_assets_by_owner() -> Result<(), DbErr> { + let id_1 = Keypair::new().pubkey(); + let owner_1 = Keypair::new().pubkey(); + let update_authority_1 = Keypair::new().pubkey(); + let creator_1 = Keypair::new().pubkey(); + let uri_1 = Keypair::new().pubkey(); + + let id_2 = Keypair::new().pubkey(); + let owner_2 = Keypair::new().pubkey(); + let update_authority_2 = Keypair::new().pubkey(); + let creator_2 = Keypair::new().pubkey(); + let uri_2 = Keypair::new().pubkey(); + + let id_3 = Keypair::new().pubkey(); + let update_authority_3 = Keypair::new().pubkey(); + let creator_3 = Keypair::new().pubkey(); + let uri_3 = Keypair::new().pubkey(); + let metadata_1 = MetadataArgs { + name: String::from("Test #1"), + symbol: String::from("BUBBLE"), + uri: uri_1.to_string(), + primary_sale_happened: true, + is_mutable: true, + edition_nonce: None, + token_standard: Some(TokenStandard::NonFungible), + collection: None, + uses: None, + token_program_version: TokenProgramVersion::Original, + creators: vec![Creator { + address: creator_1, + share: 100, + verified: true, + }] + .to_vec(), + seller_fee_basis_points: 100, + }; + + let asset_data_1 = create_asset_data(metadata_1.clone(), 1); + let asset_1 = create_asset( + id_1.to_bytes().to_vec(), + owner_1.to_bytes().to_vec(), + OwnerType::Single, + None, + false, + 1, + None, + true, + false, + None, + 1, + 0 as i64, + None, + RoyaltyTargetType::Creators, + None, + metadata_1.seller_fee_basis_points as i32, + Some(1), + ); + + let asset_creator_1_1 = create_asset_creator( + id_1.to_bytes().to_vec(), + metadata_1.creators[0].address.to_bytes().to_vec(), + 100, + true, + 1, + ); + + let asset_authority_1 = create_asset_authority( + id_1.to_bytes().to_vec(), + update_authority_1.to_bytes().to_vec(), + 1, + ); + + let metadata_2 = MetadataArgs { + name: String::from("Test #2"), + symbol: String::from("BUBBLE"), + uri: uri_2.to_string(), + primary_sale_happened: true, + is_mutable: true, + edition_nonce: None, + token_standard: Some(TokenStandard::NonFungible), + collection: None, + uses: None, + token_program_version: TokenProgramVersion::Original, + creators: vec![Creator { + address: creator_2, + share: 100, + verified: true, + }] + .to_vec(), + seller_fee_basis_points: 100, + }; + + let asset_data_2 = create_asset_data(metadata_2.clone(), 2); + let asset_2 = create_asset( + id_2.to_bytes().to_vec(), + owner_2.to_bytes().to_vec(), + 
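+            // owner_2 also owns asset_3 below; the Owner filter asserted at
+            // the end of this test expects both assets back.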
OwnerType::Single, + None, + false, + 1, + None, + true, + false, + None, + 1, + 0 as i64, + None, + RoyaltyTargetType::Creators, + None, + metadata_2.seller_fee_basis_points as i32, + Some(2), + ); + + let asset_creator_2_1 = create_asset_creator( + id_2.to_bytes().to_vec(), + metadata_2.creators[0].address.to_bytes().to_vec(), + 100, + true, + 2, + ); + + let asset_authority_2 = create_asset_authority( + id_2.to_bytes().to_vec(), + update_authority_2.to_bytes().to_vec(), + 2, + ); + + let metadata_3 = MetadataArgs { + name: String::from("Test #3"), + symbol: String::from("BUBBLE"), + uri: uri_3.to_string(), + primary_sale_happened: true, + is_mutable: true, + edition_nonce: None, + token_standard: Some(TokenStandard::NonFungible), + collection: None, + uses: None, + token_program_version: TokenProgramVersion::Original, + creators: vec![ + Creator { + address: creator_2, + share: 10, + verified: true, + }, + Creator { + address: creator_3, + share: 90, + verified: true, + }, + ] + .to_vec(), + seller_fee_basis_points: 100, + }; + + let asset_data_3 = create_asset_data(metadata_3.clone(), 3); + let asset_3 = create_asset( + id_3.to_bytes().to_vec(), + owner_2.to_bytes().to_vec(), + OwnerType::Single, + None, + false, + 1, + None, + true, + false, + None, + 1, + 0 as i64, + None, + RoyaltyTargetType::Creators, + None, + metadata_3.seller_fee_basis_points as i32, + Some(3), + ); + + let asset_creator_3_1 = create_asset_creator( + id_3.to_bytes().to_vec(), + metadata_3.creators[0].address.to_bytes().to_vec(), + 10, + true, + 3, + ); + + let asset_creator_3_2 = create_asset_creator( + id_3.to_bytes().to_vec(), + metadata_3.creators[1].address.to_bytes().to_vec(), + 90, + true, + 4, + ); + + let asset_authority_3 = create_asset_authority( + id_3.to_bytes().to_vec(), + update_authority_3.to_bytes().to_vec(), + 3, + ); + + let db = MockDatabase::new(DatabaseBackend::Postgres) + .append_query_results(vec![vec![asset_data_1.1]]) + .append_query_results(vec![vec![asset_1.1]]) + .append_query_results(vec![vec![asset_creator_1_1.1]]) + .append_query_results(vec![vec![asset_authority_1.1]]) + .append_query_results(vec![vec![asset_data_2.1.clone()]]) + .append_query_results(vec![vec![asset_2.1.clone()]]) + .append_query_results(vec![vec![asset_creator_2_1.1]]) + .append_query_results(vec![vec![asset_authority_2.1]]) + .append_query_results(vec![vec![asset_data_3.1.clone()]]) + .append_query_results(vec![vec![asset_3.1.clone()]]) + .append_query_results(vec![vec![asset_creator_3_1.1]]) + .append_query_results(vec![vec![asset_creator_3_2.1]]) + .append_query_results(vec![vec![asset_authority_3.1]]) + .append_query_results(vec![vec![ + (asset_2.1.clone(), asset_data_2.1.clone()), + (asset_3.1.clone(), asset_data_3.1.clone()), + ]]) + .into_connection(); + + let insert_result = asset_data::Entity::insert(asset_data_1.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 1); + + let insert_result = asset::Entity::insert(asset_1.0).exec(&db).await.unwrap(); + assert_eq!(insert_result.last_insert_id, id_1.to_bytes().to_vec()); + + let insert_result = asset_creators::Entity::insert(asset_creator_1_1.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 1); + + let insert_result = asset_authority::Entity::insert(asset_authority_1.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 1); + + let insert_result = asset_data::Entity::insert(asset_data_2.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 2); 
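+        // Note on the mock pattern used in these tests: each create_* helper
+        // returns an (ActiveModel, Model) pair, where .0 is inserted and .1
+        // is the expected row. Because sea-orm issues INSERT ... RETURNING on
+        // DatabaseBackend::Postgres, every insert(...).exec(&db) consumes the
+        // next result set queued via append_query_results (FIFO) and reads
+        // last_insert_id from the returned model; the final queued result set
+        // of (asset, asset_data) tuples is left for the find_also_related
+        // query asserted at the end.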
+ + let insert_result = asset::Entity::insert(asset_2.0).exec(&db).await.unwrap(); + assert_eq!(insert_result.last_insert_id, id_2.to_bytes().to_vec()); + + let insert_result = asset_creators::Entity::insert(asset_creator_2_1.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 2); + + let insert_result = asset_authority::Entity::insert(asset_authority_2.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 2); + + let insert_result = asset_data::Entity::insert(asset_data_3.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 3); + + let insert_result = asset::Entity::insert(asset_3.0).exec(&db).await.unwrap(); + assert_eq!(insert_result.last_insert_id, id_3.to_bytes().to_vec()); + + let insert_result = asset_creators::Entity::insert(asset_creator_3_1.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 3); + + let insert_result = asset_creators::Entity::insert(asset_creator_3_2.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 4); + + let insert_result = asset_authority::Entity::insert(asset_authority_3.0) + .exec(&db) + .await + .unwrap(); + assert_eq!(insert_result.last_insert_id, 3); + + assert_eq!( + asset::Entity::find() + .filter(asset::Column::Owner.eq(owner_2.to_bytes().to_vec())) + .find_also_related(AssetData) + .all(&db) + .await?, + vec![ + (asset_2.1.clone(), Some(asset_data_2.1.clone())), + (asset_3.1.clone(), Some(asset_data_3.1.clone())) + ] + ); + Ok(()) + } +} diff --git a/digital_asset_types/src/tests/mod.rs b/digital_asset_types/src/tests/mod.rs new file mode 100644 index 000000000..09f64dc38 --- /dev/null +++ b/digital_asset_types/src/tests/mod.rs @@ -0,0 +1,226 @@ +mod get_asset_by_id; +mod get_assets_by_creator; +mod get_assets_by_group; +mod get_assets_by_owner; + +pub use get_asset_by_id::*; +pub use get_assets_by_creator::*; +pub use get_assets_by_group::*; +pub use get_assets_by_owner::*; +use sea_orm::{JsonValue, Set}; +use solana_sdk::{signature::Keypair, signer::Signer, pubkey::Pubkey}; + +use crate::{ + adapter::{Collection, Creator, TokenProgramVersion, TokenStandard, Uses}, + dao::{ + asset, asset_authority, asset_creators, asset_data, asset_grouping, + sea_orm_active_enums::{ChainMutability, Mutability, OwnerType, RoyaltyTargetType}, + }, + json::ChainDataV1, +}; + +#[derive(Clone)] +pub struct MetadataArgs { + /// The name of the asset + pub name: String, + /// The symbol for the asset + pub symbol: String, + /// URI pointing to JSON representing the asset + pub uri: String, + /// Royalty basis points that goes to creators in secondary sales (0-10000) + pub seller_fee_basis_points: u16, + // Immutable, once flipped, all sales of this metadata are considered secondary. + pub primary_sale_happened: bool, + // Whether or not the data struct is mutable, default is not + pub is_mutable: bool, + /// nonce for easy calculation of editions, if present + pub edition_nonce: Option, + /// Since we cannot easily change Metadata, we add the new DataV2 fields here at the end. 
+ pub token_standard: Option, + /// Collection + pub collection: Option, + /// Uses + pub uses: Option, + pub token_program_version: TokenProgramVersion, + pub creators: Vec, +} + +pub fn create_asset_data( + metadata: MetadataArgs, + row_num: i64, +) -> (asset_data::ActiveModel, asset_data::Model) { + let chain_data = ChainDataV1 { + name: metadata.name, + symbol: metadata.symbol, + edition_nonce: metadata.edition_nonce, + primary_sale_happened: metadata.primary_sale_happened, + token_standard: metadata.token_standard, + uses: None, + }; + + let chain_data_json = serde_json::to_value(chain_data).unwrap(); + + let chain_mutability = match metadata.is_mutable { + true => ChainMutability::Mutable, + false => ChainMutability::Immutable, + }; + + ( + asset_data::ActiveModel { + chain_data_mutability: Set(chain_mutability), + schema_version: Set(1), + chain_data: Set(chain_data_json), + metadata_url: Set(metadata.uri), + metadata: Set(JsonValue::String("processing".to_string())), + metadata_mutability: Set(Mutability::Mutable), + ..Default::default() + }, + asset_data::Model { + id: row_num, + chain_data_mutability: ChainMutability::Mutable, + schema_version: 1, + chain_data: serde_json::to_value(ChainDataV1 { + name: String::from("Test #`row_num`"), + symbol: String::from("BUBBLE"), + edition_nonce: None, + primary_sale_happened: true, + token_standard: Some(TokenStandard::NonFungible), + uses: None, + }) + .unwrap(), + metadata_url: Keypair::new().pubkey().to_string(), + metadata_mutability: Mutability::Mutable, + metadata: JsonValue::String("processing".to_string()), + }, + ) +} + +pub fn create_asset( + id: Vec, + owner: Vec, + owner_type: OwnerType, + delegate: Option>, + frozen: bool, + supply: i64, + supply_mint: Option>, + compressed: bool, + compressible: bool, + tree_id: Option>, + specification_version: i32, + nonce: i64, + leaf: Option>, + royalty_target_type: RoyaltyTargetType, + royalty_target: Option>, + royalty_amount: i32, + chain_data_id: Option, +) -> (asset::ActiveModel, asset::Model) { + ( + asset::ActiveModel { + id: Set(id.clone()), + owner: Set(owner.clone()), + owner_type: Set(owner_type.clone()), + delegate: Set(delegate.clone()), + frozen: Set(frozen), + supply: Set(supply), + supply_mint: Set(supply_mint.clone()), + compressed: Set(compressed), + compressible: Set(compressible), + tree_id: Set(tree_id.clone()), + specification_version: Set(specification_version), + nonce: Set(nonce), + leaf: Set(leaf.clone()), + royalty_target_type: Set(royalty_target_type.clone()), + royalty_target: Set(royalty_target.clone()), + royalty_amount: Set(royalty_amount), //basis points + chain_data_id: Set(chain_data_id), + ..Default::default() + }, + asset::Model { + id, + owner, + owner_type, + delegate, + frozen, + supply, + supply_mint, + compressed, + compressible, + tree_id, + specification_version, + nonce, + leaf, + royalty_target_type, + royalty_target, + royalty_amount, + chain_data_id, + burnt: false, + created_at: None, + }, + ) +} + +pub fn create_asset_creator( + asset_id: Vec, + creator: Vec, + share: i32, + verified: bool, + row_num: i64, +) -> (asset_creators::ActiveModel, asset_creators::Model) { + ( + asset_creators::ActiveModel { + asset_id: Set(asset_id.clone()), + creator: Set(creator.clone()), + share: Set(share), + verified: Set(verified), + ..Default::default() + }, + asset_creators::Model { + id: row_num, + asset_id, + creator, + share, + verified, + }, + ) +} + +pub fn create_asset_authority( + asset_id: Vec, + update_authority: Vec, + row_num: i64, +) -> 
(asset_authority::ActiveModel, asset_authority::Model) { + ( + asset_authority::ActiveModel { + asset_id: Set(asset_id.clone()), + authority: Set(update_authority.clone()), + ..Default::default() + }, + asset_authority::Model { + asset_id, + authority: update_authority, + id: row_num, + scopes: None, + }, + ) +} + +pub fn create_asset_grouping( + asset_id: Vec, + collection: Pubkey, + row_num: i64, +) -> (asset_grouping::ActiveModel, asset_grouping::Model) { + ( + asset_grouping::ActiveModel { + asset_id: Set(asset_id.clone()), + group_key: Set(String::from("collection")), + group_value: Set(bs58::encode(collection).into_string()), + ..Default::default() + }, + asset_grouping::Model { + asset_id, + group_value: bs58::encode(collection).into_string(), + id: row_num, + group_key: "collection".to_string(), + }, + ) +} diff --git a/nft_api/.dockerignore b/nft_api/.dockerignore deleted file mode 100644 index 1de565933..000000000 --- a/nft_api/.dockerignore +++ /dev/null @@ -1 +0,0 @@ -target \ No newline at end of file diff --git a/nft_api/Cargo.toml b/nft_api/Cargo.toml deleted file mode 100644 index 1082a0493..000000000 --- a/nft_api/Cargo.toml +++ /dev/null @@ -1,38 +0,0 @@ -[package] -name = "nft_api" -version = "0.1.0" -edition = "2021" - -[dependencies] -hex = "0.4.3" -routerify = "3" -routerify-json-response = "3" -redis = { version = "0.21.5", features = ["aio", "tokio-comp", "streams"] } -futures-util = "0.3.0" -hyper = "0.14" -anchor-client = { path = "../deps/anchor/client" } -base64 = "0.13.0" -thiserror = "1.0.30" -tokio = { version = "1.17.0", features = ["full"] } -sqlx = { version = "0.5.11", features = ["runtime-tokio-rustls", "postgres", "uuid"] } -tokio-postgres = "0.7.5" -serde = "1.0.136" -bs58 = "0.4.0" -reqwest = "0.11.10" -csv = "1.1.6" -messenger = { path = "../messenger" } -plerkle = { path = "../plerkle" } -flatbuffers = "2.1.2" -solana-sdk = { version = "=1.10.10" } -lazy_static = "1.4.0" -regex = "1.5.5" -plerkle-serialization = { path = "../plerkle_serialization" } -uuid = "1.0.0" -gummyroll = { path = "../contracts/programs/gummyroll", features = ["no-entrypoint"] } -gummyroll-crud = { path = "../contracts/programs/gummyroll_crud", features = ["no-entrypoint"] } -bubblegum = { path = "../contracts/programs/bubblegum", features = ["no-entrypoint"] } -concurrent-merkle-tree = { path = "../lib/concurrent-merkle-tree" } - -[dependencies.num-integer] -version = "0.1.44" -default-features = false diff --git a/nft_api/rustfmt.toml b/nft_api/rustfmt.toml deleted file mode 100644 index 36c419bb3..000000000 --- a/nft_api/rustfmt.toml +++ /dev/null @@ -1 +0,0 @@ -edition = "2021" \ No newline at end of file diff --git a/nft_api/src/error.rs b/nft_api/src/error.rs deleted file mode 100644 index 9585dbf31..000000000 --- a/nft_api/src/error.rs +++ /dev/null @@ -1,10 +0,0 @@ -use hyper::StatusCode; -use thiserror::Error; - -#[derive(Error, Debug)] -pub enum ApiError { - #[error("Parameter Invalid")] - ParameterInvalid, - #[error("Request Error {status:?}, reason {msg:?} ")] - ResponseError { status: StatusCode, msg: String }, -} diff --git a/nft_api/src/main.rs b/nft_api/src/main.rs deleted file mode 100644 index e4e7adb5b..000000000 --- a/nft_api/src/main.rs +++ /dev/null @@ -1,485 +0,0 @@ -use hyper::{header, Body, Request, Response, Server, StatusCode}; -// Import the routerify prelude traits. 
-use anchor_client::solana_sdk::pubkey::Pubkey; - -use futures_util::StreamExt; - -use concurrent_merkle_tree::utils::empty_node; -use hyper::header::HeaderValue; - -use redis::Commands; -use routerify::prelude::*; -use routerify::{Middleware, Router, RouterService}; -use routerify_json_response::{json_failed_resp, json_failed_resp_with_message, json_success_resp}; -use serde::Serialize; -use sqlx; -use sqlx::postgres::PgPoolOptions; -use sqlx::{Pool, Postgres}; - -use std::net::SocketAddr; -use std::ops::Index; -use std::str::FromStr; - -mod error; - -use error::ApiError; - -async fn logger(req: Request) -> Result, routerify_json_response::Error> { - println!( - "{} {} {}", - req.remote_addr(), - req.method(), - req.uri().path() - ); - Ok(req) -} - -#[derive(sqlx::FromRow, Clone, Debug)] -struct NodeDAO { - pub hash: Vec, - pub level: i64, - pub node_idx: i64, - pub seq: i64, -} - -#[derive(sqlx::FromRow, Clone, Debug)] -struct AssetDAO { - pub data: String, - pub index: i64, - pub owner: Vec, - pub tree: Vec, - pub admin: Vec, - pub hash: Vec, - pub level: i64, -} - -#[derive(Serialize)] -struct AssetView { - pub data: String, - pub index: i64, - pub owner: String, - pub treeAccount: String, - pub treeAdmin: String, - pub hash: String, -} - -#[derive(sqlx::FromRow)] -struct Root { - pub hash: Vec, -} - -#[derive(sqlx::FromRow)] -struct Level { - pub level: i64, -} - -#[derive(Serialize, Default, Clone, PartialEq)] -struct NodeView { - pub hash: String, - pub level: i64, - pub index: i64, - pub seq: i64, -} - -#[derive(Serialize)] -struct AssetProof { - pub root: String, - pub hash: String, - pub proof: Vec, -} - -fn node_list_to_view(items: Vec) -> Vec { - let mut view = vec![]; - for r in items { - view.push(node_to_view(r)) - } - view -} - -fn node_to_view(r: NodeDAO) -> NodeView { - NodeView { - hash: bs58::encode(r.hash).into_string(), - level: r.level, - index: r.node_idx, - seq: r.seq, - } -} - -fn asset_list_to_view(items: Vec) -> Vec { - let mut view = vec![]; - for r in items { - view.push(asset_to_view(r)) - } - view -} - -fn asset_to_view(r: AssetDAO) -> AssetView { - AssetView { - index: node_idx_to_leaf_idx(r.index, r.level as u32), - treeAccount: bs58::encode(r.tree).into_string(), - owner: bs58::encode(r.owner).into_string().to_string(), - treeAdmin: bs58::encode(r.admin).into_string().to_string(), - hash: bs58::encode(r.hash).into_string().to_string(), - data: r.data, - } -} - -fn leaf_idx_to_node_idx(index: i64, tree_height: u32) -> i64 { - index + 2i64.pow(tree_height) -} - -fn node_idx_to_leaf_idx(index: i64, tree_height: u32) -> i64 { - index - 2i64.pow(tree_height) -} - -/// Takes in an index from leaf-space -async fn handle_get_asset( - req: Request, -) -> Result, routerify_json_response::Error> { - let db: &Pool = req.data::>().unwrap(); - let tree_id = decode_b58_param(req.param("tree_id").unwrap()).unwrap(); - let leaf_idx = req.param("index").unwrap().parse::().unwrap(); - - let tree_height = get_height(db, &tree_id).await.unwrap(); - let node_idx = leaf_idx_to_node_idx(leaf_idx, tree_height); - let result = get_asset(db, &tree_id, node_idx).await; - if result.is_err() { - return json_failed_resp_with_message( - StatusCode::INTERNAL_SERVER_ERROR, - result.err().unwrap().to_string(), - ); - } - let asset = result.unwrap(); - json_success_resp(&asset) -} - -async fn handler_get_assets_for_owner( - req: Request, -) -> Result, routerify_json_response::Error> { - let db: &Pool = req.data::>().unwrap(); - let owner = 
decode_b58_param(req.param("owner").unwrap()).unwrap(); - - let results = sqlx::query_as::<_, AssetDAO>(r#" - select a.msg as data, c.node_idx as index, a.owner, a.tree_id as tree , aso.authority as admin, a.leaf as hash, max(c.seq) as seq, c2.level as level from app_specific as a - join cl_items as c on c.tree = a.tree_id and c.hash = a.leaf - join app_specific_ownership aso on a.tree_id = aso.tree_id - join cl_items as c2 on c2.tree = c.tree - where a.owner = $1 and c2.node_idx = 1 - group by c.node_idx, a.msg, a.owner, a.tree_id, aso.authority, a.leaf, c2.level - order by seq"# - ) - .bind(owner) - .fetch_all(db).await; - if results.is_err() { - return json_failed_resp_with_message( - StatusCode::INTERNAL_SERVER_ERROR, - results.err().unwrap().to_string(), - ); - } - let assets = results.unwrap(); - json_success_resp(&asset_list_to_view(assets)) -} - -async fn handle_get_tree( - req: Request, -) -> Result, routerify_json_response::Error> { - let db: &Pool = req.data::>().unwrap(); - let tree_id = decode_b58_param(req.param("tree_id").unwrap()).unwrap(); - let results = sqlx::query_as::<_, NodeDAO>("select distinct on (node_idx) node_idx, level, hash, seq from cl_items where tree = $1 order by node_idx, seq, level desc") - .bind(tree_id) - .fetch_all(db).await; - if results.is_err() { - return json_failed_resp_with_message( - StatusCode::INTERNAL_SERVER_ERROR, - results.err().unwrap().to_string(), - ); - } - json_success_resp(&node_list_to_view(results.unwrap())) -} - -async fn handle_get_root( - req: Request, -) -> Result, routerify_json_response::Error> { - let db: &Pool = req.data::>().unwrap(); - let tree_id = decode_b58_param(req.param("tree_id").unwrap()).unwrap(); - let result = get_root(&db, &tree_id).await; - if result.is_err() { - return json_failed_resp_with_message( - StatusCode::INTERNAL_SERVER_ERROR, - result.err().unwrap().to_string(), - ); - } - json_success_resp(&result.unwrap()) -} - -async fn handle_get_proof( - req: Request, -) -> Result, routerify_json_response::Error> { - let db: &Pool = req.data::>().unwrap(); - let tree_id = decode_b58_param(req.param("tree_id").unwrap()).unwrap(); - let index = req.param("index").unwrap().parse::().unwrap(); - - let proof = get_proof_and_root(db, &tree_id, index).await; - if proof.is_err() { - return if let ApiError::ResponseError { status, msg } = proof.err().unwrap() { - json_failed_resp_with_message(status, msg) - } else { - json_failed_resp(StatusCode::INTERNAL_SERVER_ERROR) - }; - } - let proof_unwrapped = proof.unwrap(); - json_success_resp(&proof_unwrapped[..proof_unwrapped.len() - 1].to_vec()) -} - -async fn handle_get_asset_proof( - req: Request, -) -> Result, routerify_json_response::Error> { - let db: &Pool = req.data::>().unwrap(); - let tree_id = decode_b58_param(req.param("tree_id").unwrap()).unwrap(); - - let leaf_idx = req.param("index").unwrap().parse::().unwrap(); - - let tree_height = get_height(db, &tree_id).await.unwrap(); - let node_idx = leaf_idx_to_node_idx(leaf_idx, tree_height); - let proof: Result, ApiError> = get_proof_and_root(db, &tree_id, node_idx) - .await - .map(|p| p.iter().map(|node| node.hash.clone()).collect()); - - let result = get_asset(db, &tree_id, node_idx).await; - let string: String; - if result.is_err() { - println!("Could not find asset...\n"); - let empty_leaf = empty_node(0).to_vec(); - string = bs58::encode(empty_leaf).into_string(); - } else { - string = result.unwrap().hash.clone(); - } - - let asset_proof = proof.map(|p| AssetProof { - hash: string, - root: p[p.len() - 
1].clone(), - proof: p[..p.len() - 1].to_vec(), - }); - - if asset_proof.is_err() { - println!("Asset proof is error :/ \n"); - return if let ApiError::ResponseError { status, msg } = asset_proof.err().unwrap() { - json_failed_resp_with_message(status, msg) - } else { - json_failed_resp(StatusCode::INTERNAL_SERVER_ERROR) - }; - } - - json_success_resp(&asset_proof.unwrap()) -} - -async fn get_height(db: &Pool, tree_id: &Vec) -> Result { - let result = sqlx::query_as::<_, Level>( - "select level from cl_items where node_idx = 1 AND tree = $1 order by seq desc limit 1", - ) - .bind(tree_id) - .fetch_one(db) - .await; - - result - .map(|r| r.level as u32) - .map_err(|e| ApiError::ResponseError { - status: StatusCode::INTERNAL_SERVER_ERROR, - msg: e.to_string(), - }) -} - -async fn get_root(db: &Pool, tree_id: &Vec) -> Result { - let result = sqlx::query_as::<_, Root>( - "select hash from cl_items where node_idx = 1 AND tree = $1 order by seq desc limit 1", - ) - .bind(tree_id) - .fetch_one(db) - .await; - - result - .map(|r| bs58::encode(r.hash).into_string()) - .map_err(|e| ApiError::ResponseError { - status: StatusCode::INTERNAL_SERVER_ERROR, - msg: e.to_string(), - }) -} - -async fn get_asset( - db: &Pool, - tree_id: &Vec, - node_idx: i64, -) -> Result { - let result = sqlx::query_as::<_, AssetDAO>(r#" - select a.msg as data, c.node_idx as index, a.owner, a.tree_id as tree , aso.authority as admin, a.leaf as hash, max(c.seq) as seq, c2.level as level from app_specific as a - join cl_items as c on c.tree = a.tree_id and c.hash = a.leaf - join app_specific_ownership aso on a.tree_id = aso.tree_id - join cl_items as c2 on c2.tree = c.tree - where a.tree_id = $1 AND c.node_idx = $2 and c2.node_idx = 1 - group by c.node_idx, a.msg, a.owner, a.tree_id, aso.authority, a.leaf, c2.level - order by seq - limit 1 - "# - ) - .bind(&tree_id) - .bind(&node_idx) - .fetch_one(db).await; - result - .map(asset_to_view) - .map_err(|e| ApiError::ResponseError { - status: StatusCode::INTERNAL_SERVER_ERROR, - msg: e.to_string(), - }) -} - -async fn get_proof_and_root( - db: &Pool, - tree_id: &Vec, - index: i64, -) -> Result, ApiError> { - let nodes = get_required_nodes_for_proof(index); - let expected_proof_size = nodes.len(); - let results = sqlx::query_as::<_, NodeDAO>( - r#" - select distinct on (node_idx) node_idx, hash, level, max(seq) as seq - from cl_items - where node_idx = ANY ($1) and tree = $2 - and seq <= ( - select max(seq) as seq - from cl_items - where node_idx = 1 and tree = $2 - ) - group by seq, node_idx, level, hash - order by node_idx desc, seq desc - "#, - ) - .bind(&nodes.as_slice()) - .bind(&tree_id) - .fetch_all(db) - .await; - let nodes_from_db = results.unwrap(); - let mut final_node_list: Vec = vec![NodeView::default(); expected_proof_size]; - if nodes_from_db.len() > expected_proof_size { - return Err(ApiError::ResponseError { - status: StatusCode::INTERNAL_SERVER_ERROR, - msg: "Tree Corrupted".to_string(), - }); - } - if nodes_from_db.len() != expected_proof_size { - for returned in nodes_from_db.iter() { - let node_view = node_to_view(returned.to_owned()); - println!( - "Node from db: {} {} {}", - &node_view.level, &node_view.hash, &node_view.index - ); - if returned.level < final_node_list.len().try_into().unwrap() { - final_node_list[returned.level as usize] = node_view; - } - } - for (i, (n, nin)) in final_node_list.iter_mut().zip(nodes).enumerate() { - if *n == NodeView::default() { - *n = node_to_view(make_empty_node(i as i64, nin)); - } - } - } else { - final_node_list = 
node_list_to_view(nodes_from_db); - } - Ok(final_node_list) -} - -fn get_required_nodes_for_proof(index: i64) -> Vec { - let mut indexes = vec![]; - let mut idx = index; - while idx > 1 { - if idx % 2 == 0 { - indexes.push(idx + 1) - } else { - indexes.push(idx - 1) - } - idx >>= 1 - } - indexes.push(1); - println!("nodes {:?}", indexes); - return indexes; -} - -fn decode_b58_param(param: &String) -> Result, ApiError> { - let pub_key = Pubkey::from_str(&*param).map_err(|e| { - println!("{}", e.to_string()); - ApiError::ParameterInvalid - })?; - Ok(pub_key.to_bytes().to_vec()) -} - -fn make_empty_node(lvl: i64, node_index: i64) -> NodeDAO { - NodeDAO { - node_idx: node_index, - level: lvl, - hash: empty_node(lvl as u32).to_vec(), - seq: 0, - } -} - -fn router(db: Pool) -> Router { - Router::builder() - .middleware(Middleware::pre(logger)) - .middleware(Middleware::post(|mut res| async move { - let headers = res.headers_mut(); - headers.insert( - header::ACCESS_CONTROL_ALLOW_ORIGIN, - HeaderValue::from_static("*"), - ); - headers.insert( - header::ACCESS_CONTROL_ALLOW_METHODS, - HeaderValue::from_static("*"), - ); - headers.insert( - header::ACCESS_CONTROL_ALLOW_HEADERS, - HeaderValue::from_static("*"), - ); - headers.insert( - header::ACCESS_CONTROL_EXPOSE_HEADERS, - HeaderValue::from_static("*"), - ); - Ok(res) - })) - .data(db) - .get("/assets/:tree_id/:index/proof", handle_get_asset_proof) - .get("/assets/:tree_id/:index", handle_get_asset) - .get("/owner/:owner/assets", handler_get_assets_for_owner) - .get("/tree/:tree_id", handle_get_tree) - .get("/root/:tree_id", handle_get_root) - .get("/proof/:tree_id/:index", handle_get_proof) - .build() - .unwrap() -} - -#[derive(Default)] -struct AppEvent { - op: String, - message: String, - leaf: String, - owner: String, - tree_id: String, -} - -#[tokio::main] -async fn main() { - let main_pool = PgPoolOptions::new() - .max_connections(5) - .connect("postgres://solana:solana@db/solana") - .await - .unwrap(); - let router = router(main_pool); - // Create a Service from the router above to handle incoming requests. - let service = RouterService::new(router).unwrap(); - // The address on which the server will be listening. - let addr = SocketAddr::from(([0, 0, 0, 0], 9090)); - // Create a server by passing the created service to `.serve` method. - let server = Server::bind(&addr).serve(service); - - println!("App is running on: {}", addr); - if let Err(err) = server.await { - eprintln!("Server error: {}", err); - } -}
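
Note: the handlers removed above derive Merkle proofs from a flat node-index layout (root at node index 1, leaf i of a height-h tree at node index i + 2^h, proof = the sibling at each level up to the root). The sketch below is a standalone, lightly cleaned-up reproduction of that index math from the deleted main.rs; the main function and the height-3 example are illustrative additions, not part of either crate.

// Flat binary-tree layout: root at node index 1, children of n at 2n and 2n+1.

/// Map a leaf index into node-index space for a tree of the given height.
fn leaf_idx_to_node_idx(index: i64, tree_height: u32) -> i64 {
    index + 2i64.pow(tree_height)
}

/// Collect the node indexes needed to prove a leaf: the sibling at every
/// level on the walk from the node up to, and including, the root.
fn get_required_nodes_for_proof(index: i64) -> Vec<i64> {
    let mut indexes = vec![];
    let mut idx = index;
    while idx > 1 {
        // Even indexes are left children (sibling at idx + 1); odd indexes
        // are right children (sibling at idx - 1).
        if idx % 2 == 0 {
            indexes.push(idx + 1)
        } else {
            indexes.push(idx - 1)
        }
        idx >>= 1 // step up to the parent
    }
    indexes.push(1); // the root itself, used to check the reconstructed hash
    indexes
}

fn main() {
    // Leaf 3 in a height-3 tree sits at node index 3 + 2^3 = 11.
    let node_idx = leaf_idx_to_node_idx(3, 3);
    assert_eq!(node_idx, 11);
    // Sibling walk: 10 (sibling of 11), 4 (sibling of 5), 3 (sibling of 2),
    // then the root at 1, matching the proof-size expectation in
    // get_proof_and_root above.
    assert_eq!(get_required_nodes_for_proof(node_idx), vec![10, 4, 3, 1]);
}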