Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Optimize searchAssets and getAssetsByOwner by removing sorting #214

Closed
wants to merge 198 commits into from
Closed
Changes from 1 commit
Commits
Show all changes
198 commits
Select commit Hold shift + click to select a range
9933c41
Applying patch 133
Juanito87 Nov 27, 2023
7abbc0e
Patch137 2 (#111)
Juanito87 Nov 27, 2023
67ed605
Applying patch 139. (#110)
Juanito87 Nov 27, 2023
9bb9f8f
fix: split tokens with no token standard are incorrectly categorized as…
kespinola Dec 1, 2023
ba20d9d
fix(backfiller): when querying chain for trees to backfill the merkle…
kespinola Dec 1, 2023
1968c00
[WIP] Tree Transaction Backfiller (#114)
kespinola Dec 12, 2023
3515b63
[Tree Backfiller] Metrics and Logging (#115)
kespinola Dec 14, 2023
9591066
fix(backfiller): unable to parse signatures from char mismatch from s…
kespinola Dec 15, 2023
50fd51d
fix(backfiller): nft ingester need to be updated for saving tree tran…
kespinola Dec 15, 2023
6d846cf
fix model types for tree transaction after fixes to backfiller
kespinola Dec 16, 2023
1548bd9
refactor(backfiller): select fixed list of trees to process. switch f…
kespinola Dec 18, 2023
5b6a17f
refactor(backfiller): add retries to rpc calls (#118)
kespinola Dec 19, 2023
cb271ca
refactor(backfiller): remove using tree transactions because similar …
kespinola Dec 20, 2023
f36193f
fix(ingester): drop on conflict for now since unable to add until uni…
kespinola Dec 20, 2023
cff8e67
feat(backfiller): add cl_audit_v2 to ingestion for recording every se…
kespinola Dec 22, 2023
d1c16d1
[Tree Backfiller] Gap Filling (#121)
kespinola Dec 29, 2023
0cc0482
feat(bgtask_creator): add run command to lookup asset_data that is fl…
kespinola Jan 4, 2024
e9e0afd
Metadata JSON Ingest and Backfill (#123)
kespinola Jan 17, 2024
090897a
refactor(tree_backfiller): adjust thread management. add documentatio…
kespinola Jan 22, 2024
2ffedab
Align triton-build with upstream 1/26/24 (#127)
kespinola Jan 26, 2024
497c3cb
Merge remote-tracking branch 'origin' into triton-build
kespinola Jan 26, 2024
828508d
refactor(ingester): move more logs to debug but do log info for simpl…
kespinola Jan 29, 2024
a03627f
Merge branch 'main' into triton-build
kespinola Feb 1, 2024
609cfa4
fix(ingest): formatting of account logs to base58
kespinola Feb 1, 2024
001655d
fix: remove the insertion of backfill item it is no longer used for t…
kespinola Feb 4, 2024
63008f1
Merge branch 'main' into triton-build
kespinola Feb 9, 2024
417f71b
feat(metadata_json): add single command to index a metadata json in t…
kespinola Feb 19, 2024
beca08a
fix(ops): backfiller was not filtering out failed transactions (#131)
kespinola Feb 23, 2024
e973ab2
feat(ops): optionally fix transfer failures by deleting the cl_audits…
kespinola Feb 26, 2024
4cc7f8c
feat: config for max connections on the read api (#133)
kespinola Mar 11, 2024
5ff0ad4
fix(das): config max request connections (#134)
kespinola Mar 11, 2024
2fb5d26
test: no retries when looking up token and owner when indexing nfts (…
kespinola Mar 12, 2024
4bb672c
Revert "test: no retries when looking up token and owner when indexin…
kespinola Mar 14, 2024
d61382a
feat: add report command to metadata json. remove tree_backfiller rep…
kespinola Mar 14, 2024
767e1a3
Merge branch 'main' into triton-build
kespinola Mar 19, 2024
47594a9
Merge remote-tracking branch 'origin/main' into triton-build
kespinola Mar 27, 2024
b9bcbda
Merge remote-tracking branch 'origin/main' into triton-build
kespinola Mar 28, 2024
d5f1967
feat(ops): add account backfill to ops cli by program or for a single…
kespinola Apr 5, 2024
ea5379e
fmt
fanatid Nov 20, 2023
8b12316
clippy
fanatid Nov 20, 2023
974c90f
ci: add lock, fmt, clippy checks
fanatid Nov 20, 2023
788973e
include migration to workspace
fanatid Nov 20, 2023
7a870c2
include das_api to workspace
fanatid Nov 20, 2023
7e3f9ef
add global clippy lints
fanatid Nov 20, 2023
aa916a4
use workspace
fanatid Nov 20, 2023
033873d
add crate program_transformers
fanatid Nov 22, 2023
1008f86
nft_ingester: use program_transformers crate
fanatid Nov 23, 2023
83b171e
remove not used deps
fanatid Nov 23, 2023
9d8031e
remove AccountInfo
fanatid Nov 23, 2023
096616d
remove plerkle from program_transformers
fanatid Nov 24, 2023
ef0b0e9
nft_ingester2: grpc2redis
fanatid Nov 27, 2023
076bffa
add redis streaming for ingester
fanatid Nov 28, 2023
d8673cf
create pg pool
fanatid Nov 29, 2023
c93adff
parse incoming message from redis
fanatid Nov 29, 2023
53fa481
add force shutdown with signals
fanatid Nov 30, 2023
11b4151
insert download metadata tasks
fanatid Dec 1, 2023
8fca2c2
download-metadata subtask
fanatid Dec 2, 2023
fcc18d6
refactor: rename nft_ingest2
kespinola Apr 10, 2024
294fa1a
fix: applying account and transaction filters to grpc subscription req…
kespinola Apr 15, 2024
8f7baa7
Merge remote-tracking branch 'origin' into triton-build
kespinola Apr 16, 2024
36cba58
Grpc ingest triton build (#142)
kespinola Apr 16, 2024
98a7e4e
refactor: take out multiple connections to dragonmouth and then have …
kespinola May 2, 2024
997d515
refactor: switch to accepting multiple dragonmouth endpoints to mimic …
kespinola May 3, 2024
75c4115
Merge branch 'main' of github.com:rpcpool/digital-asset-rpc-infrastru…
kespinola May 9, 2024
67cc21e
fix: clippy errors
kespinola May 9, 2024
554b06b
Merge remote-tracking branch 'origin' into triton-build
kespinola May 30, 2024
b23804b
Merge branch 'main' of github.com:rpcpool/digital-asset-rpc-infrastru…
kespinola Jun 12, 2024
9c294d6
Merge branch 'main' of github.com:rpcpool/digital-asset-rpc-infrastru…
kespinola Jun 18, 2024
c537a72
fix: program-transform no longer accepts optional cl_audits write flag
kespinola Jun 18, 2024
5f08298
fix: processes transactions for a tree sequentially to make sure ther…
kespinola May 15, 2024
0eced5d
refactor: use program transform in the account backfill no queuing in…
kespinola May 16, 2024
138bf2e
refactor: move bubble backfill to a lib so can be used by other proje…
kespinola May 30, 2024
d80e7b8
Merge remote-tracking branch 'origin' into grpc-ingest
kespinola Jul 19, 2024
2cc3411
chore: refactor grpc command to use topograph to clean up the control …
kespinola Jul 30, 2024
a9b1504
Merge branch 'main' into triton-build
kespinola Jul 31, 2024
81f5d71
chore: switch to run_v2 for ingest which handles ingesting accounts a…
kespinola Aug 9, 2024
10c46d5
Merge remote-tracking branch 'origin' into grpc-ingest
kespinola Aug 10, 2024
a151630
chore: handle metadata json within the ingest command
kespinola Aug 12, 2024
211c23f
Merge branch 'grpc-ingest' into triton-build
kespinola Aug 12, 2024
f398880
test: metadata json stream processing
kespinola Aug 12, 2024
656c0c8
Merge branch 'grpc-ingest' into triton-build
kespinola Aug 12, 2024
dd8103a
fix: special use of das-metadata-json in triton-build
kespinola Aug 12, 2024
70eef04
feat: set num threads for topograph. report on postgres and redis in …
kespinola Aug 14, 2024
24f3e63
Merge branch 'grpc-ingest' into triton-build
kespinola Aug 14, 2024
77a98f6
chore: switch to topograph for thread management. create separate ing…
kespinola Aug 20, 2024
5cad80d
Merge branch 'grpc-ingest' into triton-build
kespinola Aug 20, 2024
0db0612
fix: config and ack
kespinola Aug 22, 2024
a4c42b4
Merge branch 'grpc-ingest' into triton-build
kespinola Aug 22, 2024
32f9852
hack: disable pending processing while see why messages arent being d…
kespinola Aug 22, 2024
49de638
Merge branch 'grpc-ingest' into triton-build
kespinola Aug 22, 2024
bab4678
fix: exit early if slot matches metadata json download and reindex is…
kespinola Aug 22, 2024
2d2e7fd
Merge branch 'grpc-ingest' into triton-build
kespinola Aug 22, 2024
0c4f872
fix: picking dangling pending messages before reading new
kespinola Aug 22, 2024
eab20c6
Merge branch 'grpc-ingest' into triton-build
kespinola Aug 22, 2024
eb75ca3
fix: ingest stream shutdown and requesting snapshots
kespinola Aug 23, 2024
d6d83f2
Merge branch 'grpc-ingest' into triton-build
kespinola Aug 23, 2024
5cf5b75
fix: revert change to redis for grpc-ingest
kespinola Aug 23, 2024
3213080
fix: processing pending. add metrics for total workers.
kespinola Aug 23, 2024
381491c
Merge branch 'grpc-ingest' of github.com:rpcpool/digital-asset-rpc-in…
kespinola Aug 23, 2024
7f3b391
fix: bubble backfill transform. no op out of download metadata info.
kespinola Aug 26, 2024
9e9a8a2
Merge branch 'grpc-ingest' into triton-build
kespinola Aug 26, 2024
4c3f649
Add force flag to TreeWorkerArgs for complete reindexing (#148)
0xC0A1 Sep 13, 2024
8549983
Add force flag to TreeWorkerArgs for complete reindexing (#148) (#149)
kespinola Sep 13, 2024
36524b6
fix: sort was newest to oldest we need to swap in order to index corr…
kespinola Sep 13, 2024
a6b054c
fix: order oldest to newest using block time
kespinola Sep 13, 2024
4e3c076
refactor: drop exclusion clauses on cl_items to allow for reindexing …
kespinola Sep 16, 2024
febc55c
Merge branch 'grpc-ingest' into triton-build
kespinola Sep 16, 2024
4fd9758
refactor: bubblegum backfill orders off of slot (#155)
kespinola Sep 17, 2024
c789b52
feat: replay bubblegum transactions based on what is in cl_audits_v2
kespinola Sep 26, 2024
2421252
Merge branch 'grpc-ingest' into triton-build
kespinola Sep 26, 2024
45a5dcf
fix: order bubblegum backfill instructions by seq (#158)
kespinola Oct 2, 2024
d483abd
Merge branch 'grpc-ingest' into triton-build
kespinola Oct 2, 2024
83199fd
Return seq check on writing cl items
kespinola Oct 4, 2024
f4744c0
include back seq checks
kespinola Oct 7, 2024
1afee45
Remove enforcing seq in bubblegum backfill
kespinola Oct 7, 2024
cbb2140
Merge branch 'grpc-ingest' into triton-build
kespinola Oct 7, 2024
f329846
More proof debug logging
kespinola Oct 7, 2024
18411a9
Replay single tree with targeted seq replay (#159)
kespinola Oct 8, 2024
5948709
Merge branch 'grpc-ingest' into triton-build
kespinola Oct 8, 2024
110f71c
Support Account Snapshot Flushes (#154)
kespinola Oct 9, 2024
118baec
fix: keep grpc alive by sending using ping-pong (#160)
Nagaprasadvr Oct 9, 2024
01bbe99
Only Time Based Flushes (#161)
kespinola Oct 9, 2024
f180736
Merge branch 'grpc-ingest' into triton-build
kespinola Oct 9, 2024
cde4594
delete and recreate the consumer (#162)
kespinola Oct 11, 2024
a27789e
Merge branch 'grpc-ingest' into triton-build
kespinola Oct 11, 2024
0005b3e
Type out redis message handlers (#163)
kespinola Oct 12, 2024
3bb3f08
Merge branch 'grpc-ingest' into triton-build
kespinola Oct 12, 2024
d092ad8
skip reading new messages if the ack buffer is full. track running ta…
kespinola Oct 13, 2024
8204236
Merge branch 'grpc-ingest' into triton-build
kespinola Oct 13, 2024
6f158f3
only push handler job for ingest if capacity (#165)
kespinola Oct 14, 2024
378152b
Merge branch 'grpc-ingest' into triton-build
kespinola Oct 14, 2024
a347066
Only throttle based on the task semaphore (#166)
kespinola Oct 14, 2024
6f822f0
Merge branch 'grpc-ingest' into triton-build
kespinola Oct 14, 2024
188c878
Remove topograph from ingest command (#167)
kespinola Oct 14, 2024
7010545
Merge branch 'grpc-ingest' into triton-build
kespinola Oct 14, 2024
e15172f
Configure database connection idle timeout and max lifetime (#169)
kespinola Oct 21, 2024
c2530b8
Merge branch 'grpc-ingest' into triton-build
kespinola Oct 21, 2024
fdbf03a
Add single NFT fetch and backfill feature to ops (#170)
Nagaprasadvr Oct 22, 2024
d7cc058
Add lock timeout on v1_asset statements
kespinola Oct 23, 2024
95072f0
Merge branch 'grpc-ingest' into triton-build
kespinola Oct 23, 2024
17d7e0a
Set the lock timeout to 5s
kespinola Oct 23, 2024
e91c61c
Merge branch 'grpc-ingest' into triton-build
kespinola Oct 23, 2024
99b59c0
Bubblegum verify (#150)
kespinola Oct 29, 2024
ecceb24
Merge branch 'grpc-ingest' of github.com:rpcpool/digital-asset-rpc-in…
kespinola Oct 29, 2024
d3bfe1b
Separate global stream to multiple Program separated streams and hand…
Nagaprasadvr Nov 8, 2024
f823e43
Merge branch 'grpc-ingest' into triton-build
kespinola Nov 8, 2024
f311e14
Report proofs as they come (#178)
kespinola Nov 8, 2024
6699d3b
Merge branch 'grpc-ingest' into triton-build
kespinola Nov 8, 2024
f8bdef2
Buffer redis messages (#179)
kespinola Nov 11, 2024
f650522
Merge branch 'grpc-ingest' into triton-build
kespinola Nov 11, 2024
776ae0a
Fix tracking ack tasks
kespinola Nov 11, 2024
b53ae3d
Merge branch 'grpc-ingest' into triton-build
kespinola Nov 11, 2024
151168e
fix: add back ack tasks total
kespinola Nov 11, 2024
75b3c12
Merge branch 'grpc-ingest' into triton-build
kespinola Nov 11, 2024
ae11907
recreate consumer but not groups (#181)
kespinola Nov 12, 2024
0150f58
Merge remote-tracking branch 'origin/grpc-ingest' into triton-build
kespinola Nov 12, 2024
80d61dc
Time Ingest jobs (#182)
kespinola Nov 14, 2024
ea897eb
Merge branch 'grpc-ingest' into triton-build
kespinola Nov 14, 2024
4baaca6
Use Example Configs (#180)
kespinola Nov 27, 2024
700cd9f
Remove Selects From Standard NFT Indexing (#183)
kespinola Dec 9, 2024
01a8de7
Save pgpool on program transformer and take out connection on handler…
kespinola Dec 16, 2024
322a7ee
Write cl items in the transaction (#185)
kespinola Dec 17, 2024
449eb45
Only update the record when fields have changed (#187)
kespinola Dec 18, 2024
0d80844
patch condition check on asset_grouping (#188)
kespinola Dec 18, 2024
ceab0ce
Revert "patch condition check on asset_grouping (#188)" (#189)
kespinola Dec 18, 2024
aec25fa
Revert "Only update the record when fields have changed (#187)" (#190)
kespinola Dec 18, 2024
c235125
Respect commitment level set in grpc config (#191)
kespinola Dec 18, 2024
fcdcfca
Merge branch 'grpc-ingest' into triton-build
kespinola Dec 18, 2024
ada3534
Respect commitment level set in grpc config (#191) (#192)
kespinola Dec 18, 2024
9262ddb
Only update record when fields change (#193)
kespinola Dec 19, 2024
ee907c3
Write cl items in a batch (#194)
kespinola Dec 19, 2024
7b7c0a8
Set local lock timeouts for token program and stricter timeout for as…
kespinola Dec 21, 2024
ecde43d
Merge remote-tracking branch 'origin/main' into triton-build
kespinola Dec 30, 2024
a464fd3
Merge remote-tracking branch 'origin/main' into grpc-ingest
kespinola Jan 15, 2025
c24b6d4
Merge remote-tracking branch 'origin/grpc-ingest' into triton-build
kespinola Jan 15, 2025
5f30bad
Update to rust 1.79.0
kespinola Jan 15, 2025
b07b475
Update fixtures
kespinola Jan 16, 2025
9f3d543
Use tokio join set ingest stream task tracking
kespinola Jan 20, 2025
7ff1c77
Merge branch 'grpc-ingest' into triton-build
kespinola Jan 20, 2025
3d7a88d
Add metrics to track metadata_json_download success and failure (#207)
Nagaprasadvr Jan 21, 2025
44ce53f
add retry config to download_metadata_json task (#208)
Nagaprasadvr Jan 21, 2025
991bf70
Merge remote-tracking branch 'origin/main' into grpc-ingest
kespinola Jan 22, 2025
b491d0a
Resolve clippy
kespinola Jan 22, 2025
5ef5fc6
Merge branch 'grpc-ingest' into triton-build
kespinola Jan 22, 2025
c05c77b
Patch committing to txn
kespinola Jan 22, 2025
aaee1ad
Use multiple pipelines per subscription (#211)
kespinola Jan 23, 2025
6a9d074
Merge branch 'grpc-ingest' into triton-build
kespinola Jan 23, 2025
841ffa7
revert metaplex/plerkle-validator image to from ..79 to ..75 (#212)
Nagaprasadvr Jan 24, 2025
1343e33
Das 106 add metadata json backfiller to das (#210)
Nagaprasadvr Jan 24, 2025
e678814
Shutdown program on stream error or close (#213)
kespinola Jan 24, 2025
72ff117
Merge branch 'grpc-ingest' into triton-build
kespinola Jan 24, 2025
5aa04bd
Global shutdown in the None arm
kespinola Jan 24, 2025
13bc52a
Merge branch 'grpc-ingest' into triton-build
kespinola Jan 24, 2025
c071850
Close stream on error
kespinola Jan 24, 2025
b45d04b
Merge branch 'grpc-ingest' into triton-build
kespinola Jan 24, 2025
504c683
increase timeout for ta handler
kespinola Jan 27, 2025
675c9eb
Only asset field update in txn for ta handler
kespinola Jan 27, 2025
c004ce4
Only try to update owner on asset when ta amount is 1
kespinola Jan 28, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
214 changes: 174 additions & 40 deletions das_api/src/api/api_impl.rs
Original file line number Diff line number Diff line change
@@ -4,19 +4,23 @@ use digital_asset_types::{
sea_orm_active_enums::{
OwnerType, RoyaltyTargetType, SpecificationAssetClass, SpecificationVersions,
},
SearchAssetsQuery,
Cursor, PageOptions, SearchAssetsQuery,
},
dapi::{
get_asset, get_assets_by_authority, get_assets_by_creator, get_assets_by_group,
get_assets_by_owner, get_proof_for_asset, search_assets,
get_asset, get_asset_batch, get_asset_proof_batch, get_assets_by_authority,
get_assets_by_creator, get_assets_by_group, get_assets_by_owner, get_proof_for_asset,
search_assets,
},
rpc::{
filter::{AssetSortBy, SearchConditionType},
response::GetGroupingResponse,
},
rpc::{filter::SearchConditionType, response::GetGroupingResponse},
rpc::{OwnershipModel, RoyaltyModel},
};
use open_rpc_derive::document_rpc;
use sea_orm::{sea_query::ConditionType, ConnectionTrait, DbBackend, Statement};

use crate::validation::validate_opt_pubkey;
use crate::validation::{validate_opt_pubkey, validate_search_with_name};
use open_rpc_schema::document::OpenrpcDocument;
use {
crate::api::*,
@@ -46,21 +50,37 @@ impl DasApi {
})
}

fn get_cursor(&self, cursor: &Option<String>) -> Result<Cursor, DasApiError> {
match cursor {
Some(cursor_b64) => {
let cursor_vec = bs58::decode(cursor_b64)
.into_vec()
.map_err(|_| DasApiError::CursorValidationError(cursor_b64.clone()))?;
let cursor_struct = Cursor {
id: Some(cursor_vec),
};
Ok(cursor_struct)
}
None => Ok(Cursor::default()),
}
}

fn validate_pagination(
&self,
limit: &Option<u32>,
page: &Option<u32>,
before: &Option<String>,
after: &Option<String>,
) -> Result<(), DasApiError> {
if page.is_none() && before.is_none() && after.is_none() {
return Err(DasApiError::PaginationEmptyError);
}
cursor: &Option<String>,
sorting: &Option<&AssetSorting>,
) -> Result<PageOptions, DasApiError> {
let mut is_cursor_enabled = true;
let mut page_opt = PageOptions::default();

if let Some(limit) = limit {
// make config item
if *limit > 1000 {
return Err(DasApiError::PaginationError);
return Err(DasApiError::PaginationExceededError);
}
}

@@ -70,20 +90,57 @@ impl DasApi {
}

// make config item
if before.is_some() || after.is_some() {
if before.is_some() || after.is_some() || cursor.is_some() {
return Err(DasApiError::PaginationError);
}

is_cursor_enabled = false;
}

if let Some(before) = before {
if cursor.is_some() {
return Err(DasApiError::PaginationError);
}
if let Some(sort) = &sorting {
if sort.sort_by != AssetSortBy::Id {
return Err(DasApiError::PaginationSortingValidationError);
}
}
validate_pubkey(before.clone())?;
is_cursor_enabled = false;
}

if let Some(after) = after {
if cursor.is_some() {
return Err(DasApiError::PaginationError);
}
if let Some(sort) = &sorting {
if sort.sort_by != AssetSortBy::Id {
return Err(DasApiError::PaginationSortingValidationError);
}
}
validate_pubkey(after.clone())?;
is_cursor_enabled = false;
}

Ok(())
page_opt.limit = limit.map(|x| x as u64).unwrap_or(1000);
if is_cursor_enabled {
if let Some(sort) = &sorting {
if sort.sort_by != AssetSortBy::Id {
return Err(DasApiError::PaginationSortingValidationError);
}
page_opt.cursor = Some(self.get_cursor(&cursor)?);
}
} else {
page_opt.page = page.map(|x| x as u64);
page_opt.before = before
.clone()
.map(|x| bs58::decode(x).into_vec().unwrap_or_default());
page_opt.after = after
.clone()
.map(|x| bs58::decode(x).into_vec().unwrap_or_default());
}
Ok(page_opt)
}
}

@@ -121,14 +178,76 @@ impl ApiContract for DasApi {
.map_err(Into::into)
}

async fn get_asset_proof_batch(
self: &DasApi,
payload: GetAssetProofBatch,
) -> Result<HashMap<String, Option<AssetProof>>, DasApiError> {
let GetAssetProofBatch { ids } = payload;

let batch_size = ids.len();
if batch_size > 1000 {
return Err(DasApiError::BatchSizeExceededError);
}

let id_bytes = ids
.iter()
.map(|id| validate_pubkey(id.clone()).map(|id| id.to_bytes().to_vec()))
.collect::<Result<Vec<Vec<u8>>, _>>()?;

let proofs = get_asset_proof_batch(&self.db_connection, id_bytes).await?;

let result: HashMap<String, Option<AssetProof>> = ids
.iter()
.map(|id| (id.clone(), proofs.get(id).cloned()))
.collect();
Ok(result)
}

async fn get_asset(self: &DasApi, payload: GetAsset) -> Result<Asset, DasApiError> {
let id = validate_pubkey(payload.id.clone())?;
let id_bytes = id.to_bytes().to_vec();
get_asset(&self.db_connection, id_bytes)
let GetAsset {
id,
display_options,
} = payload;
let id_bytes = validate_pubkey(id.clone())?.to_bytes().to_vec();
let display_options = display_options.unwrap_or_default();
get_asset(&self.db_connection, id_bytes, &display_options.into())
.await
.map_err(Into::into)
}

async fn get_asset_batch(
self: &DasApi,
payload: GetAssetBatch,
) -> Result<Vec<Option<Asset>>, DasApiError> {
let GetAssetBatch {
ids,
display_options,
} = payload;

let batch_size = ids.len();
if batch_size > 1000 {
return Err(DasApiError::BatchSizeExceededError);
}

let id_bytes = ids
.iter()
.map(|id| validate_pubkey(id.clone()).map(|id| id.to_bytes().to_vec()))
.collect::<Result<Vec<Vec<u8>>, _>>()?;

let display_options = display_options.unwrap_or_default();

let assets = get_asset_batch(
&self.db_connection,
id_bytes,
batch_size as u64,
&display_options.into(),
)
.await?;

let result: Vec<Option<Asset>> = ids.iter().map(|id| assets.get(id).cloned()).collect();
Ok(result)
}

async fn get_assets_by_owner(
self: &DasApi,
payload: GetAssetsByOwner,
@@ -140,21 +259,23 @@ impl ApiContract for DasApi {
page,
before,
after,
display_options,
cursor,
} = payload;
let before: Option<String> = before.filter(|before| !before.is_empty());
let after: Option<String> = after.filter(|after| !after.is_empty());
let owner_address = validate_pubkey(owner_address.clone())?;
let owner_address_bytes = owner_address.to_bytes().to_vec();
let sort_by = sort_by.unwrap_or_default();
self.validate_pagination(&limit, &page, &before, &after)?;
let display_options = display_options.unwrap_or_default();
let page_options =
self.validate_pagination(&limit, &page, &before, &after, &cursor, &Some(&sort_by))?;
get_assets_by_owner(
&self.db_connection,
owner_address_bytes,
sort_by,
limit.map(|x| x as u64).unwrap_or(1000),
page.map(|x| x as u64),
before.map(|x| bs58::decode(x).into_vec().unwrap_or_default()),
after.map(|x| bs58::decode(x).into_vec().unwrap_or_default()),
&page_options,
&display_options,
)
.await
.map_err(Into::into)
@@ -172,20 +293,22 @@ impl ApiContract for DasApi {
page,
before,
after,
display_options,
cursor,
} = payload;
let before: Option<String> = before.filter(|before| !before.is_empty());
let after: Option<String> = after.filter(|after| !after.is_empty());
let sort_by = sort_by.unwrap_or_default();
self.validate_pagination(&limit, &page, &before, &after)?;
let display_options = display_options.unwrap_or_default();
let page_options =
self.validate_pagination(&limit, &page, &before, &after, &cursor, &Some(&sort_by))?;
get_assets_by_group(
&self.db_connection,
group_key,
group_value,
sort_by,
limit.map(|x| x as u64).unwrap_or(1000),
page.map(|x| x as u64),
before.map(|x| bs58::decode(x).into_vec().unwrap_or_default()),
after.map(|x| bs58::decode(x).into_vec().unwrap_or_default()),
&page_options,
&display_options,
)
.await
.map_err(Into::into)
@@ -203,22 +326,24 @@ impl ApiContract for DasApi {
page,
before,
after,
display_options,
cursor,
} = payload;
let creator_address = validate_pubkey(creator_address.clone())?;
let creator_address_bytes = creator_address.to_bytes().to_vec();

self.validate_pagination(&limit, &page, &before, &after)?;
let sort_by = sort_by.unwrap_or_default();
let page_options =
self.validate_pagination(&limit, &page, &before, &after, &cursor, &Some(&sort_by))?;
let only_verified = only_verified.unwrap_or_default();
let display_options = display_options.unwrap_or_default();
get_assets_by_creator(
&self.db_connection,
creator_address_bytes,
only_verified,
sort_by,
limit.map(|x| x as u64).unwrap_or(1000),
page.map(|x| x as u64),
before.map(|x| bs58::decode(x).into_vec().unwrap_or_default()),
after.map(|x| bs58::decode(x).into_vec().unwrap_or_default()),
&page_options,
&display_options,
)
.await
.map_err(Into::into)
@@ -235,20 +360,22 @@ impl ApiContract for DasApi {
page,
before,
after,
display_options,
cursor,
} = payload;
let sort_by = sort_by.unwrap_or_default();
let authority_address = validate_pubkey(authority_address.clone())?;
let authority_address_bytes = authority_address.to_bytes().to_vec();
let display_options = display_options.unwrap_or_default();

self.validate_pagination(&limit, &page, &before, &after)?;
let page_options =
self.validate_pagination(&limit, &page, &before, &after, &cursor, &Some(&sort_by))?;
get_assets_by_authority(
&self.db_connection,
authority_address_bytes,
sort_by,
limit.map(|x| x as u64).unwrap_or(1000),
page.map(|x| x as u64),
before.map(|x| bs58::decode(x).into_vec().unwrap_or_default()),
after.map(|x| bs58::decode(x).into_vec().unwrap_or_default()),
&page_options,
&display_options,
)
.await
.map_err(Into::into)
@@ -282,9 +409,13 @@ impl ApiContract for DasApi {
before,
after,
json_uri,
display_options,
cursor,
name,
} = payload;


// Deserialize search assets query
self.validate_pagination(&limit, &page, &before, &after)?;
let spec: Option<(SpecificationVersions, SpecificationAssetClass)> =
interface.map(|x| x.into());
let specification_version = spec.clone().map(|x| x.0);
@@ -294,6 +425,7 @@ impl ApiContract for DasApi {
SearchConditionType::All => ConditionType::All,
});
let owner_address = validate_opt_pubkey(&owner_address)?;
let name = validate_search_with_name(&name, &owner_address)?;
let creator_address = validate_opt_pubkey(&creator_address)?;
let delegate = validate_opt_pubkey(&delegate)?;

@@ -332,17 +464,19 @@ impl ApiContract for DasApi {
royalty_amount,
burnt,
json_uri,
name,
};
let display_options = display_options.unwrap_or_default();
let sort_by = sort_by.unwrap_or_default();
let page_options =
self.validate_pagination(&limit, &page, &before, &after, &cursor, &Some(&sort_by))?;
// Execute query
search_assets(
&self.db_connection,
saq,
sort_by,
limit.map(|x| x as u64).unwrap_or(1000),
page.map(|x| x as u64),
before.map(|x| bs58::decode(x).into_vec().unwrap_or_default()),
after.map(|x| bs58::decode(x).into_vec().unwrap_or_default()),
&page_options,
&display_options,
)
.await
.map_err(Into::into)
Loading