From e5d4a15aacca4c4dcd52e0e02608bf28c3abad75 Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Thu, 5 Oct 2023 13:44:52 +0200
Subject: [PATCH] itest: make loadtest ready for multiple runs

---
 itest/assertions.go               | 42 +++++++++++++++----------------
 itest/loadtest/mint_batch_test.go | 39 ++++++++++++++++++++--------
 2 files changed, 50 insertions(+), 31 deletions(-)

diff --git a/itest/assertions.go b/itest/assertions.go
index 6b43ba051..0b65fb24d 100644
--- a/itest/assertions.go
+++ b/itest/assertions.go
@@ -932,12 +932,17 @@ func AssertSplitTombstoneTransfer(t *testing.T,
 func AssertNumGroups(t *testing.T, client taprpc.TaprootAssetsClient,
 	num int) {
 
+	require.Equal(t, num, NumGroups(t, client))
+}
+
+// NumGroups returns the current number of asset groups present.
+func NumGroups(t *testing.T, client taprpc.TaprootAssetsClient) int {
 	ctxb := context.Background()
 	groupResp, err := client.ListGroups(
 		ctxb, &taprpc.ListGroupsRequest{},
 	)
 	require.NoError(t, err)
-	require.Equal(t, num, len(groupResp.Groups))
+	return len(groupResp.Groups)
 }
 
 // AssertGroupSizes asserts that a set of groups the daemon is aware of contain
@@ -1050,31 +1055,26 @@ func AssertUniverseRoot(t *testing.T, client unirpc.UniverseClient,
 		t, bothSet || neitherSet, "only set one of assetID or groupKey",
 	)
 
-	// Re-parse and serialize the keys to account for the different
-	// formats returned in RPC responses.
-	matchingGroupKey := func(root *unirpc.UniverseRoot) bool {
-		rootGroupKeyBytes := root.Id.GetGroupKey()
-		require.NotNil(t, rootGroupKeyBytes)
-
-		expectedGroupKey, err := btcec.ParsePubKey(groupKey)
-		require.NoError(t, err)
-		require.Equal(
-			t, rootGroupKeyBytes,
-			schnorr.SerializePubKey(expectedGroupKey),
-		)
-
-		return true
-	}
-
 	// Comparing the asset ID is always safe, even if nil.
 	matchingRoot := func(root *unirpc.UniverseRoot) bool {
-		require.Equal(t, root.MssmtRoot.RootSum, int64(sum))
-		require.Equal(t, root.Id.GetAssetId(), assetID)
+		sumEqual := root.MssmtRoot.RootSum == int64(sum)
+		idEqual := bytes.Equal(root.Id.GetAssetId(), assetID)
+		groupKeyEqual := true
 		if groupKey != nil {
-			return matchingGroupKey(root)
+			parsedGroupKey, err := btcec.ParsePubKey(groupKey)
+			require.NoError(t, err)
+
+			rootGroupKey := root.Id.GetGroupKey()
+			if rootGroupKey != nil {
+				groupKeyEqual = bytes.Equal(
+					rootGroupKey, schnorr.SerializePubKey(
+						parsedGroupKey,
+					),
+				)
+			}
 		}
 
-		return true
+		return sumEqual && idEqual && groupKeyEqual
 	}
 
 	ctx := context.Background()
diff --git a/itest/loadtest/mint_batch_test.go b/itest/loadtest/mint_batch_test.go
index 7803b4619..56311905a 100644
--- a/itest/loadtest/mint_batch_test.go
+++ b/itest/loadtest/mint_batch_test.go
@@ -6,7 +6,7 @@ import (
 	"encoding/binary"
 	"encoding/hex"
 	"fmt"
-	"strconv"
+	"math/rand"
 	"strings"
 	"testing"
 	"time"
@@ -17,6 +17,7 @@ import (
 	"github.com/lightninglabs/taproot-assets/taprpc"
 	"github.com/lightninglabs/taproot-assets/taprpc/mintrpc"
 	unirpc "github.com/lightninglabs/taproot-assets/taprpc/universerpc"
+	"github.com/lightninglabs/taproot-assets/universe"
 	"github.com/stretchr/testify/require"
 )
 
@@ -74,11 +75,15 @@ func mintBatchStressTest(t *testing.T, ctx context.Context,
 
 	var (
 		batchReqs      = make([]*mintrpc.MintAssetRequest, batchSize)
-		baseName       = "jpeg"
+		baseName       = fmt.Sprintf("jpeg-%d", rand.Int31())
 		metaPrefixSize = binary.MaxVarintLen16
 		metadataPrefix = make([]byte, metaPrefixSize)
 	)
 
+	// Before we mint a new group, let's first find out how many there
+	// already are.
+	initialGroups := itest.NumGroups(t, alice)
+
 	// Each asset in the batch will share a name and metdata preimage, that
 	// will be updated based on the asset's index in the batch.
 	collectibleRequestTemplate := mintrpc.MintAssetRequest{
@@ -95,9 +100,9 @@ func mintBatchStressTest(t *testing.T, ctx context.Context,
 	}
 
 	// Update the asset name and metadata to match an index.
-	incrementMintAsset := func(asset *mintrpc.MintAsset, ind int) {
-		asset.Name = asset.Name + strconv.Itoa(ind)
-		binary.PutUvarint(metadataPrefix, uint64(ind))
+	incrementMintAsset := func(asset *mintrpc.MintAsset, idx int) {
+		asset.Name = fmt.Sprintf("%s-%d", asset.Name, idx)
+		binary.PutUvarint(metadataPrefix, uint64(idx))
 		copy(asset.AssetMeta.Data[0:metaPrefixSize], metadataPrefix)
 	}
 
@@ -139,7 +144,7 @@ func mintBatchStressTest(t *testing.T, ctx context.Context,
 
 	// We should have one group, with the specified number of assets and an
 	// equivalent balance, since the group is made of collectibles.
-	groupCount := 1
+	groupCount := initialGroups + 1
 	groupBalance := batchSize
 
 	itest.AssertNumGroups(t, alice, groupCount)
@@ -154,9 +159,7 @@ func mintBatchStressTest(t *testing.T, ctx context.Context,
 	// The universe tree should reflect the same properties about the batch;
 	// there should be one root with a group key and balance matching what
 	// we asserted previously.
-	uniRoots, err := alice.AssetRoots(
-		ctx, &unirpc.AssetRootRequest{},
-	)
+	uniRoots, err := alice.AssetRoots(ctx, &unirpc.AssetRootRequest{})
 	require.NoError(t, err)
 	require.Len(t, uniRoots.UniverseRoots, groupCount)
 
@@ -194,7 +197,23 @@ func mintBatchStressTest(t *testing.T, ctx context.Context,
 			},
 		},
 	)
-	require.NoError(t, err)
+	if err != nil {
+		// Only fail the test for other errors than duplicate universe
+		// errors, as we might have already added the server in a
+		// previous run.
+		require.ErrorContains(
+			t, err, universe.ErrDuplicateUniverse.Error(),
+		)
+
+		// If we've already added the server in a previous run, we'll
+		// just need to kick off a sync (as that would otherwise be done
+		// by adding the server request already).
+		_, err := bob.SyncUniverse(ctx, &unirpc.SyncRequest{
+			UniverseHost: aliceHost,
+			SyncMode:     unirpc.UniverseSyncMode_SYNC_ISSUANCE_ONLY,
+		})
+		require.NoError(t, err)
+	}
 
 	require.Eventually(t, func() bool {
 		return itest.AssertUniverseStateEqual(