diff --git a/README.md b/README.md index ca14762bcb4..4d21bb37dac 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,8 @@ Code](https://img.shields.io/tokei/lines/github/osmosis-labs/osmosis?style=flat- Super-Linter](https://img.shields.io/github/workflow/status/osmosis-labs/osmosis/Lint?style=flat-square&label=Lint)](https://github.com/marketplace/actions/super-linter) [![Discord](https://badgen.net/badge/icon/discord?icon=discord&label)](https://discord.gg/osmosis) +**For Celatone Osmosis Indexer Node documentation, please refer to this [README.md](./celatone-docker/README.md)** + Osmosis is a fair-launched, customizable automated market maker for interchain assets that allows the creation and management of non-custodial, self-balancing, interchain token index similar to one of diff --git a/app/app.go b/app/app.go index 2265f129bda..328139414c9 100644 --- a/app/app.go +++ b/app/app.go @@ -45,6 +45,9 @@ import ( "github.com/cosmos/cosmos-sdk/x/crisis" upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + "github.com/osmosis-labs/osmosis/v15/hooks/common" + "github.com/osmosis-labs/osmosis/v15/hooks/emitter" + "github.com/osmosis-labs/osmosis/v15/app/keepers" "github.com/osmosis-labs/osmosis/v15/app/upgrades" v10 "github.com/osmosis-labs/osmosis/v15/app/upgrades/v10" @@ -138,6 +141,13 @@ type OsmosisApp struct { mm *module.Manager configurator module.Configurator + + // DeliverContext is set during InitGenesis/BeginBlock and cleared during Commit. + // It allows anyone to read/mutate Osmosis consensus state at anytime. + DeliverContext sdk.Context + + // List of hooks + hooks common.Hooks } // init sets DefaultNodeHome to default osmosisd install location. 
@@ -176,6 +186,7 @@ func NewOsmosisApp( loadLatest bool, skipUpgradeHeights map[int64]bool, homePath string, + withEmitter string, invCheckPeriod uint, appOpts servertypes.AppOptions, wasmEnabledProposals []wasm.ProposalType, @@ -301,6 +312,12 @@ func NewOsmosisApp( app.SetPostHandler(NewPostHandler(app.ProtoRevKeeper)) app.SetEndBlocker(app.EndBlocker) + // Initialize emitter hook and append to the app hooks. + app.hooks = make(common.Hooks, 0) + if withEmitter != "" { + app.hooks = append(app.hooks, emitter.NewHook(encodingConfig, app.AppKeepers, withEmitter)) + } + // Register snapshot extensions to enable state-sync for wasm. if manager := app.SnapshotManager(); manager != nil { err := manager.RegisterExtensions( @@ -343,12 +360,29 @@ func (app *OsmosisApp) Name() string { return app.BaseApp.Name() } // BeginBlocker application updates every begin block. func (app *OsmosisApp) BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock) abci.ResponseBeginBlock { BeginBlockForks(ctx, app) - return app.mm.BeginBlock(ctx, req) + app.DeliverContext = ctx + res := app.mm.BeginBlock(ctx, req) + cacheContext, _ := ctx.CacheContext() + app.hooks.AfterBeginBlock(cacheContext, req, res) + + return res } // EndBlocker application updates every end block. func (app *OsmosisApp) EndBlocker(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock { - return app.mm.EndBlock(ctx, req) + res := app.mm.EndBlock(ctx, req) + cacheContext, _ := ctx.CacheContext() + app.hooks.AfterEndBlock(cacheContext, req, res) + + return res +} + +// Commit overrides the default BaseApp's ABCI commit by adding DeliverContext clearing. +func (app *OsmosisApp) Commit() (res abci.ResponseCommit) { + app.hooks.BeforeCommit() + app.DeliverContext = sdk.Context{} + + return app.BaseApp.Commit() } // InitChainer application update at chain initialization. 
@@ -359,8 +393,20 @@ func (app *OsmosisApp) InitChainer(ctx sdk.Context, req abci.RequestInitChain) a } app.UpgradeKeeper.SetModuleVersionMap(ctx, app.mm.GetVersionMap()) + res := app.mm.InitGenesis(ctx, app.appCodec, genesisState) + cacheContext, _ := ctx.CacheContext() + app.hooks.AfterInitChain(cacheContext, req, res) + + return res +} + +// DeliverTx overwrite DeliverTx to apply the AfterDeliverTx hook. +func (app *OsmosisApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { + res := app.BaseApp.DeliverTx(req) + cacheCtx, _ := app.DeliverContext.CacheContext() + app.hooks.AfterDeliverTx(cacheCtx, req, res) - return app.mm.InitGenesis(ctx, app.appCodec, genesisState) + return res } // LoadHeight loads a particular height. diff --git a/app/config.go b/app/config.go index 6d271c9b836..9fad7503662 100644 --- a/app/config.go +++ b/app/config.go @@ -50,7 +50,7 @@ func DefaultConfig() network.Config { func NewAppConstructor() network.AppConstructor { return func(val network.Validator) servertypes.Application { return NewOsmosisApp( - val.Ctx.Logger, dbm.NewMemDB(), nil, true, make(map[int64]bool), val.Ctx.Config.RootDir, 0, + val.Ctx.Logger, dbm.NewMemDB(), nil, true, make(map[int64]bool), val.Ctx.Config.RootDir, "", 0, simapp.EmptyAppOptions{}, GetWasmEnabledProposals(), EmptyWasmOpts, diff --git a/app/test_helpers.go b/app/test_helpers.go index 331e7c15804..16b59eef989 100644 --- a/app/test_helpers.go +++ b/app/test_helpers.go @@ -29,7 +29,7 @@ func getDefaultGenesisStateBytes() []byte { // Setup initializes a new OsmosisApp. 
func Setup(isCheckTx bool) *OsmosisApp { db := dbm.NewMemDB() - app := NewOsmosisApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, DefaultNodeHome, 0, simapp.EmptyAppOptions{}, GetWasmEnabledProposals(), EmptyWasmOpts) + app := NewOsmosisApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, DefaultNodeHome, "", 0, simapp.EmptyAppOptions{}, GetWasmEnabledProposals(), EmptyWasmOpts) if !isCheckTx { stateBytes := getDefaultGenesisStateBytes() @@ -56,7 +56,7 @@ func SetupTestingAppWithLevelDb(isCheckTx bool) (app *OsmosisApp, cleanupFn func if err != nil { panic(err) } - app = NewOsmosisApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, DefaultNodeHome, 5, simapp.EmptyAppOptions{}, GetWasmEnabledProposals(), EmptyWasmOpts) + app = NewOsmosisApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, DefaultNodeHome, "", 5, simapp.EmptyAppOptions{}, GetWasmEnabledProposals(), EmptyWasmOpts) if !isCheckTx { genesisState := NewDefaultGenesisState() stateBytes, err := json.MarshalIndent(genesisState, "", " ") diff --git a/celatone-docker/Dockerfile b/celatone-docker/Dockerfile new file mode 100644 index 00000000000..c7fa4575d59 --- /dev/null +++ b/celatone-docker/Dockerfile @@ -0,0 +1,11 @@ +FROM golang:1.19-buster + +WORKDIR /chain +COPY . /chain + + +COPY ./celatone-docker/run.sh . + +RUN make install + +CMD osmosisd start --rpc.laddr tcp://0.0.0.0:26657 diff --git a/celatone-docker/README.md b/celatone-docker/README.md new file mode 100644 index 00000000000..f3bde7e735b --- /dev/null +++ b/celatone-docker/README.md @@ -0,0 +1,181 @@ +# Celatone Osmosis Indexer Node + +![Banner!](./banner.png) + +[![Telegram](https://badgen.net/badge/icon/telegram?icon=telegram&label)](https://t.me/celatone_announcements) + +[Osmosis](https://www.github.com/osmosis-labs/osmosis) indexer node implementation for Celatone, an open-source +explorer and CosmWasm development tool. 
+ +## Documentation + +For the most up-to-date Celatone documentation, please visit +[docs.celat.one](https://docs.celat.one/) and the Osmosis documentation at +[docs.osmosis.zone](https://docs.osmosis.zone/) + +The following sections below will guide anyone interested in running their own indexer and deploy a local version of +Celatone on their machine. + +## Indexing the LocalOsmosis network + +### Prerequisite + +The Celatone indexer node comes with a [celatone-docker](../celatone-docker) directory which enables fast and +easy deployment. + +For Ubuntu users, install docker via this script +```shell +cd && mkdir docker && cd docker +curl -fsSL https://get.docker.com -o get-docker.sh +sudo sh get-docker.sh +``` + +For macOS users, you can easily install Docker Desktop +via [this link](https://docs.docker.com/desktop/install/mac-install/). + +### Starting the service + +To start the service, simply run the following command +(Ubuntu users might need to add `sudo` at the beginning of the command if there is a permission denied error) + +```shell +cd +docker compose up -d --build +``` + +Please note that **flusher-daemon** error while starting the docker-compose service is expected. + +This command will launch the services specified in the [docker-compose](../docker-compose.yml) file, +which include: + +- **indexer_node** for indexing LocalOsmosis +- **postgres** for storing indexed data +- **faucet** an optional service for requesting additional tokens +- **graphql-engine** for hosting the Hasura GraphQL engine and server +- **zookeeper** provides an in-sync view of Kafka cluster, topics and messages +- **kafka** acts as a message queue for the indexer node +- **flusher-init** runs a single command to create required tables and views in the Postgres database +- **flusher-daemon** processes messages from Kafka and flushes them into the database +- **proxy-ssl-server** hosts a reverse proxy server for the node RPC, LCD and the Hasura server. 
All components are required to connect with [celatone-frontend](https://www.github.com/alleslabs/celatone-frontend) + +### Tracking the database + +Now that all docker-compose services are up and running, +database tracking is still required to allow data querying via Hasura GraphQL. + +1. Go to Hasura Console by visiting [this link](http://localhost/hasura). The webpage should look something like this. + +![Hasura Console](docs/hasura-1.png) + +2. Click on the `DATA` button at the top of the page. + +![Hasura Data Page](docs/hasura-2.png) + +3. The available schemas will be shown at the top left of the page. Select the `default` database and the `public` schema. + +![Public Schema](docs/hasura-4.png) + +4. Click on `Track All` to track all tables and views available. A confirmation prompt may appear, select `Ok` to confirm. + +![Track All Tables](docs/hasura-3.png) + +5. The page will reload with all tables tracked. Click `Track All` again, but this time for all foreign-key relationships. + +![Track All Relationships](docs/hasura-5.png) + +6. Finally, we can go back to the `API` page and try some example queries. +Using the subscription script below, new blocks should come up in real time. + +![Hasura API Page](docs/hasura-7.png) + +```graphql +subscription LatestBlock { + blocks(limit: 1, order_by: {height: desc}) { + height + hash + proposer + timestamp + } +} +``` + +![Example Query](docs/hasura-6.png) + +## Further Customization + +Completing the steps above would get a vanilla LocalOsmosis indexer up and running. +However, for anyone looking to customize the chain itself and add more indexer support, +the following sections below outline the essential components and actions needed to customize the Celatone indexer. + +## Core Components + +### Hooks + +Celatone indexer hooks basically process events occurring in the chain and flush them into the message queue. +This implementation modifies the original Osmosis implementation by adding our own hooks and adapters.
+A hook is a customizable interface that can be processed along with the ABCI application of the Osmosis app, while an +adapter is a component inside the hook that defines how each hook-supported module should behave in the ABCI process. +Usually, the scope of each adapter is defined by keepers required to complete the tasks. + +You can find the implementation of Hooks and Adapters implementation in the [hooks](../hooks) directory. + +To use hooks and adapters in the Osmosis app, we need to modify the [app.go](../app/app.go) file. +Start by adding the hooks to the OsmosisApp struct itself and initialize a new hook into the app hooks. +Then, specify when to call the hook functions in the ABCI lifecycle according to the +hook interface, as seen in the `BeginBlocker`, `EndBlocker`, `Commit`, `InitChainer` and `DeliverTx` functions of the +OsmosisApp. + +### Flusher + +The Flusher is a simple Python service that consumes messages from a message queue +and loads them into the Postgres database. +The schema for the database is also specified in the [db.py](../flusher/flusher/db.py). +Any changes to the existing database schema, whether it is additional tables or relationships, need to be specified +in this file. + +Message consuming is implemented in the [sync.py](../flusher/flusher/sync.py), where the Kafka topic is fetched from the +database created by calling `init` function in [init.py](../flusher/flusher/init.py). +A message from the topic consists of a key and a value, where the key is used to determine the appropriate handler +function for the value. + +[Handler.py](../flusher/flusher/handler.py) is where most of the actions happen. +Logic for extracting, transforming and loading the data consumed from the message queue is implemented here. + +For instructions on how to set up the Flusher, please refer to the [README.md](../flusher/README.md) file. + +### Hasura + +Hasura GraphQL Engine enables accessing Postgres data over a secured GraphQL API. 
+This API is required for running the [celatone-frontend](https://www.github.com/alleslabs/celatone-frontend). + +For more information about Hasura, refer to the [official documentation](https://hasura.io/docs/latest/index/). + +## Adding New Modules + +The steps below summarize how to add new functionality to the existing hook. +Let's say we would like to add indexing support for the `x/celatone` module and a new adapter is required +for such case. + +1. **Flusher** + 1. Determine how you would like to store the new data and configure it in [db.py](../flusher/flusher/db.py). + 2. Implement the data transforming logic from the message queue into the database in + [handle.py](../flusher/flusher/handler.py). + Make sure to use the newly created handle keys in the indexer node implementation. + +2. **Indexer Node** + 1. Create a new `celatone.go` adapter file for the `x/celatone` module inside the [emitter](../hooks/emitter) + directory. + 2. Inside the created file, handle the events accordingly during the `AfterInitChain`, `AfterBeginBlock`, + `PreDeliverTx`, `CheckMsg`, `HandleMsgEvents`, `PostDeliverTx` and `AfterEndBlock` phases of the hook adapter. + 3. Append the created adapter to the list of adapters inside the hook returned by the `NewHook` function + in [emitter.go](../hooks/emitter/emitter.go). + This `NewHook` function would eventually be called in [app.go](../app/app.go) and assign the updated hook to the + OsmosisApp. + +3. **Hasura** + 1. Do not forget to track the new tables and relationships via the Hasura Console. + +These are the general steps you need to follow to customize the Celatone Osmosis Indexer Node and add support for new +modules. +Make sure to refer to the specific files and directories mentioned in the steps for more detailed information +on each component and how to implement each customization. 
diff --git a/celatone-docker/banner.png b/celatone-docker/banner.png new file mode 100644 index 00000000000..9c30f2b0699 Binary files /dev/null and b/celatone-docker/banner.png differ diff --git a/celatone-docker/config/app.toml b/celatone-docker/config/app.toml new file mode 100644 index 00000000000..1dce121d17c --- /dev/null +++ b/celatone-docker/config/app.toml @@ -0,0 +1,152 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +############################################################################### +### Base Configuration ### +############################################################################### + +# The minimum gas prices a validator is willing to accept for processing a +# transaction. A transaction's fees must meet the minimum of any denomination +# specified in this config (e.g. 0.25token1;0.0001token2). +minimum-gas-prices = "0.025uosmo" + +# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals +# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) +# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals +# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' +pruning = "default" + +# These are applied if and only if the pruning strategy is custom. +pruning-keep-recent = "0" +pruning-keep-every = "0" +pruning-interval = "0" + +# HaltHeight contains a non-zero block height at which a node will gracefully +# halt and shutdown that can be used to assist upgrades and testing. +# +# Note: Commitment of state will be attempted on the corresponding block. +halt-height = 0 + +# HaltTime contains a non-zero minimum block time (in Unix seconds) at which +# a node will gracefully halt and shutdown that can be used to assist upgrades +# and testing. 
+# +# Note: Commitment of state will be attempted on the corresponding block. +halt-time = 0 + +# MinRetainBlocks defines the minimum block height offset from the current +# block being committed, such that all blocks past this offset are pruned +# from Tendermint. It is used as part of the process of determining the +# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates +# that no blocks should be pruned. +# +# This configuration value is only responsible for pruning Tendermint blocks. +# It has no bearing on application state pruning which is determined by the +# "pruning-*" configurations. +# +# Note: Tendermint block pruning is dependent on this parameter in conjunction +# with the unbonding (safety threshold) period, state pruning and state sync +# snapshot parameters to determine the correct minimum value of +# ResponseCommit.RetainHeight. +min-retain-blocks = 0 + +# InterBlockCache enables inter-block caching. +inter-block-cache = true + +# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, +# which informs Tendermint what to index. If empty, all events will be indexed. +# +# Example: +# ["message.sender", "message.recipient"] +index-events = [] + +############################################################################### +### Telemetry Configuration ### +############################################################################### + +[telemetry] + +# Prefixed with keys to separate services. +service-name = "" + +# Enabled enables the application telemetry functionality. When enabled, +# an in-memory sink is also enabled by default. Operators may also enable +# other sinks such as Prometheus. +enabled = false + +# Enable prefixing gauge values with hostname. +enable-hostname = false + +# Enable adding hostname to labels. +enable-hostname-label = false + +# Enable adding service to labels. +enable-service-label = false + +# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink.
+prometheus-retention-time = 0 + +# GlobalLabels defines a global set of name/value label tuples applied to all +# metrics emitted using the wrapper functions defined in telemetry package. +# +# Example: +# [["chain_id", "cosmoshub-1"]] +global-labels = [ +] + +############################################################################### +### API Configuration ### +############################################################################### + +[api] + +# Enable defines if the API server should be enabled. +enable = true + +# Swagger defines if swagger documentation should automatically be registered. +swagger = false + +# Address defines the API server to listen on. +address = "tcp://0.0.0.0:1317" + +# MaxOpenConnections defines the number of maximum open connections. +max-open-connections = 1000 + +# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). +rpc-read-timeout = 10 + +# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). +rpc-write-timeout = 0 + +# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). +rpc-max-body-bytes = 1000000 + +# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). +enabled-unsafe-cors = false + +############################################################################### +### gRPC Configuration ### +############################################################################### + +[grpc] + +# Enable defines if the gRPC server should be enabled. +enable = true + +# Address defines the gRPC server address to bind to. +address = "0.0.0.0:9090" + +############################################################################### +### State Sync Configuration ### +############################################################################### + +# State sync snapshots allow other nodes to rapidly join the network without replaying historical +# blocks, instead downloading and applying a snapshot of the application state at a given height. 
+[state-sync] + +# snapshot-interval specifies the block interval at which local state sync snapshots are +# taken (0 to disable). Must be a multiple of pruning-keep-every. +snapshot-interval = 0 + +# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). +snapshot-keep-recent = 2 diff --git a/celatone-docker/config/config.toml b/celatone-docker/config/config.toml new file mode 100644 index 00000000000..c13ad173c51 --- /dev/null +++ b/celatone-docker/config/config.toml @@ -0,0 +1,466 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or +# relative to the home directory (e.g. "data"). The home directory is +# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable +# or --home cmd flag. + +####################################################################### +### Main Base Config Options ### +####################################################################### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the Tendermint binary +proxy_app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "celatone-indexer" + +# If this node is many blocks behind the tip of the chain, FastSync +# allows them to catch up quickly by downloading blocks in parallel +# and verifying their commits +fast_sync = true + +# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb +# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) +# - pure go +# - stable +# * cleveldb (uses levigo wrapper) +# - fast +# - requires gcc +# - use cleveldb build tag (go build -tags cleveldb) +# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) +# - EXPERIMENTAL +# - may be faster in some use-cases (random reads - indexer) +# - use boltdb build tag (go build -tags boltdb) +# * rocksdb
(uses github.com/tecbot/gorocksdb) +# - EXPERIMENTAL +# - requires gcc +# - use rocksdb build tag (go build -tags rocksdb) +# * badgerdb (uses github.com/dgraph-io/badger) +# - EXPERIMENTAL +# - use badgerdb build tag (go build -tags badgerdb) +db_backend = "goleveldb" + +# Database directory +db_dir = "data" + +# Output level for logging, including package level options +log_level = "info" + +# Output format: 'plain' (colored text) or 'json' +log_format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_key_file = "config/priv_validator_key.json" + +# Path to the JSON file containing the last sign state of a validator +priv_validator_state_file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for Tendermint to listen on for +# connections from an external PrivValidator process +priv_validator_laddr = "" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + + +####################################################################### +### Advanced Configuration Options ### +####################################################################### + +####################################################### +### RPC Server Configuration Options ### +####################################################### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://127.0.0.1:26657" + +# A list of origins a cross-domain request can be executed from +# Default value '[]' 
disables cors support +# Use '["*"]' to allow any origin +cors_allowed_origins = [] + +# A list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = ["HEAD", "GET", "POST", ] + +# A list of non simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Maximum number of simultaneous connections. +# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +grpc_max_open_connections = 900 + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 + +# Maximum number of unique clientIDs that can /subscribe +# If you're using /broadcast_tx_commit, set to the estimated maximum number +# of broadcast_tx_commit calls per block. +max_subscription_clients = 100 + +# Maximum number of unique queries a given client can /subscribe to +# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to +# the estimated # maximum number of broadcast_tx_commit calls per block. 
+max_subscriptions_per_client = 5 + +# Experimental parameter to specify the maximum number of events a node will +# buffer, per subscription, before returning an error and closing the +# subscription. Must be set to at least 100, but higher values will accommodate +# higher event throughput rates (and will use more memory). +experimental_subscription_buffer_size = 200 + +# Experimental parameter to specify the maximum number of RPC responses that +# can be buffered per WebSocket client. If clients cannot read from the +# WebSocket endpoint fast enough, they will be disconnected, so increasing this +# parameter may reduce the chances of them being disconnected (but will cause +# the node to use more memory). +# +# Must be at least the same as "experimental_subscription_buffer_size", +# otherwise connections could be dropped unnecessarily. This value should +# ideally be somewhat higher than "experimental_subscription_buffer_size" to +# accommodate non-subscription-related RPC responses. +experimental_websocket_write_buffer_size = 200 + +# If a WebSocket client cannot read fast enough, at present we may +# silently drop events instead of generating an error or disconnecting the +# client. +# +# Enabling this experimental parameter will cause the WebSocket connection to +# be closed instead if it cannot read fast enough, allowing for greater +# predictability in subscription behaviour. +experimental_close_on_slow_client = false + +# How long to wait for a tx to be committed during /broadcast_tx_commit. +# WARNING: Using a value larger than 10s will result in increasing the +# global HTTP write timeout, which applies to all connections and endpoints. 
+# See https://github.com/tendermint/tendermint/issues/3435 +timeout_broadcast_tx_commit = "10s" + +# Maximum size of request body, in bytes +max_body_bytes = 1000000 + +# Maximum size of request header, in bytes +max_header_bytes = 1048576 + +# The path to a file containing certificate that is used to create the HTTPS server. +# Might be either absolute path or path related to Tendermint's config directory. +# If the certificate is signed by a certificate authority, +# the certFile should be the concatenation of the server's certificate, any intermediates, +# and the CA's certificate. +# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. +# Otherwise, HTTP server is run. +tls_cert_file = "" + +# The path to a file containing matching private key that is used to create the HTTPS server. +# Might be either absolute path or path related to Tendermint's config directory. +# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. +# Otherwise, HTTP server is run. +tls_key_file = "" + +# pprof listen address (https://golang.org/pkg/net/http/pprof) +pprof_laddr = "localhost:6060" + +####################################################### +### P2P Configuration Options ### +####################################################### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial +# If empty, will use the same port as the laddr, +# and will introspect on the listener or use UPnP +# to figure out the address. 
ip and port are required +# example: 159.89.10.97:26656 +external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# UPNP port forwarding +upnp = false + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 80 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 60 + +# List of node IDs, to which a connection will be (re)established ignoring any existing limits +unconditional_peer_ids = "" + +# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +persistent_peers_max_dial_period = "0s" + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = false + +# Peer connection configuration. 
+handshake_timeout = "20s" +dial_timeout = "3s" + +####################################################### +### Mempool Configuration Option ### +####################################################### +[mempool] + +# Mempool version to use: +# 1) "v0" - (default) FIFO mempool. +# 2) "v1" - prioritized mempool. +version = "v0" + +recheck = true +broadcast = true +wal_dir = "" + +# Maximum number of transactions in the mempool +size = 10000 + +# Limit the total size of all txs in the mempool. +# This only accounts for raw transactions (e.g. given 1MB transactions and +# max_txs_bytes=5MB, mempool will only accept 5 transactions). +max_txs_bytes = 1073741824 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache_size = 10000 + +# Do not remove invalid transactions from the cache (default: false) +# Set to true if it's not possible for any invalid transaction to become valid +# again in the future. +keep-invalid-txs-in-cache = false + +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. +max_tx_bytes = 1048576 + +# Maximum size of a batch of transactions to send to a peer +# Including space needed by encoding (one varint per transaction). +# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 +max_batch_bytes = 0 + +# ttl-duration, if non-zero, defines the maximum amount of time a transaction +# can exist for in the mempool. +# +# Note, if ttl-num-blocks is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if it's +# insertion time into the mempool is beyond ttl-duration. +ttl-duration = "0s" + +# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction +# can exist for in the mempool. 
+# +# Note, if ttl-duration is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if +# it's insertion time into the mempool is beyond ttl-duration. +ttl-num-blocks = 0 + +####################################################### +### State Sync Configuration Options ### +####################################################### +[statesync] +# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine +# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in +# the network to take and serve state machine snapshots. State sync is not attempted if the node +# has any local state (LastBlockHeight > 0). The node will have a truncated block history, +# starting from the height of the snapshot. +enable = false + +# RPC servers (comma-separated) for light client verification of the synced state machine and +# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding +# header hash obtained from a trusted source, and a period during which validators can be trusted. +# +# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 +# weeks) during which they can be financially punished (slashed) for misbehavior. +rpc_servers = "" +trust_height = 0 +trust_hash = "" +trust_period = "112h0m0s" + +# Time to spend discovering snapshots before initiating a restore. +discovery_time = "15s" + +# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). +# Will create a new, randomly named directory within, and remove it when done. +temp_dir = "" + +# The timeout duration before re-requesting a chunk, possibly from a different +# peer (default: 1 minute). +chunk_request_timeout = "10s" + +# The number of concurrent chunk fetchers to run (default: 1). 
+chunk_fetchers = "4" + +####################################################### +### Fast Sync Configuration Connections ### +####################################################### +[fastsync] + +# Fast Sync version to use: +# 1) "v0" (default) - the legacy fast sync implementation +# 2) "v1" - refactor of v0 version for better testability +# 2) "v2" - complete redesign of v0, optimized for testability & readability +version = "v0" + +####################################################### +### Consensus Configuration Options ### +####################################################### +[consensus] + +wal_file = "data/cs.wal/wal" + +# How long we wait for a proposal block before prevoting nil +timeout_propose = "3s" +# How much timeout_propose increases with each round +timeout_propose_delta = "500ms" +# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) +timeout_prevote = "1s" +# How much the timeout_prevote increases with each round +timeout_prevote_delta = "500ms" +# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) +timeout_precommit = "1s" +# How much the timeout_precommit increases with each round +timeout_precommit_delta = "500ms" +# How long we wait after committing a block, before starting on the new +# height (this gives us a chance to receive some more precommits, even +# though we already have +2/3). +timeout_commit = "5s" + +# How many blocks to look back to check existence of the node's consensus votes before joining consensus +# When non-zero, the node will panic upon restart +# if the same consensus key was used to sign {double_sign_check_height} last blocks. +# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
+double_sign_check_height = 0 + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# EmptyBlocks mode and possible interval between empty blocks +create_empty_blocks = true +create_empty_blocks_interval = "0s" + +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_query_maj23_sleep_duration = "2s" + +####################################################### +### Storage Configuration Options ### +####################################################### +[storage] + +# Set to true to discard ABCI responses from the state store, which can save a +# considerable amount of disk space. Set to false to ensure ABCI responses are +# persisted. ABCI responses are required for /block_results RPC queries, and to +# reindex events in the command-line tool. +discard_abci_responses = false + +####################################################### +### Transaction Indexer Configuration Options ### +####################################################### +[tx_index] + +# What indexer to use for transactions +# +# The application will set which txs to index. In some cases a node operator will be able +# to decide which txs to index based on configuration set in the application. +# +# Options: +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. +# 3) "psql" - the indexer services backed by PostgreSQL. +# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. +indexer = "kv" + +# The PostgreSQL connection configuration, the connection format: +# postgresql://:@:/? 
+psql-conn = "" + +####################################################### +### Instrumentation Configuration Options ### +####################################################### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. +prometheus = false + +# Address to listen for Prometheus collector(s) connections +prometheus_listen_addr = ":26660" + +# Maximum number of simultaneous connections. +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +max_open_connections = 3 + +# Instrumentation namespace +namespace = "tendermint" diff --git a/celatone-docker/config/genesis.json b/celatone-docker/config/genesis.json new file mode 100644 index 00000000000..84749061971 --- /dev/null +++ b/celatone-docker/config/genesis.json @@ -0,0 +1,625 @@ +{ + "genesis_time": "2023-06-16T08:48:17.702937Z", + "chain_id": "localosmosis", + "initial_height": "1", + "consensus_params": { + "block": { + "max_bytes": "22020096", + "max_gas": "-1", + "time_iota_ms": "1000" + }, + "evidence": { + "max_age_num_blocks": "100000", + "max_age_duration": "172800000000000", + "max_bytes": "1048576" + }, + "validator": { + "pub_key_types": [ + "ed25519" + ] + }, + "version": {} + }, + "app_hash": "", + "app_state": { + "auth": { + "params": { + "max_memo_characters": "256", + "tx_sig_limit": "7", + "tx_size_cost_per_byte": "10", + "sig_verify_cost_ed25519": "590", + "sig_verify_cost_secp256k1": "1000" + }, + "accounts": [ + { + "@type": "/cosmos.auth.v1beta1.BaseAccount", + "address": "osmo1r8u4m0wefrdyp7sdde7drtrt94qczggka95qdv", + "pub_key": null, + "account_number": "0", + "sequence": "0" + }, + { + "@type": "/cosmos.auth.v1beta1.BaseAccount", + "address": "osmo15d4apf20449ajvwycq8ruaypt7v6d345z36n9l", + "pub_key": null, + "account_number": "0", + "sequence": "0" + }, + { + "@type": 
"/cosmos.auth.v1beta1.BaseAccount", + "address": "osmo153qunx95ehzq8srwuw953uvlz4465k8aywe7yw", + "pub_key": null, + "account_number": "0", + "sequence": "0" + } + ] + }, + "authz": { + "authorization": [] + }, + "bank": { + "params": { + "send_enabled": [], + "default_send_enabled": true + }, + "balances": [ + { + "address": "osmo1r8u4m0wefrdyp7sdde7drtrt94qczggka95qdv", + "coins": [ + { + "denom": "uosmo", + "amount": "10000000000000" + } + ] + }, + { + "address": "osmo15d4apf20449ajvwycq8ruaypt7v6d345z36n9l", + "coins": [ + { + "denom": "uosmo", + "amount": "10000000000000" + } + ] + }, + { + "address": "osmo153qunx95ehzq8srwuw953uvlz4465k8aywe7yw", + "coins": [ + { + "denom": "uosmo", + "amount": "1" + } + ] + } + ], + "supply": [], + "denom_metadata": [], + "supply_offsets": [] + }, + "capability": { + "index": "1", + "owners": [] + }, + "crisis": { + "constant_fee": { + "denom": "uosmo", + "amount": "1000" + } + }, + "distribution": { + "params": { + "community_tax": "0.020000000000000000", + "base_proposer_reward": "0.010000000000000000", + "bonus_proposer_reward": "0.040000000000000000", + "withdraw_addr_enabled": true + }, + "fee_pool": { + "community_pool": [] + }, + "delegator_withdraw_infos": [], + "previous_proposer": "", + "outstanding_rewards": [], + "validator_accumulated_commissions": [], + "validator_historical_rewards": [], + "validator_current_rewards": [], + "delegator_starting_infos": [], + "validator_slash_events": [] + }, + "downtimedetector": { + "downtimes": [ + { + "duration": "DURATION_30S", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_1M", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_2M", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_3M", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_4M", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_5M", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + 
"duration": "DURATION_10M", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_20M", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_30M", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_40M", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_50M", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_1H", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_1_5H", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_2H", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_2_5H", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_3H", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_4H", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_5H", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_6H", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_9H", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_12H", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_18H", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_24H", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_36H", + "last_downtime": "1970-01-01T00:00:00Z" + }, + { + "duration": "DURATION_48H", + "last_downtime": "1970-01-01T00:00:00Z" + } + ], + "last_block_time": "1970-01-01T00:00:00Z" + }, + "epochs": { + "epochs": [ + { + "identifier": "day", + "start_time": "0001-01-01T00:00:00Z", + "duration": "86400s", + "current_epoch": "0", + "current_epoch_start_time": "0001-01-01T00:00:00Z", + "epoch_counting_started": false, + "current_epoch_start_height": "0" + }, + { + "identifier": "hour", + "start_time": "0001-01-01T00:00:00Z", + "duration": "3600s", + "current_epoch": "0", + "current_epoch_start_time": 
"0001-01-01T00:00:00Z", + "epoch_counting_started": false, + "current_epoch_start_height": "0" + }, + { + "identifier": "week", + "start_time": "0001-01-01T00:00:00Z", + "duration": "604800s", + "current_epoch": "0", + "current_epoch_start_time": "0001-01-01T00:00:00Z", + "epoch_counting_started": false, + "current_epoch_start_height": "0" + } + ] + }, + "evidence": { + "evidence": [] + }, + "gamm": { + "pools": [], + "next_pool_number": "1", + "params": { + "pool_creation_fee": [ + { + "denom": "uosmo", + "amount": "1000000000" + } + ] + } + }, + "genutil": { + "gen_txs": [ + { + "body": { + "messages": [ + { + "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", + "description": { + "moniker": "celatone-indexer", + "identity": "", + "website": "", + "security_contact": "", + "details": "" + }, + "commission": { + "rate": "0.100000000000000000", + "max_rate": "0.200000000000000000", + "max_change_rate": "0.010000000000000000" + }, + "min_self_delegation": "1", + "delegator_address": "osmo1r8u4m0wefrdyp7sdde7drtrt94qczggka95qdv", + "validator_address": "osmovaloper1r8u4m0wefrdyp7sdde7drtrt94qczggk8jur6t", + "pubkey": { + "@type": "/cosmos.crypto.ed25519.PubKey", + "key": "Wnigtqzsjnmwy/wSsOKOrC2QFkhmHta2D29g3+Kyz0Y=" + }, + "value": { + "denom": "uosmo", + "amount": "1000000000" + } + } + ], + "memo": "6c5c77ae4bffe807c1d7e504516f39fcc09ca94f@192.168.1.92:26656", + "timeout_height": "0", + "extension_options": [], + "non_critical_extension_options": [] + }, + "auth_info": { + "signer_infos": [ + { + "public_key": { + "@type": "/cosmos.crypto.secp256k1.PubKey", + "key": "A7oCgVpleYqAv151VjJTE5A7fpVecrAtH+fz6Xc+Qqjm" + }, + "mode_info": { + "single": { + "mode": "SIGN_MODE_DIRECT" + } + }, + "sequence": "0" + } + ], + "fee": { + "amount": [], + "gas_limit": "350000", + "payer": "", + "granter": "" + } + }, + "signatures": [ + "uprRlCdXpYm93i3PcGhOyJ/1y2R5dcnpRg/CHn8/XNIcj+yK8rFPXs0OV3XlkHryz1eUGg8z6/CUNqhGxAechQ==" + ] + } + ] + }, + "gov": { + 
"starting_proposal_id": "1", + "deposits": [], + "votes": [], + "proposals": [], + "deposit_params": { + "min_deposit": [ + { + "denom": "uosmo", + "amount": "1000000" + } + ], + "max_deposit_period": "172800s", + "min_expedited_deposit": [ + { + "denom": "uosmo", + "amount": "50000000" + } + ], + "min_initial_deposit_ratio": "0.000000000000000000" + }, + "voting_params": { + "voting_period": "120s", + "proposal_voting_periods": [], + "expedited_voting_period": "120s" + }, + "tally_params": { + "quorum": "0.334000000000000000", + "threshold": "0.500000000000000000", + "veto_threshold": "0.334000000000000000", + "expedited_threshold": "0.667000000000000000" + } + }, + "ibc": { + "client_genesis": { + "clients": [], + "clients_consensus": [], + "clients_metadata": [], + "params": { + "allowed_clients": [ + "06-solomachine", + "07-tendermint" + ] + }, + "create_localhost": false, + "next_client_sequence": "0" + }, + "connection_genesis": { + "connections": [], + "client_connection_paths": [], + "next_connection_sequence": "0", + "params": { + "max_expected_time_per_block": "30000000000" + } + }, + "channel_genesis": { + "channels": [], + "acknowledgements": [], + "commitments": [], + "receipts": [], + "send_sequences": [], + "recv_sequences": [], + "ack_sequences": [], + "next_channel_sequence": "0" + } + }, + "ibchooks": {}, + "incentives": { + "params": { + "distr_epoch_identifier": "week" + }, + "gauges": [], + "lockable_durations": [ + "1s", + "3600s", + "10800s", + "25200s" + ], + "last_gauge_id": "0" + }, + "interchainaccounts": { + "controller_genesis_state": { + "active_channels": [], + "interchain_accounts": [], + "ports": [], + "params": { + "controller_enabled": true + } + }, + "host_genesis_state": { + "active_channels": [], + "interchain_accounts": [], + "port": "icahost", + "params": { + "host_enabled": true, + "allow_messages": [] + } + } + }, + "interchainquery": { + "host_port": "icqhost", + "params": { + "host_enabled": true, + "allow_queries": [] + 
} + }, + "lockup": { + "last_lock_id": "0", + "locks": [], + "synthetic_locks": [] + }, + "mint": { + "minter": { + "epoch_provisions": "0.000000000000000000" + }, + "params": { + "mint_denom": "uosmo", + "genesis_epoch_provisions": "5000000.000000000000000000", + "epoch_identifier": "week", + "reduction_period_in_epochs": "156", + "reduction_factor": "0.500000000000000000", + "distribution_proportions": { + "staking": "0.400000000000000000", + "pool_incentives": "0.300000000000000000", + "developer_rewards": "0.200000000000000000", + "community_pool": "0.100000000000000000" + }, + "weighted_developer_rewards_receivers": [], + "minting_rewards_distribution_start_epoch": "0" + }, + "reduction_started_epoch": "0" + }, + "packetfowardmiddleware": { + "params": { + "fee_percentage": "0.000000000000000000" + }, + "in_flight_packets": {} + }, + "params": null, + "poolincentives": { + "params": { + "minted_denom": "uosmo" + }, + "lockable_durations": [ + "3600s", + "10800s", + "25200s" + ], + "distr_info": { + "total_weight": "0", + "records": [] + }, + "pool_to_gauges": null + }, + "poolmanager": { + "next_pool_id": "1", + "params": { + "pool_creation_fee": [ + { + "denom": "uosmo", + "amount": "1000000000" + } + ] + }, + "pool_routes": [] + }, + "protorev": { + "params": { + "enabled": true, + "admin": "osmo17nv67dvc7f8yr00rhgxd688gcn9t9wvhn783z4" + }, + "token_pair_arb_routes": [], + "base_denoms": [ + { + "denom": "uosmo", + "step_size": "1000000" + } + ], + "pool_weights": { + "stable_weight": "5", + "balancer_weight": "2", + "concentrated_weight": "2" + }, + "days_since_module_genesis": "0", + "developer_fees": [], + "latest_block_height": "0", + "developer_address": "", + "max_pool_points_per_block": "100", + "max_pool_points_per_tx": "18", + "point_count_for_block": "0" + }, + "rate-limited-ibc": { + "params": { + "contract_address": "" + } + }, + "slashing": { + "params": { + "signed_blocks_window": "100", + "min_signed_per_window": "0.500000000000000000", + 
"downtime_jail_duration": "600s", + "slash_fraction_double_sign": "0.050000000000000000", + "slash_fraction_downtime": "0.010000000000000000" + }, + "signing_infos": [], + "missed_blocks": [] + }, + "staking": { + "params": { + "unbonding_time": "1814400s", + "max_validators": 100, + "max_entries": 7, + "historical_entries": 10000, + "bond_denom": "uosmo", + "min_commission_rate": "0.000000000000000000", + "min_self_delegation": "0" + }, + "last_total_power": "0", + "last_validator_powers": [], + "validators": [], + "delegations": [], + "unbonding_delegations": [], + "redelegations": [], + "exported": false + }, + "superfluid": { + "params": { + "minimum_risk_factor": "0.500000000000000000" + }, + "superfluid_assets": [], + "osmo_equivalent_multipliers": [], + "intermediary_accounts": [], + "intemediary_account_connections": [] + }, + "tokenfactory": { + "params": { + "denom_creation_fee": [ + { + "denom": "uosmo", + "amount": "10000000" + } + ] + }, + "factory_denoms": [] + }, + "transfer": { + "port_id": "transfer", + "denom_traces": [], + "params": { + "send_enabled": true, + "receive_enabled": true + } + }, + "twap": { + "twaps": [], + "params": { + "prune_epoch_identifier": "day", + "record_history_keep_period": "172800s" + } + }, + "txfees": { + "basedenom": "uosmo", + "feetokens": [] + }, + "upgrade": {}, + "valsetpref": null, + "vesting": {}, + "wasm": { + "params": { + "code_upload_access": { + "permission": "Nobody", + "address": "", + "addresses": [] + }, + "instantiate_default_permission": "Everybody" + }, + "codes": [], + "contracts": [], + "sequences": [], + "gen_msgs": [] + } + } +} \ No newline at end of file diff --git a/celatone-docker/docs/hasura-1.png b/celatone-docker/docs/hasura-1.png new file mode 100644 index 00000000000..8a1b7abe86b Binary files /dev/null and b/celatone-docker/docs/hasura-1.png differ diff --git a/celatone-docker/docs/hasura-2.png b/celatone-docker/docs/hasura-2.png new file mode 100644 index 00000000000..2fbe9dab983 Binary 
files /dev/null and b/celatone-docker/docs/hasura-2.png differ diff --git a/celatone-docker/docs/hasura-3.png b/celatone-docker/docs/hasura-3.png new file mode 100644 index 00000000000..d13cfd9f24f Binary files /dev/null and b/celatone-docker/docs/hasura-3.png differ diff --git a/celatone-docker/docs/hasura-4.png b/celatone-docker/docs/hasura-4.png new file mode 100644 index 00000000000..d1e24e130f0 Binary files /dev/null and b/celatone-docker/docs/hasura-4.png differ diff --git a/celatone-docker/docs/hasura-5.png b/celatone-docker/docs/hasura-5.png new file mode 100644 index 00000000000..8959ccf1924 Binary files /dev/null and b/celatone-docker/docs/hasura-5.png differ diff --git a/celatone-docker/docs/hasura-6.png b/celatone-docker/docs/hasura-6.png new file mode 100644 index 00000000000..d0d660dd260 Binary files /dev/null and b/celatone-docker/docs/hasura-6.png differ diff --git a/celatone-docker/docs/hasura-7.png b/celatone-docker/docs/hasura-7.png new file mode 100644 index 00000000000..218c719c2dd Binary files /dev/null and b/celatone-docker/docs/hasura-7.png differ diff --git a/celatone-docker/hasura-metadata/graphql_schema_introspection.json b/celatone-docker/hasura-metadata/graphql_schema_introspection.json new file mode 100644 index 00000000000..ddc3b697961 --- /dev/null +++ b/celatone-docker/hasura-metadata/graphql_schema_introspection.json @@ -0,0 +1,923 @@ +{ + "version": 2, + "tables": [ + { + "table": { + "schema": "public", + "name": "account_transactions" + }, + "object_relationships": [ + { + "name": "account", + "using": { + "foreign_key_constraint_on": "account_id" + } + }, + { + "name": "block", + "using": { + "foreign_key_constraint_on": "block_height" + } + }, + { + "name": "transaction", + "using": { + "foreign_key_constraint_on": "transaction_id" + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "accounts" + }, + "object_relationships": [ + { + "name": "validator", + "using": { + "manual_configuration": { + "remote_table": { 
+ "schema": "public", + "name": "validators" + }, + "column_mapping": { + "id": "account_id" + } + } + } + } + ], + "array_relationships": [ + { + "name": "account_transactions", + "using": { + "foreign_key_constraint_on": { + "column": "account_id", + "table": { + "schema": "public", + "name": "account_transactions" + } + } + } + }, + { + "name": "codes", + "using": { + "foreign_key_constraint_on": { + "column": "uploader", + "table": { + "schema": "public", + "name": "codes" + } + } + } + }, + { + "name": "contract_histories", + "using": { + "foreign_key_constraint_on": { + "column": "sender", + "table": { + "schema": "public", + "name": "contract_histories" + } + } + } + }, + { + "name": "contracts", + "using": { + "foreign_key_constraint_on": { + "column": "admin", + "table": { + "schema": "public", + "name": "contracts" + } + } + } + }, + { + "name": "contractsByInitBy", + "using": { + "foreign_key_constraint_on": { + "column": "init_by", + "table": { + "schema": "public", + "name": "contracts" + } + } + } + }, + { + "name": "pools", + "using": { + "foreign_key_constraint_on": { + "column": "creator", + "table": { + "schema": "public", + "name": "pools" + } + } + } + }, + { + "name": "proposals", + "using": { + "foreign_key_constraint_on": { + "column": "proposer_id", + "table": { + "schema": "public", + "name": "proposals" + } + } + } + }, + { + "name": "transactions", + "using": { + "foreign_key_constraint_on": { + "column": "sender", + "table": { + "schema": "public", + "name": "transactions" + } + } + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "begin_block_events" + }, + "object_relationships": [ + { + "name": "block", + "using": { + "foreign_key_constraint_on": "block_height" + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "blocks" + }, + "object_relationships": [ + { + "name": "validator", + "using": { + "foreign_key_constraint_on": "proposer" + } + } + ], + "array_relationships": [ + { + "name": 
"account_transactions", + "using": { + "foreign_key_constraint_on": { + "column": "block_height", + "table": { + "schema": "public", + "name": "account_transactions" + } + } + } + }, + { + "name": "begin_block_events", + "using": { + "foreign_key_constraint_on": { + "column": "block_height", + "table": { + "schema": "public", + "name": "begin_block_events" + } + } + } + }, + { + "name": "code_proposals", + "using": { + "foreign_key_constraint_on": { + "column": "resolved_height", + "table": { + "schema": "public", + "name": "code_proposals" + } + } + } + }, + { + "name": "contract_histories", + "using": { + "foreign_key_constraint_on": { + "column": "block_height", + "table": { + "schema": "public", + "name": "contract_histories" + } + } + } + }, + { + "name": "contract_proposals", + "using": { + "foreign_key_constraint_on": { + "column": "resolved_height", + "table": { + "schema": "public", + "name": "contract_proposals" + } + } + } + }, + { + "name": "end_block_events", + "using": { + "foreign_key_constraint_on": { + "column": "block_height", + "table": { + "schema": "public", + "name": "end_block_events" + } + } + } + }, + { + "name": "lcd_tx_results", + "using": { + "foreign_key_constraint_on": { + "column": "block_height", + "table": { + "schema": "public", + "name": "lcd_tx_results" + } + } + } + }, + { + "name": "pool_transactions", + "using": { + "foreign_key_constraint_on": { + "column": "block_height", + "table": { + "schema": "public", + "name": "pool_transactions" + } + } + } + }, + { + "name": "profit_by_denoms", + "using": { + "foreign_key_constraint_on": { + "column": "block_height", + "table": { + "schema": "public", + "name": "profit_by_denoms" + } + } + } + }, + { + "name": "profit_by_routes", + "using": { + "foreign_key_constraint_on": { + "column": "block_height", + "table": { + "schema": "public", + "name": "profit_by_routes" + } + } + } + }, + { + "name": "proposals", + "using": { + "foreign_key_constraint_on": { + "column": "resolved_height", 
+ "table": { + "schema": "public", + "name": "proposals" + } + } + } + }, + { + "name": "trade_by_routes", + "using": { + "foreign_key_constraint_on": { + "column": "block_height", + "table": { + "schema": "public", + "name": "trade_by_routes" + } + } + } + }, + { + "name": "trades", + "using": { + "foreign_key_constraint_on": { + "column": "block_height", + "table": { + "schema": "public", + "name": "trades" + } + } + } + }, + { + "name": "transactions", + "using": { + "foreign_key_constraint_on": { + "column": "block_height", + "table": { + "schema": "public", + "name": "transactions" + } + } + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "code_proposals" + }, + "object_relationships": [ + { + "name": "block", + "using": { + "foreign_key_constraint_on": "resolved_height" + } + }, + { + "name": "code", + "using": { + "foreign_key_constraint_on": "code_id" + } + }, + { + "name": "proposal", + "using": { + "foreign_key_constraint_on": "proposal_id" + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "codes" + }, + "object_relationships": [ + { + "name": "account", + "using": { + "foreign_key_constraint_on": "uploader" + } + }, + { + "name": "transaction", + "using": { + "foreign_key_constraint_on": "transaction_id" + } + } + ], + "array_relationships": [ + { + "name": "code_proposals", + "using": { + "foreign_key_constraint_on": { + "column": "code_id", + "table": { + "schema": "public", + "name": "code_proposals" + } + } + } + }, + { + "name": "contract_histories", + "using": { + "foreign_key_constraint_on": { + "column": "code_id", + "table": { + "schema": "public", + "name": "contract_histories" + } + } + } + }, + { + "name": "contracts", + "using": { + "foreign_key_constraint_on": { + "column": "code_id", + "table": { + "schema": "public", + "name": "contracts" + } + } + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "contract_histories" + }, + "object_relationships": [ + { + "name": "account", + "using": { + 
"foreign_key_constraint_on": "sender" + } + }, + { + "name": "block", + "using": { + "foreign_key_constraint_on": "block_height" + } + }, + { + "name": "code", + "using": { + "foreign_key_constraint_on": "code_id" + } + }, + { + "name": "contract", + "using": { + "foreign_key_constraint_on": "contract_id" + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "contract_proposals" + }, + "object_relationships": [ + { + "name": "block", + "using": { + "foreign_key_constraint_on": "resolved_height" + } + }, + { + "name": "contract", + "using": { + "foreign_key_constraint_on": "contract_id" + } + }, + { + "name": "proposal", + "using": { + "foreign_key_constraint_on": "proposal_id" + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "contract_transactions" + }, + "object_relationships": [ + { + "name": "contract", + "using": { + "foreign_key_constraint_on": "contract_id" + } + }, + { + "name": "transaction", + "using": { + "foreign_key_constraint_on": "tx_id" + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "contract_transactions_view" + } + }, + { + "table": { + "schema": "public", + "name": "contracts" + }, + "object_relationships": [ + { + "name": "account", + "using": { + "foreign_key_constraint_on": "admin" + } + }, + { + "name": "accountByInitBy", + "using": { + "foreign_key_constraint_on": "init_by" + } + }, + { + "name": "code", + "using": { + "foreign_key_constraint_on": "code_id" + } + }, + { + "name": "transaction", + "using": { + "foreign_key_constraint_on": "init_tx_id" + } + } + ], + "array_relationships": [ + { + "name": "contract_histories", + "using": { + "foreign_key_constraint_on": { + "column": "contract_id", + "table": { + "schema": "public", + "name": "contract_histories" + } + } + } + }, + { + "name": "contract_proposals", + "using": { + "foreign_key_constraint_on": { + "column": "contract_id", + "table": { + "schema": "public", + "name": "contract_proposals" + } + } + } + }, + { + "name": 
"contract_transactions", + "using": { + "foreign_key_constraint_on": { + "column": "contract_id", + "table": { + "schema": "public", + "name": "contract_transactions" + } + } + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "end_block_events" + }, + "object_relationships": [ + { + "name": "block", + "using": { + "foreign_key_constraint_on": "block_height" + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "lcd_tx_results" + }, + "object_relationships": [ + { + "name": "block", + "using": { + "foreign_key_constraint_on": "block_height" + } + }, + { + "name": "transaction", + "using": { + "foreign_key_constraint_on": "transaction_id" + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "pool_transactions" + }, + "object_relationships": [ + { + "name": "block", + "using": { + "foreign_key_constraint_on": "block_height" + } + }, + { + "name": "pool", + "using": { + "foreign_key_constraint_on": "pool_id" + } + }, + { + "name": "transaction", + "using": { + "foreign_key_constraint_on": "transaction_id" + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "pools" + }, + "object_relationships": [ + { + "name": "account", + "using": { + "foreign_key_constraint_on": "creator" + } + }, + { + "name": "transaction", + "using": { + "foreign_key_constraint_on": "create_tx_id" + } + } + ], + "array_relationships": [ + { + "name": "pool_transactions", + "using": { + "foreign_key_constraint_on": { + "column": "pool_id", + "table": { + "schema": "public", + "name": "pool_transactions" + } + } + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "profit_by_denoms" + }, + "object_relationships": [ + { + "name": "block", + "using": { + "foreign_key_constraint_on": "block_height" + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "profit_by_routes" + }, + "object_relationships": [ + { + "name": "block", + "using": { + "foreign_key_constraint_on": "block_height" + } + } + ] + }, + { + "table": { + 
"schema": "public", + "name": "proposals" + }, + "object_relationships": [ + { + "name": "account", + "using": { + "foreign_key_constraint_on": "proposer_id" + } + }, + { + "name": "block", + "using": { + "foreign_key_constraint_on": "resolved_height" + } + } + ], + "array_relationships": [ + { + "name": "code_proposals", + "using": { + "foreign_key_constraint_on": { + "column": "proposal_id", + "table": { + "schema": "public", + "name": "code_proposals" + } + } + } + }, + { + "name": "contract_proposals", + "using": { + "foreign_key_constraint_on": { + "column": "proposal_id", + "table": { + "schema": "public", + "name": "contract_proposals" + } + } + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "tracking" + } + }, + { + "table": { + "schema": "public", + "name": "trade_by_routes" + }, + "object_relationships": [ + { + "name": "block", + "using": { + "foreign_key_constraint_on": "block_height" + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "trades" + }, + "object_relationships": [ + { + "name": "block", + "using": { + "foreign_key_constraint_on": "block_height" + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "transactions" + }, + "object_relationships": [ + { + "name": "account", + "using": { + "foreign_key_constraint_on": "sender" + } + }, + { + "name": "block", + "using": { + "foreign_key_constraint_on": "block_height" + } + } + ], + "array_relationships": [ + { + "name": "account_transactions", + "using": { + "foreign_key_constraint_on": { + "column": "transaction_id", + "table": { + "schema": "public", + "name": "account_transactions" + } + } + } + }, + { + "name": "codes", + "using": { + "foreign_key_constraint_on": { + "column": "transaction_id", + "table": { + "schema": "public", + "name": "codes" + } + } + } + }, + { + "name": "contracts", + "using": { + "foreign_key_constraint_on": { + "column": "init_tx_id", + "table": { + "schema": "public", + "name": "contracts" + } + } + } + }, + { + "name": 
"contract_transactions", + "using": { + "foreign_key_constraint_on": { + "column": "tx_id", + "table": { + "schema": "public", + "name": "contract_transactions" + } + } + } + }, + { + "name": "lcd_tx_results", + "using": { + "foreign_key_constraint_on": { + "column": "transaction_id", + "table": { + "schema": "public", + "name": "lcd_tx_results" + } + } + } + }, + { + "name": "pools", + "using": { + "foreign_key_constraint_on": { + "column": "create_tx_id", + "table": { + "schema": "public", + "name": "pools" + } + } + } + }, + { + "name": "pool_transactions", + "using": { + "foreign_key_constraint_on": { + "column": "transaction_id", + "table": { + "schema": "public", + "name": "pool_transactions" + } + } + } + } + ] + }, + { + "table": { + "schema": "public", + "name": "validators" + }, + "object_relationships": [ + { + "name": "account", + "using": { + "foreign_key_constraint_on": "account_id" + } + } + ], + "array_relationships": [ + { + "name": "blocks", + "using": { + "foreign_key_constraint_on": { + "column": "proposer", + "table": { + "schema": "public", + "name": "blocks" + } + } + } + } + ] + } + ] +} \ No newline at end of file diff --git a/celatone-docker/indexer-node/node_key.json b/celatone-docker/indexer-node/node_key.json new file mode 100644 index 00000000000..77e3370154f --- /dev/null +++ b/celatone-docker/indexer-node/node_key.json @@ -0,0 +1,6 @@ +{ + "priv_key": { + "type": "tendermint/PrivKeyEd25519", + "value": "vDTMtarpxHMEA6lOMD0isPpmYBH7DzplOhu0J3flfPG89SQSGT6JLvzA7EZU5nXuSBxiGCNP34nmvlfFZm4CjA==" + } +} \ No newline at end of file diff --git a/celatone-docker/indexer-node/priv_validator_key.json b/celatone-docker/indexer-node/priv_validator_key.json new file mode 100644 index 00000000000..491700c3a61 --- /dev/null +++ b/celatone-docker/indexer-node/priv_validator_key.json @@ -0,0 +1,11 @@ +{ + "address": "90C7DF49FBA7F50758268B7077D94EA8B3B4002F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": 
"Wnigtqzsjnmwy/wSsOKOrC2QFkhmHta2D29g3+Kyz0Y=" + }, + "priv_key": { + "type": "tendermint/PrivKeyEd25519", + "value": "ogEQcubqqicdf1J0TQm8Fy1W5WG/LGpGK44OgSBVRDJaeKC2rOyOebDL/BKw4o6sLZAWSGYe1rYPb2Df4rLPRg==" + } +} \ No newline at end of file diff --git a/celatone-docker/proxy-ssl/Dockerfile b/celatone-docker/proxy-ssl/Dockerfile new file mode 100644 index 00000000000..63390378c0d --- /dev/null +++ b/celatone-docker/proxy-ssl/Dockerfile @@ -0,0 +1,3 @@ +FROM nginx:1.23.0 + +COPY nginx.conf /etc/nginx/nginx.conf diff --git a/celatone-docker/proxy-ssl/nginx.conf b/celatone-docker/proxy-ssl/nginx.conf new file mode 100644 index 00000000000..5b24ffa0d61 --- /dev/null +++ b/celatone-docker/proxy-ssl/nginx.conf @@ -0,0 +1,47 @@ +events { + worker_connections 1024; +} + +http { + map $http_upgrade $connection_upgrade { + default upgrade; + '' close; + } + server { + listen 80; + location /rest/ { + proxy_pass http://172.18.0.11:1317/; + proxy_set_header Host $host; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + add_header 'Access-Control-Allow-Origin' '*' always; + add_header 'Access-Control-Allow-Credentials' 'true'; + add_header 'Access-Control-Allow-Headers' 'Authorization,Accept,Origin,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range'; + add_header 'Access-Control-Allow-Methods' 'GET,POST,OPTIONS,PUT,DELETE,PATCH'; + add_header 'Access-Control-Max-Age' '86400'; + } + location /rpc/ { + proxy_pass http://172.18.0.11:26657/; + proxy_set_header Host $host; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + add_header 'Access-Control-Allow-Origin' '*' always; + add_header 'Access-Control-Allow-Credentials' 'true'; + add_header 'Access-Control-Allow-Headers' 'Authorization,Accept,Origin,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range'; 
+ add_header 'Access-Control-Allow-Methods' 'GET,POST,OPTIONS,PUT,DELETE,PATCH'; + add_header 'Access-Control-Max-Age' '86400'; + } + location /api/ { + proxy_pass http://172.18.0.12:8080/; + proxy_set_header Host $host; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + } + location /hasura/ { + proxy_pass http://172.18.0.89:5433/; + proxy_set_header Host $host; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + } + } +} diff --git a/celatone-docker/run.sh b/celatone-docker/run.sh new file mode 100755 index 00000000000..9eb7d873d3e --- /dev/null +++ b/celatone-docker/run.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +osmosisd init "$1" --chain-id localosmosis + +cp /chain/celatone-docker/"$1"/priv_validator_key.json ~/.osmosisd/config/priv_validator_key.json +cp /chain/celatone-docker/"$1"/node_key.json ~/.osmosisd/config/node_key.json + +cp /chain/celatone-docker/config/genesis.json ~/.osmosisd/config/genesis.json +cp /chain/celatone-docker/config/app.toml ~/.osmosisd/config/app.toml +cp /chain/celatone-docker/config/config.toml ~/.osmosisd/config/config.toml + +sleep 60 +osmosisd start --rpc.laddr tcp://0.0.0.0:26657 --with-emitter LocalOsmosis@172.18.0.31:9092 diff --git a/celatone-docker/start_docker.sh b/celatone-docker/start_docker.sh new file mode 100755 index 00000000000..78a805b1eee --- /dev/null +++ b/celatone-docker/start_docker.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +DIR=$(dirname "$0") + +docker compose up -d --build + +sleep 10 diff --git a/cmd/osmosisd/cmd/root.go b/cmd/osmosisd/cmd/root.go index 4ed67dd1d02..195573cef7d 100644 --- a/cmd/osmosisd/cmd/root.go +++ b/cmd/osmosisd/cmd/root.go @@ -46,6 +46,8 @@ import ( osmosis "github.com/osmosis-labs/osmosis/v15/app" ) +const FlagWithEmitter = "with-emitter" + // NewRootCmd creates a new root command for simd. It is called once in the // main function. 
func NewRootCmd() (*cobra.Command, params.EncodingConfig) { @@ -204,6 +206,7 @@ func initRootCmd(rootCmd *cobra.Command, encodingConfig params.EncodingConfig) { func addModuleInitFlags(startCmd *cobra.Command) { crisis.AddModuleInitFlags(startCmd) wasm.AddModuleInitFlags(startCmd) + startCmd.Flags().String(FlagWithEmitter, "", "Enable data indexing to a message queue") } // queryCommand adds transaction and account querying commands. @@ -294,6 +297,7 @@ func newApp(logger log.Logger, db dbm.DB, traceStore io.Writer, appOpts serverty return osmosis.NewOsmosisApp( logger, db, traceStore, true, skipUpgradeHeights, cast.ToString(appOpts.Get(flags.FlagHome)), + cast.ToString(appOpts.Get(FlagWithEmitter)), cast.ToUint(appOpts.Get(server.FlagInvCheckPeriod)), appOpts, osmosis.GetWasmEnabledProposals(), @@ -320,7 +324,7 @@ func createOsmosisAppAndExport( encCfg.Marshaler = codec.NewProtoCodec(encCfg.InterfaceRegistry) loadLatest := height == -1 homeDir := cast.ToString(appOpts.Get(flags.FlagHome)) - app := osmosis.NewOsmosisApp(logger, db, traceStore, loadLatest, map[int64]bool{}, homeDir, 0, appOpts, osmosis.GetWasmEnabledProposals(), osmosis.EmptyWasmOpts) + app := osmosis.NewOsmosisApp(logger, db, traceStore, loadLatest, map[int64]bool{}, homeDir, "", 0, appOpts, osmosis.GetWasmEnabledProposals(), osmosis.EmptyWasmOpts) if !loadLatest { if err := app.LoadHeight(height); err != nil { diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000000..44da58a192d --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,133 @@ +version: "3.7" + +services: + indexer-node: + build: + context: . 
+ dockerfile: celatone-docker/Dockerfile + image: localosmosis-indexer:latest + ports: + - "26656:26656" + - "1317:1317" + - "26657:26657" + networks: + localosmosis: + ipv4_address: 172.18.0.11 + command: sh -c "chmod +x ./run.sh && ./run.sh indexer-node" + + postgres: + image: postgres:12 + restart: always + ports: + - "5432:5432" + networks: + localosmosis: + ipv4_address: 172.18.0.88 + environment: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgrespassword + POSTGRES_DB: localosmosis + + faucet: + image: alleslabs/osmosis-faucet:latest + ports: + - "5005:5005" + networks: + localosmosis: + ipv4_address: 172.18.0.17 + environment: + CHAIN_ID: localosmosis + NODE: tcp://172.18.0.11:26657 + MNEMONIC: 'smile stem oven genius cave resource better lunar nasty moon company ridge brass rather supply used horn three panic put venue analyst leader comic' + GAS_PRICES: 1uosmo + AMOUNT: 10000000 + DENOM: uosmo + command: sh -c "echo $${MNEMONIC} | ./faucet keys add provider --recover && ./faucet run --chain-id $${CHAIN_ID} --node $${NODE} --gas-prices $${GAS_PRICES} --port 5005 --amount $${AMOUNT} --denom $${DENOM} --rate-limit=false" + + graphql-engine: + image: hasura/graphql-engine:latest + ports: + - "5433:5433" + depends_on: + - "postgres" + restart: always + networks: + localosmosis: + ipv4_address: 172.18.0.89 + environment: + HASURA_GRAPHQL_DATABASE_URL: postgres://postgres:postgrespassword@172.18.0.88:5432/localosmosis + HASURA_GRAPHQL_METADATA_DATABASE_URL: postgres://postgres:postgrespassword@172.18.0.88:5432/localosmosis + HASURA_GRAPHQL_ENABLE_CONSOLE: "true" + HASURA_GRAPHQL_ENABLED_LOG_TYPES: startup, http-log, webhook-log, websocket-log, query-log + HASURA_GRAPHQL_SERVER_HOST: 0.0.0.0 + HASURA_GRAPHQL_SERVER_PORT: 5433 + HASURA_GRAPHQL_STRINGIFY_NUMERIC_TYPES: "true" + + zookeeper: + image: zookeeper + networks: + localosmosis: + ipv4_address: 172.18.0.30 + + kafka: + image: wurstmeister/kafka + networks: + localosmosis: + ipv4_address: 172.18.0.31 + 
environment: + KAFKA_ADVERTISED_HOST_NAME: 172.18.0.31 + KAFKA_ADVERTISED_PORT: 9092 + KAFKA_CREATE_TOPICS: LocalOsmosis:1:1 + KAFKA_ZOOKEEPER_CONNECT: 172.18.0.30:2181 + volumes: + - /var/run/docker.sock:/var/run/docker.sock + + flusher-init: + build: + context: flusher + image: localosmosis-flusher:latest + networks: + localosmosis: + command: sh -c "sleep 30 && python main.py init localosmosis LocalOsmosis replay --db postgres:postgrespassword@172.18.0.88:5432/localosmosis" + + flusher-daemon: + image: localosmosis-flusher:latest + networks: + localosmosis: + restart: always + + celatone-api: + image: alleslabs/celatone-api:latest + ports: + - "8081:8080" + networks: + localosmosis: + ipv4_address: 172.18.0.12 + environment: + LCD_DICT: '{"osmosis":{"localosmosis":"http://172.18.0.11:1317"}}' + HIVE_DICT: '{"localosmosis":""}' + GRAPHQL_DICT: '{"osmosis":{"localosmosis":"http://172.18.0.89:5433/v1/graphql"}}' + SCANWORKS_URL: 'https://raw.githubusercontent.com/teamscanworks/cw-contracts-registry/main/' + PRICE_CACHER_URL: 'https://celatone-price-cacher-h2bc4rnx5a-as.a.run.app' + GRAPHQL_TEST_DICT: '{"osmosis":{"localosmosis":"http://172.18.0.89:5433/v1/graphql"}}' + + proxy-ssl-server: + build: + context: ./celatone-docker/proxy-ssl + image: proxy-ssl-server:latest + networks: + localosmosis: + ipv4_address: 172.18.0.98 + depends_on: + - indexer-node + ports: + - "80:80" + - "443:443" + - "9091:9091" + +networks: + localosmosis: + ipam: + driver: default + config: + - subnet: "172.18.0.0/16" diff --git a/flusher/Dockerfile b/flusher/Dockerfile new file mode 100644 index 00000000000..e3bdb15b90d --- /dev/null +++ b/flusher/Dockerfile @@ -0,0 +1,7 @@ +FROM python:3.8-slim + +COPY . . 
+RUN apt-get update && apt-get install -y libpq-dev gcc +RUN pip install -r requirements.txt + +CMD python main.py sync --db postgres:postgrespassword@172.18.0.88:5432/localosmosis -s 172.18.0.31:9092 diff --git a/flusher/README.md b/flusher/README.md new file mode 100644 index 00000000000..2a8c3ea6a5c --- /dev/null +++ b/flusher/README.md @@ -0,0 +1,155 @@ +# Flusher + +## About + +**Flusher** is a simple program implemented in Python to consume Kafka messages from a queue and load them into the +Postgres database + +## Setup instructions for Ubuntu users + +For Ubuntu users, to install dependencies including Java, Postgres and Kafka + +```shell +sudo apt update +sudo apt upgrade --yes + +sudo apt install default-jdk +cd $HOME +curl "https://downloads.apache.org/kafka/3.3.2/kafka_2.13-3.3.2.tgz" -o kafka.tgz +mkdir $HOME/kafka && cd $HOME/kafka +tar -xvzf $HOME/kafka.tgz --strip 1 +``` + +Config the downloaded zookeeper and kafka. Edit the file + +```shell +vim $HOME/kafka/config/server.properties +``` + +With the following values + +```toml +log.dirs=/home/ubuntu/kafka/logs +delete.topic.enable = true +message.max.bytes = 52428800 +replica.fetch.max.bytes = 52428800 +``` + +Next, let's create a system service for zookeeper. 
Create the service file + +```shell +sudo vim /etc/systemd/system/zookeeper.service +``` + +With the following code + +```shell +[Unit] +Requires=network.target remote-fs.target +After=network.target remote-fs.target + +[Service] +Type=simple +User=ubuntu +ExecStart=/home/ubuntu/kafka/bin/zookeeper-server-start.sh /home/ubuntu/kafka/config/zookeeper.properties +ExecStop=/home/ubuntu/kafka/bin/zookeeper-server-stop.sh +Restart=on-abnormal + +[Install] +WantedBy=multi-user.target +``` + +In the similar manner, create a service for kafka + +```shell +sudo vim /etc/systemd/system/kafka.service +``` + +With the following code + +```shell +[Unit] +Requires=zookeeper.service +After=zookeeper.service + +[Service] +Type=simple +User=ubuntu +ExecStart=/bin/sh -c '/home/ubuntu/kafka/bin/kafka-server-start.sh /home/ubuntu/kafka/config/server.properties > /home/ubuntu/kafka/kafka.log 2>&1' +ExecStop=/home/ubuntu/kafka/bin/kafka-server-stop.sh +Restart=on-abnormal + +[Install] +WantedBy=multi-user.target +``` + +Finally, we can start both services + +```shell +sudo systemctl enable zookeeper +sudo systemctl enable kafka +sudo systemctl daemon-reload + +sudo systemctl start zookeeper +sudo systemctl start kafka +``` + +## Setup instructions for macOS users + +For macOS users, to install dependencies including Java, Postgres and Kafka + +```shell +brew cask install java +brew install postgresql +brew install kafka +``` +Start the zookeeper service to provide an in-sync view of Kafka cluster, topics and messages + +```shell +brew services start zookeeper +``` + +Start the postgresql service to provide the database server + +```shell +brew services start postgresql +``` + +Start the kafka service to provide a message queue for the indexer node + +```shell +brew services start kafka +``` + +Now make sure python3 venv is installed + +```shell +python3 -m pip install --user virtualenv +``` + +Then run this command to activate the virtual environment and install python dependencies + 
+```shell +python3 -m venv venv && source venv/bin/activate +pip install -r requirements.txt +``` +Note that if you encounter an openssl problem while installing dependencies, simply run + +```shell +brew install openssl && export LIBRARY_PATH=$LIBRARY_PATH:/usr/local/opt/openssl/lib/ +``` + +## Running Flusher + +Before running the flusher, do not forget to activate the python virtual environment + +```shell +cd flusher +source venv/bin/activate +``` + +And use this command to start flusher + +```shell +python3 main.py sync --db +``` diff --git a/flusher/flusher/__init__.py b/flusher/flusher/__init__.py new file mode 100644 index 00000000000..1357c2a2031 --- /dev/null +++ b/flusher/flusher/__init__.py @@ -0,0 +1,2 @@ +import flusher.init +import flusher.sync diff --git a/flusher/flusher/cli.py b/flusher/flusher/cli.py new file mode 100644 index 00000000000..aca14aa7771 --- /dev/null +++ b/flusher/flusher/cli.py @@ -0,0 +1,7 @@ +import click + + +@click.group() +def cli(): + """flusher utility program.""" + pass diff --git a/flusher/flusher/db.py b/flusher/flusher/db.py new file mode 100644 index 00000000000..1e17ba6cd31 --- /dev/null +++ b/flusher/flusher/db.py @@ -0,0 +1,386 @@ +import base64 as b64 +from datetime import datetime, date +import sqlalchemy as sa +import enum + + +class AccountType(enum.Enum): + """Account types enum based on Cosmos SDK accounts""" + BaseAccount = 0 + InterchainAccount = 1 + ModuleAccount = 2 + ContinuousVestingAccount = 3 + DelayedVestingAccount = 4 + ClawbackVestingAccount = 5 + ContractAccount = 6 + + +class PoolType(enum.Enum): + """Pool types enum based on Osmosis x/gamm pools""" + Balancer = 0 + Stableswap = 1 + + +class ProposalStatus(enum.Enum): + """Proposal statuses enum based on Cosmos SDK governance proposal""" + Nil = 0 + DepositPeriod = 1 + VotingPeriod = 2 + Passed = 3 + Rejected = 4 + Failed = 5 + Inactive = 6 + + +class CustomAccountType(sa.types.TypeDecorator): + impl = sa.Enum(AccountType) + + def 
process_bind_param(self, value, dialect): + return AccountType(value) + + +class CustomPoolType(sa.types.TypeDecorator): + impl = sa.Enum(PoolType) + + def process_bind_param(self, value, dialect): + return PoolType(value) + + +class CustomProposalStatus(sa.types.TypeDecorator): + impl = sa.Enum(ProposalStatus) + + def process_bind_param(self, value, dialect): + return ProposalStatus(value) + + +class CustomDateTime(sa.types.TypeDecorator): + """Custom DateTime type that accepts Python nanosecond epoch int.""" + + impl = sa.DateTime + + def process_bind_param(self, value, dialect): + return datetime.fromtimestamp(value / 1e9) + + +class CustomBase64(sa.types.TypeDecorator): + """Custom LargeBinary type that accepts base64-encoded string.""" + + impl = sa.LargeBinary + + def process_bind_param(self, value, dialect): + if value is None: + return value + return b64.decodebytes(value.encode()) + + +class CustomDate(sa.types.TypeDecorator): + """Custom Date type that accepts Python nanosecond epoch int.""" + + impl = sa.Date + + def process_bind_param(self, value, dialect): + dt = datetime.fromtimestamp(value / 1e9) + return date(dt.year, dt.month, dt.day) + + +def Column(*args, **kwargs): + """Forward into SQLAlchemy's Column construct, but with 'nullable' default to False.""" + if "nullable" not in kwargs: + kwargs["nullable"] = False + return sa.Column(*args, **kwargs) + + +metadata = sa.MetaData() + +tracking = sa.Table( + "tracking", + metadata, + Column("chain_id", sa.String, primary_key=True), + Column("topic", sa.String), + Column("kafka_offset", sa.Integer), + Column("replay_topic", sa.String), + Column("replay_offset", sa.Integer), +) + +blocks = sa.Table( + "blocks", + metadata, + Column("height", sa.Integer, primary_key=True), + Column("timestamp", CustomDateTime, index=True), + Column("proposer", sa.String, sa.ForeignKey("validators.operator_address"), nullable=True), + Column("hash", CustomBase64), +) + +transactions = sa.Table( + "transactions", + 
metadata, + Column("id", sa.Integer, sa.Sequence("seq_transaction_id"), unique=True), + Column("hash", CustomBase64, primary_key=True), + Column( + "block_height", + sa.Integer, + sa.ForeignKey("blocks.height"), + index=True, + primary_key=True, + ), + Column("gas_used", sa.Integer), + Column("gas_limit", sa.Integer), + Column("gas_fee", sa.String), + Column("err_msg", sa.String, nullable=True), + Column("success", sa.Boolean), + Column("sender", sa.Integer, sa.ForeignKey("accounts.id")), + Column("memo", sa.String), + Column("messages", sa.JSON), + Column("is_ibc", sa.Boolean), + Column("is_store_code", sa.Boolean), + Column("is_instantiate", sa.Boolean), + Column("is_execute", sa.Boolean), + Column("is_send", sa.Boolean), + Column("is_update_admin", sa.Boolean), + Column("is_clear_admin", sa.Boolean), + Column("is_migrate", sa.Boolean), +) + +accounts = sa.Table( + "accounts", + metadata, + Column("id", sa.Integer, sa.Sequence("seq_account_id"), unique=True), + Column("address", sa.String, primary_key=True), + Column("type", CustomAccountType, nullable=True), + Column("name", sa.String, nullable=True), +) + +account_transactions = sa.Table( + "account_transactions", + metadata, + Column( + "transaction_id", sa.Integer, sa.ForeignKey("transactions.id"), primary_key=True + ), + Column("account_id", sa.Integer, sa.ForeignKey("accounts.id"), primary_key=True), + Column("is_signer", sa.Boolean), + Column("block_height", sa.Integer, sa.ForeignKey("blocks.height")), +) + +codes = sa.Table( + "codes", + metadata, + Column("id", sa.Integer, primary_key=True, unique=True), + Column("uploader", sa.Integer, sa.ForeignKey("accounts.id")), + Column("contract_instantiated", sa.Integer), + Column( + "transaction_id", sa.Integer, sa.ForeignKey("transactions.id"), nullable=True + ), + Column("access_config_permission", sa.String), + Column("access_config_addresses", sa.JSON), + Column("cw2_contract", sa.String, nullable=True), + Column("cw2_version", sa.String, nullable=True), +) 
+ +contracts = sa.Table( + "contracts", + metadata, + Column("id", sa.Integer, sa.Sequence("seq_contract_id"), unique=True), + Column("code_id", sa.Integer, sa.ForeignKey("codes.id")), + Column("address", sa.String, primary_key=True), + Column("label", sa.String), + Column("admin", sa.Integer, sa.ForeignKey("accounts.id"), nullable=True), + Column("init_msg", sa.String, nullable=True), + Column("init_tx_id", sa.Integer, sa.ForeignKey("transactions.id"), nullable=True), + Column("init_by", sa.Integer, sa.ForeignKey("accounts.id"), nullable=True), + Column("contract_executed", sa.Integer), +) + +contract_transactions = sa.Table( + "contract_transactions", + metadata, + Column("tx_id", sa.Integer, sa.ForeignKey("transactions.id")), + Column("contract_id", sa.Integer, sa.ForeignKey("contracts.id")), +) + +proposals = sa.Table( + "proposals", + metadata, + Column("id", sa.Integer, primary_key=True), + Column("proposer_id", sa.Integer, sa.ForeignKey("accounts.id"), nullable=True), + Column("type", sa.String), + Column("title", sa.String), + Column("description", sa.String), + Column("proposal_route", sa.String), + Column("status", CustomProposalStatus), + Column("submit_time", CustomDateTime), + Column("deposit_end_time", CustomDateTime), + Column("voting_time", CustomDateTime), + Column("voting_end_time", CustomDateTime), + Column("content", sa.JSON, nullable=True), + Column("is_expedited", sa.Boolean), + Column( + "resolved_height", + sa.Integer, + sa.ForeignKey("blocks.height"), + index=True, + nullable=True, + ), +) + +code_proposals = sa.Table( + "code_proposals", + metadata, + Column("code_id", sa.Integer, sa.ForeignKey("codes.id")), + Column("proposal_id", sa.Integer, sa.ForeignKey("proposals.id")), + Column( + "resolved_height", + sa.Integer, + sa.ForeignKey("blocks.height"), + index=True, + nullable=True, + ), +) + +contract_proposals = sa.Table( + "contract_proposals", + metadata, + Column("contract_id", sa.Integer, sa.ForeignKey("contracts.id")), + 
Column("proposal_id", sa.Integer, sa.ForeignKey("proposals.id")), + Column( + "resolved_height", + sa.Integer, + sa.ForeignKey("blocks.height"), + index=True, + nullable=True, + ), +) + +contract_histories = sa.Table( + "contract_histories", + metadata, + Column("contract_id", sa.Integer, sa.ForeignKey("contracts.id"), index=True), + Column("sender", sa.Integer, sa.ForeignKey("accounts.id")), + Column("code_id", sa.Integer, sa.ForeignKey("codes.id")), + Column("block_height", sa.Integer, sa.ForeignKey("blocks.height"), index=True), + Column("remark", sa.JSON), +) + +contract_transactions_view = sa.Table( + "contract_transactions_view", + metadata, + Column("hash", CustomBase64), + Column("success", sa.Boolean), + Column("messages", sa.JSON), + Column("sender", sa.String), + Column("height", sa.Integer), + Column("timestamp", CustomDateTime, index=True), + Column("is_execute", sa.Boolean), + Column("is_ibc", sa.Boolean), + Column("is_instantiate", sa.Boolean), + Column("is_send", sa.Boolean), + Column("is_store_code", sa.Boolean), + Column("is_migrate", sa.Boolean), + Column("is_update_admin", sa.Boolean), + Column("is_clear_admin", sa.Boolean), + Column("contract_address", sa.String), +) + +trade = sa.Table( + "trades", + metadata, + Column("block_height", sa.Integer, sa.ForeignKey("blocks.height"), index=True), + Column("count", sa.Integer), +) + +profit_by_denom = sa.Table( + "profit_by_denoms", + metadata, + Column("block_height", sa.Integer, sa.ForeignKey("blocks.height"), index=True), + Column("denom", sa.String), + Column("amount", sa.String), +) + +trade_by_route = sa.Table( + "trade_by_routes", + metadata, + Column("block_height", sa.Integer, sa.ForeignKey("blocks.height"), index=True), + Column("route", sa.JSON), + Column("count", sa.Integer), +) + +profit_by_route = sa.Table( + "profit_by_routes", + metadata, + Column("block_height", sa.Integer, sa.ForeignKey("blocks.height"), index=True), + Column("route", sa.JSON), + Column("denom", sa.String), + 
Column("amount", sa.String), +) + +lcd_tx_results = sa.Table( + "lcd_tx_results", + metadata, + Column("block_height", sa.Integer, sa.ForeignKey("blocks.height"), index=True), + Column("transaction_id", sa.Integer, sa.ForeignKey("transactions.id"), index=True), + Column("result", sa.JSON), +) + +begin_block_events = sa.Table( + "begin_block_events", + metadata, + Column("block_height", sa.Integer, sa.ForeignKey("blocks.height"), index=True), + Column("events", sa.JSON), +) + +end_block_events = sa.Table( + "end_block_events", + metadata, + Column("block_height", sa.Integer, sa.ForeignKey("blocks.height"), index=True), + Column("events", sa.JSON), +) + +pools = sa.Table( + "pools", + metadata, + Column("id", sa.Integer, primary_key=True, index=True), + Column("liquidity", sa.JSON), + Column("type", CustomPoolType), + Column("creator", sa.ForeignKey("accounts.id"), nullable=True), + Column("create_tx_id", sa.ForeignKey("transactions.id"), nullable=True), + Column("is_superfluid", sa.Boolean), + Column("is_supported", sa.Boolean), + Column("swap_fee", sa.String), + Column("exit_fee", sa.String), + Column("future_pool_governor", sa.String), + Column("smooth_weight_change_params", sa.JSON, nullable=True), + Column("scaling_factors", sa.JSON, nullable=True), + Column("scaling_factor_controller", sa.String, nullable=True), + Column("weight", sa.JSON, nullable=True), + Column("address", sa.String), + Column("total_shares", sa.JSON) +) + +pool_transactions = sa.Table( + "pool_transactions", + metadata, + Column("pool_id", sa.Integer, sa.ForeignKey("pools.id"), index=True), + Column("transaction_id", sa.Integer, sa.ForeignKey("transactions.id"), index=True), + Column("block_height", sa.Integer, sa.ForeignKey("blocks.height"), index=True), + Column("is_swap", sa.Boolean), + Column("is_lp", sa.Boolean), + Column("is_bond", sa.Boolean), + Column("is_superfluid", sa.Boolean), +) + +validators = sa.Table( + "validators", + metadata, + Column("id", sa.Integer, 
sa.Sequence("seq_validator_id"), unique=True), + Column("account_id", sa.Integer, sa.ForeignKey("accounts.id"), unique=True), + Column("operator_address", sa.String, primary_key=True), + Column("consensus_address", sa.String), + Column("moniker", sa.String), + Column("identity", sa.String), + Column("website", sa.String), + Column("details", sa.String), + Column("commission_rate", sa.String), + Column("commission_max_rate", sa.String), + Column("commission_max_change", sa.String), + Column("min_self_delegation", sa.String), + Column("jailed", sa.Boolean), +) diff --git a/flusher/flusher/handler.py b/flusher/flusher/handler.py new file mode 100644 index 00000000000..5c058548f98 --- /dev/null +++ b/flusher/flusher/handler.py @@ -0,0 +1,346 @@ +from sqlalchemy import select +from sqlalchemy.dialects.postgresql import insert +from base64 import b64encode + +from flusher.db import ( + accounts, + blocks, + transactions, + account_transactions, + codes, + contracts, + contract_transactions, + proposals, + code_proposals, + contract_proposals, + contract_histories, + contract_transactions_view, + trade, + profit_by_denom, + trade_by_route, + profit_by_route, + lcd_tx_results, + pools, + pool_transactions, + validators, +) + + +class Handler(object): + def __init__(self, conn): + self.conn = conn + + def get_transaction_id(self, tx_hash): + return self.conn.execute( + select([transactions.c.id]).where(transactions.c.hash == tx_hash) + ).scalar() + + def get_account_address(self, account_id): + if account_id is None: + return None + return self.conn.execute( + select([accounts.c.address]).where(accounts.c.id == account_id) + ).scalar() + + def get_account_id(self, address): + if address is None: + return None + id = self.conn.execute( + select([accounts.c.id]).where(accounts.c.address == address) + ).scalar() + if id is None: + self.conn.execute(accounts.insert(), {"address": address}) + return self.conn.execute( + select([accounts.c.id]).where(accounts.c.address == 
address) + ).scalar() + return id + + def get_validator_id(self, val): + return self.conn.execute( + select([validators.c.id]).where(validators.c.operator_address == val) + ).scalar() + + def get_transaction_data(self, tx_hash): + tx_detail = dict( + self.conn.execute( + select( + [ + transactions.c.hash, + transactions.c.success, + transactions.c.messages, + transactions.c.is_execute, + transactions.c.is_ibc, + transactions.c.is_instantiate, + transactions.c.is_send, + transactions.c.is_store_code, + transactions.c.is_migrate, + transactions.c.is_update_admin, + transactions.c.is_clear_admin, + transactions.c.sender, + transactions.c.block_height, + ] + ) + .where(transactions.c.hash == tx_hash) + .limit(1) + ).fetchall()[0] + ) + tx_detail["sender"] = self.get_account_address(tx_detail["sender"]) + + tx_detail["timestamp"] = ( + self.conn.execute( + select([blocks.c.timestamp]).where( + blocks.c.height == tx_detail["block_height"] + ) + ) + .scalar() + .timestamp() + * 1e9 + ) + + tx_detail["height"] = tx_detail["block_height"] + tx_detail["hash"] = b64encode(bytes.fromhex(tx_detail["hash"].hex())).decode() + del tx_detail["block_height"] + + return tx_detail + + def get_contract_id(self, address): + return self.conn.execute( + select([contracts.c.id]).where(contracts.c.address == address) + ).scalar() + + def get_transaction_success_by_id(self, tx_id): + return self.conn.execute( + select([transactions.c.success]).where(transactions.c.id == tx_id) + ).scalar() + + def handle_new_block(self, msg): + self.conn.execute(blocks.insert(), msg) + + def handle_new_transaction(self, msg): + msg["memo"] = msg["memo"].replace("\x00", "\uFFFD") + msg["sender"] = self.get_account_id(msg["sender"]) + self.conn.execute( + insert(transactions) + .values(**msg) + .on_conflict_do_update(constraint="transactions_pkey", set_=msg) + ) + + def handle_set_related_transaction(self, msg): + tx_id = self.get_transaction_id(msg["hash"]) + related_tx_accounts = msg["related_accounts"] + 
for account in related_tx_accounts: + self.conn.execute( + insert(account_transactions) + .values( + { + "transaction_id": tx_id, + "account_id": self.get_account_id(account), + "block_height": msg["block_height"], + "is_signer": account in msg["signer"], + } + ) + .on_conflict_do_nothing(constraint="account_transactions_pkey") + ) + + def handle_new_code(self, msg): + if "tx_hash" in msg and msg["tx_hash"] is not None: + msg["transaction_id"] = self.get_transaction_id(msg["tx_hash"]) + del msg["tx_hash"] + else: + msg["transaction_id"] = None + + msg["uploader"] = self.get_account_id(msg["uploader"]) + self.conn.execute(codes.insert(), msg) + + def handle_set_account(self, msg): + id = self.conn.execute( + select([accounts.c.id]).where(accounts.c.address == msg["address"]) + ).scalar() + if id is None: + self.conn.execute(accounts.insert(), msg) + else: + msg["id"] = id + self.conn.execute(accounts.update(accounts.c.id == msg["id"]).values(**msg)) + + def handle_update_code(self, msg): + self.conn.execute( + codes.update(codes.c.id == msg["id"]).values( + contract_instantiated=codes.c.contract_instantiated + 1 + ) + ) + + def handle_update_contract(self, id): + self.conn.execute( + contracts.update(contracts.c.id == id).values( + contract_executed=contracts.c.contract_executed + 1 + ) + ) + + def handle_new_contract(self, msg): + if "tx_hash" in msg and msg["tx_hash"] is not None: + msg["init_tx_id"] = self.get_transaction_id(msg["tx_hash"]) + del msg["tx_hash"] + else: + msg["init_tx_id"] = None + + msg["init_by"] = self.get_account_id(msg["init_by"]) + if msg["admin"] != "": + msg["admin"] = self.get_account_id(msg["admin"]) + else: + del msg["admin"] + self.conn.execute(contracts.insert(), msg) + + def handle_new_contract_transaction(self, msg): + if msg["tx_hash"] is not None: + msg["tx_id"] = self.get_transaction_id(msg["tx_hash"]) + + transaction_view = self.get_transaction_data(msg["tx_hash"]) + del msg["tx_hash"] + + transaction_view["contract_address"] = 
msg["contract_address"] + msg["contract_id"] = self.get_contract_id(msg["contract_address"]) + del msg["contract_address"] + if not msg["contract_id"] and not self.get_transaction_success_by_id( + msg["tx_id"] + ): + return + + if not msg["is_instantiate"]: + self.handle_update_contract(msg["contract_id"]) + del msg["is_instantiate"] + self.conn.execute(contract_transactions.insert(), msg) + self.conn.execute(contract_transactions_view.insert(), transaction_view) + + def handle_update_contract_admin(self, msg): + msg["address"] = msg["contract"] + del msg["contract"] + if msg["admin"] != "": + msg["admin"] = self.get_account_id(msg["admin"]) + else: + msg["admin"] = None + self.conn.execute( + contracts.update() + .where(contracts.c.address == msg["address"]) + .values(**msg) + ) + + def handle_update_contract_code_id(self, msg): + msg["address"] = msg["contract"] + del msg["contract"] + self.conn.execute( + contracts.update() + .where(contracts.c.address == msg["address"]) + .values(**msg) + ) + + def handle_new_proposal(self, msg): + msg["proposer_id"] = self.get_account_id(msg["proposer"]) + del msg["proposer"] + self.conn.execute(proposals.insert(), msg) + + def handle_update_proposal(self, msg): + condition = True + for col in proposals.primary_key.columns.values(): + condition = (col == msg[col.name]) & condition + self.conn.execute(proposals.update().where(condition).values(**msg)) + + def handle_new_code_proposal(self, msg): + self.conn.execute(code_proposals.insert(), msg) + + def handle_new_contract_proposal(self, msg): + msg["contract_id"] = self.get_contract_id(msg["contract_address"]) + del msg["contract_address"] + self.conn.execute(contract_proposals.insert(), msg) + + def handle_update_contract_proposal(self, msg): + msg["contract_id"] = self.get_contract_id(msg["contract_address"]) + del msg["contract_address"] + self.conn.execute( + contract_proposals.update() + .where( + (contract_proposals.c.contract_id == msg["contract_id"]) + & 
(contract_proposals.c.proposal_id == msg["proposal_id"]) + ) + .values(**msg) + ) + + def handle_new_contract_history(self, msg): + msg["contract_id"] = self.get_contract_id(msg["contract_address"]) + del msg["contract_address"] + msg["sender"] = self.get_account_id(msg["sender"]) + self.conn.execute(contract_histories.insert(), msg) + + def handle_update_cw2_info(self, msg): + msg["id"] = msg["code_id"] + del msg["code_id"] + self.conn.execute(codes.update().where(codes.c.id == msg["id"]).values(**msg)) + + def handle_trade(self, msg): + self.conn.execute(trade.insert(), msg) + + def handle_profit_by_denom(self, msg): + self.conn.execute(profit_by_denom.insert(), msg) + + def handle_trade_by_route(self, msg): + self.conn.execute(trade_by_route.insert(), msg) + + def handle_profit_by_route(self, msg): + self.conn.execute(profit_by_route.insert(), msg) + + def handle_insert_lcd_tx_results(self, msg): + if "tx_hash" in msg and msg["tx_hash"] is not None: + msg["transaction_id"] = self.get_transaction_id(msg["tx_hash"]) + del msg["tx_hash"] + else: + msg["transaction_id"] = None + self.conn.execute(lcd_tx_results.insert(), msg) + + def handle_new_gamm_pool(self, msg): + msg["creator"] = self.get_account_id(msg["creator"]) + if "create_tx" in msg and msg["create_tx"] is not None: + msg["create_tx_id"] = self.get_transaction_id(msg["create_tx"]) + del msg["create_tx"] + else: + msg["create_tx_id"] = None + self.conn.execute(pools.insert(), msg) + + def handle_update_set_superfluid_asset(self, msg): + self.conn.execute(pools.update().where(pools.c.id == msg["id"]).values({"is_superfluid": True})) + + def handle_update_remove_superfluid_asset(self, msg): + self.conn.execute(pools.update().where(pools.c.id == msg["id"]).values({"is_superfluid": False})) + + def handle_update_pool(self, msg): + self.conn.execute( + pools.update() + .where(pools.c.id == msg["id"]) + .values(**msg) + ) + + def handle_new_pool_transaction(self, msg): + msg["transaction_id"] = 
self.get_transaction_id(msg["tx_hash"]) + del msg["tx_hash"] + pool_id = self.conn.execute( + select([pools.c.id]).where(pools.c.id == msg["pool_id"]) + ).scalar() + if pool_id is None: + return + self.conn.execute(pool_transactions.insert(), msg) + + def handle_set_validator(self, msg): + msg["account_id"] = self.get_account_id(msg["delegator_address"]) + del msg["delegator_address"] + if self.get_validator_id(msg["operator_address"]) is None: + self.conn.execute(validators.insert(), msg) + else: + condition = True + for col in validators.primary_key.columns.values(): + condition = (col == msg[col.name]) & condition + self.conn.execute(validators.update().where(condition).values(**msg)) + + def handle_update_validator(self, msg): + self.conn.execute( + validators.update() + .where(validators.c.operator_address == msg["operator_address"]) + .values(**msg) + ) diff --git a/flusher/flusher/init.py b/flusher/flusher/init.py new file mode 100644 index 00000000000..d5e9dfb1206 --- /dev/null +++ b/flusher/flusher/init.py @@ -0,0 +1,28 @@ +import json + +import click + +from .db import metadata, tracking +from .cli import cli + +from sqlalchemy import create_engine + + +@cli.command() +@click.argument("chain_id") +@click.argument("topic") +@click.argument("replay_topic") +@click.option( + "--db", + help="Database URI connection string.", + default="localhost:5432/postgres", + show_default=True, +) +def init(chain_id, topic, replay_topic, db): + """Initialize the database with empty tables and tracking info.""" + engine = create_engine("postgresql+psycopg2://" + db, echo=True) + metadata.create_all(engine) + engine.execute( + tracking.insert(), + {"chain_id": chain_id, "topic": topic, "replay_topic": replay_topic, "kafka_offset": -1, "replay_offset": -2}, + ) diff --git a/flusher/flusher/sync.py b/flusher/flusher/sync.py new file mode 100644 index 00000000000..f9adecd0d74 --- /dev/null +++ b/flusher/flusher/sync.py @@ -0,0 +1,75 @@ +import json +import click +import sys 
+import time + +from kafka import KafkaConsumer, TopicPartition +from loguru import logger +from sqlalchemy import create_engine + +from .cli import cli +from .db import tracking +from .handler import Handler + + +@cli.command() +@click.option( + "-c", + "--commit", + "commit_interval", + help="The number of blocks between each commit interval.", + default=1, + show_default=True, +) +@click.option( + "--db", + help="Database URI connection string.", + default="localhost:5432/postgres", + show_default=True, +) +@click.option( + "-s", + "--servers", + help="Kafka bootstrap servers.", + default="localhost:9092", + show_default=True, +) +@click.option("-e", "--echo-sqlalchemy", "echo_sqlalchemy", is_flag=True) +def sync(commit_interval, db, servers, echo_sqlalchemy): + """Subscribe to Kafka and push the updates to the database.""" + # Set up Kafka connection + engine = create_engine("postgresql+psycopg2://" + db, echo=echo_sqlalchemy) + tracking_info = engine.execute(tracking.select()).fetchone() + topic = tracking_info.topic + consumer = KafkaConsumer(topic, bootstrap_servers=servers) + partitions = consumer.partitions_for_topic(topic) + if len(partitions) != 1: + raise Exception("Only exact 1 partition is supported.") + tp = TopicPartition(topic, partitions.pop()) + while True: + consumer.seek_to_end(tp) + last_offset = consumer.position(tp) + if tracking_info.kafka_offset < last_offset: + break + logger.info("Waiting emitter sync current emitter offset is {}", last_offset) + time.sleep(5) + consumer.seek(tp, tracking_info.kafka_offset + 1) + consumer_iter = iter(consumer) + # Main loop + while True: + with engine.begin() as conn: + for msg in consumer_iter: + handler = Handler(conn) + key = msg.key.decode() + value = json.loads(msg.value) + if key == "COMMIT": + if value["height"] % commit_interval == 0: + conn.execute(tracking.update().values(kafka_offset=msg.offset)) + logger.info( + "Committed at block {} and Kafka offset {}", + value["height"], + msg.offset, + ) 
+ break + continue + getattr(handler, "handle_" + key.lower())(value) diff --git a/flusher/main.py b/flusher/main.py new file mode 100644 index 00000000000..fb4fe240ddf --- /dev/null +++ b/flusher/main.py @@ -0,0 +1,5 @@ +from flusher.cli import cli + + +if __name__ == "__main__": + cli() diff --git a/flusher/requirements.txt b/flusher/requirements.txt new file mode 100644 index 00000000000..d2530f804e1 --- /dev/null +++ b/flusher/requirements.txt @@ -0,0 +1,12 @@ +appdirs==1.4.4 +attrs==19.3.0 +black==19.10b0 +click==7.1.2 +kafka-python==2.0.1 +loguru==0.5.1 +pathspec==0.8.0 +psycopg2-binary==2.9.5 +regex==2020.6.8 +SQLAlchemy==1.3.17 +toml==0.10.1 +typed-ast==1.5.4 diff --git a/go.mod b/go.mod index d3a99a2560b..d95edf7b277 100644 --- a/go.mod +++ b/go.mod @@ -23,6 +23,7 @@ require ( github.com/osmosis-labs/osmosis/x/ibc-hooks v0.0.6 github.com/pkg/errors v0.9.1 github.com/rakyll/statik v0.1.7 + github.com/segmentio/kafka-go v0.4.40 github.com/spf13/cast v1.5.0 github.com/spf13/cobra v1.6.1 github.com/spf13/pflag v1.0.5 @@ -59,6 +60,7 @@ require ( github.com/maratori/testableexamples v1.0.0 // indirect github.com/nunnatsa/ginkgolinter v0.8.1 // indirect github.com/nxadm/tail v1.4.8 // indirect + github.com/pierrec/lz4/v4 v4.1.15 // indirect github.com/regen-network/cosmos-proto v0.3.1 // indirect github.com/sashamelentyev/interfacebloat v1.1.0 // indirect github.com/sashamelentyev/usestdlibvars v1.23.0 // indirect diff --git a/go.sum b/go.sum index 98a2d571f7c..38495a2d2b5 100644 --- a/go.sum +++ b/go.sum @@ -676,6 +676,7 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6 github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.15.9/go.mod 
h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -892,6 +893,8 @@ github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCr github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -999,6 +1002,8 @@ github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod github.com/securego/gosec/v2 v2.15.0 h1:v4Ym7FF58/jlykYmmhZ7mTm7FQvN/setNm++0fgIAtw= github.com/securego/gosec/v2 v2.15.0/go.mod h1:VOjTrZOkUtSDt2QLSJmQBMWnvwiQPEjg0l+5juIqGk8= github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= +github.com/segmentio/kafka-go v0.4.40 h1:sszW7c0/uyv7+VcTW5trx2ZC7kMWDTxuR/6Zn8U1bm8= +github.com/segmentio/kafka-go v0.4.40/go.mod h1:naFEZc5MQKdeL3W6NkZIAn48Y6AazqjRFDhnXeg3h94= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= github.com/shirou/gopsutil 
v2.20.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= @@ -1142,6 +1147,12 @@ github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17 github.com/vmihailenco/msgpack/v5 v5.1.4/go.mod h1:C5gboKD0TJPqWDTVTtrQNfRbiBwHZGo8UTqP/9/XvLI= github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -1467,6 +1478,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod 
h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= diff --git a/hooks/common/hook.go b/hooks/common/hook.go new file mode 100644 index 00000000000..1cfd3f79cc2 --- /dev/null +++ b/hooks/common/hook.go @@ -0,0 +1,54 @@ +package common + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + abci "github.com/tendermint/tendermint/abci/types" +) + +// Hooks is a type alias for an array of Hook. +type Hooks []Hook + +// Hook is an interface of a hook that can be processed along with the ABCI application +type Hook interface { + AfterInitChain(ctx sdk.Context, req abci.RequestInitChain, res abci.ResponseInitChain) + AfterBeginBlock(ctx sdk.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) + AfterDeliverTx(ctx sdk.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) + AfterEndBlock(ctx sdk.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) + + BeforeCommit() +} + +// AfterInitChain gets called when initializing a new chain. Usually as a last step in app.InitChainer. +func (h Hooks) AfterInitChain(ctx sdk.Context, req abci.RequestInitChain, res abci.ResponseInitChain) { + for _, hook := range h { + hook.AfterInitChain(ctx, req, res) + } +} + +// AfterBeginBlock gets called before completing a BeginBlocker. +func (h Hooks) AfterBeginBlock(ctx sdk.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) { + for _, hook := range h { + hook.AfterBeginBlock(ctx, req, res) + } +} + +// AfterDeliverTx gets called after each transaction in a block completes DeliverTx process. +func (h Hooks) AfterDeliverTx(ctx sdk.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) { + for _, hook := range h { + hook.AfterDeliverTx(ctx, req, res) + } +} + +// AfterEndBlock gets called before completing an EndBlocker. 
+func (h Hooks) AfterEndBlock(ctx sdk.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) { + for _, hook := range h { + hook.AfterEndBlock(ctx, req, res) + } +} + +// BeforeCommit gets called right before committing each block. +func (h Hooks) BeforeCommit() { + for _, hook := range h { + hook.BeforeCommit() + } +} diff --git a/hooks/common/utils.go b/hooks/common/utils.go new file mode 100644 index 00000000000..33bed0e47a2 --- /dev/null +++ b/hooks/common/utils.go @@ -0,0 +1,110 @@ +package common + +import ( + "encoding/binary" + "fmt" + "strconv" + "time" + + "github.com/CosmWasm/wasmd/x/wasm/types" + "github.com/cosmos/cosmos-sdk/client" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/address" + tmtypes "github.com/tendermint/tendermint/rpc/core/types" +) + +// EvMap is a type alias for SDK events mapping from Attr.Key to the list of values. +type EvMap map[string][]string + +// JsDict is a type alias for JSON dictionary. +type JsDict map[string]interface{} + +// Message is a simple wrapper data type for each message published to Kafka. +type Message struct { + Key string + Value JsDict +} + +// Atoi converts the given string into an int64. Panics on errors. +func Atoi(val string) int64 { + res, err := strconv.ParseInt(val, 10, 64) + if err != nil { + panic(err) + } + return res +} + +// Atoui converts the given string into an uint64. Panics on errors. +func Atoui(val string) uint64 { + res, err := strconv.ParseUint(val, 10, 64) + if err != nil { + panic(err) + } + return res +} + +// ParseEvents parses the given sdk.StringEvents objects into a single EvMap. +func ParseEvents(events sdk.StringEvents) EvMap { + evMap := make(EvMap) + for _, event := range events { + for _, kv := range event.Attributes { + key := event.Type + "." 
+ kv.Key + evMap[key] = append(evMap[key], kv.Value) + } + } + return evMap +} + +// BuildContractAddressClassic builds an SDK account address for a CosmWasm contract. +func BuildContractAddressClassic(codeID, instanceID uint64) sdk.AccAddress { + contractID := make([]byte, 16) + binary.BigEndian.PutUint64(contractID[:8], codeID) + binary.BigEndian.PutUint64(contractID[8:], instanceID) + return address.Module(types.ModuleName, contractID)[:types.ContractAddrLen] +} + +// GetRelatedAccounts finds all valid sdk.AccAddress recursively in the given interface. +func GetRelatedAccounts(m interface{}, accs map[string]bool) { + switch m := m.(type) { + case []interface{}: + for _, v := range m { + GetRelatedAccounts(v, accs) + } + case map[string]interface{}: + for _, v := range m { + GetRelatedAccounts(v, accs) + } + case string: + _, err := sdk.AccAddressFromBech32(m) + if err == nil { + accs[m] = true + } + default: + } +} + +// Deprecated: this interface is used only internally for some scenarios we are +// deprecating (StdTxConfig support) +type intoAny interface { + AsAny() *codectypes.Any +} + +// MkTxResult returns a sdk.TxResponse from the given Tendermint ResultTx. +func MkTxResult(txConfig client.TxConfig, resTx *tmtypes.ResultTx, blockTime time.Time) (*sdk.TxResponse, error) { + txb, err := txConfig.TxDecoder()(resTx.Tx) + if err != nil { + return nil, err + } + p, ok := txb.(intoAny) + if !ok { + return nil, fmt.Errorf("expecting a type implementing intoAny, got: %T", txb) + } + asAny := p.AsAny() + return sdk.NewResponseResultTx(resTx, asAny, blockTime.Format(time.RFC3339)), nil +} + +// AppendMessage is a simple function for appending new key and value to an array of Message. 
+func AppendMessage(msgs *[]Message, key string, value JsDict) { + *msgs = append(*msgs, Message{Key: key, Value: value}) +} diff --git a/hooks/common/utils_test.go b/hooks/common/utils_test.go new file mode 100644 index 00000000000..694d0b8c657 --- /dev/null +++ b/hooks/common/utils_test.go @@ -0,0 +1,103 @@ +package common_test + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/CosmWasm/wasmd/x/wasm/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/authz" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + disttypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + "github.com/osmosis-labs/osmosis/v15/app" + "github.com/osmosis-labs/osmosis/v15/hooks/common" + "github.com/stretchr/testify/assert" +) + +var ( + ALICE = sdk.AccAddress("01234567890123456789") + BOB = sdk.AccAddress("98765432109876543210") + CAROL = sdk.AccAddress("01234567899876543210") + CONTRACT = common.BuildContractAddressClassic(1, 3) + FirstCoin = sdk.NewCoins(sdk.NewCoin("first", sdk.NewInt(1))) + SecondCoin = sdk.NewCoins(sdk.NewCoin("second", sdk.NewInt(1000))) +) + +func GetMsgInterface(msg sdk.Msg) interface{} { + encoding := app.MakeEncodingConfig() + bz, err := encoding.Marshaler.MarshalInterfaceJSON(msg) + fmt.Println(err) + var jsonMsg interface{} + _ = json.Unmarshal(bz, &jsonMsg) + return jsonMsg +} + +func TestMsgSend(t *testing.T) { + accs := make(map[string]bool) + msg := banktypes.NewMsgSend(ALICE, BOB, FirstCoin) + common.GetRelatedAccounts(GetMsgInterface(msg), accs) + assert.Contains(t, accs, ALICE.String()) + assert.Contains(t, accs, BOB.String()) +} + +func TestFundCom(t *testing.T) { + accs := make(map[string]bool) + msg := disttypes.NewMsgFundCommunityPool(FirstCoin, ALICE) + common.GetRelatedAccounts(GetMsgInterface(msg), accs) + assert.Contains(t, accs, ALICE.String()) +} + +func TestMultiSend(t *testing.T) { + accs := make(map[string]bool) + inputs := []banktypes.Input{banktypes.NewInput(ALICE, FirstCoin), 
banktypes.NewInput(BOB, SecondCoin)} + outputs := []banktypes.Output{ + banktypes.NewOutput(CAROL, + sdk.Coins{ + sdk.NewCoin("jaja", sdk.NewInt(1)), + sdk.NewCoin("yoyo", sdk.NewInt(1000)), + }), + } + + msg := banktypes.NewMsgMultiSend(inputs, outputs) + common.GetRelatedAccounts(GetMsgInterface(msg), accs) + assert.Contains(t, accs, ALICE.String()) + assert.Contains(t, accs, BOB.String()) + assert.Contains(t, accs, CAROL.String()) +} + +func TestMsgExec(t *testing.T) { + accs := make(map[string]bool) + msg := authz.NewMsgExec(ALICE, []sdk.Msg{ + &banktypes.MsgSend{ + Amount: sdk.NewCoins(sdk.NewInt64Coin("steak", 2)), + FromAddress: BOB.String(), + ToAddress: CAROL.String(), + }, + }) + sdkMsg := []sdk.Msg{&msg} + + common.GetRelatedAccounts(GetMsgInterface(sdkMsg[0]), accs) + assert.Contains(t, accs, ALICE.String()) + assert.Contains(t, accs, BOB.String()) + assert.Contains(t, accs, CAROL.String()) +} + +func TestMsgExecute(t *testing.T) { + accs := make(map[string]bool) + + msg := types.MsgExecuteContract{ + Sender: ALICE.String(), + Contract: CONTRACT.String(), + Msg: []byte(`{"release": "osmo15d4apf20449ajvwycq8ruaypt7v6d345z36n9l", "1" : "osmo1zs3qcgu5nsyeq2wnv480uwhqck4jwgza5as3k25wysglggcny0rstfqyz9","key" : {"value":"osmo1mhznfr60vjdp2gejhyv2gax9nvyyzhd3z0qcwseyetkfustjauzqycsy2g","ja":1}}`), + Funds: FirstCoin, + } + + sdkMsg := []sdk.Msg{&msg} + common.GetRelatedAccounts(GetMsgInterface(sdkMsg[0]), accs) + assert.Contains(t, accs, ALICE.String()) + assert.Contains(t, accs, CONTRACT.String()) + assert.Contains(t, accs, "osmo15d4apf20449ajvwycq8ruaypt7v6d345z36n9l") + assert.Contains(t, accs, "osmo1zs3qcgu5nsyeq2wnv480uwhqck4jwgza5as3k25wysglggcny0rstfqyz9") + assert.Contains(t, accs, "osmo1mhznfr60vjdp2gejhyv2gax9nvyyzhd3z0qcwseyetkfustjauzqycsy2g") +} diff --git a/hooks/emitter/adapter.go b/hooks/emitter/adapter.go new file mode 100644 index 00000000000..3bb3bae8502 --- /dev/null +++ b/hooks/emitter/adapter.go @@ -0,0 +1,21 @@ +package emitter + 
+import ( + "encoding/json" + sdk "github.com/cosmos/cosmos-sdk/types" + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/osmosis-labs/osmosis/v15/app/params" + "github.com/osmosis-labs/osmosis/v15/hooks/common" +) + +// Adapter defines an interface of an adapter for each emitter-supported module to be processed along with the emitter hook. +type Adapter interface { + AfterInitChain(ctx sdk.Context, encodingConfig params.EncodingConfig, genesisState map[string]json.RawMessage, kafka *[]common.Message) + AfterBeginBlock(ctx sdk.Context, req abci.RequestBeginBlock, evMap common.EvMap, kafka *[]common.Message) + PreDeliverTx() + CheckMsg(ctx sdk.Context, msg sdk.Msg) + HandleMsgEvents(ctx sdk.Context, txHash []byte, msg sdk.Msg, evMap common.EvMap, detail common.JsDict, kafka *[]common.Message) + PostDeliverTx(ctx sdk.Context, txHash []byte, txDict common.JsDict, kafka *[]common.Message) + AfterEndBlock(ctx sdk.Context, req abci.RequestEndBlock, evMap common.EvMap, kafka *[]common.Message) +} diff --git a/hooks/emitter/auth.go b/hooks/emitter/auth.go new file mode 100644 index 00000000000..adacd080f64 --- /dev/null +++ b/hooks/emitter/auth.go @@ -0,0 +1,111 @@ +package emitter + +import ( + wasmkeeper "github.com/CosmWasm/wasmd/x/wasm/keeper" + sdk "github.com/cosmos/cosmos-sdk/types" + authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" + "github.com/cosmos/cosmos-sdk/x/auth/types" + vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types" + icatypes "github.com/cosmos/ibc-go/v4/modules/apps/27-interchain-accounts/types" + "github.com/osmosis-labs/osmosis/v15/hooks/common" +) + +// AccountType enumerates the valid types of an account. 
+type AccountType int32 + +const ( + BaseAccount AccountType = 0 + InterchainAccount AccountType = 1 + ModuleAccount AccountType = 2 + ContinuousVestingAccount AccountType = 3 + DelayedVestingAccount AccountType = 4 + ClawbackVestingAccount AccountType = 5 + ContractAccount AccountType = 6 +) + +var ( + _ AccountVerifier = &ContractAccountVerifier{} + _ AccountVerifier = &AuthAccountVerifier{} +) + +// AccountVerifier defines an interface for the implementation of verifying a valid account. +type AccountVerifier interface { + VerifyAccount(ctx sdk.Context, addr sdk.AccAddress) (common.JsDict, bool) +} + +// ContractAccountVerifier is an AccountVerifier implementation for verifying CosmWasm contract accounts. +type ContractAccountVerifier struct { + keeper wasmkeeper.Keeper +} + +// VerifyAccount returns the account and its type if the given input is a valid CosmWasm contract account. +func (ca ContractAccountVerifier) VerifyAccount(ctx sdk.Context, addr sdk.AccAddress) (common.JsDict, bool) { + if c := ca.keeper.GetContractInfo(ctx, addr); c != nil { + return common.JsDict{ + "address": addr.String(), + "type": ContractAccount, + }, true + } + return nil, false +} + +// AuthAccountVerifier is an AccountVerifier implementation for verifying x/auth and ica accounts. +type AuthAccountVerifier struct { + keeper authkeeper.AccountKeeper +} + +// VerifyAccount returns the account and its type if the given input is a valid x/auth or ica account. 
+func (aa AuthAccountVerifier) VerifyAccount(ctx sdk.Context, addr sdk.AccAddress) (common.JsDict, bool) { + acc := aa.keeper.GetAccount(ctx, addr) + if acc == nil { + return nil, false + } + + if moduleAccount, ok := acc.(types.ModuleAccountI); ok { + return common.JsDict{ + "address": addr.String(), + "type": ModuleAccount, + "name": moduleAccount.GetName(), + }, true + } + if _, ok := acc.(*icatypes.InterchainAccount); ok { + return common.JsDict{ + "address": addr.String(), + "type": InterchainAccount, + }, true + } + if _, ok := acc.(*vestingtypes.ContinuousVestingAccount); ok { + return common.JsDict{ + "address": addr.String(), + "type": ContinuousVestingAccount, + }, true + } + if _, ok := acc.(*vestingtypes.DelayedVestingAccount); ok { + return common.JsDict{ + "address": addr.String(), + "type": DelayedVestingAccount, + }, true + } + if _, ok := acc.(*vestingtypes.ClawbackVestingAccount); ok { + return common.JsDict{ + "address": addr.String(), + "type": ClawbackVestingAccount, + }, true + } + return common.JsDict{ + "address": addr.String(), + "type": BaseAccount, + }, true +} + +// VerifyAccount iterates through each given AccountVerifier and returns its address and account type according to the input address. 
+func VerifyAccount(ctx sdk.Context, addr sdk.AccAddress, avs ...AccountVerifier) common.JsDict { + for _, verifier := range avs { + if d, ok := verifier.VerifyAccount(ctx, addr); ok { + return d + } + } + return common.JsDict{ + "address": addr.String(), + } +} diff --git a/hooks/emitter/bank.go b/hooks/emitter/bank.go new file mode 100644 index 00000000000..e6b3ec01884 --- /dev/null +++ b/hooks/emitter/bank.go @@ -0,0 +1,64 @@ +package emitter + +import ( + "encoding/json" + + sdk "github.com/cosmos/cosmos-sdk/types" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/osmosis-labs/osmosis/v15/app/params" + "github.com/osmosis-labs/osmosis/v15/hooks/common" +) + +var ( + _ Adapter = &BankAdapter{} +) + +// BankAdapter defines a struct containing necessary flags to process the x/bank hook. It implements Adapter interface. +type BankAdapter struct { + isSendTx bool +} + +// NewBankAdapter creates new BankAdapter instance that will be added to the emitter hook adapters. +func NewBankAdapter() *BankAdapter { + return &BankAdapter{isSendTx: false} +} + +// AfterInitChain does nothing since no action is required in the InitChainer. +func (ba *BankAdapter) AfterInitChain(_ sdk.Context, _ params.EncodingConfig, _ map[string]json.RawMessage, _ *[]common.Message) { +} + +// AfterBeginBlock does nothing since no action is required in the BeginBlocker. +func (ba *BankAdapter) AfterBeginBlock(_ sdk.Context, _ abci.RequestBeginBlock, _ common.EvMap, _ *[]common.Message) { +} + +// PreDeliverTx sets the necessary flag to the default value before processing each transaction. +func (ba *BankAdapter) PreDeliverTx() { + ba.isSendTx = false +} + +// CheckMsg checks the message type and sets the BankAdapter flag accordingly. 
+func (ba *BankAdapter) CheckMsg(_ sdk.Context, msg sdk.Msg) { + switch msg.(type) { + case *banktypes.MsgSend: + ba.isSendTx = true + case *banktypes.MsgMultiSend: + ba.isSendTx = true + default: + return + } +} + +// HandleMsgEvents does nothing since no action is required in the transaction events-handling step. +func (ba *BankAdapter) HandleMsgEvents(_ sdk.Context, _ []byte, _ sdk.Msg, _ common.EvMap, _ common.JsDict, _ *[]common.Message) { +} + +// PostDeliverTx assigns BankAdapter flag values to the interface to be written to the message queue. +func (ba *BankAdapter) PostDeliverTx(_ sdk.Context, _ []byte, txDict common.JsDict, _ *[]common.Message) { + txDict["is_send"] = ba.isSendTx +} + +// AfterEndBlock does nothing since no action is required in the EndBlocker. +func (ba *BankAdapter) AfterEndBlock(_ sdk.Context, _ abci.RequestEndBlock, _ common.EvMap, _ *[]common.Message) { +} diff --git a/hooks/emitter/channel.go b/hooks/emitter/channel.go new file mode 100644 index 00000000000..0c9744f5e84 --- /dev/null +++ b/hooks/emitter/channel.go @@ -0,0 +1,110 @@ +package emitter + +import ( + "encoding/json" + sdk "github.com/cosmos/cosmos-sdk/types" + feetypes "github.com/cosmos/ibc-go/v4/modules/apps/29-fee/types" + transfertypes "github.com/cosmos/ibc-go/v4/modules/apps/transfer/types" + ibcclienttypes "github.com/cosmos/ibc-go/v4/modules/core/02-client/types" + ibcconnectiontypes "github.com/cosmos/ibc-go/v4/modules/core/03-connection/types" + ibcchanneltypes "github.com/cosmos/ibc-go/v4/modules/core/04-channel/types" + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/osmosis-labs/osmosis/v15/app/params" + "github.com/osmosis-labs/osmosis/v15/hooks/common" +) + +var ( + _ Adapter = &IBCAdapter{} +) + +// IBCAdapter defines a struct containing the necessary flag to process the IBC hook. It implements Adapter interface. 
+type IBCAdapter struct { + isIBC bool +} + +// NewIBCAdapter creates a new IBCAdapter instance that will be added to the emitter hook adapters. +func NewIBCAdapter() *IBCAdapter { + return &IBCAdapter{isIBC: false} +} + +// AfterInitChain does nothing since no action is required in the InitChainer. +func (ibca *IBCAdapter) AfterInitChain(_ sdk.Context, _ params.EncodingConfig, _ map[string]json.RawMessage, _ *[]common.Message) { +} + +// AfterBeginBlock does nothing since no action is required in the BeginBlocker. +func (ibca *IBCAdapter) AfterBeginBlock(_ sdk.Context, _ abci.RequestBeginBlock, _ common.EvMap, _ *[]common.Message) { +} + +// PreDeliverTx sets the necessary flag to the default value before processing each transaction. +func (ibca *IBCAdapter) PreDeliverTx() { + ibca.isIBC = false +} + +// CheckMsg checks the message type and sets the IBCAdapter flag accordingly. +func (ibca *IBCAdapter) CheckMsg(_ sdk.Context, msg sdk.Msg) { + switch msg.(type) { + case *ibcchanneltypes.MsgRecvPacket: + ibca.isIBC = true + case *ibcchanneltypes.MsgChannelOpenInit: + ibca.isIBC = true + case *ibcchanneltypes.MsgChannelOpenTry: + ibca.isIBC = true + case *ibcchanneltypes.MsgChannelOpenAck: + ibca.isIBC = true + case *ibcchanneltypes.MsgChannelOpenConfirm: + ibca.isIBC = true + case *ibcchanneltypes.MsgTimeout: + ibca.isIBC = true + case *ibcchanneltypes.MsgChannelCloseInit: + ibca.isIBC = true + case *ibcchanneltypes.MsgChannelCloseConfirm: + ibca.isIBC = true + case *ibcchanneltypes.MsgTimeoutOnClose: + ibca.isIBC = true + case *ibcchanneltypes.MsgAcknowledgement: + ibca.isIBC = true + case *ibcclienttypes.MsgCreateClient: + ibca.isIBC = true + case *ibcclienttypes.MsgUpdateClient: + ibca.isIBC = true + case *ibcclienttypes.MsgUpgradeClient: + ibca.isIBC = true + case *ibcclienttypes.MsgSubmitMisbehaviour: + ibca.isIBC = true + case *ibcconnectiontypes.MsgConnectionOpenInit: + ibca.isIBC = true + case *ibcconnectiontypes.MsgConnectionOpenTry: + ibca.isIBC = true + 
case *ibcconnectiontypes.MsgConnectionOpenAck: + ibca.isIBC = true + case *ibcconnectiontypes.MsgConnectionOpenConfirm: + ibca.isIBC = true + case *feetypes.MsgRegisterPayee: + ibca.isIBC = true + case *feetypes.MsgRegisterCounterpartyPayee: + ibca.isIBC = true + case *feetypes.MsgPayPacketFee: + ibca.isIBC = true + case *feetypes.MsgPayPacketFeeAsync: + ibca.isIBC = true + case *transfertypes.MsgTransfer: + ibca.isIBC = true + } +} + +// HandleMsgEvents classifies IBC transaction events and assigns the values to the IBCAdapter flag accordingly. +func (ibca *IBCAdapter) HandleMsgEvents(_ sdk.Context, _ []byte, _ sdk.Msg, evMap common.EvMap, _ common.JsDict, _ *[]common.Message) { + if _, ok := evMap[ibcchanneltypes.EventTypeSendPacket+"."+ibcchanneltypes.AttributeKeyData]; ok { + ibca.isIBC = true + } +} + +// PostDeliverTx assigns IBCAdapter flag values to the interface to be written to the message queue. +func (ibca *IBCAdapter) PostDeliverTx(_ sdk.Context, _ []byte, txDict common.JsDict, _ *[]common.Message) { + txDict["is_ibc"] = ibca.isIBC +} + +// AfterEndBlock does nothing since no action is required in the EndBlocker. 
+func (ibca *IBCAdapter) AfterEndBlock(_ sdk.Context, _ abci.RequestEndBlock, _ common.EvMap, _ *[]common.Message) { +} diff --git a/hooks/emitter/emitter.go b/hooks/emitter/emitter.go new file mode 100644 index 00000000000..990b5d545c0 --- /dev/null +++ b/hooks/emitter/emitter.go @@ -0,0 +1,223 @@ +package emitter + +import ( + "context" + "encoding/json" + "strings" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/segmentio/kafka-go" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto/tmhash" + tmjson "github.com/tendermint/tendermint/libs/json" + + "github.com/osmosis-labs/osmosis/v15/app/keepers" + "github.com/osmosis-labs/osmosis/v15/app/params" + "github.com/osmosis-labs/osmosis/v15/hooks/common" +) + +// Hook uses Kafka message queue and adapters functionality to act as an event producer for all events in the blockchains. +type Hook struct { + encodingConfig params.EncodingConfig // The app encoding config + writer *kafka.Writer // Main Kafka writer instance + accsInBlock map[string]bool // Accounts needed to be updated at the end of the block + accsInTx map[string]bool // Accounts related to the current processing transaction + msgs []common.Message // The list of all Kafka messages to be published for this block + adapters []Adapter // Array of adapters needed for the hook + accVerifiers []AccountVerifier // Array of AccountVerifier needed for account verification +} + +// NewHook creates an emitter hook instance that will be added in the Osmosis App. 
+func NewHook( + encodingConfig params.EncodingConfig, + keeper keepers.AppKeepers, + kafkaURI string, +) *Hook { + paths := strings.SplitN(kafkaURI, "@", 2) + return &Hook{ + encodingConfig: encodingConfig, + writer: kafka.NewWriter(kafka.WriterConfig{ + Brokers: paths[1:], + Topic: paths[0], + Balancer: &kafka.LeastBytes{}, + BatchTimeout: 1 * time.Millisecond, + BatchBytes: 512000000, + }), + adapters: []Adapter{ + NewValidatorAdapter(keeper.StakingKeeper), + NewBankAdapter(), + NewIBCAdapter(), + NewGovAdapter(keeper.GovKeeper), + NewWasmAdapter(keeper.WasmKeeper, keeper.GovKeeper), + NewPoolAdapter(keeper.GAMMKeeper, keeper.GovKeeper, keeper.LockupKeeper), + NewProtorevAdapter(keeper.ProtoRevKeeper), + }, + accVerifiers: []AccountVerifier{ + ContractAccountVerifier{keeper: *keeper.WasmKeeper}, + AuthAccountVerifier{keeper: *keeper.AccountKeeper}, + }, + } +} + +// AddAccountsInBlock adds the given accounts to the array of accounts to be updated at EndBlocker. +func (h *Hook) AddAccountsInBlock(accs ...string) { + for _, acc := range accs { + h.accsInBlock[acc] = true + } +} + +// AddAccountsInTx adds the given accounts to the array of accounts related to the current processing transaction. +func (h *Hook) AddAccountsInTx(accs ...string) { + for _, acc := range accs { + h.accsInTx[acc] = true + } +} + +// FlushMessages publishes all pending messages to Kafka message queue. Blocks until completion. +func (h *Hook) FlushMessages() { + kafkaMsgs := make([]kafka.Message, len(h.msgs)) + for idx, msg := range h.msgs { + res, _ := json.Marshal(msg.Value) // Error must always be nil. + kafkaMsgs[idx] = kafka.Message{Key: []byte(msg.Key), Value: res} + } + err := h.writer.WriteMessages(context.Background(), kafkaMsgs...) + if err != nil { + panic(err) + } +} + +// AfterInitChain specifies actions to be done after chain initialization (app.Hook interface). 
+func (h *Hook) AfterInitChain(ctx sdk.Context, req abci.RequestInitChain, _ abci.ResponseInitChain) { + var genesisState map[string]json.RawMessage + if err := tmjson.Unmarshal(req.AppStateBytes, &genesisState); err != nil { + panic(err) + } + + var authGenesis authtypes.GenesisState + if genesisState[authtypes.ModuleName] != nil { + h.encodingConfig.Marshaler.MustUnmarshalJSON(genesisState[authtypes.ModuleName], &authGenesis) + } + for _, account := range authGenesis.GetAccounts() { + a, ok := account.GetCachedValue().(authtypes.AccountI) + if !ok { + panic("expected account") + } + + common.AppendMessage(&h.msgs, "SET_ACCOUNT", VerifyAccount(ctx, a.GetAddress(), h.accVerifiers...)) + } + + for idx := range h.adapters { + h.adapters[idx].AfterInitChain(ctx, h.encodingConfig, genesisState, &h.msgs) + } + + common.AppendMessage(&h.msgs, "COMMIT", common.JsDict{"height": 0}) + h.FlushMessages() +} + +// AfterBeginBlock specifies actions needed to be done after each BeginBlock period (app.Hook interface) +func (h *Hook) AfterBeginBlock(ctx sdk.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) { + h.accsInBlock = make(map[string]bool) + h.accsInTx = make(map[string]bool) + h.msgs = []common.Message{} + evMap := common.ParseEvents(sdk.StringifyEvents(res.Events)) + for idx := range h.adapters { + h.adapters[idx].AfterBeginBlock(ctx, req, evMap, &h.msgs) + } +} + +// AfterDeliverTx specifies actions to be done after each transaction has been processed (app.Hook interface). 
+func (h *Hook) AfterDeliverTx(ctx sdk.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) { + if ctx.BlockHeight() == 0 { + return + } + + h.accsInTx = make(map[string]bool) + for idx := range h.adapters { + h.adapters[idx].PreDeliverTx() + } + txHash := tmhash.Sum(req.Tx) + tx, err := h.encodingConfig.TxConfig.TxDecoder()(req.Tx) + if err != nil { + panic("cannot decode tx") + } + txDict := getTxDict(ctx, tx, txHash, res) + common.AppendMessage(&h.msgs, "NEW_TRANSACTION", txDict) + + txRes := h.getTxResponse(ctx, txHash, req, res) + common.AppendMessage(&h.msgs, "INSERT_LCD_TX_RESULTS", common.JsDict{ + "tx_hash": txHash, + "block_height": ctx.BlockHeight(), + "result": txRes, + }) + md := getMessageDicts(txRes) + logs, _ := sdk.ParseABCILogs(res.Log) + var msgs []map[string]interface{} + for idx, msg := range tx.GetMsgs() { + for i := range h.adapters { + h.adapters[i].CheckMsg(ctx, msg) + } + common.GetRelatedAccounts(h.GetMsgJson(msg), h.accsInTx) + if res.IsOK() { + h.handleMsg(ctx, txHash, msg, logs[idx], md[idx]) + } + msgs = append(msgs, common.JsDict{ + "detail": md[idx], + "type": sdk.MsgTypeURL(msg), + }) + } + + signers := tx.GetMsgs()[0].GetSigners() + addrs := make([]string, len(signers)) + for idx, signer := range signers { + addrs[idx] = signer.String() + } + + h.updateTxInBlockAndRelatedTx(ctx, txHash, addrs) + h.PostDeliverTx(ctx, txHash, txDict, msgs) +} + +// PostDeliverTx specifies actions to be done by adapters after each transaction has been processed by the hook. +func (h *Hook) PostDeliverTx(ctx sdk.Context, txHash []byte, txDict common.JsDict, msgs []map[string]interface{}) { + txDict["messages"] = msgs + for idx := range h.adapters { + h.adapters[idx].PostDeliverTx(ctx, txHash, txDict, &h.msgs) + } +} + +// AfterEndBlock specifies actions to be done after each end block period (app.Hook interface). 
+func (h *Hook) AfterEndBlock(ctx sdk.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) { + evMap := common.ParseEvents(sdk.StringifyEvents(res.Events)) + for idx := range h.adapters { + h.adapters[idx].AfterEndBlock(ctx, req, evMap, &h.msgs) + } + + // Index 0 is the message NEW_BLOCK, SET_ACCOUNT messages are inserted between NEW_BLOCK and other messages. + modifiedMsgs := []common.Message{h.msgs[0]} + for accStr := range h.accsInBlock { + acc, _ := sdk.AccAddressFromBech32(accStr) + modifiedMsgs = append(modifiedMsgs, common.Message{ + Key: "SET_ACCOUNT", + Value: VerifyAccount(ctx, acc, h.accVerifiers...), + }) + } + h.msgs = append(modifiedMsgs, h.msgs[1:]...) + common.AppendMessage(&h.msgs, "COMMIT", common.JsDict{"height": req.Height}) +} + +// BeforeCommit specifies actions to be done before commit block (app.Hook interface). +func (h *Hook) BeforeCommit() { + h.FlushMessages() +} + +// GetMsgJson returns an unmarshalled interface of the given sdk.Msg. +func (h *Hook) GetMsgJson(msg sdk.Msg) interface{} { + bz, _ := h.encodingConfig.Marshaler.MarshalInterfaceJSON(msg) + var jsonMsg interface{} + err := json.Unmarshal(bz, &jsonMsg) + if err != nil { + panic(err) + } + return jsonMsg +} diff --git a/hooks/emitter/gov.go b/hooks/emitter/gov.go new file mode 100644 index 00000000000..6bbb926204e --- /dev/null +++ b/hooks/emitter/gov.go @@ -0,0 +1,144 @@ +package emitter + +import ( + "encoding/json" + + sdk "github.com/cosmos/cosmos-sdk/types" + govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" + "github.com/cosmos/cosmos-sdk/x/gov/types" + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/osmosis-labs/osmosis/v15/app/params" + "github.com/osmosis-labs/osmosis/v15/hooks/common" +) + +var ( + StatusInactive = 6 + _ Adapter = &GovAdapter{} +) + +// GovAdapter defines a struct containing the required keeper to process the x/gov hook. It implements Adapter interface. 
+type GovAdapter struct { + keeper *govkeeper.Keeper +} + +// NewGovAdapter creates a new GovAdapter instance that will be added to the emitter hook adapters. +func NewGovAdapter(keeper *govkeeper.Keeper) *GovAdapter { + return &GovAdapter{ + keeper: keeper, + } +} + +// AfterInitChain does nothing since no action is required in the InitChainer. +func (ga *GovAdapter) AfterInitChain(_ sdk.Context, _ params.EncodingConfig, _ map[string]json.RawMessage, _ *[]common.Message) { +} + +// AfterBeginBlock does nothing since no action is required in the BeginBlocker. +func (ga *GovAdapter) AfterBeginBlock(_ sdk.Context, _ abci.RequestBeginBlock, _ common.EvMap, _ *[]common.Message) { +} + +// PreDeliverTx does nothing since no action is required before processing each transaction. +func (ga *GovAdapter) PreDeliverTx() { +} + +// CheckMsg does nothing since no message check is required for governance module. +func (ga *GovAdapter) CheckMsg(_ sdk.Context, _ sdk.Msg) { +} + +// HandleMsgEvents checks for SubmitProposal or ProposalDeposit events and process new proposals in the current transaction. 
+func (ga *GovAdapter) HandleMsgEvents(ctx sdk.Context, _ []byte, msg sdk.Msg, evMap common.EvMap, detail common.JsDict, kafka *[]common.Message) { + var submitProposalId uint64 + if rawIds, ok := evMap[types.EventTypeSubmitProposal+"."+types.AttributeKeyProposalID]; ok { + for _, rawId := range rawIds { + submitProposalId = common.Atoui(rawId) + proposal, _ := ga.keeper.GetProposal(ctx, submitProposalId) + content := proposal.GetContent() + common.AppendMessage(kafka, "NEW_PROPOSAL", common.JsDict{ + "id": submitProposalId, + "proposer": msg.GetSigners()[0], + "type": content.ProposalType(), + "title": content.GetTitle(), + "description": content.GetDescription(), + "proposal_route": content.ProposalRoute(), + "status": int(proposal.Status), + "submit_time": proposal.SubmitTime.UnixNano(), + "deposit_end_time": proposal.DepositEndTime.UnixNano(), + "voting_time": proposal.VotingStartTime.UnixNano(), + "voting_end_time": proposal.VotingEndTime.UnixNano(), + "content": content, + "is_expedited": proposal.IsExpedited, + "resolved_height": nil, + }) + common.AppendMessage(kafka, "UPDATE_PROPOSAL", common.JsDict{ + "id": submitProposalId, + "status": int(proposal.Status), + "voting_time": proposal.VotingStartTime.UnixNano(), + "voting_end_time": proposal.VotingEndTime.UnixNano(), + }) + } + } + + if rawIds, ok := evMap[types.EventTypeProposalDeposit+"."+types.AttributeKeyVotingPeriodStart]; ok { + for _, rawId := range rawIds { + id := common.Atoui(rawId) + proposal, _ := ga.keeper.GetProposal(ctx, id) + common.AppendMessage(kafka, "UPDATE_PROPOSAL", common.JsDict{ + "id": id, + "status": int(proposal.Status), + "voting_time": proposal.VotingStartTime.UnixNano(), + "voting_end_time": proposal.VotingEndTime.UnixNano(), + }) + } + } + + switch msg := msg.(type) { + case *types.MsgSubmitProposal: + detail["proposal_id"] = submitProposalId + case *types.MsgDeposit: + proposal, _ := ga.keeper.GetProposal(ctx, msg.ProposalId) + detail["title"] = proposal.GetTitle() + case 
*types.MsgVote: + proposal, _ := ga.keeper.GetProposal(ctx, msg.ProposalId) + detail["title"] = proposal.GetTitle() + case *types.MsgVoteWeighted: + proposal, _ := ga.keeper.GetProposal(ctx, msg.ProposalId) + detail["title"] = proposal.GetTitle() + } +} + +// PostDeliverTx does nothing since no action is required after the transaction has been processed by the hook. +func (ga *GovAdapter) PostDeliverTx(_ sdk.Context, _ []byte, _ common.JsDict, _ *[]common.Message) { +} + +// AfterEndBlock checks for ActiveProposal or InactiveProposal events and update proposals accordingly. +func (ga *GovAdapter) AfterEndBlock(ctx sdk.Context, _ abci.RequestEndBlock, evMap common.EvMap, kafka *[]common.Message) { + if rawIds, ok := evMap[types.EventTypeActiveProposal+"."+types.AttributeKeyProposalID]; ok { + for idx, rawId := range rawIds { + id := common.Atoui(rawId) + proposal, _ := ga.keeper.GetProposal(ctx, id) + if evMap[types.EventTypeActiveProposal+"."+types.AttributeKeyProposalResult][idx] == types.AttributeValueExpeditedProposalRejected { + common.AppendMessage(kafka, "UPDATE_PROPOSAL", common.JsDict{ + "id": id, + "is_expedited": proposal.IsExpedited, + "voting_end_time": proposal.VotingEndTime.UnixNano(), + }) + } else { + common.AppendMessage(kafka, "UPDATE_PROPOSAL", common.JsDict{ + "id": id, + "status": int(proposal.Status), + "resolved_height": ctx.BlockHeight(), + }) + } + } + } + + if rawIds, ok := evMap[types.EventTypeInactiveProposal+"."+types.AttributeKeyProposalID]; ok { + for _, rawId := range rawIds { + common.AppendMessage(kafka, "UPDATE_PROPOSAL", common.JsDict{ + "id": common.Atoi(rawId), + "status": StatusInactive, + "resolved_height": ctx.BlockHeight(), + }) + } + } +} diff --git a/hooks/emitter/handler.go b/hooks/emitter/handler.go new file mode 100644 index 00000000000..ee99563853a --- /dev/null +++ b/hooks/emitter/handler.go @@ -0,0 +1,23 @@ +package emitter + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + 
"github.com/osmosis-labs/osmosis/v15/hooks/common" +) + +// handleMsg finds related accounts for the given message events and handles the message in each adapter in the hook. +func (h *Hook) handleMsg(ctx sdk.Context, txHash []byte, msg sdk.Msg, log sdk.ABCIMessageLog, detail common.JsDict) { + evMap := common.ParseEvents(log.Events) + for _, values := range evMap { + for _, value := range values { + if _, err := sdk.AccAddressFromBech32(value); err == nil { + h.AddAccountsInTx(value) + } + } + } + + for idx := range h.adapters { + h.adapters[idx].HandleMsgEvents(ctx, txHash, msg, evMap, detail, &h.msgs) + } +} diff --git a/hooks/emitter/pool.go b/hooks/emitter/pool.go new file mode 100644 index 00000000000..5c7bc53174c --- /dev/null +++ b/hooks/emitter/pool.go @@ -0,0 +1,489 @@ +package emitter + +import ( + "encoding/json" + "strings" + + sdk "github.com/cosmos/cosmos-sdk/types" + abci "github.com/tendermint/tendermint/abci/types" + + govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + "github.com/osmosis-labs/osmosis/v15/app/params" + "github.com/osmosis-labs/osmosis/v15/hooks/common" + gammkeeper "github.com/osmosis-labs/osmosis/v15/x/gamm/keeper" + "github.com/osmosis-labs/osmosis/v15/x/gamm/pool-models/balancer" + "github.com/osmosis-labs/osmosis/v15/x/gamm/pool-models/stableswap" + gammtypes "github.com/osmosis-labs/osmosis/v15/x/gamm/types" + lockupkeeper "github.com/osmosis-labs/osmosis/v15/x/lockup/keeper" + lockuptypes "github.com/osmosis-labs/osmosis/v15/x/lockup/types" + poolmanagertypes "github.com/osmosis-labs/osmosis/v15/x/poolmanager/types" + superfluidtypes "github.com/osmosis-labs/osmosis/v15/x/superfluid/types" +) + +var _ Adapter = &PoolAdapter{} + +// PoolAdapter defines a struct containing required keepers, maps and flags to process the Osmosis pools related hook. +// It implements Adapter interface. 
+type PoolAdapter struct { + // tx flags + isSwapTx bool + isLpTx bool + isBondTx bool + isSuperfluidTx bool + + // pool transactions + poolTxs map[uint64]bool + poolInBlock map[uint64]bool + onlyPoolMsgTx bool + + // keepers + gammKeeper *gammkeeper.Keeper + govKeeper *govkeeper.Keeper + lockupKeeper *lockupkeeper.Keeper +} + +// NewPoolAdapter creates a new PoolAdapter instance that will be added to the emitter hook adapters. +func NewPoolAdapter( + gammKeeper *gammkeeper.Keeper, + govKeeper *govkeeper.Keeper, + lockupKeeper *lockupkeeper.Keeper, +) *PoolAdapter { + return &PoolAdapter{ + isSwapTx: false, + isLpTx: false, + isBondTx: false, + isSuperfluidTx: false, + poolTxs: make(map[uint64]bool), + poolInBlock: make(map[uint64]bool), + onlyPoolMsgTx: true, + gammKeeper: gammKeeper, + govKeeper: govKeeper, + lockupKeeper: lockupKeeper, + } +} + +// AfterInitChain does nothing since no action is required in the InitChainer. +func (pa *PoolAdapter) AfterInitChain( + _ sdk.Context, + _ params.EncodingConfig, + _ map[string]json.RawMessage, + _ *[]common.Message, +) { +} + +// AfterBeginBlock sets the necessary map to the starting value before processing each block. +func (pa *PoolAdapter) AfterBeginBlock(_ sdk.Context, _ abci.RequestBeginBlock, _ common.EvMap, _ *[]common.Message) { + pa.poolInBlock = make(map[uint64]bool) +} + +// PreDeliverTx sets the necessary maps and flags to the starting value before processing each transaction. +func (pa *PoolAdapter) PreDeliverTx() { + pa.isSwapTx = false + pa.isLpTx = false + pa.isBondTx = false + pa.isSuperfluidTx = false + pa.poolTxs = make(map[uint64]bool) + pa.onlyPoolMsgTx = true +} + +// CheckMsg checks the message type and extracts message values to PoolAdapter maps and flags accordingly. 
+func (pa *PoolAdapter) CheckMsg(ctx sdk.Context, msg sdk.Msg) { + switch msg := msg.(type) { + case *gammtypes.MsgJoinPool: + pa.isLpTx = true + pa.poolTxs[msg.PoolId] = true + case *gammtypes.MsgExitPool: + pa.isLpTx = true + pa.poolTxs[msg.PoolId] = true + case *gammtypes.MsgSwapExactAmountIn: + pa.isSwapTx = true + for _, route := range msg.Routes { + pa.poolTxs[route.PoolId] = true + } + case *gammtypes.MsgSwapExactAmountOut: + pa.isSwapTx = true + for _, route := range msg.Routes { + pa.poolTxs[route.PoolId] = true + } + case *gammtypes.MsgJoinSwapExternAmountIn: + pa.isLpTx = true + pa.poolTxs[msg.PoolId] = true + case *gammtypes.MsgJoinSwapShareAmountOut: + pa.isLpTx = true + pa.poolTxs[msg.PoolId] = true + case *gammtypes.MsgExitSwapExternAmountOut: + pa.isLpTx = true + pa.poolTxs[msg.PoolId] = true + case *gammtypes.MsgExitSwapShareAmountIn: + pa.isLpTx = true + pa.poolTxs[msg.PoolId] = true + case *poolmanagertypes.MsgSwapExactAmountIn: + pa.isSwapTx = true + for _, route := range msg.Routes { + pa.poolTxs[route.PoolId] = true + } + case *poolmanagertypes.MsgSwapExactAmountOut: + pa.isSwapTx = true + for _, route := range msg.Routes { + pa.poolTxs[route.PoolId] = true + } + case *lockuptypes.MsgLockTokens: + if poolIds, found := getPoolIdsFromCoins(msg.Coins); found { + pa.isBondTx = true + for _, poolId := range poolIds { + pa.poolTxs[poolId] = true + } + } + case *lockuptypes.MsgForceUnlock: + if poolId, found := pa.getPoolIdFromLockId(ctx, msg.ID); found { + pa.isBondTx = true + pa.poolTxs[poolId] = true + } + case *lockuptypes.MsgBeginUnlocking: + if poolId, found := pa.getPoolIdFromLockId(ctx, msg.ID); found { + pa.isBondTx = true + pa.poolTxs[poolId] = true + } + case *lockuptypes.MsgExtendLockup: + if poolId, found := pa.getPoolIdFromLockId(ctx, msg.ID); found { + pa.isBondTx = true + pa.poolTxs[poolId] = true + } + case *superfluidtypes.MsgSuperfluidDelegate: + if poolId, found := pa.getPoolIdFromLockId(ctx, msg.LockId); found { + 
pa.isSuperfluidTx = true + pa.poolTxs[poolId] = true + } + case *superfluidtypes.MsgSuperfluidUndelegate: + if poolId, found := pa.getPoolIdFromLockId(ctx, msg.LockId); found { + pa.isSuperfluidTx = true + pa.poolTxs[poolId] = true + } + case *superfluidtypes.MsgSuperfluidUnbondLock: + if poolId, found := pa.getPoolIdFromLockId(ctx, msg.LockId); found { + pa.isSuperfluidTx = true + pa.poolTxs[poolId] = true + } + case *superfluidtypes.MsgLockAndSuperfluidDelegate: + if poolIds, found := getPoolIdsFromCoins(msg.Coins); found { + pa.isSuperfluidTx = true + for _, poolId := range poolIds { + pa.poolTxs[poolId] = true + } + } + case *superfluidtypes.MsgUnPoolWhitelistedPool: + pa.isSuperfluidTx = true + pa.poolTxs[msg.PoolId] = true + case *superfluidtypes.MsgSuperfluidUndelegateAndUnbondLock: + if poolId, found := pa.getPoolIdFromLockId(ctx, msg.LockId); found { + pa.isSuperfluidTx = true + pa.poolTxs[poolId] = true + } + default: + pa.onlyPoolMsgTx = false + } +} + +// HandleMsgEvents processes a special case which could not be handled during CheckMsg because it has to be a success +// transaction only. Also, handles pool events emitted from messages not from Osmosis pools related modules, e.g., +// via contract execution. +func (pa *PoolAdapter) HandleMsgEvents( + ctx sdk.Context, + txHash []byte, + msg sdk.Msg, + evMap common.EvMap, + _ common.JsDict, + kafka *[]common.Message, +) { + switch msg := msg.(type) { + case *lockuptypes.MsgBeginUnlockingAll: + pa.handleMsgBeginUnlockingAll(ctx, evMap) + default: + if !pa.onlyPoolMsgTx { + pa.handleCreatePoolEvents(ctx, txHash, msg.GetSigners()[0], evMap, kafka) + pa.handleNonPoolMsgsPoolActionEvents(ctx, evMap) + } + } +} + +// PostDeliverTx appends the processed transaction to the array of messages to be written to Kafka and adds the pool +// to the array of pools to be updated at end block. 
+func (pa *PoolAdapter) PostDeliverTx(ctx sdk.Context, txHash []byte, _ common.JsDict, kafka *[]common.Message) {
+	for poolId := range pa.poolTxs {
+		common.AppendMessage(kafka, "NEW_POOL_TRANSACTION", common.JsDict{
+			"pool_id":       poolId,
+			"tx_hash":       txHash,
+			"block_height":  ctx.BlockHeight(),
+			"is_swap":       pa.isSwapTx,
+			"is_lp":         pa.isLpTx,
+			"is_bond":       pa.isBondTx,
+			"is_superfluid": pa.isSuperfluidTx,
+		})
+		pa.poolInBlock[poolId] = true
+	}
+	pa.poolTxs = make(map[uint64]bool)
+}
+
+// AfterEndBlock checks for superfluid related ActiveProposal events and update the status accordingly. Update pool
+// stats at the end.
+func (pa *PoolAdapter) AfterEndBlock(
+	ctx sdk.Context,
+	_ abci.RequestEndBlock,
+	evMap common.EvMap,
+	kafka *[]common.Message,
+) {
+	if rawIds, ok := evMap[govtypes.EventTypeActiveProposal+"."+govtypes.AttributeKeyProposalID]; ok {
+		for _, rawId := range rawIds {
+			proposalId := common.Atoui(rawId) // parse as uint64 directly, consistent with the rest of the hook
+			proposal, _ := pa.govKeeper.GetProposal(ctx, proposalId)
+
+			switch content := proposal.GetContent().(type) {
+			case *superfluidtypes.SetSuperfluidAssetsProposal:
+				for _, asset := range content.Assets {
+					if id, ok := getPoolIdFromDenom(asset.Denom); ok {
+						common.AppendMessage(kafka, "UPDATE_SET_SUPERFLUID_ASSET", common.JsDict{
+							"id": common.Atoui(id),
+						})
+					}
+				}
+			case *superfluidtypes.RemoveSuperfluidAssetsProposal:
+				for _, denom := range content.SuperfluidAssetDenoms {
+					if id, ok := getPoolIdFromDenom(denom); ok {
+						common.AppendMessage(kafka, "UPDATE_REMOVE_SUPERFLUID_ASSET", common.JsDict{
+							"id": common.Atoui(id),
+						})
+					}
+				}
+			}
+		}
+	}
+	pa.flushUpdatePoolStats(ctx, kafka)
+}
+
+// getPoolIdFromDenom returns pool id if the provided denom is a pool share token. Otherwise, returns false.
+func getPoolIdFromDenom(denom string) (string, bool) {
+	if !strings.HasPrefix(denom, "gamm/pool/") {
+		return "", false
+	}
+	return strings.TrimPrefix(denom, "gamm/pool/"), true // TrimPrefix, not Trim: Trim strips a character *cutset*
+}
+
+// getPoolIdsFromCoins filters an array of coins and returns pool ids of pool share tokens in the array.
+func getPoolIdsFromCoins(coins sdk.Coins) ([]uint64, bool) {
+	result := make([]uint64, 0)
+	found := false
+	for _, coin := range coins {
+		if poolId, ok := getPoolIdFromDenom(coin.GetDenom()); ok {
+			result = append(result, common.Atoui(poolId))
+			found = true
+		}
+	}
+
+	return result, found
+}
+
+// getPoolIdFromLockId returns pool id of the given lock if the locked token is a pool share token.
+func (pa *PoolAdapter) getPoolIdFromLockId(ctx sdk.Context, lockId uint64) (uint64, bool) {
+	lock, err := pa.lockupKeeper.GetLockByID(ctx, lockId)
+	if err != nil {
+		return 0, false
+	}
+	for _, coin := range lock.Coins {
+		if poolId, ok := getPoolIdFromDenom(coin.GetDenom()); ok {
+			return common.Atoui(poolId), true
+		}
+	}
+	return 0, false
+}
+
+// getPoolIdsFromLockIds filters an array of locks and returns pool ids of pool share tokens in the locks.
+func (pa *PoolAdapter) getPoolIdsFromLockIds(ctx sdk.Context, lockIds []string) []uint64 {
+	result := make([]uint64, 0)
+	for _, lockId := range lockIds {
+		lock, err := pa.lockupKeeper.GetLockByID(ctx, common.Atoui(lockId))
+		if err != nil {
+			continue
+		}
+		for _, coin := range lock.Coins {
+			if poolId, ok := getPoolIdFromDenom(coin.GetDenom()); ok {
+				result = append(result, common.Atoui(poolId))
+			}
+		}
+	}
+
+	return result
+}
+
+// handleCreatePoolEvents handles PoolCreated events and processes new pools in the current transaction.
+func (pa *PoolAdapter) handleCreatePoolEvents(ctx sdk.Context, txHash []byte, sender sdk.AccAddress, evMap common.EvMap, kafka *[]common.Message) { + if rawIds, ok := evMap[gammtypes.TypeEvtPoolCreated+"."+gammtypes.AttributeKeyPoolId]; ok { + newPool := make(map[uint64]bool) + for _, rawId := range rawIds { + newPool[common.Atoui(rawId)] = true + } + for poolId := range newPool { + poolInfo, _ := pa.gammKeeper.GetPool(ctx, poolId) + switch pool := poolInfo.(type) { + case *balancer.Pool: + weights := make([]common.JsDict, 0) + for _, weight := range pool.PoolAssets { + weights = append(weights, common.JsDict{ + "denom": weight.Token.GetDenom(), + "weight": weight.Weight.String(), + }) + } + poolMsg := common.JsDict{ + "id": poolId, + "liquidity": pool.GetTotalPoolLiquidity(ctx), + "type": pool.GetType(), + "creator": sender.String(), + "create_tx": txHash, + "is_superfluid": false, + "is_supported": false, + "swap_fee": pool.GetSwapFee(ctx).String(), + "exit_fee": pool.GetExitFee(ctx).String(), + "future_pool_governor": pool.FuturePoolGovernor, + "weight": weights, + "address": pool.GetAddress().String(), + "total_shares": pool.TotalShares, + } + if smoothWeightChangeParams := pool.PoolParams.GetSmoothWeightChangeParams(); smoothWeightChangeParams != nil { + poolMsg["smooth_weight_change_params"] = smoothWeightChangeParams + } + common.AppendMessage(kafka, "NEW_GAMM_POOL", poolMsg) + case *stableswap.Pool: + common.AppendMessage(kafka, "NEW_GAMM_POOL", common.JsDict{ + "id": poolId, + "liquidity": pool.GetTotalPoolLiquidity(ctx), + "type": pool.GetType(), + "creator": sender.String(), + "create_tx": txHash, + "is_superfluid": false, + "is_supported": false, + "swap_fee": pool.GetSwapFee(ctx).String(), + "exit_fee": pool.GetExitFee(ctx).String(), + "future_pool_governor": pool.FuturePoolGovernor, + "scaling_factors": pool.GetScalingFactors(), + "scaling_factor_controller": pool.ScalingFactorController, + "address": pool.GetAddress().String(), + "total_shares": 
pool.TotalShares, + }) + default: + panic("cannot handle pool type") + } + } + } +} + +// handleNonPoolMsgsPoolActionEvents handles events emitted from calling other messages calling pool actions +// (from contracts, ica) +func (pa *PoolAdapter) handleNonPoolMsgsPoolActionEvents(ctx sdk.Context, evMap common.EvMap) { + if poolIds, ok := evMap[gammtypes.TypeEvtPoolJoined+"."+gammtypes.AttributeKeyPoolId]; ok { + pa.isLpTx = true + for _, poolId := range poolIds { + pa.poolTxs[common.Atoui(poolId)] = true + } + } + if poolIds, ok := evMap[gammtypes.TypeEvtPoolExited+"."+gammtypes.AttributeKeyPoolId]; ok { + pa.isLpTx = true + for _, poolId := range poolIds { + pa.poolTxs[common.Atoui(poolId)] = true + } + } + if poolIds, ok := evMap[gammtypes.TypeEvtTokenSwapped+"."+gammtypes.AttributeKeyPoolId]; ok { + pa.isSwapTx = true + for _, poolId := range poolIds { + pa.poolTxs[common.Atoui(poolId)] = true + } + } + if lockIds, ok := evMap[lockuptypes.TypeEvtLockTokens+"."+lockuptypes.AttributePeriodLockID]; ok { + pa.isBondTx = true + poolIds := pa.getPoolIdsFromLockIds(ctx, lockIds) + for _, poolId := range poolIds { + pa.poolTxs[poolId] = true + } + } + if lockIds, ok := evMap[lockuptypes.TypeEvtAddTokensToLock+"."+lockuptypes.AttributePeriodLockID]; ok { + pa.isBondTx = true + poolIds := pa.getPoolIdsFromLockIds(ctx, lockIds) + for _, poolId := range poolIds { + pa.poolTxs[poolId] = true + } + } + if lockIds, ok := evMap[lockuptypes.TypeEvtBeginUnlock+"."+lockuptypes.AttributePeriodLockID]; ok { + pa.isBondTx = true + poolIds := pa.getPoolIdsFromLockIds(ctx, lockIds) + for _, poolId := range poolIds { + pa.poolTxs[poolId] = true + } + } + if lockIds, ok := evMap[superfluidtypes.TypeEvtSuperfluidDelegate+"."+superfluidtypes.AttributeLockId]; ok { + pa.isSuperfluidTx = true + poolIds := pa.getPoolIdsFromLockIds(ctx, lockIds) + for _, poolId := range poolIds { + pa.poolTxs[poolId] = true + } + } + if lockIds, ok := 
evMap[superfluidtypes.TypeEvtSuperfluidUndelegate+"."+superfluidtypes.AttributeLockId]; ok { + pa.isSuperfluidTx = true + poolIds := pa.getPoolIdsFromLockIds(ctx, lockIds) + for _, poolId := range poolIds { + pa.poolTxs[poolId] = true + } + } + if lockIds, ok := evMap[superfluidtypes.TypeEvtSuperfluidUnbondLock+"."+superfluidtypes.AttributeLockId]; ok { + pa.isSuperfluidTx = true + poolIds := pa.getPoolIdsFromLockIds(ctx, lockIds) + for _, poolId := range poolIds { + pa.poolTxs[poolId] = true + } + } + if lockIds, ok := evMap[superfluidtypes.TypeEvtSuperfluidUndelegateAndUnbondLock+"."+superfluidtypes.AttributeLockId]; ok { + pa.isSuperfluidTx = true + poolIds := pa.getPoolIdsFromLockIds(ctx, lockIds) + for _, poolId := range poolIds { + pa.poolTxs[poolId] = true + } + } +} + +// handleMsgBeginUnlockingAll gets all pool ids from lock ids being unlocking via the message. +func (pa *PoolAdapter) handleMsgBeginUnlockingAll(ctx sdk.Context, evMap common.EvMap) { + pa.isBondTx = true + if lockIds, ok := evMap[lockuptypes.TypeEvtBeginUnlock+"."+lockuptypes.AttributePeriodLockID]; ok { + for _, poolId := range pa.getPoolIdsFromLockIds(ctx, lockIds) { + pa.poolTxs[poolId] = true + } + } +} + +// flushUpdatePoolStats appends updated Osmosis pools stats into the provided Kafka messages array. 
+func (pa *PoolAdapter) flushUpdatePoolStats(ctx sdk.Context, kafka *[]common.Message) { + for poolId := range pa.poolInBlock { + poolInfo, _ := pa.gammKeeper.GetPool(ctx, poolId) + switch pool := poolInfo.(type) { + case *balancer.Pool: + weights := make([]common.JsDict, 0) + for _, weight := range pool.PoolAssets { + weights = append(weights, common.JsDict{ + "denom": weight.Token.GetDenom(), + "weight": weight.Weight.String(), + }) + } + common.AppendMessage(kafka, "UPDATE_POOL", common.JsDict{ + "id": poolId, + "liquidity": pool.GetTotalPoolLiquidity(ctx), + "weight": weights, + "total_shares": pool.TotalShares, + }) + case *stableswap.Pool: + common.AppendMessage(kafka, "UPDATE_POOL", common.JsDict{ + "id": poolId, + "liquidity": pool.GetTotalPoolLiquidity(ctx), + "total_shares": pool.TotalShares, + }) + } + } + pa.poolInBlock = make(map[uint64]bool) +} diff --git a/hooks/emitter/protorev.go b/hooks/emitter/protorev.go new file mode 100644 index 00000000000..ab3eef35f88 --- /dev/null +++ b/hooks/emitter/protorev.go @@ -0,0 +1,89 @@ +package emitter + +import ( + "encoding/json" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/osmosis-labs/osmosis/v15/app/params" + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/osmosis-labs/osmosis/v15/hooks/common" + "github.com/osmosis-labs/osmosis/v15/x/protorev/keeper" +) + +var ( + _ Adapter = &ProtorevAdapter{} +) + +// ProtorevAdapter defines a struct containing the required keeper to process the Osmosis x/protorev hook. +// It implements Adapter interface. +type ProtorevAdapter struct { + keeper *keeper.Keeper +} + +// NewProtorevAdapter creates a new ProtorevAdapter instance that will be added to the emitter hook adapters. +func NewProtorevAdapter(keeper *keeper.Keeper) *ProtorevAdapter { + return &ProtorevAdapter{ + keeper: keeper, + } +} + +// AfterInitChain does nothing since no action is required in the InitChainer. 
+func (pa *ProtorevAdapter) AfterInitChain(_ sdk.Context, _ params.EncodingConfig, _ map[string]json.RawMessage, _ *[]common.Message) { +} + +// AfterBeginBlock does nothing since no action is required in the BeginBlocker. +func (pa *ProtorevAdapter) AfterBeginBlock(_ sdk.Context, _ abci.RequestBeginBlock, _ common.EvMap, _ *[]common.Message) { +} + +// PreDeliverTx does nothing since no action is required before processing each transaction. +func (pa *ProtorevAdapter) PreDeliverTx() { +} + +// CheckMsg does nothing since no message check is required for x/protorev module. +func (pa *ProtorevAdapter) CheckMsg(_ sdk.Context, _ sdk.Msg) { +} + +// HandleMsgEvents does nothing since no action is required in the transaction events-handling step. +func (pa *ProtorevAdapter) HandleMsgEvents(_ sdk.Context, _ []byte, _ sdk.Msg, _ common.EvMap, _ common.JsDict, _ *[]common.Message) { +} + +// PostDeliverTx does nothing since no action is required after the transaction has been processed by the hook. +func (pa *ProtorevAdapter) PostDeliverTx(_ sdk.Context, _ []byte, _ common.JsDict, _ *[]common.Message) { +} + +// AfterEndBlock appends new x/protorev data for the current block into the provided Kafka messages array. 
+func (pa *ProtorevAdapter) AfterEndBlock(ctx sdk.Context, req abci.RequestEndBlock, _ common.EvMap, kafka *[]common.Message) { + trade, _ := pa.keeper.GetNumberOfTrades(ctx) + common.AppendMessage(kafka, "TRADE", common.JsDict{ + "block_height": req.Height, + "count": trade, + }) + + for _, profit := range pa.keeper.GetAllProfits(ctx) { + common.AppendMessage(kafka, "PROFIT_BY_DENOM", common.JsDict{ + "block_height": req.Height, + "denom": profit.Denom, + "amount": profit.Amount, + }) + } + + routes, _ := pa.keeper.GetAllRoutes(ctx) + for _, route := range routes { + tradeByRoute, _ := pa.keeper.GetTradesByRoute(ctx, route) + common.AppendMessage(kafka, "TRADE_BY_ROUTE", common.JsDict{ + "block_height": req.Height, + "route": route, + "count": tradeByRoute, + }) + profits := pa.keeper.GetAllProfitsByRoute(ctx, route) + for _, profit := range profits { + common.AppendMessage(kafka, "PROFIT_BY_ROUTE", common.JsDict{ + "block_height": req.Height, + "route": route, + "denom": profit.Denom, + "amount": profit.Amount, + }) + } + } +} diff --git a/hooks/emitter/tx.go b/hooks/emitter/tx.go new file mode 100644 index 00000000000..0511f37e743 --- /dev/null +++ b/hooks/emitter/tx.go @@ -0,0 +1,102 @@ +package emitter + +import ( + "encoding/json" + "fmt" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + txtypes "github.com/cosmos/cosmos-sdk/types/tx" + "github.com/osmosis-labs/osmosis/v15/hooks/common" + abci "github.com/tendermint/tendermint/abci/types" + tmtypes "github.com/tendermint/tendermint/rpc/core/types" +) + +// getTxResponse returns an unmarshalled Cosmos SDK TxResponse from Tendermint RequestDeliverTx and ResponseDeliverTx +func (h *Hook) getTxResponse(ctx sdk.Context, txHash []byte, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) common.JsDict { + resTx := tmtypes.ResultTx{ + Hash: txHash, + Height: ctx.BlockHeight(), + TxResult: res, + Tx: req.Tx, + } + txResult, err := common.MkTxResult(h.encodingConfig.TxConfig, 
&resTx, ctx.BlockTime()) + if err != nil { + panic(err) + } + protoTx, ok := txResult.Tx.GetCachedValue().(*txtypes.Tx) + if !ok { + panic("cannot make proto tx") + } + txResJson, err := codec.ProtoMarshalJSON(&txtypes.GetTxResponse{ + Tx: protoTx, + TxResponse: txResult, + }, nil) + if err != nil { + panic(err) + } + var txResJsDict common.JsDict + err = json.Unmarshal(txResJson, &txResJsDict) + if err != nil { + panic(err) + } + return txResJsDict +} + +// getMessageDicts returns an array of JsDict decoded version for messages in the provided transaction. +func getMessageDicts(txResJsDict common.JsDict) []common.JsDict { + details := make([]common.JsDict, 0) + tx := txResJsDict["tx"].(map[string]interface{}) + body := tx["body"].(map[string]interface{}) + msgs := body["messages"].([]interface{}) + for _, msg := range msgs { + detail := msg.(map[string]interface{}) + details = append(details, detail) + } + return details +} + +// getTxDict returns a JsDict decoded version for the provided transaction. +func getTxDict(ctx sdk.Context, tx sdk.Tx, txHash []byte, res abci.ResponseDeliverTx) common.JsDict { + feeTx, ok := tx.(sdk.FeeTx) + if !ok { + panic(fmt.Sprintf("cannot get fee tx for tx %s", txHash)) + } + memoTx, ok := tx.(sdk.TxWithMemo) + if !ok { + panic(fmt.Sprintf("cannot get memo for tx %s", txHash)) + } + var errMsg *string + if !res.IsOK() { + errMsg = &res.Log + } + + return common.JsDict{ + "hash": txHash, + "block_height": ctx.BlockHeight(), + "gas_used": res.GasUsed, + "gas_limit": feeTx.GetGas(), + "gas_fee": feeTx.GetFee().String(), + "err_msg": errMsg, + "sender": tx.GetMsgs()[0].GetSigners()[0].String(), + "success": res.IsOK(), + "memo": memoTx.GetMemo(), + } +} + +// updateTxInBlockAndRelatedTx is being called at the end of each DeliverTx to update hook accounts in transaction +// and accounts in block maps. +func (h *Hook) updateTxInBlockAndRelatedTx(ctx sdk.Context, txHash []byte, signers []string) { + h.AddAccountsInTx(signers...) 
+ relatedAccounts := make([]string, 0) + for acc := range h.accsInTx { + relatedAccounts = append(relatedAccounts, acc) + } + h.AddAccountsInBlock(relatedAccounts...) + common.AppendMessage(&h.msgs, "SET_RELATED_TRANSACTION", common.JsDict{ + "hash": txHash, + "block_height": ctx.BlockHeight(), + "signer": signers, + "related_accounts": relatedAccounts, + }) +} diff --git a/hooks/emitter/validator.go b/hooks/emitter/validator.go new file mode 100644 index 00000000000..d22e5dd3bab --- /dev/null +++ b/hooks/emitter/validator.go @@ -0,0 +1,155 @@ +package emitter + +import ( + "encoding/json" + + sdk "github.com/cosmos/cosmos-sdk/types" + genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types" + slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" + "github.com/cosmos/cosmos-sdk/x/staking/keeper" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/osmosis-labs/osmosis/v15/app/params" + "github.com/osmosis-labs/osmosis/v15/hooks/common" +) + +var _ Adapter = &ValidatorAdapter{} + +// ValidatorAdapter defines a struct containing the required keeper to process the validator related hook. +// It implements Adapter interface. +type ValidatorAdapter struct { + keeper keeper.Keeper +} + +// NewValidatorAdapter creates a new ValidatorAdapter instance that will be added to the emitter hook adapters. +func NewValidatorAdapter(keeper *keeper.Keeper) *ValidatorAdapter { + return &ValidatorAdapter{ + keeper: *keeper, + } +} + +// AfterInitChain extracts validators from the given genesis state. 
+func (va *ValidatorAdapter) AfterInitChain(ctx sdk.Context, encodingConfig params.EncodingConfig, genesisState map[string]json.RawMessage, kafka *[]common.Message) { + var genutilState genutiltypes.GenesisState + encodingConfig.Marshaler.MustUnmarshalJSON(genesisState[genutiltypes.ModuleName], &genutilState) + for _, genTx := range genutilState.GenTxs { + tx, err := encodingConfig.TxConfig.TxJSONDecoder()(genTx) + if err != nil { + panic(err) + } + for _, msg := range tx.GetMsgs() { + if msg, ok := msg.(*stakingtypes.MsgCreateValidator); ok { + valAddr, _ := sdk.ValAddressFromBech32(msg.ValidatorAddress) + va.emitSetValidator(ctx, valAddr, kafka) + } + } + } +} + +// AfterBeginBlock emits a new block message and handles validator jailing events. +func (va *ValidatorAdapter) AfterBeginBlock(ctx sdk.Context, req abci.RequestBeginBlock, evMap common.EvMap, kafka *[]common.Message) { + validator, _ := va.keeper.GetValidatorByConsAddr(ctx, req.Header.GetProposerAddress()) + common.AppendMessage(kafka, "NEW_BLOCK", common.JsDict{ + "height": req.Header.GetHeight(), + "timestamp": ctx.BlockTime().UnixNano(), + "proposer": validator.GetOperator().String(), + "hash": req.GetHash(), + }) + va.handleJailedEvents(ctx, evMap, kafka) +} + +// PreDeliverTx does nothing since no action is required before processing each transaction. +func (va *ValidatorAdapter) PreDeliverTx() { +} + +// CheckMsg does nothing since no message check is required for staking module. +func (va *ValidatorAdapter) CheckMsg(_ sdk.Context, _ sdk.Msg) { +} + +// HandleMsgEvents checks for a successful message that might require validator info updating and processes them +// correspondingly. 
+func (va *ValidatorAdapter) HandleMsgEvents(ctx sdk.Context, _ []byte, msg sdk.Msg, _ common.EvMap, detail common.JsDict, kafka *[]common.Message) { + switch msg := msg.(type) { + case *stakingtypes.MsgCreateValidator: + valAddr, _ := sdk.ValAddressFromBech32(msg.ValidatorAddress) + val := va.emitSetValidator(ctx, valAddr, kafka) + detail["moniker"] = val.Description.Moniker + detail["identity"] = val.Description.Identity + case *stakingtypes.MsgEditValidator: + valAddr, _ := sdk.ValAddressFromBech32(msg.ValidatorAddress) + val := va.emitSetValidator(ctx, valAddr, kafka) + detail["moniker"] = val.Description.Moniker + detail["identity"] = val.Description.Identity + case *stakingtypes.MsgDelegate: + valAddr, _ := sdk.ValAddressFromBech32(msg.ValidatorAddress) + val := va.emitSetValidator(ctx, valAddr, kafka) + detail["moniker"] = val.Description.Moniker + detail["identity"] = val.Description.Identity + case *stakingtypes.MsgUndelegate: + valAddr, _ := sdk.ValAddressFromBech32(msg.ValidatorAddress) + val := va.emitSetValidator(ctx, valAddr, kafka) + detail["moniker"] = val.Description.Moniker + detail["identity"] = val.Description.Identity + case *stakingtypes.MsgBeginRedelegate: + valAddr, _ := sdk.ValAddressFromBech32(msg.ValidatorSrcAddress) + val, _ := va.keeper.GetValidator(ctx, valAddr) + detail["src_moniker"] = val.Description.Moniker + detail["src_identity"] = val.Description.Identity + valAddr, _ = sdk.ValAddressFromBech32(msg.ValidatorDstAddress) + val, _ = va.keeper.GetValidator(ctx, valAddr) + detail["dst_moniker"] = val.Description.Moniker + detail["dst_identity"] = val.Description.Identity + case *slashingtypes.MsgUnjail: + valAddr, _ := sdk.ValAddressFromBech32(msg.ValidatorAddr) + val := va.emitSetValidator(ctx, valAddr, kafka) + detail["moniker"] = val.Description.Moniker + detail["identity"] = val.Description.Identity + default: + return + } +} + +// PostDeliverTx does nothing since no action is required after the transaction has been processed by 
the hook. +func (va *ValidatorAdapter) PostDeliverTx(_ sdk.Context, _ []byte, _ common.JsDict, _ *[]common.Message) { +} + +// AfterEndBlock only handles validator jailing events in a similar fashion to the AfterBeginBlock. +func (va *ValidatorAdapter) AfterEndBlock(ctx sdk.Context, _ abci.RequestEndBlock, evMap common.EvMap, kafka *[]common.Message) { + va.handleJailedEvents(ctx, evMap, kafka) +} + +// emitSetValidator appends the latest validator information into the provided Kafka messages array. +func (va *ValidatorAdapter) emitSetValidator(ctx sdk.Context, addr sdk.ValAddress, kafka *[]common.Message) stakingtypes.Validator { + val, _ := va.keeper.GetValidator(ctx, addr) + pub, _ := val.ConsPubKey() + common.AppendMessage(kafka, "SET_VALIDATOR", common.JsDict{ + "operator_address": addr.String(), + "delegator_address": sdk.AccAddress(addr).String(), + "consensus_address": sdk.GetConsAddress(pub).String(), + "moniker": val.Description.Moniker, + "identity": val.Description.Identity, + "website": val.Description.Website, + "details": val.Description.Details, + "commission_rate": val.Commission.Rate.String(), + "commission_max_rate": val.Commission.MaxRate.String(), + "commission_max_change": val.Commission.MaxChangeRate.String(), + "min_self_delegation": val.MinSelfDelegation.String(), + "jailed": val.Jailed, + }) + return val +} + +// handleJailedEvents checks for slashing events and update the slashed validator accordingly. 
+func (va *ValidatorAdapter) handleJailedEvents(ctx sdk.Context, evMap common.EvMap, kafka *[]common.Message) { + if raws, ok := evMap[slashingtypes.EventTypeSlash+"."+slashingtypes.AttributeKeyJailed]; ok { + for _, raw := range raws { + consAddress, _ := sdk.ConsAddressFromBech32(raw) + validator, _ := va.keeper.GetValidatorByConsAddr(ctx, consAddress) + common.AppendMessage(kafka, "UPDATE_VALIDATOR", common.JsDict{ + "operator_address": validator.OperatorAddress, + "jailed": validator.Jailed, + }) + } + } +} diff --git a/hooks/emitter/wasm.go b/hooks/emitter/wasm.go new file mode 100644 index 00000000000..8e26d229fc2 --- /dev/null +++ b/hooks/emitter/wasm.go @@ -0,0 +1,541 @@ +package emitter + +import ( + "encoding/json" + "fmt" + + "github.com/CosmWasm/wasmd/x/wasm" + wasmkeeper "github.com/CosmWasm/wasmd/x/wasm/keeper" + "github.com/CosmWasm/wasmd/x/wasm/types" + wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" + sdk "github.com/cosmos/cosmos-sdk/types" + govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + channeltypes "github.com/cosmos/ibc-go/v4/modules/core/04-channel/types" + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/osmosis-labs/osmosis/v15/app/params" + "github.com/osmosis-labs/osmosis/v15/hooks/common" +) + +var ( + _ Adapter = &WasmAdapter{} +) + +// WasmAdapter defines a struct containing required keepers and flags to process the CosmWasm related hook. +// It implements Adapter interface. 
+type WasmAdapter struct { + // tx flags + contractTxs map[string]bool + isStoreCodeTx bool + isInstantiateTx bool + isExecuteTx bool + isSendTx bool + isUpdateAdmin bool + isClearAdmin bool + isMigrate bool + isIBC bool + + // wasm params + lastInstanceID uint64 + maxCodeIDfromTx uint64 + maxInstanceIDfromTx uint64 + codeIDInstantiateFromProposal []uint64 + + // keepers + wasmKeeper *wasmkeeper.Keeper + govKeeper *govkeeper.Keeper +} + +// NewWasmAdapter creates new WasmAdapter instance that will be added to the emitter hook adapters. +func NewWasmAdapter(wasmKeeper *wasmkeeper.Keeper, govKeeper *govkeeper.Keeper) *WasmAdapter { + return &WasmAdapter{ + contractTxs: make(map[string]bool), + isStoreCodeTx: false, + isInstantiateTx: false, + isExecuteTx: false, + isSendTx: false, + isUpdateAdmin: false, + isClearAdmin: false, + isMigrate: false, + isIBC: false, + lastInstanceID: 0, + wasmKeeper: wasmKeeper, + govKeeper: govKeeper, + } +} + +// AfterInitChain extracts codes and contracts from the given genesis state. 
+func (wa *WasmAdapter) AfterInitChain(ctx sdk.Context, encodingConfig params.EncodingConfig, genesisState map[string]json.RawMessage, kafka *[]common.Message) { + var wasmGenesis wasmtypes.GenesisState + if genesisState[wasmtypes.ModuleName] != nil { + encodingConfig.Marshaler.MustUnmarshalJSON(genesisState[wasmtypes.ModuleName], &wasmGenesis) + } + codeId := 0 + for _, m := range wasmGenesis.GenMsgs { + if msg := m.GetStoreCode(); msg != nil { + codeId++ + codeInfo := wa.wasmKeeper.GetCodeInfo(ctx, uint64(codeId)) + addresses := make([]string, 0) + switch codeInfo.InstantiateConfig.Permission { + case wasmtypes.AccessTypeOnlyAddress: + addresses = []string{codeInfo.InstantiateConfig.Address} + case wasmtypes.AccessTypeAnyOfAddresses: + addresses = codeInfo.InstantiateConfig.Addresses + } + common.AppendMessage(kafka, "NEW_CODE", common.JsDict{ + "id": codeId, + "uploader": msg.GetSigners()[0], + "contract_instantiated": 0, + "access_config_permission": codeInfo.InstantiateConfig.Permission.String(), + "access_config_addresses": addresses, + }) + } + } + + wa.wasmKeeper.IterateContractInfo(ctx, func(addr sdk.AccAddress, contractInfo types.ContractInfo) bool { + histories := wa.wasmKeeper.GetContractHistory(ctx, addr) + wa.updateContractVersion(ctx, addr.String(), kafka) + for _, history := range histories { + if history.Operation == types.ContractCodeHistoryOperationTypeInit { + common.AppendMessage(kafka, "NEW_CONTRACT", common.JsDict{ + "address": addr, + "code_id": history.CodeID, + "init_msg": string(history.Msg), + "init_by": contractInfo.Creator, + "contract_executed": 0, + "label": contractInfo.Label, + "admin": contractInfo.Admin, + }) + common.AppendMessage(kafka, "UPDATE_CODE", common.JsDict{ + "id": contractInfo.CodeID, + }) + common.AppendMessage(kafka, "NEW_CONTRACT_HISTORY", common.JsDict{ + "contract_address": addr, + "sender": contractInfo.Creator, + "code_id": history.CodeID, + "block_height": ctx.BlockHeight(), + "remark": common.JsDict{ + 
"type": "genesis", + "operation": history.Operation.String(), + }, + }) + } + } + return false + }) +} + +// AfterBeginBlock assigns updated values to the adapter variables before processing transactions in each block. +func (wa *WasmAdapter) AfterBeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock, _ common.EvMap, _ *[]common.Message) { + wa.maxCodeIDfromTx = wa.wasmKeeper.PeekAutoIncrementID(ctx, wasmtypes.KeyLastCodeID) + wa.maxInstanceIDfromTx = wa.wasmKeeper.PeekAutoIncrementID(ctx, wasmtypes.KeyLastInstanceID) + wa.codeIDInstantiateFromProposal = make([]uint64, 0) +} + +// PreDeliverTx sets the necessary maps and flags to the default value before processing each transaction. +func (wa *WasmAdapter) PreDeliverTx() { + wa.isStoreCodeTx = false + wa.isInstantiateTx = false + wa.isExecuteTx = false + wa.isClearAdmin = false + wa.isUpdateAdmin = false + wa.isMigrate = false + wa.contractTxs = make(map[string]bool) +} + +// CheckMsg checks the message type and extracts message values to WasmAdapter maps and flags accordingly. 
+func (wa *WasmAdapter) CheckMsg(_ sdk.Context, msg sdk.Msg) { + switch msg := msg.(type) { + case *wasmtypes.MsgStoreCode: + wa.isStoreCodeTx = true + case *wasmtypes.MsgInstantiateContract: + wa.isInstantiateTx = true + case *wasmtypes.MsgExecuteContract: + wa.isExecuteTx = true + wa.contractTxs[msg.Contract] = false + case *wasmtypes.MsgUpdateAdmin: + wa.isUpdateAdmin = true + wa.contractTxs[msg.Contract] = false + case *wasmtypes.MsgClearAdmin: + wa.isClearAdmin = true + wa.contractTxs[msg.Contract] = false + case *wasmtypes.MsgMigrateContract: + wa.isMigrate = true + wa.contractTxs[msg.Contract] = false + case *wasmtypes.MsgInstantiateContract2: + wa.isInstantiateTx = true + case *channeltypes.MsgRecvPacket: + if contractAddr, err := wasm.ContractFromPortID(msg.Packet.DestinationPort); err == nil { + wa.contractTxs[contractAddr.String()] = true + } + case *channeltypes.MsgChannelOpenAck: + if contractAddr, err := wasm.ContractFromPortID(msg.PortId); err == nil { + wa.contractTxs[contractAddr.String()] = true + } + case *channeltypes.MsgChannelOpenTry: + if contractAddr, err := wasm.ContractFromPortID(msg.PortId); err == nil { + wa.contractTxs[contractAddr.String()] = true + } + case *channeltypes.MsgChannelOpenConfirm: + if contractAddr, err := wasm.ContractFromPortID(msg.PortId); err == nil { + wa.contractTxs[contractAddr.String()] = true + } + case *channeltypes.MsgTimeout: + if contractAddr, err := wasm.ContractFromPortID(msg.Packet.SourcePort); err == nil { + wa.contractTxs[contractAddr.String()] = true + } + case *channeltypes.MsgAcknowledgement: + if contractAddr, err := wasm.ContractFromPortID(msg.Packet.SourcePort); err == nil { + wa.contractTxs[contractAddr.String()] = true + } + } +} + +// HandleMsgEvents checks for new code, new contract, contract execution, contract migration and new contract related +// proposal events emitted from the given message. 
+func (wa *WasmAdapter) HandleMsgEvents(ctx sdk.Context, txHash []byte, msg sdk.Msg, evMap common.EvMap, detail common.JsDict, kafka *[]common.Message) { + wa.updateNewCodeEvents(ctx, txHash, msg, evMap, kafka) + wa.updateNewContractEvents(ctx, txHash, evMap, kafka) + wa.updateContractExecuteEvents(evMap) + wa.updateContractProposalEvents(ctx, evMap, kafka) + wa.updateMigrateContractEvents(ctx, txHash, msg, evMap, kafka) + switch msg := msg.(type) { + case *types.MsgStoreCode: + detail["id"] = common.Atoui(evMap[types.EventTypeStoreCode+"."+types.AttributeKeyCodeID][0]) + case *types.MsgInstantiateContract: + contracts := evMap[types.EventTypeInstantiate+"."+types.AttributeKeyContractAddr] + detail["_contract_address"] = contracts[0] + detail["_contract_addresses"] = contracts + case *types.MsgInstantiateContract2: + contracts := evMap[types.EventTypeInstantiate+"."+types.AttributeKeyContractAddr] + detail["_contract_address"] = contracts[0] + detail["_contract_addresses"] = contracts + case *types.MsgExecuteContract: + detail["msg_json"] = string(msg.Msg) + case *types.MsgClearAdmin: + common.AppendMessage(kafka, "UPDATE_CONTRACT_ADMIN", common.JsDict{ + "contract": msg.Contract, + "admin": "", + }) + case *types.MsgUpdateAdmin: + common.AppendMessage(kafka, "UPDATE_CONTRACT_ADMIN", common.JsDict{ + "contract": msg.Contract, + "admin": msg.NewAdmin, + }) + } +} + +// PostDeliverTx appends contract transactions into the Kafka message array and assigns WasmAdapter flags to the +// interface to be written to the message queue. 
+func (wa *WasmAdapter) PostDeliverTx(ctx sdk.Context, txHash []byte, txDict common.JsDict, kafka *[]common.Message) { + for contractAddr := range wa.contractTxs { + common.AppendMessage(kafka, "NEW_CONTRACT_TRANSACTION", common.JsDict{ + "contract_address": contractAddr, + "tx_hash": txHash, + "is_instantiate": wa.isInstantiateTx, + }) + } + wa.contractTxs = make(map[string]bool) + + txDict["is_store_code"] = wa.isStoreCodeTx + txDict["is_instantiate"] = wa.isInstantiateTx + txDict["is_execute"] = wa.isExecuteTx + txDict["is_update_admin"] = wa.isUpdateAdmin + txDict["is_clear_admin"] = wa.isClearAdmin + txDict["is_migrate"] = wa.isMigrate + wa.lastInstanceID = wa.wasmKeeper.PeekAutoIncrementID(ctx, wasmtypes.KeyLastInstanceID) +} + +// AfterEndBlock checks for wasm related ActiveProposal events and process them accordingly. +func (wa *WasmAdapter) AfterEndBlock(ctx sdk.Context, _ abci.RequestEndBlock, evMap common.EvMap, kafka *[]common.Message) { + if rawIds, ok := evMap[govtypes.EventTypeActiveProposal+"."+govtypes.AttributeKeyProposalID]; ok { + for _, rawId := range rawIds { + proposalId := common.Atoui(rawId) + proposal, _ := wa.govKeeper.GetProposal(ctx, proposalId) + switch content := proposal.GetContent().(type) { + case *wasmtypes.StoreCodeProposal: + lastCodeID := wa.wasmKeeper.PeekAutoIncrementID(ctx, wasmtypes.KeyLastCodeID) + for id := wa.maxCodeIDfromTx; id < lastCodeID; id++ { + codeInfo := wa.wasmKeeper.GetCodeInfo(ctx, id) + if codeInfo == nil { + break + } + addresses := make([]string, 0) + switch codeInfo.InstantiateConfig.Permission { + case wasmtypes.AccessTypeOnlyAddress: + addresses = []string{codeInfo.InstantiateConfig.Address} + case wasmtypes.AccessTypeAnyOfAddresses: + addresses = codeInfo.InstantiateConfig.Addresses + default: + break + } + + common.AppendMessage(kafka, "NEW_CODE", common.JsDict{ + "id": id, + "uploader": codeInfo.Creator, + "contract_instantiated": 0, + "access_config_permission": 
codeInfo.InstantiateConfig.Permission.String(), + "access_config_addresses": addresses, + }) + common.AppendMessage(kafka, "NEW_CODE_PROPOSAL", common.JsDict{ + "code_id": id, + "proposal_id": proposalId, + "resolved_height": ctx.BlockHeight(), + }) + } + case *wasmtypes.InstantiateContractProposal: + contractAddr := wasmkeeper.BuildContractAddressClassic(content.CodeID, wa.lastInstanceID) + wa.updateContractVersion(ctx, contractAddr.String(), kafka) + histories := wa.wasmKeeper.GetContractHistory(ctx, contractAddr) + info := wa.wasmKeeper.GetContractInfo(ctx, contractAddr) + for _, history := range histories { + if history.Operation == wasmtypes.ContractCodeHistoryOperationTypeInit { + common.AppendMessage(kafka, "NEW_CONTRACT", common.JsDict{ + "address": contractAddr, + "code_id": history.CodeID, + "init_msg": string(history.Msg), + "init_by": info.Creator, + "contract_executed": 0, + "label": info.Label, + "admin": info.Admin, + }) + common.AppendMessage(kafka, "UPDATE_CODE", common.JsDict{ + "id": info.CodeID, + }) + common.AppendMessage(kafka, "NEW_CONTRACT_PROPOSAL", common.JsDict{ + "contract_address": contractAddr, + "proposal_id": proposalId, + "resolved_height": ctx.BlockHeight(), + }) + common.AppendMessage(kafka, "NEW_CONTRACT_HISTORY", common.JsDict{ + "contract_address": contractAddr, + "sender": info.Creator, + "code_id": history.CodeID, + "block_height": ctx.BlockHeight(), + "remark": common.JsDict{ + "type": "governance", + "operation": wasmtypes.ContractCodeHistoryOperationTypeInit.String(), + "value": proposalId, + }, + }) + } + break + } + case *wasmtypes.MigrateContractProposal: + wa.updateContractVersion(ctx, content.Contract, kafka) + common.AppendMessage(kafka, "UPDATE_CONTRACT_CODE_ID", common.JsDict{ + "contract": content.Contract, + "code_id": content.CodeID, + }) + common.AppendMessage(kafka, "UPDATE_CONTRACT_PROPOSAL", common.JsDict{ + "contract_address": content.Contract, + "proposal_id": proposalId, + "resolved_height": 
ctx.BlockHeight(), + }) + common.AppendMessage(kafka, "NEW_CONTRACT_HISTORY", common.JsDict{ + "contract_address": content.Contract, + "sender": content.Contract, + "code_id": content.CodeID, + "block_height": ctx.BlockHeight(), + "remark": common.JsDict{ + "type": "governance", + "operation": wasmtypes.ContractCodeHistoryOperationTypeMigrate.String(), + "value": proposalId, + }, + }) + case *wasmtypes.UpdateAdminProposal: + common.AppendMessage(kafka, "UPDATE_CONTRACT_ADMIN", common.JsDict{ + "contract": content.Contract, + "admin": content.NewAdmin, + }) + common.AppendMessage(kafka, "UPDATE_CONTRACT_PROPOSAL", common.JsDict{ + "contract_address": content.Contract, + "proposal_id": proposalId, + "resolved_height": ctx.BlockHeight(), + }) + case *wasmtypes.ClearAdminProposal: + common.AppendMessage(kafka, "UPDATE_CONTRACT_ADMIN", common.JsDict{ + "contract": content.Contract, + "admin": "", + }) + common.AppendMessage(kafka, "UPDATE_CONTRACT_PROPOSAL", common.JsDict{ + "contract_address": content.Contract, + "proposal_id": proposalId, + "resolved_height": ctx.BlockHeight(), + }) + case *wasmtypes.ExecuteContractProposal: + common.AppendMessage(kafka, "UPDATE_CONTRACT_PROPOSAL", common.JsDict{ + "contract_address": content.Contract, + "proposal_id": proposalId, + "resolved_height": ctx.BlockHeight(), + }) + } + } + } +} + +// updateContractProposalEvents handles wasm related SubmitProposal events that might be emitted from the transaction. 
+func (wa *WasmAdapter) updateContractProposalEvents(ctx sdk.Context, evMap common.EvMap, kafka *[]common.Message) { + if rawIds, ok := evMap[govtypes.EventTypeSubmitProposal+"."+govtypes.AttributeKeyProposalID]; ok { + for _, rawId := range rawIds { + proposalId := common.Atoui(rawId) + proposal, _ := wa.govKeeper.GetProposal(ctx, proposalId) + content := proposal.GetContent() + switch content := content.(type) { + case *wasmtypes.MigrateContractProposal: + common.AppendMessage(kafka, "NEW_CONTRACT_PROPOSAL", common.JsDict{ + "contract_address": content.Contract, + "proposal_id": proposalId, + }) + case *wasmtypes.SudoContractProposal: + common.AppendMessage(kafka, "NEW_CONTRACT_PROPOSAL", common.JsDict{ + "contract_address": content.Contract, + "proposal_id": proposalId, + }) + case *wasmtypes.ExecuteContractProposal: + common.AppendMessage(kafka, "NEW_CONTRACT_PROPOSAL", common.JsDict{ + "contract_address": content.Contract, + "proposal_id": proposalId, + }) + case *wasmtypes.UpdateAdminProposal: + common.AppendMessage(kafka, "NEW_CONTRACT_PROPOSAL", common.JsDict{ + "contract_address": content.Contract, + "proposal_id": proposalId, + }) + case *wasmtypes.ClearAdminProposal: + common.AppendMessage(kafka, "NEW_CONTRACT_PROPOSAL", common.JsDict{ + "contract_address": content.Contract, + "proposal_id": proposalId, + }) + } + } + } +} + +// updateNewCodeEvents handles StoreCode events that might be emitted from the transaction. 
+func (wa *WasmAdapter) updateNewCodeEvents(ctx sdk.Context, txHash []byte, msg sdk.Msg, evMap common.EvMap, kafka *[]common.Message) { + if rawIDs, ok := evMap[types.EventTypeStoreCode+"."+types.AttributeKeyCodeID]; ok { + for _, rawId := range rawIDs { + id := common.Atoui(rawId) + codeInfo := wa.wasmKeeper.GetCodeInfo(ctx, id) + wa.maxCodeIDfromTx = wa.wasmKeeper.PeekAutoIncrementID(ctx, wasmtypes.KeyLastCodeID) + addresses := make([]string, 0) + switch codeInfo.InstantiateConfig.Permission { + case wasmtypes.AccessTypeOnlyAddress: + addresses = []string{codeInfo.InstantiateConfig.Address} + case wasmtypes.AccessTypeAnyOfAddresses: + addresses = codeInfo.InstantiateConfig.Addresses + default: + break + } + common.AppendMessage(kafka, "NEW_CODE", common.JsDict{ + "id": id, + "uploader": msg.GetSigners()[0], + "contract_instantiated": 0, + "access_config_permission": codeInfo.InstantiateConfig.Permission.String(), + "access_config_addresses": addresses, + "tx_hash": txHash, + }) + } + } +} + +// ContractVersion is a type for storing CW2 info of a contract. +type ContractVersion struct { + Contract string + Version string +} + +// updateContractVersion updates CW2 info of a contract using the query result. +func (wa *WasmAdapter) updateContractVersion(ctx sdk.Context, contract string, kafka *[]common.Message) { + contractAddress, _ := sdk.AccAddressFromBech32(contract) + contractInfo := wa.wasmKeeper.GetContractInfo(ctx, contractAddress) + rawContractVersion := wa.wasmKeeper.QueryRaw(ctx, contractAddress, []byte("contract_info")) + var contractVersion ContractVersion + err := json.Unmarshal(rawContractVersion, &contractVersion) + if err != nil { + return + } + common.AppendMessage(kafka, "UPDATE_CW2_INFO", common.JsDict{ + "code_id": contractInfo.CodeID, + "cw2_contract": contractVersion.Contract, + "cw2_version": contractVersion.Version, + }) +} + +// updateNewContractEvents handles contract Instantiate events that might be emitted from the transaction. 
+func (wa *WasmAdapter) updateNewContractEvents(ctx sdk.Context, txHash []byte, evMap common.EvMap, kafka *[]common.Message) { + if events, ok := evMap[types.EventTypeInstantiate+"."+types.AttributeKeyContractAddr]; ok { + wa.maxInstanceIDfromTx = wa.wasmKeeper.PeekAutoIncrementID(ctx, wasmtypes.KeyLastInstanceID) + for _, contractAddr := range events { + addr, _ := sdk.AccAddressFromBech32(contractAddr) + histories := wa.wasmKeeper.GetContractHistory(ctx, addr) + info := wa.wasmKeeper.GetContractInfo(ctx, addr) + wa.updateContractVersion(ctx, contractAddr, kafka) + for _, history := range histories { + if history.Operation == types.ContractCodeHistoryOperationTypeInit { + common.AppendMessage(kafka, "NEW_CONTRACT", common.JsDict{ + "address": contractAddr, + "code_id": history.CodeID, + "init_msg": string(history.Msg), + "tx_hash": txHash, + "init_by": info.Creator, + "contract_executed": 0, + "label": info.Label, + "admin": info.Admin, + }) + common.AppendMessage(kafka, "UPDATE_CODE", common.JsDict{ + "id": info.CodeID, + }) + common.AppendMessage(kafka, "NEW_CONTRACT_HISTORY", common.JsDict{ + "contract_address": contractAddr, + "sender": info.Creator, + "code_id": history.CodeID, + "block_height": ctx.BlockHeight(), + "remark": common.JsDict{ + "type": "transaction", + "operation": history.Operation.String(), + "value": fmt.Sprintf("%X", txHash), + }, + }) + wa.contractTxs[contractAddr] = false + break + } + } + } + } +} + +// updateMigrateContractEvents handles contract Migrate events that might be emitted from the transaction. 
+func (wa *WasmAdapter) updateMigrateContractEvents(ctx sdk.Context, txHash []byte, msg sdk.Msg, evMap common.EvMap, kafka *[]common.Message) { + if events, ok := evMap[types.EventTypeMigrate+"."+types.AttributeKeyContractAddr]; ok { + for idx, contractAddr := range events { + wa.updateContractVersion(ctx, contractAddr, kafka) + codeID := common.Atoui(evMap[types.EventTypeMigrate+"."+types.AttributeKeyCodeID][idx]) + common.AppendMessage(kafka, "UPDATE_CONTRACT_CODE_ID", common.JsDict{ + "contract": contractAddr, + "code_id": codeID, + }) + common.AppendMessage(kafka, "NEW_CONTRACT_HISTORY", common.JsDict{ + "contract_address": contractAddr, + "sender": msg.GetSigners()[0], + "code_id": codeID, + "block_height": ctx.BlockHeight(), + "remark": common.JsDict{ + "type": "transaction", + "operation": wasmtypes.ContractCodeHistoryOperationTypeMigrate.String(), + "value": fmt.Sprintf("%X", txHash), + }, + }) + wa.contractTxs[contractAddr] = false + } + } +} + +// updateContractExecuteEvents handles contract Execute events that might be emitted from the transaction. +func (wa *WasmAdapter) updateContractExecuteEvents(evMap common.EvMap) { + for _, contract := range evMap[types.EventTypeExecute+"."+types.AttributeKeyContractAddr] { + wa.contractTxs[contract] = false + } +}