diff --git a/cmd/dumpblocks/dumpblocks.go b/cmd/dumpblocks/dumpblocks.go
index 30bdc72a..c7f76e36 100644
--- a/cmd/dumpblocks/dumpblocks.go
+++ b/cmd/dumpblocks/dumpblocks.go
@@ -4,7 +4,6 @@ import (
 	"encoding/binary"
 	"encoding/json"
 	"fmt"
-	"net/url"
 	"os"
 	"strconv"
 	"strings"
@@ -26,7 +25,7 @@ import (
 type (
 	dumpblocksParams struct {
-		URL       string
+		RpcUrl    string
 		Start     uint64
 		End       uint64
 		BatchSize uint64
@@ -52,12 +51,15 @@ var (
 
 // dumpblocksCmd represents the dumpblocks command
 var DumpblocksCmd = &cobra.Command{
-	Use:   "dumpblocks url start end",
+	Use:   "dumpblocks start end",
 	Short: "Export a range of blocks from a JSON-RPC endpoint.",
 	Long:  usage,
+	PreRunE: func(cmd *cobra.Command, args []string) error {
+		return checkFlags()
+	},
 	RunE: func(cmd *cobra.Command, args []string) error {
 		ctx := cmd.Context()
-		ec, err := ethrpc.DialContext(ctx, args[0])
+		ec, err := ethrpc.DialContext(ctx, inputDumpblocks.RpcUrl)
 		if err != nil {
 			return err
 		}
@@ -136,19 +138,15 @@ var DumpblocksCmd = &cobra.Command{
 		return nil
 	},
 	Args: func(cmd *cobra.Command, args []string) error {
-		if len(args) < 3 {
-			return fmt.Errorf("command needs at least three arguments. A URL a start block and an end block")
+		if len(args) < 2 {
+			return fmt.Errorf("command needs at least two arguments. A start block and an end block")
 		}
 
-		_, err := url.Parse(args[0])
+		start, err := strconv.ParseInt(args[0], 10, 64)
 		if err != nil {
 			return err
 		}
-		start, err := strconv.ParseInt(args[1], 10, 64)
-		if err != nil {
-			return err
-		}
-		end, err := strconv.ParseInt(args[2], 10, 64)
+		end, err := strconv.ParseInt(args[1], 10, 64)
 		if err != nil {
 			return err
 		}
@@ -159,7 +157,6 @@ var DumpblocksCmd = &cobra.Command{
 			start, end = end, start
 		}
 
-		inputDumpblocks.URL = args[0]
 		inputDumpblocks.Start = uint64(start)
 		inputDumpblocks.End = uint64(end)
 
@@ -187,15 +184,25 @@ var DumpblocksCmd = &cobra.Command{
 }
 
 func init() {
+	DumpblocksCmd.PersistentFlags().StringVarP(&inputDumpblocks.RpcUrl, "rpc-url", "r", "http://localhost:8545", "The RPC endpoint url")
 	DumpblocksCmd.PersistentFlags().UintVarP(&inputDumpblocks.Threads, "concurrency", "c", 1, "how many go routines to leverage")
 	DumpblocksCmd.PersistentFlags().BoolVarP(&inputDumpblocks.ShouldDumpBlocks, "dump-blocks", "B", true, "if the blocks will be dumped")
-	DumpblocksCmd.PersistentFlags().BoolVarP(&inputDumpblocks.ShouldDumpReceipts, "dump-receipts", "r", true, "if the receipts will be dumped")
+	DumpblocksCmd.PersistentFlags().BoolVar(&inputDumpblocks.ShouldDumpReceipts, "dump-receipts", true, "if the receipts will be dumped")
 	DumpblocksCmd.PersistentFlags().StringVarP(&inputDumpblocks.Filename, "filename", "f", "", "where to write the output to (default stdout)")
 	DumpblocksCmd.PersistentFlags().StringVarP(&inputDumpblocks.Mode, "mode", "m", "json", "the output format [json, proto]")
 	DumpblocksCmd.PersistentFlags().Uint64VarP(&inputDumpblocks.BatchSize, "batch-size", "b", 150, "the batch size. Realistically, this probably shouldn't be bigger than 999. Most providers seem to cap at 1000.")
 	DumpblocksCmd.PersistentFlags().StringVarP(&inputDumpblocks.FilterStr, "filter", "F", "{}", "filter output based on tx to and from, not setting a filter means all are allowed")
 }
 
+func checkFlags() error {
+	// Check rpc url flag.
+	if err := util.ValidateUrl(inputDumpblocks.RpcUrl); err != nil {
+		return err
+	}
+
+	return nil
+}
+
 // writeResponses writes the data to either stdout or a file if one is provided.
 // The message type can be either "block" or "transaction". The format of the
 // output is either "json" or "proto" depending on the mode.
diff --git a/cmd/dumpblocks/usage.md b/cmd/dumpblocks/usage.md
index 80d1763b..fdedf954 100644
--- a/cmd/dumpblocks/usage.md
+++ b/cmd/dumpblocks/usage.md
@@ -3,7 +3,7 @@ For various reasons, we might want to dump a large range of blocks for analytics
 The following command would download the first 500K blocks and zip them and then look for blocks with transactions that create an account.
 
 ```bash
-$ polycli dumpblocks http://172.26.26.12:8545/ 0 500000 | gzip > foo.gz
+$ polycli dumpblocks 0 500000 --rpc-url http://172.26.26.12:8545/ | gzip > foo.gz
 $ zcat < foo.gz | jq '. | select(.transactions | length > 0) | select(.transactions[].to == null)'
 ```
diff --git a/cmd/monitor/monitor.go b/cmd/monitor/monitor.go
index 3b8c700a..6a1dfd54 100644
--- a/cmd/monitor/monitor.go
+++ b/cmd/monitor/monitor.go
@@ -502,8 +502,11 @@ func renderMonitorUI(ctx context.Context, ec *ethclient.Client, ms *monitorStatu
 			blockInfo.Rows = []string{}
 
 			transactionInfo.ColumnWidths = getColumnWidths(transactionColumnRatio, transactionInfo.Dx())
-			transactionInfo.Rows = ui.GetBlockTxTable(renderedBlocks[len(renderedBlocks)-1], ms.ChainID)
-			transactionInfo.Title = fmt.Sprintf("Latest Transactions for Block #%s", renderedBlocks[len(renderedBlocks)-1].Number().String())
+			if len(renderedBlocks) > 0 {
+				i := len(renderedBlocks) - 1
+				transactionInfo.Rows = ui.GetBlockTxTable(renderedBlocks[i], ms.ChainID)
+				transactionInfo.Title = fmt.Sprintf("Latest Transactions for Block #%s", renderedBlocks[i].Number().String())
+			}
 		}
 
 		termui.Render(grid)
@@ -592,7 +595,7 @@ func renderMonitorUI(ctx context.Context, ec *ethclient.Client, ms *monitorStatu
 				toBlockNumber.SetInt64(0)
 			}
 
-			if !isBlockInCache(ms.BlockCache, toBlockNumber) {
+			if !ms.isBlockInCache(toBlockNumber) {
 				err := ms.getBlockRange(ctx, toBlockNumber, rpc)
 				if err != nil {
 					log.Warn().Err(err).Msg("Failed to fetch blocks on page down")
@@ -631,7 +634,7 @@ func renderMonitorUI(ctx context.Context, ec *ethclient.Client, ms *monitorStatu
 			}
 
 			// Fetch the blocks in the new range if they are missing
-			if !isBlockInCache(ms.BlockCache, nextTopBlockNumber) {
+			if !ms.isBlockInCache(nextTopBlockNumber) {
 				err := ms.getBlockRange(ctx, new(big.Int).Add(nextTopBlockNumber, big.NewInt(int64(windowSize))), rpc)
 				if err != nil {
 					log.Warn().Err(err).Msg("Failed to fetch blocks on page up")
@@ -746,8 +749,10 @@ func renderMonitorUI(ctx context.Context, ec *ethclient.Client, ms *monitorStatu
 	}
 }
 
-func isBlockInCache(cache *lru.Cache, blockNumber *big.Int) bool {
-	_, exists := cache.Get(blockNumber.String())
+func (ms *monitorStatus) isBlockInCache(blockNumber *big.Int) bool {
+	ms.BlocksLock.RLock()
+	_, exists := ms.BlockCache.Get(blockNumber.String())
+	ms.BlocksLock.RUnlock()
 	return exists
 }
diff --git a/doc/polycli_dumpblocks.md b/doc/polycli_dumpblocks.md
index 35fca679..bfa14085 100644
--- a/doc/polycli_dumpblocks.md
+++ b/doc/polycli_dumpblocks.md
@@ -14,7 +14,7 @@ Export a range of blocks from a JSON-RPC endpoint.
 
 ```bash
-polycli dumpblocks url start end [flags]
+polycli dumpblocks start end [flags]
 ```
 
 ## Usage
@@ -24,7 +24,7 @@ For various reasons, we might want to dump a large range of blocks for analytics
 The following command would download the first 500K blocks and zip them and then look for blocks with transactions that create an account.
 
 ```bash
-$ polycli dumpblocks http://172.26.26.12:8545/ 0 500000 | gzip > foo.gz
+$ polycli dumpblocks 0 500000 --rpc-url http://172.26.26.12:8545/ | gzip > foo.gz
 $ zcat < foo.gz | jq '. | select(.transactions | length > 0) | select(.transactions[].to == null)'
 ```
@@ -77,11 +77,12 @@ To solve this, add the unknown fields to the `.proto` files and recompile them (
   -b, --batch-size uint    the batch size. Realistically, this probably shouldn't be bigger than 999. Most providers seem to cap at 1000. (default 150)
   -c, --concurrency uint   how many go routines to leverage (default 1)
   -B, --dump-blocks        if the blocks will be dumped (default true)
-  -r, --dump-receipts      if the receipts will be dumped (default true)
+      --dump-receipts      if the receipts will be dumped (default true)
   -f, --filename string    where to write the output to (default stdout)
   -F, --filter string      filter output based on tx to and from, not setting a filter means all are allowed (default "{}")
   -h, --help               help for dumpblocks
   -m, --mode string        the output format [json, proto] (default "json")
+  -r, --rpc-url string     The RPC endpoint url (default "http://localhost:8545")
 ```
 
 The command also inherits flags from parent commands.
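
A note on the new `PreRunE` hook: `checkFlags` delegates the actual URL check to `util.ValidateUrl`, which is not part of this diff. The sketch below is only an illustration of the kind of validation such a helper might perform; the function name, accepted schemes, and error messages are assumptions, not the repository's implementation.

```go
package util

import (
	"fmt"
	"net/url"
)

// validateRpcUrl is a hypothetical stand-in for util.ValidateUrl: parse the
// string and make sure it has a host and a dialable scheme. The scheme
// whitelist below is an assumption for this sketch.
func validateRpcUrl(rawUrl string) error {
	parsed, err := url.Parse(rawUrl)
	if err != nil {
		return err
	}
	if parsed.Host == "" {
		return fmt.Errorf("url %q has no host", rawUrl)
	}
	switch parsed.Scheme {
	case "http", "https", "ws", "wss":
		return nil
	default:
		return fmt.Errorf("unsupported scheme %q in url %q", parsed.Scheme, rawUrl)
	}
}
```

Running the check in `PreRunE` means a malformed `--rpc-url` is rejected before the command dials the endpoint, rather than surfacing later as a connection error. Note also that the `-r` shorthand now belongs to `--rpc-url`, so `--dump-receipts` keeps only its long form.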