From 546709a12fcb0b6bedc807de1929998bf311981d Mon Sep 17 00:00:00 2001 From: billettc Date: Wed, 15 Nov 2023 08:05:04 -0500 Subject: [PATCH 01/66] wip --- nodemanager/consolereader.go | 188 ++ nodemanager/consolereader_test.go | 95 + nodemanager/test/type_test.pb.go | 3508 +++++++++++++++++++++++++++++ 3 files changed, 3791 insertions(+) create mode 100644 nodemanager/consolereader.go create mode 100644 nodemanager/consolereader_test.go create mode 100644 nodemanager/test/type_test.pb.go diff --git a/nodemanager/consolereader.go b/nodemanager/consolereader.go new file mode 100644 index 0000000..fe1cb03 --- /dev/null +++ b/nodemanager/consolereader.go @@ -0,0 +1,188 @@ +package nodemanager + +import ( + "encoding/base64" + "fmt" + "io" + "strconv" + "strings" + "time" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/logging" + "go.uber.org/zap" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +const FirePrefix = "FIRE " +const FirePrefixLen = len(FirePrefix) +const InitLogPrefix = "INIT " +const InitLogPrefixLen = len(InitLogPrefix) +const BlockLogPrefix = "BLOCK " +const BlockLogPrefixLen = len(BlockLogPrefix) + +type parseCtx struct { + readerProtocolVersion string + protoMessageType string +} + +type ConsoleReader struct { + lines chan string + close func() + done chan interface{} + logger *zap.Logger + tracer logging.Tracer + ctx *parseCtx +} + +func NewConsoleReader(lines chan string, logger *zap.Logger, tracer logging.Tracer) *ConsoleReader { + reader := &ConsoleReader{ + lines: lines, + close: func() {}, + done: make(chan interface{}), + ctx: &parseCtx{}, + logger: logger, + tracer: tracer, + } + return reader +} + +func (r *ConsoleReader) Done() <-chan interface{} { + return r.done +} + +func (r *ConsoleReader) ReadBlock() (out *bstream.Block, err error) { + out, err = r.next() + if err != nil { + return nil, err + } + + return out, nil +} + +func (r *ConsoleReader) next() (out *bstream.Block, err 
error) { + + for line := range r.lines { + if !strings.HasPrefix(line, "FIRE ") { + continue + } + + line = line[FirePrefixLen:] + + switch { + case strings.HasPrefix(line, InitLogPrefix): + err = r.ctx.readInit(line[InitLogPrefixLen:]) + case strings.HasPrefix(line, BlockLogPrefix): + out, err = r.ctx.readBlock(line[BlockLogPrefixLen:]) + default: + if r.tracer.Enabled() { + r.logger.Debug("skipping unknown Firehose log line", zap.String("line", line)) + } + continue + } + + if err != nil { + chunks := strings.SplitN(line, " ", 2) + return nil, fmt.Errorf("%s: %s (line %q)", chunks[0], err, line) + } + + if out != nil { + return out, nil + } + } + + r.logger.Info("lines channel has been closed") + close(r.done) + return nil, io.EOF +} + +// Formats +// [block_num:342342342] [block_hash] [parent_num] [parent_hash] [lib:123123123] [timestamp:unix_nano] B64ENCODED_any +func (ctx *parseCtx) readBlock(line string) (out *bstream.Block, err error) { + chunks, err := SplitInBoundedChunks(line, 7) + if err != nil { + return nil, fmt.Errorf("splitting block log line: %w", err) + } + + blockNum, err := strconv.ParseUint(chunks[0], 10, 64) + if err != nil { + return nil, fmt.Errorf("parsing block num %q: %w", chunks[0], err) + } + + blockHash := chunks[1] + + _, err = strconv.ParseUint(chunks[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("parsing parent num %q: %w", chunks[2], err) + } + + parentHash := chunks[3] + + libNum, err := strconv.ParseUint(chunks[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("parsing lib num %q: %w", chunks[4], err) + } + + timestampUnixNano, err := strconv.ParseUint(chunks[5], 10, 64) + if err != nil { + return nil, fmt.Errorf("parsing timestamp %q: %w", chunks[5], err) + } + + timestamp := time.Unix(0, int64(timestampUnixNano)) + + payload, err := base64.StdEncoding.DecodeString(chunks[6]) + + var blockPayload anypb.Any + if err := proto.Unmarshal(payload, &blockPayload); err != nil { + return nil, fmt.Errorf("unmarshaling block 
payload: %w", err) + } + + typeChunks := strings.Split(blockPayload.TypeUrl, "/") + payloadType := typeChunks[len(typeChunks)-1] + if payloadType != ctx.protoMessageType { + return nil, fmt.Errorf("invalid payload type, expected %q, got %q", ctx.protoMessageType, blockPayload.TypeUrl) + } + + block := &bstream.Block{ + Id: blockHash, + Number: blockNum, + PreviousId: parentHash, + //todo: missing ParentNumber + Timestamp: timestamp, + LibNum: libNum, + PayloadKind: 0, //todo: PayloadKind + PayloadVersion: 0, //todo: PayloadVersion + } + + block, err = bstream.MemoryBlockPayloadSetter(block, blockPayload.Value) + if err != nil { + return nil, fmt.Errorf("setting block payload: %w", err) + } + + return block, nil +} + +// [READER_PROTOCOL_VERSION] sf.ethereum.type.v2.Block +func (ctx *parseCtx) readInit(line string) error { + chunks, err := SplitInBoundedChunks(line, 2) + if err != nil { + return fmt.Errorf("split: %s", err) + } + + ctx.readerProtocolVersion = chunks[0] + ctx.protoMessageType = chunks[1] + + return nil +} + +// SplitInBoundedChunks splits the line in `count` chunks and returns the slice `chunks[1:count]` (so exclusive end), +// but will accumulate all trailing chunks within the last (for free-form strings, or JSON objects) +func SplitInBoundedChunks(line string, count int) ([]string, error) { + chunks := strings.SplitN(line, " ", count) + if len(chunks) != count { + return nil, fmt.Errorf("%d fields required but found %d fields for line %q", count, len(chunks), line) + } + + return chunks, nil +} diff --git a/nodemanager/consolereader_test.go b/nodemanager/consolereader_test.go new file mode 100644 index 0000000..dc6c297 --- /dev/null +++ b/nodemanager/consolereader_test.go @@ -0,0 +1,95 @@ +package nodemanager + +import ( + "encoding/base64" + "encoding/hex" + "fmt" + "testing" + "time" + + "github.com/streamingfast/firehose-core/nodemanager/test" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "google.golang.org/protobuf/proto" + 
"google.golang.org/protobuf/types/known/anypb" +) + +func Test_Ctx_readBlock(t *testing.T) { + ctx := &parseCtx{ + readerProtocolVersion: "1.0", + protoMessageType: "sf.ethereum.type.v2.Block", + } + + blockHash := "d2836a703a02f3ca2a13f05efe26fc48c6fa0db0d754a49e56b066d3b7d54659" + blockHashBytes, err := hex.DecodeString(blockHash) + blockNumber := uint64(18571000) + + parentHash := "55de88c909fa368ae1e93b6b8ffb3fbb12e64aefec1d4a1fcc27ae7633de2f81" + parentBlockNumber := 18570999 + + libNumber := 18570800 + + pbBlock := test.Block{ + Hash: blockHashBytes, + Number: blockNumber, + } + + anypbBlock, err := anypb.New(&pbBlock) + payload, err := proto.Marshal(anypbBlock) + + require.NoError(t, err) + nowNano := time.Now().UnixNano() + line := fmt.Sprintf( + "%d %s %d %s %d %d %s", + blockNumber, + blockHash, + parentBlockNumber, + parentHash, + libNumber, + nowNano, + base64.StdEncoding.EncodeToString(payload), + ) + + block, err := ctx.readBlock(line) + require.NoError(t, err) + + require.Equal(t, blockNumber, block.Number) + require.Equal(t, blockHash, block.Id) + require.Equal(t, parentHash, block.PreviousId) + require.Equal(t, uint64(libNumber), block.LibNum) + require.Equal(t, time.Unix(0, nowNano), block.Timestamp) + + blockPayload, err := block.Payload.Get() + require.NoError(t, err) + require.Equal(t, anypbBlock.GetValue(), blockPayload) + +} + +type tracer struct { +} + +func (t *tracer) Enabled() bool { + return false +} + +func Test_GetNext(t *testing.T) { + lines := make(chan string, 2) + reader := NewConsoleReader(lines, zap.NewNop(), &tracer{}) + + initLine := "FIRE INIT 1.0 sf.ethereum.type.v2.Block" + blockLine := "FIRE BLOCK 18571000 d2836a703a02f3ca2a13f05efe26fc48c6fa0db0d754a49e56b066d3b7d54659 18570999 55de88c909fa368ae1e93b6b8ffb3fbb12e64aefec1d4a1fcc27ae7633de2f81 18570800 1699992393935935000 Ci10eXBlLmdvb2dsZWFwaXMuY29tL3NmLmV0aGVyZXVtLnR5cGUudjIuQmxvY2sSJxIg0oNqcDoC88oqE/Be/ib8SMb6DbDXVKSeVrBm07fVRlkY+L3tCA==" + + lines <- initLine + lines <- 
blockLine + close(lines) + + block, err := reader.ReadBlock() + require.NoError(t, err) + + require.Equal(t, uint64(18571000), block.Number) + require.Equal(t, "d2836a703a02f3ca2a13f05efe26fc48c6fa0db0d754a49e56b066d3b7d54659", block.Id) + require.Equal(t, "55de88c909fa368ae1e93b6b8ffb3fbb12e64aefec1d4a1fcc27ae7633de2f81", block.PreviousId) + require.Equal(t, uint64(18570800), block.LibNum) + require.Equal(t, time.Unix(0, 1699992393935935000), block.Timestamp) + +} diff --git a/nodemanager/test/type_test.pb.go b/nodemanager/test/type_test.pb.go new file mode 100644 index 0000000..e78f046 --- /dev/null +++ b/nodemanager/test/type_test.pb.go @@ -0,0 +1,3508 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v4.24.2 +// source: sf/ethereum/type/v2/type.proto + +package test + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TransactionTraceStatus int32 + +const ( + TransactionTraceStatus_UNKNOWN TransactionTraceStatus = 0 + TransactionTraceStatus_SUCCEEDED TransactionTraceStatus = 1 + TransactionTraceStatus_FAILED TransactionTraceStatus = 2 + TransactionTraceStatus_REVERTED TransactionTraceStatus = 3 +) + +// Enum value maps for TransactionTraceStatus. 
+var ( + TransactionTraceStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SUCCEEDED", + 2: "FAILED", + 3: "REVERTED", + } + TransactionTraceStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SUCCEEDED": 1, + "FAILED": 2, + "REVERTED": 3, + } +) + +func (x TransactionTraceStatus) Enum() *TransactionTraceStatus { + p := new(TransactionTraceStatus) + *p = x + return p +} + +func (x TransactionTraceStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TransactionTraceStatus) Descriptor() protoreflect.EnumDescriptor { + return file_sf_ethereum_type_v2_type_proto_enumTypes[0].Descriptor() +} + +func (TransactionTraceStatus) Type() protoreflect.EnumType { + return &file_sf_ethereum_type_v2_type_proto_enumTypes[0] +} + +func (x TransactionTraceStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TransactionTraceStatus.Descriptor instead. +func (TransactionTraceStatus) EnumDescriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{0} +} + +type CallType int32 + +const ( + CallType_UNSPECIFIED CallType = 0 + CallType_CALL CallType = 1 // direct? what's the name for `Call` alone? + CallType_CALLCODE CallType = 2 + CallType_DELEGATE CallType = 3 + CallType_STATIC CallType = 4 + CallType_CREATE CallType = 5 // create2 ? any other form of calls? +) + +// Enum value maps for CallType. 
+var ( + CallType_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "CALL", + 2: "CALLCODE", + 3: "DELEGATE", + 4: "STATIC", + 5: "CREATE", + } + CallType_value = map[string]int32{ + "UNSPECIFIED": 0, + "CALL": 1, + "CALLCODE": 2, + "DELEGATE": 3, + "STATIC": 4, + "CREATE": 5, + } +) + +func (x CallType) Enum() *CallType { + p := new(CallType) + *p = x + return p +} + +func (x CallType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CallType) Descriptor() protoreflect.EnumDescriptor { + return file_sf_ethereum_type_v2_type_proto_enumTypes[1].Descriptor() +} + +func (CallType) Type() protoreflect.EnumType { + return &file_sf_ethereum_type_v2_type_proto_enumTypes[1] +} + +func (x CallType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CallType.Descriptor instead. +func (CallType) EnumDescriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{1} +} + +type Block_DetailLevel int32 + +const ( + Block_DETAILLEVEL_EXTENDED Block_DetailLevel = 0 + // DETAILLEVEL_TRACE = 1; // TBD + Block_DETAILLEVEL_BASE Block_DetailLevel = 2 +) + +// Enum value maps for Block_DetailLevel. 
+var ( + Block_DetailLevel_name = map[int32]string{ + 0: "DETAILLEVEL_EXTENDED", + 2: "DETAILLEVEL_BASE", + } + Block_DetailLevel_value = map[string]int32{ + "DETAILLEVEL_EXTENDED": 0, + "DETAILLEVEL_BASE": 2, + } +) + +func (x Block_DetailLevel) Enum() *Block_DetailLevel { + p := new(Block_DetailLevel) + *p = x + return p +} + +func (x Block_DetailLevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Block_DetailLevel) Descriptor() protoreflect.EnumDescriptor { + return file_sf_ethereum_type_v2_type_proto_enumTypes[2].Descriptor() +} + +func (Block_DetailLevel) Type() protoreflect.EnumType { + return &file_sf_ethereum_type_v2_type_proto_enumTypes[2] +} + +func (x Block_DetailLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Block_DetailLevel.Descriptor instead. +func (Block_DetailLevel) EnumDescriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{0, 0} +} + +type TransactionTrace_Type int32 + +const ( + // All transactions that ever existed prior Berlin fork before EIP-2718 was implemented. + TransactionTrace_TRX_TYPE_LEGACY TransactionTrace_Type = 0 + // Transaction that specicy an access list of contract/storage_keys that is going to be used + // in this transaction. + // + // Added in Berlin fork (EIP-2930). + TransactionTrace_TRX_TYPE_ACCESS_LIST TransactionTrace_Type = 1 + // Transaction that specifis an access list just like TRX_TYPE_ACCESS_LIST but in addition defines the + // max base gas gee and max priority gas fee to pay for this transaction. Transaction's of those type are + // executed against EIP-1559 rules which dictates a dynamic gas cost based on the congestion of the network. 
+ TransactionTrace_TRX_TYPE_DYNAMIC_FEE TransactionTrace_Type = 2 + // Arbitrum-specific transactions + TransactionTrace_TRX_TYPE_ARBITRUM_DEPOSIT TransactionTrace_Type = 100 + TransactionTrace_TRX_TYPE_ARBITRUM_UNSIGNED TransactionTrace_Type = 101 + TransactionTrace_TRX_TYPE_ARBITRUM_CONTRACT TransactionTrace_Type = 102 + TransactionTrace_TRX_TYPE_ARBITRUM_RETRY TransactionTrace_Type = 104 + TransactionTrace_TRX_TYPE_ARBITRUM_SUBMIT_RETRYABLE TransactionTrace_Type = 105 + TransactionTrace_TRX_TYPE_ARBITRUM_INTERNAL TransactionTrace_Type = 106 + TransactionTrace_TRX_TYPE_ARBITRUM_LEGACY TransactionTrace_Type = 120 +) + +// Enum value maps for TransactionTrace_Type. +var ( + TransactionTrace_Type_name = map[int32]string{ + 0: "TRX_TYPE_LEGACY", + 1: "TRX_TYPE_ACCESS_LIST", + 2: "TRX_TYPE_DYNAMIC_FEE", + 100: "TRX_TYPE_ARBITRUM_DEPOSIT", + 101: "TRX_TYPE_ARBITRUM_UNSIGNED", + 102: "TRX_TYPE_ARBITRUM_CONTRACT", + 104: "TRX_TYPE_ARBITRUM_RETRY", + 105: "TRX_TYPE_ARBITRUM_SUBMIT_RETRYABLE", + 106: "TRX_TYPE_ARBITRUM_INTERNAL", + 120: "TRX_TYPE_ARBITRUM_LEGACY", + } + TransactionTrace_Type_value = map[string]int32{ + "TRX_TYPE_LEGACY": 0, + "TRX_TYPE_ACCESS_LIST": 1, + "TRX_TYPE_DYNAMIC_FEE": 2, + "TRX_TYPE_ARBITRUM_DEPOSIT": 100, + "TRX_TYPE_ARBITRUM_UNSIGNED": 101, + "TRX_TYPE_ARBITRUM_CONTRACT": 102, + "TRX_TYPE_ARBITRUM_RETRY": 104, + "TRX_TYPE_ARBITRUM_SUBMIT_RETRYABLE": 105, + "TRX_TYPE_ARBITRUM_INTERNAL": 106, + "TRX_TYPE_ARBITRUM_LEGACY": 120, + } +) + +func (x TransactionTrace_Type) Enum() *TransactionTrace_Type { + p := new(TransactionTrace_Type) + *p = x + return p +} + +func (x TransactionTrace_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TransactionTrace_Type) Descriptor() protoreflect.EnumDescriptor { + return file_sf_ethereum_type_v2_type_proto_enumTypes[3].Descriptor() +} + +func (TransactionTrace_Type) Type() protoreflect.EnumType { + return 
&file_sf_ethereum_type_v2_type_proto_enumTypes[3] +} + +func (x TransactionTrace_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TransactionTrace_Type.Descriptor instead. +func (TransactionTrace_Type) EnumDescriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{5, 0} +} + +// Obtain all balanche change reasons under deep mind repository: +// +// ```shell +// ack -ho 'BalanceChangeReason\(".*"\)' | grep -Eo '".*"' | sort | uniq +// ``` +type BalanceChange_Reason int32 + +const ( + BalanceChange_REASON_UNKNOWN BalanceChange_Reason = 0 + BalanceChange_REASON_REWARD_MINE_UNCLE BalanceChange_Reason = 1 + BalanceChange_REASON_REWARD_MINE_BLOCK BalanceChange_Reason = 2 + BalanceChange_REASON_DAO_REFUND_CONTRACT BalanceChange_Reason = 3 + BalanceChange_REASON_DAO_ADJUST_BALANCE BalanceChange_Reason = 4 + BalanceChange_REASON_TRANSFER BalanceChange_Reason = 5 + BalanceChange_REASON_GENESIS_BALANCE BalanceChange_Reason = 6 + BalanceChange_REASON_GAS_BUY BalanceChange_Reason = 7 + BalanceChange_REASON_REWARD_TRANSACTION_FEE BalanceChange_Reason = 8 + BalanceChange_REASON_REWARD_FEE_RESET BalanceChange_Reason = 14 + BalanceChange_REASON_GAS_REFUND BalanceChange_Reason = 9 + BalanceChange_REASON_TOUCH_ACCOUNT BalanceChange_Reason = 10 + BalanceChange_REASON_SUICIDE_REFUND BalanceChange_Reason = 11 + BalanceChange_REASON_SUICIDE_WITHDRAW BalanceChange_Reason = 13 + BalanceChange_REASON_CALL_BALANCE_OVERRIDE BalanceChange_Reason = 12 + // Used on chain(s) where some Ether burning happens + BalanceChange_REASON_BURN BalanceChange_Reason = 15 + BalanceChange_REASON_WITHDRAWAL BalanceChange_Reason = 16 +) + +// Enum value maps for BalanceChange_Reason. 
+var ( + BalanceChange_Reason_name = map[int32]string{ + 0: "REASON_UNKNOWN", + 1: "REASON_REWARD_MINE_UNCLE", + 2: "REASON_REWARD_MINE_BLOCK", + 3: "REASON_DAO_REFUND_CONTRACT", + 4: "REASON_DAO_ADJUST_BALANCE", + 5: "REASON_TRANSFER", + 6: "REASON_GENESIS_BALANCE", + 7: "REASON_GAS_BUY", + 8: "REASON_REWARD_TRANSACTION_FEE", + 14: "REASON_REWARD_FEE_RESET", + 9: "REASON_GAS_REFUND", + 10: "REASON_TOUCH_ACCOUNT", + 11: "REASON_SUICIDE_REFUND", + 13: "REASON_SUICIDE_WITHDRAW", + 12: "REASON_CALL_BALANCE_OVERRIDE", + 15: "REASON_BURN", + 16: "REASON_WITHDRAWAL", + } + BalanceChange_Reason_value = map[string]int32{ + "REASON_UNKNOWN": 0, + "REASON_REWARD_MINE_UNCLE": 1, + "REASON_REWARD_MINE_BLOCK": 2, + "REASON_DAO_REFUND_CONTRACT": 3, + "REASON_DAO_ADJUST_BALANCE": 4, + "REASON_TRANSFER": 5, + "REASON_GENESIS_BALANCE": 6, + "REASON_GAS_BUY": 7, + "REASON_REWARD_TRANSACTION_FEE": 8, + "REASON_REWARD_FEE_RESET": 14, + "REASON_GAS_REFUND": 9, + "REASON_TOUCH_ACCOUNT": 10, + "REASON_SUICIDE_REFUND": 11, + "REASON_SUICIDE_WITHDRAW": 13, + "REASON_CALL_BALANCE_OVERRIDE": 12, + "REASON_BURN": 15, + "REASON_WITHDRAWAL": 16, + } +) + +func (x BalanceChange_Reason) Enum() *BalanceChange_Reason { + p := new(BalanceChange_Reason) + *p = x + return p +} + +func (x BalanceChange_Reason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (BalanceChange_Reason) Descriptor() protoreflect.EnumDescriptor { + return file_sf_ethereum_type_v2_type_proto_enumTypes[4].Descriptor() +} + +func (BalanceChange_Reason) Type() protoreflect.EnumType { + return &file_sf_ethereum_type_v2_type_proto_enumTypes[4] +} + +func (x BalanceChange_Reason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use BalanceChange_Reason.Descriptor instead. 
+func (BalanceChange_Reason) EnumDescriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{11, 0} +} + +// Obtain all gas change reasons under deep mind repository: +// +// ```shell +// ack -ho 'GasChangeReason\(".*"\)' | grep -Eo '".*"' | sort | uniq +// ``` +type GasChange_Reason int32 + +const ( + GasChange_REASON_UNKNOWN GasChange_Reason = 0 + // REASON_CALL is the amount of gas that will be charged for a 'CALL' opcode executed by the EVM + GasChange_REASON_CALL GasChange_Reason = 1 + // REASON_CALL_CODE is the amount of gas that will be charged for a 'CALLCODE' opcode executed by the EVM + GasChange_REASON_CALL_CODE GasChange_Reason = 2 + // REASON_CALL_DATA_COPY is the amount of gas that will be charged for a 'CALLDATACOPY' opcode executed by the EVM + GasChange_REASON_CALL_DATA_COPY GasChange_Reason = 3 + // REASON_CODE_COPY is the amount of gas that will be charged for a 'CALLDATACOPY' opcode executed by the EVM + GasChange_REASON_CODE_COPY GasChange_Reason = 4 + // REASON_CODE_STORAGE is the amount of gas that will be charged for code storage + GasChange_REASON_CODE_STORAGE GasChange_Reason = 5 + // REASON_CONTRACT_CREATION is the amount of gas that will be charged for a 'CREATE' opcode executed by the EVM and for the gas + // burned for a CREATE, today controlled by EIP150 rules + GasChange_REASON_CONTRACT_CREATION GasChange_Reason = 6 + // REASON_CONTRACT_CREATION2 is the amount of gas that will be charged for a 'CREATE2' opcode executed by the EVM and for the gas + // burned for a CREATE2, today controlled by EIP150 rules + GasChange_REASON_CONTRACT_CREATION2 GasChange_Reason = 7 + // REASON_DELEGATE_CALL is the amount of gas that will be charged for a 'DELEGATECALL' opcode executed by the EVM + GasChange_REASON_DELEGATE_CALL GasChange_Reason = 8 + // REASON_EVENT_LOG is the amount of gas that will be charged for a 'LOG' opcode executed by the EVM + GasChange_REASON_EVENT_LOG GasChange_Reason = 9 + // 
REASON_EXT_CODE_COPY is the amount of gas that will be charged for a 'LOG' opcode executed by the EVM + GasChange_REASON_EXT_CODE_COPY GasChange_Reason = 10 + // REASON_FAILED_EXECUTION is the burning of the remaining gas when the execution failed without a revert + GasChange_REASON_FAILED_EXECUTION GasChange_Reason = 11 + // REASON_INTRINSIC_GAS is the amount of gas that will be charged for the intrinsic cost of the transaction, there is + // always exactly one of those per transaction + GasChange_REASON_INTRINSIC_GAS GasChange_Reason = 12 + // GasChangePrecompiledContract is the amount of gas that will be charged for a precompiled contract execution + GasChange_REASON_PRECOMPILED_CONTRACT GasChange_Reason = 13 + // REASON_REFUND_AFTER_EXECUTION is the amount of gas that will be refunded to the caller after the execution of the call, + // if there is left over at the end of execution + GasChange_REASON_REFUND_AFTER_EXECUTION GasChange_Reason = 14 + // REASON_RETURN is the amount of gas that will be charged for a 'RETURN' opcode executed by the EVM + GasChange_REASON_RETURN GasChange_Reason = 15 + // REASON_RETURN_DATA_COPY is the amount of gas that will be charged for a 'RETURNDATACOPY' opcode executed by the EVM + GasChange_REASON_RETURN_DATA_COPY GasChange_Reason = 16 + // REASON_REVERT is the amount of gas that will be charged for a 'REVERT' opcode executed by the EVM + GasChange_REASON_REVERT GasChange_Reason = 17 + // REASON_SELF_DESTRUCT is the amount of gas that will be charged for a 'SELFDESTRUCT' opcode executed by the EVM + GasChange_REASON_SELF_DESTRUCT GasChange_Reason = 18 + // REASON_STATIC_CALL is the amount of gas that will be charged for a 'STATICALL' opcode executed by the EVM + GasChange_REASON_STATIC_CALL GasChange_Reason = 19 + // REASON_STATE_COLD_ACCESS is the amount of gas that will be charged for a cold storage access as controlled by EIP2929 rules + // + // Added in Berlin fork (Geth 1.10+) + GasChange_REASON_STATE_COLD_ACCESS 
GasChange_Reason = 20 + // REASON_TX_INITIAL_BALANCE is the initial balance for the call which will be equal to the gasLimit of the call + // + // Added as new tracing reason in Geth, available only on some chains + GasChange_REASON_TX_INITIAL_BALANCE GasChange_Reason = 21 + // REASON_TX_REFUNDS is the sum of all refunds which happened during the tx execution (e.g. storage slot being cleared) + // this generates an increase in gas. There is only one such gas change per transaction. + // + // Added as new tracing reason in Geth, available only on some chains + GasChange_REASON_TX_REFUNDS GasChange_Reason = 22 + // REASON_TX_LEFT_OVER_RETURNED is the amount of gas left over at the end of transaction's execution that will be returned + // to the chain. This change will always be a negative change as we "drain" left over gas towards 0. If there was no gas + // left at the end of execution, no such even will be emitted. The returned gas's value in Wei is returned to caller. + // There is at most one of such gas change per transaction. + // + // Added as new tracing reason in Geth, available only on some chains + GasChange_REASON_TX_LEFT_OVER_RETURNED GasChange_Reason = 23 + // REASON_CALL_INITIAL_BALANCE is the initial balance for the call which will be equal to the gasLimit of the call. There is only + // one such gas change per call. + // + // Added as new tracing reason in Geth, available only on some chains + GasChange_REASON_CALL_INITIAL_BALANCE GasChange_Reason = 24 + // REASON_CALL_LEFT_OVER_RETURNED is the amount of gas left over that will be returned to the caller, this change will always + // be a negative change as we "drain" left over gas towards 0. If there was no gas left at the end of execution, no such even + // will be emitted. + GasChange_REASON_CALL_LEFT_OVER_RETURNED GasChange_Reason = 25 +) + +// Enum value maps for GasChange_Reason. 
+var ( + GasChange_Reason_name = map[int32]string{ + 0: "REASON_UNKNOWN", + 1: "REASON_CALL", + 2: "REASON_CALL_CODE", + 3: "REASON_CALL_DATA_COPY", + 4: "REASON_CODE_COPY", + 5: "REASON_CODE_STORAGE", + 6: "REASON_CONTRACT_CREATION", + 7: "REASON_CONTRACT_CREATION2", + 8: "REASON_DELEGATE_CALL", + 9: "REASON_EVENT_LOG", + 10: "REASON_EXT_CODE_COPY", + 11: "REASON_FAILED_EXECUTION", + 12: "REASON_INTRINSIC_GAS", + 13: "REASON_PRECOMPILED_CONTRACT", + 14: "REASON_REFUND_AFTER_EXECUTION", + 15: "REASON_RETURN", + 16: "REASON_RETURN_DATA_COPY", + 17: "REASON_REVERT", + 18: "REASON_SELF_DESTRUCT", + 19: "REASON_STATIC_CALL", + 20: "REASON_STATE_COLD_ACCESS", + 21: "REASON_TX_INITIAL_BALANCE", + 22: "REASON_TX_REFUNDS", + 23: "REASON_TX_LEFT_OVER_RETURNED", + 24: "REASON_CALL_INITIAL_BALANCE", + 25: "REASON_CALL_LEFT_OVER_RETURNED", + } + GasChange_Reason_value = map[string]int32{ + "REASON_UNKNOWN": 0, + "REASON_CALL": 1, + "REASON_CALL_CODE": 2, + "REASON_CALL_DATA_COPY": 3, + "REASON_CODE_COPY": 4, + "REASON_CODE_STORAGE": 5, + "REASON_CONTRACT_CREATION": 6, + "REASON_CONTRACT_CREATION2": 7, + "REASON_DELEGATE_CALL": 8, + "REASON_EVENT_LOG": 9, + "REASON_EXT_CODE_COPY": 10, + "REASON_FAILED_EXECUTION": 11, + "REASON_INTRINSIC_GAS": 12, + "REASON_PRECOMPILED_CONTRACT": 13, + "REASON_REFUND_AFTER_EXECUTION": 14, + "REASON_RETURN": 15, + "REASON_RETURN_DATA_COPY": 16, + "REASON_REVERT": 17, + "REASON_SELF_DESTRUCT": 18, + "REASON_STATIC_CALL": 19, + "REASON_STATE_COLD_ACCESS": 20, + "REASON_TX_INITIAL_BALANCE": 21, + "REASON_TX_REFUNDS": 22, + "REASON_TX_LEFT_OVER_RETURNED": 23, + "REASON_CALL_INITIAL_BALANCE": 24, + "REASON_CALL_LEFT_OVER_RETURNED": 25, + } +) + +func (x GasChange_Reason) Enum() *GasChange_Reason { + p := new(GasChange_Reason) + *p = x + return p +} + +func (x GasChange_Reason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GasChange_Reason) Descriptor() protoreflect.EnumDescriptor { + return 
file_sf_ethereum_type_v2_type_proto_enumTypes[5].Descriptor() +} + +func (GasChange_Reason) Type() protoreflect.EnumType { + return &file_sf_ethereum_type_v2_type_proto_enumTypes[5] +} + +func (x GasChange_Reason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GasChange_Reason.Descriptor instead. +func (GasChange_Reason) EnumDescriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{15, 0} +} + +type Block struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Hash is the block's hash. + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + // Number is the block's height at which this block was mined. + Number uint64 `protobuf:"varint,3,opt,name=number,proto3" json:"number,omitempty"` + // Size is the size in bytes of the RLP encoding of the block according to Ethereum + // rules. + Size uint64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` + // Header contain's the block's header information like its parent hash, the merkel root hash + // and all other information the form a block. + Header *BlockHeader `protobuf:"bytes,5,opt,name=header,proto3" json:"header,omitempty"` + // Uncles represents block produced with a valid solution but were not actually choosen + // as the canonical block for the given height so they are mostly "forked" blocks. + // + // If the Block has been produced using the Proof of Stake consensus algorithm, this + // field will actually be always empty. + Uncles []*BlockHeader `protobuf:"bytes,6,rep,name=uncles,proto3" json:"uncles,omitempty"` + // TransactionTraces hold the execute trace of all the transactions that were executed + // in this block. In in there that you will find most of the Ethereum data model. 
+ TransactionTraces []*TransactionTrace `protobuf:"bytes,10,rep,name=transaction_traces,json=transactionTraces,proto3" json:"transaction_traces,omitempty"` + // BalanceChanges here is the array of ETH transfer that happened at the block level + // outside of the normal transaction flow of a block. The best example of this is mining + // reward for the block mined, the transfer of ETH to the miner happens outside the normal + // transaction flow of the chain and is recorded as a `BalanceChange` here since we cannot + // attached it to any transaction. + // + // Only available in DetailLevel: EXTENDED + BalanceChanges []*BalanceChange `protobuf:"bytes,11,rep,name=balance_changes,json=balanceChanges,proto3" json:"balance_changes,omitempty"` + // DetailLevel affects the data available in this block. + // + // EXTENDED describes the most complete block, with traces, balance changes, storage changes. It is extracted during the execution of the block. + // BASE describes a block that contains only the block header, transaction receipts and event logs: everything that can be extracted using the base JSON-RPC interface (https://ethereum.org/en/developers/docs/apis/json-rpc/#json-rpc-methods) + // + // Furthermore, the eth_getTransactionReceipt call has been avoided because it brings only minimal improvements at the cost of requiring an archive node or a full node with complete transaction index. + DetailLevel Block_DetailLevel `protobuf:"varint,12,opt,name=detail_level,json=detailLevel,proto3,enum=sf.ethereum.type.v2.Block_DetailLevel" json:"detail_level,omitempty"` + // CodeChanges here is the array of smart code change that happened that happened at the block level + // outside of the normal transaction flow of a block. Some Ethereum's fork like BSC and Polygon + // has some capabilities to upgrade internal smart contracts used usually to track the validator + // list. + // + // On hard fork, some procedure runs to upgrade the smart contract code to a new version. 
In those + // network, a `CodeChange` for each modified smart contract on upgrade would be present here. Note + // that this happen rarely, so the vast majority of block will have an empty list here. + // Only available in DetailLevel: EXTENDED + CodeChanges []*CodeChange `protobuf:"bytes,20,rep,name=code_changes,json=codeChanges,proto3" json:"code_changes,omitempty"` + // Ver represents that data model version of the block, it is used internally by Firehose on Ethereum + // as a validation that we are reading the correct version. + Ver int32 `protobuf:"varint,1,opt,name=ver,proto3" json:"ver,omitempty"` +} + +func (x *Block) Reset() { + *x = Block{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Block) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Block) ProtoMessage() {} + +func (x *Block) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Block.ProtoReflect.Descriptor instead. 
+func (*Block) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{0} +} + +func (x *Block) GetHash() []byte { + if x != nil { + return x.Hash + } + return nil +} + +func (x *Block) GetNumber() uint64 { + if x != nil { + return x.Number + } + return 0 +} + +func (x *Block) GetSize() uint64 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *Block) GetHeader() *BlockHeader { + if x != nil { + return x.Header + } + return nil +} + +func (x *Block) GetUncles() []*BlockHeader { + if x != nil { + return x.Uncles + } + return nil +} + +func (x *Block) GetTransactionTraces() []*TransactionTrace { + if x != nil { + return x.TransactionTraces + } + return nil +} + +func (x *Block) GetBalanceChanges() []*BalanceChange { + if x != nil { + return x.BalanceChanges + } + return nil +} + +func (x *Block) GetDetailLevel() Block_DetailLevel { + if x != nil { + return x.DetailLevel + } + return Block_DETAILLEVEL_EXTENDED +} + +func (x *Block) GetCodeChanges() []*CodeChange { + if x != nil { + return x.CodeChanges + } + return nil +} + +func (x *Block) GetVer() int32 { + if x != nil { + return x.Ver + } + return 0 +} + +type BlockHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ParentHash []byte `protobuf:"bytes,1,opt,name=parent_hash,json=parentHash,proto3" json:"parent_hash,omitempty"` + // Uncle hash of the block, some reference it as `sha3Uncles`, but `sha3“ is badly worded, so we prefer `uncle_hash`, also + // referred as `ommers` in EIP specification. + // + // If the Block containing this `BlockHeader` has been produced using the Proof of Stake + // consensus algorithm, this field will actually be constant and set to `0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347`. 
+ UncleHash []byte `protobuf:"bytes,2,opt,name=uncle_hash,json=uncleHash,proto3" json:"uncle_hash,omitempty"` + Coinbase []byte `protobuf:"bytes,3,opt,name=coinbase,proto3" json:"coinbase,omitempty"` + StateRoot []byte `protobuf:"bytes,4,opt,name=state_root,json=stateRoot,proto3" json:"state_root,omitempty"` + TransactionsRoot []byte `protobuf:"bytes,5,opt,name=transactions_root,json=transactionsRoot,proto3" json:"transactions_root,omitempty"` + ReceiptRoot []byte `protobuf:"bytes,6,opt,name=receipt_root,json=receiptRoot,proto3" json:"receipt_root,omitempty"` + LogsBloom []byte `protobuf:"bytes,7,opt,name=logs_bloom,json=logsBloom,proto3" json:"logs_bloom,omitempty"` + // Difficulty is the difficulty of the Proof of Work algorithm that was required to compute a solution. + // + // If the Block containing this `BlockHeader` has been produced using the Proof of Stake + // consensus algorithm, this field will actually be constant and set to `0x00`. + Difficulty *BigInt `protobuf:"bytes,8,opt,name=difficulty,proto3" json:"difficulty,omitempty"` + // TotalDifficulty is the sum of all previous blocks difficulty including this block difficulty. + // + // If the Block containing this `BlockHeader` has been produced using the Proof of Stake + // consensus algorithm, this field will actually be constant and set to the terminal total difficulty + // that was required to transition to Proof of Stake algorithm, which varies per network. It is set to + // 58 750 000 000 000 000 000 000 on Ethereum Mainnet and to 10 790 000 on Ethereum Testnet Goerli. 
+ TotalDifficulty *BigInt `protobuf:"bytes,17,opt,name=total_difficulty,json=totalDifficulty,proto3" json:"total_difficulty,omitempty"` + Number uint64 `protobuf:"varint,9,opt,name=number,proto3" json:"number,omitempty"` + GasLimit uint64 `protobuf:"varint,10,opt,name=gas_limit,json=gasLimit,proto3" json:"gas_limit,omitempty"` + GasUsed uint64 `protobuf:"varint,11,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // ExtraData is free-form bytes included in the block by the "miner". While on Yellow paper of + // Ethereum this value is maxed to 32 bytes, other consensus algorithm like Clique and some other + // forks are using bigger values to carry special consensus data. + // + // If the Block containing this `BlockHeader` has been produced using the Proof of Stake + // consensus algorithm, this field is strictly enforced to be <= 32 bytes. + ExtraData []byte `protobuf:"bytes,13,opt,name=extra_data,json=extraData,proto3" json:"extra_data,omitempty"` + // MixHash is used to prove, when combined with the `nonce` that sufficient amount of computation has been + // achieved and that the solution found is valid. + MixHash []byte `protobuf:"bytes,14,opt,name=mix_hash,json=mixHash,proto3" json:"mix_hash,omitempty"` + // Nonce is used to prove, when combined with the `mix_hash` that sufficient amount of computation has been + // achieved and that the solution found is valid. + // + // If the Block containing this `BlockHeader` has been produced using the Proof of Stake + // consensus algorithm, this field will actually be constant and set to `0`. 
+ Nonce uint64 `protobuf:"varint,15,opt,name=nonce,proto3" json:"nonce,omitempty"` + // Hash is the hash of the block which is actually the computation: + // + // Keccak256(rlp([ + // parent_hash, + // uncle_hash, + // coinbase, + // state_root, + // transactions_root, + // receipt_root, + // logs_bloom, + // difficulty, + // number, + // gas_limit, + // gas_used, + // timestamp, + // extra_data, + // mix_hash, + // nonce, + // base_fee_per_gas (to be included, only if London Fork is active) + // withdrawals_root (to be included, only if Shangai Fork is active) + // ])) + Hash []byte `protobuf:"bytes,16,opt,name=hash,proto3" json:"hash,omitempty"` + // Base fee per gas according to EIP-1559 (e.g. London Fork) rules, only set if London is present/active on the chain. + BaseFeePerGas *BigInt `protobuf:"bytes,18,opt,name=base_fee_per_gas,json=baseFeePerGas,proto3" json:"base_fee_per_gas,omitempty"` + // Withdrawals root hash according to EIP-4895 (e.g. Shangai Fork) rules, only set if Shangai is present/active on the chain. 
+ // + // Only available in DetailLevel: EXTENDED + WithdrawalsRoot []byte `protobuf:"bytes,19,opt,name=withdrawals_root,json=withdrawalsRoot,proto3" json:"withdrawals_root,omitempty"` + // Only available in DetailLevel: EXTENDED + TxDependency *Uint64NestedArray `protobuf:"bytes,20,opt,name=tx_dependency,json=txDependency,proto3" json:"tx_dependency,omitempty"` +} + +func (x *BlockHeader) Reset() { + *x = BlockHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlockHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlockHeader) ProtoMessage() {} + +func (x *BlockHeader) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlockHeader.ProtoReflect.Descriptor instead. 
+func (*BlockHeader) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{1} +} + +func (x *BlockHeader) GetParentHash() []byte { + if x != nil { + return x.ParentHash + } + return nil +} + +func (x *BlockHeader) GetUncleHash() []byte { + if x != nil { + return x.UncleHash + } + return nil +} + +func (x *BlockHeader) GetCoinbase() []byte { + if x != nil { + return x.Coinbase + } + return nil +} + +func (x *BlockHeader) GetStateRoot() []byte { + if x != nil { + return x.StateRoot + } + return nil +} + +func (x *BlockHeader) GetTransactionsRoot() []byte { + if x != nil { + return x.TransactionsRoot + } + return nil +} + +func (x *BlockHeader) GetReceiptRoot() []byte { + if x != nil { + return x.ReceiptRoot + } + return nil +} + +func (x *BlockHeader) GetLogsBloom() []byte { + if x != nil { + return x.LogsBloom + } + return nil +} + +func (x *BlockHeader) GetDifficulty() *BigInt { + if x != nil { + return x.Difficulty + } + return nil +} + +func (x *BlockHeader) GetTotalDifficulty() *BigInt { + if x != nil { + return x.TotalDifficulty + } + return nil +} + +func (x *BlockHeader) GetNumber() uint64 { + if x != nil { + return x.Number + } + return 0 +} + +func (x *BlockHeader) GetGasLimit() uint64 { + if x != nil { + return x.GasLimit + } + return 0 +} + +func (x *BlockHeader) GetGasUsed() uint64 { + if x != nil { + return x.GasUsed + } + return 0 +} + +func (x *BlockHeader) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp + } + return nil +} + +func (x *BlockHeader) GetExtraData() []byte { + if x != nil { + return x.ExtraData + } + return nil +} + +func (x *BlockHeader) GetMixHash() []byte { + if x != nil { + return x.MixHash + } + return nil +} + +func (x *BlockHeader) GetNonce() uint64 { + if x != nil { + return x.Nonce + } + return 0 +} + +func (x *BlockHeader) GetHash() []byte { + if x != nil { + return x.Hash + } + return nil +} + +func (x *BlockHeader) GetBaseFeePerGas() *BigInt { + if x != 
nil { + return x.BaseFeePerGas + } + return nil +} + +func (x *BlockHeader) GetWithdrawalsRoot() []byte { + if x != nil { + return x.WithdrawalsRoot + } + return nil +} + +func (x *BlockHeader) GetTxDependency() *Uint64NestedArray { + if x != nil { + return x.TxDependency + } + return nil +} + +type Uint64NestedArray struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Val []*Uint64Array `protobuf:"bytes,1,rep,name=val,proto3" json:"val,omitempty"` +} + +func (x *Uint64NestedArray) Reset() { + *x = Uint64NestedArray{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Uint64NestedArray) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Uint64NestedArray) ProtoMessage() {} + +func (x *Uint64NestedArray) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Uint64NestedArray.ProtoReflect.Descriptor instead. 
+func (*Uint64NestedArray) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{2} +} + +func (x *Uint64NestedArray) GetVal() []*Uint64Array { + if x != nil { + return x.Val + } + return nil +} + +type Uint64Array struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Val []uint64 `protobuf:"varint,1,rep,packed,name=val,proto3" json:"val,omitempty"` +} + +func (x *Uint64Array) Reset() { + *x = Uint64Array{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Uint64Array) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Uint64Array) ProtoMessage() {} + +func (x *Uint64Array) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Uint64Array.ProtoReflect.Descriptor instead. 
+func (*Uint64Array) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{3} +} + +func (x *Uint64Array) GetVal() []uint64 { + if x != nil { + return x.Val + } + return nil +} + +type BigInt struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Bytes []byte `protobuf:"bytes,1,opt,name=bytes,proto3" json:"bytes,omitempty"` +} + +func (x *BigInt) Reset() { + *x = BigInt{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BigInt) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BigInt) ProtoMessage() {} + +func (x *BigInt) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BigInt.ProtoReflect.Descriptor instead. +func (*BigInt) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{4} +} + +func (x *BigInt) GetBytes() []byte { + if x != nil { + return x.Bytes + } + return nil +} + +type TransactionTrace struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // consensus + To []byte `protobuf:"bytes,1,opt,name=to,proto3" json:"to,omitempty"` + Nonce uint64 `protobuf:"varint,2,opt,name=nonce,proto3" json:"nonce,omitempty"` + // GasPrice represents the effective price that has been paid for each gas unit of this transaction. Over time, the + // Ethereum rules changes regarding GasPrice field here. Before London fork, the GasPrice was always set to the + // fixed gas price. 
After London fork, this value has different meaning depending on the transaction type (see `Type` field). + // + // In cases where `TransactionTrace.Type == TRX_TYPE_LEGACY || TRX_TYPE_ACCESS_LIST`, then GasPrice has the same meaning + // as before the London fork. + // + // In cases where `TransactionTrace.Type == TRX_TYPE_DYNAMIC_FEE`, then GasPrice is the effective gas price paid + // for the transaction which is equals to `BlockHeader.BaseFeePerGas + TransactionTrace.` + GasPrice *BigInt `protobuf:"bytes,3,opt,name=gas_price,json=gasPrice,proto3" json:"gas_price,omitempty"` + // GasLimit is the maximum of gas unit the sender of the transaction is willing to consume when perform the EVM + // execution of the whole transaction + GasLimit uint64 `protobuf:"varint,4,opt,name=gas_limit,json=gasLimit,proto3" json:"gas_limit,omitempty"` + // Value is the amount of Ether transferred as part of this transaction. + Value *BigInt `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"` + // Input data the transaction will receive for execution of EVM. + Input []byte `protobuf:"bytes,6,opt,name=input,proto3" json:"input,omitempty"` + // V is the recovery ID value for the signature Y point. + V []byte `protobuf:"bytes,7,opt,name=v,proto3" json:"v,omitempty"` + // R is the signature's X point on the elliptic curve (32 bytes). + R []byte `protobuf:"bytes,8,opt,name=r,proto3" json:"r,omitempty"` + // S is the signature's Y point on the elliptic curve (32 bytes). + S []byte `protobuf:"bytes,9,opt,name=s,proto3" json:"s,omitempty"` + // GasUsed is the total amount of gas unit used for the whole execution of the transaction. + // + // Only available in DetailLevel: EXTENDED + GasUsed uint64 `protobuf:"varint,10,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` + // Type represents the Ethereum transaction type, available only since EIP-2718 & EIP-2930 activation which happened on Berlin fork. 
+ // The value is always set even for transaction before Berlin fork because those before the fork are still legacy transactions. + // + // Only available in DetailLevel: EXTENDED + Type TransactionTrace_Type `protobuf:"varint,12,opt,name=type,proto3,enum=sf.ethereum.type.v2.TransactionTrace_Type" json:"type,omitempty"` + // AcccessList represents the storage access this transaction has agreed to do in which case those storage + // access cost less gas unit per access. + // + // This will is populated only if `TransactionTrace.Type == TRX_TYPE_ACCESS_LIST || TRX_TYPE_DYNAMIC_FEE` which + // is possible only if Berlin (TRX_TYPE_ACCESS_LIST) nor London (TRX_TYPE_DYNAMIC_FEE) fork are active on the chain. + AccessList []*AccessTuple `protobuf:"bytes,14,rep,name=access_list,json=accessList,proto3" json:"access_list,omitempty"` + // MaxFeePerGas is the maximum fee per gas the user is willing to pay for the transaction gas used. + // + // This will is populated only if `TransactionTrace.Type == TRX_TYPE_DYNAMIC_FEE` which is possible only + // if Londong fork is active on the chain. + // + // Only available in DetailLevel: EXTENDED + MaxFeePerGas *BigInt `protobuf:"bytes,11,opt,name=max_fee_per_gas,json=maxFeePerGas,proto3" json:"max_fee_per_gas,omitempty"` + // MaxPriorityFeePerGas is priority fee per gas the user to pay in extra to the miner on top of the block's + // base fee. + // + // This will is populated only if `TransactionTrace.Type == TRX_TYPE_DYNAMIC_FEE` which is possible only + // if London fork is active on the chain. 
+ // + // Only available in DetailLevel: EXTENDED + MaxPriorityFeePerGas *BigInt `protobuf:"bytes,13,opt,name=max_priority_fee_per_gas,json=maxPriorityFeePerGas,proto3" json:"max_priority_fee_per_gas,omitempty"` + // meta + Index uint32 `protobuf:"varint,20,opt,name=index,proto3" json:"index,omitempty"` + Hash []byte `protobuf:"bytes,21,opt,name=hash,proto3" json:"hash,omitempty"` + From []byte `protobuf:"bytes,22,opt,name=from,proto3" json:"from,omitempty"` + // Only available in DetailLevel: EXTENDED + ReturnData []byte `protobuf:"bytes,23,opt,name=return_data,json=returnData,proto3" json:"return_data,omitempty"` + // Only available in DetailLevel: EXTENDED + PublicKey []byte `protobuf:"bytes,24,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + BeginOrdinal uint64 `protobuf:"varint,25,opt,name=begin_ordinal,json=beginOrdinal,proto3" json:"begin_ordinal,omitempty"` + EndOrdinal uint64 `protobuf:"varint,26,opt,name=end_ordinal,json=endOrdinal,proto3" json:"end_ordinal,omitempty"` + // TransactionTraceStatus is the status of the transaction execution and will let you know if the transaction + // was successful or not. + // + // A successful transaction has been recorded to the blockchain's state for calls in it that were successful. + // This means it's possible only a subset of the calls were properly recorded, refer to [calls[].state_reverted] field + // to determine which calls were reverted. + // + // A quirks of the Ethereum protocol is that a transaction `FAILED` or `REVERTED` still affects the blockchain's + // state for **some** of the state changes. Indeed, in those cases, the transactions fees are still paid to the miner + // which means there is a balance change for the transaction's emitter (e.g. `from`) to pay the gas fees, an optional + // balance change for gas refunded to the transaction's emitter (e.g. `from`) and a balance change for the miner who + // received the transaction fees. 
There is also a nonce change for the transaction's emitter (e.g. `from`). + // + // This means that to properly record the state changes for a transaction, you need to conditionally procees the + // transaction's status. + // + // For a `SUCCEEDED` transaction, you iterate over the `calls` array and record the state changes for each call for + // which `state_reverted == false` (if a transaction succeeded, the call at #0 will always `state_reverted == false` + // because it aligns with the transaction). + // + // For a `FAILED` or `REVERTED` transaction, you iterate over the root call (e.g. at #0, will always exist) for + // balance changes you process those where `reason` is either `REASON_GAS_BUY`, `REASON_GAS_REFUND` or + // `REASON_REWARD_TRANSACTION_FEE` and for nonce change, still on the root call, you pick the nonce change which the + // smallest ordinal (if more than one). + // + // Only available in DetailLevel: EXTENDED + Status TransactionTraceStatus `protobuf:"varint,30,opt,name=status,proto3,enum=sf.ethereum.type.v2.TransactionTraceStatus" json:"status,omitempty"` + Receipt *TransactionReceipt `protobuf:"bytes,31,opt,name=receipt,proto3" json:"receipt,omitempty"` + // Only available in DetailLevel: EXTENDED + Calls []*Call `protobuf:"bytes,32,rep,name=calls,proto3" json:"calls,omitempty"` +} + +func (x *TransactionTrace) Reset() { + *x = TransactionTrace{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactionTrace) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactionTrace) ProtoMessage() {} + +func (x *TransactionTrace) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + 
return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactionTrace.ProtoReflect.Descriptor instead. +func (*TransactionTrace) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{5} +} + +func (x *TransactionTrace) GetTo() []byte { + if x != nil { + return x.To + } + return nil +} + +func (x *TransactionTrace) GetNonce() uint64 { + if x != nil { + return x.Nonce + } + return 0 +} + +func (x *TransactionTrace) GetGasPrice() *BigInt { + if x != nil { + return x.GasPrice + } + return nil +} + +func (x *TransactionTrace) GetGasLimit() uint64 { + if x != nil { + return x.GasLimit + } + return 0 +} + +func (x *TransactionTrace) GetValue() *BigInt { + if x != nil { + return x.Value + } + return nil +} + +func (x *TransactionTrace) GetInput() []byte { + if x != nil { + return x.Input + } + return nil +} + +func (x *TransactionTrace) GetV() []byte { + if x != nil { + return x.V + } + return nil +} + +func (x *TransactionTrace) GetR() []byte { + if x != nil { + return x.R + } + return nil +} + +func (x *TransactionTrace) GetS() []byte { + if x != nil { + return x.S + } + return nil +} + +func (x *TransactionTrace) GetGasUsed() uint64 { + if x != nil { + return x.GasUsed + } + return 0 +} + +func (x *TransactionTrace) GetType() TransactionTrace_Type { + if x != nil { + return x.Type + } + return TransactionTrace_TRX_TYPE_LEGACY +} + +func (x *TransactionTrace) GetAccessList() []*AccessTuple { + if x != nil { + return x.AccessList + } + return nil +} + +func (x *TransactionTrace) GetMaxFeePerGas() *BigInt { + if x != nil { + return x.MaxFeePerGas + } + return nil +} + +func (x *TransactionTrace) GetMaxPriorityFeePerGas() *BigInt { + if x != nil { + return x.MaxPriorityFeePerGas + } + return nil +} + +func (x *TransactionTrace) GetIndex() uint32 { + if x != nil { + return x.Index + } + return 0 +} + +func (x *TransactionTrace) GetHash() []byte { + if x != nil { + return x.Hash + } + return nil +} + +func (x 
*TransactionTrace) GetFrom() []byte { + if x != nil { + return x.From + } + return nil +} + +func (x *TransactionTrace) GetReturnData() []byte { + if x != nil { + return x.ReturnData + } + return nil +} + +func (x *TransactionTrace) GetPublicKey() []byte { + if x != nil { + return x.PublicKey + } + return nil +} + +func (x *TransactionTrace) GetBeginOrdinal() uint64 { + if x != nil { + return x.BeginOrdinal + } + return 0 +} + +func (x *TransactionTrace) GetEndOrdinal() uint64 { + if x != nil { + return x.EndOrdinal + } + return 0 +} + +func (x *TransactionTrace) GetStatus() TransactionTraceStatus { + if x != nil { + return x.Status + } + return TransactionTraceStatus_UNKNOWN +} + +func (x *TransactionTrace) GetReceipt() *TransactionReceipt { + if x != nil { + return x.Receipt + } + return nil +} + +func (x *TransactionTrace) GetCalls() []*Call { + if x != nil { + return x.Calls + } + return nil +} + +// AccessTuple represents a list of storage keys for a given contract's address and is used +// for AccessList construction. 
+type AccessTuple struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + StorageKeys [][]byte `protobuf:"bytes,2,rep,name=storage_keys,json=storageKeys,proto3" json:"storage_keys,omitempty"` +} + +func (x *AccessTuple) Reset() { + *x = AccessTuple{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AccessTuple) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AccessTuple) ProtoMessage() {} + +func (x *AccessTuple) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AccessTuple.ProtoReflect.Descriptor instead. +func (*AccessTuple) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{6} +} + +func (x *AccessTuple) GetAddress() []byte { + if x != nil { + return x.Address + } + return nil +} + +func (x *AccessTuple) GetStorageKeys() [][]byte { + if x != nil { + return x.StorageKeys + } + return nil +} + +type TransactionReceipt struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // State root is an intermediate state_root hash, computed in-between transactions to make + // **sure** you could build a proof and point to state in the middle of a block. 
Geth client + // uses `PostState + root + PostStateOrStatus“ while Parity used `status_code, root...“ this piles + // hardforks, see (read the EIPs first): + // - https://github.com/ethereum/EIPs/blob/master/EIPS/eip-658.md + // + // Moreover, the notion of `Outcome“ in parity, which segregates the two concepts, which are + // stored in the same field `status_code“ can be computed based on such a hack of the `state_root` + // field, following `EIP-658`. + // + // Before Byzantinium hard fork, this field is always empty. + // + // Only available in DetailLevel: EXTENDED + StateRoot []byte `protobuf:"bytes,1,opt,name=state_root,json=stateRoot,proto3" json:"state_root,omitempty"` + // Only available in DetailLevel: EXTENDED + CumulativeGasUsed uint64 `protobuf:"varint,2,opt,name=cumulative_gas_used,json=cumulativeGasUsed,proto3" json:"cumulative_gas_used,omitempty"` + // Only available in DetailLevel: EXTENDED + LogsBloom []byte `protobuf:"bytes,3,opt,name=logs_bloom,json=logsBloom,proto3" json:"logs_bloom,omitempty"` + Logs []*Log `protobuf:"bytes,4,rep,name=logs,proto3" json:"logs,omitempty"` +} + +func (x *TransactionReceipt) Reset() { + *x = TransactionReceipt{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactionReceipt) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactionReceipt) ProtoMessage() {} + +func (x *TransactionReceipt) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactionReceipt.ProtoReflect.Descriptor instead. 
+func (*TransactionReceipt) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{7} +} + +func (x *TransactionReceipt) GetStateRoot() []byte { + if x != nil { + return x.StateRoot + } + return nil +} + +func (x *TransactionReceipt) GetCumulativeGasUsed() uint64 { + if x != nil { + return x.CumulativeGasUsed + } + return 0 +} + +func (x *TransactionReceipt) GetLogsBloom() []byte { + if x != nil { + return x.LogsBloom + } + return nil +} + +func (x *TransactionReceipt) GetLogs() []*Log { + if x != nil { + return x.Logs + } + return nil +} + +type Log struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Topics [][]byte `protobuf:"bytes,2,rep,name=topics,proto3" json:"topics,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + // Index is the index of the log relative to the transaction. This index + // is always populated regardless of the state revertion of the the call + // that emitted this log. + // + // Only available in DetailLevel: EXTENDED + Index uint32 `protobuf:"varint,4,opt,name=index,proto3" json:"index,omitempty"` + // BlockIndex represents the index of the log relative to the Block. + // + // An **important** notice is that this field will be 0 when the call + // that emitted the log has been reverted by the chain. + // + // Currently, there is two locations where a Log can be obtained: + // - block.transaction_traces[].receipt.logs[] + // - block.transaction_traces[].calls[].logs[] + // + // In the `receipt` case, the logs will be populated only when the call + // that emitted them has not been reverted by the chain and when in this + // position, the `blockIndex` is always populated correctly. + // + // In the case of `calls` case, for `call` where `stateReverted == true`, + // the `blockIndex` value will always be 0. 
+ BlockIndex uint32 `protobuf:"varint,6,opt,name=blockIndex,proto3" json:"blockIndex,omitempty"` + Ordinal uint64 `protobuf:"varint,7,opt,name=ordinal,proto3" json:"ordinal,omitempty"` +} + +func (x *Log) Reset() { + *x = Log{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Log) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Log) ProtoMessage() {} + +func (x *Log) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Log.ProtoReflect.Descriptor instead. +func (*Log) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{8} +} + +func (x *Log) GetAddress() []byte { + if x != nil { + return x.Address + } + return nil +} + +func (x *Log) GetTopics() [][]byte { + if x != nil { + return x.Topics + } + return nil +} + +func (x *Log) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *Log) GetIndex() uint32 { + if x != nil { + return x.Index + } + return 0 +} + +func (x *Log) GetBlockIndex() uint32 { + if x != nil { + return x.BlockIndex + } + return 0 +} + +func (x *Log) GetOrdinal() uint64 { + if x != nil { + return x.Ordinal + } + return 0 +} + +type Call struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Index uint32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + ParentIndex uint32 `protobuf:"varint,2,opt,name=parent_index,json=parentIndex,proto3" json:"parent_index,omitempty"` + Depth uint32 `protobuf:"varint,3,opt,name=depth,proto3" json:"depth,omitempty"` + CallType CallType 
`protobuf:"varint,4,opt,name=call_type,json=callType,proto3,enum=sf.ethereum.type.v2.CallType" json:"call_type,omitempty"` + Caller []byte `protobuf:"bytes,5,opt,name=caller,proto3" json:"caller,omitempty"` + Address []byte `protobuf:"bytes,6,opt,name=address,proto3" json:"address,omitempty"` + Value *BigInt `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` + GasLimit uint64 `protobuf:"varint,8,opt,name=gas_limit,json=gasLimit,proto3" json:"gas_limit,omitempty"` + GasConsumed uint64 `protobuf:"varint,9,opt,name=gas_consumed,json=gasConsumed,proto3" json:"gas_consumed,omitempty"` + ReturnData []byte `protobuf:"bytes,13,opt,name=return_data,json=returnData,proto3" json:"return_data,omitempty"` + Input []byte `protobuf:"bytes,14,opt,name=input,proto3" json:"input,omitempty"` + ExecutedCode bool `protobuf:"varint,15,opt,name=executed_code,json=executedCode,proto3" json:"executed_code,omitempty"` + Suicide bool `protobuf:"varint,16,opt,name=suicide,proto3" json:"suicide,omitempty"` + // hex representation of the hash -> preimage + KeccakPreimages map[string]string `protobuf:"bytes,20,rep,name=keccak_preimages,json=keccakPreimages,proto3" json:"keccak_preimages,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + StorageChanges []*StorageChange `protobuf:"bytes,21,rep,name=storage_changes,json=storageChanges,proto3" json:"storage_changes,omitempty"` + BalanceChanges []*BalanceChange `protobuf:"bytes,22,rep,name=balance_changes,json=balanceChanges,proto3" json:"balance_changes,omitempty"` + NonceChanges []*NonceChange `protobuf:"bytes,24,rep,name=nonce_changes,json=nonceChanges,proto3" json:"nonce_changes,omitempty"` + Logs []*Log `protobuf:"bytes,25,rep,name=logs,proto3" json:"logs,omitempty"` + CodeChanges []*CodeChange `protobuf:"bytes,26,rep,name=code_changes,json=codeChanges,proto3" json:"code_changes,omitempty"` + GasChanges []*GasChange `protobuf:"bytes,28,rep,name=gas_changes,json=gasChanges,proto3" 
json:"gas_changes,omitempty"` + // In Ethereum, a call can be either: + // - Successfull, execution passes without any problem encountered + // - Failed, execution failed, and remaining gas should be consumed + // - Reverted, execution failed, but only gas consumed so far is billed, remaining gas is refunded + // + // When a call is either `failed` or `reverted`, the `status_failed` field + // below is set to `true`. If the status is `reverted`, then both `status_failed` + // and `status_reverted` are going to be set to `true`. + StatusFailed bool `protobuf:"varint,10,opt,name=status_failed,json=statusFailed,proto3" json:"status_failed,omitempty"` + StatusReverted bool `protobuf:"varint,12,opt,name=status_reverted,json=statusReverted,proto3" json:"status_reverted,omitempty"` + // Populated when a call either failed or reverted, so when `status_failed == true`, + // see above for details about those flags. + FailureReason string `protobuf:"bytes,11,opt,name=failure_reason,json=failureReason,proto3" json:"failure_reason,omitempty"` + // This field represents wheter or not the state changes performed + // by this call were correctly recorded by the blockchain. + // + // On Ethereum, a transaction can record state changes even if some + // of its inner nested calls failed. This is problematic however since + // a call will invalidate all its state changes as well as all state + // changes performed by its child call. This means that even if a call + // has a status of `SUCCESS`, the chain might have reverted all the state + // changes it performed. + // + // ```text + // + // Trx 1 + // Call #1 + // Call #2 + // Call #3 + // |--- Failure here + // Call #4 + // + // ``` + // + // In the transaction above, while Call #2 and Call #3 would have the + // status `EXECUTED`. + // + // If you check all calls and check only `state_reverted` flag, you might be missing + // some balance changes and nonce changes. 
This is because when a full transaction fails + // in ethereum (e.g. `calls.all(x.state_reverted == true)`), there is still the transaction + // fee that are recorded to the chain. + // + // Refer to [TransactionTrace#status] field for more details about the handling you must + // perform. + StateReverted bool `protobuf:"varint,30,opt,name=state_reverted,json=stateReverted,proto3" json:"state_reverted,omitempty"` + BeginOrdinal uint64 `protobuf:"varint,31,opt,name=begin_ordinal,json=beginOrdinal,proto3" json:"begin_ordinal,omitempty"` + EndOrdinal uint64 `protobuf:"varint,32,opt,name=end_ordinal,json=endOrdinal,proto3" json:"end_ordinal,omitempty"` + AccountCreations []*AccountCreation `protobuf:"bytes,33,rep,name=account_creations,json=accountCreations,proto3" json:"account_creations,omitempty"` +} + +func (x *Call) Reset() { + *x = Call{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Call) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Call) ProtoMessage() {} + +func (x *Call) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Call.ProtoReflect.Descriptor instead. 
+func (*Call) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{9} +} + +func (x *Call) GetIndex() uint32 { + if x != nil { + return x.Index + } + return 0 +} + +func (x *Call) GetParentIndex() uint32 { + if x != nil { + return x.ParentIndex + } + return 0 +} + +func (x *Call) GetDepth() uint32 { + if x != nil { + return x.Depth + } + return 0 +} + +func (x *Call) GetCallType() CallType { + if x != nil { + return x.CallType + } + return CallType_UNSPECIFIED +} + +func (x *Call) GetCaller() []byte { + if x != nil { + return x.Caller + } + return nil +} + +func (x *Call) GetAddress() []byte { + if x != nil { + return x.Address + } + return nil +} + +func (x *Call) GetValue() *BigInt { + if x != nil { + return x.Value + } + return nil +} + +func (x *Call) GetGasLimit() uint64 { + if x != nil { + return x.GasLimit + } + return 0 +} + +func (x *Call) GetGasConsumed() uint64 { + if x != nil { + return x.GasConsumed + } + return 0 +} + +func (x *Call) GetReturnData() []byte { + if x != nil { + return x.ReturnData + } + return nil +} + +func (x *Call) GetInput() []byte { + if x != nil { + return x.Input + } + return nil +} + +func (x *Call) GetExecutedCode() bool { + if x != nil { + return x.ExecutedCode + } + return false +} + +func (x *Call) GetSuicide() bool { + if x != nil { + return x.Suicide + } + return false +} + +func (x *Call) GetKeccakPreimages() map[string]string { + if x != nil { + return x.KeccakPreimages + } + return nil +} + +func (x *Call) GetStorageChanges() []*StorageChange { + if x != nil { + return x.StorageChanges + } + return nil +} + +func (x *Call) GetBalanceChanges() []*BalanceChange { + if x != nil { + return x.BalanceChanges + } + return nil +} + +func (x *Call) GetNonceChanges() []*NonceChange { + if x != nil { + return x.NonceChanges + } + return nil +} + +func (x *Call) GetLogs() []*Log { + if x != nil { + return x.Logs + } + return nil +} + +func (x *Call) GetCodeChanges() []*CodeChange { + if x 
!= nil { + return x.CodeChanges + } + return nil +} + +func (x *Call) GetGasChanges() []*GasChange { + if x != nil { + return x.GasChanges + } + return nil +} + +func (x *Call) GetStatusFailed() bool { + if x != nil { + return x.StatusFailed + } + return false +} + +func (x *Call) GetStatusReverted() bool { + if x != nil { + return x.StatusReverted + } + return false +} + +func (x *Call) GetFailureReason() string { + if x != nil { + return x.FailureReason + } + return "" +} + +func (x *Call) GetStateReverted() bool { + if x != nil { + return x.StateReverted + } + return false +} + +func (x *Call) GetBeginOrdinal() uint64 { + if x != nil { + return x.BeginOrdinal + } + return 0 +} + +func (x *Call) GetEndOrdinal() uint64 { + if x != nil { + return x.EndOrdinal + } + return 0 +} + +func (x *Call) GetAccountCreations() []*AccountCreation { + if x != nil { + return x.AccountCreations + } + return nil +} + +type StorageChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + OldValue []byte `protobuf:"bytes,3,opt,name=old_value,json=oldValue,proto3" json:"old_value,omitempty"` + NewValue []byte `protobuf:"bytes,4,opt,name=new_value,json=newValue,proto3" json:"new_value,omitempty"` + Ordinal uint64 `protobuf:"varint,5,opt,name=ordinal,proto3" json:"ordinal,omitempty"` +} + +func (x *StorageChange) Reset() { + *x = StorageChange{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageChange) ProtoMessage() {} + +func (x *StorageChange) ProtoReflect() protoreflect.Message { + mi := 
&file_sf_ethereum_type_v2_type_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageChange.ProtoReflect.Descriptor instead. +func (*StorageChange) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{10} +} + +func (x *StorageChange) GetAddress() []byte { + if x != nil { + return x.Address + } + return nil +} + +func (x *StorageChange) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *StorageChange) GetOldValue() []byte { + if x != nil { + return x.OldValue + } + return nil +} + +func (x *StorageChange) GetNewValue() []byte { + if x != nil { + return x.NewValue + } + return nil +} + +func (x *StorageChange) GetOrdinal() uint64 { + if x != nil { + return x.Ordinal + } + return 0 +} + +type BalanceChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + OldValue *BigInt `protobuf:"bytes,2,opt,name=old_value,json=oldValue,proto3" json:"old_value,omitempty"` + NewValue *BigInt `protobuf:"bytes,3,opt,name=new_value,json=newValue,proto3" json:"new_value,omitempty"` + Reason BalanceChange_Reason `protobuf:"varint,4,opt,name=reason,proto3,enum=sf.ethereum.type.v2.BalanceChange_Reason" json:"reason,omitempty"` + Ordinal uint64 `protobuf:"varint,5,opt,name=ordinal,proto3" json:"ordinal,omitempty"` +} + +func (x *BalanceChange) Reset() { + *x = BalanceChange{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BalanceChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BalanceChange) 
ProtoMessage() {} + +func (x *BalanceChange) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BalanceChange.ProtoReflect.Descriptor instead. +func (*BalanceChange) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{11} +} + +func (x *BalanceChange) GetAddress() []byte { + if x != nil { + return x.Address + } + return nil +} + +func (x *BalanceChange) GetOldValue() *BigInt { + if x != nil { + return x.OldValue + } + return nil +} + +func (x *BalanceChange) GetNewValue() *BigInt { + if x != nil { + return x.NewValue + } + return nil +} + +func (x *BalanceChange) GetReason() BalanceChange_Reason { + if x != nil { + return x.Reason + } + return BalanceChange_REASON_UNKNOWN +} + +func (x *BalanceChange) GetOrdinal() uint64 { + if x != nil { + return x.Ordinal + } + return 0 +} + +type NonceChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + OldValue uint64 `protobuf:"varint,2,opt,name=old_value,json=oldValue,proto3" json:"old_value,omitempty"` + NewValue uint64 `protobuf:"varint,3,opt,name=new_value,json=newValue,proto3" json:"new_value,omitempty"` + Ordinal uint64 `protobuf:"varint,4,opt,name=ordinal,proto3" json:"ordinal,omitempty"` +} + +func (x *NonceChange) Reset() { + *x = NonceChange{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NonceChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NonceChange) ProtoMessage() {} + 
+func (x *NonceChange) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NonceChange.ProtoReflect.Descriptor instead. +func (*NonceChange) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{12} +} + +func (x *NonceChange) GetAddress() []byte { + if x != nil { + return x.Address + } + return nil +} + +func (x *NonceChange) GetOldValue() uint64 { + if x != nil { + return x.OldValue + } + return 0 +} + +func (x *NonceChange) GetNewValue() uint64 { + if x != nil { + return x.NewValue + } + return 0 +} + +func (x *NonceChange) GetOrdinal() uint64 { + if x != nil { + return x.Ordinal + } + return 0 +} + +type AccountCreation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Account []byte `protobuf:"bytes,1,opt,name=account,proto3" json:"account,omitempty"` + Ordinal uint64 `protobuf:"varint,2,opt,name=ordinal,proto3" json:"ordinal,omitempty"` +} + +func (x *AccountCreation) Reset() { + *x = AccountCreation{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AccountCreation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AccountCreation) ProtoMessage() {} + +func (x *AccountCreation) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
AccountCreation.ProtoReflect.Descriptor instead. +func (*AccountCreation) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{13} +} + +func (x *AccountCreation) GetAccount() []byte { + if x != nil { + return x.Account + } + return nil +} + +func (x *AccountCreation) GetOrdinal() uint64 { + if x != nil { + return x.Ordinal + } + return 0 +} + +type CodeChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + OldHash []byte `protobuf:"bytes,2,opt,name=old_hash,json=oldHash,proto3" json:"old_hash,omitempty"` + OldCode []byte `protobuf:"bytes,3,opt,name=old_code,json=oldCode,proto3" json:"old_code,omitempty"` + NewHash []byte `protobuf:"bytes,4,opt,name=new_hash,json=newHash,proto3" json:"new_hash,omitempty"` + NewCode []byte `protobuf:"bytes,5,opt,name=new_code,json=newCode,proto3" json:"new_code,omitempty"` + Ordinal uint64 `protobuf:"varint,6,opt,name=ordinal,proto3" json:"ordinal,omitempty"` +} + +func (x *CodeChange) Reset() { + *x = CodeChange{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeChange) ProtoMessage() {} + +func (x *CodeChange) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeChange.ProtoReflect.Descriptor instead. 
+func (*CodeChange) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{14} +} + +func (x *CodeChange) GetAddress() []byte { + if x != nil { + return x.Address + } + return nil +} + +func (x *CodeChange) GetOldHash() []byte { + if x != nil { + return x.OldHash + } + return nil +} + +func (x *CodeChange) GetOldCode() []byte { + if x != nil { + return x.OldCode + } + return nil +} + +func (x *CodeChange) GetNewHash() []byte { + if x != nil { + return x.NewHash + } + return nil +} + +func (x *CodeChange) GetNewCode() []byte { + if x != nil { + return x.NewCode + } + return nil +} + +func (x *CodeChange) GetOrdinal() uint64 { + if x != nil { + return x.Ordinal + } + return 0 +} + +// The gas change model represents the reason why some gas cost has occurred. +// The gas is computed per actual op codes. Doing them completely might prove +// overwhelming in most cases. +// +// Hence, we only index some of them, those that are costy like all the calls +// one, log events, return data, etc. 
+type GasChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OldValue uint64 `protobuf:"varint,1,opt,name=old_value,json=oldValue,proto3" json:"old_value,omitempty"` + NewValue uint64 `protobuf:"varint,2,opt,name=new_value,json=newValue,proto3" json:"new_value,omitempty"` + Reason GasChange_Reason `protobuf:"varint,3,opt,name=reason,proto3,enum=sf.ethereum.type.v2.GasChange_Reason" json:"reason,omitempty"` + Ordinal uint64 `protobuf:"varint,4,opt,name=ordinal,proto3" json:"ordinal,omitempty"` +} + +func (x *GasChange) Reset() { + *x = GasChange{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GasChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GasChange) ProtoMessage() {} + +func (x *GasChange) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GasChange.ProtoReflect.Descriptor instead. 
+func (*GasChange) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{15} +} + +func (x *GasChange) GetOldValue() uint64 { + if x != nil { + return x.OldValue + } + return 0 +} + +func (x *GasChange) GetNewValue() uint64 { + if x != nil { + return x.NewValue + } + return 0 +} + +func (x *GasChange) GetReason() GasChange_Reason { + if x != nil { + return x.Reason + } + return GasChange_REASON_UNKNOWN +} + +func (x *GasChange) GetOrdinal() uint64 { + if x != nil { + return x.Ordinal + } + return 0 +} + +// HeaderOnlyBlock is used to optimally unpack the [Block] structure (note the +// corresponding message number for the `header` field) while consuming less +// memory, when only the `header` is desired. +// +// WARN: this is a client-side optimization pattern and should be moved in the +// consuming code. +type HeaderOnlyBlock struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Header *BlockHeader `protobuf:"bytes,5,opt,name=header,proto3" json:"header,omitempty"` +} + +func (x *HeaderOnlyBlock) Reset() { + *x = HeaderOnlyBlock{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderOnlyBlock) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderOnlyBlock) ProtoMessage() {} + +func (x *HeaderOnlyBlock) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderOnlyBlock.ProtoReflect.Descriptor instead. 
+func (*HeaderOnlyBlock) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{16} +} + +func (x *HeaderOnlyBlock) GetHeader() *BlockHeader { + if x != nil { + return x.Header + } + return nil +} + +// BlockWithRefs is a lightweight block, with traces and transactions +// purged from the `block` within, and only. It is used in transports +// to pass block data around. +type BlockWithRefs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Block *Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` + TransactionTraceRefs *TransactionRefs `protobuf:"bytes,3,opt,name=transaction_trace_refs,json=transactionTraceRefs,proto3" json:"transaction_trace_refs,omitempty"` + Irreversible bool `protobuf:"varint,4,opt,name=irreversible,proto3" json:"irreversible,omitempty"` +} + +func (x *BlockWithRefs) Reset() { + *x = BlockWithRefs{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlockWithRefs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlockWithRefs) ProtoMessage() {} + +func (x *BlockWithRefs) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlockWithRefs.ProtoReflect.Descriptor instead. 
+func (*BlockWithRefs) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{17} +} + +func (x *BlockWithRefs) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *BlockWithRefs) GetBlock() *Block { + if x != nil { + return x.Block + } + return nil +} + +func (x *BlockWithRefs) GetTransactionTraceRefs() *TransactionRefs { + if x != nil { + return x.TransactionTraceRefs + } + return nil +} + +func (x *BlockWithRefs) GetIrreversible() bool { + if x != nil { + return x.Irreversible + } + return false +} + +type TransactionTraceWithBlockRef struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Trace *TransactionTrace `protobuf:"bytes,1,opt,name=trace,proto3" json:"trace,omitempty"` + BlockRef *BlockRef `protobuf:"bytes,2,opt,name=block_ref,json=blockRef,proto3" json:"block_ref,omitempty"` +} + +func (x *TransactionTraceWithBlockRef) Reset() { + *x = TransactionTraceWithBlockRef{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactionTraceWithBlockRef) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactionTraceWithBlockRef) ProtoMessage() {} + +func (x *TransactionTraceWithBlockRef) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactionTraceWithBlockRef.ProtoReflect.Descriptor instead. 
+func (*TransactionTraceWithBlockRef) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{18} +} + +func (x *TransactionTraceWithBlockRef) GetTrace() *TransactionTrace { + if x != nil { + return x.Trace + } + return nil +} + +func (x *TransactionTraceWithBlockRef) GetBlockRef() *BlockRef { + if x != nil { + return x.BlockRef + } + return nil +} + +type TransactionRefs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Hashes [][]byte `protobuf:"bytes,1,rep,name=hashes,proto3" json:"hashes,omitempty"` +} + +func (x *TransactionRefs) Reset() { + *x = TransactionRefs{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactionRefs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactionRefs) ProtoMessage() {} + +func (x *TransactionRefs) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactionRefs.ProtoReflect.Descriptor instead. 
+func (*TransactionRefs) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{19} +} + +func (x *TransactionRefs) GetHashes() [][]byte { + if x != nil { + return x.Hashes + } + return nil +} + +type BlockRef struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + Number uint64 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"` +} + +func (x *BlockRef) Reset() { + *x = BlockRef{} + if protoimpl.UnsafeEnabled { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlockRef) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlockRef) ProtoMessage() {} + +func (x *BlockRef) ProtoReflect() protoreflect.Message { + mi := &file_sf_ethereum_type_v2_type_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlockRef.ProtoReflect.Descriptor instead. 
+func (*BlockRef) Descriptor() ([]byte, []int) { + return file_sf_ethereum_type_v2_type_proto_rawDescGZIP(), []int{20} +} + +func (x *BlockRef) GetHash() []byte { + if x != nil { + return x.Hash + } + return nil +} + +func (x *BlockRef) GetNumber() uint64 { + if x != nil { + return x.Number + } + return 0 +} + +var File_sf_ethereum_type_v2_type_proto protoreflect.FileDescriptor + +var file_sf_ethereum_type_v2_type_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x73, 0x66, 0x2f, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x13, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd0, 0x04, 0x0a, 0x05, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x68, 0x61, 0x73, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x12, 0x38, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x38, 0x0a, 0x06, 0x75, 0x6e, + 0x63, 0x6c, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x66, 0x2e, + 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 
0x76, 0x32, + 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x75, 0x6e, + 0x63, 0x6c, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x12, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0f, 0x62, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x0b, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, + 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x49, 0x0a, 0x0c, 0x64, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, + 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0b, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x12, 0x42, 0x0a, 0x0c, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, + 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6f, 0x64, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0b, 0x63, 0x6f, 0x64, 0x65, 0x43, + 0x68, 0x61, 0x6e, 0x67, 
0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x76, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x03, 0x76, 0x65, 0x72, 0x22, 0x3d, 0x0a, 0x0b, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x45, 0x54, 0x41, 0x49, + 0x4c, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x44, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x4c, 0x45, 0x56, 0x45, 0x4c, + 0x5f, 0x42, 0x41, 0x53, 0x45, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x28, 0x10, 0x29, 0x4a, 0x04, 0x08, + 0x29, 0x10, 0x2a, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x22, 0xa8, 0x06, 0x0a, 0x0b, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x6e, + 0x63, 0x6c, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, + 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x69, + 0x6e, 0x62, 0x61, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x63, 0x6f, 0x69, + 0x6e, 0x62, 0x61, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, + 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x6f, 0x6f, + 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x5f, 0x72, 0x6f, 0x6f, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, + 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 
0x67, 0x73, 0x5f, 0x62, 0x6c, 0x6f, + 0x6f, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x73, 0x42, 0x6c, + 0x6f, 0x6f, 0x6d, 0x12, 0x3b, 0x0a, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, + 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, + 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, + 0x67, 0x49, 0x6e, 0x74, 0x52, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, + 0x12, 0x46, 0x0a, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, + 0x75, 0x6c, 0x74, 0x79, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x73, 0x66, 0x2e, + 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x69, 0x67, 0x49, 0x6e, 0x74, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x44, 0x69, + 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x19, 0x0a, + 0x08, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x07, 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, 0x74, + 
0x61, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x69, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x69, 0x78, 0x48, 0x61, 0x73, 0x68, 0x12, 0x14, 0x0a, 0x05, + 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, 0x6e, + 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x44, 0x0a, 0x10, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x66, + 0x65, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x67, 0x61, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x67, 0x49, 0x6e, 0x74, 0x52, 0x0d, 0x62, + 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, 0x50, 0x65, 0x72, 0x47, 0x61, 0x73, 0x12, 0x29, 0x0a, 0x10, + 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x73, 0x5f, 0x72, 0x6f, 0x6f, 0x74, + 0x18, 0x13, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, + 0x61, 0x6c, 0x73, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x4b, 0x0a, 0x0d, 0x74, 0x78, 0x5f, 0x64, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x4e, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x41, 0x72, 0x72, 0x61, 0x79, 0x52, 0x0c, 0x74, 0x78, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, + 0x65, 0x6e, 0x63, 0x79, 0x22, 0x47, 0x0a, 0x11, 0x55, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x4e, 0x65, + 0x73, 0x74, 0x65, 0x64, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x03, 0x76, 0x61, 0x6c, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, + 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x69, 0x6e, + 0x74, 0x36, 0x34, 0x41, 0x72, 0x72, 
0x61, 0x79, 0x52, 0x03, 0x76, 0x61, 0x6c, 0x22, 0x1f, 0x0a, + 0x0b, 0x55, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x52, 0x03, 0x76, 0x61, 0x6c, 0x22, 0x1e, + 0x0a, 0x06, 0x42, 0x69, 0x67, 0x49, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0xea, + 0x09, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, + 0x61, 0x63, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x02, 0x74, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x67, 0x61, 0x73, + 0x5f, 0x70, 0x72, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x73, + 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x69, 0x67, 0x49, 0x6e, 0x74, 0x52, 0x08, 0x67, 0x61, 0x73, 0x50, 0x72, + 0x69, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x12, 0x31, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x67, 0x49, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x76, 0x12, 0x0c, 0x0a, 0x01, 0x72, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x01, 0x72, 0x12, 0x0c, 0x0a, 0x01, 0x73, 0x18, 0x09, 
0x20, 0x01, 0x28, 0x0c, + 0x52, 0x01, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x3e, + 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x73, + 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x41, + 0x0a, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x0e, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, + 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x54, 0x75, 0x70, 0x6c, 0x65, 0x52, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, + 0x74, 0x12, 0x42, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x70, 0x65, 0x72, + 0x5f, 0x67, 0x61, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x73, 0x66, 0x2e, + 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x69, 0x67, 0x49, 0x6e, 0x74, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x65, 0x65, 0x50, + 0x65, 0x72, 0x47, 0x61, 0x73, 0x12, 0x53, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x69, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x67, 0x61, + 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, + 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, + 0x67, 0x49, 0x6e, 0x74, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x46, 0x65, 0x65, 0x50, 0x65, 0x72, 0x47, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, + 0x64, 0x65, 
0x78, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x68, 0x61, 0x73, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x16, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x65, 0x67, 0x69, + 0x6e, 0x5f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0c, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x4f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x1f, 0x0a, + 0x0b, 0x65, 0x6e, 0x64, 0x5f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x1a, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0a, 0x65, 0x6e, 0x64, 0x4f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x43, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, + 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x41, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x18, 0x1f, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, + 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x52, 0x07, 0x72, + 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x12, 0x2f, 
0x0a, 0x05, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x18, + 0x20, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, + 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6c, 0x6c, + 0x52, 0x05, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x22, 0xb1, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x52, 0x58, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4c, 0x45, 0x47, + 0x41, 0x43, 0x59, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x54, 0x52, 0x58, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x01, 0x12, + 0x18, 0x0a, 0x14, 0x54, 0x52, 0x58, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x59, 0x4e, 0x41, + 0x4d, 0x49, 0x43, 0x5f, 0x46, 0x45, 0x45, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x54, 0x52, 0x58, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x52, 0x42, 0x49, 0x54, 0x52, 0x55, 0x4d, 0x5f, 0x44, + 0x45, 0x50, 0x4f, 0x53, 0x49, 0x54, 0x10, 0x64, 0x12, 0x1e, 0x0a, 0x1a, 0x54, 0x52, 0x58, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x52, 0x42, 0x49, 0x54, 0x52, 0x55, 0x4d, 0x5f, 0x55, 0x4e, + 0x53, 0x49, 0x47, 0x4e, 0x45, 0x44, 0x10, 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x54, 0x52, 0x58, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x52, 0x42, 0x49, 0x54, 0x52, 0x55, 0x4d, 0x5f, 0x43, 0x4f, + 0x4e, 0x54, 0x52, 0x41, 0x43, 0x54, 0x10, 0x66, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x52, 0x58, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x52, 0x42, 0x49, 0x54, 0x52, 0x55, 0x4d, 0x5f, 0x52, 0x45, + 0x54, 0x52, 0x59, 0x10, 0x68, 0x12, 0x26, 0x0a, 0x22, 0x54, 0x52, 0x58, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x41, 0x52, 0x42, 0x49, 0x54, 0x52, 0x55, 0x4d, 0x5f, 0x53, 0x55, 0x42, 0x4d, 0x49, + 0x54, 0x5f, 0x52, 0x45, 0x54, 0x52, 0x59, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x69, 0x12, 0x1e, 0x0a, + 0x1a, 0x54, 0x52, 0x58, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x52, 0x42, 0x49, 0x54, 0x52, + 0x55, 0x4d, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x6a, 0x12, 
0x1c, 0x0a, + 0x18, 0x54, 0x52, 0x58, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x52, 0x42, 0x49, 0x54, 0x52, + 0x55, 0x4d, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x78, 0x22, 0x4a, 0x0a, 0x0b, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, + 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x22, 0xb0, 0x01, 0x0a, 0x12, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2e, 0x0a, + 0x13, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x73, 0x5f, + 0x75, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x63, 0x75, 0x6d, 0x75, + 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x1d, 0x0a, + 0x0a, 0x6c, 0x6f, 0x67, 0x73, 0x5f, 0x62, 0x6c, 0x6f, 0x6f, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x73, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x12, 0x2c, 0x0a, 0x04, + 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x66, 0x2e, + 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4c, 0x6f, 0x67, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x22, 0x9b, 0x01, 0x0a, 0x03, 0x4c, + 0x6f, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, + 0x74, 0x6f, 0x70, 0x69, 
0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x74, 0x6f, + 0x70, 0x69, 0x63, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1e, + 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x18, + 0x0a, 0x07, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x07, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x22, 0xb2, 0x0a, 0x0a, 0x04, 0x43, 0x61, 0x6c, + 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, + 0x70, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x64, 0x65, 0x70, 0x74, 0x68, + 0x12, 0x3a, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x1d, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, + 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x63, 0x61, + 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x31, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x67, 0x49, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x21, + 0x0a, 0x0c, 0x67, 0x61, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x67, 0x61, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, + 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0c, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x69, 0x63, 0x69, 0x64, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x73, 0x75, 0x69, 0x63, 0x69, 0x64, 0x65, 0x12, 0x59, 0x0a, 0x10, 0x6b, 0x65, 0x63, 0x63, 0x61, + 0x6b, 0x5f, 0x70, 0x72, 0x65, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x2e, 0x4b, 0x65, 0x63, + 0x63, 0x61, 0x6b, 0x50, 0x72, 0x65, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0f, 0x6b, 0x65, 0x63, 0x63, 0x61, 0x6b, 0x50, 0x72, 0x65, 0x69, 0x6d, 0x61, 0x67, + 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x68, + 
0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x66, + 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, + 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, + 0x4b, 0x0a, 0x0f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x73, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, + 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x62, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x0d, + 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x18, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, + 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0c, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x19, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x67, 0x52, 0x04, 0x6c, 0x6f, 0x67, + 0x73, 0x12, 0x42, 0x0a, 0x0c, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x73, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, + 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, + 0x64, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0b, 0x63, 0x6f, 0x64, 0x65, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 
0x3f, 0x0a, 0x0b, 0x67, 0x61, 0x73, 0x5f, 0x63, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x73, 0x18, 0x1c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x66, 0x2e, + 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x47, 0x61, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0a, 0x67, 0x61, 0x73, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x65, 0x64, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x76, 0x65, + 0x72, 0x74, 0x65, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, + 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x66, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x65, 0x64, 0x18, 0x1e, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x76, 0x65, 0x72, 0x74, + 0x65, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x6f, 0x72, 0x64, 0x69, + 0x6e, 0x61, 0x6c, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x62, 0x65, 0x67, 0x69, 0x6e, + 0x4f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x64, 0x5f, 0x6f, + 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x20, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x65, 0x6e, + 0x64, 0x4f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x51, 0x0a, 0x11, 0x61, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x21, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 
0x65, 0x72, 0x65, 0x75, + 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x61, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x4b, + 0x65, 0x63, 0x63, 0x61, 0x6b, 0x50, 0x72, 0x65, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, + 0x04, 0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x1d, 0x10, 0x1e, 0x4a, 0x04, 0x08, 0x32, 0x10, + 0x33, 0x4a, 0x04, 0x08, 0x33, 0x10, 0x34, 0x4a, 0x04, 0x08, 0x3c, 0x10, 0x3d, 0x22, 0x8f, 0x01, + 0x0a, 0x0d, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6f, + 0x6c, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, + 0x6f, 0x6c, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6e, 0x65, 0x77, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x22, + 0xcc, 0x05, 0x0a, 0x0d, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 
0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x6f, + 0x6c, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x67, 0x49, 0x6e, 0x74, 0x52, 0x08, 0x6f, 0x6c, 0x64, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, + 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x69, 0x67, 0x49, 0x6e, 0x74, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x41, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x29, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x22, 0xcf, 0x03, 0x0a, + 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x53, 0x4f, + 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x52, + 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x57, 0x41, 0x52, 0x44, 0x5f, 0x4d, 0x49, 0x4e, + 0x45, 0x5f, 0x55, 0x4e, 0x43, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x45, 0x41, + 0x53, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x57, 0x41, 0x52, 0x44, 0x5f, 0x4d, 0x49, 0x4e, 0x45, 0x5f, + 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45, 0x41, 0x53, 0x4f, + 0x4e, 0x5f, 0x44, 0x41, 0x4f, 0x5f, 0x52, 0x45, 
0x46, 0x55, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, + 0x54, 0x52, 0x41, 0x43, 0x54, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x45, 0x41, 0x53, 0x4f, + 0x4e, 0x5f, 0x44, 0x41, 0x4f, 0x5f, 0x41, 0x44, 0x4a, 0x55, 0x53, 0x54, 0x5f, 0x42, 0x41, 0x4c, + 0x41, 0x4e, 0x43, 0x45, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, + 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x46, 0x45, 0x52, 0x10, 0x05, 0x12, 0x1a, 0x0a, 0x16, 0x52, + 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x53, 0x49, 0x53, 0x5f, 0x42, 0x41, + 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x06, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x53, 0x4f, + 0x4e, 0x5f, 0x47, 0x41, 0x53, 0x5f, 0x42, 0x55, 0x59, 0x10, 0x07, 0x12, 0x21, 0x0a, 0x1d, 0x52, + 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x57, 0x41, 0x52, 0x44, 0x5f, 0x54, 0x52, 0x41, + 0x4e, 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x45, 0x45, 0x10, 0x08, 0x12, 0x1b, + 0x0a, 0x17, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x57, 0x41, 0x52, 0x44, 0x5f, + 0x46, 0x45, 0x45, 0x5f, 0x52, 0x45, 0x53, 0x45, 0x54, 0x10, 0x0e, 0x12, 0x15, 0x0a, 0x11, 0x52, + 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x47, 0x41, 0x53, 0x5f, 0x52, 0x45, 0x46, 0x55, 0x4e, 0x44, + 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x54, 0x4f, 0x55, + 0x43, 0x48, 0x5f, 0x41, 0x43, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x0a, 0x12, 0x19, 0x0a, 0x15, + 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x53, 0x55, 0x49, 0x43, 0x49, 0x44, 0x45, 0x5f, 0x52, + 0x45, 0x46, 0x55, 0x4e, 0x44, 0x10, 0x0b, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x45, 0x41, 0x53, 0x4f, + 0x4e, 0x5f, 0x53, 0x55, 0x49, 0x43, 0x49, 0x44, 0x45, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x44, 0x52, + 0x41, 0x57, 0x10, 0x0d, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x43, + 0x41, 0x4c, 0x4c, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x5f, 0x4f, 0x56, 0x45, 0x52, + 0x52, 0x49, 0x44, 0x45, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x53, 
0x4f, 0x4e, + 0x5f, 0x42, 0x55, 0x52, 0x4e, 0x10, 0x0f, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x41, 0x53, 0x4f, + 0x4e, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x44, 0x52, 0x41, 0x57, 0x41, 0x4c, 0x10, 0x10, 0x22, 0x7b, + 0x0a, 0x0b, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x6c, 0x64, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6f, 0x6c, 0x64, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x22, 0x45, 0x0a, 0x0f, 0x41, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x07, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x72, 0x64, 0x69, + 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x69, 0x6e, + 0x61, 0x6c, 0x22, 0xac, 0x01, 0x0a, 0x0a, 0x43, 0x6f, 0x64, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6f, + 0x6c, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6f, + 0x6c, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x6c, 0x64, 0x5f, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6f, 0x6c, 0x64, 0x43, 0x6f, 0x64, + 0x65, 0x12, 0x19, 0x0a, 
0x08, 0x6e, 0x65, 0x77, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x48, 0x61, 0x73, 0x68, 0x12, 0x19, 0x0a, 0x08, + 0x6e, 0x65, 0x77, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x6e, 0x65, 0x77, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x72, 0x64, 0x69, 0x6e, + 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, + 0x6c, 0x22, 0xe0, 0x06, 0x0a, 0x09, 0x47, 0x61, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x1b, 0x0a, 0x09, 0x6f, 0x6c, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x6f, 0x6c, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x6e, 0x65, 0x77, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x08, 0x6e, 0x65, 0x77, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x72, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x73, 0x66, 0x2e, 0x65, + 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x47, 0x61, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x72, 0x64, 0x69, + 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x69, 0x6e, + 0x61, 0x6c, 0x22, 0xbf, 0x05, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x12, 0x0a, + 0x0e, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x43, 0x41, 0x4c, 0x4c, + 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x43, 0x41, 0x4c, + 0x4c, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x52, 0x45, 0x41, 0x53, + 0x4f, 0x4e, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x5f, 0x44, 0x41, 
0x54, 0x41, 0x5f, 0x43, 0x4f, 0x50, + 0x59, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, + 0x44, 0x45, 0x5f, 0x43, 0x4f, 0x50, 0x59, 0x10, 0x04, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, 0x41, + 0x53, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, + 0x10, 0x05, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, + 0x54, 0x52, 0x41, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x06, + 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x52, + 0x41, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x32, 0x10, 0x07, 0x12, + 0x18, 0x0a, 0x14, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x47, 0x41, + 0x54, 0x45, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x08, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x41, + 0x53, 0x4f, 0x4e, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x4f, 0x47, 0x10, 0x09, 0x12, + 0x18, 0x0a, 0x14, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x45, 0x58, 0x54, 0x5f, 0x43, 0x4f, + 0x44, 0x45, 0x5f, 0x43, 0x4f, 0x50, 0x59, 0x10, 0x0a, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x45, 0x41, + 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, + 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x0b, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, + 0x5f, 0x49, 0x4e, 0x54, 0x52, 0x49, 0x4e, 0x53, 0x49, 0x43, 0x5f, 0x47, 0x41, 0x53, 0x10, 0x0c, + 0x12, 0x1f, 0x0a, 0x1b, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x45, 0x43, 0x4f, + 0x4d, 0x50, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x52, 0x41, 0x43, 0x54, 0x10, + 0x0d, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x46, 0x55, + 0x4e, 0x44, 0x5f, 0x41, 0x46, 0x54, 0x45, 0x52, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, + 0x4f, 0x4e, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x52, + 
0x45, 0x54, 0x55, 0x52, 0x4e, 0x10, 0x0f, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x45, 0x41, 0x53, 0x4f, + 0x4e, 0x5f, 0x52, 0x45, 0x54, 0x55, 0x52, 0x4e, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x43, 0x4f, + 0x50, 0x59, 0x10, 0x10, 0x12, 0x11, 0x0a, 0x0d, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x52, + 0x45, 0x56, 0x45, 0x52, 0x54, 0x10, 0x11, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x41, 0x53, 0x4f, + 0x4e, 0x5f, 0x53, 0x45, 0x4c, 0x46, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x52, 0x55, 0x43, 0x54, 0x10, + 0x12, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, + 0x49, 0x43, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x13, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x45, 0x41, + 0x53, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x43, 0x4f, 0x4c, 0x44, 0x5f, 0x41, + 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x14, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x45, 0x41, 0x53, 0x4f, + 0x4e, 0x5f, 0x54, 0x58, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x4c, 0x5f, 0x42, 0x41, 0x4c, + 0x41, 0x4e, 0x43, 0x45, 0x10, 0x15, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, + 0x5f, 0x54, 0x58, 0x5f, 0x52, 0x45, 0x46, 0x55, 0x4e, 0x44, 0x53, 0x10, 0x16, 0x12, 0x20, 0x0a, + 0x1c, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x54, 0x58, 0x5f, 0x4c, 0x45, 0x46, 0x54, 0x5f, + 0x4f, 0x56, 0x45, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x55, 0x52, 0x4e, 0x45, 0x44, 0x10, 0x17, 0x12, + 0x1f, 0x0a, 0x1b, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x5f, 0x49, + 0x4e, 0x49, 0x54, 0x49, 0x41, 0x4c, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x18, + 0x12, 0x22, 0x0a, 0x1e, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x5f, + 0x4c, 0x45, 0x46, 0x54, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x55, 0x52, 0x4e, + 0x45, 0x44, 0x10, 0x19, 0x22, 0x4b, 0x0a, 0x0f, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4f, 0x6e, + 0x6c, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x38, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x20, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, + 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x22, 0xd1, 0x01, 0x0a, 0x0d, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x57, 0x69, 0x74, 0x68, 0x52, + 0x65, 0x66, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x30, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x5a, 0x0a, 0x16, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, + 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x73, 0x52, 0x14, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x65, 0x66, + 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x72, 0x72, 0x65, 0x76, 0x65, 0x72, 0x73, 0x69, 0x62, 0x6c, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x72, 0x72, 0x65, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x62, 0x6c, 0x65, 0x22, 0x97, 0x01, 0x0a, 0x1c, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x57, 0x69, 0x74, 0x68, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x66, 0x12, 0x3b, 0x0a, 0x05, 0x74, 0x72, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, + 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 
0x54, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x05, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x12, 0x3a, 0x0a, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x65, 0x66, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x73, 0x66, 0x2e, 0x65, 0x74, 0x68, 0x65, + 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x52, 0x65, 0x66, 0x52, 0x08, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x66, 0x22, + 0x29, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x66, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0c, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x36, 0x0a, 0x08, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x66, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x2a, 0x4e, 0x0a, 0x16, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x55, 0x43, + 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, + 0x45, 0x44, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x56, 0x45, 0x52, 0x54, 0x45, 0x44, + 0x10, 0x03, 0x2a, 0x59, 0x0a, 0x08, 0x43, 0x61, 0x6c, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, + 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x08, 0x0a, 0x04, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x41, 0x4c, + 0x4c, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x4c, 0x45, 0x47, + 0x41, 0x54, 
0x45, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x41, 0x54, 0x49, 0x43, 0x10, + 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x05, 0x42, 0x4f, 0x5a, + 0x4d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x66, 0x61, 0x73, 0x74, 0x2f, 0x66, 0x69, 0x72, 0x65, 0x68, 0x6f, + 0x73, 0x65, 0x2d, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x66, 0x2f, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x70, 0x62, 0x65, 0x74, 0x68, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sf_ethereum_type_v2_type_proto_rawDescOnce sync.Once + file_sf_ethereum_type_v2_type_proto_rawDescData = file_sf_ethereum_type_v2_type_proto_rawDesc +) + +func file_sf_ethereum_type_v2_type_proto_rawDescGZIP() []byte { + file_sf_ethereum_type_v2_type_proto_rawDescOnce.Do(func() { + file_sf_ethereum_type_v2_type_proto_rawDescData = protoimpl.X.CompressGZIP(file_sf_ethereum_type_v2_type_proto_rawDescData) + }) + return file_sf_ethereum_type_v2_type_proto_rawDescData +} + +var file_sf_ethereum_type_v2_type_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_sf_ethereum_type_v2_type_proto_msgTypes = make([]protoimpl.MessageInfo, 22) +var file_sf_ethereum_type_v2_type_proto_goTypes = []interface{}{ + (TransactionTraceStatus)(0), // 0: sf.ethereum.type.v2.TransactionTraceStatus + (CallType)(0), // 1: sf.ethereum.type.v2.CallType + (Block_DetailLevel)(0), // 2: sf.ethereum.type.v2.Block.DetailLevel + (TransactionTrace_Type)(0), // 3: sf.ethereum.type.v2.TransactionTrace.Type + (BalanceChange_Reason)(0), // 4: sf.ethereum.type.v2.BalanceChange.Reason + (GasChange_Reason)(0), // 5: sf.ethereum.type.v2.GasChange.Reason + (*Block)(nil), // 6: sf.ethereum.type.v2.Block + (*BlockHeader)(nil), // 7: sf.ethereum.type.v2.BlockHeader + 
(*Uint64NestedArray)(nil), // 8: sf.ethereum.type.v2.Uint64NestedArray + (*Uint64Array)(nil), // 9: sf.ethereum.type.v2.Uint64Array + (*BigInt)(nil), // 10: sf.ethereum.type.v2.BigInt + (*TransactionTrace)(nil), // 11: sf.ethereum.type.v2.TransactionTrace + (*AccessTuple)(nil), // 12: sf.ethereum.type.v2.AccessTuple + (*TransactionReceipt)(nil), // 13: sf.ethereum.type.v2.TransactionReceipt + (*Log)(nil), // 14: sf.ethereum.type.v2.Log + (*Call)(nil), // 15: sf.ethereum.type.v2.Call + (*StorageChange)(nil), // 16: sf.ethereum.type.v2.StorageChange + (*BalanceChange)(nil), // 17: sf.ethereum.type.v2.BalanceChange + (*NonceChange)(nil), // 18: sf.ethereum.type.v2.NonceChange + (*AccountCreation)(nil), // 19: sf.ethereum.type.v2.AccountCreation + (*CodeChange)(nil), // 20: sf.ethereum.type.v2.CodeChange + (*GasChange)(nil), // 21: sf.ethereum.type.v2.GasChange + (*HeaderOnlyBlock)(nil), // 22: sf.ethereum.type.v2.HeaderOnlyBlock + (*BlockWithRefs)(nil), // 23: sf.ethereum.type.v2.BlockWithRefs + (*TransactionTraceWithBlockRef)(nil), // 24: sf.ethereum.type.v2.TransactionTraceWithBlockRef + (*TransactionRefs)(nil), // 25: sf.ethereum.type.v2.TransactionRefs + (*BlockRef)(nil), // 26: sf.ethereum.type.v2.BlockRef + nil, // 27: sf.ethereum.type.v2.Call.KeccakPreimagesEntry + (*timestamppb.Timestamp)(nil), // 28: google.protobuf.Timestamp +} +var file_sf_ethereum_type_v2_type_proto_depIdxs = []int32{ + 7, // 0: sf.ethereum.type.v2.Block.header:type_name -> sf.ethereum.type.v2.BlockHeader + 7, // 1: sf.ethereum.type.v2.Block.uncles:type_name -> sf.ethereum.type.v2.BlockHeader + 11, // 2: sf.ethereum.type.v2.Block.transaction_traces:type_name -> sf.ethereum.type.v2.TransactionTrace + 17, // 3: sf.ethereum.type.v2.Block.balance_changes:type_name -> sf.ethereum.type.v2.BalanceChange + 2, // 4: sf.ethereum.type.v2.Block.detail_level:type_name -> sf.ethereum.type.v2.Block.DetailLevel + 20, // 5: sf.ethereum.type.v2.Block.code_changes:type_name -> sf.ethereum.type.v2.CodeChange 
+ 10, // 6: sf.ethereum.type.v2.BlockHeader.difficulty:type_name -> sf.ethereum.type.v2.BigInt + 10, // 7: sf.ethereum.type.v2.BlockHeader.total_difficulty:type_name -> sf.ethereum.type.v2.BigInt + 28, // 8: sf.ethereum.type.v2.BlockHeader.timestamp:type_name -> google.protobuf.Timestamp + 10, // 9: sf.ethereum.type.v2.BlockHeader.base_fee_per_gas:type_name -> sf.ethereum.type.v2.BigInt + 8, // 10: sf.ethereum.type.v2.BlockHeader.tx_dependency:type_name -> sf.ethereum.type.v2.Uint64NestedArray + 9, // 11: sf.ethereum.type.v2.Uint64NestedArray.val:type_name -> sf.ethereum.type.v2.Uint64Array + 10, // 12: sf.ethereum.type.v2.TransactionTrace.gas_price:type_name -> sf.ethereum.type.v2.BigInt + 10, // 13: sf.ethereum.type.v2.TransactionTrace.value:type_name -> sf.ethereum.type.v2.BigInt + 3, // 14: sf.ethereum.type.v2.TransactionTrace.type:type_name -> sf.ethereum.type.v2.TransactionTrace.Type + 12, // 15: sf.ethereum.type.v2.TransactionTrace.access_list:type_name -> sf.ethereum.type.v2.AccessTuple + 10, // 16: sf.ethereum.type.v2.TransactionTrace.max_fee_per_gas:type_name -> sf.ethereum.type.v2.BigInt + 10, // 17: sf.ethereum.type.v2.TransactionTrace.max_priority_fee_per_gas:type_name -> sf.ethereum.type.v2.BigInt + 0, // 18: sf.ethereum.type.v2.TransactionTrace.status:type_name -> sf.ethereum.type.v2.TransactionTraceStatus + 13, // 19: sf.ethereum.type.v2.TransactionTrace.receipt:type_name -> sf.ethereum.type.v2.TransactionReceipt + 15, // 20: sf.ethereum.type.v2.TransactionTrace.calls:type_name -> sf.ethereum.type.v2.Call + 14, // 21: sf.ethereum.type.v2.TransactionReceipt.logs:type_name -> sf.ethereum.type.v2.Log + 1, // 22: sf.ethereum.type.v2.Call.call_type:type_name -> sf.ethereum.type.v2.CallType + 10, // 23: sf.ethereum.type.v2.Call.value:type_name -> sf.ethereum.type.v2.BigInt + 27, // 24: sf.ethereum.type.v2.Call.keccak_preimages:type_name -> sf.ethereum.type.v2.Call.KeccakPreimagesEntry + 16, // 25: sf.ethereum.type.v2.Call.storage_changes:type_name -> 
sf.ethereum.type.v2.StorageChange + 17, // 26: sf.ethereum.type.v2.Call.balance_changes:type_name -> sf.ethereum.type.v2.BalanceChange + 18, // 27: sf.ethereum.type.v2.Call.nonce_changes:type_name -> sf.ethereum.type.v2.NonceChange + 14, // 28: sf.ethereum.type.v2.Call.logs:type_name -> sf.ethereum.type.v2.Log + 20, // 29: sf.ethereum.type.v2.Call.code_changes:type_name -> sf.ethereum.type.v2.CodeChange + 21, // 30: sf.ethereum.type.v2.Call.gas_changes:type_name -> sf.ethereum.type.v2.GasChange + 19, // 31: sf.ethereum.type.v2.Call.account_creations:type_name -> sf.ethereum.type.v2.AccountCreation + 10, // 32: sf.ethereum.type.v2.BalanceChange.old_value:type_name -> sf.ethereum.type.v2.BigInt + 10, // 33: sf.ethereum.type.v2.BalanceChange.new_value:type_name -> sf.ethereum.type.v2.BigInt + 4, // 34: sf.ethereum.type.v2.BalanceChange.reason:type_name -> sf.ethereum.type.v2.BalanceChange.Reason + 5, // 35: sf.ethereum.type.v2.GasChange.reason:type_name -> sf.ethereum.type.v2.GasChange.Reason + 7, // 36: sf.ethereum.type.v2.HeaderOnlyBlock.header:type_name -> sf.ethereum.type.v2.BlockHeader + 6, // 37: sf.ethereum.type.v2.BlockWithRefs.block:type_name -> sf.ethereum.type.v2.Block + 25, // 38: sf.ethereum.type.v2.BlockWithRefs.transaction_trace_refs:type_name -> sf.ethereum.type.v2.TransactionRefs + 11, // 39: sf.ethereum.type.v2.TransactionTraceWithBlockRef.trace:type_name -> sf.ethereum.type.v2.TransactionTrace + 26, // 40: sf.ethereum.type.v2.TransactionTraceWithBlockRef.block_ref:type_name -> sf.ethereum.type.v2.BlockRef + 41, // [41:41] is the sub-list for method output_type + 41, // [41:41] is the sub-list for method input_type + 41, // [41:41] is the sub-list for extension type_name + 41, // [41:41] is the sub-list for extension extendee + 0, // [0:41] is the sub-list for field type_name +} + +func init() { file_sf_ethereum_type_v2_type_proto_init() } +func file_sf_ethereum_type_v2_type_proto_init() { + if File_sf_ethereum_type_v2_type_proto != nil { + return + 
} + if !protoimpl.UnsafeEnabled { + file_sf_ethereum_type_v2_type_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Block); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlockHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Uint64NestedArray); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Uint64Array); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BigInt); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransactionTrace); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AccessTuple); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*TransactionReceipt); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Log); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Call); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StorageChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BalanceChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NonceChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AccountCreation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GasChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderOnlyBlock); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlockWithRefs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransactionTraceWithBlockRef); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransactionRefs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sf_ethereum_type_v2_type_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlockRef); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sf_ethereum_type_v2_type_proto_rawDesc, + NumEnums: 6, + NumMessages: 22, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sf_ethereum_type_v2_type_proto_goTypes, + 
DependencyIndexes: file_sf_ethereum_type_v2_type_proto_depIdxs, + EnumInfos: file_sf_ethereum_type_v2_type_proto_enumTypes, + MessageInfos: file_sf_ethereum_type_v2_type_proto_msgTypes, + }.Build() + File_sf_ethereum_type_v2_type_proto = out.File + file_sf_ethereum_type_v2_type_proto_rawDesc = nil + file_sf_ethereum_type_v2_type_proto_goTypes = nil + file_sf_ethereum_type_v2_type_proto_depIdxs = nil +} From 4bdf188d344e17d339cc5b4d06f36871fc943400 Mon Sep 17 00:00:00 2001 From: billettc Date: Wed, 15 Nov 2023 10:56:17 -0500 Subject: [PATCH 02/66] wip --- cmd/firecore/main.go | 31 +++++++++++++++++++++++++++++++ nodemanager/blockencoder.go | 18 ++++++++++++++++++ nodemanager/consolereader.go | 8 ++++++-- 3 files changed, 55 insertions(+), 2 deletions(-) create mode 100644 cmd/firecore/main.go create mode 100644 nodemanager/blockencoder.go diff --git a/cmd/firecore/main.go b/cmd/firecore/main.go new file mode 100644 index 0000000..d2d1b19 --- /dev/null +++ b/cmd/firecore/main.go @@ -0,0 +1,31 @@ +package main + +import ( + firecore "github.com/streamingfast/firehose-core" + "github.com/streamingfast/firehose-core/nodemanager" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" +) + +func main() { + firecore.Main(&firecore.Chain[*pbbstream.Block]{ + ShortName: "near", + LongName: "NEAR", + ExecutableName: "near-firehose-indexer", + FullyQualifiedModule: "github.com/streamingfast/firehose-near", + Version: version, + + Protocol: "NEA", + ProtocolVersion: 1, + + BlockFactory: func() firecore.Block { return new(pbbstream.Block) }, + + ConsoleReaderFactory: nodemanager.NewConsoleReader, + + Tools: &firecore.ToolsConfig[*pbnear.Block]{ + BlockPrinter: printBlock, + }, + }) +} + +// Version value, injected via go build `ldflags` at build time, **must** not be removed or inlined +var version = "dev" diff --git a/nodemanager/blockencoder.go b/nodemanager/blockencoder.go new file mode 100644 index 0000000..dc5d982 --- /dev/null +++ b/nodemanager/blockencoder.go @@ -0,0 
+1,18 @@ +package nodemanager + +import ( + "github.com/streamingfast/bstream" + firecore "github.com/streamingfast/firehose-core" +) + +type GenericBlockEncoder struct { +} + +func NewGenericBlockEncoder() *GenericBlockEncoder { + return &GenericBlockEncoder{} +} + +func (g GenericBlockEncoder) Encode(block firecore.Block) (blk *bstream.Block, err error) { + //TODO implement me + panic("implement me") +} diff --git a/nodemanager/consolereader.go b/nodemanager/consolereader.go index fe1cb03..9694a26 100644 --- a/nodemanager/consolereader.go +++ b/nodemanager/consolereader.go @@ -8,6 +8,10 @@ import ( "strings" "time" + "github.com/streamingfast/node-manager/mindreader" + + firecore "github.com/streamingfast/firehose-core" + "github.com/streamingfast/bstream" "github.com/streamingfast/logging" "go.uber.org/zap" @@ -36,7 +40,7 @@ type ConsoleReader struct { ctx *parseCtx } -func NewConsoleReader(lines chan string, logger *zap.Logger, tracer logging.Tracer) *ConsoleReader { +func NewConsoleReader(lines chan string, blockEncoder firecore.BlockEncoder, logger *zap.Logger, tracer logging.Tracer) (mindreader.ConsolerReader, error) { reader := &ConsoleReader{ lines: lines, close: func() {}, @@ -45,7 +49,7 @@ func NewConsoleReader(lines chan string, logger *zap.Logger, tracer logging.Trac logger: logger, tracer: tracer, } - return reader + return reader, nil } func (r *ConsoleReader) Done() <-chan interface{} { From 17d6e8297b93d295e636084b0ab46ddd6812082a Mon Sep 17 00:00:00 2001 From: billettc Date: Wed, 15 Nov 2023 15:45:00 -0500 Subject: [PATCH 03/66] WIP waiting for bstream little refactor ;-) --- .../blockencoder.go => blockencoder.go | 5 +- chain.go | 20 +- cmd/firecore/main.go | 17 +- .../consolereader.go => consolereader.go | 35 +- ...olereader_test.go => consolereader_test.go | 10 +- go.mod | 14 +- go.sum | 21 +- merger.go | 4 +- merger/CHANGELOG.md | 49 ++ merger/DESIGN.md | 54 ++ merger/README.md | 31 ++ merger/app/merger/app.go | 140 +++++ 
merger/app/merger/logging.go | 21 + merger/bundler.go | 249 +++++++++ merger/bundler_test.go | 170 +++++++ merger/bundlereader.go | 115 +++++ merger/bundlereader_test.go | 297 +++++++++++ merger/consts.go | 24 + merger/healthz.go | 43 ++ merger/healthz_test.go | 32 ++ merger/init_test.go | 25 + merger/merger.go | 244 +++++++++ merger/merger_io.go | 400 +++++++++++++++ merger/merger_io_test.go | 193 +++++++ merger/metrics/metrics.go | 23 + merger/server.go | 21 + ...1-20150730T152628.0-13406cb6-b1cb8fa3.dbin | Bin 0 -> 826 bytes ...2-20150730T152657.0-044698c9-13406cb6.dbin | Bin 0 -> 836 bytes ...3-20150730T152728.0-a88cf741-044698c9.dbin | Bin 0 -> 1458 bytes merger/utils.go | 95 ++++ node-manager/CHANGELOG.md | 59 +++ node-manager/LICENSE | 202 ++++++++ node-manager/README.md | 65 +++ node-manager/app/node_manager/app.go | 150 ++++++ node-manager/app/node_reader_stdin/app.go | 172 +++++++ node-manager/boot.sh | 100 ++++ node-manager/boot/eos_bp/config.ini | 29 ++ node-manager/boot/eos_bp/genesis.json | 4 + node-manager/boot/eos_jungle/config.ini | 44 ++ node-manager/boot/eos_jungle/genesis.json | 24 + node-manager/boot/eos_mainnet/config.ini | 27 + node-manager/boot/eos_mainnet/genesis.json | 23 + node-manager/clean.sh | 72 +++ .../log_plugin/keep_last_lines_log_plugin.go | 60 +++ .../keep_last_lines_log_plugin_test.go | 54 ++ node-manager/log_plugin/line_ring_buffer.go | 60 +++ node-manager/log_plugin/log_plugin.go | 68 +++ .../log_plugin/to_console_log_plugin.go | 80 +++ node-manager/log_plugin/to_zap_log_plugin.go | 130 +++++ .../log_plugin/to_zap_log_plugin_test.go | 141 +++++ node-manager/metrics/common.go | 33 ++ node-manager/mindreader/archiver.go | 116 +++++ node-manager/mindreader/file_uploader.go | 92 ++++ node-manager/mindreader/file_uploader_test.go | 43 ++ node-manager/mindreader/init_test.go | 25 + node-manager/mindreader/logging.go | 9 + node-manager/mindreader/mindreader.go | 357 +++++++++++++ node-manager/mindreader/mindreader_test.go | 166 
++++++ node-manager/monitor.go | 92 ++++ node-manager/operator/backuper.go | 212 ++++++++ node-manager/operator/backuper_test.go | 73 +++ node-manager/operator/errors.go | 19 + node-manager/operator/http_server.go | 212 ++++++++ node-manager/operator/operator.go | 481 ++++++++++++++++++ node-manager/serve.sh | 112 ++++ node-manager/superviser.go | 99 ++++ node-manager/superviser/superviser.go | 383 ++++++++++++++ node-manager/superviser/superviser_test.go | 137 +++++ node-manager/types.go | 25 + node-manager/utils.go | 23 + reader_node.go | 16 +- reader_node_stdin.go | 8 +- relayer.go | 4 +- relayer/CHANGELOG.md | 33 ++ relayer/LICENSE | 202 ++++++++ relayer/README.md | 66 +++ relayer/app/relayer/app.go | 118 +++++ relayer/app/relayer/logging.go | 21 + relayer/healthz.go | 46 ++ relayer/logging.go | 21 + relayer/metrics/metrics.go | 25 + relayer/relayer.go | 150 ++++++ .../genericsupervisor.go | 6 +- {nodemanager => superviser}/logging.go | 4 +- {nodemanager/test => test}/type_test.pb.go | 0 tools_download_from_firehose.go | 224 ++++---- types.go | 62 +-- 87 files changed, 7362 insertions(+), 269 deletions(-) rename nodemanager/blockencoder.go => blockencoder.go (57%) rename nodemanager/consolereader.go => consolereader.go (83%) rename nodemanager/consolereader_test.go => consolereader_test.go (91%) create mode 100644 merger/CHANGELOG.md create mode 100644 merger/DESIGN.md create mode 100644 merger/README.md create mode 100644 merger/app/merger/app.go create mode 100644 merger/app/merger/logging.go create mode 100644 merger/bundler.go create mode 100644 merger/bundler_test.go create mode 100644 merger/bundlereader.go create mode 100644 merger/bundlereader_test.go create mode 100644 merger/consts.go create mode 100644 merger/healthz.go create mode 100644 merger/healthz_test.go create mode 100644 merger/init_test.go create mode 100644 merger/merger.go create mode 100644 merger/merger_io.go create mode 100644 merger/merger_io_test.go create mode 100644 
merger/metrics/metrics.go create mode 100644 merger/server.go create mode 100644 merger/test_data/0000000001-20150730T152628.0-13406cb6-b1cb8fa3.dbin create mode 100644 merger/test_data/0000000002-20150730T152657.0-044698c9-13406cb6.dbin create mode 100644 merger/test_data/0000000003-20150730T152728.0-a88cf741-044698c9.dbin create mode 100644 merger/utils.go create mode 100644 node-manager/CHANGELOG.md create mode 100644 node-manager/LICENSE create mode 100644 node-manager/README.md create mode 100644 node-manager/app/node_manager/app.go create mode 100644 node-manager/app/node_reader_stdin/app.go create mode 100755 node-manager/boot.sh create mode 100644 node-manager/boot/eos_bp/config.ini create mode 100644 node-manager/boot/eos_bp/genesis.json create mode 100644 node-manager/boot/eos_jungle/config.ini create mode 100644 node-manager/boot/eos_jungle/genesis.json create mode 100644 node-manager/boot/eos_mainnet/config.ini create mode 100644 node-manager/boot/eos_mainnet/genesis.json create mode 100755 node-manager/clean.sh create mode 100644 node-manager/log_plugin/keep_last_lines_log_plugin.go create mode 100644 node-manager/log_plugin/keep_last_lines_log_plugin_test.go create mode 100644 node-manager/log_plugin/line_ring_buffer.go create mode 100644 node-manager/log_plugin/log_plugin.go create mode 100644 node-manager/log_plugin/to_console_log_plugin.go create mode 100644 node-manager/log_plugin/to_zap_log_plugin.go create mode 100644 node-manager/log_plugin/to_zap_log_plugin_test.go create mode 100644 node-manager/metrics/common.go create mode 100644 node-manager/mindreader/archiver.go create mode 100644 node-manager/mindreader/file_uploader.go create mode 100644 node-manager/mindreader/file_uploader_test.go create mode 100644 node-manager/mindreader/init_test.go create mode 100644 node-manager/mindreader/logging.go create mode 100644 node-manager/mindreader/mindreader.go create mode 100644 node-manager/mindreader/mindreader_test.go create mode 100644 
node-manager/monitor.go create mode 100644 node-manager/operator/backuper.go create mode 100644 node-manager/operator/backuper_test.go create mode 100644 node-manager/operator/errors.go create mode 100644 node-manager/operator/http_server.go create mode 100644 node-manager/operator/operator.go create mode 100755 node-manager/serve.sh create mode 100644 node-manager/superviser.go create mode 100644 node-manager/superviser/superviser.go create mode 100644 node-manager/superviser/superviser_test.go create mode 100644 node-manager/types.go create mode 100644 node-manager/utils.go create mode 100644 relayer/CHANGELOG.md create mode 100644 relayer/LICENSE create mode 100644 relayer/README.md create mode 100644 relayer/app/relayer/app.go create mode 100644 relayer/app/relayer/logging.go create mode 100644 relayer/healthz.go create mode 100644 relayer/logging.go create mode 100644 relayer/metrics/metrics.go create mode 100644 relayer/relayer.go rename nodemanager/supervisor.go => superviser/genericsupervisor.go (87%) rename {nodemanager => superviser}/logging.go (84%) rename {nodemanager/test => test}/type_test.pb.go (100%) diff --git a/nodemanager/blockencoder.go b/blockencoder.go similarity index 57% rename from nodemanager/blockencoder.go rename to blockencoder.go index dc5d982..be0784e 100644 --- a/nodemanager/blockencoder.go +++ b/blockencoder.go @@ -1,8 +1,7 @@ -package nodemanager +package firecore import ( "github.com/streamingfast/bstream" - firecore "github.com/streamingfast/firehose-core" ) type GenericBlockEncoder struct { @@ -12,7 +11,7 @@ func NewGenericBlockEncoder() *GenericBlockEncoder { return &GenericBlockEncoder{} } -func (g GenericBlockEncoder) Encode(block firecore.Block) (blk *bstream.Block, err error) { +func (g GenericBlockEncoder) Encode(block Block) (blk *bstream.Block, err error) { //TODO implement me panic("implement me") } diff --git a/chain.go b/chain.go index 70121da..ee7ea68 100644 --- a/chain.go +++ b/chain.go @@ -10,12 +10,11 @@ import ( 
"github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/streamingfast/bstream" + "github.com/streamingfast/firehose-core/node-manager/mindreader" + "github.com/streamingfast/firehose-core/node-manager/operator" "github.com/streamingfast/logging" - "github.com/streamingfast/node-manager/mindreader" - "github.com/streamingfast/node-manager/operator" "go.uber.org/multierr" "go.uber.org/zap" - "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/known/anypb" ) @@ -101,13 +100,6 @@ type Chain[B Block] struct { // for most chains. FirstStreamableBlock uint64 - // BlockFactory is a factory function that returns a new instance of your chain's Block. - // This new instance is usually used within `firecore` to unmarshal some bytes into your - // chain's specific block model and return a [proto.Message] fully instantiated. - // - // The [BlockFactory] **must** be non-nil and must return a non-nil [proto.Message]. - BlockFactory func() Block - BlockAcceptedVersions []int32 // ConsoleReaderFactory is the function that should return the `ConsoleReader` that knowns @@ -304,12 +296,6 @@ func (c *Chain[B]) Validate() { err = multierr.Append(err, fmt.Errorf("field 'Version' must be non-empty")) } - if c.BlockFactory == nil { - err = multierr.Append(err, fmt.Errorf("field 'BlockFactory' must be non-nil")) - } else if c.BlockFactory() == nil { - err = multierr.Append(err, fmt.Errorf("field 'BlockFactory' must not produce nil blocks")) - } - if c.ConsoleReaderFactory == nil { err = multierr.Append(err, fmt.Errorf("field 'ConsoleReaderFactory' must be non-nil")) } @@ -353,8 +339,6 @@ func (c *Chain[B]) Init() { c.BlockAcceptedVersions = []int32{c.ProtocolVersion} } - InitBstream(c.Protocol, c.ProtocolVersion, c.BlockAcceptedVersions, func() proto.Message { return c.BlockFactory() }) - c.BlockEncoder = NewBlockEncoder() } diff --git a/cmd/firecore/main.go b/cmd/firecore/main.go index d2d1b19..d1db1e2 100644 --- 
a/cmd/firecore/main.go +++ b/cmd/firecore/main.go @@ -2,28 +2,23 @@ package main import ( firecore "github.com/streamingfast/firehose-core" - "github.com/streamingfast/firehose-core/nodemanager" pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" ) func main() { firecore.Main(&firecore.Chain[*pbbstream.Block]{ - ShortName: "near", - LongName: "NEAR", - ExecutableName: "near-firehose-indexer", - FullyQualifiedModule: "github.com/streamingfast/firehose-near", + ShortName: "core", //used to compose the binary name + LongName: "CORE", //only used to compose cmd title and description + ExecutableName: "fire-core", //only used to set default value of reader-node-path, we should not provide a default value anymore ... + FullyQualifiedModule: "github.com/streamingfast/firehose-core", Version: version, Protocol: "NEA", ProtocolVersion: 1, - BlockFactory: func() firecore.Block { return new(pbbstream.Block) }, + ConsoleReaderFactory: supervisor.NewConsoleReader, - ConsoleReaderFactory: nodemanager.NewConsoleReader, - - Tools: &firecore.ToolsConfig[*pbnear.Block]{ - BlockPrinter: printBlock, - }, + Tools: &firecore.ToolsConfig[*pbbstream.Block]{}, }) } diff --git a/nodemanager/consolereader.go b/consolereader.go similarity index 83% rename from nodemanager/consolereader.go rename to consolereader.go index 9694a26..c2e3719 100644 --- a/nodemanager/consolereader.go +++ b/consolereader.go @@ -1,4 +1,4 @@ -package nodemanager +package firecore import ( "encoding/base64" @@ -8,11 +8,8 @@ import ( "strings" "time" - "github.com/streamingfast/node-manager/mindreader" - - firecore "github.com/streamingfast/firehose-core" - "github.com/streamingfast/bstream" + "github.com/streamingfast/firehose-core/node-manager/mindreader" "github.com/streamingfast/logging" "go.uber.org/zap" "google.golang.org/protobuf/proto" @@ -40,7 +37,7 @@ type ConsoleReader struct { ctx *parseCtx } -func NewConsoleReader(lines chan string, blockEncoder firecore.BlockEncoder, logger *zap.Logger, tracer 
logging.Tracer) (mindreader.ConsolerReader, error) { +func NewConsoleReader(lines chan string, blockEncoder BlockEncoder, logger *zap.Logger, tracer logging.Tracer) (mindreader.ConsolerReader, error) { reader := &ConsoleReader{ lines: lines, close: func() {}, @@ -116,7 +113,7 @@ func (ctx *parseCtx) readBlock(line string) (out *bstream.Block, err error) { blockHash := chunks[1] - _, err = strconv.ParseUint(chunks[2], 10, 64) + parentNum, err := strconv.ParseUint(chunks[2], 10, 64) if err != nil { return nil, fmt.Errorf("parsing parent num %q: %w", chunks[2], err) } @@ -137,8 +134,8 @@ func (ctx *parseCtx) readBlock(line string) (out *bstream.Block, err error) { payload, err := base64.StdEncoding.DecodeString(chunks[6]) - var blockPayload anypb.Any - if err := proto.Unmarshal(payload, &blockPayload); err != nil { + var blockPayload *anypb.Any + if err := proto.Unmarshal(payload, blockPayload); err != nil { return nil, fmt.Errorf("unmarshaling block payload: %w", err) } @@ -149,19 +146,13 @@ func (ctx *parseCtx) readBlock(line string) (out *bstream.Block, err error) { } block := &bstream.Block{ - Id: blockHash, - Number: blockNum, - PreviousId: parentHash, - //todo: missing ParentNumber - Timestamp: timestamp, - LibNum: libNum, - PayloadKind: 0, //todo: PayloadKind - PayloadVersion: 0, //todo: PayloadVersion - } - - block, err = bstream.MemoryBlockPayloadSetter(block, blockPayload.Value) - if err != nil { - return nil, fmt.Errorf("setting block payload: %w", err) + Id: blockHash, + Number: blockNum, + PreviousId: parentHash, + PreviousNum: parentNum, + Timestamp: timestamp, + LibNum: libNum, + Payload: blockPayload, } return block, nil diff --git a/nodemanager/consolereader_test.go b/consolereader_test.go similarity index 91% rename from nodemanager/consolereader_test.go rename to consolereader_test.go index dc6c297..6c1350e 100644 --- a/nodemanager/consolereader_test.go +++ b/consolereader_test.go @@ -1,4 +1,4 @@ -package nodemanager +package firecore import ( 
"encoding/base64" @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/streamingfast/firehose-core/nodemanager/test" + "github.com/streamingfast/firehose-core/test" "github.com/stretchr/testify/require" "go.uber.org/zap" "google.golang.org/protobuf/proto" @@ -59,9 +59,8 @@ func Test_Ctx_readBlock(t *testing.T) { require.Equal(t, uint64(libNumber), block.LibNum) require.Equal(t, time.Unix(0, nowNano), block.Timestamp) - blockPayload, err := block.Payload.Get() require.NoError(t, err) - require.Equal(t, anypbBlock.GetValue(), blockPayload) + require.Equal(t, anypbBlock.GetValue(), block.Payload.Value) } @@ -74,7 +73,8 @@ func (t *tracer) Enabled() bool { func Test_GetNext(t *testing.T) { lines := make(chan string, 2) - reader := NewConsoleReader(lines, zap.NewNop(), &tracer{}) + reader, err := NewConsoleReader(lines, NewBlockEncoder(), zap.NewNop(), &tracer{}) + require.NoError(t, err) initLine := "FIRE INIT 1.0 sf.ethereum.type.v2.Block" blockLine := "FIRE BLOCK 18571000 d2836a703a02f3ca2a13f05efe26fc48c6fa0db0d754a49e56b066d3b7d54659 18570999 55de88c909fa368ae1e93b6b8ffb3fbb12e64aefec1d4a1fcc27ae7633de2f81 18570800 1699992393935935000 Ci10eXBlLmdvb2dsZWFwaXMuY29tL3NmLmV0aGVyZXVtLnR5cGUudjIuQmxvY2sSJxIg0oNqcDoC88oqE/Be/ib8SMb6DbDXVKSeVrBm07fVRlkY+L3tCA==" diff --git a/go.mod b/go.mod index 93f6cfb..b9e3a3e 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 - github.com/streamingfast/bstream v0.0.2-0.20231109200242-92c3eea7aaba + github.com/streamingfast/bstream v0.0.2-0.20231115182919-10a5d61a80ab github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 github.com/streamingfast/dgrpc v0.0.0-20230929132851-893fc52687fa @@ -24,10 +24,7 @@ require ( github.com/streamingfast/index-builder v0.0.0-20221031203737-fa2e70f09dc2 github.com/streamingfast/jsonpb v0.0.0-20210811021341-3670f0aa02d0 
github.com/streamingfast/logging v0.0.0-20230608130331-f22c91403091 - github.com/streamingfast/merger v0.0.3-0.20231027161314-209c2ddd8d96 - github.com/streamingfast/node-manager v0.0.2-0.20230406142433-692298a8b8d2 - github.com/streamingfast/pbgo v0.0.6-0.20221020131607-255008258d28 - github.com/streamingfast/relayer v0.0.2-0.20220909122435-e67fbc964fd9 + github.com/streamingfast/pbgo v0.0.6-0.20231115160849-aa578f33a482 github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0 github.com/streamingfast/substreams v1.1.20 github.com/stretchr/testify v1.8.4 @@ -168,13 +165,12 @@ require ( github.com/spf13/afero v1.9.3 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/streamingfast/atm v0.0.0-20220131151839-18c87005e680 // indirect - github.com/streamingfast/dbin v0.9.1-0.20220513054835-1abebbb944ad // indirect + github.com/streamingfast/dbin v0.9.1-0.20220513054835-1abebbb944ad github.com/streamingfast/derr v0.0.0-20230515163924-8570aaa43fe1 // indirect github.com/streamingfast/dtracing v0.0.0-20220305214756-b5c0e8699839 // indirect github.com/streamingfast/opaque v0.0.0-20210811180740-0c01d37ea308 // indirect github.com/streamingfast/sf-tracing v0.0.0-20230616174903-cd2ade641ca9 // indirect - github.com/streamingfast/shutter v1.5.0 // indirect + github.com/streamingfast/shutter v1.5.0 github.com/subosito/gotenv v1.4.2 // indirect github.com/teris-io/shortid v0.0.0-20171029131806-771a37caa5cf // indirect github.com/tetratelabs/wazero v1.1.0 // indirect @@ -210,7 +206,7 @@ require ( google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/olivere/elastic.v3 v3.0.75 // indirect + gopkg.in/olivere/elastic.v3 v3.0.75 gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.25.0 // indirect diff --git a/go.sum b/go.sum index 476bd65..a6c5631 100644 --- a/go.sum 
+++ b/go.sum @@ -201,7 +201,6 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2U github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -575,10 +574,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/streamingfast/atm v0.0.0-20220131151839-18c87005e680 h1:fGJnUx0shX9Y312QOlz+/+yLquihXRhNqctJ26jtZZM= -github.com/streamingfast/atm v0.0.0-20220131151839-18c87005e680/go.mod h1:iISPGAstbUsPgyC3auLLi7PYUTi9lHv5z0COam0OPOY= -github.com/streamingfast/bstream v0.0.2-0.20231109200242-92c3eea7aaba h1:Ms7P4CTImBrfUsc+ULL3qitVZ1pHUWooF8qHjPlhYU0= -github.com/streamingfast/bstream v0.0.2-0.20231109200242-92c3eea7aaba/go.mod h1:Njkx972HcZiz0djWBylxqO/eq686eDGr+egQ1lePj3Q= +github.com/streamingfast/bstream v0.0.2-0.20231115182919-10a5d61a80ab h1:NED6em0qaVsCFlSL5HX2vo/xmDnNzGZxCjpCuDmLjPY= +github.com/streamingfast/bstream v0.0.2-0.20231115182919-10a5d61a80ab/go.mod h1:ryNdCDG4CCOo2QYctNFzAuNf3ITGhfTwbgRK0/VRDdQ= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 h1:UxJUTcEVkdZy8N77E3exz0iNlgQuxl4m220GPvzdZ2s= github.com/streamingfast/cli 
v0.0.4-0.20230825151644-8cc84512cd80/go.mod h1:QxjVH73Lkqk+mP8bndvhMuQDUINfkgsYhdCH/5TJFKI= github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 h1:49JYZkn8ALGe+LhcACZyX3L9B8tIxRZ3F3l+OxmNMhY= @@ -606,21 +603,18 @@ github.com/streamingfast/index-builder v0.0.0-20221031203737-fa2e70f09dc2/go.mod github.com/streamingfast/jsonpb v0.0.0-20210811021341-3670f0aa02d0 h1:g8eEYbFSykyzIyuxNMmHEUGGUvJE0ivmqZagLDK42gw= github.com/streamingfast/jsonpb v0.0.0-20210811021341-3670f0aa02d0/go.mod h1:cTNObq2Uofb330y05JbbZZ6RwE6QUXw5iVcHk1Fx3fk= github.com/streamingfast/logging v0.0.0-20210811175431-f3b44b61606a/go.mod h1:4GdqELhZOXj4xwc4IaBmzofzdErGynnaSzuzxy0ZIBo= -github.com/streamingfast/logging v0.0.0-20210908162127-bdc5856d5341/go.mod h1:4GdqELhZOXj4xwc4IaBmzofzdErGynnaSzuzxy0ZIBo= github.com/streamingfast/logging v0.0.0-20220304183711-ddba33d79e27/go.mod h1:4GdqELhZOXj4xwc4IaBmzofzdErGynnaSzuzxy0ZIBo= github.com/streamingfast/logging v0.0.0-20220304214715-bc750a74b424/go.mod h1:VlduQ80JcGJSargkRU4Sg9Xo63wZD/l8A5NC/Uo1/uU= github.com/streamingfast/logging v0.0.0-20230608130331-f22c91403091 h1:RN5mrigyirb8anBEtdjtHFIufXdacyTi6i4KBfeNXeo= github.com/streamingfast/logging v0.0.0-20230608130331-f22c91403091/go.mod h1:VlduQ80JcGJSargkRU4Sg9Xo63wZD/l8A5NC/Uo1/uU= -github.com/streamingfast/merger v0.0.3-0.20231027161314-209c2ddd8d96 h1:aq5hUjo+Y+3OUH2z1egyJ9fSepRvOzxgR+TYICdSEgE= -github.com/streamingfast/merger v0.0.3-0.20231027161314-209c2ddd8d96/go.mod h1:WGMs+zwpPQNfzRnOqnyNdQfyGSG4lXYWQacicAGiP4s= -github.com/streamingfast/node-manager v0.0.2-0.20230406142433-692298a8b8d2 h1:6Jdu6LBwaW38n2jjInFk1fM460cq+5paEAHGPPRWWN0= -github.com/streamingfast/node-manager v0.0.2-0.20230406142433-692298a8b8d2/go.mod h1:R5WwJuyNueq0QXKAFinTGU8zaON0hWJBFHX6KA9WZqk= +github.com/streamingfast/firehose-core/node-manager v0.0.2-0.20230406142433-692298a8b8d2 h1:6Jdu6LBwaW38n2jjInFk1fM460cq+5paEAHGPPRWWN0= +github.com/streamingfast/firehose-core/node-manager 
v0.0.2-0.20230406142433-692298a8b8d2/go.mod h1:R5WwJuyNueq0QXKAFinTGU8zaON0hWJBFHX6KA9WZqk= github.com/streamingfast/opaque v0.0.0-20210811180740-0c01d37ea308 h1:xlWSfi1BoPfsHtPb0VEHGUcAdBF208LUiFCwfaVPfLA= github.com/streamingfast/opaque v0.0.0-20210811180740-0c01d37ea308/go.mod h1:K1p8Bj/wG34KJvYzPUqtzpndffmpkrVY11u2hkyxCWQ= github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef h1:9IVFHRsqvI+vKJwgF1OMV6L55jHbaV/ZLoU4IAG/dME= github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef/go.mod h1:cq8CvbZ3ioFmGrHokSAJalS0lC+pVXLKhITScItUGXY= -github.com/streamingfast/pbgo v0.0.6-0.20221020131607-255008258d28 h1:wmQg8T0rIFl/R3dy97OWRi8OSdM3llvRw2p3TPFVKZQ= -github.com/streamingfast/pbgo v0.0.6-0.20221020131607-255008258d28/go.mod h1:huKwfgTGFIFZMKSVbD5TywClM7zAeBUG/zePZMqvXQQ= +github.com/streamingfast/pbgo v0.0.6-0.20231115160849-aa578f33a482 h1:eCL6jUDZoSmScqHsp5kiFyEGgo0B5jvCGp21oM7Ow0k= +github.com/streamingfast/pbgo v0.0.6-0.20231115160849-aa578f33a482/go.mod h1:fZuijmeFrqxW2YnnXmGrkQpUTHx3eHCaJUKwdvXAYKM= github.com/streamingfast/relayer v0.0.2-0.20220909122435-e67fbc964fd9 h1:V3LPBmTofZbmT46qQsr0lFa+0qDHZNJXgqLRo9iZBHY= github.com/streamingfast/relayer v0.0.2-0.20220909122435-e67fbc964fd9/go.mod h1:55E/1g+ojZoX86Odp48LFgceJVyh1xx9ZuhknKfmc/o= github.com/streamingfast/sf-tracing v0.0.0-20230616174903-cd2ade641ca9 h1:YRwpVvLYa+FEJlTy0S7mk4UptYjk5zac+A+ZE1phOeA= @@ -710,7 +704,6 @@ go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk= go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.1 
h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= @@ -722,7 +715,6 @@ go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= @@ -1151,7 +1143,6 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.58.0 h1:32JY8YpPMSR45K+c3o6b8VL73V+rR8k+DeMIr4vRH8o= google.golang.org/grpc v1.58.0/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= diff --git a/merger.go b/merger.go index fb523e9..cd1cf57 100644 --- a/merger.go +++ b/merger.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/streamingfast/dlauncher/launcher" - mergerApp "github.com/streamingfast/merger/app/merger" + "github.com/streamingfast/firehose-core/merger/app/merger" ) func registerMergerApp() { @@ -28,7 +28,7 @@ func registerMergerApp() { return nil, err } - return mergerApp.New(&mergerApp.Config{ + return 
merger.New(&merger.Config{ GRPCListenAddr: viper.GetString("merger-grpc-listen-addr"), PruneForkedBlocksAfter: viper.GetUint64("merger-prune-forked-blocks-after"), StorageOneBlockFilesPath: oneBlocksStoreURL, diff --git a/merger/CHANGELOG.md b/merger/CHANGELOG.md new file mode 100644 index 0000000..cf1bc3c --- /dev/null +++ b/merger/CHANGELOG.md @@ -0,0 +1,49 @@ +# Change log + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## Unreleased + +### BREAKING CHANGES: https://github.com/streamingfast/bstream/issues/22 +* Merger now only writes irreversible blocks in merged blocks +* Merger keeps the non-canonical one-block-files (forked blocks) until `MaxForkedBlockAgeBeforePruning` is passed, doing a pass at most once every `TimeBetweenPruning` +* Main loop will run at most once every `TimeBetweenPolling` + +## [v0.0.2] +### Changed +* Merger now deletes one-block-files that it has seen before exactly like the ones that are passed MaxFixableFork, based on DeleteBlocksBefore +* 'Live' option now changed to 'BatchMode' with the inverse behavior (for consistency with our other projects) +* 'SeenBlocksFile' option changed to 'StateFile' since it now contains highestSeenBlock, used to determine next start block. +* When merger is started, in live mode, it tries to get its start block from the state file. If it cannot, it locates starting point as before, by identifying the highest merged-blocks. + +### Added +* Config: `OneBlockDeletionThreads` to control how many one-block-files will be deleted in parallel on storage, 10 is a sane default, 1 is the minimum. +* Config: `MaxOneBlockOperationsBatchSize` to control how many files ahead to we read (also how many files we can put in deleting queue at a time.) 
Should be way more than the number of files that we need to merge in case of forks, 2000 is a sane default, 250 is the minimum + +### Removed +* Option DeleteBlocksBefore is now gone, it is the only behavior (not deleting one-block-files makes no sense for a merger) +* Option Progressfilename is now gone. BatchMode now has NO Progression option (not needed now that batch should mostly be done from mindreader...) + +### Improved +* Logging of OneBlockFile deletion now only called once per delete batch +* When someone else pushes a merged file, merger now detects it and reads the actual blocks to populate its seenblockscache, as discussed here: https://github.com/streamingfast/firehose-core/merger/issues/1 +* Fixed waiting time to actually use TimeBetweenStoreLookups instead of hardcoded value of 1 second when bundle is incomplete + +## [v0.0.1] +### Changed +* `--listen-grpc-addr` now is `--grpc-listen-addr` + +### Removed +* Removed the `protocol`, merger is not `protocol` agnostic +* Removed EnableReadinessProbe option in config, it is now the only behavior + +### Improved +* Logging was adjust at many places +* context now used in every dstore call for better timeout handling + +## 2020-03-21 + +### Changed + +* License changed to Apache 2.0 diff --git a/merger/DESIGN.md b/merger/DESIGN.md new file mode 100644 index 0000000..81cdaf4 --- /dev/null +++ b/merger/DESIGN.md @@ -0,0 +1,54 @@ +# DESIGN + +## REAL-TIME + +### On initialization +1) List merged files from the first-streamable-block, until a "hole" is found, +2) set this as the start-block + +ex: +* first-streamable-block == 12340 +* existing merged files: 12300, 12400, 12900 +* the start-block will be set to 12500 + +### processing loop + +#### 1. Polling the one-block-files to feed the Forkable Handler +* List one-block-files and decode their fields based on the filenames only +* skip any one-block-file that is < start-block +* Send these one-block-files in a "Forkable Handler" + +#### 2. 
Accumulating irreversible (final) blocks + +* When there are enough linkable one-blocks, the Forkable Handler will let the "irreversible blocks" through, feeding the Bundler one by one in a linear fashion (there should not be any hole there) +* When a one-block object comes through, its payload is read from the store (async, waitGroup()) so it is ready for the next step + +#### 3. Merging + +* When the Bundler receives an irreversible block that passes a boundary (ex: while loading bundle 100-199, we see the block 205) +* It waits for the one-block reading waitgroup +* It writes the merged file +* It deletes the one-block-files that were merged (the final/canonical ones) -- leaving only the forked blocks in the one-block-store +* It deletes any very old one-block-files (based on timestamp and max-forked-blocks-age + +### Providing unmerged blocks through GRPC + +* On request, the merger can send the accumulated irreversible blocks in the bundler through GRPC + +## OneBlock files naming + +{TIMESTAMP}-{BLOCKNUM}-{BLOCKIDSUFFIX}-{PREVIOUSIDSUFFIX}-{SOURCEID}.json.gz + +* TIMESTAMP: YYYYMMDDThhmmss.{0|5} where 0 and 5 are the possible values for 500-millisecond increments.. +* BLOCKNUM: 0-padded block number +* BLOCKIDSUFFIX: [:8] from block ID +* PREVIOUSIDSUFFIX: [:8] previousId for block +* SOURCEID: freeform string to identify who wrote the file. This is useful if you want multiple extractors writing to the same one-block-file store without concurrency issues. It is not part of the canonical form of the one-block-file. 
+ +Example: +* 20170701T122141.0-0000000100-24a07267-e5914b39-extractor-0.json.gz +* 20170701T122141.5-0000000101-dbda3f44-09f6d693-myhostname134.json.gz + + fmt.Sprintf("%s.%01d", t.Format("20060102T150405"), t.Nanosecond()/100000000) + + diff --git a/merger/README.md b/merger/README.md new file mode 100644 index 0000000..a64d0ad --- /dev/null +++ b/merger/README.md @@ -0,0 +1,31 @@ +# StreamingFast Merger + +[![reference](https://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](https://pkg.go.dev/github.com/streamingfast/firehose-core/merger) +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) + +The merger process is responsible for accumulating blocks from all +forks visible by the pool of instrumented nodes, and builds the famous +100-blocks files consumed by `bstream`'s _FileSource_ and may other +StreamingFast processes. + +## Design + +The Merger section of the official Firehose documentation provides additional information on its design details. + +https://firehose.streamingfast.io/concepts-and-architeceture/components#merger + +## Contributing + +**Issues and PR in this repo related strictly to the merger functionalities** + +Report any protocol-specific issues in their +[respective repositories](https://github.com/streamingfast/streamingfast#protocols) + +**Please first refer to the general +[streamingfast contribution guide](https://github.com/streamingfast/streamingfast/blob/master/CONTRIBUTING.md)**, +if you wish to contribute to this code base. + + +## License + +[Apache 2.0](LICENSE) diff --git a/merger/app/merger/app.go b/merger/app/merger/app.go new file mode 100644 index 0000000..52e6ae1 --- /dev/null +++ b/merger/app/merger/app.go @@ -0,0 +1,140 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package merger + +import ( + "context" + "fmt" + "time" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/dgrpc" + "github.com/streamingfast/dmetrics" + "github.com/streamingfast/dstore" + "github.com/streamingfast/firehose-core/merger" + "github.com/streamingfast/firehose-core/merger/metrics" + "github.com/streamingfast/shutter" + "go.uber.org/zap" + pbhealth "google.golang.org/grpc/health/grpc_health_v1" +) + +type Config struct { + StorageOneBlockFilesPath string + StorageMergedBlocksFilesPath string + StorageForkedBlocksFilesPath string + + GRPCListenAddr string + + PruneForkedBlocksAfter uint64 + + TimeBetweenPruning time.Duration + TimeBetweenPolling time.Duration + StopBlock uint64 +} + +type App struct { + *shutter.Shutter + config *Config + readinessProbe pbhealth.HealthClient +} + +func New(config *Config) *App { + return &App{ + Shutter: shutter.New(), + config: config, + } +} + +func (a *App) Run() error { + zlog.Info("running merger", zap.Reflect("config", a.config)) + + dmetrics.Register(metrics.MetricSet) + + oneBlockStoreStore, err := dstore.NewDBinStore(a.config.StorageOneBlockFilesPath) + if err != nil { + return fmt.Errorf("failed to init source archive store: %w", err) + } + + mergedBlocksStore, err := dstore.NewDBinStore(a.config.StorageMergedBlocksFilesPath) + if err != nil { + return fmt.Errorf("failed to init destination archive store: %w", err) + } + + var forkedBlocksStore dstore.Store + if a.config.StorageForkedBlocksFilesPath != "" { + forkedBlocksStore, err = 
dstore.NewDBinStore(a.config.StorageForkedBlocksFilesPath) + if err != nil { + return fmt.Errorf("failed to init destination archive store: %w", err) + } + } + + bundleSize := uint64(100) + + // we are setting the backoff here for dstoreIO + io := merger.NewDStoreIO( + zlog, + tracer, + oneBlockStoreStore, + mergedBlocksStore, + forkedBlocksStore, + 5, + 500*time.Millisecond, + bundleSize) + + m := merger.NewMerger( + zlog, + a.config.GRPCListenAddr, + io, + bstream.GetProtocolFirstStreamableBlock, + bundleSize, + a.config.PruneForkedBlocksAfter, + a.config.TimeBetweenPruning, + a.config.TimeBetweenPolling, + a.config.StopBlock, + ) + zlog.Info("merger initiated") + + gs, err := dgrpc.NewInternalClient(a.config.GRPCListenAddr) + if err != nil { + return fmt.Errorf("cannot create readiness probe") + } + a.readinessProbe = pbhealth.NewHealthClient(gs) + + a.OnTerminating(m.Shutdown) + m.OnTerminated(a.Shutdown) + + go m.Run() + + zlog.Info("merger running") + return nil +} + +func (a *App) IsReady() bool { + if a.readinessProbe == nil { + return false + } + + resp, err := a.readinessProbe.Check(context.Background(), &pbhealth.HealthCheckRequest{}) + if err != nil { + zlog.Info("merger readiness probe error", zap.Error(err)) + return false + } + + if resp.Status == pbhealth.HealthCheckResponse_SERVING { + return true + } + + return false +} diff --git a/merger/app/merger/logging.go b/merger/app/merger/logging.go new file mode 100644 index 0000000..c0ee237 --- /dev/null +++ b/merger/app/merger/logging.go @@ -0,0 +1,21 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package merger + +import ( + "github.com/streamingfast/logging" +) + +var zlog, tracer = logging.PackageLogger("merger", "github.com/streamingfast/firehose-core/merger/app/merger") diff --git a/merger/bundler.go b/merger/bundler.go new file mode 100644 index 0000000..bfa9299 --- /dev/null +++ b/merger/bundler.go @@ -0,0 +1,249 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package merger + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "sync" + "time" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/bstream/forkable" + "github.com/streamingfast/firehose-core/merger/metrics" + "github.com/streamingfast/logging" + "go.uber.org/zap" +) + +var ErrStopBlockReached = errors.New("stop block reached") +var ErrFirstBlockAfterInitialStreamableBlock = errors.New("received first block after inital streamable block") + +type Bundler struct { + sync.Mutex + + io IOInterface + + baseBlockNum uint64 + + bundleSize uint64 + bundleError chan error + inProcess sync.Mutex + stopBlock uint64 + enforceNextBlockOnBoundary bool + firstStreamableBlock uint64 + + seenBlockFiles map[string]*bstream.OneBlockFile + irreversibleBlocks []*bstream.OneBlockFile + forkable *forkable.Forkable + + logger *zap.Logger +} + +var logger, _ = logging.PackageLogger("merger", "github.com/streamingfast/firehose-core/merger/bundler") + +func NewBundler(startBlock, stopBlock, firstStreamableBlock, bundleSize uint64, io IOInterface) *Bundler { + b := &Bundler{ + bundleSize: bundleSize, + io: io, + bundleError: make(chan error, 1), + firstStreamableBlock: firstStreamableBlock, + stopBlock: stopBlock, + seenBlockFiles: make(map[string]*bstream.OneBlockFile), + logger: logger, + } + b.Reset(toBaseNum(startBlock, bundleSize), nil) + return b +} + +// BaseBlockNum can be called from a different thread +func (b *Bundler) BaseBlockNum() uint64 { + b.inProcess.Lock() + defer b.inProcess.Unlock() + // while inProcess is locked, all blocks below b.baseBlockNum are actually merged + return b.baseBlockNum +} + +func (b *Bundler) HandleBlockFile(obf *bstream.OneBlockFile) error { + b.seenBlockFiles[obf.CanonicalName] = obf + return b.forkable.ProcessBlock(obf.ToBstreamBlock(), obf) // forkable will call our own b.ProcessBlock() on irreversible blocks only +} + +func (b *Bundler) forkedBlocksInCurrentBundle() (out []*bstream.OneBlockFile) { + highBoundary := 
b.baseBlockNum + b.bundleSize + + // remove irreversible blocks from map (they will be merged and deleted soon) + for _, block := range b.irreversibleBlocks { + delete(b.seenBlockFiles, block.CanonicalName) + } + + // identify and then delete remaining blocks from map, return them as forks + for name, block := range b.seenBlockFiles { + if block.Num < b.baseBlockNum { + delete(b.seenBlockFiles, name) // too old, just cleaning up the map of lingering old blocks + } + if block.Num < highBoundary { + out = append(out, block) + delete(b.seenBlockFiles, name) + } + } + return +} + +func (b *Bundler) Reset(nextBase uint64, lib bstream.BlockRef) { + options := []forkable.Option{ + forkable.WithFilters(bstream.StepIrreversible), + forkable.HoldBlocksUntilLIB(), + forkable.WithWarnOnUnlinkableBlocks(100), // don't warn too soon, sometimes oneBlockFiles are uploaded out of order from mindreader (on remote I/O) + } + if lib != nil { + options = append(options, forkable.WithInclusiveLIB(lib)) + b.enforceNextBlockOnBoundary = false // we don't need to check first block because we know it will be linked to lib + } else { + b.enforceNextBlockOnBoundary = true + } + b.forkable = forkable.New(b, options...) 
+ + b.Lock() + b.baseBlockNum = nextBase + b.irreversibleBlocks = nil + b.Unlock() +} + +func readBlockTime(data []byte) (time.Time, error) { + reader := bytes.NewReader(data) + blockReader, err := bstream.GetBlockReaderFactory.New(reader) + if err != nil { + return time.Time{}, fmt.Errorf("unable to create block reader: %w", err) + } + blk, err := blockReader.Read() + if err != nil && err != io.EOF { + return time.Time{}, fmt.Errorf("block reader failed: %w", err) + } + return blk.Time(), nil +} + +func (b *Bundler) ProcessBlock(_ *bstream.Block, obj interface{}) error { + obf := obj.(bstream.ObjectWrapper).WrappedObject().(*bstream.OneBlockFile) + if obf.Num < b.baseBlockNum { + // we may be receiving an inclusive LIB just before our bundle, ignore it + return nil + } + + if b.enforceNextBlockOnBoundary { + if obf.Num != b.baseBlockNum && obf.Num != b.firstStreamableBlock { + //{"severity":"ERROR","timestamp":"2023-11-07T12:28:34.735713163-05:00","logger":"merger","message":"expecting to start at block `base_block_num` but got block `block_num` (and we have no previous blockID to align with..). First streamable block is configured to be: `first_streamable_block`", + //"base_block_num":22207900, + //"block_num":22208900, + //"first_streamable_block":22207900, + //"logging.googleapis.com/labels":{},"serviceContext":{"service":"unknown"}} + b.logger.Error( + "expecting to start at block `base_block_num` but got block `block_num` (and we have no previous blockID to align with..). 
First streamable block is configured to be: `first_streamable_block`", + zap.Uint64("base_block_num", b.baseBlockNum), + zap.Uint64("block_num", obf.Num), + zap.Uint64("first_streamable_block", b.firstStreamableBlock), + ) + return ErrFirstBlockAfterInitialStreamableBlock + } + b.enforceNextBlockOnBoundary = false + } + + if obf.Num < b.baseBlockNum+b.bundleSize { + b.Lock() + metrics.AppReadiness.SetReady() + b.irreversibleBlocks = append(b.irreversibleBlocks, obf) + metrics.HeadBlockNumber.SetUint64(obf.Num) + go func() { + // this pre-downloads the data + data, err := obf.Data(context.Background(), b.io.DownloadOneBlockFile) + if err != nil { + return + } + // now that we have the data, might as well read the block time for metrics + if time, err := readBlockTime(data); err == nil { + metrics.HeadBlockTimeDrift.SetBlockTime(time) + } + }() + b.Unlock() + return nil + } + + select { + case err := <-b.bundleError: + return err + default: + } + + forkedBlocks := b.forkedBlocksInCurrentBundle() + blocksToBundle := b.irreversibleBlocks + baseBlockNum := b.baseBlockNum + b.inProcess.Lock() + go func() { + defer b.inProcess.Unlock() + if err := b.io.MergeAndStore(context.Background(), baseBlockNum, blocksToBundle); err != nil { + b.bundleError <- err + return + } + if forkableIO, ok := b.io.(ForkAwareIOInterface); ok { + forkableIO.MoveForkedBlocks(context.Background(), forkedBlocks) + } + // we do not delete bundled blocks here, they get pruned later. 
keeping the blocks from the last bundle is useful for bootstrapping + }() + + b.Lock() + // we keep the last block of the bundle, only deleting it on next merge, to facilitate joining to one-block-filled hub + lastBlock := b.irreversibleBlocks[len(b.irreversibleBlocks)-1] + b.irreversibleBlocks = []*bstream.OneBlockFile{lastBlock, obf} + b.baseBlockNum += b.bundleSize + for obf.Num > b.baseBlockNum+b.bundleSize { // skip more merged-block-files + b.inProcess.Lock() + if err := b.io.MergeAndStore(context.Background(), b.baseBlockNum, []*bstream.OneBlockFile{lastBlock}); err != nil { // lastBlock will be excluded from bundle but is useful to bundler + return err + } + b.inProcess.Unlock() + b.baseBlockNum += b.bundleSize + } + b.Unlock() + + if b.stopBlock != 0 && b.baseBlockNum >= b.stopBlock { + return ErrStopBlockReached + } + + return nil +} + +// String can be called from a different thread +func (b *Bundler) String() string { + b.Lock() + defer b.Unlock() + + var firstBlock, lastBlock string + length := len(b.irreversibleBlocks) + if length != 0 { + firstBlock = b.irreversibleBlocks[0].String() + lastBlock = b.irreversibleBlocks[length-1].String() + } + + return fmt.Sprintf( + "bundle_size: %d, base_block_num: %d, first_block: %s, last_block: %s, length: %d", + b.bundleSize, + b.baseBlockNum, + firstBlock, + lastBlock, + length, + ) +} diff --git a/merger/bundler_test.go b/merger/bundler_test.go new file mode 100644 index 0000000..19d12fd --- /dev/null +++ b/merger/bundler_test.go @@ -0,0 +1,170 @@ +package merger + +import ( + // "context" + //"fmt" + + "context" + "testing" + + // "time" + + // "github.com/streamingfast/bstream" + //"github.com/streamingfast/firehose-core/merger/bundle" + "github.com/streamingfast/bstream" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var block98 = bstream.MustNewOneBlockFile("0000000098-0000000000000098a-0000000000000097a-96-suffix") +var block99 = 
bstream.MustNewOneBlockFile("0000000099-0000000000000099a-0000000000000098a-97-suffix") +var block100 = bstream.MustNewOneBlockFile("0000000100-0000000000000100a-0000000000000099a-98-suffix") +var block101 = bstream.MustNewOneBlockFile("0000000101-0000000000000101a-0000000000000100a-99-suffix") +var block102Final100 = bstream.MustNewOneBlockFile("0000000102-0000000000000102a-0000000000000101a-100-suffix") +var block103Final101 = bstream.MustNewOneBlockFile("0000000103-0000000000000103a-0000000000000102a-101-suffix") +var block104Final102 = bstream.MustNewOneBlockFile("0000000104-0000000000000104a-0000000000000103a-102-suffix") +var block105Final103 = bstream.MustNewOneBlockFile("0000000105-0000000000000105a-0000000000000104a-103-suffix") +var block106Final104 = bstream.MustNewOneBlockFile("0000000106-0000000000000106a-0000000000000105a-104-suffix") + +var block507Final106 = bstream.MustNewOneBlockFile("0000000507-0000000000000507a-0000000000000106a-106-suffix") +var block608Final507 = bstream.MustNewOneBlockFile("0000000608-0000000000000608a-0000000000000507a-507-suffix") +var block609Final608 = bstream.MustNewOneBlockFile("0000000609-0000000000000607a-0000000000000608a-608-suffix") + +func init() { + bstream.GetBlockReaderFactory = bstream.TestBlockReaderFactory +} + +func TestNewBundler(t *testing.T) { + b := NewBundler(100, 200, 2, 100, nil) + require.NotNil(t, b) + assert.EqualValues(t, 100, b.bundleSize) + assert.EqualValues(t, 200, b.stopBlock) + assert.NotNil(t, b.bundleError) + assert.NotNil(t, b.seenBlockFiles) +} + +func TestBundlerReset(t *testing.T) { + b := NewBundler(100, 200, 2, 2, nil) // merge every 2 blocks + + b.irreversibleBlocks = []*bstream.OneBlockFile{block100, block101} + b.Reset(102, block100.ToBstreamBlock().AsRef()) + assert.Nil(t, b.irreversibleBlocks) + assert.EqualValues(t, 102, b.baseBlockNum) + +} + +func TestBundlerMergeKeepOne(t *testing.T) { + + tests := []struct { + name string + inBlocks []*bstream.OneBlockFile + mergeSize 
uint64 + expectRemaining []*bstream.OneBlockFile + expectBase uint64 + expectMerged []uint64 + }{ + { + name: "vanilla", + inBlocks: []*bstream.OneBlockFile{ + block100, + block101, + block102Final100, + block103Final101, + block104Final102, + }, + mergeSize: 2, + expectRemaining: []*bstream.OneBlockFile{ + block101, + block102Final100, + }, + expectBase: 102, + expectMerged: []uint64{100}, + }, + { + name: "vanilla_plus_one", + inBlocks: []*bstream.OneBlockFile{ + block100, + block101, + block102Final100, + block103Final101, + block104Final102, + block105Final103, + }, + mergeSize: 2, + expectRemaining: []*bstream.OneBlockFile{ + block101, + block102Final100, + block103Final101, + }, + expectBase: 102, + expectMerged: []uint64{100}, + }, + { + name: "twoMerges", + inBlocks: []*bstream.OneBlockFile{ + block100, + block101, + block102Final100, + block103Final101, + block104Final102, + block105Final103, + block106Final104, + }, + mergeSize: 2, + expectRemaining: []*bstream.OneBlockFile{ + block103Final101, + block104Final102, + }, + expectBase: 104, + expectMerged: []uint64{100, 102}, + }, + { + name: "big_hole", + inBlocks: []*bstream.OneBlockFile{ + block100, + block101, + block102Final100, + block103Final101, + block104Final102, + block105Final103, + block106Final104, + block507Final106, + block608Final507, + block609Final608, + }, + mergeSize: 100, + expectRemaining: []*bstream.OneBlockFile{ + block507Final106, // last from bundle 500 + block608Final507, // the only irreversible block from current bundle + }, + expectBase: 600, + expectMerged: []uint64{100, 200, 300, 400, 500}, + }, + } + + for _, c := range tests { + + t.Run(c.name, func(t *testing.T) { + var merged []uint64 + b := NewBundler(100, 700, 2, c.mergeSize, &TestMergerIO{ + MergeAndStoreFunc: func(_ context.Context, inclusiveLowerBlock uint64, _ []*bstream.OneBlockFile) (err error) { + merged = append(merged, inclusiveLowerBlock) + return nil + }, + }) // merge every 2 blocks + b.irreversibleBlocks = 
[]*bstream.OneBlockFile{block100, block101} + + for _, blk := range c.inBlocks { + require.NoError(t, b.HandleBlockFile(blk)) + } + + // wait for MergeAndStore + b.inProcess.Lock() + b.inProcess.Unlock() + + assert.Equal(t, c.expectMerged, merged) + assert.Equal(t, c.expectRemaining, b.irreversibleBlocks) + assert.Equal(t, int(c.expectBase), int(b.baseBlockNum)) + }) + } +} diff --git a/merger/bundlereader.go b/merger/bundlereader.go new file mode 100644 index 0000000..94a10ea --- /dev/null +++ b/merger/bundlereader.go @@ -0,0 +1,115 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package merger + +import ( + "context" + "fmt" + "io" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/logging" + "go.uber.org/zap" +) + +type BundleReader struct { + ctx context.Context + readBuffer []byte + readBufferOffset int + oneBlockDataChan chan []byte + errChan chan error + + logger *zap.Logger +} + +func NewBundleReader(ctx context.Context, logger *zap.Logger, tracer logging.Tracer, oneBlockFiles []*bstream.OneBlockFile, anyOneBlockFile *bstream.OneBlockFile, oneBlockDownloader bstream.OneBlockDownloaderFunc) (*BundleReader, error) { + r := &BundleReader{ + ctx: ctx, + logger: logger, + oneBlockDataChan: make(chan []byte, 1), + errChan: make(chan error, 1), + } + + data, err := anyOneBlockFile.Data(ctx, oneBlockDownloader) + if err != nil { + return nil, fmt.Errorf("cannot read one_block_file to get header: %w", err) + } + if len(data) < bstream.GetBlockWriterHeaderLen { + return nil, fmt.Errorf("one-block-file corrupt: expected header size of %d, but file size is only %d bytes", bstream.GetBlockWriterHeaderLen, len(data)) + } + r.readBuffer = data[:bstream.GetBlockWriterHeaderLen] + + go r.downloadAll(oneBlockFiles, oneBlockDownloader) + + return r, nil +} + +// downloadAll does not work in parallel: for performance, the oneBlockFiles' data should already have been memoized by calling Data() on them. 
+func (r *BundleReader) downloadAll(oneBlockFiles []*bstream.OneBlockFile, oneBlockDownloader bstream.OneBlockDownloaderFunc) { + defer close(r.oneBlockDataChan) + for _, oneBlockFile := range oneBlockFiles { + data, err := oneBlockFile.Data(r.ctx, oneBlockDownloader) + if err != nil { + r.errChan <- err + return + } + r.oneBlockDataChan <- data + } +} + +func (r *BundleReader) Read(p []byte) (bytesRead int, err error) { + + if r.readBuffer == nil { + if err := r.fillBuffer(); err != nil { + return 0, err + } + } + + bytesRead = copy(p, r.readBuffer[r.readBufferOffset:]) + r.readBufferOffset += bytesRead + if r.readBufferOffset >= len(r.readBuffer) { + r.readBuffer = nil + } + + return bytesRead, nil +} + +func (r *BundleReader) fillBuffer() error { + var data []byte + select { + case d, ok := <-r.oneBlockDataChan: + if !ok { + return io.EOF + } + data = d + case err := <-r.errChan: + return err + case <-r.ctx.Done(): + return nil + } + + if len(data) == 0 { + r.readBuffer = nil + return fmt.Errorf("one-block-file corrupt: empty data") + } + + if len(data) < bstream.GetBlockWriterHeaderLen { + return fmt.Errorf("one-block-file corrupt: expected header size of %d, but file size is only %d bytes", bstream.GetBlockWriterHeaderLen, len(data)) + } + data = data[bstream.GetBlockWriterHeaderLen:] + r.readBuffer = data + r.readBufferOffset = 0 + return nil +} diff --git a/merger/bundlereader_test.go b/merger/bundlereader_test.go new file mode 100644 index 0000000..ce131c8 --- /dev/null +++ b/merger/bundlereader_test.go @@ -0,0 +1,297 @@ +package merger + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "path" + "testing" + "time" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/dbin" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBundleReader_ReadSimpleFiles(t *testing.T) { + bundle := NewTestBundle() + + bstream.GetBlockWriterHeaderLen = 0 + + r, err := NewBundleReader(context.Background(), 
testLogger, testTracer, bundle, bundle[0], nil) + require.NoError(t, err) + r1 := make([]byte, 4) + + read, err := r.Read(r1) + require.NoError(t, err, "reading header") + assert.Equal(t, 0, read) + + read, err = r.Read(r1) + require.NoError(t, err) + assert.Equal(t, 2, read) + assert.Equal(t, []byte{0x1, 0x2, 0x0, 0x0}, r1) + + read, err = r.Read(r1) + require.NoError(t, err) + assert.Equal(t, 2, read) + assert.Equal(t, []byte{0x3, 0x4, 0x0, 0x0}, r1) + + read, err = r.Read(r1) + require.NoError(t, err) + assert.Equal(t, 2, read) + assert.Equal(t, []byte{0x5, 0x6, 0x0, 0x0}, r1) + + read, err = r.Read(r1) + assert.Equal(t, 0, read) + assert.Equal(t, io.EOF, err) +} + +func TestBundleReader_ReadByChunk(t *testing.T) { + bundle := NewTestBundle() + + bstream.GetBlockWriterHeaderLen = 0 + + r, err := NewBundleReader(context.Background(), testLogger, testTracer, bundle, bundle[0], nil) + require.NoError(t, err) + r1 := make([]byte, 1) + + read, err := r.Read(r1) + require.NoError(t, err, "reading header") + assert.Equal(t, 0, read) + + read, err = r.Read(r1) + require.NoError(t, err) + assert.Equal(t, 1, read) + assert.Equal(t, []byte{0x1}, r1) + + read, err = r.Read(r1) + require.NoError(t, err) + assert.Equal(t, 1, read) + assert.Equal(t, []byte{0x2}, r1) + + read, err = r.Read(r1) + require.NoError(t, err) + assert.Equal(t, 1, read) + assert.Equal(t, []byte{0x3}, r1) + + read, err = r.Read(r1) + require.NoError(t, err) + assert.Equal(t, 1, read) + assert.Equal(t, []byte{0x4}, r1) + + read, err = r.Read(r1) + require.NoError(t, err) + assert.Equal(t, 1, read) + assert.Equal(t, []byte{0x5}, r1) + + read, err = r.Read(r1) + require.NoError(t, err) + assert.Equal(t, 1, read) + assert.Equal(t, []byte{0x6}, r1) + + _, err = r.Read(r1) + require.Equal(t, err, io.EOF) +} + +func TestBundleReader_Read_Then_Read_Block(t *testing.T) { + //important + bstream.GetBlockWriterHeaderLen = 10 + + bundle := []*bstream.OneBlockFile{ + NewTestOneBlockFileFromFile(t, 
"0000000001-20150730T152628.0-13406cb6-b1cb8fa3.dbin"), + NewTestOneBlockFileFromFile(t, "0000000002-20150730T152657.0-044698c9-13406cb6.dbin"), + NewTestOneBlockFileFromFile(t, "0000000003-20150730T152728.0-a88cf741-044698c9.dbin"), + } + + r, err := NewBundleReader(context.Background(), testLogger, testTracer, bundle, bundle[0], nil) + require.NoError(t, err) + allBlockData, err := ioutil.ReadAll(r) + require.NoError(t, err) + dbinReader := dbin.NewReader(bytes.NewReader(allBlockData)) + + //Reader header once + _, _, err = dbinReader.ReadHeader() + + //Block 1 + require.NoError(t, err) + b1, err := dbinReader.ReadMessage() + require.NoError(t, err) + require.Equal(t, b1, bundle[0].MemoizeData[14:]) + + //Block 2 + require.NoError(t, err) + b2, err := dbinReader.ReadMessage() + require.NoError(t, err) + require.Equal(t, b2, bundle[1].MemoizeData[14:]) + + //Block 3 + require.NoError(t, err) + b3, err := dbinReader.ReadMessage() + require.NoError(t, err) + require.Equal(t, b3, bundle[2].MemoizeData[14:]) +} + +func TestBundleReader_Read_DownloadOneBlockFileError(t *testing.T) { + bundle := NewDownloadBundle() + bstream.GetBlockWriterHeaderLen = 0 + + anyOB := &bstream.OneBlockFile{ + CanonicalName: "header", + MemoizeData: []byte{0x3, 0x4}, + } + + downloadOneBlockFile := func(ctx context.Context, oneBlockFile *bstream.OneBlockFile) (data []byte, err error) { + return nil, fmt.Errorf("some error") + } + r, err := NewBundleReader(context.Background(), testLogger, testTracer, bundle, anyOB, downloadOneBlockFile) + require.NoError(t, err) + r1 := make([]byte, 4) + + read, err := r.Read(r1) + require.NoError(t, err, "reading header") + require.Equal(t, 0, read) + + read, err = r.Read(r1) + require.Equal(t, 0, read) + require.Errorf(t, err, "some error") +} + +func TestBundleReader_Read_DownloadOneBlockFileCorrupt(t *testing.T) { + bstream.GetBlockWriterHeaderLen = 4 + + bundle := NewDownloadBundle() + + downloadOneBlockFile := func(ctx context.Context, oneBlockFile 
*bstream.OneBlockFile) (data []byte, err error) { + return []byte{0xAB, 0xCD, 0xEF}, nil // shorter than header length + } + + _, err := NewBundleReader(context.Background(), testLogger, testTracer, bundle, bundle[0], downloadOneBlockFile) + require.Error(t, err) +} + +func TestBundleReader_Read_DownloadOneBlockFileZeroLength(t *testing.T) { + bundle := NewDownloadBundle() + + bstream.GetBlockWriterHeaderLen = 2 + anyBlockFile := &bstream.OneBlockFile{ + CanonicalName: "header", + MemoizeData: []byte{0xa, 0xb}, + } + + downloadOneBlockFile := func(ctx context.Context, oneBlockFile *bstream.OneBlockFile) (data []byte, err error) { + return []byte{}, nil + } + + r, err := NewBundleReader(context.Background(), testLogger, testTracer, bundle, anyBlockFile, downloadOneBlockFile) + require.NoError(t, err) + r1 := make([]byte, 4) + + read, err := r.Read(r1) + require.Equal(t, 2, read, "header") + require.NoError(t, err) + + read, err = r.Read(r1) + require.Equal(t, read, 0) + require.Error(t, err, "EOF expected") +} + +func TestBundleReader_Read_ReadBufferNotNil(t *testing.T) { + bundle := NewDownloadBundle() + + bstream.GetBlockWriterHeaderLen = 2 + anyBlockFile := &bstream.OneBlockFile{ + CanonicalName: "header", + MemoizeData: []byte{0xa, 0xb}, + } + + downloadOneBlockFile := func(ctx context.Context, oneBlockFile *bstream.OneBlockFile) (data []byte, err error) { + return nil, fmt.Errorf("some error") + } + + r, err := NewBundleReader(context.Background(), testLogger, testTracer, bundle, anyBlockFile, downloadOneBlockFile) + require.NoError(t, err) + r.readBuffer = []byte{0xAB, 0xCD} + r1 := make([]byte, 4) + + read, err := r.Read(r1) + require.Equal(t, read, 2) + require.Nil(t, err) +} + +func TestBundleReader_Read_EmptyListOfOneBlockFiles(t *testing.T) { + bundle := NewDownloadBundle() + + bstream.GetBlockWriterHeaderLen = 2 + anyBlockFile := &bstream.OneBlockFile{ + CanonicalName: "header", + MemoizeData: []byte{0xa, 0xb}, + } + + downloadOneBlockFile := func(ctx 
context.Context, oneBlockFile *bstream.OneBlockFile) (data []byte, err error) { + return nil, fmt.Errorf("some error") + } + + r, err := NewBundleReader(context.Background(), testLogger, testTracer, bundle, anyBlockFile, downloadOneBlockFile) + require.NoError(t, err) + r1 := make([]byte, 4) + + read, err := r.Read(r1) + require.Equal(t, 2, read, "header") + require.NoError(t, err) + + read, err = r.Read(r1) + require.Equal(t, 0, read) + require.Errorf(t, err, "EOF") +} + +func NewTestOneBlockFileFromFile(t *testing.T, fileName string) *bstream.OneBlockFile { + t.Helper() + data, err := ioutil.ReadFile(path.Join("test_data", fileName)) + require.NoError(t, err) + time.Sleep(1 * time.Millisecond) + return &bstream.OneBlockFile{ + CanonicalName: fileName, + Filenames: map[string]bool{fileName: true}, + ID: "", + Num: 0, + PreviousID: "", + MemoizeData: data, + } +} + +func NewTestBundle() []*bstream.OneBlockFile { + bstream.GetBlockWriterHeaderLen = 0 + + o1 := &bstream.OneBlockFile{ + CanonicalName: "o1", + MemoizeData: []byte{0x1, 0x2}, + } + o2 := &bstream.OneBlockFile{ + CanonicalName: "o2", + MemoizeData: []byte{0x3, 0x4}, + } + o3 := &bstream.OneBlockFile{ + CanonicalName: "o3", + MemoizeData: []byte{0x5, 0x6}, + } + return []*bstream.OneBlockFile{o1, o2, o3} +} + +func NewDownloadBundle() []*bstream.OneBlockFile { + o1 := &bstream.OneBlockFile{ + CanonicalName: "o1", + MemoizeData: []byte{}, + } + o2 := &bstream.OneBlockFile{ + CanonicalName: "o2", + MemoizeData: []byte{}, + } + o3 := &bstream.OneBlockFile{ + CanonicalName: "o3", + MemoizeData: []byte{}, + } + return []*bstream.OneBlockFile{o1, o2, o3} +} diff --git a/merger/consts.go b/merger/consts.go new file mode 100644 index 0000000..585cc91 --- /dev/null +++ b/merger/consts.go @@ -0,0 +1,24 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package merger + +import "time" + +var ListFilesTimeout = 10 * time.Minute +var WriteObjectTimeout = 5 * time.Minute +var GetObjectTimeout = 5 * time.Minute +var DeleteObjectTimeout = 5 * time.Minute + +const ParallelOneBlockDownload = 2 diff --git a/merger/healthz.go b/merger/healthz.go new file mode 100644 index 0000000..cc50dfd --- /dev/null +++ b/merger/healthz.go @@ -0,0 +1,43 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package merger + +import ( + "context" + + pbhealth "google.golang.org/grpc/health/grpc_health_v1" +) + +// Check is basic GRPC Healthcheck +func (m *Merger) Check(ctx context.Context, in *pbhealth.HealthCheckRequest) (*pbhealth.HealthCheckResponse, error) { + status := pbhealth.HealthCheckResponse_SERVING + return &pbhealth.HealthCheckResponse{ + Status: status, + }, nil +} + +// Watch is basic GRPC Healthcheck as a stream +func (m *Merger) Watch(req *pbhealth.HealthCheckRequest, stream pbhealth.Health_WatchServer) error { + err := stream.Send(&pbhealth.HealthCheckResponse{ + Status: pbhealth.HealthCheckResponse_SERVING, + }) + if err != nil { + return err + } + + // The merger is always serving, so just want until this stream is canceled out + <-stream.Context().Done() + return nil +} diff --git a/merger/healthz_test.go b/merger/healthz_test.go new file mode 100644 index 0000000..697c536 --- /dev/null +++ b/merger/healthz_test.go @@ -0,0 +1,32 @@ +package merger + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + pbhealth "google.golang.org/grpc/health/grpc_health_v1" +) + +func TestHealthz_Check(t *testing.T) { + ctx := context.Background() + m := NewMerger( + testLogger, + "6969", + nil, + 1, + 100, + 100, + time.Second, + time.Second, + 0, + ) + request := &pbhealth.HealthCheckRequest{} + resp, err := m.Check(ctx, request) + if err != nil { + panic(err) + } + + require.Equal(t, resp.Status, pbhealth.HealthCheckResponse_SERVING) +} diff --git a/merger/init_test.go b/merger/init_test.go new file mode 100644 index 0000000..e6f09a1 --- /dev/null +++ b/merger/init_test.go @@ -0,0 +1,25 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package merger + +import ( + "github.com/streamingfast/logging" +) + +var testLogger, testTracer = logging.PackageLogger("merger", "github.com/streamingfast/firehose-core/merger_tests") + +func init() { + logging.InstantiateLoggers() +} diff --git a/merger/merger.go b/merger/merger.go new file mode 100644 index 0000000..8181dce --- /dev/null +++ b/merger/merger.go @@ -0,0 +1,244 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package merger + +import ( + "context" + "errors" + "time" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/shutter" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +type Merger struct { + *shutter.Shutter + grpcListenAddr string + + io IOInterface + firstStreamableBlock uint64 + logger *zap.Logger + + timeBetweenPolling time.Duration + + timeBetweenPruning time.Duration + pruningDistanceToLIB uint64 + + bundler *Bundler +} + +func NewMerger( + logger *zap.Logger, + grpcListenAddr string, + io IOInterface, + + firstStreamableBlock uint64, + bundleSize uint64, + pruningDistanceToLIB uint64, + timeBetweenPruning time.Duration, + timeBetweenPolling time.Duration, + stopBlock uint64, +) *Merger { + m := &Merger{ + Shutter: shutter.New(), + bundler: NewBundler(firstStreamableBlock, stopBlock, firstStreamableBlock, bundleSize, io), + grpcListenAddr: grpcListenAddr, + io: io, + firstStreamableBlock: firstStreamableBlock, + pruningDistanceToLIB: pruningDistanceToLIB, + timeBetweenPolling: timeBetweenPolling, + timeBetweenPruning: timeBetweenPruning, + logger: logger, + } + m.OnTerminating(func(_ error) { m.bundler.inProcess.Lock(); m.bundler.inProcess.Unlock() }) // finish bundle that may be merging async + + return m +} + +func (m *Merger) Run() { + m.logger.Info("starting merger") + + m.startGRPCServer() + + m.startOldFilesPruner() + m.startForkedBlocksPruner() + + err := m.run() + if err != nil { + m.logger.Error("merger returned error", zap.Error(err)) + } + m.Shutdown(err) +} + +func (m *Merger) startForkedBlocksPruner() { + forkableIO, ok := m.io.(ForkAwareIOInterface) + if !ok { + return + } + m.logger.Info("starting pruning of forked files", + zap.Uint64("pruning_distance_to_lib", m.pruningDistanceToLIB), + zap.Duration("time_between_pruning", m.timeBetweenPruning), + ) + + go func() { + delay := m.timeBetweenPruning // do not start pruning immediately + for { + time.Sleep(delay) + now := time.Now() + + pruningTarget := 
m.pruningTarget(m.pruningDistanceToLIB) + forkableIO.DeleteForkedBlocksAsync(bstream.GetProtocolFirstStreamableBlock, pruningTarget) + + if spentTime := time.Since(now); spentTime < m.timeBetweenPruning { + delay = m.timeBetweenPruning - spentTime + } + } + }() + +} + +func (m *Merger) startOldFilesPruner() { + m.logger.Info("starting pruning of unused (old) one-block-files", + zap.Uint64("pruning_distance_to_lib", m.bundler.bundleSize), + zap.Duration("time_between_pruning", m.timeBetweenPruning), + ) + go func() { + delay := m.timeBetweenPruning // do not start pruning immediately + + unfinishedDelay := time.Second * 5 + if unfinishedDelay > delay { + unfinishedDelay = delay / 2 + } + + ctx := context.Background() + for { + time.Sleep(delay) + + var toDelete []*bstream.OneBlockFile + + pruningTarget := m.pruningTarget(m.bundler.bundleSize) + if pruningTarget == 0 { + m.logger.Debug("skipping file deletion until we have a pruning target") + continue + } + + delay = m.timeBetweenPruning + err := m.io.WalkOneBlockFiles(ctx, m.firstStreamableBlock, func(obf *bstream.OneBlockFile) error { + if obf.Num < pruningTarget { + toDelete = append(toDelete, obf) + } + if len(toDelete) >= DefaultFilesDeleteBatchSize { + delay = unfinishedDelay + return ErrStopBlockReached + } + return nil + }) + if err != nil && !errors.Is(err, ErrStopBlockReached) { + m.logger.Warn("error while walking oneBlockFiles", zap.Error(err)) + } + + m.io.DeleteAsync(toDelete) + } + }() +} + +func (m *Merger) pruningTarget(distance uint64) uint64 { + bundlerBase := m.bundler.BaseBlockNum() + if distance > bundlerBase { + return 0 + } + + return bundlerBase - distance +} + +func (m *Merger) run() error { + ctx := context.Background() + + var holeFoundLogged bool + for { + now := time.Now() + if m.IsTerminating() { + return nil + } + + base, lib, err := m.io.NextBundle(ctx, m.bundler.baseBlockNum) + if err != nil { + if errors.Is(err, ErrHoleFound) { + if holeFoundLogged { + m.logger.Debug("found hole in 
merged files. this is not normal behavior unless reprocessing batches", zap.Error(err)) + } else { + holeFoundLogged = true + m.logger.Warn("found hole in merged files (next occurrence will show up as Debug)", zap.Error(err)) + } + } else { + return err + } + } + + if m.bundler.stopBlock != 0 && base > m.bundler.stopBlock { + if err == ErrStopBlockReached { + m.logger.Info("stop block reached") + return nil + } + } + + if base > m.bundler.baseBlockNum { + logFields := []zapcore.Field{ + zap.Uint64("previous_base_block_num", m.bundler.baseBlockNum), + zap.Uint64("new_base_block_num", base), + } + if lib != nil { + logFields = append(logFields, zap.Stringer("lib", lib)) + } + m.logger.Info("resetting bundler base block num", logFields...) + m.bundler.Reset(base, lib) + } + + var walkErr error + retryErr := Retry(m.logger, 12, 5*time.Second, func() error { + err = m.io.WalkOneBlockFiles(ctx, m.bundler.baseBlockNum, func(obf *bstream.OneBlockFile) error { + return m.bundler.HandleBlockFile(obf) + }) + + if err == ErrFirstBlockAfterInitialStreamableBlock { + m.bundler.Reset(base, lib) + return err + } + + if err != nil { + walkErr = err + } + return nil + }) + + if retryErr != nil { + return retryErr + } + + if walkErr != nil { + if walkErr == ErrStopBlockReached { + m.logger.Info("stop block reached") + return nil + } + return walkErr + } + + if spentTime := time.Since(now); spentTime < m.timeBetweenPolling { + time.Sleep(m.timeBetweenPolling - spentTime) + } + } +} diff --git a/merger/merger_io.go b/merger/merger_io.go new file mode 100644 index 0000000..0b928e9 --- /dev/null +++ b/merger/merger_io.go @@ -0,0 +1,400 @@ +package merger + +import ( + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/dstore" + "github.com/streamingfast/firehose-core/merger/metrics" + "github.com/streamingfast/logging" + "go.uber.org/zap" +) + +var ErrHoleFound = 
errors.New("hole found in merged files")

// Batch size and worker count for the background one-block-file deleter.
var DefaultFilesDeleteBatchSize = 10000
var DefaultFilesDeleteThreads = 8

type IOInterface interface {

	// NextBundle will read through consecutive merged blocks, starting at `lowestBaseBlock`, and return the next bundle that needs to be created
	// If it finds an existing merged file at `lowestBaseBlock`, it will read the last one and include the lastIrreversibleBlock so you can bootstrap your forkdb from there
	NextBundle(ctx context.Context, lowestBaseBlock uint64) (baseBlock uint64, lastIrreversibleBlock bstream.BlockRef, err error)

	// WalkOneBlockFiles calls your function for each oneBlockFile it reads, starting at the inclusiveLowerBlock. Useful to feed a block source
	WalkOneBlockFiles(ctx context.Context, inclusiveLowerBlock uint64, callback func(*bstream.OneBlockFile) error) error

	// MergeAndStore writes a merged file from a list of oneBlockFiles
	MergeAndStore(ctx context.Context, inclusiveLowerBlock uint64, oneBlockFiles []*bstream.OneBlockFile) (err error)

	// DownloadOneBlockFile will get you the data from the file
	DownloadOneBlockFile(ctx context.Context, oneBlockFile *bstream.OneBlockFile) (data []byte, err error)

	// DeleteAsync should be able to delete large quantities of oneBlockFiles from storage without ever blocking
	DeleteAsync(oneBlockFiles []*bstream.OneBlockFile) error
}

type ForkAwareIOInterface interface {
	// DeleteForkedBlocksAsync will delete forked blocks between lowBoundary and highBoundary (both inclusive)
	DeleteForkedBlocksAsync(inclusiveLowBoundary, inclusiveHighBoundary uint64)

	// MoveForkedBlocks will copy an array of oneBlockFiles to the forkedBlocksStore, then delete them (dstore does not have MOVE primitive)
	MoveForkedBlocks(ctx context.Context, oneBlockFiles []*bstream.OneBlockFile)
}

// ForkAwareDStoreIO decorates DStoreIO with a forked-blocks store, satisfying
// ForkAwareIOInterface in addition to IOInterface.
type ForkAwareDStoreIO struct {
	*DStoreIO
	forkedBlocksStore dstore.Store
	forkOd            *oneBlockFilesDeleter // deleter bound to the forked-blocks store
}

// DStoreIO is the dstore-backed implementation of IOInterface.
type DStoreIO struct {
	oneBlocksStore    dstore.Store
	mergedBlocksStore dstore.Store

	retryAttempts int
	retryCooldown time.Duration

	bundleSize uint64

	logger *zap.Logger
	tracer logging.Tracer
	od     *oneBlockFilesDeleter
	// NOTE(review): forkOd here appears never assigned by NewDStoreIO (only
	// ForkAwareDStoreIO.forkOd is set) — confirm whether this field is dead.
	forkOd *oneBlockFilesDeleter
}

// NewDStoreIO builds a DStoreIO; if forkedBlocksStore is non-nil it returns a
// ForkAwareDStoreIO instead, which additionally implements ForkAwareIOInterface.
// Both variants start their background file-deleter workers immediately.
func NewDStoreIO(
	logger *zap.Logger,
	tracer logging.Tracer,
	oneBlocksStore dstore.Store,
	mergedBlocksStore dstore.Store,
	forkedBlocksStore dstore.Store,
	retryAttempts int,
	retryCooldown time.Duration,
	bundleSize uint64,
) IOInterface {

	od := &oneBlockFilesDeleter{store: oneBlocksStore, logger: logger}
	od.Start(DefaultFilesDeleteThreads, DefaultFilesDeleteBatchSize*2)
	dstoreIO := &DStoreIO{
		oneBlocksStore:    oneBlocksStore,
		mergedBlocksStore: mergedBlocksStore,
		retryAttempts:     retryAttempts,
		retryCooldown:     retryCooldown,
		bundleSize:        bundleSize,
		logger:            logger,
		tracer:            tracer,
		od:                od,
	}

	// fork awareness is opted into simply by providing a forked-blocks store
	forkAware := forkedBlocksStore != nil
	if !forkAware {
		return dstoreIO
	}

	forkOd := &oneBlockFilesDeleter{store: forkedBlocksStore, logger: logger}
	forkOd.Start(DefaultFilesDeleteThreads, DefaultFilesDeleteBatchSize*2)

	return &ForkAwareDStoreIO{
		DStoreIO:          dstoreIO,
		forkedBlocksStore: forkedBlocksStore,
		forkOd:            forkOd,
	}
}

// MergeAndStore writes the merged-blocks bundle starting at inclusiveLowerBlock,
// streaming the filtered one-block files through a BundleReader with retries.
func (s *DStoreIO) MergeAndStore(ctx context.Context, inclusiveLowerBlock uint64, oneBlockFiles []*bstream.OneBlockFile) (err error) {
	// since we keep the last block from previous merged bundle for future deleting,
	// we want to make sure that it does not end up in this merged bundle too
	var filteredOBF []*bstream.OneBlockFile

	if len(oneBlockFiles) == 0 {
		return fmt.Errorf("cannot merge and store without a single oneBlockFile")
	}
	// any file works for extracting the stream header in NewBundleReader
	anyOneBlockFile := oneBlockFiles[0]
	for _, obf := range oneBlockFiles {
		if obf.Num >= inclusiveLowerBlock {
			filteredOBF = append(filteredOBF, obf)
		}
	}
	t0 := time.Now()

	bundleFilename := fileNameForBlocksBundle(inclusiveLowerBlock)

	zapFields := []zap.Field{
		zap.String("filename", bundleFilename),
zap.Duration("write_timeout", WriteObjectTimeout), + zap.Int("number_of_blocks", len(filteredOBF)), + } + if len(filteredOBF) != 0 { + zapFields = append(zapFields, zap.Uint64("lower_block_num", filteredOBF[0].Num), zap.Uint64("highest_block_num", filteredOBF[len(filteredOBF)-1].Num)) + } + + s.logger.Info("about to write merged blocks to storage location", zapFields...) + + err = Retry(s.logger, s.retryAttempts, s.retryCooldown, func() error { + inCtx, cancel := context.WithTimeout(ctx, WriteObjectTimeout) + defer cancel() + bundleReader, err := NewBundleReader(ctx, s.logger, s.tracer, filteredOBF, anyOneBlockFile, s.DownloadOneBlockFile) + if err != nil { + return err + } + return s.mergedBlocksStore.WriteObject(inCtx, bundleFilename, bundleReader) + }) + if err != nil { + return fmt.Errorf("write object error: %s", err) + } + + s.logger.Info("merged and uploaded", zap.String("filename", fileNameForBlocksBundle(inclusiveLowerBlock)), zap.Duration("merge_time", time.Since(t0))) + + return +} + +func (s *DStoreIO) WalkOneBlockFiles(ctx context.Context, lowestBlock uint64, callback func(*bstream.OneBlockFile) error) error { + return s.oneBlocksStore.WalkFrom(ctx, "", fileNameForBlocksBundle(lowestBlock), func(filename string) error { + if strings.HasSuffix(filename, ".tmp") { + return nil + } + oneBlockFile := bstream.MustNewOneBlockFile(filename) + + if err := callback(oneBlockFile); err != nil { + return err + } + return nil + }) + +} + +func (s *DStoreIO) DownloadOneBlockFile(ctx context.Context, oneBlockFile *bstream.OneBlockFile) (data []byte, err error) { + for filename := range oneBlockFile.Filenames { // will try to get MemoizeData from any of those files + var out io.ReadCloser + out, err = s.oneBlocksStore.OpenObject(ctx, filename) + s.logger.Debug("downloading one block", zap.String("file_name", filename)) + if err != nil { + continue + } + defer out.Close() + + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + data, err = 
ioutil.ReadAll(out) + if err == nil { + return data, nil + } + } + + return +} + +func (s *DStoreIO) NextBundle(ctx context.Context, lowestBaseBlock uint64) (outBaseBlock uint64, lib bstream.BlockRef, err error) { + var lastFound *uint64 + outBaseBlock = lowestBaseBlock + err = s.mergedBlocksStore.WalkFrom(ctx, "", fileNameForBlocksBundle(lowestBaseBlock), func(filename string) error { + num, err := strconv.ParseUint(filename, 10, 64) + if err != nil { + return err + } + + if num != outBaseBlock { + return fmt.Errorf("%w: merged blocks skip from %d to %d, you need to fill this hole, set firstStreamableBlock above this hole or set merger option to ignore holes", ErrHoleFound, outBaseBlock, num) + } + outBaseBlock += s.bundleSize + lastFound = &num + return nil + }) + + if lastFound != nil { + last, lastTime, err := s.readLastBlockFromMerged(ctx, *lastFound) + if err != nil { + return 0, nil, err + } + metrics.HeadBlockTimeDrift.SetBlockTime(*lastTime) + metrics.HeadBlockNumber.SetUint64(last.Num()) + lib = last + } + + return +} + +func (s *DStoreIO) readLastBlockFromMerged(ctx context.Context, baseBlock uint64) (bstream.BlockRef, *time.Time, error) { + subCtx, cancel := context.WithTimeout(ctx, GetObjectTimeout) + defer cancel() + reader, err := s.mergedBlocksStore.OpenObject(subCtx, fileNameForBlocksBundle(baseBlock)) + if err != nil { + return nil, nil, err + } + last, err := lastBlock(reader) + if err != nil { + return nil, nil, err + } + // we truncate the block ID to have the short version that we get on oneBlockFiles + return bstream.NewBlockRef(bstream.TruncateBlockID(last.Id), last.Number), &last.Timestamp, nil +} + +func (s *DStoreIO) DeleteAsync(oneBlockFiles []*bstream.OneBlockFile) error { + return s.od.Delete(oneBlockFiles) +} + +func (s *ForkAwareDStoreIO) MoveForkedBlocks(ctx context.Context, oneBlockFiles []*bstream.OneBlockFile) { + for _, f := range oneBlockFiles { + for name := range f.Filenames { + reader, err := s.oneBlocksStore.OpenObject(ctx, 
name) + if err != nil { + s.logger.Warn("could not copy forked block", zap.Error(err)) + continue + } + err = s.forkedBlocksStore.WriteObject(ctx, name, reader) + if err != nil { + s.logger.Warn("could not copy forked block", zap.Error(err)) + continue + } + reader.Close() + break + } + } + _ = s.od.Delete(oneBlockFiles) +} + +func (s *ForkAwareDStoreIO) DeleteForkedBlocksAsync(inclusiveLowBoundary, inclusiveHighBoundary uint64) { + var forkedBlockFiles []*bstream.OneBlockFile + err := s.forkedBlocksStore.WalkFrom(context.Background(), "", "", func(filename string) error { + if strings.HasSuffix(filename, ".tmp") { + return nil + } + obf := bstream.MustNewOneBlockFile(filename) + if obf.Num > inclusiveHighBoundary { + return io.EOF + } + forkedBlockFiles = append(forkedBlockFiles, obf) + return nil + }) + + if err != nil && err != io.EOF { + s.logger.Warn("cannot walk forked block files to delete old ones", + zap.Uint64("inclusive_low_boundary", inclusiveLowBoundary), + zap.Uint64("inclusive_high_boundary", inclusiveHighBoundary), + zap.Error(err), + ) + } + + s.forkOd.Delete(forkedBlockFiles) +} + +type oneBlockFilesDeleter struct { + sync.Mutex + toProcess chan string + retryAttempts int + retryCooldown time.Duration + store dstore.Store + logger *zap.Logger +} + +func (od *oneBlockFilesDeleter) Start(threads int, maxDeletions int) { + od.toProcess = make(chan string, maxDeletions) + for i := 0; i < threads; i++ { + go od.processDeletions() + } +} + +func (od *oneBlockFilesDeleter) Delete(oneBlockFiles []*bstream.OneBlockFile) error { + od.Lock() + defer od.Unlock() + + if len(oneBlockFiles) == 0 { + return nil + } + + var fileNames []string + for _, oneBlockFile := range oneBlockFiles { + for filename := range oneBlockFile.Filenames { + fileNames = append(fileNames, filename) + } + } + od.logger.Info("deleting a bunch of one_block_files", zap.Int("number_of_files", len(fileNames)), zap.String("first_file", fileNames[0]), zap.String("last_file", 
fileNames[len(fileNames)-1]), zap.Stringer("store", od.store.BaseURL())) + + deletable := make(map[string]bool) + + for _, f := range fileNames { + deletable[f] = true + } + + // dedupe processing queue + for empty := false; !empty; { + select { + case f := <-od.toProcess: + deletable[f] = true + default: + empty = true + } + } + + var deletableArr []string + for file := range deletable { + deletableArr = append(deletableArr, file) + } + sort.Strings(deletableArr) + + var err error + for _, file := range deletableArr { + if len(od.toProcess) == cap(od.toProcess) { + od.logger.Warn("skipping file deletions: the channel is full", zap.Int("capacity", cap(od.toProcess))) + err = fmt.Errorf("skipped some files") + break + } + od.toProcess <- file + } + return err +} + +func (od *oneBlockFilesDeleter) processDeletions() { + for { + file := <-od.toProcess + err := Retry(od.logger, od.retryAttempts, od.retryCooldown, func() error { + ctx, cancel := context.WithTimeout(context.Background(), DeleteObjectTimeout) + defer cancel() + err := od.store.DeleteObject(ctx, file) + if errors.Is(err, dstore.ErrNotFound) { + return nil + } + return err + }) + if err != nil { + od.logger.Warn("cannot delete oneblock file after a few retries", zap.String("file", file), zap.Error(err)) + } + } +} + +func lastBlock(mergeFileReader io.ReadCloser) (out *bstream.Block, err error) { + defer mergeFileReader.Close() + + blkReader, err := bstream.GetBlockReaderFactory.New(mergeFileReader) + if err != nil { + return nil, err + } + + for { + block, err := blkReader.Read() + if block != nil { + out = block + } + if err != nil { + if err == io.EOF { + break + } + return nil, err + } + } + + return out, nil +} diff --git a/merger/merger_io_test.go b/merger/merger_io_test.go new file mode 100644 index 0000000..10bfbad --- /dev/null +++ b/merger/merger_io_test.go @@ -0,0 +1,193 @@ +// Copyright 2019 dfuse Platform Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package merger + +import ( + "context" + "io" + "io/ioutil" + "strings" + "testing" + "time" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/dstore" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewDstore(t *testing.T) { + store := NewDStoreIO( + testLogger, + testTracer, + dstore.NewMockStore(nil), + dstore.NewMockStore(nil), + dstore.NewMockStore(nil), + 1, + 0, + 100, + ) + + _, ok := store.(ForkAwareIOInterface) + require.True(t, ok) + + // non-fork-aware + store = NewDStoreIO( + testLogger, + testTracer, + dstore.NewMockStore(nil), + dstore.NewMockStore(nil), + nil, + 1, + 0, + 100, + ) + + _, ok = store.(ForkAwareIOInterface) + require.False(t, ok) +} + +func newDStoreIO( + oneBlocksStore dstore.Store, + mergedBlocksStore dstore.Store, +) IOInterface { + return NewDStoreIO(testLogger, testTracer, oneBlocksStore, mergedBlocksStore, nil, 0, 0, 100) +} + +func TestMergerIO_MergeUploadPerfect(t *testing.T) { + bstream.GetBlockWriterHeaderLen = 0 + + files := []*bstream.OneBlockFile{ + block100, + block101, + } + var mergeLastBase string + var filesRead []string + var mergeCounter int + done := make(chan struct{}) + + oneBlockStore := dstore.NewMockStore(nil) + oneBlockStore.OpenObjectFunc = func(_ context.Context, name string) (io.ReadCloser, error) { + filesRead = append(filesRead, name) + if len(filesRead) == 2 { + close(done) + } + 
return ioutil.NopCloser(strings.NewReader("")), nil + } + mergedBlocksStore := dstore.NewMockStore( + func(base string, f io.Reader) (err error) { + mergeLastBase = base + mergeCounter++ + return nil + }, + ) + + mio := newDStoreIO(oneBlockStore, mergedBlocksStore) + + err := mio.MergeAndStore(context.Background(), 100, files) + require.NoError(t, err) + assert.Equal(t, mergeCounter, 1) + assert.Equal(t, mergeLastBase, "0000000100") + + expectFilenames := []string{ + "0000000100-0000000000000100a-0000000000000099a-98-suffix", // read header + "0000000100-0000000000000100a-0000000000000099a-98-suffix", + "0000000101-0000000000000101a-0000000000000100a-99-suffix", + } + + select { + case <-time.After(time.Second): + t.Error("timeout waiting for read") + case <-done: + } + assert.Equal(t, expectFilenames, filesRead) +} + +func TestMergerIO_MergeUploadFiltered(t *testing.T) { + files := []*bstream.OneBlockFile{ + block98, + block99, + block100, + block101, + } + + var mergeLastBase string + var filesRead []string + var mergeCounter int + done := make(chan struct{}) + + oneBlockStore := dstore.NewMockStore(nil) + oneBlockStore.OpenObjectFunc = func(_ context.Context, name string) (io.ReadCloser, error) { + filesRead = append(filesRead, name) + if len(filesRead) == 2 { + close(done) + } + return ioutil.NopCloser(strings.NewReader("")), nil + } + mergedBlocksStore := dstore.NewMockStore( + func(base string, f io.Reader) (err error) { + mergeLastBase = base + mergeCounter++ + return nil + }, + ) + + mio := newDStoreIO(oneBlockStore, mergedBlocksStore) + + err := mio.MergeAndStore(context.Background(), 100, files) + require.NoError(t, err) + assert.Equal(t, mergeCounter, 1) + assert.Equal(t, mergeLastBase, "0000000100") + + expectFilenames := []string{ + "0000000098-0000000000000098a-0000000000000097a-96-suffix", // read header + // 99 not read + "0000000100-0000000000000100a-0000000000000099a-98-suffix", + "0000000101-0000000000000101a-0000000000000100a-99-suffix", + } + + 
select { + case <-time.After(time.Second): + t.Error("timeout waiting for read") + case <-done: + } + assert.Equal(t, expectFilenames, filesRead) +} + +func TestMergerIO_MergeUploadNoFiles(t *testing.T) { + files := []*bstream.OneBlockFile{} + + oneBlockStore := dstore.NewMockStore(nil) + mergedBlocksStore := dstore.NewMockStore(nil) + mio := newDStoreIO(oneBlockStore, mergedBlocksStore) + + err := mio.MergeAndStore(context.Background(), 114, files) + require.Error(t, err) +} +func TestMergerIO_MergeUploadFilteredToZero(t *testing.T) { + files := []*bstream.OneBlockFile{ + block102Final100, + block103Final101, + } + oneBlockStore := dstore.NewMockStore(nil) + mergedBlocksStore := dstore.NewMockStore(nil) + mio := newDStoreIO(oneBlockStore, mergedBlocksStore) + + block102Final100.MemoizeData = []byte{0x0, 0x1, 0x2, 0x3} + block103Final101.MemoizeData = []byte{0x0, 0x1, 0x2, 0x3} + + err := mio.MergeAndStore(context.Background(), 114, files) + require.NoError(t, err) +} diff --git a/merger/metrics/metrics.go b/merger/metrics/metrics.go new file mode 100644 index 0000000..8dae3f8 --- /dev/null +++ b/merger/metrics/metrics.go @@ -0,0 +1,23 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package metrics + +import "github.com/streamingfast/dmetrics" + +var MetricSet = dmetrics.NewSet() + +var HeadBlockTimeDrift = MetricSet.NewHeadTimeDrift("merger") +var HeadBlockNumber = MetricSet.NewHeadBlockNumber("merger") +var AppReadiness = MetricSet.NewAppReadiness("merger") diff --git a/merger/server.go b/merger/server.go new file mode 100644 index 0000000..a4fa455 --- /dev/null +++ b/merger/server.go @@ -0,0 +1,21 @@ +package merger + +import ( + dgrpcfactory "github.com/streamingfast/dgrpc/server/factory" + pbhealth "google.golang.org/grpc/health/grpc_health_v1" +) + +func (m *Merger) startGRPCServer() { + gs := dgrpcfactory.ServerFromOptions() + gs.OnTerminated(m.Shutdown) + m.logger.Info("grpc server created") + + m.OnTerminated(func(_ error) { + gs.Shutdown(0) + }) + pbhealth.RegisterHealthServer(gs.ServiceRegistrar(), m) + m.logger.Info("server registered") + + go gs.Launch(m.grpcListenAddr) + +} diff --git a/merger/test_data/0000000001-20150730T152628.0-13406cb6-b1cb8fa3.dbin b/merger/test_data/0000000001-20150730T152628.0-13406cb6-b1cb8fa3.dbin new file mode 100644 index 0000000000000000000000000000000000000000..265a969136897793f61d8dfbb5b4ec748d6467fa GIT binary patch literal 826 zcmYdE%FJVM4e>BAWME*{;b0VUu&_wAG)pltH8xL5O*BcdOg1o0F*GtYv9vTWPck+# zH8V^xPD(R2OfxY}OiVR5Hc3uNHZ-(MGc-0aFiTD{lX6HgNi{VyH!(=FFgHs~O|wWg zFf>TAFiSKsNHI+V$(SY@8d{_nSQ?lm8k?k=Sth0?0~Mtt873!Lq$L_Fv2iSEdAXL= zfXRZX}3t6$Sy7fM{1kY)00)|(#Qji>t;3n|E+@qKaccx=_`)~)H= z&RmhYEaWOZrK?2v!$POO4p%0-ONp>9&C?A!AucA-{FTQ=#z^D*Q{VYY3fJm)xP4&Z-Dlp8ThCerYk$^K2zwzfE`26++5cwOXOj;6$cy%PAU!i@ z?E%Il{jHCawOQ&`-}d)HBgDDpHs>djjvH>a1hS%AL$&uf>Nh2+DE%EAen+zLb#Yp=bG#6-V~& m`Tuj$*R#P*j0BTXlo}V-1R%s9#l^+pnmn<|BAWME)6<6shUNHVcBO*Bj~Pc<_$Gd46eN=&shGB>bDNi?xAF*Y_f zPqH+%Ff=hUFf>gxvM?}8GcmL@PBbwzFfcJOv$ROIlyb1JNVPOeF);_{t&12k*N(!KG^6eDR?^H zbWAQ06eyW;Yu<@HET(gdrT=nl*Y@SRv)AX+o6U||3Slqg#ih@LF8kl?`fSpHA9>Lp z52R=2tUbV(q`&oX@)kuSf-0;Um>5PlaAN0TW&Y0~z~I3Y(4n3LO$}KJ?x`gi`elZC 
z26_g%2Ea69Zf0VxpOcwaTA`nwZ>VQdq@d3};pi&E)l($nBAWME)j!oe)$V4PxRXk=t&W{{k0VPs-#ZkA?fn3`%~Vw7fKoS2f5 zY;0;|nrf7iVrFh?mSmZfoMw=imX>Iim}p^?kK^m zu$@Ee6bqNamZ=M`KT9(VS~sbncS(z}`OXND1kpte`i_&A2(Y-#I4Pu%Exyy@*pv4X zo3d_5&siK8ul{$AU(>3iS@JhtZR)6qbCePZ=(wi5i~l^M;e*bqiN#*N&5>CPloZNV zb9c^bZgK~qDPf_rL7hJKl$ zo`If$Zd$UrWs0#;ihfRJUTKAXdcL8aNs&V8L%lc3|58Jr?YZr~N>y#vL$;?MvqK)f z)IIkoPNwUEdBwT*U-x?ETs*1Q#7Hnj865#8rYld=9e$Lp`*A>EN0!6Yuj#G}B`!?J zGI=)ZO%LzJ)BTHu6lBl%zPNWhwrX|j*7R*>u1H-La+RLaRU-Uhq0?W7E0f)&L{7Xu zbM$M*{1vHln+uH{pRScOnc|_OAeYC?+W1$ZcuR#-L+hN&n-6%c2vzvdW8m}kUGnA0 zQFliZ6IHSSI5GWiU|{fIge0azprR#9Dl)MowOAo2F)t-CCr3e7!L=kKwa70sFDtP~ z;m1kee~IVS?%ePB|8}wY`K$@HsYg7oXG$??n16e8b4p~ztFJFtT-kB!b_OsR#av~W zc6&$IvWxqE<-YiNi1*E=nV(BVUDR)~C*I)}5af(f)+%n^&cL9+ W2vUcw*ny~X?_0+gF1>(3fe8RIHH@GD literal 0 HcmV?d00001 diff --git a/merger/utils.go b/merger/utils.go new file mode 100644 index 0000000..9e6cfd2 --- /dev/null +++ b/merger/utils.go @@ -0,0 +1,95 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package merger + +import ( + "context" + "fmt" + "time" + + "github.com/streamingfast/bstream" + "go.uber.org/zap" + "gopkg.in/olivere/elastic.v3/backoff" +) + +func fileNameForBlocksBundle(blockNum uint64) string { + return fmt.Sprintf("%010d", blockNum) +} + +func toBaseNum(in uint64, bundleSize uint64) uint64 { + return in / bundleSize * bundleSize +} + +func Retry(logger *zap.Logger, attempts int, sleep time.Duration, function func() error) (err error) { + b := backoff.NewExponentialBackoff(sleep, 5*time.Second) + for i := 0; ; i++ { + err = function() + if err == nil { + return + } + + if i >= (attempts - 1) { + break + } + + time.Sleep(b.Next()) + + logger.Warn("retrying after error", zap.Error(err)) + } + return fmt.Errorf("after %d attempts, last error: %s", attempts, err) +} + +type TestMergerIO struct { + NextBundleFunc func(ctx context.Context, lowestBaseBlock uint64) (baseBlock uint64, lastIrreversibleBlock bstream.BlockRef, err error) + WalkOneBlockFilesFunc func(ctx context.Context, inclusiveLowerBlock uint64, callback func(*bstream.OneBlockFile) error) error + MergeAndStoreFunc func(ctx context.Context, inclusiveLowerBlock uint64, oneBlockFiles []*bstream.OneBlockFile) (err error) + DownloadOneBlockFileFunc func(ctx context.Context, oneBlockFile *bstream.OneBlockFile) (data []byte, err error) + DeleteAsyncFunc func(oneBlockFiles []*bstream.OneBlockFile) error +} + +func (io *TestMergerIO) NextBundle(ctx context.Context, lowestBaseBlock uint64) (baseBlock uint64, lastIrreversibleBlock bstream.BlockRef, err error) { + if io.NextBundleFunc != nil { + return io.NextBundleFunc(ctx, lowestBaseBlock) + } + return lowestBaseBlock, nil, nil +} + +func (io *TestMergerIO) MergeAndStore(ctx context.Context, inclusiveLowerBlock uint64, oneBlockFiles []*bstream.OneBlockFile) (err error) { + if io.MergeAndStoreFunc != nil { + return io.MergeAndStoreFunc(ctx, inclusiveLowerBlock, oneBlockFiles) + } + return nil +} + +func (io *TestMergerIO) 
DownloadOneBlockFile(ctx context.Context, oneBlockFile *bstream.OneBlockFile) (data []byte, err error) { + if io.DownloadOneBlockFileFunc != nil { + return io.DownloadOneBlockFileFunc(ctx, oneBlockFile) + } + + return nil, nil +} + +func (io *TestMergerIO) WalkOneBlockFiles(ctx context.Context, inclusiveLowerBlock uint64, callback func(*bstream.OneBlockFile) error) error { + if io.WalkOneBlockFilesFunc != nil { + return io.WalkOneBlockFilesFunc(ctx, inclusiveLowerBlock, callback) + } + return nil +} +func (io *TestMergerIO) DeleteAsync(oneBlockFiles []*bstream.OneBlockFile) error { + if io.DeleteAsyncFunc != nil { + return io.DeleteAsyncFunc(oneBlockFiles) + } + return nil +} diff --git a/node-manager/CHANGELOG.md b/node-manager/CHANGELOG.md new file mode 100644 index 0000000..79446e9 --- /dev/null +++ b/node-manager/CHANGELOG.md @@ -0,0 +1,59 @@ +# Change log + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + + + +## Unreleased + +### Removed +* No more 'BatchMode' option, we get wanted behavior only by setting MergeThresholdBlockAge: + - '0' -> do not automatically merge, ever + - '1' -> always merge + - (any other duration) -> only merge bundle when all its blocks are older than this duration +* No more 'tracker' to auto-merge based on current LIB (that feature requires too much setup) +* No more option 'FailOnNonContinuousBlocks' -> (was not actually implemented anyway) + +## 2021 (released throughout..) + +### Added +* New Feature: `auto-merge` the mindreader will switch between producing merged-blocks and one-block files depending on the existence of merged files in destination store and on the age of the blocks. It will also never overwrite destination files (unless BatchMode is set) +* New Feature: when producing merged files, a partial file will be produced on shutdown. 
If the next block to appear on next startup is the expected one, it will load the partial file to continue producing a merged-blocks file. +* New option 'BatchMode' forces the mindreader to produce merged-blocks all the time (without checking block age or existence of merged files in block store) and to overwrite any existing merged-blocks files. +* New option MergeThresholdBlockAge: defines the age at which a block is considered old enough to be included in a merged-block-file directly (without any risk of forking). + +### Fixed +* auto-merged block files are now written locally first, then sent asynchronously to the destination storage. They are sent in order (no threads). This makes it more resilient. + +### Removed +* `discardAfterStopBlock`: this option did not give any value, especially now that the mindreader can switch between producing merged blocks and one-block files +* `merge_upload_directly`: that feature is now automatically enabled (see new `auto-merge` feature), the `BatchMode` option can force that behavior now. + + +## [v0.0.1] - 2020-06-22 + +### Fixed: +* nodeos log levels are now properly decoded when going through zap logging engine instead of showing up as DEBUG +* mindreader does not get stuck anymore when trying to find an unexisting snapshot (it fails rapidly instead) + +### Changed +* ContinuousChecker is not enabled by default now, use FailOnNonContinuousBlocks +* BREAKING: AutoRestoreLatest(bool) option becomes AutoRestoreSource (`backup`, `snapshot`) +* Nodeos unexpectedly shutting down now triggers a Shutdown of the whole app +* ShutdownDelay is now actually implemented for any action that will make nodeos unavailable (snapshot, volume_snapshot, backup, maintenance, shutdown..). It will report the app as "not ready" for that period before actually affecting the service. + +### Added +* Options.DiscardAfterStopBlock, if not true, one-block-files will now be produced with the remaining blocks when MergeUploadDirectly is set. 
This way, they are not lost if you intend to restart without mergeUploadDirectly run a merger on these blocks later. +* App `nodeos_mindreader_stdin`, with a small subset of the features, a kind of "dumb mode" that only does the "mindreader" job (producing block files, relaying blocks through GRPC) on none of the "manager" job. +* Options.AutoSnapshotHostnameMatch(string) will only apply auto-snapshot parameters if os.Hostname() returns this string +* Options.AutoBackupHostnameMatch(string) will only apply auto-backup parameters if os.Hostname() returns this string +* Add FailOnNonContinuousBlocks Option to use continuousChecker or not +* Possibility to auto-restore from latest snapshot (useful for BP), deleting appropriate files to make it work and continue +* NumberOfSnapshotsToKeep flag to maintain a small list of snapshots -> If non-zero, it deletes older snapshot. + +## 2020-03-21 + +### Changed + +* License changed to Apache 2.0 diff --git a/node-manager/LICENSE b/node-manager/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/node-manager/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/node-manager/README.md b/node-manager/README.md
new file mode 100644
index 0000000..192711b
--- /dev/null
+++ b/node-manager/README.md
@@ -0,0 +1,65 @@
+# StreamingFast Node Manager
+[![reference](https://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](https://pkg.go.dev/github.com/streamingfast/manageos)
+[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
+
+This service is a wrapping process that operates blockchain nodes as part of **[StreamingFast](https://github.com/streamingfast/streamingfast)**.
+
+## Installation
+
+Build and run from here:
+* [**EOSIO on StreamingFast**](https://github.com/streamingfast/sf-eosio)
+* [**Ethereum on StreamingFast**](https://github.com/streamingfast/sf-ethereum)
+
+## Overview
+
+1) Operator (process commands, owns the superviser, owns the http handlers that send the commands to it)
+
+2) Superviser is the one managing the actual blockchain software (nodeos, geth..). It is made of an embedded generic superviser struct, plus specific nodeos/geth embedding it. It owns the plugins.
+ +## Shutdown pattern for Node-Manager only: +App creates: +* Superviser +* Operator (+superviser) + +So, the ownership is `app -> operator -> superviser` +* app.OnTerminating(operator.Shutdown()) +* operator.OnTerminating(sendCmd:"maintenance", superviser.Shutdown()) +* superviser.OnTerminating(superviser.Stop() (blocking)) + +## Shutdown pattern for Mindreader: + +App creates: +* Superviser +* Operator (+superviser) +* mindreaderPlugin (has call back to set maintenance on operator and stopBlockReached) + +App sets: +* superviser.RegisterLogPlugin(mindreaderPlugin) + +So, the ownership is `app -> operator -> superviser -> mindreader` +* app.OnTerminating(operator.Shutdown()) +* operator.OnTerminating(sendCmd:"maintenance", superviser.Shutdown()) +* superviser.OnTerminating(mindreader.Shutdown(), then endLogPlugins) +* superviser.OnTerminated(endLogPlugins) +* mindreader.OnTerminating(async operator.Shutdown(), wait consumeFlowDone) + * mindreader::archiver closes consumeFlowDone when superviser.endLogPlugins(+upload completed) + * mindreader shuts itself down when stopBlockNum reached +* mindreader.OnTerminated -> app.Shutdown() + +## Contributing + +**Issues and PR in this repo related strictly to the core manageos engine** + +Report any protocol-specific issues in their +[respective repositories](https://github.com/streamingfast/streamingfast#protocols) + +**Please first refer to the general +[StreamingFast contribution guide](https://github.com/streamingfast/streamingfast/blob/master/CONTRIBUTING.md)**, +if you wish to contribute to this code base. + +This codebase uses unit tests extensively, please write and run tests. + +## License + +[Apache 2.0](LICENSE) + diff --git a/node-manager/app/node_manager/app.go b/node-manager/app/node_manager/app.go new file mode 100644 index 0000000..ceff8ae --- /dev/null +++ b/node-manager/app/node_manager/app.go @@ -0,0 +1,150 @@ +// Copyright 2019 dfuse Platform Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nodemanager + +import ( + "context" + "fmt" + "net/http" + "os" + "time" + + dgrpcserver "github.com/streamingfast/dgrpc/server" + dgrpcfactory "github.com/streamingfast/dgrpc/server/factory" + "github.com/streamingfast/dmetrics" + nodeManager "github.com/streamingfast/firehose-core/node-manager" + "github.com/streamingfast/firehose-core/node-manager/metrics" + "github.com/streamingfast/firehose-core/node-manager/mindreader" + "github.com/streamingfast/firehose-core/node-manager/operator" + "github.com/streamingfast/shutter" + "go.uber.org/zap" + "google.golang.org/grpc" +) + +type Config struct { + StartupDelay time.Duration + + HTTPAddr string // was ManagerAPIAddress + ConnectionWatchdog bool + + GRPCAddr string +} + +type Modules struct { + Operator *operator.Operator + MetricsAndReadinessManager *nodeManager.MetricsAndReadinessManager + LaunchConnectionWatchdogFunc func(terminating <-chan struct{}) + MindreaderPlugin *mindreader.MindReaderPlugin + RegisterGRPCService func(server grpc.ServiceRegistrar) error + StartFailureHandlerFunc func() +} + +type App struct { + *shutter.Shutter + config *Config + modules *Modules + zlogger *zap.Logger +} + +func New(config *Config, modules *Modules, zlogger *zap.Logger) *App { + return &App{ + Shutter: shutter.New(), + config: config, + modules: modules, + zlogger: zlogger, + } +} + +func (a *App) Run() error { + hasMindreader := a.modules.MindreaderPlugin 
!= nil
+	a.zlogger.Info("running node manager app", zap.Reflect("config", a.config), zap.Bool("mindreader", hasMindreader))
+
+	hostname, _ := os.Hostname()
+	a.zlogger.Info("retrieved hostname from os", zap.String("hostname", hostname))
+
+	dmetrics.Register(metrics.Metricset)
+
+	// Shutting the app down first shuts the operator down and waits for it
+	// to be fully terminated before the app itself terminates.
+	a.OnTerminating(func(err error) {
+		a.modules.Operator.Shutdown(err)
+		<-a.modules.Operator.Terminated()
+	})
+
+	a.modules.Operator.OnTerminated(func(err error) {
+		a.zlogger.Info("chain operator terminated shutting down mindreader app")
+		a.Shutdown(err)
+	})
+
+	if a.config.StartupDelay != 0 {
+		time.Sleep(a.config.StartupDelay)
+	}
+
+	var httpOptions []operator.HTTPOption
+	if hasMindreader {
+		if err := a.startMindreader(); err != nil {
+			return fmt.Errorf("unable to start mindreader: %w", err)
+		}
+
+	}
+
+	a.zlogger.Info("launching operator")
+	go a.modules.MetricsAndReadinessManager.Launch()
+	go a.Shutdown(a.modules.Operator.Launch(a.config.HTTPAddr, httpOptions...))
+
+	if a.config.ConnectionWatchdog {
+		go a.modules.LaunchConnectionWatchdogFunc(a.Terminating())
+	}
+
+	return nil
+}
+
+// IsReady probes the operator's /healthz HTTP endpoint (with a short timeout)
+// and reports whether it answered with a 200 status.
+func (a *App) IsReady() bool {
+	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+	defer cancel()
+
+	url := fmt.Sprintf("http://%s/healthz", a.config.HTTPAddr)
+	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+	if err != nil {
+		a.zlogger.Warn("unable to build get health request", zap.Error(err))
+		return false
+	}
+
+	client := http.DefaultClient
+	res, err := client.Do(req)
+	if err != nil {
+		a.zlogger.Debug("unable to execute get health request", zap.Error(err))
+		return false
+	}
+	// The body must be closed so the underlying keep-alive connection can be
+	// reused by subsequent health checks (it was previously leaked).
+	defer res.Body.Close()
+
+	return res.StatusCode == http.StatusOK
+}
+
+// startMindreader launches the mindreader gRPC server, registering the extra
+// gRPC service when one was provided through Modules.
+func (a *App) startMindreader() error {
+	a.zlogger.Info("starting mindreader gRPC server")
+	gs := dgrpcfactory.ServerFromOptions(dgrpcserver.WithLogger(a.zlogger))
+
+	if a.modules.RegisterGRPCService != nil {
+		err := a.modules.RegisterGRPCService(gs.ServiceRegistrar())
+		if err != nil {
+			return fmt.Errorf("register extra grpc 
service: %w", err) + } + } + + gs.OnTerminated(a.Shutdown) + + // Launch is blocking and we don't want to block in this method + go gs.Launch(a.config.GRPCAddr) + + return nil +} diff --git a/node-manager/app/node_reader_stdin/app.go b/node-manager/app/node_reader_stdin/app.go new file mode 100644 index 0000000..3a54eca --- /dev/null +++ b/node-manager/app/node_reader_stdin/app.go @@ -0,0 +1,172 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package node_reader_stdin + +import ( + "bufio" + "fmt" + "os" + + "github.com/streamingfast/bstream/blockstream" + dgrpcserver "github.com/streamingfast/dgrpc/server" + dgrpcfactory "github.com/streamingfast/dgrpc/server/factory" + nodeManager "github.com/streamingfast/firehose-core/node-manager" + logplugin "github.com/streamingfast/firehose-core/node-manager/log_plugin" + "github.com/streamingfast/firehose-core/node-manager/mindreader" + "github.com/streamingfast/logging" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + pbheadinfo "github.com/streamingfast/pbgo/sf/headinfo/v1" + "github.com/streamingfast/shutter" + "go.uber.org/zap" + "google.golang.org/grpc" +) + +type Config struct { + GRPCAddr string + OneBlocksStoreURL string + OneBlockSuffix string + MindReadBlocksChanCapacity int + StartBlockNum uint64 + StopBlockNum uint64 + WorkingDir string + LogToZap bool + DebugDeepMind bool + + // MaxLineLengthInBytes configures the maximum bytes a single line consumed can be + // without any error. If left unspecified or 0, the default is 50 MiB (50 * 1024 * 1024). 
+ MaxLineLengthInBytes int64 +} + +type Modules struct { + ConsoleReaderFactory mindreader.ConsolerReaderFactory + MetricsAndReadinessManager *nodeManager.MetricsAndReadinessManager + RegisterGRPCService func(server grpc.ServiceRegistrar) error +} + +type App struct { + *shutter.Shutter + Config *Config + ReadyFunc func() + modules *Modules + zlogger *zap.Logger + tracer logging.Tracer +} + +func New(c *Config, modules *Modules, zlogger *zap.Logger, tracer logging.Tracer) *App { + n := &App{ + Shutter: shutter.New(), + Config: c, + ReadyFunc: func() {}, + modules: modules, + zlogger: zlogger, + tracer: tracer, + } + return n +} + +func (a *App) Run() error { + a.zlogger.Info("launching reader-node app (reading from stdin)", zap.Reflect("config", a.Config)) + + gs := dgrpcfactory.ServerFromOptions(dgrpcserver.WithLogger(a.zlogger)) + + blockStreamServer := blockstream.NewUnmanagedServer( + blockstream.ServerOptionWithLogger(a.zlogger), + blockstream.ServerOptionWithBuffer(1), + ) + + a.zlogger.Info("launching reader log plugin") + mindreaderLogPlugin, err := mindreader.NewMindReaderPlugin( + a.Config.OneBlocksStoreURL, + a.Config.WorkingDir, + a.modules.ConsoleReaderFactory, + a.Config.StartBlockNum, + a.Config.StopBlockNum, + a.Config.MindReadBlocksChanCapacity, + a.modules.MetricsAndReadinessManager.UpdateHeadBlock, + func(_ error) {}, + a.Config.OneBlockSuffix, + blockStreamServer, + a.zlogger, + a.tracer, + ) + if err != nil { + return err + } + + a.zlogger.Debug("configuring shutter") + mindreaderLogPlugin.OnTerminated(a.Shutdown) + a.OnTerminating(mindreaderLogPlugin.Shutdown) + + serviceRegistrar := gs.ServiceRegistrar() + pbheadinfo.RegisterHeadInfoServer(serviceRegistrar, blockStreamServer) + pbbstream.RegisterBlockStreamServer(serviceRegistrar, blockStreamServer) + + if a.modules.RegisterGRPCService != nil { + err := a.modules.RegisterGRPCService(gs.ServiceRegistrar()) + if err != nil { + return fmt.Errorf("register extra grpc service: %w", err) + } + } + 
gs.OnTerminated(a.Shutdown)
+	go gs.Launch(a.Config.GRPCAddr)
+
+	a.zlogger.Debug("running reader log plugin")
+	mindreaderLogPlugin.Launch()
+	go a.modules.MetricsAndReadinessManager.Launch()
+
+	var logPlugin *logplugin.ToZapLogPlugin
+	if a.Config.LogToZap {
+		logPlugin = logplugin.NewToZapLogPlugin(a.Config.DebugDeepMind, a.zlogger)
+	}
+
+	maxLineLength := a.Config.MaxLineLengthInBytes
+	if maxLineLength == 0 {
+		// Default documented on Config.MaxLineLengthInBytes: 50 MiB.
+		maxLineLength = 50 * 1024 * 1024
+	}
+
+	scanner := bufio.NewScanner(os.Stdin)
+	scanner.Buffer(make([]byte, int(maxLineLength)), int(maxLineLength))
+
+	go func() {
+		a.zlogger.Info("starting stdin consumption loop")
+		for scanner.Scan() {
+			line := scanner.Text()
+
+			if logPlugin != nil {
+				logPlugin.LogLine(line)
+			}
+
+			mindreaderLogPlugin.LogLine(line)
+		}
+
+		if err := scanner.Err(); err != nil {
+			a.zlogger.Error("got an error while trying to read a line", zap.Error(err))
+			mindreaderLogPlugin.Shutdown(err)
+			return
+		}
+
+		a.zlogger.Info("done reading from stdin")
+	}()
+
+	return nil
+}
+
+func (a *App) OnReady(f func()) {
+	a.ReadyFunc = f
+}
+
+func (a *App) IsReady() bool {
+	return true
+}
diff --git a/node-manager/boot.sh b/node-manager/boot.sh
new file mode 100755
index 0000000..72578b5
--- /dev/null
+++ b/node-manager/boot.sh
@@ -0,0 +1,100 @@
+#!/usr/bin/env bash
+# Copyright 2019 dfuse Platform Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ + +ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +clean="false" + +main() { + current_dir="`pwd`" + trap "cd \"$current_dir\"" EXIT + pushd "$ROOT" &> /dev/null + + while getopts "hc" opt; do + case "$opt" in + h) usage && exit 0;; + c) clean="true";; + \?) usage_error "Invalid option: -$OPTARG";; + esac + done + shift $((OPTIND-1)) + + platform=$1; shift + network=$1; shift + + if [[ $platform != "eos" ]]; then + usage_error "argument must be 'eos'" + fi + + if [[ $clean == "true" ]]; then + ./clean.sh nodeos_manager + ./clean.sh nodeos_mindreader + fi + + case "$platform" in + eos) eos $platform $network $force;; + esac +} + +eos() { + platform=$1; shift + network=$1; shift + force=$1; shift + + if [[ $network != "mainnet" && $network != "jungle" && $network != "bp" ]]; then + usage_error "argument must be 'mainnet', 'jungle' or 'bp'" + fi + + content_dir="$ROOT/boot/${platform}_${network}" + + config_file="$content_dir/config.ini" + genesis_file="$content_dir/genesis.json" + + manager_config_dir="$ROOT/data/nodeos_manager/config" + mindreader_config_dir="$ROOT/data/nodeos_mindreader/config" + + mkdir -p "$manager_config_dir" &> /dev/null + mkdir -p "$mindreader_config_dir" &> /dev/null + + [[ ! -f "$manager_config_dir/config.ini" || clean="true" ]] && cp "$config_file" "$manager_config_dir/config.ini" + [[ ! -f "$manager_config_dir/genesis.json" || clean="true" ]] && cp "$genesis_file" "$manager_config_dir/genesis.json" + + [[ ! -f "$mindreader_config_dir/config.ini" || clean="true" ]] && cp "$config_file" "$mindreader_config_dir/config.ini" + [[ ! 
-f "$mindreader_config_dir/genesis.json" || clean="true" ]] && cp "$genesis_file" "$mindreader_config_dir/genesis.json" +} + +usage_error() { + message="$1" + exit_code="$2" + + echo "ERROR: $message" + echo "" + usage + exit ${exit_code:-1} +} + +usage() { + echo "usage: boot.sh [-c] " + echo "" + echo "Create the necessary files for chain bootstrap (for example, the 'genesis.json' file" + echo "for EOS Mainnet syncing)." + echo "" + echo "Flags" + echo "" + echo " -c Clean all files before copying the 'config.ini' and 'genesis.json'" +} + +main $@ \ No newline at end of file diff --git a/node-manager/boot/eos_bp/config.ini b/node-manager/boot/eos_bp/config.ini new file mode 100644 index 0000000..b388df7 --- /dev/null +++ b/node-manager/boot/eos_bp/config.ini @@ -0,0 +1,29 @@ +# Chain +abi-serializer-max-time-ms = 500000 +chain-state-db-size-mb = 5000 +max-transaction-time = 5000 + +# P2P +agent-name = eos_bp +p2p-server-address = 0.0.0.0:9876 +p2p-max-nodes-per-host = 5 +connection-cleanup-period = 15 + +# HTTP +access-control-allow-origin = * +http-server-address = 0.0.0.0:8888 +http-max-response-time-ms = 1000 +http-validate-host = 0 +verbose-http-errors = true + +plugin = eosio::db_size_api_plugin +plugin = eosio::net_api_plugin +plugin = eosio::chain_api_plugin +plugin = eosio::producer_api_plugin + +# We want to produce the block logs, no deep-mind instrumentation here. 
+producer-name = eosio +producer-name = eosio2 +producer-name = eosio3 +enable-stale-production = true +signature-provider = EOS5MHPYyhjBjnQZejzZHqHewPWhGTfQWSVTWYEhDmJu4SXkzgweP=KEY:5JpjqdhVCQTegTjrLtCSXHce7c9M8w7EXYZS7xC13jVFF4Phcrx \ No newline at end of file diff --git a/node-manager/boot/eos_bp/genesis.json b/node-manager/boot/eos_bp/genesis.json new file mode 100644 index 0000000..4d648b8 --- /dev/null +++ b/node-manager/boot/eos_bp/genesis.json @@ -0,0 +1,4 @@ +{ + "initial_key": "EOS5MHPYyhjBjnQZejzZHqHewPWhGTfQWSVTWYEhDmJu4SXkzgweP", + "initial_timestamp": "2019-05-23T19:18:34" +} \ No newline at end of file diff --git a/node-manager/boot/eos_jungle/config.ini b/node-manager/boot/eos_jungle/config.ini new file mode 100644 index 0000000..f55ba3d --- /dev/null +++ b/node-manager/boot/eos_jungle/config.ini @@ -0,0 +1,44 @@ +http-server-address = 0.0.0.0:8888 +p2p-listen-endpoint = 0.0.0.0:9876 +p2p-server-address = 0.0.0.0:9876 +p2p-max-nodes-per-host = 2 +connection-cleanup-period = 60 +verbose-http-errors = true +chain-state-db-size-mb = 64000 +# shared-memory-size-mb = 2048 +reversible-blocks-db-size-mb = 2048 +http-validate-host = false +max-transaction-time = 5000 +abi-serializer-max-time-ms = 500000 +# read-mode = read-only # we want internal connectivity + +plugin = eosio::net_api_plugin +plugin = eosio::chain_api_plugin +plugin = eosio::db_size_api_plugin +plugin = eosio::producer_api_plugin + +# Max speed for replay +# validation-mode = light +# wasm-runtime = wavm + +# Enable deep mind +# deep-mind = 1 +# deep-mind-console = 1 + +agent-name = dfuse dev +p2p-peer-address = 145.239.133.201:9876 +p2p-peer-address = 163.172.34.128:9876 +p2p-peer-address = 34.73.143.228:9876 +p2p-peer-address = 47.244.11.76:9876 +p2p-peer-address = 88.99.193.44:9876 +p2p-peer-address = bp4-d3.eos42.io:9876 +p2p-peer-address = jungle2.cryptolions.io:19876 +p2p-peer-address = jungle2.cryptolions.io:9876 +p2p-peer-address = jungle2-eos.blckchnd.com:9876 +p2p-peer-address = 
jungle2.eosdac.io:9872 +p2p-peer-address = jungle.eosamsterdam.net:9876 +p2p-peer-address = jungle.eoscafeblock.com:9876 +p2p-peer-address = jungle.eosusa.news:19876 +p2p-peer-address = junglepeer.eossweden.se:9876 +p2p-peer-address = peer1-jungle.eosphere.io:9876 +p2p-peer-address = peer.jungle.alohaeos.com:9876 diff --git a/node-manager/boot/eos_jungle/genesis.json b/node-manager/boot/eos_jungle/genesis.json new file mode 100644 index 0000000..5d1a4d6 --- /dev/null +++ b/node-manager/boot/eos_jungle/genesis.json @@ -0,0 +1,24 @@ +{ + "initial_configuration": { + "base_per_transaction_net_usage": 12, + "context_free_discount_net_usage_den": 100, + "context_free_discount_net_usage_num": 20, + "deferred_trx_expiration_window": 600, + "max_authority_depth": 6, + "max_block_cpu_usage": 200000, + "max_block_net_usage": 1048576, + "max_inline_action_depth": 4, + "max_inline_action_size": 4096, + "max_ram_size": 34359738368, + "max_transaction_cpu_usage": 150000, + "max_transaction_delay": 3888000, + "max_transaction_lifetime": 3600, + "max_transaction_net_usage": 524288, + "min_transaction_cpu_usage": 100, + "net_usage_leeway": 500, + "target_block_cpu_usage_pct": 2000, + "target_block_net_usage_pct": 1000 + }, + "initial_key": "EOS8bRkmrfsQSmb87ix1EuFSe2NDsepKGCjUNgLEt1SDqw1fuhG4v", + "initial_timestamp": "2018-11-23T16:20:00" +} \ No newline at end of file diff --git a/node-manager/boot/eos_mainnet/config.ini b/node-manager/boot/eos_mainnet/config.ini new file mode 100644 index 0000000..26bd919 --- /dev/null +++ b/node-manager/boot/eos_mainnet/config.ini @@ -0,0 +1,27 @@ +http-server-address = 0.0.0.0:8888 +agent-name = dfuse dev +p2p-server-address = 0.0.0.0:9876 +p2p-max-nodes-per-host = 2 +connection-cleanup-period = 60 +verbose-http-errors = true +chain-state-db-size-mb = 64000 +reversible-blocks-db-size-mb = 2048 +# shared-memory-size-mb = 2048 +http-validate-host = false +max-transaction-time = 5000 +abi-serializer-max-time-ms = 500000 +read-mode = read-only + 
+plugin = eosio::net_api_plugin +plugin = eosio::chain_api_plugin +plugin = eosio::db_size_api_plugin +plugin = eosio::producer_api_plugin + +# Enable deep mind +# deep-mind = 1 +# deep-mind-console = 1 + +p2p-peer-address = publicnode.cypherglass.com:9876 +p2p-peer-address = mars.fnp2p.eosbixin.com:443 +p2p-peer-address = fullnode.eoslaomao.com:443 +p2p-peer-address = peer.main.alohaeos.com:9876 \ No newline at end of file diff --git a/node-manager/boot/eos_mainnet/genesis.json b/node-manager/boot/eos_mainnet/genesis.json new file mode 100644 index 0000000..1d784c7 --- /dev/null +++ b/node-manager/boot/eos_mainnet/genesis.json @@ -0,0 +1,23 @@ +{ + "initial_timestamp": "2018-06-08T08:08:08.888", + "initial_key": "EOS7EarnUhcyYqmdnPon8rm7mBCTnBoot6o7fE2WzjvEX2TdggbL3", + "initial_configuration": { + "max_block_net_usage": 1048576, + "target_block_net_usage_pct": 1000, + "max_transaction_net_usage": 524288, + "base_per_transaction_net_usage": 12, + "net_usage_leeway": 500, + "context_free_discount_net_usage_num": 20, + "context_free_discount_net_usage_den": 100, + "max_block_cpu_usage": 200000, + "target_block_cpu_usage_pct": 1000, + "max_transaction_cpu_usage": 150000, + "min_transaction_cpu_usage": 100, + "max_transaction_lifetime": 3600, + "deferred_trx_expiration_window": 600, + "max_transaction_delay": 3888000, + "max_inline_action_size": 4096, + "max_inline_action_depth": 4, + "max_authority_depth": 6 + } +} \ No newline at end of file diff --git a/node-manager/clean.sh b/node-manager/clean.sh new file mode 100755 index 0000000..e93b02d --- /dev/null +++ b/node-manager/clean.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +# Copyright 2019 dfuse Platform Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +main() { + current_dir="`pwd`" + trap "cd \"$current_dir\"" EXIT + pushd "$ROOT" &> /dev/null + + cmd=$1; shift + + if [[ $cmd == "" ]]; then + usage_error "argument is required" + fi + + if [[ ! -d "./cmd/$cmd" ]]; then + usage_error "argument is invalid, valid ones are: \"`ls ./cmd | xargs | tr ' ' ','`\"" + fi + + backup_dir="$ROOT/data/$cmd/backups" + config_dir="$ROOT/data/$cmd/config" + data_dir="$ROOT/data/$cmd/storage" + snapshot_dir="$ROOT/data/$cmd/snapshots" + mindreader_dir="$ROOT/data/$cmd/deep-mind" + + echo "Cleaning $cmd..." + if [[ $cmd == "geth_mindreader" ]]; then + rm -rf $mindreader_dir + rm -rf $data_dir/geth/chaindata + elif [[ $cmd == "nodeos_mindreader" || $cmd == "nodeos_manager" ]]; then + rm -rf $snapshot_dir + rm -rf $data_dir + fi + + echo "Done" +} + +usage_error() { + message="$1" + exit_code="$2" + + echo "ERROR: $message" + echo "" + usage + exit ${exit_code:-1} +} + +usage() { + echo "usage: clean.sh []" + echo "" + echo "Clean data of the appropriate manager/mindreader operator" + echo "" + echo "Valid " + ls "$ROOT/cmd" | xargs | tr " " "\n" | sed 's/^/ * /' + echo "" +} + +main $@ \ No newline at end of file diff --git a/node-manager/log_plugin/keep_last_lines_log_plugin.go b/node-manager/log_plugin/keep_last_lines_log_plugin.go new file mode 100644 index 0000000..2ba73c1 --- /dev/null +++ b/node-manager/log_plugin/keep_last_lines_log_plugin.go @@ -0,0 +1,60 @@ +// Copyright 2019 dfuse Platform Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logplugin + +import ( + "github.com/streamingfast/shutter" +) + +// KeepLastLinesLogPlugin takes a line and keep the last N lines as requested by the caller. +type KeepLastLinesLogPlugin struct { + *shutter.Shutter + lastLines *lineRingBuffer + includeDeepMindLines bool +} + +func NewKeepLastLinesLogPlugin(lineCount int, includeDeepMindLines bool) *KeepLastLinesLogPlugin { + plugin := &KeepLastLinesLogPlugin{ + Shutter: shutter.New(), + lastLines: &lineRingBuffer{maxCount: lineCount}, + includeDeepMindLines: includeDeepMindLines, + } + + return plugin +} +func (p *KeepLastLinesLogPlugin) Name() string { + return "KeepLastLinesLogPlugin" +} +func (p *KeepLastLinesLogPlugin) Launch() {} +func (p KeepLastLinesLogPlugin) Stop() {} +func (p *KeepLastLinesLogPlugin) DebugDeepMind(enabled bool) { + p.includeDeepMindLines = enabled +} + +func (p *KeepLastLinesLogPlugin) LastLines() []string { + return p.lastLines.lines() +} + +//func (p *KeepLastLinesLogPlugin) Close(_ error) { +//} + +func (p *KeepLastLinesLogPlugin) LogLine(in string) { + if readerInstrumentationPrefixRegex.MatchString(in) && !p.includeDeepMindLines { + // It's a deep mind log line and we don't care about it, skip + return + } + + p.lastLines.append(in) +} diff --git a/node-manager/log_plugin/keep_last_lines_log_plugin_test.go b/node-manager/log_plugin/keep_last_lines_log_plugin_test.go new file mode 100644 index 0000000..7f91e02 --- 
/dev/null +++ b/node-manager/log_plugin/keep_last_lines_log_plugin_test.go @@ -0,0 +1,54 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logplugin + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestKeepLastLinesLogPlugin(t *testing.T) { + tests := []struct { + name string + in []string + maxLine int + includeDeepMindLines bool + out []string + }{ + {"empty", []string{}, 3, false, nil}, + {"single, not reached", []string{"a"}, 3, false, []string{"a"}}, + {"flush, not reached", []string{"a", "b", "c"}, 3, false, []string{"a", "b", "c"}}, + {"over, count", []string{"a", "b", "c", "d"}, 3, false, []string{"b", "c", "d"}}, + {"multiple over count", []string{"a", "b", "c", "d", "e", "f", "g"}, 3, false, []string{"e", "f", "g"}}, + + {"max count 0 keeps nothing", []string{"a", "b", "c", "d", "e", "f", "g"}, 0, false, nil}, + + {"dm exclude, multiple over count", []string{"a", "b", "DMLOG a", "c", "d", "e", "f", "g", "DMLOG b"}, 3, false, []string{"e", "f", "g"}}, + {"dm include, multiple over count", []string{"a", "b", "DMLOG a", "c", "d", "e", "f", "g", "DMLOG b"}, 3, true, []string{"f", "g", "DMLOG b"}}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + plugin := NewKeepLastLinesLogPlugin(test.maxLine, test.includeDeepMindLines) + + for _, line := range test.in { + plugin.LogLine(line) + } + + assert.Equal(t, test.out, plugin.LastLines()) + }) + } +} 
diff --git a/node-manager/log_plugin/line_ring_buffer.go b/node-manager/log_plugin/line_ring_buffer.go
new file mode 100644
index 0000000..a0efb0d
--- /dev/null
+++ b/node-manager/log_plugin/line_ring_buffer.go
@@ -0,0 +1,60 @@
+package logplugin
+
+// bufferElement is a node in the doubly-linked list backing lineRingBuffer.
+type bufferElement struct {
+	previous *bufferElement
+	next     *bufferElement
+	line     string
+}
+
+// lineRingBuffer retains at most maxCount lines, evicting the oldest line
+// once full. A maxCount of 0 retains nothing. The zero value is ready to use.
+// tail points to the oldest retained line, head to the newest.
+type lineRingBuffer struct {
+	maxCount int
+
+	count int
+	tail  *bufferElement
+	head  *bufferElement
+}
+
+// lines returns the buffered lines ordered from oldest (tail) to newest
+// (head), or nil when the buffer is empty.
+func (b *lineRingBuffer) lines() (out []string) {
+	if b.count == 0 {
+		return nil
+	}
+
+	if b.count == 1 {
+		return []string{b.head.line}
+	}
+
+	i := 0
+	out = make([]string, b.count)
+	// Walk the list forward from oldest to newest.
+	for current := b.tail; current != nil; current = current.next {
+		out[i] = current.line
+		i++
+	}
+
+	return
+}
+
+// append pushes line as the new head; once the buffer is at capacity the
+// oldest line (tail) is dropped so count never exceeds maxCount.
+func (b *lineRingBuffer) append(line string) {
+	// If we keep nothing, there is nothing to do here
+	if b.maxCount == 0 {
+		return
+	}
+
+	oldHead := b.head
+	b.head = &bufferElement{line: line, previous: oldHead}
+
+	if oldHead != nil {
+		oldHead.next = b.head
+	}
+
+	if b.tail == nil {
+		b.tail = b.head
+	}
+
+	if b.count == b.maxCount {
+		// We are full, we need to rotate stuff a bit
+		b.tail = b.tail.next
+	} else {
+		// We are not full, let's just append a new line (so only update count)
+		b.count++
+	}
+}
diff --git a/node-manager/log_plugin/log_plugin.go b/node-manager/log_plugin/log_plugin.go
new file mode 100644
index 0000000..0a5458e
--- /dev/null
+++ b/node-manager/log_plugin/log_plugin.go
@@ -0,0 +1,68 @@
+// Copyright 2019 dfuse Platform Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package logplugin + +import ( + "regexp" + + "github.com/streamingfast/bstream/blockstream" +) + +var readerInstrumentationPrefixRegex = regexp.MustCompile("^(DMLOG|FIRE) ") + +type LogPlugin interface { + Name() string + Launch() + LogLine(in string) + //Close(err error) + Shutdown(err error) + IsTerminating() bool + Stop() +} + +type Shutter interface { + Terminated() <-chan struct{} + OnTerminating(f func(error)) + OnTerminated(f func(error)) + IsTerminating() bool + Shutdown(err error) +} + +type BlockStreamer interface { + Run(blockServer *blockstream.Server) +} + +type LogPluginFunc func(line string) + +func (f LogPluginFunc) Launch() {} +func (f LogPluginFunc) LogLine(line string) { f(line) } +func (f LogPluginFunc) Name() string { return "log plug func" } +func (f LogPluginFunc) Stop() {} +func (f LogPluginFunc) Shutdown(_ error) {} +func (f LogPluginFunc) Terminated() <-chan struct{} { + ch := make(chan struct{}) + close(ch) + return ch +} + +func (f LogPluginFunc) IsTerminating() bool { + return false +} + +func (f LogPluginFunc) OnTerminating(_ func(error)) { +} + +func (f LogPluginFunc) OnTerminated(_ func(error)) { +} diff --git a/node-manager/log_plugin/to_console_log_plugin.go b/node-manager/log_plugin/to_console_log_plugin.go new file mode 100644 index 0000000..32f1410 --- /dev/null +++ b/node-manager/log_plugin/to_console_log_plugin.go @@ -0,0 +1,80 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package logplugin + +import ( + "fmt" + "os" + "strconv" + + "github.com/streamingfast/shutter" +) + +var DebugLineLength = int64(4096) + +func init() { + if os.Getenv("DEBUG_LINE_LENGTH") != "" { + value, err := strconv.ParseInt(os.Getenv("DEBUG_LINE_LENGTH"), 10, 64) + if err == nil { + DebugLineLength = value + } + } +} + +// ToConsoleLogPlugin takes a line, and if it's not a FIRE (or DMLOG) line or +// if we are actively debugging deep mind, will print the line to the +// standard output +type ToConsoleLogPlugin struct { + *shutter.Shutter + debugDeepMind bool + skipBlankLines bool +} + +func NewToConsoleLogPlugin(debugDeepMind bool) *ToConsoleLogPlugin { + return &ToConsoleLogPlugin{ + Shutter: shutter.New(), + debugDeepMind: debugDeepMind, + } +} + +func (p *ToConsoleLogPlugin) SetSkipBlankLines(skip bool) { + p.skipBlankLines = skip +} + +func (p *ToConsoleLogPlugin) Launch() {} +func (p ToConsoleLogPlugin) Stop() {} +func (p *ToConsoleLogPlugin) Name() string { + return "ToConsoleLogPlugin" +} +func (p *ToConsoleLogPlugin) DebugDeepMind(enabled bool) { + p.debugDeepMind = enabled +} + +func (p *ToConsoleLogPlugin) LogLine(in string) { + if in == "" && p.skipBlankLines { + return + } + + if p.debugDeepMind || !readerInstrumentationPrefixRegex.MatchString(in) { + logLineLength := int64(len(in)) + + // We really want to write lines to stdout and not through our logger, it's the purpose of our plugin! + if logLineLength > DebugLineLength { + fmt.Printf("%s ... bytes: %d\n", in[:DebugLineLength], (logLineLength - DebugLineLength)) + } else { + fmt.Println(in) + } + } +} diff --git a/node-manager/log_plugin/to_zap_log_plugin.go b/node-manager/log_plugin/to_zap_log_plugin.go new file mode 100644 index 0000000..65a1165 --- /dev/null +++ b/node-manager/log_plugin/to_zap_log_plugin.go @@ -0,0 +1,130 @@ +// Copyright 2019 dfuse Platform Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logplugin + +import ( + "github.com/streamingfast/shutter" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var NoDisplay = zapcore.Level(zap.FatalLevel + 10) + +type ToZapLogPluginOption interface { + apply(p *ToZapLogPlugin) +} + +type toZapLogPluginOptionFunc func(p *ToZapLogPlugin) + +func (s toZapLogPluginOptionFunc) apply(p *ToZapLogPlugin) { + s(p) +} + +// ToZapLogPluginLogLevel is the option that defines which function to use to extract the log level +// from the line. +// +// The received function will be invoked with the actual line to log. The function should then return +// the log level value to use for this line. If the return value is the special value `NoDisplay` constant +// (which corresponds to log level `15` which does not exist within zap), the line is actually discarded +// completely and not logged to the logger. +func ToZapLogPluginLogLevel(extractLevel func(in string) zapcore.Level) ToZapLogPluginOption { + return toZapLogPluginOptionFunc(func(p *ToZapLogPlugin) { + p.levelExtractor = extractLevel + }) +} + +// ToZapLogPluginTransformer is the option that defines which function to use to transform the line before +// being logged to the logger. +// +// The received function will be invoked with the actual line to log **after** the level have been determined. +// The function should then return the transformed line. 
If the return line is the empty string, it is discarded +// completely. +func ToZapLogPluginTransformer(transformer func(in string) string) ToZapLogPluginOption { + return toZapLogPluginOptionFunc(func(p *ToZapLogPlugin) { + p.lineTransformer = transformer + }) +} + +// ToZapLogPlugin takes a line, and if it's not a FIRE (or DMLOG) line or +// if we are actively debugging deep mind, will print the line to received +// logger instance. +type ToZapLogPlugin struct { + *shutter.Shutter + + logger *zap.Logger + debugDeepMind bool + + levelExtractor func(in string) zapcore.Level + lineTransformer func(in string) string +} + +func NewToZapLogPlugin(debugDeepMind bool, logger *zap.Logger, options ...ToZapLogPluginOption) *ToZapLogPlugin { + plugin := &ToZapLogPlugin{ + Shutter: shutter.New(), + debugDeepMind: debugDeepMind, + logger: logger, + } + + for _, opt := range options { + opt.apply(plugin) + } + + return plugin +} + +func (p *ToZapLogPlugin) Launch() {} +func (p ToZapLogPlugin) Stop() {} + +func (p *ToZapLogPlugin) Name() string { + return "ToZapLogPlugin" +} + +func (p *ToZapLogPlugin) DebugDeepMind(enabled bool) { + p.debugDeepMind = enabled +} + +//func (p *ToZapLogPlugin) Close(_ error) { +//} + +func (p *ToZapLogPlugin) LogLine(in string) { + if readerInstrumentationPrefixRegex.MatchString(in) { + if p.debugDeepMind { + // Needs to be an info since often used in production where debug level is not enabled by default + p.logger.Info(in) + } + + return + } + + level := zap.DebugLevel + if p.levelExtractor != nil { + level = p.levelExtractor(in) + if level == NoDisplay { + // This is ignored, nothing else to do here ... + return + } + } + + if p.lineTransformer != nil { + in = p.lineTransformer(in) + if in == "" { + // This is ignored, nothing else to do here ... 
+ return + } + } + + p.logger.Check(level, in).Write() +} diff --git a/node-manager/log_plugin/to_zap_log_plugin_test.go b/node-manager/log_plugin/to_zap_log_plugin_test.go new file mode 100644 index 0000000..865e545 --- /dev/null +++ b/node-manager/log_plugin/to_zap_log_plugin_test.go @@ -0,0 +1,141 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logplugin + +import ( + "strings" + "testing" + + "github.com/streamingfast/logging" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func TestToZapLogPlugin(t *testing.T) { + simpleExtractor := func(in string) zapcore.Level { + if strings.HasPrefix(in, "error") { + return zap.ErrorLevel + } + + if strings.HasPrefix(in, "discard") { + return NoDisplay + } + + return zap.InfoLevel + } + + simpleTransformer := func(in string) string { + if in == "message" { + return in + } + + if in == "discard" { + return "" + } + + return strings.ToUpper(in) + } + + toUnderscoreTransformer := func(in string) string { + return "_" + } + + options := func(opts ...ToZapLogPluginOption) []ToZapLogPluginOption { + return opts + } + + tests := []struct { + name string + in []string + options []ToZapLogPluginOption + out []string + }{ + // Plain + { + "plain, always debug untransformed", + []string{"error message"}, + nil, + []string{`{"level":"debug","msg":"error message"}`}, + }, + + // Log Level + { + "with log leve, match element", + 
[]string{"error message"}, + options(ToZapLogPluginLogLevel(simpleExtractor)), + []string{`{"level":"error","msg":"error message"}`}, + }, + { + "with log leve, no match element", + []string{"warn message"}, + options(ToZapLogPluginLogLevel(simpleExtractor)), + []string{`{"level":"info","msg":"warn message"}`}, + }, + { + "with log leve, no display element", + []string{"discard message"}, + options(ToZapLogPluginLogLevel(simpleExtractor)), + []string(nil), + }, + + // Transformer + { + "with transformer, same", + []string{"message"}, + options(ToZapLogPluginTransformer(simpleTransformer)), + []string{`{"level":"debug","msg":"message"}`}, + }, + { + "with transformer, to upper", + []string{"any"}, + options(ToZapLogPluginTransformer(simpleTransformer)), + []string{`{"level":"debug","msg":"ANY"}`}, + }, + { + "with transformer, discard", + []string{"discard"}, + options(ToZapLogPluginTransformer(simpleTransformer)), + []string(nil), + }, + + // Log Level & Transformer + { + "with transformer & log leve, transform don't affect log level", + []string{"error message"}, + options(ToZapLogPluginTransformer(toUnderscoreTransformer), ToZapLogPluginLogLevel(simpleExtractor)), + []string{`{"level":"error","msg":"_"}`}, + }, + { + "with transformer & log leve, transform don't affect log level, any option order", + []string{"error message"}, + options(ToZapLogPluginLogLevel(simpleExtractor), ToZapLogPluginTransformer(toUnderscoreTransformer)), + []string{`{"level":"error","msg":"_"}`}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testLogger := logging.NewTestLogger(t) + + plugin := NewToZapLogPlugin(false, testLogger.Instance(), test.options...) 
+ for _, in := range test.in { + plugin.LogLine(in) + } + + assert.Equal(t, test.out, testLogger.RecordedLines(t)) + }) + } +} diff --git a/node-manager/metrics/common.go b/node-manager/metrics/common.go new file mode 100644 index 0000000..d48249b --- /dev/null +++ b/node-manager/metrics/common.go @@ -0,0 +1,33 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "github.com/streamingfast/dmetrics" +) + +var Metricset = dmetrics.NewSet() + +func NewHeadBlockTimeDrift(serviceName string) *dmetrics.HeadTimeDrift { + return Metricset.NewHeadTimeDrift(serviceName) +} + +func NewHeadBlockNumber(serviceName string) *dmetrics.HeadBlockNum { + return Metricset.NewHeadBlockNumber(serviceName) +} + +func NewAppReadiness(serviceName string) *dmetrics.AppReadiness { + return Metricset.NewAppReadiness(serviceName) +} diff --git a/node-manager/mindreader/archiver.go b/node-manager/mindreader/archiver.go new file mode 100644 index 0000000..1d09730 --- /dev/null +++ b/node-manager/mindreader/archiver.go @@ -0,0 +1,116 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mindreader + +import ( + "context" + "fmt" + "io" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/dstore" + "github.com/streamingfast/logging" + "github.com/streamingfast/shutter" + "go.uber.org/zap" +) + +type Archiver struct { + *shutter.Shutter + + startBlock uint64 + oneblockSuffix string + + localOneBlocksStore dstore.Store + blockWriterFactory bstream.BlockWriterFactory + + fileUploader *FileUploader + logger *zap.Logger + tracer logging.Tracer +} + +func NewArchiver( + startBlock uint64, + oneblockSuffix string, + localOneBlocksStore dstore.Store, + remoteOneBlocksStore dstore.Store, + blockWriterFactory bstream.BlockWriterFactory, + logger *zap.Logger, + tracer logging.Tracer, +) *Archiver { + + fileUploader := NewFileUploader( + localOneBlocksStore, + remoteOneBlocksStore, + logger) + + a := &Archiver{ + Shutter: shutter.New(), + startBlock: startBlock, + oneblockSuffix: oneblockSuffix, + localOneBlocksStore: localOneBlocksStore, + blockWriterFactory: blockWriterFactory, + fileUploader: fileUploader, + logger: logger, + tracer: tracer, + } + + return a +} + +func (a *Archiver) Start(ctx context.Context) { + a.OnTerminating(func(err error) { + a.logger.Info("archiver selector is terminating", zap.Error(err)) + }) + + a.OnTerminated(func(err error) { + a.logger.Info("archiver selector is terminated", zap.Error(err)) + }) + go a.fileUploader.Start(ctx) +} + +func (a *Archiver) StoreBlock(ctx context.Context, block *bstream.Block) error { + if block.Number < a.startBlock { + a.logger.Debug("skipping block below 
start_block", zap.Stringer("block", block), zap.Uint64("start_block", a.startBlock)) + return nil + } + + pipeRead, pipeWrite := io.Pipe() + + // We are in a pipe context and `a.blockWriterFactory.New(pipeWrite)` writes some bytes to the writer when called. + // To avoid blocking everything, we must start reading bytes in a goroutine first to ensure the called is not block + // forever because nobody is reading the pipe. + writeObjectErrChan := make(chan error) + go func() { + writeObjectErrChan <- a.localOneBlocksStore.WriteObject(ctx, bstream.BlockFileNameWithSuffix(block, a.oneblockSuffix), pipeRead) + }() + + blockWriter, err := a.blockWriterFactory.New(pipeWrite) + if err != nil { + return fmt.Errorf("write block factory: %w", err) + } + + // If `blockWriter.Write()` emits `nil`, the fact that we close with a `nil` error will actually forwards + // `io.EOF` to the `pipeRead` (e.g. our `WriteObject` call above) which is what we want. If it emits a non + // `nil`, it will be forwarded to the `pipeRead` which is also correct. 
+ pipeWrite.CloseWithError(blockWriter.Write(block)) + + // We are in a pipe context here, wait until the `WriteObject` call has finished + err = <-writeObjectErrChan + if err != nil { + return err + } + + return nil +} diff --git a/node-manager/mindreader/file_uploader.go b/node-manager/mindreader/file_uploader.go new file mode 100644 index 0000000..32707ab --- /dev/null +++ b/node-manager/mindreader/file_uploader.go @@ -0,0 +1,92 @@ +package mindreader + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/abourget/llerrgroup" + "github.com/streamingfast/dstore" + "github.com/streamingfast/shutter" + "go.uber.org/zap" +) + +type FileUploader struct { + *shutter.Shutter + mutex sync.Mutex + localStore dstore.Store + destinationStore dstore.Store + logger *zap.Logger + complete chan struct{} +} + +func NewFileUploader(localStore dstore.Store, destinationStore dstore.Store, logger *zap.Logger) *FileUploader { + return &FileUploader{ + Shutter: shutter.New(), + complete: make(chan struct{}), + localStore: localStore, + destinationStore: destinationStore, + logger: logger, + } +} + +func (fu *FileUploader) Start(ctx context.Context) { + defer close(fu.complete) + + fu.OnTerminating(func(_ error) { + <-fu.complete + }) + + if fu.IsTerminating() { + return + } + + var terminating bool + for { + err := fu.uploadFiles(ctx) + if err != nil { + fu.logger.Warn("failed to upload file", zap.Error(err)) + } + + if terminating { + return + } + + select { + case <-fu.Terminating(): + fu.logger.Info("terminating upload loop on next pass") + terminating = true + case <-time.After(500 * time.Millisecond): + } + } +} + +func (fu *FileUploader) uploadFiles(ctx context.Context) error { + fu.mutex.Lock() + defer fu.mutex.Unlock() + + eg := llerrgroup.New(200) + _ = fu.localStore.Walk(ctx, "", func(filename string) (err error) { + if eg.Stop() { + return nil + } + eg.Go(func() error { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) + defer cancel() + 
+ if traceEnabled { + fu.logger.Debug("uploading file to storage", zap.String("local_file", filename)) + } + + if err = fu.destinationStore.PushLocalFile(ctx, fu.localStore.ObjectPath(filename), filename); err != nil { + return fmt.Errorf("moving file %q to storage: %w", filename, err) + } + return nil + }) + + return nil + }) + + return eg.Wait() +} diff --git a/node-manager/mindreader/file_uploader_test.go b/node-manager/mindreader/file_uploader_test.go new file mode 100644 index 0000000..e268fc4 --- /dev/null +++ b/node-manager/mindreader/file_uploader_test.go @@ -0,0 +1,43 @@ +package mindreader + +import ( + "context" + "testing" + "time" + + "github.com/streamingfast/dstore" + "github.com/stretchr/testify/require" +) + +func TestFileUploader(t *testing.T) { + localStore := dstore.NewMockStore(nil) + localStore.SetFile("test1", nil) + localStore.SetFile("test2", nil) + localStore.SetFile("test3", nil) + + destinationStore := dstore.NewMockStore(nil) + + done := make(chan interface{}) + out := make(chan bool, 3) + + destinationStore.PushLocalFileFunc = func(_ context.Context, _, _ string) (err error) { + out <- true + return nil + } + go func() { + for i := 0; i < 3; i++ { + <-out + } + close(done) + }() + + uploader := NewFileUploader(localStore, destinationStore, testLogger) + err := uploader.uploadFiles(context.Background()) + require.NoError(t, err) + + select { + case <-done: + case <-time.After(5 * time.Second): + t.Error("took took long") + } +} diff --git a/node-manager/mindreader/init_test.go b/node-manager/mindreader/init_test.go new file mode 100644 index 0000000..9f82b3c --- /dev/null +++ b/node-manager/mindreader/init_test.go @@ -0,0 +1,25 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mindreader + +import ( + "github.com/streamingfast/logging" +) + +var testLogger, testTracer = logging.PackageLogger("node-manager", "github.com/streamingfast/firehose-core/node_manager/mindreader/tests") + +func init() { + logging.InstantiateLoggers() +} diff --git a/node-manager/mindreader/logging.go b/node-manager/mindreader/logging.go new file mode 100644 index 0000000..1b93050 --- /dev/null +++ b/node-manager/mindreader/logging.go @@ -0,0 +1,9 @@ +package mindreader + +import "os" + +var traceEnabled bool + +func init() { + traceEnabled = os.Getenv("TRACE") == "true" +} diff --git a/node-manager/mindreader/mindreader.go b/node-manager/mindreader/mindreader.go new file mode 100644 index 0000000..ecc5967 --- /dev/null +++ b/node-manager/mindreader/mindreader.go @@ -0,0 +1,357 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mindreader + +import ( + "context" + "fmt" + "io" + "os" + "path" + "regexp" + "sync" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/bstream/blockstream" + "github.com/streamingfast/dstore" + nodeManager "github.com/streamingfast/firehose-core/node-manager" + "github.com/streamingfast/logging" + "github.com/streamingfast/shutter" + "go.uber.org/zap" +) + +var ( + oneblockSuffixRegexp = regexp.MustCompile(`^[\w\-]+$`) +) + +type ConsolerReader interface { + ReadBlock() (obj *bstream.Block, err error) + Done() <-chan interface{} +} + +type ConsolerReaderFactory func(lines chan string) (ConsolerReader, error) + +type MindReaderPlugin struct { + *shutter.Shutter + + archiver *Archiver // transformed blocks are sent to Archiver + consoleReaderFactory ConsolerReaderFactory + stopBlock uint64 // if set, call shutdownFunc(nil) when we hit this number + channelCapacity int // transformed blocks are buffered in a channel + + lastSeenBlock bstream.BlockRef + lastSeenBlockLock sync.RWMutex + + headBlockUpdater nodeManager.HeadBlockUpdater + onBlockWritten nodeManager.OnBlockWritten + blockStreamServer *blockstream.Server + zlogger *zap.Logger + + lines chan string + consoleReader ConsolerReader // contains the 'reader' part of the pipe + consumeReadFlowDone chan interface{} +} + +// NewMindReaderPlugin initiates its own: +// * ConsoleReader (from given Factory) +// * Archiver (from archive store params) +// * Shutter +func NewMindReaderPlugin( + oneBlocksStoreURL string, + workingDirectory string, + consoleReaderFactory ConsolerReaderFactory, + startBlockNum uint64, + stopBlockNum uint64, + channelCapacity int, + headBlockUpdater nodeManager.HeadBlockUpdater, + shutdownFunc func(error), + oneBlockSuffix string, + blockStreamServer *blockstream.Server, + zlogger *zap.Logger, + tracer logging.Tracer, +) (*MindReaderPlugin, error) { + err := validateOneBlockSuffix(oneBlockSuffix) + if err != nil { + return nil, err + } + + zlogger.Info("creating 
mindreader plugin", + zap.String("one_blocks_store_url", oneBlocksStoreURL), + zap.String("one_block_suffix", oneBlockSuffix), + zap.String("working_directory", workingDirectory), + zap.Uint64("start_block_num", startBlockNum), + zap.Uint64("stop_block_num", stopBlockNum), + zap.Int("channel_capacity", channelCapacity), + zap.Bool("with_head_block_updater", headBlockUpdater != nil), + zap.Bool("with_shutdown_func", shutdownFunc != nil), + ) + + // Create directory and its parent(s), it's a no-op if everything already exists + err = os.MkdirAll(workingDirectory, os.ModePerm) + if err != nil { + return nil, fmt.Errorf("create working directory: %w", err) + } + + // local store + localOnBlocksStoreURL := path.Join(workingDirectory, "uploadable-oneblock") + localOneBlocksStore, err := dstore.NewStore(localOnBlocksStoreURL, "dbin", "", false) + if err != nil { + return nil, fmt.Errorf("new local one block store: %w", err) + } + + remoteOneBlocksStore, err := dstore.NewStore(oneBlocksStoreURL, "dbin.zst", "zstd", false) + if err != nil { + return nil, fmt.Errorf("new remote one block store: %w", err) + } + + archiver := NewArchiver( + startBlockNum, + oneBlockSuffix, + localOneBlocksStore, + remoteOneBlocksStore, + bstream.GetBlockWriterFactory, + zlogger, + tracer, + ) + + zlogger.Info("creating new mindreader plugin") + return &MindReaderPlugin{ + Shutter: shutter.New(), + archiver: archiver, + consoleReaderFactory: consoleReaderFactory, + stopBlock: stopBlockNum, + channelCapacity: channelCapacity, + headBlockUpdater: headBlockUpdater, + blockStreamServer: blockStreamServer, + zlogger: zlogger, + }, nil +} + +// Other components may have issues finding the one block files if suffix is invalid +func validateOneBlockSuffix(suffix string) error { + if suffix == "" { + return fmt.Errorf("oneblock_suffix cannot be empty") + } + if !oneblockSuffixRegexp.MatchString(suffix) { + return fmt.Errorf("oneblock_suffix contains invalid characters: %q", suffix) + } + return nil +} + 
+func (p *MindReaderPlugin) Name() string { + return "MindReaderPlugin" +} + +func (p *MindReaderPlugin) Launch() { + ctx, cancel := context.WithCancel(context.Background()) + p.OnTerminating(func(_ error) { + cancel() + }) + + p.zlogger.Info("starting mindreader") + + p.consumeReadFlowDone = make(chan interface{}) + + lines := make(chan string, 10000) //need a config here? + p.lines = lines + + consoleReader, err := p.consoleReaderFactory(lines) + if err != nil { + p.Shutdown(err) + } + p.consoleReader = consoleReader + + p.zlogger.Debug("starting archiver") + p.archiver.Start(ctx) + p.launch() + +} +func (p *MindReaderPlugin) launch() { + blocks := make(chan *bstream.Block, p.channelCapacity) + p.zlogger.Info("launching blocks reading loop", zap.Int("capacity", p.channelCapacity)) + go p.consumeReadFlow(blocks) + + go func() { + for { + err := p.readOneMessage(blocks) + if err != nil { + if err == io.EOF { + p.zlogger.Info("reached end of console reader stream, nothing more to do") + close(blocks) + return + } + p.zlogger.Error("reading from console logs", zap.Error(err)) + p.Shutdown(err) + // Always read messages otherwise you'll stall the shutdown lifecycle of the managed process, leading to corrupted database if exit uncleanly afterward + p.drainMessages() + close(blocks) + return + } + } + }() +} + +func (p MindReaderPlugin) Stop() { + p.zlogger.Info("mindreader is stopping") + if p.lines == nil { + // If the `lines` channel was not created yet, it means everything was shut down very rapidly + // and means MindreaderPlugin has not launched yet. Since it has not launched yet, there is + // no point in waiting for the read flow to complete since the read flow never started. So + // we exit right now. + return + } + + p.Shutdown(nil) + + close(p.lines) + p.waitForReadFlowToComplete() +} + +func (p *MindReaderPlugin) waitForReadFlowToComplete() { + p.zlogger.Info("waiting until consume read flow (i.e. 
blocks) is actually done processing blocks...") + <-p.consumeReadFlowDone + p.zlogger.Info("consume read flow terminate") +} + +// consumeReadFlow is the one function blocking termination until consumption/writeBlock/upload is done +func (p *MindReaderPlugin) consumeReadFlow(blocks <-chan *bstream.Block) { + p.zlogger.Info("starting consume flow") + defer close(p.consumeReadFlowDone) + + ctx := context.Background() + for { + p.zlogger.Debug("waiting to consume next block") + block, ok := <-blocks + if !ok { + p.zlogger.Info("all blocks in channel were drained, exiting read flow") + p.archiver.Shutdown(nil) + + <-p.archiver.Terminated() + p.zlogger.Info("archiver termination code completed") + + return + } + + p.zlogger.Debug("got one block", zap.Uint64("block_num", block.Number)) + + err := p.archiver.StoreBlock(ctx, block) + if err != nil { + p.zlogger.Error("failed storing block in archiver, shutting down and trying to send next blocks individually. You will need to reprocess over this range.", zap.Error(err), zap.Stringer("received_block", block)) + + if !p.IsTerminating() { + go p.Shutdown(fmt.Errorf("archiver store block failed: %w", err)) + } + + continue + } + + if p.onBlockWritten != nil { + err = p.onBlockWritten(block) + if err != nil { + p.zlogger.Error("onBlockWritten callback failed", zap.Error(err)) + + if !p.IsTerminating() { + go p.Shutdown(fmt.Errorf("onBlockWritten callback failed: %w", err)) + } + + continue + } + } + + if p.blockStreamServer != nil { + err = p.blockStreamServer.PushBlock(block) + if err != nil { + p.zlogger.Error("failed passing block to block stream server (this should not happen, shutting down)", zap.Error(err)) + + if !p.IsTerminating() { + go p.Shutdown(fmt.Errorf("block stream push block failed: %w", err)) + } + + continue + } + } + } +} + +func (p *MindReaderPlugin) drainMessages() { + for line := range p.lines { + _ = line + } +} + +func (p *MindReaderPlugin) readOneMessage(blocks chan<- *bstream.Block) error { + block, 
err := p.consoleReader.ReadBlock() + if err != nil { + return err + } + + if block.Num() < bstream.GetProtocolFirstStreamableBlock { + return nil + } + + p.lastSeenBlockLock.Lock() + p.lastSeenBlock = block.AsRef() + p.lastSeenBlockLock.Unlock() + + if p.headBlockUpdater != nil { + if err := p.headBlockUpdater(block); err != nil { + p.zlogger.Info("shutting down because head block updater generated an error", zap.Error(err)) + + // We are shutting dow in a separate goroutine because the shutdown signal reaches us back at some point which + // if we were not on a goroutine, we would dead block with the shutdown pipeline that would wait for us to + // terminate which would never happen. + // + // 0a33f6b578cc4d0b + go p.Shutdown(err) + } + } + + blocks <- block + + if p.stopBlock != 0 && block.Num() >= p.stopBlock && !p.IsTerminating() { + p.zlogger.Info("shutting down because requested end block reached", zap.Stringer("block", block)) + + // See comment tagged 0a33f6b578cc4d0b + go p.Shutdown(nil) + } + + return nil +} + +// LogLine receives log line and write it to "pipe" of the local console reader +func (p *MindReaderPlugin) LogLine(in string) { + if p.IsTerminating() { + return + } + + p.lines <- in +} + +func (p *MindReaderPlugin) OnBlockWritten(callback nodeManager.OnBlockWritten) { + p.onBlockWritten = callback +} + +// GetMindreaderLineChannel is a marker method that `superviser.Superviser` uses to determine if +// `logplugin.LogPlugin` is an actual mindreader plugin without depending on the `mindreader` +// package in which case it would create an import cycle. +// +// The `superviser.Superviser` defines `type mindreaderPlugin interface { LastSeenBlockNum() bstream.BlockRef }` +// which is respected. This is a trick to avoid circual dependency in imports. 
+func (p *MindReaderPlugin) LastSeenBlock() bstream.BlockRef { + p.lastSeenBlockLock.RLock() + defer p.lastSeenBlockLock.RUnlock() + + return p.lastSeenBlock +} diff --git a/node-manager/mindreader/mindreader_test.go b/node-manager/mindreader/mindreader_test.go new file mode 100644 index 0000000..5e76d58 --- /dev/null +++ b/node-manager/mindreader/mindreader_test.go @@ -0,0 +1,166 @@ +package mindreader + +import ( + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "strings" + "sync" + "testing" + "time" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/shutter" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMindReaderPlugin_OfficialPrefix_ReadFlow(t *testing.T) { + testMindReaderPluginReadFlow(t, "FIRE") +} + +func TestMindReaderPlugin_LegacyPrefix_ReadFlow(t *testing.T) { + testMindReaderPluginReadFlow(t, "DMLOG") +} + +func testMindReaderPluginReadFlow(t *testing.T, prefix string) { + numOfLines := 1 + lines := make(chan string, numOfLines) + blocks := make(chan *bstream.Block, numOfLines) + + mindReader := &MindReaderPlugin{ + Shutter: shutter.New(), + lines: lines, + consoleReader: newTestConsoleReader(lines), + } + + wg := sync.WaitGroup{} + wg.Add(numOfLines) + + var readMessageError error + go func() { + defer wg.Done() + readMessageError = mindReader.readOneMessage(blocks) + }() + + mindReader.LogLine(prefix + ` {"id":"00000001a"}`) + select { + case b := <-blocks: + require.Equal(t, uint64(01), b.Number) + case <-time.After(time.Second): + t.Error("too long") + } + + wg.Wait() + require.NoError(t, readMessageError) +} + +func TestMindReaderPlugin_StopAtBlockNumReached(t *testing.T) { + numOfLines := 2 + lines := make(chan string, numOfLines) + blocks := make(chan *bstream.Block, numOfLines) + done := make(chan interface{}) + + mindReader := &MindReaderPlugin{ + Shutter: shutter.New(), + lines: lines, + consoleReader: newTestConsoleReader(lines), + stopBlock: 2, + zlogger: 
testLogger, + } + mindReader.OnTerminating(func(err error) { + if err == nil { + close(done) + } else { + t.Error("should not be called") + } + }) + + mindReader.LogLine(`DMLOG {"id":"00000001a"}`) + mindReader.LogLine(`DMLOG {"id":"00000002a"}`) + + wg := sync.WaitGroup{} + wg.Add(numOfLines) + + readErrors := []error{} + go func() { + for i := 0; i < numOfLines; i++ { + err := mindReader.readOneMessage(blocks) + readErrors = append(readErrors, err) + wg.Done() + } + }() + + select { + case <-done: + case <-time.After(1 * time.Millisecond): + t.Error("too long") + } + + wg.Wait() + for _, err := range readErrors { + require.NoError(t, err) + } + + // Validate actually read block + assert.Equal(t, numOfLines, len(blocks)) // moderate requirement, race condition can make it pass more blocks +} + +func TestMindReaderPlugin_OneBlockSuffixFormat(t *testing.T) { + assert.Error(t, validateOneBlockSuffix("")) + assert.NoError(t, validateOneBlockSuffix("example")) + assert.NoError(t, validateOneBlockSuffix("example-hostname-123")) + assert.NoError(t, validateOneBlockSuffix("example_hostname_123")) + assert.Equal(t, `oneblock_suffix contains invalid characters: "example.lan"`, validateOneBlockSuffix("example.lan").Error()) +} + +type testConsoleReader struct { + lines chan string + done chan interface{} +} + +func newTestConsoleReader(lines chan string) *testConsoleReader { + return &testConsoleReader{ + lines: lines, + } +} + +func (c *testConsoleReader) Done() <-chan interface{} { + return c.done +} + +func (c *testConsoleReader) ReadBlock() (*bstream.Block, error) { + line, _ := <-c.lines + + var formatedLine string + if strings.HasPrefix(line, "DMLOG") { + formatedLine = line[6:] + } else { + formatedLine = line[5:] + } + + type block struct { + ID string `json:"id"` + } + + data := new(block) + if err := json.Unmarshal([]byte(formatedLine), data); err != nil { + return nil, fmt.Errorf("marshalling error on '%s': %w", formatedLine, err) + } + return &bstream.Block{ + 
Id: data.ID, + Number: toBlockNum(data.ID), + }, nil +} + +func toBlockNum(blockID string) uint64 { + if len(blockID) < 8 { + return 0 + } + bin, err := hex.DecodeString(blockID[:8]) + if err != nil { + return 0 + } + return uint64(binary.BigEndian.Uint32(bin)) +} diff --git a/node-manager/monitor.go b/node-manager/monitor.go new file mode 100644 index 0000000..0c36f24 --- /dev/null +++ b/node-manager/monitor.go @@ -0,0 +1,92 @@ +package node_manager + +import ( + "time" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/dmetrics" + "go.uber.org/atomic" + "go.uber.org/zap" +) + +type Readiness interface { + IsReady() bool +} + +type MetricsAndReadinessManager struct { + headBlockChan chan *bstream.Block + headBlockTimeDrift *dmetrics.HeadTimeDrift + headBlockNumber *dmetrics.HeadBlockNum + appReadiness *dmetrics.AppReadiness + readinessProbe *atomic.Bool + + // ReadinessMaxLatency is the max delta between head block time and + // now before /healthz starts returning success + readinessMaxLatency time.Duration + + logger *zap.Logger +} + +func NewMetricsAndReadinessManager(headBlockTimeDrift *dmetrics.HeadTimeDrift, headBlockNumber *dmetrics.HeadBlockNum, appReadiness *dmetrics.AppReadiness, readinessMaxLatency time.Duration) *MetricsAndReadinessManager { + return &MetricsAndReadinessManager{ + headBlockChan: make(chan *bstream.Block, 1), // just for non-blocking, saving a few nanoseconds here + readinessProbe: atomic.NewBool(false), + appReadiness: appReadiness, + headBlockTimeDrift: headBlockTimeDrift, + headBlockNumber: headBlockNumber, + readinessMaxLatency: readinessMaxLatency, + } +} + +func (m *MetricsAndReadinessManager) setReadinessProbeOn() { + m.readinessProbe.CAS(false, true) + m.appReadiness.SetReady() +} + +func (m *MetricsAndReadinessManager) setReadinessProbeOff() { + m.readinessProbe.CAS(true, false) + m.appReadiness.SetNotReady() +} + +func (m *MetricsAndReadinessManager) IsReady() bool { + return m.readinessProbe.Load() +} + +func 
(m *MetricsAndReadinessManager) Launch() { + for { + var lastSeenBlock *bstream.Block + select { + case block := <-m.headBlockChan: + lastSeenBlock = block + case <-time.After(time.Second): + } + + if lastSeenBlock == nil { + continue + } + + // metrics + if m.headBlockNumber != nil { + m.headBlockNumber.SetUint64(lastSeenBlock.Num()) + } + + if lastSeenBlock.Time().IsZero() { // never act upon zero timestamps + continue + } + if m.headBlockTimeDrift != nil { + m.headBlockTimeDrift.SetBlockTime(lastSeenBlock.Time()) + } + + // readiness + if m.readinessMaxLatency == 0 || time.Since(lastSeenBlock.Time()) < m.readinessMaxLatency { + m.setReadinessProbeOn() + } else { + m.setReadinessProbeOff() + } + } +} + +func (m *MetricsAndReadinessManager) UpdateHeadBlock(block *bstream.Block) error { + m.headBlockChan <- block + return nil +} diff --git a/node-manager/operator/backuper.go b/node-manager/operator/backuper.go new file mode 100644 index 0000000..7045077 --- /dev/null +++ b/node-manager/operator/backuper.go @@ -0,0 +1,212 @@ +package operator + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "go.uber.org/zap" +) + +type BackupModuleConfig map[string]string + +type BackupModuleFactory func(conf BackupModuleConfig) (BackupModule, error) + +type BackupModule interface { + Backup(lastSeenBlockNum uint32) (string, error) + RequiresStop() bool +} + +type RestorableBackupModule interface { + BackupModule + Restore(name string) error +} + +type BackupSchedule struct { + BlocksBetweenRuns int + TimeBetweenRuns time.Duration + RequiredHostnameMatch string // will not run backup if !empty env.Hostname != HostnameMatch + BackuperName string // must match id of backupModule +} + +func (o *Operator) RegisterBackupModule(name string, mod BackupModule) error { + if o.backupModules == nil { + o.backupModules = make(map[string]BackupModule) + } + + if existing, found := o.backupModules[name]; found { + return fmt.Errorf("backup module %q is already registered, 
previous module type %s", name, reflect.TypeOf(existing))
+	}
+
+	o.backupModules[name] = mod
+	return nil
+}
+
+// RegisterBackupSchedule appends a schedule; it is consumed by
+// LaunchBackupSchedules (defined elsewhere in this package).
+func (o *Operator) RegisterBackupSchedule(sched *BackupSchedule) {
+	o.backupSchedules = append(o.backupSchedules, sched)
+}
+
+// selectBackupModule resolves which backup module to use: the named one when
+// optionalName is non-empty, otherwise the single registered module. It is an
+// error to have zero modules, or more than one without naming a choice.
+func selectBackupModule(mods map[string]BackupModule, optionalName string) (BackupModule, error) {
+	if len(mods) == 0 {
+		return nil, fmt.Errorf("no registered backup modules")
+	}
+
+	if optionalName != "" {
+		chosen, ok := mods[optionalName]
+		if !ok {
+			return nil, fmt.Errorf("invalid backup module: %s", optionalName)
+		}
+		return chosen, nil
+	}
+
+	if len(mods) > 1 {
+		var modNames []string
+		for k := range mods {
+			modNames = append(modNames, k)
+		}
+		return nil, fmt.Errorf("more than one module registered, and none specified (%s)", strings.Join(modNames, ","))
+	}
+
+	for _, mod := range mods { // single element in map
+		return mod, nil
+	}
+	return nil, fmt.Errorf("impossible path")
+
+}
+
+// selectRestoreModule mirrors selectBackupModule but restricts the choice to
+// modules that also implement RestorableBackupModule.
+func selectRestoreModule(choices map[string]BackupModule, optionalName string) (RestorableBackupModule, error) {
+	mods := restorable(choices)
+	if len(mods) == 0 {
+		return nil, fmt.Errorf("none of the registered backup modules support 'restore'")
+	}
+
+	if optionalName != "" {
+		chosen, ok := mods[optionalName]
+		if !ok {
+			return nil, fmt.Errorf("invalid restorable backup module: %s", optionalName)
+		}
+		return chosen, nil
+	}
+
+	if len(mods) > 1 {
+		var modNames []string
+		for k := range mods {
+			modNames = append(modNames, k)
+		}
+		return nil, fmt.Errorf("more than one restorable module registered, and none specified (%s)", strings.Join(modNames, ","))
+	}
+
+	for _, mod := range mods { // single element in map
+		return mod, nil
+	}
+	return nil, fmt.Errorf("impossible path")
+
+}
+
+// restorable filters the registered modules down to those implementing
+// RestorableBackupModule.
+func restorable(in map[string]BackupModule) map[string]RestorableBackupModule {
+	out := make(map[string]RestorableBackupModule)
+	for k, v := range in {
+		if rest, ok := v.(RestorableBackupModule); ok {
+			out[k] = rest
+		}
+	}
+	return out
+}
+
+func NewBackupSchedule(freqBlocks, freqTime, requiredHostname, backuperName string) (*BackupSchedule, error) { + switch { + case freqBlocks != "": + freqUint, err := strconv.ParseUint(freqBlocks, 10, 64) + if err != nil || freqUint == 0 { + return nil, fmt.Errorf("invalid value for freq_block in backup schedule (err: %w)", err) + } + + return &BackupSchedule{ + BlocksBetweenRuns: int(freqUint), + RequiredHostnameMatch: requiredHostname, + BackuperName: backuperName, + }, nil + + case freqTime != "": + freqTime, err := time.ParseDuration(freqTime) + if err != nil || freqTime < time.Minute { + return nil, fmt.Errorf("invalid value for freq_time in backup schedule(duration: %s, err: %w)", freqTime, err) + } + + return &BackupSchedule{ + TimeBetweenRuns: freqTime, + RequiredHostnameMatch: requiredHostname, + BackuperName: backuperName, + }, nil + + default: + return nil, fmt.Errorf("schedule created without any frequency value") + } +} + +func ParseBackupConfigs( + logger *zap.Logger, + backupConfigs []string, + backupModuleFactories map[string]BackupModuleFactory, +) ( + mods map[string]BackupModule, + scheds []*BackupSchedule, + err error, +) { + logger.Info("parsing backup configs", zap.Strings("configs", backupConfigs), zap.Int("factory_count", len(backupModuleFactories))) + for key := range backupModuleFactories { + logger.Info("parsing backup known factory", zap.String("name", key)) + } + + mods = make(map[string]BackupModule) + for _, confStr := range backupConfigs { + conf, err := parseKVConfigString(confStr) + if err != nil { + return nil, nil, err + } + + t := conf["type"] + factory, found := backupModuleFactories[t] + if !found { + return nil, nil, fmt.Errorf("unknown backup module type %q", t) + } + + mods[t], err = factory(conf) + if err != nil { + return nil, nil, fmt.Errorf("backup module %q factory: %w", t, err) + } + + if conf["freq-blocks"] != "" || conf["freq-time"] != "" { + newSched, err := NewBackupSchedule(conf["freq-blocks"], conf["freq-time"], 
conf["required-hostname"], t) + if err != nil { + return nil, nil, fmt.Errorf("error setting up backup schedule for %q: %w", t, err) + } + + scheds = append(scheds, newSched) + } + } + + return +} + +// parseKVConfigString is used for flags that generate key/value data, like +// `--backup="type=something freq_blocks=1000 prefix=v1"`. +func parseKVConfigString(in string) (map[string]string, error) { + fields := strings.Fields(in) + kvs := map[string]string{} + for _, field := range fields { + kv := strings.Split(field, "=") + if len(kv) != 2 { + return nil, fmt.Errorf("invalid key=value in kv config string: %s", field) + } + kvs[kv[0]] = kv[1] + } + typ, ok := kvs["type"] + if !ok || typ == "" { + return nil, fmt.Errorf("no type defined in kv config string (type field mandatory)") + } + + return kvs, nil +} diff --git a/node-manager/operator/backuper_test.go b/node-manager/operator/backuper_test.go new file mode 100644 index 0000000..1d64183 --- /dev/null +++ b/node-manager/operator/backuper_test.go @@ -0,0 +1,73 @@ +package operator + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParseKVConfigString(t *testing.T) { + cases := []struct { + name string + in string + expected map[string]string + expectError bool + }{ + { + "vanilla", + "type=pitreos store=file:///var/backups", + map[string]string{"type": "pitreos", "store": "file:///var/backups"}, + false, + }, + { + "missing type", + "store=file:///var/backups", + nil, + true, + }, + { + "empty type", + "type= store=file:///var/backups", + nil, + true, + }, + { + "empty", + "", + nil, + true, + }, + { + "invalid", + "type=blah store=file:///var/backups something", + nil, + true, + }, + { + "multispace_ok", + "type=blah store=file:///var/backups ", + map[string]string{"type": "blah", "store": "file:///var/backups"}, + false, + }, + { + "emptystring ok", + "type=blah store= freq=", + map[string]string{"type": "blah", "store": "", "freq": ""}, + 
false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + out, err := parseKVConfigString(tc.in) + if tc.expectError { + require.Error(t, err) + return + } + + require.NoError(t, err) + assert.Equal(t, tc.expected, out) + }) + } +} diff --git a/node-manager/operator/errors.go b/node-manager/operator/errors.go new file mode 100644 index 0000000..ba411d9 --- /dev/null +++ b/node-manager/operator/errors.go @@ -0,0 +1,19 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package operator + +import "errors" + +var ErrCleanExit = errors.New("clean exit") diff --git a/node-manager/operator/http_server.go b/node-manager/operator/http_server.go new file mode 100644 index 0000000..f06f9f9 --- /dev/null +++ b/node-manager/operator/http_server.go @@ -0,0 +1,212 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package operator + +import ( + "fmt" + "net/http" + "strings" + + "github.com/gorilla/mux" + "github.com/streamingfast/derr" + "go.uber.org/zap" +) + +type HTTPOption func(r *mux.Router) + +func (o *Operator) RunHTTPServer(httpListenAddr string, options ...HTTPOption) *http.Server { + r := mux.NewRouter() + r.HandleFunc("/v1/ping", o.pingHandler).Methods("GET") + r.HandleFunc("/healthz", o.healthzHandler).Methods("GET") + r.HandleFunc("/v1/healthz", o.healthzHandler).Methods("GET") + r.HandleFunc("/v1/server_id", o.serverIDHandler).Methods("GET") + r.HandleFunc("/v1/is_running", o.isRunningHandler).Methods("GET") + r.HandleFunc("/v1/start_command", o.startcommandHandler).Methods("GET") + r.HandleFunc("/v1/maintenance", o.maintenanceHandler).Methods("POST") + r.HandleFunc("/v1/resume", o.resumeHandler).Methods("POST") + r.HandleFunc("/v1/backup", o.backupHandler).Methods("POST") + r.HandleFunc("/v1/restore", o.restoreHandler).Methods("POST") + r.HandleFunc("/v1/list_backups", o.listBackupsHandler).Methods("GET") + r.HandleFunc("/v1/reload", o.reloadHandler).Methods("POST") + r.HandleFunc("/v1/safely_reload", o.safelyReloadHandler).Methods("POST") + r.HandleFunc("/v1/safely_pause_production", o.safelyPauseProdHandler).Methods("POST") + r.HandleFunc("/v1/safely_resume_production", o.safelyResumeProdHandler).Methods("POST") + + for _, opt := range options { + opt(r) + } + + o.zlogger.Info("starting webserver", zap.String("http_addr", httpListenAddr)) + err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { + pathTemplate, err := route.GetPathTemplate() + if err == nil { + methodsTmp, err := route.GetMethods() + var methods string + if err == nil { + methods = strings.Join(methodsTmp, ",") + } else { + methods = "GET" + } + + o.zlogger.Debug("walked route methods", zap.String("methods", methods), zap.String("path_template", pathTemplate)) + } + return nil + }) + + if err != nil { + o.zlogger.Error("walking route methods", 
zap.Error(err)) + } + + srv := &http.Server{Addr: httpListenAddr, Handler: r} + go func() { + if err := srv.ListenAndServe(); err != http.ErrServerClosed { + o.zlogger.Info("http server did not close correctly") + o.Shutdown(err) + } + }() + + return srv +} + +func (o *Operator) pingHandler(w http.ResponseWriter, _ *http.Request) { + _, _ = w.Write([]byte("pong\n")) +} + +func (o *Operator) startcommandHandler(w http.ResponseWriter, _ *http.Request) { + command := "Command:\n" + o.Superviser.GetCommand() + "\n" + _, _ = w.Write([]byte(command)) +} + +func (o *Operator) isRunningHandler(w http.ResponseWriter, _ *http.Request) { + _, _ = w.Write([]byte(fmt.Sprintf(`{"is_running":%t}`, o.Superviser.IsRunning()))) +} + +func (o *Operator) serverIDHandler(w http.ResponseWriter, _ *http.Request) { + id, err := o.Superviser.ServerID() + if err != nil { + http.Error(w, "not ready", http.StatusServiceUnavailable) + return + } + + _, _ = w.Write([]byte(id)) +} + +func (o *Operator) healthzHandler(w http.ResponseWriter, _ *http.Request) { + if !o.Superviser.IsRunning() { + http.Error(w, "not ready: chain is not running", http.StatusServiceUnavailable) + return + } + + if !o.chainReadiness.IsReady() { + http.Error(w, "not ready: chain is not ready", http.StatusServiceUnavailable) + return + } + + if o.aboutToStop.Load() || derr.IsShuttingDown() { + http.Error(w, "not ready: chain about to stop", http.StatusServiceUnavailable) + return + } + + w.Write([]byte("ready\n")) +} + +func (o *Operator) reloadHandler(w http.ResponseWriter, r *http.Request) { + o.triggerWebCommand("reload", nil, w, r) +} + +func (o *Operator) safelyReloadHandler(w http.ResponseWriter, r *http.Request) { + o.triggerWebCommand("safely_reload", nil, w, r) +} + +func (o *Operator) safelyResumeProdHandler(w http.ResponseWriter, r *http.Request) { + o.triggerWebCommand("safely_resume_production", nil, w, r) +} + +func (o *Operator) safelyPauseProdHandler(w http.ResponseWriter, r *http.Request) { + 
o.triggerWebCommand("safely_pause_production", nil, w, r) +} + +func (o *Operator) restoreHandler(w http.ResponseWriter, r *http.Request) { + params := getRequestParams(r, "backupName", "backupTag", "forceVerify") + o.triggerWebCommand("restore", params, w, r) +} + +func (o *Operator) listBackupsHandler(w http.ResponseWriter, r *http.Request) { + params := getRequestParams(r, "offset", "limit") + o.triggerWebCommand("list", params, w, r) +} + +func getRequestParams(r *http.Request, terms ...string) map[string]string { + params := make(map[string]string) + for _, p := range terms { + val := r.FormValue(p) + if val != "" { + params[p] = val + } + } + return params +} + +func (o *Operator) backupHandler(w http.ResponseWriter, r *http.Request) { + o.triggerWebCommand("backup", nil, w, r) +} + +func (o *Operator) maintenanceHandler(w http.ResponseWriter, r *http.Request) { + o.triggerWebCommand("maintenance", nil, w, r) +} + +func (o *Operator) resumeHandler(w http.ResponseWriter, r *http.Request) { + params := map[string]string{ + "debug-firehose-logs": r.FormValue("debug-firehose-logs"), + } + + if params["debug-firehose-logs"] == "" { + params["debug-firehose-logs"] = "false" + } + + o.triggerWebCommand("resume", params, w, r) +} + +func (o *Operator) triggerWebCommand(cmdName string, params map[string]string, w http.ResponseWriter, r *http.Request) { + c := &Command{cmd: cmdName, logger: o.zlogger} + c.params = params + sync := r.FormValue("sync") + if sync == "true" { + o.sendCommandSync(c, w) + } else { + o.sendCommandAsync(c, w) + } +} + +func (o *Operator) sendCommandAsync(c *Command, w http.ResponseWriter) { + o.zlogger.Info("sending async command to operator through channel", zap.Object("command", c)) + o.commandChan <- c + w.WriteHeader(http.StatusCreated) + _, _ = w.Write([]byte(fmt.Sprintf("%s command submitted\n", c.cmd))) +} + +func (o *Operator) sendCommandSync(c *Command, w http.ResponseWriter) { + o.zlogger.Info("sending sync command to operator 
through channel", zap.Object("command", c)) + c.returnch = make(chan error) + o.commandChan <- c + err := <-c.returnch + if err == nil { + w.Write([]byte(fmt.Sprintf("Success: %s completed\n", c.cmd))) + } else { + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(fmt.Sprintf("ERROR: %s failed: %s \n", c.cmd, err))) + } + +} diff --git a/node-manager/operator/operator.go b/node-manager/operator/operator.go new file mode 100644 index 0000000..03428e3 --- /dev/null +++ b/node-manager/operator/operator.go @@ -0,0 +1,481 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package operator + +import ( + "fmt" + "net/http" + "os" + "strings" + "sync" + "time" + + "github.com/streamingfast/derr" + nodeManager "github.com/streamingfast/firehose-core/node-manager" + "github.com/streamingfast/shutter" + "go.uber.org/atomic" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +type Operator struct { + *shutter.Shutter + options *Options + lastStartCommand time.Time + + backupModules map[string]BackupModule + backupSchedules []*BackupSchedule + + commandChan chan *Command + httpServer *http.Server + + Superviser nodeManager.ChainSuperviser + chainReadiness nodeManager.Readiness + + aboutToStop *atomic.Bool + zlogger *zap.Logger +} + +type Bootstrapper interface { + Bootstrap() error +} + +type Options struct { + Bootstrapper Bootstrapper + + EnableSupervisorMonitoring bool + + // Delay before sending Stop() to superviser, during which we return NotReady + ShutdownDelay time.Duration +} + +type Command struct { + cmd string + params map[string]string + returnch chan error + closer sync.Once + logger *zap.Logger +} + +func (c *Command) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + encoder.AddString("name", c.cmd) + encoder.AddReflected("params", c.params) + return nil +} + +func New(zlogger *zap.Logger, chainSuperviser nodeManager.ChainSuperviser, chainReadiness nodeManager.Readiness, options *Options) (*Operator, error) { + zlogger.Info("creating operator", zap.Reflect("options", options)) + + o := &Operator{ + Shutter: shutter.New(), + chainReadiness: chainReadiness, + commandChan: make(chan *Command, 10), + options: options, + Superviser: chainSuperviser, + aboutToStop: atomic.NewBool(false), + zlogger: zlogger, + } + + chainSuperviser.OnTerminated(func(err error) { + if !o.IsTerminating() { + zlogger.Info("chain superviser is shutting down operator") + o.Shutdown(err) + } + }) + + o.OnTerminating(func(err error) { + //wait for supervisor to terminate, supervisor will wait for plugins to terminate + if 
!chainSuperviser.IsTerminating() { + zlogger.Info("operator is terminating", zap.Error(err)) + chainSuperviser.Shutdown(err) + } + + zlogger.Info("operator is waiting for superviser to shutdown", zap.Error(err)) + <-o.Superviser.Terminated() + zlogger.Info("operator done waiting for superviser to shutdown", zap.Error(err)) + }) + + return o, nil +} + +func (o *Operator) Launch(httpListenAddr string, options ...HTTPOption) error { + o.zlogger.Info("launching operator HTTP server", zap.String("http_listen_addr", httpListenAddr)) + o.httpServer = o.RunHTTPServer(httpListenAddr, options...) + + // FIXME: too many options for that, maybe use monitoring module like with bootstrapper + if o.options.EnableSupervisorMonitoring { + if monitorable, ok := o.Superviser.(nodeManager.MonitorableChainSuperviser); ok { + go monitorable.Monitor() + } + } + + o.LaunchBackupSchedules() + + if o.options.Bootstrapper != nil { + o.zlogger.Info("operator calling bootstrap function") + err := o.options.Bootstrapper.Bootstrap() + if err != nil { + return fmt.Errorf("unable to bootstrap chain: %w", err) + } + } + o.commandChan <- &Command{cmd: "start", logger: o.zlogger} + + for { + o.zlogger.Info("operator ready to receive commands") + select { + case <-o.Superviser.Stopped(): // the chain stopped outside of a command that was expecting it. + if o.Superviser.IsTerminating() { + o.zlogger.Info("superviser terminating, waiting for operator...") + <-o.Terminating() + return o.Err() + } + // FIXME call a restore handler if passed... + lastLogLines := o.Superviser.LastLogLines() + + // FIXME: Actually, we should create a custom error type that contains the required data, the catching + // code can thus perform the required formatting! 
+ baseFormat := "instance %q stopped (exit code: %d), shutting down" + var shutdownErr error + if len(lastLogLines) > 0 { + shutdownErr = fmt.Errorf(baseFormat+": last log lines:\n%s", o.Superviser.GetName(), o.Superviser.LastExitCode(), formatLogLines(lastLogLines)) + } else { + shutdownErr = fmt.Errorf(baseFormat, o.Superviser.GetName(), o.Superviser.LastExitCode()) + } + + o.Shutdown(shutdownErr) + break + + case cmd := <-o.commandChan: + if cmd.cmd == "start" { // start 'sub' commands after a restore do NOT come through here + o.lastStartCommand = time.Now() + } + err := o.runCommand(cmd) + cmd.Return(err) + if err != nil { + if err == ErrCleanExit { + return nil + } + return fmt.Errorf("command %v execution failed: %v", cmd.cmd, err) + } + } + } +} + +func formatLogLines(lines []string) string { + formattedLines := make([]string, len(lines)) + for i, line := range lines { + formattedLines[i] = " " + line + } + + return strings.Join(formattedLines, "\n") +} + +func (o *Operator) runSubCommand(name string, parentCmd *Command) error { + return o.runCommand(&Command{cmd: name, returnch: parentCmd.returnch, logger: o.zlogger}) +} + +func (o *Operator) cleanSuperviserStop() error { + o.aboutToStop.Store(true) + defer o.aboutToStop.Store(false) + if o.options.ShutdownDelay != 0 && !derr.IsShuttingDown() { + o.zlogger.Info("marked as not_ready, waiting delay before actually stopping for maintenance", zap.Duration("delay", o.options.ShutdownDelay)) + time.Sleep(o.options.ShutdownDelay) + } + + err := o.Superviser.Stop() + return err +} + +// runCommand does its work, and returns an error for irrecoverable states. +func (o *Operator) runCommand(cmd *Command) error { + o.zlogger.Info("received operator command", zap.String("command", cmd.cmd), zap.Reflect("params", cmd.params)) + switch cmd.cmd { + case "maintenance": + o.zlogger.Info("preparing to stop process") + + if err := o.cleanSuperviserStop(); err != nil { + return err + } + + // Careful, we are now "stopped". 
Every other case can handle that state. + o.zlogger.Info("successfully put in maintenance") + + case "restore": + restoreMod, err := selectRestoreModule(o.backupModules, cmd.params["name"]) + if err != nil { + cmd.Return(err) + return nil + } + + o.zlogger.Info("Stopping to restore a backup") + if restoreMod.RequiresStop() { + if err := o.cleanSuperviserStop(); err != nil { + return err + } + } + + backupName := "latest" + if b, ok := cmd.params["backupName"]; ok { + backupName = b + } + + if err := restoreMod.Restore(backupName); err != nil { + return err + } + + o.zlogger.Info("Restarting after restore") + if restoreMod.RequiresStop() { + return o.runSubCommand("start", cmd) + } + return nil + + case "backup": + backupMod, err := selectBackupModule(o.backupModules, cmd.params["name"]) + if err != nil { + cmd.Return(err) + return nil + } + + o.zlogger.Info("Stopping to perform a backup") + if backupMod.RequiresStop() { + if err := o.cleanSuperviserStop(); err != nil { + return err + } + } + + backupName, err := backupMod.Backup(uint32(o.Superviser.LastSeenBlockNum())) + if err != nil { + return err + } + cmd.logger.Info("Completed backup", zap.String("backup_name", backupName)) + + o.zlogger.Info("Restarting after backup") + if backupMod.RequiresStop() { + return o.runSubCommand("start", cmd) + } + return nil + + case "reload": + o.zlogger.Info("preparing for reload") + if err := o.cleanSuperviserStop(); err != nil { + return err + } + + return o.runSubCommand("start", cmd) + + case "safely_resume_production": + o.zlogger.Info("preparing for safely resume production") + producer, ok := o.Superviser.(nodeManager.ProducerChainSuperviser) + if !ok { + cmd.Return(fmt.Errorf("the chain superviser does not support producing blocks")) + return nil + } + + isProducing, err := producer.IsProducing() + if err != nil { + cmd.Return(fmt.Errorf("unable to check if producing: %w", err)) + return nil + } + + if !isProducing { + o.zlogger.Info("resuming production of blocks") + 
err := producer.ResumeProduction() + if err != nil { + cmd.Return(fmt.Errorf("error resuming production of blocks: %w", err)) + return nil + } + + o.zlogger.Info("successfully resumed producer") + + } else { + o.zlogger.Info("block production was already running, doing nothing") + } + + o.zlogger.Info("successfully resumed block production") + + case "safely_pause_production": + o.zlogger.Info("preparing for safely pause production") + producer, ok := o.Superviser.(nodeManager.ProducerChainSuperviser) + if !ok { + cmd.Return(fmt.Errorf("the chain superviser does not support producing blocks")) + return nil + } + + isProducing, err := producer.IsProducing() + if err != nil { + cmd.Return(fmt.Errorf("unable to check if producing: %w", err)) + return nil + } + + if !isProducing { + o.zlogger.Info("block production is already paused, command is a no-op") + return nil + } + + o.zlogger.Info("waiting to pause the producer") + err = producer.WaitUntilEndOfNextProductionRound(3 * time.Minute) + if err != nil { + cmd.Return(fmt.Errorf("timeout waiting for production round: %w", err)) + return nil + } + + o.zlogger.Info("pausing block production") + err = producer.PauseProduction() + if err != nil { + cmd.Return(fmt.Errorf("unable to pause production correctly: %w", err)) + return nil + } + + o.zlogger.Info("successfully paused block production") + + case "safely_reload": + o.zlogger.Info("preparing for safely reload") + producer, ok := o.Superviser.(nodeManager.ProducerChainSuperviser) + if ok && producer.IsActiveProducer() { + o.zlogger.Info("waiting right after production round") + err := producer.WaitUntilEndOfNextProductionRound(3 * time.Minute) + if err != nil { + cmd.Return(fmt.Errorf("timeout waiting for production round: %w", err)) + return nil + } + } + + o.zlogger.Info("issuing 'reload' now") + emptied := false + for !emptied { + select { + case interimCmd := <-o.commandChan: + o.zlogger.Info("emptying command queue while safely_reload was running, dropped", 
zap.Any("interim_cmd", interimCmd)) + default: + emptied = true + } + } + + return o.runSubCommand("reload", cmd) + + case "start", "resume": + o.zlogger.Info("preparing for start") + if o.Superviser.IsRunning() { + o.zlogger.Info("chain is already running") + return nil + } + + o.zlogger.Info("preparing to start chain") + + var options []nodeManager.StartOption + if value := cmd.params["debug-firehose-logs"]; value != "" { + if value == "true" { + options = append(options, nodeManager.EnableDebugDeepmindOption) + } else { + options = append(options, nodeManager.DisableDebugDeepmindOption) + } + } + + if err := o.Superviser.Start(options...); err != nil { + return fmt.Errorf("error starting chain superviser: %w", err) + } + + o.zlogger.Info("successfully start service") + + } + + return nil +} + +func (c *Command) Return(err error) { + c.closer.Do(func() { + if err != nil && err != ErrCleanExit { + c.logger.Error("command failed", zap.String("cmd", c.cmd), zap.Error(err)) + } + + if c.returnch != nil { + c.returnch <- err + } + }) +} + +func (o *Operator) LaunchBackupSchedules() { + for _, sched := range o.backupSchedules { + if sched.RequiredHostnameMatch != "" { + hostname, err := os.Hostname() + if err != nil { + o.zlogger.Error("Disabling automatic backup schedule because requiredHostname is set and cannot retrieve hostname", zap.Error(err)) + continue + } + if sched.RequiredHostnameMatch != hostname { + o.zlogger.Info("Disabling automatic backup schedule because hostname does not match required value", + zap.String("hostname", hostname), + zap.String("required_hostname", sched.RequiredHostnameMatch), + zap.String("backuper_name", sched.BackuperName)) + continue + } + } + + cmdParams := map[string]string{"name": sched.BackuperName} + + if sched.TimeBetweenRuns > time.Second { //loose validation of not-zero (I've seen issues with .IsZero()) + o.zlogger.Info("starting time-based schedule for backup", + zap.Duration("time_between_runs", sched.TimeBetweenRuns), + 
zap.String("backuper_name", sched.BackuperName),
+			)
+			go o.RunEveryPeriod(sched.TimeBetweenRuns, "backup", cmdParams)
+		}
+		if sched.BlocksBetweenRuns > 0 {
+			o.zlogger.Info("starting block-based schedule for backup",
+				zap.Int("blocks_between_runs", sched.BlocksBetweenRuns),
+				zap.String("backuper_name", sched.BackuperName),
+			)
+			go o.RunEveryXBlock(uint32(sched.BlocksBetweenRuns), "backup", cmdParams)
+		}
+	}
+}
+
+func (o *Operator) RunEveryPeriod(period time.Duration, commandName string, params map[string]string) {
+	for {
+		time.Sleep(100 * time.Millisecond)
+
+		if o.Superviser.IsRunning() {
+			break
+		}
+	}
+
+	ticker := time.NewTicker(period).C
+
+	for range ticker {
+		if o.Superviser.IsRunning() {
+			o.commandChan <- &Command{cmd: commandName, logger: o.zlogger, params: params}
+		}
+	}
+}
+
+func (o *Operator) RunEveryXBlock(freq uint32, commandName string, params map[string]string) {
+	var lastHeadReference uint64
+	for {
+		time.Sleep(1 * time.Second)
+		lastSeenBlockNum := o.Superviser.LastSeenBlockNum()
+		if lastSeenBlockNum == 0 {
+			continue
+		}
+
+		if lastHeadReference == 0 {
+			lastHeadReference = lastSeenBlockNum
+		}
+
+		if lastSeenBlockNum > lastHeadReference+uint64(freq) {
+			o.commandChan <- &Command{cmd: commandName, logger: o.zlogger, params: params}
+			lastHeadReference = lastSeenBlockNum
+		}
+	}
+}
diff --git a/node-manager/serve.sh b/node-manager/serve.sh
new file mode 100755
index 0000000..3e285a7
--- /dev/null
+++ b/node-manager/serve.sh
@@ -0,0 +1,112 @@
+#!/usr/bin/env bash
+# Copyright 2019 dfuse Platform Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +main() { + current_dir="`pwd`" + trap "cd \"$current_dir\"" EXIT + pushd "$ROOT" &> /dev/null + + cmd=$1; shift + + if [[ $cmd == "" ]]; then + usage_error "argument is required" + fi + + if [[ ! -d "./cmd/$cmd" ]]; then + usage_error "argument is invalid, valid ones are: \"`ls ./cmd | xargs | tr ' ' ','`\"" + fi + + echo "Building $cmd..." + go build -o "./$cmd" "./cmd/$cmd" + if [[ $? != 0 ]]; then + echo "Build failed" + exit 1 + fi + + command="./$cmd `cmd_args $cmd $@`" + echo "Starting $command" + exec $command +} + +cmd_args() { + cmd=$1; shift + + backup_dir="$ROOT/data/$cmd/backups" + config_dir="$ROOT/data/$cmd/config" + data_dir="$ROOT/data/$cmd/storage" + snapshot_dir="$ROOT/data/$cmd/snapshots" + mindreader_dir="$ROOT/data/$cmd/deep-mind" + + geth_args="--geth-binary geth --data-dir $data_dir --backup-store-url $backup_dir" + nodeos_args="--nodeos-path nodeos --config-dir $config_dir --data-dir $data_dir --backup-store-url $backup_dir --snapshot-store-url $snapshot_dir" + + case "$cmd" in + geth_manager) + args="$geth_args" + extra_flags="--deep-mind-block-progress" + ;; + geth_mindreader) + args="$geth_args --oneblock-store-url=$mindreader_dir/blocks --working-dir=$mindreader_dir/tmp" + extra_flags="--deep-mind" + ;; + nodeos_manager) + args="$nodeos_args " + ;; + nodeos_mindreader) + args="$nodeos_args --oneblock-store-url=$mindreader_dir/blocks --working-dir=$mindreader_dir/tmp" + ;; + esac + + extra_flags_processed="" + for arg in "$@"; do + if [[ "$arg" == "--" ]]; then + 
args+=" -- $extra_flags"
+            extra_flags_processed="true"
+        else
+            args+=" $arg"
+        fi
+    done
+
+    if [[ $extra_flags_processed == "" ]]; then
+        args+=" -- $extra_flags"
+    fi
+
+    echo "$args"
+}
+
+usage_error() {
+    message="$1"
+    exit_code="$2"
+
+    echo "ERROR: $message"
+    echo ""
+    usage
+    exit ${exit_code:-1}
+}
+
+usage() {
+    echo "usage: serve.sh <cmd> [<flags> ...]"
+    echo ""
+    echo "Build & serve the appropriate manager/mindreader operator"
+    echo ""
+    echo "Valid <cmd>:"
+    ls "$ROOT/cmd" | xargs | tr " " "\n" | sed 's/^/ * /'
+    echo ""
+}
+
+main "$@"
\ No newline at end of file
diff --git a/node-manager/superviser.go b/node-manager/superviser.go
new file mode 100644
index 0000000..3b2aaeb
--- /dev/null
+++ b/node-manager/superviser.go
@@ -0,0 +1,99 @@
+// Copyright 2019 dfuse Platform Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package node_manager + +import ( + "time" + + logplugin "github.com/streamingfast/firehose-core/node-manager/log_plugin" +) + +type StartOption string + +var EnableDebugDeepmindOption = StartOption("enable-debug-firehose-logs") +var DisableDebugDeepmindOption = StartOption("disable-debug-firehose-logs") + +type ShutterInterface interface { + Shutdown(error) + OnTerminating(func(error)) + OnTerminated(func(error)) + IsTerminated() bool + IsTerminating() bool + Terminated() <-chan struct{} +} + +type ChainSuperviser interface { + ShutterInterface + + GetCommand() string + GetName() string + ServerID() (string, error) + + RegisterLogPlugin(plugin logplugin.LogPlugin) + Start(options ...StartOption) error + Stop() error + + IsRunning() bool + Stopped() <-chan struct{} + + LastExitCode() int + LastLogLines() []string + LastSeenBlockNum() uint64 +} + +type MonitorableChainSuperviser interface { + Monitor() +} + +type ProducerChainSuperviser interface { + IsProducing() (bool, error) + IsActiveProducer() bool + + ResumeProduction() error + PauseProduction() error + + WaitUntilEndOfNextProductionRound(timeout time.Duration) error +} + +type ProductionState int + +const ( + StatePre ProductionState = iota // Just before we produce, don't restart + StateProducing // We're producing right now + StatePost // Right after production + StateStale // We haven't produced for 12 minutes +) + +func (s ProductionState) String() string { + switch s { + case StatePre: + return "pre" + case StateProducing: + return "producing" + case StatePost: + return "post" + case StateStale: + return "stale" + default: + return "unknown" + } +} + +type ProductionEvent int + +const ( + EventProduced ProductionEvent = iota + EventReceived +) diff --git a/node-manager/superviser/superviser.go b/node-manager/superviser/superviser.go new file mode 100644 index 0000000..ad433ed --- /dev/null +++ b/node-manager/superviser/superviser.go @@ -0,0 +1,383 @@ +// Copyright 2019 dfuse Platform Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package superviser + +import ( + "fmt" + "strings" + "sync" + "time" + + "github.com/ShinyTrinkets/overseer" + "github.com/streamingfast/bstream" + nodeManager "github.com/streamingfast/firehose-core/node-manager" + logplugin "github.com/streamingfast/firehose-core/node-manager/log_plugin" + "github.com/streamingfast/shutter" + "go.uber.org/zap" +) + +// mindreaderPlugin can be used to check if `logplugin.LogPlugin` is actually a mindreader one. +// This is not in `mindreader` package to not introduce a cycle dependencies +type mindreaderPlugin interface { + logplugin.LogPlugin + + LastSeenBlock() bstream.BlockRef +} + +type Superviser struct { + *shutter.Shutter + Binary string + Arguments []string + // Env represents the environment variables the command will run with, the `nil` + // is handled differently than the `[]string{}` empty case. In the `nil` case, + // the process inherits from the parent process. In the empty case, it starts + // without any variables set. 
+ Env []string + Logger *zap.Logger + + cmd *overseer.Cmd + cmdLock sync.Mutex + + logPlugins []logplugin.LogPlugin + logPluginsLock sync.RWMutex + + enableDeepMind bool +} + +func New(logger *zap.Logger, binary string, arguments []string) *Superviser { + s := &Superviser{ + Shutter: shutter.New(), + Binary: binary, + Arguments: arguments, + Logger: logger, + } + + s.Shutter.OnTerminating(func(_ error) { + s.Logger.Info("superviser is terminating") + + if err := s.Stop(); err != nil { + s.Logger.Error("failed to stop supervised node process", zap.Error(err)) + } + + s.Logger.Info("shutting down plugins", zap.Int("last_exit_code", s.LastExitCode())) + s.endLogPlugins() + }) + + return s +} + +func (s *Superviser) RegisterLogPlugin(plugin logplugin.LogPlugin) { + s.logPluginsLock.Lock() + defer s.logPluginsLock.Unlock() + + s.logPlugins = append(s.logPlugins, plugin) + if shut, ok := plugin.(logplugin.Shutter); ok { + s.Logger.Info("adding superviser shutdown to plugins", zap.String("plugin_name", plugin.Name())) + shut.OnTerminating(func(err error) { + if !s.IsTerminating() { + s.Logger.Info("superviser shutting down because of a plugin", zap.String("plugin_name", plugin.Name())) + go s.Shutdown(err) + } + }) + } + + s.Logger.Info("registered log plugin", zap.Int("plugin count", len(s.logPlugins))) +} + +func (s *Superviser) GetLogPlugins() []logplugin.LogPlugin { + s.logPluginsLock.RLock() + defer s.logPluginsLock.RUnlock() + + return s.logPlugins +} + +func (s *Superviser) setDeepMindDebug(enabled bool) { + s.Logger.Info("setting deep mind debug mode", zap.Bool("enabled", enabled)) + for _, logPlugin := range s.logPlugins { + if v, ok := logPlugin.(nodeManager.DeepMindDebuggable); ok { + v.DebugDeepMind(enabled) + } + } +} + +func (s *Superviser) Stopped() <-chan struct{} { + if s.cmd != nil { + return s.cmd.Done() + } + return nil +} + +func (s *Superviser) LastExitCode() int { + if s.cmd != nil { + return s.cmd.Status().Exit + } + return 0 +} + +func (s 
*Superviser) LastLogLines() []string { + if s.hasToConsolePlugin() { + // There is no point in showing the last log lines when the user already saw it through the to console log plugin + return nil + } + + for _, plugin := range s.logPlugins { + if v, ok := plugin.(*logplugin.KeepLastLinesLogPlugin); ok { + return v.LastLines() + } + } + + return nil +} + +func (s *Superviser) LastSeenBlockNum() uint64 { + for _, plugin := range s.GetLogPlugins() { + if v, ok := plugin.(mindreaderPlugin); ok { + return v.LastSeenBlock().Num() + } + } + return 0 +} + +func (s *Superviser) Start(options ...nodeManager.StartOption) error { + for _, opt := range options { + if opt == nodeManager.EnableDebugDeepmindOption { + s.setDeepMindDebug(true) + } + if opt == nodeManager.DisableDebugDeepmindOption { + s.setDeepMindDebug(false) + } + } + + for _, plugin := range s.logPlugins { + plugin.Launch() + } + + s.cmdLock.Lock() + defer s.cmdLock.Unlock() + + if s.cmd != nil { + if s.cmd.State == overseer.STARTING || s.cmd.State == overseer.RUNNING { + s.Logger.Info("underlying process already running, nothing to do") + return nil + } + + if s.cmd.State == overseer.STOPPING { + s.Logger.Info("underlying process is currently stopping, waiting for it to finish") + <-s.cmd.Done() + } + } + + s.Logger.Info("creating new command instance and launch read loop", zap.String("binary", s.Binary), zap.Strings("arguments", s.Arguments)) + var args []interface{} + for _, a := range s.Arguments { + args = append(args, a) + } + + s.cmd = overseer.NewCmd(s.Binary, s.Arguments, overseer.Options{Streaming: true, Env: s.Env}) + + go s.start(s.cmd) + + return nil +} + +func (s *Superviser) Stop() error { + s.cmdLock.Lock() + defer s.cmdLock.Unlock() + + s.Logger.Info("supervisor received a stop request, terminating supervised node process") + + if !s.isRunning() { + s.Logger.Info("underlying process is not running, nothing to do") + return nil + } + + if s.cmd.State == overseer.STARTING || s.cmd.State == 
overseer.RUNNING { + s.Logger.Info("stopping underlying process") + err := s.cmd.Stop() + if err != nil { + s.Logger.Error("failed to stop overseer cmd", zap.Error(err)) + return err + } + } + + // Blocks until command finished completely + s.Logger.Debug("blocking until command actually ends") + +nodeProcessDone: + for { + select { + case <-s.cmd.Done(): + break nodeProcessDone + case <-time.After(500 * time.Millisecond): + s.Logger.Debug("still blocking until command actually ends") + } + } + + s.Logger.Info("supervised process has been terminated") + + s.Logger.Info("waiting for stdout and stderr to be drained", s.getProcessOutputStatsLogFields()...) + for { + if s.isBufferEmpty() { + break + } + + s.Logger.Debug("draining stdout and stderr", s.getProcessOutputStatsLogFields()...) + time.Sleep(500 * time.Millisecond) + } + + s.Logger.Info("stdout and stderr are now drained") + + // Must be after `for { ... }` as `s.cmd` is used within the loop and also before it via call to `getProcessOutputStatsLogFields` + s.cmd = nil + + return nil +} + +func (s *Superviser) getProcessOutputStats() (stdoutLineCount, stderrLineCount int) { + if s.cmd != nil { + return len(s.cmd.Stdout), len(s.cmd.Stderr) + } + + return +} + +func (s *Superviser) getProcessOutputStatsLogFields() []zap.Field { + stdoutLineCount, stderrLineCount := s.getProcessOutputStats() + + return []zap.Field{zap.Int("stdout_len", stdoutLineCount), zap.Int("stderr_len", stderrLineCount)} +} + +func (s *Superviser) IsRunning() bool { + s.cmdLock.Lock() + defer s.cmdLock.Unlock() + + return s.isRunning() +} + +// This one assuming the lock is properly held already +func (s *Superviser) isRunning() bool { + if s.cmd == nil { + return false + } + return s.cmd.State == overseer.STARTING || s.cmd.State == overseer.RUNNING || s.cmd.State == overseer.STOPPING +} + +func (s *Superviser) isBufferEmpty() bool { + if s.cmd == nil { + return true + } + return len(s.cmd.Stdout) == 0 && len(s.cmd.Stderr) == 0 +} + +func (s 
*Superviser) start(cmd *overseer.Cmd) { + statusChan := cmd.Start() + + processTerminated := false + for { + select { + case status := <-statusChan: + processTerminated = true + if status.Exit == 0 { + s.Logger.Info("command terminated with zero status", s.getProcessOutputStatsLogFields()...) + } else { + s.Logger.Error(fmt.Sprintf("command terminated with non-zero status, last log lines:\n%s\n", formatLogLines(s.LastLogLines())), overseerStatusLogFields(status)...) + } + + case line := <-cmd.Stdout: + s.processLogLine(line) + case line := <-cmd.Stderr: + s.processLogLine(line) + } + + if processTerminated { + s.Logger.Debug("command terminated but continue read loop to fully consume stdout/sdterr line channels", zap.Bool("buffer_empty", s.isBufferEmpty())) + if s.isBufferEmpty() { + return + } + } + } +} + +func overseerStatusLogFields(status overseer.Status) []zap.Field { + fields := []zap.Field{ + zap.String("command", status.Cmd), + zap.Int("exit_code", status.Exit), + } + + if status.Error != nil { + fields = append(fields, zap.String("error", status.Error.Error())) + } + + if status.PID != 0 { + fields = append(fields, zap.Int("pid", status.PID)) + } + + if status.Runtime > 0 { + fields = append(fields, zap.Duration("runtime", time.Duration(status.Runtime*float64(time.Second)))) + } + + if status.StartTs > 0 { + fields = append(fields, zap.Time("started_at", time.Unix(0, status.StartTs))) + } + + if status.StopTs > 0 { + fields = append(fields, zap.Time("stopped_at", time.Unix(0, status.StopTs))) + } + + return fields +} + +func formatLogLines(lines []string) string { + if len(lines) == 0 { + return "" + } + + formattedLines := make([]string, len(lines)) + for i, line := range lines { + formattedLines[i] = " " + line + } + + return strings.Join(formattedLines, "\n") +} + +func (s *Superviser) endLogPlugins() { + s.logPluginsLock.Lock() + defer s.logPluginsLock.Unlock() + + for _, plugin := range s.logPlugins { + s.Logger.Info("stopping plugin", 
zap.String("plugin_name", plugin.Name())) + plugin.Stop() + } + s.Logger.Info("all plugins closed") +} + +func (s *Superviser) processLogLine(line string) { + s.logPluginsLock.Lock() + defer s.logPluginsLock.Unlock() + + for _, plugin := range s.logPlugins { + plugin.LogLine(line) + } +} + +func (s *Superviser) hasToConsolePlugin() bool { + for _, plugin := range s.logPlugins { + if _, ok := plugin.(*logplugin.ToConsoleLogPlugin); ok { + return true + } + } + + return false +} diff --git a/node-manager/superviser/superviser_test.go b/node-manager/superviser/superviser_test.go new file mode 100644 index 0000000..1f628b2 --- /dev/null +++ b/node-manager/superviser/superviser_test.go @@ -0,0 +1,137 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package superviser
+
+import (
+	"os"
+	"testing"
+	"time"
+
+	logplugin "github.com/streamingfast/firehose-core/node-manager/log_plugin"
+	"github.com/streamingfast/logging"
+	"github.com/stretchr/testify/assert"
+	"go.uber.org/zap"
+)
+
+var infiniteScript = `
+	echo "Starting"
+	while true; do
+		sleep 0.25
+		echo "In loop"
+	done
+`
+
+var zlog = zap.NewNop()
+
+func init() {
+	if os.Getenv("DEBUG") != "" || os.Getenv("TRACE") == "true" {
+		zlog, _ = zap.NewDevelopment()
+		logging.Override(zlog)
+	}
+}
+
+var waitDefaultTimeout = 500 * time.Millisecond
+
+func TestSuperviser_NotRunningAfterCreation(t *testing.T) {
+	assert.Equal(t, false, testSuperviserInfinite().IsRunning())
+}
+
+func TestSuperviser_StartsCorrectly(t *testing.T) {
+	superviser := testSuperviserInfinite()
+	defer superviser.Stop()
+
+	lineChan := make(chan string)
+	superviser.RegisterLogPlugin(logplugin.LogPluginFunc(func(line string) {
+		lineChan <- line
+	}))
+
+	go superviser.Start()
+
+	waitForSuperviserTaskCompletion(superviser)
+	waitForOutput(t, lineChan, waitDefaultTimeout)
+
+	assert.Equal(t, true, superviser.IsRunning())
+}
+
+func TestSuperviser_CanBeRestartedCorrectly(t *testing.T) {
+	superviser := testSuperviserInfinite()
+	defer superviser.Stop()
+
+	lineChan := make(chan string)
+	superviser.RegisterLogPlugin(logplugin.LogPluginFunc(func(line string) {
+		lineChan <- line
+	}))
+
+	go superviser.Start()
+	waitForSuperviserTaskCompletion(superviser)
+	waitForOutput(t, lineChan, waitDefaultTimeout)
+
+	superviser.Stop()
+	assert.Equal(t, false, superviser.IsRunning())
+
+	go superviser.Start()
+	waitForSuperviserTaskCompletion(superviser)
+	waitForOutput(t, lineChan, waitDefaultTimeout)
+
+	assert.Equal(t, true, superviser.IsRunning())
+}
+
+func TestSuperviser_CapturesStdoutCorrectly(t *testing.T) {
+	superviser := testSuperviserSh("echo first; sleep 0.1; echo second")
+	defer superviser.Stop()
+
+	lineChan := make(chan string)
+	
superviser.RegisterLogPlugin(logplugin.LogPluginFunc(func(line string) { + lineChan <- line + })) + + go superviser.Start() + waitForSuperviserTaskCompletion(superviser) + + var lines []string + lines = append(lines, waitForOutput(t, lineChan, waitDefaultTimeout)) + lines = append(lines, waitForOutput(t, lineChan, waitDefaultTimeout)) + + assert.Equal(t, []string{"first", "second"}, lines) +} + +func testSuperviserBash(script string) *Superviser { + return New(zlog, "bash", []string{"-c", script}) +} + +func testSuperviserSh(script string) *Superviser { + return New(zlog, "sh", []string{"-c", script}) +} + +func testSuperviserInfinite() *Superviser { + return testSuperviserSh(infiniteScript) +} + +func waitForSuperviserTaskCompletion(superviser *Superviser) { + superviser.cmdLock.Lock() + superviser.cmdLock.Unlock() +} + +func waitForOutput(t *testing.T, lineChan chan string, timeout time.Duration) (line string) { + select { + case line = <-lineChan: + return + case <-time.After(timeout): + t.Error("no line seen before timeout") + } + + // Will fail before reaching this line + return "" +} diff --git a/node-manager/types.go b/node-manager/types.go new file mode 100644 index 0000000..a732be8 --- /dev/null +++ b/node-manager/types.go @@ -0,0 +1,25 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package node_manager + +import "github.com/streamingfast/bstream" + +type DeepMindDebuggable interface { + DebugDeepMind(enabled bool) +} + +type HeadBlockUpdater func(block *bstream.Block) error + +type OnBlockWritten func(block *bstream.Block) error diff --git a/node-manager/utils.go b/node-manager/utils.go new file mode 100644 index 0000000..90999db --- /dev/null +++ b/node-manager/utils.go @@ -0,0 +1,23 @@ +package node_manager + +import ( + "fmt" + "syscall" +) + +func AugmentStackSizeLimit() error { + // Set ulimit for stack + var rLimit syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_STACK, &rLimit) + if err != nil { + return fmt.Errorf("getting rlimit: %w", err) + } + rLimit.Cur = 67104768 + + err = syscall.Setrlimit(syscall.RLIMIT_STACK, &rLimit) + if err != nil { + return fmt.Errorf("setting rlimit: %w", err) + } + + return nil +} diff --git a/reader_node.go b/reader_node.go index f104a2c..a273ce2 100644 --- a/reader_node.go +++ b/reader_node.go @@ -13,13 +13,13 @@ import ( "github.com/streamingfast/bstream/blockstream" "github.com/streamingfast/cli" "github.com/streamingfast/dlauncher/launcher" - nm "github.com/streamingfast/firehose-core/nodemanager" + nodeManager "github.com/streamingfast/firehose-core/node-manager" + nodeManagerApp "github.com/streamingfast/firehose-core/node-manager/app/node_manager" + "github.com/streamingfast/firehose-core/node-manager/metrics" + reader "github.com/streamingfast/firehose-core/node-manager/mindreader" + "github.com/streamingfast/firehose-core/node-manager/operator" + sv "github.com/streamingfast/firehose-core/superviser" "github.com/streamingfast/logging" - nodeManager "github.com/streamingfast/node-manager" - nodeManagerApp "github.com/streamingfast/node-manager/app/node_manager" - "github.com/streamingfast/node-manager/metrics" - reader "github.com/streamingfast/node-manager/mindreader" - "github.com/streamingfast/node-manager/operator" pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" pbheadinfo 
"github.com/streamingfast/pbgo/sf/headinfo/v1" "github.com/streamingfast/snapshotter" @@ -118,8 +118,8 @@ func registerReaderNodeApp[B Block](chain *Chain[B]) { readinessMaxLatency, ) - superviser := nm.SupervisorFactory(chain.ExecutableName, nodePath, nodeArguments, appLogger) - superviser.RegisterLogPlugin(nm.NewNodeLogPlugin(debugFirehose)) + superviser := sv.SupervisorFactory(chain.ExecutableName, nodePath, nodeArguments, appLogger) + superviser.RegisterLogPlugin(sv.NewNodeLogPlugin(debugFirehose)) var bootstrapper operator.Bootstrapper if chain.ReaderNodeBootstrapperFactory != nil { diff --git a/reader_node_stdin.go b/reader_node_stdin.go index f775bc4..a06a728 100644 --- a/reader_node_stdin.go +++ b/reader_node_stdin.go @@ -18,11 +18,11 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/streamingfast/dlauncher/launcher" + nodeManager "github.com/streamingfast/firehose-core/node-manager" + nodeReaderStdinApp "github.com/streamingfast/firehose-core/node-manager/app/node_reader_stdin" + "github.com/streamingfast/firehose-core/node-manager/metrics" + "github.com/streamingfast/firehose-core/node-manager/mindreader" "github.com/streamingfast/logging" - nodeManager "github.com/streamingfast/node-manager" - nodeReaderStdinApp "github.com/streamingfast/node-manager/app/node_reader_stdin" - "github.com/streamingfast/node-manager/metrics" - "github.com/streamingfast/node-manager/mindreader" ) func registerReaderNodeStdinApp[B Block](chain *Chain[B]) { diff --git a/relayer.go b/relayer.go index c1da5bd..a13fe16 100644 --- a/relayer.go +++ b/relayer.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/streamingfast/dlauncher/launcher" - relayerApp "github.com/streamingfast/relayer/app/relayer" + "github.com/streamingfast/firehose-core/relayer/app/relayer" ) func registerRelayerApp() { @@ -23,7 +23,7 @@ func registerRelayerApp() { FactoryFunc: func(runtime *launcher.Runtime) (launcher.App, error) { sfDataDir := 
runtime.AbsDataDir - return relayerApp.New(&relayerApp.Config{ + return relayer.New(&relayer.Config{ SourcesAddr: viper.GetStringSlice("relayer-source"), OneBlocksURL: MustReplaceDataDir(sfDataDir, viper.GetString("common-one-block-store-url")), GRPCListenAddr: viper.GetString("relayer-grpc-listen-addr"), diff --git a/relayer/CHANGELOG.md b/relayer/CHANGELOG.md new file mode 100644 index 0000000..b3fb3c5 --- /dev/null +++ b/relayer/CHANGELOG.md @@ -0,0 +1,33 @@ +# Change log + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Uncommitted] + +### Removed +* `MaxDrift` config option removed +* `InitTime` config option removed + +### Changed +* "max-drift" mechanism modified to now detect "block hole", by expecting highest 'received' block never to be higher than highest 'sent' block + 1 (out of the forkable). +* upon detection of a "block hole", instead of shutting down, the process will simply restart joining from block files from where it left off. +* Now prints the whole config on start + + +### Added +* add SourceRequestBurst to config, allows requesting a burst to each of the source (useful when connecting to another relayer) + +## [v0.0.1] 2020-06-23 + +### Changed +* maxDriftTolerance feature now disabled if set to 0 +* now returns headinfo and Ready state before max-source-latency is passed +* add `min-start-offset` instead of default=200 +* `--listen-grpc-addr` now is `--grpc-listen-addr` + +## 2020-03-21 + +### Changed + +* License changed to Apache 2.0 diff --git a/relayer/LICENSE b/relayer/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/relayer/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/relayer/README.md b/relayer/README.md new file mode 100644 index 0000000..13a0526 --- /dev/null +++ b/relayer/README.md @@ -0,0 +1,66 @@ +# StreamingFast Relayer + +[![reference](https://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](https://pkg.go.dev/github.com/streamingfast/firehose-core/relayer) +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) + +The relayer process fans out and propagates blocks from instrumented +blockchain nodes, down to services, serving as a redundant and +highly-available access to streaming block & transaction data. +It is part of **[StreamingFast](https://github.com/streamingfast/streamingfast)**. 
+ +Design +The Relayer section of the official Firehose documentation provides additional information on its design details. + +https://firehose.streamingfast.io/concepts-and-architeceture/components#relayer + +Current implementations: + +* [**EOSIO on StreamingFast**](https://github.com/streamingfast/sf-eosio) +* [**Ethereum on StreamingFast**](https://github.com/streamingfast/sf-ethereum) + + +## Schema + +``` + Graph: + + [--------------] [-------------------] + [ Mindreader-1 ] [ Mindreader-2, ... ] + [--------------] [-------------------] + \ / + \ / + [-----------------] [-------------------] + [ OneBlocksSource ] [ MultiplexedSource ] + [-----------------] [-------------------] + \ / + [-------------] + [ ForkableHub ] (all blocks triggering StepNew) + [-------------] + | + (hub's single subscriber) + | + [-----------------------------------] + [ pipe Handler: Server.PushBlock ] + [-----------------------------------] + / \ + [-----------------] [---------------] + [ Buffer (dedupe) ]-->[ Subscriptions ] + [-----------------] [---------------] + +``` + +## Contributing + +**Issues and PR in this repo related strictly to the relayer functionalities** + +Report any protocol-specific issues in their +[respective repositories](https://github.com/streamingfast/streamingfast#protocols) + +**Please first refer to the general +[StreamingFast contribution guide](https://github.com/streamingfast/streamingfast/blob/master/CONTRIBUTING.md)**, +if you wish to contribute to this code base. + + +## License + +[Apache 2.0](LICENSE) diff --git a/relayer/app/relayer/app.go b/relayer/app/relayer/app.go new file mode 100644 index 0000000..00ee91f --- /dev/null +++ b/relayer/app/relayer/app.go @@ -0,0 +1,118 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package relayer + +import ( + "context" + "fmt" + "time" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/dmetrics" + "github.com/streamingfast/dstore" + "github.com/streamingfast/firehose-core/relayer" + "github.com/streamingfast/firehose-core/relayer/metrics" + "github.com/streamingfast/shutter" + "go.uber.org/zap" + pbhealth "google.golang.org/grpc/health/grpc_health_v1" +) + +var RelayerStartAborted = fmt.Errorf("getting start block aborted by relayer application terminating signal") + +type Config struct { + SourcesAddr []string + GRPCListenAddr string + SourceRequestBurst int + MaxSourceLatency time.Duration + OneBlocksURL string +} + +func (c *Config) ZapFields() []zap.Field { + return []zap.Field{ + zap.Strings("sources_addr", c.SourcesAddr), + zap.String("grpc_listen_addr", c.GRPCListenAddr), + zap.Int("source_request_burst", c.SourceRequestBurst), + zap.Duration("max_source_latency", c.MaxSourceLatency), + zap.String("one_blocks_url", c.OneBlocksURL), + } +} + +type App struct { + *shutter.Shutter + config *Config + + relayer *relayer.Relayer +} + +func New(config *Config) *App { + return &App{ + Shutter: shutter.New(), + config: config, + } +} + +func (a *App) Run() error { + dmetrics.Register(metrics.MetricSet) + + oneBlocksStore, err := dstore.NewDBinStore(a.config.OneBlocksURL) + if err != nil { + return fmt.Errorf("getting block store: %w", err) + } + + liveSourceFactory := bstream.SourceFactory(func(h bstream.Handler) bstream.Source { + return relayer.NewMultiplexedSource( + h, + a.config.SourcesAddr, + 
a.config.MaxSourceLatency, + a.config.SourceRequestBurst, + ) + }) + oneBlocksSourceFactory := bstream.SourceFromNumFactoryWithSkipFunc(func(num uint64, h bstream.Handler, skipFunc func(idSuffix string) bool) bstream.Source { + src, err := bstream.NewOneBlocksSource(num, oneBlocksStore, h, bstream.OneBlocksSourceWithSkipperFunc(skipFunc)) + if err != nil { + return nil + } + return src + }) + + zlog.Info("starting relayer", a.config.ZapFields()...) + a.relayer = relayer.NewRelayer( + liveSourceFactory, + oneBlocksSourceFactory, + a.config.GRPCListenAddr, + ) + + a.OnTerminating(a.relayer.Shutdown) + a.relayer.OnTerminated(a.Shutdown) + + a.relayer.Run() + return nil +} + +var emptyHealthCheckRequest = &pbhealth.HealthCheckRequest{} + +func (a *App) IsReady() bool { + if a.relayer == nil { + return false + } + + resp, err := a.relayer.Check(context.Background(), emptyHealthCheckRequest) + if err != nil { + zlog.Info("readiness check failed", zap.Error(err)) + return false + } + + return resp.Status == pbhealth.HealthCheckResponse_SERVING +} diff --git a/relayer/app/relayer/logging.go b/relayer/app/relayer/logging.go new file mode 100644 index 0000000..b68787c --- /dev/null +++ b/relayer/app/relayer/logging.go @@ -0,0 +1,21 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package relayer + +import ( + "github.com/streamingfast/logging" +) + +var zlog, _ = logging.PackageLogger("relayer", "github.com/streamingfast/firehose-core/relayer/app/relayer") diff --git a/relayer/healthz.go b/relayer/healthz.go new file mode 100644 index 0000000..fd3a406 --- /dev/null +++ b/relayer/healthz.go @@ -0,0 +1,46 @@ +package relayer + +import ( + "context" + "time" + + pbhealth "google.golang.org/grpc/health/grpc_health_v1" +) + +func (r *Relayer) Check(ctx context.Context, in *pbhealth.HealthCheckRequest) (*pbhealth.HealthCheckResponse, error) { + return &pbhealth.HealthCheckResponse{ + Status: r.healthStatus(), + }, nil +} + +func (r *Relayer) Watch(req *pbhealth.HealthCheckRequest, stream pbhealth.Health_WatchServer) error { + currentStatus := pbhealth.HealthCheckResponse_SERVICE_UNKNOWN + waitTime := 0 * time.Second + + for { + select { + case <-stream.Context().Done(): + return nil + case <-time.After(waitTime): + newStatus := r.healthStatus() + waitTime = 5 * time.Second + + if newStatus != currentStatus { + currentStatus = newStatus + + if err := stream.Send(&pbhealth.HealthCheckResponse{Status: currentStatus}); err != nil { + return err + } + } + } + } +} + +func (r *Relayer) healthStatus() pbhealth.HealthCheckResponse_ServingStatus { + status := pbhealth.HealthCheckResponse_NOT_SERVING + if r.ready { + status = pbhealth.HealthCheckResponse_SERVING + } + + return status +} diff --git a/relayer/logging.go b/relayer/logging.go new file mode 100644 index 0000000..e123d96 --- /dev/null +++ b/relayer/logging.go @@ -0,0 +1,21 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package relayer + +import ( + "github.com/streamingfast/logging" +) + +var zlog, ztrace = logging.PackageLogger("relayer", "github.com/streamingfast/firehose-core/relayer") diff --git a/relayer/metrics/metrics.go b/relayer/metrics/metrics.go new file mode 100644 index 0000000..42e662b --- /dev/null +++ b/relayer/metrics/metrics.go @@ -0,0 +1,25 @@ +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "github.com/streamingfast/dmetrics" +) + +var MetricSet = dmetrics.NewSet() + +var HeadBlockTimeDrift = MetricSet.NewHeadTimeDrift("relayer") +var HeadBlockNumber = MetricSet.NewHeadBlockNumber("relayer") +var AppReadiness = MetricSet.NewAppReadiness("relayer") diff --git a/relayer/relayer.go b/relayer/relayer.go new file mode 100644 index 0000000..0f15a95 --- /dev/null +++ b/relayer/relayer.go @@ -0,0 +1,150 @@ +// Copyright 2019 dfuse Platform Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package relayer + +import ( + "context" + "strings" + "time" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/bstream/blockstream" + "github.com/streamingfast/bstream/forkable" + "github.com/streamingfast/bstream/hub" + dgrpcfactory "github.com/streamingfast/dgrpc/server/factory" + "github.com/streamingfast/firehose-core/relayer/metrics" + "github.com/streamingfast/shutter" + pbhealth "google.golang.org/grpc/health/grpc_health_v1" +) + +const ( + getHeadInfoTimeout = 10 * time.Second +) + +type Relayer struct { + *shutter.Shutter + + grpcListenAddr string + liveSourceFactory bstream.SourceFactory + oneBlocksSourceFactory bstream.SourceFromNumFactoryWithSkipFunc + + hub *hub.ForkableHub + + ready bool + + blockStreamServer *hub.BlockstreamServer +} + +func NewRelayer( + liveSourceFactory bstream.SourceFactory, + oneBlocksSourceFactory bstream.SourceFromNumFactoryWithSkipFunc, + grpcListenAddr string, +) *Relayer { + r := &Relayer{ + Shutter: shutter.New(), + grpcListenAddr: grpcListenAddr, + liveSourceFactory: liveSourceFactory, + oneBlocksSourceFactory: oneBlocksSourceFactory, + } + + gs := dgrpcfactory.ServerFromOptions() + pbhealth.RegisterHealthServer(gs.ServiceRegistrar(), r) + + forkableHub := hub.NewForkableHub( + r.liveSourceFactory, + r.oneBlocksSourceFactory, + 10, + forkable.EnsureAllBlocksTriggerLongestChain(), // send every forked block too + 
forkable.WithFilters(bstream.StepNew), + forkable.WithFailOnUnlinkableBlocks(20, time.Minute), + ) + r.hub = forkableHub + gs.OnTerminated(r.Shutdown) + r.blockStreamServer = r.hub.NewBlockstreamServer(gs) + return r + +} + +func NewMultiplexedSource(handler bstream.Handler, sourceAddresses []string, maxSourceLatency time.Duration, sourceRequestBurst int) bstream.Source { + ctx := context.Background() + + var sourceFactories []bstream.SourceFactory + for _, u := range sourceAddresses { + + url := u // https://github.com/golang/go/wiki/CommonMistakes (url is given to the blockstream newSource) + sourceName := urlToLoggerName(url) + logger := zlog.Named("src").Named(sourceName) + sf := func(subHandler bstream.Handler) bstream.Source { + + gate := bstream.NewRealtimeGate(maxSourceLatency, subHandler, bstream.GateOptionWithLogger(logger)) + var upstreamHandler bstream.Handler + upstreamHandler = bstream.HandlerFunc(func(blk *bstream.Block, obj interface{}) error { + return gate.ProcessBlock(blk, &namedObj{ + Obj: obj, + Name: sourceName, + }) + }) + + src := blockstream.NewSource(ctx, url, int64(sourceRequestBurst), upstreamHandler, blockstream.WithLogger(logger), blockstream.WithRequester("relayer")) + return src + } + sourceFactories = append(sourceFactories, sf) + } + + return bstream.NewMultiplexedSource(sourceFactories, handler, bstream.MultiplexedSourceWithLogger(zlog)) +} + +func urlToLoggerName(url string) string { + return strings.TrimPrefix(strings.TrimPrefix(url, "dns:///"), ":") +} + +func pollMetrics(fh *hub.ForkableHub) { + for { + time.Sleep(time.Second * 2) + headNum, _, headTime, _, err := fh.HeadInfo() + if err != nil { + zlog.Info("cannot get head info yet") + continue + } + metrics.HeadBlockTimeDrift.SetBlockTime(headTime) + metrics.HeadBlockNumber.SetUint64(headNum) + } +} + +func (r *Relayer) Run() { + go r.hub.Run() + zlog.Info("waiting for hub to be ready...") + <-r.hub.Ready + go pollMetrics(r.hub) + + r.OnTerminating(func(e error) { + 
zlog.Info("closing block stream server") + r.blockStreamServer.Close() + }) + + r.blockStreamServer.Launch(r.grpcListenAddr) + + zlog.Info("relayer started") + r.ready = true + metrics.AppReadiness.SetReady() + + <-r.hub.Terminating() + r.Shutdown(r.hub.Err()) +} + +type namedObj struct { + Name string + Obj interface{} +} diff --git a/nodemanager/supervisor.go b/superviser/genericsupervisor.go similarity index 87% rename from nodemanager/supervisor.go rename to superviser/genericsupervisor.go index 2e7a92a..fa4e416 100644 --- a/nodemanager/supervisor.go +++ b/superviser/genericsupervisor.go @@ -1,11 +1,11 @@ -package nodemanager +package superviser import ( "strings" "github.com/ShinyTrinkets/overseer" - nodeManager "github.com/streamingfast/node-manager" - "github.com/streamingfast/node-manager/superviser" + nodeManager "github.com/streamingfast/firehose-core/node-manager" + "github.com/streamingfast/firehose-core/node-manager/superviser" "go.uber.org/zap" ) diff --git a/nodemanager/logging.go b/superviser/logging.go similarity index 84% rename from nodemanager/logging.go rename to superviser/logging.go index 8a54d93..5c79bfe 100644 --- a/nodemanager/logging.go +++ b/superviser/logging.go @@ -1,7 +1,7 @@ -package nodemanager +package superviser import ( - logplugin "github.com/streamingfast/node-manager/log_plugin" + logplugin "github.com/streamingfast/firehose-core/node-manager/log_plugin" ) // This file configures a logging reader that transforms log lines received from the blockchain process running diff --git a/nodemanager/test/type_test.pb.go b/test/type_test.pb.go similarity index 100% rename from nodemanager/test/type_test.pb.go rename to test/type_test.pb.go diff --git a/tools_download_from_firehose.go b/tools_download_from_firehose.go index 4f5a72b..4d44cfb 100644 --- a/tools_download_from_firehose.go +++ b/tools_download_from_firehose.go @@ -1,19 +1,8 @@ package firecore import ( - "context" - "fmt" - "io" - "strconv" - "time" - "github.com/spf13/cobra" 
- "github.com/streamingfast/bstream" - "github.com/streamingfast/dstore" - pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/anypb" ) func newToolsDownloadFromFirehoseCmd[B Block](chain *Chain[B], zlog *zap.Logger) *cobra.Command { @@ -35,111 +24,112 @@ func newToolsDownloadFromFirehoseCmd[B Block](chain *Chain[B], zlog *zap.Logger) func createToolsDownloadFromFirehoseE[B Block](chain *Chain[B], zlog *zap.Logger) func(cmd *cobra.Command, args []string) error { return func(cmd *cobra.Command, args []string) error { - ctx := context.Background() - - endpoint := args[0] - startBlock, err := strconv.ParseUint(args[1], 10, 64) - if err != nil { - return fmt.Errorf("parsing start block num: %w", err) - } - stopBlock, err := strconv.ParseUint(args[2], 10, 64) - if err != nil { - return fmt.Errorf("parsing stop block num: %w", err) - } - destFolder := args[3] - - firehoseClient, connClose, requestInfo, err := getFirehoseStreamClientFromCmd(cmd, zlog, endpoint, chain) - if err != nil { - return err - } - defer connClose() - - var retryDelay = time.Second * 4 - - store, err := dstore.NewDBinStore(destFolder) - if err != nil { - return err - } - - mergeWriter := &mergedBlocksWriter{ - store: store, - writerFactory: bstream.GetBlockWriterFactory, - tweakBlock: func(b *bstream.Block) (*bstream.Block, error) { return b, nil }, - logger: zlog, - } - - approximateLIBWarningIssued := false - var lastBlockID string - var lastBlockNum uint64 - for { - - request := &pbfirehose.Request{ - StartBlockNum: int64(startBlock), - StopBlockNum: stopBlock, - FinalBlocksOnly: true, - Cursor: requestInfo.Cursor, - } - - stream, err := firehoseClient.Blocks(ctx, request, requestInfo.GRPCCallOpts...) 
- if err != nil { - return fmt.Errorf("unable to start blocks stream: %w", err) - } - - for { - response, err := stream.Recv() - if err != nil { - if err == io.EOF { - return nil - } - - zlog.Error("stream encountered a remote error, going to retry", - zap.Duration("retry_delay", retryDelay), - zap.Error(err), - ) - <-time.After(retryDelay) - break - } - - block := chain.BlockFactory() - if err := anypb.UnmarshalTo(response.Block, block, proto.UnmarshalOptions{}); err != nil { - return fmt.Errorf("unmarshal response block: %w", err) - } - - if _, ok := block.(BlockLIBNumDerivable); !ok { - // We must wrap the block in a BlockEnveloppe and "provide" the LIB number as itself minus 1 since - // there is nothing we can do more here to obtain the value sadly. For chain where the LIB can be - // derived from the Block itself, this code does **not** run (so it will have the correct value) - if !approximateLIBWarningIssued { - approximateLIBWarningIssued = true - zlog.Warn("LIB number is approximated, it is not provided by the chain's Block model so we msut set it to block number minus 1 (which is kinda ok because only final blocks are retrieved in this download tool)") - } - - number := block.GetFirehoseBlockNumber() - libNum := number - 1 - if number <= bstream.GetProtocolFirstStreamableBlock { - libNum = number - } - - block = BlockEnveloppe{ - Block: block, - LIBNum: libNum, - } - } - - blk, err := chain.BlockEncoder.Encode(block) - if err != nil { - return fmt.Errorf("error decoding response to bstream block: %w", err) - } - if lastBlockID != "" && blk.PreviousId != lastBlockID { - return fmt.Errorf("got an invalid sequence of blocks: block %q has previousId %s, previous block %d had ID %q, this endpoint is serving blocks out of order", blk.String(), blk.PreviousId, lastBlockNum, lastBlockID) - } - lastBlockID = blk.Id - lastBlockNum = blk.Number - - if err := mergeWriter.ProcessBlock(blk, nil); err != nil { - return fmt.Errorf("write to blockwriter: %w", err) - } - } 
- } + panic("not implemented") + //ctx := context.Background() + // + //endpoint := args[0] + //startBlock, err := strconv.ParseUint(args[1], 10, 64) + //if err != nil { + // return fmt.Errorf("parsing start block num: %w", err) + //} + //stopBlock, err := strconv.ParseUint(args[2], 10, 64) + //if err != nil { + // return fmt.Errorf("parsing stop block num: %w", err) + //} + //destFolder := args[3] + // + //firehoseClient, connClose, requestInfo, err := getFirehoseStreamClientFromCmd(cmd, zlog, endpoint, chain) + //if err != nil { + // return err + //} + //defer connClose() + // + //var retryDelay = time.Second * 4 + // + //store, err := dstore.NewDBinStore(destFolder) + //if err != nil { + // return err + //} + // + //mergeWriter := &mergedBlocksWriter{ + // store: store, + // writerFactory: bstream.GetBlockWriterFactory, + // tweakBlock: func(b *bstream.Block) (*bstream.Block, error) { return b, nil }, + // logger: zlog, + //} + // + //approximateLIBWarningIssued := false + //var lastBlockID string + //var lastBlockNum uint64 + //for { + // + // request := &pbfirehose.Request{ + // StartBlockNum: int64(startBlock), + // StopBlockNum: stopBlock, + // FinalBlocksOnly: true, + // Cursor: requestInfo.Cursor, + // } + // + // stream, err := firehoseClient.Blocks(ctx, request, requestInfo.GRPCCallOpts...) 
+ // if err != nil { + // return fmt.Errorf("unable to start blocks stream: %w", err) + // } + // + // for { + // response, err := stream.Recv() + // if err != nil { + // if err == io.EOF { + // return nil + // } + // + // zlog.Error("stream encountered a remote error, going to retry", + // zap.Duration("retry_delay", retryDelay), + // zap.Error(err), + // ) + // <-time.After(retryDelay) + // break + // } + // + // block := chain.BlockFactory() + // if err := anypb.UnmarshalTo(response.Block, block, proto.UnmarshalOptions{}); err != nil { + // return fmt.Errorf("unmarshal response block: %w", err) + // } + // + // if _, ok := block.(BlockLIBNumDerivable); !ok { + // // We must wrap the block in a BlockEnveloppe and "provide" the LIB number as itself minus 1 since + // // there is nothing we can do more here to obtain the value sadly. For chain where the LIB can be + // // derived from the Block itself, this code does **not** run (so it will have the correct value) + // if !approximateLIBWarningIssued { + // approximateLIBWarningIssued = true + // zlog.Warn("LIB number is approximated, it is not provided by the chain's Block model so we msut set it to block number minus 1 (which is kinda ok because only final blocks are retrieved in this download tool)") + // } + // + // number := block.GetFirehoseBlockNumber() + // libNum := number - 1 + // if number <= bstream.GetProtocolFirstStreamableBlock { + // libNum = number + // } + // + // block = BlockEnveloppe{ + // Block: block, + // LIBNum: libNum, + // } + // } + // + // blk, err := chain.BlockEncoder.Encode(block) + // if err != nil { + // return fmt.Errorf("error decoding response to bstream block: %w", err) + // } + // if lastBlockID != "" && blk.PreviousId != lastBlockID { + // return fmt.Errorf("got an invalid sequence of blocks: block %q has previousId %s, previous block %d had ID %q, this endpoint is serving blocks out of order", blk.String(), blk.PreviousId, lastBlockNum, lastBlockID) + // } + // lastBlockID = 
blk.Id + // lastBlockNum = blk.Number + // + // if err := mergeWriter.ProcessBlock(blk, nil); err != nil { + // return fmt.Errorf("write to blockwriter: %w", err) + // } + // } + //} } } diff --git a/types.go b/types.go index 223654b..3fca748 100644 --- a/types.go +++ b/types.go @@ -2,10 +2,10 @@ package firecore import ( "fmt" - "slices" - "strings" "time" + "google.golang.org/protobuf/types/known/anypb" + "github.com/spf13/cobra" "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/transform" @@ -156,19 +156,21 @@ func EncodeBlock(b Block) (blk *bstream.Block, err error) { ) } + var blockPayload *anypb.Any + if err := proto.Unmarshal(content, blockPayload); err != nil { + return nil, fmt.Errorf("unmarshaling block payload: %w", err) + } + bstreamBlock := &bstream.Block{ - Id: b.GetFirehoseBlockID(), - Number: b.GetFirehoseBlockNumber(), - PreviousId: b.GetFirehoseBlockParentID(), - Timestamp: b.GetFirehoseBlockTime(), - LibNum: v.GetFirehoseBlockLIBNum(), - PayloadVersion: b.GetFirehoseBlockVersion(), - - // PayloadKind is not actually used anymore and should be left to UNKNOWN - PayloadKind: UnsafePayloadKind, + Id: b.GetFirehoseBlockID(), + Number: b.GetFirehoseBlockNumber(), + PreviousId: b.GetFirehoseBlockParentID(), + Timestamp: b.GetFirehoseBlockTime(), + LibNum: v.GetFirehoseBlockLIBNum(), + Payload: blockPayload, } - return bstream.GetBlockPayloadSetter(bstreamBlock, content) + return bstreamBlock, nil } type BlockIndexerFactory[B Block] func(indexStore dstore.Store, indexSize uint64) (BlockIndexer[B], error) @@ -185,39 +187,3 @@ type BlockIndexer[B Block] interface { // for the overall process. The returns [transform.Factory] will be used multiple times (one per request // requesting this transform). 
type BlockTransformerFactory func(indexStore dstore.Store, indexPossibleSizes []uint64) (*transform.Factory, error) - -// InitBstream initializes `bstream` with a generic block payload setter, reader, decoder and writer that are suitable -// for all chains. This is used in `firehose-core` as well as in testing method in respective tests to instantiate -// bstream. -// -// We have it in `firehose-core` to make it easier to change it's signature without needing to bump `bstream`. -func InitBstream(protocol string, protocolVersion int32, acceptedPayloadVersions []int32, blockFactory func() proto.Message) { - // We use the same code as in bstream.InitGeneric except that we override below the GetBlockDecoder version - bstream.InitGeneric(protocol, protocolVersion, blockFactory) - - bstream.GetBlockDecoder = bstream.BlockDecoderFunc(func(blk *bstream.Block) (any, error) { - // blk.Kind() is not used anymore, only the content type and version is checked at read time now - - if !slices.Contains(acceptedPayloadVersions, blk.Version()) { - acceptedVersions := make([]string, len(acceptedPayloadVersions)) - for i, v := range acceptedPayloadVersions { - acceptedVersions[i] = fmt.Sprintf("%d", v) - } - - return nil, fmt.Errorf("this decoder only knows about version(s) %s, got %d", strings.Join(acceptedVersions, ", "), blk.Version()) - } - - block := blockFactory() - payload, err := blk.Payload.Get() - if err != nil { - return nil, fmt.Errorf("getting payload: %w", err) - } - - err = proto.Unmarshal(payload, block) - if err != nil { - return nil, fmt.Errorf("unable to decode payload: %w", err) - } - - return block, nil - }) -} From 6faf4efc42d5637a01d72d810cbec1d9062aadd3 Mon Sep 17 00:00:00 2001 From: billettc Date: Thu, 16 Nov 2023 07:46:17 -0500 Subject: [PATCH 04/66] added firehose --- README.md | 4 +- chain.go | 4 +- cmd/firecore/main.go | 6 +- consolereader.go | 2 +- consolereader_test.go | 2 +- firehose.go | 12 +- firehose/CHANGELOG.md | 10 + firehose/CONVENTIONS.md | 18 
++ firehose/LICENSE | 202 ++++++++++++++ firehose/README.md | 19 ++ firehose/app/firehose/app.go | 220 +++++++++++++++ firehose/client/client.go | 78 ++++++ firehose/factory.go | 239 +++++++++++++++++ firehose/init_test.go | 12 + firehose/metrics/metrics.go | 17 ++ firehose/rate/limiter.go | 71 +++++ firehose/server/blocks.go | 298 +++++++++++++++++++++ firehose/server/errors.go | 19 ++ firehose/server/local.go | 77 ++++++ firehose/server/server.go | 183 +++++++++++++ firehose/server/v1proxy.go | 86 ++++++ firehose/tests/integration_test.go | 236 ++++++++++++++++ firehose/tests/stream_blocks_test.go | 103 +++++++ go.mod | 18 +- go.sum | 143 +++++++++- merger.go | 2 +- merger/CHANGELOG.md | 2 +- merger/README.md | 2 +- merger/app/merger/app.go | 4 +- merger/app/merger/logging.go | 2 +- merger/bundler.go | 4 +- merger/bundler_test.go | 2 +- merger/init_test.go | 2 +- merger/merger_io.go | 2 +- node-manager/app/node_manager/app.go | 8 +- node-manager/app/node_reader_stdin/app.go | 6 +- node-manager/mindreader/init_test.go | 2 +- node-manager/mindreader/mindreader.go | 2 +- node-manager/operator/operator.go | 2 +- node-manager/superviser.go | 2 +- node-manager/superviser/superviser.go | 4 +- node-manager/superviser/superviser_test.go | 2 +- reader_node.go | 12 +- reader_node_stdin.go | 8 +- relayer.go | 2 +- relayer/README.md | 2 +- relayer/app/relayer/app.go | 4 +- relayer/app/relayer/logging.go | 2 +- relayer/logging.go | 2 +- relayer/relayer.go | 2 +- substreams_tier1.go | 2 +- substreams_tier2.go | 2 +- superviser/genericsupervisor.go | 4 +- superviser/logging.go | 2 +- tools.go | 2 +- tools_check.go | 2 +- tools_checkmergedbatch.go | 2 +- tools_compare_blocks.go | 2 +- tools_firehose_client.go | 2 +- tools_fix_bloated_merged_blocks.go | 2 +- tools_print.go | 68 +++-- tools_unmerge_blocks.go | 2 +- 62 files changed, 2134 insertions(+), 121 deletions(-) create mode 100644 firehose/CHANGELOG.md create mode 100644 firehose/CONVENTIONS.md create mode 100644 
firehose/LICENSE create mode 100644 firehose/README.md create mode 100644 firehose/app/firehose/app.go create mode 100644 firehose/client/client.go create mode 100644 firehose/factory.go create mode 100644 firehose/init_test.go create mode 100644 firehose/metrics/metrics.go create mode 100644 firehose/rate/limiter.go create mode 100644 firehose/server/blocks.go create mode 100644 firehose/server/errors.go create mode 100644 firehose/server/local.go create mode 100644 firehose/server/server.go create mode 100644 firehose/server/v1proxy.go create mode 100644 firehose/tests/integration_test.go create mode 100644 firehose/tests/stream_blocks_test.go diff --git a/README.md b/README.md index c0bc006..d56d763 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ This repository contains all the boilerplate code that is required to maintain t Firehose maintenance cost comes from two sides. First, there is the chain integration that needs to be maintained. This is done within the chain's code directly by the chain's core developers. The second side of things is the maintenance of the Golang part of the Firehose stack. -Each chain creates its own Firehose Golang repository named `firehose-`. [Firehose-acme repository](https://github.com/streamingfast/firehose-acme) acts as an example of this. Firehose is composed of multiple smaller components that can be run independently and each of them has a set of CLI flags and other configuration parameters. +Each chain creates its own Firehose Golang repository named `firehose-`. [Firehose-acme repository](https://github.com/streamingfast/firehose-core/firehose-acme) acts as an example of this. Firehose is composed of multiple smaller components that can be run independently and each of them has a set of CLI flags and other configuration parameters. The initial "Acme" template we had contained a lot of boilerplate code to properly configure and run the Firehose Golang stack. 
This meant that if we needed to add a new feature that required a new flag or change a flag default value or any kind of improvements, chain integrators that were maintaining their `firehose-` repository were in the obligation of tracking changes made in `firehose-acme` and apply those back on their repository by hand. @@ -33,4 +33,4 @@ When bumping `firehose-core` to a breaking version, details of such upgrade will ### Build & CI -The build and CI files are maintained for now in https://github.com/streamingfast/firehose-acme directly and should be updated manually from time to time from there. +The build and CI files are maintained for now in https://github.com/streamingfast/firehose-core/firehose-acme directly and should be updated manually from time to time from there. diff --git a/chain.go b/chain.go index ee7ea68..fa8da67 100644 --- a/chain.go +++ b/chain.go @@ -10,8 +10,8 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/streamingfast/bstream" - "github.com/streamingfast/firehose-core/node-manager/mindreader" - "github.com/streamingfast/firehose-core/node-manager/operator" + "github.com/streamingfast/firehose-core/firehose/node-manager/mindreader" + "github.com/streamingfast/firehose-core/firehose/node-manager/operator" "github.com/streamingfast/logging" "go.uber.org/multierr" "go.uber.org/zap" diff --git a/cmd/firecore/main.go b/cmd/firecore/main.go index d1db1e2..bf548be 100644 --- a/cmd/firecore/main.go +++ b/cmd/firecore/main.go @@ -1,7 +1,7 @@ package main import ( - firecore "github.com/streamingfast/firehose-core" + firecore "github.com/streamingfast/firehose-core/firehose" pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" ) @@ -10,13 +10,13 @@ func main() { ShortName: "core", //used to compose the binary name LongName: "CORE", //only used to compose cmd title and description ExecutableName: "fire-core", //only used to set default value of reader-node-path, we should not provide a default value anymore ... 
- FullyQualifiedModule: "github.com/streamingfast/firehose-core", + FullyQualifiedModule: "github.com/streamingfast/firehose-core/firehose", Version: version, Protocol: "NEA", ProtocolVersion: 1, - ConsoleReaderFactory: supervisor.NewConsoleReader, + ConsoleReaderFactory: firecore.NewConsoleReader, Tools: &firecore.ToolsConfig[*pbbstream.Block]{}, }) diff --git a/consolereader.go b/consolereader.go index c2e3719..6456d1b 100644 --- a/consolereader.go +++ b/consolereader.go @@ -9,7 +9,7 @@ import ( "time" "github.com/streamingfast/bstream" - "github.com/streamingfast/firehose-core/node-manager/mindreader" + "github.com/streamingfast/firehose-core/firehose/node-manager/mindreader" "github.com/streamingfast/logging" "go.uber.org/zap" "google.golang.org/protobuf/proto" diff --git a/consolereader_test.go b/consolereader_test.go index 6c1350e..4d224de 100644 --- a/consolereader_test.go +++ b/consolereader_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/streamingfast/firehose-core/test" + "github.com/streamingfast/firehose-core/firehose/test" "github.com/stretchr/testify/require" "go.uber.org/zap" "google.golang.org/protobuf/proto" diff --git a/firehose.go b/firehose.go index 04ab241..066992b 100644 --- a/firehose.go +++ b/firehose.go @@ -12,8 +12,8 @@ import ( discoveryservice "github.com/streamingfast/dgrpc/server/discovery-service" "github.com/streamingfast/dlauncher/launcher" "github.com/streamingfast/dmetrics" - firehoseApp "github.com/streamingfast/firehose/app/firehose" - firehoseServer "github.com/streamingfast/firehose/server" + "github.com/streamingfast/firehose-core/firehose/firehose/app/firehose" + "github.com/streamingfast/firehose-core/firehose/firehose/server" "github.com/streamingfast/logging" ) @@ -76,15 +76,15 @@ func registerFirehoseApp[B Block](chain *Chain[B]) { registry.Register(transformer) } - var serverOptions []firehoseServer.Option + var serverOptions []server.Option limiterSize := viper.GetInt("firehose-rate-limit-bucket-size") 
limiterRefillRate := viper.GetDuration("firehose-rate-limit-bucket-fill-rate") if limiterSize > 0 { - serverOptions = append(serverOptions, firehoseServer.WithLeakyBucketLimiter(limiterSize, limiterRefillRate)) + serverOptions = append(serverOptions, server.WithLeakyBucketLimiter(limiterSize, limiterRefillRate)) } - return firehoseApp.New(appLogger, &firehoseApp.Config{ + return firehose.New(appLogger, &firehose.Config{ MergedBlocksStoreURL: mergedBlocksStoreURL, OneBlocksStoreURL: oneBlocksStoreURL, ForkedBlocksStoreURL: forkedBlocksStoreURL, @@ -93,7 +93,7 @@ func registerFirehoseApp[B Block](chain *Chain[B]) { GRPCShutdownGracePeriod: 1 * time.Second, ServiceDiscoveryURL: serviceDiscoveryURL, ServerOptions: serverOptions, - }, &firehoseApp.Modules{ + }, &firehose.Modules{ Authenticator: authenticator, HeadTimeDriftMetric: headTimeDriftmetric, HeadBlockNumberMetric: headBlockNumMetric, diff --git a/firehose/CHANGELOG.md b/firehose/CHANGELOG.md new file mode 100644 index 0000000..6198172 --- /dev/null +++ b/firehose/CHANGELOG.md @@ -0,0 +1,10 @@ +# Change log + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## Unreleased + +# [v0.1.0] 2021-01-18 + +* Initial release. diff --git a/firehose/CONVENTIONS.md b/firehose/CONVENTIONS.md new file mode 100644 index 0000000..f6dad7c --- /dev/null +++ b/firehose/CONVENTIONS.md @@ -0,0 +1,18 @@ +# Coding conventions + +In general, this project adheres to all the standard Go conventions, +and is constantly on the look to make sure we have a uniform and +coherent codebase. + +Here are things that might be useful to clarify nonetheless: + +## Error management + +* All `fmt.Errorf()` ought to start with a lowercase letter, with `:` + separating elements, from broadest to most refined. + +## Logging + +* Logging is done through the `zap` library. 
All developer-centric logs ought to start with a lowercase, and provide sufficient context to aid in debugging. +* Assume systems log at `Info` level by default, enabling `Debug` when needed (at runtime through port :1065 <- logs, you read that l33t?). +* Assume systems trigger some sort of alerting when `Warn` and `Error` level errors are triggered. diff --git a/firehose/LICENSE b/firehose/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/firehose/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/firehose/README.md b/firehose/README.md new file mode 100644 index 0000000..e6d2049 --- /dev/null +++ b/firehose/README.md @@ -0,0 +1,19 @@ +# StreamingFast Firehose +[![reference](https://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](https://pkg.go.dev/github.com/streamingfast/firehose-core/firehose) +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) + +A firehose-style stream of streamingfast Blocks app wrapper as part of **[StreamingFast](https://github.com/streamingfast/streamingfast)**. + +## Contributing +**Issues and PR in this repo related strictly to the core firehose service.** + +Report any protocol-specific issues in their +[respective repositories](https://github.com/streamingfast/streamingfast#protocols) + +**Please first refer to the general +[StreamingFast contribution guide](https://github.com/streamingfast/streamingfast/blob/master/CONTRIBUTING.md)**, +if you wish to contribute to this code base. + +## License + +[Apache 2.0](LICENSE) diff --git a/firehose/app/firehose/app.go b/firehose/app/firehose/app.go new file mode 100644 index 0000000..62a04dd --- /dev/null +++ b/firehose/app/firehose/app.go @@ -0,0 +1,220 @@ +// Copyright 2020 dfuse Platform Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firehose + +import ( + "context" + "fmt" + "net/url" + "time" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/bstream/blockstream" + "github.com/streamingfast/bstream/hub" + "github.com/streamingfast/bstream/transform" + "github.com/streamingfast/dauth" + dgrpcserver "github.com/streamingfast/dgrpc/server" + "github.com/streamingfast/dmetrics" + "github.com/streamingfast/dstore" + "github.com/streamingfast/firehose-core/firehose/firehose" + "github.com/streamingfast/firehose-core/firehose/firehose/metrics" + "github.com/streamingfast/firehose-core/firehose/firehose/server" + "github.com/streamingfast/shutter" + "go.uber.org/atomic" + "go.uber.org/zap" +) + +type Config struct { + MergedBlocksStoreURL string + OneBlocksStoreURL string + ForkedBlocksStoreURL string + BlockStreamAddr string // gRPC endpoint to get real-time blocks, can be "" in which live streams is disabled + GRPCListenAddr string // gRPC address where this app will listen to + GRPCShutdownGracePeriod time.Duration // The duration we allow for gRPC connections to terminate gracefully prior forcing shutdown + ServiceDiscoveryURL *url.URL + ServerOptions []server.Option +} + +type RegisterServiceExtensionFunc func(server dgrpcserver.Server, + mergedBlocksStore dstore.Store, + forkedBlocksStore dstore.Store, // this can be nil here + forkableHub *hub.ForkableHub, + logger *zap.Logger) + +type Modules struct { + // 
Required dependencies + Authenticator dauth.Authenticator + HeadTimeDriftMetric *dmetrics.HeadTimeDrift + HeadBlockNumberMetric *dmetrics.HeadBlockNum + TransformRegistry *transform.Registry + RegisterServiceExtension RegisterServiceExtensionFunc + CheckPendingShutdown func() bool +} + +type App struct { + *shutter.Shutter + config *Config + modules *Modules + logger *zap.Logger + isReady *atomic.Bool +} + +func New(logger *zap.Logger, config *Config, modules *Modules) *App { + return &App{ + Shutter: shutter.New(), + config: config, + modules: modules, + logger: logger, + + isReady: atomic.NewBool(false), + } +} + +func (a *App) Run() error { + dmetrics.Register(metrics.Metricset) + + a.logger.Info("running firehose", zap.Reflect("config", a.config)) + if err := a.config.Validate(); err != nil { + return fmt.Errorf("invalid app config: %w", err) + } + + mergedBlocksStore, err := dstore.NewDBinStore(a.config.MergedBlocksStoreURL) + if err != nil { + return fmt.Errorf("failed setting up block store from url %q: %w", a.config.MergedBlocksStoreURL, err) + } + + oneBlocksStore, err := dstore.NewDBinStore(a.config.OneBlocksStoreURL) + if err != nil { + return fmt.Errorf("failed setting up block store from url %q: %w", a.config.OneBlocksStoreURL, err) + } + + // set to empty store interface if URL is "" + var forkedBlocksStore dstore.Store + if a.config.ForkedBlocksStoreURL != "" { + forkedBlocksStore, err = dstore.NewDBinStore(a.config.ForkedBlocksStoreURL) + if err != nil { + return fmt.Errorf("failed setting up block store from url %q: %w", a.config.ForkedBlocksStoreURL, err) + } + } + + withLive := a.config.BlockStreamAddr != "" + + var forkableHub *hub.ForkableHub + + if withLive { + liveSourceFactory := bstream.SourceFactory(func(h bstream.Handler) bstream.Source { + + return blockstream.NewSource( + context.Background(), + a.config.BlockStreamAddr, + 2, + bstream.HandlerFunc(func(blk *bstream.Block, obj interface{}) error { + 
a.modules.HeadBlockNumberMetric.SetUint64(blk.Num()) + a.modules.HeadTimeDriftMetric.SetBlockTime(blk.Time()) + return h.ProcessBlock(blk, obj) + }), + blockstream.WithRequester("firehose"), + ) + }) + + oneBlocksSourceFactory := bstream.SourceFromNumFactoryWithSkipFunc(func(num uint64, h bstream.Handler, skipFunc func(string) bool) bstream.Source { + src, err := bstream.NewOneBlocksSource(num, oneBlocksStore, h, bstream.OneBlocksSourceWithSkipperFunc(skipFunc)) + if err != nil { + return nil + } + return src + }) + + forkableHub = hub.NewForkableHub(liveSourceFactory, oneBlocksSourceFactory, 500) + forkableHub.OnTerminated(a.Shutdown) + + go forkableHub.Run() + } + + streamFactory := firehose.NewStreamFactory( + mergedBlocksStore, + forkedBlocksStore, + forkableHub, + a.modules.TransformRegistry, + ) + + blockGetter := firehose.NewBlockGetter(mergedBlocksStore, forkedBlocksStore, forkableHub) + + firehoseServer := server.New( + a.modules.TransformRegistry, + streamFactory, + blockGetter, + a.logger, + a.modules.Authenticator, + a.IsReady, + a.config.GRPCListenAddr, + a.config.ServiceDiscoveryURL, + a.config.ServerOptions..., + ) + + a.OnTerminating(func(_ error) { + firehoseServer.Shutdown(a.config.GRPCShutdownGracePeriod) + }) + firehoseServer.OnTerminated(a.Shutdown) + + if a.modules.RegisterServiceExtension != nil { + a.modules.RegisterServiceExtension( + firehoseServer.Server, + mergedBlocksStore, + forkedBlocksStore, + forkableHub, + a.logger) + } + + go func() { + if withLive { + a.logger.Info("waiting until hub is real-time synced") + select { + case <-forkableHub.Ready: + metrics.AppReadiness.SetReady() + case <-a.Terminating(): + return + } + } + + a.logger.Info("launching gRPC firehoseServer", zap.Bool("live_support", withLive)) + a.isReady.CAS(false, true) + firehoseServer.Launch() + }() + + return nil +} + +// IsReady return `true` if the apps is ready to accept requests, `false` is returned +// otherwise. 
+func (a *App) IsReady(ctx context.Context) bool { + if a.IsTerminating() { + return false + } + if a.modules.CheckPendingShutdown != nil && a.modules.CheckPendingShutdown() { + return false + } + if !a.modules.Authenticator.Ready(ctx) { + return false + } + + return a.isReady.Load() +} + +// Validate inspects itself to determine if the current config is valid according to +// Firehose rules. +func (config *Config) Validate() error { + return nil +} diff --git a/firehose/client/client.go b/firehose/client/client.go new file mode 100644 index 0000000..bc0d1d5 --- /dev/null +++ b/firehose/client/client.go @@ -0,0 +1,78 @@ +package client + +import ( + "crypto/tls" + "fmt" + + "github.com/streamingfast/dgrpc" + pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" + "golang.org/x/oauth2" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/credentials/oauth" +) + +// firehoseClient, closeFunc, grpcCallOpts, err := NewFirehoseClient(endpoint, jwt, insecure, plaintext) +// defer closeFunc() +// stream, err := firehoseClient.Blocks(context.Background(), request, grpcCallOpts...) 
+func NewFirehoseClient(endpoint, jwt string, useInsecureTSLConnection, usePlainTextConnection bool) (cli pbfirehose.StreamClient, closeFunc func() error, callOpts []grpc.CallOption, err error) { + skipAuth := jwt == "" || usePlainTextConnection + + if useInsecureTSLConnection && usePlainTextConnection { + return nil, nil, nil, fmt.Errorf("option --insecure and --plaintext are mutually exclusive, they cannot be both specified at the same time") + } + + var dialOptions []grpc.DialOption + switch { + case usePlainTextConnection: + dialOptions = []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())} + + case useInsecureTSLConnection: + dialOptions = []grpc.DialOption{grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}))} + } + + conn, err := dgrpc.NewExternalClient(endpoint, dialOptions...) + if err != nil { + return nil, nil, nil, fmt.Errorf("unable to create external gRPC client: %w", err) + } + closeFunc = conn.Close + cli = pbfirehose.NewStreamClient(conn) + + if !skipAuth { + credentials := oauth.NewOauthAccess(&oauth2.Token{AccessToken: jwt, TokenType: "Bearer"}) + callOpts = append(callOpts, grpc.PerRPCCredentials(credentials)) + } + + return +} + +func NewFirehoseFetchClient(endpoint, jwt string, useInsecureTSLConnection, usePlainTextConnection bool) (cli pbfirehose.FetchClient, closeFunc func() error, err error) { + + if useInsecureTSLConnection && usePlainTextConnection { + return nil, nil, fmt.Errorf("option --insecure and --plaintext are mutually exclusive, they cannot be both specified at the same time") + } + + var dialOptions []grpc.DialOption + switch { + case usePlainTextConnection: + dialOptions = []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())} + + case useInsecureTSLConnection: + dialOptions = []grpc.DialOption{grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}))} + } + + if jwt != "" && !usePlainTextConnection { + credentials := 
oauth.NewOauthAccess(&oauth2.Token{AccessToken: jwt, TokenType: "Bearer"}) + dialOptions = append(dialOptions, grpc.WithPerRPCCredentials(credentials)) + } + + conn, err := dgrpc.NewExternalClient(endpoint, dialOptions...) + if err != nil { + return nil, nil, fmt.Errorf("unable to create external gRPC client: %w", err) + } + closeFunc = conn.Close + cli = pbfirehose.NewFetchClient(conn) + + return +} diff --git a/firehose/factory.go b/firehose/factory.go new file mode 100644 index 0000000..b5a38e0 --- /dev/null +++ b/firehose/factory.go @@ -0,0 +1,239 @@ +package firehose + +import ( + "context" + "errors" + "fmt" + + "github.com/streamingfast/dauth" + "github.com/streamingfast/derr" + "github.com/streamingfast/dmetering" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/bstream/hub" + "github.com/streamingfast/bstream/stream" + "github.com/streamingfast/bstream/transform" + "github.com/streamingfast/dstore" + pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// StreamMergedBlocksPreprocThreads defines the number of threads +// that the filesource is allowed to use PER FILE. 
Used for decoding +// bstream blocks to protobuf and applying other transforms +var StreamMergedBlocksPreprocThreads = 25 + +var bstreamToProtocolPreprocFunc = func(blk *bstream.Block) (interface{}, error) { + return blk.ToProtocol(), nil +} + +type BlockGetter struct { + mergedBlocksStore dstore.Store + forkedBlocksStore dstore.Store + hub *hub.ForkableHub +} + +func NewBlockGetter( + mergedBlocksStore dstore.Store, + forkedBlocksStore dstore.Store, + hub *hub.ForkableHub, +) *BlockGetter { + return &BlockGetter{ + mergedBlocksStore: mergedBlocksStore, + forkedBlocksStore: forkedBlocksStore, + hub: hub, + } +} + +func (g *BlockGetter) Get( + ctx context.Context, + num uint64, + id string, + logger *zap.Logger) (out *bstream.Block, err error) { + + id = bstream.NormalizeBlockID(id) + reqLogger := logger.With( + zap.Uint64("num", num), + zap.String("id", id), + ) + + // check for block in live segment: Hub + if g.hub != nil && num > g.hub.LowestBlockNum() { + if blk := g.hub.GetBlock(num, id); blk != nil { + reqLogger.Info("single block request", zap.String("source", "hub"), zap.Bool("found", true)) + return blk, nil + } + reqLogger.Info("single block request", zap.String("source", "hub"), zap.Bool("found", false)) + return nil, status.Error(codes.NotFound, "live block not found in hub") + } + + mergedBlocksStore := g.mergedBlocksStore + if clonable, ok := mergedBlocksStore.(dstore.Clonable); ok { + var err error + mergedBlocksStore, err = clonable.Clone(ctx) + if err != nil { + return nil, err + } + mergedBlocksStore.SetMeter(dmetering.GetBytesMeter(ctx)) + } + + // check for block in mergedBlocksStore + err = derr.RetryContext(ctx, 3, func(ctx context.Context) error { + blk, err := bstream.FetchBlockFromMergedBlocksStore(ctx, num, mergedBlocksStore) + if err != nil { + if errors.Is(err, dstore.ErrNotFound) { + return derr.NewFatalError(err) + } + return err + } + if id == "" || blk.Id == id { + reqLogger.Info("single block request", zap.String("source", 
"merged_blocks"), zap.Bool("found", true)) + out = blk + return nil + } + return derr.NewFatalError(fmt.Errorf("wrong block: found %s, expecting %s", blk.Id, id)) + }) + if out != nil { + return out, nil + } + + // check for block in forkedBlocksStore + if g.forkedBlocksStore != nil { + forkedBlocksStore := g.forkedBlocksStore + if clonable, ok := forkedBlocksStore.(dstore.Clonable); ok { + var err error + forkedBlocksStore, err = clonable.Clone(ctx) + if err != nil { + return nil, err + } + forkedBlocksStore.SetMeter(dmetering.GetBytesMeter(ctx)) + } + + if blk, _ := bstream.FetchBlockFromOneBlockStore(ctx, num, id, forkedBlocksStore); blk != nil { + reqLogger.Info("single block request", zap.String("source", "forked_blocks"), zap.Bool("found", true)) + return blk, nil + } + } + + reqLogger.Info("single block request", zap.Bool("found", false), zap.Error(err)) + return nil, status.Error(codes.NotFound, "block not found in files") +} + +type StreamFactory struct { + mergedBlocksStore dstore.Store + forkedBlocksStore dstore.Store + hub *hub.ForkableHub + transformRegistry *transform.Registry +} + +func NewStreamFactory( + mergedBlocksStore dstore.Store, + forkedBlocksStore dstore.Store, + hub *hub.ForkableHub, + transformRegistry *transform.Registry, +) *StreamFactory { + return &StreamFactory{ + mergedBlocksStore: mergedBlocksStore, + forkedBlocksStore: forkedBlocksStore, + hub: hub, + transformRegistry: transformRegistry, + } +} + +func (sf *StreamFactory) New( + ctx context.Context, + handler bstream.Handler, + request *pbfirehose.Request, + decodeBlock bool, + logger *zap.Logger) (*stream.Stream, error) { + + reqLogger := logger.With( + zap.Int64("req_start_block", request.StartBlockNum), + zap.String("req_cursor", request.Cursor), + zap.Uint64("req_stop_block", request.StopBlockNum), + zap.Bool("final_blocks_only", request.FinalBlocksOnly), + ) + + options := []stream.Option{ + stream.WithStopBlock(request.StopBlockNum), + } + + preprocFunc, blockIndexProvider, 
desc, err := sf.transformRegistry.BuildFromTransforms(request.Transforms) + if err != nil { + reqLogger.Error("cannot process incoming blocks request transforms", zap.Error(err)) + return nil, fmt.Errorf("building from transforms: %w", err) + } + if preprocFunc != nil { + options = append(options, stream.WithPreprocessFunc(preprocFunc, StreamMergedBlocksPreprocThreads)) + } else if decodeBlock { + options = append(options, stream.WithPreprocessFunc(bstreamToProtocolPreprocFunc, StreamMergedBlocksPreprocThreads)) // decoding bstream in parallel, faster + } + if blockIndexProvider != nil { + reqLogger = reqLogger.With(zap.Bool("with_index_provider", true)) + } + if desc != "" { + reqLogger = reqLogger.With(zap.String("transform_desc", desc)) + } + options = append(options, stream.WithLogger(logger)) // stream won't have the full reqLogger, use the traceID to connect them together + + if blockIndexProvider != nil { + options = append(options, stream.WithBlockIndexProvider(blockIndexProvider)) + } + + if request.FinalBlocksOnly { + options = append(options, stream.WithFinalBlocksOnly()) + } + + var fields []zap.Field + auth := dauth.FromContext(ctx) + if auth != nil { + fields = append(fields, + zap.String("api_key_id", auth.APIKeyID()), + zap.String("user_id", auth.UserID()), + zap.String("real_ip", auth.RealIP()), + ) + } + + reqLogger.Info("processing incoming blocks request", fields...) 
+ + if request.Cursor != "" { + cur, err := bstream.CursorFromOpaque(request.Cursor) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid start cursor %q: %s", request.Cursor, err) + } + + options = append(options, stream.WithCursor(cur)) + } + + forkedBlocksStore := sf.forkedBlocksStore + if clonable, ok := forkedBlocksStore.(dstore.Clonable); ok { + var err error + forkedBlocksStore, err = clonable.Clone(ctx) + if err != nil { + return nil, err + } + forkedBlocksStore.SetMeter(dmetering.GetBytesMeter(ctx)) + } + + mergedBlocksStore := sf.mergedBlocksStore + if clonable, ok := mergedBlocksStore.(dstore.Clonable); ok { + var err error + mergedBlocksStore, err = clonable.Clone(ctx) + if err != nil { + return nil, err + } + mergedBlocksStore.SetMeter(dmetering.GetBytesMeter(ctx)) + } + + str := stream.New( + forkedBlocksStore, + mergedBlocksStore, + sf.hub, + request.StartBlockNum, + handler, + options...) + + return str, nil +} diff --git a/firehose/init_test.go b/firehose/init_test.go new file mode 100644 index 0000000..7bd0c75 --- /dev/null +++ b/firehose/init_test.go @@ -0,0 +1,12 @@ +package firehose + +import ( + "github.com/streamingfast/bstream" + "github.com/streamingfast/logging" +) + +func init() { + logging.InstantiateLoggers() + + bstream.GetBlockReaderFactory = bstream.TestBlockReaderFactory +} diff --git a/firehose/metrics/metrics.go b/firehose/metrics/metrics.go new file mode 100644 index 0000000..ac1c146 --- /dev/null +++ b/firehose/metrics/metrics.go @@ -0,0 +1,17 @@ +package metrics + +import ( + "github.com/streamingfast/dmetrics" +) + +var Metricset = dmetrics.NewSet() + +var AppReadiness = Metricset.NewAppReadiness("firehose") +var ActiveRequests = Metricset.NewGauge("firehose_active_requests", "Number of active requests") +var RequestCounter = Metricset.NewCounter("firehose_requests_counter", "Request count") + +var ActiveSubstreams = Metricset.NewGauge("firehose_active_substreams", "Number of active substreams 
requests") +var SubstreamsCounter = Metricset.NewCounter("firehose_substreams_counter", "Substreams requests count") + +// var CurrentListeners = Metricset.NewGaugeVec("current_listeners", []string{"req_type"}, "...") +// var TimedOutPushingTrxCount = Metricset.NewCounterVec("something", []string{"guarantee"}, "Number of requests for push_transaction timed out while submitting") diff --git a/firehose/rate/limiter.go b/firehose/rate/limiter.go new file mode 100644 index 0000000..4ff4e7b --- /dev/null +++ b/firehose/rate/limiter.go @@ -0,0 +1,71 @@ +package rate + +import ( + "context" + "fmt" + "time" +) + +type Limiter interface { + Take(ctx context.Context, id string, method string) (allow bool) + Return() + String() string +} + +type token bool + +type leakyBucketLimiter struct { + tokens chan token + + dripInterval time.Duration +} + +func NewLeakyBucketLimiter(size int, dripInterval time.Duration) Limiter { + tks := make(chan token, size) + for i := 0; i < size; i++ { + tks <- token(true) + } + + go func() { + for { + select { + case <-time.After(dripInterval): + select { + case tks <- token(true): + // + default: + // + } + } + } + }() + + return &leakyBucketLimiter{ + tokens: tks, + dripInterval: dripInterval, + } +} + +func (l *leakyBucketLimiter) Take(ctx context.Context, id string, method string) (allow bool) { + select { + case <-l.tokens: + return true + case <-ctx.Done(): + return false + default: + return false + } +} + +func (l *leakyBucketLimiter) Return() { + select { + case l.tokens <- token(true): + // + default: + // + } +} + +func (l *leakyBucketLimiter) String() string { + return fmt.Sprintf("leaky-bucket-limiter(len=%d, cap=%d, drip-interval=%s)", len(l.tokens), cap(l.tokens), l.dripInterval) +} diff --git a/firehose/server/blocks.go b/firehose/server/blocks.go new file mode 100644 index 0000000..3aeb0cb --- /dev/null +++ b/firehose/server/blocks.go @@ -0,0 +1,298 @@ +package server + +import ( + "context" + "errors" + "fmt" + "os" + "time" + 
+ "github.com/streamingfast/bstream" + "github.com/streamingfast/bstream/stream" + "github.com/streamingfast/dauth" + "github.com/streamingfast/dmetering" + "github.com/streamingfast/firehose-core/firehose/firehose/metrics" + "github.com/streamingfast/logging" + pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +func (s *Server) Block(ctx context.Context, request *pbfirehose.SingleBlockRequest) (*pbfirehose.SingleBlockResponse, error) { + var blockNum uint64 + var blockHash string + switch ref := request.Reference.(type) { + case *pbfirehose.SingleBlockRequest_BlockHashAndNumber_: + blockNum = ref.BlockHashAndNumber.Num + blockHash = ref.BlockHashAndNumber.Hash + case *pbfirehose.SingleBlockRequest_Cursor_: + cur, err := bstream.CursorFromOpaque(ref.Cursor.Cursor) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + blockNum = cur.Block.Num() + blockHash = cur.Block.ID() + case *pbfirehose.SingleBlockRequest_BlockNumber_: + blockNum = ref.BlockNumber.Num + } + + blk, err := s.blockGetter.Get(ctx, blockNum, blockHash, s.logger) + if err != nil { + if _, ok := status.FromError(err); ok { + return nil, err + } + return nil, status.Error(codes.Internal, err.Error()) + } + if blk == nil { + return nil, status.Errorf(codes.NotFound, "block %s not found", bstream.NewBlockRef(blockHash, blockNum)) + } + + return &pbfirehose.SingleBlockResponse{ + Block: blk.Payload, + }, nil +} + +func (s *Server) Blocks(request *pbfirehose.Request, streamSrv pbfirehose.Stream_BlocksServer) error { + ctx := streamSrv.Context() + metrics.RequestCounter.Inc() + + logger := logging.Logger(ctx, s.logger) + + if s.rateLimiter != nil { + rlCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + if allow := 
s.rateLimiter.Take(rlCtx, "", "Blocks"); !allow { + <-time.After(time.Millisecond * 500) // force a minimal backoff + return status.Error(codes.Unavailable, "rate limit exceeded") + } else { + defer s.rateLimiter.Return() + } + } + + metrics.ActiveRequests.Inc() + defer metrics.ActiveRequests.Dec() + + if os.Getenv("FIREHOSE_SEND_HOSTNAME") != "" { + hostname, err := os.Hostname() + if err != nil { + hostname = "unknown" + logger.Warn("cannot determine hostname, using 'unknown'", zap.Error(err)) + } + md := metadata.New(map[string]string{"hostname": hostname}) + if err := streamSrv.SendHeader(md); err != nil { + logger.Warn("cannot send metadata header", zap.Error(err)) + } + } + + isLiveBlock := func(step pbfirehose.ForkStep) bool { + if step == pbfirehose.ForkStep_STEP_NEW { + return true + } + + return false + } + + var blockCount uint64 + handlerFunc := bstream.HandlerFunc(func(block *bstream.Block, obj interface{}) error { + blockCount++ + cursorable := obj.(bstream.Cursorable) + cursor := cursorable.Cursor() + + stepable := obj.(bstream.Stepable) + step := stepable.Step() + + wrapped := obj.(bstream.ObjectWrapper) + obj = wrapped.WrappedObject() + if obj == nil { + obj = block.Payload + } + + protoStep, skip := stepToProto(step, request.FinalBlocksOnly) + if skip { + return nil + } + + resp := &pbfirehose.Response{ + Step: protoStep, + Cursor: cursor.ToOpaque(), + } + + switch v := obj.(type) { + case *anypb.Any: + resp.Block = v + break + case proto.Message: + cnt, err := anypb.New(v) + if err != nil { + return fmt.Errorf("to any: %w", err) + } + resp.Block = cnt + default: + // this can be the out + return fmt.Errorf("unknown object type %t, cannot marshal to protobuf Any", v) + } + + if s.postHookFunc != nil { + s.postHookFunc(ctx, resp) + } + start := time.Now() + err := streamSrv.Send(resp) + if err != nil { + logger.Info("stream send error", zap.Stringer("block", block), zap.Error(err)) + return NewErrSendBlock(err) + } + + if isLiveBlock(protoStep) { + 
dmetering.GetBytesMeter(ctx).AddBytesRead(len(block.Payload.Value)) + } + + level := zap.DebugLevel + if block.Number%200 == 0 { + level = zap.InfoLevel + } + + logger.Check(level, "stream sent block").Write(zap.Stringer("block", block), zap.Duration("duration", time.Since(start))) + + return nil + }) + + if s.transformRegistry != nil { + passthroughTr, err := s.transformRegistry.PassthroughFromTransforms(request.Transforms) + if err != nil { + return status.Errorf(codes.Internal, "unable to create pre-proc function: %s", err) + } + + if passthroughTr != nil { + metrics.ActiveSubstreams.Inc() + defer metrics.ActiveSubstreams.Dec() + metrics.SubstreamsCounter.Inc() + outputFunc := func(cursor *bstream.Cursor, message *anypb.Any) error { + var blocknum uint64 + var opaqueCursor string + var outStep pbfirehose.ForkStep + if cursor != nil { + blocknum = cursor.Block.Num() + opaqueCursor = cursor.ToOpaque() + + protoStep, skip := stepToProto(cursor.Step, request.FinalBlocksOnly) + if skip { + return nil + } + outStep = protoStep + } + resp := &pbfirehose.Response{ + Step: outStep, + Cursor: opaqueCursor, + Block: message, + } + if s.postHookFunc != nil { + s.postHookFunc(ctx, resp) + } + start := time.Now() + err := streamSrv.Send(resp) + if err != nil { + logger.Info("stream send error from transform", zap.Uint64("blocknum", blocknum), zap.Error(err)) + return NewErrSendBlock(err) + } + + level := zap.DebugLevel + if blocknum%200 == 0 { + level = zap.InfoLevel + } + logger.Check(level, "stream sent message from transform").Write(zap.Uint64("blocknum", blocknum), zap.Duration("duration", time.Since(start))) + return nil + } + request.Transforms = nil + + return passthroughTr.Run(ctx, request, s.streamFactory.New, outputFunc) + // --> will want to start a few firehose instances,sources, manage them, process them... 
+ // --> I give them an output func to print back to the user with the request + // --> I could HERE give him the + } + } else if len(request.Transforms) > 0 { + return status.Errorf(codes.Unimplemented, "no transforms registry configured within this instance") + } + + ctx = s.initFunc(ctx, request) + str, err := s.streamFactory.New(ctx, handlerFunc, request, true, logger) // firehose always want decoded the blocks + if err != nil { + return err + } + + err = str.Run(ctx) + meter := getRequestMeter(ctx) + + fields := []zap.Field{ + zap.Uint64("block_sent", meter.blocks), + zap.Int("egress_bytes", meter.egressBytes), + zap.Error(err), + } + + auth := dauth.FromContext(ctx) + if auth != nil { + fields = append(fields, + zap.String("api_key_id", auth.APIKeyID()), + zap.String("user_id", auth.UserID()), + zap.String("real_ip", auth.RealIP()), + ) + } + logger.Info("firehose process completed", fields...) + if err != nil { + if errors.Is(err, stream.ErrStopBlockReached) { + logger.Info("stream of blocks reached end block") + return nil + } + + if errors.Is(err, context.Canceled) { + if ctx.Err() != context.Canceled { + logger.Debug("stream of blocks ended with context canceled, but our own context was not canceled", zap.Error(err)) + } + return status.Error(codes.Canceled, "source canceled") + } + + if errors.Is(err, context.DeadlineExceeded) { + logger.Info("stream of blocks ended with context deadline exceeded", zap.Error(err)) + return status.Error(codes.DeadlineExceeded, "source deadline exceeded") + } + + var errInvalidArg *stream.ErrInvalidArg + if errors.As(err, &errInvalidArg) { + return status.Error(codes.InvalidArgument, errInvalidArg.Error()) + } + + var errSendBlock *ErrSendBlock + if errors.As(err, &errSendBlock) { + logger.Info("unable to send block probably due to client disconnecting", zap.Error(errSendBlock.inner)) + return status.Error(codes.Unavailable, errSendBlock.inner.Error()) + } + + logger.Info("unexpected stream of blocks termination", 
zap.Error(err)) + return status.Errorf(codes.Internal, "unexpected stream termination") + } + + logger.Error("source is not expected to terminate gracefully, should stop at block or continue forever") + return status.Error(codes.Internal, "unexpected stream completion") + +} + +func stepToProto(step bstream.StepType, finalBlocksOnly bool) (outStep pbfirehose.ForkStep, skip bool) { + if finalBlocksOnly { + if step.Matches(bstream.StepIrreversible) { + return pbfirehose.ForkStep_STEP_FINAL, false + } + return 0, true + } + + if step.Matches(bstream.StepNew) { + return pbfirehose.ForkStep_STEP_NEW, false + } + if step.Matches(bstream.StepUndo) { + return pbfirehose.ForkStep_STEP_UNDO, false + } + return 0, true // simply skip irreversible or stalled here +} diff --git a/firehose/server/errors.go b/firehose/server/errors.go new file mode 100644 index 0000000..2264e5e --- /dev/null +++ b/firehose/server/errors.go @@ -0,0 +1,19 @@ +package server + +import ( + "fmt" +) + +type ErrSendBlock struct { + inner error +} + +func NewErrSendBlock(inner error) ErrSendBlock { + return ErrSendBlock{ + inner: inner, + } +} + +func (e ErrSendBlock) Error() string { + return fmt.Sprintf("send error: %s", e.inner) +} diff --git a/firehose/server/local.go b/firehose/server/local.go new file mode 100644 index 0000000..43f0db2 --- /dev/null +++ b/firehose/server/local.go @@ -0,0 +1,77 @@ +package server + +import ( + "context" + + pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +type BlocksPipe struct { + //grpc.ServerStream + grpc.ClientStream + ctx context.Context + pipeChan chan *pbfirehose.Response + err error +} + +func (p *BlocksPipe) SendHeader(metadata.MD) error { + return nil +} +func (p *BlocksPipe) SetHeader(metadata.MD) error { + return nil +} +func (p *BlocksPipe) SetTrailer(metadata.MD) { + return +} + +func (p *BlocksPipe) Context() context.Context { + return p.ctx +} + +func (p *BlocksPipe) 
Send(resp *pbfirehose.Response) error { + select { + case <-p.ctx.Done(): + return p.ctx.Err() + case p.pipeChan <- resp: + } + return nil +} + +func (p *BlocksPipe) Recv() (*pbfirehose.Response, error) { + select { + case resp, ok := <-p.pipeChan: + if !ok { + return resp, p.err + } + return resp, nil + case <-p.ctx.Done(): + select { + // ensure we empty the pipeChan + case resp, ok := <-p.pipeChan: + if !ok { + return resp, p.err + } + return resp, nil + default: + return nil, p.err + } + } +} + +func (s *Server) BlocksFromLocal(ctx context.Context, req *pbfirehose.Request) pbfirehose.Stream_BlocksClient { + cctx, cancel := context.WithCancel(ctx) + + pipe := &BlocksPipe{ + ctx: cctx, + pipeChan: make(chan *pbfirehose.Response), + } + go func() { + err := s.Blocks(req, pipe) + pipe.err = err + cancel() + }() + + return pipe +} diff --git a/firehose/server/server.go b/firehose/server/server.go new file mode 100644 index 0000000..2de72a4 --- /dev/null +++ b/firehose/server/server.go @@ -0,0 +1,183 @@ +package server + +import ( + "context" + "net/url" + "strings" + "time" + + "github.com/streamingfast/firehose-core/firehose/firehose" + "github.com/streamingfast/firehose-core/firehose/firehose/rate" + + _ "github.com/mostynb/go-grpc-compression/zstd" + "github.com/streamingfast/bstream/transform" + "github.com/streamingfast/dauth" + dauthgrpc "github.com/streamingfast/dauth/middleware/grpc" + dgrpcserver "github.com/streamingfast/dgrpc/server" + "github.com/streamingfast/dgrpc/server/factory" + "github.com/streamingfast/dmetering" + "github.com/streamingfast/dmetrics" + pbfirehoseV1 "github.com/streamingfast/pbgo/sf/firehose/v1" + pbfirehoseV2 "github.com/streamingfast/pbgo/sf/firehose/v2" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + "go.opentelemetry.io/otel" + "go.uber.org/zap" + "google.golang.org/grpc" + _ "google.golang.org/grpc/encoding/gzip" + "google.golang.org/protobuf/proto" +) + +type Server struct { + streamFactory 
*firehose.StreamFactory + transformRegistry *transform.Registry + blockGetter *firehose.BlockGetter + + initFunc func(context.Context, *pbfirehoseV2.Request) context.Context + postHookFunc func(context.Context, *pbfirehoseV2.Response) + + dgrpcserver.Server + listenAddr string + healthListenAddr string + logger *zap.Logger + metrics dmetrics.Set + + rateLimiter rate.Limiter +} + +type Option func(*Server) + +func WithLeakyBucketLimiter(size int, dripRate time.Duration) Option { + return func(s *Server) { + s.rateLimiter = rate.NewLeakyBucketLimiter(size, dripRate) + } +} + +func New( + transformRegistry *transform.Registry, + streamFactory *firehose.StreamFactory, + blockGetter *firehose.BlockGetter, + logger *zap.Logger, + authenticator dauth.Authenticator, + isReady func(context.Context) bool, + listenAddr string, + serviceDiscoveryURL *url.URL, + opts ...Option, +) *Server { + initFunc := func(ctx context.Context, request *pbfirehoseV2.Request) context.Context { + ////////////////////////////////////////////////////////////////////// + ctx = dmetering.WithBytesMeter(ctx) + ctx = withRequestMeter(ctx) + return ctx + ////////////////////////////////////////////////////////////////////// + } + + postHookFunc := func(ctx context.Context, response *pbfirehoseV2.Response) { + ////////////////////////////////////////////////////////////////////// + meter := dmetering.GetBytesMeter(ctx) + bytesRead := meter.BytesReadDelta() + bytesWritten := meter.BytesWrittenDelta() + size := proto.Size(response) + + auth := dauth.FromContext(ctx) + event := dmetering.Event{ + UserID: auth.UserID(), + ApiKeyID: auth.APIKeyID(), + IpAddress: auth.RealIP(), + Endpoint: "sf.firehose.v2.Firehose/Blocks", + Metrics: map[string]float64{ + "egress_bytes": float64(size), + "written_bytes": float64(bytesWritten), + "read_bytes": float64(bytesRead), + "block_count": 1, + }, + Timestamp: time.Now(), + } + + requestMeter := getRequestMeter(ctx) + requestMeter.blocks++ + requestMeter.egressBytes += 
size + dmetering.Emit(ctx, event) + ////////////////////////////////////////////////////////////////////// + } + + tracerProvider := otel.GetTracerProvider() + options := []dgrpcserver.Option{ + dgrpcserver.WithLogger(logger), + dgrpcserver.WithHealthCheck(dgrpcserver.HealthCheckOverGRPC|dgrpcserver.HealthCheckOverHTTP, createHealthCheck(isReady)), + dgrpcserver.WithPostUnaryInterceptor(otelgrpc.UnaryServerInterceptor(otelgrpc.WithTracerProvider(tracerProvider))), + dgrpcserver.WithPostStreamInterceptor(otelgrpc.StreamServerInterceptor(otelgrpc.WithTracerProvider(tracerProvider))), + dgrpcserver.WithGRPCServerOptions(grpc.MaxRecvMsgSize(25 * 1024 * 1024)), + dgrpcserver.WithPostUnaryInterceptor(dauthgrpc.UnaryAuthChecker(authenticator, logger)), + dgrpcserver.WithPostStreamInterceptor(dauthgrpc.StreamAuthChecker(authenticator, logger)), + } + + if serviceDiscoveryURL != nil { + options = append(options, dgrpcserver.WithServiceDiscoveryURL(serviceDiscoveryURL)) + } + + if strings.Contains(listenAddr, "*") { + options = append(options, dgrpcserver.WithInsecureServer()) + } else { + options = append(options, dgrpcserver.WithPlainTextServer()) + } + + grpcServer := factory.ServerFromOptions(options...) 
+ + s := &Server{ + Server: grpcServer, + transformRegistry: transformRegistry, + blockGetter: blockGetter, + streamFactory: streamFactory, + listenAddr: strings.ReplaceAll(listenAddr, "*", ""), + initFunc: initFunc, + postHookFunc: postHookFunc, + logger: logger, + } + + logger.Info("registering grpc services") + grpcServer.RegisterService(func(gs grpc.ServiceRegistrar) { + if blockGetter != nil { + pbfirehoseV2.RegisterFetchServer(gs, s) + } + pbfirehoseV2.RegisterStreamServer(gs, s) + pbfirehoseV1.RegisterStreamServer(gs, NewFirehoseProxyV1ToV2(s)) // compatibility with firehose + }) + + for _, opt := range opts { + opt(s) + } + + return s +} + +func (s *Server) Launch() { + s.Server.Launch(s.listenAddr) +} + +func createHealthCheck(isReady func(ctx context.Context) bool) dgrpcserver.HealthCheck { + return func(ctx context.Context) (bool, interface{}, error) { + return isReady(ctx), nil, nil + } +} + +type key int + +var requestMeterKey key + +type requestMeter struct { + blocks uint64 + egressBytes int +} + +func getRequestMeter(ctx context.Context) *requestMeter { + if rm, ok := ctx.Value(requestMeterKey).(*requestMeter); ok { + return rm + } + return &requestMeter{} // not so useful but won't break tests +} +func withRequestMeter(ctx context.Context) context.Context { + if _, ok := ctx.Value(requestMeterKey).(*requestMeter); ok { + return ctx + } + return context.WithValue(ctx, requestMeterKey, &requestMeter{}) +} diff --git a/firehose/server/v1proxy.go b/firehose/server/v1proxy.go new file mode 100644 index 0000000..a6724d5 --- /dev/null +++ b/firehose/server/v1proxy.go @@ -0,0 +1,86 @@ +package server + +import ( + "fmt" + + pbfirehoseV1 "github.com/streamingfast/pbgo/sf/firehose/v1" + pbfirehoseV2 "github.com/streamingfast/pbgo/sf/firehose/v2" + "google.golang.org/grpc" +) + +type FirehoseProxyV1ToV2 struct { + server *Server +} + +func NewFirehoseProxyV1ToV2(server *Server) *FirehoseProxyV1ToV2 { + return &FirehoseProxyV1ToV2{ + server: server, + } +} + 
+func (s *FirehoseProxyV1ToV2) Blocks(req *pbfirehoseV1.Request, streamSrv pbfirehoseV1.Stream_BlocksServer) error { + + var finalBlocksOnly bool + var validSteps bool + var withUndo bool + switch len(req.ForkSteps) { + case 1: + if req.ForkSteps[0] == pbfirehoseV1.ForkStep_STEP_IRREVERSIBLE { + finalBlocksOnly = true + validSteps = true + } + if req.ForkSteps[0] == pbfirehoseV1.ForkStep_STEP_NEW { + validSteps = true + } + case 2: + if (req.ForkSteps[0] == pbfirehoseV1.ForkStep_STEP_NEW && req.ForkSteps[1] == pbfirehoseV1.ForkStep_STEP_UNDO) || + (req.ForkSteps[1] == pbfirehoseV1.ForkStep_STEP_NEW && req.ForkSteps[0] == pbfirehoseV1.ForkStep_STEP_UNDO) { + validSteps = true + withUndo = true + } else if req.ForkSteps[0] == pbfirehoseV1.ForkStep_STEP_NEW && req.ForkSteps[1] == pbfirehoseV1.ForkStep_STEP_IRREVERSIBLE { + validSteps = true + // compatibility hack. you won't receive IRREVERSIBLE here + } + } + if !validSteps { + return fmt.Errorf("invalid parameter for ForkSteps: this server implements firehose v2 operation and only supports [NEW,UNDO] or [IRREVERSIBLE]") + } + + reqV2 := &pbfirehoseV2.Request{ + StartBlockNum: req.StartBlockNum, + Cursor: req.StartCursor, + StopBlockNum: req.StopBlockNum, + FinalBlocksOnly: finalBlocksOnly, + Transforms: req.Transforms, + } + + wrapper := streamWrapper{ServerStream: streamSrv, next: streamSrv, withUndo: withUndo} + + return s.server.Blocks(reqV2, wrapper) +} + +type streamWrapper struct { + grpc.ServerStream + next pbfirehoseV1.Stream_BlocksServer + withUndo bool +} + +func (w streamWrapper) Send(response *pbfirehoseV2.Response) error { + return w.next.Send(&pbfirehoseV1.Response{ + Block: response.Block, + Step: convertForkStep(response.Step), + Cursor: response.Cursor, + }) +} + +func convertForkStep(in pbfirehoseV2.ForkStep) pbfirehoseV1.ForkStep { + switch in { + case pbfirehoseV2.ForkStep_STEP_FINAL: + return pbfirehoseV1.ForkStep_STEP_IRREVERSIBLE + case pbfirehoseV2.ForkStep_STEP_NEW: + return 
pbfirehoseV1.ForkStep_STEP_NEW + case pbfirehoseV2.ForkStep_STEP_UNDO: + return pbfirehoseV1.ForkStep_STEP_UNDO + } + return pbfirehoseV1.ForkStep_STEP_UNKNOWN +} diff --git a/firehose/tests/integration_test.go b/firehose/tests/integration_test.go new file mode 100644 index 0000000..6b44847 --- /dev/null +++ b/firehose/tests/integration_test.go @@ -0,0 +1,236 @@ +package firehose + +//import ( +// "context" +// "encoding/json" +// "fmt" +// "testing" +// "time" +// +// "github.com/alicebob/miniredis/v2/server" +// "github.com/streamingfast/bstream" +// "github.com/streamingfast/dstore" +// pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" +// pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v1" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/require" +// "go.uber.org/zap" +// "google.golang.org/protobuf/proto" +//) +// +//func TestFullFlow(t *testing.T) { +// +// stepNew := pbfirehose.ForkStep_STEP_NEW +// stepIrr := pbfirehose.ForkStep_STEP_IRREVERSIBLE +// stepUndo := pbfirehose.ForkStep_STEP_UNDO +// _ = stepUndo +// +// type expectedResp struct { +// num uint64 +// id string +// step pbfirehose.ForkStep +// } +// +// tests := []struct { +// name string +// files map[int][]byte +// irreversibleBlocksIndexes map[int]map[int]string +// startBlockNum uint64 +// stopBlockNum uint64 +// cursor *bstream.Cursor +// expectedResponses []expectedResp +// }{ +// { +// "scenario 1 -- irreversible index, no cursor", +// map[int][]byte{ +// 0: testBlocks( +// 4, "4a", "3a", 0, +// 6, "6a", "4a", 0, +// ), +// 100: testBlocks( +// 100, "100a", "6a", 6, +// 102, "102a", "100a", 6, +// 103, "103a", "102a", 100, // moves LIB from 6 to 100 +// ), +// 200: testBlocks( +// 204, "204b", "103a", 102, // moves LIB from 100 to 102 +// 205, "205b", "103b", 100, //unlinkable +// ), +// }, +// map[int]map[int]string{ +// 0: { +// 4: "4a", +// 6: "6a", +// }, +// 200: { // this hould not be used +// 204: "204a", +// 206: "206a", +// }, +// }, +// 5, +// 
0, +// nil, +// []expectedResp{ +// {6, "6a", stepNew}, +// {6, "6a", stepIrr}, +// {100, "100a", stepNew}, +// {102, "102a", stepNew}, +// {103, "103a", stepNew}, +// {100, "100a", stepIrr}, +// {204, "204b", stepNew}, +// {102, "102a", stepIrr}, +// }, +// }, +// { +// "scenario 2 -- no irreversible index, start->stop with some libs", +// map[int][]byte{ +// 0: testBlocks( +// 4, "4a", "3a", 0, +// 6, "6a", "4a", 4, +// ), +// 100: testBlocks( +// 100, "100a", "6a", 6, +// 102, "102a", "100a", 6, +// 103, "103a", "102a", 100, // triggers StepIrr +// 104, "104a", "103a", 100, // after stop block +// ), +// }, +// nil, +// 6, +// 103, +// nil, +// []expectedResp{ +// {6, "6a", stepNew}, +// {100, "100a", stepNew}, +// {6, "6a", stepIrr}, +// {102, "102a", stepNew}, +// {103, "103a", stepNew}, +// }, +// }, +// } +// +// for _, c := range tests { +// t.Run(c.name, func(t *testing.T) { +// +// logger := zap.NewNop() +// bs := dstore.NewMockStore(nil) +// for i, data := range c.files { +// bs.SetFile(base(i), data) +// } +// +// irrStore := getIrrStore(c.irreversibleBlocksIndexes) +// +// // fake block decoder func to return pbbstream.Block +// bstream.GetBlockDecoder = bstream.BlockDecoderFunc(func(blk *bstream.Block) (interface{}, error) { +// block := new(pbbstream.Block) +// block.Number = blk.Number +// block.Id = blk.Id +// block.PreviousId = blk.PreviousId +// return block, nil +// }) +// +// tracker := bstream.NewTracker(0) // 0 value not used +// fmt.Println(bstream.GetProtocolFirstStreamableBlock) +// tracker.AddResolver(bstream.OffsetStartBlockResolver(200)) +// +// i := NewStreamFactory( +// []dstore.Store{bs}, +// irrStore, +// []uint64{10000, 1000, 100}, +// nil, +// nil, +// tracker, +// ) +// +// s := server.NewServer( +// logger, +// nil, +// i, +// ) +// +// ctx, cancelCtx := context.WithCancel(context.Background()) +// defer cancelCtx() +// localClient := s.BlocksFromLocal(ctx, &pbfirehose.Request{ +// StartBlockNum: int64(c.startBlockNum), +// 
StopBlockNum: c.stopBlockNum, +// }) +// +// for _, r := range c.expectedResponses { +// resp, err := localClient.Recv() +// require.NotNil(t, resp) +// require.NoError(t, err) +// +// fmt.Println(resp.Cursor) +// cursor, err := bstream.CursorFromOpaque(resp.Cursor) +// require.NoError(t, err, "cursor sent from firehose should always be valid") +// require.False(t, cursor.IsEmpty()) +// +// b := &pbbstream.Block{} +// err = proto.Unmarshal(resp.Block.Value, b) +// require.NoError(t, err) +// +// require.Equal(t, r.num, b.Number) +// require.Equal(t, r.id, b.Id) +// require.Equal(t, r.step, resp.Step) +// } +// +// // catchExtraBlock +// moreChan := make(chan *pbbstream.Block) +// go func() { +// resp, err := localClient.Recv() +// require.NoError(t, err) +// if resp == nil { +// return +// } +// +// b := &pbbstream.Block{} +// err = proto.Unmarshal(resp.Block.Value, b) +// require.NoError(t, err) +// moreChan <- b +// }() +// +// select { +// case resp := <-moreChan: +// assert.Falsef(t, true, "an extra block was seen: %s", resp.String()) +// case <-time.After(time.Millisecond * 50): +// } +// +// }) +// } +// +//} +// +//func base(in int) string { +// return fmt.Sprintf("%010d", in) +//} +// +//func testBlocks(in ...interface{}) (out []byte) { +// var blks []bstream.ParsableTestBlock +// for i := 0; i < len(in); i += 4 { +// blks = append(blks, bstream.ParsableTestBlock{ +// Number: uint64(in[i].(int)), +// ID: in[i+1].(string), +// PreviousID: in[i+2].(string), +// LIBNum: uint64(in[i+3].(int)), +// }) +// } +// +// for _, blk := range blks { +// b, err := json.Marshal(blk) +// if err != nil { +// panic(err) +// } +// out = append(out, b...) 
+// out = append(out, '\n') +// } +// return +//} +// +//func getIrrStore(irrBlkIdxs map[int]map[int]string) (irrStore *dstore.MockStore) { +// irrStore = dstore.NewMockStore(nil) +// for j, n := range irrBlkIdxs { +// filename, cnt := bstream.TestIrrBlocksIdx(j, 100, n) +// irrStore.SetFile(filename, cnt) +// } +// return +//} diff --git a/firehose/tests/stream_blocks_test.go b/firehose/tests/stream_blocks_test.go new file mode 100644 index 0000000..3263db0 --- /dev/null +++ b/firehose/tests/stream_blocks_test.go @@ -0,0 +1,103 @@ +package firehose + +//import ( +// "context" +// "strings" +// "testing" +// +// pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v1" +// +// "github.com/streamingfast/bstream" +// "github.com/streamingfast/dstore" +// pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/require" +// "go.uber.org/zap" +// "google.golang.org/protobuf/proto" +//) +// +//func TestLocalBlocks(t *testing.T) { +// +// store := dstore.NewMockStore(nil) +// idxStore := dstore.NewMockStore(nil) +// blocksStores := []dstore.Store{store} +// logger := zap.NewNop() +// +// i := NewStreamFactory( +// blocksStores, +// idxStore, +// []uint64{10000, 1000, 100}, +// nil, +// nil, +// nil, +// ) +// +// s := NewServer( +// logger, +// nil, +// i, +// ) +// +// // fake block decoder func to return bstream.Block +// bstream.GetBlockDecoder = bstream.BlockDecoderFunc(func(blk *bstream.Block) (interface{}, error) { +// block := new(pbbstream.Block) +// block.Number = blk.Number +// block.Id = blk.Id +// block.PreviousId = blk.PreviousId +// return block, nil +// }) +// +// blocks := strings.Join([]string{ +// bstream.TestJSONBlockWithLIBNum("00000002a", "00000001a", 1), +// bstream.TestJSONBlockWithLIBNum("00000003a", "00000002a", 2), +// bstream.TestJSONBlockWithLIBNum("00000004a", "00000003a", 3), // last one closes on endblock +// }, "\n") +// +// store.SetFile("0000000000", []byte(blocks)) 
+// +// localClient := s.BlocksFromLocal(context.Background(), &pbfirehose.Request{ +// StartBlockNum: 2, +// StopBlockNum: 4, +// }) +// +// // ---- +// blk, err := localClient.Recv() +// require.NoError(t, err) +// b := &pbbstream.Block{} +// err = proto.Unmarshal(blk.Block.Value, b) +// require.NoError(t, err) +// require.Equal(t, uint64(2), b.Number) +// require.Equal(t, blk.Step, pbfirehose.ForkStep_STEP_NEW) +// +// // ---- +// blk, err = localClient.Recv() +// require.NoError(t, err) +// b = &pbbstream.Block{} +// err = proto.Unmarshal(blk.Block.Value, b) +// require.NoError(t, err) +// assert.Equal(t, uint64(3), b.Number) +// assert.Equal(t, blk.Step, pbfirehose.ForkStep_STEP_NEW) +// +// // ---- +// blk, err = localClient.Recv() +// require.NoError(t, err) +// b = &pbbstream.Block{} +// err = proto.Unmarshal(blk.Block.Value, b) +// require.NoError(t, err) +// assert.Equal(t, uint64(2), b.Number) +// assert.Equal(t, blk.Step, pbfirehose.ForkStep_STEP_IRREVERSIBLE) +// +// // ---- +// blk, err = localClient.Recv() +// require.NoError(t, err) +// b = &pbbstream.Block{} +// err = proto.Unmarshal(blk.Block.Value, b) +// require.NoError(t, err) +// assert.Equal(t, uint64(4), b.Number) +// assert.Equal(t, blk.Step, pbfirehose.ForkStep_STEP_NEW) +// +// // ---- +// blk, err = localClient.Recv() +// require.NoError(t, err) +// require.Nil(t, blk) +//} diff --git a/go.mod b/go.mod index b9e3a3e..2115ddd 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module github.com/streamingfast/firehose-core +module github.com/streamingfast/firehose-core/firehose go 1.21 @@ -20,7 +20,6 @@ require ( github.com/streamingfast/dmetering v0.0.0-20230731155453-e1df53e362aa github.com/streamingfast/dmetrics v0.0.0-20230919161904-206fa8ebd545 github.com/streamingfast/dstore v0.1.1-0.20230620124109-3924b3b36c77 - github.com/streamingfast/firehose v0.1.1-0.20231109192301-ebfed7417cf6 github.com/streamingfast/index-builder v0.0.0-20221031203737-fa2e70f09dc2 
github.com/streamingfast/jsonpb v0.0.0-20210811021341-3670f0aa02d0 github.com/streamingfast/logging v0.0.0-20230608130331-f22c91403091 @@ -37,6 +36,7 @@ require ( require ( github.com/google/s2a-go v0.1.4 // indirect + github.com/streamingfast/firehose v0.1.1-0.20220810182727-6f3191de9804 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect ) @@ -62,7 +62,7 @@ require ( github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/RoaringBitmap/roaring v0.9.4 // indirect github.com/ShinyTrinkets/meta-logger v0.2.0 // indirect - github.com/abourget/llerrgroup v0.2.0 // indirect + github.com/abourget/llerrgroup v0.2.0 github.com/aws/aws-sdk-go v1.44.325 // indirect github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -105,7 +105,7 @@ require ( github.com/google/uuid v1.3.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect github.com/googleapis/gax-go/v2 v2.11.0 // indirect - github.com/gorilla/mux v1.8.0 // indirect + github.com/gorilla/mux v1.8.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect @@ -166,7 +166,7 @@ require ( github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/streamingfast/dbin v0.9.1-0.20220513054835-1abebbb944ad - github.com/streamingfast/derr v0.0.0-20230515163924-8570aaa43fe1 // indirect + github.com/streamingfast/derr v0.0.0-20230515163924-8570aaa43fe1 github.com/streamingfast/dtracing v0.0.0-20220305214756-b5c0e8699839 // indirect github.com/streamingfast/opaque v0.0.0-20210811180740-0c01d37ea308 // indirect github.com/streamingfast/sf-tracing v0.0.0-20230616174903-cd2ade641ca9 // indirect @@ -178,8 +178,8 @@ require ( 
github.com/yourbasic/graph v0.0.0-20210606180040-8ecfec1c2869 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.9.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.44.0 // indirect - go.opentelemetry.io/otel v1.18.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.44.0 + go.opentelemetry.io/otel v1.18.0 go.opentelemetry.io/otel/exporters/jaeger v1.16.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.18.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.18.0 // indirect @@ -189,12 +189,12 @@ require ( go.opentelemetry.io/otel/sdk v1.18.0 // indirect go.opentelemetry.io/otel/trace v1.18.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect - go.uber.org/atomic v1.10.0 // indirect + go.uber.org/atomic v1.10.0 go.uber.org/automaxprocs v1.5.1 // indirect golang.org/x/crypto v0.13.0 // indirect golang.org/x/mod v0.12.0 // indirect golang.org/x/net v0.15.0 // indirect - golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/oauth2 v0.10.0 golang.org/x/sync v0.3.0 // indirect golang.org/x/sys v0.12.0 // indirect golang.org/x/term v0.12.0 // indirect diff --git a/go.sum b/go.sum index a6c5631..f4195c8 100644 --- a/go.sum +++ b/go.sum @@ -28,6 +28,9 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go v0.110.4 
h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk= cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -36,12 +39,16 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.2.0/go.mod h1:xlogom/6gr8RJGBe7nT2eGsQYAFUbbv8dbC29qE3Xmw= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.21.0 h1:JNBsyXVoOoNJtTQcnEY5uYpZIbeCTYIeDe0Xh1bySMk= cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/iam v0.1.1/go.mod h1:CKqrcnI/suGpybEHxZ7BMehL0oA4LpdyJdUlTl9jVMw= cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= cloud.google.com/go/logging v1.7.0 h1:CJYxlNNNNAMkHp9em/YEXcfJg+rPDg7YfwoRpMU+t5I= @@ -49,6 +56,7 @@ cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeN cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI= cloud.google.com/go/longrunning v0.5.1/go.mod 
h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= +cloud.google.com/go/monitoring v1.4.0/go.mod h1:y6xnxfwI3hTFWOdkOaD7nfJVlwuC3/mS/5kvtT131p4= cloud.google.com/go/monitoring v1.15.1 h1:65JhLMd+JiYnXr6j5Z63dUYCuOg770p8a/VC+gil/58= cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -61,12 +69,15 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.21.0/go.mod h1:XmRlxkgPjlBONznT2dDUU/5XlpU2OjMnKuqnZI01LAA= cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= +cloud.google.com/go/trace v1.2.0/go.mod h1:Wc8y/uYyOhPy12KEnXG9XGrvfMz5F5SrYecQlbW1rwM= cloud.google.com/go/trace v1.10.1 h1:EwGdOLCNfYOOPtgqo+D2sDLZmRCEO1AagRTJCU6ztdg= cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= contrib.go.opencensus.io/exporter/stackdriver v0.12.6/go.mod h1:8x999/OcIPy5ivx/wDiV7Gx4D+VUPODf0mWRGRc5kSk= +contrib.go.opencensus.io/exporter/stackdriver v0.13.8/go.mod h1:huNtlWx75MwO7qMs0KrMxPZXzNNWebav1Sq/pm02JdQ= contrib.go.opencensus.io/exporter/stackdriver v0.13.10 h1:a9+GZPUe+ONKUwULjlEOucMMG0qfSCCenlji0Nhqbys= contrib.go.opencensus.io/exporter/stackdriver v0.13.10/go.mod h1:I5htMbyta491eUxufwwZPQdcKvvgzMB4O9ni41YnIM8= contrib.go.opencensus.io/exporter/zipkin v0.1.1 
h1:PR+1zWqY8ceXs1qDQQIlgXe+sdiwCf0n32bH4+Epk8g= @@ -117,8 +128,14 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/abourget/llerrgroup v0.2.0 h1:2nPXy6Owo/KOKDQYvjMmS8rsjtitvuP2OEGrqgpj428= github.com/abourget/llerrgroup v0.2.0/go.mod h1:QukSa1Sim/0R4aRlWdiBdAy+0i1PBfOd1WHpfYM1ngA= github.com/alecthomas/gometalinter v2.0.11+incompatible/go.mod h1:qfIpQGGz3d+NmgyPBqv+LSh50emm1pt72EtcX2vKYQk= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.43/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.325 h1:jF/L99fJSq/BfiLmUOflO/aM+LwcqBm0Fe/qTK5xxuI= github.com/aws/aws-sdk-go v1.44.325/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= @@ -129,6 +146,8 @@ github.com/azer/logger v1.0.0/go.mod h1:iaDID7UeBTyUh31bjGFlLkr87k23z/mHMMLzt6YQ github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks 
v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= @@ -155,6 +174,7 @@ github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMr github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764= @@ -201,6 +221,7 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2U github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -235,8 +256,12 @@ 
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-json-experiment/json v0.0.0-20231013223334-54c864be5b8d h1:zqfo2jECgX5eYQseB/X+uV4Y5ocGOG/vG/LTztUCyPA= github.com/go-json-experiment/json v0.0.0-20231013223334-54c864be5b8d/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -258,6 +283,7 @@ github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3a github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -319,6 +345,7 @@ github.com/google/go-cmp v0.5.3/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -406,15 +433,22 @@ github.com/josephburnett/jd v1.7.1 h1:oXBPMS+SNnILTMGj1fWLK9pexpeJUXtbVFfRku/PjB github.com/josephburnett/jd v1.7.1/go.mod h1:R8ZnZnLt2D4rhW4NvBc/USTo6mzyNT6fYNIIWOJA9GY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= 
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= @@ -423,6 +457,7 @@ github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02 github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU= github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -462,6 +497,7 @@ github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= github.com/mattn/go-sqlite3 v1.14.16/go.mod 
h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= @@ -475,6 +511,8 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mostynb/go-grpc-compression v1.1.17 h1:N9t6taOJN3mNTTi0wDf4e3lp/G/ON1TP67Pn0vTUA9I= @@ -501,6 +539,8 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack 
v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -522,6 +562,7 @@ github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvI github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -530,13 +571,30 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.16.0 
h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk= github.com/prometheus/procfs v0.11.0/go.mod 
h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -550,11 +608,14 @@ github.com/rs/cors v1.10.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/schollz/closestmatch v2.1.0+incompatible h1:Uel2GXEpJqOWBrlyI+oY9LTiyyjYS17cCYRqP13/SHk= github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= +github.com/sethvargo/go-retry v0.1.0/go.mod h1:JzIOdZqQDNpPkQDmcqgtteAcxFLtYpNF/zJCM1ysDg8= github.com/sethvargo/go-retry v0.2.3 h1:oYlgvIvsju3jNbottWABtbnoLC+GDtLdBHxKWxQm/iU= github.com/sethvargo/go-retry v0.2.3/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -574,49 +635,59 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/streamingfast/atm 
v0.0.0-20220131151839-18c87005e680/go.mod h1:iISPGAstbUsPgyC3auLLi7PYUTi9lHv5z0COam0OPOY= +github.com/streamingfast/bstream v0.0.2-0.20220810182344-114d9f8705b2/go.mod h1:dMhUgTdaY+3F+weWsiLo5bNkoUxZbAhdwtCauYDLjEQ= github.com/streamingfast/bstream v0.0.2-0.20231115182919-10a5d61a80ab h1:NED6em0qaVsCFlSL5HX2vo/xmDnNzGZxCjpCuDmLjPY= github.com/streamingfast/bstream v0.0.2-0.20231115182919-10a5d61a80ab/go.mod h1:ryNdCDG4CCOo2QYctNFzAuNf3ITGhfTwbgRK0/VRDdQ= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 h1:UxJUTcEVkdZy8N77E3exz0iNlgQuxl4m220GPvzdZ2s= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80/go.mod h1:QxjVH73Lkqk+mP8bndvhMuQDUINfkgsYhdCH/5TJFKI= +github.com/streamingfast/dauth v0.0.0-20210812020920-1c83ba29add1/go.mod h1:FIYpVqt+ICVuNBoOH3ZIicIctpVoCq3393+RpfXsPEM= github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 h1:49JYZkn8ALGe+LhcACZyX3L9B8tIxRZ3F3l+OxmNMhY= github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330/go.mod h1:zfq+mtesfbaZnNeh1BF+vo+zEFP1sat4pm3lvt40nRw= +github.com/streamingfast/dbin v0.0.0-20210809205249-73d5eca35dc5/go.mod h1:YStE7K5/GH47JsWpY7LMKsDaXXpMLU/M26vYFzXHYRk= github.com/streamingfast/dbin v0.9.1-0.20220513054835-1abebbb944ad h1:6z4uS6TlD9KoHdyE1vzoGsELVCCcviTFT/3/vqCylh8= github.com/streamingfast/dbin v0.9.1-0.20220513054835-1abebbb944ad/go.mod h1:YStE7K5/GH47JsWpY7LMKsDaXXpMLU/M26vYFzXHYRk= +github.com/streamingfast/derr v0.0.0-20210811180100-9138d738bcec/go.mod h1:ulVfui/yGXmPBbt9aAqCWdAjM7YxnZkYHzvQktLfw3M= github.com/streamingfast/derr v0.0.0-20230515163924-8570aaa43fe1 h1:xJB7rXnOHLesosMjfwWsEL2i/40mFSkzenEb3M0qTyM= github.com/streamingfast/derr v0.0.0-20230515163924-8570aaa43fe1/go.mod h1:QSm/AfaDsE0k1xBYi0lW580YJ/WDV/FKZI628tkZR0Y= +github.com/streamingfast/dgrpc v0.0.0-20220301153539-536adf71b594/go.mod h1:HFjyAk8wkkb92dLBq1lxArfaWvDHb9Y53+fg7O5WTiU= github.com/streamingfast/dgrpc v0.0.0-20230929132851-893fc52687fa h1:L/Ipge5pkZtyHucT7c8F/PiCitiNqQxjoUuxyzWKZew= 
github.com/streamingfast/dgrpc v0.0.0-20230929132851-893fc52687fa/go.mod h1:AcY2kk28XswihgU6z37288a3ZF4gGGO7nNwlTI/vET4= github.com/streamingfast/dlauncher v0.0.0-20230607184145-76399faad89e h1:Nh/gLDv8rOMIidb/gpO4rZOYVe09k+tof/trezkpku4= github.com/streamingfast/dlauncher v0.0.0-20230607184145-76399faad89e/go.mod h1:xErlHEDd5+4NlR+Mg3ZtW7BTTLB0yZBxZAjHPrkk8X4= +github.com/streamingfast/dmetering v0.0.0-20220301165106-a642bb6a21bd/go.mod h1:Eu1SH2HyBbDUmQqJV+f5oowCQ/c02HkAZyR5U2BKIT8= github.com/streamingfast/dmetering v0.0.0-20230731155453-e1df53e362aa h1:bM6iy5X7Gtw1oh1bMxFmtroouKZu4K4BHXaFvR96jNw= github.com/streamingfast/dmetering v0.0.0-20230731155453-e1df53e362aa/go.mod h1:3XggUfQMyciaue133qhbIkFqJQqNzozGpa/gI3sdwac= +github.com/streamingfast/dmetrics v0.0.0-20210811180524-8494aeb34447/go.mod h1:VLdQY/FwczmC/flqWkcsBbqXO4BhU4zQDSK7GMrpcjY= github.com/streamingfast/dmetrics v0.0.0-20230919161904-206fa8ebd545 h1:SUl04bZKGAv207lp7/6CHOJIRpjUKunwItrno3K463Y= github.com/streamingfast/dmetrics v0.0.0-20230919161904-206fa8ebd545/go.mod h1:JbxEDbzWRG1dHdNIPrYfuPllEkktZMgm40AwVIBENcw= +github.com/streamingfast/dstore v0.1.1-0.20220607202639-35118aeaf648/go.mod h1:SHSEIPowGeE1TfNNmGeAUUnlO3dwevmX5kFOSazU60M= github.com/streamingfast/dstore v0.1.1-0.20230620124109-3924b3b36c77 h1:u7FWLqz3Uwff609Ja9M+3aGOWqBCVU7dx9i6R6Qc4qI= github.com/streamingfast/dstore v0.1.1-0.20230620124109-3924b3b36c77/go.mod h1:ngKU7WzHwVjOFpt2g+Wtob5mX4IvN90HYlnARcTRbmQ= +github.com/streamingfast/dtracing v0.0.0-20210811175635-d55665d3622a/go.mod h1:bqiYZaX6L/MoXNfFQeAdau6g9HLA3yKHkX8KzStt58Q= github.com/streamingfast/dtracing v0.0.0-20220305214756-b5c0e8699839 h1:K6mJPvh1jAL+/gBS7Bh9jyzWaTib6N47m06gZOTUPwQ= github.com/streamingfast/dtracing v0.0.0-20220305214756-b5c0e8699839/go.mod h1:huOJyjMYS6K8upTuxDxaNd+emD65RrXoVBvh8f1/7Ns= -github.com/streamingfast/firehose v0.1.1-0.20231109192301-ebfed7417cf6 h1:hcSx7R9f1y+wWoAkJc3XBUXi2p9bYlc2dbt+mZUwdbQ= -github.com/streamingfast/firehose 
v0.1.1-0.20231109192301-ebfed7417cf6/go.mod h1:lGC1T6mpAAApjBQNF5COSXb3SbrYRI3dBR1f6/PZE54= +github.com/streamingfast/firehose v0.1.1-0.20220810182727-6f3191de9804 h1:zKKMqfigTHRJyDXo4ixlnzBJd/DaLgBhISJT0P8Ii6o= +github.com/streamingfast/firehose v0.1.1-0.20220810182727-6f3191de9804/go.mod h1:L31zyyw1r7uYyPwoaVHsKXB2Jd9MO3rDIPNtfoZ+jSM= github.com/streamingfast/index-builder v0.0.0-20221031203737-fa2e70f09dc2 h1:dgYLhP3STiPi30fISAijFPEB11D4r1fQFc8D3cpgV5s= github.com/streamingfast/index-builder v0.0.0-20221031203737-fa2e70f09dc2/go.mod h1:OYv1UX/kRsV9aP4SEwa9zpt34qGzdtJzOvdGn+n56as= github.com/streamingfast/jsonpb v0.0.0-20210811021341-3670f0aa02d0 h1:g8eEYbFSykyzIyuxNMmHEUGGUvJE0ivmqZagLDK42gw= github.com/streamingfast/jsonpb v0.0.0-20210811021341-3670f0aa02d0/go.mod h1:cTNObq2Uofb330y05JbbZZ6RwE6QUXw5iVcHk1Fx3fk= github.com/streamingfast/logging v0.0.0-20210811175431-f3b44b61606a/go.mod h1:4GdqELhZOXj4xwc4IaBmzofzdErGynnaSzuzxy0ZIBo= +github.com/streamingfast/logging v0.0.0-20210908162127-bdc5856d5341/go.mod h1:4GdqELhZOXj4xwc4IaBmzofzdErGynnaSzuzxy0ZIBo= +github.com/streamingfast/logging v0.0.0-20220222131651-12c3943aac2e/go.mod h1:4GdqELhZOXj4xwc4IaBmzofzdErGynnaSzuzxy0ZIBo= github.com/streamingfast/logging v0.0.0-20220304183711-ddba33d79e27/go.mod h1:4GdqELhZOXj4xwc4IaBmzofzdErGynnaSzuzxy0ZIBo= github.com/streamingfast/logging v0.0.0-20220304214715-bc750a74b424/go.mod h1:VlduQ80JcGJSargkRU4Sg9Xo63wZD/l8A5NC/Uo1/uU= github.com/streamingfast/logging v0.0.0-20230608130331-f22c91403091 h1:RN5mrigyirb8anBEtdjtHFIufXdacyTi6i4KBfeNXeo= github.com/streamingfast/logging v0.0.0-20230608130331-f22c91403091/go.mod h1:VlduQ80JcGJSargkRU4Sg9Xo63wZD/l8A5NC/Uo1/uU= -github.com/streamingfast/firehose-core/node-manager v0.0.2-0.20230406142433-692298a8b8d2 h1:6Jdu6LBwaW38n2jjInFk1fM460cq+5paEAHGPPRWWN0= -github.com/streamingfast/firehose-core/node-manager v0.0.2-0.20230406142433-692298a8b8d2/go.mod h1:R5WwJuyNueq0QXKAFinTGU8zaON0hWJBFHX6KA9WZqk= github.com/streamingfast/opaque 
v0.0.0-20210811180740-0c01d37ea308 h1:xlWSfi1BoPfsHtPb0VEHGUcAdBF208LUiFCwfaVPfLA= github.com/streamingfast/opaque v0.0.0-20210811180740-0c01d37ea308/go.mod h1:K1p8Bj/wG34KJvYzPUqtzpndffmpkrVY11u2hkyxCWQ= github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef h1:9IVFHRsqvI+vKJwgF1OMV6L55jHbaV/ZLoU4IAG/dME= github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef/go.mod h1:cq8CvbZ3ioFmGrHokSAJalS0lC+pVXLKhITScItUGXY= +github.com/streamingfast/pbgo v0.0.6-0.20220629184423-cfd0608e0cf4/go.mod h1:huKwfgTGFIFZMKSVbD5TywClM7zAeBUG/zePZMqvXQQ= +github.com/streamingfast/pbgo v0.0.6-0.20220630154121-2e8bba36234e/go.mod h1:huKwfgTGFIFZMKSVbD5TywClM7zAeBUG/zePZMqvXQQ= github.com/streamingfast/pbgo v0.0.6-0.20231115160849-aa578f33a482 h1:eCL6jUDZoSmScqHsp5kiFyEGgo0B5jvCGp21oM7Ow0k= github.com/streamingfast/pbgo v0.0.6-0.20231115160849-aa578f33a482/go.mod h1:fZuijmeFrqxW2YnnXmGrkQpUTHx3eHCaJUKwdvXAYKM= -github.com/streamingfast/relayer v0.0.2-0.20220909122435-e67fbc964fd9 h1:V3LPBmTofZbmT46qQsr0lFa+0qDHZNJXgqLRo9iZBHY= -github.com/streamingfast/relayer v0.0.2-0.20220909122435-e67fbc964fd9/go.mod h1:55E/1g+ojZoX86Odp48LFgceJVyh1xx9ZuhknKfmc/o= github.com/streamingfast/sf-tracing v0.0.0-20230616174903-cd2ade641ca9 h1:YRwpVvLYa+FEJlTy0S7mk4UptYjk5zac+A+ZE1phOeA= github.com/streamingfast/sf-tracing v0.0.0-20230616174903-cd2ade641ca9/go.mod h1:ktzt1BUj3GF+SKQHEmn3ShryJ7y87JeCHtaTGaDVATs= github.com/streamingfast/shutter v1.5.0 h1:NpzDYzj0HVpSiDJVO/FFSL6QIK/YKOxY0gJAtyaTOgs= @@ -667,6 +738,7 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.23.0/go.mod 
h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= @@ -699,25 +771,34 @@ go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lI go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk= go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr 
v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -782,6 +863,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -811,7 +893,10 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= @@ -824,6 +909,7 @@ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210126194326-f9ce19ea3013/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 
v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= @@ -832,6 +918,7 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -849,7 +936,9 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -866,6 +955,7 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod 
h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -878,10 +968,13 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201113233024-12cec1faf1ba/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -897,6 +990,7 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -906,6 +1000,12 @@ golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -985,6 +1085,7 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1021,6 +1122,7 @@ google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSr google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.37.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= 
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= @@ -1034,6 +1136,13 @@ google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM= +google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.69.0/go.mod h1:boanBiw+h5c3s+tBPgEzLDRHfFLWV0qXxRHz3ws7C80= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -1052,6 +1161,7 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto 
v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -1083,6 +1193,7 @@ google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1109,6 +1220,20 @@ google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto 
v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220211171837-173942840c17/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw= @@ -1142,7 +1267,9 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod 
h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.58.0 h1:32JY8YpPMSR45K+c3o6b8VL73V+rR8k+DeMIr4vRH8o= google.golang.org/grpc v1.58.0/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= @@ -1162,6 +1289,7 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1180,7 +1308,10 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/merger.go b/merger.go index cd1cf57..ef851a7 100644 --- a/merger.go +++ b/merger.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/streamingfast/dlauncher/launcher" - "github.com/streamingfast/firehose-core/merger/app/merger" + "github.com/streamingfast/firehose-core/firehose/merger/app/merger" ) func registerMergerApp() { diff --git a/merger/CHANGELOG.md b/merger/CHANGELOG.md index cf1bc3c..130d155 100644 --- a/merger/CHANGELOG.md +++ b/merger/CHANGELOG.md @@ -27,7 +27,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Improved * Logging of OneBlockFile deletion now only called once per delete batch -* When someone else pushes a merged file, merger now detects it and reads the actual blocks to populate its seenblockscache, as discussed here: https://github.com/streamingfast/firehose-core/merger/issues/1 +* When someone else pushes a merged file, merger now detects it and reads the actual blocks to populate its seenblockscache, as discussed here: https://github.com/streamingfast/firehose-core/firehose/merger/issues/1 * Fixed waiting time to actually use TimeBetweenStoreLookups instead of hardcoded value of 1 second when bundle is incomplete ## [v0.0.1] diff --git a/merger/README.md b/merger/README.md index a64d0ad..a55db5b 100644 --- a/merger/README.md +++ b/merger/README.md @@ -1,6 +1,6 @@ # StreamingFast Merger -[![reference](https://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](https://pkg.go.dev/github.com/streamingfast/firehose-core/merger) 
+[![reference](https://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](https://pkg.go.dev/github.com/streamingfast/firehose-core/firehose/merger) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) The merger process is responsible for accumulating blocks from all diff --git a/merger/app/merger/app.go b/merger/app/merger/app.go index 52e6ae1..8fb5374 100644 --- a/merger/app/merger/app.go +++ b/merger/app/merger/app.go @@ -23,8 +23,8 @@ import ( "github.com/streamingfast/dgrpc" "github.com/streamingfast/dmetrics" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/merger" - "github.com/streamingfast/firehose-core/merger/metrics" + "github.com/streamingfast/firehose-core/firehose/merger" + "github.com/streamingfast/firehose-core/firehose/merger/metrics" "github.com/streamingfast/shutter" "go.uber.org/zap" pbhealth "google.golang.org/grpc/health/grpc_health_v1" diff --git a/merger/app/merger/logging.go b/merger/app/merger/logging.go index c0ee237..68409dd 100644 --- a/merger/app/merger/logging.go +++ b/merger/app/merger/logging.go @@ -18,4 +18,4 @@ import ( "github.com/streamingfast/logging" ) -var zlog, tracer = logging.PackageLogger("merger", "github.com/streamingfast/firehose-core/merger/app/merger") +var zlog, tracer = logging.PackageLogger("merger", "github.com/streamingfast/firehose-core/firehose/merger/app/merger") diff --git a/merger/bundler.go b/merger/bundler.go index bfa9299..e50eee6 100644 --- a/merger/bundler.go +++ b/merger/bundler.go @@ -25,7 +25,7 @@ import ( "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/forkable" - "github.com/streamingfast/firehose-core/merger/metrics" + "github.com/streamingfast/firehose-core/firehose/merger/metrics" "github.com/streamingfast/logging" "go.uber.org/zap" ) @@ -54,7 +54,7 @@ type Bundler struct { logger *zap.Logger } -var logger, _ = logging.PackageLogger("merger", 
"github.com/streamingfast/firehose-core/merger/bundler") +var logger, _ = logging.PackageLogger("merger", "github.com/streamingfast/firehose-core/firehose/merger/bundler") func NewBundler(startBlock, stopBlock, firstStreamableBlock, bundleSize uint64, io IOInterface) *Bundler { b := &Bundler{ diff --git a/merger/bundler_test.go b/merger/bundler_test.go index 19d12fd..05438a8 100644 --- a/merger/bundler_test.go +++ b/merger/bundler_test.go @@ -10,7 +10,7 @@ import ( // "time" // "github.com/streamingfast/bstream" - //"github.com/streamingfast/firehose-core/merger/bundle" + //"github.com/streamingfast/firehose-core/firehose/merger/bundle" "github.com/streamingfast/bstream" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/merger/init_test.go b/merger/init_test.go index e6f09a1..450c347 100644 --- a/merger/init_test.go +++ b/merger/init_test.go @@ -18,7 +18,7 @@ import ( "github.com/streamingfast/logging" ) -var testLogger, testTracer = logging.PackageLogger("merger", "github.com/streamingfast/firehose-core/merger_tests") +var testLogger, testTracer = logging.PackageLogger("merger", "github.com/streamingfast/firehose-core/firehose/merger_tests") func init() { logging.InstantiateLoggers() diff --git a/merger/merger_io.go b/merger/merger_io.go index 0b928e9..cbb237a 100644 --- a/merger/merger_io.go +++ b/merger/merger_io.go @@ -14,7 +14,7 @@ import ( "github.com/streamingfast/bstream" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/merger/metrics" + "github.com/streamingfast/firehose-core/firehose/merger/metrics" "github.com/streamingfast/logging" "go.uber.org/zap" ) diff --git a/node-manager/app/node_manager/app.go b/node-manager/app/node_manager/app.go index ceff8ae..d65e12b 100644 --- a/node-manager/app/node_manager/app.go +++ b/node-manager/app/node_manager/app.go @@ -24,10 +24,10 @@ import ( dgrpcserver "github.com/streamingfast/dgrpc/server" dgrpcfactory 
"github.com/streamingfast/dgrpc/server/factory" "github.com/streamingfast/dmetrics" - nodeManager "github.com/streamingfast/firehose-core/node-manager" - "github.com/streamingfast/firehose-core/node-manager/metrics" - "github.com/streamingfast/firehose-core/node-manager/mindreader" - "github.com/streamingfast/firehose-core/node-manager/operator" + nodeManager "github.com/streamingfast/firehose-core/firehose/node-manager" + "github.com/streamingfast/firehose-core/firehose/node-manager/metrics" + "github.com/streamingfast/firehose-core/firehose/node-manager/mindreader" + "github.com/streamingfast/firehose-core/firehose/node-manager/operator" "github.com/streamingfast/shutter" "go.uber.org/zap" "google.golang.org/grpc" diff --git a/node-manager/app/node_reader_stdin/app.go b/node-manager/app/node_reader_stdin/app.go index 3a54eca..2606048 100644 --- a/node-manager/app/node_reader_stdin/app.go +++ b/node-manager/app/node_reader_stdin/app.go @@ -22,9 +22,9 @@ import ( "github.com/streamingfast/bstream/blockstream" dgrpcserver "github.com/streamingfast/dgrpc/server" dgrpcfactory "github.com/streamingfast/dgrpc/server/factory" - nodeManager "github.com/streamingfast/firehose-core/node-manager" - logplugin "github.com/streamingfast/firehose-core/node-manager/log_plugin" - "github.com/streamingfast/firehose-core/node-manager/mindreader" + nodeManager "github.com/streamingfast/firehose-core/firehose/node-manager" + logplugin "github.com/streamingfast/firehose-core/firehose/node-manager/log_plugin" + "github.com/streamingfast/firehose-core/firehose/node-manager/mindreader" "github.com/streamingfast/logging" pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" pbheadinfo "github.com/streamingfast/pbgo/sf/headinfo/v1" diff --git a/node-manager/mindreader/init_test.go b/node-manager/mindreader/init_test.go index 9f82b3c..24bfa9c 100644 --- a/node-manager/mindreader/init_test.go +++ b/node-manager/mindreader/init_test.go @@ -18,7 +18,7 @@ import ( 
"github.com/streamingfast/logging" ) -var testLogger, testTracer = logging.PackageLogger("node-manager", "github.com/streamingfast/firehose-core/node_manager/mindreader/tests") +var testLogger, testTracer = logging.PackageLogger("node-manager", "github.com/streamingfast/firehose-core/firehose/node_manager/mindreader/tests") func init() { logging.InstantiateLoggers() diff --git a/node-manager/mindreader/mindreader.go b/node-manager/mindreader/mindreader.go index ecc5967..5b95dc4 100644 --- a/node-manager/mindreader/mindreader.go +++ b/node-manager/mindreader/mindreader.go @@ -26,7 +26,7 @@ import ( "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/blockstream" "github.com/streamingfast/dstore" - nodeManager "github.com/streamingfast/firehose-core/node-manager" + nodeManager "github.com/streamingfast/firehose-core/firehose/node-manager" "github.com/streamingfast/logging" "github.com/streamingfast/shutter" "go.uber.org/zap" diff --git a/node-manager/operator/operator.go b/node-manager/operator/operator.go index 03428e3..0bdee26 100644 --- a/node-manager/operator/operator.go +++ b/node-manager/operator/operator.go @@ -23,7 +23,7 @@ import ( "time" "github.com/streamingfast/derr" - nodeManager "github.com/streamingfast/firehose-core/node-manager" + nodeManager "github.com/streamingfast/firehose-core/firehose/node-manager" "github.com/streamingfast/shutter" "go.uber.org/atomic" "go.uber.org/zap" diff --git a/node-manager/superviser.go b/node-manager/superviser.go index 3b2aaeb..7360f8e 100644 --- a/node-manager/superviser.go +++ b/node-manager/superviser.go @@ -17,7 +17,7 @@ package node_manager import ( "time" - logplugin "github.com/streamingfast/firehose-core/node-manager/log_plugin" + logplugin "github.com/streamingfast/firehose-core/firehose/node-manager/log_plugin" ) type StartOption string diff --git a/node-manager/superviser/superviser.go b/node-manager/superviser/superviser.go index ad433ed..5049182 100644 --- 
a/node-manager/superviser/superviser.go +++ b/node-manager/superviser/superviser.go @@ -22,8 +22,8 @@ import ( "github.com/ShinyTrinkets/overseer" "github.com/streamingfast/bstream" - nodeManager "github.com/streamingfast/firehose-core/node-manager" - logplugin "github.com/streamingfast/firehose-core/node-manager/log_plugin" + nodeManager "github.com/streamingfast/firehose-core/firehose/node-manager" + logplugin "github.com/streamingfast/firehose-core/firehose/node-manager/log_plugin" "github.com/streamingfast/shutter" "go.uber.org/zap" ) diff --git a/node-manager/superviser/superviser_test.go b/node-manager/superviser/superviser_test.go index 1f628b2..2de17e7 100644 --- a/node-manager/superviser/superviser_test.go +++ b/node-manager/superviser/superviser_test.go @@ -19,7 +19,7 @@ import ( "testing" "time" - logplugin "github.com/streamingfast/firehose-core/node-manager/log_plugin" + logplugin "github.com/streamingfast/firehose-core/firehose/node-manager/log_plugin" "github.com/streamingfast/logging" "github.com/stretchr/testify/assert" "go.uber.org/zap" diff --git a/reader_node.go b/reader_node.go index a273ce2..830e4c2 100644 --- a/reader_node.go +++ b/reader_node.go @@ -13,12 +13,12 @@ import ( "github.com/streamingfast/bstream/blockstream" "github.com/streamingfast/cli" "github.com/streamingfast/dlauncher/launcher" - nodeManager "github.com/streamingfast/firehose-core/node-manager" - nodeManagerApp "github.com/streamingfast/firehose-core/node-manager/app/node_manager" - "github.com/streamingfast/firehose-core/node-manager/metrics" - reader "github.com/streamingfast/firehose-core/node-manager/mindreader" - "github.com/streamingfast/firehose-core/node-manager/operator" - sv "github.com/streamingfast/firehose-core/superviser" + nodeManager "github.com/streamingfast/firehose-core/firehose/node-manager" + nodeManagerApp "github.com/streamingfast/firehose-core/firehose/node-manager/app/node_manager" + 
"github.com/streamingfast/firehose-core/firehose/node-manager/metrics" + reader "github.com/streamingfast/firehose-core/firehose/node-manager/mindreader" + "github.com/streamingfast/firehose-core/firehose/node-manager/operator" + sv "github.com/streamingfast/firehose-core/firehose/superviser" "github.com/streamingfast/logging" pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" pbheadinfo "github.com/streamingfast/pbgo/sf/headinfo/v1" diff --git a/reader_node_stdin.go b/reader_node_stdin.go index a06a728..a612245 100644 --- a/reader_node_stdin.go +++ b/reader_node_stdin.go @@ -18,10 +18,10 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/streamingfast/dlauncher/launcher" - nodeManager "github.com/streamingfast/firehose-core/node-manager" - nodeReaderStdinApp "github.com/streamingfast/firehose-core/node-manager/app/node_reader_stdin" - "github.com/streamingfast/firehose-core/node-manager/metrics" - "github.com/streamingfast/firehose-core/node-manager/mindreader" + nodeManager "github.com/streamingfast/firehose-core/firehose/node-manager" + nodeReaderStdinApp "github.com/streamingfast/firehose-core/firehose/node-manager/app/node_reader_stdin" + "github.com/streamingfast/firehose-core/firehose/node-manager/metrics" + "github.com/streamingfast/firehose-core/firehose/node-manager/mindreader" "github.com/streamingfast/logging" ) diff --git a/relayer.go b/relayer.go index a13fe16..595c451 100644 --- a/relayer.go +++ b/relayer.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/streamingfast/dlauncher/launcher" - "github.com/streamingfast/firehose-core/relayer/app/relayer" + "github.com/streamingfast/firehose-core/firehose/relayer/app/relayer" ) func registerRelayerApp() { diff --git a/relayer/README.md b/relayer/README.md index 13a0526..b4c94f9 100644 --- a/relayer/README.md +++ b/relayer/README.md @@ -1,6 +1,6 @@ # StreamingFast Relayer 
-[![reference](https://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](https://pkg.go.dev/github.com/streamingfast/firehose-core/relayer) +[![reference](https://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](https://pkg.go.dev/github.com/streamingfast/firehose-core/firehose/relayer) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) The relayer process fans out and propagates blocks from instrumented diff --git a/relayer/app/relayer/app.go b/relayer/app/relayer/app.go index 00ee91f..98db0d8 100644 --- a/relayer/app/relayer/app.go +++ b/relayer/app/relayer/app.go @@ -22,8 +22,8 @@ import ( "github.com/streamingfast/bstream" "github.com/streamingfast/dmetrics" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/relayer" - "github.com/streamingfast/firehose-core/relayer/metrics" + "github.com/streamingfast/firehose-core/firehose/relayer" + "github.com/streamingfast/firehose-core/firehose/relayer/metrics" "github.com/streamingfast/shutter" "go.uber.org/zap" pbhealth "google.golang.org/grpc/health/grpc_health_v1" diff --git a/relayer/app/relayer/logging.go b/relayer/app/relayer/logging.go index b68787c..7626d2c 100644 --- a/relayer/app/relayer/logging.go +++ b/relayer/app/relayer/logging.go @@ -18,4 +18,4 @@ import ( "github.com/streamingfast/logging" ) -var zlog, _ = logging.PackageLogger("relayer", "github.com/streamingfast/firehose-core/relayer/app/relayer") +var zlog, _ = logging.PackageLogger("relayer", "github.com/streamingfast/firehose-core/firehose/relayer/app/relayer") diff --git a/relayer/logging.go b/relayer/logging.go index e123d96..43859ea 100644 --- a/relayer/logging.go +++ b/relayer/logging.go @@ -18,4 +18,4 @@ import ( "github.com/streamingfast/logging" ) -var zlog, ztrace = logging.PackageLogger("relayer", "github.com/streamingfast/firehose-core/relayer") +var zlog, ztrace = logging.PackageLogger("relayer", 
"github.com/streamingfast/firehose-core/firehose/relayer") diff --git a/relayer/relayer.go b/relayer/relayer.go index 0f15a95..225ee96 100644 --- a/relayer/relayer.go +++ b/relayer/relayer.go @@ -24,7 +24,7 @@ import ( "github.com/streamingfast/bstream/forkable" "github.com/streamingfast/bstream/hub" dgrpcfactory "github.com/streamingfast/dgrpc/server/factory" - "github.com/streamingfast/firehose-core/relayer/metrics" + "github.com/streamingfast/firehose-core/firehose/relayer/metrics" "github.com/streamingfast/shutter" pbhealth "google.golang.org/grpc/health/grpc_health_v1" ) diff --git a/substreams_tier1.go b/substreams_tier1.go index acfc734..0af1f9d 100644 --- a/substreams_tier1.go +++ b/substreams_tier1.go @@ -33,7 +33,7 @@ var ss1HeadBlockNumMetric = metricset.NewHeadBlockNumber("substreams-tier1") var ss1HeadTimeDriftmetric = metricset.NewHeadTimeDrift("substreams-tier1") func registerSubstreamsTier1App[B Block](chain *Chain[B]) { - appLogger, _ := logging.PackageLogger("substreams-tier1", "github.com/streamingfast/firehose-ethereum/substreams-tier1") + appLogger, _ := logging.PackageLogger("substreams-tier1", "github.com/streamingfast/firehose-core/firehose-ethereum/substreams-tier1") launcher.RegisterApp(rootLog, &launcher.AppDef{ ID: "substreams-tier1", diff --git a/substreams_tier2.go b/substreams_tier2.go index 8c62793..01e60e9 100644 --- a/substreams_tier2.go +++ b/substreams_tier2.go @@ -31,7 +31,7 @@ var ss2HeadBlockNumMetric = metricset.NewHeadBlockNumber("substreams-tier2") var ss2HeadTimeDriftmetric = metricset.NewHeadTimeDrift("substreams-tier2") func registerSubstreamsTier2App[B Block](chain *Chain[B]) { - appLogger, _ := logging.PackageLogger("substreams-tier2", "github.com/streamingfast/firehose-ethereum/substreams-tier2") + appLogger, _ := logging.PackageLogger("substreams-tier2", "github.com/streamingfast/firehose-core/firehose-ethereum/substreams-tier2") launcher.RegisterApp(rootLog, &launcher.AppDef{ ID: "substreams-tier2", diff --git 
a/superviser/genericsupervisor.go b/superviser/genericsupervisor.go index fa4e416..d7e016d 100644 --- a/superviser/genericsupervisor.go +++ b/superviser/genericsupervisor.go @@ -4,8 +4,8 @@ import ( "strings" "github.com/ShinyTrinkets/overseer" - nodeManager "github.com/streamingfast/firehose-core/node-manager" - "github.com/streamingfast/firehose-core/node-manager/superviser" + nodeManager "github.com/streamingfast/firehose-core/firehose/node-manager" + "github.com/streamingfast/firehose-core/firehose/node-manager/superviser" "go.uber.org/zap" ) diff --git a/superviser/logging.go b/superviser/logging.go index 5c79bfe..6a9f32b 100644 --- a/superviser/logging.go +++ b/superviser/logging.go @@ -1,7 +1,7 @@ package superviser import ( - logplugin "github.com/streamingfast/firehose-core/node-manager/log_plugin" + logplugin "github.com/streamingfast/firehose-core/firehose/node-manager/log_plugin" ) // This file configures a logging reader that transforms log lines received from the blockchain process running diff --git a/tools.go b/tools.go index ce44844..2472e9d 100644 --- a/tools.go +++ b/tools.go @@ -22,7 +22,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/streamingfast/cli/sflags" - "github.com/streamingfast/firehose/client" + "github.com/streamingfast/firehose-core/firehose/firehose/client" pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" "google.golang.org/grpc" diff --git a/tools_check.go b/tools_check.go index 36b1ba4..0f7fae0 100644 --- a/tools_check.go +++ b/tools_check.go @@ -25,7 +25,7 @@ import ( "github.com/streamingfast/cli" "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/tools" + "github.com/streamingfast/firehose-core/firehose/tools" "golang.org/x/exp/maps" "golang.org/x/exp/slices" ) diff --git a/tools_checkmergedbatch.go b/tools_checkmergedbatch.go index dfe13b5..22aec06 100644 --- a/tools_checkmergedbatch.go +++ 
b/tools_checkmergedbatch.go @@ -19,7 +19,7 @@ import ( "github.com/spf13/cobra" "github.com/streamingfast/cli/sflags" - "github.com/streamingfast/firehose-core/tools" + "github.com/streamingfast/firehose-core/firehose/tools" ) var toolsCheckMergedBlocksBatchCmd = &cobra.Command{ diff --git a/tools_compare_blocks.go b/tools_compare_blocks.go index f6476d0..460fa62 100644 --- a/tools_compare_blocks.go +++ b/tools_compare_blocks.go @@ -30,7 +30,7 @@ import ( "github.com/streamingfast/cli" "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/tools" + "github.com/streamingfast/firehose-core/firehose/tools" "go.uber.org/multierr" "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/proto" diff --git a/tools_firehose_client.go b/tools_firehose_client.go index 95ada32..7c6c93f 100644 --- a/tools_firehose_client.go +++ b/tools_firehose_client.go @@ -7,7 +7,7 @@ import ( "github.com/spf13/cobra" "github.com/streamingfast/cli/sflags" - "github.com/streamingfast/firehose-core/tools" + "github.com/streamingfast/firehose-core/firehose/tools" "github.com/streamingfast/jsonpb" pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" diff --git a/tools_fix_bloated_merged_blocks.go b/tools_fix_bloated_merged_blocks.go index bab0013..2a0f972 100644 --- a/tools_fix_bloated_merged_blocks.go +++ b/tools_fix_bloated_merged_blocks.go @@ -7,7 +7,7 @@ import ( "github.com/spf13/cobra" "github.com/streamingfast/bstream" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/tools" + "github.com/streamingfast/firehose-core/firehose/tools" "go.uber.org/zap" ) diff --git a/tools_print.go b/tools_print.go index 950345f..3ad4de7 100644 --- a/tools_print.go +++ b/tools_print.go @@ -15,24 +15,16 @@ package firecore import ( - "encoding/hex" - - "github.com/mr-tron/base58" - "fmt" "io" "os" "strconv" - "github.com/go-json-experiment/json" - 
"github.com/go-json-experiment/json/jsontext" - "github.com/spf13/cobra" "github.com/streamingfast/bstream" "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/tools" - "google.golang.org/protobuf/proto" + "github.com/streamingfast/firehose-core/firehose/tools" ) var toolsPrintCmd = &cobra.Command{ @@ -220,34 +212,36 @@ func printBlock(block *bstream.Block, outputMode PrintOutputMode, printTransacti } case PrintOutputModeJSON, PrintOutputModeJSONL: - nativeBlock := block.ToProtocol().(proto.Message) - - var options []jsontext.Options - if outputMode == PrintOutputModeJSON { - options = append(options, jsontext.WithIndent(" ")) - } - encoder := jsontext.NewEncoder(os.Stdout) - - var marshallers *json.Marshalers - switch UnsafeJsonBytesEncoder { - case "hex": - marshallers = json.NewMarshalers( - json.MarshalFuncV2(func(encoder *jsontext.Encoder, t []byte, options json.Options) error { - return encoder.WriteToken(jsontext.String(hex.EncodeToString(t))) - }), - ) - case "base58": - marshallers = json.NewMarshalers( - json.MarshalFuncV2(func(encoder *jsontext.Encoder, t []byte, options json.Options) error { - return encoder.WriteToken(jsontext.String(base58.Encode(t))) - }), - ) - } - - err := json.MarshalEncode(encoder, nativeBlock, json.WithMarshalers(marshallers)) - if err != nil { - return fmt.Errorf("block JSON printing: json marshal: %w", err) - } + //todo: implement when we have buf registry + panic("not implemented") + //nativeBlock := block.ToProtocol().(proto.Message) + // + //var options []jsontext.Options + //if outputMode == PrintOutputModeJSON { + // options = append(options, jsontext.WithIndent(" ")) + //} + //encoder := jsontext.NewEncoder(os.Stdout) + // + //var marshallers *json.Marshalers + //switch UnsafeJsonBytesEncoder { + //case "hex": + // marshallers = json.NewMarshalers( + // json.MarshalFuncV2(func(encoder *jsontext.Encoder, t []byte, options json.Options) error { + // return 
encoder.WriteToken(jsontext.String(hex.EncodeToString(t))) + // }), + // ) + //case "base58": + // marshallers = json.NewMarshalers( + // json.MarshalFuncV2(func(encoder *jsontext.Encoder, t []byte, options json.Options) error { + // return encoder.WriteToken(jsontext.String(base58.Encode(t))) + // }), + // ) + //} + // + //err := json.MarshalEncode(encoder, nativeBlock, json.WithMarshalers(marshallers)) + //if err != nil { + // return fmt.Errorf("block JSON printing: json marshal: %w", err) + //} } return nil diff --git a/tools_unmerge_blocks.go b/tools_unmerge_blocks.go index 6f66752..d19102b 100644 --- a/tools_unmerge_blocks.go +++ b/tools_unmerge_blocks.go @@ -9,7 +9,7 @@ import ( "github.com/streamingfast/bstream" "github.com/streamingfast/cli" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/tools" + "github.com/streamingfast/firehose-core/firehose/tools" "go.uber.org/zap" ) From 0f3c6011f60b9e3e5fb8af979e4d26f3a4f1c506 Mon Sep 17 00:00:00 2001 From: billettc Date: Thu, 16 Nov 2023 11:12:42 -0500 Subject: [PATCH 05/66] bye bye bstream.Block! 
--- blockencoder.go | 17 ----- chain.go | 38 ++---------- cmd/firecore/main.go | 6 +- consolereader.go | 7 ++- firehose/app/firehose/app.go | 2 +- firehose/factory.go | 4 +- firehose/server/blocks.go | 2 +- firehose/tests/integration_test.go | 2 +- firehose/tests/stream_blocks_test.go | 2 +- go.mod | 5 ++ go.sum | 72 ++++++++++++++++++++++ index_builder.go | 6 +- merger/bundler.go | 2 +- merger/merger_io.go | 2 +- node-manager/mindreader/archiver.go | 2 +- node-manager/mindreader/mindreader.go | 8 +-- node-manager/mindreader/mindreader_test.go | 6 +- node-manager/monitor.go | 9 ++- node-manager/types.go | 6 +- relayer/relayer.go | 2 +- tools/check_blocks.go | 8 +-- tools/check_merged_batch.go | 2 +- tools_check.go | 3 +- tools_download_from_firehose.go | 2 +- tools_fix_bloated_merged_blocks.go | 10 +-- tools_print.go | 8 ++- tools_unmerge_blocks.go | 8 +-- tools_upgrade_merged_blocks.go | 28 ++++----- types.go | 18 +++--- 29 files changed, 159 insertions(+), 128 deletions(-) delete mode 100644 blockencoder.go diff --git a/blockencoder.go b/blockencoder.go deleted file mode 100644 index be0784e..0000000 --- a/blockencoder.go +++ /dev/null @@ -1,17 +0,0 @@ -package firecore - -import ( - "github.com/streamingfast/bstream" -) - -type GenericBlockEncoder struct { -} - -func NewGenericBlockEncoder() *GenericBlockEncoder { - return &GenericBlockEncoder{} -} - -func (g GenericBlockEncoder) Encode(block Block) (blk *bstream.Block, err error) { - //TODO implement me - panic("implement me") -} diff --git a/chain.go b/chain.go index fa8da67..f8c92fc 100644 --- a/chain.go +++ b/chain.go @@ -7,9 +7,10 @@ import ( "runtime/debug" "strings" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + "github.com/spf13/cobra" "github.com/spf13/pflag" - "github.com/streamingfast/bstream" "github.com/streamingfast/firehose-core/firehose/node-manager/mindreader" "github.com/streamingfast/firehose-core/firehose/node-manager/operator" "github.com/streamingfast/logging" @@ -22,7 +23,7 @@ 
import ( // BlockPrinterFunc takes a chain agnostic [block] and prints it to a human readable form. // // See [ToolsConfig#BlockPrinter] for extra details about expected printing. -type BlockPrinterFunc func(block *bstream.Block, alsoPrintTransactions bool, out io.Writer) error +type BlockPrinterFunc func(block *pbbstream.Block, alsoPrintTransactions bool, out io.Writer) error // SanitizeBlockForCompareFunc takes a chain agnostic [block] and transforms it in-place, removing fields // that should not be compared. @@ -50,21 +51,6 @@ type Chain[B Block] struct { // The [LongName] **must** be non-empty. LongName string - // Protocol is exactly 3 characters long that is going to identify your chain when writing blocks - // to file. The written file contains an header and a part of this header is the protocol value. - // - // The [Protocol] **must** be non-empty and exactly 3 characters long all upper case. - Protocol string - - // ProtocolVersion is the version of the protocol that is used to write blocks to file. This value - // is used in the header of the written file. It should be changed each time the Protobuf model change - // to become backward incompatible. This usually should be accompagnied by a change in the Protobuf - // block model of the chain. For example for Ethereum we would go from `sf.ethereum.v1.Block` to - // `sf.ethereum.v2.Block` and the [ProtocolVersion] would be incremented from `1` to `2`. - // - // The [ProtocolVersion] **must** be positive and non-zero and should be incremented each time the Protobuf model change. - ProtocolVersion int32 - // ExecutableName is the name of the binary that is used to launch a syncing full node for this chain. For example, // on Ethereum, the binary by default is `geth`. This is used by the `reader-node` app to specify the // `reader-node-binary-name` flag. @@ -100,8 +86,6 @@ type Chain[B Block] struct { // for most chains. 
FirstStreamableBlock uint64 - BlockAcceptedVersions []int32 - // ConsoleReaderFactory is the function that should return the `ConsoleReader` that knowns // how to transform your your chain specific Firehose instrumentation logs into the proper // Block model of your chain. @@ -229,7 +213,7 @@ type ToolsConfig[B Block] struct { // to upgrade from one version to another of the merged blocks. // // The [MergedBlockUpgrader] is optional and not specifying it disables command `fire tools upgrade-merged-blocks`. - MergedBlockUpgrader func(block *bstream.Block) (*bstream.Block, error) + MergedBlockUpgrader func(block *pbbstream.Block) (*pbbstream.Block, error) } // GetSanitizeBlockForCompare returns the [SanitizeBlockForCompare] value if defined, otherwise a no-op sanitizer. @@ -259,7 +243,6 @@ type TransformFlags struct { func (c *Chain[B]) Validate() { c.ShortName = strings.ToLower(strings.TrimSpace(c.ShortName)) c.LongName = strings.TrimSpace(c.LongName) - c.Protocol = strings.ToLower(c.Protocol) c.ExecutableName = strings.TrimSpace(c.ExecutableName) var err error @@ -276,14 +259,6 @@ func (c *Chain[B]) Validate() { err = multierr.Append(err, fmt.Errorf("field 'LongName' must be non-empty")) } - if len(c.Protocol) != 3 { - err = multierr.Append(err, fmt.Errorf("field 'Protocol' must be non-empty and have exactly 3 characters")) - } - - if c.ProtocolVersion <= 0 { - err = multierr.Append(err, fmt.Errorf("field 'ProtocolVersion' must be positive and non-zero")) - } - if c.ExecutableName == "" { err = multierr.Append(err, fmt.Errorf("field 'ExecutableName' must be non-empty")) } @@ -335,9 +310,6 @@ func (c *Chain[B]) Validate() { // **Caveats** Two chain in the same Go binary will not work today as `bstream` uses global // variables to store configuration which presents multiple chain to exist in the same process. 
func (c *Chain[B]) Init() { - if c.BlockAcceptedVersions == nil { - c.BlockAcceptedVersions = []int32{c.ProtocolVersion} - } c.BlockEncoder = NewBlockEncoder() } @@ -404,7 +376,7 @@ func (c *Chain[B]) BlockPrinter() BlockPrinterFunc { return c.Tools.BlockPrinter } -func defaultBlockPrinter(block *bstream.Block, alsoPrintTransactions bool, out io.Writer) error { +func defaultBlockPrinter(block *pbbstream.Block, alsoPrintTransactions bool, out io.Writer) error { if alsoPrintTransactions { return fmt.Errorf("transactions is not supported by the default block printer") } diff --git a/cmd/firecore/main.go b/cmd/firecore/main.go index bf548be..fd535f2 100644 --- a/cmd/firecore/main.go +++ b/cmd/firecore/main.go @@ -13,12 +13,8 @@ func main() { FullyQualifiedModule: "github.com/streamingfast/firehose-core/firehose", Version: version, - Protocol: "NEA", - ProtocolVersion: 1, - ConsoleReaderFactory: firecore.NewConsoleReader, - - Tools: &firecore.ToolsConfig[*pbbstream.Block]{}, + Tools: &firecore.ToolsConfig[*pbbstream.Block]{}, }) } diff --git a/consolereader.go b/consolereader.go index 6456d1b..8e6cac6 100644 --- a/consolereader.go +++ b/consolereader.go @@ -11,6 +11,7 @@ import ( "github.com/streamingfast/bstream" "github.com/streamingfast/firehose-core/firehose/node-manager/mindreader" "github.com/streamingfast/logging" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "go.uber.org/zap" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" @@ -53,7 +54,7 @@ func (r *ConsoleReader) Done() <-chan interface{} { return r.done } -func (r *ConsoleReader) ReadBlock() (out *bstream.Block, err error) { +func (r *ConsoleReader) ReadBlock() (out *pbbstream.Block, err error) { out, err = r.next() if err != nil { return nil, err @@ -62,7 +63,7 @@ func (r *ConsoleReader) ReadBlock() (out *bstream.Block, err error) { return out, nil } -func (r *ConsoleReader) next() (out *bstream.Block, err error) { +func (r *ConsoleReader) next() (out 
*pbbstream.Block, err error) { for line := range r.lines { if !strings.HasPrefix(line, "FIRE ") { @@ -100,7 +101,7 @@ func (r *ConsoleReader) next() (out *bstream.Block, err error) { // Formats // [block_num:342342342] [block_hash] [parent_num] [parent_hash] [lib:123123123] [timestamp:unix_nano] B64ENCODED_any -func (ctx *parseCtx) readBlock(line string) (out *bstream.Block, err error) { +func (ctx *parseCtx) readBlock(line string) (out *pbbstream.Block, err error) { chunks, err := SplitInBoundedChunks(line, 7) if err != nil { return nil, fmt.Errorf("splitting block log line: %w", err) diff --git a/firehose/app/firehose/app.go b/firehose/app/firehose/app.go index 62a04dd..89bcf27 100644 --- a/firehose/app/firehose/app.go +++ b/firehose/app/firehose/app.go @@ -120,7 +120,7 @@ func (a *App) Run() error { context.Background(), a.config.BlockStreamAddr, 2, - bstream.HandlerFunc(func(blk *bstream.Block, obj interface{}) error { + bstream.HandlerFunc(func(blk *pbbstream.Block, obj interface{}) error { a.modules.HeadBlockNumberMetric.SetUint64(blk.Num()) a.modules.HeadTimeDriftMetric.SetBlockTime(blk.Time()) return h.ProcessBlock(blk, obj) diff --git a/firehose/factory.go b/firehose/factory.go index b5a38e0..d9485e7 100644 --- a/firehose/factory.go +++ b/firehose/factory.go @@ -25,7 +25,7 @@ import ( // bstream blocks to protobuf and applying other transforms var StreamMergedBlocksPreprocThreads = 25 -var bstreamToProtocolPreprocFunc = func(blk *bstream.Block) (interface{}, error) { +var bstreamToProtocolPreprocFunc = func(blk *pbbstream.Block) (interface{}, error) { return blk.ToProtocol(), nil } @@ -51,7 +51,7 @@ func (g *BlockGetter) Get( ctx context.Context, num uint64, id string, - logger *zap.Logger) (out *bstream.Block, err error) { + logger *zap.Logger) (out *pbbstream.Block, err error) { id = bstream.NormalizeBlockID(id) reqLogger := logger.With( diff --git a/firehose/server/blocks.go b/firehose/server/blocks.go index 3aeb0cb..fe332c4 100644 --- 
a/firehose/server/blocks.go +++ b/firehose/server/blocks.go @@ -98,7 +98,7 @@ func (s *Server) Blocks(request *pbfirehose.Request, streamSrv pbfirehose.Stream } var blockCount uint64 - handlerFunc := bstream.HandlerFunc(func(block *bstream.Block, obj interface{}) error { + handlerFunc := bstream.HandlerFunc(func(block *pbbstream.Block, obj interface{}) error { blockCount++ cursorable := obj.(bstream.Cursorable) cursor := cursorable.Cursor() diff --git a/firehose/tests/integration_test.go b/firehose/tests/integration_test.go index 6b44847..72e20ef 100644 --- a/firehose/tests/integration_test.go +++ b/firehose/tests/integration_test.go @@ -121,7 +121,7 @@ package firehose // irrStore := getIrrStore(c.irreversibleBlocksIndexes) // // // fake block decoder func to return pbbstream.Block -// bstream.GetBlockDecoder = bstream.BlockDecoderFunc(func(blk *bstream.Block) (interface{}, error) { +// bstream.GetBlockDecoder = bstream.BlockDecoderFunc(func(blk *pbbstream.Block) (interface{}, error) { // block := new(pbbstream.Block) // block.Number = blk.Number // block.Id = blk.Id diff --git a/firehose/tests/stream_blocks_test.go b/firehose/tests/stream_blocks_test.go index 3263db0..7dbd14e 100644 --- a/firehose/tests/stream_blocks_test.go +++ b/firehose/tests/stream_blocks_test.go @@ -39,7 +39,7 @@ package firehose // ) // // // fake block decoder func to return bstream.Block -// bstream.GetBlockDecoder = bstream.BlockDecoderFunc(func(blk *bstream.Block) (interface{}, error) { +// bstream.GetBlockDecoder = bstream.BlockDecoderFunc(func(blk *pbbstream.Block) (interface{}, error) { // block := new(pbbstream.Block) // block.Number = blk.Number // block.Id = blk.Id diff --git a/go.mod b/go.mod index 2115ddd..9a3496b 100644 --- a/go.mod +++ b/go.mod @@ -2,6 +2,11 @@ module github.com/streamingfast/firehose-core/firehose go 1.21 +replace ( + github.com/streamingfast/bstream => ../bstream + github.com/streamingfast/pbgo => ../pbgo +) + require ( github.com/ShinyTrinkets/overseer 
v0.3.0 github.com/dustin/go-humanize v1.0.1 diff --git a/go.sum b/go.sum index f4195c8..b752873 100644 --- a/go.sum +++ b/go.sum @@ -31,6 +31,8 @@ cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Ud cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk= cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -42,13 +44,19 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.2.0/go.mod h1:xlogom/6gr8RJGBe7nT2eGsQYAFUbbv8dbC29qE3Xmw= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= cloud.google.com/go/compute v1.21.0 h1:JNBsyXVoOoNJtTQcnEY5uYpZIbeCTYIeDe0Xh1bySMk= cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod 
h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/container v1.3.1/go.mod h1:/mI/mTug/DwXJPxysUoInyvF3ekeXGiP8teCAtgGMdM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/iam v0.1.1/go.mod h1:CKqrcnI/suGpybEHxZ7BMehL0oA4LpdyJdUlTl9jVMw= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= cloud.google.com/go/logging v1.7.0 h1:CJYxlNNNNAMkHp9em/YEXcfJg+rPDg7YfwoRpMU+t5I= @@ -57,6 +65,7 @@ cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tE cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= cloud.google.com/go/monitoring v1.4.0/go.mod h1:y6xnxfwI3hTFWOdkOaD7nfJVlwuC3/mS/5kvtT131p4= +cloud.google.com/go/monitoring v1.6.0/go.mod h1:w+OY1TYCk4MtvY7WfEHlIp5mP8SV/gDSqOsvGhVa2KM= cloud.google.com/go/monitoring v1.15.1 h1:65JhLMd+JiYnXr6j5Z63dUYCuOg770p8a/VC+gil/58= cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -70,6 +79,7 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.21.0/go.mod h1:XmRlxkgPjlBONznT2dDUU/5XlpU2OjMnKuqnZI01LAA= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.30.1 
h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= @@ -200,6 +210,7 @@ github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -237,6 +248,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= @@ -346,6 +358,7 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -381,14 +394,20 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 
v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gordonklaus/ineffassign v0.0.0-20180909121442-1003c8bd00dc/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= @@ -651,6 +670,7 @@ github.com/streamingfast/derr v0.0.0-20210811180100-9138d738bcec/go.mod h1:ulVfu github.com/streamingfast/derr v0.0.0-20230515163924-8570aaa43fe1 h1:xJB7rXnOHLesosMjfwWsEL2i/40mFSkzenEb3M0qTyM= github.com/streamingfast/derr v0.0.0-20230515163924-8570aaa43fe1/go.mod h1:QSm/AfaDsE0k1xBYi0lW580YJ/WDV/FKZI628tkZR0Y= github.com/streamingfast/dgrpc v0.0.0-20220301153539-536adf71b594/go.mod h1:HFjyAk8wkkb92dLBq1lxArfaWvDHb9Y53+fg7O5WTiU= +github.com/streamingfast/dgrpc v0.0.0-20220909121013-162e9305bbfc/go.mod h1:YlFJuFiB9rmglB5UfTfnsOTfKC1rFo+D0sRbTzLcqgc= github.com/streamingfast/dgrpc v0.0.0-20230929132851-893fc52687fa h1:L/Ipge5pkZtyHucT7c8F/PiCitiNqQxjoUuxyzWKZew= github.com/streamingfast/dgrpc v0.0.0-20230929132851-893fc52687fa/go.mod h1:AcY2kk28XswihgU6z37288a3ZF4gGGO7nNwlTI/vET4= github.com/streamingfast/dlauncher v0.0.0-20230607184145-76399faad89e h1:Nh/gLDv8rOMIidb/gpO4rZOYVe09k+tof/trezkpku4= @@ -897,6 +917,11 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= @@ -919,6 +944,11 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod 
h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -932,6 +962,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= @@ -1006,9 +1037,15 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1102,6 +1139,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= @@ -1143,6 +1183,14 @@ google.golang.org/api v0.66.0/go.mod 
h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oY google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= google.golang.org/api v0.69.0/go.mod h1:boanBiw+h5c3s+tBPgEzLDRHfFLWV0qXxRHz3ws7C80= google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.91.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -1199,6 +1247,7 @@ google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod 
h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -1234,6 +1283,23 @@ google.golang.org/genproto v0.0.0-20220211171837-173942840c17/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod 
h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220802133213-ce4fa296bf78/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220808131553-a91ffa7f803e/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw= @@ -1271,6 +1337,11 @@ google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.58.0 h1:32JY8YpPMSR45K+c3o6b8VL73V+rR8k+DeMIr4vRH8o= google.golang.org/grpc v1.58.0/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/grpc/cmd/protoc-gen-go-grpc 
v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -1287,6 +1358,7 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= diff --git a/index_builder.go b/index_builder.go index 488c749..2de8962 100644 --- a/index_builder.go +++ b/index_builder.go @@ -10,6 +10,7 @@ import ( bstransform "github.com/streamingfast/bstream/transform" "github.com/streamingfast/dlauncher/launcher" indexerApp "github.com/streamingfast/index-builder/app/index-builder" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" ) func registerIndexBuilderApp[B Block](chain *Chain[B]) { @@ -69,9 +70,8 @@ func registerIndexBuilderApp[B Block](chain *Chain[B]) { return nil, fmt.Errorf("unable to create indexer: %w", err) } - handler := bstream.HandlerFunc(func(blk *bstream.Block, _ interface{}) error { - indexer.ProcessBlock(blk.ToProtocol().(B)) - return nil + handler := bstream.HandlerFunc(func(blk *pbbstream.Block, _ interface{}) error { + return indexer.ProcessBlock(any(blk).(B)) }) app := indexerApp.New(&indexerApp.Config{ diff --git a/merger/bundler.go b/merger/bundler.go index e50eee6..08ee879 100644 --- a/merger/bundler.go +++ b/merger/bundler.go @@ -137,7 +137,7 @@ func readBlockTime(data []byte) (time.Time, error) { return blk.Time(), nil } -func (b *Bundler) ProcessBlock(_ *bstream.Block, obj interface{}) error { +func (b 
*Bundler) ProcessBlock(_ *pbbstream.Block, obj interface{}) error { obf := obj.(bstream.ObjectWrapper).WrappedObject().(*bstream.OneBlockFile) if obf.Num < b.baseBlockNum { // we may be receiving an inclusive LIB just before our bundle, ignore it diff --git a/merger/merger_io.go b/merger/merger_io.go index cbb237a..f4218bf 100644 --- a/merger/merger_io.go +++ b/merger/merger_io.go @@ -375,7 +375,7 @@ func (od *oneBlockFilesDeleter) processDeletions() { } } -func lastBlock(mergeFileReader io.ReadCloser) (out *bstream.Block, err error) { +func lastBlock(mergeFileReader io.ReadCloser) (out *pbbstream.Block, err error) { defer mergeFileReader.Close() blkReader, err := bstream.GetBlockReaderFactory.New(mergeFileReader) diff --git a/node-manager/mindreader/archiver.go b/node-manager/mindreader/archiver.go index 1d09730..970354e 100644 --- a/node-manager/mindreader/archiver.go +++ b/node-manager/mindreader/archiver.go @@ -80,7 +80,7 @@ func (a *Archiver) Start(ctx context.Context) { go a.fileUploader.Start(ctx) } -func (a *Archiver) StoreBlock(ctx context.Context, block *bstream.Block) error { +func (a *Archiver) StoreBlock(ctx context.Context, block *pbbstream.Block) error { if block.Number < a.startBlock { a.logger.Debug("skipping block below start_block", zap.Stringer("block", block), zap.Uint64("start_block", a.startBlock)) return nil diff --git a/node-manager/mindreader/mindreader.go b/node-manager/mindreader/mindreader.go index 5b95dc4..737b1d6 100644 --- a/node-manager/mindreader/mindreader.go +++ b/node-manager/mindreader/mindreader.go @@ -37,7 +37,7 @@ var ( ) type ConsolerReader interface { - ReadBlock() (obj *bstream.Block, err error) + ReadBlock() (obj *pbbstream.Block, err error) Done() <-chan interface{} } @@ -179,7 +179,7 @@ func (p *MindReaderPlugin) Launch() { } func (p *MindReaderPlugin) launch() { - blocks := make(chan *bstream.Block, p.channelCapacity) + blocks := make(chan *pbbstream.Block, p.channelCapacity) p.zlogger.Info("launching blocks reading 
loop", zap.Int("capacity", p.channelCapacity)) go p.consumeReadFlow(blocks) @@ -226,7 +226,7 @@ func (p *MindReaderPlugin) waitForReadFlowToComplete() { } // consumeReadFlow is the one function blocking termination until consumption/writeBlock/upload is done -func (p *MindReaderPlugin) consumeReadFlow(blocks <-chan *bstream.Block) { +func (p *MindReaderPlugin) consumeReadFlow(blocks <-chan *pbbstream.Block) { p.zlogger.Info("starting consume flow") defer close(p.consumeReadFlowDone) @@ -291,7 +291,7 @@ func (p *MindReaderPlugin) drainMessages() { } } -func (p *MindReaderPlugin) readOneMessage(blocks chan<- *bstream.Block) error { +func (p *MindReaderPlugin) readOneMessage(blocks chan<- *pbbstream.Block) error { block, err := p.consoleReader.ReadBlock() if err != nil { return err diff --git a/node-manager/mindreader/mindreader_test.go b/node-manager/mindreader/mindreader_test.go index 5e76d58..e78b9b1 100644 --- a/node-manager/mindreader/mindreader_test.go +++ b/node-manager/mindreader/mindreader_test.go @@ -27,7 +27,7 @@ func TestMindReaderPlugin_LegacyPrefix_ReadFlow(t *testing.T) { func testMindReaderPluginReadFlow(t *testing.T, prefix string) { numOfLines := 1 lines := make(chan string, numOfLines) - blocks := make(chan *bstream.Block, numOfLines) + blocks := make(chan *pbbstream.Block, numOfLines) mindReader := &MindReaderPlugin{ Shutter: shutter.New(), @@ -59,7 +59,7 @@ func testMindReaderPluginReadFlow(t *testing.T, prefix string) { func TestMindReaderPlugin_StopAtBlockNumReached(t *testing.T) { numOfLines := 2 lines := make(chan string, numOfLines) - blocks := make(chan *bstream.Block, numOfLines) + blocks := make(chan *pbbstream.Block, numOfLines) done := make(chan interface{}) mindReader := &MindReaderPlugin{ @@ -130,7 +130,7 @@ func (c *testConsoleReader) Done() <-chan interface{} { return c.done } -func (c *testConsoleReader) ReadBlock() (*bstream.Block, error) { +func (c *testConsoleReader) ReadBlock() (*pbbstream.Block, error) { line, _ := <-c.lines 
var formatedLine string diff --git a/node-manager/monitor.go b/node-manager/monitor.go index 0c36f24..78e8616 100644 --- a/node-manager/monitor.go +++ b/node-manager/monitor.go @@ -3,7 +3,6 @@ package node_manager import ( "time" - "github.com/streamingfast/bstream" "github.com/streamingfast/dmetrics" "go.uber.org/atomic" "go.uber.org/zap" @@ -14,7 +13,7 @@ type Readiness interface { } type MetricsAndReadinessManager struct { - headBlockChan chan *bstream.Block + headBlockChan chan *pbbstream.Block headBlockTimeDrift *dmetrics.HeadTimeDrift headBlockNumber *dmetrics.HeadBlockNum appReadiness *dmetrics.AppReadiness @@ -29,7 +28,7 @@ type MetricsAndReadinessManager struct { func NewMetricsAndReadinessManager(headBlockTimeDrift *dmetrics.HeadTimeDrift, headBlockNumber *dmetrics.HeadBlockNum, appReadiness *dmetrics.AppReadiness, readinessMaxLatency time.Duration) *MetricsAndReadinessManager { return &MetricsAndReadinessManager{ - headBlockChan: make(chan *bstream.Block, 1), // just for non-blocking, saving a few nanoseconds here + headBlockChan: make(chan *pbbstream.Block, 1), // just for non-blocking, saving a few nanoseconds here readinessProbe: atomic.NewBool(false), appReadiness: appReadiness, headBlockTimeDrift: headBlockTimeDrift, @@ -54,7 +53,7 @@ func (m *MetricsAndReadinessManager) IsReady() bool { func (m *MetricsAndReadinessManager) Launch() { for { - var lastSeenBlock *bstream.Block + var lastSeenBlock *pbbstream.Block select { case block := <-m.headBlockChan: lastSeenBlock = block @@ -86,7 +85,7 @@ func (m *MetricsAndReadinessManager) Launch() { } } -func (m *MetricsAndReadinessManager) UpdateHeadBlock(block *bstream.Block) error { +func (m *MetricsAndReadinessManager) UpdateHeadBlock(block *pbbstream.Block) error { m.headBlockChan <- block return nil } diff --git a/node-manager/types.go b/node-manager/types.go index a732be8..dc974a7 100644 --- a/node-manager/types.go +++ b/node-manager/types.go @@ -14,12 +14,10 @@ package node_manager -import 
"github.com/streamingfast/bstream" - type DeepMindDebuggable interface { DebugDeepMind(enabled bool) } -type HeadBlockUpdater func(block *bstream.Block) error +type HeadBlockUpdater func(block *pbbstream.Block) error -type OnBlockWritten func(block *bstream.Block) error +type OnBlockWritten func(block *pbbstream.Block) error diff --git a/relayer/relayer.go b/relayer/relayer.go index 225ee96..13063be 100644 --- a/relayer/relayer.go +++ b/relayer/relayer.go @@ -90,7 +90,7 @@ func NewMultiplexedSource(handler bstream.Handler, sourceAddresses []string, max gate := bstream.NewRealtimeGate(maxSourceLatency, subHandler, bstream.GateOptionWithLogger(logger)) var upstreamHandler bstream.Handler - upstreamHandler = bstream.HandlerFunc(func(blk *bstream.Block, obj interface{}) error { + upstreamHandler = bstream.HandlerFunc(func(blk *pbbstream.Block, obj interface{}) error { return gate.ProcessBlock(blk, &namedObj{ Obj: obj, Name: sourceName, diff --git a/tools/check_blocks.go b/tools/check_blocks.go index b767a4a..f739f11 100644 --- a/tools/check_blocks.go +++ b/tools/check_blocks.go @@ -32,7 +32,7 @@ func CheckMergedBlocks( storeURL string, fileBlockSize uint64, blockRange BlockRange, - blockPrinter func(block *bstream.Block), + blockPrinter func(block *pbbstream.Block), printDetails PrintDetails, ) error { readAllBlocks := printDetails != PrintNoDetails @@ -165,8 +165,8 @@ func CheckMergedBlocks( type trackedForkDB struct { fdb *forkable.ForkDB - firstUnlinkableBlock *bstream.Block - lastLinkedBlock *bstream.Block + firstUnlinkableBlock *pbbstream.Block + lastLinkedBlock *pbbstream.Block unlinkableSegmentCount int } @@ -176,7 +176,7 @@ func validateBlockSegment( segment string, fileBlockSize uint64, blockRange BlockRange, - blockPrinter func(block *bstream.Block), + blockPrinter func(block *pbbstream.Block), printDetails PrintDetails, tfdb *trackedForkDB, ) (lowestBlockSeen, highestBlockSeen uint64) { diff --git a/tools/check_merged_batch.go b/tools/check_merged_batch.go 
index 2dbb4ec..09132f7 100644 --- a/tools/check_merged_batch.go +++ b/tools/check_merged_batch.go @@ -158,7 +158,7 @@ func checkMergedBlockFileBroken( } for { - var block *bstream.Block + var block *pbbstream.Block block, err = readerFactory.Read() if block == nil { diff --git a/tools_check.go b/tools_check.go index 0f7fae0..f76e5f0 100644 --- a/tools_check.go +++ b/tools_check.go @@ -26,6 +26,7 @@ import ( "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dstore" "github.com/streamingfast/firehose-core/firehose/tools" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "golang.org/x/exp/maps" "golang.org/x/exp/slices" ) @@ -94,7 +95,7 @@ func createToolsCheckMergedBlocksE(blockPrinter BlockPrinterFunc) CommandExecuto printDetails = tools.PrintFull } - return tools.CheckMergedBlocks(cmd.Context(), rootLog, storeURL, fileBlockSize, blockRange, func(block *bstream.Block) { + return tools.CheckMergedBlocks(cmd.Context(), rootLog, storeURL, fileBlockSize, blockRange, func(block *pbbstream.Block) { blockPrinter(block, false, os.Stdout) }, printDetails) } diff --git a/tools_download_from_firehose.go b/tools_download_from_firehose.go index 4d44cfb..741c86b 100644 --- a/tools_download_from_firehose.go +++ b/tools_download_from_firehose.go @@ -54,7 +54,7 @@ func createToolsDownloadFromFirehoseE[B Block](chain *Chain[B], zlog *zap.Logger //mergeWriter := &mergedBlocksWriter{ // store: store, // writerFactory: bstream.GetBlockWriterFactory, - // tweakBlock: func(b *bstream.Block) (*bstream.Block, error) { return b, nil }, + // tweakBlock: func(b *pbbstream.Block) (*pbbstream.Block, error) { return b, nil }, // logger: zlog, //} // diff --git a/tools_fix_bloated_merged_blocks.go b/tools_fix_bloated_merged_blocks.go index 2a0f972..8a17c4a 100644 --- a/tools_fix_bloated_merged_blocks.go +++ b/tools_fix_bloated_merged_blocks.go @@ -8,6 +8,7 @@ import ( "github.com/streamingfast/bstream" "github.com/streamingfast/dstore" 
"github.com/streamingfast/firehose-core/firehose/tools" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "go.uber.org/zap" ) @@ -60,16 +61,15 @@ func runFixBloatedMergedBlocksE(zlog *zap.Logger) CommandExecutor { } defer rc.Close() - br, err := bstream.GetBlockReaderFactory.New(rc) + br, err := bstream.NewDBinBlockReader(rc) if err != nil { return fmt.Errorf("creating block reader: %w", err) } mergeWriter := &mergedBlocksWriter{ - store: destStore, - writerFactory: bstream.GetBlockWriterFactory, - tweakBlock: func(b *bstream.Block) (*bstream.Block, error) { return b, nil }, - logger: zlog, + store: destStore, + tweakBlock: func(b *pbbstream.Block) (*pbbstream.Block, error) { return b, nil }, + logger: zlog, } seen := make(map[string]bool) diff --git a/tools_print.go b/tools_print.go index 3ad4de7..c2dedc4 100644 --- a/tools_print.go +++ b/tools_print.go @@ -20,6 +20,8 @@ import ( "os" "strconv" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + "github.com/spf13/cobra" "github.com/streamingfast/bstream" "github.com/streamingfast/cli/sflags" @@ -92,7 +94,7 @@ func createToolsPrintMergedBlocksE(blockPrinter BlockPrinterFunc) CommandExecuto } defer reader.Close() - readerFactory, err := bstream.GetBlockReaderFactory.New(reader) + readerFactory, err := bstream.NewDBinBlockReader(reader) if err != nil { fmt.Printf("❌ Unable to read blocks filename %s: %s\n", filename, err) return err @@ -159,7 +161,7 @@ func createToolsPrintOneBlockE(blockPrinter BlockPrinterFunc) CommandExecutor { } defer reader.Close() - readerFactory, err := bstream.GetBlockReaderFactory.New(reader) + readerFactory, err := bstream.NewDBinBlockReader(reader) if err != nil { fmt.Printf("❌ Unable to read blocks filename %s: %s\n", filepath, err) return err @@ -204,7 +206,7 @@ func toolsPrintCmdGetOutputMode(cmd *cobra.Command) (PrintOutputMode, error) { return out, nil } -func printBlock(block *bstream.Block, outputMode PrintOutputMode, printTransactions bool, blockPrinter 
BlockPrinterFunc) error { +func printBlock(block *pbbstream.Block, outputMode PrintOutputMode, printTransactions bool, blockPrinter BlockPrinterFunc) error { switch outputMode { case PrintOutputModeText: if err := blockPrinter(block, printTransactions, os.Stdout); err != nil { diff --git a/tools_unmerge_blocks.go b/tools_unmerge_blocks.go index d19102b..b400af0 100644 --- a/tools_unmerge_blocks.go +++ b/tools_unmerge_blocks.go @@ -10,6 +10,7 @@ import ( "github.com/streamingfast/cli" "github.com/streamingfast/dstore" "github.com/streamingfast/firehose-core/firehose/tools" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "go.uber.org/zap" ) @@ -62,7 +63,7 @@ func runUnmergeBlocksE(zlog *zap.Logger) CommandExecutor { } defer rc.Close() - br, err := bstream.GetBlockReaderFactory.New(rc) + br, err := bstream.NewDBinBlockReader(rc) if err != nil { return fmt.Errorf("creating block reader: %w", err) } @@ -88,14 +89,13 @@ func runUnmergeBlocksE(zlog *zap.Logger) CommandExecutor { pr, pw := io.Pipe() //write block data to pipe, and then close to signal end of data - go func(block *bstream.Block) { + go func(block *pbbstream.Block) { var err error defer func() { pw.CloseWithError(err) }() - var bw bstream.BlockWriter - bw, err = bstream.GetBlockWriterFactory.New(pw) + bw, err := bstream.NewDBinBlockWriter(pw) if err != nil { zlog.Error("creating block writer", zap.Error(err)) return diff --git a/tools_upgrade_merged_blocks.go b/tools_upgrade_merged_blocks.go index c118e55..bfe7358 100644 --- a/tools_upgrade_merged_blocks.go +++ b/tools_upgrade_merged_blocks.go @@ -7,6 +7,8 @@ import ( "io" "strconv" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + "github.com/spf13/cobra" "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/stream" @@ -23,7 +25,7 @@ func NewToolsUpgradeMergedBlocksCmd[B Block](chain *Chain[B]) *cobra.Command { } } -func getMergedBlockUpgrader(tweakFunc func(block *bstream.Block) (*bstream.Block, error)) func(cmd 
*cobra.Command, args []string) error { +func getMergedBlockUpgrader(tweakFunc func(block *pbbstream.Block) (*pbbstream.Block, error)) func(cmd *cobra.Command, args []string) error { return func(cmd *cobra.Command, args []string) error { source := args[0] sourceStore, err := dstore.NewDBinStore(source) @@ -48,12 +50,11 @@ func getMergedBlockUpgrader(tweakFunc func(block *bstream.Block) (*bstream.Block rootLog.Info("starting block upgrader process", zap.Uint64("start", start), zap.Uint64("stop", stop), zap.String("source", source), zap.String("dest", dest)) writer := &mergedBlocksWriter{ - cmd: cmd, - store: destStore, - lowBlockNum: lowBoundary(start), - stopBlockNum: stop, - writerFactory: bstream.GetBlockWriterFactory, - tweakBlock: tweakFunc, + cmd: cmd, + store: destStore, + lowBlockNum: lowBoundary(start), + stopBlockNum: stop, + tweakBlock: tweakFunc, } stream := stream.New(nil, sourceStore, nil, int64(start), writer, stream.WithFinalBlocksOnly()) @@ -71,15 +72,14 @@ type mergedBlocksWriter struct { lowBlockNum uint64 stopBlockNum uint64 - blocks []*bstream.Block - writerFactory bstream.BlockWriterFactory - logger *zap.Logger - cmd *cobra.Command + blocks []*pbbstream.Block + logger *zap.Logger + cmd *cobra.Command - tweakBlock func(*bstream.Block) (*bstream.Block, error) + tweakBlock func(*pbbstream.Block) (*pbbstream.Block, error) } -func (w *mergedBlocksWriter) ProcessBlock(blk *bstream.Block, obj interface{}) error { +func (w *mergedBlocksWriter) ProcessBlock(blk *pbbstream.Block, obj interface{}) error { if w.tweakBlock != nil { b, err := w.tweakBlock(blk) if err != nil { @@ -140,7 +140,7 @@ func (w *mergedBlocksWriter) writeBundle() error { pw.CloseWithError(err) }() - blockWriter, err := w.writerFactory.New(pw) + blockWriter, err := bstream.NewDBinBlockWriter(pw) if err != nil { return } diff --git a/types.go b/types.go index 3fca748..14cb5c2 100644 --- a/types.go +++ b/types.go @@ -4,12 +4,14 @@ import ( "fmt" "time" + 
"google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/anypb" "github.com/spf13/cobra" - "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/transform" "github.com/streamingfast/dstore" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "google.golang.org/protobuf/proto" ) @@ -117,24 +119,24 @@ func (b BlockEnveloppe) GetFirehoseBlockLIBNum() uint64 { // block implementing [Block] interface that will be encoded into [bstream.Block] // type which is the type used by Firehose core to "envelope" the block. type BlockEncoder interface { - Encode(block Block) (blk *bstream.Block, err error) + Encode(block Block) (blk *pbbstream.Block, err error) } -type BlockEncoderFunc func(block Block) (blk *bstream.Block, err error) +type BlockEncoderFunc func(block Block) (blk *pbbstream.Block, err error) -func (f BlockEncoderFunc) Encode(block Block) (blk *bstream.Block, err error) { +func (f BlockEncoderFunc) Encode(block Block) (blk *pbbstream.Block, err error) { return f(block) } type CommandExecutor func(cmd *cobra.Command, args []string) (err error) func NewBlockEncoder() BlockEncoder { - return BlockEncoderFunc(func(block Block) (blk *bstream.Block, err error) { + return BlockEncoderFunc(func(block Block) (blk *pbbstream.Block, err error) { return EncodeBlock(block) }) } -func EncodeBlock(b Block) (blk *bstream.Block, err error) { +func EncodeBlock(b Block) (blk *pbbstream.Block, err error) { real := b if b, ok := b.(BlockEnveloppe); ok { real = b.Block @@ -161,11 +163,11 @@ func EncodeBlock(b Block) (blk *bstream.Block, err error) { return nil, fmt.Errorf("unmarshaling block payload: %w", err) } - bstreamBlock := &bstream.Block{ + bstreamBlock := &pbbstream.Block{ Id: b.GetFirehoseBlockID(), Number: b.GetFirehoseBlockNumber(), PreviousId: b.GetFirehoseBlockParentID(), - Timestamp: b.GetFirehoseBlockTime(), + Timestamp: timestamppb.New(b.GetFirehoseBlockTime()), LibNum: v.GetFirehoseBlockLIBNum(), Payload: 
blockPayload, } From 189450e05802879a802c4ae5653c6a17d24d8d0b Mon Sep 17 00:00:00 2001 From: billettc Date: Thu, 16 Nov 2023 11:47:53 -0500 Subject: [PATCH 06/66] fix print and download tools --- chain.go | 19 ++- cmd/firecore/main.go | 2 +- tools_download_from_firehose.go | 229 +++++++++++++++++--------------- tools_print.go | 72 +++++----- 4 files changed, 178 insertions(+), 144 deletions(-) diff --git a/chain.go b/chain.go index f8c92fc..cec0f52 100644 --- a/chain.go +++ b/chain.go @@ -23,7 +23,7 @@ import ( // BlockPrinterFunc takes a chain agnostic [block] and prints it to a human readable form. // // See [ToolsConfig#BlockPrinter] for extra details about expected printing. -type BlockPrinterFunc func(block *pbbstream.Block, alsoPrintTransactions bool, out io.Writer) error +type BlockPrinterFunc func(block Block, alsoPrintTransactions bool, out io.Writer) error // SanitizeBlockForCompareFunc takes a chain agnostic [block] and transforms it in-place, removing fields // that should not be compared. @@ -86,6 +86,13 @@ type Chain[B Block] struct { // for most chains. FirstStreamableBlock uint64 + // BlockFactory is a factory function that returns a new instance of your chain's Block. + // This new instance is usually used within `firecore` to unmarshal some bytes into your + // chain's specific block model and return a [proto.Message] fully instantiated. + // + // The [BlockFactory] **must** be non-nil and must return a non-nil [proto.Message]. + BlockFactory func() Block + // ConsoleReaderFactory is the function that should return the `ConsoleReader` that knowns // how to transform your your chain specific Firehose instrumentation logs into the proper // Block model of your chain. 
@@ -271,6 +278,12 @@ func (c *Chain[B]) Validate() { err = multierr.Append(err, fmt.Errorf("field 'Version' must be non-empty")) } + if c.BlockFactory == nil { + err = multierr.Append(err, fmt.Errorf("field 'BlockFactory' must be non-nil")) + } else if c.BlockFactory() == nil { + err = multierr.Append(err, fmt.Errorf("field 'BlockFactory' must not produce nil blocks")) + } + if c.ConsoleReaderFactory == nil { err = multierr.Append(err, fmt.Errorf("field 'ConsoleReaderFactory' must be non-nil")) } @@ -376,12 +389,12 @@ func (c *Chain[B]) BlockPrinter() BlockPrinterFunc { return c.Tools.BlockPrinter } -func defaultBlockPrinter(block *pbbstream.Block, alsoPrintTransactions bool, out io.Writer) error { +func defaultBlockPrinter(block Block, alsoPrintTransactions bool, out io.Writer) error { if alsoPrintTransactions { return fmt.Errorf("transactions is not supported by the default block printer") } - if _, err := fmt.Fprintf(out, "Block #%d (%s)\n", block.Number, block.Id); err != nil { + if _, err := fmt.Fprintf(out, "Block #%d (%s)\n", block.GetFirehoseBlockNumber(), block.GetFirehoseBlockID()); err != nil { return err } diff --git a/cmd/firecore/main.go b/cmd/firecore/main.go index fd535f2..1a9d45f 100644 --- a/cmd/firecore/main.go +++ b/cmd/firecore/main.go @@ -12,7 +12,7 @@ func main() { ExecutableName: "fire-core", //only used to set default value of reader-node-path, we should not provide a default value anymore ... 
FullyQualifiedModule: "github.com/streamingfast/firehose-core/firehose", Version: version, - + BlockFactory: func() firecore.Block { return new(pbbstream.Block) }, ConsoleReaderFactory: firecore.NewConsoleReader, Tools: &firecore.ToolsConfig[*pbbstream.Block]{}, }) diff --git a/tools_download_from_firehose.go b/tools_download_from_firehose.go index 741c86b..5d137bf 100644 --- a/tools_download_from_firehose.go +++ b/tools_download_from_firehose.go @@ -1,8 +1,20 @@ package firecore import ( + "context" + "fmt" + "io" + "strconv" + "time" + "github.com/spf13/cobra" + "github.com/streamingfast/bstream" + "github.com/streamingfast/dstore" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" ) func newToolsDownloadFromFirehoseCmd[B Block](chain *Chain[B], zlog *zap.Logger) *cobra.Command { @@ -24,112 +36,115 @@ func newToolsDownloadFromFirehoseCmd[B Block](chain *Chain[B], zlog *zap.Logger) func createToolsDownloadFromFirehoseE[B Block](chain *Chain[B], zlog *zap.Logger) func(cmd *cobra.Command, args []string) error { return func(cmd *cobra.Command, args []string) error { - panic("not implemented") - //ctx := context.Background() - // - //endpoint := args[0] - //startBlock, err := strconv.ParseUint(args[1], 10, 64) - //if err != nil { - // return fmt.Errorf("parsing start block num: %w", err) - //} - //stopBlock, err := strconv.ParseUint(args[2], 10, 64) - //if err != nil { - // return fmt.Errorf("parsing stop block num: %w", err) - //} - //destFolder := args[3] - // - //firehoseClient, connClose, requestInfo, err := getFirehoseStreamClientFromCmd(cmd, zlog, endpoint, chain) - //if err != nil { - // return err - //} - //defer connClose() - // - //var retryDelay = time.Second * 4 - // - //store, err := dstore.NewDBinStore(destFolder) - //if err != nil { - // return err - //} - // - //mergeWriter := 
&mergedBlocksWriter{ - // store: store, - // writerFactory: bstream.GetBlockWriterFactory, - // tweakBlock: func(b *pbbstream.Block) (*pbbstream.Block, error) { return b, nil }, - // logger: zlog, - //} - // - //approximateLIBWarningIssued := false - //var lastBlockID string - //var lastBlockNum uint64 - //for { - // - // request := &pbfirehose.Request{ - // StartBlockNum: int64(startBlock), - // StopBlockNum: stopBlock, - // FinalBlocksOnly: true, - // Cursor: requestInfo.Cursor, - // } - // - // stream, err := firehoseClient.Blocks(ctx, request, requestInfo.GRPCCallOpts...) - // if err != nil { - // return fmt.Errorf("unable to start blocks stream: %w", err) - // } - // - // for { - // response, err := stream.Recv() - // if err != nil { - // if err == io.EOF { - // return nil - // } - // - // zlog.Error("stream encountered a remote error, going to retry", - // zap.Duration("retry_delay", retryDelay), - // zap.Error(err), - // ) - // <-time.After(retryDelay) - // break - // } - // - // block := chain.BlockFactory() - // if err := anypb.UnmarshalTo(response.Block, block, proto.UnmarshalOptions{}); err != nil { - // return fmt.Errorf("unmarshal response block: %w", err) - // } - // - // if _, ok := block.(BlockLIBNumDerivable); !ok { - // // We must wrap the block in a BlockEnveloppe and "provide" the LIB number as itself minus 1 since - // // there is nothing we can do more here to obtain the value sadly. 
For chain where the LIB can be - // // derived from the Block itself, this code does **not** run (so it will have the correct value) - // if !approximateLIBWarningIssued { - // approximateLIBWarningIssued = true - // zlog.Warn("LIB number is approximated, it is not provided by the chain's Block model so we msut set it to block number minus 1 (which is kinda ok because only final blocks are retrieved in this download tool)") - // } - // - // number := block.GetFirehoseBlockNumber() - // libNum := number - 1 - // if number <= bstream.GetProtocolFirstStreamableBlock { - // libNum = number - // } - // - // block = BlockEnveloppe{ - // Block: block, - // LIBNum: libNum, - // } - // } - // - // blk, err := chain.BlockEncoder.Encode(block) - // if err != nil { - // return fmt.Errorf("error decoding response to bstream block: %w", err) - // } - // if lastBlockID != "" && blk.PreviousId != lastBlockID { - // return fmt.Errorf("got an invalid sequence of blocks: block %q has previousId %s, previous block %d had ID %q, this endpoint is serving blocks out of order", blk.String(), blk.PreviousId, lastBlockNum, lastBlockID) - // } - // lastBlockID = blk.Id - // lastBlockNum = blk.Number - // - // if err := mergeWriter.ProcessBlock(blk, nil); err != nil { - // return fmt.Errorf("write to blockwriter: %w", err) - // } - // } - //} + ctx := context.Background() + + if _, ok := chain.BlockFactory().(*pbbstream.Block); ok { + //todo: fix this with buf registry + return fmt.Errorf("this tool only works with blocks that are not of type *pbbstream.Block") + } + + endpoint := args[0] + startBlock, err := strconv.ParseUint(args[1], 10, 64) + if err != nil { + return fmt.Errorf("parsing start block num: %w", err) + } + stopBlock, err := strconv.ParseUint(args[2], 10, 64) + if err != nil { + return fmt.Errorf("parsing stop block num: %w", err) + } + destFolder := args[3] + + firehoseClient, connClose, requestInfo, err := getFirehoseStreamClientFromCmd(cmd, zlog, endpoint, chain) + if err != 
nil { + return err + } + defer connClose() + + var retryDelay = time.Second * 4 + + store, err := dstore.NewDBinStore(destFolder) + if err != nil { + return err + } + + mergeWriter := &mergedBlocksWriter{ + store: store, + tweakBlock: func(b *pbbstream.Block) (*pbbstream.Block, error) { return b, nil }, + logger: zlog, + } + + approximateLIBWarningIssued := false + var lastBlockID string + var lastBlockNum uint64 + for { + + request := &pbfirehose.Request{ + StartBlockNum: int64(startBlock), + StopBlockNum: stopBlock, + FinalBlocksOnly: true, + Cursor: requestInfo.Cursor, + } + + stream, err := firehoseClient.Blocks(ctx, request, requestInfo.GRPCCallOpts...) + if err != nil { + return fmt.Errorf("unable to start blocks stream: %w", err) + } + + for { + response, err := stream.Recv() + if err != nil { + if err == io.EOF { + return nil + } + + zlog.Error("stream encountered a remote error, going to retry", + zap.Duration("retry_delay", retryDelay), + zap.Error(err), + ) + <-time.After(retryDelay) + break + } + + block := chain.BlockFactory() + if err := anypb.UnmarshalTo(response.Block, block, proto.UnmarshalOptions{}); err != nil { + return fmt.Errorf("unmarshal response block: %w", err) + } + + if _, ok := block.(BlockLIBNumDerivable); !ok { + // We must wrap the block in a BlockEnveloppe and "provide" the LIB number as itself minus 1 since + // there is nothing we can do more here to obtain the value sadly. 
For chain where the LIB can be + // derived from the Block itself, this code does **not** run (so it will have the correct value) + if !approximateLIBWarningIssued { + approximateLIBWarningIssued = true + zlog.Warn("LIB number is approximated, it is not provided by the chain's Block model so we msut set it to block number minus 1 (which is kinda ok because only final blocks are retrieved in this download tool)") + } + + number := block.GetFirehoseBlockNumber() + libNum := number - 1 + if number <= bstream.GetProtocolFirstStreamableBlock { + libNum = number + } + + block = BlockEnveloppe{ + Block: block, + LIBNum: libNum, + } + } + + blk, err := chain.BlockEncoder.Encode(block) + if err != nil { + return fmt.Errorf("error decoding response to bstream block: %w", err) + } + if lastBlockID != "" && blk.PreviousId != lastBlockID { + return fmt.Errorf("got an invalid sequence of blocks: block %q has previousId %s, previous block %d had ID %q, this endpoint is serving blocks out of order", blk.String(), blk.PreviousId, lastBlockNum, lastBlockID) + } + lastBlockID = blk.Id + lastBlockNum = blk.Number + + if err := mergeWriter.ProcessBlock(blk, nil); err != nil { + return fmt.Errorf("write to blockwriter: %w", err) + } + } + } } } diff --git a/tools_print.go b/tools_print.go index c2dedc4..311018d 100644 --- a/tools_print.go +++ b/tools_print.go @@ -15,11 +15,16 @@ package firecore import ( + "encoding/hex" "fmt" "io" "os" "strconv" + "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "github.com/mr-tron/base58" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "github.com/spf13/cobra" @@ -59,7 +64,7 @@ func init() { func configureToolsPrintCmd[B Block](chain *Chain[B]) { blockPrinter := chain.BlockPrinter() - toolsPrintOneBlockCmd.RunE = createToolsPrintOneBlockE(blockPrinter) + toolsPrintOneBlockCmd.RunE = createToolsPrintOneBlockE(chain, blockPrinter) toolsPrintMergedBlocksCmd.RunE = createToolsPrintMergedBlocksE(blockPrinter) 
} @@ -121,10 +126,15 @@ func createToolsPrintMergedBlocksE(blockPrinter BlockPrinterFunc) CommandExecuto } } -func createToolsPrintOneBlockE(blockPrinter BlockPrinterFunc) CommandExecutor { +func createToolsPrintOneBlockE[B Block](chain *Chain[B], blockPrinter BlockPrinterFunc) CommandExecutor { return func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() + if _, ok := chain.BlockFactory().(*pbbstream.Block); ok { + //todo: fix this with buf registry + return fmt.Errorf("this tool only works with blocks that are not of type *pbbstream.Block") + } + outputMode, err := toolsPrintCmdGetOutputMode(cmd) if err != nil { return fmt.Errorf("invalid 'output' flag: %w", err) @@ -206,7 +216,7 @@ func toolsPrintCmdGetOutputMode(cmd *cobra.Command) (PrintOutputMode, error) { return out, nil } -func printBlock(block *pbbstream.Block, outputMode PrintOutputMode, printTransactions bool, blockPrinter BlockPrinterFunc) error { +func printBlock(block Block, outputMode PrintOutputMode, printTransactions bool, blockPrinter BlockPrinterFunc) error { switch outputMode { case PrintOutputModeText: if err := blockPrinter(block, printTransactions, os.Stdout); err != nil { @@ -214,36 +224,32 @@ func printBlock(block *pbbstream.Block, outputMode PrintOutputMode, printTransac } case PrintOutputModeJSON, PrintOutputModeJSONL: - //todo: implement when we have buf registry - panic("not implemented") - //nativeBlock := block.ToProtocol().(proto.Message) - // - //var options []jsontext.Options - //if outputMode == PrintOutputModeJSON { - // options = append(options, jsontext.WithIndent(" ")) - //} - //encoder := jsontext.NewEncoder(os.Stdout) - // - //var marshallers *json.Marshalers - //switch UnsafeJsonBytesEncoder { - //case "hex": - // marshallers = json.NewMarshalers( - // json.MarshalFuncV2(func(encoder *jsontext.Encoder, t []byte, options json.Options) error { - // return encoder.WriteToken(jsontext.String(hex.EncodeToString(t))) - // }), - // ) - //case "base58": - // 
marshallers = json.NewMarshalers( - // json.MarshalFuncV2(func(encoder *jsontext.Encoder, t []byte, options json.Options) error { - // return encoder.WriteToken(jsontext.String(base58.Encode(t))) - // }), - // ) - //} - // - //err := json.MarshalEncode(encoder, nativeBlock, json.WithMarshalers(marshallers)) - //if err != nil { - // return fmt.Errorf("block JSON printing: json marshal: %w", err) - //} + var options []jsontext.Options + if outputMode == PrintOutputModeJSON { + options = append(options, jsontext.WithIndent(" ")) + } + encoder := jsontext.NewEncoder(os.Stdout) + + var marshallers *json.Marshalers + switch UnsafeJsonBytesEncoder { + case "hex": + marshallers = json.NewMarshalers( + json.MarshalFuncV2(func(encoder *jsontext.Encoder, t []byte, options json.Options) error { + return encoder.WriteToken(jsontext.String(hex.EncodeToString(t))) + }), + ) + case "base58": + marshallers = json.NewMarshalers( + json.MarshalFuncV2(func(encoder *jsontext.Encoder, t []byte, options json.Options) error { + return encoder.WriteToken(jsontext.String(base58.Encode(t))) + }), + ) + } + + err := json.MarshalEncode(encoder, block, json.WithMarshalers(marshallers)) + if err != nil { + return fmt.Errorf("block JSON printing: json marshal: %w", err) + } } return nil From 9b48bf1dc1e10dea4677e78177d52dde7bb1b62f Mon Sep 17 00:00:00 2001 From: billettc Date: Fri, 17 Nov 2023 15:05:37 -0500 Subject: [PATCH 07/66] all test pass! 
--- chain.go | 7 +- cmd/firecore/main.go | 4 +- consolereader.go | 20 +- consolereader_test.go | 10 +- firehose.go | 4 +- firehose/app/firehose/app.go | 10 +- firehose/factory.go | 17 +- firehose/init_test.go | 3 - firehose/server/blocks.go | 4 +- firehose/server/server.go | 4 +- go.mod | 19 +- go.sum | 225 +----------------- index-builder/README.md | 27 +++ index-builder/app/index-builder/app.go | 96 ++++++++ index-builder/app/index-builder/logging.go | 7 + index-builder/healthz.go | 42 ++++ index-builder/index-builder.go | 95 ++++++++ index-builder/metrics/metrics.go | 9 + index_builder.go | 5 +- merger.go | 2 +- merger/CHANGELOG.md | 2 +- merger/README.md | 2 +- merger/app/merger/app.go | 4 +- merger/app/merger/logging.go | 2 +- merger/bundler.go | 8 +- merger/bundler_test.go | 134 ++++++----- merger/bundlereader.go | 29 ++- merger/bundlereader_test.go | 113 ++++----- merger/init_test.go | 2 +- merger/merger_io.go | 9 +- merger/merger_io_test.go | 48 ++-- node-manager/app/node_manager/app.go | 8 +- node-manager/app/node_reader_stdin/app.go | 6 +- node-manager/mindreader/archiver.go | 7 +- node-manager/mindreader/init_test.go | 2 +- node-manager/mindreader/mindreader.go | 9 +- node-manager/mindreader/mindreader_test.go | 2 + node-manager/monitor.go | 4 +- node-manager/operator/operator.go | 2 +- node-manager/superviser.go | 2 +- node-manager/superviser/superviser.go | 4 +- node-manager/superviser/superviser_test.go | 2 +- node-manager/types.go | 2 + reader_node.go | 12 +- reader_node_stdin.go | 8 +- relayer.go | 2 +- relayer/README.md | 2 +- relayer/app/relayer/app.go | 4 +- relayer/app/relayer/logging.go | 2 +- relayer/logging.go | 2 +- relayer/relayer.go | 4 +- start.go | 11 - superviser/genericsupervisor.go | 4 +- superviser/logging.go | 2 +- tools.go | 2 +- tools_check.go | 14 +- .../check_blocks.go => tools_check_blocks.go | 73 +++--- ...ed_batch.go => tools_check_merged_batch.go | 24 +- tools_checkmergedbatch.go | 4 +- tools_compare_blocks.go | 12 +- 
tools_download_from_firehose.go | 4 +- tools_firehose_client.go | 2 +- tools_fix_bloated_merged_blocks.go | 12 +- tools_print.go | 2 +- tools_unmerge_blocks.go | 8 +- types.go | 22 +- 66 files changed, 663 insertions(+), 582 deletions(-) create mode 100644 index-builder/README.md create mode 100644 index-builder/app/index-builder/app.go create mode 100644 index-builder/app/index-builder/logging.go create mode 100644 index-builder/healthz.go create mode 100644 index-builder/index-builder.go create mode 100644 index-builder/metrics/metrics.go rename tools/check_blocks.go => tools_check_blocks.go (73%) rename tools/check_merged_batch.go => tools_check_merged_batch.go (84%) diff --git a/chain.go b/chain.go index cec0f52..48922c4 100644 --- a/chain.go +++ b/chain.go @@ -7,13 +7,12 @@ import ( "runtime/debug" "strings" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" - "github.com/spf13/cobra" "github.com/spf13/pflag" - "github.com/streamingfast/firehose-core/firehose/node-manager/mindreader" - "github.com/streamingfast/firehose-core/firehose/node-manager/operator" + "github.com/streamingfast/firehose-core/node-manager/mindreader" + "github.com/streamingfast/firehose-core/node-manager/operator" "github.com/streamingfast/logging" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "go.uber.org/multierr" "go.uber.org/zap" "google.golang.org/protobuf/reflect/protoreflect" diff --git a/cmd/firecore/main.go b/cmd/firecore/main.go index 1a9d45f..b97edb5 100644 --- a/cmd/firecore/main.go +++ b/cmd/firecore/main.go @@ -1,7 +1,7 @@ package main import ( - firecore "github.com/streamingfast/firehose-core/firehose" + firecore "github.com/streamingfast/firehose-core" pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" ) @@ -10,7 +10,7 @@ func main() { ShortName: "core", //used to compose the binary name LongName: "CORE", //only used to compose cmd title and description ExecutableName: "fire-core", //only used to set default value of reader-node-path, we should not 
provide a default value anymore ... - FullyQualifiedModule: "github.com/streamingfast/firehose-core/firehose", + FullyQualifiedModule: "github.com/streamingfast/firehose-core", Version: version, BlockFactory: func() firecore.Block { return new(pbbstream.Block) }, ConsoleReaderFactory: firecore.NewConsoleReader, diff --git a/consolereader.go b/consolereader.go index 8e6cac6..5b06b89 100644 --- a/consolereader.go +++ b/consolereader.go @@ -8,8 +8,10 @@ import ( "strings" "time" + "google.golang.org/protobuf/types/known/timestamppb" + "github.com/streamingfast/bstream" - "github.com/streamingfast/firehose-core/firehose/node-manager/mindreader" + "github.com/streamingfast/firehose-core/node-manager/mindreader" "github.com/streamingfast/logging" pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "go.uber.org/zap" @@ -135,7 +137,7 @@ func (ctx *parseCtx) readBlock(line string) (out *pbbstream.Block, err error) { payload, err := base64.StdEncoding.DecodeString(chunks[6]) - var blockPayload *anypb.Any + blockPayload := &anypb.Any{} if err := proto.Unmarshal(payload, blockPayload); err != nil { return nil, fmt.Errorf("unmarshaling block payload: %w", err) } @@ -147,13 +149,13 @@ func (ctx *parseCtx) readBlock(line string) (out *pbbstream.Block, err error) { } block := &bstream.Block{ - Id: blockHash, - Number: blockNum, - PreviousId: parentHash, - PreviousNum: parentNum, - Timestamp: timestamp, - LibNum: libNum, - Payload: blockPayload, + Id: blockHash, + Number: blockNum, + ParentId: parentHash, + ParentNum: parentNum, + Timestamp: timestamppb.New(timestamp), + LibNum: libNum, + Payload: blockPayload, } return block, nil diff --git a/consolereader_test.go b/consolereader_test.go index 4d224de..80f550d 100644 --- a/consolereader_test.go +++ b/consolereader_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/streamingfast/firehose-core/firehose/test" + "github.com/streamingfast/firehose-core/test" "github.com/stretchr/testify/require" "go.uber.org/zap" 
"google.golang.org/protobuf/proto" @@ -55,9 +55,9 @@ func Test_Ctx_readBlock(t *testing.T) { require.Equal(t, blockNumber, block.Number) require.Equal(t, blockHash, block.Id) - require.Equal(t, parentHash, block.PreviousId) + require.Equal(t, parentHash, block.ParentId) require.Equal(t, uint64(libNumber), block.LibNum) - require.Equal(t, time.Unix(0, nowNano), block.Timestamp) + require.Equal(t, int32(time.Unix(0, nowNano).Nanosecond()), block.Timestamp.Nanos) require.NoError(t, err) require.Equal(t, anypbBlock.GetValue(), block.Payload.Value) @@ -88,8 +88,8 @@ func Test_GetNext(t *testing.T) { require.Equal(t, uint64(18571000), block.Number) require.Equal(t, "d2836a703a02f3ca2a13f05efe26fc48c6fa0db0d754a49e56b066d3b7d54659", block.Id) - require.Equal(t, "55de88c909fa368ae1e93b6b8ffb3fbb12e64aefec1d4a1fcc27ae7633de2f81", block.PreviousId) + require.Equal(t, "55de88c909fa368ae1e93b6b8ffb3fbb12e64aefec1d4a1fcc27ae7633de2f81", block.ParentId) require.Equal(t, uint64(18570800), block.LibNum) - require.Equal(t, time.Unix(0, 1699992393935935000), block.Timestamp) + require.Equal(t, int32(time.Unix(0, 1699992393935935000).Nanosecond()), block.Timestamp.Nanos) } diff --git a/firehose.go b/firehose.go index 066992b..76821d4 100644 --- a/firehose.go +++ b/firehose.go @@ -12,8 +12,8 @@ import ( discoveryservice "github.com/streamingfast/dgrpc/server/discovery-service" "github.com/streamingfast/dlauncher/launcher" "github.com/streamingfast/dmetrics" - "github.com/streamingfast/firehose-core/firehose/firehose/app/firehose" - "github.com/streamingfast/firehose-core/firehose/firehose/server" + "github.com/streamingfast/firehose-core/firehose/app/firehose" + "github.com/streamingfast/firehose-core/firehose/server" "github.com/streamingfast/logging" ) diff --git a/firehose/app/firehose/app.go b/firehose/app/firehose/app.go index 89bcf27..d66177e 100644 --- a/firehose/app/firehose/app.go +++ b/firehose/app/firehose/app.go @@ -20,6 +20,8 @@ import ( "net/url" "time" + pbbstream 
"github.com/streamingfast/pbgo/sf/bstream/v1" + "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/blockstream" "github.com/streamingfast/bstream/hub" @@ -28,9 +30,9 @@ import ( dgrpcserver "github.com/streamingfast/dgrpc/server" "github.com/streamingfast/dmetrics" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/firehose/firehose" - "github.com/streamingfast/firehose-core/firehose/firehose/metrics" - "github.com/streamingfast/firehose-core/firehose/firehose/server" + "github.com/streamingfast/firehose-core/firehose" + "github.com/streamingfast/firehose-core/firehose/metrics" + "github.com/streamingfast/firehose-core/firehose/server" "github.com/streamingfast/shutter" "go.uber.org/atomic" "go.uber.org/zap" @@ -121,7 +123,7 @@ func (a *App) Run() error { a.config.BlockStreamAddr, 2, bstream.HandlerFunc(func(blk *pbbstream.Block, obj interface{}) error { - a.modules.HeadBlockNumberMetric.SetUint64(blk.Num()) + a.modules.HeadBlockNumberMetric.SetUint64(blk.Number) a.modules.HeadTimeDriftMetric.SetBlockTime(blk.Time()) return h.ProcessBlock(blk, obj) }), diff --git a/firehose/factory.go b/firehose/factory.go index d9485e7..d5b120e 100644 --- a/firehose/factory.go +++ b/firehose/factory.go @@ -5,30 +5,23 @@ import ( "errors" "fmt" - "github.com/streamingfast/dauth" - "github.com/streamingfast/derr" - "github.com/streamingfast/dmetering" - "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/hub" "github.com/streamingfast/bstream/stream" "github.com/streamingfast/bstream/transform" + "github.com/streamingfast/dauth" + "github.com/streamingfast/derr" + "github.com/streamingfast/dmetering" "github.com/streamingfast/dstore" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) -// StreamMergedBlocksPreprocThreads defines the number of threads -// that the filesource is 
allowed to use PER FILE. Used for decoding -// bstream blocks to protobuf and applying other transforms var StreamMergedBlocksPreprocThreads = 25 -var bstreamToProtocolPreprocFunc = func(blk *pbbstream.Block) (interface{}, error) { - return blk.ToProtocol(), nil -} - type BlockGetter struct { mergedBlocksStore dstore.Store forkedBlocksStore dstore.Store @@ -168,7 +161,7 @@ func (sf *StreamFactory) New( if preprocFunc != nil { options = append(options, stream.WithPreprocessFunc(preprocFunc, StreamMergedBlocksPreprocThreads)) } else if decodeBlock { - options = append(options, stream.WithPreprocessFunc(bstreamToProtocolPreprocFunc, StreamMergedBlocksPreprocThreads)) // decoding bstream in parallel, faster + panic("not supported anymore") } if blockIndexProvider != nil { reqLogger = reqLogger.With(zap.Bool("with_index_provider", true)) diff --git a/firehose/init_test.go b/firehose/init_test.go index 7bd0c75..b35a7c3 100644 --- a/firehose/init_test.go +++ b/firehose/init_test.go @@ -1,12 +1,9 @@ package firehose import ( - "github.com/streamingfast/bstream" "github.com/streamingfast/logging" ) func init() { logging.InstantiateLoggers() - - bstream.GetBlockReaderFactory = bstream.TestBlockReaderFactory } diff --git a/firehose/server/blocks.go b/firehose/server/blocks.go index fe332c4..5d64399 100644 --- a/firehose/server/blocks.go +++ b/firehose/server/blocks.go @@ -7,11 +7,13 @@ import ( "os" "time" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/stream" "github.com/streamingfast/dauth" "github.com/streamingfast/dmetering" - "github.com/streamingfast/firehose-core/firehose/firehose/metrics" + "github.com/streamingfast/firehose-core/firehose/metrics" "github.com/streamingfast/logging" pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" diff --git a/firehose/server/server.go b/firehose/server/server.go index 2de72a4..6d7c4ca 100644 --- a/firehose/server/server.go +++ 
b/firehose/server/server.go @@ -6,8 +6,8 @@ import ( "strings" "time" - "github.com/streamingfast/firehose-core/firehose/firehose" - "github.com/streamingfast/firehose-core/firehose/firehose/rate" + "github.com/streamingfast/firehose-core/firehose" + "github.com/streamingfast/firehose-core/firehose/rate" _ "github.com/mostynb/go-grpc-compression/zstd" "github.com/streamingfast/bstream/transform" diff --git a/go.mod b/go.mod index 9a3496b..b502dd8 100644 --- a/go.mod +++ b/go.mod @@ -1,11 +1,8 @@ -module github.com/streamingfast/firehose-core/firehose +module github.com/streamingfast/firehose-core go 1.21 -replace ( - github.com/streamingfast/bstream => ../bstream - github.com/streamingfast/pbgo => ../pbgo -) +replace github.com/streamingfast/bstream => ../bstream require ( github.com/ShinyTrinkets/overseer v0.3.0 @@ -17,20 +14,21 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 - github.com/streamingfast/bstream v0.0.2-0.20231115182919-10a5d61a80ab + github.com/streamingfast/bstream v0.0.2-0.20231116220707-c5946cce90ff github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 + github.com/streamingfast/dbin v0.9.1-0.20231115202300-f0d94cacb713 + github.com/streamingfast/derr v0.0.0-20230515163924-8570aaa43fe1 github.com/streamingfast/dgrpc v0.0.0-20230929132851-893fc52687fa github.com/streamingfast/dlauncher v0.0.0-20230607184145-76399faad89e github.com/streamingfast/dmetering v0.0.0-20230731155453-e1df53e362aa github.com/streamingfast/dmetrics v0.0.0-20230919161904-206fa8ebd545 github.com/streamingfast/dstore v0.1.1-0.20230620124109-3924b3b36c77 - github.com/streamingfast/index-builder v0.0.0-20221031203737-fa2e70f09dc2 github.com/streamingfast/jsonpb v0.0.0-20210811021341-3670f0aa02d0 github.com/streamingfast/logging v0.0.0-20230608130331-f22c91403091 - github.com/streamingfast/pbgo v0.0.6-0.20231115160849-aa578f33a482 + 
github.com/streamingfast/pbgo v0.0.6-0.20231116213602-165f136fce2c github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0 - github.com/streamingfast/substreams v1.1.20 + github.com/streamingfast/substreams v1.1.21-0.20231117153234-b3b90b45b5be github.com/stretchr/testify v1.8.4 go.uber.org/multierr v1.10.0 go.uber.org/zap v1.26.0 @@ -41,7 +39,6 @@ require ( require ( github.com/google/s2a-go v0.1.4 // indirect - github.com/streamingfast/firehose v0.1.1-0.20220810182727-6f3191de9804 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect ) @@ -170,8 +167,6 @@ require ( github.com/spf13/afero v1.9.3 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/streamingfast/dbin v0.9.1-0.20220513054835-1abebbb944ad - github.com/streamingfast/derr v0.0.0-20230515163924-8570aaa43fe1 github.com/streamingfast/dtracing v0.0.0-20220305214756-b5c0e8699839 // indirect github.com/streamingfast/opaque v0.0.0-20210811180740-0c01d37ea308 // indirect github.com/streamingfast/sf-tracing v0.0.0-20230616174903-cd2ade641ca9 // indirect diff --git a/go.sum b/go.sum index b752873..979af70 100644 --- a/go.sum +++ b/go.sum @@ -28,11 +28,6 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod 
h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk= cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -41,22 +36,12 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.2.0/go.mod h1:xlogom/6gr8RJGBe7nT2eGsQYAFUbbv8dbC29qE3Xmw= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= cloud.google.com/go/compute v1.21.0 h1:JNBsyXVoOoNJtTQcnEY5uYpZIbeCTYIeDe0Xh1bySMk= cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/container v1.3.1/go.mod h1:/mI/mTug/DwXJPxysUoInyvF3ekeXGiP8teCAtgGMdM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod 
h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.1.1/go.mod h1:CKqrcnI/suGpybEHxZ7BMehL0oA4LpdyJdUlTl9jVMw= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= cloud.google.com/go/logging v1.7.0 h1:CJYxlNNNNAMkHp9em/YEXcfJg+rPDg7YfwoRpMU+t5I= @@ -64,8 +49,6 @@ cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeN cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI= cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= -cloud.google.com/go/monitoring v1.4.0/go.mod h1:y6xnxfwI3hTFWOdkOaD7nfJVlwuC3/mS/5kvtT131p4= -cloud.google.com/go/monitoring v1.6.0/go.mod h1:w+OY1TYCk4MtvY7WfEHlIp5mP8SV/gDSqOsvGhVa2KM= cloud.google.com/go/monitoring v1.15.1 h1:65JhLMd+JiYnXr6j5Z63dUYCuOg770p8a/VC+gil/58= cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -78,16 +61,12 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.21.0/go.mod h1:XmRlxkgPjlBONznT2dDUU/5XlpU2OjMnKuqnZI01LAA= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= cloud.google.com/go/storage v1.30.1/go.mod 
h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= -cloud.google.com/go/trace v1.2.0/go.mod h1:Wc8y/uYyOhPy12KEnXG9XGrvfMz5F5SrYecQlbW1rwM= cloud.google.com/go/trace v1.10.1 h1:EwGdOLCNfYOOPtgqo+D2sDLZmRCEO1AagRTJCU6ztdg= cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= contrib.go.opencensus.io/exporter/stackdriver v0.12.6/go.mod h1:8x999/OcIPy5ivx/wDiV7Gx4D+VUPODf0mWRGRc5kSk= -contrib.go.opencensus.io/exporter/stackdriver v0.13.8/go.mod h1:huNtlWx75MwO7qMs0KrMxPZXzNNWebav1Sq/pm02JdQ= contrib.go.opencensus.io/exporter/stackdriver v0.13.10 h1:a9+GZPUe+ONKUwULjlEOucMMG0qfSCCenlji0Nhqbys= contrib.go.opencensus.io/exporter/stackdriver v0.13.10/go.mod h1:I5htMbyta491eUxufwwZPQdcKvvgzMB4O9ni41YnIM8= contrib.go.opencensus.io/exporter/zipkin v0.1.1 h1:PR+1zWqY8ceXs1qDQQIlgXe+sdiwCf0n32bH4+Epk8g= @@ -138,14 +117,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/abourget/llerrgroup v0.2.0 h1:2nPXy6Owo/KOKDQYvjMmS8rsjtitvuP2OEGrqgpj428= github.com/abourget/llerrgroup v0.2.0/go.mod h1:QukSa1Sim/0R4aRlWdiBdAy+0i1PBfOd1WHpfYM1ngA= github.com/alecthomas/gometalinter v2.0.11+incompatible/go.mod h1:qfIpQGGz3d+NmgyPBqv+LSh50emm1pt72EtcX2vKYQk= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod 
h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.43/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.325 h1:jF/L99fJSq/BfiLmUOflO/aM+LwcqBm0Fe/qTK5xxuI= github.com/aws/aws-sdk-go v1.44.325/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= @@ -156,8 +129,6 @@ github.com/azer/logger v1.0.0/go.mod h1:iaDID7UeBTyUh31bjGFlLkr87k23z/mHMMLzt6YQ github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= @@ -184,7 +155,6 @@ github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMr github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764= @@ -210,7 +180,6 @@ github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -232,7 +201,6 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2U github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -248,7 +216,6 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= 
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= @@ -268,12 +235,8 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-json-experiment/json v0.0.0-20231013223334-54c864be5b8d h1:zqfo2jECgX5eYQseB/X+uV4Y5ocGOG/vG/LTztUCyPA= github.com/go-json-experiment/json v0.0.0-20231013223334-54c864be5b8d/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -295,7 +258,6 @@ github.com/go-test/deep 
v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3a github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -357,8 +319,6 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -394,20 +354,14 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy 
v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gordonklaus/ineffassign v0.0.0-20180909121442-1003c8bd00dc/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= @@ -452,22 +406,15 @@ github.com/josephburnett/jd v1.7.1 h1:oXBPMS+SNnILTMGj1fWLK9pexpeJUXtbVFfRku/PjB github.com/josephburnett/jd v1.7.1/go.mod h1:R8ZnZnLt2D4rhW4NvBc/USTo6mzyNT6fYNIIWOJA9GY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod 
h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= @@ -476,7 +423,6 @@ github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02 github.com/klauspost/cpuid/v2 v2.2.3 
h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU= github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -516,7 +462,6 @@ github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= @@ -530,8 +475,6 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mostynb/go-grpc-compression v1.1.17 h1:N9t6taOJN3mNTTi0wDf4e3lp/G/ON1TP67Pn0vTUA9I= @@ -558,8 +501,6 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -581,7 +522,6 @@ github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvI github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -590,30 +530,13 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod 
h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk= github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -627,14 +550,11 @@ github.com/rs/cors v1.10.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/schollz/closestmatch v2.1.0+incompatible h1:Uel2GXEpJqOWBrlyI+oY9LTiyyjYS17cCYRqP13/SHk= github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= -github.com/sethvargo/go-retry v0.1.0/go.mod h1:JzIOdZqQDNpPkQDmcqgtteAcxFLtYpNF/zJCM1ysDg8= github.com/sethvargo/go-retry v0.2.3 h1:oYlgvIvsju3jNbottWABtbnoLC+GDtLdBHxKWxQm/iU= github.com/sethvargo/go-retry v0.2.3/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/sirupsen/logrus v1.2.0/go.mod 
h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -654,48 +574,29 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/streamingfast/atm v0.0.0-20220131151839-18c87005e680/go.mod h1:iISPGAstbUsPgyC3auLLi7PYUTi9lHv5z0COam0OPOY= -github.com/streamingfast/bstream v0.0.2-0.20220810182344-114d9f8705b2/go.mod h1:dMhUgTdaY+3F+weWsiLo5bNkoUxZbAhdwtCauYDLjEQ= -github.com/streamingfast/bstream v0.0.2-0.20231115182919-10a5d61a80ab h1:NED6em0qaVsCFlSL5HX2vo/xmDnNzGZxCjpCuDmLjPY= -github.com/streamingfast/bstream v0.0.2-0.20231115182919-10a5d61a80ab/go.mod h1:ryNdCDG4CCOo2QYctNFzAuNf3ITGhfTwbgRK0/VRDdQ= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 h1:UxJUTcEVkdZy8N77E3exz0iNlgQuxl4m220GPvzdZ2s= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80/go.mod h1:QxjVH73Lkqk+mP8bndvhMuQDUINfkgsYhdCH/5TJFKI= -github.com/streamingfast/dauth v0.0.0-20210812020920-1c83ba29add1/go.mod h1:FIYpVqt+ICVuNBoOH3ZIicIctpVoCq3393+RpfXsPEM= github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 h1:49JYZkn8ALGe+LhcACZyX3L9B8tIxRZ3F3l+OxmNMhY= github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330/go.mod h1:zfq+mtesfbaZnNeh1BF+vo+zEFP1sat4pm3lvt40nRw= -github.com/streamingfast/dbin 
v0.0.0-20210809205249-73d5eca35dc5/go.mod h1:YStE7K5/GH47JsWpY7LMKsDaXXpMLU/M26vYFzXHYRk= -github.com/streamingfast/dbin v0.9.1-0.20220513054835-1abebbb944ad h1:6z4uS6TlD9KoHdyE1vzoGsELVCCcviTFT/3/vqCylh8= -github.com/streamingfast/dbin v0.9.1-0.20220513054835-1abebbb944ad/go.mod h1:YStE7K5/GH47JsWpY7LMKsDaXXpMLU/M26vYFzXHYRk= -github.com/streamingfast/derr v0.0.0-20210811180100-9138d738bcec/go.mod h1:ulVfui/yGXmPBbt9aAqCWdAjM7YxnZkYHzvQktLfw3M= +github.com/streamingfast/dbin v0.9.1-0.20231115202300-f0d94cacb713 h1:pVUxJeGfR8+WU+t1ZHiPqRFqeZOp2swYbYW57BAEpWI= +github.com/streamingfast/dbin v0.9.1-0.20231115202300-f0d94cacb713/go.mod h1:dbfiy9ORrL8c6ldSq+L0H9pg8TOqqu/FsghsgUEWK54= github.com/streamingfast/derr v0.0.0-20230515163924-8570aaa43fe1 h1:xJB7rXnOHLesosMjfwWsEL2i/40mFSkzenEb3M0qTyM= github.com/streamingfast/derr v0.0.0-20230515163924-8570aaa43fe1/go.mod h1:QSm/AfaDsE0k1xBYi0lW580YJ/WDV/FKZI628tkZR0Y= -github.com/streamingfast/dgrpc v0.0.0-20220301153539-536adf71b594/go.mod h1:HFjyAk8wkkb92dLBq1lxArfaWvDHb9Y53+fg7O5WTiU= -github.com/streamingfast/dgrpc v0.0.0-20220909121013-162e9305bbfc/go.mod h1:YlFJuFiB9rmglB5UfTfnsOTfKC1rFo+D0sRbTzLcqgc= github.com/streamingfast/dgrpc v0.0.0-20230929132851-893fc52687fa h1:L/Ipge5pkZtyHucT7c8F/PiCitiNqQxjoUuxyzWKZew= github.com/streamingfast/dgrpc v0.0.0-20230929132851-893fc52687fa/go.mod h1:AcY2kk28XswihgU6z37288a3ZF4gGGO7nNwlTI/vET4= github.com/streamingfast/dlauncher v0.0.0-20230607184145-76399faad89e h1:Nh/gLDv8rOMIidb/gpO4rZOYVe09k+tof/trezkpku4= github.com/streamingfast/dlauncher v0.0.0-20230607184145-76399faad89e/go.mod h1:xErlHEDd5+4NlR+Mg3ZtW7BTTLB0yZBxZAjHPrkk8X4= -github.com/streamingfast/dmetering v0.0.0-20220301165106-a642bb6a21bd/go.mod h1:Eu1SH2HyBbDUmQqJV+f5oowCQ/c02HkAZyR5U2BKIT8= github.com/streamingfast/dmetering v0.0.0-20230731155453-e1df53e362aa h1:bM6iy5X7Gtw1oh1bMxFmtroouKZu4K4BHXaFvR96jNw= github.com/streamingfast/dmetering v0.0.0-20230731155453-e1df53e362aa/go.mod 
h1:3XggUfQMyciaue133qhbIkFqJQqNzozGpa/gI3sdwac= -github.com/streamingfast/dmetrics v0.0.0-20210811180524-8494aeb34447/go.mod h1:VLdQY/FwczmC/flqWkcsBbqXO4BhU4zQDSK7GMrpcjY= github.com/streamingfast/dmetrics v0.0.0-20230919161904-206fa8ebd545 h1:SUl04bZKGAv207lp7/6CHOJIRpjUKunwItrno3K463Y= github.com/streamingfast/dmetrics v0.0.0-20230919161904-206fa8ebd545/go.mod h1:JbxEDbzWRG1dHdNIPrYfuPllEkktZMgm40AwVIBENcw= -github.com/streamingfast/dstore v0.1.1-0.20220607202639-35118aeaf648/go.mod h1:SHSEIPowGeE1TfNNmGeAUUnlO3dwevmX5kFOSazU60M= github.com/streamingfast/dstore v0.1.1-0.20230620124109-3924b3b36c77 h1:u7FWLqz3Uwff609Ja9M+3aGOWqBCVU7dx9i6R6Qc4qI= github.com/streamingfast/dstore v0.1.1-0.20230620124109-3924b3b36c77/go.mod h1:ngKU7WzHwVjOFpt2g+Wtob5mX4IvN90HYlnARcTRbmQ= -github.com/streamingfast/dtracing v0.0.0-20210811175635-d55665d3622a/go.mod h1:bqiYZaX6L/MoXNfFQeAdau6g9HLA3yKHkX8KzStt58Q= github.com/streamingfast/dtracing v0.0.0-20220305214756-b5c0e8699839 h1:K6mJPvh1jAL+/gBS7Bh9jyzWaTib6N47m06gZOTUPwQ= github.com/streamingfast/dtracing v0.0.0-20220305214756-b5c0e8699839/go.mod h1:huOJyjMYS6K8upTuxDxaNd+emD65RrXoVBvh8f1/7Ns= -github.com/streamingfast/firehose v0.1.1-0.20220810182727-6f3191de9804 h1:zKKMqfigTHRJyDXo4ixlnzBJd/DaLgBhISJT0P8Ii6o= -github.com/streamingfast/firehose v0.1.1-0.20220810182727-6f3191de9804/go.mod h1:L31zyyw1r7uYyPwoaVHsKXB2Jd9MO3rDIPNtfoZ+jSM= -github.com/streamingfast/index-builder v0.0.0-20221031203737-fa2e70f09dc2 h1:dgYLhP3STiPi30fISAijFPEB11D4r1fQFc8D3cpgV5s= -github.com/streamingfast/index-builder v0.0.0-20221031203737-fa2e70f09dc2/go.mod h1:OYv1UX/kRsV9aP4SEwa9zpt34qGzdtJzOvdGn+n56as= github.com/streamingfast/jsonpb v0.0.0-20210811021341-3670f0aa02d0 h1:g8eEYbFSykyzIyuxNMmHEUGGUvJE0ivmqZagLDK42gw= github.com/streamingfast/jsonpb v0.0.0-20210811021341-3670f0aa02d0/go.mod h1:cTNObq2Uofb330y05JbbZZ6RwE6QUXw5iVcHk1Fx3fk= github.com/streamingfast/logging v0.0.0-20210811175431-f3b44b61606a/go.mod 
h1:4GdqELhZOXj4xwc4IaBmzofzdErGynnaSzuzxy0ZIBo= -github.com/streamingfast/logging v0.0.0-20210908162127-bdc5856d5341/go.mod h1:4GdqELhZOXj4xwc4IaBmzofzdErGynnaSzuzxy0ZIBo= -github.com/streamingfast/logging v0.0.0-20220222131651-12c3943aac2e/go.mod h1:4GdqELhZOXj4xwc4IaBmzofzdErGynnaSzuzxy0ZIBo= github.com/streamingfast/logging v0.0.0-20220304183711-ddba33d79e27/go.mod h1:4GdqELhZOXj4xwc4IaBmzofzdErGynnaSzuzxy0ZIBo= github.com/streamingfast/logging v0.0.0-20220304214715-bc750a74b424/go.mod h1:VlduQ80JcGJSargkRU4Sg9Xo63wZD/l8A5NC/Uo1/uU= github.com/streamingfast/logging v0.0.0-20230608130331-f22c91403091 h1:RN5mrigyirb8anBEtdjtHFIufXdacyTi6i4KBfeNXeo= @@ -704,18 +605,16 @@ github.com/streamingfast/opaque v0.0.0-20210811180740-0c01d37ea308 h1:xlWSfi1BoP github.com/streamingfast/opaque v0.0.0-20210811180740-0c01d37ea308/go.mod h1:K1p8Bj/wG34KJvYzPUqtzpndffmpkrVY11u2hkyxCWQ= github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef h1:9IVFHRsqvI+vKJwgF1OMV6L55jHbaV/ZLoU4IAG/dME= github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef/go.mod h1:cq8CvbZ3ioFmGrHokSAJalS0lC+pVXLKhITScItUGXY= -github.com/streamingfast/pbgo v0.0.6-0.20220629184423-cfd0608e0cf4/go.mod h1:huKwfgTGFIFZMKSVbD5TywClM7zAeBUG/zePZMqvXQQ= -github.com/streamingfast/pbgo v0.0.6-0.20220630154121-2e8bba36234e/go.mod h1:huKwfgTGFIFZMKSVbD5TywClM7zAeBUG/zePZMqvXQQ= -github.com/streamingfast/pbgo v0.0.6-0.20231115160849-aa578f33a482 h1:eCL6jUDZoSmScqHsp5kiFyEGgo0B5jvCGp21oM7Ow0k= -github.com/streamingfast/pbgo v0.0.6-0.20231115160849-aa578f33a482/go.mod h1:fZuijmeFrqxW2YnnXmGrkQpUTHx3eHCaJUKwdvXAYKM= +github.com/streamingfast/pbgo v0.0.6-0.20231116213602-165f136fce2c h1:DPvapg4SdVcXlk/lxAFSYJJTh9+M7UGQ2+XZGdw4wko= +github.com/streamingfast/pbgo v0.0.6-0.20231116213602-165f136fce2c/go.mod h1:fZuijmeFrqxW2YnnXmGrkQpUTHx3eHCaJUKwdvXAYKM= github.com/streamingfast/sf-tracing v0.0.0-20230616174903-cd2ade641ca9 h1:YRwpVvLYa+FEJlTy0S7mk4UptYjk5zac+A+ZE1phOeA= 
github.com/streamingfast/sf-tracing v0.0.0-20230616174903-cd2ade641ca9/go.mod h1:ktzt1BUj3GF+SKQHEmn3ShryJ7y87JeCHtaTGaDVATs= github.com/streamingfast/shutter v1.5.0 h1:NpzDYzj0HVpSiDJVO/FFSL6QIK/YKOxY0gJAtyaTOgs= github.com/streamingfast/shutter v1.5.0/go.mod h1:B/T6efqdeMGbGwjzPS1ToXzYZI4kDzI5/u4I+7qbjY8= github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0 h1:Y15G1Z4fpEdm2b+/70owI7TLuXadlqBtGM7rk4Hxrzk= github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0/go.mod h1:/Rnz2TJvaShjUct0scZ9kKV2Jr9/+KBAoWy4UMYxgv4= -github.com/streamingfast/substreams v1.1.20 h1:61k/HKti9xo7vDAu5zew/VL8qzY+ye/9Zzt1om+tVks= -github.com/streamingfast/substreams v1.1.20/go.mod h1:Ak7a+EM8MRehep0ZaQD1NwG27ZE9auZY9+/VLbhBnDU= +github.com/streamingfast/substreams v1.1.21-0.20231117153234-b3b90b45b5be h1:/SKBNBzCSrt/gQmzKDro8BmFH5C3fyVEYustmTL7qs4= +github.com/streamingfast/substreams v1.1.21-0.20231117153234-b3b90b45b5be/go.mod h1:9KE5zXdfRAnXVyilMnuPcqMujFobaJ+LsKXADBYssJs= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -758,7 +657,6 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= @@ -791,34 +689,25 @@ go.opentelemetry.io/proto/otlp v1.0.0 
h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lI go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk= go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= 
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -883,7 +772,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -913,15 +801,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net 
v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= @@ -934,7 +814,6 @@ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 
v0.0.0-20210126194326-f9ce19ea3013/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= @@ -943,12 +822,6 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -962,14 +835,11 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -986,7 +856,6 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -999,13 +868,10 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201113233024-12cec1faf1ba/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1021,7 +887,6 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1031,21 +896,9 @@ golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1122,7 +975,6 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1139,9 +991,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= @@ -1162,7 +1011,6 @@ google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSr google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.37.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= @@ -1176,21 +1024,6 @@ google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= -google.golang.org/api 
v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM= -google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.69.0/go.mod h1:boanBiw+h5c3s+tBPgEzLDRHfFLWV0qXxRHz3ws7C80= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.91.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -1209,7 +1042,6 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 
-google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -1241,13 +1073,11 @@ google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= 
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -1269,37 +1099,6 @@ google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220211171837-173942840c17/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= 
-google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod 
h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220802133213-ce4fa296bf78/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= -google.golang.org/genproto v0.0.0-20220808131553-a91ffa7f803e/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw= @@ -1333,15 +1132,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.58.0 h1:32JY8YpPMSR45K+c3o6b8VL73V+rR8k+DeMIr4vRH8o= google.golang.org/grpc v1.58.0/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/grpc/cmd/protoc-gen-go-grpc 
v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -1358,10 +1150,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1380,10 +1170,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/index-builder/README.md b/index-builder/README.md new file mode 100644 index 0000000..872fd3d --- /dev/null +++ b/index-builder/README.md @@ -0,0 +1,27 @@ +# StreamingFast Index-Builder + +[![reference](https://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](https://pkg.go.dev/github.com/streamingfast/merger) +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) + +The index-builder process is responsible for building various indices + +## Installation & Usage + +See the different protocol-specific `StreamingFast` binaries at https://github.com/streamingfast/streamingfast#protocols + + +## Contributing + +**Issues and PR in this repo related strictly to the index-builder functionalities** + +Report any protocol-specific issues in their +[respective repositories](https://github.com/streamingfast/streamingfast#protocols) + +**Please first refer to the general +[streamingfast contribution guide](https://github.com/streamingfast/streamingfast/blob/master/CONTRIBUTING.md)**, +if you wish to contribute to this code base. 
+ + +## License + +[Apache 2.0](LICENSE) \ No newline at end of file diff --git a/index-builder/app/index-builder/app.go b/index-builder/app/index-builder/app.go new file mode 100644 index 0000000..cddc03f --- /dev/null +++ b/index-builder/app/index-builder/app.go @@ -0,0 +1,96 @@ +package index_builder + +import ( + "context" + "fmt" + + index_builder "github.com/streamingfast/firehose-core/index-builder" + + "github.com/streamingfast/dgrpc" + "github.com/streamingfast/firehose-core/index-builder/metrics" + + "go.uber.org/zap" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/dmetrics" + "github.com/streamingfast/dstore" + "github.com/streamingfast/shutter" + pbhealth "google.golang.org/grpc/health/grpc_health_v1" +) + +type Config struct { + BlockHandler bstream.Handler + StartBlockResolver func(ctx context.Context) (uint64, error) + EndBlock uint64 + BlockStorePath string + GRPCListenAddr string +} + +type App struct { + *shutter.Shutter + config *Config + readinessProbe pbhealth.HealthClient +} + +func New(config *Config) *App { + return &App{ + Shutter: shutter.New(), + config: config, + } +} + +func (a *App) Run() error { + blockStore, err := dstore.NewDBinStore(a.config.BlockStorePath) + if err != nil { + return err + } + + ctx, cancel := context.WithCancel(context.Background()) + a.OnTerminating(func(error) { + cancel() + }) + + startBlock, err := a.config.StartBlockResolver(ctx) + + indexBuilder := index_builder.NewIndexBuilder( + zlog, + a.config.BlockHandler, + startBlock, + a.config.EndBlock, + blockStore, + ) + + gs, err := dgrpc.NewInternalClient(a.config.GRPCListenAddr) + if err != nil { + return fmt.Errorf("cannot create readiness probe") + } + a.readinessProbe = pbhealth.NewHealthClient(gs) + + dmetrics.Register(metrics.MetricSet) + + a.OnTerminating(indexBuilder.Shutdown) + indexBuilder.OnTerminated(a.Shutdown) + + go indexBuilder.Launch() + + zlog.Info("index builder running") + return nil +} + +func (a *App) IsReady() bool { + 
if a.readinessProbe == nil { + return false + } + + resp, err := a.readinessProbe.Check(context.Background(), &pbhealth.HealthCheckRequest{}) + if err != nil { + zlog.Info("index-builder readiness probe error", zap.Error(err)) + return false + } + + if resp.Status == pbhealth.HealthCheckResponse_SERVING { + return true + } + + return false +} diff --git a/index-builder/app/index-builder/logging.go b/index-builder/app/index-builder/logging.go new file mode 100644 index 0000000..d24e498 --- /dev/null +++ b/index-builder/app/index-builder/logging.go @@ -0,0 +1,7 @@ +package index_builder + +import ( + "github.com/streamingfast/logging" +) + +var zlog, tracer = logging.PackageLogger("index-builder", "github.com/streamingfast/firehose-core/index-builder/app/index-builder") diff --git a/index-builder/healthz.go b/index-builder/healthz.go new file mode 100644 index 0000000..63b69e0 --- /dev/null +++ b/index-builder/healthz.go @@ -0,0 +1,42 @@ +package index_builder + +// Copyright 2019 dfuse Platform Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "context" + + pbhealth "google.golang.org/grpc/health/grpc_health_v1" +) + +// Check is basic GRPC Healthcheck +func (app *IndexBuilder) Check(ctx context.Context, in *pbhealth.HealthCheckRequest) (*pbhealth.HealthCheckResponse, error) { + status := pbhealth.HealthCheckResponse_SERVING + return &pbhealth.HealthCheckResponse{ + Status: status, + }, nil +} + +// Watch is basic GRPC Healthcheck as a stream +func (app *IndexBuilder) Watch(req *pbhealth.HealthCheckRequest, stream pbhealth.Health_WatchServer) error { + err := stream.Send(&pbhealth.HealthCheckResponse{ + Status: pbhealth.HealthCheckResponse_SERVING, + }) + if err != nil { + return err + } + + <-stream.Context().Done() + return nil +} diff --git a/index-builder/index-builder.go b/index-builder/index-builder.go new file mode 100644 index 0000000..b926d0a --- /dev/null +++ b/index-builder/index-builder.go @@ -0,0 +1,95 @@ +package index_builder + +import ( + "context" + "errors" + "fmt" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/bstream/stream" + "github.com/streamingfast/dstore" + "github.com/streamingfast/firehose-core/firehose" + "github.com/streamingfast/firehose-core/index-builder/metrics" + pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" + "github.com/streamingfast/shutter" + "go.uber.org/zap" +) + +type IndexBuilder struct { + *shutter.Shutter + logger *zap.Logger + + startBlockNum uint64 + stopBlockNum uint64 + + handler bstream.Handler + + blocksStore dstore.Store +} + +func NewIndexBuilder(logger *zap.Logger, handler bstream.Handler, startBlockNum, stopBlockNum uint64, blockStore dstore.Store) *IndexBuilder { + return &IndexBuilder{ + Shutter: shutter.New(), + startBlockNum: startBlockNum, + stopBlockNum: stopBlockNum, + handler: handler, + blocksStore: blockStore, + + logger: logger, + } +} + +func (app *IndexBuilder) Launch() { + err := app.launch() + if errors.Is(err, stream.ErrStopBlockReached) { + app.logger.Info("index builder reached stop 
block", zap.Uint64("stop_block_num", app.stopBlockNum)) + err = nil + } + app.logger.Info("index builder exited", zap.Error(err)) + app.Shutdown(err) +} + +func (app *IndexBuilder) launch() error { + startBlockNum := app.startBlockNum + stopBlockNum := app.stopBlockNum + + streamFactory := firehose.NewStreamFactory( + app.blocksStore, + nil, + nil, + nil, + ) + ctx := context.Background() + + req := &pbfirehose.Request{ + StartBlockNum: int64(startBlockNum), + StopBlockNum: stopBlockNum, + FinalBlocksOnly: true, + } + + handlerFunc := func(block *bstream.Block, obj interface{}) error { + app.logger.Debug("handling block", zap.Uint64("block_num", block.Number)) + + metrics.HeadBlockNumber.SetUint64(block.Number) + metrics.HeadBlockTimeDrift.SetBlockTime(block.Time()) + metrics.AppReadiness.SetReady() + + app.logger.Debug("updated head block metrics", zap.Uint64("block_num", block.Number), zap.Time("block_time", block.Time())) + + return app.handler.ProcessBlock(block, obj) + } + + stream, err := streamFactory.New( + ctx, + bstream.HandlerFunc(handlerFunc), + req, + true, + app.logger, + ) + + if err != nil { + return fmt.Errorf("getting firehose stream: %w", err) + } + + return stream.Run(ctx) +} diff --git a/index-builder/metrics/metrics.go b/index-builder/metrics/metrics.go new file mode 100644 index 0000000..d691c4d --- /dev/null +++ b/index-builder/metrics/metrics.go @@ -0,0 +1,9 @@ +package metrics + +import "github.com/streamingfast/dmetrics" + +var MetricSet = dmetrics.NewSet() + +var HeadBlockTimeDrift = MetricSet.NewHeadTimeDrift("block-indexer") +var HeadBlockNumber = MetricSet.NewHeadBlockNumber("block-indexer") +var AppReadiness = MetricSet.NewAppReadiness("block-indexer") diff --git a/index_builder.go b/index_builder.go index 2de8962..98e6e5d 100644 --- a/index_builder.go +++ b/index_builder.go @@ -4,12 +4,13 @@ import ( "context" "fmt" + index_builder "github.com/streamingfast/firehose-core/index-builder/app/index-builder" + "github.com/spf13/cobra" 
"github.com/spf13/viper" "github.com/streamingfast/bstream" bstransform "github.com/streamingfast/bstream/transform" "github.com/streamingfast/dlauncher/launcher" - indexerApp "github.com/streamingfast/index-builder/app/index-builder" pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" ) @@ -74,7 +75,7 @@ func registerIndexBuilderApp[B Block](chain *Chain[B]) { return indexer.ProcessBlock(any(blk).(B)) }) - app := indexerApp.New(&indexerApp.Config{ + app := index_builder.New(&index_builder.Config{ BlockHandler: handler, StartBlockResolver: startBlockResolver, EndBlock: stopBlockNum, diff --git a/merger.go b/merger.go index ef851a7..cd1cf57 100644 --- a/merger.go +++ b/merger.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/streamingfast/dlauncher/launcher" - "github.com/streamingfast/firehose-core/firehose/merger/app/merger" + "github.com/streamingfast/firehose-core/merger/app/merger" ) func registerMergerApp() { diff --git a/merger/CHANGELOG.md b/merger/CHANGELOG.md index 130d155..cf1bc3c 100644 --- a/merger/CHANGELOG.md +++ b/merger/CHANGELOG.md @@ -27,7 +27,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Improved * Logging of OneBlockFile deletion now only called once per delete batch -* When someone else pushes a merged file, merger now detects it and reads the actual blocks to populate its seenblockscache, as discussed here: https://github.com/streamingfast/firehose-core/firehose/merger/issues/1 +* When someone else pushes a merged file, merger now detects it and reads the actual blocks to populate its seenblockscache, as discussed here: https://github.com/streamingfast/firehose-core/merger/issues/1 * Fixed waiting time to actually use TimeBetweenStoreLookups instead of hardcoded value of 1 second when bundle is incomplete ## [v0.0.1] diff --git a/merger/README.md b/merger/README.md index a55db5b..a64d0ad 100644 --- a/merger/README.md +++ b/merger/README.md @@ -1,6 +1,6 @@ # 
StreamingFast Merger -[![reference](https://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](https://pkg.go.dev/github.com/streamingfast/firehose-core/firehose/merger) +[![reference](https://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](https://pkg.go.dev/github.com/streamingfast/firehose-core/merger) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) The merger process is responsible for accumulating blocks from all diff --git a/merger/app/merger/app.go b/merger/app/merger/app.go index 8fb5374..52e6ae1 100644 --- a/merger/app/merger/app.go +++ b/merger/app/merger/app.go @@ -23,8 +23,8 @@ import ( "github.com/streamingfast/dgrpc" "github.com/streamingfast/dmetrics" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/firehose/merger" - "github.com/streamingfast/firehose-core/firehose/merger/metrics" + "github.com/streamingfast/firehose-core/merger" + "github.com/streamingfast/firehose-core/merger/metrics" "github.com/streamingfast/shutter" "go.uber.org/zap" pbhealth "google.golang.org/grpc/health/grpc_health_v1" diff --git a/merger/app/merger/logging.go b/merger/app/merger/logging.go index 68409dd..c0ee237 100644 --- a/merger/app/merger/logging.go +++ b/merger/app/merger/logging.go @@ -18,4 +18,4 @@ import ( "github.com/streamingfast/logging" ) -var zlog, tracer = logging.PackageLogger("merger", "github.com/streamingfast/firehose-core/firehose/merger/app/merger") +var zlog, tracer = logging.PackageLogger("merger", "github.com/streamingfast/firehose-core/merger/app/merger") diff --git a/merger/bundler.go b/merger/bundler.go index 08ee879..57dd285 100644 --- a/merger/bundler.go +++ b/merger/bundler.go @@ -23,9 +23,11 @@ import ( "sync" "time" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/forkable" - 
"github.com/streamingfast/firehose-core/firehose/merger/metrics" + "github.com/streamingfast/firehose-core/merger/metrics" "github.com/streamingfast/logging" "go.uber.org/zap" ) @@ -54,7 +56,7 @@ type Bundler struct { logger *zap.Logger } -var logger, _ = logging.PackageLogger("merger", "github.com/streamingfast/firehose-core/firehose/merger/bundler") +var logger, _ = logging.PackageLogger("merger", "github.com/streamingfast/firehose-core/merger/bundler") func NewBundler(startBlock, stopBlock, firstStreamableBlock, bundleSize uint64, io IOInterface) *Bundler { b := &Bundler{ @@ -126,7 +128,7 @@ func (b *Bundler) Reset(nextBase uint64, lib bstream.BlockRef) { func readBlockTime(data []byte) (time.Time, error) { reader := bytes.NewReader(data) - blockReader, err := bstream.GetBlockReaderFactory.New(reader) + blockReader, err := bstream.NewDBinBlockReader(reader) if err != nil { return time.Time{}, fmt.Errorf("unable to create block reader: %w", err) } diff --git a/merger/bundler_test.go b/merger/bundler_test.go index 05438a8..f06df72 100644 --- a/merger/bundler_test.go +++ b/merger/bundler_test.go @@ -10,28 +10,48 @@ import ( // "time" // "github.com/streamingfast/bstream" - //"github.com/streamingfast/firehose-core/firehose/merger/bundle" + //"github.com/streamingfast/firehose-core/merger/bundle" "github.com/streamingfast/bstream" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -var block98 = bstream.MustNewOneBlockFile("0000000098-0000000000000098a-0000000000000097a-96-suffix") -var block99 = bstream.MustNewOneBlockFile("0000000099-0000000000000099a-0000000000000098a-97-suffix") -var block100 = bstream.MustNewOneBlockFile("0000000100-0000000000000100a-0000000000000099a-98-suffix") -var block101 = bstream.MustNewOneBlockFile("0000000101-0000000000000101a-0000000000000100a-99-suffix") -var block102Final100 = bstream.MustNewOneBlockFile("0000000102-0000000000000102a-0000000000000101a-100-suffix") -var block103Final101 = 
bstream.MustNewOneBlockFile("0000000103-0000000000000103a-0000000000000102a-101-suffix") -var block104Final102 = bstream.MustNewOneBlockFile("0000000104-0000000000000104a-0000000000000103a-102-suffix") -var block105Final103 = bstream.MustNewOneBlockFile("0000000105-0000000000000105a-0000000000000104a-103-suffix") -var block106Final104 = bstream.MustNewOneBlockFile("0000000106-0000000000000106a-0000000000000105a-104-suffix") - -var block507Final106 = bstream.MustNewOneBlockFile("0000000507-0000000000000507a-0000000000000106a-106-suffix") -var block608Final507 = bstream.MustNewOneBlockFile("0000000608-0000000000000608a-0000000000000507a-507-suffix") -var block609Final608 = bstream.MustNewOneBlockFile("0000000609-0000000000000607a-0000000000000608a-608-suffix") - -func init() { - bstream.GetBlockReaderFactory = bstream.TestBlockReaderFactory +var block98 = func() *bstream.OneBlockFile { + return bstream.MustNewOneBlockFile("0000000098-0000000000000098a-0000000000000097a-96-suffix") +} +var block99 = func() *bstream.OneBlockFile { + return bstream.MustNewOneBlockFile("0000000099-0000000000000099a-0000000000000098a-97-suffix") +} +var block100 = func() *bstream.OneBlockFile { + return bstream.MustNewOneBlockFile("0000000100-0000000000000100a-0000000000000099a-98-suffix") +} +var block101 = func() *bstream.OneBlockFile { + return bstream.MustNewOneBlockFile("0000000101-0000000000000101a-0000000000000100a-99-suffix") +} +var block102Final100 = func() *bstream.OneBlockFile { + return bstream.MustNewOneBlockFile("0000000102-0000000000000102a-0000000000000101a-100-suffix") +} +var block103Final101 = func() *bstream.OneBlockFile { + return bstream.MustNewOneBlockFile("0000000103-0000000000000103a-0000000000000102a-101-suffix") +} +var block104Final102 = func() *bstream.OneBlockFile { + return bstream.MustNewOneBlockFile("0000000104-0000000000000104a-0000000000000103a-102-suffix") +} +var block105Final103 = func() *bstream.OneBlockFile { + return 
bstream.MustNewOneBlockFile("0000000105-0000000000000105a-0000000000000104a-103-suffix") +} +var block106Final104 = func() *bstream.OneBlockFile { + return bstream.MustNewOneBlockFile("0000000106-0000000000000106a-0000000000000105a-104-suffix") +} + +var block507Final106 = func() *bstream.OneBlockFile { + return bstream.MustNewOneBlockFile("0000000507-0000000000000507a-0000000000000106a-106-suffix") +} +var block608Final507 = func() *bstream.OneBlockFile { + return bstream.MustNewOneBlockFile("0000000608-0000000000000608a-0000000000000507a-507-suffix") +} +var block609Final608 = func() *bstream.OneBlockFile { + return bstream.MustNewOneBlockFile("0000000609-0000000000000607a-0000000000000608a-608-suffix") } func TestNewBundler(t *testing.T) { @@ -46,8 +66,8 @@ func TestNewBundler(t *testing.T) { func TestBundlerReset(t *testing.T) { b := NewBundler(100, 200, 2, 2, nil) // merge every 2 blocks - b.irreversibleBlocks = []*bstream.OneBlockFile{block100, block101} - b.Reset(102, block100.ToBstreamBlock().AsRef()) + b.irreversibleBlocks = []*bstream.OneBlockFile{block100(), block101()} + b.Reset(102, block100().ToBstreamBlock().AsRef()) assert.Nil(t, b.irreversibleBlocks) assert.EqualValues(t, 102, b.baseBlockNum) @@ -66,16 +86,16 @@ func TestBundlerMergeKeepOne(t *testing.T) { { name: "vanilla", inBlocks: []*bstream.OneBlockFile{ - block100, - block101, - block102Final100, - block103Final101, - block104Final102, + block100(), + block101(), + block102Final100(), + block103Final101(), + block104Final102(), }, mergeSize: 2, expectRemaining: []*bstream.OneBlockFile{ - block101, - block102Final100, + block101(), + block102Final100(), }, expectBase: 102, expectMerged: []uint64{100}, @@ -83,18 +103,18 @@ func TestBundlerMergeKeepOne(t *testing.T) { { name: "vanilla_plus_one", inBlocks: []*bstream.OneBlockFile{ - block100, - block101, - block102Final100, - block103Final101, - block104Final102, - block105Final103, + block100(), + block101(), + block102Final100(), + 
block103Final101(), + block104Final102(), + block105Final103(), }, mergeSize: 2, expectRemaining: []*bstream.OneBlockFile{ - block101, - block102Final100, - block103Final101, + block101(), + block102Final100(), + block103Final101(), }, expectBase: 102, expectMerged: []uint64{100}, @@ -102,18 +122,18 @@ func TestBundlerMergeKeepOne(t *testing.T) { { name: "twoMerges", inBlocks: []*bstream.OneBlockFile{ - block100, - block101, - block102Final100, - block103Final101, - block104Final102, - block105Final103, - block106Final104, + block100(), + block101(), + block102Final100(), + block103Final101(), + block104Final102(), + block105Final103(), + block106Final104(), }, mergeSize: 2, expectRemaining: []*bstream.OneBlockFile{ - block103Final101, - block104Final102, + block103Final101(), + block104Final102(), }, expectBase: 104, expectMerged: []uint64{100, 102}, @@ -121,21 +141,21 @@ func TestBundlerMergeKeepOne(t *testing.T) { { name: "big_hole", inBlocks: []*bstream.OneBlockFile{ - block100, - block101, - block102Final100, - block103Final101, - block104Final102, - block105Final103, - block106Final104, - block507Final106, - block608Final507, - block609Final608, + block100(), + block101(), + block102Final100(), + block103Final101(), + block104Final102(), + block105Final103(), + block106Final104(), + block507Final106(), + block608Final507(), + block609Final608(), }, mergeSize: 100, expectRemaining: []*bstream.OneBlockFile{ - block507Final106, // last from bundle 500 - block608Final507, // the only irreversible block from current bundle + block507Final106(), // last from bundle 500 + block608Final507(), // the only irreversible block from current bundle }, expectBase: 600, expectMerged: []uint64{100, 200, 300, 400, 500}, @@ -152,7 +172,7 @@ func TestBundlerMergeKeepOne(t *testing.T) { return nil }, }) // merge every 2 blocks - b.irreversibleBlocks = []*bstream.OneBlockFile{block100, block101} + b.irreversibleBlocks = []*bstream.OneBlockFile{block100(), block101()} for _, blk := 
range c.inBlocks { require.NoError(t, b.HandleBlockFile(blk)) diff --git a/merger/bundlereader.go b/merger/bundlereader.go index 94a10ea..df4e167 100644 --- a/merger/bundlereader.go +++ b/merger/bundlereader.go @@ -15,11 +15,13 @@ package merger import ( + "bytes" "context" "fmt" "io" "github.com/streamingfast/bstream" + "github.com/streamingfast/dbin" "github.com/streamingfast/logging" "go.uber.org/zap" ) @@ -31,7 +33,9 @@ type BundleReader struct { oneBlockDataChan chan []byte errChan chan error - logger *zap.Logger + logger *zap.Logger + header *dbin.Header + headerLength int } func NewBundleReader(ctx context.Context, logger *zap.Logger, tracer logging.Tracer, oneBlockFiles []*bstream.OneBlockFile, anyOneBlockFile *bstream.OneBlockFile, oneBlockDownloader bstream.OneBlockDownloaderFunc) (*BundleReader, error) { @@ -46,13 +50,21 @@ func NewBundleReader(ctx context.Context, logger *zap.Logger, tracer logging.Tra if err != nil { return nil, fmt.Errorf("cannot read one_block_file to get header: %w", err) } - if len(data) < bstream.GetBlockWriterHeaderLen { - return nil, fmt.Errorf("one-block-file corrupt: expected header size of %d, but file size is only %d bytes", bstream.GetBlockWriterHeaderLen, len(data)) + + dbinReader, err := bstream.NewDBinBlockReader(bytes.NewReader(data)) + if err != nil { + return nil, fmt.Errorf("creating block reader: %w", err) } - r.readBuffer = data[:bstream.GetBlockWriterHeaderLen] - go r.downloadAll(oneBlockFiles, oneBlockDownloader) + r.header = dbinReader.Header + r.headerLength = len(r.header.Data) + if len(data) < r.headerLength { + return nil, fmt.Errorf("one-block-file corrupt: expected header size of %d, but file size is only %d bytes", r.headerLength, len(data)) + } + r.readBuffer = data[:r.headerLength] + + go r.downloadAll(oneBlockFiles, oneBlockDownloader) return r, nil } @@ -70,7 +82,6 @@ func (r *BundleReader) downloadAll(oneBlockFiles []*bstream.OneBlockFile, oneBlo } func (r *BundleReader) Read(p []byte) (bytesRead 
int, err error) { - if r.readBuffer == nil { if err := r.fillBuffer(); err != nil { return 0, err @@ -105,10 +116,10 @@ func (r *BundleReader) fillBuffer() error { return fmt.Errorf("one-block-file corrupt: empty data") } - if len(data) < bstream.GetBlockWriterHeaderLen { - return fmt.Errorf("one-block-file corrupt: expected header size of %d, but file size is only %d bytes", bstream.GetBlockWriterHeaderLen, len(data)) + if len(data) < r.headerLength { + return fmt.Errorf("one-block-file corrupt: expected header size of %d, but file size is only %d bytes", r.headerLength, len(data)) } - data = data[bstream.GetBlockWriterHeaderLen:] + data = data[r.headerLength:] r.readBuffer = data r.readBufferOffset = 0 return nil diff --git a/merger/bundlereader_test.go b/merger/bundlereader_test.go index ce131c8..a67ef37 100644 --- a/merger/bundlereader_test.go +++ b/merger/bundlereader_test.go @@ -12,94 +12,88 @@ import ( "github.com/streamingfast/bstream" "github.com/streamingfast/dbin" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestBundleReader_ReadSimpleFiles(t *testing.T) { bundle := NewTestBundle() - bstream.GetBlockWriterHeaderLen = 0 - r, err := NewBundleReader(context.Background(), testLogger, testTracer, bundle, bundle[0], nil) require.NoError(t, err) - r1 := make([]byte, 4) + r1 := make([]byte, len(testOneBlockHeader)) read, err := r.Read(r1) require.NoError(t, err, "reading header") - assert.Equal(t, 0, read) + require.Equal(t, string(testOneBlockHeader), string(r1)) + r1 = make([]byte, 2) read, err = r.Read(r1) require.NoError(t, err) - assert.Equal(t, 2, read) - assert.Equal(t, []byte{0x1, 0x2, 0x0, 0x0}, r1) + require.Equal(t, 2, read) + require.Equal(t, []byte{0x1, 0x2}, r1) read, err = r.Read(r1) require.NoError(t, err) - assert.Equal(t, 2, read) - assert.Equal(t, []byte{0x3, 0x4, 0x0, 0x0}, r1) + require.Equal(t, 2, read) + require.Equal(t, []byte{0x3, 0x4}, r1) read, err = r.Read(r1) require.NoError(t, err) - 
assert.Equal(t, 2, read) - assert.Equal(t, []byte{0x5, 0x6, 0x0, 0x0}, r1) + require.Equal(t, 2, read) + require.Equal(t, []byte{0x5, 0x6}, r1) read, err = r.Read(r1) - assert.Equal(t, 0, read) - assert.Equal(t, io.EOF, err) + require.Equal(t, 0, read) + require.Equal(t, io.EOF, err) } func TestBundleReader_ReadByChunk(t *testing.T) { bundle := NewTestBundle() - bstream.GetBlockWriterHeaderLen = 0 - r, err := NewBundleReader(context.Background(), testLogger, testTracer, bundle, bundle[0], nil) require.NoError(t, err) - r1 := make([]byte, 1) + r1 := make([]byte, len(testOneBlockHeader)) read, err := r.Read(r1) require.NoError(t, err, "reading header") - assert.Equal(t, 0, read) + require.Equal(t, string(testOneBlockHeader), string(r1)) + r1 = make([]byte, 1) read, err = r.Read(r1) require.NoError(t, err) - assert.Equal(t, 1, read) - assert.Equal(t, []byte{0x1}, r1) + require.Equal(t, 1, read) + require.Equal(t, []byte{0x1}, r1) read, err = r.Read(r1) require.NoError(t, err) - assert.Equal(t, 1, read) - assert.Equal(t, []byte{0x2}, r1) + require.Equal(t, 1, read) + require.Equal(t, []byte{0x2}, r1) read, err = r.Read(r1) require.NoError(t, err) - assert.Equal(t, 1, read) - assert.Equal(t, []byte{0x3}, r1) + require.Equal(t, 1, read) + require.Equal(t, []byte{0x3}, r1) read, err = r.Read(r1) require.NoError(t, err) - assert.Equal(t, 1, read) - assert.Equal(t, []byte{0x4}, r1) + require.Equal(t, 1, read) + require.Equal(t, []byte{0x4}, r1) read, err = r.Read(r1) require.NoError(t, err) - assert.Equal(t, 1, read) - assert.Equal(t, []byte{0x5}, r1) + require.Equal(t, 1, read) + require.Equal(t, []byte{0x5}, r1) read, err = r.Read(r1) require.NoError(t, err) - assert.Equal(t, 1, read) - assert.Equal(t, []byte{0x6}, r1) + require.Equal(t, 1, read) + require.Equal(t, []byte{0x6}, r1) _, err = r.Read(r1) require.Equal(t, err, io.EOF) } func TestBundleReader_Read_Then_Read_Block(t *testing.T) { - //important - bstream.GetBlockWriterHeaderLen = 10 - bundle := 
[]*bstream.OneBlockFile{ NewTestOneBlockFileFromFile(t, "0000000001-20150730T152628.0-13406cb6-b1cb8fa3.dbin"), NewTestOneBlockFileFromFile(t, "0000000002-20150730T152657.0-044698c9-13406cb6.dbin"), @@ -113,7 +107,7 @@ func TestBundleReader_Read_Then_Read_Block(t *testing.T) { dbinReader := dbin.NewReader(bytes.NewReader(allBlockData)) //Reader header once - _, _, err = dbinReader.ReadHeader() + _, err = dbinReader.ReadHeader() //Block 1 require.NoError(t, err) @@ -135,12 +129,10 @@ func TestBundleReader_Read_Then_Read_Block(t *testing.T) { } func TestBundleReader_Read_DownloadOneBlockFileError(t *testing.T) { - bundle := NewDownloadBundle() - bstream.GetBlockWriterHeaderLen = 0 - + bundle := NewBundleNoMemoize() anyOB := &bstream.OneBlockFile{ CanonicalName: "header", - MemoizeData: []byte{0x3, 0x4}, + MemoizeData: testOneBlockHeader, } downloadOneBlockFile := func(ctx context.Context, oneBlockFile *bstream.OneBlockFile) (data []byte, err error) { @@ -148,11 +140,11 @@ func TestBundleReader_Read_DownloadOneBlockFileError(t *testing.T) { } r, err := NewBundleReader(context.Background(), testLogger, testTracer, bundle, anyOB, downloadOneBlockFile) require.NoError(t, err) - r1 := make([]byte, 4) + r1 := make([]byte, len(testOneBlockHeader)) read, err := r.Read(r1) require.NoError(t, err, "reading header") - require.Equal(t, 0, read) + require.Equal(t, string(testOneBlockHeader), string(r1)) read, err = r.Read(r1) require.Equal(t, 0, read) @@ -160,9 +152,8 @@ func TestBundleReader_Read_DownloadOneBlockFileError(t *testing.T) { } func TestBundleReader_Read_DownloadOneBlockFileCorrupt(t *testing.T) { - bstream.GetBlockWriterHeaderLen = 4 - bundle := NewDownloadBundle() + bundle := NewBundleNoMemoize() downloadOneBlockFile := func(ctx context.Context, oneBlockFile *bstream.OneBlockFile) (data []byte, err error) { return []byte{0xAB, 0xCD, 0xEF}, nil // shorter than header length @@ -173,12 +164,11 @@ func TestBundleReader_Read_DownloadOneBlockFileCorrupt(t *testing.T) { 
} func TestBundleReader_Read_DownloadOneBlockFileZeroLength(t *testing.T) { - bundle := NewDownloadBundle() + bundle := NewBundleNoMemoize() - bstream.GetBlockWriterHeaderLen = 2 anyBlockFile := &bstream.OneBlockFile{ CanonicalName: "header", - MemoizeData: []byte{0xa, 0xb}, + MemoizeData: testOneBlockHeader, } downloadOneBlockFile := func(ctx context.Context, oneBlockFile *bstream.OneBlockFile) (data []byte, err error) { @@ -187,24 +177,24 @@ func TestBundleReader_Read_DownloadOneBlockFileZeroLength(t *testing.T) { r, err := NewBundleReader(context.Background(), testLogger, testTracer, bundle, anyBlockFile, downloadOneBlockFile) require.NoError(t, err) - r1 := make([]byte, 4) + r1 := make([]byte, len(testOneBlockHeader)) read, err := r.Read(r1) - require.Equal(t, 2, read, "header") - require.NoError(t, err) + require.NoError(t, err, "reading header") + require.Equal(t, string(testOneBlockHeader), string(r1)) + r1 = make([]byte, 4) read, err = r.Read(r1) require.Equal(t, read, 0) require.Error(t, err, "EOF expected") } func TestBundleReader_Read_ReadBufferNotNil(t *testing.T) { - bundle := NewDownloadBundle() + bundle := NewBundleNoMemoize() - bstream.GetBlockWriterHeaderLen = 2 anyBlockFile := &bstream.OneBlockFile{ CanonicalName: "header", - MemoizeData: []byte{0xa, 0xb}, + MemoizeData: testOneBlockHeader, } downloadOneBlockFile := func(ctx context.Context, oneBlockFile *bstream.OneBlockFile) (data []byte, err error) { @@ -222,12 +212,11 @@ func TestBundleReader_Read_ReadBufferNotNil(t *testing.T) { } func TestBundleReader_Read_EmptyListOfOneBlockFiles(t *testing.T) { - bundle := NewDownloadBundle() + bundle := NewBundleNoMemoize() - bstream.GetBlockWriterHeaderLen = 2 anyBlockFile := &bstream.OneBlockFile{ CanonicalName: "header", - MemoizeData: []byte{0xa, 0xb}, + MemoizeData: testOneBlockHeader, } downloadOneBlockFile := func(ctx context.Context, oneBlockFile *bstream.OneBlockFile) (data []byte, err error) { @@ -236,12 +225,13 @@ func 
TestBundleReader_Read_EmptyListOfOneBlockFiles(t *testing.T) { r, err := NewBundleReader(context.Background(), testLogger, testTracer, bundle, anyBlockFile, downloadOneBlockFile) require.NoError(t, err) - r1 := make([]byte, 4) + r1 := make([]byte, len(testOneBlockHeader)) read, err := r.Read(r1) - require.Equal(t, 2, read, "header") - require.NoError(t, err) + require.NoError(t, err, "reading header") + require.Equal(t, string(testOneBlockHeader), string(r1)) + r1 = make([]byte, 4) read, err = r.Read(r1) require.Equal(t, 0, read) require.Errorf(t, err, "EOF") @@ -262,25 +252,26 @@ func NewTestOneBlockFileFromFile(t *testing.T, fileName string) *bstream.OneBloc } } +var testOneBlockHeader = []byte("dbin\x00tes\x00\x00") + func NewTestBundle() []*bstream.OneBlockFile { - bstream.GetBlockWriterHeaderLen = 0 o1 := &bstream.OneBlockFile{ CanonicalName: "o1", - MemoizeData: []byte{0x1, 0x2}, + MemoizeData: append(testOneBlockHeader, []byte{0x1, 0x2}...), } o2 := &bstream.OneBlockFile{ CanonicalName: "o2", - MemoizeData: []byte{0x3, 0x4}, + MemoizeData: append(testOneBlockHeader, []byte{0x3, 0x4}...), } o3 := &bstream.OneBlockFile{ CanonicalName: "o3", - MemoizeData: []byte{0x5, 0x6}, + MemoizeData: append(testOneBlockHeader, []byte{0x5, 0x6}...), } return []*bstream.OneBlockFile{o1, o2, o3} } -func NewDownloadBundle() []*bstream.OneBlockFile { +func NewBundleNoMemoize() []*bstream.OneBlockFile { o1 := &bstream.OneBlockFile{ CanonicalName: "o1", MemoizeData: []byte{}, diff --git a/merger/init_test.go b/merger/init_test.go index 450c347..e6f09a1 100644 --- a/merger/init_test.go +++ b/merger/init_test.go @@ -18,7 +18,7 @@ import ( "github.com/streamingfast/logging" ) -var testLogger, testTracer = logging.PackageLogger("merger", "github.com/streamingfast/firehose-core/firehose/merger_tests") +var testLogger, testTracer = logging.PackageLogger("merger", "github.com/streamingfast/firehose-core/merger_tests") func init() { logging.InstantiateLoggers() diff --git 
a/merger/merger_io.go b/merger/merger_io.go index f4218bf..28195e2 100644 --- a/merger/merger_io.go +++ b/merger/merger_io.go @@ -12,9 +12,11 @@ import ( "sync" "time" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + "github.com/streamingfast/bstream" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/firehose/merger/metrics" + "github.com/streamingfast/firehose-core/merger/metrics" "github.com/streamingfast/logging" "go.uber.org/zap" ) @@ -239,7 +241,8 @@ func (s *DStoreIO) readLastBlockFromMerged(ctx context.Context, baseBlock uint64 return nil, nil, err } // we truncate the block ID to have the short version that we get on oneBlockFiles - return bstream.NewBlockRef(bstream.TruncateBlockID(last.Id), last.Number), &last.Timestamp, nil + t := last.Timestamp.AsTime() + return bstream.NewBlockRef(bstream.TruncateBlockID(last.Id), last.Number), &t, nil } func (s *DStoreIO) DeleteAsync(oneBlockFiles []*bstream.OneBlockFile) error { @@ -378,7 +381,7 @@ func (od *oneBlockFilesDeleter) processDeletions() { func lastBlock(mergeFileReader io.ReadCloser) (out *pbbstream.Block, err error) { defer mergeFileReader.Close() - blkReader, err := bstream.GetBlockReaderFactory.New(mergeFileReader) + blkReader, err := bstream.NewDBinBlockReader(mergeFileReader) if err != nil { return nil, err } diff --git a/merger/merger_io_test.go b/merger/merger_io_test.go index 10bfbad..56479ff 100644 --- a/merger/merger_io_test.go +++ b/merger/merger_io_test.go @@ -17,14 +17,12 @@ package merger import ( "context" "io" - "io/ioutil" "strings" "testing" "time" "github.com/streamingfast/bstream" "github.com/streamingfast/dstore" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -67,11 +65,9 @@ func newDStoreIO( } func TestMergerIO_MergeUploadPerfect(t *testing.T) { - bstream.GetBlockWriterHeaderLen = 0 - files := []*bstream.OneBlockFile{ - block100, - block101, + block100(), + block101(), } var mergeLastBase string var filesRead 
[]string @@ -79,12 +75,13 @@ func TestMergerIO_MergeUploadPerfect(t *testing.T) { done := make(chan struct{}) oneBlockStore := dstore.NewMockStore(nil) + oneBlockStore.OpenObjectFunc = func(_ context.Context, name string) (io.ReadCloser, error) { filesRead = append(filesRead, name) if len(filesRead) == 2 { close(done) } - return ioutil.NopCloser(strings.NewReader("")), nil + return io.NopCloser(strings.NewReader(string(testOneBlockHeader))), nil } mergedBlocksStore := dstore.NewMockStore( func(base string, f io.Reader) (err error) { @@ -98,29 +95,28 @@ func TestMergerIO_MergeUploadPerfect(t *testing.T) { err := mio.MergeAndStore(context.Background(), 100, files) require.NoError(t, err) - assert.Equal(t, mergeCounter, 1) - assert.Equal(t, mergeLastBase, "0000000100") + require.Equal(t, mergeCounter, 1) + require.Equal(t, mergeLastBase, "0000000100") expectFilenames := []string{ - "0000000100-0000000000000100a-0000000000000099a-98-suffix", // read header "0000000100-0000000000000100a-0000000000000099a-98-suffix", "0000000101-0000000000000101a-0000000000000100a-99-suffix", } select { case <-time.After(time.Second): - t.Error("timeout waiting for read") + t.Error("timeout waiting for read", filesRead) case <-done: + require.Equal(t, expectFilenames, filesRead) } - assert.Equal(t, expectFilenames, filesRead) } func TestMergerIO_MergeUploadFiltered(t *testing.T) { files := []*bstream.OneBlockFile{ - block98, - block99, - block100, - block101, + block98(), + block99(), + block100(), + block101(), } var mergeLastBase string @@ -134,7 +130,7 @@ func TestMergerIO_MergeUploadFiltered(t *testing.T) { if len(filesRead) == 2 { close(done) } - return ioutil.NopCloser(strings.NewReader("")), nil + return io.NopCloser(strings.NewReader(string(testOneBlockHeader))), nil } mergedBlocksStore := dstore.NewMockStore( func(base string, f io.Reader) (err error) { @@ -148,8 +144,8 @@ func TestMergerIO_MergeUploadFiltered(t *testing.T) { err := mio.MergeAndStore(context.Background(), 100, 
files) require.NoError(t, err) - assert.Equal(t, mergeCounter, 1) - assert.Equal(t, mergeLastBase, "0000000100") + require.Equal(t, mergeCounter, 1) + require.Equal(t, mergeLastBase, "0000000100") expectFilenames := []string{ "0000000098-0000000000000098a-0000000000000097a-96-suffix", // read header @@ -160,10 +156,10 @@ func TestMergerIO_MergeUploadFiltered(t *testing.T) { select { case <-time.After(time.Second): - t.Error("timeout waiting for read") + t.Error("timeout waiting for read", filesRead) case <-done: + require.Equal(t, expectFilenames, filesRead) } - assert.Equal(t, expectFilenames, filesRead) } func TestMergerIO_MergeUploadNoFiles(t *testing.T) { @@ -177,16 +173,18 @@ func TestMergerIO_MergeUploadNoFiles(t *testing.T) { require.Error(t, err) } func TestMergerIO_MergeUploadFilteredToZero(t *testing.T) { + b100 := block102Final100() + b101 := block103Final101() files := []*bstream.OneBlockFile{ - block102Final100, - block103Final101, + b100, + b101, } oneBlockStore := dstore.NewMockStore(nil) mergedBlocksStore := dstore.NewMockStore(nil) mio := newDStoreIO(oneBlockStore, mergedBlocksStore) - block102Final100.MemoizeData = []byte{0x0, 0x1, 0x2, 0x3} - block103Final101.MemoizeData = []byte{0x0, 0x1, 0x2, 0x3} + b100.MemoizeData = append(testOneBlockHeader, []byte{0x0, 0x1, 0x2, 0x3}...) + b101.MemoizeData = append(testOneBlockHeader, []byte{0x0, 0x1, 0x2, 0x3}...) 
err := mio.MergeAndStore(context.Background(), 114, files) require.NoError(t, err) diff --git a/node-manager/app/node_manager/app.go b/node-manager/app/node_manager/app.go index d65e12b..ceff8ae 100644 --- a/node-manager/app/node_manager/app.go +++ b/node-manager/app/node_manager/app.go @@ -24,10 +24,10 @@ import ( dgrpcserver "github.com/streamingfast/dgrpc/server" dgrpcfactory "github.com/streamingfast/dgrpc/server/factory" "github.com/streamingfast/dmetrics" - nodeManager "github.com/streamingfast/firehose-core/firehose/node-manager" - "github.com/streamingfast/firehose-core/firehose/node-manager/metrics" - "github.com/streamingfast/firehose-core/firehose/node-manager/mindreader" - "github.com/streamingfast/firehose-core/firehose/node-manager/operator" + nodeManager "github.com/streamingfast/firehose-core/node-manager" + "github.com/streamingfast/firehose-core/node-manager/metrics" + "github.com/streamingfast/firehose-core/node-manager/mindreader" + "github.com/streamingfast/firehose-core/node-manager/operator" "github.com/streamingfast/shutter" "go.uber.org/zap" "google.golang.org/grpc" diff --git a/node-manager/app/node_reader_stdin/app.go b/node-manager/app/node_reader_stdin/app.go index 2606048..3a54eca 100644 --- a/node-manager/app/node_reader_stdin/app.go +++ b/node-manager/app/node_reader_stdin/app.go @@ -22,9 +22,9 @@ import ( "github.com/streamingfast/bstream/blockstream" dgrpcserver "github.com/streamingfast/dgrpc/server" dgrpcfactory "github.com/streamingfast/dgrpc/server/factory" - nodeManager "github.com/streamingfast/firehose-core/firehose/node-manager" - logplugin "github.com/streamingfast/firehose-core/firehose/node-manager/log_plugin" - "github.com/streamingfast/firehose-core/firehose/node-manager/mindreader" + nodeManager "github.com/streamingfast/firehose-core/node-manager" + logplugin "github.com/streamingfast/firehose-core/node-manager/log_plugin" + "github.com/streamingfast/firehose-core/node-manager/mindreader" 
"github.com/streamingfast/logging" pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" pbheadinfo "github.com/streamingfast/pbgo/sf/headinfo/v1" diff --git a/node-manager/mindreader/archiver.go b/node-manager/mindreader/archiver.go index 970354e..8297120 100644 --- a/node-manager/mindreader/archiver.go +++ b/node-manager/mindreader/archiver.go @@ -19,6 +19,8 @@ import ( "fmt" "io" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + "github.com/streamingfast/bstream" "github.com/streamingfast/dstore" "github.com/streamingfast/logging" @@ -33,7 +35,6 @@ type Archiver struct { oneblockSuffix string localOneBlocksStore dstore.Store - blockWriterFactory bstream.BlockWriterFactory fileUploader *FileUploader logger *zap.Logger @@ -45,7 +46,6 @@ func NewArchiver( oneblockSuffix string, localOneBlocksStore dstore.Store, remoteOneBlocksStore dstore.Store, - blockWriterFactory bstream.BlockWriterFactory, logger *zap.Logger, tracer logging.Tracer, ) *Archiver { @@ -60,7 +60,6 @@ func NewArchiver( startBlock: startBlock, oneblockSuffix: oneblockSuffix, localOneBlocksStore: localOneBlocksStore, - blockWriterFactory: blockWriterFactory, fileUploader: fileUploader, logger: logger, tracer: tracer, @@ -96,7 +95,7 @@ func (a *Archiver) StoreBlock(ctx context.Context, block *pbbstream.Block) error writeObjectErrChan <- a.localOneBlocksStore.WriteObject(ctx, bstream.BlockFileNameWithSuffix(block, a.oneblockSuffix), pipeRead) }() - blockWriter, err := a.blockWriterFactory.New(pipeWrite) + blockWriter, err := bstream.NewDBinBlockWriter(pipeWrite) if err != nil { return fmt.Errorf("write block factory: %w", err) } diff --git a/node-manager/mindreader/init_test.go b/node-manager/mindreader/init_test.go index 24bfa9c..9f82b3c 100644 --- a/node-manager/mindreader/init_test.go +++ b/node-manager/mindreader/init_test.go @@ -18,7 +18,7 @@ import ( "github.com/streamingfast/logging" ) -var testLogger, testTracer = logging.PackageLogger("node-manager", 
"github.com/streamingfast/firehose-core/firehose/node_manager/mindreader/tests") +var testLogger, testTracer = logging.PackageLogger("node-manager", "github.com/streamingfast/firehose-core/node_manager/mindreader/tests") func init() { logging.InstantiateLoggers() diff --git a/node-manager/mindreader/mindreader.go b/node-manager/mindreader/mindreader.go index 737b1d6..e621474 100644 --- a/node-manager/mindreader/mindreader.go +++ b/node-manager/mindreader/mindreader.go @@ -23,10 +23,12 @@ import ( "regexp" "sync" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/blockstream" "github.com/streamingfast/dstore" - nodeManager "github.com/streamingfast/firehose-core/firehose/node-manager" + nodeManager "github.com/streamingfast/firehose-core/node-manager" "github.com/streamingfast/logging" "github.com/streamingfast/shutter" "go.uber.org/zap" @@ -121,7 +123,6 @@ func NewMindReaderPlugin( oneBlockSuffix, localOneBlocksStore, remoteOneBlocksStore, - bstream.GetBlockWriterFactory, zlogger, tracer, ) @@ -297,7 +298,7 @@ func (p *MindReaderPlugin) readOneMessage(blocks chan<- *pbbstream.Block) error return err } - if block.Num() < bstream.GetProtocolFirstStreamableBlock { + if block.Number < bstream.GetProtocolFirstStreamableBlock { return nil } @@ -320,7 +321,7 @@ func (p *MindReaderPlugin) readOneMessage(blocks chan<- *pbbstream.Block) error blocks <- block - if p.stopBlock != 0 && block.Num() >= p.stopBlock && !p.IsTerminating() { + if p.stopBlock != 0 && block.Number >= p.stopBlock && !p.IsTerminating() { p.zlogger.Info("shutting down because requested end block reached", zap.Stringer("block", block)) // See comment tagged 0a33f6b578cc4d0b diff --git a/node-manager/mindreader/mindreader_test.go b/node-manager/mindreader/mindreader_test.go index e78b9b1..fb1da85 100644 --- a/node-manager/mindreader/mindreader_test.go +++ b/node-manager/mindreader/mindreader_test.go @@ -10,6 +10,8 @@ import ( 
"testing" "time" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + "github.com/streamingfast/bstream" "github.com/streamingfast/shutter" "github.com/stretchr/testify/assert" diff --git a/node-manager/monitor.go b/node-manager/monitor.go index 78e8616..9a17317 100644 --- a/node-manager/monitor.go +++ b/node-manager/monitor.go @@ -3,6 +3,8 @@ package node_manager import ( "time" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + "github.com/streamingfast/dmetrics" "go.uber.org/atomic" "go.uber.org/zap" @@ -66,7 +68,7 @@ func (m *MetricsAndReadinessManager) Launch() { // metrics if m.headBlockNumber != nil { - m.headBlockNumber.SetUint64(lastSeenBlock.Num()) + m.headBlockNumber.SetUint64(lastSeenBlock.Number) } if lastSeenBlock.Time().IsZero() { // never act upon zero timestamps diff --git a/node-manager/operator/operator.go b/node-manager/operator/operator.go index 0bdee26..03428e3 100644 --- a/node-manager/operator/operator.go +++ b/node-manager/operator/operator.go @@ -23,7 +23,7 @@ import ( "time" "github.com/streamingfast/derr" - nodeManager "github.com/streamingfast/firehose-core/firehose/node-manager" + nodeManager "github.com/streamingfast/firehose-core/node-manager" "github.com/streamingfast/shutter" "go.uber.org/atomic" "go.uber.org/zap" diff --git a/node-manager/superviser.go b/node-manager/superviser.go index 7360f8e..3b2aaeb 100644 --- a/node-manager/superviser.go +++ b/node-manager/superviser.go @@ -17,7 +17,7 @@ package node_manager import ( "time" - logplugin "github.com/streamingfast/firehose-core/firehose/node-manager/log_plugin" + logplugin "github.com/streamingfast/firehose-core/node-manager/log_plugin" ) type StartOption string diff --git a/node-manager/superviser/superviser.go b/node-manager/superviser/superviser.go index 5049182..ad433ed 100644 --- a/node-manager/superviser/superviser.go +++ b/node-manager/superviser/superviser.go @@ -22,8 +22,8 @@ import ( "github.com/ShinyTrinkets/overseer" "github.com/streamingfast/bstream" 
- nodeManager "github.com/streamingfast/firehose-core/firehose/node-manager" - logplugin "github.com/streamingfast/firehose-core/firehose/node-manager/log_plugin" + nodeManager "github.com/streamingfast/firehose-core/node-manager" + logplugin "github.com/streamingfast/firehose-core/node-manager/log_plugin" "github.com/streamingfast/shutter" "go.uber.org/zap" ) diff --git a/node-manager/superviser/superviser_test.go b/node-manager/superviser/superviser_test.go index 2de17e7..1f628b2 100644 --- a/node-manager/superviser/superviser_test.go +++ b/node-manager/superviser/superviser_test.go @@ -19,7 +19,7 @@ import ( "testing" "time" - logplugin "github.com/streamingfast/firehose-core/firehose/node-manager/log_plugin" + logplugin "github.com/streamingfast/firehose-core/node-manager/log_plugin" "github.com/streamingfast/logging" "github.com/stretchr/testify/assert" "go.uber.org/zap" diff --git a/node-manager/types.go b/node-manager/types.go index dc974a7..d7e5266 100644 --- a/node-manager/types.go +++ b/node-manager/types.go @@ -14,6 +14,8 @@ package node_manager +import pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + type DeepMindDebuggable interface { DebugDeepMind(enabled bool) } diff --git a/reader_node.go b/reader_node.go index 830e4c2..a273ce2 100644 --- a/reader_node.go +++ b/reader_node.go @@ -13,12 +13,12 @@ import ( "github.com/streamingfast/bstream/blockstream" "github.com/streamingfast/cli" "github.com/streamingfast/dlauncher/launcher" - nodeManager "github.com/streamingfast/firehose-core/firehose/node-manager" - nodeManagerApp "github.com/streamingfast/firehose-core/firehose/node-manager/app/node_manager" - "github.com/streamingfast/firehose-core/firehose/node-manager/metrics" - reader "github.com/streamingfast/firehose-core/firehose/node-manager/mindreader" - "github.com/streamingfast/firehose-core/firehose/node-manager/operator" - sv "github.com/streamingfast/firehose-core/firehose/superviser" + nodeManager 
"github.com/streamingfast/firehose-core/node-manager" + nodeManagerApp "github.com/streamingfast/firehose-core/node-manager/app/node_manager" + "github.com/streamingfast/firehose-core/node-manager/metrics" + reader "github.com/streamingfast/firehose-core/node-manager/mindreader" + "github.com/streamingfast/firehose-core/node-manager/operator" + sv "github.com/streamingfast/firehose-core/superviser" "github.com/streamingfast/logging" pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" pbheadinfo "github.com/streamingfast/pbgo/sf/headinfo/v1" diff --git a/reader_node_stdin.go b/reader_node_stdin.go index a612245..a06a728 100644 --- a/reader_node_stdin.go +++ b/reader_node_stdin.go @@ -18,10 +18,10 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/streamingfast/dlauncher/launcher" - nodeManager "github.com/streamingfast/firehose-core/firehose/node-manager" - nodeReaderStdinApp "github.com/streamingfast/firehose-core/firehose/node-manager/app/node_reader_stdin" - "github.com/streamingfast/firehose-core/firehose/node-manager/metrics" - "github.com/streamingfast/firehose-core/firehose/node-manager/mindreader" + nodeManager "github.com/streamingfast/firehose-core/node-manager" + nodeReaderStdinApp "github.com/streamingfast/firehose-core/node-manager/app/node_reader_stdin" + "github.com/streamingfast/firehose-core/node-manager/metrics" + "github.com/streamingfast/firehose-core/node-manager/mindreader" "github.com/streamingfast/logging" ) diff --git a/relayer.go b/relayer.go index 595c451..a13fe16 100644 --- a/relayer.go +++ b/relayer.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/streamingfast/dlauncher/launcher" - "github.com/streamingfast/firehose-core/firehose/relayer/app/relayer" + "github.com/streamingfast/firehose-core/relayer/app/relayer" ) func registerRelayerApp() { diff --git a/relayer/README.md b/relayer/README.md index b4c94f9..13a0526 100644 --- a/relayer/README.md +++ b/relayer/README.md 
@@ -1,6 +1,6 @@ # StreamingFast Relayer -[![reference](https://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](https://pkg.go.dev/github.com/streamingfast/firehose-core/firehose/relayer) +[![reference](https://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](https://pkg.go.dev/github.com/streamingfast/firehose-core/relayer) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) The relayer process fans out and propagates blocks from instrumented diff --git a/relayer/app/relayer/app.go b/relayer/app/relayer/app.go index 98db0d8..00ee91f 100644 --- a/relayer/app/relayer/app.go +++ b/relayer/app/relayer/app.go @@ -22,8 +22,8 @@ import ( "github.com/streamingfast/bstream" "github.com/streamingfast/dmetrics" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/firehose/relayer" - "github.com/streamingfast/firehose-core/firehose/relayer/metrics" + "github.com/streamingfast/firehose-core/relayer" + "github.com/streamingfast/firehose-core/relayer/metrics" "github.com/streamingfast/shutter" "go.uber.org/zap" pbhealth "google.golang.org/grpc/health/grpc_health_v1" diff --git a/relayer/app/relayer/logging.go b/relayer/app/relayer/logging.go index 7626d2c..b68787c 100644 --- a/relayer/app/relayer/logging.go +++ b/relayer/app/relayer/logging.go @@ -18,4 +18,4 @@ import ( "github.com/streamingfast/logging" ) -var zlog, _ = logging.PackageLogger("relayer", "github.com/streamingfast/firehose-core/firehose/relayer/app/relayer") +var zlog, _ = logging.PackageLogger("relayer", "github.com/streamingfast/firehose-core/relayer/app/relayer") diff --git a/relayer/logging.go b/relayer/logging.go index 43859ea..e123d96 100644 --- a/relayer/logging.go +++ b/relayer/logging.go @@ -18,4 +18,4 @@ import ( "github.com/streamingfast/logging" ) -var zlog, ztrace = logging.PackageLogger("relayer", "github.com/streamingfast/firehose-core/firehose/relayer") +var zlog, ztrace = 
logging.PackageLogger("relayer", "github.com/streamingfast/firehose-core/relayer") diff --git a/relayer/relayer.go b/relayer/relayer.go index 13063be..e84696c 100644 --- a/relayer/relayer.go +++ b/relayer/relayer.go @@ -19,12 +19,14 @@ import ( "strings" "time" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/blockstream" "github.com/streamingfast/bstream/forkable" "github.com/streamingfast/bstream/hub" dgrpcfactory "github.com/streamingfast/dgrpc/server/factory" - "github.com/streamingfast/firehose-core/firehose/relayer/metrics" + "github.com/streamingfast/firehose-core/relayer/metrics" "github.com/streamingfast/shutter" pbhealth "google.golang.org/grpc/health/grpc_health_v1" ) diff --git a/start.go b/start.go index bf2c52b..3633218 100644 --- a/start.go +++ b/start.go @@ -68,17 +68,6 @@ func start(dataDir string, args []string) (err error) { AbsDataDir: dataDirAbs, } - blocksCacheEnabled := viper.GetBool("common-blocks-cache-enabled") - if blocksCacheEnabled { - bstream.GetBlockPayloadSetter = bstream.ATMCachedPayloadSetter - - cacheDir := MustReplaceDataDir(modules.AbsDataDir, viper.GetString("common-blocks-cache-dir")) - storeUrl := MustReplaceDataDir(modules.AbsDataDir, viper.GetString("common-merged-blocks-store-url")) - maxRecentEntryBytes := viper.GetInt("common-blocks-cache-max-recent-entry-bytes") - maxEntryByAgeBytes := viper.GetInt("common-blocks-cache-max-entry-by-age-bytes") - bstream.InitCache(storeUrl, cacheDir, maxRecentEntryBytes, maxEntryByAgeBytes) - } - bstream.GetProtocolFirstStreamableBlock = uint64(viper.GetInt("common-first-streamable-block")) err = bstream.ValidateRegistry() diff --git a/superviser/genericsupervisor.go b/superviser/genericsupervisor.go index d7e016d..fa4e416 100644 --- a/superviser/genericsupervisor.go +++ b/superviser/genericsupervisor.go @@ -4,8 +4,8 @@ import ( "strings" "github.com/ShinyTrinkets/overseer" - nodeManager 
"github.com/streamingfast/firehose-core/firehose/node-manager" - "github.com/streamingfast/firehose-core/firehose/node-manager/superviser" + nodeManager "github.com/streamingfast/firehose-core/node-manager" + "github.com/streamingfast/firehose-core/node-manager/superviser" "go.uber.org/zap" ) diff --git a/superviser/logging.go b/superviser/logging.go index 6a9f32b..5c79bfe 100644 --- a/superviser/logging.go +++ b/superviser/logging.go @@ -1,7 +1,7 @@ package superviser import ( - logplugin "github.com/streamingfast/firehose-core/firehose/node-manager/log_plugin" + logplugin "github.com/streamingfast/firehose-core/node-manager/log_plugin" ) // This file configures a logging reader that transforms log lines received from the blockchain process running diff --git a/tools.go b/tools.go index 2472e9d..0d1bbf0 100644 --- a/tools.go +++ b/tools.go @@ -22,7 +22,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/streamingfast/cli/sflags" - "github.com/streamingfast/firehose-core/firehose/firehose/client" + "github.com/streamingfast/firehose-core/firehose/client" pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" "google.golang.org/grpc" diff --git a/tools_check.go b/tools_check.go index f76e5f0..56e3d4c 100644 --- a/tools_check.go +++ b/tools_check.go @@ -25,7 +25,7 @@ import ( "github.com/streamingfast/cli" "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/firehose/tools" + "github.com/streamingfast/firehose-core/tools" pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "golang.org/x/exp/maps" "golang.org/x/exp/slices" @@ -65,7 +65,7 @@ func init() { func configureToolsCheckCmd[B Block](chain *Chain[B]) { blockPrinter := chain.BlockPrinter() - toolsCheckMergedBlocksCmd.RunE = createToolsCheckMergedBlocksE(blockPrinter) + toolsCheckMergedBlocksCmd.RunE = createToolsCheckMergedBlocksE(chain, blockPrinter) toolsCheckMergedBlocksCmd.Example = 
ExamplePrefixed(chain, "tools check merged-blocks", ` "./sf-data/storage/merged-blocks" "gs:////" -s @@ -76,7 +76,7 @@ func configureToolsCheckCmd[B Block](chain *Chain[B]) { toolsCheckForksCmd.RunE = toolsCheckForksE } -func createToolsCheckMergedBlocksE(blockPrinter BlockPrinterFunc) CommandExecutor { +func createToolsCheckMergedBlocksE[B Block](chain *Chain[B], blockPrinter BlockPrinterFunc) CommandExecutor { return func(cmd *cobra.Command, args []string) error { storeURL := args[0] fileBlockSize := uint64(100) @@ -86,16 +86,16 @@ func createToolsCheckMergedBlocksE(blockPrinter BlockPrinterFunc) CommandExecuto return err } - printDetails := tools.PrintNoDetails + printDetails := PrintNoDetails if sflags.MustGetBool(cmd, "print-stats") { - printDetails = tools.PrintStats + printDetails = PrintStats } if sflags.MustGetBool(cmd, "print-full") { - printDetails = tools.PrintFull + printDetails = PrintFull } - return tools.CheckMergedBlocks(cmd.Context(), rootLog, storeURL, fileBlockSize, blockRange, func(block *pbbstream.Block) { + return CheckMergedBlocks(cmd.Context(), chain, rootLog, storeURL, fileBlockSize, blockRange, func(block *pbbstream.Block) { blockPrinter(block, false, os.Stdout) }, printDetails) } diff --git a/tools/check_blocks.go b/tools_check_blocks.go similarity index 73% rename from tools/check_blocks.go rename to tools_check_blocks.go index f739f11..c2ce247 100644 --- a/tools/check_blocks.go +++ b/tools_check_blocks.go @@ -1,4 +1,4 @@ -package tools +package firecore import ( "context" @@ -8,11 +8,13 @@ import ( "regexp" "strconv" + "github.com/streamingfast/firehose-core/tools" + "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/forkable" "github.com/streamingfast/dstore" + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "go.uber.org/zap" - "google.golang.org/protobuf/proto" ) var numberRegex = regexp.MustCompile(`(\d{10})`) @@ -26,15 +28,7 @@ const ( MaxUint64 = ^uint64(0) ) -func CheckMergedBlocks( - ctx 
context.Context, - logger *zap.Logger, - storeURL string, - fileBlockSize uint64, - blockRange BlockRange, - blockPrinter func(block *pbbstream.Block), - printDetails PrintDetails, -) error { +func CheckMergedBlocks[B Block](ctx context.Context, chain *Chain[B], logger *zap.Logger, storeURL string, fileBlockSize uint64, blockRange tools.BlockRange, blockPrinter func(block *pbbstream.Block), printDetails PrintDetails) error { readAllBlocks := printDetails != PrintNoDetails fmt.Printf("Checking block holes on %s\n", storeURL) if readAllBlocks { @@ -55,7 +49,7 @@ func CheckMergedBlocks( // } holeFound := false - expected = RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize) + expected = tools.RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize) currentStartBlk := uint64(blockRange.Start) blocksStore, err := dstore.NewDBinStore(storeURL) @@ -88,11 +82,11 @@ func CheckMergedBlocks( if baseNum != expected { // There is no previous valid block range if we are at the ever first seen file if count > 1 { - fmt.Printf("✅ Range %s\n", NewClosedRange(int64(currentStartBlk), uint64(RoundToBundleEndBlock(expected-fileBlockSize, fileBlockSize)))) + fmt.Printf("✅ Range %s\n", tools.NewClosedRange(int64(currentStartBlk), uint64(tools.RoundToBundleEndBlock(expected-fileBlockSize, fileBlockSize)))) } // Otherwise, we do not follow last seen element (previous is `100 - 199` but we are `299 - 300`) - missingRange := NewClosedRange(int64(expected), RoundToBundleEndBlock(baseNum-fileBlockSize, fileBlockSize)) + missingRange := tools.NewClosedRange(int64(expected), tools.RoundToBundleEndBlock(baseNum-fileBlockSize, fileBlockSize)) fmt.Printf("❌ Range %s (Missing, [%s])\n", missingRange, missingRange.ReprocRange()) currentStartBlk = baseNum @@ -101,7 +95,7 @@ func CheckMergedBlocks( expected = baseNum + fileBlockSize if readAllBlocks { - lowestBlockSegment, highestBlockSegment := validateBlockSegment(ctx, blocksStore, filename, fileBlockSize, blockRange, 
blockPrinter, printDetails, tfdb) + lowestBlockSegment, highestBlockSegment := validateBlockSegment(ctx, chain, blocksStore, filename, fileBlockSize, blockRange, blockPrinter, printDetails, tfdb) if lowestBlockSegment < lowestBlockSeen { lowestBlockSeen = lowestBlockSegment } @@ -118,11 +112,11 @@ func CheckMergedBlocks( } if count%10000 == 0 { - fmt.Printf("✅ Range %s\n", NewClosedRange(int64(currentStartBlk), RoundToBundleEndBlock(baseNum, fileBlockSize))) + fmt.Printf("✅ Range %s\n", tools.NewClosedRange(int64(currentStartBlk), tools.RoundToBundleEndBlock(baseNum, fileBlockSize))) currentStartBlk = baseNum + fileBlockSize } - if blockRange.IsClosed() && RoundToBundleEndBlock(baseNum, fileBlockSize) >= *blockRange.Stop-1 { + if blockRange.IsClosed() && tools.RoundToBundleEndBlock(baseNum, fileBlockSize) >= *blockRange.Stop-1 { return dstore.StopIteration } @@ -140,9 +134,9 @@ func CheckMergedBlocks( zap.Uint64("highest_block_seen", highestBlockSeen), ) if tfdb.lastLinkedBlock != nil && tfdb.lastLinkedBlock.Number < highestBlockSeen { - fmt.Printf("🔶 Range %s has issues with forks, last linkable block number: %d\n", NewClosedRange(int64(currentStartBlk), uint64(highestBlockSeen)), tfdb.lastLinkedBlock.Number) + fmt.Printf("🔶 Range %s has issues with forks, last linkable block number: %d\n", tools.NewClosedRange(int64(currentStartBlk), uint64(highestBlockSeen)), tfdb.lastLinkedBlock.Number) } else { - fmt.Printf("✅ Range %s\n", NewClosedRange(int64(currentStartBlk), uint64(highestBlockSeen))) + fmt.Printf("✅ Range %s\n", tools.NewClosedRange(int64(currentStartBlk), uint64(highestBlockSeen))) } fmt.Println() @@ -151,7 +145,7 @@ func CheckMergedBlocks( if blockRange.IsClosed() && (highestBlockSeen < uint64(*blockRange.Stop-1) || (lowestBlockSeen > uint64(blockRange.Start) && lowestBlockSeen > bstream.GetProtocolFirstStreamableBlock)) { - fmt.Printf("> 🔶 Incomplete range %s, started at block %s and stopped at block: %s\n", blockRange, PrettyBlockNum(lowestBlockSeen), 
PrettyBlockNum(highestBlockSeen)) + fmt.Printf("> 🔶 Incomplete range %s, started at block %s and stopped at block: %s\n", blockRange, tools.PrettyBlockNum(lowestBlockSeen), tools.PrettyBlockNum(highestBlockSeen)) } if holeFound { @@ -170,12 +164,13 @@ type trackedForkDB struct { unlinkableSegmentCount int } -func validateBlockSegment( +func validateBlockSegment[B Block]( ctx context.Context, + chain *Chain[B], store dstore.Store, segment string, fileBlockSize uint64, - blockRange BlockRange, + blockRange tools.BlockRange, blockPrinter func(block *pbbstream.Block), printDetails PrintDetails, tfdb *trackedForkDB, @@ -188,7 +183,7 @@ func validateBlockSegment( } defer reader.Close() - readerFactory, err := bstream.GetBlockReaderFactory.New(reader) + readerFactory, err := bstream.NewDBinBlockReader(reader) if err != nil { fmt.Printf("❌ Unable to read blocks segment %s: %s\n", segment, err) return @@ -214,10 +209,10 @@ func validateBlockSegment( } if !tfdb.fdb.HasLIB() { - tfdb.fdb.InitLIB(block) + tfdb.fdb.InitLIB(block.AsRef()) } - tfdb.fdb.AddLink(block.AsRef(), block.PreviousID(), nil) + tfdb.fdb.AddLink(block.AsRef(), block.ParentId, nil) revSeg, _ := tfdb.fdb.ReversibleSegment(block.AsRef()) if revSeg == nil { tfdb.unlinkableSegmentCount++ @@ -226,17 +221,17 @@ func validateBlockSegment( } // TODO: this print should be under a 'check forkable' flag? - fmt.Printf("🔶 Block #%d is not linkable at this point\n", block.Num()) + fmt.Printf("🔶 Block #%d is not linkable at this point\n", block.Number) if tfdb.unlinkableSegmentCount > 99 && tfdb.unlinkableSegmentCount%100 == 0 { // TODO: this print should be under a 'check forkable' flag? - fmt.Printf("❌ Large gap of %d unlinkable blocks found in chain. Last linked block: %d, first Unlinkable block: %d. \n", tfdb.unlinkableSegmentCount, tfdb.lastLinkedBlock.Num(), tfdb.firstUnlinkableBlock.Num()) + fmt.Printf("❌ Large gap of %d unlinkable blocks found in chain. Last linked block: %d, first Unlinkable block: %d. 
\n", tfdb.unlinkableSegmentCount, tfdb.lastLinkedBlock.Number, tfdb.firstUnlinkableBlock.Number) } } else { tfdb.lastLinkedBlock = block tfdb.unlinkableSegmentCount = 0 tfdb.firstUnlinkableBlock = nil - tfdb.fdb.SetLIB(block, block.PreviousId, block.LibNum) + tfdb.fdb.SetLIB(block.AsRef(), block.ParentId, block.LibNum) if tfdb.fdb.HasLIB() { tfdb.fdb.PurgeBeforeLIB(0) } @@ -248,7 +243,20 @@ func validateBlockSegment( } if printDetails == PrintFull { - out, err := json.MarshalIndent(block.ToProtocol().(proto.Message), "", " ") + var b = chain.BlockFactory() + + if _, ok := b.(*pbbstream.Block); ok { + //todo: implements when buf registry available ... + panic("printing full block is not supported for pbbstream.Block") + } + + if err := block.Payload.UnmarshalTo(b); err != nil { + fmt.Printf("❌ Unable unmarshall block %s: %s\n", block.AsRef(), err) + break + } + + out, err := json.MarshalIndent(b, "", " ") + if err != nil { fmt.Printf("❌ Unable to print full block %s: %s\n", block.AsRef(), err) continue @@ -273,15 +281,16 @@ func validateBlockSegment( return } } + return } -func WalkBlockPrefix(blockRange BlockRange, fileBlockSize uint64) string { +func WalkBlockPrefix(blockRange tools.BlockRange, fileBlockSize uint64) string { if blockRange.IsOpen() { return "" } - startString := fmt.Sprintf("%010d", RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize)) - endString := fmt.Sprintf("%010d", RoundToBundleEndBlock(uint64(*blockRange.Stop-1), fileBlockSize)+1) + startString := fmt.Sprintf("%010d", tools.RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize)) + endString := fmt.Sprintf("%010d", tools.RoundToBundleEndBlock(uint64(*blockRange.Stop-1), fileBlockSize)+1) offset := 0 for i := 0; i < len(startString); i++ { diff --git a/tools/check_merged_batch.go b/tools_check_merged_batch.go similarity index 84% rename from tools/check_merged_batch.go rename to tools_check_merged_batch.go index 09132f7..b85f648 100644 --- a/tools/check_merged_batch.go 
+++ b/tools_check_merged_batch.go @@ -1,4 +1,4 @@ -package tools +package firecore import ( "context" @@ -7,6 +7,10 @@ import ( "strconv" "strings" + "github.com/streamingfast/firehose-core/tools" + + pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + "github.com/streamingfast/bstream" "github.com/streamingfast/dstore" ) @@ -39,13 +43,13 @@ func CheckMergedBlocksBatch( sourceStoreURL string, destStoreURL string, fileBlockSize uint64, - blockRange BlockRange, + blockRange tools.BlockRange, ) error { if !blockRange.IsResolved() { return fmt.Errorf("check merged blocks can only work with fully resolved range, got %s", blockRange) } - expected := RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize) + expected := tools.RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize) fileBlockSize64 := uint64(fileBlockSize) blocksStore, err := dstore.NewDBinStore(sourceStoreURL) @@ -60,7 +64,7 @@ func CheckMergedBlocksBatch( } } - var firstFilename = fmt.Sprintf("%010d", RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize)) + var firstFilename = fmt.Sprintf("%010d", tools.RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize)) lastSeenBlock := &blockRef{} @@ -101,7 +105,7 @@ func CheckMergedBlocksBatch( destStore.WriteObject(ctx, outputFile, strings.NewReader("")) } } else { - brokenSince := RoundToBundleStartBlock(uint64(lastSeenBlock.num+1), 100) + brokenSince := tools.RoundToBundleStartBlock(uint64(lastSeenBlock.num+1), 100) for i := brokenSince; i <= baseNum; i += fileBlockSize64 { fmt.Printf("found broken file %q, %s\n", filename, details) if destStore != nil { @@ -118,7 +122,7 @@ func CheckMergedBlocksBatch( return err } - if blockRange.IsClosed() && RoundToBundleEndBlock(baseNum, fileBlockSize) >= *blockRange.Stop-1 { + if blockRange.IsClosed() && tools.RoundToBundleEndBlock(baseNum, fileBlockSize) >= *blockRange.Stop-1 { return dstore.StopIteration } expected = baseNum + fileBlockSize64 @@ -152,7 +156,7 @@ func 
checkMergedBlockFileBroken( } defer reader.Close() - readerFactory, err := bstream.GetBlockReaderFactory.New(reader) + readerFactory, err := bstream.NewDBinBlockReader(reader) if err != nil { return true, "", err } @@ -182,13 +186,13 @@ func checkMergedBlockFileBroken( if fakePreviousNum != 0 { fakePreviousNum -= 1 } - lastSeenBlock.set(block.PreviousId, fakePreviousNum) + lastSeenBlock.set(block.ParentId, fakePreviousNum) } - if block.PreviousId != lastSeenBlock.hash { + if block.ParentId != lastSeenBlock.hash { if block.Id == lastSeenBlock.hash && block.Number == lastSeenBlock.num { continue } - details = fmt.Sprintf("broken on block %d: expecting %q, got %q", block.Number, lastSeenBlock.hash, block.PreviousId) + details = fmt.Sprintf("broken on block %d: expecting %q, got %q", block.Number, lastSeenBlock.hash, block.ParentId) broken = true return } diff --git a/tools_checkmergedbatch.go b/tools_checkmergedbatch.go index 22aec06..26b13c7 100644 --- a/tools_checkmergedbatch.go +++ b/tools_checkmergedbatch.go @@ -19,7 +19,7 @@ import ( "github.com/spf13/cobra" "github.com/streamingfast/cli/sflags" - "github.com/streamingfast/firehose-core/firehose/tools" + "github.com/streamingfast/firehose-core/tools" ) var toolsCheckMergedBlocksBatchCmd = &cobra.Command{ @@ -54,5 +54,5 @@ func checkMergedBlocksBatchRunE(cmd *cobra.Command, args []string) error { resultsStoreURL := sflags.MustGetString(cmd, "output-to-store") - return tools.CheckMergedBlocksBatch(cmd.Context(), storeURL, resultsStoreURL, fileBlockSize, blockRange) + return CheckMergedBlocksBatch(cmd.Context(), storeURL, resultsStoreURL, fileBlockSize, blockRange) } diff --git a/tools_compare_blocks.go b/tools_compare_blocks.go index 460fa62..2ab7991 100644 --- a/tools_compare_blocks.go +++ b/tools_compare_blocks.go @@ -30,7 +30,7 @@ import ( "github.com/streamingfast/cli" "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/firehose/tools" + 
"github.com/streamingfast/firehose-core/tools" "go.uber.org/multierr" "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/proto" @@ -91,7 +91,7 @@ func runCompareBlocksE[B Block](chain *Chain[B]) CommandExecutor { return fmt.Errorf("invalid block range, you must provide a closed range fully resolved (no negative value)") } - stopBlock := uint64(blockRange.GetStopBlockOr(tools.MaxUint64)) + stopBlock := uint64(blockRange.GetStopBlockOr(MaxUint64)) // Create stores storeReference, err := dstore.NewDBinStore(args[0]) @@ -111,7 +111,7 @@ func runCompareBlocksE[B Block](chain *Chain[B]) CommandExecutor { segments: segments, } - err = storeReference.Walk(ctx, tools.WalkBlockPrefix(blockRange, 100), func(filename string) (err error) { + err = storeReference.Walk(ctx, WalkBlockPrefix(blockRange, 100), func(filename string) (err error) { fileStartBlock, err := strconv.Atoi(filename) if err != nil { return fmt.Errorf("parsing filename: %w", err) @@ -213,7 +213,7 @@ func readBundle[B Block]( return nil, nil, fmt.Errorf("creating reader: %w", err) } - blockReader, err := bstream.GetBlockReaderFactory.New(fileReader) + blockReader, err := bstream.NewDBinBlockReader(fileReader) if err != nil { return nil, nil, fmt.Errorf("creating block reader: %w", err) } @@ -238,7 +238,7 @@ func readBundle[B Block]( continue } - curBlockPB := sanitizer(curBlock.ToProtocol().(B)) + curBlockPB := sanitizer(any(curBlock).(B)) blockHashes = append(blockHashes, curBlock.Id) blocksMap[curBlock.Id] = curBlockPB } @@ -279,7 +279,7 @@ func (s *state) process(blockNum uint64, isDifferent bool, isMissing bool) { } func (s *state) print() { - endBlock := fmt.Sprintf("%d", s.segments[s.currentSegmentIdx].GetStopBlockOr(tools.MaxUint64)) + endBlock := fmt.Sprintf("%d", s.segments[s.currentSegmentIdx].GetStopBlockOr(MaxUint64)) if s.totalBlocksCounted == 0 { fmt.Printf("✖ No blocks were found at all for segment %d - %s\n", s.segments[s.currentSegmentIdx].Start, endBlock) diff --git 
a/tools_download_from_firehose.go b/tools_download_from_firehose.go index 5d137bf..fdfded7 100644 --- a/tools_download_from_firehose.go +++ b/tools_download_from_firehose.go @@ -135,8 +135,8 @@ func createToolsDownloadFromFirehoseE[B Block](chain *Chain[B], zlog *zap.Logger if err != nil { return fmt.Errorf("error decoding response to bstream block: %w", err) } - if lastBlockID != "" && blk.PreviousId != lastBlockID { - return fmt.Errorf("got an invalid sequence of blocks: block %q has previousId %s, previous block %d had ID %q, this endpoint is serving blocks out of order", blk.String(), blk.PreviousId, lastBlockNum, lastBlockID) + if lastBlockID != "" && blk.ParentId != lastBlockID { + return fmt.Errorf("got an invalid sequence of blocks: block %q has previousId %s, previous block %d had ID %q, this endpoint is serving blocks out of order", blk.String(), blk.ParentId, lastBlockNum, lastBlockID) } lastBlockID = blk.Id lastBlockNum = blk.Number diff --git a/tools_firehose_client.go b/tools_firehose_client.go index 7c6c93f..95ada32 100644 --- a/tools_firehose_client.go +++ b/tools_firehose_client.go @@ -7,7 +7,7 @@ import ( "github.com/spf13/cobra" "github.com/streamingfast/cli/sflags" - "github.com/streamingfast/firehose-core/firehose/tools" + "github.com/streamingfast/firehose-core/tools" "github.com/streamingfast/jsonpb" pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" diff --git a/tools_fix_bloated_merged_blocks.go b/tools_fix_bloated_merged_blocks.go index 8a17c4a..efc971e 100644 --- a/tools_fix_bloated_merged_blocks.go +++ b/tools_fix_bloated_merged_blocks.go @@ -7,7 +7,7 @@ import ( "github.com/spf13/cobra" "github.com/streamingfast/bstream" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/firehose/tools" + "github.com/streamingfast/firehose-core/tools" pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "go.uber.org/zap" ) @@ -40,12 +40,12 @@ func runFixBloatedMergedBlocksE(zlog *zap.Logger) 
CommandExecutor { return fmt.Errorf("parsing block range: %w", err) } - err = srcStore.Walk(ctx, tools.WalkBlockPrefix(blockRange, 100), func(filename string) error { + err = srcStore.Walk(ctx, WalkBlockPrefix(blockRange, 100), func(filename string) error { zlog.Debug("checking merged block file", zap.String("filename", filename)) startBlock := mustParseUint64(filename) - if startBlock > uint64(blockRange.GetStopBlockOr(tools.MaxUint64)) { + if startBlock > uint64(blockRange.GetStopBlockOr(MaxUint64)) { zlog.Debug("skipping merged block file", zap.String("reason", "past stop block"), zap.String("filename", filename)) return dstore.StopIteration } @@ -88,7 +88,7 @@ func runFixBloatedMergedBlocksE(zlog *zap.Logger) CommandExecutor { continue } - if block.Number > uint64(blockRange.GetStopBlockOr(tools.MaxUint64)) { + if block.Number > uint64(blockRange.GetStopBlockOr(MaxUint64)) { break } @@ -97,8 +97,8 @@ func runFixBloatedMergedBlocksE(zlog *zap.Logger) CommandExecutor { continue } - if lastBlockID != "" && block.PreviousId != lastBlockID { - return fmt.Errorf("got an invalid sequence of blocks: block %q has previousId %s, previous block %d had ID %q, this endpoint is serving blocks out of order", block.String(), block.PreviousId, lastBlockNum, lastBlockID) + if lastBlockID != "" && block.ParentId != lastBlockID { + return fmt.Errorf("got an invalid sequence of blocks: block %q has previousId %s, previous block %d had ID %q, this endpoint is serving blocks out of order", block.String(), block.ParentId, lastBlockNum, lastBlockID) } lastBlockID = block.Id lastBlockNum = block.Number diff --git a/tools_print.go b/tools_print.go index 311018d..2ac0f00 100644 --- a/tools_print.go +++ b/tools_print.go @@ -31,7 +31,7 @@ import ( "github.com/streamingfast/bstream" "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/firehose/tools" + "github.com/streamingfast/firehose-core/tools" ) var toolsPrintCmd = 
&cobra.Command{ diff --git a/tools_unmerge_blocks.go b/tools_unmerge_blocks.go index b400af0..07aba91 100644 --- a/tools_unmerge_blocks.go +++ b/tools_unmerge_blocks.go @@ -9,7 +9,7 @@ import ( "github.com/streamingfast/bstream" "github.com/streamingfast/cli" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/firehose/tools" + "github.com/streamingfast/firehose-core/tools" pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "go.uber.org/zap" ) @@ -42,12 +42,12 @@ func runUnmergeBlocksE(zlog *zap.Logger) CommandExecutor { return fmt.Errorf("parsing block range: %w", err) } - err = srcStore.Walk(ctx, tools.WalkBlockPrefix(blockRange, 100), func(filename string) error { + err = srcStore.Walk(ctx, WalkBlockPrefix(blockRange, 100), func(filename string) error { zlog.Debug("checking merged block file", zap.String("filename", filename)) startBlock := mustParseUint64(filename) - if startBlock > uint64(blockRange.GetStopBlockOr(tools.MaxUint64)) { + if startBlock > uint64(blockRange.GetStopBlockOr(MaxUint64)) { zlog.Debug("skipping merged block file", zap.String("reason", "past stop block"), zap.String("filename", filename)) return dstore.StopIteration } @@ -79,7 +79,7 @@ func runUnmergeBlocksE(zlog *zap.Logger) CommandExecutor { continue } - if block.Number > uint64(blockRange.GetStopBlockOr(tools.MaxUint64)) { + if block.Number > uint64(blockRange.GetStopBlockOr(MaxUint64)) { break } diff --git a/types.go b/types.go index 14cb5c2..394cc8d 100644 --- a/types.go +++ b/types.go @@ -64,14 +64,6 @@ type Block interface { // GetFirehoseBlockTime returns the block timestamp as a time.Time of when the block was // produced. This should the consensus agreed time of the block. GetFirehoseBlockTime() time.Time - - // GetFirehoseBlockVersion returns the version of this block. This is used to determine - // what value to assign to `bstream.Block#PayloadVersion` variable when encoding a chain - // specific block to a chain agnostic `bstream.Block` type. 
- // - // If you come here because you now need to implement this value, you can implement so it - // returned a fixed value, usually `chain.ProtocolVersion`: - GetFirehoseBlockVersion() int32 } // BlockLIBNumDerivable is an optional interface that can be implemented by your chain's block model Block @@ -158,18 +150,18 @@ func EncodeBlock(b Block) (blk *pbbstream.Block, err error) { ) } - var blockPayload *anypb.Any + blockPayload := &anypb.Any{} if err := proto.Unmarshal(content, blockPayload); err != nil { return nil, fmt.Errorf("unmarshaling block payload: %w", err) } bstreamBlock := &pbbstream.Block{ - Id: b.GetFirehoseBlockID(), - Number: b.GetFirehoseBlockNumber(), - PreviousId: b.GetFirehoseBlockParentID(), - Timestamp: timestamppb.New(b.GetFirehoseBlockTime()), - LibNum: v.GetFirehoseBlockLIBNum(), - Payload: blockPayload, + Id: b.GetFirehoseBlockID(), + Number: b.GetFirehoseBlockNumber(), + ParentId: b.GetFirehoseBlockParentID(), + Timestamp: timestamppb.New(b.GetFirehoseBlockTime()), + LibNum: v.GetFirehoseBlockLIBNum(), + Payload: blockPayload, } return bstreamBlock, nil From 7a5d1dc41ad28e2ba2115f39e5e52d38c338fac9 Mon Sep 17 00:00:00 2001 From: billettc Date: Mon, 20 Nov 2023 12:42:20 -0500 Subject: [PATCH 08/66] all test pass! 
--- go.mod | 4 ++-- go.sum | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index b502dd8..09e7a4d 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/streamingfast/bstream v0.0.2-0.20231116220707-c5946cce90ff github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 - github.com/streamingfast/dbin v0.9.1-0.20231115202300-f0d94cacb713 + github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c github.com/streamingfast/derr v0.0.0-20230515163924-8570aaa43fe1 github.com/streamingfast/dgrpc v0.0.0-20230929132851-893fc52687fa github.com/streamingfast/dlauncher v0.0.0-20230607184145-76399faad89e @@ -26,7 +26,7 @@ require ( github.com/streamingfast/dstore v0.1.1-0.20230620124109-3924b3b36c77 github.com/streamingfast/jsonpb v0.0.0-20210811021341-3670f0aa02d0 github.com/streamingfast/logging v0.0.0-20230608130331-f22c91403091 - github.com/streamingfast/pbgo v0.0.6-0.20231116213602-165f136fce2c + github.com/streamingfast/pbgo v0.0.6-0.20231120172814-537d034aad5e github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0 github.com/streamingfast/substreams v1.1.21-0.20231117153234-b3b90b45b5be github.com/stretchr/testify v1.8.4 diff --git a/go.sum b/go.sum index 979af70..c9ec1ce 100644 --- a/go.sum +++ b/go.sum @@ -580,6 +580,7 @@ github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 h1:49JYZkn8ALG github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330/go.mod h1:zfq+mtesfbaZnNeh1BF+vo+zEFP1sat4pm3lvt40nRw= github.com/streamingfast/dbin v0.9.1-0.20231115202300-f0d94cacb713 h1:pVUxJeGfR8+WU+t1ZHiPqRFqeZOp2swYbYW57BAEpWI= github.com/streamingfast/dbin v0.9.1-0.20231115202300-f0d94cacb713/go.mod h1:dbfiy9ORrL8c6ldSq+L0H9pg8TOqqu/FsghsgUEWK54= +github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c/go.mod h1:dbfiy9ORrL8c6ldSq+L0H9pg8TOqqu/FsghsgUEWK54= github.com/streamingfast/derr 
v0.0.0-20230515163924-8570aaa43fe1 h1:xJB7rXnOHLesosMjfwWsEL2i/40mFSkzenEb3M0qTyM= github.com/streamingfast/derr v0.0.0-20230515163924-8570aaa43fe1/go.mod h1:QSm/AfaDsE0k1xBYi0lW580YJ/WDV/FKZI628tkZR0Y= github.com/streamingfast/dgrpc v0.0.0-20230929132851-893fc52687fa h1:L/Ipge5pkZtyHucT7c8F/PiCitiNqQxjoUuxyzWKZew= @@ -607,6 +608,7 @@ github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef h1:9IVFHR github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef/go.mod h1:cq8CvbZ3ioFmGrHokSAJalS0lC+pVXLKhITScItUGXY= github.com/streamingfast/pbgo v0.0.6-0.20231116213602-165f136fce2c h1:DPvapg4SdVcXlk/lxAFSYJJTh9+M7UGQ2+XZGdw4wko= github.com/streamingfast/pbgo v0.0.6-0.20231116213602-165f136fce2c/go.mod h1:fZuijmeFrqxW2YnnXmGrkQpUTHx3eHCaJUKwdvXAYKM= +github.com/streamingfast/pbgo v0.0.6-0.20231120172814-537d034aad5e/go.mod h1:fZuijmeFrqxW2YnnXmGrkQpUTHx3eHCaJUKwdvXAYKM= github.com/streamingfast/sf-tracing v0.0.0-20230616174903-cd2ade641ca9 h1:YRwpVvLYa+FEJlTy0S7mk4UptYjk5zac+A+ZE1phOeA= github.com/streamingfast/sf-tracing v0.0.0-20230616174903-cd2ade641ca9/go.mod h1:ktzt1BUj3GF+SKQHEmn3ShryJ7y87JeCHtaTGaDVATs= github.com/streamingfast/shutter v1.5.0 h1:NpzDYzj0HVpSiDJVO/FFSL6QIK/YKOxY0gJAtyaTOgs= From c47d93756add6423d497617f26b1b489dc8bb63b Mon Sep 17 00:00:00 2001 From: billettc Date: Mon, 20 Nov 2023 12:58:35 -0500 Subject: [PATCH 09/66] bump bstream with new pbbstream.Block --- chain.go | 2 +- cmd/firecore/main.go | 2 +- consolereader.go | 5 ++--- firehose/app/firehose/app.go | 2 +- firehose/factory.go | 2 +- firehose/server/blocks.go | 2 +- firehose/tests/integration_test.go | 2 +- firehose/tests/stream_blocks_test.go | 2 +- go.mod | 4 ++-- go.sum | 10 ++++------ index-builder/index-builder.go | 4 +++- index_builder.go | 2 +- merger/bundler.go | 2 +- merger/bundlereader.go | 2 +- merger/merger_io.go | 2 +- node-manager/app/node_reader_stdin/app.go | 2 +- node-manager/mindreader/archiver.go | 2 +- node-manager/mindreader/mindreader.go | 2 
+- node-manager/mindreader/mindreader_test.go | 5 ++--- node-manager/monitor.go | 2 +- node-manager/types.go | 2 +- reader_node.go | 2 +- relayer/relayer.go | 2 +- tools_check.go | 2 +- tools_check_blocks.go | 2 +- tools_check_merged_batch.go | 2 +- tools_download_from_firehose.go | 2 +- tools_fix_bloated_merged_blocks.go | 2 +- tools_print.go | 2 +- tools_unmerge_blocks.go | 2 +- tools_upgrade_merged_blocks.go | 2 +- types.go | 2 +- unsafe_extensions.go | 2 +- 33 files changed, 41 insertions(+), 43 deletions(-) diff --git a/chain.go b/chain.go index 48922c4..b14f01a 100644 --- a/chain.go +++ b/chain.go @@ -9,10 +9,10 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/firehose-core/node-manager/mindreader" "github.com/streamingfast/firehose-core/node-manager/operator" "github.com/streamingfast/logging" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "go.uber.org/multierr" "go.uber.org/zap" "google.golang.org/protobuf/reflect/protoreflect" diff --git a/cmd/firecore/main.go b/cmd/firecore/main.go index b97edb5..5ad8ce1 100644 --- a/cmd/firecore/main.go +++ b/cmd/firecore/main.go @@ -1,8 +1,8 @@ package main import ( + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" firecore "github.com/streamingfast/firehose-core" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" ) func main() { diff --git a/consolereader.go b/consolereader.go index 5b06b89..6f2a473 100644 --- a/consolereader.go +++ b/consolereader.go @@ -10,10 +10,9 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" - "github.com/streamingfast/bstream" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/firehose-core/node-manager/mindreader" "github.com/streamingfast/logging" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "go.uber.org/zap" "google.golang.org/protobuf/proto" 
"google.golang.org/protobuf/types/known/anypb" @@ -148,7 +147,7 @@ func (ctx *parseCtx) readBlock(line string) (out *pbbstream.Block, err error) { return nil, fmt.Errorf("invalid payload type, expected %q, got %q", ctx.protoMessageType, blockPayload.TypeUrl) } - block := &bstream.Block{ + block := &pbbstream.Block{ Id: blockHash, Number: blockNum, ParentId: parentHash, diff --git a/firehose/app/firehose/app.go b/firehose/app/firehose/app.go index d66177e..398ee30 100644 --- a/firehose/app/firehose/app.go +++ b/firehose/app/firehose/app.go @@ -20,7 +20,7 @@ import ( "net/url" "time" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/blockstream" diff --git a/firehose/factory.go b/firehose/factory.go index d5b120e..aaa3019 100644 --- a/firehose/factory.go +++ b/firehose/factory.go @@ -9,11 +9,11 @@ import ( "github.com/streamingfast/bstream/hub" "github.com/streamingfast/bstream/stream" "github.com/streamingfast/bstream/transform" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/dauth" "github.com/streamingfast/derr" "github.com/streamingfast/dmetering" "github.com/streamingfast/dstore" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" "google.golang.org/grpc/codes" diff --git a/firehose/server/blocks.go b/firehose/server/blocks.go index 5d64399..2ec99f8 100644 --- a/firehose/server/blocks.go +++ b/firehose/server/blocks.go @@ -7,7 +7,7 @@ import ( "os" "time" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/stream" diff --git a/firehose/tests/integration_test.go b/firehose/tests/integration_test.go index 72e20ef..654dbd1 100644 --- 
a/firehose/tests/integration_test.go +++ b/firehose/tests/integration_test.go @@ -10,7 +10,7 @@ package firehose // "github.com/alicebob/miniredis/v2/server" // "github.com/streamingfast/bstream" // "github.com/streamingfast/dstore" -// pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" +// pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" // pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v1" // "github.com/stretchr/testify/assert" // "github.com/stretchr/testify/require" diff --git a/firehose/tests/stream_blocks_test.go b/firehose/tests/stream_blocks_test.go index 7dbd14e..8914170 100644 --- a/firehose/tests/stream_blocks_test.go +++ b/firehose/tests/stream_blocks_test.go @@ -9,7 +9,7 @@ package firehose // // "github.com/streamingfast/bstream" // "github.com/streamingfast/dstore" -// pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" +// pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" // "github.com/stretchr/testify/assert" // "github.com/stretchr/testify/require" // "go.uber.org/zap" diff --git a/go.mod b/go.mod index 09e7a4d..4139b3d 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 - github.com/streamingfast/bstream v0.0.2-0.20231116220707-c5946cce90ff + github.com/streamingfast/bstream v0.0.2-0.20231120175342-23d38d055176 github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c @@ -28,7 +28,7 @@ require ( github.com/streamingfast/logging v0.0.0-20230608130331-f22c91403091 github.com/streamingfast/pbgo v0.0.6-0.20231120172814-537d034aad5e github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0 - github.com/streamingfast/substreams v1.1.21-0.20231117153234-b3b90b45b5be + github.com/streamingfast/substreams v1.1.21-0.20231120175501-9d89549d81a1 
github.com/stretchr/testify v1.8.4 go.uber.org/multierr v1.10.0 go.uber.org/zap v1.26.0 diff --git a/go.sum b/go.sum index c9ec1ce..2dc82fe 100644 --- a/go.sum +++ b/go.sum @@ -578,8 +578,7 @@ github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 h1:UxJUTcEVkdZ github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80/go.mod h1:QxjVH73Lkqk+mP8bndvhMuQDUINfkgsYhdCH/5TJFKI= github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 h1:49JYZkn8ALGe+LhcACZyX3L9B8tIxRZ3F3l+OxmNMhY= github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330/go.mod h1:zfq+mtesfbaZnNeh1BF+vo+zEFP1sat4pm3lvt40nRw= -github.com/streamingfast/dbin v0.9.1-0.20231115202300-f0d94cacb713 h1:pVUxJeGfR8+WU+t1ZHiPqRFqeZOp2swYbYW57BAEpWI= -github.com/streamingfast/dbin v0.9.1-0.20231115202300-f0d94cacb713/go.mod h1:dbfiy9ORrL8c6ldSq+L0H9pg8TOqqu/FsghsgUEWK54= +github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c h1:6WjE2yInE+5jnI7cmCcxOiGZiEs2FQm9Zsg2a9Ivp0Q= github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c/go.mod h1:dbfiy9ORrL8c6ldSq+L0H9pg8TOqqu/FsghsgUEWK54= github.com/streamingfast/derr v0.0.0-20230515163924-8570aaa43fe1 h1:xJB7rXnOHLesosMjfwWsEL2i/40mFSkzenEb3M0qTyM= github.com/streamingfast/derr v0.0.0-20230515163924-8570aaa43fe1/go.mod h1:QSm/AfaDsE0k1xBYi0lW580YJ/WDV/FKZI628tkZR0Y= @@ -606,8 +605,7 @@ github.com/streamingfast/opaque v0.0.0-20210811180740-0c01d37ea308 h1:xlWSfi1BoP github.com/streamingfast/opaque v0.0.0-20210811180740-0c01d37ea308/go.mod h1:K1p8Bj/wG34KJvYzPUqtzpndffmpkrVY11u2hkyxCWQ= github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef h1:9IVFHRsqvI+vKJwgF1OMV6L55jHbaV/ZLoU4IAG/dME= github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef/go.mod h1:cq8CvbZ3ioFmGrHokSAJalS0lC+pVXLKhITScItUGXY= -github.com/streamingfast/pbgo v0.0.6-0.20231116213602-165f136fce2c h1:DPvapg4SdVcXlk/lxAFSYJJTh9+M7UGQ2+XZGdw4wko= -github.com/streamingfast/pbgo v0.0.6-0.20231116213602-165f136fce2c/go.mod 
h1:fZuijmeFrqxW2YnnXmGrkQpUTHx3eHCaJUKwdvXAYKM= +github.com/streamingfast/pbgo v0.0.6-0.20231120172814-537d034aad5e h1:8hoT2QUwh+YNgIcCPux9xd4u9XojHR8hbyAzz7rQuEM= github.com/streamingfast/pbgo v0.0.6-0.20231120172814-537d034aad5e/go.mod h1:fZuijmeFrqxW2YnnXmGrkQpUTHx3eHCaJUKwdvXAYKM= github.com/streamingfast/sf-tracing v0.0.0-20230616174903-cd2ade641ca9 h1:YRwpVvLYa+FEJlTy0S7mk4UptYjk5zac+A+ZE1phOeA= github.com/streamingfast/sf-tracing v0.0.0-20230616174903-cd2ade641ca9/go.mod h1:ktzt1BUj3GF+SKQHEmn3ShryJ7y87JeCHtaTGaDVATs= @@ -615,8 +613,8 @@ github.com/streamingfast/shutter v1.5.0 h1:NpzDYzj0HVpSiDJVO/FFSL6QIK/YKOxY0gJAt github.com/streamingfast/shutter v1.5.0/go.mod h1:B/T6efqdeMGbGwjzPS1ToXzYZI4kDzI5/u4I+7qbjY8= github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0 h1:Y15G1Z4fpEdm2b+/70owI7TLuXadlqBtGM7rk4Hxrzk= github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0/go.mod h1:/Rnz2TJvaShjUct0scZ9kKV2Jr9/+KBAoWy4UMYxgv4= -github.com/streamingfast/substreams v1.1.21-0.20231117153234-b3b90b45b5be h1:/SKBNBzCSrt/gQmzKDro8BmFH5C3fyVEYustmTL7qs4= -github.com/streamingfast/substreams v1.1.21-0.20231117153234-b3b90b45b5be/go.mod h1:9KE5zXdfRAnXVyilMnuPcqMujFobaJ+LsKXADBYssJs= +github.com/streamingfast/substreams v1.1.21-0.20231120175501-9d89549d81a1 h1:PQeoATwMtCXKw2ztdS3ruwIOMzCj4GEGkkxB8LvcMOE= +github.com/streamingfast/substreams v1.1.21-0.20231120175501-9d89549d81a1/go.mod h1:HimUVtUnRKCdWfkNKFdVgqGCcrNu49+Az5Cyzdpuc4Q= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= diff --git a/index-builder/index-builder.go b/index-builder/index-builder.go index b926d0a..e6f6d0e 100644 --- a/index-builder/index-builder.go +++ b/index-builder/index-builder.go @@ -5,6 +5,8 @@ import ( "errors" "fmt" + pbbstream 
"github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/stream" "github.com/streamingfast/dstore" @@ -67,7 +69,7 @@ func (app *IndexBuilder) launch() error { FinalBlocksOnly: true, } - handlerFunc := func(block *bstream.Block, obj interface{}) error { + handlerFunc := func(block *pbbstream.Block, obj interface{}) error { app.logger.Debug("handling block", zap.Uint64("block_num", block.Number)) metrics.HeadBlockNumber.SetUint64(block.Number) diff --git a/index_builder.go b/index_builder.go index 98e6e5d..6878d71 100644 --- a/index_builder.go +++ b/index_builder.go @@ -10,8 +10,8 @@ import ( "github.com/spf13/viper" "github.com/streamingfast/bstream" bstransform "github.com/streamingfast/bstream/transform" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/dlauncher/launcher" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" ) func registerIndexBuilderApp[B Block](chain *Chain[B]) { diff --git a/merger/bundler.go b/merger/bundler.go index 57dd285..1b191d0 100644 --- a/merger/bundler.go +++ b/merger/bundler.go @@ -23,7 +23,7 @@ import ( "sync" "time" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/forkable" diff --git a/merger/bundlereader.go b/merger/bundlereader.go index df4e167..e2ec0ba 100644 --- a/merger/bundlereader.go +++ b/merger/bundlereader.go @@ -57,7 +57,7 @@ func NewBundleReader(ctx context.Context, logger *zap.Logger, tracer logging.Tra } r.header = dbinReader.Header - r.headerLength = len(r.header.Data) + r.headerLength = len(r.header.RawBytes) if len(data) < r.headerLength { return nil, fmt.Errorf("one-block-file corrupt: expected header size of %d, but file size is only %d bytes", r.headerLength, len(data)) diff --git a/merger/merger_io.go b/merger/merger_io.go index 
28195e2..584fa28 100644 --- a/merger/merger_io.go +++ b/merger/merger_io.go @@ -12,7 +12,7 @@ import ( "sync" "time" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/bstream" "github.com/streamingfast/dstore" diff --git a/node-manager/app/node_reader_stdin/app.go b/node-manager/app/node_reader_stdin/app.go index 3a54eca..e4244e3 100644 --- a/node-manager/app/node_reader_stdin/app.go +++ b/node-manager/app/node_reader_stdin/app.go @@ -20,13 +20,13 @@ import ( "os" "github.com/streamingfast/bstream/blockstream" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" dgrpcserver "github.com/streamingfast/dgrpc/server" dgrpcfactory "github.com/streamingfast/dgrpc/server/factory" nodeManager "github.com/streamingfast/firehose-core/node-manager" logplugin "github.com/streamingfast/firehose-core/node-manager/log_plugin" "github.com/streamingfast/firehose-core/node-manager/mindreader" "github.com/streamingfast/logging" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" pbheadinfo "github.com/streamingfast/pbgo/sf/headinfo/v1" "github.com/streamingfast/shutter" "go.uber.org/zap" diff --git a/node-manager/mindreader/archiver.go b/node-manager/mindreader/archiver.go index 8297120..2217797 100644 --- a/node-manager/mindreader/archiver.go +++ b/node-manager/mindreader/archiver.go @@ -19,7 +19,7 @@ import ( "fmt" "io" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/bstream" "github.com/streamingfast/dstore" diff --git a/node-manager/mindreader/mindreader.go b/node-manager/mindreader/mindreader.go index e621474..e1314a3 100644 --- a/node-manager/mindreader/mindreader.go +++ b/node-manager/mindreader/mindreader.go @@ -23,7 +23,7 @@ import ( "regexp" "sync" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + pbbstream 
"github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/blockstream" diff --git a/node-manager/mindreader/mindreader_test.go b/node-manager/mindreader/mindreader_test.go index fb1da85..9c15595 100644 --- a/node-manager/mindreader/mindreader_test.go +++ b/node-manager/mindreader/mindreader_test.go @@ -10,9 +10,8 @@ import ( "testing" "time" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" - "github.com/streamingfast/bstream" "github.com/streamingfast/shutter" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -150,7 +149,7 @@ func (c *testConsoleReader) ReadBlock() (*pbbstream.Block, error) { if err := json.Unmarshal([]byte(formatedLine), data); err != nil { return nil, fmt.Errorf("marshalling error on '%s': %w", formatedLine, err) } - return &bstream.Block{ + return &pbbstream.Block{ Id: data.ID, Number: toBlockNum(data.ID), }, nil diff --git a/node-manager/monitor.go b/node-manager/monitor.go index 9a17317..beee7d9 100644 --- a/node-manager/monitor.go +++ b/node-manager/monitor.go @@ -3,7 +3,7 @@ package node_manager import ( "time" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/dmetrics" "go.uber.org/atomic" diff --git a/node-manager/types.go b/node-manager/types.go index d7e5266..6c5df75 100644 --- a/node-manager/types.go +++ b/node-manager/types.go @@ -14,7 +14,7 @@ package node_manager -import pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" +import pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" type DeepMindDebuggable interface { DebugDeepMind(enabled bool) diff --git a/reader_node.go b/reader_node.go index a273ce2..bf73207 100644 --- a/reader_node.go +++ b/reader_node.go @@ -11,6 +11,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" 
"github.com/streamingfast/bstream/blockstream" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/cli" "github.com/streamingfast/dlauncher/launcher" nodeManager "github.com/streamingfast/firehose-core/node-manager" @@ -20,7 +21,6 @@ import ( "github.com/streamingfast/firehose-core/node-manager/operator" sv "github.com/streamingfast/firehose-core/superviser" "github.com/streamingfast/logging" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" pbheadinfo "github.com/streamingfast/pbgo/sf/headinfo/v1" "github.com/streamingfast/snapshotter" "go.uber.org/zap" diff --git a/relayer/relayer.go b/relayer/relayer.go index e84696c..dc268f2 100644 --- a/relayer/relayer.go +++ b/relayer/relayer.go @@ -19,7 +19,7 @@ import ( "strings" "time" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/blockstream" diff --git a/tools_check.go b/tools_check.go index 56e3d4c..f514e89 100644 --- a/tools_check.go +++ b/tools_check.go @@ -22,11 +22,11 @@ import ( "github.com/dustin/go-humanize" "github.com/spf13/cobra" "github.com/streamingfast/bstream" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/cli" "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dstore" "github.com/streamingfast/firehose-core/tools" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "golang.org/x/exp/maps" "golang.org/x/exp/slices" ) diff --git a/tools_check_blocks.go b/tools_check_blocks.go index c2ce247..a88fbee 100644 --- a/tools_check_blocks.go +++ b/tools_check_blocks.go @@ -12,8 +12,8 @@ import ( "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/forkable" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/dstore" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "go.uber.org/zap" 
) diff --git a/tools_check_merged_batch.go b/tools_check_merged_batch.go index b85f648..63ffdb9 100644 --- a/tools_check_merged_batch.go +++ b/tools_check_merged_batch.go @@ -9,7 +9,7 @@ import ( "github.com/streamingfast/firehose-core/tools" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/bstream" "github.com/streamingfast/dstore" diff --git a/tools_download_from_firehose.go b/tools_download_from_firehose.go index fdfded7..55d6d6b 100644 --- a/tools_download_from_firehose.go +++ b/tools_download_from_firehose.go @@ -9,8 +9,8 @@ import ( "github.com/spf13/cobra" "github.com/streamingfast/bstream" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/dstore" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" "google.golang.org/protobuf/proto" diff --git a/tools_fix_bloated_merged_blocks.go b/tools_fix_bloated_merged_blocks.go index efc971e..c4125c3 100644 --- a/tools_fix_bloated_merged_blocks.go +++ b/tools_fix_bloated_merged_blocks.go @@ -6,9 +6,9 @@ import ( "github.com/spf13/cobra" "github.com/streamingfast/bstream" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/dstore" "github.com/streamingfast/firehose-core/tools" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "go.uber.org/zap" ) diff --git a/tools_print.go b/tools_print.go index 2ac0f00..781861e 100644 --- a/tools_print.go +++ b/tools_print.go @@ -25,7 +25,7 @@ import ( "github.com/go-json-experiment/json/jsontext" "github.com/mr-tron/base58" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/spf13/cobra" "github.com/streamingfast/bstream" diff --git a/tools_unmerge_blocks.go b/tools_unmerge_blocks.go index 07aba91..1c96ab2 100644 --- 
a/tools_unmerge_blocks.go +++ b/tools_unmerge_blocks.go @@ -7,10 +7,10 @@ import ( "github.com/spf13/cobra" "github.com/streamingfast/bstream" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/cli" "github.com/streamingfast/dstore" "github.com/streamingfast/firehose-core/tools" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "go.uber.org/zap" ) diff --git a/tools_upgrade_merged_blocks.go b/tools_upgrade_merged_blocks.go index bfe7358..2120d98 100644 --- a/tools_upgrade_merged_blocks.go +++ b/tools_upgrade_merged_blocks.go @@ -7,7 +7,7 @@ import ( "io" "strconv" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/spf13/cobra" "github.com/streamingfast/bstream" diff --git a/types.go b/types.go index 394cc8d..3180793 100644 --- a/types.go +++ b/types.go @@ -10,8 +10,8 @@ import ( "github.com/spf13/cobra" "github.com/streamingfast/bstream/transform" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/dstore" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "google.golang.org/protobuf/proto" ) diff --git a/unsafe_extensions.go b/unsafe_extensions.go index e07ff78..6fc6a9e 100644 --- a/unsafe_extensions.go +++ b/unsafe_extensions.go @@ -4,9 +4,9 @@ import ( "context" "github.com/spf13/cobra" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dlauncher/launcher" - pbbstream "github.com/streamingfast/pbgo/sf/bstream/v1" "go.uber.org/zap" ) From 3f8b86d6a03eba08cdd73e090c952f758a99aaf6 Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Tue, 21 Nov 2023 09:04:02 -0500 Subject: [PATCH 10/66] Added rpc poller --- rpcpoller/fetcher.go | 13 ++ rpcpoller/init_test.go | 93 +++++++++++++ rpcpoller/poller.go | 177 ++++++++++++++++++++++++ rpcpoller/poller_test.go | 289 +++++++++++++++++++++++++++++++++++++++ 
rpcpoller/state.go | 74 ++++++++++ 5 files changed, 646 insertions(+) create mode 100644 rpcpoller/fetcher.go create mode 100644 rpcpoller/init_test.go create mode 100644 rpcpoller/poller.go create mode 100644 rpcpoller/poller_test.go create mode 100644 rpcpoller/state.go diff --git a/rpcpoller/fetcher.go b/rpcpoller/fetcher.go new file mode 100644 index 0000000..1c340f4 --- /dev/null +++ b/rpcpoller/fetcher.go @@ -0,0 +1,13 @@ +package forkhandler + +import ( + "context" + "time" + + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" +) + +type BlockFetcher interface { + PollingInterval() time.Duration + Fetch(ctx context.Context, blkNum uint64) (*pbbstream.Block, error) +} diff --git a/rpcpoller/init_test.go b/rpcpoller/init_test.go new file mode 100644 index 0000000..0206f0e --- /dev/null +++ b/rpcpoller/init_test.go @@ -0,0 +1,93 @@ +package forkhandler + +import ( + "context" + "fmt" + "testing" + "time" + + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + "github.com/streamingfast/logging" + "github.com/stretchr/testify/assert" + "go.uber.org/zap/zapcore" +) + +var logger, tracer = logging.PackageLogger("forkhandler", "github.com/streamingfast/firehose-bitcoin/forkhandler.test") + +func init() { + logging.InstantiateLoggers(logging.WithDefaultLevel(zapcore.DebugLevel)) +} + +var errCompleteDone = fmt.Errorf("complete done") + +type TestBlock struct { + expect *pbbstream.Block + send *pbbstream.Block +} + +var _ BlockFetcher = &TestBlockFetcher{} + +type TestBlockFetcher struct { + t *testing.T + blocks []*TestBlock + idx uint64 +} + +func newTestBlockFetcher(t *testing.T, blocks []*TestBlock) *TestBlockFetcher { + return &TestBlockFetcher{ + t: t, + blocks: blocks, + } +} + +func (b *TestBlockFetcher) PollingInterval() time.Duration { + return 0 +} + +func (b *TestBlockFetcher) Fetch(_ context.Context, blkNum uint64) (*pbbstream.Block, error) { + if len(b.blocks) == 0 { + assert.Fail(b.t, fmt.Sprintf("should not have 
ffetchired block %d", blkNum)) + } + + if b.idx >= uint64(len(b.blocks)) { + return nil, errCompleteDone + } + + if blkNum != b.blocks[b.idx].expect.Number { + assert.Fail(b.t, fmt.Sprintf("expected to fetch block %d, got %d", b.blocks[b.idx].expect.Number, blkNum)) + } + + blkToSend := b.blocks[b.idx].send + b.idx++ + return blkToSend, nil +} + +func (b *TestBlockFetcher) check() { + assert.Equal(b.t, uint64(len(b.blocks)), b.idx, "we should have fetched all %d blocks, only fired %d blocks", len(b.blocks), b.idx) +} + +type TestBlockFire struct { + blocks []*pbbstream.Block + idx uint64 +} + +func (b *TestBlockFire) check(t *testing.T) { + assert.Equal(t, uint64(len(b.blocks)), b.idx, "we should have fired all %d blocks, only fired %d blocks", len(b.blocks), b.idx) +} + +func (b *TestBlockFire) fetchBlockFire(t *testing.T) BlockFireFunc { + return func(p *pbbstream.Block) { + if len(b.blocks) == 0 { + assert.Fail(t, fmt.Sprintf("should not have fired block %d", p.Number)) + } + + if b.idx >= uint64(len(b.blocks)) { + assert.Fail(t, fmt.Sprintf("should not have fired block %d", p.Number)) + } + + if p.Number != b.blocks[b.idx].Number || p.Id != b.blocks[b.idx].Id { + assert.Fail(t, fmt.Sprintf("expected to tryFire block %s, got %s", b.blocks[b.idx].String(), p.String())) + } + b.idx++ + } +} diff --git a/rpcpoller/poller.go b/rpcpoller/poller.go new file mode 100644 index 0000000..685a7a4 --- /dev/null +++ b/rpcpoller/poller.go @@ -0,0 +1,177 @@ +package forkhandler + +import ( + "context" + "fmt" + "time" + + "github.com/streamingfast/derr" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/bstream/forkable" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + + "go.uber.org/zap" +) + +type BlockFireFunc func(*pbbstream.Block) + +type Poller struct { + blockFetcher BlockFetcher + blockFireFunc BlockFireFunc + fetchBlockRetryCount uint64 + forkDB *forkable.ForkDB + startBlockNumGate uint64 + logger *zap.Logger +} + +func New( + 
blockFetcher BlockFetcher, + blockFire BlockFireFunc, + logger *zap.Logger, +) *Poller { + return &Poller{ + blockFetcher: blockFetcher, + blockFireFunc: blockFire, + fetchBlockRetryCount: 4, + forkDB: forkable.NewForkDB(forkable.ForkDBWithLogger(logger)), + logger: logger, + } +} + +func (p *Poller) Run(ctx context.Context, startBlockNum uint64, finalizedBlockNum bstream.BlockRef) error { + p.startBlockNumGate = startBlockNum + resolveStartBlockNum := resolveStartBlock(startBlockNum, finalizedBlockNum.Num()) + p.logger.Info("starting poller", + zap.Uint64("start_block_num", startBlockNum), + zap.Stringer("finalized_block_num", finalizedBlockNum), + zap.Uint64("resolved_start_block_num", resolveStartBlockNum), + ) + + startBlock, err := p.blockFetcher.Fetch(ctx, resolveStartBlockNum) + if err != nil { + + return fmt.Errorf("unable to fetch start block %d: %w", resolveStartBlockNum, err) + } + + return p.run(startBlock.AsRef()) +} + +func (p *Poller) run(resolvedStartBlock bstream.BlockRef) (err error) { + currentState := &state{state: ContinuousSegState, logger: p.logger} + p.forkDB.InitLIB(resolvedStartBlock) + blkIter := resolvedStartBlock.Num() + intervalDuration := p.blockFetcher.PollingInterval() + for { + blkIter, err = p.processBlock(currentState, blkIter) + if err != nil { + return fmt.Errorf("unable to fetch block %d: %w", blkIter, err) + } + time.Sleep(intervalDuration) + } +} + +func (p *Poller) processBlock(currentState *state, blkNum uint64) (uint64, error) { + if blkNum < p.forkDB.LIBNum() { + panic(fmt.Errorf("unexpected error block %d is below the current LIB num %d. There should be no re-org above the current LIB num", blkNum, p.forkDB.LIBNum())) + } + + // On the first run, we will fetch the blk for the `startBlockRef`, since we have a `Ref` it stands + // to reason that we may already have the block. 
We could potentially optimize this + blk, err := p.fetchBlock(blkNum) + if err != nil { + return 0, fmt.Errorf("unable to fetch block %d: %w", blkNum, err) + } + + seenBlk, seenParent := p.forkDB.AddLink(blk.AsRef(), blk.ParentId, newBlock(blk)) + + currentState.addBlk(blk, seenBlk, seenParent) + + blkCompleteSegNum := currentState.getBlkSegmentNum() + blocks, reachLib := p.forkDB.CompleteSegment(blkCompleteSegNum) + p.logger.Debug("checked if block is complete segment", + zap.Uint64("blk_num", blkCompleteSegNum.Num()), + zap.Int("segment_len", len(blocks)), + zap.Bool("reached_lib", reachLib), + ) + + if reachLib { + currentState.blkIsConnectedToLib() + p.fireCompleteSegment(blocks) + + // since the block is linkable to the current lib + // we can safely set the new lib to the current block's Lib + // the assumption here is that teh Lib the Block we received from the block fetcher ir ALWAYS CORRECT + p.logger.Debug("setting lib", zap.Stringer("blk", blk.AsRef()), zap.Uint64("lib_num", blk.LibNum)) + p.forkDB.SetLIB(blk.AsRef(), "", blk.LibNum) + p.forkDB.PurgeBeforeLIB(0) + + return nextBlkInSeg(blocks), nil + } + + currentState.blkIsNotConnectedToLib() + return prevBlkInSeg(blocks), nil +} + +func (p *Poller) fetchBlock(blkNum uint64) (blk *pbbstream.Block, err error) { + var out *pbbstream.Block + if err := derr.Retry(p.fetchBlockRetryCount, func(ctx context.Context) error { + out, err = p.blockFetcher.Fetch(ctx, blkNum) + if err != nil { + return fmt.Errorf("unable to fetch block %d: %w", blkNum, err) + } + return nil + }); err != nil { + return nil, fmt.Errorf("failed to fetch block with retries %d: %w", blkNum, err) + } + return out, nil +} + +func nextBlkInSeg(blocks []*forkable.Block) uint64 { + if len(blocks) == 0 { + panic(fmt.Errorf("the blocks segments should never be empty")) + } + return blocks[len(blocks)-1].BlockNum + 1 +} + +func prevBlkInSeg(blocks []*forkable.Block) uint64 { + if len(blocks) == 0 { + panic(fmt.Errorf("the blocks segments should 
never be empty")) + } + return blocks[0].Object.(*block).ParentNum +} + +func resolveStartBlock(startBlockNum, finalizedBlockNum uint64) uint64 { + if finalizedBlockNum < startBlockNum { + return finalizedBlockNum + } + return startBlockNum +} + +type block struct { + *pbbstream.Block + fired bool +} + +func newBlock(block2 *pbbstream.Block) *block { + return &block{block2, false} +} + +func (p *Poller) fireCompleteSegment(blocks []*forkable.Block) { + for _, blk := range blocks { + if blk.BlockNum < p.startBlockNumGate { + continue + } + p.tryFire(blk.Object.(*block)) + } +} + +func (p *Poller) tryFire(b *block) bool { + if b.fired { + return false + } + p.blockFireFunc(b.Block) + p.logger.Debug("block fired", zap.Stringer("blk", b.Block.AsRef())) + b.fired = true + return true +} diff --git a/rpcpoller/poller_test.go b/rpcpoller/poller_test.go new file mode 100644 index 0000000..b93a3a7 --- /dev/null +++ b/rpcpoller/poller_test.go @@ -0,0 +1,289 @@ +package forkhandler + +import ( + "errors" + "fmt" + "strconv" + "testing" + + "go.uber.org/zap" + + "github.com/streamingfast/bstream" + "github.com/streamingfast/bstream/forkable" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + "github.com/stretchr/testify/assert" +) + +func TestForkHandler_run(t *testing.T) { + tests := []struct { + name string + startBlock bstream.BlockRef + blocks []*TestBlock + expectFireBlock []*pbbstream.Block + }{ + { + name: "start block 0", + startBlock: blk("0a", "", 0).AsRef(), + blocks: []*TestBlock{ + tb("0a", "", 0), + tb("1a", "0a", 0), + tb("2a", "1a", 0), + }, + expectFireBlock: []*pbbstream.Block{ + blk("0a", "", 0), + blk("1a", "0a", 0), + blk("2a", "1a", 0), + }, + }, + { + name: "Fork 1", + startBlock: blk("100a", "99a", 100).AsRef(), + blocks: []*TestBlock{ + tb("100a", "99a", 100), + tb("101a", "100a", 100), + tb("102a", "101a", 100), + tb("103a", "102a", 100), + tb("104b", "103b", 100), + tb("103a", "102a", 100), + tb("104a", "103a", 100), + tb("105b", 
"104b", 100), + tb("103b", "102b", 100), + tb("102b", "101a", 100), + tb("106a", "105a", 100), + tb("105a", "104a", 100), + }, + expectFireBlock: []*pbbstream.Block{ + blk("100a", "99a", 100), + blk("101a", "100a", 100), + blk("102a", "101a", 100), + blk("103a", "102a", 100), + blk("104a", "103a", 100), + blk("102b", "101a", 100), + blk("103b", "102b", 100), + blk("104b", "103b", 100), + blk("105b", "104b", 100), + blk("105a", "104a", 100), + blk("106a", "105a", 100), + }, + }, + { + name: "Fork 2", + startBlock: blk("100a", "99a", 100).AsRef(), + blocks: []*TestBlock{ + tb("100a", "99a", 100), + tb("101a", "100a", 100), + tb("102a", "101a", 100), + tb("103a", "102a", 100), + tb("104b", "103b", 100), + tb("103a", "102a", 100), + tb("104a", "103a", 100), + tb("105b", "104b", 100), + tb("103b", "102b", 100), + tb("102a", "101a", 100), + tb("103a", "104a", 100), + tb("104a", "105a", 100), + tb("105a", "104a", 100), + }, + expectFireBlock: []*pbbstream.Block{ + blk("100a", "99a", 100), + blk("101a", "100a", 100), + blk("102a", "101a", 100), + blk("103a", "102a", 100), + blk("104a", "103a", 100), + blk("105a", "104a", 100), + }, + }, + { + name: "with lib advancing", + startBlock: blk("100a", "99a", 100).AsRef(), + blocks: []*TestBlock{ + tb("100a", "99a", 100), + tb("101a", "100a", 100), + tb("102a", "101a", 100), + tb("103a", "102a", 101), + tb("104b", "103b", 101), + tb("103a", "102a", 101), + tb("104a", "103a", 101), + tb("105b", "104b", 101), + tb("103b", "102b", 101), + tb("102a", "101a", 101), + tb("103a", "104a", 101), + tb("104a", "105a", 101), + tb("105a", "104a", 101), + }, + expectFireBlock: []*pbbstream.Block{ + blk("100a", "99a", 100), + blk("101a", "100a", 100), + blk("102a", "101a", 100), + blk("103a", "102a", 100), + blk("104a", "103a", 100), + blk("105a", "104a", 100), + }, + }, + { + name: "with skipping blocks", + startBlock: blk("100a", "99a", 100).AsRef(), + blocks: []*TestBlock{ + tb("100a", "99a", 100), + tb("101a", "100a", 100), + tb("102a", 
"101a", 100), + tb("103a", "102a", 101), + tb("104b", "103b", 101), + tb("103a", "102a", 101), + tb("104a", "103a", 101), + tb("105b", "104b", 101), + tb("103b", "102b", 101), + tb("102a", "101a", 101), + tb("103a", "104a", 101), + tb("104a", "105a", 101), + tb("105a", "104a", 101), + }, + expectFireBlock: []*pbbstream.Block{ + blk("100a", "99a", 100), + blk("101a", "100a", 100), + blk("102a", "101a", 100), + blk("103a", "102a", 100), + blk("104a", "103a", 100), + blk("105a", "104a", 100), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + blockFetcher := newTestBlockFetcher(t, tt.blocks) + + blockFire := &TestBlockFire{ + blocks: tt.expectFireBlock, + } + + f := &Poller{ + blockFetcher: blockFetcher, + blockFireFunc: blockFire.fetchBlockFire(t), + forkDB: forkable.NewForkDB(), + startBlockNumGate: 0, + logger: logger, + } + + err := f.run(tt.startBlock) + if !errors.Is(err, errCompleteDone) { + assert.Fail(t, "expected errCompleteDone") + } + blockFetcher.check() + blockFire.check(t) + }) + } +} + +func TestForkHandler_resolveStartBlock(t *testing.T) { + tests := []struct { + startBlockNum uint64 + finalizedBlockNum uint64 + expected uint64 + }{ + {90, 100, 90}, + {100, 100, 100}, + {110, 100, 100}, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + assert.Equal(t, test.expected, resolveStartBlock(test.startBlockNum, test.finalizedBlockNum)) + }) + } +} + +func TestForkHandler_fireCompleteSegment(t *testing.T) { + tests := []struct { + name string + blocks []*forkable.Block + startBlockNum uint64 + expect []string + }{ + { + name: "start block less then first block", + blocks: []*forkable.Block{forkBlk("100a"), forkBlk("101a"), forkBlk("102a")}, + startBlockNum: 98, + expect: []string{"100a", "101a", "102a"}, + }, + { + name: "start block is first block", + blocks: []*forkable.Block{forkBlk("100a"), forkBlk("101a"), forkBlk("102a")}, + startBlockNum: 100, + expect: []string{"100a", "101a", "102a"}, + }, + { + 
name: "start block is middle block", + blocks: []*forkable.Block{forkBlk("100a"), forkBlk("101a"), forkBlk("102a")}, + startBlockNum: 101, + expect: []string{"101a", "102a"}, + }, + { + name: "start block is last block", + blocks: []*forkable.Block{forkBlk("100a"), forkBlk("101a"), forkBlk("102a")}, + startBlockNum: 102, + expect: []string{"102a"}, + }, + { + name: "start block is past block", blocks: []*forkable.Block{forkBlk("100a"), forkBlk("101a"), forkBlk("102a")}, + startBlockNum: 104, + expect: []string{}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + f := &Poller{startBlockNumGate: test.startBlockNum, logger: zap.NewNop()} + receviedIds := []string{} + f.blockFireFunc = func(p *pbbstream.Block) { + receviedIds = append(receviedIds, p.Id) + } + + f.fireCompleteSegment(test.blocks) + assert.Equal(t, test.expect, receviedIds) + }) + } + +} + +func tb(id, prev string, libNum uint64) *TestBlock { + return &TestBlock{ + expect: blk(id, prev, libNum), + send: blk(id, prev, libNum), + } +} + +func blk(id, prev string, libNum uint64) *pbbstream.Block { + return &pbbstream.Block{ + Number: blocknum(id), + Id: id, + ParentId: prev, + LibNum: libNum, + ParentNum: blocknum(prev), + } +} + +func forkBlk(id string) *forkable.Block { + return &forkable.Block{ + BlockID: id, + BlockNum: blocknum(id), + Object: &block{ + Block: &pbbstream.Block{ + Number: blocknum(id), + Id: id, + }, + }, + } +} + +func blocknum(blockID string) uint64 { + b := blockID + if len(blockID) < 8 { // shorter version, like 8a for 00000008a + b = fmt.Sprintf("%09s", blockID) + } + bin, err := strconv.ParseUint(b[:8], 10, 64) + if err != nil { + panic(err) + } + return bin +} diff --git a/rpcpoller/state.go b/rpcpoller/state.go new file mode 100644 index 0000000..e40addc --- /dev/null +++ b/rpcpoller/state.go @@ -0,0 +1,74 @@ +package forkhandler + +import ( + "github.com/streamingfast/bstream" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" 
+ "go.uber.org/zap" +) + +type State string + +const ( + ContinuousSegState State = "CONTINUOUS" + IncompleteSegState State = "INCOMPLETE" +) + +func (s State) String() string { + return string(s) +} + +type state struct { + currentBlk bstream.BlockRef + currentIncompleteSeg *bstream.BasicBlockRef + state State + logger *zap.Logger +} + +func (s *state) addBlk(blk *pbbstream.Block, blockSeen bool, parentSeen bool) { + blkRef := blk.AsRef() + logger := s.logger.With( + zap.Stringer("blk", blkRef), + zap.Stringer("parent_blk", blk.PreviousRef()), + zap.Bool("seen_blk", blockSeen), + zap.Bool("seen_parent", parentSeen), + zap.Stringer("previous_state", s.state), + ) + if s.currentIncompleteSeg != nil { + logger = logger.With(zap.Stringer("current_incomplete_seg", *s.currentIncompleteSeg)) + } else { + logger = logger.With(zap.String("current_incomplete_seg", "none")) + + } + + if s.state == IncompleteSegState && blockSeen && parentSeen { + // if we are checking an incomplete segement, and we get a block that is already in the forkdb + // and whose parent is also in the forkdb, then we are back on a continuous segment + s.state = ContinuousSegState + } + s.currentBlk = blkRef + logger.Debug("received block", zap.Stringer("current_state", s.state)) +} + +func (s *state) getBlkSegmentNum() bstream.BlockRef { + if s.state == IncompleteSegState { + if s.currentIncompleteSeg == nil { + panic("current incomplete segment is nil, when state is incomplete segment, this should never happen") + } + return *s.currentIncompleteSeg + } + return s.currentBlk +} + +func (s *state) blkIsConnectedToLib() { + s.state = ContinuousSegState + s.currentIncompleteSeg = nil +} + +func (s *state) blkIsNotConnectedToLib() { + if s.state != IncompleteSegState { + s.state = IncompleteSegState + // we don't want to point the current blk since that will change + v := bstream.NewBlockRef(s.currentBlk.ID(), s.currentBlk.Num()) + s.currentIncompleteSeg = &v + } +} From 
1f22525ca709974347f5aeeaccda90da4c8aa5b5 Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Tue, 21 Nov 2023 09:29:29 -0500 Subject: [PATCH 11/66] rename rpcpoller pckg to blkpoller --- {rpcpoller => blkpoller}/fetcher.go | 2 +- {rpcpoller => blkpoller}/init_test.go | 2 +- {rpcpoller => blkpoller}/poller.go | 20 ++++++++++---------- {rpcpoller => blkpoller}/poller_test.go | 6 +++--- {rpcpoller => blkpoller}/state.go | 2 +- 5 files changed, 16 insertions(+), 16 deletions(-) rename {rpcpoller => blkpoller}/fetcher.go (92%) rename {rpcpoller => blkpoller}/init_test.go (99%) rename {rpcpoller => blkpoller}/poller.go (89%) rename {rpcpoller => blkpoller}/poller_test.go (98%) rename {rpcpoller => blkpoller}/state.go (99%) diff --git a/rpcpoller/fetcher.go b/blkpoller/fetcher.go similarity index 92% rename from rpcpoller/fetcher.go rename to blkpoller/fetcher.go index 1c340f4..b325c78 100644 --- a/rpcpoller/fetcher.go +++ b/blkpoller/fetcher.go @@ -1,4 +1,4 @@ -package forkhandler +package blkpoller import ( "context" diff --git a/rpcpoller/init_test.go b/blkpoller/init_test.go similarity index 99% rename from rpcpoller/init_test.go rename to blkpoller/init_test.go index 0206f0e..2fcb608 100644 --- a/rpcpoller/init_test.go +++ b/blkpoller/init_test.go @@ -1,4 +1,4 @@ -package forkhandler +package blkpoller import ( "context" diff --git a/rpcpoller/poller.go b/blkpoller/poller.go similarity index 89% rename from rpcpoller/poller.go rename to blkpoller/poller.go index 685a7a4..bb43e67 100644 --- a/rpcpoller/poller.go +++ b/blkpoller/poller.go @@ -1,4 +1,4 @@ -package forkhandler +package blkpoller import ( "context" @@ -16,7 +16,7 @@ import ( type BlockFireFunc func(*pbbstream.Block) -type Poller struct { +type BlkPoller struct { blockFetcher BlockFetcher blockFireFunc BlockFireFunc fetchBlockRetryCount uint64 @@ -29,8 +29,8 @@ func New( blockFetcher BlockFetcher, blockFire BlockFireFunc, logger *zap.Logger, -) *Poller { - return &Poller{ +) *BlkPoller { + return 
&BlkPoller{ blockFetcher: blockFetcher, blockFireFunc: blockFire, fetchBlockRetryCount: 4, @@ -39,7 +39,7 @@ func New( } } -func (p *Poller) Run(ctx context.Context, startBlockNum uint64, finalizedBlockNum bstream.BlockRef) error { +func (p *BlkPoller) Run(ctx context.Context, startBlockNum uint64, finalizedBlockNum bstream.BlockRef) error { p.startBlockNumGate = startBlockNum resolveStartBlockNum := resolveStartBlock(startBlockNum, finalizedBlockNum.Num()) p.logger.Info("starting poller", @@ -57,7 +57,7 @@ func (p *Poller) Run(ctx context.Context, startBlockNum uint64, finalizedBlockNu return p.run(startBlock.AsRef()) } -func (p *Poller) run(resolvedStartBlock bstream.BlockRef) (err error) { +func (p *BlkPoller) run(resolvedStartBlock bstream.BlockRef) (err error) { currentState := &state{state: ContinuousSegState, logger: p.logger} p.forkDB.InitLIB(resolvedStartBlock) blkIter := resolvedStartBlock.Num() @@ -71,7 +71,7 @@ func (p *Poller) run(resolvedStartBlock bstream.BlockRef) (err error) { } } -func (p *Poller) processBlock(currentState *state, blkNum uint64) (uint64, error) { +func (p *BlkPoller) processBlock(currentState *state, blkNum uint64) (uint64, error) { if blkNum < p.forkDB.LIBNum() { panic(fmt.Errorf("unexpected error block %d is below the current LIB num %d. 
There should be no re-org above the current LIB num", blkNum, p.forkDB.LIBNum())) } @@ -113,7 +113,7 @@ func (p *Poller) processBlock(currentState *state, blkNum uint64) (uint64, error return prevBlkInSeg(blocks), nil } -func (p *Poller) fetchBlock(blkNum uint64) (blk *pbbstream.Block, err error) { +func (p *BlkPoller) fetchBlock(blkNum uint64) (blk *pbbstream.Block, err error) { var out *pbbstream.Block if err := derr.Retry(p.fetchBlockRetryCount, func(ctx context.Context) error { out, err = p.blockFetcher.Fetch(ctx, blkNum) @@ -157,7 +157,7 @@ func newBlock(block2 *pbbstream.Block) *block { return &block{block2, false} } -func (p *Poller) fireCompleteSegment(blocks []*forkable.Block) { +func (p *BlkPoller) fireCompleteSegment(blocks []*forkable.Block) { for _, blk := range blocks { if blk.BlockNum < p.startBlockNumGate { continue @@ -166,7 +166,7 @@ func (p *Poller) fireCompleteSegment(blocks []*forkable.Block) { } } -func (p *Poller) tryFire(b *block) bool { +func (p *BlkPoller) tryFire(b *block) bool { if b.fired { return false } diff --git a/rpcpoller/poller_test.go b/blkpoller/poller_test.go similarity index 98% rename from rpcpoller/poller_test.go rename to blkpoller/poller_test.go index b93a3a7..c1e3987 100644 --- a/rpcpoller/poller_test.go +++ b/blkpoller/poller_test.go @@ -1,4 +1,4 @@ -package forkhandler +package blkpoller import ( "errors" @@ -157,7 +157,7 @@ func TestForkHandler_run(t *testing.T) { blocks: tt.expectFireBlock, } - f := &Poller{ + f := &BlkPoller{ blockFetcher: blockFetcher, blockFireFunc: blockFire.fetchBlockFire(t), forkDB: forkable.NewForkDB(), @@ -233,7 +233,7 @@ func TestForkHandler_fireCompleteSegment(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - f := &Poller{startBlockNumGate: test.startBlockNum, logger: zap.NewNop()} + f := &BlkPoller{startBlockNumGate: test.startBlockNum, logger: zap.NewNop()} receviedIds := []string{} f.blockFireFunc = func(p *pbbstream.Block) { receviedIds = 
append(receviedIds, p.Id) diff --git a/rpcpoller/state.go b/blkpoller/state.go similarity index 99% rename from rpcpoller/state.go rename to blkpoller/state.go index e40addc..2f1ff4a 100644 --- a/rpcpoller/state.go +++ b/blkpoller/state.go @@ -1,4 +1,4 @@ -package forkhandler +package blkpoller import ( "github.com/streamingfast/bstream" From e146a25e8ee97fee28258f0da72e7a5289a69a2d Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Tue, 21 Nov 2023 09:31:45 -0500 Subject: [PATCH 12/66] rename blkpoller to blockpoller --- {blkpoller => blockpoller}/fetcher.go | 2 +- {blkpoller => blockpoller}/init_test.go | 2 +- {blkpoller => blockpoller}/poller.go | 26 ++++++++++++----------- {blkpoller => blockpoller}/poller_test.go | 6 +++--- {blkpoller => blockpoller}/state.go | 2 +- 5 files changed, 20 insertions(+), 18 deletions(-) rename {blkpoller => blockpoller}/fetcher.go (92%) rename {blkpoller => blockpoller}/init_test.go (99%) rename {blkpoller => blockpoller}/poller.go (88%) rename {blkpoller => blockpoller}/poller_test.go (98%) rename {blkpoller => blockpoller}/state.go (99%) diff --git a/blkpoller/fetcher.go b/blockpoller/fetcher.go similarity index 92% rename from blkpoller/fetcher.go rename to blockpoller/fetcher.go index b325c78..b4c81e8 100644 --- a/blkpoller/fetcher.go +++ b/blockpoller/fetcher.go @@ -1,4 +1,4 @@ -package blkpoller +package blockpoller import ( "context" diff --git a/blkpoller/init_test.go b/blockpoller/init_test.go similarity index 99% rename from blkpoller/init_test.go rename to blockpoller/init_test.go index 2fcb608..1c6c703 100644 --- a/blkpoller/init_test.go +++ b/blockpoller/init_test.go @@ -1,4 +1,4 @@ -package blkpoller +package blockpoller import ( "context" diff --git a/blkpoller/poller.go b/blockpoller/poller.go similarity index 88% rename from blkpoller/poller.go rename to blockpoller/poller.go index bb43e67..d9f24a9 100644 --- a/blkpoller/poller.go +++ b/blockpoller/poller.go @@ -1,4 +1,4 @@ -package blkpoller +package blockpoller 
import ( "context" @@ -16,21 +16,23 @@ import ( type BlockFireFunc func(*pbbstream.Block) -type BlkPoller struct { +type BlockPoller struct { + // the block number at which + startBlockNumGate uint64 blockFetcher BlockFetcher blockFireFunc BlockFireFunc fetchBlockRetryCount uint64 forkDB *forkable.ForkDB - startBlockNumGate uint64 - logger *zap.Logger + + logger *zap.Logger } func New( blockFetcher BlockFetcher, blockFire BlockFireFunc, logger *zap.Logger, -) *BlkPoller { - return &BlkPoller{ +) *BlockPoller { + return &BlockPoller{ blockFetcher: blockFetcher, blockFireFunc: blockFire, fetchBlockRetryCount: 4, @@ -39,7 +41,7 @@ func New( } } -func (p *BlkPoller) Run(ctx context.Context, startBlockNum uint64, finalizedBlockNum bstream.BlockRef) error { +func (p *BlockPoller) Run(ctx context.Context, startBlockNum uint64, finalizedBlockNum bstream.BlockRef) error { p.startBlockNumGate = startBlockNum resolveStartBlockNum := resolveStartBlock(startBlockNum, finalizedBlockNum.Num()) p.logger.Info("starting poller", @@ -57,7 +59,7 @@ func (p *BlkPoller) Run(ctx context.Context, startBlockNum uint64, finalizedBloc return p.run(startBlock.AsRef()) } -func (p *BlkPoller) run(resolvedStartBlock bstream.BlockRef) (err error) { +func (p *BlockPoller) run(resolvedStartBlock bstream.BlockRef) (err error) { currentState := &state{state: ContinuousSegState, logger: p.logger} p.forkDB.InitLIB(resolvedStartBlock) blkIter := resolvedStartBlock.Num() @@ -71,7 +73,7 @@ func (p *BlkPoller) run(resolvedStartBlock bstream.BlockRef) (err error) { } } -func (p *BlkPoller) processBlock(currentState *state, blkNum uint64) (uint64, error) { +func (p *BlockPoller) processBlock(currentState *state, blkNum uint64) (uint64, error) { if blkNum < p.forkDB.LIBNum() { panic(fmt.Errorf("unexpected error block %d is below the current LIB num %d. 
There should be no re-org above the current LIB num", blkNum, p.forkDB.LIBNum())) } @@ -113,7 +115,7 @@ func (p *BlkPoller) processBlock(currentState *state, blkNum uint64) (uint64, er return prevBlkInSeg(blocks), nil } -func (p *BlkPoller) fetchBlock(blkNum uint64) (blk *pbbstream.Block, err error) { +func (p *BlockPoller) fetchBlock(blkNum uint64) (blk *pbbstream.Block, err error) { var out *pbbstream.Block if err := derr.Retry(p.fetchBlockRetryCount, func(ctx context.Context) error { out, err = p.blockFetcher.Fetch(ctx, blkNum) @@ -157,7 +159,7 @@ func newBlock(block2 *pbbstream.Block) *block { return &block{block2, false} } -func (p *BlkPoller) fireCompleteSegment(blocks []*forkable.Block) { +func (p *BlockPoller) fireCompleteSegment(blocks []*forkable.Block) { for _, blk := range blocks { if blk.BlockNum < p.startBlockNumGate { continue @@ -166,7 +168,7 @@ func (p *BlkPoller) fireCompleteSegment(blocks []*forkable.Block) { } } -func (p *BlkPoller) tryFire(b *block) bool { +func (p *BlockPoller) tryFire(b *block) bool { if b.fired { return false } diff --git a/blkpoller/poller_test.go b/blockpoller/poller_test.go similarity index 98% rename from blkpoller/poller_test.go rename to blockpoller/poller_test.go index c1e3987..738d662 100644 --- a/blkpoller/poller_test.go +++ b/blockpoller/poller_test.go @@ -1,4 +1,4 @@ -package blkpoller +package blockpoller import ( "errors" @@ -157,7 +157,7 @@ func TestForkHandler_run(t *testing.T) { blocks: tt.expectFireBlock, } - f := &BlkPoller{ + f := &BlockPoller{ blockFetcher: blockFetcher, blockFireFunc: blockFire.fetchBlockFire(t), forkDB: forkable.NewForkDB(), @@ -233,7 +233,7 @@ func TestForkHandler_fireCompleteSegment(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - f := &BlkPoller{startBlockNumGate: test.startBlockNum, logger: zap.NewNop()} + f := &BlockPoller{startBlockNumGate: test.startBlockNum, logger: zap.NewNop()} receviedIds := []string{} f.blockFireFunc = func(p 
*pbbstream.Block) { receviedIds = append(receviedIds, p.Id) diff --git a/blkpoller/state.go b/blockpoller/state.go similarity index 99% rename from blkpoller/state.go rename to blockpoller/state.go index 2f1ff4a..8c3cb99 100644 --- a/blkpoller/state.go +++ b/blockpoller/state.go @@ -1,4 +1,4 @@ -package blkpoller +package blockpoller import ( "github.com/streamingfast/bstream" From 2dda9778d497aa6f591590229018a50e30327566 Mon Sep 17 00:00:00 2001 From: billettc Date: Tue, 21 Nov 2023 13:07:05 -0500 Subject: [PATCH 13/66] poller BlockFireFunc is now for internal use and tests --- blockpoller/init_test.go | 44 ++++--------- blockpoller/poller.go | 91 ++++++++++++++++++++------- blockpoller/poller_test.go | 72 ++++++++++++--------- go.mod | 5 +- go.sum | 2 + index_builder.go | 3 +- node-manager/mindreader/mindreader.go | 3 +- 7 files changed, 129 insertions(+), 91 deletions(-) diff --git a/blockpoller/init_test.go b/blockpoller/init_test.go index 1c6c703..82d40fc 100644 --- a/blockpoller/init_test.go +++ b/blockpoller/init_test.go @@ -9,6 +9,7 @@ import ( pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/logging" "github.com/stretchr/testify/assert" + "github.com/test-go/testify/require" "go.uber.org/zap/zapcore" ) @@ -18,7 +19,7 @@ func init() { logging.InstantiateLoggers(logging.WithDefaultLevel(zapcore.DebugLevel)) } -var errCompleteDone = fmt.Errorf("complete done") +var TestErrCompleteDone = fmt.Errorf("complete done") type TestBlock struct { expect *pbbstream.Block @@ -28,9 +29,10 @@ type TestBlock struct { var _ BlockFetcher = &TestBlockFetcher{} type TestBlockFetcher struct { - t *testing.T - blocks []*TestBlock - idx uint64 + t *testing.T + blocks []*TestBlock + idx uint64 + completed bool } func newTestBlockFetcher(t *testing.T, blocks []*TestBlock) *TestBlockFetcher { @@ -50,7 +52,8 @@ func (b *TestBlockFetcher) Fetch(_ context.Context, blkNum uint64) (*pbbstream.B } if b.idx >= uint64(len(b.blocks)) { - return 
nil, errCompleteDone + b.completed = true + return nil, nil } if blkNum != b.blocks[b.idx].expect.Number { @@ -62,32 +65,7 @@ func (b *TestBlockFetcher) Fetch(_ context.Context, blkNum uint64) (*pbbstream.B return blkToSend, nil } -func (b *TestBlockFetcher) check() { - assert.Equal(b.t, uint64(len(b.blocks)), b.idx, "we should have fetched all %d blocks, only fired %d blocks", len(b.blocks), b.idx) -} - -type TestBlockFire struct { - blocks []*pbbstream.Block - idx uint64 -} - -func (b *TestBlockFire) check(t *testing.T) { - assert.Equal(t, uint64(len(b.blocks)), b.idx, "we should have fired all %d blocks, only fired %d blocks", len(b.blocks), b.idx) -} - -func (b *TestBlockFire) fetchBlockFire(t *testing.T) BlockFireFunc { - return func(p *pbbstream.Block) { - if len(b.blocks) == 0 { - assert.Fail(t, fmt.Sprintf("should not have fired block %d", p.Number)) - } - - if b.idx >= uint64(len(b.blocks)) { - assert.Fail(t, fmt.Sprintf("should not have fired block %d", p.Number)) - } - - if p.Number != b.blocks[b.idx].Number || p.Id != b.blocks[b.idx].Id { - assert.Fail(t, fmt.Sprintf("expected to tryFire block %s, got %s", b.blocks[b.idx].String(), p.String())) - } - b.idx++ - } +func (b *TestBlockFetcher) check(t *testing.T) { + t.Helper() + require.Equal(b.t, uint64(len(b.blocks)), b.idx, "we should have fetched all %d blocks, only fired %d blocks", len(b.blocks), b.idx) } diff --git a/blockpoller/poller.go b/blockpoller/poller.go index d9f24a9..853f0d9 100644 --- a/blockpoller/poller.go +++ b/blockpoller/poller.go @@ -2,43 +2,47 @@ package blockpoller import ( "context" + "encoding/base64" "fmt" "time" - "github.com/streamingfast/derr" - "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/forkable" pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" - + "github.com/streamingfast/derr" "go.uber.org/zap" + "google.golang.org/protobuf/types/known/anypb" ) -type BlockFireFunc func(*pbbstream.Block) +type BlockFireFunc func(b *block) error 
type BlockPoller struct { + blockTypeURL string // the block number at which startBlockNumGate uint64 blockFetcher BlockFetcher - blockFireFunc BlockFireFunc fetchBlockRetryCount uint64 - forkDB *forkable.ForkDB - logger *zap.Logger + forkDB *forkable.ForkDB + logger *zap.Logger + fireFunc BlockFireFunc + stopRequested bool } func New( + blockType string, blockFetcher BlockFetcher, - blockFire BlockFireFunc, logger *zap.Logger, ) *BlockPoller { - return &BlockPoller{ + poller := &BlockPoller{ + blockTypeURL: blockType, blockFetcher: blockFetcher, - blockFireFunc: blockFire, fetchBlockRetryCount: 4, forkDB: forkable.NewForkDB(forkable.ForkDBWithLogger(logger)), logger: logger, } + poller.fireFunc = poller.fire + return poller } func (p *BlockPoller) Run(ctx context.Context, startBlockNum uint64, finalizedBlockNum bstream.BlockRef) error { @@ -50,6 +54,9 @@ func (p *BlockPoller) Run(ctx context.Context, startBlockNum uint64, finalizedBl zap.Uint64("resolved_start_block_num", resolveStartBlockNum), ) + //initLine := "FIRE INIT 1.0 sf.ethereum.type.v2.Block" + fmt.Println("FIRE INIT 1.0 ", p.blockTypeURL) + startBlock, err := p.blockFetcher.Fetch(ctx, resolveStartBlockNum) if err != nil { @@ -69,10 +76,17 @@ func (p *BlockPoller) run(resolvedStartBlock bstream.BlockRef) (err error) { if err != nil { return fmt.Errorf("unable to fetch block %d: %w", blkIter, err) } + if p.stopRequested { + return nil + } time.Sleep(intervalDuration) } } +func (p *BlockPoller) Stop() { + p.stopRequested = true +} + func (p *BlockPoller) processBlock(currentState *state, blkNum uint64) (uint64, error) { if blkNum < p.forkDB.LIBNum() { panic(fmt.Errorf("unexpected error block %d is below the current LIB num %d. 
There should be no re-org above the current LIB num", blkNum, p.forkDB.LIBNum())) @@ -99,7 +113,10 @@ func (p *BlockPoller) processBlock(currentState *state, blkNum uint64) (uint64, if reachLib { currentState.blkIsConnectedToLib() - p.fireCompleteSegment(blocks) + err = p.fireCompleteSegment(blocks, p.fireFunc) + if err != nil { + return 0, fmt.Errorf("firing complete segment: %w", err) + } // since the block is linkable to the current lib // we can safely set the new lib to the current block's Lib @@ -117,15 +134,18 @@ func (p *BlockPoller) processBlock(currentState *state, blkNum uint64) (uint64, func (p *BlockPoller) fetchBlock(blkNum uint64) (blk *pbbstream.Block, err error) { var out *pbbstream.Block - if err := derr.Retry(p.fetchBlockRetryCount, func(ctx context.Context) error { + err = derr.Retry(p.fetchBlockRetryCount, func(ctx context.Context) error { out, err = p.blockFetcher.Fetch(ctx, blkNum) if err != nil { return fmt.Errorf("unable to fetch block %d: %w", blkNum, err) } return nil - }); err != nil { + }) + + if err != nil { return nil, fmt.Errorf("failed to fetch block with retries %d: %w", blkNum, err) } + return out, nil } @@ -159,21 +179,48 @@ func newBlock(block2 *pbbstream.Block) *block { return &block{block2, false} } -func (p *BlockPoller) fireCompleteSegment(blocks []*forkable.Block) { +func (p *BlockPoller) fireCompleteSegment(blocks []*forkable.Block, fireFunc BlockFireFunc) error { for _, blk := range blocks { if blk.BlockNum < p.startBlockNumGate { continue } - p.tryFire(blk.Object.(*block)) + + b := blk.Object.(*block) + if !b.fired { + err := fireFunc(b) + if err != nil { + return fmt.Errorf("fireing block %d %q: %w", b.Block.Number, b.Block.Id, err) + } + b.fired = true + } } + return nil } -func (p *BlockPoller) tryFire(b *block) bool { - if b.fired { - return false +func (p *BlockPoller) fire(b *block) error { + + //blockLine := "FIRE BLOCK 18571000 d2836a703a02f3ca2a13f05efe26fc48c6fa0db0d754a49e56b066d3b7d54659 18570999 
55de88c909fa368ae1e93b6b8ffb3fbb12e64aefec1d4a1fcc27ae7633de2f81 18570800 1699992393935935000 Ci10eXBlLmdvb2dsZWFwaXMuY29tL3NmLmV0aGVyZXVtLnR5cGUudjIuQmxvY2sSJxIg0oNqcDoC88oqE/Be/ib8SMb6DbDXVKSeVrBm07fVRlkY+L3tCA==" + anyBlock, err := anypb.New(b.Block) + if err != nil { + return fmt.Errorf("converting block to anypb: %w", err) } - p.blockFireFunc(b.Block) - p.logger.Debug("block fired", zap.Stringer("blk", b.Block.AsRef())) - b.fired = true - return true + + if anyBlock.TypeUrl != p.blockTypeURL { + return fmt.Errorf("block type url %q does not match expected type %q", anyBlock.TypeUrl, p.blockTypeURL) + } + + blockLine := fmt.Sprintf( + "FIRE BLOCK %d %s %d %s %d %d %s", + b.Block.Number, + b.Block.Id, + b.Block.ParentNum, + b.Block.ParentId, + b.Block.LibNum, + b.Block.Timestamp.AsTime().UnixNano(), + base64.StdEncoding.EncodeToString(anyBlock.Value), + ) + + fmt.Println(blockLine) + + return nil } diff --git a/blockpoller/poller_test.go b/blockpoller/poller_test.go index 738d662..c15812d 100644 --- a/blockpoller/poller_test.go +++ b/blockpoller/poller_test.go @@ -1,17 +1,17 @@ package blockpoller import ( - "errors" "fmt" "strconv" "testing" - - "go.uber.org/zap" + "time" "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/forkable" pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" ) func TestForkHandler_run(t *testing.T) { @@ -153,24 +153,38 @@ func TestForkHandler_run(t *testing.T) { blockFetcher := newTestBlockFetcher(t, tt.blocks) - blockFire := &TestBlockFire{ - blocks: tt.expectFireBlock, - } + f := New("test", blockFetcher, zap.NewNop()) + f.forkDB = forkable.NewForkDB() + + done := make(chan error) + firedBlock := 0 - f := &BlockPoller{ - blockFetcher: blockFetcher, - blockFireFunc: blockFire.fetchBlockFire(t), - forkDB: forkable.NewForkDB(), - startBlockNumGate: 0, - logger: logger, + f.fireFunc = func(blk *block) error { 
+ if blk.Number != tt.expectFireBlock[firedBlock].Number || blk.Id != tt.expectFireBlock[firedBlock].Id { + done <- fmt.Errorf("expected [%d] to fire block %d %q, got %d %q", firedBlock, tt.expectFireBlock[firedBlock].Number, tt.expectFireBlock[firedBlock].Id, blk.Number, blk.Id) + } + + firedBlock++ + if firedBlock >= len(tt.expectFireBlock) { + f.Stop() + close(done) + } + + return nil } - err := f.run(tt.startBlock) - if !errors.Is(err, errCompleteDone) { - assert.Fail(t, "expected errCompleteDone") + go func() { + err := f.run(tt.startBlock) + require.NoError(t, err) + }() + + select { + case err := <-done: + require.NoError(t, err) + blockFetcher.check(t) + case <-time.After(1 * time.Second): + t.Fatal("timeout, missing fetch calls") } - blockFetcher.check() - blockFire.check(t) }) } } @@ -234,13 +248,13 @@ func TestForkHandler_fireCompleteSegment(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { f := &BlockPoller{startBlockNumGate: test.startBlockNum, logger: zap.NewNop()} - receviedIds := []string{} - f.blockFireFunc = func(p *pbbstream.Block) { - receviedIds = append(receviedIds, p.Id) - } - - f.fireCompleteSegment(test.blocks) - assert.Equal(t, test.expect, receviedIds) + receivedIds := []string{} + err := f.fireCompleteSegment(test.blocks, func(p *block) error { + receivedIds = append(receivedIds, p.Id) + return nil + }) + require.NoError(t, err) + assert.Equal(t, test.expect, receivedIds) }) } @@ -255,28 +269,28 @@ func tb(id, prev string, libNum uint64) *TestBlock { func blk(id, prev string, libNum uint64) *pbbstream.Block { return &pbbstream.Block{ - Number: blocknum(id), + Number: blockNum(id), Id: id, ParentId: prev, LibNum: libNum, - ParentNum: blocknum(prev), + ParentNum: blockNum(prev), } } func forkBlk(id string) *forkable.Block { return &forkable.Block{ BlockID: id, - BlockNum: blocknum(id), + BlockNum: blockNum(id), Object: &block{ Block: &pbbstream.Block{ - Number: blocknum(id), + Number: blockNum(id), Id: id, 
}, }, } } -func blocknum(blockID string) uint64 { +func blockNum(blockID string) uint64 { b := blockID if len(blockID) < 8 { // shorter version, like 8a for 00000008a b = fmt.Sprintf("%09s", blockID) diff --git a/go.mod b/go.mod index 4139b3d..f355dc2 100644 --- a/go.mod +++ b/go.mod @@ -2,8 +2,6 @@ module github.com/streamingfast/firehose-core go 1.21 -replace github.com/streamingfast/bstream => ../bstream - require ( github.com/ShinyTrinkets/overseer v0.3.0 github.com/dustin/go-humanize v1.0.1 @@ -14,7 +12,7 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 - github.com/streamingfast/bstream v0.0.2-0.20231120175342-23d38d055176 + github.com/streamingfast/bstream v0.0.2-0.20231121140754-a458ffe57f0d github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c @@ -30,6 +28,7 @@ require ( github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0 github.com/streamingfast/substreams v1.1.21-0.20231120175501-9d89549d81a1 github.com/stretchr/testify v1.8.4 + github.com/test-go/testify v1.1.4 go.uber.org/multierr v1.10.0 go.uber.org/zap v1.26.0 golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 diff --git a/go.sum b/go.sum index 2dc82fe..cc63f16 100644 --- a/go.sum +++ b/go.sum @@ -574,6 +574,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/streamingfast/bstream v0.0.2-0.20231121140754-a458ffe57f0d h1:yPqseR0o2RFU2HNoAi6hqIszWr8WVoOQbnHKcEI2zZE= +github.com/streamingfast/bstream v0.0.2-0.20231121140754-a458ffe57f0d/go.mod 
h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 h1:UxJUTcEVkdZy8N77E3exz0iNlgQuxl4m220GPvzdZ2s= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80/go.mod h1:QxjVH73Lkqk+mP8bndvhMuQDUINfkgsYhdCH/5TJFKI= github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 h1:49JYZkn8ALGe+LhcACZyX3L9B8tIxRZ3F3l+OxmNMhY= diff --git a/index_builder.go b/index_builder.go index 6878d71..9244a70 100644 --- a/index_builder.go +++ b/index_builder.go @@ -4,14 +4,13 @@ import ( "context" "fmt" - index_builder "github.com/streamingfast/firehose-core/index-builder/app/index-builder" - "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/streamingfast/bstream" bstransform "github.com/streamingfast/bstream/transform" pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/dlauncher/launcher" + index_builder "github.com/streamingfast/firehose-core/index-builder/app/index-builder" ) func registerIndexBuilderApp[B Block](chain *Chain[B]) { diff --git a/node-manager/mindreader/mindreader.go b/node-manager/mindreader/mindreader.go index e1314a3..c0ea51a 100644 --- a/node-manager/mindreader/mindreader.go +++ b/node-manager/mindreader/mindreader.go @@ -23,10 +23,9 @@ import ( "regexp" "sync" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" - "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/blockstream" + pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/dstore" nodeManager "github.com/streamingfast/firehose-core/node-manager" "github.com/streamingfast/logging" From 297b1ef5bed31192792b7c1f37319627fff1e3b3 Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Tue, 21 Nov 2023 20:33:27 -0500 Subject: [PATCH 14/66] clean up poller and bstream --- blockpoller/fetcher.go | 13 --- blockpoller/init_test.go | 59 +++++++++- blockpoller/options.go | 23 ++++ blockpoller/poller.go | 123 
++++++++++----------- blockpoller/poller_test.go | 89 +++++---------- blockpoller/state.go | 14 +-- blockpoller/state_file.go | 121 ++++++++++++++++++++ blockpoller/types.go | 67 +++++++++++ chain.go | 2 +- cmd/firecore/main.go | 2 +- consolereader.go | 2 +- firehose/app/firehose/app.go | 2 +- firehose/factory.go | 2 +- firehose/server/blocks.go | 2 +- firehose/tests/integration_test.go | 8 +- firehose/tests/stream_blocks_test.go | 2 +- go.mod | 4 +- go.sum | 4 + index-builder/index-builder.go | 2 +- index_builder.go | 2 +- merger/bundler.go | 2 +- merger/merger_io.go | 2 +- node-manager/app/node_reader_stdin/app.go | 2 +- node-manager/mindreader/archiver.go | 2 +- node-manager/mindreader/mindreader.go | 2 +- node-manager/mindreader/mindreader_test.go | 2 +- node-manager/monitor.go | 2 +- node-manager/types.go | 2 +- reader_node.go | 2 +- relayer/relayer.go | 2 +- tools_check.go | 2 +- tools_check_blocks.go | 7 +- tools_check_merged_batch.go | 2 +- tools_download_from_firehose.go | 2 +- tools_fix_bloated_merged_blocks.go | 2 +- tools_print.go | 2 +- tools_unmerge_blocks.go | 2 +- tools_upgrade_merged_blocks.go | 2 +- types.go | 2 +- unsafe_extensions.go | 2 +- 40 files changed, 400 insertions(+), 188 deletions(-) delete mode 100644 blockpoller/fetcher.go create mode 100644 blockpoller/options.go create mode 100644 blockpoller/state_file.go create mode 100644 blockpoller/types.go diff --git a/blockpoller/fetcher.go b/blockpoller/fetcher.go deleted file mode 100644 index b4c81e8..0000000 --- a/blockpoller/fetcher.go +++ /dev/null @@ -1,13 +0,0 @@ -package blockpoller - -import ( - "context" - "time" - - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" -) - -type BlockFetcher interface { - PollingInterval() time.Duration - Fetch(ctx context.Context, blkNum uint64) (*pbbstream.Block, error) -} diff --git a/blockpoller/init_test.go b/blockpoller/init_test.go index 82d40fc..dd4c909 100644 --- a/blockpoller/init_test.go +++ b/blockpoller/init_test.go @@ 
-6,7 +6,9 @@ import ( "testing" "time" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + "github.com/streamingfast/derr" + + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/logging" "github.com/stretchr/testify/assert" "github.com/test-go/testify/require" @@ -19,7 +21,7 @@ func init() { logging.InstantiateLoggers(logging.WithDefaultLevel(zapcore.DebugLevel)) } -var TestErrCompleteDone = fmt.Errorf("complete done") +var TestErrCompleteDone = fmt.Errorf("test complete done") type TestBlock struct { expect *pbbstream.Block @@ -48,12 +50,11 @@ func (b *TestBlockFetcher) PollingInterval() time.Duration { func (b *TestBlockFetcher) Fetch(_ context.Context, blkNum uint64) (*pbbstream.Block, error) { if len(b.blocks) == 0 { - assert.Fail(b.t, fmt.Sprintf("should not have ffetchired block %d", blkNum)) + assert.Fail(b.t, fmt.Sprintf("should not have fetched block %d", blkNum)) } if b.idx >= uint64(len(b.blocks)) { - b.completed = true - return nil, nil + return nil, derr.NewFatalError(TestErrCompleteDone) } if blkNum != b.blocks[b.idx].expect.Number { @@ -69,3 +70,51 @@ func (b *TestBlockFetcher) check(t *testing.T) { t.Helper() require.Equal(b.t, uint64(len(b.blocks)), b.idx, "we should have fetched all %d blocks, only fired %d blocks", len(b.blocks), b.idx) } + +var _ BlockFinalizer = &TestBlockFinalizer{} + +type TestBlockFinalizer struct { + t *testing.T + fireBlocks []*pbbstream.Block + idx uint64 +} + +func newTestBlockFinalizer(t *testing.T, fireBlocks []*pbbstream.Block) *TestBlockFinalizer { + return &TestBlockFinalizer{ + t: t, + fireBlocks: fireBlocks, + } +} + +func (t *TestBlockFinalizer) Init() { + //TODO implement me + panic("implement me") +} + +func (t *TestBlockFinalizer) Fire(blk *pbbstream.Block) error { + if len(t.fireBlocks) == 0 { + assert.Fail(t.t, fmt.Sprintf("should not have fired block %s", blk.AsRef())) + } + + if t.idx >= uint64(len(t.fireBlocks)) { + return TestErrCompleteDone + } + 
+ if blk.Number != t.fireBlocks[t.idx].Number { + assert.Fail(t.t, fmt.Sprintf("expected to fetch block %d, got %d", t.fireBlocks[t.idx].Number, blk.Number)) + } + t.idx++ + return nil +} + +func (b *TestBlockFinalizer) check(t *testing.T) { + t.Helper() + require.Equal(b.t, uint64(len(b.fireBlocks)), b.idx, "we should have fired all %d blocks, only fired %d blocks", len(b.fireBlocks), b.idx) +} + +var _ BlockFinalizer = &TestNoopBlockFinalizer{} + +type TestNoopBlockFinalizer struct{} + +func (t *TestNoopBlockFinalizer) Init() {} +func (t *TestNoopBlockFinalizer) Fire(blk *pbbstream.Block) error { return nil } diff --git a/blockpoller/options.go b/blockpoller/options.go new file mode 100644 index 0000000..5554783 --- /dev/null +++ b/blockpoller/options.go @@ -0,0 +1,23 @@ +package blockpoller + +import "go.uber.org/zap" + +type Option func(*BlockPoller) + +func WithBlockFetchRetryCount(v uint64) Option { + return func(p *BlockPoller) { + p.fetchBlockRetryCount = v + } +} + +func WithStoringState(stateStorePath string) Option { + return func(p *BlockPoller) { + p.stateStorePath = stateStorePath + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(p *BlockPoller) { + p.logger = logger + } +} diff --git a/blockpoller/poller.go b/blockpoller/poller.go index 853f0d9..502f848 100644 --- a/blockpoller/poller.go +++ b/blockpoller/poller.go @@ -2,47 +2,49 @@ package blockpoller import ( "context" - "encoding/base64" "fmt" "time" "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/forkable" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/derr" + "github.com/streamingfast/shutter" "go.uber.org/zap" - "google.golang.org/protobuf/types/known/anypb" ) -type BlockFireFunc func(b *block) error - type BlockPoller struct { - blockTypeURL string - // the block number at which + *shutter.Shutter startBlockNumGate uint64 - blockFetcher 
BlockFetcher fetchBlockRetryCount uint64 + stateStorePath string + + blockFetcher BlockFetcher + blockFinalizer BlockFinalizer + forkDB *forkable.ForkDB - forkDB *forkable.ForkDB - logger *zap.Logger - fireFunc BlockFireFunc - stopRequested bool + logger *zap.Logger } func New( - blockType string, blockFetcher BlockFetcher, - logger *zap.Logger, + blockFinalizer BlockFinalizer, + opts ...Option, ) *BlockPoller { - poller := &BlockPoller{ - blockTypeURL: blockType, + + b := &BlockPoller{ + Shutter: shutter.New(), blockFetcher: blockFetcher, + blockFinalizer: blockFinalizer, fetchBlockRetryCount: 4, - forkDB: forkable.NewForkDB(forkable.ForkDBWithLogger(logger)), - logger: logger, + logger: zap.NewNop(), } - poller.fireFunc = poller.fire - return poller + + for _, opt := range opts { + opt(b) + } + + return b } func (p *BlockPoller) Run(ctx context.Context, startBlockNum uint64, finalizedBlockNum bstream.BlockRef) error { @@ -54,8 +56,7 @@ func (p *BlockPoller) Run(ctx context.Context, startBlockNum uint64, finalizedBl zap.Uint64("resolved_start_block_num", resolveStartBlockNum), ) - //initLine := "FIRE INIT 1.0 sf.ethereum.type.v2.Block" - fmt.Println("FIRE INIT 1.0 ", p.blockTypeURL) + p.blockFinalizer.Init() startBlock, err := p.blockFetcher.Fetch(ctx, resolveStartBlockNum) if err != nil { @@ -67,27 +68,34 @@ func (p *BlockPoller) Run(ctx context.Context, startBlockNum uint64, finalizedBl } func (p *BlockPoller) run(resolvedStartBlock bstream.BlockRef) (err error) { - currentState := &state{state: ContinuousSegState, logger: p.logger} - p.forkDB.InitLIB(resolvedStartBlock) + + p.forkDB, resolvedStartBlock, err = p.initState(resolvedStartBlock) + if err != nil { + return fmt.Errorf("unable to initialize cursor: %w", err) + } + + currentCursor := &cursor{state: ContinuousSegState, logger: p.logger} blkIter := resolvedStartBlock.Num() intervalDuration := p.blockFetcher.PollingInterval() for { - blkIter, err = p.processBlock(currentState, blkIter) + if 
p.IsTerminating() { + p.logger.Info("block poller is terminating") + } + + blkIter, err = p.processBlock(currentCursor, blkIter) if err != nil { return fmt.Errorf("unable to fetch block %d: %w", blkIter, err) } - if p.stopRequested { - return nil + + if p.IsTerminating() { + p.logger.Info("block poller is terminating") } + time.Sleep(intervalDuration) } } -func (p *BlockPoller) Stop() { - p.stopRequested = true -} - -func (p *BlockPoller) processBlock(currentState *state, blkNum uint64) (uint64, error) { +func (p *BlockPoller) processBlock(currentState *cursor, blkNum uint64) (uint64, error) { if blkNum < p.forkDB.LIBNum() { panic(fmt.Errorf("unexpected error block %d is below the current LIB num %d. There should be no re-org above the current LIB num", blkNum, p.forkDB.LIBNum())) } @@ -113,7 +121,7 @@ func (p *BlockPoller) processBlock(currentState *state, blkNum uint64) (uint64, if reachLib { currentState.blkIsConnectedToLib() - err = p.fireCompleteSegment(blocks, p.fireFunc) + err = p.fireCompleteSegment(blocks) if err != nil { return 0, fmt.Errorf("firing complete segment: %w", err) } @@ -122,9 +130,11 @@ func (p *BlockPoller) processBlock(currentState *state, blkNum uint64) (uint64, // we can safely set the new lib to the current block's Lib // the assumption here is that teh Lib the Block we received from the block fetcher ir ALWAYS CORRECT p.logger.Debug("setting lib", zap.Stringer("blk", blk.AsRef()), zap.Uint64("lib_num", blk.LibNum)) - p.forkDB.SetLIB(blk.AsRef(), "", blk.LibNum) + p.forkDB.SetLIB(blk.AsRef(), blk.LibNum) p.forkDB.PurgeBeforeLIB(0) + p.saveState(blocks) + return nextBlkInSeg(blocks), nil } @@ -179,48 +189,29 @@ func newBlock(block2 *pbbstream.Block) *block { return &block{block2, false} } -func (p *BlockPoller) fireCompleteSegment(blocks []*forkable.Block, fireFunc BlockFireFunc) error { +func (p *BlockPoller) fireCompleteSegment(blocks []*forkable.Block) error { for _, blk := range blocks { - if blk.BlockNum < p.startBlockNumGate { - 
continue - } - b := blk.Object.(*block) - if !b.fired { - err := fireFunc(b) - if err != nil { - return fmt.Errorf("fireing block %d %q: %w", b.Block.Number, b.Block.Id, err) - } - b.fired = true + if _, err := p.fire(b); err != nil { + return fmt.Errorf("fireing block %d (%qs) %w", blk.BlockNum, blk.BlockID, err) } } return nil } -func (p *BlockPoller) fire(b *block) error { - - //blockLine := "FIRE BLOCK 18571000 d2836a703a02f3ca2a13f05efe26fc48c6fa0db0d754a49e56b066d3b7d54659 18570999 55de88c909fa368ae1e93b6b8ffb3fbb12e64aefec1d4a1fcc27ae7633de2f81 18570800 1699992393935935000 Ci10eXBlLmdvb2dsZWFwaXMuY29tL3NmLmV0aGVyZXVtLnR5cGUudjIuQmxvY2sSJxIg0oNqcDoC88oqE/Be/ib8SMb6DbDXVKSeVrBm07fVRlkY+L3tCA==" - anyBlock, err := anypb.New(b.Block) - if err != nil { - return fmt.Errorf("converting block to anypb: %w", err) +func (p *BlockPoller) fire(blk *block) (bool, error) { + if blk.Number < p.startBlockNumGate { + return false, nil } - if anyBlock.TypeUrl != p.blockTypeURL { - return fmt.Errorf("block type url %q does not match expected type %q", anyBlock.TypeUrl, p.blockTypeURL) + if blk.fired { + return false, nil } - blockLine := fmt.Sprintf( - "FIRE BLOCK %d %s %d %s %d %d %s", - b.Block.Number, - b.Block.Id, - b.Block.ParentNum, - b.Block.ParentId, - b.Block.LibNum, - b.Block.Timestamp.AsTime().UnixNano(), - base64.StdEncoding.EncodeToString(anyBlock.Value), - ) - - fmt.Println(blockLine) + if err := p.blockFinalizer.Fire(blk.Block); err != nil { + return false, err + } - return nil + blk.fired = true + return true, nil } diff --git a/blockpoller/poller_test.go b/blockpoller/poller_test.go index c15812d..5260b68 100644 --- a/blockpoller/poller_test.go +++ b/blockpoller/poller_test.go @@ -1,17 +1,17 @@ package blockpoller import ( + "errors" "fmt" "strconv" "testing" - "time" + + "github.com/stretchr/testify/require" "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/forkable" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + 
pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap" ) func TestForkHandler_run(t *testing.T) { @@ -152,39 +152,19 @@ func TestForkHandler_run(t *testing.T) { t.Run(tt.name, func(t *testing.T) { blockFetcher := newTestBlockFetcher(t, tt.blocks) + blockFinalizer := newTestBlockFinalizer(t, tt.expectFireBlock) - f := New("test", blockFetcher, zap.NewNop()) + f := New(blockFetcher, blockFinalizer) f.forkDB = forkable.NewForkDB() - done := make(chan error) - firedBlock := 0 - - f.fireFunc = func(blk *block) error { - if blk.Number != tt.expectFireBlock[firedBlock].Number || blk.Id != tt.expectFireBlock[firedBlock].Id { - done <- fmt.Errorf("expected [%d] to fire block %d %q, got %d %q", firedBlock, tt.expectFireBlock[firedBlock].Number, tt.expectFireBlock[firedBlock].Id, blk.Number, blk.Id) - } - - firedBlock++ - if firedBlock >= len(tt.expectFireBlock) { - f.Stop() - close(done) - } - - return nil + err := f.run(tt.startBlock) + if !errors.Is(err, TestErrCompleteDone) { + require.NoError(t, err) } - go func() { - err := f.run(tt.startBlock) - require.NoError(t, err) - }() + blockFetcher.check(t) + blockFinalizer.check(t) - select { - case err := <-done: - require.NoError(t, err) - blockFetcher.check(t) - case <-time.After(1 * time.Second): - t.Fatal("timeout, missing fetch calls") - } }) } } @@ -207,54 +187,45 @@ func TestForkHandler_resolveStartBlock(t *testing.T) { } } -func TestForkHandler_fireCompleteSegment(t *testing.T) { +func TestForkHandler_fire(t *testing.T) { tests := []struct { name string - blocks []*forkable.Block + block *block startBlockNum uint64 - expect []string + expect bool }{ { - name: "start block less then first block", - blocks: []*forkable.Block{forkBlk("100a"), forkBlk("101a"), forkBlk("102a")}, + name: "greater then start block", + block: &block{blk("100a", "99a", 98), false}, startBlockNum: 98, - expect: []string{"100a", "101a", 
"102a"}, + expect: true, }, { - name: "start block is first block", - blocks: []*forkable.Block{forkBlk("100a"), forkBlk("101a"), forkBlk("102a")}, + name: "on then start block", + block: &block{blk("100a", "99a", 98), false}, startBlockNum: 100, - expect: []string{"100a", "101a", "102a"}, + expect: true, }, { - name: "start block is middle block", - blocks: []*forkable.Block{forkBlk("100a"), forkBlk("101a"), forkBlk("102a")}, + name: "less then start block", + block: &block{blk("100a", "99a", 98), false}, startBlockNum: 101, - expect: []string{"101a", "102a"}, + expect: false, }, { - name: "start block is last block", - blocks: []*forkable.Block{forkBlk("100a"), forkBlk("101a"), forkBlk("102a")}, - startBlockNum: 102, - expect: []string{"102a"}, - }, - { - name: "start block is past block", blocks: []*forkable.Block{forkBlk("100a"), forkBlk("101a"), forkBlk("102a")}, - startBlockNum: 104, - expect: []string{}, + name: "already fired", + block: &block{blk("100a", "99a", 98), true}, + startBlockNum: 98, + expect: false, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - f := &BlockPoller{startBlockNumGate: test.startBlockNum, logger: zap.NewNop()} - receivedIds := []string{} - err := f.fireCompleteSegment(test.blocks, func(p *block) error { - receivedIds = append(receivedIds, p.Id) - return nil - }) + poller := &BlockPoller{startBlockNumGate: test.startBlockNum, blockFinalizer: &TestNoopBlockFinalizer{}} + ok, err := poller.fire(test.block) require.NoError(t, err) - assert.Equal(t, test.expect, receivedIds) + assert.Equal(t, test.expect, ok) }) } diff --git a/blockpoller/state.go b/blockpoller/state.go index 8c3cb99..a76f3dc 100644 --- a/blockpoller/state.go +++ b/blockpoller/state.go @@ -2,7 +2,7 @@ package blockpoller import ( "github.com/streamingfast/bstream" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "go.uber.org/zap" ) @@ -17,14 +17,14 @@ func (s 
State) String() string { return string(s) } -type state struct { +type cursor struct { currentBlk bstream.BlockRef currentIncompleteSeg *bstream.BasicBlockRef state State logger *zap.Logger } -func (s *state) addBlk(blk *pbbstream.Block, blockSeen bool, parentSeen bool) { +func (s *cursor) addBlk(blk *pbbstream.Block, blockSeen bool, parentSeen bool) { blkRef := blk.AsRef() logger := s.logger.With( zap.Stringer("blk", blkRef), @@ -49,22 +49,22 @@ func (s *state) addBlk(blk *pbbstream.Block, blockSeen bool, parentSeen bool) { logger.Debug("received block", zap.Stringer("current_state", s.state)) } -func (s *state) getBlkSegmentNum() bstream.BlockRef { +func (s *cursor) getBlkSegmentNum() bstream.BlockRef { if s.state == IncompleteSegState { if s.currentIncompleteSeg == nil { - panic("current incomplete segment is nil, when state is incomplete segment, this should never happen") + panic("current incomplete segment is nil, when cursor is incomplete segment, this should never happen") } return *s.currentIncompleteSeg } return s.currentBlk } -func (s *state) blkIsConnectedToLib() { +func (s *cursor) blkIsConnectedToLib() { s.state = ContinuousSegState s.currentIncompleteSeg = nil } -func (s *state) blkIsNotConnectedToLib() { +func (s *cursor) blkIsNotConnectedToLib() { if s.state != IncompleteSegState { s.state = IncompleteSegState // we don't want to point the current blk since that will change diff --git a/blockpoller/state_file.go b/blockpoller/state_file.go new file mode 100644 index 0000000..75beb40 --- /dev/null +++ b/blockpoller/state_file.go @@ -0,0 +1,121 @@ +package blockpoller + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + + "github.com/streamingfast/bstream" + + "github.com/streamingfast/bstream/forkable" + "go.uber.org/zap" +) + +type blockRef struct { + Id string `json:"id"` + Num uint64 `json:"num"` + PrevBlockId string `json:"previous_ref_id"` +} + +func (b blockRef) String() string { + return fmt.Sprintf("%d (%s)", b.Num, b.Id) +} + 
+func br(id string, num uint64, prevBlockId string) blockRef { + return blockRef{ + Id: id, + Num: num, + PrevBlockId: prevBlockId, + } +} + +type stateFile struct { + Lib blockRef + LastFiredBlock blockRef + Blocks []blockRef +} + +func (p *BlockPoller) getState() (*stateFile, error) { + if p.stateStorePath == "" { + return nil, fmt.Errorf("no cursor store path set") + } + + filepath := filepath.Join(p.stateStorePath, "cursor.json") + file, err := os.Open(filepath) + if err != nil { + return nil, fmt.Errorf("unable to open cursor file %s: %w", filepath, err) + } + sf := stateFile{} + decoder := json.NewDecoder(file) + if err := decoder.Decode(&sf); err != nil { + return nil, fmt.Errorf("feailed to decode cursor file %s: %w", filepath, err) + } + return &sf, nil +} + +func (p *BlockPoller) saveState(blocks []*forkable.Block) error { + if p.stateStorePath == "" { + return nil + } + + lastFiredBlock := blocks[len(blocks)-1] + + sf := stateFile{ + Lib: br(p.forkDB.LIBID(), p.forkDB.LIBNum(), ""), + LastFiredBlock: br(lastFiredBlock.BlockID, lastFiredBlock.BlockNum, lastFiredBlock.PreviousBlockID), + } + + for _, blk := range blocks { + sf.Blocks = append(sf.Blocks, br(blk.BlockID, blk.BlockNum, blk.PreviousBlockID)) + } + + filepath := filepath.Join(p.stateStorePath, "cursor.json") + file, err := os.OpenFile(filepath, os.O_CREATE, os.ModePerm) + if err != nil { + return fmt.Errorf("unable to open cursor file %s: %w", filepath, err) + } + defer file.Close() + encoder := json.NewEncoder(file) + if err := encoder.Encode(sf); err != nil { + return fmt.Errorf("unable to encode cursor file %s: %w", filepath, err) + } + + p.logger.Info("saved cursor", + zap.Reflect("filepath", filepath), + zap.Stringer("last_fired_block", sf.LastFiredBlock), + zap.Stringer("lib", sf.Lib), + zap.Int("block_count", len(sf.Blocks)), + ) + return nil +} + +func (p *BlockPoller) initState(resolvedStartBlock bstream.BlockRef) (*forkable.ForkDB, bstream.BlockRef, error) { + forkDB := 
forkable.NewForkDB(forkable.ForkDBWithLogger(p.logger)) + + sf, err := p.getState() + if err != nil { + p.logger.Warn("unable to load cursor file, initializing a new forkdb", + zap.Stringer("start_block", resolvedStartBlock), + zap.Stringer("lib", resolvedStartBlock), + zap.Error(err), + ) + forkDB.InitLIB(resolvedStartBlock) + return forkDB, resolvedStartBlock, nil + } + + forkDB.InitLIB(bstream.NewBlockRef(sf.Lib.Id, sf.Lib.Num)) + + for _, blk := range sf.Blocks { + b := &block{nil, true} + forkDB.AddLink(bstream.NewBlockRef(blk.Id, blk.Num), blk.PrevBlockId, b) + } + + p.logger.Info("loaded cursor", + zap.Stringer("start_block", sf.LastFiredBlock), + zap.Stringer("lib", sf.Lib), + zap.Int("block_count", len(sf.Blocks)), + ) + + return forkDB, bstream.NewBlockRef(sf.LastFiredBlock.Id, sf.LastFiredBlock.Num), nil +} diff --git a/blockpoller/types.go b/blockpoller/types.go new file mode 100644 index 0000000..b802f67 --- /dev/null +++ b/blockpoller/types.go @@ -0,0 +1,67 @@ +package blockpoller + +import ( + "context" + "encoding/base64" + "fmt" + "time" + + "google.golang.org/protobuf/types/known/anypb" + + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" +) + +type BlockFireFunc func(b *block) error + +type BlockFetcher interface { + PollingInterval() time.Duration + Fetch(ctx context.Context, blkNum uint64) (*pbbstream.Block, error) +} + +type BlockFinalizer interface { + Init() + Fire(blk *pbbstream.Block) error +} + +var _ BlockFinalizer = (*FireBlockFinalizer)(nil) + +type FireBlockFinalizer struct { + blockTypeURL string +} + +func NewFireBlockFinalizer(blockTypeURL string) *FireBlockFinalizer { + return &FireBlockFinalizer{ + blockTypeURL: blockTypeURL, + } +} + +func (f *FireBlockFinalizer) Init() { + fmt.Println("FIRE INIT 1.0 ", f.blockTypeURL) +} + +func (f *FireBlockFinalizer) Fire(b *pbbstream.Block) error { + //blockLine := "FIRE BLOCK 18571000 d2836a703a02f3ca2a13f05efe26fc48c6fa0db0d754a49e56b066d3b7d54659 18570999 
55de88c909fa368ae1e93b6b8ffb3fbb12e64aefec1d4a1fcc27ae7633de2f81 18570800 1699992393935935000 Ci10eXBlLmdvb2dsZWFwaXMuY29tL3NmLmV0aGVyZXVtLnR5cGUudjIuQmxvY2sSJxIg0oNqcDoC88oqE/Be/ib8SMb6DbDXVKSeVrBm07fVRlkY+L3tCA==" + anyBlock, err := anypb.New(b) + if err != nil { + return fmt.Errorf("converting block to anypb: %w", err) + } + + if anyBlock.TypeUrl != f.blockTypeURL { + return fmt.Errorf("block type url %q does not match expected type %q", anyBlock.TypeUrl, f.blockTypeURL) + } + + blockLine := fmt.Sprintf( + "FIRE BLOCK %d %s %d %s %d %d %s", + b.Number, + b.Id, + b.ParentNum, + b.ParentId, + b.LibNum, + b.Timestamp.AsTime().UnixNano(), + base64.StdEncoding.EncodeToString(anyBlock.Value), + ) + + fmt.Println(blockLine) + return nil + +} diff --git a/chain.go b/chain.go index b14f01a..ffc0396 100644 --- a/chain.go +++ b/chain.go @@ -9,7 +9,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/firehose-core/node-manager/mindreader" "github.com/streamingfast/firehose-core/node-manager/operator" "github.com/streamingfast/logging" diff --git a/cmd/firecore/main.go b/cmd/firecore/main.go index 5ad8ce1..5781c23 100644 --- a/cmd/firecore/main.go +++ b/cmd/firecore/main.go @@ -1,7 +1,7 @@ package main import ( - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" firecore "github.com/streamingfast/firehose-core" ) diff --git a/consolereader.go b/consolereader.go index 6f2a473..cdcc887 100644 --- a/consolereader.go +++ b/consolereader.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/firehose-core/node-manager/mindreader" 
"github.com/streamingfast/logging" "go.uber.org/zap" diff --git a/firehose/app/firehose/app.go b/firehose/app/firehose/app.go index 398ee30..6d5f2d4 100644 --- a/firehose/app/firehose/app.go +++ b/firehose/app/firehose/app.go @@ -20,7 +20,7 @@ import ( "net/url" "time" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/blockstream" diff --git a/firehose/factory.go b/firehose/factory.go index aaa3019..4064cfd 100644 --- a/firehose/factory.go +++ b/firehose/factory.go @@ -7,9 +7,9 @@ import ( "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/hub" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/bstream/stream" "github.com/streamingfast/bstream/transform" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/dauth" "github.com/streamingfast/derr" "github.com/streamingfast/dmetering" diff --git a/firehose/server/blocks.go b/firehose/server/blocks.go index 2ec99f8..4e0b19a 100644 --- a/firehose/server/blocks.go +++ b/firehose/server/blocks.go @@ -7,7 +7,7 @@ import ( "os" "time" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/stream" diff --git a/firehose/tests/integration_test.go b/firehose/tests/integration_test.go index 654dbd1..1e42891 100644 --- a/firehose/tests/integration_test.go +++ b/firehose/tests/integration_test.go @@ -10,7 +10,7 @@ package firehose // "github.com/alicebob/miniredis/v2/server" // "github.com/streamingfast/bstream" // "github.com/streamingfast/dstore" -// pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" +// pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" // pbfirehose 
"github.com/streamingfast/pbgo/sf/firehose/v1" // "github.com/stretchr/testify/assert" // "github.com/stretchr/testify/require" @@ -37,7 +37,7 @@ package firehose // irreversibleBlocksIndexes map[int]map[int]string // startBlockNum uint64 // stopBlockNum uint64 -// cursor *bstream.Cursor +// cursor *bstream.LastFiredBlock // expectedResponses []expectedResp // }{ // { @@ -160,8 +160,8 @@ package firehose // require.NotNil(t, resp) // require.NoError(t, err) // -// fmt.Println(resp.Cursor) -// cursor, err := bstream.CursorFromOpaque(resp.Cursor) +// fmt.Println(resp.LastFiredBlock) +// cursor, err := bstream.CursorFromOpaque(resp.LastFiredBlock) // require.NoError(t, err, "cursor sent from firehose should always be valid") // require.False(t, cursor.IsEmpty()) // diff --git a/firehose/tests/stream_blocks_test.go b/firehose/tests/stream_blocks_test.go index 8914170..3e2dd0a 100644 --- a/firehose/tests/stream_blocks_test.go +++ b/firehose/tests/stream_blocks_test.go @@ -9,7 +9,7 @@ package firehose // // "github.com/streamingfast/bstream" // "github.com/streamingfast/dstore" -// pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" +// pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" // "github.com/stretchr/testify/assert" // "github.com/stretchr/testify/require" // "go.uber.org/zap" diff --git a/go.mod b/go.mod index f355dc2..fbf9c8a 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 - github.com/streamingfast/bstream v0.0.2-0.20231121140754-a458ffe57f0d + github.com/streamingfast/bstream v0.0.2-0.20231121211820-e45c1b42f472 github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c @@ -26,7 +26,7 @@ require ( github.com/streamingfast/logging v0.0.0-20230608130331-f22c91403091 
github.com/streamingfast/pbgo v0.0.6-0.20231120172814-537d034aad5e github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0 - github.com/streamingfast/substreams v1.1.21-0.20231120175501-9d89549d81a1 + github.com/streamingfast/substreams v1.1.21-0.20231122013157-938ec26b0ef6 github.com/stretchr/testify v1.8.4 github.com/test-go/testify v1.1.4 go.uber.org/multierr v1.10.0 diff --git a/go.sum b/go.sum index cc63f16..a16c930 100644 --- a/go.sum +++ b/go.sum @@ -576,6 +576,8 @@ github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jH github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streamingfast/bstream v0.0.2-0.20231121140754-a458ffe57f0d h1:yPqseR0o2RFU2HNoAi6hqIszWr8WVoOQbnHKcEI2zZE= github.com/streamingfast/bstream v0.0.2-0.20231121140754-a458ffe57f0d/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= +github.com/streamingfast/bstream v0.0.2-0.20231121211820-e45c1b42f472 h1:HhWgNV7PIdKiK7IZIXLQ/3mNsERqLm5ztzLwn43AcOo= +github.com/streamingfast/bstream v0.0.2-0.20231121211820-e45c1b42f472/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 h1:UxJUTcEVkdZy8N77E3exz0iNlgQuxl4m220GPvzdZ2s= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80/go.mod h1:QxjVH73Lkqk+mP8bndvhMuQDUINfkgsYhdCH/5TJFKI= github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 h1:49JYZkn8ALGe+LhcACZyX3L9B8tIxRZ3F3l+OxmNMhY= @@ -617,6 +619,8 @@ github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0 h1:Y15G1 github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0/go.mod h1:/Rnz2TJvaShjUct0scZ9kKV2Jr9/+KBAoWy4UMYxgv4= github.com/streamingfast/substreams v1.1.21-0.20231120175501-9d89549d81a1 h1:PQeoATwMtCXKw2ztdS3ruwIOMzCj4GEGkkxB8LvcMOE= github.com/streamingfast/substreams v1.1.21-0.20231120175501-9d89549d81a1/go.mod h1:HimUVtUnRKCdWfkNKFdVgqGCcrNu49+Az5Cyzdpuc4Q= 
+github.com/streamingfast/substreams v1.1.21-0.20231122013157-938ec26b0ef6 h1:eNA736ywGv/XXCotwx4LkfRDmOrHUyPbAqCKI+RzYI4= +github.com/streamingfast/substreams v1.1.21-0.20231122013157-938ec26b0ef6/go.mod h1:JHCOsJtgXUM2KWNxsvi5/hjrLy4KpClaMRriBR3ybnI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= diff --git a/index-builder/index-builder.go b/index-builder/index-builder.go index e6f6d0e..b393192 100644 --- a/index-builder/index-builder.go +++ b/index-builder/index-builder.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/stream" diff --git a/index_builder.go b/index_builder.go index 9244a70..155165e 100644 --- a/index_builder.go +++ b/index_builder.go @@ -7,8 +7,8 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/streamingfast/bstream" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" bstransform "github.com/streamingfast/bstream/transform" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/dlauncher/launcher" index_builder "github.com/streamingfast/firehose-core/index-builder/app/index-builder" ) diff --git a/merger/bundler.go b/merger/bundler.go index 1b191d0..058ee1c 100644 --- a/merger/bundler.go +++ b/merger/bundler.go @@ -23,7 +23,7 @@ import ( "sync" "time" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/forkable" diff --git a/merger/merger_io.go b/merger/merger_io.go index 584fa28..647835e 100644 --- 
a/merger/merger_io.go +++ b/merger/merger_io.go @@ -12,7 +12,7 @@ import ( "sync" "time" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/bstream" "github.com/streamingfast/dstore" diff --git a/node-manager/app/node_reader_stdin/app.go b/node-manager/app/node_reader_stdin/app.go index e4244e3..29e4792 100644 --- a/node-manager/app/node_reader_stdin/app.go +++ b/node-manager/app/node_reader_stdin/app.go @@ -20,7 +20,7 @@ import ( "os" "github.com/streamingfast/bstream/blockstream" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" dgrpcserver "github.com/streamingfast/dgrpc/server" dgrpcfactory "github.com/streamingfast/dgrpc/server/factory" nodeManager "github.com/streamingfast/firehose-core/node-manager" diff --git a/node-manager/mindreader/archiver.go b/node-manager/mindreader/archiver.go index 2217797..d9fbb27 100644 --- a/node-manager/mindreader/archiver.go +++ b/node-manager/mindreader/archiver.go @@ -19,7 +19,7 @@ import ( "fmt" "io" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/bstream" "github.com/streamingfast/dstore" diff --git a/node-manager/mindreader/mindreader.go b/node-manager/mindreader/mindreader.go index c0ea51a..26f7663 100644 --- a/node-manager/mindreader/mindreader.go +++ b/node-manager/mindreader/mindreader.go @@ -25,7 +25,7 @@ import ( "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/blockstream" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/dstore" nodeManager "github.com/streamingfast/firehose-core/node-manager" "github.com/streamingfast/logging" diff --git a/node-manager/mindreader/mindreader_test.go 
b/node-manager/mindreader/mindreader_test.go index 9c15595..add117d 100644 --- a/node-manager/mindreader/mindreader_test.go +++ b/node-manager/mindreader/mindreader_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/shutter" "github.com/stretchr/testify/assert" diff --git a/node-manager/monitor.go b/node-manager/monitor.go index beee7d9..34f8e54 100644 --- a/node-manager/monitor.go +++ b/node-manager/monitor.go @@ -3,7 +3,7 @@ package node_manager import ( "time" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/dmetrics" "go.uber.org/atomic" diff --git a/node-manager/types.go b/node-manager/types.go index 6c5df75..450b155 100644 --- a/node-manager/types.go +++ b/node-manager/types.go @@ -14,7 +14,7 @@ package node_manager -import pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" +import pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" type DeepMindDebuggable interface { DebugDeepMind(enabled bool) diff --git a/reader_node.go b/reader_node.go index bf73207..eb1926c 100644 --- a/reader_node.go +++ b/reader_node.go @@ -11,7 +11,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/streamingfast/bstream/blockstream" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/cli" "github.com/streamingfast/dlauncher/launcher" nodeManager "github.com/streamingfast/firehose-core/node-manager" diff --git a/relayer/relayer.go b/relayer/relayer.go index dc268f2..d572e06 100644 --- a/relayer/relayer.go +++ b/relayer/relayer.go @@ -19,7 +19,7 @@ import ( "strings" "time" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream 
"github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/blockstream" diff --git a/tools_check.go b/tools_check.go index f514e89..1ceb15f 100644 --- a/tools_check.go +++ b/tools_check.go @@ -22,7 +22,7 @@ import ( "github.com/dustin/go-humanize" "github.com/spf13/cobra" "github.com/streamingfast/bstream" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/cli" "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dstore" diff --git a/tools_check_blocks.go b/tools_check_blocks.go index a88fbee..c160460 100644 --- a/tools_check_blocks.go +++ b/tools_check_blocks.go @@ -8,12 +8,11 @@ import ( "regexp" "strconv" - "github.com/streamingfast/firehose-core/tools" - "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/forkable" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/dstore" + "github.com/streamingfast/firehose-core/tools" "go.uber.org/zap" ) @@ -231,7 +230,7 @@ func validateBlockSegment[B Block]( tfdb.lastLinkedBlock = block tfdb.unlinkableSegmentCount = 0 tfdb.firstUnlinkableBlock = nil - tfdb.fdb.SetLIB(block.AsRef(), block.ParentId, block.LibNum) + tfdb.fdb.SetLIB(block.AsRef(), block.LibNum) if tfdb.fdb.HasLIB() { tfdb.fdb.PurgeBeforeLIB(0) } diff --git a/tools_check_merged_batch.go b/tools_check_merged_batch.go index 63ffdb9..feef5b2 100644 --- a/tools_check_merged_batch.go +++ b/tools_check_merged_batch.go @@ -9,7 +9,7 @@ import ( "github.com/streamingfast/firehose-core/tools" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/bstream" "github.com/streamingfast/dstore" diff --git a/tools_download_from_firehose.go b/tools_download_from_firehose.go 
index 55d6d6b..ec5a887 100644 --- a/tools_download_from_firehose.go +++ b/tools_download_from_firehose.go @@ -9,7 +9,7 @@ import ( "github.com/spf13/cobra" "github.com/streamingfast/bstream" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/dstore" pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" diff --git a/tools_fix_bloated_merged_blocks.go b/tools_fix_bloated_merged_blocks.go index c4125c3..cd225b4 100644 --- a/tools_fix_bloated_merged_blocks.go +++ b/tools_fix_bloated_merged_blocks.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" "github.com/streamingfast/bstream" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/dstore" "github.com/streamingfast/firehose-core/tools" "go.uber.org/zap" diff --git a/tools_print.go b/tools_print.go index 781861e..42d0fd2 100644 --- a/tools_print.go +++ b/tools_print.go @@ -25,7 +25,7 @@ import ( "github.com/go-json-experiment/json/jsontext" "github.com/mr-tron/base58" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/spf13/cobra" "github.com/streamingfast/bstream" diff --git a/tools_unmerge_blocks.go b/tools_unmerge_blocks.go index 1c96ab2..ac00bc1 100644 --- a/tools_unmerge_blocks.go +++ b/tools_unmerge_blocks.go @@ -7,7 +7,7 @@ import ( "github.com/spf13/cobra" "github.com/streamingfast/bstream" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/cli" "github.com/streamingfast/dstore" "github.com/streamingfast/firehose-core/tools" diff --git a/tools_upgrade_merged_blocks.go b/tools_upgrade_merged_blocks.go index 2120d98..fff0caf 100644 --- a/tools_upgrade_merged_blocks.go +++ 
b/tools_upgrade_merged_blocks.go @@ -7,7 +7,7 @@ import ( "io" "strconv" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/spf13/cobra" "github.com/streamingfast/bstream" diff --git a/types.go b/types.go index 3180793..bbd6bfd 100644 --- a/types.go +++ b/types.go @@ -9,8 +9,8 @@ import ( "google.golang.org/protobuf/types/known/anypb" "github.com/spf13/cobra" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/bstream/transform" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" "github.com/streamingfast/dstore" "google.golang.org/protobuf/proto" ) diff --git a/unsafe_extensions.go b/unsafe_extensions.go index 6fc6a9e..0417434 100644 --- a/unsafe_extensions.go +++ b/unsafe_extensions.go @@ -4,7 +4,7 @@ import ( "context" "github.com/spf13/cobra" - pbbstream "github.com/streamingfast/bstream/types/pb/sf/bstream/v1" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dlauncher/launcher" "go.uber.org/zap" From c92a777176a2166acfa89846bcad7648cfb2f38d Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Tue, 21 Nov 2023 20:52:33 -0500 Subject: [PATCH 15/66] Added cursor, state test --- blockpoller/{state.go => cursor.go} | 0 blockpoller/state_file.go | 14 +++---- blockpoller/state_file_test.go | 59 +++++++++++++++++++++++++++++ 3 files changed, 66 insertions(+), 7 deletions(-) rename blockpoller/{state.go => cursor.go} (100%) create mode 100644 blockpoller/state_file_test.go diff --git a/blockpoller/state.go b/blockpoller/cursor.go similarity index 100% rename from blockpoller/state.go rename to blockpoller/cursor.go diff --git a/blockpoller/state_file.go b/blockpoller/state_file.go index 75beb40..99b1423 100644 --- a/blockpoller/state_file.go +++ b/blockpoller/state_file.go @@ -70,15 +70,15 @@ func (p *BlockPoller) saveState(blocks 
[]*forkable.Block) error { sf.Blocks = append(sf.Blocks, br(blk.BlockID, blk.BlockNum, blk.PreviousBlockID)) } - filepath := filepath.Join(p.stateStorePath, "cursor.json") - file, err := os.OpenFile(filepath, os.O_CREATE, os.ModePerm) + cnt, err := json.Marshal(sf) if err != nil { - return fmt.Errorf("unable to open cursor file %s: %w", filepath, err) + return fmt.Errorf("unable to marshal stateFile: %w", err) } - defer file.Close() - encoder := json.NewEncoder(file) - if err := encoder.Encode(sf); err != nil { - return fmt.Errorf("unable to encode cursor file %s: %w", filepath, err) + + filepath := filepath.Join(p.stateStorePath, "cursor.json") + + if err := os.WriteFile(filepath, cnt, os.ModePerm); err != nil { + return fmt.Errorf("unable to open cursor file %s: %w", filepath, err) } p.logger.Info("saved cursor", diff --git a/blockpoller/state_file_test.go b/blockpoller/state_file_test.go new file mode 100644 index 0000000..13b1dbf --- /dev/null +++ b/blockpoller/state_file_test.go @@ -0,0 +1,59 @@ +package blockpoller + +import ( + "os" + "path/filepath" + "testing" + + "github.com/streamingfast/bstream" + + "github.com/stretchr/testify/assert" + + "go.uber.org/zap" + + "github.com/stretchr/testify/require" + + "github.com/streamingfast/bstream/forkable" +) + +func TestFireBlockFinalizer_saveState(t *testing.T) { + tests := []struct { + blocks []*forkable.Block + forkDBFunc func() *forkable.ForkDB + expect string + }{ + { + blocks: []*forkable.Block{ + forkBlk("100a"), + }, + forkDBFunc: func() *forkable.ForkDB { + fk := forkable.NewForkDB() + fk.AddLink(bstream.NewBlockRef("97a", 97), "96a", nil) + fk.AddLink(bstream.NewBlockRef("98a", 98), "97a", nil) + fk.AddLink(bstream.NewBlockRef("99a", 99), "98a", nil) + fk.SetLIB(blk("99a", "97a", 95).AsRef(), 98) + return fk + }, + + expect: `{"Lib":{"id":"98a","num":98,"previous_ref_id":""},"LastFiredBlock":{"id":"100a","num":100,"previous_ref_id":""},"Blocks":[{"id":"100a","num":100,"previous_ref_id":""}]}`, + }, + } 
+ for _, tt := range tests { + t.Run("", func(t *testing.T) { + dirName, err := os.MkdirTemp("", "fblk") + require.NoError(t, err) + defer os.Remove(dirName) + + poller := &BlockPoller{ + stateStorePath: dirName, + forkDB: tt.forkDBFunc(), + logger: zap.NewNop(), + } + require.NoError(t, poller.saveState(tt.blocks)) + cnt, err := os.ReadFile(filepath.Join(dirName, "cursor.json")) + require.NoError(t, err) + assert.Equal(t, tt.expect, string(cnt)) + }) + } + +} From 991d08a872dd912a793c683f6fbbc71f88c7b422 Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Tue, 21 Nov 2023 20:57:29 -0500 Subject: [PATCH 16/66] clean up formatting --- blockpoller/init_test.go | 3 +-- blockpoller/state_file.go | 1 - blockpoller/state_file_test.go | 8 ++------ blockpoller/types.go | 3 +-- 4 files changed, 4 insertions(+), 11 deletions(-) diff --git a/blockpoller/init_test.go b/blockpoller/init_test.go index dd4c909..8da54df 100644 --- a/blockpoller/init_test.go +++ b/blockpoller/init_test.go @@ -6,9 +6,8 @@ import ( "testing" "time" - "github.com/streamingfast/derr" - pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" + "github.com/streamingfast/derr" "github.com/streamingfast/logging" "github.com/stretchr/testify/assert" "github.com/test-go/testify/require" diff --git a/blockpoller/state_file.go b/blockpoller/state_file.go index 99b1423..5a14a92 100644 --- a/blockpoller/state_file.go +++ b/blockpoller/state_file.go @@ -7,7 +7,6 @@ import ( "path/filepath" "github.com/streamingfast/bstream" - "github.com/streamingfast/bstream/forkable" "go.uber.org/zap" ) diff --git a/blockpoller/state_file_test.go b/blockpoller/state_file_test.go index 13b1dbf..08e25be 100644 --- a/blockpoller/state_file_test.go +++ b/blockpoller/state_file_test.go @@ -6,14 +6,10 @@ import ( "testing" "github.com/streamingfast/bstream" - + "github.com/streamingfast/bstream/forkable" "github.com/stretchr/testify/assert" - - "go.uber.org/zap" - "github.com/stretchr/testify/require" - - 
"github.com/streamingfast/bstream/forkable" + "go.uber.org/zap" ) func TestFireBlockFinalizer_saveState(t *testing.T) { diff --git a/blockpoller/types.go b/blockpoller/types.go index b802f67..c727574 100644 --- a/blockpoller/types.go +++ b/blockpoller/types.go @@ -6,9 +6,8 @@ import ( "fmt" "time" - "google.golang.org/protobuf/types/known/anypb" - pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" + "google.golang.org/protobuf/types/known/anypb" ) type BlockFireFunc func(b *block) error From b80045eaaf05b90c0185b0e10f6df2baa817948e Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Wed, 22 Nov 2023 10:19:37 -0500 Subject: [PATCH 17/66] clean up code and fix bug --- blockpoller/fetcher.go | 13 ++++++++++ blockpoller/{types.go => handler.go} | 37 +++++++++------------------- blockpoller/init_test.go | 4 +-- blockpoller/poller.go | 14 +++++------ blockpoller/poller_test.go | 2 +- go.mod | 2 +- go.sum | 6 ++--- 7 files changed, 38 insertions(+), 40 deletions(-) create mode 100644 blockpoller/fetcher.go rename blockpoller/{types.go => handler.go} (52%) diff --git a/blockpoller/fetcher.go b/blockpoller/fetcher.go new file mode 100644 index 0000000..f0b30ec --- /dev/null +++ b/blockpoller/fetcher.go @@ -0,0 +1,13 @@ +package blockpoller + +import ( + "context" + "time" + + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" +) + +type BlockFetcher interface { + PollingInterval() time.Duration + Fetch(ctx context.Context, blkNum uint64) (*pbbstream.Block, error) +} diff --git a/blockpoller/types.go b/blockpoller/handler.go similarity index 52% rename from blockpoller/types.go rename to blockpoller/handler.go index c727574..8320623 100644 --- a/blockpoller/types.go +++ b/blockpoller/handler.go @@ -1,52 +1,39 @@ package blockpoller import ( - "context" "encoding/base64" "fmt" - "time" + "sync" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" - "google.golang.org/protobuf/types/known/anypb" ) -type BlockFireFunc func(b *block) error - -type 
BlockFetcher interface { - PollingInterval() time.Duration - Fetch(ctx context.Context, blkNum uint64) (*pbbstream.Block, error) -} - -type BlockFinalizer interface { +type BlockHandler interface { Init() Fire(blk *pbbstream.Block) error } -var _ BlockFinalizer = (*FireBlockFinalizer)(nil) +var _ BlockHandler = (*FireBlockHandler)(nil) -type FireBlockFinalizer struct { +type FireBlockHandler struct { blockTypeURL string + init sync.Once } -func NewFireBlockFinalizer(blockTypeURL string) *FireBlockFinalizer { - return &FireBlockFinalizer{ +func NewFireBlockHandler(blockTypeURL string) *FireBlockHandler { + return &FireBlockHandler{ blockTypeURL: blockTypeURL, } } -func (f *FireBlockFinalizer) Init() { +func (f *FireBlockHandler) Init() { fmt.Println("FIRE INIT 1.0 ", f.blockTypeURL) } -func (f *FireBlockFinalizer) Fire(b *pbbstream.Block) error { +func (f *FireBlockHandler) Fire(b *pbbstream.Block) error { //blockLine := "FIRE BLOCK 18571000 d2836a703a02f3ca2a13f05efe26fc48c6fa0db0d754a49e56b066d3b7d54659 18570999 55de88c909fa368ae1e93b6b8ffb3fbb12e64aefec1d4a1fcc27ae7633de2f81 18570800 1699992393935935000 Ci10eXBlLmdvb2dsZWFwaXMuY29tL3NmLmV0aGVyZXVtLnR5cGUudjIuQmxvY2sSJxIg0oNqcDoC88oqE/Be/ib8SMb6DbDXVKSeVrBm07fVRlkY+L3tCA==" - anyBlock, err := anypb.New(b) - if err != nil { - return fmt.Errorf("converting block to anypb: %w", err) - } - - if anyBlock.TypeUrl != f.blockTypeURL { - return fmt.Errorf("block type url %q does not match expected type %q", anyBlock.TypeUrl, f.blockTypeURL) + if b.Payload.TypeUrl != f.blockTypeURL { + return fmt.Errorf("block type url %q does not match expected type %q", b.Payload.TypeUrl, f.blockTypeURL) } blockLine := fmt.Sprintf( @@ -57,7 +44,7 @@ func (f *FireBlockFinalizer) Fire(b *pbbstream.Block) error { b.ParentId, b.LibNum, b.Timestamp.AsTime().UnixNano(), - base64.StdEncoding.EncodeToString(anyBlock.Value), + base64.StdEncoding.EncodeToString(b.Payload.Value), ) fmt.Println(blockLine) diff --git a/blockpoller/init_test.go 
b/blockpoller/init_test.go index 8da54df..3e08901 100644 --- a/blockpoller/init_test.go +++ b/blockpoller/init_test.go @@ -70,7 +70,7 @@ func (b *TestBlockFetcher) check(t *testing.T) { require.Equal(b.t, uint64(len(b.blocks)), b.idx, "we should have fetched all %d blocks, only fired %d blocks", len(b.blocks), b.idx) } -var _ BlockFinalizer = &TestBlockFinalizer{} +var _ BlockHandler = &TestBlockFinalizer{} type TestBlockFinalizer struct { t *testing.T @@ -111,7 +111,7 @@ func (b *TestBlockFinalizer) check(t *testing.T) { require.Equal(b.t, uint64(len(b.fireBlocks)), b.idx, "we should have fired all %d blocks, only fired %d blocks", len(b.fireBlocks), b.idx) } -var _ BlockFinalizer = &TestNoopBlockFinalizer{} +var _ BlockHandler = &TestNoopBlockFinalizer{} type TestNoopBlockFinalizer struct{} diff --git a/blockpoller/poller.go b/blockpoller/poller.go index 502f848..9490066 100644 --- a/blockpoller/poller.go +++ b/blockpoller/poller.go @@ -19,23 +19,23 @@ type BlockPoller struct { fetchBlockRetryCount uint64 stateStorePath string - blockFetcher BlockFetcher - blockFinalizer BlockFinalizer - forkDB *forkable.ForkDB + blockFetcher BlockFetcher + blockHandler BlockHandler + forkDB *forkable.ForkDB logger *zap.Logger } func New( blockFetcher BlockFetcher, - blockFinalizer BlockFinalizer, + blockFinalizer BlockHandler, opts ...Option, ) *BlockPoller { b := &BlockPoller{ Shutter: shutter.New(), blockFetcher: blockFetcher, - blockFinalizer: blockFinalizer, + blockHandler: blockFinalizer, fetchBlockRetryCount: 4, logger: zap.NewNop(), } @@ -56,7 +56,7 @@ func (p *BlockPoller) Run(ctx context.Context, startBlockNum uint64, finalizedBl zap.Uint64("resolved_start_block_num", resolveStartBlockNum), ) - p.blockFinalizer.Init() + p.blockHandler.Init() startBlock, err := p.blockFetcher.Fetch(ctx, resolveStartBlockNum) if err != nil { @@ -208,7 +208,7 @@ func (p *BlockPoller) fire(blk *block) (bool, error) { return false, nil } - if err := p.blockFinalizer.Fire(blk.Block); err != 
nil { + if err := p.blockHandler.Fire(blk.Block); err != nil { return false, err } diff --git a/blockpoller/poller_test.go b/blockpoller/poller_test.go index 5260b68..850b14e 100644 --- a/blockpoller/poller_test.go +++ b/blockpoller/poller_test.go @@ -222,7 +222,7 @@ func TestForkHandler_fire(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - poller := &BlockPoller{startBlockNumGate: test.startBlockNum, blockFinalizer: &TestNoopBlockFinalizer{}} + poller := &BlockPoller{startBlockNumGate: test.startBlockNum, blockHandler: &TestNoopBlockFinalizer{}} ok, err := poller.fire(test.block) require.NoError(t, err) assert.Equal(t, test.expect, ok) diff --git a/go.mod b/go.mod index fbf9c8a..fd4b5d7 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 - github.com/streamingfast/bstream v0.0.2-0.20231121211820-e45c1b42f472 + github.com/streamingfast/bstream v0.0.2-0.20231122151642-aa0d174c0e39 github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c diff --git a/go.sum b/go.sum index a16c930..93fc3c6 100644 --- a/go.sum +++ b/go.sum @@ -574,10 +574,10 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/streamingfast/bstream v0.0.2-0.20231121140754-a458ffe57f0d h1:yPqseR0o2RFU2HNoAi6hqIszWr8WVoOQbnHKcEI2zZE= -github.com/streamingfast/bstream v0.0.2-0.20231121140754-a458ffe57f0d/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= github.com/streamingfast/bstream v0.0.2-0.20231121211820-e45c1b42f472 
h1:HhWgNV7PIdKiK7IZIXLQ/3mNsERqLm5ztzLwn43AcOo= github.com/streamingfast/bstream v0.0.2-0.20231121211820-e45c1b42f472/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= +github.com/streamingfast/bstream v0.0.2-0.20231122151642-aa0d174c0e39 h1:4MP75Z2TGPh/TpaoqFZH1fDI9yEv6ZeShqqjKMb6r/A= +github.com/streamingfast/bstream v0.0.2-0.20231122151642-aa0d174c0e39/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 h1:UxJUTcEVkdZy8N77E3exz0iNlgQuxl4m220GPvzdZ2s= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80/go.mod h1:QxjVH73Lkqk+mP8bndvhMuQDUINfkgsYhdCH/5TJFKI= github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 h1:49JYZkn8ALGe+LhcACZyX3L9B8tIxRZ3F3l+OxmNMhY= @@ -617,8 +617,6 @@ github.com/streamingfast/shutter v1.5.0 h1:NpzDYzj0HVpSiDJVO/FFSL6QIK/YKOxY0gJAt github.com/streamingfast/shutter v1.5.0/go.mod h1:B/T6efqdeMGbGwjzPS1ToXzYZI4kDzI5/u4I+7qbjY8= github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0 h1:Y15G1Z4fpEdm2b+/70owI7TLuXadlqBtGM7rk4Hxrzk= github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0/go.mod h1:/Rnz2TJvaShjUct0scZ9kKV2Jr9/+KBAoWy4UMYxgv4= -github.com/streamingfast/substreams v1.1.21-0.20231120175501-9d89549d81a1 h1:PQeoATwMtCXKw2ztdS3ruwIOMzCj4GEGkkxB8LvcMOE= -github.com/streamingfast/substreams v1.1.21-0.20231120175501-9d89549d81a1/go.mod h1:HimUVtUnRKCdWfkNKFdVgqGCcrNu49+Az5Cyzdpuc4Q= github.com/streamingfast/substreams v1.1.21-0.20231122013157-938ec26b0ef6 h1:eNA736ywGv/XXCotwx4LkfRDmOrHUyPbAqCKI+RzYI4= github.com/streamingfast/substreams v1.1.21-0.20231122013157-938ec26b0ef6/go.mod h1:JHCOsJtgXUM2KWNxsvi5/hjrLy4KpClaMRriBR3ybnI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= From 53446f57a22b449fadad29a035d6c732d5ade3ce Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Wed, 22 Nov 2023 10:26:01 -0500 Subject: [PATCH 18/66] fix block handler interface --- 
blockpoller/handler.go | 5 ++--- blockpoller/init_test.go | 6 +++--- blockpoller/poller.go | 2 +- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/blockpoller/handler.go b/blockpoller/handler.go index 8320623..4f7e8f8 100644 --- a/blockpoller/handler.go +++ b/blockpoller/handler.go @@ -10,7 +10,7 @@ import ( type BlockHandler interface { Init() - Fire(blk *pbbstream.Block) error + Handle(blk *pbbstream.Block) error } var _ BlockHandler = (*FireBlockHandler)(nil) @@ -30,7 +30,7 @@ func (f *FireBlockHandler) Init() { fmt.Println("FIRE INIT 1.0 ", f.blockTypeURL) } -func (f *FireBlockHandler) Fire(b *pbbstream.Block) error { +func (f *FireBlockHandler) Handle(b *pbbstream.Block) error { //blockLine := "FIRE BLOCK 18571000 d2836a703a02f3ca2a13f05efe26fc48c6fa0db0d754a49e56b066d3b7d54659 18570999 55de88c909fa368ae1e93b6b8ffb3fbb12e64aefec1d4a1fcc27ae7633de2f81 18570800 1699992393935935000 Ci10eXBlLmdvb2dsZWFwaXMuY29tL3NmLmV0aGVyZXVtLnR5cGUudjIuQmxvY2sSJxIg0oNqcDoC88oqE/Be/ib8SMb6DbDXVKSeVrBm07fVRlkY+L3tCA==" if b.Payload.TypeUrl != f.blockTypeURL { return fmt.Errorf("block type url %q does not match expected type %q", b.Payload.TypeUrl, f.blockTypeURL) @@ -49,5 +49,4 @@ func (f *FireBlockHandler) Fire(b *pbbstream.Block) error { fmt.Println(blockLine) return nil - } diff --git a/blockpoller/init_test.go b/blockpoller/init_test.go index 3e08901..fdf9f52 100644 --- a/blockpoller/init_test.go +++ b/blockpoller/init_test.go @@ -90,7 +90,7 @@ func (t *TestBlockFinalizer) Init() { panic("implement me") } -func (t *TestBlockFinalizer) Fire(blk *pbbstream.Block) error { +func (t *TestBlockFinalizer) Handle(blk *pbbstream.Block) error { if len(t.fireBlocks) == 0 { assert.Fail(t.t, fmt.Sprintf("should not have fired block %s", blk.AsRef())) } @@ -115,5 +115,5 @@ var _ BlockHandler = &TestNoopBlockFinalizer{} type TestNoopBlockFinalizer struct{} -func (t *TestNoopBlockFinalizer) Init() {} -func (t *TestNoopBlockFinalizer) Fire(blk *pbbstream.Block) error { return nil 
} +func (t *TestNoopBlockFinalizer) Init() {} +func (t *TestNoopBlockFinalizer) Handle(blk *pbbstream.Block) error { return nil } diff --git a/blockpoller/poller.go b/blockpoller/poller.go index 9490066..f13261d 100644 --- a/blockpoller/poller.go +++ b/blockpoller/poller.go @@ -208,7 +208,7 @@ func (p *BlockPoller) fire(blk *block) (bool, error) { return false, nil } - if err := p.blockHandler.Fire(blk.Block); err != nil { + if err := p.blockHandler.Handle(blk.Block); err != nil { return false, err } From bfd6e22ba94bd4d8e1b4ab0b068048c0f25f35a5 Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Wed, 22 Nov 2023 13:03:00 -0500 Subject: [PATCH 19/66] fix state test --- blockpoller/poller.go | 66 ++++++++--------- blockpoller/state_file.go | 53 ++++++++------ blockpoller/state_file_test.go | 130 +++++++++++++++++++++++---------- 3 files changed, 155 insertions(+), 94 deletions(-) diff --git a/blockpoller/poller.go b/blockpoller/poller.go index f13261d..7479439 100644 --- a/blockpoller/poller.go +++ b/blockpoller/poller.go @@ -13,6 +13,15 @@ import ( "go.uber.org/zap" ) +type block struct { + *pbbstream.Block + fired bool +} + +func newBlock(block2 *pbbstream.Block) *block { + return &block{block2, false} +} + type BlockPoller struct { *shutter.Shutter startBlockNumGate uint64 @@ -69,7 +78,7 @@ func (p *BlockPoller) Run(ctx context.Context, startBlockNum uint64, finalizedBl func (p *BlockPoller) run(resolvedStartBlock bstream.BlockRef) (err error) { - p.forkDB, resolvedStartBlock, err = p.initState(resolvedStartBlock) + p.forkDB, resolvedStartBlock, err = initState(resolvedStartBlock, p.stateStorePath, p.logger) if err != nil { return fmt.Errorf("unable to initialize cursor: %w", err) } @@ -159,36 +168,6 @@ func (p *BlockPoller) fetchBlock(blkNum uint64) (blk *pbbstream.Block, err error return out, nil } -func nextBlkInSeg(blocks []*forkable.Block) uint64 { - if len(blocks) == 0 { - panic(fmt.Errorf("the blocks segments should never be empty")) - } - return 
blocks[len(blocks)-1].BlockNum + 1 -} - -func prevBlkInSeg(blocks []*forkable.Block) uint64 { - if len(blocks) == 0 { - panic(fmt.Errorf("the blocks segments should never be empty")) - } - return blocks[0].Object.(*block).ParentNum -} - -func resolveStartBlock(startBlockNum, finalizedBlockNum uint64) uint64 { - if finalizedBlockNum < startBlockNum { - return finalizedBlockNum - } - return startBlockNum -} - -type block struct { - *pbbstream.Block - fired bool -} - -func newBlock(block2 *pbbstream.Block) *block { - return &block{block2, false} -} - func (p *BlockPoller) fireCompleteSegment(blocks []*forkable.Block) error { for _, blk := range blocks { b := blk.Object.(*block) @@ -200,11 +179,11 @@ func (p *BlockPoller) fireCompleteSegment(blocks []*forkable.Block) error { } func (p *BlockPoller) fire(blk *block) (bool, error) { - if blk.Number < p.startBlockNumGate { + if blk.fired { return false, nil } - if blk.fired { + if blk.Number < p.startBlockNumGate { return false, nil } @@ -215,3 +194,24 @@ func (p *BlockPoller) fire(blk *block) (bool, error) { blk.fired = true return true, nil } + +func nextBlkInSeg(blocks []*forkable.Block) uint64 { + if len(blocks) == 0 { + panic(fmt.Errorf("the blocks segments should never be empty")) + } + return blocks[len(blocks)-1].BlockNum + 1 +} + +func prevBlkInSeg(blocks []*forkable.Block) uint64 { + if len(blocks) == 0 { + panic(fmt.Errorf("the blocks segments should never be empty")) + } + return blocks[0].Object.(*block).ParentNum +} + +func resolveStartBlock(startBlockNum, finalizedBlockNum uint64) uint64 { + if finalizedBlockNum < startBlockNum { + return finalizedBlockNum + } + return startBlockNum +} diff --git a/blockpoller/state_file.go b/blockpoller/state_file.go index 5a14a92..745dc0a 100644 --- a/blockpoller/state_file.go +++ b/blockpoller/state_file.go @@ -6,14 +6,20 @@ import ( "os" "path/filepath" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" + "github.com/streamingfast/bstream" 
"github.com/streamingfast/bstream/forkable" "go.uber.org/zap" ) type blockRef struct { - Id string `json:"id"` - Num uint64 `json:"num"` + Id string `json:"id"` + Num uint64 `json:"num"` +} + +type blockRefWithPrev struct { + blockRef PrevBlockId string `json:"previous_ref_id"` } @@ -21,26 +27,18 @@ func (b blockRef) String() string { return fmt.Sprintf("%d (%s)", b.Num, b.Id) } -func br(id string, num uint64, prevBlockId string) blockRef { - return blockRef{ - Id: id, - Num: num, - PrevBlockId: prevBlockId, - } -} - type stateFile struct { Lib blockRef - LastFiredBlock blockRef - Blocks []blockRef + LastFiredBlock blockRefWithPrev + Blocks []blockRefWithPrev } -func (p *BlockPoller) getState() (*stateFile, error) { - if p.stateStorePath == "" { +func getState(stateStorePath string) (*stateFile, error) { + if stateStorePath == "" { return nil, fmt.Errorf("no cursor store path set") } - filepath := filepath.Join(p.stateStorePath, "cursor.json") + filepath := filepath.Join(stateStorePath, "cursor.json") file, err := os.Open(filepath) if err != nil { return nil, fmt.Errorf("unable to open cursor file %s: %w", filepath, err) @@ -61,12 +59,12 @@ func (p *BlockPoller) saveState(blocks []*forkable.Block) error { lastFiredBlock := blocks[len(blocks)-1] sf := stateFile{ - Lib: br(p.forkDB.LIBID(), p.forkDB.LIBNum(), ""), - LastFiredBlock: br(lastFiredBlock.BlockID, lastFiredBlock.BlockNum, lastFiredBlock.PreviousBlockID), + Lib: blockRef{p.forkDB.LIBID(), p.forkDB.LIBNum()}, + LastFiredBlock: blockRefWithPrev{blockRef{lastFiredBlock.BlockID, lastFiredBlock.BlockNum}, lastFiredBlock.PreviousBlockID}, } for _, blk := range blocks { - sf.Blocks = append(sf.Blocks, br(blk.BlockID, blk.BlockNum, blk.PreviousBlockID)) + sf.Blocks = append(sf.Blocks, blockRefWithPrev{blockRef{blk.BlockID, blk.BlockNum}, blk.PreviousBlockID}) } cnt, err := json.Marshal(sf) @@ -89,12 +87,12 @@ func (p *BlockPoller) saveState(blocks []*forkable.Block) error { return nil } -func (p *BlockPoller) 
initState(resolvedStartBlock bstream.BlockRef) (*forkable.ForkDB, bstream.BlockRef, error) { - forkDB := forkable.NewForkDB(forkable.ForkDBWithLogger(p.logger)) +func initState(resolvedStartBlock bstream.BlockRef, stateStorePath string, logger *zap.Logger) (*forkable.ForkDB, bstream.BlockRef, error) { + forkDB := forkable.NewForkDB(forkable.ForkDBWithLogger(logger)) - sf, err := p.getState() + sf, err := getState(stateStorePath) if err != nil { - p.logger.Warn("unable to load cursor file, initializing a new forkdb", + logger.Warn("unable to load cursor file, initializing a new forkdb", zap.Stringer("start_block", resolvedStartBlock), zap.Stringer("lib", resolvedStartBlock), zap.Error(err), @@ -106,11 +104,18 @@ func (p *BlockPoller) initState(resolvedStartBlock bstream.BlockRef) (*forkable. forkDB.InitLIB(bstream.NewBlockRef(sf.Lib.Id, sf.Lib.Num)) for _, blk := range sf.Blocks { - b := &block{nil, true} + b := &block{ + Block: &pbbstream.Block{ + Number: blk.Num, + Id: blk.Id, + ParentId: blk.PrevBlockId, + }, + fired: true, + } forkDB.AddLink(bstream.NewBlockRef(blk.Id, blk.Num), blk.PrevBlockId, b) } - p.logger.Info("loaded cursor", + logger.Info("loaded cursor", zap.Stringer("start_block", sf.LastFiredBlock), zap.Stringer("lib", sf.Lib), zap.Int("block_count", len(sf.Blocks)), diff --git a/blockpoller/state_file_test.go b/blockpoller/state_file_test.go index 08e25be..0d1f060 100644 --- a/blockpoller/state_file_test.go +++ b/blockpoller/state_file_test.go @@ -12,44 +12,100 @@ import ( "go.uber.org/zap" ) -func TestFireBlockFinalizer_saveState(t *testing.T) { - tests := []struct { - blocks []*forkable.Block - forkDBFunc func() *forkable.ForkDB - expect string - }{ - { - blocks: []*forkable.Block{ - forkBlk("100a"), - }, - forkDBFunc: func() *forkable.ForkDB { - fk := forkable.NewForkDB() - fk.AddLink(bstream.NewBlockRef("97a", 97), "96a", nil) - fk.AddLink(bstream.NewBlockRef("98a", 98), "97a", nil) - fk.AddLink(bstream.NewBlockRef("99a", 99), "98a", nil) - 
fk.SetLIB(blk("99a", "97a", 95).AsRef(), 98) - return fk - }, - - expect: `{"Lib":{"id":"98a","num":98,"previous_ref_id":""},"LastFiredBlock":{"id":"100a","num":100,"previous_ref_id":""},"Blocks":[{"id":"100a","num":100,"previous_ref_id":""}]}`, - }, +func TestFireBlockFinalizer_state(t *testing.T) { + dirName, err := os.MkdirTemp("", "fblk") + require.NoError(t, err) + defer os.Remove(dirName) + + fk := forkable.NewForkDB() + // simulating a flow where the lib libmoves + fk.SetLIB(bstream.NewBlockRef("100a", 100), 100) + fk.AddLink(bstream.NewBlockRef("101a", 101), "100a", &block{Block: blk("101a", "100a", 100)}) + fk.AddLink(bstream.NewBlockRef("102a", 102), "101a", &block{Block: blk("102a", "101a", 100)}) + fk.AddLink(bstream.NewBlockRef("103a", 103), "102a", &block{Block: blk("103a", "102a", 100)}) + fk.SetLIB(bstream.NewBlockRef("103a", 103), 101) + fk.AddLink(bstream.NewBlockRef("104b", 104), "103b", &block{Block: blk("104b", "103b", 101)}) + fk.AddLink(bstream.NewBlockRef("103a", 103), "102a", &block{Block: blk("103a", "102a", 101)}) + fk.AddLink(bstream.NewBlockRef("104a", 104), "103a", &block{Block: blk("104a", "103a", 101)}) + fk.AddLink(bstream.NewBlockRef("105b", 105), "104b", &block{Block: blk("105b", "104b", 101)}) + fk.AddLink(bstream.NewBlockRef("103b", 103), "102b", &block{Block: blk("103b", "102b", 101)}) + fk.AddLink(bstream.NewBlockRef("102b", 102), "101a", &block{Block: blk("102b", "101a", 101)}) + fk.AddLink(bstream.NewBlockRef("106a", 106), "105a", &block{Block: blk("106a", "105a", 101)}) + fk.AddLink(bstream.NewBlockRef("105a", 105), "104a", &block{Block: blk("105a", "104a", 101)}) + expectedBlocks, reachedLib := fk.CompleteSegment(blk("105a", "104a", 101).AsRef()) + // simulate firing the blocks + for _, blk := range expectedBlocks { + blk.Object.(*block).fired = true } - for _, tt := range tests { - t.Run("", func(t *testing.T) { - dirName, err := os.MkdirTemp("", "fblk") - require.NoError(t, err) - defer os.Remove(dirName) - - poller := 
&BlockPoller{ - stateStorePath: dirName, - forkDB: tt.forkDBFunc(), - logger: zap.NewNop(), - } - require.NoError(t, poller.saveState(tt.blocks)) - cnt, err := os.ReadFile(filepath.Join(dirName, "cursor.json")) - require.NoError(t, err) - assert.Equal(t, tt.expect, string(cnt)) - }) + assert.True(t, reachedLib) + require.Equal(t, 5, len(expectedBlocks)) + + expectedStateFileCnt := `{"Lib":{"id":"101a","num":101},"LastFiredBlock":{"id":"105a","num":105,"previous_ref_id":"104a"},"Blocks":[{"id":"101a","num":101,"previous_ref_id":"100a"},{"id":"102a","num":102,"previous_ref_id":"101a"},{"id":"103a","num":103,"previous_ref_id":"102a"},{"id":"104a","num":104,"previous_ref_id":"103a"},{"id":"105a","num":105,"previous_ref_id":"104a"}]}` + + poller := &BlockPoller{ + stateStorePath: dirName, + forkDB: fk, + logger: zap.NewNop(), } + require.NoError(t, poller.saveState(expectedBlocks)) + + filePath := filepath.Join(dirName, "cursor.json") + cnt, err := os.ReadFile(filePath) + require.NoError(t, err) + assert.Equal(t, expectedStateFileCnt, string(cnt)) + + forkDB, startBlock, err := initState(bstream.NewBlockRef("60a", 60), dirName, zap.NewNop()) + require.NoError(t, err) + + blocks, reachedLib := forkDB.CompleteSegment(bstream.NewBlockRef("105a", 105)) + assert.True(t, reachedLib) + assertForkableBlocks(t, expectedBlocks, blocks) + assert.Equal(t, bstream.NewBlockRef("105a", 105), startBlock) + assert.Equal(t, "101a", forkDB.LIBID()) + assert.Equal(t, uint64(101), forkDB.LIBNum()) +} + +func TestFireBlockFinalizer_noSstate(t *testing.T) { + dirName, err := os.MkdirTemp("", "fblk") + require.NoError(t, err) + defer os.Remove(dirName) + + forkDB, startBlock, err := initState(bstream.NewBlockRef("60a", 60), dirName, logger) + require.NoError(t, err) + + blocks, reachedLib := forkDB.CompleteSegment(bstream.NewBlockRef("60a", 60)) + assert.True(t, reachedLib) + require.Equal(t, 0, len(blocks)) + + blocks, reachedLib = forkDB.CompleteSegment(bstream.NewBlockRef("105a", 105)) + 
assert.False(t, reachedLib) + require.Equal(t, 0, len(blocks)) + + assert.Equal(t, bstream.NewBlockRef("60a", 60), startBlock) +} + +func assertForkableBlocks(t *testing.T, expected, actual []*forkable.Block) { + t.Helper() + + require.Equal(t, len(expected), len(actual)) + for idx, expect := range expected { + assertForkableBlock(t, expect, actual[idx]) + } +} + +func assertForkableBlock(t *testing.T, expected, actual *forkable.Block) { + t.Helper() + assert.Equal(t, expected.BlockID, actual.BlockID) + assert.Equal(t, expected.BlockNum, actual.BlockNum) + assert.Equal(t, expected.PreviousBlockID, actual.PreviousBlockID) + + expectedBlock, ok := expected.Object.(*block) + require.True(t, ok) + actualBlock, ok := actual.Object.(*block) + require.True(t, ok) + assert.Equal(t, expectedBlock.fired, actualBlock.fired) + assert.Equal(t, expectedBlock.Block.Id, actualBlock.Block.Id) + assert.Equal(t, expectedBlock.Block.Number, actualBlock.Block.Number) + assert.Equal(t, expectedBlock.Block.ParentId, actualBlock.Block.ParentId) } From e2de0951c3e7bb3fb903a2510453d8d70b8e5a0d Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Wed, 22 Nov 2023 14:30:40 -0500 Subject: [PATCH 20/66] fix block handler --- blockpoller/handler.go | 13 +++++++++---- blockpoller/handler_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 4 deletions(-) create mode 100644 blockpoller/handler_test.go diff --git a/blockpoller/handler.go b/blockpoller/handler.go index 4f7e8f8..802ba82 100644 --- a/blockpoller/handler.go +++ b/blockpoller/handler.go @@ -3,6 +3,7 @@ package blockpoller import ( "encoding/base64" "fmt" + "strings" "sync" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" @@ -22,7 +23,7 @@ type FireBlockHandler struct { func NewFireBlockHandler(blockTypeURL string) *FireBlockHandler { return &FireBlockHandler{ - blockTypeURL: blockTypeURL, + blockTypeURL: clean(blockTypeURL), } } @@ -31,9 +32,9 @@ func (f *FireBlockHandler) Init() { } func (f 
*FireBlockHandler) Handle(b *pbbstream.Block) error { - //blockLine := "FIRE BLOCK 18571000 d2836a703a02f3ca2a13f05efe26fc48c6fa0db0d754a49e56b066d3b7d54659 18570999 55de88c909fa368ae1e93b6b8ffb3fbb12e64aefec1d4a1fcc27ae7633de2f81 18570800 1699992393935935000 Ci10eXBlLmdvb2dsZWFwaXMuY29tL3NmLmV0aGVyZXVtLnR5cGUudjIuQmxvY2sSJxIg0oNqcDoC88oqE/Be/ib8SMb6DbDXVKSeVrBm07fVRlkY+L3tCA==" - if b.Payload.TypeUrl != f.blockTypeURL { - return fmt.Errorf("block type url %q does not match expected type %q", b.Payload.TypeUrl, f.blockTypeURL) + typeURL := clean(b.Payload.TypeUrl) + if typeURL != f.blockTypeURL { + return fmt.Errorf("block type url %q does not match expected type %q", typeURL, f.blockTypeURL) } blockLine := fmt.Sprintf( @@ -50,3 +51,7 @@ func (f *FireBlockHandler) Handle(b *pbbstream.Block) error { fmt.Println(blockLine) return nil } + +func clean(in string) string { + return strings.Replace(in, "type.googleapis.com/", "", 1) +} diff --git a/blockpoller/handler_test.go b/blockpoller/handler_test.go new file mode 100644 index 0000000..81f3843 --- /dev/null +++ b/blockpoller/handler_test.go @@ -0,0 +1,24 @@ +package blockpoller + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFireBlockHandler_clean(t *testing.T) { + tests := []struct { + in string + expect string + }{ + {"type.googleapis.com/sf.bstream.v2.Block", "sf.bstream.v2.Block"}, + {"sf.bstream.v2.Block", "sf.bstream.v2.Block"}, + } + + for _, test := range tests { + t.Run(test.in, func(t *testing.T) { + assert.Equal(t, test.expect, clean(test.in)) + }) + } + +} From f7debde5b6ed14d78ee4ba7406cab9a22b47c6fe Mon Sep 17 00:00:00 2001 From: billettc Date: Thu, 23 Nov 2023 08:02:10 -0500 Subject: [PATCH 21/66] Move block printing logic to the Block interface Reorganized the block printing functionalities in the code. What used to be as BlockPrinterFunc has been incorporated into the Block interface as a method. 
This change allows each specific type of Block to control its own printing logic. This code restructuring promotes better encapsulation and cohesion. --- chain.go | 52 ------------------------------------------- tools_check.go | 12 +++------- tools_check_blocks.go | 12 ++++++---- tools_print.go | 19 ++++++++-------- types.go | 52 ++++++++++++++++++++++++++++--------------- 5 files changed, 54 insertions(+), 93 deletions(-) diff --git a/chain.go b/chain.go index ffc0396..2465d2f 100644 --- a/chain.go +++ b/chain.go @@ -3,7 +3,6 @@ package firecore import ( "context" "fmt" - "io" "runtime/debug" "strings" @@ -19,11 +18,6 @@ import ( "google.golang.org/protobuf/types/known/anypb" ) -// BlockPrinterFunc takes a chain agnostic [block] and prints it to a human readable form. -// -// See [ToolsConfig#BlockPrinter] for extra details about expected printing. -type BlockPrinterFunc func(block Block, alsoPrintTransactions bool, out io.Writer) error - // SanitizeBlockForCompareFunc takes a chain agnostic [block] and transforms it in-place, removing fields // that should not be compared. type SanitizeBlockForCompareFunc[B Block] func(block B) B @@ -160,32 +154,6 @@ type Chain[B Block] struct { } type ToolsConfig[B Block] struct { - // BlockPrinter represents a printing function that render a chain specific human readable - // form of the receive chain agnostic [bstream.Block]. This block is expected to be rendered as - // a single line for example on Ethereum rendering of a single block looks like: - // - // ``` - // Block #24924194 (01d6d349fbd3fa419182a2f0cf0b00714e101286650c239de8923caef6134b6c) 62 transactions, 607 calls - // ``` - // - // If the [alsoPrintTransactions] argument is true, each transaction of the block should also be printed, following - // directly the block line. Each transaction should also be on a single line, usually prefixed with a `- ` to make - // the rendering more appealing. 
- // - // For example on Ethereum rendering with [alsoPrintTransactions] being `true` looks like: - // - // ``` - // Block #24924194 (01d6d349fbd3fa419182a2f0cf0b00714e101286650c239de8923caef6134b6c) 62 transactions, 607 calls - // - Transaction 0xc7e04240d6f2cc5f382c478fd0a0b5c493463498c64b31477b95bded8cd12ab4 (10 calls) - // - Transaction 0xc7d8a698351eb1ac64acb76c8bf898365bb639865271add95d2c81650b2bd98c (4 calls) - // ``` - // - // The `out` parameter is used to write to the correct location. You can use [fmt.Fprintf] and [fmt.Fprintln] - // and use `out` as the output writer in your implementation. - // - // The [BlockPrinter] is optional, if nil, a default block printer will be used. It's important to note - // that the default block printer error out if `alsoPrintTransactions` is true. - BlockPrinter BlockPrinterFunc // SanitizeBlockForCompare is a function that takes a chain agnostic [block] and transforms it in-place, removing fields // that should not be compared. @@ -379,23 +347,3 @@ func findSetting(key string, settings []debug.BuildSetting) (value string) { return "" } - -func (c *Chain[B]) BlockPrinter() BlockPrinterFunc { - if c.Tools == nil || c.Tools.BlockPrinter == nil { - return defaultBlockPrinter - } - - return c.Tools.BlockPrinter -} - -func defaultBlockPrinter(block Block, alsoPrintTransactions bool, out io.Writer) error { - if alsoPrintTransactions { - return fmt.Errorf("transactions is not supported by the default block printer") - } - - if _, err := fmt.Fprintf(out, "Block #%d (%s)\n", block.GetFirehoseBlockNumber(), block.GetFirehoseBlockID()); err != nil { - return err - } - - return nil -} diff --git a/tools_check.go b/tools_check.go index 1ceb15f..bcf25c4 100644 --- a/tools_check.go +++ b/tools_check.go @@ -16,13 +16,11 @@ package firecore import ( "fmt" - "os" "strings" "github.com/dustin/go-humanize" "github.com/spf13/cobra" "github.com/streamingfast/bstream" - pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" 
"github.com/streamingfast/cli" "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dstore" @@ -63,9 +61,7 @@ func init() { } func configureToolsCheckCmd[B Block](chain *Chain[B]) { - blockPrinter := chain.BlockPrinter() - - toolsCheckMergedBlocksCmd.RunE = createToolsCheckMergedBlocksE(chain, blockPrinter) + toolsCheckMergedBlocksCmd.RunE = createToolsCheckMergedBlocksE(chain) toolsCheckMergedBlocksCmd.Example = ExamplePrefixed(chain, "tools check merged-blocks", ` "./sf-data/storage/merged-blocks" "gs:////" -s @@ -76,7 +72,7 @@ func configureToolsCheckCmd[B Block](chain *Chain[B]) { toolsCheckForksCmd.RunE = toolsCheckForksE } -func createToolsCheckMergedBlocksE[B Block](chain *Chain[B], blockPrinter BlockPrinterFunc) CommandExecutor { +func createToolsCheckMergedBlocksE[B Block](chain *Chain[B]) CommandExecutor { return func(cmd *cobra.Command, args []string) error { storeURL := args[0] fileBlockSize := uint64(100) @@ -95,9 +91,7 @@ func createToolsCheckMergedBlocksE[B Block](chain *Chain[B], blockPrinter BlockP printDetails = PrintFull } - return CheckMergedBlocks(cmd.Context(), chain, rootLog, storeURL, fileBlockSize, blockRange, func(block *pbbstream.Block) { - blockPrinter(block, false, os.Stdout) - }, printDetails) + return CheckMergedBlocks(cmd.Context(), chain, rootLog, storeURL, fileBlockSize, blockRange, printDetails) } } diff --git a/tools_check_blocks.go b/tools_check_blocks.go index c160460..cb6a602 100644 --- a/tools_check_blocks.go +++ b/tools_check_blocks.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io" + "os" "regexp" "strconv" @@ -27,7 +28,7 @@ const ( MaxUint64 = ^uint64(0) ) -func CheckMergedBlocks[B Block](ctx context.Context, chain *Chain[B], logger *zap.Logger, storeURL string, fileBlockSize uint64, blockRange tools.BlockRange, blockPrinter func(block *pbbstream.Block), printDetails PrintDetails) error { +func CheckMergedBlocks[B Block](ctx context.Context, chain *Chain[B], logger *zap.Logger, storeURL string, fileBlockSize 
uint64, blockRange tools.BlockRange, printDetails PrintDetails) error { readAllBlocks := printDetails != PrintNoDetails fmt.Printf("Checking block holes on %s\n", storeURL) if readAllBlocks { @@ -94,7 +95,7 @@ func CheckMergedBlocks[B Block](ctx context.Context, chain *Chain[B], logger *za expected = baseNum + fileBlockSize if readAllBlocks { - lowestBlockSegment, highestBlockSegment := validateBlockSegment(ctx, chain, blocksStore, filename, fileBlockSize, blockRange, blockPrinter, printDetails, tfdb) + lowestBlockSegment, highestBlockSegment := validateBlockSegment(ctx, chain, blocksStore, filename, fileBlockSize, blockRange, printDetails, tfdb) if lowestBlockSegment < lowestBlockSeen { lowestBlockSeen = lowestBlockSegment } @@ -170,7 +171,6 @@ func validateBlockSegment[B Block]( segment string, fileBlockSize uint64, blockRange tools.BlockRange, - blockPrinter func(block *pbbstream.Block), printDetails PrintDetails, tfdb *trackedForkDB, ) (lowestBlockSeen, highestBlockSeen uint64) { @@ -238,7 +238,11 @@ func validateBlockSegment[B Block]( seenBlockCount++ if printDetails == PrintStats { - blockPrinter(block) + err := block.PrintBlock(false, os.Stdout) + if err != nil { + fmt.Printf("❌ Unable to print block %s: %s\n", block.AsRef(), err) + continue + } } if printDetails == PrintFull { diff --git a/tools_print.go b/tools_print.go index 42d0fd2..850186d 100644 --- a/tools_print.go +++ b/tools_print.go @@ -62,13 +62,11 @@ func init() { } func configureToolsPrintCmd[B Block](chain *Chain[B]) { - blockPrinter := chain.BlockPrinter() - - toolsPrintOneBlockCmd.RunE = createToolsPrintOneBlockE(chain, blockPrinter) - toolsPrintMergedBlocksCmd.RunE = createToolsPrintMergedBlocksE(blockPrinter) + toolsPrintOneBlockCmd.RunE = createToolsPrintOneBlockE(chain) + toolsPrintMergedBlocksCmd.RunE = createToolsPrintMergedBlocksE() } -func createToolsPrintMergedBlocksE(blockPrinter BlockPrinterFunc) CommandExecutor { +func createToolsPrintMergedBlocksE() CommandExecutor { return 
func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() @@ -118,7 +116,7 @@ func createToolsPrintMergedBlocksE(blockPrinter BlockPrinterFunc) CommandExecuto seenBlockCount++ - if err := printBlock(block, outputMode, printTransactions, blockPrinter); err != nil { + if err := printBlock(block, outputMode, printTransactions); err != nil { // Error is ready to be passed to the user as-is return err } @@ -126,7 +124,7 @@ func createToolsPrintMergedBlocksE(blockPrinter BlockPrinterFunc) CommandExecuto } } -func createToolsPrintOneBlockE[B Block](chain *Chain[B], blockPrinter BlockPrinterFunc) CommandExecutor { +func createToolsPrintOneBlockE[B Block](chain *Chain[B]) CommandExecutor { return func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() @@ -185,7 +183,7 @@ func createToolsPrintOneBlockE[B Block](chain *Chain[B], blockPrinter BlockPrint return fmt.Errorf("reading block: %w", err) } - if err := printBlock(block, outputMode, printTransactions, blockPrinter); err != nil { + if err := printBlock(block, outputMode, printTransactions); err != nil { // Error is ready to be passed to the user as-is return err } @@ -216,10 +214,11 @@ func toolsPrintCmdGetOutputMode(cmd *cobra.Command) (PrintOutputMode, error) { return out, nil } -func printBlock(block Block, outputMode PrintOutputMode, printTransactions bool, blockPrinter BlockPrinterFunc) error { +func printBlock(block Block, outputMode PrintOutputMode, printTransactions bool) error { switch outputMode { case PrintOutputModeText: - if err := blockPrinter(block, printTransactions, os.Stdout); err != nil { + err := block.PrintBlock(printTransactions, os.Stdout) + if err != nil { return fmt.Errorf("block text printing: %w", err) } diff --git a/types.go b/types.go index bbd6bfd..2cf4caf 100644 --- a/types.go +++ b/types.go @@ -2,17 +2,16 @@ package firecore import ( "fmt" + "io" "time" - "google.golang.org/protobuf/types/known/timestamppb" - - "google.golang.org/protobuf/types/known/anypb" - 
"github.com/spf13/cobra" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/bstream/transform" "github.com/streamingfast/dstore" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/timestamppb" ) // Block represents the chain-specific Protobuf block. Chain specific's block @@ -64,6 +63,33 @@ type Block interface { // GetFirehoseBlockTime returns the block timestamp as a time.Time of when the block was // produced. This should the consensus agreed time of the block. GetFirehoseBlockTime() time.Time + + // PrintBlock is printing function that render a chain specific human readable + // form Block. This block is expected to be rendered as + // a single line for example on Ethereum rendering of a single block looks like: + // + // ``` + // Block #24924194 (01d6d349fbd3fa419182a2f0cf0b00714e101286650c239de8923caef6134b6c) 62 transactions, 607 calls + // ``` + // + // If the [alsoPrintTransactions] argument is true, each transaction of the block should also be printed, following + // directly the block line. Each transaction should also be on a single line, usually prefixed with a `- ` to make + // the rendering more appealing. + // + // For example on Ethereum rendering with [alsoPrintTransactions] being `true` looks like: + // + // ``` + // Block #24924194 (01d6d349fbd3fa419182a2f0cf0b00714e101286650c239de8923caef6134b6c) 62 transactions, 607 calls + // - Transaction 0xc7e04240d6f2cc5f382c478fd0a0b5c493463498c64b31477b95bded8cd12ab4 (10 calls) + // - Transaction 0xc7d8a698351eb1ac64acb76c8bf898365bb639865271add95d2c81650b2bd98c (4 calls) + // ``` + // + // The `out` parameter is used to write to the correct location. You can use [fmt.Fprintf] and [fmt.Fprintln] + // and use `out` as the output writer in your implementation. + // + // The [BlockPrinter] is optional, if nil, a default block printer will be used. 
It's important to note + // that the default block printer error out if `alsoPrintTransactions` is true. + PrintBlock(printTransactions bool, out io.Writer) error } // BlockLIBNumDerivable is an optional interface that can be implemented by your chain's block model Block @@ -129,16 +155,6 @@ func NewBlockEncoder() BlockEncoder { } func EncodeBlock(b Block) (blk *pbbstream.Block, err error) { - real := b - if b, ok := b.(BlockEnveloppe); ok { - real = b.Block - } - - content, err := proto.Marshal(real) - if err != nil { - return nil, fmt.Errorf("unable to marshal to binary form: %s", err) - } - v, ok := b.(BlockLIBNumDerivable) if !ok { return nil, fmt.Errorf( @@ -150,9 +166,9 @@ func EncodeBlock(b Block) (blk *pbbstream.Block, err error) { ) } - blockPayload := &anypb.Any{} - if err := proto.Unmarshal(content, blockPayload); err != nil { - return nil, fmt.Errorf("unmarshaling block payload: %w", err) + anyBlock, err := anypb.New(b) + if err != nil { + return nil, fmt.Errorf("create any block: %w", err) } bstreamBlock := &pbbstream.Block{ @@ -161,7 +177,7 @@ func EncodeBlock(b Block) (blk *pbbstream.Block, err error) { ParentId: b.GetFirehoseBlockParentID(), Timestamp: timestamppb.New(b.GetFirehoseBlockTime()), LibNum: v.GetFirehoseBlockLIBNum(), - Payload: blockPayload, + Payload: anyBlock, } return bstreamBlock, nil From 3f07b24b6efed50ed0290f2a298fbc5aac5d9ea2 Mon Sep 17 00:00:00 2001 From: billettc Date: Thu, 23 Nov 2023 08:03:35 -0500 Subject: [PATCH 22/66] bump bstream --- go.mod | 2 +- go.sum | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index fd4b5d7..5f03fd4 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 - github.com/streamingfast/bstream v0.0.2-0.20231122151642-aa0d174c0e39 + github.com/streamingfast/bstream v0.0.2-0.20231123130020-ad84cce9666d github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 
github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c diff --git a/go.sum b/go.sum index 93fc3c6..9891227 100644 --- a/go.sum +++ b/go.sum @@ -574,10 +574,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/streamingfast/bstream v0.0.2-0.20231121211820-e45c1b42f472 h1:HhWgNV7PIdKiK7IZIXLQ/3mNsERqLm5ztzLwn43AcOo= -github.com/streamingfast/bstream v0.0.2-0.20231121211820-e45c1b42f472/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= -github.com/streamingfast/bstream v0.0.2-0.20231122151642-aa0d174c0e39 h1:4MP75Z2TGPh/TpaoqFZH1fDI9yEv6ZeShqqjKMb6r/A= -github.com/streamingfast/bstream v0.0.2-0.20231122151642-aa0d174c0e39/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= +github.com/streamingfast/bstream v0.0.2-0.20231123130020-ad84cce9666d h1:BvvIKTekSj8PAAda313Q6xd91w3gD1nDgOrG/5+YIZk= +github.com/streamingfast/bstream v0.0.2-0.20231123130020-ad84cce9666d/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 h1:UxJUTcEVkdZy8N77E3exz0iNlgQuxl4m220GPvzdZ2s= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80/go.mod h1:QxjVH73Lkqk+mP8bndvhMuQDUINfkgsYhdCH/5TJFKI= github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 h1:49JYZkn8ALGe+LhcACZyX3L9B8tIxRZ3F3l+OxmNMhY= From 5bd6074c6f6d102624c0120e30b233795413fdb4 Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Thu, 23 Nov 2023 10:19:16 -0500 Subject: [PATCH 23/66] clean up naming --- blockpoller/poller.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/blockpoller/poller.go b/blockpoller/poller.go index 7479439..fe61e5d 
100644 --- a/blockpoller/poller.go +++ b/blockpoller/poller.go @@ -37,14 +37,14 @@ type BlockPoller struct { func New( blockFetcher BlockFetcher, - blockFinalizer BlockHandler, + blockHandler BlockHandler, opts ...Option, ) *BlockPoller { b := &BlockPoller{ Shutter: shutter.New(), blockFetcher: blockFetcher, - blockHandler: blockFinalizer, + blockHandler: blockHandler, fetchBlockRetryCount: 4, logger: zap.NewNop(), } From 644e8a86ea6dd5f5de6743fa698fb88775bdd3e8 Mon Sep 17 00:00:00 2001 From: billettc Date: Thu, 23 Nov 2023 10:29:16 -0500 Subject: [PATCH 24/66] Refactor variable names in blockpoller The variable 'finalizedBlockNum' in blockpoller has been renamed to 'chainLatestFinalizeBlock' for better clarity and understanding. The change also impacts the 'resolveStartBlock' function arguments and 'Run' function parameters and logging. An extra newline in 'tools_checkmergedbatch.go' has been removed as well. --- blockpoller/poller.go | 12 ++++++------ tools_checkmergedbatch.go | 1 - 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/blockpoller/poller.go b/blockpoller/poller.go index fe61e5d..f8d35b3 100644 --- a/blockpoller/poller.go +++ b/blockpoller/poller.go @@ -56,12 +56,12 @@ func New( return b } -func (p *BlockPoller) Run(ctx context.Context, startBlockNum uint64, finalizedBlockNum bstream.BlockRef) error { +func (p *BlockPoller) Run(ctx context.Context, startBlockNum uint64, chainLatestFinalizeBlock bstream.BlockRef) error { p.startBlockNumGate = startBlockNum - resolveStartBlockNum := resolveStartBlock(startBlockNum, finalizedBlockNum.Num()) + resolveStartBlockNum := resolveStartBlock(startBlockNum, chainLatestFinalizeBlock.Num()) p.logger.Info("starting poller", zap.Uint64("start_block_num", startBlockNum), - zap.Stringer("finalized_block_num", finalizedBlockNum), + zap.Stringer("chain_latest_finalize_block", chainLatestFinalizeBlock), zap.Uint64("resolved_start_block_num", resolveStartBlockNum), ) @@ -209,9 +209,9 @@ func prevBlkInSeg(blocks 
[]*forkable.Block) uint64 { return blocks[0].Object.(*block).ParentNum } -func resolveStartBlock(startBlockNum, finalizedBlockNum uint64) uint64 { - if finalizedBlockNum < startBlockNum { - return finalizedBlockNum +func resolveStartBlock(startBlockNum, chainLatestFinalizeBlock uint64) uint64 { + if chainLatestFinalizeBlock < startBlockNum { + return chainLatestFinalizeBlock } return startBlockNum } diff --git a/tools_checkmergedbatch.go b/tools_checkmergedbatch.go index 26b13c7..a4cf4e5 100644 --- a/tools_checkmergedbatch.go +++ b/tools_checkmergedbatch.go @@ -31,7 +31,6 @@ var toolsCheckMergedBlocksBatchCmd = &cobra.Command{ func init() { toolsCheckCmd.AddCommand(toolsCheckMergedBlocksBatchCmd) - toolsCheckMergedBlocksBatchCmd.PersistentFlags().String("output-to-store", "", "If non-empty, an empty file called .broken will be created for every problematic merged-blocks-file. This is a convenient way to gather the results from multiple parallel processes.") } From 80ea795cc6bfc94f4f10589f41caf1c9be529202 Mon Sep 17 00:00:00 2001 From: billettc Date: Thu, 23 Nov 2023 13:58:21 -0500 Subject: [PATCH 25/66] Add error handling and logging in saveState function Updated the saveState function to include error handling for state saving and directory creation. Additionally, log cursor saving details to provide useful information for debugging and monitoring. 
--- blockpoller/poller.go | 5 ++++- blockpoller/state_file.go | 13 +++++++++---- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/blockpoller/poller.go b/blockpoller/poller.go index f8d35b3..cdbe54b 100644 --- a/blockpoller/poller.go +++ b/blockpoller/poller.go @@ -142,7 +142,10 @@ func (p *BlockPoller) processBlock(currentState *cursor, blkNum uint64) (uint64, p.forkDB.SetLIB(blk.AsRef(), blk.LibNum) p.forkDB.PurgeBeforeLIB(0) - p.saveState(blocks) + err := p.saveState(blocks) + if err != nil { + return 0, fmt.Errorf("saving state: %w", err) + } return nextBlkInSeg(blocks), nil } diff --git a/blockpoller/state_file.go b/blockpoller/state_file.go index 745dc0a..def719f 100644 --- a/blockpoller/state_file.go +++ b/blockpoller/state_file.go @@ -52,6 +52,7 @@ func getState(stateStorePath string) (*stateFile, error) { } func (p *BlockPoller) saveState(blocks []*forkable.Block) error { + p.logger.Debug("saving cursor", zap.String("state_store_path", p.stateStorePath)) if p.stateStorePath == "" { return nil } @@ -72,14 +73,18 @@ func (p *BlockPoller) saveState(blocks []*forkable.Block) error { return fmt.Errorf("unable to marshal stateFile: %w", err) } - filepath := filepath.Join(p.stateStorePath, "cursor.json") + err = os.MkdirAll(p.stateStorePath, os.ModePerm) + if err != nil { + return fmt.Errorf("making state store path: %w", err) + } + fpath := filepath.Join(p.stateStorePath, "cursor.json") - if err := os.WriteFile(filepath, cnt, os.ModePerm); err != nil { - return fmt.Errorf("unable to open cursor file %s: %w", filepath, err) + if err := os.WriteFile(fpath, cnt, os.ModePerm); err != nil { + return fmt.Errorf("unable to open cursor file %s: %w", fpath, err) } p.logger.Info("saved cursor", - zap.Reflect("filepath", filepath), + zap.Reflect("filepath", fpath), zap.Stringer("last_fired_block", sf.LastFiredBlock), zap.Stringer("lib", sf.Lib), zap.Int("block_count", len(sf.Blocks)), From c9396c2c7d642a07cfca777e031fa301e3716583 Mon Sep 17 00:00:00 2001 
From: billettc Date: Thu, 23 Nov 2023 14:34:16 -0500 Subject: [PATCH 26/66] Refactor print statement in blockpoller Adjusted FireBlockHandler.Init(). Removed an unnecessary space in the printed string. --- blockpoller/handler.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blockpoller/handler.go b/blockpoller/handler.go index 802ba82..98b4b0f 100644 --- a/blockpoller/handler.go +++ b/blockpoller/handler.go @@ -28,7 +28,7 @@ func NewFireBlockHandler(blockTypeURL string) *FireBlockHandler { } func (f *FireBlockHandler) Init() { - fmt.Println("FIRE INIT 1.0 ", f.blockTypeURL) + fmt.Println("FIRE INIT 1.0", f.blockTypeURL) } func (f *FireBlockHandler) Handle(b *pbbstream.Block) error { From 81e1e911138b47adcee0aaf9afbc66d23daef1a7 Mon Sep 17 00:00:00 2001 From: billettc Date: Thu, 23 Nov 2023 15:45:18 -0500 Subject: [PATCH 27/66] Remove protobuf usage in consolereader This change eliminates the use of protobuf for unmarshaling payload in consolereader.go. Instead, we directly assign expected typeUrl and payload value to blockPayload. This significantly simplifies the code, making it easier to understand and maintain. 
--- consolereader.go | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/consolereader.go b/consolereader.go index cdcc887..994c956 100644 --- a/consolereader.go +++ b/consolereader.go @@ -14,7 +14,6 @@ import ( "github.com/streamingfast/firehose-core/node-manager/mindreader" "github.com/streamingfast/logging" "go.uber.org/zap" - "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" ) @@ -136,15 +135,9 @@ func (ctx *parseCtx) readBlock(line string) (out *pbbstream.Block, err error) { payload, err := base64.StdEncoding.DecodeString(chunks[6]) - blockPayload := &anypb.Any{} - if err := proto.Unmarshal(payload, blockPayload); err != nil { - return nil, fmt.Errorf("unmarshaling block payload: %w", err) - } - - typeChunks := strings.Split(blockPayload.TypeUrl, "/") - payloadType := typeChunks[len(typeChunks)-1] - if payloadType != ctx.protoMessageType { - return nil, fmt.Errorf("invalid payload type, expected %q, got %q", ctx.protoMessageType, blockPayload.TypeUrl) + blockPayload := &anypb.Any{ + TypeUrl: ctx.protoMessageType, + Value: payload, } block := &pbbstream.Block{ From 19ad29a2cd6411becbfd1500590094ebe59f1473 Mon Sep 17 00:00:00 2001 From: billettc Date: Thu, 23 Nov 2023 15:49:48 -0500 Subject: [PATCH 28/66] fix test --- consolereader_test.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/consolereader_test.go b/consolereader_test.go index 80f550d..df1e993 100644 --- a/consolereader_test.go +++ b/consolereader_test.go @@ -10,7 +10,6 @@ import ( "github.com/streamingfast/firehose-core/test" "github.com/stretchr/testify/require" "go.uber.org/zap" - "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" ) @@ -35,7 +34,6 @@ func Test_Ctx_readBlock(t *testing.T) { } anypbBlock, err := anypb.New(&pbBlock) - payload, err := proto.Marshal(anypbBlock) require.NoError(t, err) nowNano := time.Now().UnixNano() @@ -47,7 +45,7 @@ func Test_Ctx_readBlock(t *testing.T) { 
parentHash, libNumber, nowNano, - base64.StdEncoding.EncodeToString(payload), + base64.StdEncoding.EncodeToString(anypbBlock.Value), ) block, err := ctx.readBlock(line) From 679c34247e75feabe21c3d7b274c8fca1c424f4a Mon Sep 17 00:00:00 2001 From: billettc Date: Thu, 23 Nov 2023 16:38:45 -0500 Subject: [PATCH 29/66] github workflows --- .github/workflows/docker.yml | 86 ++++++++++++++++++++++++++++++++++++ Dockerfile | 21 +++++++++ 2 files changed, 107 insertions(+) create mode 100644 .github/workflows/docker.yml create mode 100644 Dockerfile diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 0000000..7799d8a --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,86 @@ +name: Build docker image + +on: + push: + tags: + - "v*" + branches: + - "develop" + workflow_dispatch: + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + +jobs: + build: + runs-on: ubuntu-20.04 + + permissions: + contents: read + id-token: write + + strategy: + matrix: + go-version: [ 1.20.x ] + + outputs: + tags: ${{ steps.meta.outputs.tags }} + + steps: + - uses: actions/checkout@v3 + +# - id: 'auth' +# name: 'Authenticate to Google Cloud' +# uses: 'google-github-actions/auth@v0' +# with: +# workload_identity_provider: ${{ secrets.GCR_WORKLOAD_IDENTITY_PROVIDER }} +# service_account: ${{ secrets.GCR_SERVICE_ACCOUNT }} +# token_format: 'access_token' +# +# - name: Login to GCR +# uses: docker/login-action@v1 +# with: +# registry: gcr.io +# username: oauth2accesstoken +# password: ${{ steps.auth.outputs.access_token }} + + - name: Get repo name + id: extract_repo_name + shell: bash + run: | + echo "REPO_NAME=$(basename ${{ github.repository }})" >> $GITHUB_ENV + + - name: Generate docker tags/labels from github build context + id: meta + uses: docker/metadata-action@v4 + with: + images: ${{ env.REGISTRY }}/${{ env.REPO_NAME }} + tags: | + type=ref,event=tag + type=sha,prefix=,enable=true + type=raw,enable=${{ github.ref == 
'refs/heads/develop' }},value=develop + flavor: | + latest=${{ startsWith(github.ref, 'refs/tags/') }} + + - name: Build and push Docker image + uses: docker/build-push-action@v4 + with: + context: . + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + + + slack-notifications: + if: ${{ !startsWith(github.ref, 'refs/tags/') && github.event_name != 'workflow_dispatch' }} + needs: [ build ] + runs-on: ubuntu-20.04 + steps: + - name: Slack notification + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + uses: Ilshidur/action-slack@2.0.2 + with: + args: | + :done: *${{ github.repository }}* Success building docker image from ${{ github.ref_type }} _${{ github.ref_name }}_ (${{ github.actor }}) :sparkling_heart: ```${{ join(needs.build.outputs.tags, ' ') }}``` \ No newline at end of file diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..579fef0 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,21 @@ +FROM golang:1.21-alpine as build +WORKDIR /app + +COPY go.mod go.sum ./ +RUN go mod download + +COPY . 
./ + +RUN go build ./cmd/firecore + +#### + +FROM alpine:edge + +RUN apk --no-cache add ca-certificates tzdata + +WORKDIR /app + +COPY --from=build /app/firecore /app/firecore + +ENTRYPOINT [ "/app/firecore" ] \ No newline at end of file From b53f673ca6df4234e228f87b8c487b223eb19fce Mon Sep 17 00:00:00 2001 From: billettc Date: Thu, 23 Nov 2023 16:42:30 -0500 Subject: [PATCH 30/66] github workflows switch branch --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 7799d8a..1a19b66 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -5,7 +5,7 @@ on: tags: - "v*" branches: - - "develop" + - "block_any" workflow_dispatch: env: From f3ec523e53f8b825b2a1e28c44e7e172669bb162 Mon Sep 17 00:00:00 2001 From: billettc Date: Thu, 23 Nov 2023 16:55:13 -0500 Subject: [PATCH 31/66] github workflows add login --- .github/workflows/docker.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 1a19b66..c85b49f 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -44,6 +44,12 @@ jobs: # registry: gcr.io # username: oauth2accesstoken # password: ${{ steps.auth.outputs.access_token }} + - name: Log in to the Container registry + uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - name: Get repo name id: extract_repo_name From c9041e5b477003483bd10e4324d45eff244d9030 Mon Sep 17 00:00:00 2001 From: billettc Date: Thu, 23 Nov 2023 17:11:06 -0500 Subject: [PATCH 32/66] github workflows fix image name --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index c85b49f..79b7068 100644 --- a/.github/workflows/docker.yml +++ 
b/.github/workflows/docker.yml @@ -61,7 +61,7 @@ jobs: id: meta uses: docker/metadata-action@v4 with: - images: ${{ env.REGISTRY }}/${{ env.REPO_NAME }} + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} tags: | type=ref,event=tag type=sha,prefix=,enable=true From 2e3f5e2f468776f6031c214b006c6a58c1d65ac9 Mon Sep 17 00:00:00 2001 From: billettc Date: Thu, 23 Nov 2023 17:23:08 -0500 Subject: [PATCH 33/66] github workflows fix permission --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 79b7068..3f99e1d 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -18,7 +18,7 @@ jobs: permissions: contents: read - id-token: write + packages: write strategy: matrix: From e9da1f702e0d37e25bbbe01f3dc55db7229f24e0 Mon Sep 17 00:00:00 2001 From: billettc Date: Fri, 24 Nov 2023 16:00:17 -0500 Subject: [PATCH 34/66] added grpc_health_probe to images --- .github/workflows/docker.yml | 14 -------------- Dockerfile | 7 ++++++- 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 3f99e1d..aaf2f7c 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -30,20 +30,6 @@ jobs: steps: - uses: actions/checkout@v3 -# - id: 'auth' -# name: 'Authenticate to Google Cloud' -# uses: 'google-github-actions/auth@v0' -# with: -# workload_identity_provider: ${{ secrets.GCR_WORKLOAD_IDENTITY_PROVIDER }} -# service_account: ${{ secrets.GCR_SERVICE_ACCOUNT }} -# token_format: 'access_token' -# -# - name: Login to GCR -# uses: docker/login-action@v1 -# with: -# registry: gcr.io -# username: oauth2accesstoken -# password: ${{ steps.auth.outputs.access_token }} - name: Log in to the Container registry uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9 with: diff --git a/Dockerfile b/Dockerfile index 579fef0..83bf5ef 100644 --- a/Dockerfile +++ 
b/Dockerfile @@ -12,7 +12,12 @@ RUN go build ./cmd/firecore FROM alpine:edge -RUN apk --no-cache add ca-certificates tzdata + +RUN apk --no-cache add \ + ca-certificates htop iotop sysstat \ + strace lsof curl jq tzdata + +RUN mkdir -p /app/ && curl -Lo /app/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/v0.4.12/grpc_health_probe-linux-amd64 && chmod +x /app/grpc_health_probe WORKDIR /app From b2cf970bbae90a2052d4ab4f97b591fb899043e9 Mon Sep 17 00:00:00 2001 From: billettc Date: Mon, 27 Nov 2023 08:14:57 -0500 Subject: [PATCH 35/66] Move the responsibility of waiting between block fetch to the block fetcher ... It will be easier to throttle the block flow from the fetcher since rate limit and API restriction will vary depending on the chain and service provider ... --- blockpoller/fetcher.go | 2 -- blockpoller/poller.go | 4 ---- 2 files changed, 6 deletions(-) diff --git a/blockpoller/fetcher.go b/blockpoller/fetcher.go index f0b30ec..5bbaf77 100644 --- a/blockpoller/fetcher.go +++ b/blockpoller/fetcher.go @@ -2,12 +2,10 @@ package blockpoller import ( "context" - "time" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" ) type BlockFetcher interface { - PollingInterval() time.Duration Fetch(ctx context.Context, blkNum uint64) (*pbbstream.Block, error) } diff --git a/blockpoller/poller.go b/blockpoller/poller.go index cdbe54b..e9302d2 100644 --- a/blockpoller/poller.go +++ b/blockpoller/poller.go @@ -3,7 +3,6 @@ package blockpoller import ( "context" "fmt" - "time" "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/forkable" @@ -85,7 +84,6 @@ func (p *BlockPoller) run(resolvedStartBlock bstream.BlockRef) (err error) { currentCursor := &cursor{state: ContinuousSegState, logger: p.logger} blkIter := resolvedStartBlock.Num() - intervalDuration := p.blockFetcher.PollingInterval() for { if p.IsTerminating() { p.logger.Info("block poller is terminating") @@ -99,8 +97,6 @@ func (p *BlockPoller) 
run(resolvedStartBlock bstream.BlockRef) (err error) { if p.IsTerminating() { p.logger.Info("block poller is terminating") } - - time.Sleep(intervalDuration) } } From 31d5902bc074eaa2c312044cc42c5cbbdd1952fb Mon Sep 17 00:00:00 2001 From: billettc Date: Mon, 27 Nov 2023 12:21:39 -0500 Subject: [PATCH 36/66] Update block printing functions to support chain context The block printing functions have been updated to include chain context. This change allows for the decoding and printing of blocks to be chain-specific. --- tools_print.go | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/tools_print.go b/tools_print.go index 850186d..bba8e4d 100644 --- a/tools_print.go +++ b/tools_print.go @@ -63,10 +63,10 @@ func init() { func configureToolsPrintCmd[B Block](chain *Chain[B]) { toolsPrintOneBlockCmd.RunE = createToolsPrintOneBlockE(chain) - toolsPrintMergedBlocksCmd.RunE = createToolsPrintMergedBlocksE() + toolsPrintMergedBlocksCmd.RunE = createToolsPrintMergedBlocksE(chain) } -func createToolsPrintMergedBlocksE() CommandExecutor { +func createToolsPrintMergedBlocksE[B Block](chain *Chain[B]) CommandExecutor { return func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() @@ -116,7 +116,7 @@ func createToolsPrintMergedBlocksE() CommandExecutor { seenBlockCount++ - if err := printBlock(block, outputMode, printTransactions); err != nil { + if err := printBlock(block, chain, outputMode, printTransactions); err != nil { // Error is ready to be passed to the user as-is return err } @@ -183,7 +183,7 @@ func createToolsPrintOneBlockE[B Block](chain *Chain[B]) CommandExecutor { return fmt.Errorf("reading block: %w", err) } - if err := printBlock(block, outputMode, printTransactions); err != nil { + if err := printBlock(block, chain, outputMode, printTransactions); err != nil { // Error is ready to be passed to the user as-is return err } @@ -214,12 +214,12 @@ func toolsPrintCmdGetOutputMode(cmd *cobra.Command) 
(PrintOutputMode, error) { return out, nil } -func printBlock(block Block, outputMode PrintOutputMode, printTransactions bool) error { +func printBlock[B Block](pbBlock *pbbstream.Block, chain *Chain[B], outputMode PrintOutputMode, printTransactions bool) error { switch outputMode { case PrintOutputModeText: - err := block.PrintBlock(printTransactions, os.Stdout) + err := pbBlock.PrintBlock(printTransactions, os.Stdout) if err != nil { - return fmt.Errorf("block text printing: %w", err) + return fmt.Errorf("pbBlock text printing: %w", err) } case PrintOutputModeJSON, PrintOutputModeJSONL: @@ -245,9 +245,20 @@ func printBlock(block Block, outputMode PrintOutputMode, printTransactions bool) ) } - err := json.MarshalEncode(encoder, block, json.WithMarshalers(marshallers)) + var marshallableBlock Block = pbBlock + chainBlock := chain.BlockFactory() + if _, ok := chainBlock.(*pbbstream.Block); !ok { + + marshallableBlock = chainBlock + err := pbBlock.Payload.UnmarshalTo(marshallableBlock) + if err != nil { + return fmt.Errorf("pbBlock payload unmarshal: %w", err) + } + } + + err := json.MarshalEncode(encoder, marshallableBlock, json.WithMarshalers(marshallers)) if err != nil { - return fmt.Errorf("block JSON printing: json marshal: %w", err) + return fmt.Errorf("pbBlock JSON printing: json marshal: %w", err) } } From bc9230582b7213c5def7fa38983abd34572b79ab Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Mon, 27 Nov 2023 13:21:07 -0500 Subject: [PATCH 37/66] fix logger naming --- blockpoller/init_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blockpoller/init_test.go b/blockpoller/init_test.go index fdf9f52..4528af5 100644 --- a/blockpoller/init_test.go +++ b/blockpoller/init_test.go @@ -14,7 +14,7 @@ import ( "go.uber.org/zap/zapcore" ) -var logger, tracer = logging.PackageLogger("forkhandler", "github.com/streamingfast/firehose-bitcoin/forkhandler.test") +var logger, tracer = logging.PackageLogger("forkhandler", 
"github.com/streamingfast/firehose-core/forkhandler.test") func init() { logging.InstantiateLoggers(logging.WithDefaultLevel(zapcore.DebugLevel)) From f5e9f304eac25a5635adcb08f730de43c03ed369 Mon Sep 17 00:00:00 2001 From: billettc Date: Tue, 28 Nov 2023 14:05:42 -0500 Subject: [PATCH 38/66] Implement dynamic protobuf parsing in block printing The update introduces dynamic parsing of Protocol Buffer files in block printing functionalities. New `dynamicPrinter` structure and factory method have been incorporated to facilitate protobuf file parsing. --- tools_print.go | 141 ++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 127 insertions(+), 14 deletions(-) diff --git a/tools_print.go b/tools_print.go index bba8e4d..db83e3b 100644 --- a/tools_print.go +++ b/tools_print.go @@ -19,19 +19,24 @@ import ( "fmt" "io" "os" + "os/user" + "path/filepath" "strconv" + "strings" "github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" + "github.com/jhump/protoreflect/desc" + "github.com/jhump/protoreflect/desc/protoparse" + "github.com/jhump/protoreflect/dynamic" "github.com/mr-tron/base58" - - pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" - "github.com/spf13/cobra" "github.com/streamingfast/bstream" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dstore" "github.com/streamingfast/firehose-core/tools" + "google.golang.org/protobuf/proto" ) var toolsPrintCmd = &cobra.Command{ @@ -58,6 +63,7 @@ func init() { toolsPrintCmd.AddCommand(toolsPrintMergedBlocksCmd) toolsPrintCmd.PersistentFlags().StringP("output", "o", "text", "Output mode for block printing, either 'text', 'json' or 'jsonl'") + toolsPrintCmd.PersistentFlags().StringSlice("proto-paths", []string{"~/.proto"}, "Paths to proto files to use for dynamic decoding of blocks") toolsPrintCmd.PersistentFlags().Bool("transactions", false, "When in 'text' output mode, also print transactions 
summary") } @@ -76,6 +82,7 @@ func createToolsPrintMergedBlocksE[B Block](chain *Chain[B]) CommandExecutor { } printTransactions := sflags.MustGetBool(cmd, "transactions") + protoPaths := sflags.MustGetStringSlice(cmd, "proto-paths") storeURL := args[0] store, err := dstore.NewDBinStore(storeURL) @@ -103,6 +110,11 @@ func createToolsPrintMergedBlocksE[B Block](chain *Chain[B]) CommandExecutor { return err } + dPrinter, err := newDynamicPrinter(protoPaths) + if err != nil { + return fmt.Errorf("unable to create dynamic printer: %w", err) + } + seenBlockCount := 0 for { block, err := readerFactory.Read() @@ -116,7 +128,7 @@ func createToolsPrintMergedBlocksE[B Block](chain *Chain[B]) CommandExecutor { seenBlockCount++ - if err := printBlock(block, chain, outputMode, printTransactions); err != nil { + if err := printBlock(block, chain, outputMode, printTransactions, dPrinter); err != nil { // Error is ready to be passed to the user as-is return err } @@ -139,6 +151,7 @@ func createToolsPrintOneBlockE[B Block](chain *Chain[B]) CommandExecutor { } printTransactions := sflags.MustGetBool(cmd, "transactions") + protoPaths := sflags.MustGetStringSlice(cmd, "proto-paths") storeURL := args[0] store, err := dstore.NewDBinStore(storeURL) @@ -161,6 +174,10 @@ func createToolsPrintOneBlockE[B Block](chain *Chain[B]) CommandExecutor { return fmt.Errorf("unable to find on block files: %w", err) } + dPrinter, err := newDynamicPrinter(protoPaths) + if err != nil { + return fmt.Errorf("unable to create dynamic printer: %w", err) + } for _, filepath := range files { reader, err := store.OpenObject(ctx, filepath) if err != nil { @@ -183,7 +200,7 @@ func createToolsPrintOneBlockE[B Block](chain *Chain[B]) CommandExecutor { return fmt.Errorf("reading block: %w", err) } - if err := printBlock(block, chain, outputMode, printTransactions); err != nil { + if err := printBlock(block, chain, outputMode, printTransactions, dPrinter); err != nil { // Error is ready to be passed to the user as-is 
return err } @@ -194,13 +211,6 @@ func createToolsPrintOneBlockE[B Block](chain *Chain[B]) CommandExecutor { //go:generate go-enum -f=$GOFILE --marshal --names --nocase -// ENUM( -// -// Text -// JSON -// JSONL -// -// ) type PrintOutputMode uint func toolsPrintCmdGetOutputMode(cmd *cobra.Command) (PrintOutputMode, error) { @@ -214,7 +224,10 @@ func toolsPrintCmdGetOutputMode(cmd *cobra.Command) (PrintOutputMode, error) { return out, nil } -func printBlock[B Block](pbBlock *pbbstream.Block, chain *Chain[B], outputMode PrintOutputMode, printTransactions bool) error { +func printBlock[B Block](pbBlock *pbbstream.Block, chain *Chain[B], outputMode PrintOutputMode, printTransactions bool, dPrinter *dynamicPrinter) error { + if pbBlock == nil { + return fmt.Errorf("block is nil") + } switch outputMode { case PrintOutputModeText: err := pbBlock.PrintBlock(printTransactions, os.Stdout) @@ -247,9 +260,20 @@ func printBlock[B Block](pbBlock *pbbstream.Block, chain *Chain[B], outputMode P var marshallableBlock Block = pbBlock chainBlock := chain.BlockFactory() - if _, ok := chainBlock.(*pbbstream.Block); !ok { + isLegacyBlock := chainBlock == nil + if isLegacyBlock { + err := proto.Unmarshal(pbBlock.GetPayloadBuffer(), chainBlock) + if err != nil { + return fmt.Errorf("unmarshalling legacy pb block : %w", err) + } + marshallableBlock = chainBlock + } else if _, ok := chainBlock.(*pbbstream.Block); ok { + return dPrinter.printBlock(pbBlock, encoder, marshallers) + + } else { marshallableBlock = chainBlock + err := pbBlock.Payload.UnmarshalTo(marshallableBlock) if err != nil { return fmt.Errorf("pbBlock payload unmarshal: %w", err) @@ -264,3 +288,92 @@ func printBlock[B Block](pbBlock *pbbstream.Block, chain *Chain[B], outputMode P return nil } + +type dynamicPrinter struct { + fileDescriptors []*desc.FileDescriptor +} + +func newDynamicPrinter(importPaths []string) (*dynamicPrinter, error) { + fileDescriptors, err := parseProtoFiles(importPaths) + if err != nil { + return 
nil, fmt.Errorf("parsing proto files: %w", err) + } + return &dynamicPrinter{ + fileDescriptors: fileDescriptors, + }, nil +} + +func (d *dynamicPrinter) printBlock(block *pbbstream.Block, encoder *jsontext.Encoder, marshalers *json.Marshalers) error { + for _, fd := range d.fileDescriptors { + md := fd.FindSymbol(block.Payload.TypeUrl) + if md != nil { + dynMsg := dynamic.NewMessageFactoryWithDefaults().NewDynamicMessage(md.(*desc.MessageDescriptor)) + if err := dynMsg.Unmarshal(block.Payload.Value); err != nil { + return fmt.Errorf("unmarshalling block: %w", err) + } + err := json.MarshalEncode(encoder, dynMsg, json.WithMarshalers(marshalers)) + if err != nil { + return fmt.Errorf("pbBlock JSON printing: json marshal: %w", err) + } + return nil + } + } + return fmt.Errorf("no message descriptor in proto paths for type url %q", block.Payload.TypeUrl) +} + +func parseProtoFiles(importPaths []string) (fds []*desc.FileDescriptor, err error) { + usr, err := user.Current() + if err != nil { + return nil, fmt.Errorf("getting current user: %w", err) + } + userDir := usr.HomeDir + + var ip []string + for _, importPath := range importPaths { + if importPath == "~" { + importPath = userDir + } else if strings.HasPrefix(importPath, "~/") { + importPath = filepath.Join(userDir, importPath[2:]) + } + + importPath, err = filepath.Abs(importPath) + if err != nil { + return nil, fmt.Errorf("getting absolute path for %q: %w", importPath, err) + } + + if !strings.HasSuffix(importPath, "/") { + importPath += "/" + } + ip = append(ip, importPath) + } + + fmt.Println("importPaths", importPaths) + + parser := protoparse.Parser{ + ImportPaths: ip, + } + + var protoFiles []string + for _, importPath := range ip { + err := filepath.Walk(importPath, + func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if strings.HasSuffix(path, ".proto") && !info.IsDir() { + protoFiles = append(protoFiles, strings.TrimPrefix(path, importPath)) + } + return nil + }) 
+ if err != nil { + return nil, fmt.Errorf("walking import path %q: %w", importPath, err) + } + } + + fds, err = parser.ParseFiles(protoFiles...) + if err != nil { + return nil, fmt.Errorf("parsing proto files: %w", err) + } + return + +} From 7aa2064fa2162c27724038ed0202e7f012b5eaaf Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Wed, 29 Nov 2023 19:15:34 -0500 Subject: [PATCH 39/66] added the ability to ignore the cursor on the block poller --- blockpoller/options.go | 8 ++++++++ blockpoller/poller.go | 3 ++- blockpoller/state_file.go | 18 +++++++++++++++--- blockpoller/state_file_test.go | 4 ++-- 4 files changed, 27 insertions(+), 6 deletions(-) diff --git a/blockpoller/options.go b/blockpoller/options.go index 5554783..98231b4 100644 --- a/blockpoller/options.go +++ b/blockpoller/options.go @@ -16,6 +16,14 @@ func WithStoringState(stateStorePath string) Option { } } +// WithCursorPurged ensures the poller will ignore the cursor and start from the startBlockNum +// the cursor will still be saved as the poller progresses +func WithCursorPurged() Option { + return func(p *BlockPoller) { + p.ignoreCursor = true + } +} + func WithLogger(logger *zap.Logger) Option { return func(p *BlockPoller) { p.logger = logger diff --git a/blockpoller/poller.go b/blockpoller/poller.go index e9302d2..6c08932 100644 --- a/blockpoller/poller.go +++ b/blockpoller/poller.go @@ -26,6 +26,7 @@ type BlockPoller struct { startBlockNumGate uint64 fetchBlockRetryCount uint64 stateStorePath string + ignoreCursor bool blockFetcher BlockFetcher blockHandler BlockHandler @@ -77,7 +78,7 @@ func (p *BlockPoller) Run(ctx context.Context, startBlockNum uint64, chainLatest func (p *BlockPoller) run(resolvedStartBlock bstream.BlockRef) (err error) { - p.forkDB, resolvedStartBlock, err = initState(resolvedStartBlock, p.stateStorePath, p.logger) + p.forkDB, resolvedStartBlock, err = initState(resolvedStartBlock, p.stateStorePath, p.ignoreCursor, p.logger) if err != nil { return fmt.Errorf("unable to 
initialize cursor: %w", err) } diff --git a/blockpoller/state_file.go b/blockpoller/state_file.go index def719f..d70c493 100644 --- a/blockpoller/state_file.go +++ b/blockpoller/state_file.go @@ -92,9 +92,22 @@ func (p *BlockPoller) saveState(blocks []*forkable.Block) error { return nil } -func initState(resolvedStartBlock bstream.BlockRef, stateStorePath string, logger *zap.Logger) (*forkable.ForkDB, bstream.BlockRef, error) { +func initState(resolvedStartBlock bstream.BlockRef, stateStorePath string, ignoreCursor bool, logger *zap.Logger) (*forkable.ForkDB, bstream.BlockRef, error) { forkDB := forkable.NewForkDB(forkable.ForkDBWithLogger(logger)) + useStartBlockFunc := func() (*forkable.ForkDB, bstream.BlockRef, error) { + forkDB.InitLIB(resolvedStartBlock) + return forkDB, resolvedStartBlock, nil + } + + if ignoreCursor { + logger.Info("ignorign cursor", + zap.Stringer("start_block", resolvedStartBlock), + zap.Stringer("lib", resolvedStartBlock), + ) + return useStartBlockFunc() + } + sf, err := getState(stateStorePath) if err != nil { logger.Warn("unable to load cursor file, initializing a new forkdb", @@ -102,8 +115,7 @@ func initState(resolvedStartBlock bstream.BlockRef, stateStorePath string, logge zap.Stringer("lib", resolvedStartBlock), zap.Error(err), ) - forkDB.InitLIB(resolvedStartBlock) - return forkDB, resolvedStartBlock, nil + return useStartBlockFunc() } forkDB.InitLIB(bstream.NewBlockRef(sf.Lib.Id, sf.Lib.Num)) diff --git a/blockpoller/state_file_test.go b/blockpoller/state_file_test.go index 0d1f060..3e410c8 100644 --- a/blockpoller/state_file_test.go +++ b/blockpoller/state_file_test.go @@ -54,7 +54,7 @@ func TestFireBlockFinalizer_state(t *testing.T) { require.NoError(t, err) assert.Equal(t, expectedStateFileCnt, string(cnt)) - forkDB, startBlock, err := initState(bstream.NewBlockRef("60a", 60), dirName, zap.NewNop()) + forkDB, startBlock, err := initState(bstream.NewBlockRef("60a", 60), dirName, false, zap.NewNop()) require.NoError(t, err) 
blocks, reachedLib := forkDB.CompleteSegment(bstream.NewBlockRef("105a", 105)) @@ -70,7 +70,7 @@ func TestFireBlockFinalizer_noSstate(t *testing.T) { require.NoError(t, err) defer os.Remove(dirName) - forkDB, startBlock, err := initState(bstream.NewBlockRef("60a", 60), dirName, logger) + forkDB, startBlock, err := initState(bstream.NewBlockRef("60a", 60), dirName, false, logger) require.NoError(t, err) blocks, reachedLib := forkDB.CompleteSegment(bstream.NewBlockRef("60a", 60)) From 1d6f3c31d5e4f61a940faa04d8ee2e27fb7f8420 Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Wed, 29 Nov 2023 19:18:15 -0500 Subject: [PATCH 40/66] rename block poller option --- blockpoller/options.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/blockpoller/options.go b/blockpoller/options.go index 98231b4..057512d 100644 --- a/blockpoller/options.go +++ b/blockpoller/options.go @@ -16,9 +16,9 @@ func WithStoringState(stateStorePath string) Option { } } -// WithCursorPurged ensures the poller will ignore the cursor and start from the startBlockNum +// IgnoreCursor ensures the poller will ignore the cursor and start from the startBlockNum // the cursor will still be saved as the poller progresses -func WithCursorPurged() Option { +func IgnoreCursor() Option { return func(p *BlockPoller) { p.ignoreCursor = true } From 2e7b9f992f5b3a5ec19aef8f9ee636451b9738e2 Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Thu, 30 Nov 2023 10:21:51 -0500 Subject: [PATCH 41/66] fix substreams block-type --- substreams_common.go | 5 ----- substreams_tier1.go | 9 +++++++-- substreams_tier2.go | 8 +++++++- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/substreams_common.go b/substreams_common.go index 3e5f73d..0183114 100644 --- a/substreams_common.go +++ b/substreams_common.go @@ -7,7 +7,6 @@ import ( "github.com/spf13/cobra" "github.com/streamingfast/substreams/pipeline" "github.com/streamingfast/substreams/wasm" - "google.golang.org/protobuf/proto" ) var 
registerSSOnce sync.Once @@ -20,10 +19,6 @@ func registerCommonSubstreamsFlags(cmd *cobra.Command) { }) } -func getSubstreamsBlockMessageType[B Block](chain *Chain[B]) string { - return string(proto.MessageName(chain.BlockFactory())) -} - func getSubstreamsExtensions[B Block](chain *Chain[B]) ([]wasm.WASMExtensioner, []pipeline.PipelineOptioner, error) { var wasmExtensions []wasm.WASMExtensioner var pipelineOptions []pipeline.PipelineOptioner diff --git a/substreams_tier1.go b/substreams_tier1.go index 0af1f9d..c45c479 100644 --- a/substreams_tier1.go +++ b/substreams_tier1.go @@ -42,12 +42,12 @@ func registerSubstreamsTier1App[B Block](chain *Chain[B]) { RegisterFlags: func(cmd *cobra.Command) error { cmd.Flags().String("substreams-tier1-grpc-listen-addr", SubstreamsTier1GRPCServingAddr, "Address on which the Substreams tier1 will listen, listen by default in plain text, appending a '*' to the end of the address make it listen in snake-oil (inscure) TLS") cmd.Flags().String("substreams-tier1-subrequests-endpoint", SubstreamsTier2GRPCServingAddr, "Address on which the Substreans tier1 can reach the tier2") - // communication with tier2 cmd.Flags().String("substreams-tier1-discovery-service-url", "", "URL to configure the grpc discovery service, used for communication with tier2") //traffic-director://xds?vpc_network=vpc-global&use_xds_reds=true cmd.Flags().Bool("substreams-tier1-subrequests-insecure", false, "Connect to tier2 without checking certificate validity") cmd.Flags().Bool("substreams-tier1-subrequests-plaintext", true, "Connect to tier2 without client in plaintext mode") cmd.Flags().Int("substreams-tier1-max-subrequests", 4, "number of parallel subrequests that the tier1 can make to the tier2 per request") + cmd.Flags().String("substreams-tier1-block-type", "", "fully qualified name of the block type to use for the substreams tier1 (i.e. 
sf.ethereum.v1.Block)") // all substreams registerCommonSubstreamsFlags(cmd) @@ -81,6 +81,11 @@ func registerSubstreamsTier1App[B Block](chain *Chain[B]) { subrequestsInsecure := viper.GetBool("substreams-tier1-subrequests-insecure") subrequestsPlaintext := viper.GetBool("substreams-tier1-subrequests-plaintext") maxSubrequests := viper.GetUint64("substreams-tier1-max-subrequests") + substreamsBlockType := viper.GetString("substreams-tier1-block-type") + + if substreamsBlockType == "" { + return nil, fmt.Errorf("substreams-tier1-block-type is required") + } tracing := os.Getenv("SUBSTREAMS_TRACING") == "modules_exec" @@ -111,7 +116,7 @@ func registerSubstreamsTier1App[B Block](chain *Chain[B]) { StateStoreURL: stateStoreURL, StateStoreDefaultTag: stateStoreDefaultTag, StateBundleSize: stateBundleSize, - BlockType: getSubstreamsBlockMessageType(chain), + BlockType: substreamsBlockType, MaxSubrequests: maxSubrequests, SubrequestsEndpoint: subrequestsEndpoint, SubrequestsInsecure: subrequestsInsecure, diff --git a/substreams_tier2.go b/substreams_tier2.go index 01e60e9..e9aaea9 100644 --- a/substreams_tier2.go +++ b/substreams_tier2.go @@ -40,6 +40,7 @@ func registerSubstreamsTier2App[B Block](chain *Chain[B]) { RegisterFlags: func(cmd *cobra.Command) error { cmd.Flags().String("substreams-tier2-grpc-listen-addr", SubstreamsTier2GRPCServingAddr, "Address on which the substreams tier2 will listen. Default is plain-text, appending a '*' to the end to jkkkj") cmd.Flags().String("substreams-tier2-discovery-service-url", "", "URL to advertise presence to the grpc discovery service") //traffic-director://xds?vpc_network=vpc-global&use_xds_reds=true + cmd.Flags().String("substreams-tier2-block-type", "", "fully qualified name of the block type to use for the substreams tier1 (i.e. 
sf.ethereum.v1.Block)") // all substreams registerCommonSubstreamsFlags(cmd) @@ -61,6 +62,11 @@ func registerSubstreamsTier2App[B Block](chain *Chain[B]) { stateStoreDefaultTag := viper.GetString("substreams-state-store-default-tag") stateBundleSize := viper.GetUint64("substreams-state-bundle-size") + substreamsBlockType := viper.GetString("substreams-tier2-block-type") + + if substreamsBlockType == "" { + return nil, fmt.Errorf("substreams-tier2-block-type is required") + } tracing := os.Getenv("SUBSTREAMS_TRACING") == "modules_exec" @@ -88,7 +94,7 @@ func registerSubstreamsTier2App[B Block](chain *Chain[B]) { StateStoreURL: stateStoreURL, StateStoreDefaultTag: stateStoreDefaultTag, StateBundleSize: stateBundleSize, - BlockType: getSubstreamsBlockMessageType(chain), + BlockType: substreamsBlockType, WASMExtensions: wasmExtensions, PipelineOptions: pipelineOptioner, From 57af3fd155c904244187f37fe7e92644f879cb26 Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Mon, 4 Dec 2023 14:04:58 -0500 Subject: [PATCH 42/66] Clean up print block --- chain.go | 4 +- cmd/firecore/main.go | 1 + go.mod | 2 + tools.go | 5 ++- tools_check_blocks.go | 2 +- tools_print.go | 95 +++++++++++++++++++++++++++++++------------ types.go | 28 ------------- 7 files changed, 79 insertions(+), 58 deletions(-) diff --git a/chain.go b/chain.go index 2465d2f..736616f 100644 --- a/chain.go +++ b/chain.go @@ -149,8 +149,10 @@ type Chain[B Block] struct { // BlockEncoder BlockEncoder - // RegisterSubstreamsExtensions func(chain *Chain[B]) ([]SubstreamsExtension, error) + + // CoreBinaryEnabled is a flag that when set to true indicates that `firecore` binary is being run directly? 
(not through firexxx) + CoreBinaryEnabled bool } type ToolsConfig[B Block] struct { diff --git a/cmd/firecore/main.go b/cmd/firecore/main.go index 5781c23..a3dde8e 100644 --- a/cmd/firecore/main.go +++ b/cmd/firecore/main.go @@ -13,6 +13,7 @@ func main() { FullyQualifiedModule: "github.com/streamingfast/firehose-core", Version: version, BlockFactory: func() firecore.Block { return new(pbbstream.Block) }, + CoreBinaryEnabled: true, ConsoleReaderFactory: firecore.NewConsoleReader, Tools: &firecore.ToolsConfig[*pbbstream.Block]{}, }) diff --git a/go.mod b/go.mod index 5f03fd4..b6559ab 100644 --- a/go.mod +++ b/go.mod @@ -223,4 +223,6 @@ require ( replace ( github.com/ShinyTrinkets/overseer => github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef github.com/bytecodealliance/wasmtime-go/v4 => github.com/streamingfast/wasmtime-go/v4 v4.0.0-freemem3 + github.com/streamingfast/bstream => ../bstream ) + diff --git a/tools.go b/tools.go index 0d1bbf0..e2ab87a 100644 --- a/tools.go +++ b/tools.go @@ -162,7 +162,10 @@ func getFirehoseClientFromCmd[B Block, C any](cmd *cobra.Command, logger *zap.Lo requestInfo.GRPCCallOpts = append(requestInfo.GRPCCallOpts, compressor) } - requestInfo.Transforms, err = chain.Tools.TransformFlags.Parse(cmd, logger) + if chain.Tools.TransformFlags != nil { + requestInfo.Transforms, err = chain.Tools.TransformFlags.Parse(cmd, logger) + } + if err != nil { return firehoseClient, nil, nil, fmt.Errorf("unable to parse transforms flags: %w", err) } diff --git a/tools_check_blocks.go b/tools_check_blocks.go index cb6a602..696ceae 100644 --- a/tools_check_blocks.go +++ b/tools_check_blocks.go @@ -238,7 +238,7 @@ func validateBlockSegment[B Block]( seenBlockCount++ if printDetails == PrintStats { - err := block.PrintBlock(false, os.Stdout) + err := printBlock(block, false, os.Stdout) if err != nil { fmt.Printf("❌ Unable to print block %s: %s\n", block.AsRef(), err) continue diff --git a/tools_print.go b/tools_print.go index 
db83e3b..a90154f 100644 --- a/tools_print.go +++ b/tools_print.go @@ -128,7 +128,7 @@ func createToolsPrintMergedBlocksE[B Block](chain *Chain[B]) CommandExecutor { seenBlockCount++ - if err := printBlock(block, chain, outputMode, printTransactions, dPrinter); err != nil { + if err := displayBlock(block, chain, outputMode, printTransactions, dPrinter); err != nil { // Error is ready to be passed to the user as-is return err } @@ -200,7 +200,7 @@ func createToolsPrintOneBlockE[B Block](chain *Chain[B]) CommandExecutor { return fmt.Errorf("reading block: %w", err) } - if err := printBlock(block, chain, outputMode, printTransactions, dPrinter); err != nil { + if err := displayBlock(block, chain, outputMode, printTransactions, dPrinter); err != nil { // Error is ready to be passed to the user as-is return err } @@ -224,13 +224,14 @@ func toolsPrintCmdGetOutputMode(cmd *cobra.Command) (PrintOutputMode, error) { return out, nil } -func printBlock[B Block](pbBlock *pbbstream.Block, chain *Chain[B], outputMode PrintOutputMode, printTransactions bool, dPrinter *dynamicPrinter) error { +func displayBlock[B Block](pbBlock *pbbstream.Block, chain *Chain[B], outputMode PrintOutputMode, printTransactions bool, dPrinter *dynamicPrinter) error { if pbBlock == nil { return fmt.Errorf("block is nil") } + switch outputMode { case PrintOutputModeText: - err := pbBlock.PrintBlock(printTransactions, os.Stdout) + err := printBlock(pbBlock, printTransactions, os.Stdout) if err != nil { return fmt.Errorf("pbBlock text printing: %w", err) } @@ -258,32 +259,35 @@ func printBlock[B Block](pbBlock *pbbstream.Block, chain *Chain[B], outputMode P ) } - var marshallableBlock Block = pbBlock - chainBlock := chain.BlockFactory() - isLegacyBlock := chainBlock == nil - if isLegacyBlock { - err := proto.Unmarshal(pbBlock.GetPayloadBuffer(), chainBlock) - if err != nil { - return fmt.Errorf("unmarshalling legacy pb block : %w", err) + isLegacyBlock := pbBlock.Payload == nil + if chain.CoreBinaryEnabled 
{ + // since we are running directly the firecore binary we will *NOT* use the BlockFactory + + if isLegacyBlock { + return dPrinter.printBlock(legacyKindsToProtoType(pbBlock.PayloadKind), pbBlock.GetPayloadBuffer(), encoder, marshallers) } - marshallableBlock = chainBlock - } else if _, ok := chainBlock.(*pbbstream.Block); ok { - return dPrinter.printBlock(pbBlock, encoder, marshallers) + return dPrinter.printBlock(pbBlock.Payload.TypeUrl, pbBlock.Payload.Value, encoder, marshallers) } else { - marshallableBlock = chainBlock + // since we are running via the chain specific binary (i.e. fireeth) we can use a BlockFactory + marshallableBlock := chain.BlockFactory() + if isLegacyBlock { + if err := proto.Unmarshal(pbBlock.GetPayloadBuffer(), marshallableBlock); err != nil { + return fmt.Errorf("unmarshal legacy block payload to protocol block: %w", err) + } + } else { + if err := pbBlock.Payload.UnmarshalTo(marshallableBlock); err != nil { + return fmt.Errorf("pbBlock payload unmarshal: %w", err) + } + } - err := pbBlock.Payload.UnmarshalTo(marshallableBlock) + err := json.MarshalEncode(encoder, marshallableBlock, json.WithMarshalers(marshallers)) if err != nil { - return fmt.Errorf("pbBlock payload unmarshal: %w", err) + return fmt.Errorf("pbBlock JSON printing: json marshal: %w", err) } } - err := json.MarshalEncode(encoder, marshallableBlock, json.WithMarshalers(marshallers)) - if err != nil { - return fmt.Errorf("pbBlock JSON printing: json marshal: %w", err) - } } return nil @@ -303,12 +307,12 @@ func newDynamicPrinter(importPaths []string) (*dynamicPrinter, error) { }, nil } -func (d *dynamicPrinter) printBlock(block *pbbstream.Block, encoder *jsontext.Encoder, marshalers *json.Marshalers) error { +func (d *dynamicPrinter) printBlock(blkTypeURL string, blkPayload []byte, encoder *jsontext.Encoder, marshalers *json.Marshalers) error { for _, fd := range d.fileDescriptors { - md := fd.FindSymbol(block.Payload.TypeUrl) + md := fd.FindSymbol(blkTypeURL) if md != 
nil { dynMsg := dynamic.NewMessageFactoryWithDefaults().NewDynamicMessage(md.(*desc.MessageDescriptor)) - if err := dynMsg.Unmarshal(block.Payload.Value); err != nil { + if err := dynMsg.Unmarshal(blkPayload); err != nil { return fmt.Errorf("unmarshalling block: %w", err) } err := json.MarshalEncode(encoder, dynMsg, json.WithMarshalers(marshalers)) @@ -318,7 +322,7 @@ func (d *dynamicPrinter) printBlock(block *pbbstream.Block, encoder *jsontext.En return nil } } - return fmt.Errorf("no message descriptor in proto paths for type url %q", block.Payload.TypeUrl) + return fmt.Errorf("no message descriptor in proto paths for type url %q", blkTypeURL) } func parseProtoFiles(importPaths []string) (fds []*desc.FileDescriptor, err error) { @@ -347,8 +351,6 @@ func parseProtoFiles(importPaths []string) (fds []*desc.FileDescriptor, err erro ip = append(ip, importPath) } - fmt.Println("importPaths", importPaths) - parser := protoparse.Parser{ ImportPaths: ip, } @@ -377,3 +379,42 @@ func parseProtoFiles(importPaths []string) (fds []*desc.FileDescriptor, err erro return } + +func printBlock(b *pbbstream.Block, printTransactions bool, out io.Writer) error { + _, err := out.Write( + []byte( + fmt.Sprintf( + "Block #%d (%s)\n", + b.Number, + b.Id, + ), + ), + ) + if err != nil { + return fmt.Errorf("writing block: %w", err) + } + + if printTransactions { + if _, err = out.Write([]byte("warning: transaction printing not supported by bstream block")); err != nil { + return fmt.Errorf("writing transaction support warning: %w", err) + } + } + + return nil +} + +func legacyKindsToProtoType(protocol pbbstream.Protocol) string { + switch protocol { + case pbbstream.Protocol_EOS: + return "sf.antelope.type.v1.Block" + case pbbstream.Protocol_ETH: + return "sf.ethereum.type.v2.Block" + case pbbstream.Protocol_SOLANA: + return "sf.solana.type.v1.Block" + case pbbstream.Protocol_NEAR: + return "sf.near.type.v1.Block" + case pbbstream.Protocol_COSMOS: + return "sf.cosmos.type.v1.Block" + } + 
panic("unaligned protocol") +} diff --git a/types.go b/types.go index 2cf4caf..7eeb2c4 100644 --- a/types.go +++ b/types.go @@ -2,7 +2,6 @@ package firecore import ( "fmt" - "io" "time" "github.com/spf13/cobra" @@ -63,33 +62,6 @@ type Block interface { // GetFirehoseBlockTime returns the block timestamp as a time.Time of when the block was // produced. This should the consensus agreed time of the block. GetFirehoseBlockTime() time.Time - - // PrintBlock is printing function that render a chain specific human readable - // form Block. This block is expected to be rendered as - // a single line for example on Ethereum rendering of a single block looks like: - // - // ``` - // Block #24924194 (01d6d349fbd3fa419182a2f0cf0b00714e101286650c239de8923caef6134b6c) 62 transactions, 607 calls - // ``` - // - // If the [alsoPrintTransactions] argument is true, each transaction of the block should also be printed, following - // directly the block line. Each transaction should also be on a single line, usually prefixed with a `- ` to make - // the rendering more appealing. - // - // For example on Ethereum rendering with [alsoPrintTransactions] being `true` looks like: - // - // ``` - // Block #24924194 (01d6d349fbd3fa419182a2f0cf0b00714e101286650c239de8923caef6134b6c) 62 transactions, 607 calls - // - Transaction 0xc7e04240d6f2cc5f382c478fd0a0b5c493463498c64b31477b95bded8cd12ab4 (10 calls) - // - Transaction 0xc7d8a698351eb1ac64acb76c8bf898365bb639865271add95d2c81650b2bd98c (4 calls) - // ``` - // - // The `out` parameter is used to write to the correct location. You can use [fmt.Fprintf] and [fmt.Fprintln] - // and use `out` as the output writer in your implementation. - // - // The [BlockPrinter] is optional, if nil, a default block printer will be used. It's important to note - // that the default block printer error out if `alsoPrintTransactions` is true. 
- PrintBlock(printTransactions bool, out io.Writer) error } // BlockLIBNumDerivable is an optional interface that can be implemented by your chain's block model Block From 818d341786952ce0482c7019d5e89e4248897386 Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Tue, 5 Dec 2023 09:40:36 -0500 Subject: [PATCH 43/66] Cleaned up print block --- jsonencoder/encoder.go | 46 +++++++++++ jsonencoder/options.go | 32 ++++++++ jsonencoder/proto.go | 21 +++++ protoregistry/registry.go | 47 +++++++++++ protoregistry/utils.go | 67 ++++++++++++++++ tools_check_blocks.go | 2 +- tools_firehose_client.go | 7 +- tools_print.go | 163 +++++++++++++++++--------------------- 8 files changed, 291 insertions(+), 94 deletions(-) create mode 100644 jsonencoder/encoder.go create mode 100644 jsonencoder/options.go create mode 100644 jsonencoder/proto.go create mode 100644 protoregistry/registry.go create mode 100644 protoregistry/utils.go diff --git a/jsonencoder/encoder.go b/jsonencoder/encoder.go new file mode 100644 index 0000000..497df18 --- /dev/null +++ b/jsonencoder/encoder.go @@ -0,0 +1,46 @@ +package jsonencoder + +import ( + "fmt" + "os" + + "github.com/streamingfast/firehose-core/protoregistry" + + "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" +) + +type Encoder struct { + e *jsontext.Encoder + files *protoregistry.Files + marshallers []*json.Marshalers +} + +func New(files *protoregistry.Files, opts ...Option) *Encoder { + e := &Encoder{ + e: jsontext.NewEncoder(os.Stdout), + files: files, + } + + e.marshallers = []*json.Marshalers{ + json.MarshalFuncV2(e.protoAny), + } + + for _, opt := range opts { + opt(e) + } + return e +} + +func (e *Encoder) Marshal(in any) error { + return json.MarshalEncode(e.e, in, json.WithMarshalers(json.NewMarshalers(e.marshallers...))) +} + +func (e *Encoder) MarshalLegacy(typeURL string, value []byte) error { + msg, err := e.files.Unmarshall(typeURL, value) + if err != nil { + return fmt.Errorf("unmarshalling 
proto any: %w", err) + } + + return json.MarshalEncode(e.e, msg, json.WithMarshalers(json.NewMarshalers(e.marshallers...))) +} diff --git a/jsonencoder/options.go b/jsonencoder/options.go new file mode 100644 index 0000000..3b6c550 --- /dev/null +++ b/jsonencoder/options.go @@ -0,0 +1,32 @@ +package jsonencoder + +import ( + "encoding/hex" + "fmt" + + "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "github.com/mr-tron/base58" +) + +type Option func(c *Encoder) + +func WithBytesAsBase58() Option { + return func(c *Encoder) { + m := json.MarshalFuncV2(func(encoder *jsontext.Encoder, t []byte, options json.Options) error { + fmt.Println("base58", hex.EncodeToString(t)) + return encoder.WriteToken(jsontext.String(hex.EncodeToString(t))) + }) + c.marshallers = append(c.marshallers, m) + } +} + +func WithBytesAsHex() Option { + return func(c *Encoder) { + m := json.MarshalFuncV2(func(encoder *jsontext.Encoder, t []byte, options json.Options) error { + fmt.Println("hex", hex.EncodeToString(t)) + return encoder.WriteToken(jsontext.String(base58.Encode(t))) + }) + c.marshallers = append(c.marshallers, m) + } +} diff --git a/jsonencoder/proto.go b/jsonencoder/proto.go new file mode 100644 index 0000000..22e9214 --- /dev/null +++ b/jsonencoder/proto.go @@ -0,0 +1,21 @@ +package jsonencoder + +import ( + "fmt" + + "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "google.golang.org/protobuf/types/known/anypb" +) + +func (e *Encoder) protoAny(encoder *jsontext.Encoder, t *anypb.Any, options json.Options) error { + msg, err := e.files.Unmarshall(t.TypeUrl, t.Value) + if err != nil { + return fmt.Errorf("unmarshalling proto any: %w", err) + } + cnt, err := json.Marshal(msg, json.WithMarshalers(json.NewMarshalers(e.marshallers...))) + if err != nil { + return fmt.Errorf("json marshalling proto any: %w", err) + } + return encoder.WriteValue(cnt) +} diff --git a/protoregistry/registry.go 
b/protoregistry/registry.go new file mode 100644 index 0000000..897c876 --- /dev/null +++ b/protoregistry/registry.go @@ -0,0 +1,47 @@ +package protoregistry + +import ( + "fmt" + "sync" + + "github.com/jhump/protoreflect/dynamic" + + "github.com/jhump/protoreflect/desc" +) + +// GlobalFiles is a global registry of file descriptors. +var GlobalFiles *Files = new(Files) + +type Files struct { + sync.RWMutex + filesDescriptors []*desc.FileDescriptor +} + +func New() *Files { + return &Files{ + filesDescriptors: []*desc.FileDescriptor{}, + } +} + +func (r *Files) RegisterFiles(files []string) error { + fileDescriptors, err := parseProtoFiles(files) + if err != nil { + return fmt.Errorf("parsing proto files: %w", err) + } + r.filesDescriptors = append(r.filesDescriptors, fileDescriptors...) + return nil +} + +func (r *Files) Unmarshall(typeURL string, value []byte) (*dynamic.Message, error) { + for _, fd := range r.filesDescriptors { + md := fd.FindSymbol(typeURL) + if md != nil { + dynMsg := dynamic.NewMessageFactoryWithDefaults().NewDynamicMessage(md.(*desc.MessageDescriptor)) + if err := dynMsg.Unmarshal(value); err != nil { + return nil, fmt.Errorf("unmarshalling proto: %w", err) + } + return dynMsg, nil + } + } + return nil, fmt.Errorf("no message descriptor in registry for type url: %s", typeURL) +} diff --git a/protoregistry/utils.go b/protoregistry/utils.go new file mode 100644 index 0000000..7a1c341 --- /dev/null +++ b/protoregistry/utils.go @@ -0,0 +1,67 @@ +package protoregistry + +import ( + "fmt" + "os" + "os/user" + "path/filepath" + "strings" + + "github.com/jhump/protoreflect/desc" + "github.com/jhump/protoreflect/desc/protoparse" +) + +func parseProtoFiles(importPaths []string) (fds []*desc.FileDescriptor, err error) { + usr, err := user.Current() + if err != nil { + return nil, fmt.Errorf("getting current user: %w", err) + } + userDir := usr.HomeDir + + var ip []string + for _, importPath := range importPaths { + if importPath == "~" { + importPath = 
userDir + } else if strings.HasPrefix(importPath, "~/") { + importPath = filepath.Join(userDir, importPath[2:]) + } + + importPath, err = filepath.Abs(importPath) + if err != nil { + return nil, fmt.Errorf("getting absolute path for %q: %w", importPath, err) + } + + if !strings.HasSuffix(importPath, "/") { + importPath += "/" + } + ip = append(ip, importPath) + } + + parser := protoparse.Parser{ + ImportPaths: ip, + } + + var protoFiles []string + for _, importPath := range ip { + err := filepath.Walk(importPath, + func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if strings.HasSuffix(path, ".proto") && !info.IsDir() { + protoFiles = append(protoFiles, strings.TrimPrefix(path, importPath)) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("walking import path %q: %w", importPath, err) + } + } + + fds, err = parser.ParseFiles(protoFiles...) + if err != nil { + return nil, fmt.Errorf("parsing proto files: %w", err) + } + return + +} diff --git a/tools_check_blocks.go b/tools_check_blocks.go index 696ceae..bfa8b31 100644 --- a/tools_check_blocks.go +++ b/tools_check_blocks.go @@ -238,7 +238,7 @@ func validateBlockSegment[B Block]( seenBlockCount++ if printDetails == PrintStats { - err := printBlock(block, false, os.Stdout) + err := printBStreamBlock(block, false, os.Stdout) if err != nil { fmt.Printf("❌ Unable to print block %s: %s\n", block.AsRef(), err) continue diff --git a/tools_firehose_client.go b/tools_firehose_client.go index 95ada32..f61a176 100644 --- a/tools_firehose_client.go +++ b/tools_firehose_client.go @@ -3,14 +3,13 @@ package firecore import ( "context" "fmt" - "io" - "github.com/spf13/cobra" "github.com/streamingfast/cli/sflags" "github.com/streamingfast/firehose-core/tools" "github.com/streamingfast/jsonpb" pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" + "io" ) func newToolsFirehoseClientCmd[B Block](chain *Chain[B], logger *zap.Logger) *cobra.Command { @@ -23,6 
+22,7 @@ func newToolsFirehoseClientCmd[B Block](chain *Chain[B], logger *zap.Logger) *co addFirehoseStreamClientFlagsToSet(cmd.Flags(), chain) + cmd.Flags().StringSlice("proto-paths", []string{"~/.proto"}, "Paths to proto files to use for dynamic decoding of blocks") cmd.Flags().Bool("final-blocks-only", false, "Only ask for final blocks") cmd.Flags().Bool("print-cursor-only", false, "Skip block decoding, only print the step cursor (useful for performance testing)") @@ -52,7 +52,7 @@ func getFirehoseClientE[B Block](chain *Chain[B], logger *zap.Logger) func(cmd * request := &pbfirehose.Request{ StartBlockNum: blockRange.Start, - StopBlockNum: uint64(blockRange.GetStopBlockOr(0)), + StopBlockNum: blockRange.GetStopBlockOr(0), Transforms: requestInfo.Transforms, FinalBlocksOnly: requestInfo.FinalBlocksOnly, Cursor: requestInfo.Cursor, @@ -108,6 +108,7 @@ func getFirehoseClientE[B Block](chain *Chain[B], logger *zap.Logger) func(cmd * // async process the response go func() { + line, err := jsonpb.MarshalToString(response) if err != nil { rootLog.Error("marshalling to string", zap.Error(err)) diff --git a/tools_print.go b/tools_print.go index a90154f..9237adf 100644 --- a/tools_print.go +++ b/tools_print.go @@ -15,28 +15,27 @@ package firecore import ( - "encoding/hex" "fmt" - "io" - "os" - "os/user" - "path/filepath" - "strconv" - "strings" - "github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" "github.com/jhump/protoreflect/desc" "github.com/jhump/protoreflect/desc/protoparse" "github.com/jhump/protoreflect/dynamic" - "github.com/mr-tron/base58" "github.com/spf13/cobra" "github.com/streamingfast/bstream" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dstore" + "github.com/streamingfast/firehose-core/jsonencoder" + "github.com/streamingfast/firehose-core/protoregistry" "github.com/streamingfast/firehose-core/tools" "google.golang.org/protobuf/proto" + "io" + 
"os" + "os/user" + "path/filepath" + "strconv" + "strings" ) var toolsPrintCmd = &cobra.Command{ @@ -82,7 +81,6 @@ func createToolsPrintMergedBlocksE[B Block](chain *Chain[B]) CommandExecutor { } printTransactions := sflags.MustGetBool(cmd, "transactions") - protoPaths := sflags.MustGetStringSlice(cmd, "proto-paths") storeURL := args[0] store, err := dstore.NewDBinStore(storeURL) @@ -110,9 +108,9 @@ func createToolsPrintMergedBlocksE[B Block](chain *Chain[B]) CommandExecutor { return err } - dPrinter, err := newDynamicPrinter(protoPaths) + jencoder, err := setupJsonEncoder(cmd) if err != nil { - return fmt.Errorf("unable to create dynamic printer: %w", err) + return fmt.Errorf("unable to create json encoder: %w", err) } seenBlockCount := 0 @@ -128,7 +126,7 @@ func createToolsPrintMergedBlocksE[B Block](chain *Chain[B]) CommandExecutor { seenBlockCount++ - if err := displayBlock(block, chain, outputMode, printTransactions, dPrinter); err != nil { + if err := displayBlock(block, chain, outputMode, printTransactions, jencoder); err != nil { // Error is ready to be passed to the user as-is return err } @@ -151,7 +149,11 @@ func createToolsPrintOneBlockE[B Block](chain *Chain[B]) CommandExecutor { } printTransactions := sflags.MustGetBool(cmd, "transactions") - protoPaths := sflags.MustGetStringSlice(cmd, "proto-paths") + + jencoder, err := setupJsonEncoder(cmd) + if err != nil { + return fmt.Errorf("unable to create json encoder: %w", err) + } storeURL := args[0] store, err := dstore.NewDBinStore(storeURL) @@ -174,10 +176,6 @@ func createToolsPrintOneBlockE[B Block](chain *Chain[B]) CommandExecutor { return fmt.Errorf("unable to find on block files: %w", err) } - dPrinter, err := newDynamicPrinter(protoPaths) - if err != nil { - return fmt.Errorf("unable to create dynamic printer: %w", err) - } for _, filepath := range files { reader, err := store.OpenObject(ctx, filepath) if err != nil { @@ -200,7 +198,7 @@ func createToolsPrintOneBlockE[B Block](chain *Chain[B]) 
CommandExecutor { return fmt.Errorf("reading block: %w", err) } - if err := displayBlock(block, chain, outputMode, printTransactions, dPrinter); err != nil { + if err := displayBlock(block, chain, outputMode, printTransactions, jencoder); err != nil { // Error is ready to be passed to the user as-is return err } @@ -224,73 +222,45 @@ func toolsPrintCmdGetOutputMode(cmd *cobra.Command) (PrintOutputMode, error) { return out, nil } -func displayBlock[B Block](pbBlock *pbbstream.Block, chain *Chain[B], outputMode PrintOutputMode, printTransactions bool, dPrinter *dynamicPrinter) error { +func displayBlock[B Block](pbBlock *pbbstream.Block, chain *Chain[B], outputMode PrintOutputMode, printTransactions bool, jencoder *jsonencoder.Encoder) error { if pbBlock == nil { return fmt.Errorf("block is nil") } - switch outputMode { - case PrintOutputModeText: - err := printBlock(pbBlock, printTransactions, os.Stdout) - if err != nil { + if outputMode == PrintOutputModeText { + if err := printBStreamBlock(pbBlock, printTransactions, os.Stdout); err != nil { return fmt.Errorf("pbBlock text printing: %w", err) } + return nil + } - case PrintOutputModeJSON, PrintOutputModeJSONL: - var options []jsontext.Options - if outputMode == PrintOutputModeJSON { - options = append(options, jsontext.WithIndent(" ")) - } - encoder := jsontext.NewEncoder(os.Stdout) - - var marshallers *json.Marshalers - switch UnsafeJsonBytesEncoder { - case "hex": - marshallers = json.NewMarshalers( - json.MarshalFuncV2(func(encoder *jsontext.Encoder, t []byte, options json.Options) error { - return encoder.WriteToken(jsontext.String(hex.EncodeToString(t))) - }), - ) - case "base58": - marshallers = json.NewMarshalers( - json.MarshalFuncV2(func(encoder *jsontext.Encoder, t []byte, options json.Options) error { - return encoder.WriteToken(jsontext.String(base58.Encode(t))) - }), - ) - } - - isLegacyBlock := pbBlock.Payload == nil - if chain.CoreBinaryEnabled { - // since we are running directly the firecore 
binary we will *NOT* use the BlockFactory - - if isLegacyBlock { - return dPrinter.printBlock(legacyKindsToProtoType(pbBlock.PayloadKind), pbBlock.GetPayloadBuffer(), encoder, marshallers) + isLegacyBlock := pbBlock.Payload == nil + if !chain.CoreBinaryEnabled { + // since we are running via the chain specific binary (i.e. fireeth) we can use a BlockFactory + marshallableBlock := chain.BlockFactory() + if isLegacyBlock { + if err := proto.Unmarshal(pbBlock.GetPayloadBuffer(), marshallableBlock); err != nil { + return fmt.Errorf("unmarshal legacy block payload to protocol block: %w", err) } - - return dPrinter.printBlock(pbBlock.Payload.TypeUrl, pbBlock.Payload.Value, encoder, marshallers) - } else { - // since we are running via the chain specific binary (i.e. fireeth) we can use a BlockFactory - marshallableBlock := chain.BlockFactory() - if isLegacyBlock { - if err := proto.Unmarshal(pbBlock.GetPayloadBuffer(), marshallableBlock); err != nil { - return fmt.Errorf("unmarshal legacy block payload to protocol block: %w", err) - } - } else { - if err := pbBlock.Payload.UnmarshalTo(marshallableBlock); err != nil { - return fmt.Errorf("pbBlock payload unmarshal: %w", err) - } + if err := pbBlock.Payload.UnmarshalTo(marshallableBlock); err != nil { + return fmt.Errorf("pbBlock payload unmarshal: %w", err) } + } - err := json.MarshalEncode(encoder, marshallableBlock, json.WithMarshalers(marshallers)) - if err != nil { - return fmt.Errorf("pbBlock JSON printing: json marshal: %w", err) - } + err := jencoder.Marshal(marshallableBlock) + if err != nil { + return fmt.Errorf("pbBlock JSON printing: json marshal: %w", err) } + return nil + } + // since we are running directly the firecore binary we will *NOT* use the BlockFactory + if isLegacyBlock { + return jencoder.MarshalLegacy(legacyKindsToProtoType(pbBlock.GetPayloadKind()), pbBlock.GetPayloadBuffer()) } - return nil + return jencoder.Marshal(pbBlock.Payload) } type dynamicPrinter struct { @@ -307,24 +277,6 @@ func 
newDynamicPrinter(importPaths []string) (*dynamicPrinter, error) { }, nil } -func (d *dynamicPrinter) printBlock(blkTypeURL string, blkPayload []byte, encoder *jsontext.Encoder, marshalers *json.Marshalers) error { - for _, fd := range d.fileDescriptors { - md := fd.FindSymbol(blkTypeURL) - if md != nil { - dynMsg := dynamic.NewMessageFactoryWithDefaults().NewDynamicMessage(md.(*desc.MessageDescriptor)) - if err := dynMsg.Unmarshal(blkPayload); err != nil { - return fmt.Errorf("unmarshalling block: %w", err) - } - err := json.MarshalEncode(encoder, dynMsg, json.WithMarshalers(marshalers)) - if err != nil { - return fmt.Errorf("pbBlock JSON printing: json marshal: %w", err) - } - return nil - } - } - return fmt.Errorf("no message descriptor in proto paths for type url %q", blkTypeURL) -} - func parseProtoFiles(importPaths []string) (fds []*desc.FileDescriptor, err error) { usr, err := user.Current() if err != nil { @@ -380,7 +332,25 @@ func parseProtoFiles(importPaths []string) (fds []*desc.FileDescriptor, err erro } -func printBlock(b *pbbstream.Block, printTransactions bool, out io.Writer) error { +func (d *dynamicPrinter) printBlock(blkTypeURL string, blkPayload []byte, encoder *jsontext.Encoder, marshalers *json.Marshalers) error { + for _, fd := range d.fileDescriptors { + md := fd.FindSymbol(blkTypeURL) + if md != nil { + dynMsg := dynamic.NewMessageFactoryWithDefaults().NewDynamicMessage(md.(*desc.MessageDescriptor)) + if err := dynMsg.Unmarshal(blkPayload); err != nil { + return fmt.Errorf("unmarshalling block: %w", err) + } + err := json.MarshalEncode(encoder, dynMsg, json.WithMarshalers(marshalers)) + if err != nil { + return fmt.Errorf("pbBlock JSON printing: json marshal: %w", err) + } + return nil + } + } + return fmt.Errorf("no message descriptor in proto paths for type url %q", blkTypeURL) +} + +func printBStreamBlock(b *pbbstream.Block, printTransactions bool, out io.Writer) error { _, err := out.Write( []byte( fmt.Sprintf( @@ -418,3 +388,16 @@ func 
legacyKindsToProtoType(protocol pbbstream.Protocol) string { } panic("unaligned protocol") } + +func setupJsonEncoder(cmd *cobra.Command) (*jsonencoder.Encoder, error) { + protoPaths := sflags.MustGetStringSlice(cmd, "proto-paths") + pbregistry := protoregistry.New() + if err := pbregistry.RegisterFiles(protoPaths); err != nil { + return nil, fmt.Errorf("unable to create dynamic printer: %w", err) + } + + options := []jsonencoder.Option{ + jsonencoder.WithBytesAsHex(), + } + return jsonencoder.New(pbregistry, options...), nil +} From 5a004d9ad2515769030a60db6bb423f2515b722b Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Tue, 5 Dec 2023 10:52:46 -0500 Subject: [PATCH 44/66] bump bstream --- go.mod | 1 - 1 file changed, 1 deletion(-) diff --git a/go.mod b/go.mod index b6559ab..a455f49 100644 --- a/go.mod +++ b/go.mod @@ -223,6 +223,5 @@ require ( replace ( github.com/ShinyTrinkets/overseer => github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef github.com/bytecodealliance/wasmtime-go/v4 => github.com/streamingfast/wasmtime-go/v4 v4.0.0-freemem3 - github.com/streamingfast/bstream => ../bstream ) From 50415ed45c270e9a8f15111094aa600ffbe43171 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Duchesneau?= Date: Tue, 5 Dec 2023 11:26:09 -0500 Subject: [PATCH 45/66] fix firehose server on old and new blocks --- firehose/server/blocks.go | 4 ++-- go.mod | 5 ++--- go.sum | 4 ++-- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/firehose/server/blocks.go b/firehose/server/blocks.go index 4e0b19a..7be11d4 100644 --- a/firehose/server/blocks.go +++ b/firehose/server/blocks.go @@ -158,7 +158,7 @@ func (s *Server) Blocks(request *pbfirehose.Request, streamSrv pbfirehose.Stream level = zap.InfoLevel } - logger.Check(level, "stream sent block").Write(zap.Stringer("block", block), zap.Duration("duration", time.Since(start))) + logger.Check(level, "stream sent block").Write(zap.Uint64("block", block.Number), zap.Duration("duration", 
time.Since(start))) return nil }) @@ -221,7 +221,7 @@ func (s *Server) Blocks(request *pbfirehose.Request, streamSrv pbfirehose.Stream } ctx = s.initFunc(ctx, request) - str, err := s.streamFactory.New(ctx, handlerFunc, request, true, logger) // firehose always want decoded the blocks + str, err := s.streamFactory.New(ctx, handlerFunc, request, false, logger) // firehose always want decoded the blocks if err != nil { return err } diff --git a/go.mod b/go.mod index a455f49..86902ff 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 - github.com/streamingfast/bstream v0.0.2-0.20231123130020-ad84cce9666d + github.com/streamingfast/bstream v0.0.2-0.20231205161519-9f4f0971b0cc github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c @@ -115,7 +115,7 @@ require ( github.com/ipfs/boxo v0.8.0 // indirect github.com/ipfs/go-cid v0.4.0 // indirect github.com/ipfs/go-ipfs-api v0.6.0 // indirect - github.com/jhump/protoreflect v1.14.0 // indirect + github.com/jhump/protoreflect v1.14.0 github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josephburnett/jd v1.7.1 github.com/josharian/intern v1.0.0 // indirect @@ -224,4 +224,3 @@ replace ( github.com/ShinyTrinkets/overseer => github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef github.com/bytecodealliance/wasmtime-go/v4 => github.com/streamingfast/wasmtime-go/v4 v4.0.0-freemem3 ) - diff --git a/go.sum b/go.sum index 9891227..bac5d01 100644 --- a/go.sum +++ b/go.sum @@ -574,8 +574,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/stoewer/go-strcase 
v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/streamingfast/bstream v0.0.2-0.20231123130020-ad84cce9666d h1:BvvIKTekSj8PAAda313Q6xd91w3gD1nDgOrG/5+YIZk= -github.com/streamingfast/bstream v0.0.2-0.20231123130020-ad84cce9666d/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= +github.com/streamingfast/bstream v0.0.2-0.20231205161519-9f4f0971b0cc h1:biNVCqm8tm5T/lK2Rpg1YvJuShbSDMoDXh/62NK62Zg= +github.com/streamingfast/bstream v0.0.2-0.20231205161519-9f4f0971b0cc/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 h1:UxJUTcEVkdZy8N77E3exz0iNlgQuxl4m220GPvzdZ2s= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80/go.mod h1:QxjVH73Lkqk+mP8bndvhMuQDUINfkgsYhdCH/5TJFKI= github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 h1:49JYZkn8ALGe+LhcACZyX3L9B8tIxRZ3F3l+OxmNMhY= From b22a5746b28073a6a12156ef635a32d7c77f47b1 Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Tue, 5 Dec 2023 11:28:36 -0500 Subject: [PATCH 46/66] clean up printing --- go.mod | 2 + go.sum | 10 ++-- jsonencoder/encoder.go | 6 +- protoregistry/registry.go | 22 ++++++++ tools_firehose_client.go | 1 - tools_print.go | 113 +------------------------------------- 6 files changed, 33 insertions(+), 121 deletions(-) diff --git a/go.mod b/go.mod index 86902ff..b0bcc3d 100644 --- a/go.mod +++ b/go.mod @@ -37,6 +37,7 @@ require ( ) require ( + github.com/bufbuild/protocompile v0.4.0 // indirect github.com/google/s2a-go v0.1.4 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect @@ -223,4 +224,5 @@ require ( replace ( github.com/ShinyTrinkets/overseer => github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef github.com/bytecodealliance/wasmtime-go/v4 => github.com/streamingfast/wasmtime-go/v4 v4.0.0-freemem3 + github.com/jhump/protoreflect 
=> github.com/streamingfast/protoreflect v0.0.0-20230414203421-018294174fdc ) diff --git a/go.sum b/go.sum index bac5d01..3826d75 100644 --- a/go.sum +++ b/go.sum @@ -147,6 +147,8 @@ github.com/bufbuild/connect-grpcreflect-go v1.0.0 h1:zWsLFYqrT1O2sNJFYfTXI5WxbAy github.com/bufbuild/connect-grpcreflect-go v1.0.0/go.mod h1:825I20H8bfE9rLnBH/046JSpmm3uwpNYdG4duCARetc= github.com/bufbuild/connect-opentelemetry-go v0.3.0 h1:AuZi3asTDKmjGtd2aqpyP4p5QvBFG/YEaHopViLatnk= github.com/bufbuild/connect-opentelemetry-go v0.3.0/go.mod h1:r1ppyTtu1EWeRodk4Q/JbyQhIWtO7eR3GoRDzjeEcNU= +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -391,12 +393,6 @@ github.com/ipfs/go-cid v0.4.0 h1:a4pdZq0sx6ZSxbCizebnKiMCx/xI/aBBFlB73IgH4rA= github.com/ipfs/go-cid v0.4.0/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= github.com/ipfs/go-ipfs-api v0.6.0 h1:JARgG0VTbjyVhO5ZfesnbXv9wTcMvoKRBLF1SzJqzmg= github.com/ipfs/go-ipfs-api v0.6.0/go.mod h1:iDC2VMwN9LUpQV/GzEeZ2zNqd8NUdRmWcFM+K/6odf0= -github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= -github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= -github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ= -github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E= -github.com/jhump/protoreflect v1.14.0 h1:MBbQK392K3u8NTLbKOCIi3XdI+y+c6yt5oMq0X3xviw= -github.com/jhump/protoreflect v1.14.0/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= 
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -609,6 +605,8 @@ github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef h1:9IVFHR github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef/go.mod h1:cq8CvbZ3ioFmGrHokSAJalS0lC+pVXLKhITScItUGXY= github.com/streamingfast/pbgo v0.0.6-0.20231120172814-537d034aad5e h1:8hoT2QUwh+YNgIcCPux9xd4u9XojHR8hbyAzz7rQuEM= github.com/streamingfast/pbgo v0.0.6-0.20231120172814-537d034aad5e/go.mod h1:fZuijmeFrqxW2YnnXmGrkQpUTHx3eHCaJUKwdvXAYKM= +github.com/streamingfast/protoreflect v0.0.0-20230414203421-018294174fdc h1:poYChURzYXislOzzeo44FKipd3wWvxhlz966qzO9kZk= +github.com/streamingfast/protoreflect v0.0.0-20230414203421-018294174fdc/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= github.com/streamingfast/sf-tracing v0.0.0-20230616174903-cd2ade641ca9 h1:YRwpVvLYa+FEJlTy0S7mk4UptYjk5zac+A+ZE1phOeA= github.com/streamingfast/sf-tracing v0.0.0-20230616174903-cd2ade641ca9/go.mod h1:ktzt1BUj3GF+SKQHEmn3ShryJ7y87JeCHtaTGaDVATs= github.com/streamingfast/shutter v1.5.0 h1:NpzDYzj0HVpSiDJVO/FFSL6QIK/YKOxY0gJAtyaTOgs= diff --git a/jsonencoder/encoder.go b/jsonencoder/encoder.go index 497df18..15094fb 100644 --- a/jsonencoder/encoder.go +++ b/jsonencoder/encoder.go @@ -4,6 +4,8 @@ import ( "fmt" "os" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" + "github.com/streamingfast/firehose-core/protoregistry" "github.com/go-json-experiment/json" @@ -36,8 +38,8 @@ func (e *Encoder) Marshal(in any) error { return json.MarshalEncode(e.e, in, json.WithMarshalers(json.NewMarshalers(e.marshallers...))) } -func (e *Encoder) MarshalLegacy(typeURL string, value []byte) error { - msg, err := e.files.Unmarshall(typeURL, value) +func (e *Encoder) 
MarshalLegacy(protocol pbbstream.Protocol, value []byte) error { + msg, err := e.files.UnmarshallLegacy(protocol, value) if err != nil { return fmt.Errorf("unmarshalling proto any: %w", err) } diff --git a/protoregistry/registry.go b/protoregistry/registry.go index 897c876..6f781ec 100644 --- a/protoregistry/registry.go +++ b/protoregistry/registry.go @@ -4,6 +4,8 @@ import ( "fmt" "sync" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" + "github.com/jhump/protoreflect/dynamic" "github.com/jhump/protoreflect/desc" @@ -45,3 +47,23 @@ func (r *Files) Unmarshall(typeURL string, value []byte) (*dynamic.Message, erro } return nil, fmt.Errorf("no message descriptor in registry for type url: %s", typeURL) } + +func (r *Files) UnmarshallLegacy(protocol pbbstream.Protocol, value []byte) (*dynamic.Message, error) { + return r.Unmarshall(legacyKindsToProtoType(protocol), value) +} + +func legacyKindsToProtoType(protocol pbbstream.Protocol) string { + switch protocol { + case pbbstream.Protocol_EOS: + return "sf.antelope.type.v1.Block" + case pbbstream.Protocol_ETH: + return "sf.ethereum.type.v2.Block" + case pbbstream.Protocol_SOLANA: + return "sf.solana.type.v1.Block" + case pbbstream.Protocol_NEAR: + return "sf.near.type.v1.Block" + case pbbstream.Protocol_COSMOS: + return "sf.cosmos.type.v1.Block" + } + panic("unaligned protocol") +} diff --git a/tools_firehose_client.go b/tools_firehose_client.go index f61a176..b6d9135 100644 --- a/tools_firehose_client.go +++ b/tools_firehose_client.go @@ -108,7 +108,6 @@ func getFirehoseClientE[B Block](chain *Chain[B], logger *zap.Logger) func(cmd * // async process the response go func() { - line, err := jsonpb.MarshalToString(response) if err != nil { rootLog.Error("marshalling to string", zap.Error(err)) diff --git a/tools_print.go b/tools_print.go index 9237adf..161f5e1 100644 --- a/tools_print.go +++ b/tools_print.go @@ -16,11 +16,6 @@ package firecore import ( "fmt" - "github.com/go-json-experiment/json" - 
"github.com/go-json-experiment/json/jsontext" - "github.com/jhump/protoreflect/desc" - "github.com/jhump/protoreflect/desc/protoparse" - "github.com/jhump/protoreflect/dynamic" "github.com/spf13/cobra" "github.com/streamingfast/bstream" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" @@ -32,10 +27,7 @@ import ( "google.golang.org/protobuf/proto" "io" "os" - "os/user" - "path/filepath" "strconv" - "strings" ) var toolsPrintCmd = &cobra.Command{ @@ -257,99 +249,12 @@ func displayBlock[B Block](pbBlock *pbbstream.Block, chain *Chain[B], outputMode // since we are running directly the firecore binary we will *NOT* use the BlockFactory if isLegacyBlock { - return jencoder.MarshalLegacy(legacyKindsToProtoType(pbBlock.GetPayloadKind()), pbBlock.GetPayloadBuffer()) + return jencoder.MarshalLegacy(pbBlock.GetPayloadKind(), pbBlock.GetPayloadBuffer()) } return jencoder.Marshal(pbBlock.Payload) } -type dynamicPrinter struct { - fileDescriptors []*desc.FileDescriptor -} - -func newDynamicPrinter(importPaths []string) (*dynamicPrinter, error) { - fileDescriptors, err := parseProtoFiles(importPaths) - if err != nil { - return nil, fmt.Errorf("parsing proto files: %w", err) - } - return &dynamicPrinter{ - fileDescriptors: fileDescriptors, - }, nil -} - -func parseProtoFiles(importPaths []string) (fds []*desc.FileDescriptor, err error) { - usr, err := user.Current() - if err != nil { - return nil, fmt.Errorf("getting current user: %w", err) - } - userDir := usr.HomeDir - - var ip []string - for _, importPath := range importPaths { - if importPath == "~" { - importPath = userDir - } else if strings.HasPrefix(importPath, "~/") { - importPath = filepath.Join(userDir, importPath[2:]) - } - - importPath, err = filepath.Abs(importPath) - if err != nil { - return nil, fmt.Errorf("getting absolute path for %q: %w", importPath, err) - } - - if !strings.HasSuffix(importPath, "/") { - importPath += "/" - } - ip = append(ip, importPath) - } - - parser := protoparse.Parser{ - 
ImportPaths: ip, - } - - var protoFiles []string - for _, importPath := range ip { - err := filepath.Walk(importPath, - func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if strings.HasSuffix(path, ".proto") && !info.IsDir() { - protoFiles = append(protoFiles, strings.TrimPrefix(path, importPath)) - } - return nil - }) - if err != nil { - return nil, fmt.Errorf("walking import path %q: %w", importPath, err) - } - } - - fds, err = parser.ParseFiles(protoFiles...) - if err != nil { - return nil, fmt.Errorf("parsing proto files: %w", err) - } - return - -} - -func (d *dynamicPrinter) printBlock(blkTypeURL string, blkPayload []byte, encoder *jsontext.Encoder, marshalers *json.Marshalers) error { - for _, fd := range d.fileDescriptors { - md := fd.FindSymbol(blkTypeURL) - if md != nil { - dynMsg := dynamic.NewMessageFactoryWithDefaults().NewDynamicMessage(md.(*desc.MessageDescriptor)) - if err := dynMsg.Unmarshal(blkPayload); err != nil { - return fmt.Errorf("unmarshalling block: %w", err) - } - err := json.MarshalEncode(encoder, dynMsg, json.WithMarshalers(marshalers)) - if err != nil { - return fmt.Errorf("pbBlock JSON printing: json marshal: %w", err) - } - return nil - } - } - return fmt.Errorf("no message descriptor in proto paths for type url %q", blkTypeURL) -} - func printBStreamBlock(b *pbbstream.Block, printTransactions bool, out io.Writer) error { _, err := out.Write( []byte( @@ -373,22 +278,6 @@ func printBStreamBlock(b *pbbstream.Block, printTransactions bool, out io.Writer return nil } -func legacyKindsToProtoType(protocol pbbstream.Protocol) string { - switch protocol { - case pbbstream.Protocol_EOS: - return "sf.antelope.type.v1.Block" - case pbbstream.Protocol_ETH: - return "sf.ethereum.type.v2.Block" - case pbbstream.Protocol_SOLANA: - return "sf.solana.type.v1.Block" - case pbbstream.Protocol_NEAR: - return "sf.near.type.v1.Block" - case pbbstream.Protocol_COSMOS: - return "sf.cosmos.type.v1.Block" - } - 
panic("unaligned protocol") -} - func setupJsonEncoder(cmd *cobra.Command) (*jsonencoder.Encoder, error) { protoPaths := sflags.MustGetStringSlice(cmd, "proto-paths") pbregistry := protoregistry.New() From b6397607b6fffb218cc02dac396e5f0c5a456cb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Duchesneau?= Date: Tue, 5 Dec 2023 11:31:16 -0500 Subject: [PATCH 47/66] remove streamfactory 'decodeblock' option --- firehose/factory.go | 3 --- firehose/server/blocks.go | 2 +- go.mod | 2 +- go.sum | 4 ++-- index-builder/index-builder.go | 1 - 5 files changed, 4 insertions(+), 8 deletions(-) diff --git a/firehose/factory.go b/firehose/factory.go index 4064cfd..acfb46e 100644 --- a/firehose/factory.go +++ b/firehose/factory.go @@ -139,7 +139,6 @@ func (sf *StreamFactory) New( ctx context.Context, handler bstream.Handler, request *pbfirehose.Request, - decodeBlock bool, logger *zap.Logger) (*stream.Stream, error) { reqLogger := logger.With( @@ -160,8 +159,6 @@ func (sf *StreamFactory) New( } if preprocFunc != nil { options = append(options, stream.WithPreprocessFunc(preprocFunc, StreamMergedBlocksPreprocThreads)) - } else if decodeBlock { - panic("not supported anymore") } if blockIndexProvider != nil { reqLogger = reqLogger.With(zap.Bool("with_index_provider", true)) diff --git a/firehose/server/blocks.go b/firehose/server/blocks.go index 7be11d4..911649b 100644 --- a/firehose/server/blocks.go +++ b/firehose/server/blocks.go @@ -221,7 +221,7 @@ func (s *Server) Blocks(request *pbfirehose.Request, streamSrv pbfirehose.Stream } ctx = s.initFunc(ctx, request) - str, err := s.streamFactory.New(ctx, handlerFunc, request, false, logger) // firehose always want decoded the blocks + str, err := s.streamFactory.New(ctx, handlerFunc, request, logger) if err != nil { return err } diff --git a/go.mod b/go.mod index b0bcc3d..d7ec850 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper 
v1.15.0 - github.com/streamingfast/bstream v0.0.2-0.20231205161519-9f4f0971b0cc + github.com/streamingfast/bstream v0.0.2-0.20231205163051-ade2f311eca3 github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c diff --git a/go.sum b/go.sum index 3826d75..a9b2d16 100644 --- a/go.sum +++ b/go.sum @@ -570,8 +570,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/streamingfast/bstream v0.0.2-0.20231205161519-9f4f0971b0cc h1:biNVCqm8tm5T/lK2Rpg1YvJuShbSDMoDXh/62NK62Zg= -github.com/streamingfast/bstream v0.0.2-0.20231205161519-9f4f0971b0cc/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= +github.com/streamingfast/bstream v0.0.2-0.20231205163051-ade2f311eca3 h1:u8orpRssS8rYceziOQ/mbBQHlYh5w06oOtTXK90/yMc= +github.com/streamingfast/bstream v0.0.2-0.20231205163051-ade2f311eca3/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 h1:UxJUTcEVkdZy8N77E3exz0iNlgQuxl4m220GPvzdZ2s= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80/go.mod h1:QxjVH73Lkqk+mP8bndvhMuQDUINfkgsYhdCH/5TJFKI= github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 h1:49JYZkn8ALGe+LhcACZyX3L9B8tIxRZ3F3l+OxmNMhY= diff --git a/index-builder/index-builder.go b/index-builder/index-builder.go index b393192..f30cd30 100644 --- a/index-builder/index-builder.go +++ b/index-builder/index-builder.go @@ -85,7 +85,6 @@ func (app *IndexBuilder) launch() error { ctx, bstream.HandlerFunc(handlerFunc), req, - true, app.logger, ) From f58cd0833f76f0f6b7ccaa6b117ec00806ab8381 Mon Sep 17 
00:00:00 2001 From: billettc Date: Tue, 5 Dec 2023 11:36:49 -0500 Subject: [PATCH 48/66] refactor of tools and apps package --- chain.go | 3 +- firehose.go => cmd/apps/firehose.go | 14 +++--- index_builder.go => cmd/apps/index_builder.go | 12 +++-- merger.go => cmd/apps/merger.go | 11 +++-- reader_node.go => cmd/apps/reader_node.go | 40 +++++++++------- .../apps/reader_node_stdin.go | 10 ++-- .../apps/reader_node_test.go | 2 +- relayer.go => cmd/apps/relayer.go | 12 +++-- start.go => cmd/apps/start.go | 18 ++++---- .../apps/substreams_common.go | 10 ++-- .../apps/substreams_tier1.go | 14 +++--- .../apps/substreams_tier2.go | 14 +++--- common.go => cmd/common.go | 17 +++---- cmd/firecore/main.go | 3 +- main.go => cmd/main.go | 43 +++++++++-------- setup.go => cmd/setup.go | 10 ++-- {tools => cmd/tools}/block_range.go | 0 {tools => cmd/tools}/block_range_enum.go | 0 {tools => cmd/tools}/flags.go | 0 tools.go => cmd/tools/tools.go | 46 ++++++++++--------- tools_check.go => cmd/tools/tools_check.go | 19 ++++---- .../tools/tools_check_blocks.go | 35 +++++++------- .../tools/tools_check_merged_batch.go | 14 +++--- .../tools/tools_checkmergedbatch.go | 5 +- .../tools/tools_compare_blocks.go | 28 +++++------ .../tools/tools_download_from_firehose.go | 13 +++--- .../tools/tools_firehose_client.go | 12 ++--- .../tools_firehose_prometheus_exporter.go | 7 +-- .../tools_firehose_single_block_client.go | 9 ++-- .../tools/tools_fix_bloated_merged_blocks.go | 10 ++-- tools_print.go => cmd/tools/tools_print.go | 23 +++++----- .../tools/tools_print_enum.go | 2 +- .../tools/tools_unmerge_blocks.go | 10 ++-- .../tools/tools_upgrade_merged_blocks.go | 12 ++--- {tools => cmd/tools}/types.go | 0 {tools => cmd/tools}/types_test.go | 0 {tools => cmd/tools}/utils.go | 0 {tools => cmd/tools}/utils_test.go | 0 flags.go | 2 +- substreams/extentions.go | 11 +++++ types.go | 2 + unsafe_extensions.go | 6 +-- utils.go | 6 +-- 43 files changed, 276 insertions(+), 229 deletions(-) rename firehose.go 
=> cmd/apps/firehose.go (87%) rename index_builder.go => cmd/apps/index_builder.go (84%) rename merger.go => cmd/apps/merger.go (85%) rename reader_node.go => cmd/apps/reader_node.go (86%) rename reader_node_stdin.go => cmd/apps/reader_node_stdin.go (87%) rename reader_node_test.go => cmd/apps/reader_node_test.go (99%) rename relayer.go => cmd/apps/relayer.go (64%) rename start.go => cmd/apps/start.go (85%) rename substreams_common.go => cmd/apps/substreams_common.go (83%) rename substreams_tier1.go => cmd/apps/substreams_tier1.go (87%) rename substreams_tier2.go => cmd/apps/substreams_tier2.go (85%) rename common.go => cmd/common.go (80%) rename main.go => cmd/main.go (84%) rename setup.go => cmd/setup.go (94%) rename {tools => cmd/tools}/block_range.go (100%) rename {tools => cmd/tools}/block_range_enum.go (100%) rename {tools => cmd/tools}/flags.go (100%) rename tools.go => cmd/tools/tools.go (72%) rename tools_check.go => cmd/tools/tools_check.go (91%) rename tools_check_blocks.go => cmd/tools/tools_check_blocks.go (83%) rename tools_check_merged_batch.go => cmd/tools/tools_check_merged_batch.go (90%) rename tools_checkmergedbatch.go => cmd/tools/tools_checkmergedbatch.go (94%) rename tools_compare_blocks.go => cmd/tools/tools_compare_blocks.go (93%) rename tools_download_from_firehose.go => cmd/tools/tools_download_from_firehose.go (89%) rename tools_firehose_client.go => cmd/tools/tools_firehose_client.go (87%) rename tools_firehose_prometheus_exporter.go => cmd/tools/tools_firehose_prometheus_exporter.go (90%) rename tools_firehose_single_block_client.go => cmd/tools/tools_firehose_single_block_client.go (83%) rename tools_fix_bloated_merged_blocks.go => cmd/tools/tools_fix_bloated_merged_blocks.go (91%) rename tools_print.go => cmd/tools/tools_print.go (92%) rename tools_print_enum.go => cmd/tools/tools_print_enum.go (99%) rename tools_unmerge_blocks.go => cmd/tools/tools_unmerge_blocks.go (92%) rename tools_upgrade_merged_blocks.go => 
cmd/tools/tools_upgrade_merged_blocks.go (92%) rename {tools => cmd/tools}/types.go (100%) rename {tools => cmd/tools}/types_test.go (100%) rename {tools => cmd/tools}/utils.go (100%) rename {tools => cmd/tools}/utils_test.go (100%) create mode 100644 substreams/extentions.go diff --git a/chain.go b/chain.go index 736616f..20ced95 100644 --- a/chain.go +++ b/chain.go @@ -11,6 +11,7 @@ import ( pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/firehose-core/node-manager/mindreader" "github.com/streamingfast/firehose-core/node-manager/operator" + "github.com/streamingfast/firehose-core/substreams" "github.com/streamingfast/logging" "go.uber.org/multierr" "go.uber.org/zap" @@ -149,7 +150,7 @@ type Chain[B Block] struct { // BlockEncoder BlockEncoder - RegisterSubstreamsExtensions func(chain *Chain[B]) ([]SubstreamsExtension, error) + RegisterSubstreamsExtensions func(chain *Chain[B]) ([]substreams.Extension, error) // CoreBinaryEnabled is a flag that when set to true indicates that `firecore` binary is being run directly? 
(not through firexxx) CoreBinaryEnabled bool diff --git a/firehose.go b/cmd/apps/firehose.go similarity index 87% rename from firehose.go rename to cmd/apps/firehose.go index 76821d4..a901cb4 100644 --- a/firehose.go +++ b/cmd/apps/firehose.go @@ -1,4 +1,4 @@ -package firecore +package apps import ( "fmt" @@ -12,24 +12,26 @@ import ( discoveryservice "github.com/streamingfast/dgrpc/server/discovery-service" "github.com/streamingfast/dlauncher/launcher" "github.com/streamingfast/dmetrics" + firecore "github.com/streamingfast/firehose-core" "github.com/streamingfast/firehose-core/firehose/app/firehose" "github.com/streamingfast/firehose-core/firehose/server" "github.com/streamingfast/logging" + "go.uber.org/zap" ) var metricset = dmetrics.NewSet() var headBlockNumMetric = metricset.NewHeadBlockNumber("firehose") var headTimeDriftmetric = metricset.NewHeadTimeDrift("firehose") -func registerFirehoseApp[B Block](chain *Chain[B]) { - appLogger, _ := logging.PackageLogger("firehose", chain.LoggerPackageID("firehose")) +func RegisterFirehoseApp[B firecore.Block](chain *firecore.Chain[B], rootLog *zap.Logger) { + appLogger, _ := logging.PackageLogger("firehose", "firehose") launcher.RegisterApp(rootLog, &launcher.AppDef{ ID: "firehose", Title: "Block Firehose", Description: "Provides on-demand filtered blocks, depends on common-merged-blocks-store-url and common-live-blocks-addr", RegisterFlags: func(cmd *cobra.Command) error { - cmd.Flags().String("firehose-grpc-listen-addr", FirehoseGRPCServingAddr, "Address on which the firehose will listen") + cmd.Flags().String("firehose-grpc-listen-addr", firecore.FirehoseGRPCServingAddr, "Address on which the firehose will listen") cmd.Flags().String("firehose-discovery-service-url", "", "Url to configure the gRPC discovery service") //traffic-director://xds?vpc_network=vpc-global&use_xds_reds=true cmd.Flags().Int("firehose-rate-limit-bucket-size", -1, "Rate limit bucket size (default: no rate limit)") 
cmd.Flags().Duration("firehose-rate-limit-bucket-fill-rate", 10*time.Second, "Rate limit bucket refill rate (default: 10s)") @@ -43,7 +45,7 @@ func registerFirehoseApp[B Block](chain *Chain[B]) { return nil, fmt.Errorf("unable to initialize authenticator: %w", err) } - mergedBlocksStoreURL, oneBlocksStoreURL, forkedBlocksStoreURL, err := GetCommonStoresURLs(runtime.AbsDataDir) + mergedBlocksStoreURL, oneBlocksStoreURL, forkedBlocksStoreURL, err := firecore.GetCommonStoresURLs(runtime.AbsDataDir) if err != nil { return nil, err } @@ -61,7 +63,7 @@ func registerFirehoseApp[B Block](chain *Chain[B]) { } } - indexStore, possibleIndexSizes, err := GetIndexStore(runtime.AbsDataDir) + indexStore, possibleIndexSizes, err := firecore.GetIndexStore(runtime.AbsDataDir) if err != nil { return nil, fmt.Errorf("unable to initialize indexes: %w", err) } diff --git a/index_builder.go b/cmd/apps/index_builder.go similarity index 84% rename from index_builder.go rename to cmd/apps/index_builder.go index 155165e..c3df3ce 100644 --- a/index_builder.go +++ b/cmd/apps/index_builder.go @@ -1,4 +1,4 @@ -package firecore +package apps import ( "context" @@ -10,16 +10,18 @@ import ( pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" bstransform "github.com/streamingfast/bstream/transform" "github.com/streamingfast/dlauncher/launcher" + firecore "github.com/streamingfast/firehose-core" index_builder "github.com/streamingfast/firehose-core/index-builder/app/index-builder" + "go.uber.org/zap" ) -func registerIndexBuilderApp[B Block](chain *Chain[B]) { +func RegisterIndexBuilderApp[B firecore.Block](chain *firecore.Chain[B], rootLog *zap.Logger) { launcher.RegisterApp(rootLog, &launcher.AppDef{ ID: "index-builder", Title: "Index Builder", Description: "App the builds indexes out of Firehose blocks", RegisterFlags: func(cmd *cobra.Command) error { - cmd.Flags().String("index-builder-grpc-listen-addr", IndexBuilderServiceAddr, "Address to listen for grpc-based healthz check") + 
cmd.Flags().String("index-builder-grpc-listen-addr", firecore.IndexBuilderServiceAddr, "Address to listen for grpc-based healthz check") cmd.Flags().Uint64("index-builder-index-size", 10000, "Size of index bundles that will be created") cmd.Flags().Uint64("index-builder-start-block", 0, "Block number to start indexing") cmd.Flags().Uint64("index-builder-stop-block", 0, "Block number to stop indexing") @@ -29,12 +31,12 @@ func registerIndexBuilderApp[B Block](chain *Chain[B]) { return nil }, FactoryFunc: func(runtime *launcher.Runtime) (launcher.App, error) { - mergedBlocksStoreURL, _, _, err := GetCommonStoresURLs(runtime.AbsDataDir) + mergedBlocksStoreURL, _, _, err := firecore.GetCommonStoresURLs(runtime.AbsDataDir) if err != nil { return nil, err } - indexStore, lookupIdxSizes, err := GetIndexStore(runtime.AbsDataDir) + indexStore, lookupIdxSizes, err := firecore.GetIndexStore(runtime.AbsDataDir) if err != nil { return nil, err } diff --git a/merger.go b/cmd/apps/merger.go similarity index 85% rename from merger.go rename to cmd/apps/merger.go index cd1cf57..9e28851 100644 --- a/merger.go +++ b/cmd/apps/merger.go @@ -1,21 +1,24 @@ -package firecore +package apps import ( "time" + firecore "github.com/streamingfast/firehose-core" + "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/streamingfast/dlauncher/launcher" "github.com/streamingfast/firehose-core/merger/app/merger" + "go.uber.org/zap" ) -func registerMergerApp() { +func RegisterMergerApp(rootLog *zap.Logger) { launcher.RegisterApp(rootLog, &launcher.AppDef{ ID: "merger", Title: "Merger", Description: "Produces merged block files from single-block files", RegisterFlags: func(cmd *cobra.Command) error { - cmd.Flags().String("merger-grpc-listen-addr", MergerServingAddr, "Address to listen for incoming gRPC requests") + cmd.Flags().String("merger-grpc-listen-addr", firecore.MergerServingAddr, "Address to listen for incoming gRPC requests") cmd.Flags().Uint64("merger-prune-forked-blocks-after", 
50000, "Number of blocks that must pass before we delete old forks (one-block-files lingering)") cmd.Flags().Uint64("merger-stop-block", 0, "If non-zero, merger will trigger shutdown when blocks have been merged up to this block") cmd.Flags().Duration("merger-time-between-store-lookups", 1*time.Second, "Delay between source store polling (should be higher for remote storage)") @@ -23,7 +26,7 @@ func registerMergerApp() { return nil }, FactoryFunc: func(runtime *launcher.Runtime) (launcher.App, error) { - mergedBlocksStoreURL, oneBlocksStoreURL, forkedBlocksStoreURL, err := GetCommonStoresURLs(runtime.AbsDataDir) + mergedBlocksStoreURL, oneBlocksStoreURL, forkedBlocksStoreURL, err := firecore.GetCommonStoresURLs(runtime.AbsDataDir) if err != nil { return nil, err } diff --git a/reader_node.go b/cmd/apps/reader_node.go similarity index 86% rename from reader_node.go rename to cmd/apps/reader_node.go index eb1926c..c611a47 100644 --- a/reader_node.go +++ b/cmd/apps/reader_node.go @@ -1,4 +1,4 @@ -package firecore +package apps import ( "context" @@ -14,6 +14,7 @@ import ( pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/cli" "github.com/streamingfast/dlauncher/launcher" + firecore "github.com/streamingfast/firehose-core" nodeManager "github.com/streamingfast/firehose-core/node-manager" nodeManagerApp "github.com/streamingfast/firehose-core/node-manager/app/node_manager" "github.com/streamingfast/firehose-core/node-manager/metrics" @@ -27,8 +28,8 @@ import ( "google.golang.org/grpc" ) -func registerReaderNodeApp[B Block](chain *Chain[B]) { - appLogger, appTracer := logging.PackageLogger("reader", chain.LoggerPackageID("reader")) +func RegisterReaderNodeApp[B firecore.Block](chain *firecore.Chain[B], rootLog *zap.Logger) { + appLogger, appTracer := logging.PackageLogger("reader", "reader") launcher.RegisterApp(rootLog, &launcher.AppDef{ ID: "reader-node", @@ -41,7 +42,7 @@ func registerReaderNodeApp[B Block](chain *Chain[B]) { `)) 
cmd.Flags().String("reader-node-data-dir", "{data-dir}/reader/data", "Directory for node data") cmd.Flags().Bool("reader-node-debug-firehose-logs", false, "[DEV] Prints firehose instrumentation logs to standard output, should be use for debugging purposes only") - cmd.Flags().String("reader-node-manager-api-addr", ReaderNodeManagerAPIAddr, "Acme node manager API address") + cmd.Flags().String("reader-node-manager-api-addr", firecore.ReaderNodeManagerAPIAddr, "Acme node manager API address") cmd.Flags().Duration("reader-node-readiness-max-latency", 30*time.Second, "Determine the maximum head block latency at which the instance will be determined healthy. Some chains have more regular block production than others.") cmd.Flags().String("reader-node-arguments", "", string(cli.Description(` Defines the node arguments that will be passed to the node on execution. Supports templating, where we will replace certain sub-string with the appropriate value @@ -55,7 +56,7 @@ func registerReaderNodeApp[B Block](chain *Chain[B]) { Example: 'run blockchain -start {start-block-num} -end {stop-block-num}' may yield 'run blockchain -start 200 -end 500' `))) cmd.Flags().StringSlice("reader-node-backups", []string{}, "Repeatable, space-separated key=values definitions for backups. 
Example: 'type=gke-pvc-snapshot prefix= tag=v1 freq-blocks=1000 freq-time= project=myproj'") - cmd.Flags().String("reader-node-grpc-listen-addr", ReaderNodeGRPCAddr, "The gRPC listening address to use for serving real-time blocks") + cmd.Flags().String("reader-node-grpc-listen-addr", firecore.ReaderNodeGRPCAddr, "The gRPC listening address to use for serving real-time blocks") cmd.Flags().Bool("reader-node-discard-after-stop-num", false, "Ignore remaining blocks being processed after stop num (only useful if we discard the reader data after reprocessing a chunk of blocks)") cmd.Flags().String("reader-node-working-dir", "{data-dir}/reader/work", "Path where reader will stores its files") cmd.Flags().Uint("reader-node-start-block-num", 0, "Blocks that were produced with smaller block number then the given block num are skipped") @@ -75,7 +76,7 @@ func registerReaderNodeApp[B Block](chain *Chain[B]) { sfDataDir := runtime.AbsDataDir nodePath := viper.GetString("reader-node-path") - nodeDataDir := MustReplaceDataDir(sfDataDir, viper.GetString("reader-node-data-dir")) + nodeDataDir := firecore.MustReplaceDataDir(sfDataDir, viper.GetString("reader-node-data-dir")) readinessMaxLatency := viper.GetDuration("reader-node-readiness-max-latency") debugFirehose := viper.GetBool("reader-node-debug-firehose-logs") @@ -93,10 +94,19 @@ func registerReaderNodeApp[B Block](chain *Chain[B]) { ctx, cancel := context.WithTimeout(context.Background(), 4*time.Minute) defer cancel() - resolveStartBlockNum, err := UnsafeResolveReaderNodeStartBlock(ctx, startCmd, runtime, rootLog) - if err != nil { - return nil, fmt.Errorf("resolve start block: %w", err) + userDefined := viper.IsSet("reader-node-start-block-num") + startBlockNum := viper.GetUint64("reader-node-start-block-num") + firstStreamableBlock := viper.GetUint64("common-first-streamable-block") + + resolveStartBlockNum := startBlockNum + if !userDefined { + resolveStartBlockNum, err = firecore.UnsafeResolveReaderNodeStartBlock(ctx, 
startBlockNum, firstStreamableBlock, runtime, rootLog) + if err != nil { + return nil, fmt.Errorf("resolve start block: %w", err) + } + } + stopBlockNum := viper.GetUint64("reader-node-stop-block-num") hostname, _ := os.Hostname() @@ -123,7 +133,7 @@ func registerReaderNodeApp[B Block](chain *Chain[B]) { var bootstrapper operator.Bootstrapper if chain.ReaderNodeBootstrapperFactory != nil { - bootstrapper, err = chain.ReaderNodeBootstrapperFactory(startCmd.Context(), appLogger, startCmd, nodeArguments, nodeArgumentResolver) + bootstrapper, err = chain.ReaderNodeBootstrapperFactory(StartCmd.Context(), appLogger, StartCmd, nodeArguments, nodeArgumentResolver) if err != nil { return nil, fmt.Errorf("new bootstrapper: %w", err) } @@ -157,8 +167,8 @@ func registerReaderNodeApp[B Block](chain *Chain[B]) { } blockStreamServer := blockstream.NewUnmanagedServer(blockstream.ServerOptionWithLogger(appLogger)) - oneBlocksStoreURL := MustReplaceDataDir(sfDataDir, viper.GetString("common-one-block-store-url")) - workingDir := MustReplaceDataDir(sfDataDir, viper.GetString("reader-node-working-dir")) + oneBlocksStoreURL := firecore.MustReplaceDataDir(sfDataDir, viper.GetString("common-one-block-store-url")) + workingDir := firecore.MustReplaceDataDir(sfDataDir, viper.GetString("reader-node-working-dir")) gprcListenAddr := viper.GetString("reader-node-grpc-listen-addr") oneBlockFileSuffix := viper.GetString("reader-node-one-block-suffix") blocksChanCapacity := viper.GetInt("reader-node-blocks-chan-capacity") @@ -210,7 +220,7 @@ var variablesRegex = regexp.MustCompile(`\{(data-dir|node-data-dir|hostname|star // buildNodeArguments will resolve and split the given string into arguments, replacing the variables with the appropriate values. // // We are using a function for testing purposes, so that we can test arguments resolving and splitting correctly. 
-func buildNodeArguments(in string, resolver ReaderNodeArgumentResolver) ([]string, error) { +func buildNodeArguments(in string, resolver firecore.ReaderNodeArgumentResolver) ([]string, error) { // Split arguments according to standard shell rules nodeArguments, err := shellquote.Split(resolver(in)) if err != nil { @@ -220,9 +230,7 @@ func buildNodeArguments(in string, resolver ReaderNodeArgumentResolver) ([]strin return nodeArguments, nil } -type ReaderNodeArgumentResolver = func(in string) string - -func createNodeArgumentsResolver(dataDir, nodeDataDir, hostname string, startBlockNum, stopBlockNum uint64) ReaderNodeArgumentResolver { +func createNodeArgumentsResolver(dataDir, nodeDataDir, hostname string, startBlockNum, stopBlockNum uint64) firecore.ReaderNodeArgumentResolver { return func(in string) string { return variablesRegex.ReplaceAllStringFunc(in, func(match string) string { switch match { diff --git a/reader_node_stdin.go b/cmd/apps/reader_node_stdin.go similarity index 87% rename from reader_node_stdin.go rename to cmd/apps/reader_node_stdin.go index a06a728..08d6bbc 100644 --- a/reader_node_stdin.go +++ b/cmd/apps/reader_node_stdin.go @@ -12,20 +12,22 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package firecore +package apps import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/streamingfast/dlauncher/launcher" + firecore "github.com/streamingfast/firehose-core" nodeManager "github.com/streamingfast/firehose-core/node-manager" nodeReaderStdinApp "github.com/streamingfast/firehose-core/node-manager/app/node_reader_stdin" "github.com/streamingfast/firehose-core/node-manager/metrics" "github.com/streamingfast/firehose-core/node-manager/mindreader" "github.com/streamingfast/logging" + "go.uber.org/zap" ) -func registerReaderNodeStdinApp[B Block](chain *Chain[B]) { +func RegisterReaderNodeStdinApp[B firecore.Block](chain *firecore.Chain[B], rootLog *zap.Logger) { appLogger, appTracer := logging.PackageLogger("reader-node-stdin", chain.LoggerPackageID("reader-node-stdin")) launcher.RegisterApp(rootLog, &launcher.AppDef{ @@ -35,7 +37,7 @@ func registerReaderNodeStdinApp[B Block](chain *Chain[B]) { RegisterFlags: func(cmd *cobra.Command) error { return nil }, FactoryFunc: func(runtime *launcher.Runtime) (launcher.App, error) { sfDataDir := runtime.AbsDataDir - archiveStoreURL := MustReplaceDataDir(sfDataDir, viper.GetString("common-one-block-store-url")) + archiveStoreURL := firecore.MustReplaceDataDir(sfDataDir, viper.GetString("common-one-block-store-url")) consoleReaderFactory := func(lines chan string) (mindreader.ConsolerReader, error) { return chain.ConsoleReaderFactory(lines, chain.BlockEncoder, appLogger, appTracer) } @@ -52,7 +54,7 @@ func registerReaderNodeStdinApp[B Block](chain *Chain[B]) { MindReadBlocksChanCapacity: viper.GetInt("reader-node-blocks-chan-capacity"), StartBlockNum: viper.GetUint64("reader-node-start-block-num"), StopBlockNum: viper.GetUint64("reader-node-stop-block-num"), - WorkingDir: MustReplaceDataDir(sfDataDir, viper.GetString("reader-node-working-dir")), + WorkingDir: firecore.MustReplaceDataDir(sfDataDir, viper.GetString("reader-node-working-dir")), OneBlockSuffix: 
viper.GetString("reader-node-one-block-suffix"), }, &nodeReaderStdinApp.Modules{ ConsoleReaderFactory: consoleReaderFactory, diff --git a/reader_node_test.go b/cmd/apps/reader_node_test.go similarity index 99% rename from reader_node_test.go rename to cmd/apps/reader_node_test.go index 1e74609..95f1888 100644 --- a/reader_node_test.go +++ b/cmd/apps/reader_node_test.go @@ -1,4 +1,4 @@ -package firecore +package apps import ( "testing" diff --git a/relayer.go b/cmd/apps/relayer.go similarity index 64% rename from relayer.go rename to cmd/apps/relayer.go index a13fe16..4bc763b 100644 --- a/relayer.go +++ b/cmd/apps/relayer.go @@ -1,4 +1,4 @@ -package firecore +package apps import ( "time" @@ -6,17 +6,19 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/streamingfast/dlauncher/launcher" + firecore "github.com/streamingfast/firehose-core" "github.com/streamingfast/firehose-core/relayer/app/relayer" + "go.uber.org/zap" ) -func registerRelayerApp() { +func RegisterRelayerApp(rootLog *zap.Logger) { launcher.RegisterApp(rootLog, &launcher.AppDef{ ID: "relayer", Title: "Relayer", Description: "Serves blocks as a stream, with a buffer", RegisterFlags: func(cmd *cobra.Command) error { - cmd.Flags().String("relayer-grpc-listen-addr", RelayerServingAddr, "Address to listen for incoming gRPC requests") - cmd.Flags().StringSlice("relayer-source", []string{ReaderNodeGRPCAddr}, "List of live sources (reader(s)) to connect to for live block feeds (repeat flag as needed)") + cmd.Flags().String("relayer-grpc-listen-addr", firecore.RelayerServingAddr, "Address to listen for incoming gRPC requests") + cmd.Flags().StringSlice("relayer-source", []string{firecore.ReaderNodeGRPCAddr}, "List of live sources (reader(s)) to connect to for live block feeds (repeat flag as needed)") cmd.Flags().Duration("relayer-max-source-latency", 999999*time.Hour, "Max latency tolerated to connect to a source. 
A performance optimization for when you have redundant sources and some may not have caught up") return nil }, @@ -25,7 +27,7 @@ func registerRelayerApp() { return relayer.New(&relayer.Config{ SourcesAddr: viper.GetStringSlice("relayer-source"), - OneBlocksURL: MustReplaceDataDir(sfDataDir, viper.GetString("common-one-block-store-url")), + OneBlocksURL: firecore.MustReplaceDataDir(sfDataDir, viper.GetString("common-one-block-store-url")), GRPCListenAddr: viper.GetString("relayer-grpc-listen-addr"), MaxSourceLatency: viper.GetDuration("relayer-max-source-latency"), }), nil diff --git a/start.go b/cmd/apps/start.go similarity index 85% rename from start.go rename to cmd/apps/start.go index 3633218..43071af 100644 --- a/start.go +++ b/cmd/apps/start.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package firecore +package apps import ( "fmt" @@ -25,16 +25,16 @@ import ( "github.com/streamingfast/cli" "github.com/streamingfast/dlauncher/launcher" "github.com/streamingfast/dmetering" + firecore "github.com/streamingfast/firehose-core" "go.uber.org/zap" ) -var startCmd = &cobra.Command{Use: "start", Args: cobra.ArbitraryArgs} +var StartCmd = &cobra.Command{Use: "start", Args: cobra.ArbitraryArgs} -func configureStartCmd[B Block](chain *Chain[B]) { - binaryName := chain.BinaryName() +func ConfigureStartCmd[B firecore.Block](chain *firecore.Chain[B], binaryName string, rootLog *zap.Logger) { - startCmd.Short = fmt.Sprintf("Starts `%s` services all at once", binaryName) - startCmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + StartCmd.Short = fmt.Sprintf("Starts `%s` services all at once", binaryName) + StartCmd.RunE = func(cmd *cobra.Command, args []string) (err error) { cmd.SilenceUsage = true dataDir := viper.GetString("global-data-dir") @@ -43,7 +43,7 @@ func configureStartCmd[B Block](chain *Chain[B]) { configFile := viper.GetString("global-config-file") 
rootLog.Info(fmt.Sprintf("starting Firehose on %s with config file '%s'", chain.LongName, configFile)) - err = start(dataDir, args) + err = start(dataDir, args, rootLog) if err != nil { return fmt.Errorf("unable to launch: %w", err) } @@ -53,13 +53,13 @@ func configureStartCmd[B Block](chain *Chain[B]) { } } -func start(dataDir string, args []string) (err error) { +func start(dataDir string, args []string, rootLog *zap.Logger) (err error) { dataDirAbs, err := filepath.Abs(dataDir) if err != nil { return fmt.Errorf("unable to setup directory structure: %w", err) } - err = makeDirs([]string{dataDirAbs}) + err = firecore.MakeDirs([]string{dataDirAbs}) if err != nil { return err } diff --git a/substreams_common.go b/cmd/apps/substreams_common.go similarity index 83% rename from substreams_common.go rename to cmd/apps/substreams_common.go index 0183114..409d477 100644 --- a/substreams_common.go +++ b/cmd/apps/substreams_common.go @@ -1,10 +1,11 @@ -package firecore +package apps import ( "fmt" "sync" "github.com/spf13/cobra" + firecore "github.com/streamingfast/firehose-core" "github.com/streamingfast/substreams/pipeline" "github.com/streamingfast/substreams/wasm" ) @@ -19,7 +20,7 @@ func registerCommonSubstreamsFlags(cmd *cobra.Command) { }) } -func getSubstreamsExtensions[B Block](chain *Chain[B]) ([]wasm.WASMExtensioner, []pipeline.PipelineOptioner, error) { +func getSubstreamsExtensions[B firecore.Block](chain *firecore.Chain[B]) ([]wasm.WASMExtensioner, []pipeline.PipelineOptioner, error) { var wasmExtensions []wasm.WASMExtensioner var pipelineOptions []pipeline.PipelineOptioner @@ -37,8 +38,3 @@ func getSubstreamsExtensions[B Block](chain *Chain[B]) ([]wasm.WASMExtensioner, return wasmExtensions, pipelineOptions, nil } - -type SubstreamsExtension struct { - PipelineOptioner pipeline.PipelineOptioner - WASMExtensioner wasm.WASMExtensioner -} diff --git a/substreams_tier1.go b/cmd/apps/substreams_tier1.go similarity index 87% rename from substreams_tier1.go rename 
to cmd/apps/substreams_tier1.go index c45c479..d9249ab 100644 --- a/substreams_tier1.go +++ b/cmd/apps/substreams_tier1.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package firecore +package apps import ( "fmt" @@ -25,14 +25,16 @@ import ( "github.com/streamingfast/dauth" discoveryservice "github.com/streamingfast/dgrpc/server/discovery-service" "github.com/streamingfast/dlauncher/launcher" + firecore "github.com/streamingfast/firehose-core" "github.com/streamingfast/logging" app "github.com/streamingfast/substreams/app" + "go.uber.org/zap" ) var ss1HeadBlockNumMetric = metricset.NewHeadBlockNumber("substreams-tier1") var ss1HeadTimeDriftmetric = metricset.NewHeadTimeDrift("substreams-tier1") -func registerSubstreamsTier1App[B Block](chain *Chain[B]) { +func RegisterSubstreamsTier1App[B firecore.Block](chain *firecore.Chain[B], rootLog *zap.Logger) { appLogger, _ := logging.PackageLogger("substreams-tier1", "github.com/streamingfast/firehose-core/firehose-ethereum/substreams-tier1") launcher.RegisterApp(rootLog, &launcher.AppDef{ @@ -40,8 +42,8 @@ func registerSubstreamsTier1App[B Block](chain *Chain[B]) { Title: "Substreams tier1 server", Description: "Provides a substreams grpc endpoint", RegisterFlags: func(cmd *cobra.Command) error { - cmd.Flags().String("substreams-tier1-grpc-listen-addr", SubstreamsTier1GRPCServingAddr, "Address on which the Substreams tier1 will listen, listen by default in plain text, appending a '*' to the end of the address make it listen in snake-oil (inscure) TLS") - cmd.Flags().String("substreams-tier1-subrequests-endpoint", SubstreamsTier2GRPCServingAddr, "Address on which the Substreans tier1 can reach the tier2") + cmd.Flags().String("substreams-tier1-grpc-listen-addr", firecore.SubstreamsTier1GRPCServingAddr, "Address on which the Substreams tier1 will listen, listen by default in plain text, appending a '*' to the end of the address make it listen in 
snake-oil (inscure) TLS") + cmd.Flags().String("substreams-tier1-subrequests-endpoint", firecore.SubstreamsTier2GRPCServingAddr, "Address on which the Substreans tier1 can reach the tier2") // communication with tier2 cmd.Flags().String("substreams-tier1-discovery-service-url", "", "URL to configure the grpc discovery service, used for communication with tier2") //traffic-director://xds?vpc_network=vpc-global&use_xds_reds=true cmd.Flags().Bool("substreams-tier1-subrequests-insecure", false, "Connect to tier2 without checking certificate validity") @@ -62,7 +64,7 @@ func registerSubstreamsTier1App[B Block](chain *Chain[B]) { return nil, fmt.Errorf("unable to initialize dauth: %w", err) } - mergedBlocksStoreURL, oneBlocksStoreURL, forkedBlocksStoreURL, err := GetCommonStoresURLs(runtime.AbsDataDir) + mergedBlocksStoreURL, oneBlocksStoreURL, forkedBlocksStoreURL, err := firecore.GetCommonStoresURLs(runtime.AbsDataDir) if err != nil { return nil, err } @@ -72,7 +74,7 @@ func registerSubstreamsTier1App[B Block](chain *Chain[B]) { rawServiceDiscoveryURL := viper.GetString("substreams-tier1-discovery-service-url") grpcListenAddr := viper.GetString("substreams-tier1-grpc-listen-addr") - stateStoreURL := MustReplaceDataDir(sfDataDir, viper.GetString("substreams-state-store-url")) + stateStoreURL := firecore.MustReplaceDataDir(sfDataDir, viper.GetString("substreams-state-store-url")) stateStoreDefaultTag := viper.GetString("substreams-state-store-default-tag") stateBundleSize := viper.GetUint64("substreams-state-bundle-size") diff --git a/substreams_tier2.go b/cmd/apps/substreams_tier2.go similarity index 85% rename from substreams_tier2.go rename to cmd/apps/substreams_tier2.go index e9aaea9..6b77255 100644 --- a/substreams_tier2.go +++ b/cmd/apps/substreams_tier2.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package firecore +package apps import ( "fmt" @@ -23,14 +23,16 @@ import ( "github.com/spf13/viper" discoveryservice "github.com/streamingfast/dgrpc/server/discovery-service" "github.com/streamingfast/dlauncher/launcher" + firecore "github.com/streamingfast/firehose-core" "github.com/streamingfast/logging" - app "github.com/streamingfast/substreams/app" + "github.com/streamingfast/substreams/app" + "go.uber.org/zap" ) var ss2HeadBlockNumMetric = metricset.NewHeadBlockNumber("substreams-tier2") var ss2HeadTimeDriftmetric = metricset.NewHeadTimeDrift("substreams-tier2") -func registerSubstreamsTier2App[B Block](chain *Chain[B]) { +func RegisterSubstreamsTier2App[B firecore.Block](chain *firecore.Chain[B], rootLog *zap.Logger) { appLogger, _ := logging.PackageLogger("substreams-tier2", "github.com/streamingfast/firehose-core/firehose-ethereum/substreams-tier2") launcher.RegisterApp(rootLog, &launcher.AppDef{ @@ -38,7 +40,7 @@ func registerSubstreamsTier2App[B Block](chain *Chain[B]) { Title: "Substreams tier2 server", Description: "Provides a substreams grpc endpoint", RegisterFlags: func(cmd *cobra.Command) error { - cmd.Flags().String("substreams-tier2-grpc-listen-addr", SubstreamsTier2GRPCServingAddr, "Address on which the substreams tier2 will listen. Default is plain-text, appending a '*' to the end to jkkkj") + cmd.Flags().String("substreams-tier2-grpc-listen-addr", firecore.SubstreamsTier2GRPCServingAddr, "Address on which the substreams tier2 will listen. Default is plain-text, appending a '*' to the end to jkkkj") cmd.Flags().String("substreams-tier2-discovery-service-url", "", "URL to advertise presence to the grpc discovery service") //traffic-director://xds?vpc_network=vpc-global&use_xds_reds=true cmd.Flags().String("substreams-tier2-block-type", "", "fully qualified name of the block type to use for the substreams tier1 (i.e. 
sf.ethereum.v1.Block)") @@ -48,7 +50,7 @@ func registerSubstreamsTier2App[B Block](chain *Chain[B]) { }, FactoryFunc: func(runtime *launcher.Runtime) (launcher.App, error) { - mergedBlocksStoreURL, _, _, err := GetCommonStoresURLs(runtime.AbsDataDir) + mergedBlocksStoreURL, _, _, err := firecore.GetCommonStoresURLs(runtime.AbsDataDir) if err != nil { return nil, err } @@ -58,7 +60,7 @@ func registerSubstreamsTier2App[B Block](chain *Chain[B]) { rawServiceDiscoveryURL := viper.GetString("substreams-tier2-discovery-service-url") grpcListenAddr := viper.GetString("substreams-tier2-grpc-listen-addr") - stateStoreURL := MustReplaceDataDir(sfDataDir, viper.GetString("substreams-state-store-url")) + stateStoreURL := firecore.MustReplaceDataDir(sfDataDir, viper.GetString("substreams-state-store-url")) stateStoreDefaultTag := viper.GetString("substreams-state-store-default-tag") stateBundleSize := viper.GetUint64("substreams-state-bundle-size") diff --git a/common.go b/cmd/common.go similarity index 80% rename from common.go rename to cmd/common.go index 82ad81d..f819d7a 100644 --- a/common.go +++ b/cmd/common.go @@ -12,24 +12,25 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package firecore +package cmd import ( "github.com/spf13/cobra" "github.com/streamingfast/cli" "github.com/streamingfast/dlauncher/launcher" + firecore "github.com/streamingfast/firehose-core" "go.uber.org/zap" ) -func registerCommonFlags[B Block](chain *Chain[B]) { +func registerCommonFlags[B firecore.Block](chain *firecore.Chain[B]) { launcher.RegisterCommonFlags = func(_ *zap.Logger, cmd *cobra.Command) error { // Common stores configuration flags - cmd.Flags().String("common-one-block-store-url", OneBlockStoreURL, "[COMMON] Store URL to read/write one-block files") - cmd.Flags().String("common-merged-blocks-store-url", MergedBlocksStoreURL, "[COMMON] Store URL where to read/write merged blocks.") - cmd.Flags().String("common-forked-blocks-store-url", ForkedBlocksStoreURL, "[COMMON] Store URL where to read/write forked block files that we want to keep.") - cmd.Flags().String("common-live-blocks-addr", RelayerServingAddr, "[COMMON] gRPC endpoint to get real-time blocks.") + cmd.Flags().String("common-one-block-store-url", firecore.OneBlockStoreURL, "[COMMON] Store URL to read/write one-block files") + cmd.Flags().String("common-merged-blocks-store-url", firecore.MergedBlocksStoreURL, "[COMMON] Store URL where to read/write merged blocks.") + cmd.Flags().String("common-forked-blocks-store-url", firecore.ForkedBlocksStoreURL, "[COMMON] Store URL where to read/write forked block files that we want to keep.") + cmd.Flags().String("common-live-blocks-addr", firecore.RelayerServingAddr, "[COMMON] gRPC endpoint to get real-time blocks.") - cmd.Flags().String("common-index-store-url", IndexStoreURL, "[COMMON] Store URL where to read/write index files (if used on the chain).") + cmd.Flags().String("common-index-store-url", firecore.IndexStoreURL, "[COMMON] Store URL where to read/write index files (if used on the chain).") cmd.Flags().IntSlice("common-index-block-sizes", []int{100000, 10000, 1000, 100}, "Index bundle sizes that that are considered valid when looking for 
block indexes") cmd.Flags().Bool("common-blocks-cache-enabled", false, cli.FlagDescription(` @@ -40,7 +41,7 @@ func registerCommonFlags[B Block](chain *Chain[B]) { split in two portions, one keeping N total bytes of blocks of the most recently used blocks and the other one keeping the N earliest blocks as requested by the various consumers of the cache. `)) - cmd.Flags().String("common-blocks-cache-dir", BlocksCacheDirectory, cli.FlagDescription(` + cmd.Flags().String("common-blocks-cache-dir", firecore.BlocksCacheDirectory, cli.FlagDescription(` [COMMON] Blocks cache directory where all the block's bytes will be cached to disk instead of being kept in RAM. This should be a disk that persists across restarts of the Firehose component to reduce the the strain on the disk when restarting and streams reconnects. The size of disk must at least big (with a 10%% buffer) in bytes as the sum of flags' diff --git a/cmd/firecore/main.go b/cmd/firecore/main.go index a3dde8e..82ee3aa 100644 --- a/cmd/firecore/main.go +++ b/cmd/firecore/main.go @@ -3,10 +3,11 @@ package main import ( pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" firecore "github.com/streamingfast/firehose-core" + fhCMD "github.com/streamingfast/firehose-core/cmd" ) func main() { - firecore.Main(&firecore.Chain[*pbbstream.Block]{ + fhCMD.Main(&firecore.Chain[*pbbstream.Block]{ ShortName: "core", //used to compose the binary name LongName: "CORE", //only used to compose cmd title and description ExecutableName: "fire-core", //only used to set default value of reader-node-path, we should not provide a default value anymore ... 
diff --git a/main.go b/cmd/main.go similarity index 84% rename from main.go rename to cmd/main.go index e5ef555..e308e52 100644 --- a/main.go +++ b/cmd/main.go @@ -1,4 +1,4 @@ -package firecore +package cmd import ( "fmt" @@ -6,6 +6,10 @@ import ( "strings" "time" + "github.com/streamingfast/firehose-core/cmd/tools" + + "github.com/streamingfast/firehose-core/cmd/apps" + "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/spf13/viper" @@ -19,6 +23,7 @@ import ( "github.com/streamingfast/dmetering" dmeteringgrpc "github.com/streamingfast/dmetering/grpc" dmeteringlogger "github.com/streamingfast/dmetering/logger" + firecore "github.com/streamingfast/firehose-core" "github.com/streamingfast/logging" "go.uber.org/zap" ) @@ -29,7 +34,7 @@ var rootTracer logging.Tracer // Main is the main entry point that configures everything and should be called from your Go // 'main' entrypoint directly. -func Main[B Block](chain *Chain[B]) { +func Main[B firecore.Block](chain *firecore.Chain[B]) { dauthgrpc.Register() dauthnull.Register() dauthsecret.Register() @@ -48,7 +53,7 @@ func Main[B Block](chain *Chain[B]) { cli.ConfigureViperForCommand(rootCmd, strings.ToUpper(binaryName)) // Compatibility to fetch `viper.GetXXX(....)` without `start-` prefix for flags on startCmd - startCmd.LocalFlags().VisitAll(func(flag *pflag.Flag) { + apps.StartCmd.LocalFlags().VisitAll(func(flag *pflag.Flag) { viper.BindPFlag(flag.Name, flag) viper.BindEnv(sflags.MustGetViperKeyFromFlag(flag), strings.ToUpper(binaryName+"_"+strings.ReplaceAll(flag.Name, "-", "_"))) }) @@ -58,8 +63,8 @@ func Main[B Block](chain *Chain[B]) { rootCmd.Short = fmt.Sprintf("Firehose on %s", chain.LongName) rootCmd.Version = chain.VersionString() - rootCmd.AddCommand(startCmd) - rootCmd.AddCommand(toolsCmd) + rootCmd.AddCommand(apps.StartCmd) + rootCmd.AddCommand(tools.ToolsCmd) (func(flags *pflag.FlagSet) { flags.StringP("data-dir", "d", "./firehose-data", "Path to data storage for all components of the Firehose 
stack") @@ -92,29 +97,29 @@ func Main[B Block](chain *Chain[B]) { })(rootCmd.PersistentFlags()) registerCommonFlags(chain) - registerReaderNodeApp(chain) - registerReaderNodeStdinApp(chain) - registerMergerApp() - registerRelayerApp() - registerFirehoseApp(chain) - registerSubstreamsTier1App(chain) - registerSubstreamsTier2App(chain) + apps.RegisterReaderNodeApp(chain, rootLog) + apps.RegisterReaderNodeStdinApp(chain, rootLog) + apps.RegisterMergerApp(rootLog) + apps.RegisterRelayerApp(rootLog) + apps.RegisterFirehoseApp(chain, rootLog) + apps.RegisterSubstreamsTier1App(chain, rootLog) + apps.RegisterSubstreamsTier2App(chain, rootLog) if len(chain.BlockIndexerFactories) > 0 { - registerIndexBuilderApp(chain) + apps.RegisterIndexBuilderApp(chain, rootLog) } if chain.RegisterExtraStartFlags != nil { - chain.RegisterExtraStartFlags(startCmd.Flags()) + chain.RegisterExtraStartFlags(apps.StartCmd.Flags()) } - configureStartCmd(chain) + apps.ConfigureStartCmd(chain, "", rootLog) - if err := configureToolsCmd(chain); err != nil { + if err := tools.ConfigureToolsCmd(chain, rootLog, rootTracer); err != nil { exitWithError("registering tools command", err) } - if err := launcher.RegisterFlags(rootLog, startCmd); err != nil { + if err := launcher.RegisterFlags(rootLog, apps.StartCmd); err != nil { exitWithError("registering application flags", err) } @@ -123,8 +128,8 @@ func Main[B Block](chain *Chain[B]) { availableCmds = append(availableCmds, app) } - startCmd.SetHelpTemplate(fmt.Sprintf(startCmdHelpTemplate, strings.Join(availableCmds, "\n "))) - startCmd.Example = fmt.Sprintf("%s start reader-node", binaryName) + apps.StartCmd.SetHelpTemplate(fmt.Sprintf(startCmdHelpTemplate, strings.Join(availableCmds, "\n "))) + apps.StartCmd.Example = fmt.Sprintf("%s start reader-node", binaryName) rootCmd.PersistentPreRunE = func(cmd *cobra.Command, _ []string) error { if err := setupCmd(cmd, chain.BinaryName()); err != nil { diff --git a/setup.go b/cmd/setup.go similarity index 94% 
rename from setup.go rename to cmd/setup.go index 9529f53..f2c41fe 100644 --- a/setup.go +++ b/cmd/setup.go @@ -1,4 +1,4 @@ -package firecore +package cmd import ( "fmt" @@ -6,6 +6,8 @@ import ( "os" "strings" + "github.com/streamingfast/firehose-core/cmd/apps" + "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/spf13/viper" @@ -19,8 +21,8 @@ func setupCmd(cmd *cobra.Command, binaryName string) error { cmds := extractCmd(cmd) subCommand := cmds[len(cmds)-1] - forceConfigOn := []*cobra.Command{startCmd} - logToFileOn := []*cobra.Command{startCmd} + forceConfigOn := []*cobra.Command{apps.StartCmd} + logToFileOn := []*cobra.Command{apps.StartCmd} if configFile := viper.GetString("global-config-file"); configFile != "" { exists, err := fileExists(configFile) @@ -138,7 +140,7 @@ func getStartFlags() (byName map[string]*flagInfo) { byName[flag.Name] = &flagInfo{flag.Name, sflags.MustGetViperKeyFromFlag(flag)} }) - startCmd.LocalFlags().VisitAll(func(flag *pflag.Flag) { + apps.StartCmd.LocalFlags().VisitAll(func(flag *pflag.Flag) { byName[flag.Name] = &flagInfo{flag.Name, sflags.MustGetViperKeyFromFlag(flag)} }) diff --git a/tools/block_range.go b/cmd/tools/block_range.go similarity index 100% rename from tools/block_range.go rename to cmd/tools/block_range.go diff --git a/tools/block_range_enum.go b/cmd/tools/block_range_enum.go similarity index 100% rename from tools/block_range_enum.go rename to cmd/tools/block_range_enum.go diff --git a/tools/flags.go b/cmd/tools/flags.go similarity index 100% rename from tools/flags.go rename to cmd/tools/flags.go diff --git a/tools.go b/cmd/tools/tools.go similarity index 72% rename from tools.go rename to cmd/tools/tools.go index e2ab87a..ff13269 100644 --- a/tools.go +++ b/cmd/tools/tools.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package firecore +package tools import ( "fmt" @@ -22,7 +22,9 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/streamingfast/cli/sflags" + firecore "github.com/streamingfast/firehose-core" "github.com/streamingfast/firehose-core/firehose/client" + "github.com/streamingfast/logging" pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" "google.golang.org/grpc" @@ -30,51 +32,53 @@ import ( "google.golang.org/protobuf/types/known/anypb" ) -var toolsCmd = &cobra.Command{Use: "tools", Short: "Developer tools for operators and developers"} +var ToolsCmd = &cobra.Command{Use: "tools", Short: "Developer tools for operators and developers"} -func configureToolsCmd[B Block]( - chain *Chain[B], +func ConfigureToolsCmd[B firecore.Block]( + chain *firecore.Chain[B], + logger *zap.Logger, + tracer logging.Tracer, ) error { - configureToolsCheckCmd(chain) + configureToolsCheckCmd(chain, logger) configureToolsPrintCmd(chain) - toolsCmd.AddCommand(newToolsCompareBlocksCmd(chain)) - toolsCmd.AddCommand(newToolsDownloadFromFirehoseCmd(chain, rootLog)) - toolsCmd.AddCommand(newToolsFirehoseClientCmd(chain, rootLog)) - toolsCmd.AddCommand(newToolsFirehoseSingleBlockClientCmd(chain, rootLog, rootTracer)) - toolsCmd.AddCommand(newToolsFirehosePrometheusExporterCmd(chain, rootLog, rootTracer)) - toolsCmd.AddCommand(newToolsUnmergeBlocksCmd(chain, rootLog)) - toolsCmd.AddCommand(newToolsFixBloatedMergedBlocks(chain, rootLog)) + ToolsCmd.AddCommand(newToolsCompareBlocksCmd(chain)) + ToolsCmd.AddCommand(newToolsDownloadFromFirehoseCmd(chain, logger)) + ToolsCmd.AddCommand(newToolsFirehoseClientCmd(chain, logger)) + ToolsCmd.AddCommand(newToolsFirehoseSingleBlockClientCmd(chain, logger, tracer)) + ToolsCmd.AddCommand(newToolsFirehosePrometheusExporterCmd(chain, logger, tracer)) + ToolsCmd.AddCommand(newToolsUnmergeBlocksCmd(chain, logger)) + ToolsCmd.AddCommand(newToolsFixBloatedMergedBlocks(chain, logger)) if chain.Tools.MergedBlockUpgrader != 
nil { - toolsCmd.AddCommand(NewToolsUpgradeMergedBlocksCmd(chain)) + ToolsCmd.AddCommand(NewToolsUpgradeMergedBlocksCmd(chain, logger)) } if chain.Tools.RegisterExtraCmd != nil { - if err := chain.Tools.RegisterExtraCmd(chain, toolsCmd, rootLog, rootTracer); err != nil { + if err := chain.Tools.RegisterExtraCmd(chain, ToolsCmd, logger, tracer); err != nil { return fmt.Errorf("registering extra tools command: %w", err) } } var walkCmd func(node *cobra.Command) walkCmd = func(node *cobra.Command) { - hideGlobalFlagsOnChildCmd(node) + firecore.HideGlobalFlagsOnChildCmd(node) for _, child := range node.Commands() { walkCmd(child) } } - walkCmd(toolsCmd) + walkCmd(ToolsCmd) return nil } -func addFirehoseStreamClientFlagsToSet[B Block](flags *pflag.FlagSet, chain *Chain[B]) { +func addFirehoseStreamClientFlagsToSet[B firecore.Block](flags *pflag.FlagSet, chain *firecore.Chain[B]) { addFirehoseFetchClientFlagsToSet(flags, chain) flags.String("cursor", "", "Use this cursor with the request to resume your stream at the following block pointed by the cursor") } -func addFirehoseFetchClientFlagsToSet[B Block](flags *pflag.FlagSet, chain *Chain[B]) { +func addFirehoseFetchClientFlagsToSet[B firecore.Block](flags *pflag.FlagSet, chain *firecore.Chain[B]) { flags.StringP("api-token-env-var", "a", "FIREHOSE_API_TOKEN", "Look for a JWT in this environment variable to authenticate against endpoint") flags.String("compression", "none", "The HTTP compression: use either 'none', 'gzip' or 'zstd'") flags.BoolP("plaintext", "p", false, "Use plaintext connection to Firehose") @@ -91,7 +95,7 @@ type firehoseRequestInfo struct { Transforms []*anypb.Any } -func getFirehoseFetchClientFromCmd[B Block](cmd *cobra.Command, logger *zap.Logger, endpoint string, chain *Chain[B]) ( +func getFirehoseFetchClientFromCmd[B firecore.Block](cmd *cobra.Command, logger *zap.Logger, endpoint string, chain *firecore.Chain[B]) ( firehoseClient pbfirehose.FetchClient, connClose func() error, requestInfo 
*firehoseRequestInfo, @@ -100,7 +104,7 @@ func getFirehoseFetchClientFromCmd[B Block](cmd *cobra.Command, logger *zap.Logg return getFirehoseClientFromCmd[B, pbfirehose.FetchClient](cmd, logger, "fetch-client", endpoint, chain) } -func getFirehoseStreamClientFromCmd[B Block](cmd *cobra.Command, logger *zap.Logger, endpoint string, chain *Chain[B]) ( +func getFirehoseStreamClientFromCmd[B firecore.Block](cmd *cobra.Command, logger *zap.Logger, endpoint string, chain *firecore.Chain[B]) ( firehoseClient pbfirehose.StreamClient, connClose func() error, requestInfo *firehoseRequestInfo, @@ -109,7 +113,7 @@ func getFirehoseStreamClientFromCmd[B Block](cmd *cobra.Command, logger *zap.Log return getFirehoseClientFromCmd[B, pbfirehose.StreamClient](cmd, logger, "stream-client", endpoint, chain) } -func getFirehoseClientFromCmd[B Block, C any](cmd *cobra.Command, logger *zap.Logger, kind string, endpoint string, chain *Chain[B]) ( +func getFirehoseClientFromCmd[B firecore.Block, C any](cmd *cobra.Command, logger *zap.Logger, kind string, endpoint string, chain *firecore.Chain[B]) ( firehoseClient C, connClose func() error, requestInfo *firehoseRequestInfo, diff --git a/tools_check.go b/cmd/tools/tools_check.go similarity index 91% rename from tools_check.go rename to cmd/tools/tools_check.go index bcf25c4..74014df 100644 --- a/tools_check.go +++ b/cmd/tools/tools_check.go @@ -12,19 +12,22 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package firecore +package tools import ( "fmt" "strings" + "go.uber.org/zap" + + firecore "github.com/streamingfast/firehose-core" + "github.com/dustin/go-humanize" "github.com/spf13/cobra" "github.com/streamingfast/bstream" "github.com/streamingfast/cli" "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/tools" "golang.org/x/exp/maps" "golang.org/x/exp/slices" ) @@ -47,7 +50,7 @@ var toolsCheckMergedBlocksCmd = &cobra.Command{ } func init() { - toolsCmd.AddCommand(toolsCheckCmd) + ToolsCmd.AddCommand(toolsCheckCmd) toolsCheckCmd.AddCommand(toolsCheckForksCmd) toolsCheckCmd.AddCommand(toolsCheckMergedBlocksCmd) @@ -60,9 +63,9 @@ func init() { toolsCheckForksCmd.Flags().Uint64("after-block", 0, "Only show forks that happened after this block number, if value is not 0") } -func configureToolsCheckCmd[B Block](chain *Chain[B]) { - toolsCheckMergedBlocksCmd.RunE = createToolsCheckMergedBlocksE(chain) - toolsCheckMergedBlocksCmd.Example = ExamplePrefixed(chain, "tools check merged-blocks", ` +func configureToolsCheckCmd[B firecore.Block](chain *firecore.Chain[B], rootLog *zap.Logger) { + toolsCheckMergedBlocksCmd.RunE = createToolsCheckMergedBlocksE(chain, rootLog) + toolsCheckMergedBlocksCmd.Example = firecore.ExamplePrefixed(chain, "tools check merged-blocks", ` "./sf-data/storage/merged-blocks" "gs:////" -s "s3:////" -f @@ -72,12 +75,12 @@ func configureToolsCheckCmd[B Block](chain *Chain[B]) { toolsCheckForksCmd.RunE = toolsCheckForksE } -func createToolsCheckMergedBlocksE[B Block](chain *Chain[B]) CommandExecutor { +func createToolsCheckMergedBlocksE[B firecore.Block](chain *firecore.Chain[B], rootLog *zap.Logger) firecore.CommandExecutor { return func(cmd *cobra.Command, args []string) error { storeURL := args[0] fileBlockSize := uint64(100) - blockRange, err := tools.GetBlockRangeFromFlag(cmd, "range") + blockRange, err := GetBlockRangeFromFlag(cmd, "range") if err != nil { return err } diff --git 
a/tools_check_blocks.go b/cmd/tools/tools_check_blocks.go similarity index 83% rename from tools_check_blocks.go rename to cmd/tools/tools_check_blocks.go index bfa8b31..1f44507 100644 --- a/tools_check_blocks.go +++ b/cmd/tools/tools_check_blocks.go @@ -1,4 +1,4 @@ -package firecore +package tools import ( "context" @@ -9,11 +9,12 @@ import ( "regexp" "strconv" + firecore "github.com/streamingfast/firehose-core" + "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/forkable" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/tools" "go.uber.org/zap" ) @@ -28,7 +29,7 @@ const ( MaxUint64 = ^uint64(0) ) -func CheckMergedBlocks[B Block](ctx context.Context, chain *Chain[B], logger *zap.Logger, storeURL string, fileBlockSize uint64, blockRange tools.BlockRange, printDetails PrintDetails) error { +func CheckMergedBlocks[B firecore.Block](ctx context.Context, chain *firecore.Chain[B], logger *zap.Logger, storeURL string, fileBlockSize uint64, blockRange BlockRange, printDetails PrintDetails) error { readAllBlocks := printDetails != PrintNoDetails fmt.Printf("Checking block holes on %s\n", storeURL) if readAllBlocks { @@ -49,7 +50,7 @@ func CheckMergedBlocks[B Block](ctx context.Context, chain *Chain[B], logger *za // } holeFound := false - expected = tools.RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize) + expected = RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize) currentStartBlk := uint64(blockRange.Start) blocksStore, err := dstore.NewDBinStore(storeURL) @@ -82,11 +83,11 @@ func CheckMergedBlocks[B Block](ctx context.Context, chain *Chain[B], logger *za if baseNum != expected { // There is no previous valid block range if we are at the ever first seen file if count > 1 { - fmt.Printf("✅ Range %s\n", tools.NewClosedRange(int64(currentStartBlk), uint64(tools.RoundToBundleEndBlock(expected-fileBlockSize, fileBlockSize)))) + 
fmt.Printf("✅ Range %s\n", NewClosedRange(int64(currentStartBlk), uint64(RoundToBundleEndBlock(expected-fileBlockSize, fileBlockSize)))) } // Otherwise, we do not follow last seen element (previous is `100 - 199` but we are `299 - 300`) - missingRange := tools.NewClosedRange(int64(expected), tools.RoundToBundleEndBlock(baseNum-fileBlockSize, fileBlockSize)) + missingRange := NewClosedRange(int64(expected), RoundToBundleEndBlock(baseNum-fileBlockSize, fileBlockSize)) fmt.Printf("❌ Range %s (Missing, [%s])\n", missingRange, missingRange.ReprocRange()) currentStartBlk = baseNum @@ -112,11 +113,11 @@ func CheckMergedBlocks[B Block](ctx context.Context, chain *Chain[B], logger *za } if count%10000 == 0 { - fmt.Printf("✅ Range %s\n", tools.NewClosedRange(int64(currentStartBlk), tools.RoundToBundleEndBlock(baseNum, fileBlockSize))) + fmt.Printf("✅ Range %s\n", NewClosedRange(int64(currentStartBlk), RoundToBundleEndBlock(baseNum, fileBlockSize))) currentStartBlk = baseNum + fileBlockSize } - if blockRange.IsClosed() && tools.RoundToBundleEndBlock(baseNum, fileBlockSize) >= *blockRange.Stop-1 { + if blockRange.IsClosed() && RoundToBundleEndBlock(baseNum, fileBlockSize) >= *blockRange.Stop-1 { return dstore.StopIteration } @@ -134,9 +135,9 @@ func CheckMergedBlocks[B Block](ctx context.Context, chain *Chain[B], logger *za zap.Uint64("highest_block_seen", highestBlockSeen), ) if tfdb.lastLinkedBlock != nil && tfdb.lastLinkedBlock.Number < highestBlockSeen { - fmt.Printf("🔶 Range %s has issues with forks, last linkable block number: %d\n", tools.NewClosedRange(int64(currentStartBlk), uint64(highestBlockSeen)), tfdb.lastLinkedBlock.Number) + fmt.Printf("🔶 Range %s has issues with forks, last linkable block number: %d\n", NewClosedRange(int64(currentStartBlk), uint64(highestBlockSeen)), tfdb.lastLinkedBlock.Number) } else { - fmt.Printf("✅ Range %s\n", tools.NewClosedRange(int64(currentStartBlk), uint64(highestBlockSeen))) + fmt.Printf("✅ Range %s\n", 
NewClosedRange(int64(currentStartBlk), uint64(highestBlockSeen))) } fmt.Println() @@ -145,7 +146,7 @@ func CheckMergedBlocks[B Block](ctx context.Context, chain *Chain[B], logger *za if blockRange.IsClosed() && (highestBlockSeen < uint64(*blockRange.Stop-1) || (lowestBlockSeen > uint64(blockRange.Start) && lowestBlockSeen > bstream.GetProtocolFirstStreamableBlock)) { - fmt.Printf("> 🔶 Incomplete range %s, started at block %s and stopped at block: %s\n", blockRange, tools.PrettyBlockNum(lowestBlockSeen), tools.PrettyBlockNum(highestBlockSeen)) + fmt.Printf("> 🔶 Incomplete range %s, started at block %s and stopped at block: %s\n", blockRange, PrettyBlockNum(lowestBlockSeen), PrettyBlockNum(highestBlockSeen)) } if holeFound { @@ -164,13 +165,13 @@ type trackedForkDB struct { unlinkableSegmentCount int } -func validateBlockSegment[B Block]( +func validateBlockSegment[B firecore.Block]( ctx context.Context, - chain *Chain[B], + chain *firecore.Chain[B], store dstore.Store, segment string, fileBlockSize uint64, - blockRange tools.BlockRange, + blockRange BlockRange, printDetails PrintDetails, tfdb *trackedForkDB, ) (lowestBlockSeen, highestBlockSeen uint64) { @@ -287,13 +288,13 @@ func validateBlockSegment[B Block]( return } -func WalkBlockPrefix(blockRange tools.BlockRange, fileBlockSize uint64) string { +func WalkBlockPrefix(blockRange BlockRange, fileBlockSize uint64) string { if blockRange.IsOpen() { return "" } - startString := fmt.Sprintf("%010d", tools.RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize)) - endString := fmt.Sprintf("%010d", tools.RoundToBundleEndBlock(uint64(*blockRange.Stop-1), fileBlockSize)+1) + startString := fmt.Sprintf("%010d", RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize)) + endString := fmt.Sprintf("%010d", RoundToBundleEndBlock(uint64(*blockRange.Stop-1), fileBlockSize)+1) offset := 0 for i := 0; i < len(startString); i++ { diff --git a/tools_check_merged_batch.go b/cmd/tools/tools_check_merged_batch.go 
similarity index 90% rename from tools_check_merged_batch.go rename to cmd/tools/tools_check_merged_batch.go index feef5b2..268c467 100644 --- a/tools_check_merged_batch.go +++ b/cmd/tools/tools_check_merged_batch.go @@ -1,4 +1,4 @@ -package firecore +package tools import ( "context" @@ -7,8 +7,6 @@ import ( "strconv" "strings" - "github.com/streamingfast/firehose-core/tools" - pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/bstream" @@ -43,13 +41,13 @@ func CheckMergedBlocksBatch( sourceStoreURL string, destStoreURL string, fileBlockSize uint64, - blockRange tools.BlockRange, + blockRange BlockRange, ) error { if !blockRange.IsResolved() { return fmt.Errorf("check merged blocks can only work with fully resolved range, got %s", blockRange) } - expected := tools.RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize) + expected := RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize) fileBlockSize64 := uint64(fileBlockSize) blocksStore, err := dstore.NewDBinStore(sourceStoreURL) @@ -64,7 +62,7 @@ func CheckMergedBlocksBatch( } } - var firstFilename = fmt.Sprintf("%010d", tools.RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize)) + var firstFilename = fmt.Sprintf("%010d", RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize)) lastSeenBlock := &blockRef{} @@ -105,7 +103,7 @@ func CheckMergedBlocksBatch( destStore.WriteObject(ctx, outputFile, strings.NewReader("")) } } else { - brokenSince := tools.RoundToBundleStartBlock(uint64(lastSeenBlock.num+1), 100) + brokenSince := RoundToBundleStartBlock(uint64(lastSeenBlock.num+1), 100) for i := brokenSince; i <= baseNum; i += fileBlockSize64 { fmt.Printf("found broken file %q, %s\n", filename, details) if destStore != nil { @@ -122,7 +120,7 @@ func CheckMergedBlocksBatch( return err } - if blockRange.IsClosed() && tools.RoundToBundleEndBlock(baseNum, fileBlockSize) >= *blockRange.Stop-1 { + if blockRange.IsClosed() && 
RoundToBundleEndBlock(baseNum, fileBlockSize) >= *blockRange.Stop-1 { return dstore.StopIteration } expected = baseNum + fileBlockSize64 diff --git a/tools_checkmergedbatch.go b/cmd/tools/tools_checkmergedbatch.go similarity index 94% rename from tools_checkmergedbatch.go rename to cmd/tools/tools_checkmergedbatch.go index a4cf4e5..946d5b1 100644 --- a/tools_checkmergedbatch.go +++ b/cmd/tools/tools_checkmergedbatch.go @@ -12,14 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -package firecore +package tools import ( "strconv" "github.com/spf13/cobra" "github.com/streamingfast/cli/sflags" - "github.com/streamingfast/firehose-core/tools" ) var toolsCheckMergedBlocksBatchCmd = &cobra.Command{ @@ -46,7 +45,7 @@ func checkMergedBlocksBatchRunE(cmd *cobra.Command, args []string) error { } fileBlockSize := uint64(100) - blockRange := tools.BlockRange{ + blockRange := BlockRange{ Start: int64(start), Stop: &stop, } diff --git a/tools_compare_blocks.go b/cmd/tools/tools_compare_blocks.go similarity index 93% rename from tools_compare_blocks.go rename to cmd/tools/tools_compare_blocks.go index 2ab7991..3997b38 100644 --- a/tools_compare_blocks.go +++ b/cmd/tools/tools_compare_blocks.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package firecore +package tools import ( "bytes" @@ -30,14 +30,14 @@ import ( "github.com/streamingfast/cli" "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/tools" + firecore "github.com/streamingfast/firehose-core" "go.uber.org/multierr" "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" ) -func newToolsCompareBlocksCmd[B Block](chain *Chain[B]) *cobra.Command { +func newToolsCompareBlocksCmd[B firecore.Block](chain *firecore.Chain[B]) *cobra.Command { cmd := &cobra.Command{ Use: "compare-blocks []", Short: "Checks for any differences between two block stores between a specified range. (To compare the likeness of two block ranges, for example)", @@ -56,7 +56,7 @@ func newToolsCompareBlocksCmd[B Block](chain *Chain[B]) *cobra.Command { `), Args: cobra.ExactArgs(3), RunE: runCompareBlocksE(chain), - Example: ExamplePrefixed(chain, "tools compare-blocks", ` + Example: firecore.ExamplePrefixed(chain, "tools compare-blocks", ` # Run over full block range reference_store/ current_store/ 0:16000000 @@ -72,7 +72,7 @@ func newToolsCompareBlocksCmd[B Block](chain *Chain[B]) *cobra.Command { return cmd } -func runCompareBlocksE[B Block](chain *Chain[B]) CommandExecutor { +func runCompareBlocksE[B firecore.Block](chain *firecore.Chain[B]) firecore.CommandExecutor { sanitizer := chain.Tools.GetSanitizeBlockForCompare() return func(cmd *cobra.Command, args []string) error { @@ -82,7 +82,7 @@ func runCompareBlocksE[B Block](chain *Chain[B]) CommandExecutor { warnAboutExtraBlocks := sync.Once{} ctx := cmd.Context() - blockRange, err := tools.GetBlockRangeFromArg(args[2]) + blockRange, err := GetBlockRangeFromArg(args[2]) if err != nil { return fmt.Errorf("parsing range: %w", err) } @@ -103,7 +103,7 @@ func runCompareBlocksE[B Block](chain *Chain[B]) CommandExecutor { return fmt.Errorf("unable to create store at path %q: %w", args[1], 
err) } - segments, err := blockRange.Split(segmentSize, tools.EndBoundaryExclusive) + segments, err := blockRange.Split(segmentSize, EndBoundaryExclusive) if err != nil { return fmt.Errorf("unable to split blockrage in segments: %w", err) } @@ -122,7 +122,7 @@ func runCompareBlocksE[B Block](chain *Chain[B]) CommandExecutor { return dstore.StopIteration } - if blockRange.Contains(uint64(fileStartBlock), tools.EndBoundaryExclusive) { + if blockRange.Contains(uint64(fileStartBlock), EndBoundaryExclusive) { var wg sync.WaitGroup var bundleErrLock sync.Mutex var bundleReadErr error @@ -195,17 +195,17 @@ func runCompareBlocksE[B Block](chain *Chain[B]) CommandExecutor { } } -func firehoseBlockToRef[B Block](b B) bstream.BlockRef { +func firehoseBlockToRef[B firecore.Block](b B) bstream.BlockRef { return bstream.NewBlockRef(b.GetFirehoseBlockID(), b.GetFirehoseBlockNumber()) } -func readBundle[B Block]( +func readBundle[B firecore.Block]( ctx context.Context, filename string, store dstore.Store, fileStartBlock, stopBlock uint64, - sanitizer SanitizeBlockForCompareFunc[B], + sanitizer firecore.SanitizeBlockForCompareFunc[B], warnAboutExtraBlocks *sync.Once, ) ([]string, map[string]B, error) { fileReader, err := store.OpenObject(ctx, filename) @@ -247,7 +247,7 @@ func readBundle[B Block]( } type state struct { - segments []tools.BlockRange + segments []BlockRange currentSegmentIdx int blocksCountedInThisSegment int differencesFound int @@ -256,10 +256,10 @@ type state struct { } func (s *state) process(blockNum uint64, isDifferent bool, isMissing bool) { - if !s.segments[s.currentSegmentIdx].Contains(blockNum, tools.EndBoundaryExclusive) { // moving forward + if !s.segments[s.currentSegmentIdx].Contains(blockNum, EndBoundaryExclusive) { // moving forward s.print() for i := s.currentSegmentIdx; i < len(s.segments); i++ { - if s.segments[i].Contains(blockNum, tools.EndBoundaryExclusive) { + if s.segments[i].Contains(blockNum, EndBoundaryExclusive) { s.currentSegmentIdx = i 
s.totalBlocksCounted += s.blocksCountedInThisSegment s.differencesFound = 0 diff --git a/tools_download_from_firehose.go b/cmd/tools/tools_download_from_firehose.go similarity index 89% rename from tools_download_from_firehose.go rename to cmd/tools/tools_download_from_firehose.go index ec5a887..5d4995e 100644 --- a/tools_download_from_firehose.go +++ b/cmd/tools/tools_download_from_firehose.go @@ -1,4 +1,4 @@ -package firecore +package tools import ( "context" @@ -11,19 +11,20 @@ import ( "github.com/streamingfast/bstream" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/dstore" + firecore "github.com/streamingfast/firehose-core" pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" ) -func newToolsDownloadFromFirehoseCmd[B Block](chain *Chain[B], zlog *zap.Logger) *cobra.Command { +func newToolsDownloadFromFirehoseCmd[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger) *cobra.Command { cmd := &cobra.Command{ Use: "download-from-firehose ", Short: "Download blocks from Firehose and save them to merged-blocks", Args: cobra.ExactArgs(4), RunE: createToolsDownloadFromFirehoseE(chain, zlog), - Example: ExamplePrefixed(chain, "tools download-from-firehose", ` + Example: firecore.ExamplePrefixed(chain, "tools download-from-firehose", ` # Adjust based on your actual network mainnet.eth.streamingfast.io:443 1000 2000 ./output_dir `), @@ -34,7 +35,7 @@ func newToolsDownloadFromFirehoseCmd[B Block](chain *Chain[B], zlog *zap.Logger) return cmd } -func createToolsDownloadFromFirehoseE[B Block](chain *Chain[B], zlog *zap.Logger) func(cmd *cobra.Command, args []string) error { +func createToolsDownloadFromFirehoseE[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger) func(cmd *cobra.Command, args []string) error { return func(cmd *cobra.Command, args []string) error { ctx := context.Background() @@ -110,7 +111,7 @@ 
func createToolsDownloadFromFirehoseE[B Block](chain *Chain[B], zlog *zap.Logger return fmt.Errorf("unmarshal response block: %w", err) } - if _, ok := block.(BlockLIBNumDerivable); !ok { + if _, ok := block.(firecore.BlockLIBNumDerivable); !ok { // We must wrap the block in a BlockEnveloppe and "provide" the LIB number as itself minus 1 since // there is nothing we can do more here to obtain the value sadly. For chain where the LIB can be // derived from the Block itself, this code does **not** run (so it will have the correct value) @@ -125,7 +126,7 @@ func createToolsDownloadFromFirehoseE[B Block](chain *Chain[B], zlog *zap.Logger libNum = number } - block = BlockEnveloppe{ + block = firecore.BlockEnveloppe{ Block: block, LIBNum: libNum, } diff --git a/tools_firehose_client.go b/cmd/tools/tools_firehose_client.go similarity index 87% rename from tools_firehose_client.go rename to cmd/tools/tools_firehose_client.go index b6d9135..02eda5b 100644 --- a/tools_firehose_client.go +++ b/cmd/tools/tools_firehose_client.go @@ -1,18 +1,18 @@ -package firecore +package tools import ( "context" "fmt" "github.com/spf13/cobra" "github.com/streamingfast/cli/sflags" - "github.com/streamingfast/firehose-core/tools" + firecore "github.com/streamingfast/firehose-core" "github.com/streamingfast/jsonpb" pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" "io" ) -func newToolsFirehoseClientCmd[B Block](chain *Chain[B], logger *zap.Logger) *cobra.Command { +func newToolsFirehoseClientCmd[B firecore.Block](chain *firecore.Chain[B], logger *zap.Logger) *cobra.Command { cmd := &cobra.Command{ Use: "firehose-client ", Short: "Connects to a Firehose endpoint over gRPC and print block stream as JSON to terminal", @@ -33,17 +33,17 @@ type respChan struct { ch chan string } -func getFirehoseClientE[B Block](chain *Chain[B], logger *zap.Logger) func(cmd *cobra.Command, args []string) error { +func getFirehoseClientE[B firecore.Block](chain *firecore.Chain[B], rootLog 
*zap.Logger) func(cmd *cobra.Command, args []string) error { return func(cmd *cobra.Command, args []string) error { ctx := context.Background() - firehoseClient, connClose, requestInfo, err := getFirehoseStreamClientFromCmd(cmd, logger, args[0], chain) + firehoseClient, connClose, requestInfo, err := getFirehoseStreamClientFromCmd(cmd, rootLog, args[0], chain) if err != nil { return err } defer connClose() - blockRange, err := tools.GetBlockRangeFromArg(args[1]) + blockRange, err := GetBlockRangeFromArg(args[1]) if err != nil { return fmt.Errorf("invalid range %q: %w", args[1], err) } diff --git a/tools_firehose_prometheus_exporter.go b/cmd/tools/tools_firehose_prometheus_exporter.go similarity index 90% rename from tools_firehose_prometheus_exporter.go rename to cmd/tools/tools_firehose_prometheus_exporter.go index 387c793..5cc8a00 100644 --- a/tools_firehose_prometheus_exporter.go +++ b/cmd/tools/tools_firehose_prometheus_exporter.go @@ -1,4 +1,4 @@ -package firecore +package tools import ( "context" @@ -8,6 +8,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/spf13/cobra" "github.com/streamingfast/bstream" + firecore "github.com/streamingfast/firehose-core" "github.com/streamingfast/logging" pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" @@ -21,7 +22,7 @@ var lastBlockReceived time.Time var driftSec = prometheus.NewGaugeVec(prometheus.GaugeOpts{Name: "firehose_healthcheck_drift", Help: "Time since the most recent block received (seconds)"}, []string{"endpoint"}) // You should add your custom 'transforms' flags to this command in your init(), then parse them in transformsSetter -func newToolsFirehosePrometheusExporterCmd[B Block](chain *Chain[B], zlog *zap.Logger, tracer logging.Tracer) *cobra.Command { +func newToolsFirehosePrometheusExporterCmd[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger, tracer logging.Tracer) *cobra.Command { cmd := &cobra.Command{ Use: "firehose-prometheus-exporter ", 
Short: "stream blocks near the chain HEAD and report to prometheus", @@ -34,7 +35,7 @@ func newToolsFirehosePrometheusExporterCmd[B Block](chain *Chain[B], zlog *zap.L return cmd } -func runPrometheusExporterE[B Block](chain *Chain[B], zlog *zap.Logger, tracer logging.Tracer) func(cmd *cobra.Command, args []string) error { +func runPrometheusExporterE[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger, tracer logging.Tracer) func(cmd *cobra.Command, args []string) error { return func(cmd *cobra.Command, args []string) error { ctx := context.Background() diff --git a/tools_firehose_single_block_client.go b/cmd/tools/tools_firehose_single_block_client.go similarity index 83% rename from tools_firehose_single_block_client.go rename to cmd/tools/tools_firehose_single_block_client.go index b242ac3..bf2dbee 100644 --- a/tools_firehose_single_block_client.go +++ b/cmd/tools/tools_firehose_single_block_client.go @@ -1,4 +1,4 @@ -package firecore +package tools import ( "context" @@ -7,6 +7,7 @@ import ( "strings" "github.com/spf13/cobra" + firecore "github.com/streamingfast/firehose-core" "github.com/streamingfast/jsonpb" "github.com/streamingfast/logging" pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" @@ -14,13 +15,13 @@ import ( ) // You should add your custom 'transforms' flags to this command in your init(), then parse them in transformsSetter -func newToolsFirehoseSingleBlockClientCmd[B Block](chain *Chain[B], zlog *zap.Logger, tracer logging.Tracer) *cobra.Command { +func newToolsFirehoseSingleBlockClientCmd[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger, tracer logging.Tracer) *cobra.Command { cmd := &cobra.Command{ Use: "firehose-single-block-client {endpoint} {block_num|block_num:block_id|cursor}", Short: "fetch a single block from firehose and print as JSON", Args: cobra.ExactArgs(2), RunE: getFirehoseSingleBlockClientE(chain, zlog, tracer), - Example: ExamplePrefixed(chain, "tools ", ` + Example: firecore.ExamplePrefixed(chain, 
"tools ", ` firehose-single-block-client --compression=gzip my.firehose.endpoint:443 2344:0x32d8e8d98a798da98d6as9d69899as86s9898d8ss8d87 `), } @@ -30,7 +31,7 @@ func newToolsFirehoseSingleBlockClientCmd[B Block](chain *Chain[B], zlog *zap.Lo return cmd } -func getFirehoseSingleBlockClientE[B Block](chain *Chain[B], zlog *zap.Logger, tracer logging.Tracer) func(cmd *cobra.Command, args []string) error { +func getFirehoseSingleBlockClientE[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger, tracer logging.Tracer) func(cmd *cobra.Command, args []string) error { return func(cmd *cobra.Command, args []string) error { ctx := context.Background() diff --git a/tools_fix_bloated_merged_blocks.go b/cmd/tools/tools_fix_bloated_merged_blocks.go similarity index 91% rename from tools_fix_bloated_merged_blocks.go rename to cmd/tools/tools_fix_bloated_merged_blocks.go index cd225b4..063a78c 100644 --- a/tools_fix_bloated_merged_blocks.go +++ b/cmd/tools/tools_fix_bloated_merged_blocks.go @@ -1,4 +1,4 @@ -package firecore +package tools import ( "fmt" @@ -8,11 +8,11 @@ import ( "github.com/streamingfast/bstream" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/tools" + firecore "github.com/streamingfast/firehose-core" "go.uber.org/zap" ) -func newToolsFixBloatedMergedBlocks[B Block](chain *Chain[B], zlog *zap.Logger) *cobra.Command { +func newToolsFixBloatedMergedBlocks[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger) *cobra.Command { return &cobra.Command{ Use: "fix-bloated-merged-blocks []", Short: "Fixes 'corrupted' merged-blocks that contain extraneous or duplicate blocks. Some older versions of the merger may have produce such bloated merged-blocks. 
All merged-blocks files in given range will be rewritten, regardless of if they were corrupted.", @@ -21,7 +21,7 @@ func newToolsFixBloatedMergedBlocks[B Block](chain *Chain[B], zlog *zap.Logger) } } -func runFixBloatedMergedBlocksE(zlog *zap.Logger) CommandExecutor { +func runFixBloatedMergedBlocksE(zlog *zap.Logger) firecore.CommandExecutor { return func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() @@ -35,7 +35,7 @@ func runFixBloatedMergedBlocksE(zlog *zap.Logger) CommandExecutor { return fmt.Errorf("unable to create destination store: %w", err) } - blockRange, err := tools.GetBlockRangeFromArg(args[2]) + blockRange, err := GetBlockRangeFromArg(args[2]) if err != nil { return fmt.Errorf("parsing block range: %w", err) } diff --git a/tools_print.go b/cmd/tools/tools_print.go similarity index 92% rename from tools_print.go rename to cmd/tools/tools_print.go index 161f5e1..0ac63d0 100644 --- a/tools_print.go +++ b/cmd/tools/tools_print.go @@ -12,22 +12,23 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package firecore +package tools import ( "fmt" + "io" + "os" + "strconv" + "github.com/spf13/cobra" "github.com/streamingfast/bstream" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dstore" + firecore "github.com/streamingfast/firehose-core" "github.com/streamingfast/firehose-core/jsonencoder" "github.com/streamingfast/firehose-core/protoregistry" - "github.com/streamingfast/firehose-core/tools" "google.golang.org/protobuf/proto" - "io" - "os" - "strconv" ) var toolsPrintCmd = &cobra.Command{ @@ -48,7 +49,7 @@ var toolsPrintMergedBlocksCmd = &cobra.Command{ } func init() { - toolsCmd.AddCommand(toolsPrintCmd) + ToolsCmd.AddCommand(toolsPrintCmd) toolsPrintCmd.AddCommand(toolsPrintOneBlockCmd) toolsPrintCmd.AddCommand(toolsPrintMergedBlocksCmd) @@ -58,12 +59,12 @@ func init() { toolsPrintCmd.PersistentFlags().Bool("transactions", false, "When in 'text' output mode, also print transactions summary") } -func configureToolsPrintCmd[B Block](chain *Chain[B]) { +func configureToolsPrintCmd[B firecore.Block](chain *firecore.Chain[B]) { toolsPrintOneBlockCmd.RunE = createToolsPrintOneBlockE(chain) toolsPrintMergedBlocksCmd.RunE = createToolsPrintMergedBlocksE(chain) } -func createToolsPrintMergedBlocksE[B Block](chain *Chain[B]) CommandExecutor { +func createToolsPrintMergedBlocksE[B firecore.Block](chain *firecore.Chain[B]) firecore.CommandExecutor { return func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() @@ -84,7 +85,7 @@ func createToolsPrintMergedBlocksE[B Block](chain *Chain[B]) CommandExecutor { if err != nil { return fmt.Errorf("invalid base block %q: %w", args[1], err) } - blockBoundary := tools.RoundToBundleStartBlock(startBlock, 100) + blockBoundary := RoundToBundleStartBlock(startBlock, 100) filename := fmt.Sprintf("%010d", blockBoundary) reader, err := store.OpenObject(ctx, filename) @@ -126,7 +127,7 @@ func createToolsPrintMergedBlocksE[B Block](chain *Chain[B]) 
CommandExecutor { } } -func createToolsPrintOneBlockE[B Block](chain *Chain[B]) CommandExecutor { +func createToolsPrintOneBlockE[B firecore.Block](chain *firecore.Chain[B]) firecore.CommandExecutor { return func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() @@ -214,7 +215,7 @@ func toolsPrintCmdGetOutputMode(cmd *cobra.Command) (PrintOutputMode, error) { return out, nil } -func displayBlock[B Block](pbBlock *pbbstream.Block, chain *Chain[B], outputMode PrintOutputMode, printTransactions bool, jencoder *jsonencoder.Encoder) error { +func displayBlock[B firecore.Block](pbBlock *pbbstream.Block, chain *firecore.Chain[B], outputMode PrintOutputMode, printTransactions bool, jencoder *jsonencoder.Encoder) error { if pbBlock == nil { return fmt.Errorf("block is nil") } diff --git a/tools_print_enum.go b/cmd/tools/tools_print_enum.go similarity index 99% rename from tools_print_enum.go rename to cmd/tools/tools_print_enum.go index 99e7212..5749abf 100644 --- a/tools_print_enum.go +++ b/cmd/tools/tools_print_enum.go @@ -4,7 +4,7 @@ // Build Date: // Built By: -package firecore +package tools import ( "fmt" diff --git a/tools_unmerge_blocks.go b/cmd/tools/tools_unmerge_blocks.go similarity index 92% rename from tools_unmerge_blocks.go rename to cmd/tools/tools_unmerge_blocks.go index ac00bc1..8d90e5b 100644 --- a/tools_unmerge_blocks.go +++ b/cmd/tools/tools_unmerge_blocks.go @@ -1,4 +1,4 @@ -package firecore +package tools import ( "fmt" @@ -10,11 +10,11 @@ import ( pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/cli" "github.com/streamingfast/dstore" - "github.com/streamingfast/firehose-core/tools" + firecore "github.com/streamingfast/firehose-core" "go.uber.org/zap" ) -func newToolsUnmergeBlocksCmd[B Block](chain *Chain[B], zlog *zap.Logger) *cobra.Command { +func newToolsUnmergeBlocksCmd[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger) *cobra.Command { return &cobra.Command{ Use: "unmerge-blocks []", 
Short: "Unmerges merged block files into one-block-files", @@ -23,7 +23,7 @@ func newToolsUnmergeBlocksCmd[B Block](chain *Chain[B], zlog *zap.Logger) *cobra } } -func runUnmergeBlocksE(zlog *zap.Logger) CommandExecutor { +func runUnmergeBlocksE(zlog *zap.Logger) firecore.CommandExecutor { return func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() @@ -37,7 +37,7 @@ func runUnmergeBlocksE(zlog *zap.Logger) CommandExecutor { return fmt.Errorf("unable to create destination store: %w", err) } - blockRange, err := tools.GetBlockRangeFromArg(args[2]) + blockRange, err := GetBlockRangeFromArg(args[2]) if err != nil { return fmt.Errorf("parsing block range: %w", err) } diff --git a/tools_upgrade_merged_blocks.go b/cmd/tools/tools_upgrade_merged_blocks.go similarity index 92% rename from tools_upgrade_merged_blocks.go rename to cmd/tools/tools_upgrade_merged_blocks.go index fff0caf..f0025ae 100644 --- a/tools_upgrade_merged_blocks.go +++ b/cmd/tools/tools_upgrade_merged_blocks.go @@ -1,4 +1,4 @@ -package firecore +package tools import ( "context" @@ -7,25 +7,25 @@ import ( "io" "strconv" - pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" - "github.com/spf13/cobra" "github.com/streamingfast/bstream" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/bstream/stream" "github.com/streamingfast/dstore" + firecore "github.com/streamingfast/firehose-core" "go.uber.org/zap" ) -func NewToolsUpgradeMergedBlocksCmd[B Block](chain *Chain[B]) *cobra.Command { +func NewToolsUpgradeMergedBlocksCmd[B firecore.Block](chain *firecore.Chain[B], rootLog *zap.Logger) *cobra.Command { return &cobra.Command{ Use: "upgrade-merged-blocks ", Short: "From a merged-blocks source, rewrite blocks to a new merged-blocks destination, while applying all possible upgrades", Args: cobra.ExactArgs(4), - RunE: getMergedBlockUpgrader(chain.Tools.MergedBlockUpgrader), + RunE: getMergedBlockUpgrader(chain.Tools.MergedBlockUpgrader, rootLog), } } 
-func getMergedBlockUpgrader(tweakFunc func(block *pbbstream.Block) (*pbbstream.Block, error)) func(cmd *cobra.Command, args []string) error { +func getMergedBlockUpgrader(tweakFunc func(block *pbbstream.Block) (*pbbstream.Block, error), rootLog *zap.Logger) func(cmd *cobra.Command, args []string) error { return func(cmd *cobra.Command, args []string) error { source := args[0] sourceStore, err := dstore.NewDBinStore(source) diff --git a/tools/types.go b/cmd/tools/types.go similarity index 100% rename from tools/types.go rename to cmd/tools/types.go diff --git a/tools/types_test.go b/cmd/tools/types_test.go similarity index 100% rename from tools/types_test.go rename to cmd/tools/types_test.go diff --git a/tools/utils.go b/cmd/tools/utils.go similarity index 100% rename from tools/utils.go rename to cmd/tools/utils.go diff --git a/tools/utils_test.go b/cmd/tools/utils_test.go similarity index 100% rename from tools/utils_test.go rename to cmd/tools/utils_test.go diff --git a/flags.go b/flags.go index 41f5ee9..8b34415 100644 --- a/flags.go +++ b/flags.go @@ -10,7 +10,7 @@ var globalFlagsHiddenOnChildCmd = []string{ "startup-delay", } -func hideGlobalFlagsOnChildCmd(cmd *cobra.Command) { +func HideGlobalFlagsOnChildCmd(cmd *cobra.Command) { actual := cmd.HelpFunc() cmd.SetHelpFunc(func(command *cobra.Command, strings []string) { for _, flag := range globalFlagsHiddenOnChildCmd { diff --git a/substreams/extentions.go b/substreams/extentions.go new file mode 100644 index 0000000..73198b2 --- /dev/null +++ b/substreams/extentions.go @@ -0,0 +1,11 @@ +package substreams + +import ( + "github.com/streamingfast/substreams/pipeline" + "github.com/streamingfast/substreams/wasm" +) + +type Extension struct { + PipelineOptioner pipeline.PipelineOptioner + WASMExtensioner wasm.WASMExtensioner +} diff --git a/types.go b/types.go index 7eeb2c4..af051e7 100644 --- a/types.go +++ b/types.go @@ -169,3 +169,5 @@ type BlockIndexer[B Block] interface { // for the overall process. 
The returns [transform.Factory] will be used multiple times (one per request // requesting this transform). type BlockTransformerFactory func(indexStore dstore.Store, indexPossibleSizes []uint64) (*transform.Factory, error) + +type ReaderNodeArgumentResolver = func(in string) string diff --git a/unsafe_extensions.go b/unsafe_extensions.go index 0417434..c014d8d 100644 --- a/unsafe_extensions.go +++ b/unsafe_extensions.go @@ -3,9 +3,7 @@ package firecore import ( "context" - "github.com/spf13/cobra" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" - "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dlauncher/launcher" "go.uber.org/zap" ) @@ -16,6 +14,6 @@ var UnsafeJsonBytesEncoder = "hex" // UnsafeResolveReaderNodeStartBlock is a function that resolved the reader node start block num, by default it simply // returns the value of the 'reader-node-start-block-num'. However, the function may be overwritten in certain chains // to perform a more complex resolution logic. 
-var UnsafeResolveReaderNodeStartBlock = func(ctx context.Context, command *cobra.Command, runtime *launcher.Runtime, rootLog *zap.Logger) (uint64, error) { - return sflags.MustGetUint64(command, "reader-node-start-block-num"), nil +var UnsafeResolveReaderNodeStartBlock = func(ctx context.Context, startBlockNum uint64, firstStreamableBlock uint64, runtime *launcher.Runtime, rootLog *zap.Logger) (uint64, error) { + return startBlockNum, nil } diff --git a/utils.go b/utils.go index b341642..9767001 100644 --- a/utils.go +++ b/utils.go @@ -7,13 +7,11 @@ import ( "strings" "github.com/streamingfast/cli" - "go.uber.org/zap" ) func mkdirStorePathIfLocal(storeURL string) (err error) { - rootLog.Debug("creating directory and its parent(s)", zap.String("directory", storeURL)) if dirs := getDirsToMake(storeURL); len(dirs) > 0 { - err = makeDirs(dirs) + err = MakeDirs(dirs) } return @@ -39,7 +37,7 @@ func getDirsToMake(storeURL string) []string { return []string{storeURL} } -func makeDirs(directories []string) error { +func MakeDirs(directories []string) error { for _, directory := range directories { err := os.MkdirAll(directory, 0755) if err != nil { From efd96c34861e439b40f5e56809f97c4dc3175202 Mon Sep 17 00:00:00 2001 From: billettc Date: Tue, 5 Dec 2023 13:02:55 -0500 Subject: [PATCH 49/66] wip refactor of tools and apps package --- cmd/main.go | 4 +- .../blocks.go} | 37 ++--- cmd/tools/{tools_check.go => check/check.go} | 50 +++---- .../merged_batch.go} | 21 +-- .../mergedbatch.go} | 23 +-- .../client.go} | 10 +- cmd/tools/firehose/firehose.go | 123 ++++++++++++++++ .../prometheus_exporter.go} | 4 +- .../single_block_client.go} | 4 +- .../tools_download_from_firehose.go | 13 +- cmd/tools/{ => print}/tools_print.go | 54 ++++---- cmd/tools/{ => print}/tools_print_enum.go | 2 +- cmd/tools/tools.go | 131 ++---------------- cmd/tools/tools_compare_blocks.go | 16 ++- cmd/tools/tools_fix_bloated_merged_blocks.go | 18 +-- cmd/tools/tools_unmerge_blocks.go | 7 +- 
cmd/tools/tools_upgrade_merged_blocks.go | 114 +-------------- jsonencoder/encoder.go | 6 +- mergedblockswriter.go | 113 +++++++++++++++ {cmd/tools => types}/block_range.go | 2 +- {cmd/tools => types}/block_range_enum.go | 2 +- {cmd/tools => types}/flags.go | 6 +- {cmd/tools => types}/types.go | 2 +- {cmd/tools => types}/types_test.go | 2 +- {cmd/tools => types}/utils.go | 4 +- {cmd/tools => types}/utils_test.go | 4 +- 26 files changed, 407 insertions(+), 365 deletions(-) rename cmd/tools/{tools_check_blocks.go => check/blocks.go} (83%) rename cmd/tools/{tools_check.go => check/check.go} (81%) rename cmd/tools/{tools_check_merged_batch.go => check/merged_batch.go} (86%) rename cmd/tools/{tools_checkmergedbatch.go => check/mergedbatch.go} (76%) rename cmd/tools/{tools_firehose_client.go => firehose/client.go} (94%) create mode 100644 cmd/tools/firehose/firehose.go rename cmd/tools/{tools_firehose_prometheus_exporter.go => firehose/prometheus_exporter.go} (97%) rename cmd/tools/{tools_firehose_single_block_client.go => firehose/single_block_client.go} (96%) rename cmd/tools/{ => firehose}/tools_download_from_firehose.go (95%) rename cmd/tools/{ => print}/tools_print.go (88%) rename cmd/tools/{ => print}/tools_print_enum.go (99%) create mode 100644 mergedblockswriter.go rename {cmd/tools => types}/block_range.go (99%) rename {cmd/tools => types}/block_range_enum.go (99%) rename {cmd/tools => types}/flags.go (82%) rename {cmd/tools => types}/types.go (96%) rename {cmd/tools => types}/types_test.go (97%) rename {cmd/tools => types}/utils.go (97%) rename {cmd/tools => types}/utils_test.go (97%) diff --git a/cmd/main.go b/cmd/main.go index e308e52..e5558c9 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -8,8 +8,6 @@ import ( "github.com/streamingfast/firehose-core/cmd/tools" - "github.com/streamingfast/firehose-core/cmd/apps" - "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/spf13/viper" @@ -24,6 +22,8 @@ import ( dmeteringgrpc 
"github.com/streamingfast/dmetering/grpc" dmeteringlogger "github.com/streamingfast/dmetering/logger" firecore "github.com/streamingfast/firehose-core" + "github.com/streamingfast/firehose-core/cmd/apps" + "github.com/streamingfast/logging" "go.uber.org/zap" ) diff --git a/cmd/tools/tools_check_blocks.go b/cmd/tools/check/blocks.go similarity index 83% rename from cmd/tools/tools_check_blocks.go rename to cmd/tools/check/blocks.go index 1f44507..8b86091 100644 --- a/cmd/tools/tools_check_blocks.go +++ b/cmd/tools/check/blocks.go @@ -1,4 +1,4 @@ -package tools +package check import ( "context" @@ -9,12 +9,15 @@ import ( "regexp" "strconv" - firecore "github.com/streamingfast/firehose-core" + print2 "github.com/streamingfast/firehose-core/cmd/tools/print" + + "github.com/streamingfast/firehose-core/types" "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/forkable" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/dstore" + firecore "github.com/streamingfast/firehose-core" "go.uber.org/zap" ) @@ -29,7 +32,7 @@ const ( MaxUint64 = ^uint64(0) ) -func CheckMergedBlocks[B firecore.Block](ctx context.Context, chain *firecore.Chain[B], logger *zap.Logger, storeURL string, fileBlockSize uint64, blockRange BlockRange, printDetails PrintDetails) error { +func CheckMergedBlocks[B firecore.Block](ctx context.Context, chain *firecore.Chain[B], logger *zap.Logger, storeURL string, fileBlockSize uint64, blockRange types.BlockRange, printDetails PrintDetails) error { readAllBlocks := printDetails != PrintNoDetails fmt.Printf("Checking block holes on %s\n", storeURL) if readAllBlocks { @@ -50,7 +53,7 @@ func CheckMergedBlocks[B firecore.Block](ctx context.Context, chain *firecore.Ch // } holeFound := false - expected = RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize) + expected = types.RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize) currentStartBlk := uint64(blockRange.Start) blocksStore, err := 
dstore.NewDBinStore(storeURL) @@ -83,11 +86,11 @@ func CheckMergedBlocks[B firecore.Block](ctx context.Context, chain *firecore.Ch if baseNum != expected { // There is no previous valid block range if we are at the ever first seen file if count > 1 { - fmt.Printf("✅ Range %s\n", NewClosedRange(int64(currentStartBlk), uint64(RoundToBundleEndBlock(expected-fileBlockSize, fileBlockSize)))) + fmt.Printf("✅ Range %s\n", types.NewClosedRange(int64(currentStartBlk), uint64(types.RoundToBundleEndBlock(expected-fileBlockSize, fileBlockSize)))) } // Otherwise, we do not follow last seen element (previous is `100 - 199` but we are `299 - 300`) - missingRange := NewClosedRange(int64(expected), RoundToBundleEndBlock(baseNum-fileBlockSize, fileBlockSize)) + missingRange := types.NewClosedRange(int64(expected), types.RoundToBundleEndBlock(baseNum-fileBlockSize, fileBlockSize)) fmt.Printf("❌ Range %s (Missing, [%s])\n", missingRange, missingRange.ReprocRange()) currentStartBlk = baseNum @@ -113,11 +116,11 @@ func CheckMergedBlocks[B firecore.Block](ctx context.Context, chain *firecore.Ch } if count%10000 == 0 { - fmt.Printf("✅ Range %s\n", NewClosedRange(int64(currentStartBlk), RoundToBundleEndBlock(baseNum, fileBlockSize))) + fmt.Printf("✅ Range %s\n", types.NewClosedRange(int64(currentStartBlk), types.RoundToBundleEndBlock(baseNum, fileBlockSize))) currentStartBlk = baseNum + fileBlockSize } - if blockRange.IsClosed() && RoundToBundleEndBlock(baseNum, fileBlockSize) >= *blockRange.Stop-1 { + if blockRange.IsClosed() && types.RoundToBundleEndBlock(baseNum, fileBlockSize) >= *blockRange.Stop-1 { return dstore.StopIteration } @@ -135,9 +138,9 @@ func CheckMergedBlocks[B firecore.Block](ctx context.Context, chain *firecore.Ch zap.Uint64("highest_block_seen", highestBlockSeen), ) if tfdb.lastLinkedBlock != nil && tfdb.lastLinkedBlock.Number < highestBlockSeen { - fmt.Printf("🔶 Range %s has issues with forks, last linkable block number: %d\n", NewClosedRange(int64(currentStartBlk), 
uint64(highestBlockSeen)), tfdb.lastLinkedBlock.Number) + fmt.Printf("🔶 Range %s has issues with forks, last linkable block number: %d\n", types.NewClosedRange(int64(currentStartBlk), uint64(highestBlockSeen)), tfdb.lastLinkedBlock.Number) } else { - fmt.Printf("✅ Range %s\n", NewClosedRange(int64(currentStartBlk), uint64(highestBlockSeen))) + fmt.Printf("✅ Range %s\n", types.NewClosedRange(int64(currentStartBlk), uint64(highestBlockSeen))) } fmt.Println() @@ -146,7 +149,7 @@ func CheckMergedBlocks[B firecore.Block](ctx context.Context, chain *firecore.Ch if blockRange.IsClosed() && (highestBlockSeen < uint64(*blockRange.Stop-1) || (lowestBlockSeen > uint64(blockRange.Start) && lowestBlockSeen > bstream.GetProtocolFirstStreamableBlock)) { - fmt.Printf("> 🔶 Incomplete range %s, started at block %s and stopped at block: %s\n", blockRange, PrettyBlockNum(lowestBlockSeen), PrettyBlockNum(highestBlockSeen)) + fmt.Printf("> 🔶 Incomplete range %s, started at block %s and stopped at block: %s\n", blockRange, types.PrettyBlockNum(lowestBlockSeen), types.PrettyBlockNum(highestBlockSeen)) } if holeFound { @@ -171,7 +174,7 @@ func validateBlockSegment[B firecore.Block]( store dstore.Store, segment string, fileBlockSize uint64, - blockRange BlockRange, + blockRange types.BlockRange, printDetails PrintDetails, tfdb *trackedForkDB, ) (lowestBlockSeen, highestBlockSeen uint64) { @@ -197,7 +200,7 @@ func validateBlockSegment[B firecore.Block]( continue } - if blockRange.IsClosed() && block.Number > uint64(*blockRange.Stop) { + if blockRange.IsClosed() && block.Number > *blockRange.Stop { return } @@ -239,7 +242,7 @@ func validateBlockSegment[B firecore.Block]( seenBlockCount++ if printDetails == PrintStats { - err := printBStreamBlock(block, false, os.Stdout) + err := print2.PrintBStreamBlock(block, false, os.Stdout) if err != nil { fmt.Printf("❌ Unable to print block %s: %s\n", block.AsRef(), err) continue @@ -288,13 +291,13 @@ func validateBlockSegment[B firecore.Block]( return } 
-func WalkBlockPrefix(blockRange BlockRange, fileBlockSize uint64) string { +func WalkBlockPrefix(blockRange types.BlockRange, fileBlockSize uint64) string { if blockRange.IsOpen() { return "" } - startString := fmt.Sprintf("%010d", RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize)) - endString := fmt.Sprintf("%010d", RoundToBundleEndBlock(uint64(*blockRange.Stop-1), fileBlockSize)+1) + startString := fmt.Sprintf("%010d", types.RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize)) + endString := fmt.Sprintf("%010d", types.RoundToBundleEndBlock(*blockRange.Stop-1, fileBlockSize)+1) offset := 0 for i := 0; i < len(startString); i++ { diff --git a/cmd/tools/tools_check.go b/cmd/tools/check/check.go similarity index 81% rename from cmd/tools/tools_check.go rename to cmd/tools/check/check.go index 74014df..97bf7e4 100644 --- a/cmd/tools/tools_check.go +++ b/cmd/tools/check/check.go @@ -12,45 +12,47 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package tools +package check import ( "fmt" "strings" - "go.uber.org/zap" - - firecore "github.com/streamingfast/firehose-core" - "github.com/dustin/go-humanize" "github.com/spf13/cobra" "github.com/streamingfast/bstream" "github.com/streamingfast/cli" "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dstore" + firecore "github.com/streamingfast/firehose-core" + "github.com/streamingfast/firehose-core/types" + "go.uber.org/zap" "golang.org/x/exp/maps" "golang.org/x/exp/slices" ) -var toolsCheckCmd = &cobra.Command{Use: "check", Short: "Various checks for deployment, data integrity & debugging"} +func NewCheckCommand[B firecore.Block](chain *firecore.Chain[B], rootLog *zap.Logger) *cobra.Command { -var toolsCheckForksCmd = &cobra.Command{ - Use: "forks ", - Short: "Reads all forked blocks you have and print longest linkable segments for each fork", - Args: cobra.ExactArgs(1), -} + toolsCheckCmd := &cobra.Command{Use: "check", Short: "Various checks for deployment, data integrity & debugging"} -var toolsCheckMergedBlocksCmd = &cobra.Command{ - // TODO: Not sure, it's now a required thing, but we could probably use the same logic as `start` - // and avoid altogether passing the args. If this would also load the config and everything else, - // that would be much more seamless! - Use: "merged-blocks ", - Short: "Checks for any holes in merged blocks as well as ensuring merged blocks integrity", - Args: cobra.ExactArgs(1), -} + toolsCheckForksCmd := &cobra.Command{ + Use: "forks ", + Short: "Reads all forked blocks you have and print longest linkable segments for each fork", + Args: cobra.ExactArgs(1), + } + + var ( + toolsCheckMergedBlocksCmd = &cobra.Command{ + // TODO: Not sure, it's now a required thing, but we could probably use the same logic as `start` + // and avoid altogether passing the args. If this would also load the config and everything else, + // that would be much more seamless! 
+ Use: "merged-blocks ", + Short: "Checks for any holes in merged blocks as well as ensuring merged blocks integrity", + Args: cobra.ExactArgs(1), + } + ) -func init() { - ToolsCmd.AddCommand(toolsCheckCmd) + toolsCheckCmd.AddCommand(newCheckMergedBlockBatchCmd()) toolsCheckCmd.AddCommand(toolsCheckForksCmd) toolsCheckCmd.AddCommand(toolsCheckMergedBlocksCmd) @@ -61,9 +63,7 @@ func init() { toolsCheckForksCmd.Flags().Uint64("min-depth", 1, "Only show forks that are at least this deep") toolsCheckForksCmd.Flags().Uint64("after-block", 0, "Only show forks that happened after this block number, if value is not 0") -} -func configureToolsCheckCmd[B firecore.Block](chain *firecore.Chain[B], rootLog *zap.Logger) { toolsCheckMergedBlocksCmd.RunE = createToolsCheckMergedBlocksE(chain, rootLog) toolsCheckMergedBlocksCmd.Example = firecore.ExamplePrefixed(chain, "tools check merged-blocks", ` "./sf-data/storage/merged-blocks" @@ -73,6 +73,8 @@ func configureToolsCheckCmd[B firecore.Block](chain *firecore.Chain[B], rootLog `) toolsCheckForksCmd.RunE = toolsCheckForksE + + return toolsCheckCmd } func createToolsCheckMergedBlocksE[B firecore.Block](chain *firecore.Chain[B], rootLog *zap.Logger) firecore.CommandExecutor { @@ -80,7 +82,7 @@ func createToolsCheckMergedBlocksE[B firecore.Block](chain *firecore.Chain[B], r storeURL := args[0] fileBlockSize := uint64(100) - blockRange, err := GetBlockRangeFromFlag(cmd, "range") + blockRange, err := types.GetBlockRangeFromFlag(cmd, "range") if err != nil { return err } diff --git a/cmd/tools/tools_check_merged_batch.go b/cmd/tools/check/merged_batch.go similarity index 86% rename from cmd/tools/tools_check_merged_batch.go rename to cmd/tools/check/merged_batch.go index 268c467..309d364 100644 --- a/cmd/tools/tools_check_merged_batch.go +++ b/cmd/tools/check/merged_batch.go @@ -1,4 +1,4 @@ -package tools +package check import ( "context" @@ -7,10 +7,10 @@ import ( "strconv" "strings" - pbbstream 
"github.com/streamingfast/bstream/pb/sf/bstream/v1" - "github.com/streamingfast/bstream" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/dstore" + "github.com/streamingfast/firehose-core/types" ) type blockRef struct { @@ -41,13 +41,13 @@ func CheckMergedBlocksBatch( sourceStoreURL string, destStoreURL string, fileBlockSize uint64, - blockRange BlockRange, + blockRange types.BlockRange, ) error { if !blockRange.IsResolved() { return fmt.Errorf("check merged blocks can only work with fully resolved range, got %s", blockRange) } - expected := RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize) + expected := types.RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize) fileBlockSize64 := uint64(fileBlockSize) blocksStore, err := dstore.NewDBinStore(sourceStoreURL) @@ -62,7 +62,7 @@ func CheckMergedBlocksBatch( } } - var firstFilename = fmt.Sprintf("%010d", RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize)) + var firstFilename = fmt.Sprintf("%010d", types.RoundToBundleStartBlock(uint64(blockRange.Start), fileBlockSize)) lastSeenBlock := &blockRef{} @@ -103,12 +103,15 @@ func CheckMergedBlocksBatch( destStore.WriteObject(ctx, outputFile, strings.NewReader("")) } } else { - brokenSince := RoundToBundleStartBlock(uint64(lastSeenBlock.num+1), 100) + brokenSince := types.RoundToBundleStartBlock(uint64(lastSeenBlock.num+1), 100) for i := brokenSince; i <= baseNum; i += fileBlockSize64 { fmt.Printf("found broken file %q, %s\n", filename, details) if destStore != nil { outputFile := fmt.Sprintf("%010d.broken", i) - destStore.WriteObject(ctx, outputFile, strings.NewReader("")) + err := destStore.WriteObject(ctx, outputFile, strings.NewReader("")) + if err != nil { + return fmt.Errorf("unable to write broken file %q: %w", outputFile, err) + } } } } @@ -120,7 +123,7 @@ func CheckMergedBlocksBatch( return err } - if blockRange.IsClosed() && RoundToBundleEndBlock(baseNum, fileBlockSize) >= 
*blockRange.Stop-1 { + if blockRange.IsClosed() && types.RoundToBundleEndBlock(baseNum, fileBlockSize) >= *blockRange.Stop-1 { return dstore.StopIteration } expected = baseNum + fileBlockSize64 diff --git a/cmd/tools/tools_checkmergedbatch.go b/cmd/tools/check/mergedbatch.go similarity index 76% rename from cmd/tools/tools_checkmergedbatch.go rename to cmd/tools/check/mergedbatch.go index 946d5b1..3b484eb 100644 --- a/cmd/tools/tools_checkmergedbatch.go +++ b/cmd/tools/check/mergedbatch.go @@ -12,25 +12,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -package tools +package check import ( "strconv" "github.com/spf13/cobra" "github.com/streamingfast/cli/sflags" + "github.com/streamingfast/firehose-core/types" ) -var toolsCheckMergedBlocksBatchCmd = &cobra.Command{ - Use: "merged-blocks-batch ", - Short: "Checks for any missing, disordered or duplicate blocks in merged blocks files", - Args: cobra.ExactArgs(3), - RunE: checkMergedBlocksBatchRunE, -} - -func init() { - toolsCheckCmd.AddCommand(toolsCheckMergedBlocksBatchCmd) +func newCheckMergedBlockBatchCmd() *cobra.Command { + var toolsCheckMergedBlocksBatchCmd = &cobra.Command{ + Use: "merged-blocks-batch ", + Short: "Checks for any missing, disordered or duplicate blocks in merged blocks files", + Args: cobra.ExactArgs(3), + RunE: checkMergedBlocksBatchRunE, + } toolsCheckMergedBlocksBatchCmd.PersistentFlags().String("output-to-store", "", "If non-empty, an empty file called .broken will be created for every problematic merged-blocks-file. 
This is a convenient way to gather the results from multiple parallel processes.") + return toolsCheckMergedBlocksBatchCmd + } func checkMergedBlocksBatchRunE(cmd *cobra.Command, args []string) error { @@ -45,7 +46,7 @@ func checkMergedBlocksBatchRunE(cmd *cobra.Command, args []string) error { } fileBlockSize := uint64(100) - blockRange := BlockRange{ + blockRange := types.BlockRange{ Start: int64(start), Stop: &stop, } diff --git a/cmd/tools/tools_firehose_client.go b/cmd/tools/firehose/client.go similarity index 94% rename from cmd/tools/tools_firehose_client.go rename to cmd/tools/firehose/client.go index 02eda5b..d416ff3 100644 --- a/cmd/tools/tools_firehose_client.go +++ b/cmd/tools/firehose/client.go @@ -1,18 +1,20 @@ -package tools +package firehose import ( "context" "fmt" + "io" + "github.com/spf13/cobra" "github.com/streamingfast/cli/sflags" firecore "github.com/streamingfast/firehose-core" + "github.com/streamingfast/firehose-core/types" "github.com/streamingfast/jsonpb" pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" - "io" ) -func newToolsFirehoseClientCmd[B firecore.Block](chain *firecore.Chain[B], logger *zap.Logger) *cobra.Command { +func NewToolsFirehoseClientCmd[B firecore.Block](chain *firecore.Chain[B], logger *zap.Logger) *cobra.Command { cmd := &cobra.Command{ Use: "firehose-client ", Short: "Connects to a Firehose endpoint over gRPC and print block stream as JSON to terminal", @@ -43,7 +45,7 @@ func getFirehoseClientE[B firecore.Block](chain *firecore.Chain[B], rootLog *zap } defer connClose() - blockRange, err := GetBlockRangeFromArg(args[1]) + blockRange, err := types.GetBlockRangeFromArg(args[1]) if err != nil { return fmt.Errorf("invalid range %q: %w", args[1], err) } diff --git a/cmd/tools/firehose/firehose.go b/cmd/tools/firehose/firehose.go new file mode 100644 index 0000000..6c81705 --- /dev/null +++ b/cmd/tools/firehose/firehose.go @@ -0,0 +1,123 @@ +package firehose + +import ( + "fmt" + "os" + + 
"github.com/mostynb/go-grpc-compression/zstd" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "github.com/streamingfast/cli/sflags" + firecore "github.com/streamingfast/firehose-core" + "github.com/streamingfast/firehose-core/firehose/client" + pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/encoding/gzip" + "google.golang.org/protobuf/types/known/anypb" +) + +type firehoseRequestInfo struct { + GRPCCallOpts []grpc.CallOption + Cursor string + FinalBlocksOnly bool + Transforms []*anypb.Any +} + +func getFirehoseFetchClientFromCmd[B firecore.Block](cmd *cobra.Command, logger *zap.Logger, endpoint string, chain *firecore.Chain[B]) ( + firehoseClient pbfirehose.FetchClient, + connClose func() error, + requestInfo *firehoseRequestInfo, + err error, +) { + return getFirehoseClientFromCmd[B, pbfirehose.FetchClient](cmd, logger, "fetch-client", endpoint, chain) +} + +func getFirehoseStreamClientFromCmd[B firecore.Block](cmd *cobra.Command, logger *zap.Logger, endpoint string, chain *firecore.Chain[B]) ( + firehoseClient pbfirehose.StreamClient, + connClose func() error, + requestInfo *firehoseRequestInfo, + err error, +) { + return getFirehoseClientFromCmd[B, pbfirehose.StreamClient](cmd, logger, "stream-client", endpoint, chain) +} + +func getFirehoseClientFromCmd[B firecore.Block, C any](cmd *cobra.Command, logger *zap.Logger, kind string, endpoint string, chain *firecore.Chain[B]) ( + firehoseClient C, + connClose func() error, + requestInfo *firehoseRequestInfo, + err error, +) { + requestInfo = &firehoseRequestInfo{} + + jwt := os.Getenv(sflags.MustGetString(cmd, "api-token-env-var")) + plaintext := sflags.MustGetBool(cmd, "plaintext") + insecure := sflags.MustGetBool(cmd, "insecure") + + if sflags.FlagDefined(cmd, "cursor") { + requestInfo.Cursor = sflags.MustGetString(cmd, "cursor") + } + + if sflags.FlagDefined(cmd, "final-blocks-only") { + requestInfo.FinalBlocksOnly = 
sflags.MustGetBool(cmd, "final-blocks-only") + } + + var rawClient any + if kind == "stream-client" { + rawClient, connClose, requestInfo.GRPCCallOpts, err = client.NewFirehoseClient(endpoint, jwt, insecure, plaintext) + } else if kind == "fetch-client" { + rawClient, connClose, err = client.NewFirehoseFetchClient(endpoint, jwt, insecure, plaintext) + } else { + panic(fmt.Errorf("unsupported Firehose client kind: %s", kind)) + } + + if err != nil { + return firehoseClient, nil, nil, err + } + + firehoseClient = rawClient.(C) + + compression := sflags.MustGetString(cmd, "compression") + var compressor grpc.CallOption + switch compression { + case "gzip": + compressor = grpc.UseCompressor(gzip.Name) + case "zstd": + compressor = grpc.UseCompressor(zstd.Name) + case "none": + // Valid value but nothing to do + default: + return firehoseClient, nil, nil, fmt.Errorf("invalid value for compression: only 'gzip', 'zstd' or 'none' are accepted") + + } + + if compressor != nil { + requestInfo.GRPCCallOpts = append(requestInfo.GRPCCallOpts, compressor) + } + + if chain.Tools.TransformFlags != nil { + requestInfo.Transforms, err = chain.Tools.TransformFlags.Parse(cmd, logger) + } + + if err != nil { + return firehoseClient, nil, nil, fmt.Errorf("unable to parse transforms flags: %w", err) + } + + return +} + +func addFirehoseStreamClientFlagsToSet[B firecore.Block](flags *pflag.FlagSet, chain *firecore.Chain[B]) { + addFirehoseFetchClientFlagsToSet(flags, chain) + + flags.String("cursor", "", "Use this cursor with the request to resume your stream at the following block pointed by the cursor") +} + +func addFirehoseFetchClientFlagsToSet[B firecore.Block](flags *pflag.FlagSet, chain *firecore.Chain[B]) { + flags.StringP("api-token-env-var", "a", "FIREHOSE_API_TOKEN", "Look for a JWT in this environment variable to authenticate against endpoint") + flags.String("compression", "none", "The HTTP compression: use either 'none', 'gzip' or 'zstd'") + flags.BoolP("plaintext", "p", 
false, "Use plaintext connection to Firehose") + flags.BoolP("insecure", "k", false, "Use SSL connection to Firehose but skip SSL certificate validation") + if chain.Tools.TransformFlags != nil { + chain.Tools.TransformFlags.Register(flags) + } +} diff --git a/cmd/tools/tools_firehose_prometheus_exporter.go b/cmd/tools/firehose/prometheus_exporter.go similarity index 97% rename from cmd/tools/tools_firehose_prometheus_exporter.go rename to cmd/tools/firehose/prometheus_exporter.go index 5cc8a00..10b9971 100644 --- a/cmd/tools/tools_firehose_prometheus_exporter.go +++ b/cmd/tools/firehose/prometheus_exporter.go @@ -1,4 +1,4 @@ -package tools +package firehose import ( "context" @@ -22,7 +22,7 @@ var lastBlockReceived time.Time var driftSec = prometheus.NewGaugeVec(prometheus.GaugeOpts{Name: "firehose_healthcheck_drift", Help: "Time since the most recent block received (seconds)"}, []string{"endpoint"}) // You should add your custom 'transforms' flags to this command in your init(), then parse them in transformsSetter -func newToolsFirehosePrometheusExporterCmd[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger, tracer logging.Tracer) *cobra.Command { +func NewToolsFirehosePrometheusExporterCmd[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger, tracer logging.Tracer) *cobra.Command { cmd := &cobra.Command{ Use: "firehose-prometheus-exporter ", Short: "stream blocks near the chain HEAD and report to prometheus", diff --git a/cmd/tools/tools_firehose_single_block_client.go b/cmd/tools/firehose/single_block_client.go similarity index 96% rename from cmd/tools/tools_firehose_single_block_client.go rename to cmd/tools/firehose/single_block_client.go index bf2dbee..b6b0a5a 100644 --- a/cmd/tools/tools_firehose_single_block_client.go +++ b/cmd/tools/firehose/single_block_client.go @@ -1,4 +1,4 @@ -package tools +package firehose import ( "context" @@ -15,7 +15,7 @@ import ( ) // You should add your custom 'transforms' flags to this command in your 
init(), then parse them in transformsSetter -func newToolsFirehoseSingleBlockClientCmd[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger, tracer logging.Tracer) *cobra.Command { +func NewToolsFirehoseSingleBlockClientCmd[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger, tracer logging.Tracer) *cobra.Command { cmd := &cobra.Command{ Use: "firehose-single-block-client {endpoint} {block_num|block_num:block_id|cursor}", Short: "fetch a single block from firehose and print as JSON", diff --git a/cmd/tools/tools_download_from_firehose.go b/cmd/tools/firehose/tools_download_from_firehose.go similarity index 95% rename from cmd/tools/tools_download_from_firehose.go rename to cmd/tools/firehose/tools_download_from_firehose.go index 5d4995e..0ff8562 100644 --- a/cmd/tools/tools_download_from_firehose.go +++ b/cmd/tools/firehose/tools_download_from_firehose.go @@ -1,8 +1,9 @@ -package tools +package firehose import ( "context" "fmt" + "io" "strconv" "time" @@ -18,7 +19,7 @@ import ( "google.golang.org/protobuf/types/known/anypb" ) -func newToolsDownloadFromFirehoseCmd[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger) *cobra.Command { +func NewToolsDownloadFromFirehoseCmd[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger) *cobra.Command { cmd := &cobra.Command{ Use: "download-from-firehose ", Short: "Download blocks from Firehose and save them to merged-blocks", @@ -68,10 +69,10 @@ func createToolsDownloadFromFirehoseE[B firecore.Block](chain *firecore.Chain[B] return err } - mergeWriter := &mergedBlocksWriter{ - store: store, - tweakBlock: func(b *pbbstream.Block) (*pbbstream.Block, error) { return b, nil }, - logger: zlog, + mergeWriter := &firecore.MergedBlocksWriter{ + Store: store, + TweakBlock: func(b *pbbstream.Block) (*pbbstream.Block, error) { return b, nil }, + Logger: zlog, } approximateLIBWarningIssued := false diff --git a/cmd/tools/tools_print.go b/cmd/tools/print/tools_print.go similarity index 88% rename from 
cmd/tools/tools_print.go rename to cmd/tools/print/tools_print.go index 0ac63d0..04aa29b 100644 --- a/cmd/tools/tools_print.go +++ b/cmd/tools/print/tools_print.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package tools +package print import ( "fmt" @@ -20,6 +20,8 @@ import ( "os" "strconv" + "github.com/streamingfast/firehose-core/types" + "github.com/spf13/cobra" "github.com/streamingfast/bstream" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" @@ -31,25 +33,23 @@ import ( "google.golang.org/protobuf/proto" ) -var toolsPrintCmd = &cobra.Command{ - Use: "print", - Short: "Prints of one block or merged blocks file", -} - -var toolsPrintOneBlockCmd = &cobra.Command{ - Use: "one-block ", - Short: "Prints a block from a one-block file", - Args: cobra.ExactArgs(2), -} +func NewToolsPrintCmd[B firecore.Block](chain *firecore.Chain[B]) *cobra.Command { + toolsPrintCmd := &cobra.Command{ + Use: "print", + Short: "Prints of one block or merged blocks file", + } -var toolsPrintMergedBlocksCmd = &cobra.Command{ - Use: "merged-blocks ", - Short: "Prints the content summary of a merged blocks file.", - Args: cobra.ExactArgs(2), -} + toolsPrintOneBlockCmd := &cobra.Command{ + Use: "one-block ", + Short: "Prints a block from a one-block file", + Args: cobra.ExactArgs(2), + } -func init() { - ToolsCmd.AddCommand(toolsPrintCmd) + toolsPrintMergedBlocksCmd := &cobra.Command{ + Use: "merged-blocks ", + Short: "Prints the content summary of a merged blocks file.", + Args: cobra.ExactArgs(2), + } toolsPrintCmd.AddCommand(toolsPrintOneBlockCmd) toolsPrintCmd.AddCommand(toolsPrintMergedBlocksCmd) @@ -57,11 +57,11 @@ func init() { toolsPrintCmd.PersistentFlags().StringP("output", "o", "text", "Output mode for block printing, either 'text', 'json' or 'jsonl'") toolsPrintCmd.PersistentFlags().StringSlice("proto-paths", []string{"~/.proto"}, "Paths to proto files to use for dynamic decoding of 
blocks") toolsPrintCmd.PersistentFlags().Bool("transactions", false, "When in 'text' output mode, also print transactions summary") -} -func configureToolsPrintCmd[B firecore.Block](chain *firecore.Chain[B]) { toolsPrintOneBlockCmd.RunE = createToolsPrintOneBlockE(chain) toolsPrintMergedBlocksCmd.RunE = createToolsPrintMergedBlocksE(chain) + + return toolsPrintCmd } func createToolsPrintMergedBlocksE[B firecore.Block](chain *firecore.Chain[B]) firecore.CommandExecutor { @@ -85,7 +85,7 @@ func createToolsPrintMergedBlocksE[B firecore.Block](chain *firecore.Chain[B]) f if err != nil { return fmt.Errorf("invalid base block %q: %w", args[1], err) } - blockBoundary := RoundToBundleStartBlock(startBlock, 100) + blockBoundary := types.RoundToBundleStartBlock(startBlock, 100) filename := fmt.Sprintf("%010d", blockBoundary) reader, err := store.OpenObject(ctx, filename) @@ -215,13 +215,13 @@ func toolsPrintCmdGetOutputMode(cmd *cobra.Command) (PrintOutputMode, error) { return out, nil } -func displayBlock[B firecore.Block](pbBlock *pbbstream.Block, chain *firecore.Chain[B], outputMode PrintOutputMode, printTransactions bool, jencoder *jsonencoder.Encoder) error { +func displayBlock[B firecore.Block](pbBlock *pbbstream.Block, chain *firecore.Chain[B], outputMode PrintOutputMode, printTransactions bool, encoder *jsonencoder.Encoder) error { if pbBlock == nil { return fmt.Errorf("block is nil") } if outputMode == PrintOutputModeText { - if err := printBStreamBlock(pbBlock, printTransactions, os.Stdout); err != nil { + if err := PrintBStreamBlock(pbBlock, printTransactions, os.Stdout); err != nil { return fmt.Errorf("pbBlock text printing: %w", err) } return nil @@ -241,7 +241,7 @@ func displayBlock[B firecore.Block](pbBlock *pbbstream.Block, chain *firecore.Ch } } - err := jencoder.Marshal(marshallableBlock) + err := encoder.Marshal(marshallableBlock) if err != nil { return fmt.Errorf("pbBlock JSON printing: json marshal: %w", err) } @@ -250,13 +250,13 @@ func displayBlock[B 
firecore.Block](pbBlock *pbbstream.Block, chain *firecore.Ch // since we are running directly the firecore binary we will *NOT* use the BlockFactory if isLegacyBlock { - return jencoder.MarshalLegacy(pbBlock.GetPayloadKind(), pbBlock.GetPayloadBuffer()) + return encoder.MarshalLegacy(pbBlock.GetPayloadKind(), pbBlock.GetPayloadBuffer()) } - return jencoder.Marshal(pbBlock.Payload) + return encoder.Marshal(pbBlock.Payload) } -func printBStreamBlock(b *pbbstream.Block, printTransactions bool, out io.Writer) error { +func PrintBStreamBlock(b *pbbstream.Block, printTransactions bool, out io.Writer) error { _, err := out.Write( []byte( fmt.Sprintf( diff --git a/cmd/tools/tools_print_enum.go b/cmd/tools/print/tools_print_enum.go similarity index 99% rename from cmd/tools/tools_print_enum.go rename to cmd/tools/print/tools_print_enum.go index 5749abf..1c70e04 100644 --- a/cmd/tools/tools_print_enum.go +++ b/cmd/tools/print/tools_print_enum.go @@ -4,7 +4,7 @@ // Build Date: // Built By: -package tools +package print import ( "fmt" diff --git a/cmd/tools/tools.go b/cmd/tools/tools.go index ff13269..9d20cf3 100644 --- a/cmd/tools/tools.go +++ b/cmd/tools/tools.go @@ -16,37 +16,33 @@ package tools import ( "fmt" - "os" - "github.com/mostynb/go-grpc-compression/zstd" "github.com/spf13/cobra" - "github.com/spf13/pflag" - "github.com/streamingfast/cli/sflags" firecore "github.com/streamingfast/firehose-core" - "github.com/streamingfast/firehose-core/firehose/client" + "github.com/streamingfast/firehose-core/cmd/tools/check" + "github.com/streamingfast/firehose-core/cmd/tools/firehose" + print2 "github.com/streamingfast/firehose-core/cmd/tools/print" "github.com/streamingfast/logging" - pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" - "google.golang.org/grpc" - "google.golang.org/grpc/encoding/gzip" - "google.golang.org/protobuf/types/known/anypb" ) var ToolsCmd = &cobra.Command{Use: "tools", Short: "Developer tools for operators and developers"} +var 
MaxUint64 = ^uint64(0) func ConfigureToolsCmd[B firecore.Block]( chain *firecore.Chain[B], logger *zap.Logger, tracer logging.Tracer, ) error { - configureToolsCheckCmd(chain, logger) - configureToolsPrintCmd(chain) + + ToolsCmd.AddCommand(check.NewCheckCommand(chain, logger)) + ToolsCmd.AddCommand(print2.NewToolsPrintCmd(chain)) ToolsCmd.AddCommand(newToolsCompareBlocksCmd(chain)) - ToolsCmd.AddCommand(newToolsDownloadFromFirehoseCmd(chain, logger)) - ToolsCmd.AddCommand(newToolsFirehoseClientCmd(chain, logger)) - ToolsCmd.AddCommand(newToolsFirehoseSingleBlockClientCmd(chain, logger, tracer)) - ToolsCmd.AddCommand(newToolsFirehosePrometheusExporterCmd(chain, logger, tracer)) + ToolsCmd.AddCommand(firehose.NewToolsDownloadFromFirehoseCmd(chain, logger)) + ToolsCmd.AddCommand(firehose.NewToolsFirehoseClientCmd(chain, logger)) + ToolsCmd.AddCommand(firehose.NewToolsFirehoseSingleBlockClientCmd(chain, logger, tracer)) + ToolsCmd.AddCommand(firehose.NewToolsFirehosePrometheusExporterCmd(chain, logger, tracer)) ToolsCmd.AddCommand(newToolsUnmergeBlocksCmd(chain, logger)) ToolsCmd.AddCommand(newToolsFixBloatedMergedBlocks(chain, logger)) @@ -71,108 +67,3 @@ func ConfigureToolsCmd[B firecore.Block]( return nil } - -func addFirehoseStreamClientFlagsToSet[B firecore.Block](flags *pflag.FlagSet, chain *firecore.Chain[B]) { - addFirehoseFetchClientFlagsToSet(flags, chain) - - flags.String("cursor", "", "Use this cursor with the request to resume your stream at the following block pointed by the cursor") -} - -func addFirehoseFetchClientFlagsToSet[B firecore.Block](flags *pflag.FlagSet, chain *firecore.Chain[B]) { - flags.StringP("api-token-env-var", "a", "FIREHOSE_API_TOKEN", "Look for a JWT in this environment variable to authenticate against endpoint") - flags.String("compression", "none", "The HTTP compression: use either 'none', 'gzip' or 'zstd'") - flags.BoolP("plaintext", "p", false, "Use plaintext connection to Firehose") - flags.BoolP("insecure", "k", false, "Use SSL 
connection to Firehose but skip SSL certificate validation") - if chain.Tools.TransformFlags != nil { - chain.Tools.TransformFlags.Register(flags) - } -} - -type firehoseRequestInfo struct { - GRPCCallOpts []grpc.CallOption - Cursor string - FinalBlocksOnly bool - Transforms []*anypb.Any -} - -func getFirehoseFetchClientFromCmd[B firecore.Block](cmd *cobra.Command, logger *zap.Logger, endpoint string, chain *firecore.Chain[B]) ( - firehoseClient pbfirehose.FetchClient, - connClose func() error, - requestInfo *firehoseRequestInfo, - err error, -) { - return getFirehoseClientFromCmd[B, pbfirehose.FetchClient](cmd, logger, "fetch-client", endpoint, chain) -} - -func getFirehoseStreamClientFromCmd[B firecore.Block](cmd *cobra.Command, logger *zap.Logger, endpoint string, chain *firecore.Chain[B]) ( - firehoseClient pbfirehose.StreamClient, - connClose func() error, - requestInfo *firehoseRequestInfo, - err error, -) { - return getFirehoseClientFromCmd[B, pbfirehose.StreamClient](cmd, logger, "stream-client", endpoint, chain) -} - -func getFirehoseClientFromCmd[B firecore.Block, C any](cmd *cobra.Command, logger *zap.Logger, kind string, endpoint string, chain *firecore.Chain[B]) ( - firehoseClient C, - connClose func() error, - requestInfo *firehoseRequestInfo, - err error, -) { - requestInfo = &firehoseRequestInfo{} - - jwt := os.Getenv(sflags.MustGetString(cmd, "api-token-env-var")) - plaintext := sflags.MustGetBool(cmd, "plaintext") - insecure := sflags.MustGetBool(cmd, "insecure") - - if sflags.FlagDefined(cmd, "cursor") { - requestInfo.Cursor = sflags.MustGetString(cmd, "cursor") - } - - if sflags.FlagDefined(cmd, "final-blocks-only") { - requestInfo.FinalBlocksOnly = sflags.MustGetBool(cmd, "final-blocks-only") - } - - var rawClient any - if kind == "stream-client" { - rawClient, connClose, requestInfo.GRPCCallOpts, err = client.NewFirehoseClient(endpoint, jwt, insecure, plaintext) - } else if kind == "fetch-client" { - rawClient, connClose, err = 
client.NewFirehoseFetchClient(endpoint, jwt, insecure, plaintext) - } else { - panic(fmt.Errorf("unsupported Firehose client kind: %s", kind)) - } - - if err != nil { - return firehoseClient, nil, nil, err - } - - firehoseClient = rawClient.(C) - - compression := sflags.MustGetString(cmd, "compression") - var compressor grpc.CallOption - switch compression { - case "gzip": - compressor = grpc.UseCompressor(gzip.Name) - case "zstd": - compressor = grpc.UseCompressor(zstd.Name) - case "none": - // Valid value but nothing to do - default: - return firehoseClient, nil, nil, fmt.Errorf("invalid value for compression: only 'gzip', 'zstd' or 'none' are accepted") - - } - - if compressor != nil { - requestInfo.GRPCCallOpts = append(requestInfo.GRPCCallOpts, compressor) - } - - if chain.Tools.TransformFlags != nil { - requestInfo.Transforms, err = chain.Tools.TransformFlags.Parse(cmd, logger) - } - - if err != nil { - return firehoseClient, nil, nil, fmt.Errorf("unable to parse transforms flags: %w", err) - } - - return -} diff --git a/cmd/tools/tools_compare_blocks.go b/cmd/tools/tools_compare_blocks.go index 3997b38..d38c009 100644 --- a/cmd/tools/tools_compare_blocks.go +++ b/cmd/tools/tools_compare_blocks.go @@ -31,6 +31,8 @@ import ( "github.com/streamingfast/cli/sflags" "github.com/streamingfast/dstore" firecore "github.com/streamingfast/firehose-core" + "github.com/streamingfast/firehose-core/cmd/tools/check" + "github.com/streamingfast/firehose-core/types" "go.uber.org/multierr" "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/proto" @@ -82,7 +84,7 @@ func runCompareBlocksE[B firecore.Block](chain *firecore.Chain[B]) firecore.Comm warnAboutExtraBlocks := sync.Once{} ctx := cmd.Context() - blockRange, err := GetBlockRangeFromArg(args[2]) + blockRange, err := types.GetBlockRangeFromArg(args[2]) if err != nil { return fmt.Errorf("parsing range: %w", err) } @@ -103,7 +105,7 @@ func runCompareBlocksE[B firecore.Block](chain *firecore.Chain[B]) 
firecore.Comm return fmt.Errorf("unable to create store at path %q: %w", args[1], err) } - segments, err := blockRange.Split(segmentSize, EndBoundaryExclusive) + segments, err := blockRange.Split(segmentSize, types.EndBoundaryExclusive) if err != nil { return fmt.Errorf("unable to split blockrage in segments: %w", err) } @@ -111,7 +113,7 @@ func runCompareBlocksE[B firecore.Block](chain *firecore.Chain[B]) firecore.Comm segments: segments, } - err = storeReference.Walk(ctx, WalkBlockPrefix(blockRange, 100), func(filename string) (err error) { + err = storeReference.Walk(ctx, check.WalkBlockPrefix(blockRange, 100), func(filename string) (err error) { fileStartBlock, err := strconv.Atoi(filename) if err != nil { return fmt.Errorf("parsing filename: %w", err) @@ -122,7 +124,7 @@ func runCompareBlocksE[B firecore.Block](chain *firecore.Chain[B]) firecore.Comm return dstore.StopIteration } - if blockRange.Contains(uint64(fileStartBlock), EndBoundaryExclusive) { + if blockRange.Contains(uint64(fileStartBlock), types.EndBoundaryExclusive) { var wg sync.WaitGroup var bundleErrLock sync.Mutex var bundleReadErr error @@ -247,7 +249,7 @@ func readBundle[B firecore.Block]( } type state struct { - segments []BlockRange + segments []types.BlockRange currentSegmentIdx int blocksCountedInThisSegment int differencesFound int @@ -256,10 +258,10 @@ type state struct { } func (s *state) process(blockNum uint64, isDifferent bool, isMissing bool) { - if !s.segments[s.currentSegmentIdx].Contains(blockNum, EndBoundaryExclusive) { // moving forward + if !s.segments[s.currentSegmentIdx].Contains(blockNum, types.EndBoundaryExclusive) { // moving forward s.print() for i := s.currentSegmentIdx; i < len(s.segments); i++ { - if s.segments[i].Contains(blockNum, EndBoundaryExclusive) { + if s.segments[i].Contains(blockNum, types.EndBoundaryExclusive) { s.currentSegmentIdx = i s.totalBlocksCounted += s.blocksCountedInThisSegment s.differencesFound = 0 diff --git 
a/cmd/tools/tools_fix_bloated_merged_blocks.go b/cmd/tools/tools_fix_bloated_merged_blocks.go index 063a78c..6be4194 100644 --- a/cmd/tools/tools_fix_bloated_merged_blocks.go +++ b/cmd/tools/tools_fix_bloated_merged_blocks.go @@ -9,6 +9,8 @@ import ( pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/dstore" firecore "github.com/streamingfast/firehose-core" + "github.com/streamingfast/firehose-core/cmd/tools/check" + "github.com/streamingfast/firehose-core/types" "go.uber.org/zap" ) @@ -35,12 +37,12 @@ func runFixBloatedMergedBlocksE(zlog *zap.Logger) firecore.CommandExecutor { return fmt.Errorf("unable to create destination store: %w", err) } - blockRange, err := GetBlockRangeFromArg(args[2]) + blockRange, err := types.GetBlockRangeFromArg(args[2]) if err != nil { return fmt.Errorf("parsing block range: %w", err) } - err = srcStore.Walk(ctx, WalkBlockPrefix(blockRange, 100), func(filename string) error { + err = srcStore.Walk(ctx, check.WalkBlockPrefix(blockRange, 100), func(filename string) error { zlog.Debug("checking merged block file", zap.String("filename", filename)) startBlock := mustParseUint64(filename) @@ -66,10 +68,10 @@ func runFixBloatedMergedBlocksE(zlog *zap.Logger) firecore.CommandExecutor { return fmt.Errorf("creating block reader: %w", err) } - mergeWriter := &mergedBlocksWriter{ - store: destStore, - tweakBlock: func(b *pbbstream.Block) (*pbbstream.Block, error) { return b, nil }, - logger: zlog, + mergeWriter := &firecore.MergedBlocksWriter{ + Store: destStore, + TweakBlock: func(b *pbbstream.Block) (*pbbstream.Block, error) { return b, nil }, + Logger: zlog, } seen := make(map[string]bool) @@ -84,11 +86,11 @@ func runFixBloatedMergedBlocksE(zlog *zap.Logger) firecore.CommandExecutor { break } - if block.Number < uint64(startBlock) { + if block.Number < startBlock { continue } - if block.Number > uint64(blockRange.GetStopBlockOr(MaxUint64)) { + if block.Number > blockRange.GetStopBlockOr(MaxUint64) { break } 
diff --git a/cmd/tools/tools_unmerge_blocks.go b/cmd/tools/tools_unmerge_blocks.go index 8d90e5b..f448aa5 100644 --- a/cmd/tools/tools_unmerge_blocks.go +++ b/cmd/tools/tools_unmerge_blocks.go @@ -5,6 +5,9 @@ import ( "io" "strconv" + "github.com/streamingfast/firehose-core/cmd/tools/check" + "github.com/streamingfast/firehose-core/types" + "github.com/spf13/cobra" "github.com/streamingfast/bstream" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" @@ -37,12 +40,12 @@ func runUnmergeBlocksE(zlog *zap.Logger) firecore.CommandExecutor { return fmt.Errorf("unable to create destination store: %w", err) } - blockRange, err := GetBlockRangeFromArg(args[2]) + blockRange, err := types.GetBlockRangeFromArg(args[2]) if err != nil { return fmt.Errorf("parsing block range: %w", err) } - err = srcStore.Walk(ctx, WalkBlockPrefix(blockRange, 100), func(filename string) error { + err = srcStore.Walk(ctx, check.WalkBlockPrefix(blockRange, 100), func(filename string) error { zlog.Debug("checking merged block file", zap.String("filename", filename)) startBlock := mustParseUint64(filename) diff --git a/cmd/tools/tools_upgrade_merged_blocks.go b/cmd/tools/tools_upgrade_merged_blocks.go index f0025ae..e277329 100644 --- a/cmd/tools/tools_upgrade_merged_blocks.go +++ b/cmd/tools/tools_upgrade_merged_blocks.go @@ -8,7 +8,6 @@ import ( "strconv" "github.com/spf13/cobra" - "github.com/streamingfast/bstream" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/bstream/stream" "github.com/streamingfast/dstore" @@ -49,12 +48,12 @@ func getMergedBlockUpgrader(tweakFunc func(block *pbbstream.Block) (*pbbstream.B } rootLog.Info("starting block upgrader process", zap.Uint64("start", start), zap.Uint64("stop", stop), zap.String("source", source), zap.String("dest", dest)) - writer := &mergedBlocksWriter{ - cmd: cmd, - store: destStore, - lowBlockNum: lowBoundary(start), - stopBlockNum: stop, - tweakBlock: tweakFunc, + writer := 
&firecore.MergedBlocksWriter{ + Cmd: cmd, + Store: destStore, + LowBlockNum: firecore.LowBoundary(start), + StopBlockNum: stop, + TweakBlock: tweakFunc, } stream := stream.New(nil, sourceStore, nil, int64(start), writer, stream.WithFinalBlocksOnly()) @@ -66,104 +65,3 @@ func getMergedBlockUpgrader(tweakFunc func(block *pbbstream.Block) (*pbbstream.B return err } } - -type mergedBlocksWriter struct { - store dstore.Store - lowBlockNum uint64 - stopBlockNum uint64 - - blocks []*pbbstream.Block - logger *zap.Logger - cmd *cobra.Command - - tweakBlock func(*pbbstream.Block) (*pbbstream.Block, error) -} - -func (w *mergedBlocksWriter) ProcessBlock(blk *pbbstream.Block, obj interface{}) error { - if w.tweakBlock != nil { - b, err := w.tweakBlock(blk) - if err != nil { - return fmt.Errorf("tweaking block: %w", err) - } - blk = b - } - - if w.lowBlockNum == 0 && blk.Number > 99 { // initial block - if blk.Number%100 != 0 && blk.Number != bstream.GetProtocolFirstStreamableBlock { - return fmt.Errorf("received unexpected block %s (not a boundary, not the first streamable block %d)", blk, bstream.GetProtocolFirstStreamableBlock) - } - w.lowBlockNum = lowBoundary(blk.Number) - w.logger.Debug("setting initial boundary to %d upon seeing block %s", zap.Uint64("low_boundary", w.lowBlockNum), zap.Stringer("blk", blk)) - } - - if blk.Number > w.lowBlockNum+99 { - w.logger.Debug("bundling because we saw block %s from next bundle (%d was not seen, it must not exist on this chain)", zap.Stringer("blk", blk), zap.Uint64("last_bundle_block", w.lowBlockNum+99)) - if err := w.writeBundle(); err != nil { - return err - } - } - - if w.stopBlockNum > 0 && blk.Number >= w.stopBlockNum { - return io.EOF - } - - w.blocks = append(w.blocks, blk) - - if blk.Number == w.lowBlockNum+99 { - w.logger.Debug("bundling on last bundle block", zap.Uint64("last_bundle_block", w.lowBlockNum+99)) - if err := w.writeBundle(); err != nil { - return err - } - return nil - } - - return nil -} - -func filename(num 
uint64) string { - return fmt.Sprintf("%010d", num) -} - -func (w *mergedBlocksWriter) writeBundle() error { - file := filename(w.lowBlockNum) - w.logger.Info("writing merged file to store (suffix: .dbin.zst)", zap.String("filename", file), zap.Uint64("lowBlockNum", w.lowBlockNum)) - - if len(w.blocks) == 0 { - return fmt.Errorf("no blocks to write to bundle") - } - - pr, pw := io.Pipe() - - go func() { - var err error - defer func() { - pw.CloseWithError(err) - }() - - blockWriter, err := bstream.NewDBinBlockWriter(pw) - if err != nil { - return - } - - for _, blk := range w.blocks { - err = blockWriter.Write(blk) - if err != nil { - return - } - } - }() - - err := w.store.WriteObject(context.Background(), file, pr) - if err != nil { - w.logger.Error("writing to store", zap.Error(err)) - } - - w.lowBlockNum += 100 - w.blocks = nil - - return err -} - -func lowBoundary(i uint64) uint64 { - return i - (i % 100) -} diff --git a/jsonencoder/encoder.go b/jsonencoder/encoder.go index 15094fb..13515b7 100644 --- a/jsonencoder/encoder.go +++ b/jsonencoder/encoder.go @@ -4,12 +4,10 @@ import ( "fmt" "os" - pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" - - "github.com/streamingfast/firehose-core/protoregistry" - "github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" + "github.com/streamingfast/firehose-core/protoregistry" ) type Encoder struct { diff --git a/mergedblockswriter.go b/mergedblockswriter.go new file mode 100644 index 0000000..d141846 --- /dev/null +++ b/mergedblockswriter.go @@ -0,0 +1,113 @@ +package firecore + +import ( + "context" + "fmt" + "io" + + "github.com/spf13/cobra" + "github.com/streamingfast/bstream" + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" + "github.com/streamingfast/dstore" + "go.uber.org/zap" +) + +type MergedBlocksWriter struct { + Store dstore.Store + LowBlockNum uint64 + StopBlockNum uint64 + + blocks 
[]*pbbstream.Block + Logger *zap.Logger + Cmd *cobra.Command + + TweakBlock func(*pbbstream.Block) (*pbbstream.Block, error) +} + +func (w *MergedBlocksWriter) ProcessBlock(blk *pbbstream.Block, obj interface{}) error { + if w.TweakBlock != nil { + b, err := w.TweakBlock(blk) + if err != nil { + return fmt.Errorf("tweaking block: %w", err) + } + blk = b + } + + if w.LowBlockNum == 0 && blk.Number > 99 { // initial block + if blk.Number%100 != 0 && blk.Number != bstream.GetProtocolFirstStreamableBlock { + return fmt.Errorf("received unexpected block %s (not a boundary, not the first streamable block %d)", blk, bstream.GetProtocolFirstStreamableBlock) + } + w.LowBlockNum = LowBoundary(blk.Number) + w.Logger.Debug("setting initial boundary to %d upon seeing block %s", zap.Uint64("low_boundary", w.LowBlockNum), zap.Stringer("blk", blk)) + } + + if blk.Number > w.LowBlockNum+99 { + w.Logger.Debug("bundling because we saw block %s from next bundle (%d was not seen, it must not exist on this chain)", zap.Stringer("blk", blk), zap.Uint64("last_bundle_block", w.LowBlockNum+99)) + if err := w.writeBundle(); err != nil { + return err + } + } + + if w.StopBlockNum > 0 && blk.Number >= w.StopBlockNum { + return io.EOF + } + + w.blocks = append(w.blocks, blk) + + if blk.Number == w.LowBlockNum+99 { + w.Logger.Debug("bundling on last bundle block", zap.Uint64("last_bundle_block", w.LowBlockNum+99)) + if err := w.writeBundle(); err != nil { + return err + } + return nil + } + + return nil +} + +func (w *MergedBlocksWriter) writeBundle() error { + file := filename(w.LowBlockNum) + w.Logger.Info("writing merged file to store (suffix: .dbin.zst)", zap.String("filename", file), zap.Uint64("lowBlockNum", w.LowBlockNum)) + + if len(w.blocks) == 0 { + return fmt.Errorf("no blocks to write to bundle") + } + + pr, pw := io.Pipe() + + go func() { + var err error + defer func() { + pw.CloseWithError(err) + }() + + blockWriter, err := bstream.NewDBinBlockWriter(pw) + if err != nil { + return 
+ } + + for _, blk := range w.blocks { + err = blockWriter.Write(blk) + if err != nil { + return + } + } + }() + + err := w.Store.WriteObject(context.Background(), file, pr) + if err != nil { + w.Logger.Error("writing to store", zap.Error(err)) + } + + w.LowBlockNum += 100 + w.blocks = nil + + return err +} +func filename(num uint64) string { + return fmt.Sprintf("%010d", num) +} + +func LowBoundary(i uint64) uint64 { + return i - (i % 100) +} diff --git a/cmd/tools/block_range.go b/types/block_range.go similarity index 99% rename from cmd/tools/block_range.go rename to types/block_range.go index d386e98..a51d3da 100644 --- a/cmd/tools/block_range.go +++ b/types/block_range.go @@ -1,4 +1,4 @@ -package tools +package types import ( "fmt" diff --git a/cmd/tools/block_range_enum.go b/types/block_range_enum.go similarity index 99% rename from cmd/tools/block_range_enum.go rename to types/block_range_enum.go index 7a1ec49..c7d4c86 100644 --- a/cmd/tools/block_range_enum.go +++ b/types/block_range_enum.go @@ -4,7 +4,7 @@ // Build Date: // Built By: -package tools +package types import ( "fmt" diff --git a/cmd/tools/flags.go b/types/flags.go similarity index 82% rename from cmd/tools/flags.go rename to types/flags.go index 7a35858..06b51b6 100644 --- a/cmd/tools/flags.go +++ b/types/flags.go @@ -1,4 +1,4 @@ -package tools +package types import ( "fmt" @@ -10,7 +10,7 @@ import ( ) func GetBlockRangeFromArg(in string) (out BlockRange, err error) { - return parseBlockRange(in, bstream.GetProtocolFirstStreamableBlock) + return ParseBlockRange(in, bstream.GetProtocolFirstStreamableBlock) } func GetBlockRangeFromFlag(cmd *cobra.Command, flagName string) (out BlockRange, err error) { @@ -25,7 +25,7 @@ func GetBlockRangeFromFlag(cmd *cobra.Command, flagName string) (out BlockRange, return out, fmt.Errorf("accepting a single range for now, got %d", len(rawRanges)) } - out, err = parseBlockRange(rawRanges[0], bstream.GetProtocolFirstStreamableBlock) + out, err = 
ParseBlockRange(rawRanges[0], bstream.GetProtocolFirstStreamableBlock) if err != nil { return out, fmt.Errorf("decode range: %w", err) } diff --git a/cmd/tools/types.go b/types/types.go similarity index 96% rename from cmd/tools/types.go rename to types/types.go index c1c5a63..e872b13 100644 --- a/cmd/tools/types.go +++ b/types/types.go @@ -1,4 +1,4 @@ -package tools +package types import ( "fmt" diff --git a/cmd/tools/types_test.go b/types/types_test.go similarity index 97% rename from cmd/tools/types_test.go rename to types/types_test.go index 1844009..01afea7 100644 --- a/cmd/tools/types_test.go +++ b/types/types_test.go @@ -1,4 +1,4 @@ -package tools +package types import ( "testing" diff --git a/cmd/tools/utils.go b/types/utils.go similarity index 97% rename from cmd/tools/utils.go rename to types/utils.go index 64a3313..c241b08 100644 --- a/cmd/tools/utils.go +++ b/types/utils.go @@ -1,4 +1,4 @@ -package tools +package types import ( "fmt" @@ -26,7 +26,7 @@ func PrettyBlockNum(b uint64) string { return "#" + strings.ReplaceAll(humanize.Comma(int64(b)), ",", " ") } -func parseBlockRange(input string, firstStreamableBlock uint64) (out BlockRange, err error) { +func ParseBlockRange(input string, firstStreamableBlock uint64) (out BlockRange, err error) { if input == "" || input == "-1" { return NewOpenRange(-1), nil } diff --git a/cmd/tools/utils_test.go b/types/utils_test.go similarity index 97% rename from cmd/tools/utils_test.go rename to types/utils_test.go index 6373d0a..25f4c8b 100644 --- a/cmd/tools/utils_test.go +++ b/types/utils_test.go @@ -1,4 +1,4 @@ -package tools +package types import ( "testing" @@ -57,7 +57,7 @@ func Test_readBlockRange(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := parseBlockRange(tt.args.blockRangeArg, tt.args.chainFirstStreamableBlock) + got, err := ParseBlockRange(tt.args.blockRangeArg, tt.args.chainFirstStreamableBlock) if tt.assertion == nil { tt.assertion = require.NoError From 
e851074dc7869a56f6a1b8ed60c7416986cf0c84 Mon Sep 17 00:00:00 2001 From: billettc Date: Tue, 5 Dec 2023 13:17:37 -0500 Subject: [PATCH 50/66] finish refactor of tools and apps package --- cmd/tools/check/blocks.go | 11 ++++------- .../{ => compare}/tools_compare_blocks.go | 8 ++++---- .../tools_fix_bloated_merged_blocks.go | 10 +++++----- .../{ => mergeblock}/tools_unmerge_blocks.go | 19 +++++-------------- .../tools_upgrade_merged_blocks.go | 2 +- cmd/tools/tools.go | 13 +++++++++---- constants.go | 1 + utils.go | 8 ++++++++ 8 files changed, 37 insertions(+), 35 deletions(-) rename cmd/tools/{ => compare}/tools_compare_blocks.go (98%) rename cmd/tools/{ => fix}/tools_fix_bloated_merged_blocks.go (92%) rename cmd/tools/{ => mergeblock}/tools_unmerge_blocks.go (88%) rename cmd/tools/{ => mergeblock}/tools_upgrade_merged_blocks.go (99%) diff --git a/cmd/tools/check/blocks.go b/cmd/tools/check/blocks.go index 8b86091..0aa4f62 100644 --- a/cmd/tools/check/blocks.go +++ b/cmd/tools/check/blocks.go @@ -9,15 +9,13 @@ import ( "regexp" "strconv" - print2 "github.com/streamingfast/firehose-core/cmd/tools/print" - - "github.com/streamingfast/firehose-core/types" - "github.com/streamingfast/bstream" "github.com/streamingfast/bstream/forkable" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/dstore" firecore "github.com/streamingfast/firehose-core" + print2 "github.com/streamingfast/firehose-core/cmd/tools/print" + "github.com/streamingfast/firehose-core/types" "go.uber.org/zap" ) @@ -29,7 +27,6 @@ const ( PrintNoDetails PrintDetails = iota PrintStats PrintFull - MaxUint64 = ^uint64(0) ) func CheckMergedBlocks[B firecore.Block](ctx context.Context, chain *firecore.Chain[B], logger *zap.Logger, storeURL string, fileBlockSize uint64, blockRange types.BlockRange, printDetails PrintDetails) error { @@ -42,7 +39,7 @@ func CheckMergedBlocks[B firecore.Block](ctx context.Context, chain *firecore.Ch var expected uint64 var count int var 
highestBlockSeen uint64 - lowestBlockSeen := MaxUint64 + lowestBlockSeen := firecore.MaxUint64 if !blockRange.IsResolved() { return fmt.Errorf("check merged blocks can only work with fully resolved range, got %s", blockRange) @@ -178,7 +175,7 @@ func validateBlockSegment[B firecore.Block]( printDetails PrintDetails, tfdb *trackedForkDB, ) (lowestBlockSeen, highestBlockSeen uint64) { - lowestBlockSeen = MaxUint64 + lowestBlockSeen = firecore.MaxUint64 reader, err := store.OpenObject(ctx, segment) if err != nil { fmt.Printf("❌ Unable to read blocks segment %s: %s\n", segment, err) diff --git a/cmd/tools/tools_compare_blocks.go b/cmd/tools/compare/tools_compare_blocks.go similarity index 98% rename from cmd/tools/tools_compare_blocks.go rename to cmd/tools/compare/tools_compare_blocks.go index d38c009..4942418 100644 --- a/cmd/tools/tools_compare_blocks.go +++ b/cmd/tools/compare/tools_compare_blocks.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package tools +package compare import ( "bytes" @@ -39,7 +39,7 @@ import ( "google.golang.org/protobuf/reflect/protoreflect" ) -func newToolsCompareBlocksCmd[B firecore.Block](chain *firecore.Chain[B]) *cobra.Command { +func NewToolsCompareBlocksCmd[B firecore.Block](chain *firecore.Chain[B]) *cobra.Command { cmd := &cobra.Command{ Use: "compare-blocks []", Short: "Checks for any differences between two block stores between a specified range. 
(To compare the likeness of two block ranges, for example)", @@ -93,7 +93,7 @@ func runCompareBlocksE[B firecore.Block](chain *firecore.Chain[B]) firecore.Comm return fmt.Errorf("invalid block range, you must provide a closed range fully resolved (no negative value)") } - stopBlock := uint64(blockRange.GetStopBlockOr(MaxUint64)) + stopBlock := blockRange.GetStopBlockOr(firecore.MaxUint64) // Create stores storeReference, err := dstore.NewDBinStore(args[0]) @@ -281,7 +281,7 @@ func (s *state) process(blockNum uint64, isDifferent bool, isMissing bool) { } func (s *state) print() { - endBlock := fmt.Sprintf("%d", s.segments[s.currentSegmentIdx].GetStopBlockOr(MaxUint64)) + endBlock := fmt.Sprintf("%d", s.segments[s.currentSegmentIdx].GetStopBlockOr(firecore.MaxUint64)) if s.totalBlocksCounted == 0 { fmt.Printf("✖ No blocks were found at all for segment %d - %s\n", s.segments[s.currentSegmentIdx].Start, endBlock) diff --git a/cmd/tools/tools_fix_bloated_merged_blocks.go b/cmd/tools/fix/tools_fix_bloated_merged_blocks.go similarity index 92% rename from cmd/tools/tools_fix_bloated_merged_blocks.go rename to cmd/tools/fix/tools_fix_bloated_merged_blocks.go index 6be4194..0642988 100644 --- a/cmd/tools/tools_fix_bloated_merged_blocks.go +++ b/cmd/tools/fix/tools_fix_bloated_merged_blocks.go @@ -1,4 +1,4 @@ -package tools +package fix import ( "fmt" @@ -14,7 +14,7 @@ import ( "go.uber.org/zap" ) -func newToolsFixBloatedMergedBlocks[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger) *cobra.Command { +func NewToolsFixBloatedMergedBlocks[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger) *cobra.Command { return &cobra.Command{ Use: "fix-bloated-merged-blocks []", Short: "Fixes 'corrupted' merged-blocks that contain extraneous or duplicate blocks. Some older versions of the merger may have produce such bloated merged-blocks. 
All merged-blocks files in given range will be rewritten, regardless of if they were corrupted.", @@ -45,9 +45,9 @@ func runFixBloatedMergedBlocksE(zlog *zap.Logger) firecore.CommandExecutor { err = srcStore.Walk(ctx, check.WalkBlockPrefix(blockRange, 100), func(filename string) error { zlog.Debug("checking merged block file", zap.String("filename", filename)) - startBlock := mustParseUint64(filename) + startBlock := firecore.MustParseUint64(filename) - if startBlock > uint64(blockRange.GetStopBlockOr(MaxUint64)) { + if startBlock > uint64(blockRange.GetStopBlockOr(firecore.MaxUint64)) { zlog.Debug("skipping merged block file", zap.String("reason", "past stop block"), zap.String("filename", filename)) return dstore.StopIteration } @@ -90,7 +90,7 @@ func runFixBloatedMergedBlocksE(zlog *zap.Logger) firecore.CommandExecutor { continue } - if block.Number > blockRange.GetStopBlockOr(MaxUint64) { + if block.Number > blockRange.GetStopBlockOr(firecore.MaxUint64) { break } diff --git a/cmd/tools/tools_unmerge_blocks.go b/cmd/tools/mergeblock/tools_unmerge_blocks.go similarity index 88% rename from cmd/tools/tools_unmerge_blocks.go rename to cmd/tools/mergeblock/tools_unmerge_blocks.go index f448aa5..dbe79b0 100644 --- a/cmd/tools/tools_unmerge_blocks.go +++ b/cmd/tools/mergeblock/tools_unmerge_blocks.go @@ -1,9 +1,8 @@ -package tools +package mergeblock import ( "fmt" "io" - "strconv" "github.com/streamingfast/firehose-core/cmd/tools/check" "github.com/streamingfast/firehose-core/types" @@ -11,13 +10,12 @@ import ( "github.com/spf13/cobra" "github.com/streamingfast/bstream" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" - "github.com/streamingfast/cli" "github.com/streamingfast/dstore" firecore "github.com/streamingfast/firehose-core" "go.uber.org/zap" ) -func newToolsUnmergeBlocksCmd[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger) *cobra.Command { +func NewToolsUnmergeBlocksCmd[B firecore.Block](chain *firecore.Chain[B], zlog *zap.Logger) 
*cobra.Command { return &cobra.Command{ Use: "unmerge-blocks []", Short: "Unmerges merged block files into one-block-files", @@ -48,9 +46,9 @@ func runUnmergeBlocksE(zlog *zap.Logger) firecore.CommandExecutor { err = srcStore.Walk(ctx, check.WalkBlockPrefix(blockRange, 100), func(filename string) error { zlog.Debug("checking merged block file", zap.String("filename", filename)) - startBlock := mustParseUint64(filename) + startBlock := firecore.MustParseUint64(filename) - if startBlock > uint64(blockRange.GetStopBlockOr(MaxUint64)) { + if startBlock > uint64(blockRange.GetStopBlockOr(firecore.MaxUint64)) { zlog.Debug("skipping merged block file", zap.String("reason", "past stop block"), zap.String("filename", filename)) return dstore.StopIteration } @@ -82,7 +80,7 @@ func runUnmergeBlocksE(zlog *zap.Logger) firecore.CommandExecutor { continue } - if block.Number > uint64(blockRange.GetStopBlockOr(MaxUint64)) { + if block.Number > blockRange.GetStopBlockOr(firecore.MaxUint64) { break } @@ -134,10 +132,3 @@ func runUnmergeBlocksE(zlog *zap.Logger) firecore.CommandExecutor { return nil } } - -func mustParseUint64(s string) uint64 { - i, err := strconv.Atoi(s) - cli.NoError(err, "Unable to parse %q as uint64", s) - - return uint64(i) -} diff --git a/cmd/tools/tools_upgrade_merged_blocks.go b/cmd/tools/mergeblock/tools_upgrade_merged_blocks.go similarity index 99% rename from cmd/tools/tools_upgrade_merged_blocks.go rename to cmd/tools/mergeblock/tools_upgrade_merged_blocks.go index e277329..ad476a6 100644 --- a/cmd/tools/tools_upgrade_merged_blocks.go +++ b/cmd/tools/mergeblock/tools_upgrade_merged_blocks.go @@ -1,4 +1,4 @@ -package tools +package mergeblock import ( "context" diff --git a/cmd/tools/tools.go b/cmd/tools/tools.go index 9d20cf3..af2337e 100644 --- a/cmd/tools/tools.go +++ b/cmd/tools/tools.go @@ -17,6 +17,11 @@ package tools import ( "fmt" + "github.com/streamingfast/firehose-core/cmd/tools/fix" + 
"github.com/streamingfast/firehose-core/cmd/tools/mergeblock" + + "github.com/streamingfast/firehose-core/cmd/tools/compare" + "github.com/spf13/cobra" firecore "github.com/streamingfast/firehose-core" "github.com/streamingfast/firehose-core/cmd/tools/check" @@ -38,16 +43,16 @@ func ConfigureToolsCmd[B firecore.Block]( ToolsCmd.AddCommand(check.NewCheckCommand(chain, logger)) ToolsCmd.AddCommand(print2.NewToolsPrintCmd(chain)) - ToolsCmd.AddCommand(newToolsCompareBlocksCmd(chain)) + ToolsCmd.AddCommand(compare.NewToolsCompareBlocksCmd(chain)) ToolsCmd.AddCommand(firehose.NewToolsDownloadFromFirehoseCmd(chain, logger)) ToolsCmd.AddCommand(firehose.NewToolsFirehoseClientCmd(chain, logger)) ToolsCmd.AddCommand(firehose.NewToolsFirehoseSingleBlockClientCmd(chain, logger, tracer)) ToolsCmd.AddCommand(firehose.NewToolsFirehosePrometheusExporterCmd(chain, logger, tracer)) - ToolsCmd.AddCommand(newToolsUnmergeBlocksCmd(chain, logger)) - ToolsCmd.AddCommand(newToolsFixBloatedMergedBlocks(chain, logger)) + ToolsCmd.AddCommand(mergeblock.NewToolsUnmergeBlocksCmd(chain, logger)) + ToolsCmd.AddCommand(fix.NewToolsFixBloatedMergedBlocks(chain, logger)) if chain.Tools.MergedBlockUpgrader != nil { - ToolsCmd.AddCommand(NewToolsUpgradeMergedBlocksCmd(chain, logger)) + ToolsCmd.AddCommand(mergeblock.NewToolsUpgradeMergedBlocksCmd(chain, logger)) } if chain.Tools.RegisterExtraCmd != nil { diff --git a/constants.go b/constants.go index 4886093..026e4e1 100644 --- a/constants.go +++ b/constants.go @@ -3,6 +3,7 @@ package firecore // Those are `var` and globally available so that some chains to keep backward-compatibility can // change them. This is not advertised and should **not** be used by new chain. 
var ( + MaxUint64 = ^uint64(0) // Common ports MetricsListenAddr string = ":9102" diff --git a/utils.go b/utils.go index 9767001..458e66e 100644 --- a/utils.go +++ b/utils.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "path/filepath" + "strconv" "strings" "github.com/streamingfast/cli" @@ -72,3 +73,10 @@ var Example = func(in string) string { func ExamplePrefixed[B Block](chain *Chain[B], prefix, in string) string { return string(cli.ExamplePrefixed(chain.BinaryName()+" "+prefix, in)) } + +func MustParseUint64(s string) uint64 { + i, err := strconv.Atoi(s) + cli.NoError(err, "Unable to parse %q as uint64", s) + + return uint64(i) +} From bf490db5610c5399e37110c6a2078dcca7994f81 Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Tue, 5 Dec 2023 13:54:40 -0500 Subject: [PATCH 51/66] Added proto registry support --- .gitignore | 4 +- cmd/tools/firehose/client.go | 12 ++- cmd/tools/print/tools_print.go | 40 +++----- go.mod | 8 +- go.sum | 14 +++ jsonencoder/encoder.go | 25 +++-- jsonencoder/proto.go | 2 +- protoregistry/generator/generator.go | 125 ++++++++++++++++++++++++ protoregistry/generator/template.gotmpl | 50 ++++++++++ protoregistry/registry.go | 51 ++++------ protoregistry/well_known.go | 77 +++++++++++++++ types/block_range_enum.go | 11 +-- 12 files changed, 334 insertions(+), 85 deletions(-) create mode 100644 protoregistry/generator/generator.go create mode 100644 protoregistry/generator/template.gotmpl create mode 100644 protoregistry/well_known.go diff --git a/.gitignore b/.gitignore index 9410e92..48a7ee1 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ .idea /build -/dist \ No newline at end of file +/dist +.envrc +.env \ No newline at end of file diff --git a/cmd/tools/firehose/client.go b/cmd/tools/firehose/client.go index d416ff3..4e0d4ca 100644 --- a/cmd/tools/firehose/client.go +++ b/cmd/tools/firehose/client.go @@ -8,8 +8,8 @@ import ( "github.com/spf13/cobra" "github.com/streamingfast/cli/sflags" firecore 
"github.com/streamingfast/firehose-core" + "github.com/streamingfast/firehose-core/cmd/tools/print" "github.com/streamingfast/firehose-core/types" - "github.com/streamingfast/jsonpb" pbfirehose "github.com/streamingfast/pbgo/sf/firehose/v2" "go.uber.org/zap" ) @@ -24,7 +24,7 @@ func NewToolsFirehoseClientCmd[B firecore.Block](chain *firecore.Chain[B], logge addFirehoseStreamClientFlagsToSet(cmd.Flags(), chain) - cmd.Flags().StringSlice("proto-paths", []string{"~/.proto"}, "Paths to proto files to use for dynamic decoding of blocks") + cmd.Flags().StringSlice("proto-paths", []string{""}, "Paths to proto files to use for dynamic decoding of blocks") cmd.Flags().Bool("final-blocks-only", false, "Only ask for final blocks") cmd.Flags().Bool("print-cursor-only", false, "Skip block decoding, only print the step cursor (useful for performance testing)") @@ -89,6 +89,11 @@ func getFirehoseClientE[B firecore.Block](chain *firecore.Chain[B], rootLog *zap }() } + jencoder, err := print.SetupJsonEncoder(cmd) + if err != nil { + return fmt.Errorf("unable to create json encoder: %w", err) + } + for { response, err := stream.Recv() if err != nil { @@ -110,10 +115,11 @@ func getFirehoseClientE[B firecore.Block](chain *firecore.Chain[B], rootLog *zap // async process the response go func() { - line, err := jsonpb.MarshalToString(response) + line, err := jencoder.MarshalToString(response) if err != nil { rootLog.Error("marshalling to string", zap.Error(err)) } + resp.ch <- line }() } diff --git a/cmd/tools/print/tools_print.go b/cmd/tools/print/tools_print.go index 04aa29b..6f35031 100644 --- a/cmd/tools/print/tools_print.go +++ b/cmd/tools/print/tools_print.go @@ -20,8 +20,6 @@ import ( "os" "strconv" - "github.com/streamingfast/firehose-core/types" - "github.com/spf13/cobra" "github.com/streamingfast/bstream" pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" @@ -30,7 +28,7 @@ import ( firecore "github.com/streamingfast/firehose-core" 
"github.com/streamingfast/firehose-core/jsonencoder" "github.com/streamingfast/firehose-core/protoregistry" - "google.golang.org/protobuf/proto" + "github.com/streamingfast/firehose-core/types" ) func NewToolsPrintCmd[B firecore.Block](chain *firecore.Chain[B]) *cobra.Command { @@ -55,7 +53,7 @@ func NewToolsPrintCmd[B firecore.Block](chain *firecore.Chain[B]) *cobra.Command toolsPrintCmd.AddCommand(toolsPrintMergedBlocksCmd) toolsPrintCmd.PersistentFlags().StringP("output", "o", "text", "Output mode for block printing, either 'text', 'json' or 'jsonl'") - toolsPrintCmd.PersistentFlags().StringSlice("proto-paths", []string{"~/.proto"}, "Paths to proto files to use for dynamic decoding of blocks") + toolsPrintCmd.PersistentFlags().StringSlice("proto-paths", []string{""}, "Paths to proto files to use for dynamic decoding of blocks") toolsPrintCmd.PersistentFlags().Bool("transactions", false, "When in 'text' output mode, also print transactions summary") toolsPrintOneBlockCmd.RunE = createToolsPrintOneBlockE(chain) @@ -101,7 +99,7 @@ func createToolsPrintMergedBlocksE[B firecore.Block](chain *firecore.Chain[B]) f return err } - jencoder, err := setupJsonEncoder(cmd) + jencoder, err := SetupJsonEncoder(cmd) if err != nil { return fmt.Errorf("unable to create json encoder: %w", err) } @@ -143,7 +141,7 @@ func createToolsPrintOneBlockE[B firecore.Block](chain *firecore.Chain[B]) firec printTransactions := sflags.MustGetBool(cmd, "transactions") - jencoder, err := setupJsonEncoder(cmd) + jencoder, err := SetupJsonEncoder(cmd) if err != nil { return fmt.Errorf("unable to create json encoder: %w", err) } @@ -227,18 +225,12 @@ func displayBlock[B firecore.Block](pbBlock *pbbstream.Block, chain *firecore.Ch return nil } - isLegacyBlock := pbBlock.Payload == nil if !chain.CoreBinaryEnabled { // since we are running via the chain specific binary (i.e. 
fireeth) we can use a BlockFactory marshallableBlock := chain.BlockFactory() - if isLegacyBlock { - if err := proto.Unmarshal(pbBlock.GetPayloadBuffer(), marshallableBlock); err != nil { - return fmt.Errorf("unmarshal legacy block payload to protocol block: %w", err) - } - } else { - if err := pbBlock.Payload.UnmarshalTo(marshallableBlock); err != nil { - return fmt.Errorf("pbBlock payload unmarshal: %w", err) - } + + if err := pbBlock.Payload.UnmarshalTo(marshallableBlock); err != nil { + return fmt.Errorf("pbBlock payload unmarshal: %w", err) } err := encoder.Marshal(marshallableBlock) @@ -247,12 +239,8 @@ func displayBlock[B firecore.Block](pbBlock *pbbstream.Block, chain *firecore.Ch } return nil } - // since we are running directly the firecore binary we will *NOT* use the BlockFactory - - if isLegacyBlock { - return encoder.MarshalLegacy(pbBlock.GetPayloadKind(), pbBlock.GetPayloadBuffer()) - } + // since we are running directly the firecore binary we will *NOT* use the BlockFactory return encoder.Marshal(pbBlock.Payload) } @@ -279,13 +267,17 @@ func PrintBStreamBlock(b *pbbstream.Block, printTransactions bool, out io.Writer return nil } -func setupJsonEncoder(cmd *cobra.Command) (*jsonencoder.Encoder, error) { - protoPaths := sflags.MustGetStringSlice(cmd, "proto-paths") +func SetupJsonEncoder(cmd *cobra.Command) (*jsonencoder.Encoder, error) { pbregistry := protoregistry.New() - if err := pbregistry.RegisterFiles(protoPaths); err != nil { - return nil, fmt.Errorf("unable to create dynamic printer: %w", err) + protoPaths := sflags.MustGetStringSlice(cmd, "proto-paths") + if len(protoPaths) > 0 { + if err := pbregistry.RegisterFiles(protoPaths); err != nil { + return nil, fmt.Errorf("unable to create dynamic printer: %w", err) + } } + pbregistry.Extends(protoregistry.WellKnownRegistry) + options := []jsonencoder.Option{ jsonencoder.WithBytesAsHex(), } diff --git a/go.mod b/go.mod index d7ec850..6adc290 100644 --- a/go.mod +++ b/go.mod @@ -3,16 +3,19 @@ module 
github.com/streamingfast/firehose-core go 1.21 require ( + buf.build/gen/go/bufbuild/reflect/connectrpc/go v1.12.0-20230822193137-310c9c4845dd.1 + buf.build/gen/go/bufbuild/reflect/protocolbuffers/go v1.31.0-20230822193137-310c9c4845dd.2 github.com/ShinyTrinkets/overseer v0.3.0 github.com/dustin/go-humanize v1.0.1 github.com/go-json-experiment/json v0.0.0-20231013223334-54c864be5b8d + github.com/iancoleman/strcase v0.2.0 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/mostynb/go-grpc-compression v1.1.17 github.com/prometheus/client_golang v1.16.0 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 - github.com/streamingfast/bstream v0.0.2-0.20231205163051-ade2f311eca3 + github.com/streamingfast/bstream v0.0.2-0.20231205185208-7e21cc7e64bc github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c @@ -37,6 +40,7 @@ require ( ) require ( + connectrpc.com/connect v1.12.0 // indirect github.com/bufbuild/protocompile v0.4.0 // indirect github.com/google/s2a-go v0.1.4 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect @@ -71,7 +75,7 @@ require ( github.com/bits-and-blooms/bitset v1.3.1 // indirect github.com/blendle/zapdriver v1.3.2-0.20200203083823-9200777f8a3d // indirect github.com/bobg/go-generics/v2 v2.1.1 // indirect - github.com/bufbuild/connect-go v1.10.0 // indirect + github.com/bufbuild/connect-go v1.10.0 github.com/bufbuild/connect-grpchealth-go v1.1.1 // indirect github.com/bufbuild/connect-grpcreflect-go v1.0.0 // indirect github.com/bufbuild/connect-opentelemetry-go v0.3.0 // indirect diff --git a/go.sum b/go.sum index a9b2d16..8933791 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,7 @@ +buf.build/gen/go/bufbuild/reflect/connectrpc/go v1.12.0-20230822193137-310c9c4845dd.1 
h1:xnX/gxrGjg0FKB/YLHwCLRPdGoVOblm3s9MZa1oNFIw= +buf.build/gen/go/bufbuild/reflect/connectrpc/go v1.12.0-20230822193137-310c9c4845dd.1/go.mod h1:ru4ObfnijLo+YjfhJFd5Xjljz+d8M+QD+ZZLn4zz6lw= +buf.build/gen/go/bufbuild/reflect/protocolbuffers/go v1.31.0-20230822193137-310c9c4845dd.2 h1:RF8bm8mWobc2HVWCrr5PUlCQcpfsrzL/dcydKLmVC7Y= +buf.build/gen/go/bufbuild/reflect/protocolbuffers/go v1.31.0-20230822193137-310c9c4845dd.2/go.mod h1:3JED1QGgFgqC45IIPkydCq6dIcQKfG6/Ghf0RfKr2Ok= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -66,6 +70,8 @@ cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7Biccwk cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= cloud.google.com/go/trace v1.10.1 h1:EwGdOLCNfYOOPtgqo+D2sDLZmRCEO1AagRTJCU6ztdg= cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= +connectrpc.com/connect v1.12.0 h1:HwKdOY0lGhhoHdsza+hW55aqHEC64pYpObRNoAgn70g= +connectrpc.com/connect v1.12.0/go.mod h1:3AGaO6RRGMx5IKFfqbe3hvK1NqLosFNP2BxDYTPmNPo= contrib.go.opencensus.io/exporter/stackdriver v0.12.6/go.mod h1:8x999/OcIPy5ivx/wDiV7Gx4D+VUPODf0mWRGRc5kSk= contrib.go.opencensus.io/exporter/stackdriver v0.13.10 h1:a9+GZPUe+ONKUwULjlEOucMMG0qfSCCenlji0Nhqbys= contrib.go.opencensus.io/exporter/stackdriver v0.13.10/go.mod h1:I5htMbyta491eUxufwwZPQdcKvvgzMB4O9ni41YnIM8= @@ -383,6 +389,8 @@ github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/iancoleman/strcase v0.2.0 
h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -572,6 +580,12 @@ github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jH github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streamingfast/bstream v0.0.2-0.20231205163051-ade2f311eca3 h1:u8orpRssS8rYceziOQ/mbBQHlYh5w06oOtTXK90/yMc= github.com/streamingfast/bstream v0.0.2-0.20231205163051-ade2f311eca3/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= +github.com/streamingfast/bstream v0.0.2-0.20231205174934-869fb7d64fd2 h1:TQPPxjBXflVinpSSbYaiMuNgw1HB1YnMcFiR52M8EVo= +github.com/streamingfast/bstream v0.0.2-0.20231205174934-869fb7d64fd2/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= +github.com/streamingfast/bstream v0.0.2-0.20231205175345-609448673b00 h1:U/8aQZOpOzLTVcuEVdbEVffVu00ixotkTe8DRhEXxao= +github.com/streamingfast/bstream v0.0.2-0.20231205175345-609448673b00/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= +github.com/streamingfast/bstream v0.0.2-0.20231205185208-7e21cc7e64bc h1:ioohiLa+d59fqToa2OhbUx418YMrqt2bLT+m+fmjOG8= +github.com/streamingfast/bstream v0.0.2-0.20231205185208-7e21cc7e64bc/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 h1:UxJUTcEVkdZy8N77E3exz0iNlgQuxl4m220GPvzdZ2s= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80/go.mod h1:QxjVH73Lkqk+mP8bndvhMuQDUINfkgsYhdCH/5TJFKI= github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 
h1:49JYZkn8ALGe+LhcACZyX3L9B8tIxRZ3F3l+OxmNMhY= diff --git a/jsonencoder/encoder.go b/jsonencoder/encoder.go index 13515b7..ea667b5 100644 --- a/jsonencoder/encoder.go +++ b/jsonencoder/encoder.go @@ -1,25 +1,22 @@ package jsonencoder import ( - "fmt" + "bytes" "os" "github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" - pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/firehose-core/protoregistry" ) type Encoder struct { - e *jsontext.Encoder - files *protoregistry.Files - marshallers []*json.Marshalers + protoRegistry *protoregistry.Registry + marshallers []*json.Marshalers } -func New(files *protoregistry.Files, opts ...Option) *Encoder { +func New(files *protoregistry.Registry, opts ...Option) *Encoder { e := &Encoder{ - e: jsontext.NewEncoder(os.Stdout), - files: files, + protoRegistry: files, } e.marshallers = []*json.Marshalers{ @@ -33,14 +30,14 @@ func New(files *protoregistry.Files, opts ...Option) *Encoder { } func (e *Encoder) Marshal(in any) error { - return json.MarshalEncode(e.e, in, json.WithMarshalers(json.NewMarshalers(e.marshallers...))) + return json.MarshalEncode(jsontext.NewEncoder(os.Stdout), in, json.WithMarshalers(json.NewMarshalers(e.marshallers...))) } -func (e *Encoder) MarshalLegacy(protocol pbbstream.Protocol, value []byte) error { - msg, err := e.files.UnmarshallLegacy(protocol, value) - if err != nil { - return fmt.Errorf("unmarshalling proto any: %w", err) +func (e *Encoder) MarshalToString(in any) (string, error) { + buf := bytes.NewBuffer(nil) + if err := json.MarshalEncode(jsontext.NewEncoder(buf), in, json.WithMarshalers(json.NewMarshalers(e.marshallers...))); err != nil { + return "", err } + return buf.String(), nil - return json.MarshalEncode(e.e, msg, json.WithMarshalers(json.NewMarshalers(e.marshallers...))) } diff --git a/jsonencoder/proto.go b/jsonencoder/proto.go index 22e9214..a787e6c 100644 --- a/jsonencoder/proto.go +++ b/jsonencoder/proto.go @@ -9,7 
+9,7 @@ import ( ) func (e *Encoder) protoAny(encoder *jsontext.Encoder, t *anypb.Any, options json.Options) error { - msg, err := e.files.Unmarshall(t.TypeUrl, t.Value) + msg, err := e.protoRegistry.Unmarshall(t.TypeUrl, t.Value) if err != nil { return fmt.Errorf("unmarshalling proto any: %w", err) } diff --git a/protoregistry/generator/generator.go b/protoregistry/generator/generator.go new file mode 100644 index 0000000..7fd9f67 --- /dev/null +++ b/protoregistry/generator/generator.go @@ -0,0 +1,125 @@ +package main + +import ( + "bufio" + "context" + "embed" + "encoding/hex" + "fmt" + "io" + "log" + "net/http" + "os" + "path/filepath" + "strings" + "text/template" + "time" + + "google.golang.org/protobuf/proto" + + "buf.build/gen/go/bufbuild/reflect/connectrpc/go/buf/reflect/v1beta1/reflectv1beta1connect" + reflectv1beta1 "buf.build/gen/go/bufbuild/reflect/protocolbuffers/go/buf/reflect/v1beta1" + connect "connectrpc.com/connect" + "github.com/iancoleman/strcase" + "github.com/streamingfast/cli" +) + +//go:embed *.gotmpl +var templates embed.FS + +var wellKnownProtoRepos = []string{ + "buf.build/streamingfast/firehose-ethereum", + "buf.build/streamingfast/firehose-near", + "buf.build/streamingfast/firehose-solana", + //"buf.build/streamingfast/firehose-bitcoin", +} + +func main() { + cli.Ensure(len(os.Args) == 3, "go run ./generator ") + + authToken := os.Getenv("BUFBUILD_AUTH_TOKEN") + if authToken == "" { + log.Fatalf("Please set the BUFBUILD_AUTH_TOKEN environment variable, to generate well known registry") + return + } + + output := os.Args[1] + packageName := os.Args[2] + + client := reflectv1beta1connect.NewFileDescriptorSetServiceClient( + http.DefaultClient, + "https://buf.build", + ) + + var protofiles []ProtoFile + + for _, wellKnownProtoRepo := range wellKnownProtoRepos { + request := connect.NewRequest(&reflectv1beta1.GetFileDescriptorSetRequest{ + Module: wellKnownProtoRepo, + }) + request.Header().Set("Authorization", "Bearer "+authToken) + 
fileDescriptorSet, err := client.GetFileDescriptorSet(context.Background(), request) + if err != nil { + log.Fatalf("failed to call file descriptor set service: %v", err) + return + } + + for _, file := range fileDescriptorSet.Msg.FileDescriptorSet.File { + cnt, err := proto.Marshal(file) + if err != nil { + log.Fatalf("failed to marshall proto file %s: %v", file.Name, err) + return + } + name := "" + if file.Name != nil { + name = *file.Name + } + protofiles = append(protofiles, ProtoFile{name, cnt}) + } + // avoid hitting the buf.build rate limit + time.Sleep(1 * time.Second) + } + + tmpl, err := template.New("wellknown").Funcs(templateFunctions()).ParseFS(templates, "*.gotmpl") + cli.NoError(err, "Unable to instantiate template") + + var out io.Writer = os.Stdout + if output != "-" { + cli.NoError(os.MkdirAll(filepath.Dir(output), os.ModePerm), "Unable to create output file directories") + + file, err := os.Create(output) + cli.NoError(err, "Unable to open output file") + + bufferedOut := bufio.NewWriter(file) + out = bufferedOut + + defer func() { + bufferedOut.Flush() + file.Close() + }() + } + + err = tmpl.ExecuteTemplate(out, "template.gotmpl", map[string]any{ + "Package": packageName, + "ProtoFiles": protofiles, + }) + cli.NoError(err, "Unable to render template") + + fmt.Println("Done creating well known registry") +} + +type ProtoFile struct { + Name string + Data []byte +} + +func templateFunctions() template.FuncMap { + return template.FuncMap{ + "lower": strings.ToLower, + "pascalCase": strcase.ToCamel, + "camelCase": strcase.ToLowerCamel, + "toHex": func(in []byte) string { + return hex.EncodeToString(in) + }, + } +} diff --git a/protoregistry/generator/template.gotmpl b/protoregistry/generator/template.gotmpl new file mode 100644 index 0000000..0bb6598 --- /dev/null +++ b/protoregistry/generator/template.gotmpl @@ -0,0 +1,50 @@ +// Code generated by generate_flags, DO NOT EDIT! 
+package {{.Package}} + +import ( + "fmt" + "encoding/hex" + + "github.com/jhump/protoreflect/desc" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" +) + + +var WellKnownRegistry = New() + +func init() { + protoFiles := []string{ + {{range .ProtoFiles}} + // {{.Name}} + "{{.Data | toHex}}", + {{end}} + } + + var files []*descriptorpb.FileDescriptorProto + for _, protoFile := range protoFiles { + files = append(files, mustProtoToFileDescriptor(protoFile)) + } + + fdmap, err := desc.CreateFileDescriptors(files) + if err != nil { + panic(fmt.Errorf("failed to create file descriptor map: %w", err)) + return + } + + for _, fd := range fdmap { + WellKnownRegistry.RegisterFileDescriptor(fd) + } +} + +func mustProtoToFileDescriptor(in string) *descriptorpb.FileDescriptorProto { + protoBytes, err := hex.DecodeString(in) + if err != nil { + panic(fmt.Errorf("failed to hex decode payload: %w", err)) + } + out := &descriptorpb.FileDescriptorProto{} + if err := proto.Unmarshal(protoBytes, out); err != nil { + panic(fmt.Errorf("failed to unmarshal file descriptor: %w", err)) + } + return out +} diff --git a/protoregistry/registry.go b/protoregistry/registry.go index 6f781ec..574c5e5 100644 --- a/protoregistry/registry.go +++ b/protoregistry/registry.go @@ -2,30 +2,29 @@ package protoregistry import ( "fmt" - "sync" - - pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" - - "github.com/jhump/protoreflect/dynamic" + "strings" "github.com/jhump/protoreflect/desc" + "github.com/jhump/protoreflect/dynamic" ) -// GlobalFiles is a global registry of file descriptors. -var GlobalFiles *Files = new(Files) +// Generate the flags based on Go code in this project directly, this however +// creates a chicken & egg problem if there is compilation error within the project +// but to fix them we must re-generate it. 
+//go:generate go run ./generator well_known.go protoregistry -type Files struct { - sync.RWMutex +type Registry struct { filesDescriptors []*desc.FileDescriptor } -func New() *Files { - return &Files{ +func New() *Registry { + f := &Registry{ filesDescriptors: []*desc.FileDescriptor{}, } + return f } -func (r *Files) RegisterFiles(files []string) error { +func (r *Registry) RegisterFiles(files []string) error { fileDescriptors, err := parseProtoFiles(files) if err != nil { return fmt.Errorf("parsing proto files: %w", err) @@ -34,9 +33,13 @@ func (r *Files) RegisterFiles(files []string) error { return nil } -func (r *Files) Unmarshall(typeURL string, value []byte) (*dynamic.Message, error) { +func (r *Registry) RegisterFileDescriptor(f *desc.FileDescriptor) { + r.filesDescriptors = append(r.filesDescriptors, f) +} + +func (r *Registry) Unmarshall(typeURL string, value []byte) (*dynamic.Message, error) { for _, fd := range r.filesDescriptors { - md := fd.FindSymbol(typeURL) + md := fd.FindSymbol(cleanTypeURL(typeURL)) if md != nil { dynMsg := dynamic.NewMessageFactoryWithDefaults().NewDynamicMessage(md.(*desc.MessageDescriptor)) if err := dynMsg.Unmarshal(value); err != nil { @@ -48,22 +51,10 @@ func (r *Files) Unmarshall(typeURL string, value []byte) (*dynamic.Message, erro return nil, fmt.Errorf("no message descriptor in registry for type url: %s", typeURL) } -func (r *Files) UnmarshallLegacy(protocol pbbstream.Protocol, value []byte) (*dynamic.Message, error) { - return r.Unmarshall(legacyKindsToProtoType(protocol), value) +func (r *Registry) Extends(registry *Registry) { + r.filesDescriptors = append(r.filesDescriptors, registry.filesDescriptors...) 
} -func legacyKindsToProtoType(protocol pbbstream.Protocol) string { - switch protocol { - case pbbstream.Protocol_EOS: - return "sf.antelope.type.v1.Block" - case pbbstream.Protocol_ETH: - return "sf.ethereum.type.v2.Block" - case pbbstream.Protocol_SOLANA: - return "sf.solana.type.v1.Block" - case pbbstream.Protocol_NEAR: - return "sf.near.type.v1.Block" - case pbbstream.Protocol_COSMOS: - return "sf.cosmos.type.v1.Block" - } - panic("unaligned protocol") +func cleanTypeURL(in string) string { + return strings.Replace(in, "type.googleapis.com/", "", 1) } diff --git a/protoregistry/well_known.go b/protoregistry/well_known.go new file mode 100644 index 0000000..2f4abed --- /dev/null +++ b/protoregistry/well_known.go @@ -0,0 +1,77 @@ +// Code generated by generate_flags, DO NOT EDIT! +package protoregistry + +import ( + "fmt" + "encoding/hex" + + "github.com/jhump/protoreflect/desc" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" +) + + +var WellKnownRegistry = New() + +func init() { + protoFiles := []string{ + + // sf/ethereum/substreams/v1/rpc.proto + "0a2373662f657468657265756d2f73756273747265616d732f76312f7270632e70726f746f121973662e657468657265756d2e73756273747265616d732e763122440a0852706343616c6c7312380a0563616c6c7318012003280b32222e73662e657468657265756d2e73756273747265616d732e76312e52706343616c6c520563616c6c7322360a0752706343616c6c12170a07746f5f6164647218012001280c5206746f4164647212120a046461746118022001280c52046461746122540a0c527063526573706f6e73657312440a09726573706f6e73657318012003280b32262e73662e657468657265756d2e73756273747265616d732e76312e527063526573706f6e73655209726573706f6e73657322370a0b527063526573706f6e736512100a0372617718012001280c520372617712160a066661696c656418022001280852066661696c656442575a556769746875622e636f6d2f73747265616d696e67666173742f66697265686f73652d657468657265756d2f74797065732f70622f73662f657468657265756d2f73756273747265616d732f76313b70626574687373620670726f746f33", + + // 
sf/ethereum/transform/v1/transforms.proto + "0a2973662f657468657265756d2f7472616e73666f726d2f76312f7472616e73666f726d732e70726f746f121873662e657468657265756d2e7472616e73666f726d2e763122d6010a0e436f6d62696e656446696c74657212440a0b6c6f675f66696c7465727318012003280b32232e73662e657468657265756d2e7472616e73666f726d2e76312e4c6f6746696c746572520a6c6f6746696c7465727312490a0c63616c6c5f66696c7465727318022003280b32262e73662e657468657265756d2e7472616e73666f726d2e76312e43616c6c546f46696c746572520b63616c6c46696c7465727312330a1673656e645f616c6c5f626c6f636b5f68656164657273180320012808521373656e64416c6c426c6f636b4865616465727322560a0e4d756c74694c6f6746696c74657212440a0b6c6f675f66696c7465727318012003280b32232e73662e657468657265756d2e7472616e73666f726d2e76312e4c6f6746696c746572520a6c6f6746696c7465727322540a094c6f6746696c746572121c0a0961646472657373657318012003280c520961646472657373657312290a106576656e745f7369676e61747572657318022003280c520f6576656e745369676e617475726573225e0a114d756c746943616c6c546f46696c74657212490a0c63616c6c5f66696c7465727318012003280b32262e73662e657468657265756d2e7472616e73666f726d2e76312e43616c6c546f46696c746572520b63616c6c46696c74657273224c0a0c43616c6c546f46696c746572121c0a0961646472657373657318012003280c5209616464726573736573121e0a0a7369676e61747572657318022003280c520a7369676e617475726573220c0a0a4c69676874426c6f636b220c0a0a4865616465724f6e6c79425a5a586769746875622e636f6d2f73747265616d696e67666173742f66697265686f73652d657468657265756d2f74797065732f70622f73662f657468657265756d2f7472616e73666f726d2f76313b70627472616e73666f726d620670726f746f33", + + // google/protobuf/timestamp.proto + 
"0a1f676f6f676c652f70726f746f6275662f74696d657374616d702e70726f746f120f676f6f676c652e70726f746f627566223b0a0954696d657374616d7012180a077365636f6e647318012001280352077365636f6e647312140a056e616e6f7318022001280552056e616e6f734285010a13636f6d2e676f6f676c652e70726f746f627566420e54696d657374616d7050726f746f50015a32676f6f676c652e676f6c616e672e6f72672f70726f746f6275662f74797065732f6b6e6f776e2f74696d657374616d707062f80101a20203475042aa021e476f6f676c652e50726f746f6275662e57656c6c4b6e6f776e5479706573620670726f746f33", + + // sf/ethereum/type/v2/type.proto + "0a1e73662f657468657265756d2f747970652f76322f747970652e70726f746f121373662e657468657265756d2e747970652e76321a1f676f6f676c652f70726f746f6275662f74696d657374616d702e70726f746f22c6030a05426c6f636b12120a046861736818022001280c52046861736812160a066e756d62657218032001280452066e756d62657212120a0473697a65180420012804520473697a6512380a0668656164657218052001280b32202e73662e657468657265756d2e747970652e76322e426c6f636b486561646572520668656164657212380a06756e636c657318062003280b32202e73662e657468657265756d2e747970652e76322e426c6f636b4865616465725206756e636c657312540a127472616e73616374696f6e5f747261636573180a2003280b32252e73662e657468657265756d2e747970652e76322e5472616e73616374696f6e547261636552117472616e73616374696f6e547261636573124b0a0f62616c616e63655f6368616e676573180b2003280b32222e73662e657468657265756d2e747970652e76322e42616c616e63654368616e6765520e62616c616e63654368616e67657312420a0c636f64655f6368616e67657318142003280b321f2e73662e657468657265756d2e747970652e76322e436f64654368616e6765520b636f64654368616e67657312100a0376657218012001280552037665724a04082810294a040829102a4a04082a102b22a8060a0b426c6f636b486561646572121f0a0b706172656e745f6861736818012001280c520a706172656e7448617368121d0a0a756e636c655f6861736818022001280c5209756e636c6548617368121a0a08636f696e6261736518032001280c5208636f696e62617365121d0a0a73746174655f726f6f7418042001280c52097374617465526f6f74122b0a117472616e73616374696f6e735f726f6f7418052001280c52107472616e73616374696f6e7
3526f6f7412210a0c726563656970745f726f6f7418062001280c520b72656365697074526f6f74121d0a0a6c6f67735f626c6f6f6d18072001280c52096c6f6773426c6f6f6d123b0a0a646966666963756c747918082001280b321b2e73662e657468657265756d2e747970652e76322e426967496e74520a646966666963756c747912460a10746f74616c5f646966666963756c747918112001280b321b2e73662e657468657265756d2e747970652e76322e426967496e74520f746f74616c446966666963756c747912160a066e756d62657218092001280452066e756d626572121b0a096761735f6c696d6974180a2001280452086761734c696d697412190a086761735f75736564180b2001280452076761735573656412380a0974696d657374616d70180c2001280b321a2e676f6f676c652e70726f746f6275662e54696d657374616d70520974696d657374616d70121d0a0a65787472615f64617461180d2001280c520965787472614461746112190a086d69785f68617368180e2001280c52076d69784861736812140a056e6f6e6365180f2001280452056e6f6e636512120a046861736818102001280c52046861736812440a10626173655f6665655f7065725f67617318122001280b321b2e73662e657468657265756d2e747970652e76322e426967496e74520d6261736546656550657247617312290a107769746864726177616c735f726f6f7418132001280c520f7769746864726177616c73526f6f74124b0a0d74785f646570656e64656e637918142001280b32262e73662e657468657265756d2e747970652e76322e55696e7436344e65737465644172726179520c7478446570656e64656e637922470a1155696e7436344e6573746564417272617912320a0376616c18012003280b32202e73662e657468657265756d2e747970652e76322e55696e7436344172726179520376616c221f0a0b55696e743634417272617912100a0376616c180120032804520376616c221e0a06426967496e7412140a05627974657318012001280c520562797465732287080a105472616e73616374696f6e5472616365120e0a02746f18012001280c5202746f12140a056e6f6e636518022001280452056e6f6e636512380a096761735f707269636518032001280b321b2e73662e657468657265756d2e747970652e76322e426967496e7452086761735072696365121b0a096761735f6c696d697418042001280452086761734c696d697412310a0576616c756518052001280b321b2e73662e657468657265756d2e747970652e76322e426967496e74520576616c756512140a05696e70757418062001280c5205696e707574120c0a017618072001280c5
20176120c0a017218082001280c520172120c0a017318092001280c52017312190a086761735f75736564180a20012804520767617355736564123e0a0474797065180c2001280e322a2e73662e657468657265756d2e747970652e76322e5472616e73616374696f6e54726163652e5479706552047479706512410a0b6163636573735f6c697374180e2003280b32202e73662e657468657265756d2e747970652e76322e4163636573735475706c65520a6163636573734c69737412420a0f6d61785f6665655f7065725f676173180b2001280b321b2e73662e657468657265756d2e747970652e76322e426967496e74520c6d617846656550657247617312530a186d61785f7072696f726974795f6665655f7065725f676173180d2001280b321b2e73662e657468657265756d2e747970652e76322e426967496e7452146d61785072696f7269747946656550657247617312140a05696e64657818142001280d5205696e64657812120a046861736818152001280c52046861736812120a0466726f6d18162001280c520466726f6d121f0a0b72657475726e5f6461746118172001280c520a72657475726e44617461121d0a0a7075626c69635f6b657918182001280c52097075626c69634b657912230a0d626567696e5f6f7264696e616c181920012804520c626567696e4f7264696e616c121f0a0b656e645f6f7264696e616c181a20012804520a656e644f7264696e616c12430a06737461747573181e2001280e322b2e73662e657468657265756d2e747970652e76322e5472616e73616374696f6e5472616365537461747573520673746174757312410a0772656365697074181f2001280b32272e73662e657468657265756d2e747970652e76322e5472616e73616374696f6e52656365697074520772656365697074122f0a0563616c6c7318202003280b32192e73662e657468657265756d2e747970652e76322e43616c6c520563616c6c73224f0a045479706512130a0f5452585f545950455f4c4547414359100012180a145452585f545950455f4143434553535f4c495354100112180a145452585f545950455f44594e414d49435f4645451002224a0a0b4163636573735475706c6512180a076164647265737318012001280c52076164647265737312210a0c73746f726167655f6b65797318022003280c520b73746f726167654b65797322b0010a125472616e73616374696f6e52656365697074121d0a0a73746174655f726f6f7418012001280c52097374617465526f6f74122e0a1363756d756c61746976655f6761735f75736564180220012804521163756d756c617469766547617355736564121d0a0a6c6f67735f626c6f6f6d180320012
80c52096c6f6773426c6f6f6d122c0a046c6f677318042003280b32182e73662e657468657265756d2e747970652e76322e4c6f6752046c6f6773229b010a034c6f6712180a076164647265737318012001280c52076164647265737312160a06746f7069637318022003280c5206746f7069637312120a046461746118032001280c52046461746112140a05696e64657818042001280d5205696e646578121e0a0a626c6f636b496e64657818062001280d520a626c6f636b496e64657812180a076f7264696e616c18072001280452076f7264696e616c22b20a0a0443616c6c12140a05696e64657818012001280d5205696e64657812210a0c706172656e745f696e64657818022001280d520b706172656e74496e64657812140a05646570746818032001280d52056465707468123a0a0963616c6c5f7479706518042001280e321d2e73662e657468657265756d2e747970652e76322e43616c6c54797065520863616c6c5479706512160a0663616c6c657218052001280c520663616c6c657212180a076164647265737318062001280c52076164647265737312310a0576616c756518072001280b321b2e73662e657468657265756d2e747970652e76322e426967496e74520576616c7565121b0a096761735f6c696d697418082001280452086761734c696d697412210a0c6761735f636f6e73756d6564180920012804520b676173436f6e73756d6564121f0a0b72657475726e5f64617461180d2001280c520a72657475726e4461746112140a05696e707574180e2001280c5205696e70757412230a0d65786563757465645f636f6465180f20012808520c6578656375746564436f646512180a077375696369646518102001280852077375696369646512590a106b656363616b5f707265696d6167657318142003280b322e2e73662e657468657265756d2e747970652e76322e43616c6c2e4b656363616b507265696d61676573456e747279520f6b656363616b507265696d61676573124b0a0f73746f726167655f6368616e67657318152003280b32222e73662e657468657265756d2e747970652e76322e53746f726167654368616e6765520e73746f726167654368616e676573124b0a0f62616c616e63655f6368616e67657318162003280b32222e73662e657468657265756d2e747970652e76322e42616c616e63654368616e6765520e62616c616e63654368616e67657312450a0d6e6f6e63655f6368616e67657318182003280b32202e73662e657468657265756d2e747970652e76322e4e6f6e63654368616e6765520c6e6f6e63654368616e676573122c0a046c6f677318192003280b32182e73662e657468657265756d2e747970652e76322
e4c6f6752046c6f677312420a0c636f64655f6368616e676573181a2003280b321f2e73662e657468657265756d2e747970652e76322e436f64654368616e6765520b636f64654368616e676573123f0a0b6761735f6368616e676573181c2003280b321e2e73662e657468657265756d2e747970652e76322e4761734368616e6765520a6761734368616e67657312230a0d7374617475735f6661696c6564180a20012808520c7374617475734661696c656412270a0f7374617475735f7265766572746564180c20012808520e737461747573526576657274656412250a0e6661696c7572655f726561736f6e180b20012809520d6661696c757265526561736f6e12250a0e73746174655f7265766572746564181e20012808520d7374617465526576657274656412230a0d626567696e5f6f7264696e616c181f20012804520c626567696e4f7264696e616c121f0a0b656e645f6f7264696e616c182020012804520a656e644f7264696e616c12510a116163636f756e745f6372656174696f6e7318212003280b32242e73662e657468657265756d2e747970652e76322e4163636f756e744372656174696f6e52106163636f756e744372656174696f6e731a420a144b656363616b507265696d61676573456e74727912100a036b657918012001280952036b657912140a0576616c7565180220012809520576616c75653a0238014a04081b101c4a04081d101e4a04083210334a04083310344a04083c103d228f010a0d53746f726167654368616e676512180a076164647265737318012001280c52076164647265737312100a036b657918022001280c52036b6579121b0a096f6c645f76616c756518032001280c52086f6c6456616c7565121b0a096e65775f76616c756518042001280c52086e657756616c756512180a076f7264696e616c18052001280452076f7264696e616c22cc050a0d42616c616e63654368616e676512180a076164647265737318012001280c52076164647265737312380a096f6c645f76616c756518022001280b321b2e73662e657468657265756d2e747970652e76322e426967496e7452086f6c6456616c756512380a096e65775f76616c756518032001280b321b2e73662e657468657265756d2e747970652e76322e426967496e7452086e657756616c756512410a06726561736f6e18042001280e32292e73662e657468657265756d2e747970652e76322e42616c616e63654368616e67652e526561736f6e5206726561736f6e12180a076f7264696e616c18052001280452076f7264696e616c22cf030a06526561736f6e12120a0e524541534f4e5f554e4b4e4f574e1000121c0a18524541534f4e5f5245574152445f4d494
e455f554e434c451001121c0a18524541534f4e5f5245574152445f4d494e455f424c4f434b1002121e0a1a524541534f4e5f44414f5f524546554e445f434f4e54524143541003121d0a19524541534f4e5f44414f5f41444a5553545f42414c414e4345100412130a0f524541534f4e5f5452414e534645521005121a0a16524541534f4e5f47454e455349535f42414c414e4345100612120a0e524541534f4e5f4741535f425559100712210a1d524541534f4e5f5245574152445f5452414e53414354494f4e5f4645451008121b0a17524541534f4e5f5245574152445f4645455f5245534554100e12150a11524541534f4e5f4741535f524546554e44100912180a14524541534f4e5f544f5543485f4143434f554e54100a12190a15524541534f4e5f535549434944455f524546554e44100b121b0a17524541534f4e5f535549434944455f5749544844524157100d12200a1c524541534f4e5f43414c4c5f42414c414e43455f4f56455252494445100c120f0a0b524541534f4e5f4255524e100f12150a11524541534f4e5f5749544844524157414c1010227b0a0b4e6f6e63654368616e676512180a076164647265737318012001280c520761646472657373121b0a096f6c645f76616c756518022001280452086f6c6456616c7565121b0a096e65775f76616c756518032001280452086e657756616c756512180a076f7264696e616c18042001280452076f7264696e616c22450a0f4163636f756e744372656174696f6e12180a076163636f756e7418012001280c52076163636f756e7412180a076f7264696e616c18022001280452076f7264696e616c22ac010a0a436f64654368616e676512180a076164647265737318012001280c52076164647265737312190a086f6c645f6861736818022001280c52076f6c644861736812190a086f6c645f636f646518032001280c52076f6c64436f646512190a086e65775f6861736818042001280c52076e65774861736812190a086e65775f636f646518052001280c52076e6577436f646512180a076f7264696e616c18062001280452076f7264696e616c22c3050a094761734368616e6765121b0a096f6c645f76616c756518012001280452086f6c6456616c7565121b0a096e65775f76616c756518022001280452086e657756616c7565123d0a06726561736f6e18032001280e32252e73662e657468657265756d2e747970652e76322e4761734368616e67652e526561736f6e5206726561736f6e12180a076f7264696e616c18042001280452076f7264696e616c22a2040a06526561736f6e12120a0e524541534f4e5f554e4b4e4f574e1000120f0a0b524541534f4e5f43414c4c100112140a10524
541534f4e5f43414c4c5f434f4445100212190a15524541534f4e5f43414c4c5f444154415f434f5059100312140a10524541534f4e5f434f44455f434f5059100412170a13524541534f4e5f434f44455f53544f524147451005121c0a18524541534f4e5f434f4e54524143545f4352454154494f4e1006121d0a19524541534f4e5f434f4e54524143545f4352454154494f4e32100712180a14524541534f4e5f44454c45474154455f43414c4c100812140a10524541534f4e5f4556454e545f4c4f47100912180a14524541534f4e5f4558545f434f44455f434f5059100a121b0a17524541534f4e5f4641494c45445f455845435554494f4e100b12180a14524541534f4e5f494e5452494e5349435f474153100c121f0a1b524541534f4e5f505245434f4d50494c45445f434f4e5452414354100d12210a1d524541534f4e5f524546554e445f41465445525f455845435554494f4e100e12110a0d524541534f4e5f52455455524e100f121b0a17524541534f4e5f52455455524e5f444154415f434f5059101012110a0d524541534f4e5f524556455254101112180a14524541534f4e5f53454c465f4445535452554354101212160a12524541534f4e5f5354415449435f43414c4c1013121c0a18524541534f4e5f53544154455f434f4c445f4143434553531014224b0a0f4865616465724f6e6c79426c6f636b12380a0668656164657218052001280b32202e73662e657468657265756d2e747970652e76322e426c6f636b486561646572520668656164657222d1010a0d426c6f636b5769746852656673120e0a0269641801200128095202696412300a05626c6f636b18022001280b321a2e73662e657468657265756d2e747970652e76322e426c6f636b5205626c6f636b125a0a167472616e73616374696f6e5f74726163655f7265667318032001280b32242e73662e657468657265756d2e747970652e76322e5472616e73616374696f6e5265667352147472616e73616374696f6e54726163655265667312220a0c697272657665727369626c65180420012808520c697272657665727369626c652297010a1c5472616e73616374696f6e547261636557697468426c6f636b526566123b0a05747261636518012001280b32252e73662e657468657265756d2e747970652e76322e5472616e73616374696f6e547261636552057472616365123a0a09626c6f636b5f72656618022001280b321d2e73662e657468657265756d2e747970652e76322e426c6f636b5265665208626c6f636b52656622290a0f5472616e73616374696f6e5265667312160a0668617368657318012003280c520668617368657322360a08426c6f636b52656612120a0468617
36818012001280c52046861736812160a066e756d62657218022001280452066e756d6265722a4e0a165472616e73616374696f6e5472616365537461747573120b0a07554e4b4e4f574e1000120d0a095355434345454445441001120a0a064641494c45441002120c0a08524556455254454410032a590a0843616c6c54797065120f0a0b554e535045434946494544100012080a0443414c4c1001120c0a0843414c4c434f44451002120c0a0844454c45474154451003120a0a065354415449431004120a0a064352454154451005424f5a4d6769746875622e636f6d2f73747265616d696e67666173742f66697265686f73652d657468657265756d2f74797065732f70622f73662f657468657265756d2f747970652f76323b7062657468620670726f746f33", + + // sf/ethereum/trxstream/v1/trxstream.proto + "0a2873662f657468657265756d2f74727873747265616d2f76312f74727873747265616d2e70726f746f121873662e657468657265756d2e74727873747265616d2e76311a1f676f6f676c652f70726f746f6275662f74696d657374616d702e70726f746f1a1e73662f657468657265756d2f747970652f76322f747970652e70726f746f22140a125472616e73616374696f6e5265717565737422a5080a105472616e73616374696f6e537461746512570a0e70726576696f75735f737461746518012001280e32302e73662e657468657265756d2e74727873747265616d2e76312e5472616e73616374696f6e53746174652e5374617465520d70726576696f7573537461746512550a0d63757272656e745f737461746518022001280e32302e73662e657468657265756d2e74727873747265616d2e76312e5472616e73616374696f6e53746174652e5374617465520c63757272656e74537461746512550a0a7472616e736974696f6e180a2001280e32352e73662e657468657265756d2e74727873747265616d2e76312e5472616e73616374696f6e53746174652e5472616e736974696f6e520a7472616e736974696f6e12120a0468617368180b2001280c52046861736812370a0374727818032001280b32252e73662e657468657265756d2e74727873747265616d2e76312e5472616e73616374696f6e520374727812430a0c626c6f636b5f68656164657218042001280b32202e73662e657468657265756d2e747970652e76322e426c6f636b486561646572520b626c6f636b48656164657212540a127472616e73616374696f6e5f74726163657318052001280b32252e73662e657468657265756d2e747970652e76322e5472616e73616374696f6e547261636552117472616e73616374696f6e54726163657312220a0c6
36f6e6669726d6174696f6e180620012804520c636f6e6669726d6174696f6e124c0a11686561645f626c6f636b5f68656164657218072001280b32202e73662e657468657265756d2e747970652e76322e426c6f636b486561646572520f68656164426c6f636b48656164657212280a107265706c616365645f62795f6861736818082001280c520e7265706c6163656442794861736812480a1270656e64696e675f66697273745f7365656e180c2001280b321a2e676f6f676c652e70726f746f6275662e54696d657374616d70521070656e64696e6746697273745365656e12460a1170656e64696e675f6c6173745f7365656e180d2001280b321a2e676f6f676c652e70726f746f6275662e54696d657374616d70520f70656e64696e674c6173745365656e229c010a0a5472616e736974696f6e120e0a0a5452414e535f494e4954100012100a0c5452414e535f504f4f4c45441001120f0a0b5452414e535f4d494e4544100212100a0c5452414e535f464f524b4544100312130a0f5452414e535f434f4e4649524d4544100412120a0e5452414e535f5245504c41434544100512200a1c5452414e535f53504543554c41544956454c595f4558454355544544100622550a05537461746512110a0d53544154455f554e4b4e4f574e100012110a0d53544154455f50454e44494e47100112120a0e53544154455f494e5f424c4f434b100212120a0e53544154455f5245504c41434544100322a5020a0b5472616e73616374696f6e120e0a02746f18012001280c5202746f12140a056e6f6e636518022001280452056e6f6e636512380a096761735f707269636518032001280b321b2e73662e657468657265756d2e747970652e76322e426967496e7452086761735072696365121b0a096761735f6c696d697418042001280452086761734c696d697412310a0576616c756518052001280b321b2e73662e657468657265756d2e747970652e76322e426967496e74520576616c756512140a05696e70757418062001280c5205696e707574120c0a017618072001280c520176120c0a017218082001280c520172120c0a017318092001280c52017312120a046861736818152001280c52046861736812120a0466726f6d18162001280c520466726f6d327a0a115472616e73616374696f6e53747265616d12650a0c5472616e73616374696f6e73122c2e73662e657468657265756d2e74727873747265616d2e76312e5472616e73616374696f6e526571756573741a252e73662e657468657265756d2e74727873747265616d2e76312e5472616e73616374696f6e3001425f5a5d6769746875622e636f6d2f73747265616d696e67666173742f66697265686f736
52d657468657265756d2d707269762f74797065732f70622f73662f657468657265756d2f74727873747265616d2f76313b706274727873747265616d620670726f746f33", + + // sf/near/transform/v1/transform.proto + "0a2473662f6e6561722f7472616e73666f726d2f76312f7472616e73666f726d2e70726f746f121473662e6e6561722e7472616e73666f726d2e7631228f010a1242617369635265636569707446696c746572121a0a086163636f756e747318012003280952086163636f756e7473125d0a177072656669785f616e645f7375666669785f706169727318022003280b32262e73662e6e6561722e7472616e73666f726d2e76312e507265666978537566666978506169725214707265666978416e64537566666978506169727322420a105072656669785375666669785061697212160a06707265666978180120012809520670726566697812160a067375666669781802200128095206737566666978220c0a0a4865616465724f6e6c79424c5a4a6769746875622e636f6d2f73747265616d696e67666173742f66697265686f73652d6e6561722f70622f73662f6e6561722f7472616e73666f726d2f76313b70627472616e73666f726d620670726f746f33", + + // sf/near/type/v1/type.proto + "0a1a73662f6e6561722f747970652f76312f747970652e70726f746f120f73662e6e6561722e747970652e7631229b020a05426c6f636b12160a06617574686f721801200128095206617574686f7212340a0668656164657218022001280b321c2e73662e6e6561722e747970652e76312e426c6f636b486561646572520668656164657212410a0d6368756e6b5f6865616465727318032003280b321c2e73662e6e6561722e747970652e76312e4368756e6b486561646572520c6368756e6b4865616465727312350a0673686172647318042003280b321d2e73662e6e6561722e747970652e76312e496e646578657253686172645206736861726473124a0a0d73746174655f6368616e67657318052003280b32252e73662e6e6561722e747970652e76312e53746174654368616e6765576974684361757365520c73746174654368616e67657322470a0f4865616465724f6e6c79426c6f636b12340a0668656164657218022001280b321c2e73662e6e6561722e747970652e76312e426c6f636b48656164657252066865616465722288010a1453746174654368616e676557697468436175736512370a0576616c756518012001280b32212e73662e6e6561722e747970652e76312e53746174654368616e676556616c7565520576616c756512370a05636175736518022001280b32212e73662e6e6561722e7
47970652e76312e53746174654368616e676543617573655205636175736522d50c0a1053746174654368616e6765436175736512660a146e6f745f7772697461626c655f746f5f6469736b18012001280b32332e73662e6e6561722e747970652e76312e53746174654368616e676543617573652e4e6f745772697461626c65546f4469736b480052116e6f745772697461626c65546f4469736b12550a0d696e697469616c5f737461746518022001280b322e2e73662e6e6561722e747970652e76312e53746174654368616e676543617573652e496e697469616c53746174654800520c696e697469616c537461746512700a167472616e73616374696f6e5f70726f63657373696e6718032001280b32372e73662e6e6561722e747970652e76312e53746174654368616e676543617573652e5472616e73616374696f6e50726f63657373696e67480052157472616e73616374696f6e50726f63657373696e67128d010a21616374696f6e5f726563656970745f70726f63657373696e675f7374617274656418042001280b32402e73662e6e6561722e747970652e76312e53746174654368616e676543617573652e416374696f6e5265636569707450726f63657373696e67537461727465644800521e616374696f6e5265636569707450726f63657373696e675374617274656412750a19616374696f6e5f726563656970745f6761735f72657761726418052001280b32382e73662e6e6561722e747970652e76312e53746174654368616e676543617573652e416374696f6e5265636569707447617352657761726448005216616374696f6e5265636569707447617352657761726412640a12726563656970745f70726f63657373696e6718062001280b32332e73662e6e6561722e747970652e76312e53746174654368616e676543617573652e5265636569707450726f63657373696e67480052117265636569707450726f63657373696e6712610a11706f7374706f6e65645f7265636569707418072001280b32322e73662e6e6561722e747970652e76312e53746174654368616e676543617573652e506f7374706f6e65645265636569707448005210706f7374706f6e65645265636569707412740a18757064617465645f64656c617965645f726563656970747318082001280b32382e73662e6e6561722e747970652e76312e53746174654368616e676543617573652e5570646174656444656c617965645265636569707473480052167570646174656444656c61796564526563656970747312770a1976616c696461746f725f6163636f756e74735f75706461746518092001280b32392e73662e6e6561722e747970652e76312e537461746543686
16e676543617573652e56616c696461746f724163636f756e74735570646174654800521776616c696461746f724163636f756e7473557064617465124b0a096d6967726174696f6e180a2001280b322b2e73662e6e6561722e747970652e76312e53746174654368616e676543617573652e4d6967726174696f6e480052096d6967726174696f6e1a130a114e6f745772697461626c65546f4469736b1a0e0a0c496e697469616c53746174651a4d0a155472616e73616374696f6e50726f63657373696e6712340a0774785f6861736818012001280b321b2e73662e6e6561722e747970652e76312e43727970746f4861736852067478486173681a600a1e416374696f6e5265636569707450726f63657373696e6753746172746564123e0a0c726563656970745f6861736818012001280b321b2e73662e6e6561722e747970652e76312e43727970746f48617368520b72656365697074486173681a4e0a16416374696f6e5265636569707447617352657761726412340a0774785f6861736818012001280b321b2e73662e6e6561722e747970652e76312e43727970746f4861736852067478486173681a490a115265636569707450726f63657373696e6712340a0774785f6861736818012001280b321b2e73662e6e6561722e747970652e76312e43727970746f4861736852067478486173681a480a10506f7374706f6e65645265636569707412340a0774785f6861736818012001280b321b2e73662e6e6561722e747970652e76312e43727970746f4861736852067478486173681a180a165570646174656444656c6179656452656365697074731a190a1756616c696461746f724163636f756e74735570646174651a0b0a094d6967726174696f6e42070a05636175736522da0b0a1053746174654368616e676556616c756512580a0e6163636f756e745f75706461746518012001280b322f2e73662e6e6561722e747970652e76312e53746174654368616e676556616c75652e4163636f756e745570646174654800520d6163636f756e74557064617465125e0a106163636f756e745f64656c6574696f6e18022001280b32312e73662e6e6561722e747970652e76312e53746174654368616e676556616c75652e4163636f756e7444656c6574696f6e4800520f6163636f756e7444656c6574696f6e125f0a116163636573735f6b65795f75706461746518032001280b32312e73662e6e6561722e747970652e76312e53746174654368616e676556616c75652e4163636573734b65795570646174654800520f6163636573734b657955706461746512650a136163636573735f6b65795f64656c6574696f6e18042001280b32332e73662e6e6561722e747
970652e76312e53746174654368616e676556616c75652e4163636573734b657944656c6574696f6e480052116163636573734b657944656c6574696f6e124f0a0b646174615f75706461746518052001280b322c2e73662e6e6561722e747970652e76312e53746174654368616e676556616c75652e446174615570646174654800520a6461746155706461746512550a0d646174615f64656c6574696f6e18062001280b322e2e73662e6e6561722e747970652e76312e53746174654368616e676556616c75652e4461746144656c6574696f6e4800520c6461746144656c6574696f6e12680a14636f6e74726163745f636f64655f75706461746518072001280b32342e73662e6e6561722e747970652e76312e53746174654368616e676556616c75652e436f6e7472616374436f646555706461746548005212636f6e7472616374436f646555706461746512650a11636f6e74726163745f64656c6574696f6e18082001280b32362e73662e6e6561722e747970652e76312e53746174654368616e676556616c75652e436f6e7472616374436f646544656c6574696f6e48005210636f6e747261637444656c6574696f6e1a620a0d4163636f756e74557064617465121d0a0a6163636f756e745f696418012001280952096163636f756e74496412320a076163636f756e7418022001280b32182e73662e6e6561722e747970652e76312e4163636f756e7452076163636f756e741a300a0f4163636f756e7444656c6574696f6e121d0a0a6163636f756e745f696418012001280952096163636f756e7449641aa6010a0f4163636573734b6579557064617465121d0a0a6163636f756e745f696418012001280952096163636f756e74496412390a0a7075626c69635f6b657918022001280b321a2e73662e6e6561722e747970652e76312e5075626c69634b657952097075626c69634b657912390a0a6163636573735f6b657918032001280b321a2e73662e6e6561722e747970652e76312e4163636573734b657952096163636573734b65791a6d0a114163636573734b657944656c6574696f6e121d0a0a6163636f756e745f696418012001280952096163636f756e74496412390a0a7075626c69635f6b657918022001280b321a2e73662e6e6561722e747970652e76312e5075626c69634b657952097075626c69634b65791a530a0a44617461557064617465121d0a0a6163636f756e745f696418012001280952096163636f756e74496412100a036b657918022001280c52036b657912140a0576616c756518032001280c520576616c75651a3f0a0c4461746144656c6574696f6e121d0a0a6163636f756e745f696418012001280952096163636f756e74496
412100a036b657918022001280c52036b65791a470a12436f6e7472616374436f6465557064617465121d0a0a6163636f756e745f696418012001280952096163636f756e74496412120a04636f646518022001280c5204636f64651a350a14436f6e7472616374436f646544656c6574696f6e121d0a0a6163636f756e745f696418012001280952096163636f756e74496442070a0576616c756522ca010a074163636f756e74122f0a06616d6f756e7418012001280b32172e73662e6e6561722e747970652e76312e426967496e745206616d6f756e74122f0a066c6f636b656418022001280b32172e73662e6e6561722e747970652e76312e426967496e7452066c6f636b656412380a09636f64655f6861736818032001280b321b2e73662e6e6561722e747970652e76312e43727970746f486173685208636f64654861736812230a0d73746f726167655f7573616765180420012804520c73746f72616765557361676522c50e0a0b426c6f636b48656164657212160a066865696768741801200128045206686569676874121f0a0b707265765f686569676874180220012804520a7072657648656967687412360a0865706f63685f696418032001280b321b2e73662e6e6561722e747970652e76312e43727970746f48617368520765706f63684964123f0a0d6e6578745f65706f63685f696418042001280b321b2e73662e6e6561722e747970652e76312e43727970746f48617368520b6e65787445706f63684964122f0a046861736818052001280b321b2e73662e6e6561722e747970652e76312e43727970746f4861736852046861736812380a09707265765f6861736818062001280b321b2e73662e6e6561722e747970652e76312e43727970746f486173685208707265764861736812430a0f707265765f73746174655f726f6f7418072001280b321b2e73662e6e6561722e747970652e76312e43727970746f48617368520d707265765374617465526f6f74124b0a136368756e6b5f72656365697074735f726f6f7418082001280b321b2e73662e6e6561722e747970652e76312e43727970746f4861736852116368756e6b5265636569707473526f6f7412490a126368756e6b5f686561646572735f726f6f7418092001280b321b2e73662e6e6561722e747970652e76312e43727970746f4861736852106368756e6b48656164657273526f6f74123f0a0d6368756e6b5f74785f726f6f74180a2001280b321b2e73662e6e6561722e747970652e76312e43727970746f48617368520b6368756e6b5478526f6f74123e0a0c6f7574636f6d655f726f6f74180b2001280b321b2e73662e6e6561722e747970652e76312e43727970746f48617368520
b6f7574636f6d65526f6f7412270a0f6368756e6b735f696e636c75646564180c20012804520e6368756e6b73496e636c7564656412440a0f6368616c6c656e6765735f726f6f74180d2001280b321b2e73662e6e6561722e747970652e76312e43727970746f48617368520e6368616c6c656e676573526f6f74121c0a0974696d657374616d70180e20012804520974696d657374616d70122b0a1174696d657374616d705f6e616e6f736563180f20012804521074696d657374616d704e616e6f736563123e0a0c72616e646f6d5f76616c756518102001280b321b2e73662e6e6561722e747970652e76312e43727970746f48617368520b72616e646f6d56616c756512500a1376616c696461746f725f70726f706f73616c7318112003280b321f2e73662e6e6561722e747970652e76312e56616c696461746f725374616b65521276616c696461746f7250726f706f73616c73121d0a0a6368756e6b5f6d61736b18122003280852096368756e6b4d61736b12340a096761735f707269636518132001280b32172e73662e6e6561722e747970652e76312e426967496e745208676173507269636512230a0d626c6f636b5f6f7264696e616c181420012804520c626c6f636b4f7264696e616c123a0a0c746f74616c5f737570706c7918152001280b32172e73662e6e6561722e747970652e76312e426967496e74520b746f74616c537570706c79124e0a116368616c6c656e6765735f726573756c7418162003280b32212e73662e6e6561722e747970652e76312e536c617368656456616c696461746f7252106368616c6c656e676573526573756c7412350a176c6173745f66696e616c5f626c6f636b5f68656967687418172001280452146c61737446696e616c426c6f636b48656967687412450a106c6173745f66696e616c5f626c6f636b18182001280b321b2e73662e6e6561722e747970652e76312e43727970746f48617368520e6c61737446696e616c426c6f636b123a0a1a6c6173745f64735f66696e616c5f626c6f636b5f68656967687418192001280452166c617374447346696e616c426c6f636b486569676874124a0a136c6173745f64735f66696e616c5f626c6f636b181a2001280b321b2e73662e6e6561722e747970652e76312e43727970746f4861736852106c617374447346696e616c426c6f636b123d0a0c6e6578745f62705f68617368181b2001280b321b2e73662e6e6561722e747970652e76312e43727970746f48617368520a6e65787442704861736812470a11626c6f636b5f6d65726b6c655f726f6f74181c2001280b321b2e73662e6e6561722e747970652e76312e43727970746f48617368520f626c6f636b4d65726b6c655
26f6f74122f0a1465706f63685f73796e635f646174615f68617368181d2001280c521165706f636853796e63446174614861736812380a09617070726f76616c73181e2003280b321a2e73662e6e6561722e747970652e76312e5369676e61747572655209617070726f76616c7312380a097369676e6174757265181f2001280b321a2e73662e6e6561722e747970652e76312e5369676e617475726552097369676e617475726512360a176c61746573745f70726f746f636f6c5f76657273696f6e18202001280d52156c617465737450726f746f636f6c56657273696f6e221e0a06426967496e7412140a05627974657318012001280c5205627974657322220a0a43727970746f4861736812140a05627974657318012001280c5205627974657322510a095369676e6174757265122e0a047479706518012001280e321a2e73662e6e6561722e747970652e76312e43757276654b696e6452047479706512140a05627974657318022001280c5205627974657322510a095075626c69634b6579122e0a047479706518012001280e321a2e73662e6e6561722e747970652e76312e43757276654b696e6452047479706512140a05627974657318022001280c520562797465732299010a0e56616c696461746f725374616b65121d0a0a6163636f756e745f696418012001280952096163636f756e74496412390a0a7075626c69635f6b657918022001280b321a2e73662e6e6561722e747970652e76312e5075626c69634b657952097075626c69634b6579122d0a057374616b6518032001280b32172e73662e6e6561722e747970652e76312e426967496e7452057374616b6522570a10536c617368656456616c696461746f72121d0a0a6163636f756e745f696418012001280952096163636f756e74496412240a0e69735f646f75626c655f7369676e180220012808520c6973446f75626c655369676e22f6050a0b4368756e6b486561646572121d0a0a6368756e6b5f6861736818012001280c52096368756e6b4861736812260a0f707265765f626c6f636b5f6861736818022001280c520d70726576426c6f636b4861736812210a0c6f7574636f6d655f726f6f7418032001280c520b6f7574636f6d65526f6f7412260a0f707265765f73746174655f726f6f7418042001280c520d707265765374617465526f6f74122e0a13656e636f6465645f6d65726b6c655f726f6f7418052001280c5211656e636f6465644d65726b6c65526f6f7412250a0e656e636f6465645f6c656e677468180620012804520d656e636f6465644c656e67746812250a0e6865696768745f63726561746564180720012804520d6865696768744372656174656412270a0f686569676
8745f696e636c75646564180820012804520e686569676874496e636c7564656412190a0873686172645f696418092001280452077368617264496412190a086761735f75736564180a20012804520767617355736564121b0a096761735f6c696d6974180b2001280452086761734c696d697412420a1076616c696461746f725f726577617264180c2001280b32172e73662e6e6561722e747970652e76312e426967496e74520f76616c696461746f72526577617264123c0a0d62616c616e63655f6275726e74180d2001280b32172e73662e6e6561722e747970652e76312e426967496e74520c62616c616e63654275726e7412340a166f7574676f696e675f72656365697074735f726f6f74180e2001280c52146f7574676f696e675265636569707473526f6f7412170a0774785f726f6f74180f2001280c52067478526f6f7412500a1376616c696461746f725f70726f706f73616c7318102003280b321f2e73662e6e6561722e747970652e76312e56616c696461746f725374616b65521276616c696461746f7250726f706f73616c7312380a097369676e617475726518112001280b321a2e73662e6e6561722e747970652e76312e5369676e617475726552097369676e617475726522d1010a0c496e6465786572536861726412190a0873686172645f696418012001280452077368617264496412330a056368756e6b18022001280b321d2e73662e6e6561722e747970652e76312e496e64657865724368756e6b52056368756e6b12710a1a726563656970745f657865637574696f6e5f6f7574636f6d657318032003280b32332e73662e6e6561722e747970652e76312e496e6465786572457865637574696f6e4f7574636f6d655769746852656365697074521872656365697074457865637574696f6e4f7574636f6d657322ae010a22496e6465786572457865637574696f6e4f7574636f6d65576974685265636569707412540a11657865637574696f6e5f6f7574636f6d6518012001280b32272e73662e6e6561722e747970652e76312e457865637574696f6e4f7574636f6d655769746849645210657865637574696f6e4f7574636f6d6512320a077265636569707418022001280b32182e73662e6e6561722e747970652e76312e5265636569707452077265636569707422e6010a0c496e64657865724368756e6b12160a06617574686f721801200128095206617574686f7212340a0668656164657218022001280b321c2e73662e6e6561722e747970652e76312e4368756e6b486561646572520668656164657212520a0c7472616e73616374696f6e7318032003280b322e2e73662e6e6561722e747970652e76312e496e64657865725472616
e73616374696f6e576974684f7574636f6d65520c7472616e73616374696f6e7312340a08726563656970747318042003280b32182e73662e6e6561722e747970652e76312e526563656970745208726563656970747322bc010a1d496e64657865725472616e73616374696f6e576974684f7574636f6d6512440a0b7472616e73616374696f6e18012001280b32222e73662e6e6561722e747970652e76312e5369676e65645472616e73616374696f6e520b7472616e73616374696f6e12550a076f7574636f6d6518022001280b323b2e73662e6e6561722e747970652e76312e496e6465786572457865637574696f6e4f7574636f6d65576974684f7074696f6e616c5265636569707452076f7574636f6d6522c0020a115369676e65645472616e73616374696f6e121b0a097369676e65725f696418012001280952087369676e6572496412390a0a7075626c69635f6b657918022001280b321a2e73662e6e6561722e747970652e76312e5075626c69634b657952097075626c69634b657912140a056e6f6e636518032001280452056e6f6e6365121f0a0b72656365697665725f6964180420012809520a7265636569766572496412310a07616374696f6e7318052003280b32172e73662e6e6561722e747970652e76312e416374696f6e5207616374696f6e7312380a097369676e617475726518062001280b321a2e73662e6e6561722e747970652e76312e5369676e617475726552097369676e6174757265122f0a046861736818072001280b321b2e73662e6e6561722e747970652e76312e43727970746f4861736852046861736822b6010a2a496e6465786572457865637574696f6e4f7574636f6d65576974684f7074696f6e616c5265636569707412540a11657865637574696f6e5f6f7574636f6d6518012001280b32272e73662e6e6561722e747970652e76312e457865637574696f6e4f7574636f6d655769746849645210657865637574696f6e4f7574636f6d6512320a077265636569707418022001280b32182e73662e6e6561722e747970652e76312e526563656970745207726563656970742286020a075265636569707412250a0e7072656465636573736f725f6964180120012809520d7072656465636573736f724964121f0a0b72656365697665725f6964180220012809520a72656365697665724964123a0a0a726563656970745f696418032001280b321b2e73662e6e6561722e747970652e76312e43727970746f48617368520972656365697074496412380a06616374696f6e180a2001280b321e2e73662e6e6561722e747970652e76312e52656365697074416374696f6e48005206616374696f6e12320a0464617461180b20012
80b321c2e73662e6e6561722e747970652e76312e5265636569707444617461480052046461746142090a077265636569707422570a0b526563656970744461746112340a07646174615f696418012001280b321b2e73662e6e6561722e747970652e76312e43727970746f48617368520664617461496412120a046461746118022001280c52046461746122f3020a0d52656365697074416374696f6e121b0a097369676e65725f696418012001280952087369676e6572496412460a117369676e65725f7075626c69635f6b657918022001280b321a2e73662e6e6561722e747970652e76312e5075626c69634b6579520f7369676e65725075626c69634b657912340a096761735f707269636518032001280b32172e73662e6e6561722e747970652e76312e426967496e745208676173507269636512510a156f75747075745f646174615f72656365697665727318042003280b321d2e73662e6e6561722e747970652e76312e44617461526563656976657252136f75747075744461746152656365697665727312410a0e696e7075745f646174615f69647318052003280b321b2e73662e6e6561722e747970652e76312e43727970746f48617368520c696e7075744461746149647312310a07616374696f6e7318062003280b32172e73662e6e6561722e747970652e76312e416374696f6e5207616374696f6e7322650a0c44617461526563656976657212340a07646174615f696418012001280b321b2e73662e6e6561722e747970652e76312e43727970746f486173685206646174614964121f0a0b72656365697665725f6964180220012809520a7265636569766572496422f1010a16457865637574696f6e4f7574636f6d6557697468496412310a0570726f6f6618012001280b321b2e73662e6e6561722e747970652e76312e4d65726b6c6550617468520570726f6f66123a0a0a626c6f636b5f6861736818022001280b321b2e73662e6e6561722e747970652e76312e43727970746f486173685209626c6f636b48617368122b0a02696418032001280b321b2e73662e6e6561722e747970652e76312e43727970746f4861736852026964123b0a076f7574636f6d6518042001280b32212e73662e6e6561722e747970652e76312e457865637574696f6e4f7574636f6d6552076f7574636f6d6522e9040a10457865637574696f6e4f7574636f6d6512120a046c6f677318012003280952046c6f6773123c0a0b726563656970745f69647318022003280b321b2e73662e6e6561722e747970652e76312e43727970746f48617368520a72656365697074496473121b0a096761735f6275726e7418032001280452086761734275726e74123a0a0c746f6b6
56e735f6275726e7418042001280b32172e73662e6e6561722e747970652e76312e426967496e74520b746f6b656e734275726e74121f0a0b6578656375746f725f6964180520012809520a6578656375746f72496412430a07756e6b6e6f776e18142001280b32272e73662e6e6561722e747970652e76312e556e6b6e6f776e457865637574696f6e53746174757348005207756e6b6e6f776e12430a076661696c75726518152001280b32272e73662e6e6561722e747970652e76312e4661696c757265457865637574696f6e537461747573480052076661696c75726512530a0d737563636573735f76616c756518162001280b322c2e73662e6e6561722e747970652e76312e5375636365737356616c7565457865637574696f6e5374617475734800520c7375636365737356616c756512600a12737563636573735f726563656970745f696418172001280b32302e73662e6e6561722e747970652e76312e53756363657373526563656970744964457865637574696f6e5374617475734800521073756363657373526563656970744964123e0a086d6574616461746118062001280e32222e73662e6e6561722e747970652e76312e457865637574696f6e4d6574616461746152086d6574616461746142080a0673746174757322330a1b5375636365737356616c7565457865637574696f6e53746174757312140a0576616c756518012001280c520576616c7565224e0a1f53756363657373526563656970744964457865637574696f6e537461747573122b0a02696418012001280b321b2e73662e6e6561722e747970652e76312e43727970746f486173685202696422180a16556e6b6e6f776e457865637574696f6e53746174757322b3010a164661696c757265457865637574696f6e53746174757312410a0c616374696f6e5f6572726f7218012001280b321c2e73662e6e6561722e747970652e76312e416374696f6e4572726f724800520b616374696f6e4572726f72124b0a10696e76616c69645f74785f6572726f7218022001280e321f2e73662e6e6561722e747970652e76312e496e76616c696454784572726f724800520e696e76616c696454784572726f7242090a076661696c75726522bc130a0b416374696f6e4572726f7212140a05696e6465781801200128045205696e64657812640a156163636f756e745f616c72656164795f657869737418152001280b322e2e73662e6e6561722e747970652e76312e4163636f756e74416c72656164794578697374734572726f724b696e64480052136163636f756e74416c7265616479457869737412640a166163636f756e745f646f65735f6e6f745f657869737418162001280b322d2e73662e6
e6561722e747970652e76312e4163636f756e74446f65734e6f7445786973744572726f724b696e64480052136163636f756e74446f65734e6f7445786973741280010a206372656174655f6163636f756e745f6f6e6c795f62795f72656769737472617218172001280b32362e73662e6e6561722e747970652e76312e4372656174654163636f756e744f6e6c7942795265676973747261724572726f724b696e644800521c6372656174654163636f756e744f6e6c79427952656769737472617212700a1a6372656174655f6163636f756e745f6e6f745f616c6c6f77656418182001280b32312e73662e6e6561722e747970652e76312e4372656174654163636f756e744e6f74416c6c6f7765644572726f724b696e64480052176372656174654163636f756e744e6f74416c6c6f776564125d0a136163746f725f6e6f5f7065726d697373696f6e18192001280b322b2e73662e6e6561722e747970652e76312e4163746f724e6f5065726d697373696f6e4572726f724b696e64480052116163746f724e6f5065726d697373696f6e126b0a1964656c6574655f6b65795f646f65735f6e6f745f6578697374181a2001280b322f2e73662e6e6561722e747970652e76312e44656c6574654b6579446f65734e6f7445786973744572726f724b696e644800521564656c6574654b6579446f65734e6f74457869737412640a166164645f6b65795f616c72656164795f657869737473181b2001280b322d2e73662e6e6561722e747970652e76312e4164644b6579416c72656164794578697374734572726f724b696e64480052136164644b6579416c726561647945786973747312660a1664656c6574655f6163636f756e745f7374616b696e67181c2001280b322e2e73662e6e6561722e747970652e76312e44656c6574654163636f756e745374616b696e674572726f724b696e644800521464656c6574654163636f756e745374616b696e6712640a166c61636b5f62616c616e63655f666f725f7374617465181d2001280b322d2e73662e6e6561722e747970652e76312e4c61636b42616c616e6365466f7253746174654572726f724b696e64480052136c61636b42616c616e6365466f72537461746512540a1074726965735f746f5f756e7374616b65181e2001280b32282e73662e6e6561722e747970652e76312e5472696573546f556e7374616b654572726f724b696e644800520e7472696573546f556e7374616b65124e0a0e74726965735f746f5f7374616b65181f2001280b32262e73662e6e6561722e747970652e76312e5472696573546f5374616b654572726f724b696e644800520c7472696573546f5374616b65125c0a12696e737566666963696
56e745f7374616b6518202001280b322b2e73662e6e6561722e747970652e76312e496e73756666696369656e745374616b654572726f724b696e6448005211696e73756666696369656e745374616b65124d0a0d66756e6374696f6e5f63616c6c18212001280b32262e73662e6e6561722e747970652e76312e46756e6374696f6e43616c6c4572726f724b696e644800520c66756e6374696f6e43616c6c12660a166e65775f726563656970745f76616c69646174696f6e18222001280b322e2e73662e6e6561722e747970652e76312e4e65775265636569707456616c69646174696f6e4572726f724b696e64480052146e65775265636569707456616c69646174696f6e1292010a266f6e6c795f696d706c696369745f6163636f756e745f6372656174696f6e5f616c6c6f77656418232001280b323c2e73662e6e6561722e747970652e76312e4f6e6c79496d706c696369744163636f756e744372656174696f6e416c6c6f7765644572726f724b696e64480052226f6e6c79496d706c696369744163636f756e744372656174696f6e416c6c6f776564127d0a1f64656c6574655f6163636f756e745f776974685f6c617267655f737461746518242001280b32352e73662e6e6561722e747970652e76312e44656c6574654163636f756e74576974684c6172676553746174654572726f724b696e644800521b64656c6574654163636f756e74576974684c6172676553746174651280010a2164656c65676174655f616374696f6e5f696e76616c69645f7369676e617475726518252001280b32332e73662e6e6561722e747970652e76312e44656c6567617465416374696f6e496e76616c69645369676e61747572654b696e644800521e64656c6567617465416374696f6e496e76616c69645369676e617475726512a8010a3164656c65676174655f616374696f6e5f73656e6465725f646f65735f6e6f745f6d617463685f74785f726563656976657218262001280b323f2e73662e6e6561722e747970652e76312e44656c6567617465416374696f6e53656e646572446f65734e6f744d61746368547852656365697665724b696e644800522a64656c6567617465416374696f6e53656e646572446f65734e6f744d617463685478526563656976657212640a1764656c65676174655f616374696f6e5f6578706972656418272001280b322a2e73662e6e6561722e747970652e76312e44656c6567617465416374696f6e457870697265644b696e644800521564656c6567617465416374696f6e45787069726564127b0a2064656c65676174655f616374696f6e5f6163636573735f6b65795f6572726f7218282001280b32312e73662e6e6561722e7479706
52e76312e44656c6567617465416374696f6e4163636573734b65794572726f724b696e644800521c64656c6567617465416374696f6e4163636573734b65794572726f7212740a1d64656c65676174655f616374696f6e5f696e76616c69645f6e6f6e636518292001280b322f2e73662e6e6561722e747970652e76312e44656c6567617465416374696f6e496e76616c69644e6f6e63654b696e644800521a64656c6567617465416374696f6e496e76616c69644e6f6e636512780a1f64656c65676174655f616374696f6e5f6e6f6e63655f746f6f5f6c61726765182a2001280b32302e73662e6e6561722e747970652e76312e44656c6567617465416374696f6e4e6f6e6365546f6f4c617267654b696e644800521b64656c6567617465416374696f6e4e6f6e6365546f6f4c6172676542060a046b696e64223e0a1d4163636f756e74416c72656164794578697374734572726f724b696e64121d0a0a6163636f756e745f696418012001280952096163636f756e744964223d0a1c4163636f756e74446f65734e6f7445786973744572726f724b696e64121d0a0a6163636f756e745f696418012001280952096163636f756e744964229f010a254372656174654163636f756e744f6e6c7942795265676973747261724572726f724b696e64121d0a0a6163636f756e745f696418012001280952096163636f756e74496412300a147265676973747261725f6163636f756e745f696418022001280952127265676973747261724163636f756e74496412250a0e7072656465636573736f725f6964180320012809520d7072656465636573736f72496422680a204372656174654163636f756e744e6f74416c6c6f7765644572726f724b696e64121d0a0a6163636f756e745f696418012001280952096163636f756e74496412250a0e7072656465636573736f725f6964180220012809520d7072656465636573736f72496422560a1a4163746f724e6f5065726d697373696f6e4572726f724b696e64121d0a0a6163636f756e745f696418012001280952096163636f756e74496412190a086163746f725f696418022001280952076163746f724964227a0a1e44656c6574654b6579446f65734e6f7445786973744572726f724b696e64121d0a0a6163636f756e745f696418012001280952096163636f756e74496412390a0a7075626c69635f6b657918022001280b321a2e73662e6e6561722e747970652e76312e5075626c69634b657952097075626c69634b657922780a1c4164644b6579416c72656164794578697374734572726f724b696e64121d0a0a6163636f756e745f696418012001280952096163636f756e74496412390a0a7075626c69635f6b657
918022001280b321a2e73662e6e6561722e747970652e76312e5075626c69634b657952097075626c69634b6579223e0a1d44656c6574654163636f756e745374616b696e674572726f724b696e64121d0a0a6163636f756e745f696418012001280952096163636f756e74496422700a1c4c61636b42616c616e6365466f7253746174654572726f724b696e64121d0a0a6163636f756e745f696418012001280952096163636f756e74496412310a0762616c616e636518022001280b32172e73662e6e6561722e747970652e76312e426967496e74520762616c616e636522380a175472696573546f556e7374616b654572726f724b696e64121d0a0a6163636f756e745f696418012001280952096163636f756e74496422c9010a155472696573546f5374616b654572726f724b696e64121d0a0a6163636f756e745f696418012001280952096163636f756e744964122d0a057374616b6518022001280b32172e73662e6e6561722e747970652e76312e426967496e7452057374616b65122f0a066c6f636b656418032001280b32172e73662e6e6561722e747970652e76312e426967496e7452066c6f636b656412310a0762616c616e636518042001280b32172e73662e6e6561722e747970652e76312e426967496e74520762616c616e636522a8010a1a496e73756666696369656e745374616b654572726f724b696e64121d0a0a6163636f756e745f696418012001280952096163636f756e744964122d0a057374616b6518022001280b32172e73662e6e6561722e747970652e76312e426967496e7452057374616b65123c0a0d6d696e696d756d5f7374616b6518032001280b32172e73662e6e6561722e747970652e76312e426967496e74520c6d696e696d756d5374616b6522540a1546756e6374696f6e43616c6c4572726f724b696e64123b0a056572726f7218012001280e32252e73662e6e6561722e747970652e76312e46756e6374696f6e43616c6c4572726f7253657252056572726f72225e0a1d4e65775265636569707456616c69646174696f6e4572726f724b696e64123d0a056572726f7218012001280e32272e73662e6e6561722e747970652e76312e5265636569707456616c69646174696f6e4572726f7252056572726f72224c0a2b4f6e6c79496d706c696369744163636f756e744372656174696f6e416c6c6f7765644572726f724b696e64121d0a0a6163636f756e745f696418012001280952096163636f756e74496422450a2444656c6574654163636f756e74576974684c6172676553746174654572726f724b696e64121d0a0a6163636f756e745f696418012001280952096163636f756e74496422240a2244656c65676174654
16374696f6e496e76616c69645369676e61747572654b696e64226e0a2e44656c6567617465416374696f6e53656e646572446f65734e6f744d61746368547852656365697665724b696e64121b0a0973656e6465725f6964180120012809520873656e6465724964121f0a0b72656365697665725f6964180220012809520a72656365697665724964221b0a1944656c6567617465416374696f6e457870697265644b696e6422590a2044656c6567617465416374696f6e4163636573734b65794572726f724b696e6412350a056572726f7218012001280e321f2e73662e6e6561722e747970652e76312e496e76616c696454784572726f7252056572726f7222620a1e44656c6567617465416374696f6e496e76616c69644e6f6e63654b696e6412250a0e64656c65676174655f6e6f6e6365180120012804520d64656c65676174654e6f6e636512190a08616b5f6e6f6e63651802200128045207616b4e6f6e636522690a1f44656c6567617465416374696f6e4e6f6e6365546f6f4c617267654b696e6412250a0e64656c65676174655f6e6f6e6365180120012804520d64656c65676174654e6f6e6365121f0a0b75707065725f626f756e64180220012804520a7570706572426f756e6422410a0a4d65726b6c655061746812330a047061746818012003280b321f2e73662e6e6561722e747970652e76312e4d65726b6c65506174684974656d520470617468227b0a0e4d65726b6c65506174684974656d122f0a046861736818012001280b321b2e73662e6e6561722e747970652e76312e43727970746f4861736852046861736812380a09646972656374696f6e18022001280e321a2e73662e6e6561722e747970652e76312e446972656374696f6e5209646972656374696f6e2285050a06416374696f6e124d0a0e6372656174655f6163636f756e7418012001280b32242e73662e6e6561722e747970652e76312e4372656174654163636f756e74416374696f6e4800520d6372656174654163636f756e7412500a0f6465706c6f795f636f6e747261637418022001280b32252e73662e6e6561722e747970652e76312e4465706c6f79436f6e7472616374416374696f6e4800520e6465706c6f79436f6e7472616374124a0a0d66756e6374696f6e5f63616c6c18032001280b32232e73662e6e6561722e747970652e76312e46756e6374696f6e43616c6c416374696f6e4800520c66756e6374696f6e43616c6c123d0a087472616e7366657218042001280b321f2e73662e6e6561722e747970652e76312e5472616e73666572416374696f6e480052087472616e7366657212340a057374616b6518052001280b321c2e73662e6e6561722e747970652e763
12e5374616b65416374696f6e480052057374616b6512380a076164645f6b657918062001280b321d2e73662e6e6561722e747970652e76312e4164644b6579416374696f6e480052066164644b657912410a0a64656c6574655f6b657918072001280b32202e73662e6e6561722e747970652e76312e44656c6574654b6579416374696f6e4800520964656c6574654b6579124d0a0e64656c6574655f6163636f756e7418082001280b32242e73662e6e6561722e747970652e76312e44656c6574654163636f756e74416374696f6e4800520d64656c6574654163636f756e7412430a0864656c656761746518092001280b32252e73662e6e6561722e747970652e76312e5369676e656444656c6567617465416374696f6e4800520864656c656761746542080a06616374696f6e22150a134372656174654163636f756e74416374696f6e222a0a144465706c6f79436f6e7472616374416374696f6e12120a04636f646518012001280c5204636f6465228e010a1246756e6374696f6e43616c6c416374696f6e121f0a0b6d6574686f645f6e616d65180120012809520a6d6574686f644e616d6512120a046172677318022001280c52046172677312100a03676173180320012804520367617312310a076465706f73697418042001280b32172e73662e6e6561722e747970652e76312e426967496e7452076465706f73697422430a0e5472616e73666572416374696f6e12310a076465706f73697418012001280b32172e73662e6e6561722e747970652e76312e426967496e7452076465706f73697422770a0b5374616b65416374696f6e122d0a057374616b6518012001280b32172e73662e6e6561722e747970652e76312e426967496e7452057374616b6512390a0a7075626c69635f6b657918022001280b321a2e73662e6e6561722e747970652e76312e5075626c69634b657952097075626c69634b65792284010a0c4164644b6579416374696f6e12390a0a7075626c69635f6b657918012001280b321a2e73662e6e6561722e747970652e76312e5075626c69634b657952097075626c69634b657912390a0a6163636573735f6b657918022001280b321a2e73662e6e6561722e747970652e76312e4163636573734b657952096163636573734b6579224c0a0f44656c6574654b6579416374696f6e12390a0a7075626c69635f6b657918012001280b321a2e73662e6e6561722e747970652e76312e5075626c69634b657952097075626c69634b6579223c0a1344656c6574654163636f756e74416374696f6e12250a0e62656e65666963696172795f6964180120012809520d62656e65666963696172794964229a010a145369676e656444656c656761746
5416374696f6e12380a097369676e617475726518012001280b321a2e73662e6e6561722e747970652e76312e5369676e617475726552097369676e617475726512480a0f64656c65676174655f616374696f6e18022001280b321f2e73662e6e6561722e747970652e76312e44656c6567617465416374696f6e520e64656c6567617465416374696f6e22fc010a0e44656c6567617465416374696f6e121b0a0973656e6465725f6964180120012809520873656e6465724964121f0a0b72656365697665725f6964180220012809520a7265636569766572496412310a07616374696f6e7318032003280b32172e73662e6e6561722e747970652e76312e416374696f6e5207616374696f6e7312140a056e6f6e636518042001280452056e6f6e636512280a106d61785f626c6f636b5f686569676874180520012804520e6d6178426c6f636b48656967687412390a0a7075626c69635f6b657918062001280b321a2e73662e6e6561722e747970652e76312e5075626c69634b657952097075626c69634b657922670a094163636573734b657912140a056e6f6e636518012001280452056e6f6e636512440a0a7065726d697373696f6e18022001280b32242e73662e6e6561722e747970652e76312e4163636573734b65795065726d697373696f6e520a7065726d697373696f6e22bd010a134163636573734b65795065726d697373696f6e124e0a0d66756e6374696f6e5f63616c6c18012001280b32272e73662e6e6561722e747970652e76312e46756e6374696f6e43616c6c5065726d697373696f6e4800520c66756e6374696f6e43616c6c12480a0b66756c6c5f61636365737318022001280b32252e73662e6e6561722e747970652e76312e46756c6c4163636573735065726d697373696f6e4800520a66756c6c416363657373420c0a0a7065726d697373696f6e2293010a1646756e6374696f6e43616c6c5065726d697373696f6e12350a09616c6c6f77616e636518012001280b32172e73662e6e6561722e747970652e76312e426967496e745209616c6c6f77616e6365121f0a0b72656365697665725f6964180220012809520a7265636569766572496412210a0c6d6574686f645f6e616d6573180320032809520b6d6574686f644e616d657322160a1446756c6c4163636573735065726d697373696f6e2a270a0943757276654b696e64120b0a07454432353531391000120d0a09534543503235364b3110012a2c0a11457865637574696f6e4d6574616461746112170a13457865637574696f6e4d65746164617461563110002aa9010a1446756e6374696f6e43616c6c4572726f7253657212140a10436f6d70696c6174696f6e4572726f721000120
d0a094c696e6b4572726f72100112160a124d6574686f645265736f6c76654572726f721002120c0a085761736d54726170100312140a105761736d556e6b6e6f776e4572726f721004120d0a09486f73744572726f721005120d0a095f45564d4572726f72100612120a0e457865637574696f6e4572726f7210072aed010a165265636569707456616c69646174696f6e4572726f7212180a14496e76616c69645072656465636573736f7249641000121c0a18496e76616c696452656365697665724163636f756e7449641001121a0a16496e76616c69645369676e65724163636f756e744964100212190a15496e76616c696444617461526563656976657249641003121f0a1b52657475726e656456616c75654c656e6774684578636565646564100412270a234e756d626572496e70757444617461446570656e64656e6369657345786365656465641005121a0a16416374696f6e7356616c69646174696f6e4572726f7210062abe020a0e496e76616c696454784572726f7212190a15496e76616c69644163636573734b65794572726f72100012130a0f496e76616c69645369676e65724964100112160a125369676e6572446f65734e6f744578697374100212100a0c496e76616c69644e6f6e6365100312110a0d4e6f6e6365546f6f4c61726765100412150a11496e76616c696452656365697665724964100512140a10496e76616c69645369676e6174757265100612140a104e6f74456e6f75676842616c616e6365100712170a134c61636b42616c616e6365466f725374617465100812100a0c436f73744f766572666c6f77100912100a0c496e76616c6964436861696e100a120b0a0745787069726564100b12150a11416374696f6e7356616c69646174696f6e100c121b0a175472616e73616374696f6e53697a654578636565646564100d2a200a09446972656374696f6e12080a046c656674100012090a057269676874100142425a406769746875622e636f6d2f73747265616d696e67666173742f66697265686f73652d6e6561722f70622f73662f6e6561722f747970652f76313b70626e656172620670726f746f33", + + // sf/solana/transforms/v1/transforms.proto + 
"0a2873662f736f6c616e612f7472616e73666f726d732f76312f7472616e73666f726d732e70726f746f121773662e736f6c616e612e7472616e73666f726d732e763122300a0d50726f6772616d46696c746572121f0a0b70726f6772616d5f696473180120032809520a70726f6772616d49647342585a566769746875622e636f6d2f73747265616d696e67666173742f66697265686f73652d736f6c616e612f74797065732f70622f73662f736f6c616e612f7472616e73666f726d732f76313b70627472616e73666f726d73620670726f746f33", + + // sf/solana/type/v1/type.proto + "0a1c73662f736f6c616e612f747970652f76312f747970652e70726f746f121173662e736f6c616e612e747970652e7631228f030a05426c6f636b122d0a1270726576696f75735f626c6f636b68617368180120012809521170726576696f7573426c6f636b68617368121c0a09626c6f636b686173681802200128095209626c6f636b68617368121f0a0b706172656e745f736c6f74180320012804520a706172656e74536c6f74124b0a0c7472616e73616374696f6e7318042003280b32272e73662e736f6c616e612e747970652e76312e436f6e6669726d65645472616e73616374696f6e520c7472616e73616374696f6e7312330a077265776172647318052003280b32192e73662e736f6c616e612e747970652e76312e526577617264520772657761726473123f0a0a626c6f636b5f74696d6518062001280b32202e73662e736f6c616e612e747970652e76312e556e697854696d657374616d705209626c6f636b54696d6512410a0c626c6f636b5f68656967687418072001280b321e2e73662e736f6c616e612e747970652e76312e426c6f636b486569676874520b626c6f636b48656967687412120a04736c6f741814200128045204736c6f742296010a14436f6e6669726d65645472616e73616374696f6e12400a0b7472616e73616374696f6e18012001280b321e2e73662e736f6c616e612e747970652e76312e5472616e73616374696f6e520b7472616e73616374696f6e123c0a046d65746118022001280b32282e73662e736f6c616e612e747970652e76312e5472616e73616374696f6e5374617475734d65746152046d65746122630a0b5472616e73616374696f6e121e0a0a7369676e61747572657318012003280c520a7369676e61747572657312340a076d65737361676518022001280b321a2e73662e736f6c616e612e747970652e76312e4d65737361676552076d65737361676522dd020a074d65737361676512380a0668656164657218012001280b32202e73662e736f6c616e612e747970652e76312e4d65737361676548656
1646572520668656164657212210a0c6163636f756e745f6b65797318022003280c520b6163636f756e744b65797312290a10726563656e745f626c6f636b6861736818032001280c520f726563656e74426c6f636b68617368124a0a0c696e737472756374696f6e7318042003280b32262e73662e736f6c616e612e747970652e76312e436f6d70696c6564496e737472756374696f6e520c696e737472756374696f6e73121c0a0976657273696f6e6564180520012808520976657273696f6e656412600a15616464726573735f7461626c655f6c6f6f6b75707318062003280b322c2e73662e736f6c616e612e747970652e76312e4d657373616765416464726573735461626c654c6f6f6b75705213616464726573735461626c654c6f6f6b75707322cd010a0d4d65737361676548656164657212360a176e756d5f72657175697265645f7369676e61747572657318012001280d52156e756d52657175697265645369676e617475726573123f0a1c6e756d5f726561646f6e6c795f7369676e65645f6163636f756e747318022001280d52196e756d526561646f6e6c795369676e65644163636f756e747312430a1e6e756d5f726561646f6e6c795f756e7369676e65645f6163636f756e747318032001280d521b6e756d526561646f6e6c79556e7369676e65644163636f756e74732292010a194d657373616765416464726573735461626c654c6f6f6b7570121f0a0b6163636f756e745f6b657918012001280c520a6163636f756e744b657912290a107772697461626c655f696e646578657318022001280c520f7772697461626c65496e646578657312290a10726561646f6e6c795f696e646578657318032001280c520f726561646f6e6c79496e64657865732291070a155472616e73616374696f6e5374617475734d65746112350a0365727218012001280b32232e73662e736f6c616e612e747970652e76312e5472616e73616374696f6e4572726f72520365727212100a03666565180220012804520366656512210a0c7072655f62616c616e636573180320032804520b70726542616c616e63657312230a0d706f73745f62616c616e636573180420032804520c706f737442616c616e63657312530a12696e6e65725f696e737472756374696f6e7318052003280b32242e73662e736f6c616e612e747970652e76312e496e6e6572496e737472756374696f6e735211696e6e6572496e737472756374696f6e7312360a17696e6e65725f696e737472756374696f6e735f6e6f6e65180a200128085215696e6e6572496e737472756374696f6e734e6f6e6512210a0c6c6f675f6d65737361676573180620032809520b6c6f674d65737361676573122a0
a116c6f675f6d657373616765735f6e6f6e65180b20012808520f6c6f674d657373616765734e6f6e65124d0a127072655f746f6b656e5f62616c616e63657318072003280b321f2e73662e736f6c616e612e747970652e76312e546f6b656e42616c616e63655210707265546f6b656e42616c616e636573124f0a13706f73745f746f6b656e5f62616c616e63657318082003280b321f2e73662e736f6c616e612e747970652e76312e546f6b656e42616c616e63655211706f7374546f6b656e42616c616e63657312330a077265776172647318092003280b32192e73662e736f6c616e612e747970652e76312e526577617264520772657761726473123a0a196c6f616465645f7772697461626c655f616464726573736573180c2003280c52176c6f616465645772697461626c65416464726573736573123a0a196c6f616465645f726561646f6e6c795f616464726573736573180d2003280c52176c6f61646564526561646f6e6c79416464726573736573123e0a0b72657475726e5f64617461180e2001280b321d2e73662e736f6c616e612e747970652e76312e52657475726e44617461520a72657475726e4461746112280a1072657475726e5f646174615f6e6f6e65180f20012808520e72657475726e446174614e6f6e6512390a16636f6d707574655f756e6974735f636f6e73756d656418102001280448005214636f6d70757465556e697473436f6e73756d656488010142190a175f636f6d707574655f756e6974735f636f6e73756d656422240a105472616e73616374696f6e4572726f7212100a0365727218012001280c520365727222720a11496e6e6572496e737472756374696f6e7312140a05696e64657818012001280d5205696e64657812470a0c696e737472756374696f6e7318022003280b32232e73662e736f6c616e612e747970652e76312e496e6e6572496e737472756374696f6e520c696e737472756374696f6e7322a5010a10496e6e6572496e737472756374696f6e12280a1070726f6772616d5f69645f696e64657818012001280d520e70726f6772616d4964496e646578121a0a086163636f756e747318022001280c52086163636f756e747312120a046461746118032001280c52046461746112260a0c737461636b5f68656967687418042001280d4800520b737461636b486569676874880101420f0a0d5f737461636b5f686569676874226f0a13436f6d70696c6564496e737472756374696f6e12280a1070726f6772616d5f69645f696e64657818012001280d520e70726f6772616d4964496e646578121a0a086163636f756e747318022001280c52086163636f756e747312120a046461746118032001280c520464617
46122c6010a0c546f6b656e42616c616e636512230a0d6163636f756e745f696e64657818012001280d520c6163636f756e74496e64657812120a046d696e7418022001280952046d696e7412480a0f75695f746f6b656e5f616d6f756e7418032001280b32202e73662e736f6c616e612e747970652e76312e5569546f6b656e416d6f756e74520d7569546f6b656e416d6f756e7412140a056f776e657218042001280952056f776e6572121d0a0a70726f6772616d5f6964180520012809520970726f6772616d4964228a010a0d5569546f6b656e416d6f756e74121b0a0975695f616d6f756e7418012001280152087569416d6f756e74121a0a08646563696d616c7318022001280d5208646563696d616c7312160a06616d6f756e741803200128095206616d6f756e7412280a1075695f616d6f756e745f737472696e67180420012809520e7569416d6f756e74537472696e67223f0a0a52657475726e44617461121d0a0a70726f6772616d5f696418012001280c520970726f6772616d496412120a046461746118022001280c52046461746122bf010a0652657761726412160a067075626b657918012001280952067075626b6579121a0a086c616d706f72747318022001280352086c616d706f72747312210a0c706f73745f62616c616e6365180320012804520b706f737442616c616e6365123e0a0b7265776172645f7479706518042001280e321d2e73662e736f6c616e612e747970652e76312e52657761726454797065520a72657761726454797065121e0a0a636f6d6d697373696f6e180520012809520a636f6d6d697373696f6e223e0a075265776172647312330a077265776172647318012003280b32192e73662e736f6c616e612e747970652e76312e526577617264520772657761726473222d0a0d556e697854696d657374616d70121c0a0974696d657374616d70180120012803520974696d657374616d7022300a0b426c6f636b48656967687412210a0c626c6f636b5f686569676874180120012804520b626c6f636b4865696768742a490a0a52657761726454797065120f0a0b556e737065636966696564100012070a03466565100112080a0452656e741002120b0a075374616b696e671003120a0a06566f74696e671004424b5a496769746875622e636f6d2f73747265616d696e67666173742f66697265686f73652d736f6c616e612f74797065732f70622f73662f736f6c616e612f747970652f76313b7062736f6c620670726f746f33", + + // sf/solana/type/v2/type.proto + 
"0a1c73662f736f6c616e612f747970652f76322f747970652e70726f746f121173662e736f6c616e612e747970652e76322286040a05426c6f636b120e0a02696418012001280c5202696412160a066e756d62657218022001280452066e756d62657212180a0776657273696f6e18032001280d520776657273696f6e121f0a0b70726576696f75735f696418042001280c520a70726576696f7573496412250a0e70726576696f75735f626c6f636b180520012804520d70726576696f7573426c6f636b12340a1667656e657369735f756e69785f74696d657374616d70180620012804521467656e65736973556e697854696d657374616d7012300a14636c6f636b5f756e69785f74696d657374616d701807200128045212636c6f636b556e697854696d657374616d7012260a0f6c6173745f656e7472795f6861736818082001280c520d6c617374456e7472794861736812420a0c7472616e73616374696f6e7318092003280b321e2e73662e736f6c616e612e747970652e76322e5472616e73616374696f6e520c7472616e73616374696f6e73122b0a117472616e73616374696f6e5f636f756e74180a2001280d52107472616e73616374696f6e436f756e7412390a196861735f73706c69745f6163636f756e745f6368616e676573180b20012808521668617353706c69744163636f756e744368616e67657312370a186163636f756e745f6368616e6765735f66696c655f726566180c2001280952156163636f756e744368616e67657346696c65526566224b0a05426174636812420a0c7472616e73616374696f6e7318012003280b321e2e73662e736f6c616e612e747970652e76322e5472616e73616374696f6e520c7472616e73616374696f6e7322cd030a0b5472616e73616374696f6e120e0a02696418012001280c5202696412140a05696e6465781802200128045205696e64657812330a156164646974696f6e616c5f7369676e61747572657318032003280c52146164646974696f6e616c5369676e61747572657312380a0668656164657218042001280b32202e73662e736f6c616e612e747970652e76322e4d657373616765486561646572520668656164657212210a0c6163636f756e745f6b65797318052003280c520b6163636f756e744b65797312290a10726563656e745f626c6f636b6861736818062001280c520f726563656e74426c6f636b6861736812420a0c696e737472756374696f6e7318072003280b321e2e73662e736f6c616e612e747970652e76322e496e737472756374696f6e520c696e737472756374696f6e7312160a066661696c656418082001280852066661696c656412390a056572726f7218092001280b32232
e73662e736f6c616e612e747970652e76322e5472616e73616374696f6e4572726f7252056572726f7212230a0d626567696e5f6f7264696e616c180a20012804520c626567696e4f7264696e616c121f0a0b656e645f6f7264696e616c180b20012804520a656e644f7264696e616c22cd010a0d4d65737361676548656164657212360a176e756d5f72657175697265645f7369676e61747572657318012001280d52156e756d52657175697265645369676e617475726573123f0a1c6e756d5f726561646f6e6c795f7369676e65645f6163636f756e747318022001280d52196e756d526561646f6e6c795369676e65644163636f756e747312430a1e6e756d5f726561646f6e6c795f756e7369676e65645f6163636f756e747318032001280d521b6e756d526561646f6e6c79556e7369676e65644163636f756e7473228d040a0b496e737472756374696f6e121d0a0a70726f6772616d5f696418032001280c520970726f6772616d496412210a0c6163636f756e745f6b65797318042003280c520b6163636f756e744b65797312120a046461746118052001280c52046461746112140a05696e64657818062001280d5205696e64657812210a0c706172656e745f696e64657818072001280d520b706172656e74496e64657812140a05646570746818082001280d5205646570746812490a0f62616c616e63655f6368616e67657318092003280b32202e73662e736f6c616e612e747970652e76322e42616c616e63654368616e6765520e62616c616e63654368616e67657312490a0f6163636f756e745f6368616e676573180a2003280b32202e73662e736f6c616e612e747970652e76322e4163636f756e744368616e6765520e6163636f756e744368616e676573122a0a046c6f6773180b2003280b32162e73662e736f6c616e612e747970652e76322e4c6f6752046c6f677312160a066661696c6564180f2001280852066661696c656412390a056572726f7218102001280b32232e73662e736f6c616e612e747970652e76322e496e737472756374696f6e4572726f7252056572726f7212230a0d626567696e5f6f7264696e616c181120012804520c626567696e4f7264696e616c121f0a0b656e645f6f7264696e616c181220012804520a656e644f7264696e616c226f0a0d42616c616e63654368616e676512160a067075626b657918012001280c52067075626b657912230a0d707265765f6c616d706f727473180220012804520c707265764c616d706f72747312210a0c6e65775f6c616d706f727473180320012804520b6e65774c616d706f7274732287010a0d4163636f756e744368616e676512160a067075626b657918012001280c52067075626
b6579121b0a09707265765f6461746118022001280c5208707265764461746112190a086e65775f6461746118032001280c52076e65774461746112260a0f6e65775f646174615f6c656e677468180420012804520d6e6577446174614c656e67746822390a034c6f6712180a076d65737361676518012001280952076d65737361676512180a076f7264696e616c18022001280452076f7264696e616c22280a105472616e73616374696f6e4572726f7212140a056572726f7218022001280952056572726f7222330a1b5472616e73616374696f6e496e737472756374696f6e4572726f7212140a056572726f7218022001280952056572726f7222280a10496e737472756374696f6e4572726f7212140a056572726f7218022001280952056572726f72222e0a16496e737472756374696f6e4572726f72437573746f6d12140a056572726f7218022001280952056572726f72424b5a496769746875622e636f6d2f73747265616d696e67666173742f66697265686f73652d736f6c616e612f74797065732f70622f73662f736f6c616e612f747970652f76323b7062736f6c620670726f746f33", + + } + + var files []*descriptorpb.FileDescriptorProto + for _, protoFile := range protoFiles { + files = append(files, mustProtoToFileDescriptor(protoFile)) + } + + fdmap, err := desc.CreateFileDescriptors(files) + if err != nil { + panic(fmt.Errorf("failed to create file descriptor map: %w", err)) + return + } + + for _, fd := range fdmap { + WellKnownRegistry.RegisterFileDescriptor(fd) + } +} + +func mustProtoToFileDescriptor(in string) *descriptorpb.FileDescriptorProto { + protoBytes, err := hex.DecodeString(in) + if err != nil { + panic(fmt.Errorf("failed to hex decode payload: %w", err)) + } + out := &descriptorpb.FileDescriptorProto{} + if err := proto.Unmarshal(protoBytes, out); err != nil { + panic(fmt.Errorf("failed to unmarshal file descriptor: %w", err)) + } + return out +} diff --git a/types/block_range_enum.go b/types/block_range_enum.go index c7d4c86..187a21e 100644 --- a/types/block_range_enum.go +++ b/types/block_range_enum.go @@ -18,8 +18,6 @@ const ( RangeBoundaryExclusive ) -var ErrInvalidRangeBoundary = fmt.Errorf("not a valid RangeBoundary, try [%s]", strings.Join(_RangeBoundaryNames, ", ")) - const 
_RangeBoundaryName = "InclusiveExclusive" var _RangeBoundaryNames = []string{ @@ -47,13 +45,6 @@ func (x RangeBoundary) String() string { return fmt.Sprintf("RangeBoundary(%d)", x) } -// IsValid provides a quick way to determine if the typed value is -// part of the allowed enumerated values -func (x RangeBoundary) IsValid() bool { - _, ok := _RangeBoundaryMap[x] - return ok -} - var _RangeBoundaryValue = map[string]RangeBoundary{ _RangeBoundaryName[0:9]: RangeBoundaryInclusive, strings.ToLower(_RangeBoundaryName[0:9]): RangeBoundaryInclusive, @@ -70,7 +61,7 @@ func ParseRangeBoundary(name string) (RangeBoundary, error) { if x, ok := _RangeBoundaryValue[strings.ToLower(name)]; ok { return x, nil } - return RangeBoundary(0), fmt.Errorf("%s is %w", name, ErrInvalidRangeBoundary) + return RangeBoundary(0), fmt.Errorf("%s is not a valid RangeBoundary, try [%s]", name, strings.Join(_RangeBoundaryNames, ", ")) } // MarshalText implements the text marshaller method. From 126fe12b73c712920b80c96431b603c9c9a871bc Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Tue, 5 Dec 2023 14:16:37 -0500 Subject: [PATCH 52/66] Clean up marshaller added hex support --- cmd/tools/print/tools_print.go | 6 +--- go.mod | 6 ++-- go.sum | 10 ++----- jsonencoder/encoder.go | 9 ++---- jsonencoder/marshallers.go | 45 ++++++++++++++++++++++++++++ jsonencoder/options.go | 32 -------------------- jsonencoder/proto.go | 21 ------------- protoregistry/generator/generator.go | 2 +- protoregistry/registry.go | 10 ++++--- unsafe_extensions.go | 4 --- 10 files changed, 60 insertions(+), 85 deletions(-) create mode 100644 jsonencoder/marshallers.go delete mode 100644 jsonencoder/options.go delete mode 100644 jsonencoder/proto.go diff --git a/cmd/tools/print/tools_print.go b/cmd/tools/print/tools_print.go index 6f35031..1b99e17 100644 --- a/cmd/tools/print/tools_print.go +++ b/cmd/tools/print/tools_print.go @@ -277,9 +277,5 @@ func SetupJsonEncoder(cmd *cobra.Command) (*jsonencoder.Encoder, error) { } 
pbregistry.Extends(protoregistry.WellKnownRegistry) - - options := []jsonencoder.Option{ - jsonencoder.WithBytesAsHex(), - } - return jsonencoder.New(pbregistry, options...), nil + return jsonencoder.New(pbregistry), nil } diff --git a/go.mod b/go.mod index 6adc290..fe5616a 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ go 1.21 require ( buf.build/gen/go/bufbuild/reflect/connectrpc/go v1.12.0-20230822193137-310c9c4845dd.1 buf.build/gen/go/bufbuild/reflect/protocolbuffers/go v1.31.0-20230822193137-310c9c4845dd.2 + connectrpc.com/connect v1.12.0 github.com/ShinyTrinkets/overseer v0.3.0 github.com/dustin/go-humanize v1.0.1 github.com/go-json-experiment/json v0.0.0-20231013223334-54c864be5b8d @@ -40,7 +41,6 @@ require ( ) require ( - connectrpc.com/connect v1.12.0 // indirect github.com/bufbuild/protocompile v0.4.0 // indirect github.com/google/s2a-go v0.1.4 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect @@ -75,7 +75,7 @@ require ( github.com/bits-and-blooms/bitset v1.3.1 // indirect github.com/blendle/zapdriver v1.3.2-0.20200203083823-9200777f8a3d // indirect github.com/bobg/go-generics/v2 v2.1.1 // indirect - github.com/bufbuild/connect-go v1.10.0 + github.com/bufbuild/connect-go v1.10.0 // indirect github.com/bufbuild/connect-grpchealth-go v1.1.1 // indirect github.com/bufbuild/connect-grpcreflect-go v1.0.0 // indirect github.com/bufbuild/connect-opentelemetry-go v0.3.0 // indirect @@ -228,5 +228,5 @@ require ( replace ( github.com/ShinyTrinkets/overseer => github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef github.com/bytecodealliance/wasmtime-go/v4 => github.com/streamingfast/wasmtime-go/v4 v4.0.0-freemem3 - github.com/jhump/protoreflect => github.com/streamingfast/protoreflect v0.0.0-20230414203421-018294174fdc + github.com/jhump/protoreflect => github.com/streamingfast/protoreflect v0.0.0-20231205191344-4b629d20ce8d ) diff --git a/go.sum b/go.sum index 8933791..d1d2077 100644 --- a/go.sum 
+++ b/go.sum @@ -578,12 +578,6 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/streamingfast/bstream v0.0.2-0.20231205163051-ade2f311eca3 h1:u8orpRssS8rYceziOQ/mbBQHlYh5w06oOtTXK90/yMc= -github.com/streamingfast/bstream v0.0.2-0.20231205163051-ade2f311eca3/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= -github.com/streamingfast/bstream v0.0.2-0.20231205174934-869fb7d64fd2 h1:TQPPxjBXflVinpSSbYaiMuNgw1HB1YnMcFiR52M8EVo= -github.com/streamingfast/bstream v0.0.2-0.20231205174934-869fb7d64fd2/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= -github.com/streamingfast/bstream v0.0.2-0.20231205175345-609448673b00 h1:U/8aQZOpOzLTVcuEVdbEVffVu00ixotkTe8DRhEXxao= -github.com/streamingfast/bstream v0.0.2-0.20231205175345-609448673b00/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= github.com/streamingfast/bstream v0.0.2-0.20231205185208-7e21cc7e64bc h1:ioohiLa+d59fqToa2OhbUx418YMrqt2bLT+m+fmjOG8= github.com/streamingfast/bstream v0.0.2-0.20231205185208-7e21cc7e64bc/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 h1:UxJUTcEVkdZy8N77E3exz0iNlgQuxl4m220GPvzdZ2s= @@ -619,8 +613,8 @@ github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef h1:9IVFHR github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef/go.mod h1:cq8CvbZ3ioFmGrHokSAJalS0lC+pVXLKhITScItUGXY= github.com/streamingfast/pbgo v0.0.6-0.20231120172814-537d034aad5e h1:8hoT2QUwh+YNgIcCPux9xd4u9XojHR8hbyAzz7rQuEM= github.com/streamingfast/pbgo v0.0.6-0.20231120172814-537d034aad5e/go.mod h1:fZuijmeFrqxW2YnnXmGrkQpUTHx3eHCaJUKwdvXAYKM= -github.com/streamingfast/protoreflect 
v0.0.0-20230414203421-018294174fdc h1:poYChURzYXislOzzeo44FKipd3wWvxhlz966qzO9kZk= -github.com/streamingfast/protoreflect v0.0.0-20230414203421-018294174fdc/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= +github.com/streamingfast/protoreflect v0.0.0-20231205191344-4b629d20ce8d h1:33VIARqUqBUKXJcuQoOS1rVSms54tgxhhNCmrLptpLg= +github.com/streamingfast/protoreflect v0.0.0-20231205191344-4b629d20ce8d/go.mod h1:aBJivEdekmFWYSQ29EE/fN9IanJWJXbtjy3ky0XD/jE= github.com/streamingfast/sf-tracing v0.0.0-20230616174903-cd2ade641ca9 h1:YRwpVvLYa+FEJlTy0S7mk4UptYjk5zac+A+ZE1phOeA= github.com/streamingfast/sf-tracing v0.0.0-20230616174903-cd2ade641ca9/go.mod h1:ktzt1BUj3GF+SKQHEmn3ShryJ7y87JeCHtaTGaDVATs= github.com/streamingfast/shutter v1.5.0 h1:NpzDYzj0HVpSiDJVO/FFSL6QIK/YKOxY0gJAtyaTOgs= diff --git a/jsonencoder/encoder.go b/jsonencoder/encoder.go index ea667b5..744ddb2 100644 --- a/jsonencoder/encoder.go +++ b/jsonencoder/encoder.go @@ -14,17 +14,12 @@ type Encoder struct { marshallers []*json.Marshalers } -func New(files *protoregistry.Registry, opts ...Option) *Encoder { +func New(files *protoregistry.Registry) *Encoder { e := &Encoder{ protoRegistry: files, } - e.marshallers = []*json.Marshalers{ - json.MarshalFuncV2(e.protoAny), - } - - for _, opt := range opts { - opt(e) + json.MarshalFuncV2(e.anypb), } return e } diff --git a/jsonencoder/marshallers.go b/jsonencoder/marshallers.go new file mode 100644 index 0000000..b389bff --- /dev/null +++ b/jsonencoder/marshallers.go @@ -0,0 +1,45 @@ +package jsonencoder + +import ( + "encoding/hex" + "fmt" + "strings" + + "github.com/jhump/protoreflect/dynamic" + + "github.com/mr-tron/base58" + + "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "google.golang.org/protobuf/types/known/anypb" +) + +func (e *Encoder) anypb(encoder *jsontext.Encoder, t *anypb.Any, options json.Options) error { + msg, err := e.protoRegistry.Unmarshall(t) + if err != nil { + return fmt.Errorf("unmarshalling 
proto any: %w", err) + } + setBytesEncoder(t.TypeUrl) + cnt, err := json.Marshal(msg, json.WithMarshalers(json.NewMarshalers(e.marshallers...))) + if err != nil { + return fmt.Errorf("json marshalling proto any: %w", err) + } + return encoder.WriteValue(cnt) +} + +func (e *Encoder) base58Bytes(encoder *jsontext.Encoder, t []byte, options json.Options) error { + return encoder.WriteToken(jsontext.String(base58.Encode(t))) +} + +func (e *Encoder) hexBytes(encoder *jsontext.Encoder, t []byte, options json.Options) error { + return encoder.WriteToken(jsontext.String(hex.EncodeToString(t))) +} + +func setBytesEncoder(typeURL string) { + if strings.Contains(typeURL, "solana") { + dynamic.SetDefaultBytesRepresentation(dynamic.BytesAsBase58) + return + } else { + dynamic.SetDefaultBytesRepresentation(dynamic.BytesAsHex) + } +} diff --git a/jsonencoder/options.go b/jsonencoder/options.go deleted file mode 100644 index 3b6c550..0000000 --- a/jsonencoder/options.go +++ /dev/null @@ -1,32 +0,0 @@ -package jsonencoder - -import ( - "encoding/hex" - "fmt" - - "github.com/go-json-experiment/json" - "github.com/go-json-experiment/json/jsontext" - "github.com/mr-tron/base58" -) - -type Option func(c *Encoder) - -func WithBytesAsBase58() Option { - return func(c *Encoder) { - m := json.MarshalFuncV2(func(encoder *jsontext.Encoder, t []byte, options json.Options) error { - fmt.Println("base58", hex.EncodeToString(t)) - return encoder.WriteToken(jsontext.String(hex.EncodeToString(t))) - }) - c.marshallers = append(c.marshallers, m) - } -} - -func WithBytesAsHex() Option { - return func(c *Encoder) { - m := json.MarshalFuncV2(func(encoder *jsontext.Encoder, t []byte, options json.Options) error { - fmt.Println("hex", hex.EncodeToString(t)) - return encoder.WriteToken(jsontext.String(base58.Encode(t))) - }) - c.marshallers = append(c.marshallers, m) - } -} diff --git a/jsonencoder/proto.go b/jsonencoder/proto.go deleted file mode 100644 index a787e6c..0000000 --- a/jsonencoder/proto.go 
+++ /dev/null @@ -1,21 +0,0 @@ -package jsonencoder - -import ( - "fmt" - - "github.com/go-json-experiment/json" - "github.com/go-json-experiment/json/jsontext" - "google.golang.org/protobuf/types/known/anypb" -) - -func (e *Encoder) protoAny(encoder *jsontext.Encoder, t *anypb.Any, options json.Options) error { - msg, err := e.protoRegistry.Unmarshall(t.TypeUrl, t.Value) - if err != nil { - return fmt.Errorf("unmarshalling proto any: %w", err) - } - cnt, err := json.Marshal(msg, json.WithMarshalers(json.NewMarshalers(e.marshallers...))) - if err != nil { - return fmt.Errorf("json marshalling proto any: %w", err) - } - return encoder.WriteValue(cnt) -} diff --git a/protoregistry/generator/generator.go b/protoregistry/generator/generator.go index 7fd9f67..25de309 100644 --- a/protoregistry/generator/generator.go +++ b/protoregistry/generator/generator.go @@ -67,7 +67,7 @@ func main() { for _, file := range fileDescriptorSet.Msg.FileDescriptorSet.File { cnt, err := proto.Marshal(file) if err != nil { - log.Fatalf("failed to marshall proto file %s: %v", file.Name, err) + log.Fatalf("failed to marshall proto file %s: %v", file.GetName(), err) return } name := "" diff --git a/protoregistry/registry.go b/protoregistry/registry.go index 574c5e5..e8b7775 100644 --- a/protoregistry/registry.go +++ b/protoregistry/registry.go @@ -4,6 +4,8 @@ import ( "fmt" "strings" + "google.golang.org/protobuf/types/known/anypb" + "github.com/jhump/protoreflect/desc" "github.com/jhump/protoreflect/dynamic" ) @@ -37,18 +39,18 @@ func (r *Registry) RegisterFileDescriptor(f *desc.FileDescriptor) { r.filesDescriptors = append(r.filesDescriptors, f) } -func (r *Registry) Unmarshall(typeURL string, value []byte) (*dynamic.Message, error) { +func (r *Registry) Unmarshall(t *anypb.Any) (*dynamic.Message, error) { for _, fd := range r.filesDescriptors { - md := fd.FindSymbol(cleanTypeURL(typeURL)) + md := fd.FindSymbol(cleanTypeURL(t.TypeUrl)) if md != nil { dynMsg := 
dynamic.NewMessageFactoryWithDefaults().NewDynamicMessage(md.(*desc.MessageDescriptor)) - if err := dynMsg.Unmarshal(value); err != nil { + if err := dynMsg.Unmarshal(t.Value); err != nil { return nil, fmt.Errorf("unmarshalling proto: %w", err) } return dynMsg, nil } } - return nil, fmt.Errorf("no message descriptor in registry for type url: %s", typeURL) + return nil, fmt.Errorf("no message descriptor in registry for type url: %s", t.TypeUrl) } func (r *Registry) Extends(registry *Registry) { diff --git a/unsafe_extensions.go b/unsafe_extensions.go index c014d8d..9a847c5 100644 --- a/unsafe_extensions.go +++ b/unsafe_extensions.go @@ -3,14 +3,10 @@ package firecore import ( "context" - pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" "github.com/streamingfast/dlauncher/launcher" "go.uber.org/zap" ) -var UnsafePayloadKind pbbstream.Protocol = pbbstream.Protocol_UNKNOWN -var UnsafeJsonBytesEncoder = "hex" - // UnsafeResolveReaderNodeStartBlock is a function that resolved the reader node start block num, by default it simply // returns the value of the 'reader-node-start-block-num'. However, the function may be overwritten in certain chains // to perform a more complex resolution logic. 
From e177b36a1809686517ed122913a85347dd0593e6 Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Tue, 5 Dec 2023 14:17:13 -0500 Subject: [PATCH 53/66] clean up func --- jsonencoder/marshallers.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/jsonencoder/marshallers.go b/jsonencoder/marshallers.go index b389bff..2d504b6 100644 --- a/jsonencoder/marshallers.go +++ b/jsonencoder/marshallers.go @@ -39,7 +39,6 @@ func setBytesEncoder(typeURL string) { if strings.Contains(typeURL, "solana") { dynamic.SetDefaultBytesRepresentation(dynamic.BytesAsBase58) return - } else { - dynamic.SetDefaultBytesRepresentation(dynamic.BytesAsHex) } + dynamic.SetDefaultBytesRepresentation(dynamic.BytesAsHex) } From 15351604587816b12c24a63dace7ec9e7b2cd49a Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Tue, 5 Dec 2023 14:24:25 -0500 Subject: [PATCH 54/66] cleaned up encoder --- jsonencoder/encoder.go | 11 +++-------- jsonencoder/marshallers.go | 16 ++++++++++++---- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/jsonencoder/encoder.go b/jsonencoder/encoder.go index 744ddb2..4b912e1 100644 --- a/jsonencoder/encoder.go +++ b/jsonencoder/encoder.go @@ -11,26 +11,21 @@ import ( type Encoder struct { protoRegistry *protoregistry.Registry - marshallers []*json.Marshalers } func New(files *protoregistry.Registry) *Encoder { - e := &Encoder{ + return &Encoder{ protoRegistry: files, } - e.marshallers = []*json.Marshalers{ - json.MarshalFuncV2(e.anypb), - } - return e } func (e *Encoder) Marshal(in any) error { - return json.MarshalEncode(jsontext.NewEncoder(os.Stdout), in, json.WithMarshalers(json.NewMarshalers(e.marshallers...))) + return json.MarshalEncode(jsontext.NewEncoder(os.Stdout), in, json.WithMarshalers(e.getMarshallers(""))) } func (e *Encoder) MarshalToString(in any) (string, error) { buf := bytes.NewBuffer(nil) - if err := json.MarshalEncode(jsontext.NewEncoder(buf), in, json.WithMarshalers(json.NewMarshalers(e.marshallers...))); err != nil { + 
if err := json.MarshalEncode(jsontext.NewEncoder(buf), in, json.WithMarshalers(e.getMarshallers(""))); err != nil { return "", err } return buf.String(), nil diff --git a/jsonencoder/marshallers.go b/jsonencoder/marshallers.go index 2d504b6..d338c3a 100644 --- a/jsonencoder/marshallers.go +++ b/jsonencoder/marshallers.go @@ -19,8 +19,8 @@ func (e *Encoder) anypb(encoder *jsontext.Encoder, t *anypb.Any, options json.Op if err != nil { return fmt.Errorf("unmarshalling proto any: %w", err) } - setBytesEncoder(t.TypeUrl) - cnt, err := json.Marshal(msg, json.WithMarshalers(json.NewMarshalers(e.marshallers...))) + + cnt, err := json.Marshal(msg, json.WithMarshalers(e.getMarshallers(t.TypeUrl))) if err != nil { return fmt.Errorf("json marshalling proto any: %w", err) } @@ -35,10 +35,18 @@ func (e *Encoder) hexBytes(encoder *jsontext.Encoder, t []byte, options json.Opt return encoder.WriteToken(jsontext.String(hex.EncodeToString(t))) } -func setBytesEncoder(typeURL string) { +func (e *Encoder) getMarshallers(typeURL string) *json.Marshalers { + out := []*json.Marshalers{ + json.MarshalFuncV2(e.anypb), + } + if strings.Contains(typeURL, "solana") { dynamic.SetDefaultBytesRepresentation(dynamic.BytesAsBase58) - return + out = append(out, json.MarshalFuncV2(e.base58Bytes)) + return json.NewMarshalers(out...) } + dynamic.SetDefaultBytesRepresentation(dynamic.BytesAsHex) + out = append(out, json.MarshalFuncV2(e.hexBytes)) + return json.NewMarshalers(out...) 
} From bd5ad8fb387ff268711d7677b58e45a2da8439d1 Mon Sep 17 00:00:00 2001 From: billettc Date: Tue, 5 Dec 2023 14:26:51 -0500 Subject: [PATCH 55/66] move common flags to main --- cmd/common.go | 72 ---------------------------------------------- cmd/main.go | 52 +++++++++++++++++++++++++++++++-- cmd/tools/tools.go | 9 ++---- 3 files changed, 53 insertions(+), 80 deletions(-) delete mode 100644 cmd/common.go diff --git a/cmd/common.go b/cmd/common.go deleted file mode 100644 index f819d7a..0000000 --- a/cmd/common.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2021 dfuse Platform Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package cmd - -import ( - "github.com/spf13/cobra" - "github.com/streamingfast/cli" - "github.com/streamingfast/dlauncher/launcher" - firecore "github.com/streamingfast/firehose-core" - "go.uber.org/zap" -) - -func registerCommonFlags[B firecore.Block](chain *firecore.Chain[B]) { - launcher.RegisterCommonFlags = func(_ *zap.Logger, cmd *cobra.Command) error { - // Common stores configuration flags - cmd.Flags().String("common-one-block-store-url", firecore.OneBlockStoreURL, "[COMMON] Store URL to read/write one-block files") - cmd.Flags().String("common-merged-blocks-store-url", firecore.MergedBlocksStoreURL, "[COMMON] Store URL where to read/write merged blocks.") - cmd.Flags().String("common-forked-blocks-store-url", firecore.ForkedBlocksStoreURL, "[COMMON] Store URL where to read/write forked block files that we want to keep.") - cmd.Flags().String("common-live-blocks-addr", firecore.RelayerServingAddr, "[COMMON] gRPC endpoint to get real-time blocks.") - - cmd.Flags().String("common-index-store-url", firecore.IndexStoreURL, "[COMMON] Store URL where to read/write index files (if used on the chain).") - cmd.Flags().IntSlice("common-index-block-sizes", []int{100000, 10000, 1000, 100}, "Index bundle sizes that that are considered valid when looking for block indexes") - - cmd.Flags().Bool("common-blocks-cache-enabled", false, cli.FlagDescription(` - [COMMON] Use a disk cache to store the blocks data to disk and instead of keeping it in RAM. By enabling this, block's Protobuf content, in bytes, - is kept on file system instead of RAM. This is done as soon the block is downloaded from storage. This is a tradeoff between RAM and Disk, if you - are going to serve only a handful of concurrent requests, it's suggested to keep is disabled, if you encounter heavy RAM consumption issue, specially - by the firehose component, it's definitely a good idea to enable it and configure it properly through the other 'common-blocks-cache-...' flags. 
The cache is - split in two portions, one keeping N total bytes of blocks of the most recently used blocks and the other one keeping the N earliest blocks as - requested by the various consumers of the cache. - `)) - cmd.Flags().String("common-blocks-cache-dir", firecore.BlocksCacheDirectory, cli.FlagDescription(` - [COMMON] Blocks cache directory where all the block's bytes will be cached to disk instead of being kept in RAM. - This should be a disk that persists across restarts of the Firehose component to reduce the the strain on the disk - when restarting and streams reconnects. The size of disk must at least big (with a 10%% buffer) in bytes as the sum of flags' - value for 'common-blocks-cache-max-recent-entry-bytes' and 'common-blocks-cache-max-entry-by-age-bytes'. - `)) - cmd.Flags().Int("common-blocks-cache-max-recent-entry-bytes", 21474836480, cli.FlagDescription(` - [COMMON] Blocks cache max size in bytes of the most recently used blocks, after the limit is reached, blocks are evicted from the cache. - `)) - cmd.Flags().Int("common-blocks-cache-max-entry-by-age-bytes", 21474836480, cli.FlagDescription(` - [COMMON] Blocks cache max size in bytes of the earliest used blocks, after the limit is reached, blocks are evicted from the cache. 
- `)) - - cmd.Flags().Int("common-first-streamable-block", int(chain.FirstStreamableBlock), "[COMMON] First streamable block of the chain") - - // Authentication, metering and rate limiter plugins - cmd.Flags().String("common-auth-plugin", "null://", "[COMMON] Auth plugin URI, see streamingfast/dauth repository") - cmd.Flags().String("common-metering-plugin", "null://", "[COMMON] Metering plugin URI, see streamingfast/dmetering repository") - - // System Behavior - cmd.Flags().Uint64("common-auto-mem-limit-percent", 0, "[COMMON] Automatically sets GOMEMLIMIT to a percentage of memory limit from cgroup (useful for container environments)") - cmd.Flags().Bool("common-auto-max-procs", false, "[COMMON] Automatically sets GOMAXPROCS to max cpu available from cgroup (useful for container environments)") - cmd.Flags().Duration("common-system-shutdown-signal-delay", 0, cli.FlagDescription(` - [COMMON] Add a delay between receiving SIGTERM signal and shutting down apps. - Apps will respond negatively to /healthz during this period - `)) - return nil - } -} diff --git a/cmd/main.go b/cmd/main.go index e5558c9..d31a2de 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -6,8 +6,6 @@ import ( "strings" "time" - "github.com/streamingfast/firehose-core/cmd/tools" - "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/spf13/viper" @@ -23,6 +21,7 @@ import ( dmeteringlogger "github.com/streamingfast/dmetering/logger" firecore "github.com/streamingfast/firehose-core" "github.com/streamingfast/firehose-core/cmd/apps" + "github.com/streamingfast/firehose-core/cmd/tools" "github.com/streamingfast/logging" "go.uber.org/zap" @@ -156,6 +155,55 @@ func exitWithError(message string, err error) { os.Exit(1) } +func registerCommonFlags[B firecore.Block](chain *firecore.Chain[B]) { + launcher.RegisterCommonFlags = func(_ *zap.Logger, cmd *cobra.Command) error { + // Common stores configuration flags + cmd.Flags().String("common-one-block-store-url", firecore.OneBlockStoreURL, "[COMMON] 
Store URL to read/write one-block files") + cmd.Flags().String("common-merged-blocks-store-url", firecore.MergedBlocksStoreURL, "[COMMON] Store URL where to read/write merged blocks.") + cmd.Flags().String("common-forked-blocks-store-url", firecore.ForkedBlocksStoreURL, "[COMMON] Store URL where to read/write forked block files that we want to keep.") + cmd.Flags().String("common-live-blocks-addr", firecore.RelayerServingAddr, "[COMMON] gRPC endpoint to get real-time blocks.") + + cmd.Flags().String("common-index-store-url", firecore.IndexStoreURL, "[COMMON] Store URL where to read/write index files (if used on the chain).") + cmd.Flags().IntSlice("common-index-block-sizes", []int{100000, 10000, 1000, 100}, "Index bundle sizes that that are considered valid when looking for block indexes") + + cmd.Flags().Bool("common-blocks-cache-enabled", false, cli.FlagDescription(` + [COMMON] Use a disk cache to store the blocks data to disk and instead of keeping it in RAM. By enabling this, block's Protobuf content, in bytes, + is kept on file system instead of RAM. This is done as soon the block is downloaded from storage. This is a tradeoff between RAM and Disk, if you + are going to serve only a handful of concurrent requests, it's suggested to keep is disabled, if you encounter heavy RAM consumption issue, specially + by the firehose component, it's definitely a good idea to enable it and configure it properly through the other 'common-blocks-cache-...' flags. The cache is + split in two portions, one keeping N total bytes of blocks of the most recently used blocks and the other one keeping the N earliest blocks as + requested by the various consumers of the cache. + `)) + cmd.Flags().String("common-blocks-cache-dir", firecore.BlocksCacheDirectory, cli.FlagDescription(` + [COMMON] Blocks cache directory where all the block's bytes will be cached to disk instead of being kept in RAM. 
+ This should be a disk that persists across restarts of the Firehose component to reduce the the strain on the disk + when restarting and streams reconnects. The size of disk must at least big (with a 10%% buffer) in bytes as the sum of flags' + value for 'common-blocks-cache-max-recent-entry-bytes' and 'common-blocks-cache-max-entry-by-age-bytes'. + `)) + cmd.Flags().Int("common-blocks-cache-max-recent-entry-bytes", 21474836480, cli.FlagDescription(` + [COMMON] Blocks cache max size in bytes of the most recently used blocks, after the limit is reached, blocks are evicted from the cache. + `)) + cmd.Flags().Int("common-blocks-cache-max-entry-by-age-bytes", 21474836480, cli.FlagDescription(` + [COMMON] Blocks cache max size in bytes of the earliest used blocks, after the limit is reached, blocks are evicted from the cache. + `)) + + cmd.Flags().Int("common-first-streamable-block", int(chain.FirstStreamableBlock), "[COMMON] First streamable block of the chain") + + // Authentication, metering and rate limiter plugins + cmd.Flags().String("common-auth-plugin", "null://", "[COMMON] Auth plugin URI, see streamingfast/dauth repository") + cmd.Flags().String("common-metering-plugin", "null://", "[COMMON] Metering plugin URI, see streamingfast/dmetering repository") + + // System Behavior + cmd.Flags().Uint64("common-auto-mem-limit-percent", 0, "[COMMON] Automatically sets GOMEMLIMIT to a percentage of memory limit from cgroup (useful for container environments)") + cmd.Flags().Bool("common-auto-max-procs", false, "[COMMON] Automatically sets GOMAXPROCS to max cpu available from cgroup (useful for container environments)") + cmd.Flags().Duration("common-system-shutdown-signal-delay", 0, cli.FlagDescription(` + [COMMON] Add a delay between receiving SIGTERM signal and shutting down apps. 
+ Apps will respond negatively to /healthz during this period + `)) + return nil + } +} + var startCmdHelpTemplate = `Usage:{{if .Runnable}} {{.UseLine}}{{end}} [all|command1 [command2...]]{{if gt (len .Aliases) 0}} diff --git a/cmd/tools/tools.go b/cmd/tools/tools.go index af2337e..cfdc70a 100644 --- a/cmd/tools/tools.go +++ b/cmd/tools/tools.go @@ -17,22 +17,19 @@ package tools import ( "fmt" - "github.com/streamingfast/firehose-core/cmd/tools/fix" - "github.com/streamingfast/firehose-core/cmd/tools/mergeblock" - - "github.com/streamingfast/firehose-core/cmd/tools/compare" - "github.com/spf13/cobra" firecore "github.com/streamingfast/firehose-core" "github.com/streamingfast/firehose-core/cmd/tools/check" + "github.com/streamingfast/firehose-core/cmd/tools/compare" "github.com/streamingfast/firehose-core/cmd/tools/firehose" + "github.com/streamingfast/firehose-core/cmd/tools/fix" + "github.com/streamingfast/firehose-core/cmd/tools/mergeblock" print2 "github.com/streamingfast/firehose-core/cmd/tools/print" "github.com/streamingfast/logging" "go.uber.org/zap" ) var ToolsCmd = &cobra.Command{Use: "tools", Short: "Developer tools for operators and developers"} -var MaxUint64 = ^uint64(0) func ConfigureToolsCmd[B firecore.Block]( chain *firecore.Chain[B], From 5a810db264d30a9a6edc7d236123f978aef91db7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Duchesneau?= Date: Wed, 6 Dec 2023 12:58:51 -0500 Subject: [PATCH 56/66] merge firehose commits after Nov9 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * commit 7b23ef908c0c2b74863c0558ee7f18eaae431834 Stéphane Duchesneau Date: Mon Dec 4 14:20:37 2023 -0500 add jitter and longer delay on rate-limiting * commit 3eae369c284a499d8985e53dfd1b76a415b52b31 Frederik Schöll Date: Mon Nov 20 15:43:36 2023 +0100 Merge pull request #26 from fschoell/feature/pass_x_sf_meta_header pass the x-sf-meta header into the metering --- firehose/server/blocks.go | 4 +++- 
firehose/server/server.go | 1 + go.mod | 4 ++-- go.sum | 4 ++++ 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/firehose/server/blocks.go b/firehose/server/blocks.go index 911649b..2c61ff3 100644 --- a/firehose/server/blocks.go +++ b/firehose/server/blocks.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "math/rand" "os" "time" @@ -69,7 +70,8 @@ func (s *Server) Blocks(request *pbfirehose.Request, streamSrv pbfirehose.Stream defer cancel() if allow := s.rateLimiter.Take(rlCtx, "", "Blocks"); !allow { - <-time.After(time.Millisecond * 500) // force a minimal backoff + jitterDelay := time.Duration(rand.Intn(3000) + 1000) // force a minimal backoff + <-time.After(time.Millisecond * jitterDelay) return status.Error(codes.Unavailable, "rate limit exceeded") } else { defer s.rateLimiter.Return() diff --git a/firehose/server/server.go b/firehose/server/server.go index 6d7c4ca..de5c247 100644 --- a/firehose/server/server.go +++ b/firehose/server/server.go @@ -83,6 +83,7 @@ func New( UserID: auth.UserID(), ApiKeyID: auth.APIKeyID(), IpAddress: auth.RealIP(), + Meta: auth.Meta(), Endpoint: "sf.firehose.v2.Firehose/Blocks", Metrics: map[string]float64{ "egress_bytes": float64(size), diff --git a/go.mod b/go.mod index fe5616a..a65ce63 100644 --- a/go.mod +++ b/go.mod @@ -18,12 +18,12 @@ require ( github.com/spf13/viper v1.15.0 github.com/streamingfast/bstream v0.0.2-0.20231205185208-7e21cc7e64bc github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 - github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 + github.com/streamingfast/dauth v0.0.0-20231120142446-843f4e045cc2 github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c github.com/streamingfast/derr v0.0.0-20230515163924-8570aaa43fe1 github.com/streamingfast/dgrpc v0.0.0-20230929132851-893fc52687fa github.com/streamingfast/dlauncher v0.0.0-20230607184145-76399faad89e - github.com/streamingfast/dmetering v0.0.0-20230731155453-e1df53e362aa + 
github.com/streamingfast/dmetering v0.0.0-20231120142327-a3405f0eed83 github.com/streamingfast/dmetrics v0.0.0-20230919161904-206fa8ebd545 github.com/streamingfast/dstore v0.1.1-0.20230620124109-3924b3b36c77 github.com/streamingfast/jsonpb v0.0.0-20210811021341-3670f0aa02d0 diff --git a/go.sum b/go.sum index d1d2077..5219b28 100644 --- a/go.sum +++ b/go.sum @@ -584,6 +584,8 @@ github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 h1:UxJUTcEVkdZ github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80/go.mod h1:QxjVH73Lkqk+mP8bndvhMuQDUINfkgsYhdCH/5TJFKI= github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 h1:49JYZkn8ALGe+LhcACZyX3L9B8tIxRZ3F3l+OxmNMhY= github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330/go.mod h1:zfq+mtesfbaZnNeh1BF+vo+zEFP1sat4pm3lvt40nRw= +github.com/streamingfast/dauth v0.0.0-20231120142446-843f4e045cc2 h1:g4mG6ZCy3/XtcsZXfOHrQOsjVGoX9uTc/QlemaPV4EE= +github.com/streamingfast/dauth v0.0.0-20231120142446-843f4e045cc2/go.mod h1:zfq+mtesfbaZnNeh1BF+vo+zEFP1sat4pm3lvt40nRw= github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c h1:6WjE2yInE+5jnI7cmCcxOiGZiEs2FQm9Zsg2a9Ivp0Q= github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c/go.mod h1:dbfiy9ORrL8c6ldSq+L0H9pg8TOqqu/FsghsgUEWK54= github.com/streamingfast/derr v0.0.0-20230515163924-8570aaa43fe1 h1:xJB7rXnOHLesosMjfwWsEL2i/40mFSkzenEb3M0qTyM= @@ -594,6 +596,8 @@ github.com/streamingfast/dlauncher v0.0.0-20230607184145-76399faad89e h1:Nh/gLDv github.com/streamingfast/dlauncher v0.0.0-20230607184145-76399faad89e/go.mod h1:xErlHEDd5+4NlR+Mg3ZtW7BTTLB0yZBxZAjHPrkk8X4= github.com/streamingfast/dmetering v0.0.0-20230731155453-e1df53e362aa h1:bM6iy5X7Gtw1oh1bMxFmtroouKZu4K4BHXaFvR96jNw= github.com/streamingfast/dmetering v0.0.0-20230731155453-e1df53e362aa/go.mod h1:3XggUfQMyciaue133qhbIkFqJQqNzozGpa/gI3sdwac= +github.com/streamingfast/dmetering v0.0.0-20231120142327-a3405f0eed83 h1:IbIUT85146duL9EKwMiiW0HH1djpm8plmJOo+YZbO5U= 
+github.com/streamingfast/dmetering v0.0.0-20231120142327-a3405f0eed83/go.mod h1:3XggUfQMyciaue133qhbIkFqJQqNzozGpa/gI3sdwac= github.com/streamingfast/dmetrics v0.0.0-20230919161904-206fa8ebd545 h1:SUl04bZKGAv207lp7/6CHOJIRpjUKunwItrno3K463Y= github.com/streamingfast/dmetrics v0.0.0-20230919161904-206fa8ebd545/go.mod h1:JbxEDbzWRG1dHdNIPrYfuPllEkktZMgm40AwVIBENcw= github.com/streamingfast/dstore v0.1.1-0.20230620124109-3924b3b36c77 h1:u7FWLqz3Uwff609Ja9M+3aGOWqBCVU7dx9i6R6Qc4qI= From 6f6433e98cf7d243fad3e567fd6fe1a94d0654e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Duchesneau?= Date: Wed, 6 Dec 2023 13:08:43 -0500 Subject: [PATCH 57/66] bump substreams, bstream to latest --- go.mod | 12 ++++++------ go.sum | 28 ++++++++++++---------------- 2 files changed, 18 insertions(+), 22 deletions(-) diff --git a/go.mod b/go.mod index a65ce63..e5650c2 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 - github.com/streamingfast/bstream v0.0.2-0.20231205185208-7e21cc7e64bc + github.com/streamingfast/bstream v0.0.2-0.20231205214347-a411792eb5ab github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 github.com/streamingfast/dauth v0.0.0-20231120142446-843f4e045cc2 github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c @@ -30,7 +30,7 @@ require ( github.com/streamingfast/logging v0.0.0-20230608130331-f22c91403091 github.com/streamingfast/pbgo v0.0.6-0.20231120172814-537d034aad5e github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0 - github.com/streamingfast/substreams v1.1.21-0.20231122013157-938ec26b0ef6 + github.com/streamingfast/substreams v1.1.23-0.20231206180647-a75a8a462609 github.com/stretchr/testify v1.8.4 github.com/test-go/testify v1.1.4 go.uber.org/multierr v1.10.0 @@ -195,13 +195,13 @@ require ( go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/atomic v1.10.0 go.uber.org/automaxprocs v1.5.1 // 
indirect - golang.org/x/crypto v0.13.0 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/mod v0.12.0 // indirect - golang.org/x/net v0.15.0 // indirect + golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.10.0 golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.12.0 // indirect - golang.org/x/term v0.12.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.1.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect diff --git a/go.sum b/go.sum index 5219b28..c4a53f0 100644 --- a/go.sum +++ b/go.sum @@ -578,12 +578,10 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/streamingfast/bstream v0.0.2-0.20231205185208-7e21cc7e64bc h1:ioohiLa+d59fqToa2OhbUx418YMrqt2bLT+m+fmjOG8= -github.com/streamingfast/bstream v0.0.2-0.20231205185208-7e21cc7e64bc/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= +github.com/streamingfast/bstream v0.0.2-0.20231205214347-a411792eb5ab h1:CTVn1JAzOvON/d1ZohrV2RicBIVxXMApDQhURguqWXg= +github.com/streamingfast/bstream v0.0.2-0.20231205214347-a411792eb5ab/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 h1:UxJUTcEVkdZy8N77E3exz0iNlgQuxl4m220GPvzdZ2s= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80/go.mod h1:QxjVH73Lkqk+mP8bndvhMuQDUINfkgsYhdCH/5TJFKI= -github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330 h1:49JYZkn8ALGe+LhcACZyX3L9B8tIxRZ3F3l+OxmNMhY= -github.com/streamingfast/dauth v0.0.0-20230929180355-921f9c9be330/go.mod h1:zfq+mtesfbaZnNeh1BF+vo+zEFP1sat4pm3lvt40nRw= 
github.com/streamingfast/dauth v0.0.0-20231120142446-843f4e045cc2 h1:g4mG6ZCy3/XtcsZXfOHrQOsjVGoX9uTc/QlemaPV4EE= github.com/streamingfast/dauth v0.0.0-20231120142446-843f4e045cc2/go.mod h1:zfq+mtesfbaZnNeh1BF+vo+zEFP1sat4pm3lvt40nRw= github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c h1:6WjE2yInE+5jnI7cmCcxOiGZiEs2FQm9Zsg2a9Ivp0Q= @@ -594,8 +592,6 @@ github.com/streamingfast/dgrpc v0.0.0-20230929132851-893fc52687fa h1:L/Ipge5pkZt github.com/streamingfast/dgrpc v0.0.0-20230929132851-893fc52687fa/go.mod h1:AcY2kk28XswihgU6z37288a3ZF4gGGO7nNwlTI/vET4= github.com/streamingfast/dlauncher v0.0.0-20230607184145-76399faad89e h1:Nh/gLDv8rOMIidb/gpO4rZOYVe09k+tof/trezkpku4= github.com/streamingfast/dlauncher v0.0.0-20230607184145-76399faad89e/go.mod h1:xErlHEDd5+4NlR+Mg3ZtW7BTTLB0yZBxZAjHPrkk8X4= -github.com/streamingfast/dmetering v0.0.0-20230731155453-e1df53e362aa h1:bM6iy5X7Gtw1oh1bMxFmtroouKZu4K4BHXaFvR96jNw= -github.com/streamingfast/dmetering v0.0.0-20230731155453-e1df53e362aa/go.mod h1:3XggUfQMyciaue133qhbIkFqJQqNzozGpa/gI3sdwac= github.com/streamingfast/dmetering v0.0.0-20231120142327-a3405f0eed83 h1:IbIUT85146duL9EKwMiiW0HH1djpm8plmJOo+YZbO5U= github.com/streamingfast/dmetering v0.0.0-20231120142327-a3405f0eed83/go.mod h1:3XggUfQMyciaue133qhbIkFqJQqNzozGpa/gI3sdwac= github.com/streamingfast/dmetrics v0.0.0-20230919161904-206fa8ebd545 h1:SUl04bZKGAv207lp7/6CHOJIRpjUKunwItrno3K463Y= @@ -625,8 +621,8 @@ github.com/streamingfast/shutter v1.5.0 h1:NpzDYzj0HVpSiDJVO/FFSL6QIK/YKOxY0gJAt github.com/streamingfast/shutter v1.5.0/go.mod h1:B/T6efqdeMGbGwjzPS1ToXzYZI4kDzI5/u4I+7qbjY8= github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0 h1:Y15G1Z4fpEdm2b+/70owI7TLuXadlqBtGM7rk4Hxrzk= github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0/go.mod h1:/Rnz2TJvaShjUct0scZ9kKV2Jr9/+KBAoWy4UMYxgv4= -github.com/streamingfast/substreams v1.1.21-0.20231122013157-938ec26b0ef6 h1:eNA736ywGv/XXCotwx4LkfRDmOrHUyPbAqCKI+RzYI4= 
-github.com/streamingfast/substreams v1.1.21-0.20231122013157-938ec26b0ef6/go.mod h1:JHCOsJtgXUM2KWNxsvi5/hjrLy4KpClaMRriBR3ybnI= +github.com/streamingfast/substreams v1.1.23-0.20231206180647-a75a8a462609 h1:Fqkzbca2JPg4kKVfrSyPJk0hGGbkJcxSsxgWKwh75KA= +github.com/streamingfast/substreams v1.1.23-0.20231206180647-a75a8a462609/go.mod h1:fCC3pGTYMi0N4VhJjdJPQydefJpY+tsY9BzWxDi152k= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -731,8 +727,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -816,8 +812,8 @@ golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -915,13 +911,13 @@ golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0 
h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From d2c719a69534cee289282d4d90b765fcc5988867 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Duchesneau?= Date: Wed, 6 Dec 2023 13:26:12 -0500 Subject: [PATCH 58/66] fix index builder --- cmd/apps/index_builder.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cmd/apps/index_builder.go b/cmd/apps/index_builder.go index c3df3ce..76fc643 100644 --- a/cmd/apps/index_builder.go +++ b/cmd/apps/index_builder.go @@ -73,7 +73,11 @@ func RegisterIndexBuilderApp[B firecore.Block](chain *firecore.Chain[B], rootLog } handler := bstream.HandlerFunc(func(blk *pbbstream.Block, _ interface{}) error { - return indexer.ProcessBlock(any(blk).(B)) + var b = chain.BlockFactory() + if err := blk.Payload.UnmarshalTo(b); err != nil { + return err + } + return indexer.ProcessBlock(any(b).(B)) }) app := index_builder.New(&index_builder.Config{ From 2d21be880525441070d4f50e8effe60b74e42a99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Duchesneau?= Date: Wed, 6 Dec 2023 13:41:37 -0500 Subject: [PATCH 59/66] bump bstreams to prevent heavy blocks printed in logs --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e5650c2..c4f99d0 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 - github.com/streamingfast/bstream v0.0.2-0.20231205214347-a411792eb5ab + github.com/streamingfast/bstream v0.0.2-0.20231206184105-db4d51a0e07e github.com/streamingfast/cli 
v0.0.4-0.20230825151644-8cc84512cd80 github.com/streamingfast/dauth v0.0.0-20231120142446-843f4e045cc2 github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c diff --git a/go.sum b/go.sum index c4a53f0..247f353 100644 --- a/go.sum +++ b/go.sum @@ -578,8 +578,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/streamingfast/bstream v0.0.2-0.20231205214347-a411792eb5ab h1:CTVn1JAzOvON/d1ZohrV2RicBIVxXMApDQhURguqWXg= -github.com/streamingfast/bstream v0.0.2-0.20231205214347-a411792eb5ab/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= +github.com/streamingfast/bstream v0.0.2-0.20231206184105-db4d51a0e07e h1:pKET4WQZl0DFgTrkAR+ipS7XoGNbpdtL6HS/3VcHseg= +github.com/streamingfast/bstream v0.0.2-0.20231206184105-db4d51a0e07e/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 h1:UxJUTcEVkdZy8N77E3exz0iNlgQuxl4m220GPvzdZ2s= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80/go.mod h1:QxjVH73Lkqk+mP8bndvhMuQDUINfkgsYhdCH/5TJFKI= github.com/streamingfast/dauth v0.0.0-20231120142446-843f4e045cc2 h1:g4mG6ZCy3/XtcsZXfOHrQOsjVGoX9uTc/QlemaPV4EE= From 0ae01d4add854183759260a22255558a51e66fc5 Mon Sep 17 00:00:00 2001 From: Julien Cassis Date: Wed, 6 Dec 2023 13:49:24 -0500 Subject: [PATCH 60/66] add firehose-bitcoin in well known proto types --- protoregistry/generator/generator.go | 2 +- protoregistry/well_known.go | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/protoregistry/generator/generator.go b/protoregistry/generator/generator.go index 25de309..b171fec 100644 --- a/protoregistry/generator/generator.go +++ b/protoregistry/generator/generator.go @@ -31,7 
+31,7 @@ var wellKnownProtoRepos = []string{ "buf.build/streamingfast/firehose-ethereum", "buf.build/streamingfast/firehose-near", "buf.build/streamingfast/firehose-solana", - //"buf.build/streamingfast/firehose-bitcoin", + "buf.build/streamingfast/firehose-bitcoin", } func main() { diff --git a/protoregistry/well_known.go b/protoregistry/well_known.go index 2f4abed..f9bc450 100644 --- a/protoregistry/well_known.go +++ b/protoregistry/well_known.go @@ -46,6 +46,12 @@ func init() { // sf/solana/type/v2/type.proto "0a1c73662f736f6c616e612f747970652f76322f747970652e70726f746f121173662e736f6c616e612e747970652e76322286040a05426c6f636b120e0a02696418012001280c5202696412160a066e756d62657218022001280452066e756d62657212180a0776657273696f6e18032001280d520776657273696f6e121f0a0b70726576696f75735f696418042001280c520a70726576696f7573496412250a0e70726576696f75735f626c6f636b180520012804520d70726576696f7573426c6f636b12340a1667656e657369735f756e69785f74696d657374616d70180620012804521467656e65736973556e697854696d657374616d7012300a14636c6f636b5f756e69785f74696d657374616d701807200128045212636c6f636b556e697854696d657374616d7012260a0f6c6173745f656e7472795f6861736818082001280c520d6c617374456e7472794861736812420a0c7472616e73616374696f6e7318092003280b321e2e73662e736f6c616e612e747970652e76322e5472616e73616374696f6e520c7472616e73616374696f6e73122b0a117472616e73616374696f6e5f636f756e74180a2001280d52107472616e73616374696f6e436f756e7412390a196861735f73706c69745f6163636f756e745f6368616e676573180b20012808521668617353706c69744163636f756e744368616e67657312370a186163636f756e745f6368616e6765735f66696c655f726566180c2001280952156163636f756e744368616e67657346696c65526566224b0a05426174636812420a0c7472616e73616374696f6e7318012003280b321e2e73662e736f6c616e612e747970652e76322e5472616e73616374696f6e520c7472616e73616374696f6e7322cd030a0b5472616e73616374696f6e120e0a02696418012001280c5202696412140a05696e6465781802200128045205696e64657812330a156164646974696f6e616c5f7369676e61747572657318032003280c52146164646974696
f6e616c5369676e61747572657312380a0668656164657218042001280b32202e73662e736f6c616e612e747970652e76322e4d657373616765486561646572520668656164657212210a0c6163636f756e745f6b65797318052003280c520b6163636f756e744b65797312290a10726563656e745f626c6f636b6861736818062001280c520f726563656e74426c6f636b6861736812420a0c696e737472756374696f6e7318072003280b321e2e73662e736f6c616e612e747970652e76322e496e737472756374696f6e520c696e737472756374696f6e7312160a066661696c656418082001280852066661696c656412390a056572726f7218092001280b32232e73662e736f6c616e612e747970652e76322e5472616e73616374696f6e4572726f7252056572726f7212230a0d626567696e5f6f7264696e616c180a20012804520c626567696e4f7264696e616c121f0a0b656e645f6f7264696e616c180b20012804520a656e644f7264696e616c22cd010a0d4d65737361676548656164657212360a176e756d5f72657175697265645f7369676e61747572657318012001280d52156e756d52657175697265645369676e617475726573123f0a1c6e756d5f726561646f6e6c795f7369676e65645f6163636f756e747318022001280d52196e756d526561646f6e6c795369676e65644163636f756e747312430a1e6e756d5f726561646f6e6c795f756e7369676e65645f6163636f756e747318032001280d521b6e756d526561646f6e6c79556e7369676e65644163636f756e7473228d040a0b496e737472756374696f6e121d0a0a70726f6772616d5f696418032001280c520970726f6772616d496412210a0c6163636f756e745f6b65797318042003280c520b6163636f756e744b65797312120a046461746118052001280c52046461746112140a05696e64657818062001280d5205696e64657812210a0c706172656e745f696e64657818072001280d520b706172656e74496e64657812140a05646570746818082001280d5205646570746812490a0f62616c616e63655f6368616e67657318092003280b32202e73662e736f6c616e612e747970652e76322e42616c616e63654368616e6765520e62616c616e63654368616e67657312490a0f6163636f756e745f6368616e676573180a2003280b32202e73662e736f6c616e612e747970652e76322e4163636f756e744368616e6765520e6163636f756e744368616e676573122a0a046c6f6773180b2003280b32162e73662e736f6c616e612e747970652e76322e4c6f6752046c6f677312160a066661696c6564180f2001280852066661696c656412390a056572726f7218102001280b32232e73662e736
f6c616e612e747970652e76322e496e737472756374696f6e4572726f7252056572726f7212230a0d626567696e5f6f7264696e616c181120012804520c626567696e4f7264696e616c121f0a0b656e645f6f7264696e616c181220012804520a656e644f7264696e616c226f0a0d42616c616e63654368616e676512160a067075626b657918012001280c52067075626b657912230a0d707265765f6c616d706f727473180220012804520c707265764c616d706f72747312210a0c6e65775f6c616d706f727473180320012804520b6e65774c616d706f7274732287010a0d4163636f756e744368616e676512160a067075626b657918012001280c52067075626b6579121b0a09707265765f6461746118022001280c5208707265764461746112190a086e65775f6461746118032001280c52076e65774461746112260a0f6e65775f646174615f6c656e677468180420012804520d6e6577446174614c656e67746822390a034c6f6712180a076d65737361676518012001280952076d65737361676512180a076f7264696e616c18022001280452076f7264696e616c22280a105472616e73616374696f6e4572726f7212140a056572726f7218022001280952056572726f7222330a1b5472616e73616374696f6e496e737472756374696f6e4572726f7212140a056572726f7218022001280952056572726f7222280a10496e737472756374696f6e4572726f7212140a056572726f7218022001280952056572726f72222e0a16496e737472756374696f6e4572726f72437573746f6d12140a056572726f7218022001280952056572726f72424b5a496769746875622e636f6d2f73747265616d696e67666173742f66697265686f73652d736f6c616e612f74797065732f70622f73662f736f6c616e612f747970652f76323b7062736f6c620670726f746f33", + // google/protobuf/timestamp.proto + "0a1f676f6f676c652f70726f746f6275662f74696d657374616d702e70726f746f120f676f6f676c652e70726f746f627566223b0a0954696d657374616d7012180a077365636f6e647318012001280352077365636f6e647312140a056e616e6f7318022001280552056e616e6f734285010a13636f6d2e676f6f676c652e70726f746f627566420e54696d657374616d7050726f746f50015a32676f6f676c652e676f6c616e672e6f72672f70726f746f6275662f74797065732f6b6e6f776e2f74696d657374616d707062f80101a20203475042aa021e476f6f676c652e50726f746f6275662e57656c6c4b6e6f776e5479706573620670726f746f33", + + // sf/bitcoin/type/v1/type.proto + 
"0a1d73662f626974636f696e2f747970652f76312f747970652e70726f746f121273662e626974636f696e2e747970652e76311a1f676f6f676c652f70726f746f6275662f74696d657374616d702e70726f746f22e5030a05426c6f636b12120a046861736818012001280952046861736812120a0473697a65180320012805520473697a6512230a0d73747269707065645f73697a65180420012805520c737472697070656453697a6512160a06776569676874180520012805520677656967687412160a06686569676874180620012803520668656967687412180a0776657273696f6e180720012805520776657273696f6e121f0a0b76657273696f6e5f686578180820012809520a76657273696f6e486578121f0a0b6d65726b6c655f726f6f74180920012809520a6d65726b6c65526f6f74122f0a027478180a2003280b321f2e73662e626974636f696e2e747970652e76312e5472616e73616374696f6e5202747812120a0474696d65180b20012803520474696d65121e0a0a6d656469616e74696d65180c20012803520a6d656469616e74696d6512140a056e6f6e6365180d2001280d52056e6f6e636512120a0462697473180e20012809520462697473121e0a0a646966666963756c7479180f20012801520a646966666963756c7479121c0a09636861696e776f726b1810200128095209636861696e776f726b12110a046e5f747818112001280d52036e547812230a0d70726576696f75735f68617368181220012809520c70726576696f75734861736822d4020a0b5472616e73616374696f6e12100a03686578180120012809520368657812120a047478696418022001280952047478696412120a046861736818032001280952046861736812120a0473697a65180420012805520473697a6512140a057673697a6518052001280552057673697a6512160a06776569676874180620012805520677656967687412180a0776657273696f6e18072001280d520776657273696f6e121a0a086c6f636b74696d6518082001280d52086c6f636b74696d6512290a0376696e18092003280b32172e73662e626974636f696e2e747970652e76312e56696e520376696e122c0a04766f7574180a2003280b32182e73662e626974636f696e2e747970652e76312e566f75745204766f7574121c0a09626c6f636b68617368180b200128095209626c6f636b68617368121c0a09626c6f636b74696d65180c200128035209626c6f636b74696d6522c5010a0356696e12120a047478696418012001280952047478696412120a04766f757418022001280d5204766f7574123c0a0a7363726970745f73696718032001280b321d2e73662e626974636f696e2e74797
0652e76312e5363726970745369675209736372697074536967121a0a0873657175656e636518042001280d520873657175656e636512200a0b7478696e7769746e657373180520032809520b7478696e7769746e657373121a0a08636f696e626173651806200128095208636f696e6261736522710a04566f757412140a0576616c7565180120012801520576616c7565120c0a016e18022001280d52016e12450a0d7363726970745f7075624b657918032001280b32202e73662e626974636f696e2e747970652e76312e5363726970745075624b6579520c7363726970745075624b6579222f0a0953637269707453696712100a0361736d180120012809520361736d12100a0368657818022001280952036865782299010a0c5363726970745075624b657912100a0361736d180120012809520361736d12100a03686578180220012809520368657812190a087265715f7369677318032001280552077265715369677312120a047479706518042001280952047479706512180a0761646472657373180520012809520761646472657373121c0a096164647265737365731806200328095209616464726573736573424d5a4b6769746875622e636f6d2f73747265616d696e67666173742f66697265686f73652d626974636f696e2f74797065732f70622f73662f626974636f696e2f747970652f76313b7062627463620670726f746f33", + } var files []*descriptorpb.FileDescriptorProto From b5b7a4426deb80ed64e44c0befbd90659a0228d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Duchesneau?= Date: Wed, 6 Dec 2023 15:19:47 -0500 Subject: [PATCH 61/66] bump substreams: remove the need to specify block type --- cmd/apps/substreams_tier1.go | 7 ------- cmd/apps/substreams_tier2.go | 7 ------- go.mod | 2 +- go.sum | 4 ++-- 4 files changed, 3 insertions(+), 17 deletions(-) diff --git a/cmd/apps/substreams_tier1.go b/cmd/apps/substreams_tier1.go index d9249ab..d79e986 100644 --- a/cmd/apps/substreams_tier1.go +++ b/cmd/apps/substreams_tier1.go @@ -49,7 +49,6 @@ func RegisterSubstreamsTier1App[B firecore.Block](chain *firecore.Chain[B], root cmd.Flags().Bool("substreams-tier1-subrequests-insecure", false, "Connect to tier2 without checking certificate validity") cmd.Flags().Bool("substreams-tier1-subrequests-plaintext", true, "Connect to tier2 without client in 
plaintext mode") cmd.Flags().Int("substreams-tier1-max-subrequests", 4, "number of parallel subrequests that the tier1 can make to the tier2 per request") - cmd.Flags().String("substreams-tier1-block-type", "", "fully qualified name of the block type to use for the substreams tier1 (i.e. sf.ethereum.v1.Block)") // all substreams registerCommonSubstreamsFlags(cmd) @@ -83,11 +82,6 @@ func RegisterSubstreamsTier1App[B firecore.Block](chain *firecore.Chain[B], root subrequestsInsecure := viper.GetBool("substreams-tier1-subrequests-insecure") subrequestsPlaintext := viper.GetBool("substreams-tier1-subrequests-plaintext") maxSubrequests := viper.GetUint64("substreams-tier1-max-subrequests") - substreamsBlockType := viper.GetString("substreams-tier1-block-type") - - if substreamsBlockType == "" { - return nil, fmt.Errorf("substreams-tier1-block-type is required") - } tracing := os.Getenv("SUBSTREAMS_TRACING") == "modules_exec" @@ -118,7 +112,6 @@ func RegisterSubstreamsTier1App[B firecore.Block](chain *firecore.Chain[B], root StateStoreURL: stateStoreURL, StateStoreDefaultTag: stateStoreDefaultTag, StateBundleSize: stateBundleSize, - BlockType: substreamsBlockType, MaxSubrequests: maxSubrequests, SubrequestsEndpoint: subrequestsEndpoint, SubrequestsInsecure: subrequestsInsecure, diff --git a/cmd/apps/substreams_tier2.go b/cmd/apps/substreams_tier2.go index 6b77255..3a667ce 100644 --- a/cmd/apps/substreams_tier2.go +++ b/cmd/apps/substreams_tier2.go @@ -42,7 +42,6 @@ func RegisterSubstreamsTier2App[B firecore.Block](chain *firecore.Chain[B], root RegisterFlags: func(cmd *cobra.Command) error { cmd.Flags().String("substreams-tier2-grpc-listen-addr", firecore.SubstreamsTier2GRPCServingAddr, "Address on which the substreams tier2 will listen. 
Default is plain-text, appending a '*' to the end to jkkkj") cmd.Flags().String("substreams-tier2-discovery-service-url", "", "URL to advertise presence to the grpc discovery service") //traffic-director://xds?vpc_network=vpc-global&use_xds_reds=true - cmd.Flags().String("substreams-tier2-block-type", "", "fully qualified name of the block type to use for the substreams tier1 (i.e. sf.ethereum.v1.Block)") // all substreams registerCommonSubstreamsFlags(cmd) @@ -64,11 +63,6 @@ func RegisterSubstreamsTier2App[B firecore.Block](chain *firecore.Chain[B], root stateStoreDefaultTag := viper.GetString("substreams-state-store-default-tag") stateBundleSize := viper.GetUint64("substreams-state-bundle-size") - substreamsBlockType := viper.GetString("substreams-tier2-block-type") - - if substreamsBlockType == "" { - return nil, fmt.Errorf("substreams-tier2-block-type is required") - } tracing := os.Getenv("SUBSTREAMS_TRACING") == "modules_exec" @@ -96,7 +90,6 @@ func RegisterSubstreamsTier2App[B firecore.Block](chain *firecore.Chain[B], root StateStoreURL: stateStoreURL, StateStoreDefaultTag: stateStoreDefaultTag, StateBundleSize: stateBundleSize, - BlockType: substreamsBlockType, WASMExtensions: wasmExtensions, PipelineOptions: pipelineOptioner, diff --git a/go.mod b/go.mod index c4f99d0..f9816f7 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,7 @@ require ( github.com/streamingfast/logging v0.0.0-20230608130331-f22c91403091 github.com/streamingfast/pbgo v0.0.6-0.20231120172814-537d034aad5e github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0 - github.com/streamingfast/substreams v1.1.23-0.20231206180647-a75a8a462609 + github.com/streamingfast/substreams v1.1.23-0.20231206201840-a4bf83159580 github.com/stretchr/testify v1.8.4 github.com/test-go/testify v1.1.4 go.uber.org/multierr v1.10.0 diff --git a/go.sum b/go.sum index 247f353..8792eca 100644 --- a/go.sum +++ b/go.sum @@ -621,8 +621,8 @@ github.com/streamingfast/shutter v1.5.0 
h1:NpzDYzj0HVpSiDJVO/FFSL6QIK/YKOxY0gJAt github.com/streamingfast/shutter v1.5.0/go.mod h1:B/T6efqdeMGbGwjzPS1ToXzYZI4kDzI5/u4I+7qbjY8= github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0 h1:Y15G1Z4fpEdm2b+/70owI7TLuXadlqBtGM7rk4Hxrzk= github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0/go.mod h1:/Rnz2TJvaShjUct0scZ9kKV2Jr9/+KBAoWy4UMYxgv4= -github.com/streamingfast/substreams v1.1.23-0.20231206180647-a75a8a462609 h1:Fqkzbca2JPg4kKVfrSyPJk0hGGbkJcxSsxgWKwh75KA= -github.com/streamingfast/substreams v1.1.23-0.20231206180647-a75a8a462609/go.mod h1:fCC3pGTYMi0N4VhJjdJPQydefJpY+tsY9BzWxDi152k= +github.com/streamingfast/substreams v1.1.23-0.20231206201840-a4bf83159580 h1:xcQBdcR4DgAeWKe9qUP6AVO4vlsg/3ceBbuOHeNdoEA= +github.com/streamingfast/substreams v1.1.23-0.20231206201840-a4bf83159580/go.mod h1:fCC3pGTYMi0N4VhJjdJPQydefJpY+tsY9BzWxDi152k= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= From 6665daf5b4eeb91dd5a043a857a2f838372292f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Duchesneau?= Date: Wed, 6 Dec 2023 15:29:54 -0500 Subject: [PATCH 62/66] bump substreams: fix blockType getter on tier1 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f9816f7..04a569b 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,7 @@ require ( github.com/streamingfast/logging v0.0.0-20230608130331-f22c91403091 github.com/streamingfast/pbgo v0.0.6-0.20231120172814-537d034aad5e github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0 - github.com/streamingfast/substreams v1.1.23-0.20231206201840-a4bf83159580 + github.com/streamingfast/substreams v1.1.23-0.20231206202935-7873b84446e6 github.com/stretchr/testify v1.8.4 github.com/test-go/testify v1.1.4 
go.uber.org/multierr v1.10.0 diff --git a/go.sum b/go.sum index 8792eca..995b6b8 100644 --- a/go.sum +++ b/go.sum @@ -621,8 +621,8 @@ github.com/streamingfast/shutter v1.5.0 h1:NpzDYzj0HVpSiDJVO/FFSL6QIK/YKOxY0gJAt github.com/streamingfast/shutter v1.5.0/go.mod h1:B/T6efqdeMGbGwjzPS1ToXzYZI4kDzI5/u4I+7qbjY8= github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0 h1:Y15G1Z4fpEdm2b+/70owI7TLuXadlqBtGM7rk4Hxrzk= github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0/go.mod h1:/Rnz2TJvaShjUct0scZ9kKV2Jr9/+KBAoWy4UMYxgv4= -github.com/streamingfast/substreams v1.1.23-0.20231206201840-a4bf83159580 h1:xcQBdcR4DgAeWKe9qUP6AVO4vlsg/3ceBbuOHeNdoEA= -github.com/streamingfast/substreams v1.1.23-0.20231206201840-a4bf83159580/go.mod h1:fCC3pGTYMi0N4VhJjdJPQydefJpY+tsY9BzWxDi152k= +github.com/streamingfast/substreams v1.1.23-0.20231206202935-7873b84446e6 h1:8ffFzWSpxOM5sskCOq6T+COsosIxzXS/RLfroaxYJOk= +github.com/streamingfast/substreams v1.1.23-0.20231206202935-7873b84446e6/go.mod h1:fCC3pGTYMi0N4VhJjdJPQydefJpY+tsY9BzWxDi152k= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= From b6771e01c482a9b7db26072638024dd22efeef55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Duchesneau?= Date: Thu, 7 Dec 2023 15:32:01 -0500 Subject: [PATCH 63/66] fix merger to convert legacy blocks --- merger/merger_io.go | 39 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 3 deletions(-) diff --git a/merger/merger_io.go b/merger/merger_io.go index 647835e..aeec9d1 100644 --- a/merger/merger_io.go +++ b/merger/merger_io.go @@ -1,11 +1,11 @@ package merger import ( + "bytes" "context" "errors" "fmt" "io" - "io/ioutil" "sort" "strconv" "strings" @@ -174,6 +174,34 @@ func (s *DStoreIO) WalkOneBlockFiles(ctx context.Context, 
lowestBlock uint64, ca } +// fixLegacyBlock reads the header and looks for "Version 0", rewriting to Version 1 on the fly if needed +func fixLegacyBlock(in []byte) ([]byte, error) { + dbinReader, err := bstream.NewDBinBlockReader(bytes.NewReader(in)) + if err != nil { + return nil, fmt.Errorf("creating block reader in fixLegacyBlock: %w", err) + } + + if dbinReader.Header.Version != 0 { + return in, nil + } + + blk, err := dbinReader.Read() + if err != nil { + return nil, err + } + + out := new(bytes.Buffer) + writer, err := bstream.NewDBinBlockWriter(out) + if err != nil { + return nil, err + } + if err := writer.Write(blk); err != nil { + return nil, err + } + return out.Bytes(), nil + +} + func (s *DStoreIO) DownloadOneBlockFile(ctx context.Context, oneBlockFile *bstream.OneBlockFile) (data []byte, err error) { for filename := range oneBlockFile.Filenames { // will try to get MemoizeData from any of those files var out io.ReadCloser @@ -190,9 +218,14 @@ func (s *DStoreIO) DownloadOneBlockFile(ctx context.Context, oneBlockFile *bstre default: } - data, err = ioutil.ReadAll(out) + data, err = io.ReadAll(out) + if err != nil { + continue + } + + data, err = fixLegacyBlock(data) if err == nil { - return data, nil + break } } From f094747223e23e7074de78b4293398b875f12954 Mon Sep 17 00:00:00 2001 From: billettc Date: Thu, 7 Dec 2023 16:52:53 -0500 Subject: [PATCH 64/66] fix tests --- merger/bundler_test.go | 60 ++++++++++++++++++++++++++++++++-------- merger/merger_io.go | 12 ++++++-- merger/merger_io_test.go | 58 ++++++++++++++++++++++++++++++++------ 3 files changed, 107 insertions(+), 23 deletions(-) diff --git a/merger/bundler_test.go b/merger/bundler_test.go index f06df72..327762d 100644 --- a/merger/bundler_test.go +++ b/merger/bundler_test.go @@ -16,42 +16,78 @@ import ( "github.com/stretchr/testify/require" ) +func setPbBlock(obf *bstream.OneBlockFile) { + //pbb := &pbbstream.Block{ + // Number: obf.Num, + //} + //out, err := proto.Marshal(pbb) + //if err != 
nil { + // panic(err) + //} + //obf.MemoizeData = out +} + var block98 = func() *bstream.OneBlockFile { - return bstream.MustNewOneBlockFile("0000000098-0000000000000098a-0000000000000097a-96-suffix") + obf := bstream.MustNewOneBlockFile("0000000098-0000000000000098a-0000000000000097a-96-suffix") + setPbBlock(obf) + return obf } var block99 = func() *bstream.OneBlockFile { - return bstream.MustNewOneBlockFile("0000000099-0000000000000099a-0000000000000098a-97-suffix") + obf := bstream.MustNewOneBlockFile("0000000099-0000000000000099a-0000000000000098a-97-suffix") + setPbBlock(obf) + return obf + } var block100 = func() *bstream.OneBlockFile { - return bstream.MustNewOneBlockFile("0000000100-0000000000000100a-0000000000000099a-98-suffix") + obf := bstream.MustNewOneBlockFile("0000000100-0000000000000100a-0000000000000099a-98-suffix") + setPbBlock(obf) + return obf } var block101 = func() *bstream.OneBlockFile { - return bstream.MustNewOneBlockFile("0000000101-0000000000000101a-0000000000000100a-99-suffix") + obf := bstream.MustNewOneBlockFile("0000000101-0000000000000101a-0000000000000100a-99-suffix") + setPbBlock(obf) + return obf } var block102Final100 = func() *bstream.OneBlockFile { - return bstream.MustNewOneBlockFile("0000000102-0000000000000102a-0000000000000101a-100-suffix") + obf := bstream.MustNewOneBlockFile("0000000102-0000000000000102a-0000000000000101a-100-suffix") + setPbBlock(obf) + return obf } var block103Final101 = func() *bstream.OneBlockFile { - return bstream.MustNewOneBlockFile("0000000103-0000000000000103a-0000000000000102a-101-suffix") + obf := bstream.MustNewOneBlockFile("0000000103-0000000000000103a-0000000000000102a-101-suffix") + setPbBlock(obf) + return obf } var block104Final102 = func() *bstream.OneBlockFile { - return bstream.MustNewOneBlockFile("0000000104-0000000000000104a-0000000000000103a-102-suffix") + obf := bstream.MustNewOneBlockFile("0000000104-0000000000000104a-0000000000000103a-102-suffix") + setPbBlock(obf) + return obf } 
var block105Final103 = func() *bstream.OneBlockFile { - return bstream.MustNewOneBlockFile("0000000105-0000000000000105a-0000000000000104a-103-suffix") + obf := bstream.MustNewOneBlockFile("0000000105-0000000000000105a-0000000000000104a-103-suffix") + setPbBlock(obf) + return obf } var block106Final104 = func() *bstream.OneBlockFile { - return bstream.MustNewOneBlockFile("0000000106-0000000000000106a-0000000000000105a-104-suffix") + obf := bstream.MustNewOneBlockFile("0000000106-0000000000000106a-0000000000000105a-104-suffix") + setPbBlock(obf) + return obf } var block507Final106 = func() *bstream.OneBlockFile { - return bstream.MustNewOneBlockFile("0000000507-0000000000000507a-0000000000000106a-106-suffix") + obf := bstream.MustNewOneBlockFile("0000000507-0000000000000507a-0000000000000106a-106-suffix") + setPbBlock(obf) + return obf } var block608Final507 = func() *bstream.OneBlockFile { - return bstream.MustNewOneBlockFile("0000000608-0000000000000608a-0000000000000507a-507-suffix") + obf := bstream.MustNewOneBlockFile("0000000608-0000000000000608a-0000000000000507a-507-suffix") + setPbBlock(obf) + return obf } var block609Final608 = func() *bstream.OneBlockFile { - return bstream.MustNewOneBlockFile("0000000609-0000000000000607a-0000000000000608a-608-suffix") + obf := bstream.MustNewOneBlockFile("0000000609-0000000000000609a-0000000000000608a-608-suffix") + setPbBlock(obf) + return obf } func TestNewBundler(t *testing.T) { diff --git a/merger/merger_io.go b/merger/merger_io.go index aeec9d1..a35aae2 100644 --- a/merger/merger_io.go +++ b/merger/merger_io.go @@ -185,9 +185,14 @@ func fixLegacyBlock(in []byte) ([]byte, error) { return in, nil } - blk, err := dbinReader.Read() + reader, err := bstream.NewDBinBlockReader(bytes.NewReader(in)) if err != nil { - return nil, err + return nil, fmt.Errorf("creating block reader in fixLegacyBlock: %w", err) + } + + blk, err := reader.Read() + if err != nil { + return nil, fmt.Errorf("reading block in fixLegacyBlock: %w", 
err) } out := new(bytes.Buffer) @@ -195,8 +200,9 @@ func fixLegacyBlock(in []byte) ([]byte, error) { if err != nil { return nil, err } + if err := writer.Write(blk); err != nil { - return nil, err + return nil, fmt.Errorf("writing block in fixLegacyBlock: %w", err) } return out.Bytes(), nil diff --git a/merger/merger_io_test.go b/merger/merger_io_test.go index 56479ff..f3552c5 100644 --- a/merger/merger_io_test.go +++ b/merger/merger_io_test.go @@ -15,12 +15,18 @@ package merger import ( + "bytes" "context" "io" - "strings" "testing" "time" + "google.golang.org/protobuf/types/known/anypb" + + "github.com/streamingfast/firehose-core/test" + + pbbstream "github.com/streamingfast/bstream/pb/sf/bstream/v1" + "github.com/streamingfast/bstream" "github.com/streamingfast/dstore" "github.com/stretchr/testify/require" @@ -57,7 +63,7 @@ func TestNewDstore(t *testing.T) { require.False(t, ok) } -func newDStoreIO( +func newTestDStoreIO( oneBlocksStore dstore.Store, mergedBlocksStore dstore.Store, ) IOInterface { @@ -81,7 +87,25 @@ func TestMergerIO_MergeUploadPerfect(t *testing.T) { if len(filesRead) == 2 { close(done) } - return io.NopCloser(strings.NewReader(string(testOneBlockHeader))), nil + + tb := &test.Block{ + Number: 9999, + } + anyB, err := anypb.New(tb) + require.NoError(t, err) + + pbb := &pbbstream.Block{ + Number: 9999, + Payload: anyB, + } + out := new(bytes.Buffer) + w, err := bstream.NewDBinBlockWriter(out) + require.NoError(t, err) + + err = w.Write(pbb) + require.NoError(t, err) + + return io.NopCloser(out), nil } mergedBlocksStore := dstore.NewMockStore( func(base string, f io.Reader) (err error) { @@ -91,7 +115,7 @@ func TestMergerIO_MergeUploadPerfect(t *testing.T) { }, ) - mio := newDStoreIO(oneBlockStore, mergedBlocksStore) + mio := newTestDStoreIO(oneBlockStore, mergedBlocksStore) err := mio.MergeAndStore(context.Background(), 100, files) require.NoError(t, err) @@ -130,7 +154,25 @@ func TestMergerIO_MergeUploadFiltered(t *testing.T) { if 
len(filesRead) == 2 { close(done) } - return io.NopCloser(strings.NewReader(string(testOneBlockHeader))), nil + tb := &test.Block{ + Number: 9999, + } + anyB, err := anypb.New(tb) + require.NoError(t, err) + + pbb := &pbbstream.Block{ + Number: 9999, + Payload: anyB, + } + out := new(bytes.Buffer) + w, err := bstream.NewDBinBlockWriter(out) + require.NoError(t, err) + + err = w.Write(pbb) + require.NoError(t, err) + + return io.NopCloser(out), nil + } mergedBlocksStore := dstore.NewMockStore( func(base string, f io.Reader) (err error) { @@ -140,7 +182,7 @@ func TestMergerIO_MergeUploadFiltered(t *testing.T) { }, ) - mio := newDStoreIO(oneBlockStore, mergedBlocksStore) + mio := newTestDStoreIO(oneBlockStore, mergedBlocksStore) err := mio.MergeAndStore(context.Background(), 100, files) require.NoError(t, err) @@ -167,7 +209,7 @@ func TestMergerIO_MergeUploadNoFiles(t *testing.T) { oneBlockStore := dstore.NewMockStore(nil) mergedBlocksStore := dstore.NewMockStore(nil) - mio := newDStoreIO(oneBlockStore, mergedBlocksStore) + mio := newTestDStoreIO(oneBlockStore, mergedBlocksStore) err := mio.MergeAndStore(context.Background(), 114, files) require.Error(t, err) @@ -181,7 +223,7 @@ func TestMergerIO_MergeUploadFilteredToZero(t *testing.T) { } oneBlockStore := dstore.NewMockStore(nil) mergedBlocksStore := dstore.NewMockStore(nil) - mio := newDStoreIO(oneBlockStore, mergedBlocksStore) + mio := newTestDStoreIO(oneBlockStore, mergedBlocksStore) b100.MemoizeData = append(testOneBlockHeader, []byte{0x0, 0x1, 0x2, 0x3}...) b101.MemoizeData = append(testOneBlockHeader, []byte{0x0, 0x1, 0x2, 0x3}...) 
From 6db53248f6e7c84c8efb90acb99a85f36104aebd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Duchesneau?= Date: Fri, 8 Dec 2023 10:08:18 -0500 Subject: [PATCH 65/66] remove CoreBinaryEnabled from chain definition, now only part of the packagE --- chain.go | 3 --- cmd/firecore/main.go | 2 +- cmd/tools/print/tools_print.go | 2 +- utils.go | 2 ++ 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/chain.go b/chain.go index 20ced95..bb45d67 100644 --- a/chain.go +++ b/chain.go @@ -151,9 +151,6 @@ type Chain[B Block] struct { BlockEncoder BlockEncoder RegisterSubstreamsExtensions func(chain *Chain[B]) ([]substreams.Extension, error) - - // CoreBinaryEnabled is a flag that when set to true indicates that `firecore` binary is being run directly? (not through firexxx) - CoreBinaryEnabled bool } type ToolsConfig[B Block] struct { diff --git a/cmd/firecore/main.go b/cmd/firecore/main.go index 82ee3aa..8f23b11 100644 --- a/cmd/firecore/main.go +++ b/cmd/firecore/main.go @@ -7,6 +7,7 @@ import ( ) func main() { + firecore.RunningFromFirecore = true fhCMD.Main(&firecore.Chain[*pbbstream.Block]{ ShortName: "core", //used to compose the binary name LongName: "CORE", //only used to compose cmd title and description @@ -14,7 +15,6 @@ func main() { FullyQualifiedModule: "github.com/streamingfast/firehose-core", Version: version, BlockFactory: func() firecore.Block { return new(pbbstream.Block) }, - CoreBinaryEnabled: true, ConsoleReaderFactory: firecore.NewConsoleReader, Tools: &firecore.ToolsConfig[*pbbstream.Block]{}, }) diff --git a/cmd/tools/print/tools_print.go b/cmd/tools/print/tools_print.go index 1b99e17..eb89ab5 100644 --- a/cmd/tools/print/tools_print.go +++ b/cmd/tools/print/tools_print.go @@ -225,7 +225,7 @@ func displayBlock[B firecore.Block](pbBlock *pbbstream.Block, chain *firecore.Ch return nil } - if !chain.CoreBinaryEnabled { + if !firecore.RunningFromFirecore { // since we are running via the chain specific binary (i.e. 
fireeth) we can use a BlockFactory marshallableBlock := chain.BlockFactory() diff --git a/utils.go b/utils.go index 458e66e..2b1a664 100644 --- a/utils.go +++ b/utils.go @@ -10,6 +10,8 @@ import ( "github.com/streamingfast/cli" ) +var RunningFromFirecore = false + func mkdirStorePathIfLocal(storeURL string) (err error) { if dirs := getDirsToMake(storeURL); len(dirs) > 0 { err = MakeDirs(dirs) From 574234d4ed5ddffe141b8a82692b98d83ee89302 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Duchesneau?= Date: Fri, 8 Dec 2023 10:37:32 -0500 Subject: [PATCH 66/66] bump libraries one last time, bump changelog for v1.0.0 --- CHANGELOG.md | 36 ++++++++++++++++++++++++++++ go.mod | 6 ++--- go.sum | 12 +++++----- protoregistry/generator/generator.go | 2 +- protoregistry/well_known.go | 1 - 5 files changed, 46 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index faeb5a4..55b3bad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,42 @@ Operators, you should copy/paste content of this content straight to your projec If you were at `firehose-core` version `1.0.0` and are bumping to `1.1.0`, you should copy the content between those 2 version to your own repository, replacing placeholder value `fire{chain}` with your chain's own binary. +## v1.0.0 + +This is a major release. + +### Operators + +> [!IMPORTANT] +> When upgrading your stack to firehose-core v1.0.0, be sure to upgrade all components simultaneously because the block encapsulation format has changed. +> Blocks that are merged using the new merger will not be readable by previous versions. + +### Added + +* New binary `firecore` which can run all firehose components (`reader`, `reader-stdin`, `merger`, `relayer`, `firehose`, `substreams-tier1|2`) in a chain-agnostic way. This is not mandatory (it can still be used as a library) but strongly suggested when possible. 
+ +* Current Limitations on Ethereum: + - The firecore `firehose` app does not support transforms (filters, header-only --for graph-node compatibility--) so you will want to continue running this app from `fireeth` + - The firecore `substreams` apps do not support eth_calls so you will want to continue running them from `fireeth` + - The firecore `reader` does not support the block format output by the current geth firehose instrumentation, so you will want to continue running it from `fireeth` + +* New BlockPoller library to facilitate the implementation of rpc-poller-based chains, taking care of managing reorgs + +* Considering that firehose-core is chain-agnostic, it's not aware of the different of the different block types. To be able to use tools around block decoding/printing, + there are two ways to provide the type definition: + 1. the 'protoregistry' package contains well-known block type definitions (ethereum, near, solana, bitcoin...), you won't need to provide anything in those cases. + 2. for other types, you can provide additional protobuf files using `--proto-path` flag + +### Changed + +* Merged blocks storage format has been changed. Current blocks will continue to be decoded, but new merged blocks will not be readable by previous software versions. +* The code from the following repositories have been merged into this repo. They will soon be archived. 
+ * github.com/streamingfast/node-manager + * github.com/streamingfast/merger + * github.com/streamingfast/relayer + * github.com/streamingfast/firehose + * github.com/streamingfast/index-builder + ## v0.2.3 ### Fixed diff --git a/go.mod b/go.mod index 04a569b..f456aa2 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 - github.com/streamingfast/bstream v0.0.2-0.20231206184105-db4d51a0e07e + github.com/streamingfast/bstream v0.0.2-0.20231208141508-a50f2c686c91 github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 github.com/streamingfast/dauth v0.0.0-20231120142446-843f4e045cc2 github.com/streamingfast/dbin v0.9.1-0.20231117225723-59790c798e2c @@ -28,9 +28,9 @@ require ( github.com/streamingfast/dstore v0.1.1-0.20230620124109-3924b3b36c77 github.com/streamingfast/jsonpb v0.0.0-20210811021341-3670f0aa02d0 github.com/streamingfast/logging v0.0.0-20230608130331-f22c91403091 - github.com/streamingfast/pbgo v0.0.6-0.20231120172814-537d034aad5e + github.com/streamingfast/pbgo v0.0.6-0.20231208140754-ed2bd10b96ee github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0 - github.com/streamingfast/substreams v1.1.23-0.20231206202935-7873b84446e6 + github.com/streamingfast/substreams v1.1.23-0.20231208153603-95496e92a415 github.com/stretchr/testify v1.8.4 github.com/test-go/testify v1.1.4 go.uber.org/multierr v1.10.0 diff --git a/go.sum b/go.sum index 995b6b8..1e0163e 100644 --- a/go.sum +++ b/go.sum @@ -578,8 +578,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/streamingfast/bstream v0.0.2-0.20231206184105-db4d51a0e07e 
h1:pKET4WQZl0DFgTrkAR+ipS7XoGNbpdtL6HS/3VcHseg= -github.com/streamingfast/bstream v0.0.2-0.20231206184105-db4d51a0e07e/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= +github.com/streamingfast/bstream v0.0.2-0.20231208141508-a50f2c686c91 h1:v+r1jME46+c6jNqRfD2nSFXTY59wwNxCoxGS826yjTQ= +github.com/streamingfast/bstream v0.0.2-0.20231208141508-a50f2c686c91/go.mod h1:08GVb+DXyz6jVNIsbf+2zlaC81UeEGu5o1h49KrSR3Y= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80 h1:UxJUTcEVkdZy8N77E3exz0iNlgQuxl4m220GPvzdZ2s= github.com/streamingfast/cli v0.0.4-0.20230825151644-8cc84512cd80/go.mod h1:QxjVH73Lkqk+mP8bndvhMuQDUINfkgsYhdCH/5TJFKI= github.com/streamingfast/dauth v0.0.0-20231120142446-843f4e045cc2 h1:g4mG6ZCy3/XtcsZXfOHrQOsjVGoX9uTc/QlemaPV4EE= @@ -611,8 +611,8 @@ github.com/streamingfast/opaque v0.0.0-20210811180740-0c01d37ea308 h1:xlWSfi1BoP github.com/streamingfast/opaque v0.0.0-20210811180740-0c01d37ea308/go.mod h1:K1p8Bj/wG34KJvYzPUqtzpndffmpkrVY11u2hkyxCWQ= github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef h1:9IVFHRsqvI+vKJwgF1OMV6L55jHbaV/ZLoU4IAG/dME= github.com/streamingfast/overseer v0.2.1-0.20210326144022-ee491780e3ef/go.mod h1:cq8CvbZ3ioFmGrHokSAJalS0lC+pVXLKhITScItUGXY= -github.com/streamingfast/pbgo v0.0.6-0.20231120172814-537d034aad5e h1:8hoT2QUwh+YNgIcCPux9xd4u9XojHR8hbyAzz7rQuEM= -github.com/streamingfast/pbgo v0.0.6-0.20231120172814-537d034aad5e/go.mod h1:fZuijmeFrqxW2YnnXmGrkQpUTHx3eHCaJUKwdvXAYKM= +github.com/streamingfast/pbgo v0.0.6-0.20231208140754-ed2bd10b96ee h1:ydH7Ii6P1JIx1bNRO1sFH2VCAr0iZQ8MCHUPBo8i0dY= +github.com/streamingfast/pbgo v0.0.6-0.20231208140754-ed2bd10b96ee/go.mod h1:eDQjKBYg9BWE2BTaV3UZeLZ5xw05+ywA9RCFTmM1w5Y= github.com/streamingfast/protoreflect v0.0.0-20231205191344-4b629d20ce8d h1:33VIARqUqBUKXJcuQoOS1rVSms54tgxhhNCmrLptpLg= github.com/streamingfast/protoreflect v0.0.0-20231205191344-4b629d20ce8d/go.mod h1:aBJivEdekmFWYSQ29EE/fN9IanJWJXbtjy3ky0XD/jE= github.com/streamingfast/sf-tracing 
v0.0.0-20230616174903-cd2ade641ca9 h1:YRwpVvLYa+FEJlTy0S7mk4UptYjk5zac+A+ZE1phOeA= @@ -621,8 +621,8 @@ github.com/streamingfast/shutter v1.5.0 h1:NpzDYzj0HVpSiDJVO/FFSL6QIK/YKOxY0gJAt github.com/streamingfast/shutter v1.5.0/go.mod h1:B/T6efqdeMGbGwjzPS1ToXzYZI4kDzI5/u4I+7qbjY8= github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0 h1:Y15G1Z4fpEdm2b+/70owI7TLuXadlqBtGM7rk4Hxrzk= github.com/streamingfast/snapshotter v0.0.0-20230316190750-5bcadfde44d0/go.mod h1:/Rnz2TJvaShjUct0scZ9kKV2Jr9/+KBAoWy4UMYxgv4= -github.com/streamingfast/substreams v1.1.23-0.20231206202935-7873b84446e6 h1:8ffFzWSpxOM5sskCOq6T+COsosIxzXS/RLfroaxYJOk= -github.com/streamingfast/substreams v1.1.23-0.20231206202935-7873b84446e6/go.mod h1:fCC3pGTYMi0N4VhJjdJPQydefJpY+tsY9BzWxDi152k= +github.com/streamingfast/substreams v1.1.23-0.20231208153603-95496e92a415 h1:objc6hYL+wmIK92EIu+Z72WAqioQWow2oy2KE9wEB6A= +github.com/streamingfast/substreams v1.1.23-0.20231208153603-95496e92a415/go.mod h1:fCC3pGTYMi0N4VhJjdJPQydefJpY+tsY9BzWxDi152k= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= diff --git a/protoregistry/generator/generator.go b/protoregistry/generator/generator.go index b171fec..8aad4cf 100644 --- a/protoregistry/generator/generator.go +++ b/protoregistry/generator/generator.go @@ -39,7 +39,7 @@ func main() { authToken := os.Getenv("BUFBUILD_AUTH_TOKEN") if authToken == "" { - log.Fatalf("Please set the BUFBUILD_AUTH_TOKEN environment variable, to generate well known registry") + log.Fatalf("You must set the BUFBUILD_AUTH_TOKEN environment variable to generate well known registry. 
See https://buf.build/docs/bsr/authentication") return } diff --git a/protoregistry/well_known.go b/protoregistry/well_known.go index f9bc450..ad43446 100644 --- a/protoregistry/well_known.go +++ b/protoregistry/well_known.go @@ -62,7 +62,6 @@ func init() { fdmap, err := desc.CreateFileDescriptors(files) if err != nil { panic(fmt.Errorf("failed to create file descriptor map: %w", err)) - return } for _, fd := range fdmap {