From c446f0107450178a6513ffa12419b9bf8ed3a1b7 Mon Sep 17 00:00:00 2001 From: Manu NALEPA Date: Thu, 9 Jan 2025 15:43:55 +0100 Subject: [PATCH] peerDAS: Decouple network subnets from das-core. https://github.com/ethereum/consensus-specs/pull/3832/ --- beacon-chain/blockchain/process_block.go | 32 +- beacon-chain/core/peerdas/helpers.go | 206 ++++++++----- beacon-chain/core/peerdas/helpers_test.go | 67 ++--- beacon-chain/das/availability_columns.go | 18 +- beacon-chain/das/availability_columns_test.go | 8 +- beacon-chain/p2p/custody.go | 103 +++---- beacon-chain/p2p/custody_test.go | 28 +- beacon-chain/p2p/discovery.go | 22 +- beacon-chain/p2p/discovery_test.go | 30 +- beacon-chain/p2p/interfaces.go | 6 +- beacon-chain/p2p/subnets.go | 70 +++-- beacon-chain/p2p/testing/fuzz_p2p.go | 6 +- beacon-chain/p2p/testing/p2p.go | 12 +- beacon-chain/rpc/eth/config/handlers_test.go | 10 +- beacon-chain/rpc/lookup/blocker.go | 19 +- beacon-chain/sync/data_columns_reconstruct.go | 38 ++- beacon-chain/sync/data_columns_sampling.go | 242 +++++++++------ .../sync/data_columns_sampling_test.go | 276 +++++++++--------- .../sync/initial-sync/blocks_fetcher.go | 18 +- .../sync/initial-sync/blocks_fetcher_test.go | 20 +- .../sync/initial-sync/blocks_fetcher_utils.go | 44 +-- beacon-chain/sync/initial-sync/service.go | 21 +- beacon-chain/sync/pending_blocks_queue.go | 17 +- .../sync/rpc_beacon_blocks_by_root.go | 31 +- .../sync/rpc_data_column_sidecars_by_range.go | 19 +- .../sync/rpc_data_column_sidecars_by_root.go | 15 +- beacon-chain/sync/rpc_metadata.go | 16 +- beacon-chain/sync/rpc_metadata_test.go | 54 ++-- config/params/config.go | 15 +- config/params/loader_test.go | 1 - config/params/mainnet_config.go | 7 +- config/params/network_config.go | 2 +- consensus-types/wrapper/metadata.go | 14 +- .../v1alpha1/metadata/metadata_interfaces.go | 2 +- proto/prysm/v1alpha1/non-core.ssz.go | 12 +- proto/prysm/v1alpha1/p2p_messages.pb.go | 86 +++--- proto/prysm/v1alpha1/p2p_messages.proto | 4 +- .../networking/custody_columns_test.go | 11 - .../{eip7594 => fulu}/networking/BUILD.bazel | 1 + .../fulu/networking/custody_columns_test.go | 11 + .../networking/custody_columns_test.go | 11 - .../{eip7594 => fulu}/networking/BUILD.bazel | 1 + .../fulu/networking/custody_columns_test.go | 11 + .../{eip7594 => fulu}/networking/BUILD.bazel | 2 +- .../networking/custody_columns.go | 14 +- 45 files changed, 934 insertions(+), 719 deletions(-) delete mode 100644 testing/spectest/mainnet/eip7594/networking/custody_columns_test.go rename testing/spectest/mainnet/{eip7594 => fulu}/networking/BUILD.bazel (77%) create mode 100644 testing/spectest/mainnet/fulu/networking/custody_columns_test.go delete mode 100644 testing/spectest/minimal/eip7594/networking/custody_columns_test.go rename testing/spectest/minimal/{eip7594 => fulu}/networking/BUILD.bazel (77%) create mode 100644 testing/spectest/minimal/fulu/networking/custody_columns_test.go rename testing/spectest/shared/{eip7594 => fulu}/networking/BUILD.bazel (94%) rename testing/spectest/shared/{eip7594 => fulu}/networking/custody_columns.go (81%) diff --git a/beacon-chain/blockchain/process_block.go b/beacon-chain/blockchain/process_block.go index 1b82eb38366..6427b860a55 100644 --- a/beacon-chain/blockchain/process_block.go +++ b/beacon-chain/blockchain/process_block.go @@ -652,7 +652,7 @@ func uint64MapToSortedSlice(input map[uint64]bool) []uint64 { } func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error { - 
if signed.Version() < version.Deneb { + if signed.Version() < version.Fulu { return nil } @@ -660,8 +660,12 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, si if block == nil { return errors.New("invalid nil beacon block") } + // We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS - if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) { + blockSlot, currentSlot := block.Slot(), s.CurrentSlot() + blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot) + + if !params.WithinDAPeriod(blockEpoch, currentEpoch) { return nil } @@ -681,20 +685,26 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, si } // All columns to sample need to be available for the block to be considered available. - // https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#subnet-sampling + // https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling nodeID := s.cfg.P2P.NodeID() - subnetSamplingSize := peerdas.SubnetSamplingSize() + custodyGroupSamplingSize := peerdas.CustodyGroupSamplingSize() - colMap, err := peerdas.CustodyColumns(nodeID, subnetSamplingSize) + custodyGroups, err := peerdas.CustodyGroups(nodeID, custodyGroupSamplingSize) if err != nil { - return errors.Wrap(err, "custody columns") + return errors.Wrap(err, "custody groups") } - // colMap represents the data columnns a node is expected to custody. - if len(colMap) == 0 { + // Exit early if the node is not expected to custody any data columns. + if len(custodyGroups) == 0 { return nil } + // Get the custody columns from the groups. + columnsMap, err := peerdas.CustodyColumns(custodyGroups) + if err != nil { + return errors.Wrap(err, "custody columns") + } + // Subscribe to newsly data columns stored in the database. rootIndexChan := make(chan filesystem.RootIndexPair) subscription := s.blobStorage.DataColumnFeed.Subscribe(rootIndexChan) @@ -715,7 +725,7 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, si } // Get a map of data column indices that are not currently available. 
- missingMap, err := missingDataColumns(s.blobStorage, root, colMap) + missingMap, err := missingDataColumns(s.blobStorage, root, columnsMap) if err != nil { return err } @@ -743,10 +753,10 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, si ) numberOfColumns := params.BeaconConfig().NumberOfColumns - colMapCount := uint64(len(colMap)) + colMapCount := uint64(len(columnsMap)) if colMapCount < numberOfColumns { - expected = uint64MapToSortedSlice(colMap) + expected = uint64MapToSortedSlice(columnsMap) } if missingMapCount < numberOfColumns { diff --git a/beacon-chain/core/peerdas/helpers.go b/beacon-chain/core/peerdas/helpers.go index ad6500e0bec..03d324b466f 100644 --- a/beacon-chain/core/peerdas/helpers.go +++ b/beacon-chain/core/peerdas/helpers.go @@ -30,41 +30,41 @@ import ( ) const ( - CustodySubnetCountEnrKey = "csc" + CustodyGroupCountEnrKey = "cgc" ) -// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#the-discovery-domain-discv5 -type Csc uint64 +// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#the-discovery-domain-discv5 +type Cgc uint64 -func (Csc) ENRKey() string { return CustodySubnetCountEnrKey } +func (Cgc) ENRKey() string { return CustodyGroupCountEnrKey } var ( // Custom errors - errCustodySubnetCountTooLarge = errors.New("custody subnet count larger than data column sidecar subnet count") - errIndexTooLarge = errors.New("column index is larger than the specified columns count") - errMismatchLength = errors.New("mismatch in the length of the commitments and proofs") - errRecordNil = errors.New("record is nil") - errCannotLoadCustodySubnetCount = errors.New("cannot load the custody subnet count from peer") + errCustodyGroupCountTooLarge = errors.New("custody group count too large") + errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen") + errIndexTooLarge = errors.New("column index is larger than the specified columns count") + errMismatchLength = errors.New("mismatch in the length of the commitments and proofs") + errRecordNil = errors.New("record is nil") + errCannotLoadCustodyGroupCount = errors.New("cannot load the custody group count from peer") // maxUint256 is the maximum value of a uint256. maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64} ) -// CustodyColumnSubnets computes the subnets the node should participate in for custody. -func CustodyColumnSubnets(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) { - dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount +// CustodyGroups computes the custody groups the node should participate in for custody. +// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#get_custody_groups +func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) (map[uint64]bool, error) { + numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups - // Check if the custody subnet count is larger than the data column sidecar subnet count. - if custodySubnetCount > dataColumnSidecarSubnetCount { - return nil, errCustodySubnetCountTooLarge + // Check if the custody group count is larger than the number of custody groups. + if custodyGroupCount > numberOfCustodyGroup { + return nil, errCustodyGroupCountTooLarge } - // First, compute the subnet IDs that the node should participate in. 
- subnetIds := make(map[uint64]bool, custodySubnetCount) - + custodyGroups := make(map[uint64]bool, custodyGroupCount) one := uint256.NewInt(1) - for currentId := new(uint256.Int).SetBytes(nodeId.Bytes()); uint64(len(subnetIds)) < custodySubnetCount; currentId.Add(currentId, one) { + for currentId := new(uint256.Int).SetBytes(nodeId.Bytes()); uint64(len(custodyGroups)) < custodyGroupCount; currentId.Add(currentId, one) { // Convert to big endian bytes. currentIdBytesBigEndian := currentId.Bytes32() @@ -74,11 +74,11 @@ func CustodyColumnSubnets(nodeId enode.ID, custodySubnetCount uint64) (map[uint6 // Hash the result. hashedCurrentId := hash.Hash(currentIdBytesLittleEndian) - // Get the subnet ID. - subnetId := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % dataColumnSidecarSubnetCount + // Get the custody group ID. + custodyGroupId := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % numberOfCustodyGroup - // Add the subnet to the map. - subnetIds[subnetId] = true + // Add the custody group to the map. + custodyGroups[custodyGroupId] = true // Overflow prevention. if currentId.Cmp(maxUint256) == 0 { @@ -86,37 +86,100 @@ func CustodyColumnSubnets(nodeId enode.ID, custodySubnetCount uint64) (map[uint6 } } - return subnetIds, nil + // Final check. + if uint64(len(custodyGroups)) != custodyGroupCount { + return nil, errWrongComputedCustodyGroupCount + } + + return custodyGroups, nil } -// CustodyColumns computes the columns the node should custody. -// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#helper-functions -func CustodyColumns(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) { - dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount +// ComputeColumnsForCustodyGroup computes the columns for a given custody group. +// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#compute_columns_for_custody_group +func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) { + beaconConfig := params.BeaconConfig() + numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups - // Compute the custody subnets. - subnetIds, err := CustodyColumnSubnets(nodeId, custodySubnetCount) - if err != nil { - return nil, errors.Wrap(err, "custody subnets") + if custodyGroup > numberOfCustodyGroup { + return nil, errCustodyGroupCountTooLarge + } + + numberOfColumns := beaconConfig.NumberOfColumns + + columnsPerGroup := numberOfColumns / numberOfCustodyGroup + + columns := make([]uint64, 0, columnsPerGroup) + for i := range columnsPerGroup { + column := numberOfCustodyGroup*i + custodyGroup + columns = append(columns, column) } - columnsPerSubnet := fieldparams.NumberOfColumns / dataColumnSidecarSubnetCount + return columns, nil +} + +// ComputeCustodyGroupForColumn computes the custody group for a given column. +// It is the reciprocal function of ComputeColumnsForCustodyGroup. +func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) { + beaconConfig := params.BeaconConfig() + numberOfColumns := beaconConfig.NumberOfColumns + + if columnIndex >= numberOfColumns { + return 0, errIndexTooLarge + } + + numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups + columnsPerGroup := numberOfColumns / numberOfCustodyGroups + + return columnIndex / columnsPerGroup, nil +} + +// ComputeSubnetForDataColumnSidecar computes the subnet for a data column sidecar. 
+// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar +func ComputeSubnetForDataColumnSidecar(columnIndex uint64) uint64 { + dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount + return columnIndex % dataColumnSidecarSubnetCount +} - // Knowing the subnet ID and the number of columns per subnet, select all the columns the node should custody. - // Columns belonging to the same subnet are contiguous. - columnIndices := make(map[uint64]bool, custodySubnetCount*columnsPerSubnet) - for i := uint64(0); i < columnsPerSubnet; i++ { - for subnetId := range subnetIds { - columnIndex := dataColumnSidecarSubnetCount*i + subnetId - columnIndices[columnIndex] = true +// CustodyColumns computes the columns the node should custody. +func CustodyColumns(custodyGroups map[uint64]bool) (map[uint64]bool, error) { + numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups + + custodyGroupCount := len(custodyGroups) + + // Compute the columns for each custody group. + columns := make(map[uint64]bool, custodyGroupCount) + for group := range custodyGroups { + if group >= numberOfCustodyGroups { + return nil, errCustodyGroupCountTooLarge + } + + groupColumns, err := ComputeColumnsForCustodyGroup(group) + if err != nil { + return nil, errors.Wrap(err, "compute columns for custody group") + } + + for _, column := range groupColumns { + columns[column] = true } } - return columnIndices, nil + return columns, nil +} + +// DataColumnSubnets computes the subnets for the data columns. +func DataColumnSubnets(dataColumns map[uint64]bool) map[uint64]bool { + subnets := make(map[uint64]bool, len(dataColumns)) + + for column := range dataColumns { + subnet := ComputeSubnetForDataColumnSidecar(column) + subnets[subnet] = true + } + + return subnets } // DataColumnSidecars computes the data column sidecars from the signed block and blobs. -// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#recover_matrix +// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#get_data_column_sidecars func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs []kzg.Blob) ([]*ethpb.DataColumnSidecar, error) { startTime := time.Now() blobsCount := len(blobs) @@ -454,39 +517,22 @@ func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) (bool, er return verified, nil } -// CustodySubnetCount returns the number of subnets the node should participate in for custody. -func CustodySubnetCount() uint64 { +// CustodyGroupCount returns the number of groups the node should participate in for custody. +func CustodyGroupCount() uint64 { if flags.Get().SubscribeToAllSubnets { - return params.BeaconConfig().DataColumnSidecarSubnetCount + return params.BeaconConfig().NumberOfCustodyGroups } return params.BeaconConfig().CustodyRequirement } -// SubnetSamplingSize returns the number of subnets the node should sample from. -// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#subnet-sampling -func SubnetSamplingSize() uint64 { +// CustodyGroupSamplingSize returns the number of custody groups the node should sample from. 
+// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling +func CustodyGroupSamplingSize() uint64 { samplesPerSlot := params.BeaconConfig().SamplesPerSlot - custodySubnetCount := CustodySubnetCount() - - return max(samplesPerSlot, custodySubnetCount) -} - -// CustodyColumnCount returns the number of columns the node should custody. -func CustodyColumnCount() uint64 { - // Get the number of subnets. - dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount - - // Compute the number of columns per subnet. - columnsPerSubnet := fieldparams.NumberOfColumns / dataColumnSidecarSubnetCount - - // Get the number of subnets we custody - custodySubnetCount := CustodySubnetCount() - - // Finally, compute the number of columns we should custody. - custodyColumnCount := custodySubnetCount * columnsPerSubnet + custodyGroupCount := CustodyGroupCount() - return custodyColumnCount + return max(samplesPerSlot, custodyGroupCount) } // HypergeomCDF computes the hypergeometric cumulative distribution function. @@ -538,27 +584,27 @@ func ExtendedSampleCount(samplesPerSlot, allowedFailures uint64) uint64 { return sampleCount } -func CustodyCountFromRecord(record *enr.Record) (uint64, error) { - // By default, we assume the peer custodies the minimum number of subnets. +// CustodyGroupCountFromRecord extracts the custody group count from an ENR record. +func CustodyGroupCountFromRecord(record *enr.Record) (uint64, error) { if record == nil { return 0, errRecordNil } - // Load the `custody_subnet_count` - var csc Csc - if err := record.Load(&csc); err != nil { - return 0, errCannotLoadCustodySubnetCount + // Load the `cgc` + var cgc Cgc + if cgc := record.Load(&cgc); cgc != nil { + return 0, errCannotLoadCustodyGroupCount } - return uint64(csc), nil + return uint64(cgc), nil } -func CanSelfReconstruct(numCol uint64) bool { - total := params.BeaconConfig().NumberOfColumns - // if total is odd, then we need total / 2 + 1 columns to reconstruct - // if total is even, then we need total / 2 columns to reconstruct - columnsNeeded := total/2 + total%2 - return numCol >= columnsNeeded +func CanSelfReconstruct(custodyGroupCount uint64) bool { + total := params.BeaconConfig().NumberOfCustodyGroups + // If total is odd, then we need total / 2 + 1 columns to reconstruct. + // If total is even, then we need total / 2 columns to reconstruct. + custodyGroupsNeeded := total/2 + total%2 + return custodyGroupCount >= custodyGroupsNeeded } // RecoverCellsAndProofs recovers the cells and proofs from the data column sidecars. diff --git a/beacon-chain/core/peerdas/helpers_test.go b/beacon-chain/core/peerdas/helpers_test.go index 389680b9788..78578fa6a68 100644 --- a/beacon-chain/core/peerdas/helpers_test.go +++ b/beacon-chain/core/peerdas/helpers_test.go @@ -234,7 +234,7 @@ func TestDataColumnsSidecarsBlobsRoundtrip(t *testing.T) { require.DeepSSZEqual(t, verifiedROBlobs, roundtripBlobs) } -func TestCustodySubnetCount(t *testing.T) { +func TestCustodyGroupCount(t *testing.T) { testCases := []struct { name string subscribeToAllSubnets bool @@ -266,25 +266,12 @@ func TestCustodySubnetCount(t *testing.T) { flags.Init(gFlags) // Get the custody subnet count. 
- actual := peerdas.CustodySubnetCount() + actual := peerdas.CustodyGroupCount() require.Equal(t, tc.expected, actual) }) } } -func TestCustodyColumnCount(t *testing.T) { - const expected uint64 = 8 - - params.SetupTestConfigCleanup(t) - config := params.BeaconConfig().Copy() - config.DataColumnSidecarSubnetCount = 32 - config.CustodyRequirement = 2 - params.OverrideBeaconConfig(config) - - actual := peerdas.CustodyColumnCount() - require.Equal(t, expected, actual) -} - func TestHypergeomCDF(t *testing.T) { // Test case from https://en.wikipedia.org/wiki/Hypergeometric_distribution // Population size: 1000, number of successes in population: 500, sample size: 10, number of successes in sample: 5 @@ -337,48 +324,48 @@ func TestExtendedSampleCount(t *testing.T) { } } -func TestCustodyCountFromRecord(t *testing.T) { +func TestCustodyGroupCountFromRecord(t *testing.T) { const expected uint64 = 7 // Create an Ethereum record. record := &enr.Record{} - record.Set(peerdas.Csc(expected)) + record.Set(peerdas.Cgc(expected)) - actual, err := peerdas.CustodyCountFromRecord(record) + actual, err := peerdas.CustodyGroupCountFromRecord(record) require.NoError(t, err) require.Equal(t, expected, actual) } func TestCanSelfReconstruct(t *testing.T) { testCases := []struct { - name string - totalNumberOfColumns uint64 - custodyNumberOfColumns uint64 - expected bool + name string + totalNumberOfCustodyGroups uint64 + custodyNumberOfGroups uint64 + expected bool }{ { - name: "totalNumberOfColumns=64, custodyNumberOfColumns=31", - totalNumberOfColumns: 64, - custodyNumberOfColumns: 31, - expected: false, + name: "totalNumberOfCustodyGroups=64, custodyNumberOfGroups=31", + totalNumberOfCustodyGroups: 64, + custodyNumberOfGroups: 31, + expected: false, }, { - name: "totalNumberOfColumns=64, custodyNumberOfColumns=32", - totalNumberOfColumns: 64, - custodyNumberOfColumns: 32, - expected: true, + name: "totalNumberOfCustodyGroups=64, custodyNumberOfGroups=32", + totalNumberOfCustodyGroups: 64, + custodyNumberOfGroups: 32, + expected: true, }, { - name: "totalNumberOfColumns=65, custodyNumberOfColumns=32", - totalNumberOfColumns: 65, - custodyNumberOfColumns: 32, - expected: false, + name: "totalNumberOfCustodyGroups=65, custodyNumberOfGroups=32", + totalNumberOfCustodyGroups: 65, + custodyNumberOfGroups: 32, + expected: false, }, { - name: "totalNumberOfColumns=63, custodyNumberOfColumns=33", - totalNumberOfColumns: 65, - custodyNumberOfColumns: 33, - expected: true, + name: "totalNumberOfCustodyGroups=63, custodyNumberOfGroups=33", + totalNumberOfCustodyGroups: 65, + custodyNumberOfGroups: 33, + expected: true, }, } @@ -387,11 +374,11 @@ func TestCanSelfReconstruct(t *testing.T) { // Set the total number of columns. params.SetupTestConfigCleanup(t) cfg := params.BeaconConfig().Copy() - cfg.NumberOfColumns = tc.totalNumberOfColumns + cfg.NumberOfCustodyGroups = tc.totalNumberOfCustodyGroups params.OverrideBeaconConfig(cfg) // Check if reconstuction is possible. - actual := peerdas.CanSelfReconstruct(tc.custodyNumberOfColumns) + actual := peerdas.CanSelfReconstruct(tc.custodyNumberOfGroups) require.Equal(t, tc.expected, actual) }) } diff --git a/beacon-chain/das/availability_columns.go b/beacon-chain/das/availability_columns.go index ecb28617bcf..a9300e85fb9 100644 --- a/beacon-chain/das/availability_columns.go +++ b/beacon-chain/das/availability_columns.go @@ -137,8 +137,8 @@ func (s *LazilyPersistentStoreColumn) IsDataAvailable( // fullCommitmentsToCheck returns the commitments to check for a given block. 
func fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) { - // Return early for blocks that are pre-deneb. - if block.Version() < version.Deneb { + // Return early for blocks that are pre-Fulu. + if block.Version() < version.Fulu { return &safeCommitmentsArray{}, nil } @@ -165,9 +165,17 @@ func fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot p return &safeCommitmentsArray{}, nil } - // Retrieve the custody columns. - custodySubnetCount := peerdas.CustodySubnetCount() - custodyColumns, err := peerdas.CustodyColumns(nodeID, custodySubnetCount) + // Retrieve the groups count. + custodyGroupCount := peerdas.CustodyGroupCount() + + // Retrieve custody groups. + custodyGroups, err := peerdas.CustodyGroups(nodeID, custodyGroupCount) + if err != nil { + return nil, errors.Wrap(err, "custody groups") + } + + // Retrieve custody columns. + custodyColumns, err := peerdas.CustodyColumns(custodyGroups) if err != nil { return nil, errors.Wrap(err, "custody columns") } diff --git a/beacon-chain/das/availability_columns_test.go b/beacon-chain/das/availability_columns_test.go index 0405756d96a..04fcd9f943d 100644 --- a/beacon-chain/das/availability_columns_test.go +++ b/beacon-chain/das/availability_columns_test.go @@ -31,9 +31,9 @@ func TestFullCommitmentsToCheck(t *testing.T) { err error }{ { - name: "pre deneb", + name: "pre fulu", block: func(t *testing.T) blocks.ROBlock { - bb := util.NewBeaconBlockBellatrix() + bb := util.NewBeaconBlockElectra() sb, err := blocks.NewSignedBeaconBlock(bb) require.NoError(t, err) rb, err := blocks.NewROBlock(sb) @@ -44,7 +44,7 @@ func TestFullCommitmentsToCheck(t *testing.T) { { name: "commitments within da", block: func(t *testing.T) blocks.ROBlock { - d := util.NewBeaconBlockDeneb() + d := util.NewBeaconBlockFulu() d.Block.Body.BlobKzgCommitments = commits d.Block.Slot = 100 sb, err := blocks.NewSignedBeaconBlock(d) @@ -59,7 +59,7 @@ func TestFullCommitmentsToCheck(t *testing.T) { { name: "commitments outside da", block: func(t *testing.T) blocks.ROBlock { - d := util.NewBeaconBlockDeneb() + d := util.NewBeaconBlockElectra() // block is from slot 0, "current slot" is window size +1 (so outside the window) d.Block.Body.BlobKzgCommitments = commits sb, err := blocks.NewSignedBeaconBlock(d) diff --git a/beacon-chain/p2p/custody.go b/beacon-chain/p2p/custody.go index 128e8b7894d..b8e055de80b 100644 --- a/beacon-chain/p2p/custody.go +++ b/beacon-chain/p2p/custody.go @@ -9,43 +9,44 @@ import ( "github.com/prysmaticlabs/prysm/v5/config/params" ) -// DataColumnsAdmissibleCustodyPeers returns a list of peers that custody a super set of the local node's custody columns. -func (s *Service) DataColumnsAdmissibleCustodyPeers(peers []peer.ID) ([]peer.ID, error) { - localCustodySubnetCount := peerdas.CustodySubnetCount() - return s.dataColumnsAdmissiblePeers(peers, localCustodySubnetCount) +// AdmissibleCustodyGroupsPeers returns a list of peers that custody a super set of the local node's custody groups. +func (s *Service) AdmissibleCustodyGroupsPeers(peers []peer.ID) ([]peer.ID, error) { + localCustodyGroupCount := peerdas.CustodyGroupCount() + return s.custodyGroupsAdmissiblePeers(peers, localCustodyGroupCount) } -// DataColumnsAdmissibleSubnetSamplingPeers returns a list of peers that custody a super set of the local node's sampling columns. 
-func (s *Service) DataColumnsAdmissibleSubnetSamplingPeers(peers []peer.ID) ([]peer.ID, error) { - localSubnetSamplingSize := peerdas.SubnetSamplingSize() - return s.dataColumnsAdmissiblePeers(peers, localSubnetSamplingSize) +// AdmissibleCustodySamplingPeers returns a list of peers that custody a super set of the local node's sampling columns. +func (s *Service) AdmissibleCustodySamplingPeers(peers []peer.ID) ([]peer.ID, error) { + localSubnetSamplingSize := peerdas.CustodyGroupSamplingSize() + return s.custodyGroupsAdmissiblePeers(peers, localSubnetSamplingSize) } -// dataColumnsAdmissiblePeers computes the first columns of the local node corresponding to `subnetCount`, then -// filters out `peers` that do not custody a super set of these columns. -func (s *Service) dataColumnsAdmissiblePeers(peers []peer.ID, subnetCount uint64) ([]peer.ID, error) { - // Get the total number of columns. - numberOfColumns := params.BeaconConfig().NumberOfColumns +// custodyGroupsAdmissiblePeers filters out `peers` that do not custody a super set of our own custody groups. +func (s *Service) custodyGroupsAdmissiblePeers(peers []peer.ID, custodyGroupCount uint64) ([]peer.ID, error) { + // Get the total number of custody groups. + numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups // Retrieve the local node ID. localNodeId := s.NodeID() - // Retrieve the needed columns. - neededColumns, err := peerdas.CustodyColumns(localNodeId, subnetCount) + // Retrieve the needed custody groups. + neededCustodyGroups, err := peerdas.CustodyGroups(localNodeId, custodyGroupCount) if err != nil { - return nil, errors.Wrap(err, "custody columns for local node") + return nil, errors.Wrap(err, "custody groups") } - // Get the number of needed columns. - localneededColumnsCount := uint64(len(neededColumns)) - // Find the valid peers. validPeers := make([]peer.ID, 0, len(peers)) loop: for _, pid := range peers { - // Get the custody subnets count of the remote peer. - remoteCustodySubnetCount := s.DataColumnsCustodyCountFromRemotePeer(pid) + // Get the custody group count of the remote peer. + remoteCustodyGroupCount := s.CustodyGroupCountFromPeer(pid) + + // If the remote peer custodies less groups than we do, skip it. + if remoteCustodyGroupCount < custodyGroupCount { + continue + } // Get the remote node ID from the peer ID. remoteNodeID, err := ConvertPeerIDToNodeID(pid) @@ -53,44 +54,39 @@ loop: return nil, errors.Wrap(err, "convert peer ID to node ID") } - // Get the custody columns of the remote peer. - remoteCustodyColumns, err := peerdas.CustodyColumns(remoteNodeID, remoteCustodySubnetCount) + // Get the custody groups of the remote peer. + remoteCustodyGroups, err := peerdas.CustodyGroups(remoteNodeID, remoteCustodyGroupCount) if err != nil { - return nil, errors.Wrap(err, "custody columns") + return nil, errors.Wrap(err, "custody groups") } - remoteCustodyColumnsCount := uint64(len(remoteCustodyColumns)) - - // If the remote peer custodies less columns than the local node needs, skip it. - if remoteCustodyColumnsCount < localneededColumnsCount { - continue - } + remoteCustodyGroupsCount := uint64(len(remoteCustodyGroups)) // If the remote peers custodies all the possible columns, add it to the list. - if remoteCustodyColumnsCount == numberOfColumns { - copiedId := pid - validPeers = append(validPeers, copiedId) + if remoteCustodyGroupsCount == numberOfCustodyGroups { + validPeers = append(validPeers, pid) continue } // Filter out invalid peers. 
-		for c := range neededColumns {
-			if !remoteCustodyColumns[c] {
+		for custodyGroup := range neededCustodyGroups {
+			if !remoteCustodyGroups[custodyGroup] {
 				continue loop
 			}
 		}
 
-		copiedId := pid
-		// Add valid peer to list
-		validPeers = append(validPeers, copiedId)
+		validPeers = append(validPeers, pid)
 	}
 
 	return validPeers, nil
 }
 
-func (s *Service) custodyCountFromRemotePeerEnr(pid peer.ID) uint64 {
-	// By default, we assume the peer custodies the minimum number of subnets.
+// custodyGroupCountFromPeerENR retrieves the custody count from the peer ENR.
+// If the ENR is not available, it defaults to the minimum number of custody groups
+// an honest node custodies and serves samples from.
+func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
+	// By default, we assume the peer custodies the minimum number of groups.
 	custodyRequirement := params.BeaconConfig().CustodyRequirement
 
 	// Retrieve the ENR of the peer.
@@ -104,42 +100,47 @@
 		return custodyRequirement
 	}
 
-	// Retrieve the custody subnets count from the ENR.
-	custodyCount, err := peerdas.CustodyCountFromRecord(record)
+	// Retrieve the custody group count from the ENR.
+	custodyGroupCount, err := peerdas.CustodyGroupCountFromRecord(record)
 	if err != nil {
 		log.WithError(err).WithFields(logrus.Fields{
 			"peerID":       pid,
 			"defaultValue": custodyRequirement,
-		}).Debug("Failed to retrieve custody count from ENR for peer, defaulting to the default value")
+		}).Debug("Failed to retrieve custody group count from ENR for peer, defaulting to the default value")
 		return custodyRequirement
 	}
 
-	return custodyCount
+	return custodyGroupCount
 }
 
-// DataColumnsCustodyCountFromRemotePeer retrieves the custody count from a remote peer.
-func (s *Service) DataColumnsCustodyCountFromRemotePeer(pid peer.ID) uint64 {
-	// Try to get the custody count from the peer's metadata.
+// CustodyGroupCountFromPeer retrieves the custody group count from a peer.
+// It first tries to get the custody group count from the peer's metadata,
+// then falls back to the ENR value if the metadata is not available, and then
+// falls back to the minimum number of custody groups an honest node should custody
+// and serve samples from if the ENR is not available either.
+func (s *Service) CustodyGroupCountFromPeer(pid peer.ID) uint64 {
+	// Try to get the custody group count from the peer's metadata.
 	metadata, err := s.peers.Metadata(pid)
 	if err != nil {
+		// On error, default to the ENR value.
 		log.WithError(err).WithField("peerID", pid).Debug("Failed to retrieve metadata for peer, defaulting to the ENR value")
-		return s.custodyCountFromRemotePeerEnr(pid)
+		return s.custodyGroupCountFromPeerENR(pid)
 	}
 
 	// If the metadata is nil, default to the ENR value.
 	if metadata == nil {
 		log.WithField("peerID", pid).Debug("Metadata is nil, defaulting to the ENR value")
-		return s.custodyCountFromRemotePeerEnr(pid)
+		return s.custodyGroupCountFromPeerENR(pid)
 	}
 
 	// Get the custody subnets count from the metadata.
-	custodyCount := metadata.CustodySubnetCount()
+	custodyCount := metadata.CustodyGroupCount()
 
 	// If the custody count is null, default to the ENR value.
if custodyCount == 0 { log.WithField("peerID", pid).Debug("The custody count extracted from the metadata equals to 0, defaulting to the ENR value") - return s.custodyCountFromRemotePeerEnr(pid) + return s.custodyGroupCountFromPeerENR(pid) } return custodyCount diff --git a/beacon-chain/p2p/custody_test.go b/beacon-chain/p2p/custody_test.go index 422489de309..e2cd8744e4e 100644 --- a/beacon-chain/p2p/custody_test.go +++ b/beacon-chain/p2p/custody_test.go @@ -41,13 +41,13 @@ func createPeer(t *testing.T, privateKeyOffset int, custodyCount uint64) (*enr.R require.NoError(t, err) record := &enr.Record{} - record.Set(peerdas.Csc(custodyCount)) + record.Set(peerdas.Cgc(custodyCount)) record.Set(enode.Secp256k1(privateKey.PublicKey)) return record, peerID, privateKey } -func TestDataColumnsAdmissibleCustodyPeers(t *testing.T) { +func TestAdmissibleCustodyGroupsPeers(t *testing.T) { genesisValidatorRoot := make([]byte, 32) for i := 0; i < 32; i++ { @@ -70,18 +70,18 @@ func TestDataColumnsAdmissibleCustodyPeers(t *testing.T) { custodyRequirement := params.BeaconConfig().CustodyRequirement dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount - // Peer 1 custodies exactly the same columns than us. + // Peer 1 custodies exactly the same groups than us. // (We use the same keys pair than ours for simplicity) peer1Record, peer1ID, localPrivateKey := createPeer(t, 1, custodyRequirement) - // Peer 2 custodies all the columns. + // Peer 2 custodies all the groups. peer2Record, peer2ID, _ := createPeer(t, 2, dataColumnSidecarSubnetCount) - // Peer 3 custodies different columns than us (but the same count). + // Peer 3 custodies different groups than us (but the same count). // (We use the same public key than peer 2 for simplicity) peer3Record, peer3ID, _ := createPeer(t, 3, custodyRequirement) - // Peer 4 custodies less columns than us. + // Peer 4 custodies less groups than us. peer4Record, peer4ID, _ := createPeer(t, 4, custodyRequirement-1) createListener := func() (*discover.UDPv5, error) { @@ -98,40 +98,40 @@ func TestDataColumnsAdmissibleCustodyPeers(t *testing.T) { service.peers.Add(peer3Record, peer3ID, nil, network.DirOutbound) service.peers.Add(peer4Record, peer4ID, nil, network.DirOutbound) - actual, err := service.DataColumnsAdmissibleCustodyPeers([]peer.ID{peer1ID, peer2ID, peer3ID, peer4ID}) + actual, err := service.AdmissibleCustodyGroupsPeers([]peer.ID{peer1ID, peer2ID, peer3ID, peer4ID}) require.NoError(t, err) expected := []peer.ID{peer1ID, peer2ID} require.DeepSSZEqual(t, expected, actual) } -func TestDataColumnsCustodyCountFromRemotePeer(t *testing.T) { +func TestCustodyGroupCountFromPeer(t *testing.T) { const ( expectedENR uint64 = 7 expectedMetadata uint64 = 8 pid = "test-id" ) - csc := peerdas.Csc(expectedENR) + cgc := peerdas.Cgc(expectedENR) // Define a nil record var nilRecord *enr.Record = nil - // Define an empty record (record with non `csc` entry) + // Define an empty record (record with non `cgc` entry) emptyRecord := &enr.Record{} // Define a nominal record nominalRecord := &enr.Record{} - nominalRecord.Set(csc) + nominalRecord.Set(cgc) // Define a metadata with zero custody. zeroMetadata := wrapper.WrappedMetadataV2(&pb.MetaDataV2{ - CustodySubnetCount: 0, + CustodyGroupCount: 0, }) // Define a nominal metadata. 
nominalMetadata := wrapper.WrappedMetadataV2(&pb.MetaDataV2{ - CustodySubnetCount: expectedMetadata, + CustodyGroupCount: expectedMetadata, }) testCases := []struct { @@ -191,7 +191,7 @@ func TestDataColumnsCustodyCountFromRemotePeer(t *testing.T) { } // Retrieve the custody count from the remote peer. - actual := service.DataColumnsCustodyCountFromRemotePeer(pid) + actual := service.CustodyGroupCountFromPeer(pid) // Verify the result. require.Equal(t, tc.expected, actual) diff --git a/beacon-chain/p2p/discovery.go b/beacon-chain/p2p/discovery.go index 1b68e3f0912..98e8aaffede 100644 --- a/beacon-chain/p2p/discovery.go +++ b/beacon-chain/p2p/discovery.go @@ -247,28 +247,28 @@ func (s *Service) RefreshPersistentSubnets() { return } - // Get the current custody subnet count. - custodySubnetCount := peerdas.CustodySubnetCount() + // Get the current custody group count. + custodyGroupCount := peerdas.CustodyGroupCount() - // Get the custody subnet count we store in our record. - inRecordCustodySubnetCount, err := peerdas.CustodyCountFromRecord(record) + // Get the custody group count we store in our record. + inRecordCustodyGroupCount, err := peerdas.CustodyGroupCountFromRecord(record) if err != nil { log.WithError(err).Error("Could not retrieve custody subnet count") return } - // Get the custody subnet count in our metadata. - inMetadataCustodySubnetCount := s.Metadata().CustodySubnetCount() + // Get the custody group count in our metadata. + inMetadataCustodyGroupCount := s.Metadata().CustodyGroupCount() - isCustodySubnetCountUpToDate := (custodySubnetCount == inRecordCustodySubnetCount && custodySubnetCount == inMetadataCustodySubnetCount) + isCustodyGroupCountUpToDate := (custodyGroupCount == inRecordCustodyGroupCount && custodyGroupCount == inMetadataCustodyGroupCount) - if isBitVUpToDate && isBitSUpToDate && isCustodySubnetCountUpToDate { + if isBitVUpToDate && isBitSUpToDate && isCustodyGroupCountUpToDate { // Nothing to do, return early. return } // Some data changed. Update the record and the metadata. - s.updateSubnetRecordWithMetadataV3(bitV, bitS, custodySubnetCount) + s.updateSubnetRecordWithMetadataV3(bitV, bitS, custodyGroupCount) // Ping all peers. s.pingPeersAndLogEnr() @@ -496,8 +496,8 @@ func (s *Service) createLocalNode( } if params.FuluEnabled() { - custodySubnetCount := peerdas.CustodySubnetCount() - localNode.Set(peerdas.Csc(custodySubnetCount)) + custodyGroupCount := peerdas.CustodyGroupCount() + localNode.Set(peerdas.Cgc(custodyGroupCount)) } localNode.SetFallbackIP(ipAddr) diff --git a/beacon-chain/p2p/discovery_test.go b/beacon-chain/p2p/discovery_test.go index 8ac9e63a7dc..dd5cce80e86 100644 --- a/beacon-chain/p2p/discovery_test.go +++ b/beacon-chain/p2p/discovery_test.go @@ -242,10 +242,10 @@ func TestCreateLocalNode(t *testing.T) { require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(syncCommsSubnetEnrKey, syncSubnets))) require.DeepSSZEqual(t, []byte{0}, *syncSubnets) - // Check custody_subnet_count config. - custodySubnetCount := new(uint64) - require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(peerdas.CustodySubnetCountEnrKey, custodySubnetCount))) - require.Equal(t, params.BeaconConfig().CustodyRequirement, *custodySubnetCount) + // Check cgc config. 
+ custodyGroupCount := new(uint64) + require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(peerdas.CustodyGroupCountEnrKey, custodyGroupCount))) + require.Equal(t, params.BeaconConfig().CustodyRequirement, *custodyGroupCount) }) } } @@ -545,7 +545,7 @@ type check struct { metadataSequenceNumber uint64 attestationSubnets []uint64 syncSubnets []uint64 - custodySubnetCount *uint64 + custodyGroupCount *uint64 } func checkPingCountCacheMetadataRecord( @@ -612,16 +612,16 @@ func checkPingCountCacheMetadataRecord( require.DeepSSZEqual(t, expectedBitS, actualBitSMetadata) } - if expected.custodySubnetCount != nil { + if expected.custodyGroupCount != nil { // Check custody subnet count in ENR. - var actualCustodySubnetCount uint64 - err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(peerdas.CustodySubnetCountEnrKey, &actualCustodySubnetCount)) + var actualCustodyGroupCount uint64 + err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(peerdas.CustodyGroupCountEnrKey, &actualCustodyGroupCount)) require.NoError(t, err) - require.Equal(t, *expected.custodySubnetCount, actualCustodySubnetCount) + require.Equal(t, *expected.custodyGroupCount, actualCustodyGroupCount) // Check custody subnet count in metadata. - actualCustodySubnetCountMetadata := service.metaData.CustodySubnetCount() - require.Equal(t, *expected.custodySubnetCount, actualCustodySubnetCountMetadata) + actualGroupCountMetadata := service.metaData.CustodyGroupCount() + require.Equal(t, *expected.custodyGroupCount, actualGroupCountMetadata) } } @@ -637,7 +637,7 @@ func TestRefreshPersistentSubnets(t *testing.T) { fuluForkEpoch = 10 ) - custodySubnetCount := params.BeaconConfig().CustodyRequirement + custodyGroupCount := params.BeaconConfig().CustodyRequirement // Set up epochs. 
defaultCfg := params.BeaconConfig() @@ -727,21 +727,21 @@ func TestRefreshPersistentSubnets(t *testing.T) { metadataSequenceNumber: 1, attestationSubnets: []uint64{40, 41}, syncSubnets: nil, - custodySubnetCount: &custodySubnetCount, + custodyGroupCount: &custodyGroupCount, }, { pingCount: 2, metadataSequenceNumber: 2, attestationSubnets: []uint64{40, 41}, syncSubnets: []uint64{1, 2}, - custodySubnetCount: &custodySubnetCount, + custodyGroupCount: &custodyGroupCount, }, { pingCount: 2, metadataSequenceNumber: 2, attestationSubnets: []uint64{40, 41}, syncSubnets: []uint64{1, 2}, - custodySubnetCount: &custodySubnetCount, + custodyGroupCount: &custodyGroupCount, }, }, }, diff --git a/beacon-chain/p2p/interfaces.go b/beacon-chain/p2p/interfaces.go index 71127a640f1..7483c292dbe 100644 --- a/beacon-chain/p2p/interfaces.go +++ b/beacon-chain/p2p/interfaces.go @@ -114,7 +114,7 @@ type MetadataProvider interface { } type DataColumnsHandler interface { - DataColumnsCustodyCountFromRemotePeer(peer.ID) uint64 - DataColumnsAdmissibleCustodyPeers([]peer.ID) ([]peer.ID, error) - DataColumnsAdmissibleSubnetSamplingPeers([]peer.ID) ([]peer.ID, error) + CustodyGroupCountFromPeer(peer.ID) uint64 + AdmissibleCustodyGroupsPeers([]peer.ID) ([]peer.ID, error) + AdmissibleCustodySamplingPeers([]peer.ID) ([]peer.ID, error) } diff --git a/beacon-chain/p2p/subnets.go b/beacon-chain/p2p/subnets.go index 3177f334378..515e6d1de81 100644 --- a/beacon-chain/p2p/subnets.go +++ b/beacon-chain/p2p/subnets.go @@ -30,9 +30,9 @@ var ( attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount syncCommsSubnetCount = params.BeaconConfig().SyncCommitteeSubnetCount - attSubnetEnrKey = params.BeaconNetworkConfig().AttSubnetKey - syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey - custodySubnetCountEnrKey = params.BeaconNetworkConfig().CustodySubnetCountKey + attSubnetEnrKey = params.BeaconNetworkConfig().AttSubnetKey + syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey + custodyGroupCountEnrKey = params.BeaconNetworkConfig().CustodyGroupCountKey ) // The value used with the subnet, in order @@ -56,7 +56,7 @@ const blobSubnetLockerVal = 110 // chosen more than sync, attestation and blob subnet (6) combined. const dataColumnSubnetVal = 150 -// nodeFilter return a function that filters nodes based on the subnet topic and subnet index. +// nodeFilter returns a function that filters nodes based on the subnet topic and subnet index. 
func (s *Service) nodeFilter(topic string, index uint64) (func(node *enode.Node) bool, error) { switch { case strings.Contains(topic, GossipAttestationMessage): @@ -346,24 +346,24 @@ func (s *Service) updateSubnetRecordWithMetadataV2(bitVAtt bitfield.Bitvector64, func (s *Service) updateSubnetRecordWithMetadataV3( bitVAtt bitfield.Bitvector64, bitVSync bitfield.Bitvector4, - custodySubnetCount uint64, + custodyGroupCount uint64, ) { attSubnetsEntry := enr.WithEntry(attSubnetEnrKey, &bitVAtt) syncSubnetsEntry := enr.WithEntry(syncCommsSubnetEnrKey, &bitVSync) - custodySubnetCountEntry := enr.WithEntry(custodySubnetCountEnrKey, custodySubnetCount) + custodyGroupCountEntry := enr.WithEntry(custodyGroupCountEnrKey, custodyGroupCount) localNode := s.dv5Listener.LocalNode() localNode.Set(attSubnetsEntry) localNode.Set(syncSubnetsEntry) - localNode.Set(custodySubnetCountEntry) + localNode.Set(custodyGroupCountEntry) newSeqNumber := s.metaData.SequenceNumber() + 1 s.metaData = wrapper.WrappedMetadataV2(&pb.MetaDataV2{ - SeqNumber: newSeqNumber, - Attnets: bitVAtt, - Syncnets: bitVSync, - CustodySubnetCount: custodySubnetCount, + SeqNumber: newSeqNumber, + Attnets: bitVAtt, + Syncnets: bitVSync, + CustodyGroupCount: custodyGroupCount, }) } @@ -381,7 +381,7 @@ func initializePersistentSubnets(id enode.ID, epoch primitives.Epoch) error { return nil } -// initializePersistentColumnSubnets initialize persisten column subnets +// initializePersistentColumnSubnets initialize persistent column subnets func initializePersistentColumnSubnets(id enode.ID) error { // Check if the column subnets are already cached. _, ok, expTime := cache.ColumnSubnetIDs.GetColumnSubnets() @@ -389,15 +389,25 @@ func initializePersistentColumnSubnets(id enode.ID) error { return nil } - // Retrieve the subnets we should be subscribed to. - subnetSamplingSize := peerdas.SubnetSamplingSize() - subnetsMap, err := peerdas.CustodyColumnSubnets(id, subnetSamplingSize) + // Compute the number of custody groups we should sample. + custodyGroupSamplingSize := peerdas.CustodyGroupSamplingSize() + + // Compute the custody groups we should sample. + custodyGroups, err := peerdas.CustodyGroups(id, custodyGroupSamplingSize) + if err != nil { + return errors.Wrap(err, "custody groups") + } + + // Compute the column subnets for the custody groups. + custodyColumns, err := peerdas.CustodyColumns(custodyGroups) if err != nil { - return errors.Wrap(err, "custody column subnets") + return errors.Wrap(err, "custody columns") } - subnets := make([]uint64, 0, len(subnetsMap)) - for subnet := range subnetsMap { + // Compute subnets from the custody columns. + subnets := make([]uint64, 0, len(custodyColumns)) + for column := range custodyColumns { + subnet := peerdas.ComputeSubnetForDataColumnSidecar(column) subnets = append(subnets, subnet) } @@ -530,23 +540,29 @@ func syncSubnets(record *enr.Record) ([]uint64, error) { return committeeIdxs, nil } +// Retrieve the data columns subnets from a node's ENR and node ID. +// TODO: Add tests func dataColumnSubnets(nodeID enode.ID, record *enr.Record) (map[uint64]bool, error) { - custodyRequirement := params.BeaconConfig().CustodyRequirement - // Retrieve the custody count from the ENR. - custodyCount, err := peerdas.CustodyCountFromRecord(record) + custodyGroupCount, err := peerdas.CustodyGroupCountFromRecord(record) + if err != nil { + return nil, errors.Wrap(err, "custody group count from record") + } + + // Retrieve the custody groups from the remote peer. 
+ custodyGroups, err := peerdas.CustodyGroups(nodeID, custodyGroupCount) if err != nil { - // If we fail to retrieve the custody count, we default to the custody requirement. - custodyCount = custodyRequirement + return nil, errors.Wrap(err, "custody groups") } - // Retrieve the custody subnets from the remote peer - custodyColumnsSubnets, err := peerdas.CustodyColumnSubnets(nodeID, custodyCount) + // Retrieve the custody columns from the groups. + custodyColumns, err := peerdas.CustodyColumns(custodyGroups) if err != nil { - return nil, errors.Wrap(err, "custody column subnets") + return nil, errors.Wrap(err, "custody columns") } - return custodyColumnsSubnets, nil + // Get custody columns subnets from the columns. + return peerdas.DataColumnSubnets(custodyColumns), nil } // Parses the attestation subnets ENR entry in a node and extracts its value diff --git a/beacon-chain/p2p/testing/fuzz_p2p.go b/beacon-chain/p2p/testing/fuzz_p2p.go index feccb5e6297..238a40436aa 100644 --- a/beacon-chain/p2p/testing/fuzz_p2p.go +++ b/beacon-chain/p2p/testing/fuzz_p2p.go @@ -185,14 +185,14 @@ func (*FakeP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.Disc return true, 0 } -func (*FakeP2P) DataColumnsCustodyCountFromRemotePeer(peer.ID) uint64 { +func (*FakeP2P) CustodyGroupCountFromPeer(peer.ID) uint64 { return 0 } -func (*FakeP2P) DataColumnsAdmissibleCustodyPeers(peers []peer.ID) ([]peer.ID, error) { +func (*FakeP2P) AdmissibleCustodyGroupsPeers(peers []peer.ID) ([]peer.ID, error) { return peers, nil } -func (*FakeP2P) DataColumnsAdmissibleSubnetSamplingPeers(peers []peer.ID) ([]peer.ID, error) { +func (*FakeP2P) AdmissibleCustodySamplingPeers(peers []peer.ID) ([]peer.ID, error) { return peers, nil } diff --git a/beacon-chain/p2p/testing/p2p.go b/beacon-chain/p2p/testing/p2p.go index 2821cf9ece0..48ef7b3f4d0 100644 --- a/beacon-chain/p2p/testing/p2p.go +++ b/beacon-chain/p2p/testing/p2p.go @@ -448,8 +448,8 @@ func (*TestP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.Disc return true, 0 } -func (s *TestP2P) DataColumnsCustodyCountFromRemotePeer(pid peer.ID) uint64 { - // By default, we assume the peer custodies the minimum number of subnets. +func (s *TestP2P) CustodyGroupCountFromPeer(pid peer.ID) uint64 { + // By default, we assume the peer custodies the minimum number of groups. custodyRequirement := params.BeaconConfig().CustodyRequirement // Retrieve the ENR of the peer. @@ -459,18 +459,18 @@ func (s *TestP2P) DataColumnsCustodyCountFromRemotePeer(pid peer.ID) uint64 { } // Retrieve the custody subnets count from the ENR. 
- custodyCount, err := peerdas.CustodyCountFromRecord(record) + custodyGroupCount, err := peerdas.CustodyGroupCountFromRecord(record) if err != nil { return custodyRequirement } - return custodyCount + return custodyGroupCount } -func (*TestP2P) DataColumnsAdmissibleCustodyPeers(peers []peer.ID) ([]peer.ID, error) { +func (*TestP2P) AdmissibleCustodyGroupsPeers(peers []peer.ID) ([]peer.ID, error) { return peers, nil } -func (*TestP2P) DataColumnsAdmissibleSubnetSamplingPeers(peers []peer.ID) ([]peer.ID, error) { +func (*TestP2P) AdmissibleCustodySamplingPeers(peers []peer.ID) ([]peer.ID, error) { return peers, nil } diff --git a/beacon-chain/rpc/eth/config/handlers_test.go b/beacon-chain/rpc/eth/config/handlers_test.go index 8cb9e7ffd3b..2d1aec5b702 100644 --- a/beacon-chain/rpc/eth/config/handlers_test.go +++ b/beacon-chain/rpc/eth/config/handlers_test.go @@ -191,7 +191,7 @@ func TestGetSpec(t *testing.T) { data, ok := resp.Data.(map[string]interface{}) require.Equal(t, true, ok) - assert.Equal(t, 162, len(data)) + assert.Equal(t, 165, len(data)) for k, v := range data { t.Run(k, func(t *testing.T) { switch k { @@ -540,6 +540,14 @@ func TestGetSpec(t *testing.T) { assert.Equal(t, "1152", v) case "MAX_REQUEST_BLOB_SIDECARS_FULU": assert.Equal(t, "1536", v) + case "NUMBER_OF_CUSTODY_GROUPS": + assert.Equal(t, "128", v) + case "CUSTODY_REQUIREMENT": + assert.Equal(t, "4", v) + case "SAMPLES_PER_SLOT": + assert.Equal(t, "8", v) + case "MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS": + assert.Equal(t, "4096", v) default: t.Errorf("Incorrect key: %s", k) } diff --git a/beacon-chain/rpc/lookup/blocker.go b/beacon-chain/rpc/lookup/blocker.go index 55f5cd978e1..c00bc15b83d 100644 --- a/beacon-chain/rpc/lookup/blocker.go +++ b/beacon-chain/rpc/lookup/blocker.go @@ -291,14 +291,18 @@ func (p *BeaconDbBlocker) blobsFromReconstructedDataColumns( // This function expects data columns to be stored (aka. no blobs). // If not enough data columns are available to extract blobs from them (either directly or after reconstruction), an error is returned. func (p *BeaconDbBlocker) blobsFromStoredDataColumns(indices map[uint64]bool, rootBytes []byte) ([]*blocks.VerifiedROBlob, *core.RpcError) { - // Get our count of columns we should custody. + beaconConfig := params.BeaconConfig() + numberOfColumns := beaconConfig.NumberOfColumns + numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups + columnsPerGroup := numberOfColumns / numberOfCustodyGroups + root := bytesutil.ToBytes32(rootBytes) - // Get the number of columns we should custody. - custodyColumnsCount := peerdas.CustodyColumnCount() + // Get the number of groups we should custody. + custodyGroupCount := peerdas.CustodyGroupCount() // Determine if we are theoretically able to reconstruct the data columns. - canTheoreticallyReconstruct := peerdas.CanSelfReconstruct(custodyColumnsCount) + canTheoreticallyReconstruct := peerdas.CanSelfReconstruct(custodyGroupCount) // Retrieve the data columns indice actually we store. 
storedDataColumnsIndices, err := p.BlobStorage.ColumnIndices(root) @@ -307,10 +311,11 @@ func (p *BeaconDbBlocker) blobsFromStoredDataColumns(indices map[uint64]bool, ro return nil, &core.RpcError{Err: errors.Wrap(err, "could not retrieve columns indices stored for block root"), Reason: core.Internal} } - storedDataColumnsCount := uint64(len(storedDataColumnsIndices)) + storedDataColumnCount := uint64(len(storedDataColumnsIndices)) + storedGroupCount := storedDataColumnCount / columnsPerGroup // Determine is we acually able to reconstruct the data columns. - canActuallyReconstruct := peerdas.CanSelfReconstruct(storedDataColumnsCount) + canActuallyReconstruct := peerdas.CanSelfReconstruct(storedGroupCount) if !canTheoreticallyReconstruct && !canActuallyReconstruct { // There is no way to reconstruct the data columns. @@ -325,7 +330,7 @@ func (p *BeaconDbBlocker) blobsFromStoredDataColumns(indices map[uint64]bool, ro if canTheoreticallyReconstruct && !canActuallyReconstruct { // This case may happen if the node started recently with a big enough custody count, but did not (yet) backfill all the columns. return nil, &core.RpcError{ - Err: errors.Errorf("not all data columns are available for this blob. Wanted: %d, got: %d. Please retry later.", nonExtendedColumnsCount, storedDataColumnsCount), + Err: errors.Errorf("not all data columns are available for this blob. Wanted: %d, got: %d. Please retry later.", nonExtendedColumnsCount, storedDataColumnCount), Reason: core.NotFound} } diff --git a/beacon-chain/sync/data_columns_reconstruct.go b/beacon-chain/sync/data_columns_reconstruct.go index e34a4c5dc36..b79730b7f1a 100644 --- a/beacon-chain/sync/data_columns_reconstruct.go +++ b/beacon-chain/sync/data_columns_reconstruct.go @@ -30,8 +30,8 @@ func (s *Service) reconstructDataColumns(ctx context.Context, verifiedRODataColu return errors.Wrap(err, "stored data columns") } - storedColumnsCount := len(storedDataColumns) - numberOfColumns := fieldparams.NumberOfColumns + storedColumnsCount := uint64(len(storedDataColumns)) + numberOfColumns := params.BeaconConfig().NumberOfColumns // If less than half of the columns are stored, reconstruction is not possible. // If all columns are stored, no need to reconstruct. @@ -51,10 +51,20 @@ func (s *Service) reconstructDataColumns(ctx context.Context, verifiedRODataColu defer s.dataColumsnReconstructionLock.Unlock() - // Retrieve the custody columns. + // Retrieve the node ID. nodeID := s.cfg.p2p.NodeID() - custodySubnetCount := peerdas.CustodySubnetCount() - custodyColumns, err := peerdas.CustodyColumns(nodeID, custodySubnetCount) + + // Compute the custody group count. + custodyGroupCount := peerdas.CustodyGroupCount() + + // Compute the custody groups. + custodyGroups, err := peerdas.CustodyGroups(nodeID, custodyGroupCount) + if err != nil { + return errors.Wrap(err, "custody groups") + } + + // Compute the custody columns. + custodyColumns, err := peerdas.CustodyColumns(custodyGroups) if err != nil { return errors.Wrap(err, "custody columns") } @@ -160,12 +170,24 @@ func (s *Service) scheduleReconstructedDataColumnsBroadcast( return } - // Get the data columns we should store. + // Get the node ID. nodeID := s.cfg.p2p.NodeID() - custodySubnetCount := peerdas.CustodySubnetCount() - custodyDataColumns, err := peerdas.CustodyColumns(nodeID, custodySubnetCount) + + // Get the custody group count. + custodyGroupCount := peerdas.CustodyGroupCount() + + // Compute the custody groups. 
+ custodyGroups, err := peerdas.CustodyGroups(nodeID, custodyGroupCount) + if err != nil { + log.WithError(err).Error("Custody groups") + return + } + + // Compute the custody columns. + custodyDataColumns, err := peerdas.CustodyColumns(custodyGroups) if err != nil { log.WithError(err).Error("Custody columns") + return } // Get the data columns we actually store. diff --git a/beacon-chain/sync/data_columns_sampling.go b/beacon-chain/sync/data_columns_sampling.go index ffcc264ac21..2b816f64579 100644 --- a/beacon-chain/sync/data_columns_sampling.go +++ b/beacon-chain/sync/data_columns_sampling.go @@ -18,11 +18,9 @@ import ( "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed" statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state" "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas" - coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time" "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p" "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types" "github.com/prysmaticlabs/prysm/v5/beacon-chain/startup" - fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams" "github.com/prysmaticlabs/prysm/v5/config/params" "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks" "github.com/prysmaticlabs/prysm/v5/crypto/rand" @@ -54,12 +52,15 @@ type dataColumnSampler1D struct { ctxMap ContextByteVersions stateNotifier statefeed.Notifier - // nonCustodyColumns is a set of columns that are not custodied by the node. - nonCustodyColumns map[uint64]bool - // columnFromPeer maps a peer to the columns it is responsible for custody. - columnFromPeer map[peer.ID]map[uint64]bool - // peerFromColumn maps a column to the peer responsible for custody. - peerFromColumn map[uint64]map[peer.ID]bool + // nonCustodyGroups is a set of groups that are not custodied by the node. + nonCustodyGroups map[uint64]bool + + // groupsByPeer maps a peer to the groups it is responsible for custody. + groupsByPeer map[peer.ID]map[uint64]bool + + // peersByCustodyGroup maps a group to the peer responsible for custody. + peersByCustodyGroup map[uint64]map[peer.ID]bool + // columnVerifier verifies a column according to the specified requirements. columnVerifier verification.NewDataColumnsVerifier } @@ -72,51 +73,56 @@ func newDataColumnSampler1D( stateNotifier statefeed.Notifier, colVerifier verification.NewDataColumnsVerifier, ) *dataColumnSampler1D { - numColumns := params.BeaconConfig().NumberOfColumns - peerFromColumn := make(map[uint64]map[peer.ID]bool, numColumns) - for i := uint64(0); i < numColumns; i++ { - peerFromColumn[i] = make(map[peer.ID]bool) + numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups + peersByCustodyGroup := make(map[uint64]map[peer.ID]bool, numberOfCustodyGroups) + + for i := range numberOfCustodyGroups { + peersByCustodyGroup[i] = make(map[peer.ID]bool) } return &dataColumnSampler1D{ - p2p: p2p, - clock: clock, - ctxMap: ctxMap, - stateNotifier: stateNotifier, - columnFromPeer: make(map[peer.ID]map[uint64]bool), - peerFromColumn: peerFromColumn, - columnVerifier: colVerifier, + p2p: p2p, + clock: clock, + ctxMap: ctxMap, + stateNotifier: stateNotifier, + groupsByPeer: make(map[peer.ID]map[uint64]bool), + peersByCustodyGroup: peersByCustodyGroup, + columnVerifier: colVerifier, } } // Run implements DataColumnSampler. 
func (d *dataColumnSampler1D) Run(ctx context.Context) { - // verify if we need to run sampling or not, if not, return directly - csc := peerdas.CustodySubnetCount() - columns, err := peerdas.CustodyColumns(d.p2p.NodeID(), csc) + numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups + + // Get the node ID. + nodeID := d.p2p.NodeID() + + // Verify if we need to run sampling or not, if not, return directly. + custodyGroupCount := peerdas.CustodyGroupCount() + custodyGroups, err := peerdas.CustodyGroups(nodeID, custodyGroupCount) if err != nil { - log.WithError(err).Error("Failed to determine local custody columns") + log.WithError(err).Error("custody groups") return } - custodyColumnsCount := uint64(len(columns)) - if peerdas.CanSelfReconstruct(custodyColumnsCount) { + if peerdas.CanSelfReconstruct(custodyGroupCount) { log.WithFields(logrus.Fields{ - "custodyColumnsCount": custodyColumnsCount, - "totalColumns": params.BeaconConfig().NumberOfColumns, - }).Debug("The node custodies at least the half the data columns, no need to sample") + "custodyGroupCount": custodyGroupCount, + "totalGroups": numberOfCustodyGroups, + }).Debug("The node custodies at least the half of the groups, no need to sample") return } - // initialize non custody columns. - d.nonCustodyColumns = make(map[uint64]bool) - for i := uint64(0); i < params.BeaconConfig().NumberOfColumns; i++ { - if exists := columns[i]; !exists { - d.nonCustodyColumns[i] = true + // Initialize non custody groups. + d.nonCustodyGroups = make(map[uint64]bool) + for i := range numberOfCustodyGroups { + if !custodyGroups[i] { + d.nonCustodyGroups[i] = true } } - // initialize peer info first. + // Initialize peer info first. d.refreshPeerInfo() // periodically refresh peer info to keep peer <-> column mapping up to date. @@ -146,9 +152,6 @@ func (d *dataColumnSampler1D) samplingRoutine(ctx context.Context) { // Refresh peer information. func (d *dataColumnSampler1D) refreshPeerInfo() { - dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount - columnsPerSubnet := fieldparams.NumberOfColumns / dataColumnSidecarSubnetCount - d.Lock() defer d.Unlock() @@ -156,49 +159,50 @@ func (d *dataColumnSampler1D) refreshPeerInfo() { d.prunePeerInfo(activePeers) for _, pid := range activePeers { - csc := d.p2p.DataColumnsCustodyCountFromRemotePeer(pid) + // Retrieve the custody group count of the peer. + retrievedCustodyGroupCount := d.p2p.CustodyGroupCountFromPeer(pid) - columns, ok := d.columnFromPeer[pid] - columnsCount := uint64(len(columns)) + // Look into our store the custody storedGroups for this peer. + storedGroups, ok := d.groupsByPeer[pid] + storedGroupsCount := uint64(len(storedGroups)) - if ok && columnsCount == csc*columnsPerSubnet { + if ok && storedGroupsCount == retrievedCustodyGroupCount { // No change for this peer. 
continue } - nid, err := p2p.ConvertPeerIDToNodeID(pid) + nodeID, err := p2p.ConvertPeerIDToNodeID(pid) if err != nil { log.WithError(err).WithField("peerID", pid).Error("Failed to convert peer ID to node ID") continue } - columns, err = peerdas.CustodyColumns(nid, csc) + retrievedGroups, err := peerdas.CustodyGroups(nodeID, retrievedCustodyGroupCount) if err != nil { - log.WithError(err).WithField("peerID", pid).Error("Failed to determine peer custody columns") + log.WithError(err).WithField("peerID", pid).Error("Failed to determine peer custody groups") continue } - d.columnFromPeer[pid] = columns - for column := range columns { - d.peerFromColumn[column][pid] = true + d.groupsByPeer[pid] = retrievedGroups + for group := range retrievedGroups { + d.peersByCustodyGroup[group][pid] = true } } - columnsWithoutPeers := make([]uint64, 0) - for column, peers := range d.peerFromColumn { + groupsWithoutPeers := make([]uint64, 0) + for group, peers := range d.peersByCustodyGroup { if len(peers) == 0 { - columnsWithoutPeers = append(columnsWithoutPeers, column) + groupsWithoutPeers = append(groupsWithoutPeers, group) } } - slices.Sort[[]uint64](columnsWithoutPeers) - - if len(columnsWithoutPeers) > 0 { - log.WithField("columns", columnsWithoutPeers).Warn("Some columns have no peers responsible for custody") + if len(groupsWithoutPeers) > 0 { + slices.Sort[[]uint64](groupsWithoutPeers) + log.WithField("groups", groupsWithoutPeers).Warn("Some groups have no peers responsible for custody") } } -// prunePeerInfo prunes inactive peers from peerFromColumn and columnFromPeer. +// prunePeerInfo prunes inactive peers from peerByGroup and groupByPeer. // This should not be called outside of refreshPeerInfo without being locked. func (d *dataColumnSampler1D) prunePeerInfo(activePeers []peer.ID) { active := make(map[peer.ID]bool) @@ -206,7 +210,7 @@ func (d *dataColumnSampler1D) prunePeerInfo(activePeers []peer.ID) { active[pid] = true } - for pid := range d.columnFromPeer { + for pid := range d.groupsByPeer { if !active[pid] { d.prunePeer(pid) } @@ -215,8 +219,8 @@ func (d *dataColumnSampler1D) prunePeerInfo(activePeers []peer.ID) { // prunePeer removes a peer from stored peer info map, it should be called with lock held. func (d *dataColumnSampler1D) prunePeer(pid peer.ID) { - delete(d.columnFromPeer, pid) - for _, peers := range d.peerFromColumn { + delete(d.groupsByPeer, pid) + for _, peers := range d.peersByCustodyGroup { delete(peers, pid) } } @@ -238,13 +242,19 @@ func (d *dataColumnSampler1D) handleStateNotification(ctx context.Context, event return } - if data.SignedBlock.Version() < version.Deneb { - log.Debug("Pre Deneb block, skipping data column sampling") + if data.SignedBlock.Version() < version.Fulu { + log.Debug("Pre Fulu block, skipping data column sampling") return } - if !coreTime.PeerDASIsActive(data.Slot) { - // We do not trigger sampling if peerDAS is not active yet. + // Determine if we need to sample data columns for this block. + beaconConfig := params.BeaconConfig() + samplesPerSlots := beaconConfig.SamplesPerSlot + halfOfCustodyGroups := beaconConfig.NumberOfCustodyGroups / 2 + nonCustodyGroupsCount := uint64(len(d.nonCustodyGroups)) + + if nonCustodyGroupsCount <= halfOfCustodyGroups { + // Nothing to sample. return } @@ -262,8 +272,13 @@ func (d *dataColumnSampler1D) handleStateNotification(ctx context.Context, event } // Randomize columns for sample selection. 
- randomizedColumns := randomizeColumns(d.nonCustodyColumns) - samplesCount := min(params.BeaconConfig().SamplesPerSlot, uint64(len(d.nonCustodyColumns))-params.BeaconConfig().NumberOfColumns/2) + randomizedColumns, err := randomizeColumns(d.nonCustodyGroups) + if err != nil { + log.WithError(err).Error("Failed to randomize columns") + return + } + + samplesCount := min(samplesPerSlots, nonCustodyGroupsCount-halfOfCustodyGroups) // TODO: Use the first output of `incrementalDAS` as input of the fork choice rule. _, _, err = d.incrementalDAS(ctx, data, randomizedColumns, samplesCount) @@ -285,11 +300,12 @@ func (d *dataColumnSampler1D) incrementalDAS( firstColumnToSample, extendedSampleCount := uint64(0), peerdas.ExtendedSampleCount(sampleCount, allowedFailures) roundSummaries := make([]roundSummary, 0, 1) // We optimistically allocate only one round summary. blockRoot := blockProcessedData.BlockRoot + columnsCount := uint64(len(columns)) start := time.Now() for round := 1; ; /*No exit condition */ round++ { - if extendedSampleCount > uint64(len(columns)) { + if extendedSampleCount > columnsCount { // We already tried to sample all possible columns, this is the unhappy path. log.WithFields(logrus.Fields{ "root": fmt.Sprintf("%#x", blockRoot), @@ -309,7 +325,10 @@ func (d *dataColumnSampler1D) incrementalDAS( }).Debug("Start data columns sampling") // Sample data columns from peers in parallel. - retrievedSamples := d.sampleDataColumns(ctx, blockProcessedData, columnsToSample) + retrievedSamples, err := d.sampleDataColumns(ctx, blockProcessedData, columnsToSample) + if err != nil { + return false, nil, errors.Wrap(err, "sample data columns") + } missingSamples := make(map[uint64]bool) for _, column := range columnsToSample { @@ -339,7 +358,7 @@ func (d *dataColumnSampler1D) incrementalDAS( return false, nil, errors.New("retrieved more columns than requested") } - // missing columns, extend the samples. + // There is still some missing columns, extend the samples. allowedFailures += columnsToSampleCount - retrievedSampleCount oldExtendedSampleCount := extendedSampleCount firstColumnToSample = extendedSampleCount @@ -359,9 +378,12 @@ func (d *dataColumnSampler1D) sampleDataColumns( ctx context.Context, blockProcessedData *statefeed.BlockProcessedData, columns []uint64, -) map[uint64]bool { +) (map[uint64]bool, error) { // distribute samples to peer - peerToColumns := d.distributeSamplesToPeer(columns) + peerToColumns, err := d.distributeSamplesToPeer(columns) + if err != nil { + return nil, errors.Wrap(err, "distribute samples to peer") + } var ( mu sync.Mutex @@ -388,31 +410,39 @@ func (d *dataColumnSampler1D) sampleDataColumns( } wg.Wait() - return res + return res, nil } // distributeSamplesToPeer distributes samples to peers based on the columns they are responsible for. // Currently it randomizes peer selection for a column and did not take into account whole peer distribution balance. It could be improved if needed. 
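For readers following the round logic in incrementalDAS above, the control flow reduces to: sample a window of columns, and when some are missing, grow the allowed-failure budget and extend the window before retrying, until either a round fully succeeds or the candidate columns run out. A simplified sketch of that loop, with the peer sampling and the ExtendedSampleCount math abstracted behind callbacks; this is not the production signature.

package example

import "errors"

// incrementalSample sketches the round structure of incrementalDAS. extendedCount
// stands in for peerdas.ExtendedSampleCount and sample stands in for the
// parallel per-peer column sampling done by the real service.
func incrementalSample(
    columns []uint64,
    sampleCount uint64,
    extendedCount func(sampleCount, allowedFailures uint64) uint64,
    sample func(cols []uint64) map[uint64]bool, // returns the columns actually retrieved
) (bool, error) {
    first, allowedFailures := uint64(0), uint64(0)
    extended := extendedCount(sampleCount, allowedFailures)

    for {
        if extended > uint64(len(columns)) {
            // Every candidate column has been tried; sampling failed.
            return false, nil
        }

        toSample := columns[first:extended]
        retrieved := sample(toSample)

        requested, got := uint64(len(toSample)), uint64(len(retrieved))
        if got > requested {
            return false, errors.New("retrieved more columns than requested")
        }
        if got == requested {
            // All requested columns were retrieved in this round.
            return true, nil
        }

        // Some columns are still missing: extend the samples and try the next window.
        allowedFailures += requested - got
        first = extended
        extended = extendedCount(sampleCount, allowedFailures)
    }
}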
-func (d *dataColumnSampler1D) distributeSamplesToPeer( - columns []uint64, -) map[peer.ID]map[uint64]bool { +func (d *dataColumnSampler1D) distributeSamplesToPeer(columns []uint64) (map[peer.ID]map[uint64]bool, error) { dist := make(map[peer.ID]map[uint64]bool) - for _, col := range columns { - peers := d.peerFromColumn[col] + for _, column := range columns { + custodyGroup, err := peerdas.ComputeCustodyGroupForColumn(column) + if err != nil { + return nil, errors.Wrap(err, "compute custody group for column") + } + + peers := d.peersByCustodyGroup[custodyGroup] if len(peers) == 0 { - log.WithField("column", col).Warn("No peers responsible for custody of column") + log.WithField("column", column).Warning("No peers responsible for custody of column") continue } - pid := selectRandomPeer(peers) + pid, err := selectRandomPeer(peers) + if err != nil { + return nil, errors.Wrap(err, "select random peer") + } + if _, ok := dist[pid]; !ok { dist[pid] = make(map[uint64]bool) } - dist[pid][col] = true + + dist[pid][column] = true } - return dist + return dist, nil } func (d *dataColumnSampler1D) sampleDataColumnsFromPeer( @@ -463,20 +493,41 @@ func (d *dataColumnSampler1D) sampleDataColumnsFromPeer( return retrievedColumns } -// randomizeColumns returns a slice containing all the numbers between 0 and colNum in a random order. -func randomizeColumns(columns map[uint64]bool) []uint64 { - // Create a slice from columns. - randomized := make([]uint64, 0, len(columns)) - for column := range columns { - randomized = append(randomized, column) +// randomizeColumns returns a slice containing randomly ordered columns belonging to the input `groups`. +func randomizeColumns(custodyGroups map[uint64]bool) ([]uint64, error) { + // Compute the number of columns per group. + numberOfColumns := params.BeaconConfig().NumberOfColumns + numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups + columnsPerGroup := numberOfColumns / numberOfCustodyGroups + + // Compute the number of columns. + groupCount := uint64(len(custodyGroups)) + expectedColumnCount := groupCount * columnsPerGroup + + // Compute the columns. + columns := make([]uint64, 0, expectedColumnCount) + for group := range custodyGroups { + columnsGroup, err := peerdas.ComputeColumnsForCustodyGroup(group) + if err != nil { + return nil, errors.Wrap(err, "compute columns for custody group") + } + + columns = append(columns, columnsGroup...) + } + + actualColumnCount := len(columns) + + // Safety check. + if uint64(actualColumnCount) != expectedColumnCount { + return nil, errors.New("invalid number of columns, should never happen") } - // Shuffle the slice. - rand.NewGenerator().Shuffle(len(randomized), func(i, j int) { - randomized[i], randomized[j] = randomized[j], randomized[i] + // Shuffle the columns. + rand.NewGenerator().Shuffle(actualColumnCount, func(i, j int) { + columns[i], columns[j] = columns[j], columns[i] }) - return randomized + return columns, nil } // sortedSliceFromMap returns a sorted list of keys from a map. @@ -494,17 +545,20 @@ func sortedSliceFromMap(m map[uint64]bool) []uint64 { } // selectRandomPeer returns a random peer from the given list of peers. 
-func selectRandomPeer(peers map[peer.ID]bool) peer.ID { - pick := rand.NewGenerator().Uint64() % uint64(len(peers)) - for k := range peers { +func selectRandomPeer(peers map[peer.ID]bool) (peer.ID, error) { + peersCount := uint64(len(peers)) + pick := rand.NewGenerator().Uint64() % peersCount + + for peer := range peers { if pick == 0 { - return k + return peer, nil } + pick-- } // This should never be reached. - return peer.ID("") + return peer.ID(""), errors.New("failed to select random peer") } // verifyColumn verifies the retrieved column against the root, the index, diff --git a/beacon-chain/sync/data_columns_sampling_test.go b/beacon-chain/sync/data_columns_sampling_test.go index d1c8a0a6eaa..09c19fc06b0 100644 --- a/beacon-chain/sync/data_columns_sampling_test.go +++ b/beacon-chain/sync/data_columns_sampling_test.go @@ -36,14 +36,15 @@ import ( func TestRandomizeColumns(t *testing.T) { const count uint64 = 128 - // Generate columns. - columns := make(map[uint64]bool, count) + // Generate groups. + groups := make(map[uint64]bool, count) for i := uint64(0); i < count; i++ { - columns[i] = true + groups[i] = true } // Randomize columns. - randomizedColumns := randomizeColumns(columns) + randomizedColumns, err := randomizeColumns(groups) + require.NoError(t, err) // Convert back to a map. randomizedColumnsMap := make(map[uint64]bool, count) @@ -52,7 +53,7 @@ func TestRandomizeColumns(t *testing.T) { } // Check duplicates and missing columns. - require.Equal(t, len(columns), len(randomizedColumnsMap)) + require.Equal(t, len(groups), len(randomizedColumnsMap)) // Check the values. for column := range randomizedColumnsMap { @@ -70,7 +71,7 @@ func createAndConnectPeer( p2pService *p2ptest.TestP2P, chainService *mock.ChainService, dataColumnSidecars []*ethpb.DataColumnSidecar, - custodySubnetCount uint64, + custodyGroupCount uint64, columnsNotToRespond map[uint64]bool, offset int, ) *p2ptest.TestP2P { @@ -112,7 +113,7 @@ func createAndConnectPeer( // Create the record and set the custody count. enr := &enr.Record{} - enr.Set(peerdas.Csc(custodySubnetCount)) + enr.Set(peerdas.Cgc(custodyGroupCount)) // Add the peer and connect it. p2pService.Peers().Add(enr, peer.PeerID(), nil, network.DirOutbound) @@ -138,7 +139,7 @@ type dataSamplerTest struct { func setupDefaultDataColumnSamplerTest(t *testing.T) (*dataSamplerTest, *dataColumnSampler1D) { const ( blobCount uint64 = 3 - custodyRequirement uint64 = 1 + custodyRequirement uint64 = 4 ) test, sampler := setupDataColumnSamplerTest(t, blobCount) @@ -219,33 +220,33 @@ func setupDataColumnSamplerTest(t *testing.T, blobCount uint64) (*dataSamplerTes func TestDataColumnSampler1D_PeerManagement(t *testing.T) { testCases := []struct { + name string numPeers int custodyRequirement uint64 - subnetCount uint64 expectedColumns [][]uint64 prunePeers map[int]bool // Peers to prune. 
}{ { + name: "custodyRequirement=4", numPeers: 3, - custodyRequirement: 1, - subnetCount: 32, + custodyRequirement: 4, expectedColumns: [][]uint64{ - {6, 38, 70, 102}, - {3, 35, 67, 99}, - {12, 44, 76, 108}, + {6, 37, 48, 113}, + {35, 79, 92, 109}, + {31, 44, 58, 97}, }, prunePeers: map[int]bool{ 0: true, }, }, { + name: "custodyRequirement=8", numPeers: 3, - custodyRequirement: 2, - subnetCount: 32, + custodyRequirement: 8, expectedColumns: [][]uint64{ - {6, 16, 38, 48, 70, 80, 102, 112}, - {3, 13, 35, 45, 67, 77, 99, 109}, - {12, 31, 44, 63, 76, 95, 108, 127}, + {1, 6, 37, 48, 51, 87, 112, 113}, + {24, 25, 35, 52, 79, 92, 109, 126}, + {31, 44, 58, 64, 91, 97, 116, 127}, }, prunePeers: map[int]bool{ 0: true, @@ -255,116 +256,115 @@ func TestDataColumnSampler1D_PeerManagement(t *testing.T) { params.SetupTestConfigCleanup(t) for _, tc := range testCases { - cfg := params.BeaconConfig() - cfg.CustodyRequirement = tc.custodyRequirement - cfg.DataColumnSidecarSubnetCount = tc.subnetCount - params.OverrideBeaconConfig(cfg) - test, sampler := setupDataColumnSamplerTest(t, uint64(tc.numPeers)) - for i := 0; i < tc.numPeers; i++ { - p := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, tc.custodyRequirement, nil, i+1) - test.peers = append(test.peers, p) - } - - // confirm everything works - sampler.refreshPeerInfo() - require.Equal(t, params.BeaconConfig().NumberOfColumns, uint64(len(sampler.peerFromColumn))) - - require.Equal(t, tc.numPeers, len(sampler.columnFromPeer)) - for i, peer := range test.peers { - // confirm peer has the expected columns - require.Equal(t, len(tc.expectedColumns[i]), len(sampler.columnFromPeer[peer.PeerID()])) - for _, column := range tc.expectedColumns[i] { - require.Equal(t, true, sampler.columnFromPeer[peer.PeerID()][column]) + t.Run(tc.name, func(t *testing.T) { + cfg := params.BeaconConfig() + cfg.CustodyRequirement = tc.custodyRequirement + params.OverrideBeaconConfig(cfg) + test, sampler := setupDataColumnSamplerTest(t, uint64(tc.numPeers)) + for i := 0; i < tc.numPeers; i++ { + p := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, tc.custodyRequirement, nil, i+1) + test.peers = append(test.peers, p) } - // confirm column to peer mapping are correct - for _, column := range tc.expectedColumns[i] { - require.Equal(t, true, sampler.peerFromColumn[column][peer.PeerID()]) - } - } + // confirm everything works + sampler.refreshPeerInfo() + require.Equal(t, params.BeaconConfig().NumberOfColumns, uint64(len(sampler.peersByCustodyGroup))) - // prune peers - for peer := range tc.prunePeers { - err := test.p2pSvc.Disconnect(test.peers[peer].PeerID()) - test.p2pSvc.Peers().SetConnectionState(test.peers[peer].PeerID(), peers.Disconnected) - require.NoError(t, err) - } - sampler.refreshPeerInfo() + require.Equal(t, tc.numPeers, len(sampler.groupsByPeer)) + for i, peer := range test.peers { + // confirm peer has the expected columns + require.Equal(t, len(tc.expectedColumns[i]), len(sampler.groupsByPeer[peer.PeerID()])) + for _, column := range tc.expectedColumns[i] { + require.Equal(t, true, sampler.groupsByPeer[peer.PeerID()][column]) + } - require.Equal(t, tc.numPeers-len(tc.prunePeers), len(sampler.columnFromPeer)) - for i, peer := range test.peers { - for _, column := range tc.expectedColumns[i] { - expected := true - if tc.prunePeers[i] { - expected = false + // confirm column to peer mapping are correct + for _, column := range tc.expectedColumns[i] { + require.Equal(t, true, 
sampler.peersByCustodyGroup[column][peer.PeerID()]) } - require.Equal(t, expected, sampler.peerFromColumn[column][peer.PeerID()]) } - } + + // prune peers + for peer := range tc.prunePeers { + err := test.p2pSvc.Disconnect(test.peers[peer].PeerID()) + test.p2pSvc.Peers().SetConnectionState(test.peers[peer].PeerID(), peers.Disconnected) + require.NoError(t, err) + } + sampler.refreshPeerInfo() + + require.Equal(t, tc.numPeers-len(tc.prunePeers), len(sampler.groupsByPeer)) + for i, peer := range test.peers { + for _, column := range tc.expectedColumns[i] { + expected := true + if tc.prunePeers[i] { + expected = false + } + require.Equal(t, expected, sampler.peersByCustodyGroup[column][peer.PeerID()]) + } + } + }) } } func TestDataColumnSampler1D_SampleDistribution(t *testing.T) { + // TODO: Use `t.Run`. testCases := []struct { numPeers int custodyRequirement uint64 - subnetCount uint64 columnsToDistribute [][]uint64 expectedDistribution []map[int][]uint64 }{ { numPeers: 3, - custodyRequirement: 1, - subnetCount: 32, + custodyRequirement: 4, // peer custody maps - // p0: {6, 38, 70, 102}, - // p1: {3, 35, 67, 99}, - // p2: {12, 44, 76, 108}, + // p0: {6, 37, 48, 113}, + // p1: {35, 79, 92, 109}, + // p2: {31, 44, 58, 97}, columnsToDistribute: [][]uint64{ - {3, 6, 12}, - {6, 3, 12, 38, 35, 44}, - {6, 38, 70}, + {6, 35, 31}, + {6, 48, 79, 109, 31, 97}, + {6, 37, 113}, {11}, }, expectedDistribution: []map[int][]uint64{ { - 0: {6}, // p1 - 1: {3}, // p2 - 2: {12}, // p3 + 0: {6}, // p0 + 1: {35}, // p1 + 2: {31}, // p2 }, { - 0: {6, 38}, // p1 - 1: {3, 35}, // p2 - 2: {12, 44}, // p3 + 0: {6, 48}, // p0 + 1: {79, 109}, // p1 + 2: {31, 97}, // p2 }, { - 0: {6, 38, 70}, // p1 + 0: {6, 37, 113}, // p0 }, {}, }, }, { numPeers: 3, - custodyRequirement: 2, - subnetCount: 32, + custodyRequirement: 8, // peer custody maps - // p0: {6, 16, 38, 48, 70, 80, 102, 112}, - // p1: {3, 13, 35, 45, 67, 77, 99, 109}, - // p2: {12, 31, 44, 63, 76, 95, 108, 127}, + // p0: {6, 37, 48, 113, 1, 112, 87, 51}, + // p1: {35, 79, 92, 109, 52, 126, 25, 24}, + // p2: {31, 44, 58, 97, 116, 91, 64, 127}, columnsToDistribute: [][]uint64{ - {3, 6, 12, 109, 112, 127}, // all covered by peers - {13, 16, 31, 32}, // 32 not in covered by peers + {6, 48, 79, 25, 24, 97}, // all covered by peers + {6, 35, 31, 32}, // `32` is not in covered by peers }, expectedDistribution: []map[int][]uint64{ { - 0: {6, 112}, // p1 - 1: {3, 109}, // p2 - 2: {12, 127}, // p3 + 0: {6, 48}, // p0 + 1: {79, 25, 24}, // p1 + 2: {97}, // p2 }, { - 0: {16}, // p1 - 1: {13}, // p2 - 2: {31}, // p3 + 0: {6}, // p0 + 1: {35}, // p1 + 2: {31}, // p2 }, }, }, @@ -373,7 +373,6 @@ func TestDataColumnSampler1D_SampleDistribution(t *testing.T) { for _, tc := range testCases { cfg := params.BeaconConfig() cfg.CustodyRequirement = tc.custodyRequirement - cfg.DataColumnSidecarSubnetCount = tc.subnetCount params.OverrideBeaconConfig(cfg) test, sampler := setupDataColumnSamplerTest(t, uint64(tc.numPeers)) for i := 0; i < tc.numPeers; i++ { @@ -383,7 +382,8 @@ func TestDataColumnSampler1D_SampleDistribution(t *testing.T) { sampler.refreshPeerInfo() for idx, columns := range tc.columnsToDistribute { - result := sampler.distributeSamplesToPeer(columns) + result, err := sampler.distributeSamplesToPeer(columns) + require.NoError(t, err) require.Equal(t, len(tc.expectedDistribution[idx]), len(result), fmt.Sprintf("%v - %v", tc.expectedDistribution[idx], result)) for peerIdx, dist := range tc.expectedDistribution[idx] { @@ -397,34 +397,36 @@ func 
TestDataColumnSampler1D_SampleDistribution(t *testing.T) { } func TestDataColumnSampler1D_SampleDataColumns(t *testing.T) { - params.SetupTestConfigCleanup(t) - cfg := params.BeaconConfig() - cfg.DataColumnSidecarSubnetCount = 32 - params.OverrideBeaconConfig(cfg) test, sampler := setupDefaultDataColumnSamplerTest(t) sampler.refreshPeerInfo() - // Sample all columns. - sampleColumns := []uint64{6, 3, 12, 38, 35, 44, 70, 67, 76, 102, 99, 108} - retrieved := sampler.sampleDataColumns(test.ctx, test.blockProcessedData, sampleColumns) - require.Equal(t, 12, len(retrieved)) - for _, column := range sampleColumns { - require.Equal(t, true, retrieved[column]) - } + t.Run("sample all columns", func(t *testing.T) { + sampleColumns := []uint64{6, 35, 31, 37, 79, 44, 48, 92, 58, 113, 109, 97} + retrieved, err := sampler.sampleDataColumns(test.ctx, test.blockProcessedData, sampleColumns) + require.NoError(t, err) + require.Equal(t, 12, len(retrieved)) + for _, column := range sampleColumns { + require.Equal(t, true, retrieved[column]) + } + }) - // Sample a subset of columns. - sampleColumns = []uint64{6, 3, 12, 38, 35, 44} - retrieved = sampler.sampleDataColumns(test.ctx, test.blockProcessedData, sampleColumns) - require.Equal(t, 6, len(retrieved)) - for _, column := range sampleColumns { - require.Equal(t, true, retrieved[column]) - } + t.Run("sample a subset of columns", func(t *testing.T) { + sampleColumns := []uint64{35, 31, 79, 48, 113, 97} + retrieved, err := sampler.sampleDataColumns(test.ctx, test.blockProcessedData, sampleColumns) + require.NoError(t, err) + require.Equal(t, 6, len(retrieved)) + for _, column := range sampleColumns { + require.Equal(t, true, retrieved[column]) + } + }) - // Sample a subset of columns with missing columns. - sampleColumns = []uint64{6, 3, 12, 127} - retrieved = sampler.sampleDataColumns(test.ctx, test.blockProcessedData, sampleColumns) - require.Equal(t, 3, len(retrieved)) - require.DeepEqual(t, map[uint64]bool{6: true, 3: true, 12: true}, retrieved) + t.Run("sample a subset of columns with missing columns", func(t *testing.T) { + sampleColumns := []uint64{35, 31, 100, 79} + retrieved, err := sampler.sampleDataColumns(test.ctx, test.blockProcessedData, sampleColumns) + require.NoError(t, err) + require.Equal(t, 3, len(retrieved)) + require.DeepEqual(t, map[uint64]bool{35: true, 31: true, 79: true}, retrieved) + }) } func TestDataColumnSampler1D_IncrementalDAS(t *testing.T) { @@ -444,12 +446,12 @@ func TestDataColumnSampler1D_IncrementalDAS(t *testing.T) { { name: "All columns are correctly sampled in a single round", samplesCount: 5, - possibleColumnsToRequest: []uint64{70, 35, 99, 6, 38, 3, 67, 102, 12, 44, 76, 108}, + possibleColumnsToRequest: []uint64{6, 35, 31, 37, 79, 44, 48, 92, 58, 113, 109, 97}, columnsNotToRespond: map[uint64]bool{}, expectedSuccess: true, expectedRoundSummaries: []roundSummary{ { - RequestedColumns: []uint64{70, 35, 99, 6, 38}, + RequestedColumns: []uint64{6, 35, 31, 37, 79}, MissingColumns: map[uint64]bool{}, }, }, @@ -457,16 +459,16 @@ func TestDataColumnSampler1D_IncrementalDAS(t *testing.T) { { name: "Two missing columns in the first round, ok in the second round", samplesCount: 5, - possibleColumnsToRequest: []uint64{70, 35, 99, 6, 38, 3, 67, 102, 12, 44, 76, 108}, - columnsNotToRespond: map[uint64]bool{6: true, 70: true}, + possibleColumnsToRequest: []uint64{6, 35, 31, 37, 79, 44, 48, 92, 58, 113, 109, 97}, + columnsNotToRespond: map[uint64]bool{6: true, 31: true}, expectedSuccess: true, expectedRoundSummaries: []roundSummary{ 
{ - RequestedColumns: []uint64{70, 35, 99, 6, 38}, - MissingColumns: map[uint64]bool{70: true, 6: true}, + RequestedColumns: []uint64{6, 35, 31, 37, 79}, + MissingColumns: map[uint64]bool{6: true, 31: true}, }, { - RequestedColumns: []uint64{3, 67, 102, 12, 44, 76}, + RequestedColumns: []uint64{44, 48, 92, 58, 113, 109}, MissingColumns: map[uint64]bool{}, }, }, @@ -474,35 +476,37 @@ func TestDataColumnSampler1D_IncrementalDAS(t *testing.T) { { name: "Two missing columns in the first round, one missing in the second round. Fail to sample.", samplesCount: 5, - possibleColumnsToRequest: []uint64{70, 35, 99, 6, 38, 3, 67, 102, 12, 44, 76, 108}, - columnsNotToRespond: map[uint64]bool{6: true, 70: true, 3: true}, + possibleColumnsToRequest: []uint64{6, 35, 31, 37, 79, 44, 48, 92, 58, 113, 109, 97}, + columnsNotToRespond: map[uint64]bool{6: true, 31: true, 48: true}, expectedSuccess: false, expectedRoundSummaries: []roundSummary{ { - RequestedColumns: []uint64{70, 35, 99, 6, 38}, - MissingColumns: map[uint64]bool{70: true, 6: true}, + RequestedColumns: []uint64{6, 35, 31, 37, 79}, + MissingColumns: map[uint64]bool{6: true, 31: true}, }, { - RequestedColumns: []uint64{3, 67, 102, 12, 44, 76}, - MissingColumns: map[uint64]bool{3: true}, + RequestedColumns: []uint64{44, 48, 92, 58, 113, 109}, + MissingColumns: map[uint64]bool{48: true}, }, }, }, } for _, tc := range testCases { - test, sampler := setupDataColumnSamplerTest(t, 3) - p1 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, params.BeaconConfig().CustodyRequirement, tc.columnsNotToRespond, 1) - p2 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, params.BeaconConfig().CustodyRequirement, tc.columnsNotToRespond, 2) - p3 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, params.BeaconConfig().CustodyRequirement, tc.columnsNotToRespond, 3) - test.peers = []*p2ptest.TestP2P{p1, p2, p3} + t.Run(tc.name, func(t *testing.T) { + test, sampler := setupDataColumnSamplerTest(t, 3) + p1 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, params.BeaconConfig().CustodyRequirement, tc.columnsNotToRespond, 1) + p2 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, params.BeaconConfig().CustodyRequirement, tc.columnsNotToRespond, 2) + p3 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, params.BeaconConfig().CustodyRequirement, tc.columnsNotToRespond, 3) + test.peers = []*p2ptest.TestP2P{p1, p2, p3} - sampler.refreshPeerInfo() + sampler.refreshPeerInfo() - success, summaries, err := sampler.incrementalDAS(test.ctx, test.blockProcessedData, tc.possibleColumnsToRequest, tc.samplesCount) - require.NoError(t, err) - require.Equal(t, tc.expectedSuccess, success) - require.DeepEqual(t, tc.expectedRoundSummaries, summaries) + success, summaries, err := sampler.incrementalDAS(test.ctx, test.blockProcessedData, tc.possibleColumnsToRequest, tc.samplesCount) + require.NoError(t, err) + require.Equal(t, tc.expectedSuccess, success) + require.DeepEqual(t, tc.expectedRoundSummaries, summaries) + }) } } diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher.go b/beacon-chain/sync/initial-sync/blocks_fetcher.go index 599b726c1a5..951c4f38c26 100644 --- a/beacon-chain/sync/initial-sync/blocks_fetcher.go +++ b/beacon-chain/sync/initial-sync/blocks_fetcher.go @@ -764,11 +764,17 @@ func (f *blocksFetcher) custodyColumns() (map[uint64]bool, error) { // Retrieve our node ID. 
localNodeID := f.p2p.NodeID() - // Retrieve the number of colums subnets we should custody. - localCustodySubnetCount := peerdas.CustodySubnetCount() + // Retrieve the number of groups we should custody. + localCustodyGroupCount := peerdas.CustodyGroupCount() - // Retrieve the columns we should custody. - localCustodyColumns, err := peerdas.CustodyColumns(localNodeID, localCustodySubnetCount) + // Compute the groups we should custody. + localCustodyGroups, err := peerdas.CustodyGroups(localNodeID, localCustodyGroupCount) + if err != nil { + return nil, errors.Wrap(err, "custody groups") + } + + // Compute the columns we should custody. + localCustodyColumns, err := peerdas.CustodyColumns(localCustodyGroups) if err != nil { return nil, errors.Wrap(err, "custody columns") } @@ -1112,7 +1118,7 @@ func (f *blocksFetcher) waitForPeersForDataColumns( } // Get the peers that are admissible for the data columns. - dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, descriptions, err := f.admissiblePeersForDataColumn(peers, lastSlot, neededDataColumns, blockCount) + dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, descriptions, err := f.admissiblePeersForCustodyGroup(peers, lastSlot, neededDataColumns, blockCount) if err != nil { return nil, errors.Wrap(err, "peers with slot and data columns") } @@ -1165,7 +1171,7 @@ func (f *blocksFetcher) waitForPeersForDataColumns( time.Sleep(delay) - dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, descriptions, err = f.admissiblePeersForDataColumn(peers, lastSlot, neededDataColumns, blockCount) + dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, descriptions, err = f.admissiblePeersForCustodyGroup(peers, lastSlot, neededDataColumns, blockCount) if err != nil { return nil, errors.Wrap(err, "peers with slot and data columns") } diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_test.go b/beacon-chain/sync/initial-sync/blocks_fetcher_test.go index cf6f7dc35a6..684ffe4c223 100644 --- a/beacon-chain/sync/initial-sync/blocks_fetcher_test.go +++ b/beacon-chain/sync/initial-sync/blocks_fetcher_test.go @@ -1378,7 +1378,7 @@ type ( peerParams struct { // Custody subnet count - csc uint64 + cgc uint64 // key: RPCDataColumnSidecarsByRangeTopicV1 stringified // value: The list of all slotxindex to respond by request number @@ -1462,7 +1462,7 @@ func createAndConnectPeer( // Create the record and set the custody count. enr := &enr.Record{} - enr.Set(peerdas.Csc(peerParams.csc)) + enr.Set(peerdas.Cgc(peerParams.cgc)) // Add the peer and connect it. 
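The test helpers above advertise a peer's custody group count through its ENR under the new cgc key, replacing the old csc custody subnet count entry. A small sketch of how such an entry can be written and read back with go-ethereum's enr package, assuming a uint64-backed entry type like the one the peerdas package provides.

package example

import "github.com/ethereum/go-ethereum/p2p/enr"

// Cgc is a sketch of a custody-group-count ENR entry stored under the "cgc" key.
type Cgc uint64

// ENRKey implements enr.Entry so the value is stored under the "cgc" key.
func (Cgc) ENRKey() string { return "cgc" }

// advertiseCustodyGroupCount writes the custody group count into a record.
func advertiseCustodyGroupCount(record *enr.Record, custodyGroupCount uint64) {
    record.Set(Cgc(custodyGroupCount))
}

// custodyGroupCountFromRecord reads the value back, defaulting to zero when the
// entry is absent or cannot be decoded.
func custodyGroupCountFromRecord(record *enr.Record) uint64 {
    var cgc Cgc
    if err := record.Load(&cgc); err != nil {
        return 0
    }
    return uint64(cgc)
}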
p2pService.Peers().Add(enr, peer.PeerID(), nil, network.DirOutbound) @@ -1831,11 +1831,11 @@ func TestFetchDataColumnsFromPeers(t *testing.T) { }, peersParams: []peerParams{ { - csc: 0, + cgc: 0, toRespond: map[string][][]responseParams{}, }, { - csc: 128, + cgc: 128, toRespond: map[string][][]responseParams{ (ðpb.DataColumnSidecarsByRangeRequest{ StartSlot: 33, @@ -1864,7 +1864,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) { }, }, { - csc: 128, + cgc: 128, toRespond: map[string][][]responseParams{ (ðpb.DataColumnSidecarsByRangeRequest{ StartSlot: 33, @@ -1926,7 +1926,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) { }, peersParams: []peerParams{ { - csc: 128, + cgc: 128, toRespond: map[string][][]responseParams{ (ðpb.DataColumnSidecarsByRangeRequest{ StartSlot: 33, @@ -1971,7 +1971,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) { storedDataColumns: []map[int]bool{{38: true, 102: true}}, peersParams: []peerParams{ { - csc: 128, + cgc: 128, toRespond: map[string][][]responseParams{ (ðpb.DataColumnSidecarsByRangeRequest{ StartSlot: 38, @@ -2002,7 +2002,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) { storedDataColumns: []map[int]bool{{38: true, 102: true}}, peersParams: []peerParams{ { - csc: 128, + cgc: 128, toRespond: map[string][][]responseParams{ (ðpb.DataColumnSidecarsByRangeRequest{ StartSlot: 38, @@ -2030,7 +2030,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) { storedDataColumns: []map[int]bool{{38: true, 102: true}}, peersParams: []peerParams{ { - csc: 128, + cgc: 128, toRespond: map[string][][]responseParams{ (ðpb.DataColumnSidecarsByRangeRequest{ StartSlot: 38, @@ -2059,7 +2059,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) { }, peersParams: []peerParams{ { - csc: 128, + cgc: 128, toRespond: map[string][][]responseParams{ (ðpb.DataColumnSidecarsByRangeRequest{ StartSlot: 32, diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_utils.go b/beacon-chain/sync/initial-sync/blocks_fetcher_utils.go index 58e5cc432d5..ac1b4094b29 100644 --- a/beacon-chain/sync/initial-sync/blocks_fetcher_utils.go +++ b/beacon-chain/sync/initial-sync/blocks_fetcher_utils.go @@ -382,11 +382,11 @@ func (f *blocksFetcher) calculateHeadAndTargetEpochs() (headEpoch, targetEpoch p return headEpoch, targetEpoch, peers } -// custodyColumnFromPeer compute all costody columns indexed by peer. -func (f *blocksFetcher) custodyDataColumnsFromPeer(peers map[peer.ID]bool) (map[peer.ID]map[uint64]bool, error) { +// custodyGroupsFromPeer compute all the custody groups indexed by peer. +func (f *blocksFetcher) custodyGroupsFromPeer(peers map[peer.ID]bool) (map[peer.ID]map[uint64]bool, error) { peerCount := len(peers) - custodyDataColumnsByPeer := make(map[peer.ID]map[uint64]bool, peerCount) + custodyGroupsByPeer := make(map[peer.ID]map[uint64]bool, peerCount) for peer := range peers { // Get the node ID from the peer ID. nodeID, err := p2p.ConvertPeerIDToNodeID(peer) @@ -394,19 +394,19 @@ func (f *blocksFetcher) custodyDataColumnsFromPeer(peers map[peer.ID]bool) (map[ return nil, errors.Wrap(err, "convert peer ID to node ID") } - // Get the custody columns count from the peer. - custodyCount := f.p2p.DataColumnsCustodyCountFromRemotePeer(peer) + // Get the custody group count of the peer. + custodyGroupCount := f.p2p.CustodyGroupCountFromPeer(peer) - // Get the custody columns from the peer. - custodyDataColumns, err := peerdas.CustodyColumns(nodeID, custodyCount) + // Get the custody groups of the peer. 
+ custodyGroups, err := peerdas.CustodyGroups(nodeID, custodyGroupCount) if err != nil { - return nil, errors.Wrap(err, "custody columns") + return nil, errors.Wrap(err, "custody groups") } - custodyDataColumnsByPeer[peer] = custodyDataColumns + custodyGroupsByPeer[peer] = custodyGroups } - return custodyDataColumnsByPeer, nil + return custodyGroupsByPeer, nil } // uint64MapToSortedSlice produces a sorted uint64 slice from a map. @@ -468,19 +468,20 @@ outerLoop: return outputDataColumnsByPeer, descriptions } -// admissiblePeersForDataColumn returns a map of peers that: -// - custody at least one column listed in `neededDataColumns`, +// admissiblePeersForCustodyGroup returns a map of peers that: +// - custody at least one custody group listed in `neededCustodyGroups`, // - are synced to `targetSlot`, and // - have enough bandwidth to serve data columns corresponding to `count` blocks. +// // It returns: -// - A map, where the key of the map is the peer, the value is the custody columns of the peer. -// - A map, where the key of the map is the data column, the value is the peer that custody the data column. +// - A map, where the key of the map is the peer, the value is the custody groups of the peer. +// - A map, where the key of the map is the custody group, the value is the peer that custodies the group. // - A slice of descriptions for non admissible peers. // - An error if any. -func (f *blocksFetcher) admissiblePeersForDataColumn( +func (f *blocksFetcher) admissiblePeersForCustodyGroup( peers []peer.ID, targetSlot primitives.Slot, - neededDataColumns map[uint64]bool, + neededCustodyGroups map[uint64]bool, count uint64, ) (map[peer.ID]map[uint64]bool, map[uint64][]peer.ID, []string, error) { // If no peer is specified, get all connected peers. @@ -490,7 +491,7 @@ func (f *blocksFetcher) admissiblePeersForDataColumn( } inputPeerCount := len(inputPeers) - neededDataColumnsCount := uint64(len(neededDataColumns)) + neededCustodyGroupCount := uint64(len(neededCustodyGroups)) // Create description slice for non admissible peers. descriptions := make([]string, 0, inputPeerCount) @@ -518,7 +519,6 @@ func (f *blocksFetcher) admissiblePeersForDataColumn( peersWithAdmissibleHeadEpoch := make(map[peer.ID]bool, inputPeerCount) for _, peer := range peersWithSufficientBandwidth { peerChainState, err := f.p2p.Peers().ChainState(peer) - if err != nil { description := fmt.Sprintf("peer %s: error: %s", peer, err) descriptions = append(descriptions, description) @@ -542,18 +542,18 @@ func (f *blocksFetcher) admissiblePeersForDataColumn( peersWithAdmissibleHeadEpoch[peer] = true } - // Compute custody columns for each peer. - dataColumnsByPeerWithAdmissibleHeadEpoch, err := f.custodyDataColumnsFromPeer(peersWithAdmissibleHeadEpoch) + // Compute custody groups for each peer. + dataColumnsByPeerWithAdmissibleHeadEpoch, err := f.custodyGroupsFromPeer(peersWithAdmissibleHeadEpoch) if err != nil { return nil, nil, nil, errors.Wrap(err, "custody columns from peer") } // Filter peers which custody at least one needed data column. - dataColumnsByAdmissiblePeer, localDescriptions := filterPeerWhichCustodyAtLeastOneDataColumn(neededDataColumns, dataColumnsByPeerWithAdmissibleHeadEpoch) + dataColumnsByAdmissiblePeer, localDescriptions := filterPeerWhichCustodyAtLeastOneDataColumn(neededCustodyGroups, dataColumnsByPeerWithAdmissibleHeadEpoch) descriptions = append(descriptions, localDescriptions...) // Compute a map from needed data columns to their peers. 
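The admissible-peer selection above ends by mapping each needed custody group to the peers that custody it, inverting the per-peer custody map. A compact sketch of that inversion, restricted to the needed groups; the names here are hypothetical, not the fetcher's actual identifiers.

package example

import "github.com/libp2p/go-libp2p/core/peer"

// peersByNeededGroup inverts a peer -> custody groups map into a
// needed group -> peers map, keeping only the groups listed in needed.
func peersByNeededGroup(
    needed map[uint64]bool,
    groupsByPeer map[peer.ID]map[uint64]bool,
) map[uint64][]peer.ID {
    out := make(map[uint64][]peer.ID, len(needed))
    for pid, groups := range groupsByPeer {
        for group := range groups {
            if needed[group] {
                out[group] = append(out[group], pid)
            }
        }
    }
    return out
}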
- admissiblePeersByDataColumn := make(map[uint64][]peer.ID, neededDataColumnsCount) + admissiblePeersByDataColumn := make(map[uint64][]peer.ID, neededCustodyGroupCount) for peer, peerCustodyDataColumns := range dataColumnsByAdmissiblePeer { for dataColumn := range peerCustodyDataColumns { admissiblePeersByDataColumn[dataColumn] = append(admissiblePeersByDataColumn[dataColumn], peer) diff --git a/beacon-chain/sync/initial-sync/service.go b/beacon-chain/sync/initial-sync/service.go index 321ea94ace5..14c7d196a57 100644 --- a/beacon-chain/sync/initial-sync/service.go +++ b/beacon-chain/sync/initial-sync/service.go @@ -321,8 +321,8 @@ func missingBlobRequest(blk blocks.ROBlock, store *filesystem.BlobStorage) (p2pt } func (s *Service) missingColumnRequest(roBlock blocks.ROBlock, store *filesystem.BlobStorage) (p2ptypes.DataColumnSidecarsByRootReq, error) { - // No columns for pre-Deneb blocks. - if roBlock.Version() < version.Deneb { + // No columns for pre-Fulu blocks. + if roBlock.Version() < version.Fulu { return nil, nil } @@ -349,15 +349,24 @@ func (s *Service) missingColumnRequest(roBlock blocks.ROBlock, store *filesystem // Get our node ID. nodeID := s.cfg.P2P.NodeID() - // Get the custodied columns. - custodiedColumns, err := peerdas.CustodyColumns(nodeID, peerdas.CustodySubnetCount()) + // Get the custody group count. + custodyGroupsCount := peerdas.CustodyGroupCount() + + // Compute the custody groups. + custodyGroups, err := peerdas.CustodyGroups(nodeID, custodyGroupsCount) + if err != nil { + return nil, errors.Wrap(err, "custody groups") + } + + // Compute the custody columns. + custodyColumns, err := peerdas.CustodyColumns(custodyGroups) if err != nil { return nil, errors.Wrap(err, "custody columns") } // Build blob sidecars by root requests based on missing columns. req := make(p2ptypes.DataColumnSidecarsByRootReq, 0, len(commitments)) - for columnIndex := range custodiedColumns { + for columnIndex := range custodyColumns { isColumnAvailable := storedColumns[columnIndex] if !isColumnAvailable { req = append(req, ð.DataColumnIdentifier{ @@ -449,7 +458,7 @@ func (s *Service) fetchOriginColumns(pids []peer.ID) error { return nil } shufflePeers(pids) - pids, err = s.cfg.P2P.DataColumnsAdmissibleCustodyPeers(pids) + pids, err = s.cfg.P2P.AdmissibleCustodyGroupsPeers(pids) if err != nil { return err } diff --git a/beacon-chain/sync/pending_blocks_queue.go b/beacon-chain/sync/pending_blocks_queue.go index 1bbaf6c07a2..1e1b62ddcf5 100644 --- a/beacon-chain/sync/pending_blocks_queue.go +++ b/beacon-chain/sync/pending_blocks_queue.go @@ -195,23 +195,26 @@ func (s *Service) hasPeer() bool { var errNoPeersForPending = errors.New("no suitable peers to process pending block queue, delaying") // processAndBroadcastBlock validates, processes, and broadcasts a block. -// part of the function is to request missing blobs from peers if the block contains kzg commitments. +// Part of the function is to request missing blobs or data columns from peers if the block contains kzg commitments. 
func (s *Service) processAndBroadcastBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error { + blockSlot := b.Block().Slot() + if err := s.validateBeaconBlock(ctx, b, blkRoot); err != nil { if !errors.Is(ErrOptimisticParent, err) { - log.WithError(err).WithField("slot", b.Block().Slot()).Debug("Could not validate block") + log.WithError(err).WithField("slot", blockSlot).Debug("Could not validate block") return err } } - if coreTime.PeerDASIsActive(b.Block().Slot()) { + if coreTime.PeerDASIsActive(blockSlot) { request, err := s.buildRequestsForMissingDataColumns(blkRoot, b) if err != nil { - return err + return errors.Wrap(err, "build requests for missing data columns") } + if len(request) > 0 { peers := s.getBestPeers() - peers, err = s.cfg.p2p.DataColumnsAdmissibleCustodyPeers(peers) + peers, err = s.cfg.p2p.AdmissibleCustodyGroupsPeers(peers) if err != nil { return err } @@ -244,7 +247,7 @@ func (s *Service) processAndBroadcastBlock(ctx context.Context, b interfaces.Rea return err } - s.setSeenBlockIndexSlot(b.Block().Slot(), b.Block().ProposerIndex()) + s.setSeenBlockIndexSlot(blockSlot, b.Block().ProposerIndex()) pb, err := b.Proto() if err != nil { @@ -346,7 +349,7 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra if peerDASIsActive { var err error - bestPeers, err = s.cfg.p2p.DataColumnsAdmissibleSubnetSamplingPeers(bestPeers) + bestPeers, err = s.cfg.p2p.AdmissibleCustodySamplingPeers(bestPeers) if err != nil { return errors.Wrap(err, "data columns admissible subnet sampling peers") } diff --git a/beacon-chain/sync/rpc_beacon_blocks_by_root.go b/beacon-chain/sync/rpc_beacon_blocks_by_root.go index 4bd952d88a5..8a37105bf9d 100644 --- a/beacon-chain/sync/rpc_beacon_blocks_by_root.go +++ b/beacon-chain/sync/rpc_beacon_blocks_by_root.go @@ -285,11 +285,11 @@ func (s *Service) pendingBlobsRequestForBlock(root [32]byte, b interfaces.ReadOn return blobIdentifiers, nil } -// buildRequestsForMissingDataColumns looks at the data columns we should custody and have via subnet sampling +// buildRequestsForMissingDataColumns looks at the data columns we should sample from and have via custody sampling // and that we don't actually store for a given block, and construct the corresponding data column sidecars by root requests. func (s *Service) buildRequestsForMissingDataColumns(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock) (types.DataColumnSidecarsByRootReq, error) { - // Block before deneb has nor blobs neither data columns. - if block.Version() < version.Deneb { + // Blocks before Fulu have no data columns. + if block.Version() < version.Fulu { return nil, nil } @@ -304,26 +304,35 @@ func (s *Service) buildRequestsForMissingDataColumns(root [32]byte, block interf return nil, nil } - // Retrieve the columns we store for the current root. + // Retrieve the columns we store for the root. storedColumns, err := s.cfg.blobStorage.ColumnIndices(root) if err != nil { return nil, errors.Wrap(err, "column indices") } - // Retrieve the columns we should custody. + // Get our node ID. nodeID := s.cfg.p2p.NodeID() - custodySubnetCount := peerdas.SubnetSamplingSize() - custodyColumns, err := peerdas.CustodyColumns(nodeID, custodySubnetCount) + // Retrieve the number of groups we should sample from. + samplingGroupSize := peerdas.CustodyGroupSamplingSize() + + // Retrieve the groups we should sample from. 
+ samplingGroups, err := peerdas.CustodyGroups(nodeID, samplingGroupSize) + if err != nil { + return nil, errors.Wrap(err, "custody groups") + } + + // Retrieve the columns we should sample from. + samplingColumns, err := peerdas.CustodyColumns(samplingGroups) if err != nil { return nil, errors.Wrap(err, "custody columns") } - custodyColumnCount := len(custodyColumns) + samplingColumnCount := len(samplingColumns) - // Build the request for the we should custody and we don't actually store. - req := make(types.DataColumnSidecarsByRootReq, 0, custodyColumnCount) - for column := range custodyColumns { + // Build the request for the columns we should sample from and we don't actually store. + req := make(types.DataColumnSidecarsByRootReq, 0, samplingColumnCount) + for column := range samplingColumns { isColumnStored := storedColumns[column] if !isColumnStored { req = append(req, ð.DataColumnIdentifier{ diff --git a/beacon-chain/sync/rpc_data_column_sidecars_by_range.go b/beacon-chain/sync/rpc_data_column_sidecars_by_range.go index a0223732709..1447c865775 100644 --- a/beacon-chain/sync/rpc_data_column_sidecars_by_range.go +++ b/beacon-chain/sync/rpc_data_column_sidecars_by_range.go @@ -91,14 +91,25 @@ func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg i return errors.New("message is not type *pb.DataColumnSidecarsByRangeRequest") } - // Compute custody columns. + // Get our node ID. nodeID := s.cfg.p2p.NodeID() numberOfColumns := params.BeaconConfig().NumberOfColumns - custodySubnetCount := peerdas.CustodySubnetCount() - custodyColumns, err := peerdas.CustodyColumns(nodeID, custodySubnetCount) + + // Get the number of groups we should custody. + custodyGroupCount := peerdas.CustodyGroupCount() + + // Compute the groups we should custody. + custodyGroups, err := peerdas.CustodyGroups(nodeID, custodyGroupCount) if err != nil { s.writeErrorResponseToStream(responseCodeServerError, err.Error(), stream) - return err + return errors.Wrap(err, "custody groups") + } + + // Compute the columns we should custody. + custodyColumns, err := peerdas.CustodyColumns(custodyGroups) + if err != nil { + s.writeErrorResponseToStream(responseCodeServerError, err.Error(), stream) + return errors.Wrap(err, "custody columns") } custodyColumnsCount := uint64(len(custodyColumns)) diff --git a/beacon-chain/sync/rpc_data_column_sidecars_by_root.go b/beacon-chain/sync/rpc_data_column_sidecars_by_root.go index 7dae8053a04..901b4f83ccb 100644 --- a/beacon-chain/sync/rpc_data_column_sidecars_by_root.go +++ b/beacon-chain/sync/rpc_data_column_sidecars_by_root.go @@ -103,10 +103,19 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int return errors.Wrapf(err, "unexpected error computing min valid blob request slot, current_slot=%d", cs) } - // Compute all custody columns. + // Retrieve our node ID. nodeID := s.cfg.p2p.NodeID() - custodySubnetCount := peerdas.CustodySubnetCount() - custodyColumns, err := peerdas.CustodyColumns(nodeID, custodySubnetCount) + + // Retrieve the number of groups we should custody. + custodyGroupCount := peerdas.CustodyGroupCount() + + // Compute the groups we should custody. 
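The request building above is a set difference: every column the node should sample from, minus the columns already stored, yields one identifier for the block root. A sketch of that step with the identifier type stubbed out, since the exact proto fields are not shown in this hunk.

package example

// columnIdentifier stands in for the DataColumnIdentifier proto used in the
// by-root request; the field names here are illustrative.
type columnIdentifier struct {
    blockRoot [32]byte
    column    uint64
}

// missingColumnRequest returns one identifier per column that the node should
// sample from but does not yet store, mirroring buildRequestsForMissingDataColumns.
func missingColumnRequest(
    root [32]byte,
    samplingColumns map[uint64]bool,
    storedColumns map[uint64]bool,
) []columnIdentifier {
    req := make([]columnIdentifier, 0, len(samplingColumns))
    for column := range samplingColumns {
        if !storedColumns[column] {
            req = append(req, columnIdentifier{blockRoot: root, column: column})
        }
    }
    return req
}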
+ custodyGroups, err := peerdas.CustodyGroups(nodeID, custodyGroupCount) + if err != nil { + return errors.Wrap(err, "custody groups") + } + + custodyColumns, err := peerdas.CustodyColumns(custodyGroups) custodyColumnsCount := uint64(len(custodyColumns)) if err != nil { diff --git a/beacon-chain/sync/rpc_metadata.go b/beacon-chain/sync/rpc_metadata.go index 2bd57e0969d..d4fbcf80c54 100644 --- a/beacon-chain/sync/rpc_metadata.go +++ b/beacon-chain/sync/rpc_metadata.go @@ -101,18 +101,18 @@ func (s *Service) metaDataHandler(_ context.Context, _ interface{}, stream libp2 case version.Phase0: metadata = wrapper.WrappedMetadataV2( &pb.MetaDataV2{ - Attnets: metadata.AttnetsBitfield(), - SeqNumber: metadata.SequenceNumber(), - Syncnets: bitfield.Bitvector4{byte(0x00)}, - CustodySubnetCount: 0, + Attnets: metadata.AttnetsBitfield(), + SeqNumber: metadata.SequenceNumber(), + Syncnets: bitfield.Bitvector4{byte(0x00)}, + CustodyGroupCount: 0, }) case version.Altair: metadata = wrapper.WrappedMetadataV2( &pb.MetaDataV2{ - Attnets: metadata.AttnetsBitfield(), - SeqNumber: metadata.SequenceNumber(), - Syncnets: metadata.SyncnetsBitfield(), - CustodySubnetCount: 0, + Attnets: metadata.AttnetsBitfield(), + SeqNumber: metadata.SequenceNumber(), + Syncnets: metadata.SyncnetsBitfield(), + CustodyGroupCount: 0, }) } } diff --git a/beacon-chain/sync/rpc_metadata_test.go b/beacon-chain/sync/rpc_metadata_test.go index b6e89634e40..4c94c9342d9 100644 --- a/beacon-chain/sync/rpc_metadata_test.go +++ b/beacon-chain/sync/rpc_metadata_test.go @@ -92,9 +92,9 @@ func createService(peer p2p.P2P, chain *mock.ChainService) *Service { func TestMetadataRPCHandler_SendMetadataRequest(t *testing.T) { const ( - requestTimeout = 1 * time.Second - seqNumber = 2 - custodySubnetCount = 4 + requestTimeout = 1 * time.Second + seqNumber = 2 + custodyGroupCount = 4 ) attnets := []byte{'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'} @@ -152,10 +152,10 @@ func TestMetadataRPCHandler_SendMetadataRequest(t *testing.T) { epochsSinceGenesisPeer1: 0, epochsSinceGenesisPeer2: 15, metadataPeer2: wrapper.WrappedMetadataV2(&pb.MetaDataV2{ - SeqNumber: seqNumber, - Attnets: attnets, - Syncnets: syncnets, - CustodySubnetCount: custodySubnetCount, + SeqNumber: seqNumber, + Attnets: attnets, + Syncnets: syncnets, + CustodyGroupCount: custodyGroupCount, }), expected: wrapper.WrappedMetadataV0(&pb.MetaDataV0{ SeqNumber: seqNumber, @@ -199,10 +199,10 @@ func TestMetadataRPCHandler_SendMetadataRequest(t *testing.T) { epochsSinceGenesisPeer1: 5, epochsSinceGenesisPeer2: 15, metadataPeer2: wrapper.WrappedMetadataV2(&pb.MetaDataV2{ - SeqNumber: seqNumber, - Attnets: attnets, - Syncnets: syncnets, - CustodySubnetCount: custodySubnetCount, + SeqNumber: seqNumber, + Attnets: attnets, + Syncnets: syncnets, + CustodyGroupCount: custodyGroupCount, }), expected: wrapper.WrappedMetadataV1(&pb.MetaDataV1{ SeqNumber: seqNumber, @@ -220,10 +220,10 @@ func TestMetadataRPCHandler_SendMetadataRequest(t *testing.T) { Attnets: attnets, }), expected: wrapper.WrappedMetadataV2(&pb.MetaDataV2{ - SeqNumber: seqNumber, - Attnets: attnets, - Syncnets: bitfield.Bitvector4{byte(0x00)}, - CustodySubnetCount: 0, + SeqNumber: seqNumber, + Attnets: attnets, + Syncnets: bitfield.Bitvector4{byte(0x00)}, + CustodyGroupCount: 0, }), }, { @@ -237,10 +237,10 @@ func TestMetadataRPCHandler_SendMetadataRequest(t *testing.T) { Syncnets: syncnets, }), expected: wrapper.WrappedMetadataV2(&pb.MetaDataV2{ - SeqNumber: seqNumber, - Attnets: attnets, - Syncnets: syncnets, - CustodySubnetCount: 0, + 
SeqNumber: seqNumber, + Attnets: attnets, + Syncnets: syncnets, + CustodyGroupCount: 0, }), }, { @@ -249,16 +249,16 @@ func TestMetadataRPCHandler_SendMetadataRequest(t *testing.T) { epochsSinceGenesisPeer1: 15, epochsSinceGenesisPeer2: 15, metadataPeer2: wrapper.WrappedMetadataV2(&pb.MetaDataV2{ - SeqNumber: seqNumber, - Attnets: attnets, - Syncnets: syncnets, - CustodySubnetCount: custodySubnetCount, + SeqNumber: seqNumber, + Attnets: attnets, + Syncnets: syncnets, + CustodyGroupCount: custodyGroupCount, }), expected: wrapper.WrappedMetadataV2(&pb.MetaDataV2{ - SeqNumber: seqNumber, - Attnets: attnets, - Syncnets: syncnets, - CustodySubnetCount: custodySubnetCount, + SeqNumber: seqNumber, + Attnets: attnets, + Syncnets: syncnets, + CustodyGroupCount: custodyGroupCount, }), }, } diff --git a/config/params/config.go b/config/params/config.go index 9a8cf88abde..126312624ea 100644 --- a/config/params/config.go +++ b/config/params/config.go @@ -242,7 +242,7 @@ type BeaconChainConfig struct { MaxRequestBlobSidecarsFulu uint64 `yaml:"MAX_REQUEST_BLOB_SIDECARS_FULU" spec:"true"` // MaxRequestBlobSidecarsFulu is the maximum number of blobs to request in a single request after the fulu epoch. MaxRequestBlocksDeneb uint64 `yaml:"MAX_REQUEST_BLOCKS_DENEB" spec:"true"` // MaxRequestBlocksDeneb is the maximum number of blocks in a single request after the deneb epoch. - // Values introduce in Electra upgrade + // Values introduced in Electra upgrade DataColumnSidecarSubnetCount uint64 `yaml:"DATA_COLUMN_SIDECAR_SUBNET_COUNT" spec:"true"` // DataColumnSidecarSubnetCount is the number of data column sidecar subnets used in the gossipsub protocol MaxPerEpochActivationExitChurnLimit uint64 `yaml:"MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT" spec:"true"` // MaxPerEpochActivationExitChurnLimit represents the maximum combined activation and exit churn. MinPerEpochChurnLimitElectra uint64 `yaml:"MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA" spec:"true"` // MinPerEpochChurnLimitElectra is the minimum amount of churn allotted for validator rotations for electra. @@ -261,12 +261,13 @@ type BeaconChainConfig struct { MaxDepositRequestsPerPayload uint64 `yaml:"MAX_DEPOSIT_REQUESTS_PER_PAYLOAD" spec:"true"` // MaxDepositRequestsPerPayload is the maximum number of execution layer deposits in each payload UnsetDepositRequestsStartIndex uint64 `yaml:"UNSET_DEPOSIT_REQUESTS_START_INDEX" spec:"true"` // UnsetDepositRequestsStartIndex is used to check the start index for eip6110 - // PeerDAS Values - SamplesPerSlot uint64 `yaml:"SAMPLES_PER_SLOT"` // SamplesPerSlot refers to the number of random samples a node queries per slot. - CustodyRequirement uint64 `yaml:"CUSTODY_REQUIREMENT"` // CustodyRequirement refers to the minimum amount of subnets a peer must custody and serve samples from. - MinEpochsForDataColumnSidecarsRequest primitives.Epoch `yaml:"MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS"` // MinEpochsForDataColumnSidecarsRequest is the minimum number of epochs the node will keep the data columns for. - MaxCellsInExtendedMatrix uint64 `yaml:"MAX_CELLS_IN_EXTENDED_MATRIX" spec:"true"` // MaxCellsInExtendedMatrix is the full data of one-dimensional erasure coding extended blobs (in row major format). - NumberOfColumns uint64 `yaml:"NUMBER_OF_COLUMNS" spec:"true"` // NumberOfColumns in the extended data matrix. + // Values introduced in Fulu upgrade + NumberOfColumns uint64 `yaml:"NUMBER_OF_COLUMNS" spec:"true"` // NumberOfColumns in the extended data matrix. 
+ SamplesPerSlot uint64 `yaml:"SAMPLES_PER_SLOT" spec:"true"` // SamplesPerSlot refers to the number of random samples a node queries per slot. + NumberOfCustodyGroups uint64 `yaml:"NUMBER_OF_CUSTODY_GROUPS" spec:"true"` // NumberOfCustodyGroups available for nodes to custody. + CustodyRequirement uint64 `yaml:"CUSTODY_REQUIREMENT" spec:"true"` // CustodyRequirement refers to the minimum amount of subnets a peer must custody and serve samples from. + MinEpochsForDataColumnSidecarsRequest primitives.Epoch `yaml:"MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS" spec:"true"` // MinEpochsForDataColumnSidecarsRequest is the minimum number of epochs the node will keep the data columns for. + MaxCellsInExtendedMatrix uint64 `yaml:"MAX_CELLS_IN_EXTENDED_MATRIX"` // MaxCellsInExtendedMatrix is the full data of one-dimensional erasure coding extended blobs (in row major format). // Networking Specific Parameters GossipMaxSize uint64 `yaml:"GOSSIP_MAX_SIZE" spec:"true"` // GossipMaxSize is the maximum allowed size of uncompressed gossip messages. diff --git a/config/params/loader_test.go b/config/params/loader_test.go index a4a8ea0a79d..61fc4ac165d 100644 --- a/config/params/loader_test.go +++ b/config/params/loader_test.go @@ -40,7 +40,6 @@ var placeholderFields = []string{ "MAX_EXTRA_DATA_BYTES", // Compile time constant on ExecutionPayload.extra_data. "MAX_REQUEST_PAYLOADS", // Compile time constant on BeaconBlockBody.ExecutionRequests "MAX_TRANSACTIONS_PER_PAYLOAD", // Compile time constant on ExecutionPayload.transactions. - "NUMBER_OF_CUSTODY_GROUPS", "REORG_HEAD_WEIGHT_THRESHOLD", "TARGET_NUMBER_OF_PEERS", "UPDATE_TIMEOUT", diff --git a/config/params/mainnet_config.go b/config/params/mainnet_config.go index 7f1b2a3167b..5befe7368e7 100644 --- a/config/params/mainnet_config.go +++ b/config/params/mainnet_config.go @@ -37,7 +37,7 @@ var mainnetNetworkConfig = &NetworkConfig{ ETH2Key: "eth2", AttSubnetKey: "attnets", SyncCommsSubnetKey: "syncnets", - CustodySubnetCountKey: "csc", + CustodyGroupCountKey: "cgc", MinimumPeersInSubnetSearch: 20, ContractDeploymentBlock: 11184524, // Note: contract was deployed in block 11052984 but no transactions were sent until 11184524. BootstrapNodes: []string{ @@ -301,12 +301,13 @@ var mainnetBeaconConfig = &BeaconChainConfig{ MaxDepositRequestsPerPayload: 8192, // 2**13 (= 8192) UnsetDepositRequestsStartIndex: math.MaxUint64, - // PeerDAS + // Values related to fulu NumberOfColumns: 128, - MaxCellsInExtendedMatrix: 768, SamplesPerSlot: 8, + NumberOfCustodyGroups: 128, CustodyRequirement: 4, MinEpochsForDataColumnSidecarsRequest: 4096, + MaxCellsInExtendedMatrix: 768, // Values related to networking parameters. GossipMaxSize: 10 * 1 << 20, // 10 MiB diff --git a/config/params/network_config.go b/config/params/network_config.go index a46cc8c13cd..0b797430c00 100644 --- a/config/params/network_config.go +++ b/config/params/network_config.go @@ -11,7 +11,7 @@ type NetworkConfig struct { ETH2Key string // ETH2Key is the ENR key of the Ethereum consensus object. AttSubnetKey string // AttSubnetKey is the ENR key of the subnet bitfield. SyncCommsSubnetKey string // SyncCommsSubnetKey is the ENR key of the sync committee subnet bitfield. - CustodySubnetCountKey string // CustodySubnetCountKey is the ENR key of the custody subnet count. + CustodyGroupCountKey string // CustodyGroupsCountKey is the ENR key of the custody group count. MinimumPeersInSubnetSearch uint64 // PeersInSubnetSearch is the required amount of peers that we need to be able to lookup in a subnet search. 
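With the mainnet values set above (128 columns, 128 custody groups, a custody requirement of 4 groups, 8 samples per slot), each custody group maps to exactly one column, so a node with the default custody requirement custodies 4 columns and samples at most 8 additional ones per slot. A tiny self-check of that arithmetic, using the mainnet constants as assumptions:

package main

import "fmt"

func main() {
    const (
        numberOfColumns       = 128
        numberOfCustodyGroups = 128
        custodyRequirement    = 4
        samplesPerSlot        = 8
    )

    columnsPerGroup := numberOfColumns / numberOfCustodyGroups // 1 on mainnet
    custodiedColumns := custodyRequirement * columnsPerGroup   // 4 columns by default

    fmt.Println("columns per group:", columnsPerGroup)
    fmt.Println("default custodied columns:", custodiedColumns)
    fmt.Println("max samples per slot:", samplesPerSlot)
}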
 // Chain Network Config
diff --git a/consensus-types/wrapper/metadata.go b/consensus-types/wrapper/metadata.go
index 3fb22d20106..209046650d2 100644
--- a/consensus-types/wrapper/metadata.go
+++ b/consensus-types/wrapper/metadata.go
@@ -36,8 +36,8 @@ func (m MetadataV0) SyncnetsBitfield() bitfield.Bitvector4 {
 return bitfield.Bitvector4{0}
 }

-// CustodySubnetCount returns custody subnet count from the metadata.
-func (m MetadataV0) CustodySubnetCount() uint64 {
+// CustodyGroupCount returns the custody group count from the metadata.
+func (m MetadataV0) CustodyGroupCount() uint64 {
 return 0
 }

@@ -130,8 +130,8 @@ func (m MetadataV1) SyncnetsBitfield() bitfield.Bitvector4 {
 return m.md.Syncnets
 }

-// CustodySubnetCount returns custody subnet count from the metadata.
-func (m MetadataV1) CustodySubnetCount() uint64 {
+// CustodyGroupCount returns the custody group count from the metadata.
+func (m MetadataV1) CustodyGroupCount() uint64 {
 return 0
 }

@@ -224,9 +224,9 @@ func (m MetadataV2) SyncnetsBitfield() bitfield.Bitvector4 {
 return m.md.Syncnets
 }

-// CustodySubnetCount returns custody subnet count from the metadata.
-func (m MetadataV2) CustodySubnetCount() uint64 {
- return m.md.CustodySubnetCount
+// CustodyGroupCount returns the custody group count from the metadata.
+func (m MetadataV2) CustodyGroupCount() uint64 {
+ return m.md.CustodyGroupCount
 }

 // InnerObject returns the underlying metadata protobuf structure.
diff --git a/proto/prysm/v1alpha1/metadata/metadata_interfaces.go b/proto/prysm/v1alpha1/metadata/metadata_interfaces.go
index b57a8753ceb..87302a7cbbf 100644
--- a/proto/prysm/v1alpha1/metadata/metadata_interfaces.go
+++ b/proto/prysm/v1alpha1/metadata/metadata_interfaces.go
@@ -11,7 +11,7 @@ type Metadata interface {
 SequenceNumber() uint64
 AttnetsBitfield() bitfield.Bitvector64
 SyncnetsBitfield() bitfield.Bitvector4
- CustodySubnetCount() uint64
+ CustodyGroupCount() uint64
 InnerObject() interface{}
 IsNil() bool
 Copy() Metadata
diff --git a/proto/prysm/v1alpha1/non-core.ssz.go b/proto/prysm/v1alpha1/non-core.ssz.go
index 58e5e30d402..e9513db4e3d 100644
--- a/proto/prysm/v1alpha1/non-core.ssz.go
+++ b/proto/prysm/v1alpha1/non-core.ssz.go
@@ -576,8 +576,8 @@ func (m *MetaDataV2) MarshalSSZTo(buf []byte) (dst []byte, err error) {
 }
 dst = append(dst, m.Syncnets...)

- // Field (3) 'CustodySubnetCount'
- dst = ssz.MarshalUint64(dst, m.CustodySubnetCount)
+ // Field (3) 'CustodyGroupCount'
+ dst = ssz.MarshalUint64(dst, m.CustodyGroupCount)

 return
 }

@@ -605,8 +605,8 @@ func (m *MetaDataV2) UnmarshalSSZ(buf []byte) error {
 }
 m.Syncnets = append(m.Syncnets, buf[16:17]...)
- // Field (3) 'CustodySubnetCount' - m.CustodySubnetCount = ssz.UnmarshallUint64(buf[17:25]) + // Field (3) 'CustodyGroupCount' + m.CustodyGroupCount = ssz.UnmarshallUint64(buf[17:25]) return err } @@ -643,8 +643,8 @@ func (m *MetaDataV2) HashTreeRootWith(hh *ssz.Hasher) (err error) { } hh.PutBytes(m.Syncnets) - // Field (3) 'CustodySubnetCount' - hh.PutUint64(m.CustodySubnetCount) + // Field (3) 'CustodyGroupCount' + hh.PutUint64(m.CustodyGroupCount) hh.Merkleize(indx) return diff --git a/proto/prysm/v1alpha1/p2p_messages.pb.go b/proto/prysm/v1alpha1/p2p_messages.pb.go index 796dc5d3c03..75f8688a33d 100755 --- a/proto/prysm/v1alpha1/p2p_messages.pb.go +++ b/proto/prysm/v1alpha1/p2p_messages.pb.go @@ -353,10 +353,10 @@ type MetaDataV2 struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - SeqNumber uint64 `protobuf:"varint,1,opt,name=seq_number,json=seqNumber,proto3" json:"seq_number,omitempty"` - Attnets github_com_prysmaticlabs_go_bitfield.Bitvector64 `protobuf:"bytes,2,opt,name=attnets,proto3" json:"attnets,omitempty" cast-type:"github.com/prysmaticlabs/go-bitfield.Bitvector64" ssz-size:"8"` - Syncnets github_com_prysmaticlabs_go_bitfield.Bitvector4 `protobuf:"bytes,3,opt,name=syncnets,proto3" json:"syncnets,omitempty" cast-type:"github.com/prysmaticlabs/go-bitfield.Bitvector4" ssz-size:"1"` - CustodySubnetCount uint64 `protobuf:"varint,4,opt,name=custody_subnet_count,json=custodySubnetCount,proto3" json:"custody_subnet_count,omitempty"` + SeqNumber uint64 `protobuf:"varint,1,opt,name=seq_number,json=seqNumber,proto3" json:"seq_number,omitempty"` + Attnets github_com_prysmaticlabs_go_bitfield.Bitvector64 `protobuf:"bytes,2,opt,name=attnets,proto3" json:"attnets,omitempty" cast-type:"github.com/prysmaticlabs/go-bitfield.Bitvector64" ssz-size:"8"` + Syncnets github_com_prysmaticlabs_go_bitfield.Bitvector4 `protobuf:"bytes,3,opt,name=syncnets,proto3" json:"syncnets,omitempty" cast-type:"github.com/prysmaticlabs/go-bitfield.Bitvector4" ssz-size:"1"` + CustodyGroupCount uint64 `protobuf:"varint,4,opt,name=custody_group_count,json=custodyGroupCount,proto3" json:"custody_group_count,omitempty"` } func (x *MetaDataV2) Reset() { @@ -412,9 +412,9 @@ func (x *MetaDataV2) GetSyncnets() github_com_prysmaticlabs_go_bitfield.Bitvecto return github_com_prysmaticlabs_go_bitfield.Bitvector4(nil) } -func (x *MetaDataV2) GetCustodySubnetCount() uint64 { +func (x *MetaDataV2) GetCustodyGroupCount() uint64 { if x != nil { - return x.CustodySubnetCount + return x.CustodyGroupCount } return 0 } @@ -616,7 +616,7 @@ var file_proto_prysm_v1alpha1_p2p_messages_proto_rawDesc = []byte{ 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x34, 0x8a, - 0xb5, 0x18, 0x01, 0x31, 0x52, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x6e, 0x65, 0x74, 0x73, 0x22, 0x88, + 0xb5, 0x18, 0x01, 0x31, 0x52, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x6e, 0x65, 0x74, 0x73, 0x22, 0x86, 0x02, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x56, 0x32, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x65, 0x71, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x53, 0x0a, 0x07, @@ -630,42 +630,42 @@ var file_proto_prysm_v1alpha1_p2p_messages_proto_rawDesc = []byte{ 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 
0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x34, 0x8a, 0xb5, 0x18, 0x01, 0x31, 0x52, 0x08, 0x73, - 0x79, 0x6e, 0x63, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x64, 0x79, 0x5f, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x64, 0x79, 0x53, 0x75, - 0x62, 0x6e, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x98, 0x01, 0x0a, 0x1a, 0x42, 0x6c, - 0x6f, 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x64, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x45, 0x82, 0xb5, - 0x18, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, - 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, - 0x2f, 0x76, 0x35, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53, - 0x6c, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x53, 0x6c, 0x6f, 0x74, 0x12, 0x14, - 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc1, 0x01, 0x0a, 0x20, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, - 0x75, 0x6d, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x64, 0x0a, 0x0a, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x45, 0x82, - 0xb5, 0x18, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, - 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, - 0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, - 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x53, 0x6c, 0x6f, 0x74, 0x12, - 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x04, 0x42, 0x07, 0x92, 0xb5, 0x18, 0x03, 0x31, 0x32, 0x38, 0x52, - 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x42, 0x9b, 0x01, 0x0a, 0x19, 0x6f, 0x72, 0x67, - 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x10, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, - 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, - 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 
0x02, - 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x79, 0x6e, 0x63, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x64, 0x79, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x64, 0x79, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x98, 0x01, 0x0a, 0x1a, 0x42, 0x6c, 0x6f, 0x62, + 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x64, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, + 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x45, 0x82, 0xb5, 0x18, 0x41, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, + 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, + 0x35, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53, 0x6c, 0x6f, + 0x74, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x53, 0x6c, 0x6f, 0x74, 0x12, 0x14, 0x0a, 0x05, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x22, 0xc1, 0x01, 0x0a, 0x20, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, + 0x6e, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x64, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x45, 0x82, 0xb5, 0x18, + 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, + 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, + 0x76, 0x35, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53, 0x6c, + 0x6f, 0x74, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x53, 0x6c, 0x6f, 0x74, 0x12, 0x14, 0x0a, + 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x04, 0x42, 0x07, 0x92, 0xb5, 0x18, 0x03, 0x31, 0x32, 0x38, 0x52, 0x07, 0x63, + 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x42, 0x9b, 0x01, 0x0a, 0x19, 0x6f, 0x72, 0x67, 0x2e, 0x65, + 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x42, 0x10, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, + 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, + 0x45, 0x74, 0x68, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x45, + 0x74, 0x68, 0x65, 0x72, 0x65, 
0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/proto/prysm/v1alpha1/p2p_messages.proto b/proto/prysm/v1alpha1/p2p_messages.proto index 0ea6a477276..fabb13979c0 100644 --- a/proto/prysm/v1alpha1/p2p_messages.proto +++ b/proto/prysm/v1alpha1/p2p_messages.proto @@ -67,14 +67,14 @@ message MetaDataV1 { seq_number: uint64 attnets: Bitvector[ATTESTATION_SUBNET_COUNT] syncnets: Bitvector[SYNC_COMMITTEE_SUBNET_COUNT] - custody_subnet_count: uint64 + custody_group_count: uint64 ) */ message MetaDataV2 { uint64 seq_number = 1; bytes attnets = 2 [(ethereum.eth.ext.ssz_size) = "8", (ethereum.eth.ext.cast_type) = "github.com/prysmaticlabs/go-bitfield.Bitvector64"]; bytes syncnets = 3 [(ethereum.eth.ext.ssz_size) = "1", (ethereum.eth.ext.cast_type) = "github.com/prysmaticlabs/go-bitfield.Bitvector4"]; - uint64 custody_subnet_count = 4; + uint64 custody_group_count = 4; } /* diff --git a/testing/spectest/mainnet/eip7594/networking/custody_columns_test.go b/testing/spectest/mainnet/eip7594/networking/custody_columns_test.go deleted file mode 100644 index 470397682ce..00000000000 --- a/testing/spectest/mainnet/eip7594/networking/custody_columns_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package networking - -// import ( -// "testing" - -// "github.com/prysmaticlabs/prysm/v5/testing/spectest/shared/eip7594/networking" -// ) - -// func TestMainnet_EIP7594_Networking_CustodyColumns(t *testing.T) { -// networking.RunCustodyColumnsTest(t, "mainnet") -// } diff --git a/testing/spectest/mainnet/eip7594/networking/BUILD.bazel b/testing/spectest/mainnet/fulu/networking/BUILD.bazel similarity index 77% rename from testing/spectest/mainnet/eip7594/networking/BUILD.bazel rename to testing/spectest/mainnet/fulu/networking/BUILD.bazel index 7fa964afb08..b10f2329deb 100644 --- a/testing/spectest/mainnet/eip7594/networking/BUILD.bazel +++ b/testing/spectest/mainnet/fulu/networking/BUILD.bazel @@ -8,4 +8,5 @@ go_test( "@consensus_spec_tests_mainnet//:test_data", ], tags = ["spectest"], + deps = ["//testing/spectest/shared/fulu/networking:go_default_library"], ) diff --git a/testing/spectest/mainnet/fulu/networking/custody_columns_test.go b/testing/spectest/mainnet/fulu/networking/custody_columns_test.go new file mode 100644 index 00000000000..5da05790821 --- /dev/null +++ b/testing/spectest/mainnet/fulu/networking/custody_columns_test.go @@ -0,0 +1,11 @@ +package networking + +import ( + "testing" + + "github.com/prysmaticlabs/prysm/v5/testing/spectest/shared/fulu/networking" +) + +func TestMainnet_Fulu_Networking_CustodyColumns(t *testing.T) { + networking.RunCustodyColumnsTest(t, "mainnet") +} diff --git a/testing/spectest/minimal/eip7594/networking/custody_columns_test.go b/testing/spectest/minimal/eip7594/networking/custody_columns_test.go deleted file mode 100644 index f0879a0974b..00000000000 --- a/testing/spectest/minimal/eip7594/networking/custody_columns_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package networking - -// import ( -// "testing" - -// "github.com/prysmaticlabs/prysm/v5/testing/spectest/shared/eip7594/networking" -// ) - -// func TestMainnet_EIP7594_Networking_CustodyColumns(t *testing.T) { -// networking.RunCustodyColumnsTest(t, "minimal") -// } diff --git a/testing/spectest/minimal/eip7594/networking/BUILD.bazel b/testing/spectest/minimal/fulu/networking/BUILD.bazel similarity index 77% rename from testing/spectest/minimal/eip7594/networking/BUILD.bazel rename to 
testing/spectest/minimal/fulu/networking/BUILD.bazel
index fef2919090a..0da30acdc57 100644
--- a/testing/spectest/minimal/eip7594/networking/BUILD.bazel
+++ b/testing/spectest/minimal/fulu/networking/BUILD.bazel
@@ -8,4 +8,5 @@ go_test(
 "@consensus_spec_tests_minimal//:test_data",
 ],
 tags = ["spectest"],
+ deps = ["//testing/spectest/shared/fulu/networking:go_default_library"],
 )
diff --git a/testing/spectest/minimal/fulu/networking/custody_columns_test.go b/testing/spectest/minimal/fulu/networking/custody_columns_test.go
new file mode 100644
index 00000000000..0ee94b72d2f
--- /dev/null
+++ b/testing/spectest/minimal/fulu/networking/custody_columns_test.go
@@ -0,0 +1,11 @@
+package networking
+
+import (
+ "testing"
+
+ "github.com/prysmaticlabs/prysm/v5/testing/spectest/shared/fulu/networking"
+)
+
+func TestMinimal_Fulu_Networking_CustodyColumns(t *testing.T) {
+ networking.RunCustodyColumnsTest(t, "minimal")
+}
diff --git a/testing/spectest/shared/eip7594/networking/BUILD.bazel b/testing/spectest/shared/fulu/networking/BUILD.bazel
similarity index 94%
rename from testing/spectest/shared/eip7594/networking/BUILD.bazel
rename to testing/spectest/shared/fulu/networking/BUILD.bazel
index c9e60dc073b..1908298ce24 100644
--- a/testing/spectest/shared/eip7594/networking/BUILD.bazel
+++ b/testing/spectest/shared/fulu/networking/BUILD.bazel
@@ -4,7 +4,7 @@ go_library(
 name = "go_default_library",
 testonly = True,
 srcs = ["custody_columns.go"],
- importpath = "github.com/prysmaticlabs/prysm/v5/testing/spectest/shared/eip7594/networking",
+ importpath = "github.com/prysmaticlabs/prysm/v5/testing/spectest/shared/fulu/networking",
 visibility = ["//visibility:public"],
 deps = [
 "//beacon-chain/core/peerdas:go_default_library",
diff --git a/testing/spectest/shared/eip7594/networking/custody_columns.go b/testing/spectest/shared/fulu/networking/custody_columns.go
similarity index 81%
rename from testing/spectest/shared/eip7594/networking/custody_columns.go
rename to testing/spectest/shared/fulu/networking/custody_columns.go
index f8b343fe124..985104f2ca5 100644
--- a/testing/spectest/shared/eip7594/networking/custody_columns.go
+++ b/testing/spectest/shared/fulu/networking/custody_columns.go
@@ -13,9 +13,9 @@ import (
 )

 type Config struct {
- NodeId *big.Int `yaml:"node_id"`
- CustodySubnetCount uint64 `yaml:"custody_subnet_count"`
- Expected []uint64 `yaml:"result"`
+ NodeId *big.Int `yaml:"node_id"`
+ CustodyGroupCount uint64 `yaml:"custody_group_count"`
+ Expected []uint64 `yaml:"result"`
 }

 // RunCustodyColumnsTest executes custody columns spec tests.
@@ -50,8 +50,12 @@ func RunCustodyColumnsTest(t *testing.T, config string) {
 copy(nodeIdBytes32[:], nodeIdBytes)
 nodeId := enode.ID(nodeIdBytes32)

- // Compute the custodied columns.
- actual, err := peerdas.CustodyColumns(nodeId, config.CustodySubnetCount)
+ // Compute the custody groups.
+ custodyGroups, err := peerdas.CustodyGroups(nodeId, config.CustodyGroupCount)
+ require.NoError(t, err, "failed to compute the custody groups")
+
+ // Compute the custody columns.
+ actual, err := peerdas.CustodyColumns(custodyGroups)
 require.NoError(t, err, "failed to compute the custody columns")

 // Compare the results.