Skip to content

Commit

Permalink
Cherrypick/reorgs (0xPolygonHermez#3588)
Browse files Browse the repository at this point in the history
* change number migration

* add column checked on state.block

* if no unchecked blocks  return ErrNotFound

* migration set to checked all but the block with number below max-1000

* add column checked on state.block (0xPolygonHermez#3543)

* add column checked on state.block

* if no unchecked blocks  return ErrNotFound

* migration set to checked all but the block with number below max-1000

* Feature/0xPolygonHermez#3549 reorgs improvement (0xPolygonHermez#3553)

* New reorg function

* mocks

* linter

* Synchronizer tests

* new elderberry smc docker image

* new image

* logs

* fix json rpc

* fix

* Test sync from empty block

* Regular reorg case tested

* linter

* remove empty block + fix LatestSyncedBlockEmpty

* Improve check reorgs when no block is received during the call

* fix RPC error code for eth_estimateGas and eth_call for reverted tx and no return value; fix e2e test;

* fix test

* Extra unit test

* fix reorg until genesis

* disable parallel synchronization

---------

Co-authored-by: tclemos <[email protected]>

* migrations

* Fix + remove empty blocks

* unit test

* linter

* Fix + remove empty blocks (0xPolygonHermez#3564)

* Fix + remove empty blocks

* unit test

* linter

* Fix/0xPolygonHermez#3565 reorg (0xPolygonHermez#3566)

* fix + logs

* fix loop

* Revert "fix + logs"

This reverts commit 39ced69.

* fix L1InfoRoot when an error happens during the process of the L1 information (0xPolygonHermez#3576)

* fix

* Comments + mock

* avoid error from some L1providers when fromBlock is higher than toBlock

* Revert some changes

* comments

* add L2BlockModulus to L1check

* doc

* fix dbTx = nil

* fix unit tests

* config

* fix sync unit test

* linter

* fix config param typo

* synchronizer:  check l1blocks (0xPolygonHermez#3546)

* wip

* run on background L1block checker

* fix lint and documentation

* fix conflict

* add unittest

* more unittest

* fix lint

* increase timeout for async unittest

* fix unittest

* rename GetResponse to GetResult and fix unit test

* add a second goroutine to check the newest blocks

* more unittest

* add unittest and run also preCheck on launch

* by default Precheck from FINALIZED and SAFE

* fix unittest, apply PR comments

* changes suggested by ARR552 in integration method

* fix documentation

* import new network-l1-mock from PR#3553

* import new network-l1-mock from PR#3553

* import new network-l1-mock from PR#3553

* import new network-l1-mock from PR#3553

* fix unittest

* fix PR comments

* fix error

* checkReorgAndExecuteReset can't be called with lastEthBlockSynced=nil

* add parentHash to error

* fix error

* merge 3553 fix unittest

* fix unittest

* fix wrong merge

* adapt parallel reorg detection to flow

* fix unit tests

* fix log

* allow use sync parallel mode

---------

Co-authored-by: Alonso <[email protected]>

* linter

* comment check

---------

Co-authored-by: tclemos <[email protected]>
  • Loading branch information
ARR552 and tclemos authored Apr 25, 2024
1 parent 7ddcb29 commit c7d7834
Show file tree
Hide file tree
Showing 66 changed files with 6,564 additions and 828 deletions.
10 changes: 9 additions & 1 deletion config/default.go
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,15 @@ TrustedSequencerURL = "" # If it is empty or not specified, then the value is re
SyncBlockProtection = "safe" # latest, finalized, safe
L1SynchronizationMode = "sequential"
L1SyncCheckL2BlockHash = true
L1SyncCheckL2BlockNumberhModulus = 30
L1SyncCheckL2BlockNumberModulus = 600
[Synchronizer.L1BlockCheck]
Enable = true
L1SafeBlockPoint = "finalized"
L1SafeBlockOffset = 0
ForceCheckBeforeStart = true
PreCheckEnable = true
L1PreSafeBlockPoint = "safe"
L1PreSafeBlockOffset = 0
[Synchronizer.L1ParallelSynchronization]
MaxClients = 10
MaxPendingNoProcessedBlocks = 25
Expand Down
32 changes: 16 additions & 16 deletions config/environments/local/local.genesis.config.json

Large diffs are not rendered by default.

82 changes: 6 additions & 76 deletions db/migrations/state/0018.sql
Original file line number Diff line number Diff line change
@@ -1,81 +1,11 @@
-- +migrate Up
CREATE TABLE state.blob_inner
(
blob_inner_num BIGINT PRIMARY KEY,
data BYTEA,
block_num BIGINT NOT NULL REFERENCES state.block (block_num) ON DELETE CASCADE
);
ALTER TABLE state.block
ADD COLUMN IF NOT EXISTS checked BOOL NOT NULL DEFAULT FALSE;

ALTER TABLE state.virtual_batch
ADD COLUMN IF NOT EXISTS blob_inner_num BIGINT, -- REFERENCES state.blob_inner (blob_inner_num),
ADD COLUMN IF NOT EXISTS prev_l1_it_root VARCHAR,
ADD COLUMN IF NOT EXISTS prev_l1_it_index BIGINT;

ALTER TABLE IF EXISTS state.proof RENAME TO batch_proof;

ALTER TABLE state.batch_proof
ADD COLUMN IF NOT EXISTS blob_inner_num BIGINT; -- NOT NULL REFERENCES state.blob_inner (blob_inner_num) ON DELETE CASCADE;

CREATE TABLE state.blob_inner_proof
(
blob_inner_num BIGINT NOT NULL, -- REFERENCES state.blob_inner (blob_inner_num) ON DELETE CASCADE,
proof_id VARCHAR,
proof VARCHAR,
input_prover VARCHAR,
prover VARCHAR,
prover_id VARCHAR,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
generating_since TIMESTAMP WITH TIME ZONE,
PRIMARY KEY (blob_inner_num)
);

CREATE TABLE state.blob_outer_proof
(
blob_outer_num BIGINT NOT NULL, -- REFERENCES state.blob_inner (blob_inner_num) ON DELETE CASCADE,
blob_outer_num_final BIGINT NOT NULL, -- REFERENCES state.blob_inner (blob_inner_num) ON DELETE CASCADE,
proof_id VARCHAR,
proof VARCHAR,
input_prover VARCHAR,
prover VARCHAR,
prover_id VARCHAR,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
generating_since TIMESTAMP WITH TIME ZONE,
PRIMARY KEY (blob_outer_num, blob_outer_num_final)
);
-- set block.checked to true for all blocks below max - 1000
UPDATE state.block SET checked = true WHERE block_num <= (SELECT MAX(block_num) - 1000 FROM state.block);

-- +migrate Down
ALTER TABLE state.virtual_batch
DROP COLUMN IF EXISTS blob_inner_num,
DROP COLUMN IF EXISTS prev_l1_it_root,
DROP COLUMN IF EXISTS prev_l1_it_index;

DROP TABLE state.blob_outer_proof;

DROP TABLE state.blob_inner_proof;

DROP TABLE state.batch_proof;

DROP TABLE state.blob_inner;

CREATE TABLE state.proof
(
batch_num BIGINT NOT NULL REFERENCES state.batch (batch_num) ON DELETE CASCADE,
batch_num_final BIGINT NOT NULL REFERENCES state.batch (batch_num) ON DELETE CASCADE,
proof_id VARCHAR,
proof VARCHAR,
input_prover VARCHAR,
prover VARCHAR,
prover_id VARCHAR,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
generating_since TIMESTAMP WITH TIME ZONE,
PRIMARY KEY (batch_num, batch_num_final)
);
ALTER TABLE state.block
DROP COLUMN IF EXISTS checked;

ALTER TABLE state.virtual_batch
DROP COLUMN IF EXISTS blob_inner_num,
DROP COLUMN IF EXISTS prev_l1_it_root,
DROP COLUMN IF EXISTS prev_l1_it_index;

118 changes: 34 additions & 84 deletions db/migrations/state/0018_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,117 +3,67 @@ package migrations_test
import (
"database/sql"
"testing"
"time"

"github.com/stretchr/testify/assert"
)

type migrationTest0018 struct {
migrationBase
}
type migrationTest0018 struct{}

func (m migrationTest0018) InsertData(db *sql.DB) error {
const insertBatch1 = `
INSERT INTO state.batch (batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, wip)
VALUES (1,'0x0001', '0x0001', '0x0001', '0x0001', now(), '0x0001', null, null, true)`

_, err := db.Exec(insertBatch1)
if err != nil {
const addBlock = "INSERT INTO state.block (block_num, received_at, block_hash) VALUES ($1, $2, $3)"
if _, err := db.Exec(addBlock, 1, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1"); err != nil {
return err
}

const insertBatch2 = `
INSERT INTO state.batch (batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, wip)
VALUES (2,'0x0002', '0x0002', '0x0002', '0x0002', now(), '0x0002', null, null, true)`

_, err = db.Exec(insertBatch2)
if err != nil {
if _, err := db.Exec(addBlock, 50, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1"); err != nil {
return err
}

const insertBlock1 = "INSERT INTO state.block (block_num, block_hash, parent_hash, received_at) VALUES (1,'0x0001', '0x0001', now())"

_, err = db.Exec(insertBlock1)
if err != nil {
if _, err := db.Exec(addBlock, 1050, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1"); err != nil {
return err
}

const insertBlock2 = "INSERT INTO state.block (block_num, block_hash, parent_hash, received_at) VALUES (2,'0x0002', '0x0002', now())"

_, err = db.Exec(insertBlock2)
if err != nil {
return err
}

return nil
}

func (m migrationTest0018) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) {
m.AssertNewAndRemovedItemsAfterMigrationUp(t, db)

// Insert blobInner 1
const insertBlobInner = `INSERT INTO state.blob_inner (blob_inner_num, data, block_num) VALUES (1, E'\\x1234', 1);`
_, err := db.Exec(insertBlobInner)
var checked bool
row := db.QueryRow("SELECT checked FROM state.block WHERE block_num = $1", 1)
assert.NoError(t, row.Scan(&checked))
assert.Equal(t, true, checked)
row = db.QueryRow("SELECT checked FROM state.block WHERE block_num = $1", 50)
assert.NoError(t, row.Scan(&checked))
assert.Equal(t, true, checked)
row = db.QueryRow("SELECT checked FROM state.block WHERE block_num = $1", 1050)
assert.NoError(t, row.Scan(&checked))
assert.Equal(t, false, checked)

const addBlock = "INSERT INTO state.block (block_num, received_at, block_hash, checked) VALUES ($1, $2, $3, $4)"
_, err := db.Exec(addBlock, 2, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1", true)
assert.NoError(t, err)

const insertBatch1 = `
INSERT INTO state.virtual_batch (batch_num, tx_hash, coinbase, block_num, sequencer_addr, timestamp_batch_etrog, l1_info_root, blob_inner_num, prev_l1_it_root, prev_l1_it_index)
VALUES (1,'0x0001', '0x0001', 1, '0x0001', now(), '0x0001', 1, '0x0001', 1)`

_, err = db.Exec(insertBatch1)
_, err = db.Exec(addBlock, 3, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1", false)
assert.NoError(t, err)
const sql = `SELECT count(*) FROM state.block WHERE checked = true`
row = db.QueryRow(sql)
var result int
assert.NoError(t, row.Scan(&result))
assert.Equal(t, 3, result, "must be 1,50 per migration and 2 by insert")

const insertBatch2 = `
INSERT INTO state.virtual_batch (batch_num, tx_hash, coinbase, block_num, sequencer_addr, timestamp_batch_etrog, l1_info_root, blob_inner_num, prev_l1_it_root, prev_l1_it_index)
VALUES (2,'0x0002', '0x0002', 2, '0x0002', now(), '0x0002', 1, '0x0002', 2)`
const sqlCheckedFalse = `SELECT count(*) FROM state.block WHERE checked = false`
row = db.QueryRow(sqlCheckedFalse)

_, err = db.Exec(insertBatch2)
assert.NoError(t, err)
assert.NoError(t, row.Scan(&result))
assert.Equal(t, 2, result, "must be 1050 by migration, and 3 by insert")
}

func (m migrationTest0018) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) {
var result int

m.AssertNewAndRemovedItemsAfterMigrationDown(t, db)

// Check column blob_inner_num doesn't exists in state.virtual_batch table
const getBlobInnerNumColumn = `SELECT count(*) FROM information_schema.columns WHERE table_name='virtual_batch' and column_name='blob_inner_num'`
row := db.QueryRow(getBlobInnerNumColumn)
// Check column wip doesn't exists in state.batch table
const sql = `SELECT count(*) FROM state.block`
row := db.QueryRow(sql)
assert.NoError(t, row.Scan(&result))
assert.Equal(t, 0, result)

// Check column prev_l1_it_root doesn't exists in state.virtual_batch table
const getPrevL1ITRootColumn = `SELECT count(*) FROM information_schema.columns WHERE table_name='virtual_batch' and column_name='prev_l1_it_root'`
row = db.QueryRow(getPrevL1ITRootColumn)
assert.NoError(t, row.Scan(&result))
assert.Equal(t, 0, result)

// Check column prev_l1_it_index doesn't exists in state.virtual_batch table
const getPrevL1ITIndexColumn = `SELECT count(*) FROM information_schema.columns WHERE table_name='virtual_batch' and column_name='prev_l1_it_index'`
row = db.QueryRow(getPrevL1ITIndexColumn)
assert.NoError(t, row.Scan(&result))
assert.Equal(t, 0, result)
assert.Equal(t, 5, result)
}

func TestMigration0018(t *testing.T) {
m := migrationTest0018{
migrationBase: migrationBase{
removedTables: []tableMetadata{
{"state", "proof"},
},

newTables: []tableMetadata{
{"state", "blob_inner"},
{"state", "batch_proof"},
{"state", "blob_inner_proof"},
{"state", "blob_outer_proof"},
},

newColumns: []columnMetadata{
{"state", "virtual_batch", "blob_inner_num"},
{"state", "virtual_batch", "prev_l1_it_root"},
{"state", "virtual_batch", "prev_l1_it_index"},
},
},
}
runMigrationTest(t, 18, m)
runMigrationTest(t, 18, migrationTest0018{})
}
94 changes: 75 additions & 19 deletions db/migrations/state/0019.sql
Original file line number Diff line number Diff line change
@@ -1,25 +1,81 @@
-- +migrate Up
CREATE TABLE state.blob_inner
(
blob_inner_num BIGINT PRIMARY KEY,
data BYTEA,
block_num BIGINT NOT NULL REFERENCES state.block (block_num) ON DELETE CASCADE
);

-- the update below fix the wrong receipt TX indexes
WITH map_fix_tx_index AS (
SELECT t.l2_block_num AS block_num
, t.hash AS tx_hash
, r.tx_index AS current_index
, (ROW_NUMBER() OVER (PARTITION BY t.l2_block_num ORDER BY r.tx_index))-1 AS correct_index
FROM state.receipt r
INNER JOIN state."transaction" t
ON t.hash = r.tx_hash
)
UPDATE state.receipt AS r
SET tx_index = m.correct_index
FROM map_fix_tx_index m
WHERE m.block_num = r.block_num
AND m.tx_hash = r.tx_hash
AND m.current_index = r.tx_index
AND m.current_index != m.correct_index;
ALTER TABLE state.virtual_batch
ADD COLUMN IF NOT EXISTS blob_inner_num BIGINT, -- REFERENCES state.blob_inner (blob_inner_num),
ADD COLUMN IF NOT EXISTS prev_l1_it_root VARCHAR,
ADD COLUMN IF NOT EXISTS prev_l1_it_index BIGINT;

ALTER TABLE IF EXISTS state.proof RENAME TO batch_proof;

ALTER TABLE state.batch_proof
ADD COLUMN IF NOT EXISTS blob_inner_num BIGINT; -- NOT NULL REFERENCES state.blob_inner (blob_inner_num) ON DELETE CASCADE;

CREATE TABLE state.blob_inner_proof
(
blob_inner_num BIGINT NOT NULL, -- REFERENCES state.blob_inner (blob_inner_num) ON DELETE CASCADE,
proof_id VARCHAR,
proof VARCHAR,
input_prover VARCHAR,
prover VARCHAR,
prover_id VARCHAR,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
generating_since TIMESTAMP WITH TIME ZONE,
PRIMARY KEY (blob_inner_num)
);

CREATE TABLE state.blob_outer_proof
(
blob_outer_num BIGINT NOT NULL, -- REFERENCES state.blob_inner (blob_inner_num) ON DELETE CASCADE,
blob_outer_num_final BIGINT NOT NULL, -- REFERENCES state.blob_inner (blob_inner_num) ON DELETE CASCADE,
proof_id VARCHAR,
proof VARCHAR,
input_prover VARCHAR,
prover VARCHAR,
prover_id VARCHAR,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
generating_since TIMESTAMP WITH TIME ZONE,
PRIMARY KEY (blob_outer_num, blob_outer_num_final)
);

-- +migrate Down
ALTER TABLE state.virtual_batch
DROP COLUMN IF EXISTS blob_inner_num,
DROP COLUMN IF EXISTS prev_l1_it_root,
DROP COLUMN IF EXISTS prev_l1_it_index;

DROP TABLE state.blob_outer_proof;

DROP TABLE state.blob_inner_proof;

DROP TABLE state.batch_proof;

DROP TABLE state.blob_inner;

CREATE TABLE state.proof
(
batch_num BIGINT NOT NULL REFERENCES state.batch (batch_num) ON DELETE CASCADE,
batch_num_final BIGINT NOT NULL REFERENCES state.batch (batch_num) ON DELETE CASCADE,
proof_id VARCHAR,
proof VARCHAR,
input_prover VARCHAR,
prover VARCHAR,
prover_id VARCHAR,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
generating_since TIMESTAMP WITH TIME ZONE,
PRIMARY KEY (batch_num, batch_num_final)
);

-- no action is needed, the data fixed by the
-- migrate up must remain fixed
ALTER TABLE state.virtual_batch
DROP COLUMN IF EXISTS blob_inner_num,
DROP COLUMN IF EXISTS prev_l1_it_root,
DROP COLUMN IF EXISTS prev_l1_it_index;

Loading

0 comments on commit c7d7834

Please sign in to comment.