Merged

Changes from 57 commits (58 commits in total):
ea5a603 port changes from #1013 (jonastheis, Dec 10, 2024)
b30006f port changes from #1068 (jonastheis, Dec 10, 2024)
de37d47 go.mod tidy (jonastheis, Dec 10, 2024)
e34fecf fix compile error (jonastheis, Dec 10, 2024)
1327771 fix goimports (jonastheis, Dec 10, 2024)
b05954d fix log (jonastheis, Dec 10, 2024)
ce8f785 address review comments (jonastheis, Dec 12, 2024)
f10c383 upgrade golang.org/x/net to 0.23.0 (jonastheis, Dec 12, 2024)
cb0a90e Merge branch 'develop' into jt/l1-follower-mode (jonastheis, Dec 12, 2024)
b918a2b port changes from #1018 (jonastheis, Dec 10, 2024)
e51182d fix tests and linter errors (jonastheis, Dec 11, 2024)
4e6f759 address review comments (jonastheis, Dec 12, 2024)
fd6bff3 Merge branch 'develop' into jt/l1-follower-mode-l1-reader (jonastheis, Dec 26, 2024)
ab3e873 refactor rollup sync service / verifier to use CalldataBlobSource to … (jonastheis, Dec 26, 2024)
4ced6f2 add configuration and initialize blob clients (jonastheis, Dec 27, 2024)
6aafa74 fix unit tests (jonastheis, Dec 27, 2024)
da81a2e remove unused code (jonastheis, Dec 27, 2024)
8750045 address review comments (jonastheis, Jan 2, 2025)
2499c69 address more review comments (jonastheis, Jan 2, 2025)
fb4fe7c implement first version of new da-codec and to handle multiple batche… (jonastheis, Jan 13, 2025)
9bf2f25 add CommitBatchDAV7 and handle multiple commit events submitted in a … (jonastheis, Jan 22, 2025)
d222f58 Merge remote-tracking branch 'origin/develop' into jt/rollup-verifier… (jonastheis, Jan 22, 2025)
c56be0d Merge branch 'jt/rollup-verifier-use-code-from-l1-follower' into jt/l… (jonastheis, Jan 22, 2025)
3950e58 fix bug due to previous batch being empty when processing the first b… (jonastheis, Jan 22, 2025)
a043d2f Allow using MPT (omerfirmak, Dec 23, 2024)
94c0ad5 Merge remote-tracking branch 'origin/omerfirmak/mpt' into jt/rollup-v… (jonastheis, Jan 28, 2025)
67c1866 Merge branch 'jt/rollup-verifier-use-code-from-l1-follower' into jt/l… (jonastheis, Jan 28, 2025)
43d54cb update to latest da-codec (jonastheis, Feb 4, 2025)
4290d16 add field to CommittedBatchMeta to store LastL1MessageQueueHash for C… (jonastheis, Feb 4, 2025)
574dd53 adjust rollup verifier to support CodecV7 batches (jonastheis, Feb 4, 2025)
53b6ebf address review comments (jonastheis, Feb 5, 2025)
ab3bedf Merge remote-tracking branch 'origin/develop' into jt/rollup-verifier… (jonastheis, Feb 6, 2025)
3335654 fix issues after merge (jonastheis, Feb 6, 2025)
3e18f7f Merge remote-tracking branch 'origin/jt/rollup-verifier-use-code-from… (jonastheis, Feb 6, 2025)
2342335 Merge remote-tracking branch 'origin/develop' into jt/rollup-verifier… (jonastheis, Feb 6, 2025)
7c14639 Merge remote-tracking branch 'origin/jt/rollup-verifier-use-code-from… (jonastheis, Feb 6, 2025)
634d1f1 go mod tidy (jonastheis, Feb 6, 2025)
0fa3743 fix unit tests (jonastheis, Feb 7, 2025)
e048f53 Merge remote-tracking branch 'origin/develop' into jt/l1-follower-mod… (jonastheis, Feb 10, 2025)
a2a68e8 update da-codec (jonastheis, Feb 10, 2025)
ca6649e add test TestValidateBatchCodecV7 (jonastheis, Feb 10, 2025)
80976ad go mod tidy (jonastheis, Feb 10, 2025)
4022989 do not log error on shutdown (jonastheis, Feb 11, 2025)
d4cc897 add sanity check for version to deserialization of committedBatchMetaV7 (jonastheis, Feb 12, 2025)
a2cb3d1 port changes from #1073 (jonastheis, Feb 12, 2025)
6d5af23 chore: auto version bump [bot] (Thegaram, Feb 12, 2025)
3c21f4e address review comments (jonastheis, Feb 13, 2025)
0bd6eb1 add more logs (jonastheis, Feb 13, 2025)
9139e94 disable ENRUpdater if DA sync mode is enabled (jonastheis, Feb 13, 2025)
e9154ca exit pipeline if context is cancelled (jonastheis, Feb 13, 2025)
8820949 correctly handle override by setting the head of the chain to the par… (jonastheis, Feb 13, 2025)
8ba140c fix error with genesis event being nil (jonastheis, Feb 13, 2025)
e6af5b6 Merge branch 'jt/l1-follower-mode-update-da-codec' into jt/permission… (jonastheis, Feb 13, 2025)
9533eab rebase #1087 to new base branch (jonastheis, Feb 13, 2025)
b794dda Merge remote-tracking branch 'origin/develop' into jt/permissionless-… (jonastheis, Feb 14, 2025)
438ec09 chore: auto version bump [bot] (jonastheis, Feb 14, 2025)
1ecc05d Merge branch 'jt/permissionless-batches-recovery-rebased' into jt/per… (jonastheis, Feb 14, 2025)
874df56 Merge branch 'develop' into jt/permissionless-batches-recovery-mine-r… (Thegaram, Feb 25, 2025)
6 changes: 6 additions & 0 deletions cmd/geth/main.go
@@ -177,6 +177,12 @@ var (
utils.DABlockNativeAPIEndpointFlag,
utils.DABlobScanAPIEndpointFlag,
utils.DABeaconNodeAPIEndpointFlag,
utils.DARecoveryModeFlag,
utils.DARecoveryInitialL1BlockFlag,
utils.DARecoveryInitialBatchFlag,
utils.DARecoverySignBlocksFlag,
utils.DARecoveryL2EndBlockFlag,
utils.DARecoveryProduceBlocksFlag,
}

rpcFlags = []cli.Flag{
50 changes: 46 additions & 4 deletions cmd/utils/flags.go
@@ -886,22 +886,46 @@
}

// DA syncing settings
DASyncEnabledFlag = &cli.BoolFlag{
DASyncEnabledFlag = cli.BoolFlag{
Name: "da.sync",
Usage: "Enable node syncing from DA",
}
DABlobScanAPIEndpointFlag = &cli.StringFlag{
DABlobScanAPIEndpointFlag = cli.StringFlag{
Name: "da.blob.blobscan",
Usage: "BlobScan blob API endpoint",
}
DABlockNativeAPIEndpointFlag = &cli.StringFlag{
DABlockNativeAPIEndpointFlag = cli.StringFlag{
Name: "da.blob.blocknative",
Usage: "BlockNative blob API endpoint",
}
DABeaconNodeAPIEndpointFlag = &cli.StringFlag{
DABeaconNodeAPIEndpointFlag = cli.StringFlag{
Name: "da.blob.beaconnode",
Usage: "Beacon node API endpoint",
}
DARecoveryModeFlag = cli.BoolFlag{
Name: "da.recovery",
Usage: "Enable recovery mode for DA syncing",
}
DARecoveryInitialL1BlockFlag = cli.Uint64Flag{
Name: "da.recovery.initiall1block",
Usage: "Initial L1 block to start recovery from",
}
DARecoveryInitialBatchFlag = cli.Uint64Flag{
Name: "da.recovery.initialbatch",
Usage: "Initial batch to start recovery from",
}
DARecoverySignBlocksFlag = cli.BoolFlag{
Name: "da.recovery.signblocks",
Usage: "Sign blocks during recovery (requires correct Clique signer key and history of blocks with Clique signatures)",
}
DARecoveryL2EndBlockFlag = cli.Uint64Flag{
Name: "da.recovery.l2endblock",
Usage: "End L2 block to recover to",
}
DARecoveryProduceBlocksFlag = cli.BoolFlag{
Name: "da.recovery.produceblocks",
Usage: "Produce unsigned blocks after L1 recovery for permissionless batch submission",
}
)

// MakeDataDir retrieves the currently requested data directory, terminating
@@ -1651,6 +1675,24 @@ func setDA(ctx *cli.Context, cfg *ethconfig.Config) {
if ctx.IsSet(DABeaconNodeAPIEndpointFlag.Name) {
cfg.DA.BeaconNodeAPIEndpoint = ctx.String(DABeaconNodeAPIEndpointFlag.Name)
}
if ctx.IsSet(DARecoveryModeFlag.Name) {
cfg.DA.RecoveryMode = ctx.Bool(DARecoveryModeFlag.Name)
}
if ctx.IsSet(DARecoveryInitialL1BlockFlag.Name) {
cfg.DA.InitialL1Block = ctx.Uint64(DARecoveryInitialL1BlockFlag.Name)
}
if ctx.IsSet(DARecoveryInitialBatchFlag.Name) {
cfg.DA.InitialBatch = ctx.Uint64(DARecoveryInitialBatchFlag.Name)
}
if ctx.IsSet(DARecoverySignBlocksFlag.Name) {
cfg.DA.SignBlocks = ctx.Bool(DARecoverySignBlocksFlag.Name)
}
if ctx.IsSet(DARecoveryL2EndBlockFlag.Name) {
cfg.DA.L2EndBlock = ctx.Uint64(DARecoveryL2EndBlockFlag.Name)
}
if ctx.IsSet(DARecoveryProduceBlocksFlag.Name) {
cfg.DA.ProduceBlocks = ctx.Bool(DARecoveryProduceBlocksFlag.Name)
}
}

func setMaxBlockRange(ctx *cli.Context, cfg *ethconfig.Config) {
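For orientation, below is a minimal, self-contained Go sketch of the recovery settings this diff introduces. The flag names and config fields (RecoveryMode, InitialL1Block, InitialBatch, SignBlocks, L2EndBlock, ProduceBlocks) are taken from the diff above; the recoveryConfig mirror type, the stdlib flag wiring, and the example invocation in the comment are illustrative assumptions, not the repository's actual urfave/cli setup.

package main

import (
	"flag"
	"fmt"
)

// recoveryConfig is an illustrative mirror of the DA recovery settings wired up
// in setDA above (cfg.DA.RecoveryMode, InitialL1Block, InitialBatch, SignBlocks,
// L2EndBlock, ProduceBlocks). It is not the repository's actual type.
type recoveryConfig struct {
	RecoveryMode   bool
	InitialL1Block uint64
	InitialBatch   uint64
	SignBlocks     bool
	L2EndBlock     uint64
	ProduceBlocks  bool
}

func main() {
	// Hypothetical invocation this sketch mimics (the values are made up):
	//   geth --da.sync --da.recovery --da.recovery.initiall1block 19000000 \
	//        --da.recovery.initialbatch 3000 --da.recovery.produceblocks
	var cfg recoveryConfig
	flag.BoolVar(&cfg.RecoveryMode, "da.recovery", false, "Enable recovery mode for DA syncing")
	flag.Uint64Var(&cfg.InitialL1Block, "da.recovery.initiall1block", 0, "Initial L1 block to start recovery from")
	flag.Uint64Var(&cfg.InitialBatch, "da.recovery.initialbatch", 0, "Initial batch to start recovery from")
	flag.BoolVar(&cfg.SignBlocks, "da.recovery.signblocks", false, "Sign blocks during recovery")
	flag.Uint64Var(&cfg.L2EndBlock, "da.recovery.l2endblock", 0, "End L2 block to recover to")
	flag.BoolVar(&cfg.ProduceBlocks, "da.recovery.produceblocks", false, "Produce unsigned blocks after recovery")
	flag.Parse()

	fmt.Printf("recovery config: %+v\n", cfg)
}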
56 changes: 50 additions & 6 deletions core/blockchain.go
@@ -1806,15 +1806,15 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
return it.index, err
}

func (bc *BlockChain) BuildAndWriteBlock(parentBlock *types.Block, header *types.Header, txs types.Transactions) (WriteStatus, error) {
func (bc *BlockChain) BuildAndWriteBlock(parentBlock *types.Block, header *types.Header, txs types.Transactions, sign bool) (*types.Block, WriteStatus, error) {
if !bc.chainmu.TryLock() {
return NonStatTy, errInsertionInterrupted
return nil, NonStatTy, errInsertionInterrupted
}
defer bc.chainmu.Unlock()

statedb, err := state.New(parentBlock.Root(), bc.stateCache, bc.snaps)
if err != nil {
return NonStatTy, err
return nil, NonStatTy, err
}

statedb.StartPrefetcher("l1sync", nil)
@@ -1825,18 +1825,51 @@ func (bc *BlockChain) BuildAndWriteBlock(parentBlock *types.Block, header *types
tempBlock := types.NewBlockWithHeader(header).WithBody(txs, nil)
receipts, logs, gasUsed, err := bc.processor.Process(tempBlock, statedb, bc.vmConfig)
if err != nil {
return NonStatTy, fmt.Errorf("error processing block: %w", err)
return nil, NonStatTy, fmt.Errorf("error processing block: %w", err)
}

// TODO: once we have the extra and difficulty we need to verify the signature of the block with Clique
// This should be done with https://github.com/scroll-tech/go-ethereum/pull/913.

// finalize and assemble block as fullBlock
if sign {
// remember the time as Clique will override it
originalTime := header.Time

err = bc.engine.Prepare(bc, header)
if err != nil {
return nil, NonStatTy, fmt.Errorf("error preparing block %d: %w", tempBlock.Number().Uint64(), err)
}

// we want to re-sign the block: set time to original value again.
header.Time = originalTime
}

// finalize and assemble block as fullBlock: replicates consensus.FinalizeAndAssemble()
header.GasUsed = gasUsed
header.Root = statedb.IntermediateRoot(bc.chainConfig.IsEIP158(header.Number))

fullBlock := types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil))

// Sign the block if requested
if sign {
resultCh, stopCh := make(chan *types.Block), make(chan struct{})
if err = bc.engine.Seal(bc, fullBlock, resultCh, stopCh); err != nil {
return nil, NonStatTy, fmt.Errorf("error sealing block %d: %w", fullBlock.Number().Uint64(), err)
}
// Clique.Seal() will only wait for a second before giving up on us, so make sure there is nothing
// computationally heavy and no blocking call between the call to Seal and the receive on resultCh below.
fullBlock = <-resultCh
if fullBlock == nil {
return nil, NonStatTy, fmt.Errorf("sealing block failed %d: block is nil", header.Number.Uint64())
}

// verify the generated block with local consensus engine to make sure everything is as expected
if err = bc.engine.VerifyHeader(bc, fullBlock.Header(), true); err != nil {
return nil, NonStatTy, fmt.Errorf("error verifying signed block %d: %w", fullBlock.Number().Uint64(), err)
}
}

blockHash := fullBlock.Hash()
// manually replace the block hash in the receipts
for i, receipt := range receipts {
@@ -1853,7 +1886,18 @@ func (bc *BlockChain) BuildAndWriteBlock(parentBlock *types.Block, header *types
l.BlockHash = blockHash
}

return bc.writeBlockWithState(fullBlock, receipts, logs, statedb, false)
// Double check: even though we just built the block, make sure it is valid.
if err = bc.validator.ValidateBody(fullBlock); err != nil {
bc.reportBlock(fullBlock, receipts, err)
return nil, NonStatTy, fmt.Errorf("error validating block %d: %w", fullBlock.Number().Uint64(), err)
}
if err = bc.validator.ValidateState(fullBlock, statedb, receipts, gasUsed); err != nil {
bc.reportBlock(fullBlock, receipts, err)
return nil, NonStatTy, fmt.Errorf("error validating block %d: %w", fullBlock.Number().Uint64(), err)
}

writeStatus, err := bc.writeBlockWithState(fullBlock, receipts, logs, statedb, false)
return fullBlock, writeStatus, err
}

// insertSideChain is called when an import batch hits upon a pruned ancestor
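To make the signature change easier to read, here is a compile-ready sketch of the caller pattern it implies: BuildAndWriteBlock now returns the assembled block together with the write status, and the sign flag controls whether the block is sealed and re-verified with the consensus engine. The interface, the stand-in types, and the replay loop are assumptions for illustration only; the actual recovery pipeline in this PR may wire things differently.

package main

import (
	"errors"
	"fmt"
)

// Stand-in types; the real ones live in core and core/types.
type (
	Block        struct{ Number uint64 }
	Header       struct{ Number, Time uint64 }
	Transactions []string
	WriteStatus  int
)

const (
	NonStatTy WriteStatus = iota
	CanonStatTy
)

// blockBuilder abstracts the changed method: it now returns the built block in
// addition to the write status, and the sign flag decides whether the block is
// sealed (and verified) with the Clique engine before being written.
type blockBuilder interface {
	BuildAndWriteBlock(parent *Block, header *Header, txs Transactions, sign bool) (*Block, WriteStatus, error)
}

// replayBlocks shows why returning the block matters for recovery: each newly
// written block becomes the parent for the next header derived from DA.
func replayBlocks(bc blockBuilder, parent *Block, headers []*Header, txs []Transactions, sign bool) (*Block, error) {
	for i, h := range headers {
		block, status, err := bc.BuildAndWriteBlock(parent, h, txs[i], sign)
		if err != nil {
			return nil, fmt.Errorf("building block %d: %w", h.Number, err)
		}
		if status == NonStatTy {
			return nil, errors.New("block was not written")
		}
		parent = block
	}
	return parent, nil
}

// fakeChain is a trivial stub so the sketch runs end to end.
type fakeChain struct{}

func (fakeChain) BuildAndWriteBlock(parent *Block, h *Header, _ Transactions, _ bool) (*Block, WriteStatus, error) {
	return &Block{Number: h.Number}, CanonStatTy, nil
}

func main() {
	headers := []*Header{{Number: 1}, {Number: 2}}
	head, err := replayBlocks(fakeChain{}, &Block{Number: 0}, headers, []Transactions{nil, nil}, false)
	if err != nil {
		panic(err)
	}
	fmt.Println("replayed up to block", head.Number)
}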
22 changes: 15 additions & 7 deletions eth/backend.go
@@ -229,11 +229,15 @@ func New(stack *node.Node, config *ethconfig.Config, l1Client l1.Client) (*Ether
// simply let them run simultaneously. If messages are missing in DA syncing, it will be handled by the syncing pipeline
// by waiting and retrying.
if config.EnableDASyncing {
eth.syncingPipeline, err = da_syncer.NewSyncingPipeline(context.Background(), eth.blockchain, chainConfig, eth.chainDb, l1Client, stack.Config().L1DeploymentBlock, config.DA)
if err != nil {
return nil, fmt.Errorf("cannot initialize da syncer: %w", err)
// Do not start syncing pipeline if we are producing blocks for permissionless batches.
if !config.DA.ProduceBlocks {
eth.syncingPipeline, err = da_syncer.NewSyncingPipeline(context.Background(), eth.blockchain, chainConfig, eth.chainDb, l1Client, stack.Config().L1DeploymentBlock, config.DA)
if err != nil {
return nil, fmt.Errorf("cannot initialize da syncer: %w", err)
}

eth.syncingPipeline.Start()
}
eth.syncingPipeline.Start()
}

// initialize and start L1 message sync service
@@ -273,7 +277,8 @@ func New(stack *node.Node, config *ethconfig.Config, l1Client l1.Client) (*Ether
return nil, err
}

eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock, config.EnableDASyncing)
config.Miner.SigningDisabled = config.DA.ProduceBlocks
eth.miner = miner.New(eth, &config.Miner, eth.blockchain.Config(), eth.EventMux(), eth.engine, eth.isLocalBlock, config.EnableDASyncing && !config.DA.ProduceBlocks)
eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData))

eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil}
@@ -588,7 +593,10 @@ func (s *Ethereum) Protocols() []p2p.Protocol {
// Start implements node.Lifecycle, starting all internal goroutines needed by the
// Ethereum protocol implementation.
func (s *Ethereum) Start() error {
eth.StartENRUpdater(s.blockchain, s.p2pServer.LocalNode())
// the handler is not enabled when DA syncing is enabled, so the ENR updater is not started either
if !s.config.EnableDASyncing {
eth.StartENRUpdater(s.blockchain, s.p2pServer.LocalNode())
}

// Start the bloom bits servicing goroutines
s.startBloomHandlers(params.BloomBitsBlocks)
@@ -629,7 +637,7 @@ func (s *Ethereum) Stop() error {
if s.config.EnableRollupVerify {
s.rollupSyncService.Stop()
}
if s.config.EnableDASyncing {
if s.config.EnableDASyncing && s.syncingPipeline != nil {
s.syncingPipeline.Stop()
}
s.miner.Close()
2 changes: 2 additions & 0 deletions miner/miner.go
@@ -60,6 +60,8 @@ type Config struct {
StoreSkippedTxTraces bool // Whether store the wrapped traces when storing a skipped tx
MaxAccountsNum int // Maximum number of accounts that miner will fetch the pending transactions of when building a new block
CCCMaxWorkers int // Maximum number of workers to use for async CCC tasks

SigningDisabled bool // Whether to disable signing blocks with the consensus engine
}

// Miner creates blocks and searches for proof-of-work values.
63 changes: 38 additions & 25 deletions miner/scroll_worker.go
@@ -477,11 +477,19 @@ func (w *worker) newWork(now time.Time, parentHash common.Hash, reorging bool, r
header.Coinbase = w.coinbase
}

prepareStart := time.Now()
if err := w.engine.Prepare(w.chain, header); err != nil {
return fmt.Errorf("failed to prepare header for mining: %w", err)
if w.config.SigningDisabled {
// Need to make sure to set difficulty so that a new canonical chain is detected in Blockchain
header.Difficulty = new(big.Int).SetUint64(1)
header.MixDigest = common.Hash{}
header.Coinbase = common.Address{}
header.Nonce = types.BlockNonce{}
} else {
prepareStart := time.Now()
if err := w.engine.Prepare(w.chain, header); err != nil {
return fmt.Errorf("failed to prepare header for mining: %w", err)
}
prepareTimer.UpdateSince(prepareStart)
}
prepareTimer.UpdateSince(prepareStart)

var nextL1MsgIndex uint64
if dbVal := rawdb.ReadFirstQueueIndexNotInL2Block(w.eth.ChainDb(), header.ParentHash); dbVal != nil {
@@ -828,28 +836,33 @@ func (w *worker) commit() (common.Hash, error) {
return common.Hash{}, err
}

sealHash := w.engine.SealHash(block.Header())
log.Info("Committing new mining work", "number", block.Number(), "sealhash", sealHash,
"txs", w.current.txs.Len(),
"gas", block.GasUsed(), "fees", totalFees(block, w.current.receipts))

resultCh, stopCh := make(chan *types.Block), make(chan struct{})
if err := w.engine.Seal(w.chain, block, resultCh, stopCh); err != nil {
return common.Hash{}, err
}
// Clique.Seal() will only wait for a second before giving up on us. So make sure there is nothing computational heavy
// or a call that blocks between the call to Seal and the line below. Seal might introduce some delay, so we keep track of
// that artificially added delay and subtract it from overall runtime of commit().
sealStart := time.Now()
block = <-resultCh
sealDelay = time.Since(sealStart)
if block == nil {
return common.Hash{}, errors.New("missed seal response from consensus engine")
}
var sealHash common.Hash
if w.config.SigningDisabled {
sealHash = block.Hash()
} else {
sealHash = w.engine.SealHash(block.Header())
log.Info("Committing new mining work", "number", block.Number(), "sealhash", sealHash,
"txs", w.current.txs.Len(),
"gas", block.GasUsed(), "fees", totalFees(block, w.current.receipts))

resultCh, stopCh := make(chan *types.Block), make(chan struct{})
if err := w.engine.Seal(w.chain, block, resultCh, stopCh); err != nil {
return common.Hash{}, err
}
// Clique.Seal() will only wait for a second before giving up on us. So make sure there is nothing computational heavy
// or a call that blocks between the call to Seal and the line below. Seal might introduce some delay, so we keep track of
// that artificially added delay and subtract it from overall runtime of commit().
sealStart := time.Now()
block = <-resultCh
sealDelay = time.Since(sealStart)
if block == nil {
return common.Hash{}, errors.New("missed seal response from consensus engine")
}

// verify the generated block with local consensus engine to make sure everything is as expected
if err = w.engine.VerifyHeader(w.chain, block.Header(), true); err != nil {
return common.Hash{}, retryableCommitError{inner: err}
// verify the generated block with local consensus engine to make sure everything is as expected
if err = w.engine.VerifyHeader(w.chain, block.Header(), true); err != nil {
return common.Hash{}, retryableCommitError{inner: err}
}
}

blockHash := block.Hash()
2 changes: 1 addition & 1 deletion params/version.go
@@ -24,7 +24,7 @@ import (
const (
VersionMajor = 5 // Major version component of the current release
VersionMinor = 8 // Minor version component of the current release
VersionPatch = 7 // Patch version component of the current release
VersionPatch = 8 // Patch version component of the current release
VersionMeta = "mainnet" // Version metadata to append to the version string
)

6 changes: 6 additions & 0 deletions rollup/da_syncer/batch_queue.go
@@ -36,6 +36,12 @@ func (bq *BatchQueue) NextBatch(ctx context.Context) (da.Entry, error) {
}

for {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}

daEntry, err := bq.DAQueue.NextDA(ctx)
if err != nil {
return nil, err
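The select added at the top of the loop is the standard non-blocking cancellation check; a minimal runnable sketch of the same pattern follows, with a hypothetical nextItem producer standing in for DAQueue.NextDA.

package main

import (
	"context"
	"fmt"
	"time"
)

// nextItem stands in for bq.DAQueue.NextDA; it is a hypothetical producer used
// only to make the sketch runnable.
func nextItem(ctx context.Context) (int, error) {
	select {
	case <-ctx.Done():
		return 0, ctx.Err()
	case <-time.After(50 * time.Millisecond):
		return 1, nil
	}
}

// drain mirrors the loop shape in BatchQueue.NextBatch: every iteration first
// does a non-blocking check on ctx.Done() so the pipeline exits promptly on
// shutdown instead of spinning until the next producer call happens to fail.
func drain(ctx context.Context) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		item, err := nextItem(ctx)
		if err != nil {
			return err
		}
		fmt.Println("got item", item)
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	fmt.Println("stopped:", drain(ctx))
}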
5 changes: 4 additions & 1 deletion rollup/da_syncer/da/calldata_blob_source.go
@@ -10,6 +10,7 @@ import (
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/ethdb"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors"
"github.com/scroll-tech/go-ethereum/rollup/l1"
@@ -65,6 +66,8 @@ func (ds *CalldataBlobSource) NextData() (Entries, error) {
to = min(to, ds.l1Finalized)
}

log.Debug("Fetching rollup events", "from", ds.l1Height, "to", to, "finalized", ds.l1Finalized)

if ds.l1Height > to {
return nil, ErrSourceExhausted
}
@@ -194,7 +197,7 @@ func (ds *CalldataBlobSource) getCommitBatchDA(commitEvents []*l1.CommitBatchEve
}

if commitEvents[0].BatchIndex().Uint64() == 0 {
return Entries{NewCommitBatchDAV0Empty()}, nil
return Entries{NewCommitBatchDAV0Empty(commitEvents[0])}, nil
}

firstCommitEvent := commitEvents[0]
5 changes: 4 additions & 1 deletion rollup/da_syncer/da/commitV0.go
@@ -10,6 +10,7 @@ import (
"github.com/scroll-tech/go-ethereum/core/rawdb"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethdb"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors"
"github.com/scroll-tech/go-ethereum/rollup/l1"
)
@@ -65,9 +66,10 @@ func NewCommitBatchDAV0WithChunks(db ethdb.Database,
}, nil
}

func NewCommitBatchDAV0Empty() *CommitBatchDAV0 {
func NewCommitBatchDAV0Empty(event *l1.CommitBatchEvent) *CommitBatchDAV0 {
return &CommitBatchDAV0{
batchIndex: 0,
event: event,
}
}

@@ -172,6 +174,7 @@ func getL1Messages(db ethdb.Database, parentTotalL1MessagePopped uint64, skipped
}
l1Tx := rawdb.ReadL1Message(db, currentIndex)
if l1Tx == nil {
log.Info("L1 message not yet available", "index", currentIndex)
// message not yet available
// we return serrors.EOFError as this will be handled in the syncing pipeline with a backoff and retry
return nil, serrors.EOFError
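The comment above notes that a missing L1 message is surfaced as serrors.EOFError so the syncing pipeline can back off and retry. Below is a minimal sketch of that handling pattern, assuming an errors.Is-compatible sentinel and a simple capped exponential backoff; the pipeline's actual retry policy may differ.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// errEOF stands in for serrors.EOFError: the source has temporarily run out of
// data (for example, an L1 message is not yet available locally).
var errEOF = errors.New("temporary EOF")

// makeStep returns a stand-in pipeline step that fails twice before succeeding.
func makeStep() func() error {
	calls := 0
	return func() error {
		calls++
		if calls < 3 {
			return errEOF
		}
		return nil
	}
}

// runWithBackoff retries a step whenever it reports a temporary EOF, waiting
// longer each time, and gives up only when the context is cancelled.
func runWithBackoff(ctx context.Context, step func() error) error {
	backoff := 100 * time.Millisecond
	for {
		err := step()
		if err == nil {
			return nil
		}
		if !errors.Is(err, errEOF) {
			return err // permanent error: propagate
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(backoff):
		}
		if backoff < 2*time.Second {
			backoff *= 2
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	fmt.Println("result:", runWithBackoff(ctx, makeStep()))
}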