6 changes: 6 additions & 0 deletions eth/downloader/beaconsync.go
@@ -54,6 +54,12 @@ func (b *beaconBackfiller) suspend() {
 // resume starts the downloader threads for backfilling state and chain data.
 func (b *beaconBackfiller) resume() {
 	b.lock.Lock()
+	if b.filling {
+		// If a previous filling cycle is still running, just ignore this start
+		// request. // TODO(karalabe): We should make this channel driven
+		b.lock.Unlock()
+		return
+	}
 	b.filling = true
 	mode := b.syncMode
 	b.lock.Unlock()
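The added guard makes resume idempotent: if a filling cycle is already in flight, a second call simply unlocks and returns instead of spawning a parallel one. Below is a minimal, self-contained sketch of that mutex-plus-flag pattern, using hypothetical names and simulated work rather than the real beaconBackfiller:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// backfiller is a toy stand-in: a boolean guarded by a mutex records whether
// a fill cycle is already running, so redundant resume calls become no-ops.
type backfiller struct {
	lock    sync.Mutex
	filling bool
}

func (b *backfiller) resume() {
	b.lock.Lock()
	if b.filling {
		// A previous cycle is still running; ignore this start request.
		b.lock.Unlock()
		return
	}
	b.filling = true
	b.lock.Unlock()

	go func() {
		defer func() {
			b.lock.Lock()
			b.filling = false // cycle finished, the next resume may start a new one
			b.lock.Unlock()
		}()
		fmt.Println("fill cycle started")
		time.Sleep(10 * time.Millisecond) // simulated work
	}()
}

func main() {
	var b backfiller
	b.resume()
	b.resume() // no-op: the first cycle is still in flight
	time.Sleep(20 * time.Millisecond)
}
```

Run as-is, this prints "fill cycle started" exactly once, which is the behaviour the guard is after.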
2 changes: 0 additions & 2 deletions eth/downloader/downloader.go
@@ -1296,8 +1296,6 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int, beaconMode bool)
 			}
 			d.pivotLock.RUnlock()

-			fmt.Println(pivot)
-
 			frequency := fsHeaderCheckFrequency
 			if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
 				frequency = 1
7 changes: 7 additions & 0 deletions eth/downloader/skeleton.go
@@ -294,6 +294,7 @@ func (s *skeleton) Terminate() error {
 // This method does not block, rather it just waits until the syncer receives the
 // fed header. What the syncer does with it is the syncer's problem.
 func (s *skeleton) Sync(head *types.Header) error {
+	log.Trace("New skeleton head announced", "number", head.Number, "hash", head.Hash())
 	select {
 	case s.headEvents <- head:
 		return nil
@@ -393,6 +394,12 @@ func (s *skeleton) sync(head *types.Header) (*types.Header, error) {
 			if reorged := s.processNewHead(head); reorged {
 				return head, errSyncReorged
 			}
+			// New head was integrated into the skeleton chain. If the backfiller
+			// is still running, it will pick it up. If it already terminated,
+			// a new cycle needs to be spun up.
+			if s.scratchHead == 0 {
+				s.filler.resume()
+			}

 		case req := <-requestFails:
 			s.revertRequest(req)
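Taken together with the beaconsync.go change, the intended flow is: once a new head has been merged into the skeleton and no scratch space is left to fill (s.scratchHead == 0), the syncer pokes the backfiller, and the new filling guard makes that poke harmless if a cycle is already running. A rough sketch of the interaction, using simplified hypothetical types rather than the real skeleton and backfiller:

```go
package main

import "fmt"

// toyFiller stands in for the backfiller: resume is a no-op while a cycle
// is already running, mirroring the new filling guard in beaconsync.go.
type toyFiller struct {
	filling bool
	starts  int
}

func (f *toyFiller) resume() {
	if f.filling {
		return // previous cycle still running, swallow the poke
	}
	f.filling = true
	f.starts++
}

// toySkeleton models only the decision added in skeleton.go: after a new head
// is merged, resume the filler iff no scratch space remains to be fetched.
type toySkeleton struct {
	scratchHead uint64
	filler      *toyFiller
}

func (s *toySkeleton) onNewHead() {
	if s.scratchHead == 0 {
		s.filler.resume()
	}
}

func main() {
	s := &toySkeleton{scratchHead: 0, filler: &toyFiller{}}
	s.onNewHead()
	s.onNewHead() // redundant poke, deduplicated by the filling guard
	fmt.Println("fill cycles started:", s.filler.starts)
}
```

The design keeps the skeleton free of bookkeeping about the filler's lifecycle: it can resume unconditionally whenever its own condition holds and rely on the filler to deduplicate redundant starts.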