@@ -1606,30 +1606,17 @@ func (d *Downloader) processSnapSyncContent() error {
 
 	// To cater for moving pivot points, track the pivot block and subsequently
 	// accumulated download results separately.
-	//
-	// These will be nil up to the point where we reach the pivot, and will only
-	// be set temporarily if the synced blocks are piling up, but the pivot is
-	// still busy downloading. In that case, we need to occasionally check for
-	// pivot moves, so need to unblock the loop. These fields will accumulate
-	// the results in the meantime.
-	//
-	// Note, there's no issue with memory piling up since after 64 blocks the
-	// pivot will forcefully move so these accumulators will be dropped.
 	var (
 		oldPivot *fetchResult   // Locked in pivot block, might change eventually
 		oldTail  []*fetchResult // Downloaded content after the pivot
 	)
 	for {
-		// Wait for the next batch of downloaded data to be available. If we have
-		// not yet reached the pivot point, wait blockingly as there's no need to
-		// spin-loop check for pivot moves. If we reached the pivot but have not
-		// yet processed it, check for results async, so we might notice pivot
-		// moves while state syncing. If the pivot was passed fully, block again
-		// as there's no more reason to check for pivot moves at all.
-		results := d.queue.Results(oldPivot == nil)
+		// Wait for the next batch of downloaded data to be available, and if the pivot
+		// block became stale, move the goalpost
+		results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness
 		if len(results) == 0 {
 			// If pivot sync is done, stop
-			if d.committed.Load() {
+			if oldPivot == nil {
 				d.reportSnapSyncProgress(true)
 				return sync.Cancel()
 			}
@@ -1652,23 +1639,21 @@ func (d *Downloader) processSnapSyncContent() error {
 		pivot := d.pivotHeader
 		d.pivotLock.RUnlock()
 
-		if oldPivot == nil { // no results piling up, we can move the pivot
-			if !d.committed.Load() { // not yet passed the pivot, we can move the pivot
-				if pivot.Root != sync.root { // pivot position changed, we can move the pivot
-					sync.Cancel()
-					sync = d.syncState(pivot.Root)
+		if oldPivot == nil {
+			if pivot.Root != sync.root {
+				sync.Cancel()
+				sync = d.syncState(pivot.Root)
 
-					go closeOnErr(sync)
-				}
+				go closeOnErr(sync)
 			}
-		} else { // results already piled up, consume before handling pivot move
+		} else {
 			results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
 		}
 		// Split around the pivot block and process the two sides via snap/full sync
 		if !d.committed.Load() {
 			latest := results[len(results)-1].Header
 			// If the height is above the pivot block by 2 sets, it means the pivot
-			// become stale in the network, and it was garbage collected, move to a
+			// become stale in the network and it was garbage collected, move to a
 			// new pivot.
 			//
 			// Note, we have `reorgProtHeaderDelay` number of blocks withheld, Those
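For context on the `results := d.queue.Results(oldPivot == nil)` call in the first hunk: the boolean decides whether the queue blocks until data arrives or returns immediately so the loop can re-check pivot staleness. Below is a minimal, hypothetical Go sketch of that block-vs-poll pattern; `resultQueue`, `item`, and the producer goroutine are invented for illustration and are not go-ethereum's actual queue implementation.

package main

import (
	"fmt"
	"time"
)

// item stands in for a downloaded block result (hypothetical type).
type item struct{ number int }

// resultQueue is a toy stand-in for the downloader's queue (hypothetical).
type resultQueue struct {
	ch chan *item
}

// Results returns the next batch of items. With block=true it waits until
// something arrives; with block=false it returns immediately (possibly empty)
// so the caller can interleave other checks, e.g. whether the pivot moved.
func (q *resultQueue) Results(block bool) []*item {
	if block {
		return []*item{<-q.ch}
	}
	select {
	case it := <-q.ch:
		return []*item{it}
	default:
		return nil // nothing ready yet; the caller will poll again
	}
}

func main() {
	q := &resultQueue{ch: make(chan *item, 4)}

	// Producer: simulates blocks trickling in from the network.
	go func() {
		for i := 1; i <= 3; i++ {
			time.Sleep(50 * time.Millisecond)
			q.ch <- &item{number: i}
		}
	}()

	pivotPending := false // analogous to oldPivot != nil in the loop above
	for processed := 0; processed < 3; {
		// Block only while we are not watching for pivot staleness.
		results := q.Results(!pivotPending)
		if len(results) == 0 {
			// Nothing ready: a real loop would re-check the pivot here.
			time.Sleep(10 * time.Millisecond)
			continue
		}
		for _, it := range results {
			fmt.Println("imported block", it.number)
			processed++
		}
	}
}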