diff --git a/accounts/abi/bind/backend.go b/accounts/abi/bind/backend.go
index 29a4b3cb58..535493548a 100644
--- a/accounts/abi/bind/backend.go
+++ b/accounts/abi/bind/backend.go
@@ -39,7 +39,7 @@ import (
var (
// ErrNoCode is returned by call and transact operations for which the requested
// recipient contract to operate on does not exist in the state db or does not
- // have any code associated with it (i.e. suicided).
+ // have any code associated with it (i.e. self-destructed).
ErrNoCode = errors.New("no contract code at given address")
// ErrNoAcceptedState is raised when attempting to perform a accepted state action
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index f1e5eb19e5..390a4536d2 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -854,7 +854,7 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
}
block := b.blockchain.GetBlockByHash(b.acceptedBlock.ParentHash())
if block == nil {
- return fmt.Errorf("could not find parent")
+ return errors.New("could not find parent")
}
blocks, _, _ := core.GenerateChain(b.config, block, dummy.NewFaker(), b.database, 1, 10, func(number int, block *core.BlockGen) {
diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go
index 66ca116dec..0fda7615ec 100644
--- a/accounts/abi/bind/backends/simulated_test.go
+++ b/accounts/abi/bind/backends/simulated_test.go
@@ -163,6 +163,7 @@ func TestAdjustTime(t *testing.T) {
func TestNewAdjustTimeFail(t *testing.T) {
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
sim := simTestBackend(testAddr)
+ defer sim.blockchain.Stop()
// Create tx and send
head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough
diff --git a/accounts/abi/error.go b/accounts/abi/error.go
index d94c262124..34bb373c60 100644
--- a/accounts/abi/error.go
+++ b/accounts/abi/error.go
@@ -42,7 +42,7 @@ type Error struct {
str string
// Sig contains the string signature according to the ABI spec.
- // e.g. error foo(uint32 a, int b) = "foo(uint32,int256)"
+ // e.g. error foo(uint32 a, int b) = "foo(uint32,int256)"
// Please note that "int" is substitute for its canonical representation "int256"
Sig string
diff --git a/accounts/abi/reflect.go b/accounts/abi/reflect.go
index 9f7a07a0c3..4adbf5b1c6 100644
--- a/accounts/abi/reflect.go
+++ b/accounts/abi/reflect.go
@@ -238,7 +238,7 @@ func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[stri
structFieldName := ToCamelCase(argName)
if structFieldName == "" {
- return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct")
+ return nil, errors.New("abi: purely underscored output cannot unpack to struct")
}
// this abi has already been paired, skip it... unless there exists another, yet unassigned
diff --git a/accounts/abi/type.go b/accounts/abi/type.go
index 47a3481d3a..e637f6d75f 100644
--- a/accounts/abi/type.go
+++ b/accounts/abi/type.go
@@ -80,7 +80,7 @@ var (
func NewType(t string, internalType string, components []ArgumentMarshaling) (typ Type, err error) {
// check that array brackets are equal if they exist
if strings.Count(t, "[") != strings.Count(t, "]") {
- return Type{}, fmt.Errorf("invalid arg type in abi")
+ return Type{}, errors.New("invalid arg type in abi")
}
typ.stringKind = t
@@ -119,7 +119,7 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
}
typ.stringKind = embeddedType.stringKind + sliced
} else {
- return Type{}, fmt.Errorf("invalid formatting of array type")
+ return Type{}, errors.New("invalid formatting of array type")
}
return typ, err
}
@@ -358,7 +358,7 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
}
}
-// requireLengthPrefix returns whether the type requires any sort of length
+// requiresLengthPrefix returns whether the type requires any sort of length
// prefixing.
func (t Type) requiresLengthPrefix() bool {
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy
diff --git a/accounts/abi/unpack.go b/accounts/abi/unpack.go
index bc57d71db6..2899e5a5b3 100644
--- a/accounts/abi/unpack.go
+++ b/accounts/abi/unpack.go
@@ -28,6 +28,7 @@ package abi
import (
"encoding/binary"
+ "errors"
"fmt"
"math"
"math/big"
@@ -135,7 +136,7 @@ func readBool(word []byte) (bool, error) {
// readFunctionType enforces that standard by always presenting it as a 24-array (address + sig = 24 bytes)
func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
if t.T != FunctionTy {
- return [24]byte{}, fmt.Errorf("abi: invalid type in call to make function type byte array")
+ return [24]byte{}, errors.New("abi: invalid type in call to make function type byte array")
}
if garbage := binary.BigEndian.Uint64(word[24:32]); garbage != 0 {
err = fmt.Errorf("abi: got improperly encoded function type, got %v", word)
@@ -148,7 +149,7 @@ func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
// ReadFixedBytes uses reflection to create a fixed array to be read from.
func ReadFixedBytes(t Type, word []byte) (interface{}, error) {
if t.T != FixedBytesTy {
- return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array")
+ return nil, errors.New("abi: invalid type in call to make fixed byte array")
}
// convert
array := reflect.New(t.GetType()).Elem()
@@ -176,7 +177,7 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
// declare our array
refSlice = reflect.New(t.GetType()).Elem()
} else {
- return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage")
+ return nil, errors.New("abi: invalid type in array/slice unpacking stage")
}
// Arrays have packed elements, resulting in longer unpack steps.
diff --git a/accounts/external/backend.go b/accounts/external/backend.go
index 2d919be17c..31f8d6804e 100644
--- a/accounts/external/backend.go
+++ b/accounts/external/backend.go
@@ -27,6 +27,7 @@
package external
import (
+ "errors"
"fmt"
"math/big"
"sync"
@@ -108,11 +109,11 @@ func (api *ExternalSigner) Status() (string, error) {
}
func (api *ExternalSigner) Open(passphrase string) error {
- return fmt.Errorf("operation not supported on external signers")
+ return errors.New("operation not supported on external signers")
}
func (api *ExternalSigner) Close() error {
- return fmt.Errorf("operation not supported on external signers")
+ return errors.New("operation not supported on external signers")
}
func (api *ExternalSigner) Accounts() []accounts.Account {
@@ -155,7 +156,7 @@ func (api *ExternalSigner) Contains(account accounts.Account) bool {
}
func (api *ExternalSigner) Derive(path accounts.DerivationPath, pin bool) (accounts.Account, error) {
- return accounts.Account{}, fmt.Errorf("operation not supported on external signers")
+ return accounts.Account{}, errors.New("operation not supported on external signers")
}
func (api *ExternalSigner) SelfDerive(bases []accounts.DerivationPath, chain interfaces.ChainStateReader) {
@@ -252,14 +253,14 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
}
func (api *ExternalSigner) SignTextWithPassphrase(account accounts.Account, passphrase string, text []byte) ([]byte, error) {
- return []byte{}, fmt.Errorf("password-operations not supported on external signers")
+ return []byte{}, errors.New("password-operations not supported on external signers")
}
func (api *ExternalSigner) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
- return nil, fmt.Errorf("password-operations not supported on external signers")
+ return nil, errors.New("password-operations not supported on external signers")
}
func (api *ExternalSigner) SignDataWithPassphrase(account accounts.Account, passphrase, mimeType string, data []byte) ([]byte, error) {
- return nil, fmt.Errorf("password-operations not supported on external signers")
+ return nil, errors.New("password-operations not supported on external signers")
}
func (api *ExternalSigner) listAccounts() ([]common.Address, error) {
diff --git a/accounts/keystore/account_cache.go b/accounts/keystore/account_cache.go
index 12b19ef5b9..4284d29f47 100644
--- a/accounts/keystore/account_cache.go
+++ b/accounts/keystore/account_cache.go
@@ -41,6 +41,7 @@ import (
mapset "github.com/deckarep/golang-set/v2"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
+ "golang.org/x/exp/slices"
)
// Minimum amount of time between cache reloads. This limit applies if the platform does
@@ -48,11 +49,10 @@ import (
// exist yet, the code will attempt to create a watcher at most this often.
const minReloadInterval = 2 * time.Second
-type accountsByURL []accounts.Account
-
-func (s accountsByURL) Len() int { return len(s) }
-func (s accountsByURL) Less(i, j int) bool { return s[i].URL.Cmp(s[j].URL) < 0 }
-func (s accountsByURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+// byURL defines the sorting order for accounts.
+func byURL(a, b accounts.Account) int {
+ return a.URL.Cmp(b.URL)
+}
// AmbiguousAddrError is returned when attempting to unlock
// an address for which more than one file exists.
@@ -77,7 +77,7 @@ type accountCache struct {
keydir string
watcher *watcher
mu sync.Mutex
- all accountsByURL
+ all []accounts.Account
byAddr map[common.Address][]accounts.Account
throttle *time.Timer
notify chan struct{}
@@ -204,7 +204,7 @@ func (ac *accountCache) find(a accounts.Account) (accounts.Account, error) {
default:
err := &AmbiguousAddrError{Addr: a.Address, Matches: make([]accounts.Account, len(matches))}
copy(err.Matches, matches)
- sort.Sort(accountsByURL(err.Matches))
+ slices.SortFunc(err.Matches, byURL)
return accounts.Account{}, err
}
}
diff --git a/accounts/keystore/account_cache_test.go b/accounts/keystore/account_cache_test.go
index 626c93ac6b..334e90d948 100644
--- a/accounts/keystore/account_cache_test.go
+++ b/accounts/keystore/account_cache_test.go
@@ -27,12 +27,12 @@
package keystore
import (
+ "errors"
"fmt"
"math/rand"
"os"
"path/filepath"
"reflect"
- "sort"
"testing"
"time"
@@ -40,6 +40,7 @@ import (
"github.com/cespare/cp"
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/common"
+ "golang.org/x/exp/slices"
)
var (
@@ -84,7 +85,7 @@ func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
select {
case <-ks.changes:
default:
- return fmt.Errorf("wasn't notified of new accounts")
+ return errors.New("wasn't notified of new accounts")
}
return nil
}
@@ -212,7 +213,7 @@ func TestCacheAddDeleteOrder(t *testing.T) {
// Check that the account list is sorted by filename.
wantAccounts := make([]accounts.Account, len(accs))
copy(wantAccounts, accs)
- sort.Sort(accountsByURL(wantAccounts))
+ slices.SortFunc(wantAccounts, byURL)
list := cache.accounts()
if !reflect.DeepEqual(list, wantAccounts) {
t.Fatalf("got accounts: %s\nwant %s", spew.Sdump(accs), spew.Sdump(wantAccounts))
diff --git a/accounts/keystore/keystore_test.go b/accounts/keystore/keystore_test.go
index 2d52a4c015..0219a3dde1 100644
--- a/accounts/keystore/keystore_test.go
+++ b/accounts/keystore/keystore_test.go
@@ -30,7 +30,6 @@ import (
"math/rand"
"os"
"runtime"
- "sort"
"strings"
"sync"
"sync/atomic"
@@ -41,6 +40,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/event"
+ "golang.org/x/exp/slices"
)
var testSigData = make([]byte, 32)
@@ -410,19 +410,19 @@ func TestImportRace(t *testing.T) {
t.Fatalf("failed to export account: %v", acc)
}
_, ks2 := tmpKeyStore(t, true)
- var atom uint32
+ var atom atomic.Uint32
var wg sync.WaitGroup
wg.Add(2)
for i := 0; i < 2; i++ {
go func() {
defer wg.Done()
if _, err := ks2.Import(json, "new", "new"); err != nil {
- atomic.AddUint32(&atom, 1)
+ atom.Add(1)
}
}()
}
wg.Wait()
- if atom != 1 {
+ if atom.Load() != 1 {
t.Errorf("Import is racy")
}
}
@@ -437,7 +437,7 @@ func checkAccounts(t *testing.T, live map[common.Address]accounts.Account, walle
for _, account := range live {
liveList = append(liveList, account)
}
- sort.Sort(accountsByURL(liveList))
+ slices.SortFunc(liveList, byURL)
for j, wallet := range wallets {
if accs := wallet.Accounts(); len(accs) != 1 {
t.Errorf("wallet %d: contains invalid number of accounts: have %d, want 1", j, len(accs))
diff --git a/accounts/keystore/passphrase.go b/accounts/keystore/passphrase.go
index a149a2887a..acf944f717 100644
--- a/accounts/keystore/passphrase.go
+++ b/accounts/keystore/passphrase.go
@@ -235,10 +235,13 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
if err != nil {
return nil, err
}
- key := crypto.ToECDSAUnsafe(keyBytes)
+ key, err := crypto.ToECDSA(keyBytes)
+ if err != nil {
+ return nil, fmt.Errorf("invalid key: %w", err)
+ }
id, err := uuid.FromBytes(keyId)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("invalid UUID: %w", err)
}
return &Key{
Id: id,
diff --git a/accounts/scwallet/securechannel.go b/accounts/scwallet/securechannel.go
index 5fc816e12a..062bfcb198 100644
--- a/accounts/scwallet/securechannel.go
+++ b/accounts/scwallet/securechannel.go
@@ -34,6 +34,7 @@ import (
"crypto/rand"
"crypto/sha256"
"crypto/sha512"
+ "errors"
"fmt"
"github.com/ethereum/go-ethereum/crypto"
@@ -135,7 +136,7 @@ func (s *SecureChannelSession) Pair(pairingPassword []byte) error {
// Unpair disestablishes an existing pairing.
func (s *SecureChannelSession) Unpair() error {
if s.PairingKey == nil {
- return fmt.Errorf("cannot unpair: not paired")
+ return errors.New("cannot unpair: not paired")
}
_, err := s.transmitEncrypted(claSCWallet, insUnpair, s.PairingIndex, 0, []byte{})
@@ -151,7 +152,7 @@ func (s *SecureChannelSession) Unpair() error {
// Open initializes the secure channel.
func (s *SecureChannelSession) Open() error {
if s.iv != nil {
- return fmt.Errorf("session already opened")
+ return errors.New("session already opened")
}
response, err := s.open()
@@ -225,7 +226,7 @@ func (s *SecureChannelSession) pair(p1 uint8, data []byte) (*responseAPDU, error
// transmitEncrypted sends an encrypted message, and decrypts and returns the response.
func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []byte) (*responseAPDU, error) {
if s.iv == nil {
- return nil, fmt.Errorf("channel not open")
+ return nil, errors.New("channel not open")
}
data, err := s.encryptAPDU(data)
@@ -264,7 +265,7 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b
return nil, err
}
if !bytes.Equal(s.iv, rmac) {
- return nil, fmt.Errorf("invalid MAC in response")
+ return nil, errors.New("invalid MAC in response")
}
rapdu := &responseAPDU{}
@@ -329,7 +330,7 @@ func unpad(data []byte, terminator byte) ([]byte, error) {
return nil, fmt.Errorf("expected end of padding, got %d", data[len(data)-i])
}
}
- return nil, fmt.Errorf("expected end of padding, got 0")
+ return nil, errors.New("expected end of padding, got 0")
}
// updateIV is an internal method that updates the initialization vector after
diff --git a/accounts/scwallet/wallet.go b/accounts/scwallet/wallet.go
index 721ea38b85..7ef6e5ae18 100644
--- a/accounts/scwallet/wallet.go
+++ b/accounts/scwallet/wallet.go
@@ -262,7 +262,7 @@ func (w *Wallet) release() error {
// with the wallet.
func (w *Wallet) pair(puk []byte) error {
if w.session.paired() {
- return fmt.Errorf("wallet already paired")
+ return errors.New("wallet already paired")
}
pairing, err := w.session.pair(puk)
if err != nil {
@@ -823,7 +823,7 @@ func (s *Session) pair(secret []byte) (smartcardPairing, error) {
// unpair deletes an existing pairing.
func (s *Session) unpair() error {
if !s.verified {
- return fmt.Errorf("unpair requires that the PIN be verified")
+ return errors.New("unpair requires that the PIN be verified")
}
return s.Channel.Unpair()
}
@@ -917,7 +917,7 @@ func (s *Session) initialize(seed []byte) error {
return err
}
if status == "Online" {
- return fmt.Errorf("card is already initialized, cowardly refusing to proceed")
+ return errors.New("card is already initialized, cowardly refusing to proceed")
}
s.Wallet.lock.Lock()
diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go
index 2b2f03f1ea..a68b2416c0 100644
--- a/consensus/dummy/consensus.go
+++ b/consensus/dummy/consensus.go
@@ -11,6 +11,7 @@ import (
"time"
"github.com/ava-labs/coreth/consensus"
+ "github.com/ava-labs/coreth/consensus/misc/eip4844"
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
@@ -220,13 +221,21 @@ func (self *DummyEngine) verifyHeader(chain consensus.ChainHeaderReader, header
if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(big.NewInt(1)) != 0 {
return consensus.ErrInvalidNumber
}
- // Verify the existence / non-existence of excessDataGas
+ // Verify the existence / non-existence of excessBlobGas
cancun := chain.Config().IsCancun(header.Time)
- if cancun && header.ExcessDataGas == nil {
- return errors.New("missing excessDataGas")
+ if cancun && header.ExcessBlobGas == nil {
+ return errors.New("missing excessBlobGas")
}
- if !cancun && header.ExcessDataGas != nil {
- return fmt.Errorf("invalid excessDataGas: have %d, expected nil", header.ExcessDataGas)
+ if !cancun && header.ExcessBlobGas != nil {
+ return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", header.ExcessBlobGas)
+ }
+ if !cancun && header.BlobGasUsed != nil {
+ return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", *header.BlobGasUsed)
+ }
+ if cancun {
+ if err := eip4844.VerifyEIP4844Header(parent, header); err != nil {
+ return err
+ }
}
return nil
}
diff --git a/consensus/misc/eip4844.go b/consensus/misc/eip4844.go
deleted file mode 100644
index f0bca33d01..0000000000
--- a/consensus/misc/eip4844.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package misc
-
-import (
- "math/big"
-
- "github.com/ava-labs/coreth/params"
-)
-
-var (
- minDataGasPrice = big.NewInt(params.BlobTxMinDataGasprice)
- dataGaspriceUpdateFraction = big.NewInt(params.BlobTxDataGaspriceUpdateFraction)
-)
-
-// CalcBlobFee calculates the blobfee from the header's excess data gas field.
-func CalcBlobFee(excessDataGas *big.Int) *big.Int {
- // If this block does not yet have EIP-4844 enabled, return the starting fee
- if excessDataGas == nil {
- return big.NewInt(params.BlobTxMinDataGasprice)
- }
- return fakeExponential(minDataGasPrice, excessDataGas, dataGaspriceUpdateFraction)
-}
-
-// fakeExponential approximates factor * e ** (numerator / denominator) using
-// Taylor expansion.
-func fakeExponential(factor, numerator, denominator *big.Int) *big.Int {
- var (
- output = new(big.Int)
- accum = new(big.Int).Mul(factor, denominator)
- )
- for i := 1; accum.Sign() > 0; i++ {
- output.Add(output, accum)
-
- accum.Mul(accum, numerator)
- accum.Div(accum, denominator)
- accum.Div(accum, big.NewInt(int64(i)))
- }
- return output.Div(output, denominator)
-}
diff --git a/consensus/misc/eip4844/eip4844.go b/consensus/misc/eip4844/eip4844.go
new file mode 100644
index 0000000000..5f603f38e6
--- /dev/null
+++ b/consensus/misc/eip4844/eip4844.go
@@ -0,0 +1,108 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package eip4844
+
+import (
+ "errors"
+ "fmt"
+ "math/big"
+
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/params"
+)
+
+var (
+ minBlobGasPrice = big.NewInt(params.BlobTxMinBlobGasprice)
+ blobGaspriceUpdateFraction = big.NewInt(params.BlobTxBlobGaspriceUpdateFraction)
+)
+
+// VerifyEIP4844Header verifies the presence of the excessBlobGas field and that
+// if the current block contains no transactions, the excessBlobGas is updated
+// accordingly.
+func VerifyEIP4844Header(parent, header *types.Header) error {
+ // Verify the header is not malformed
+ if header.ExcessBlobGas == nil {
+ return errors.New("header is missing excessBlobGas")
+ }
+ if header.BlobGasUsed == nil {
+ return errors.New("header is missing blobGasUsed")
+ }
+ // Verify that the blob gas used remains within reasonable limits.
+ if *header.BlobGasUsed > params.BlobTxMaxBlobGasPerBlock {
+ return fmt.Errorf("blob gas used %d exceeds maximum allowance %d", *header.BlobGasUsed, params.BlobTxMaxBlobGasPerBlock)
+ }
+ if *header.BlobGasUsed%params.BlobTxBlobGasPerBlob != 0 {
+ return fmt.Errorf("blob gas used %d not a multiple of blob gas per blob %d", header.BlobGasUsed, params.BlobTxBlobGasPerBlob)
+ }
+ // Verify the excessBlobGas is correct based on the parent header
+ var (
+ parentExcessBlobGas uint64
+ parentBlobGasUsed uint64
+ )
+ if parent.ExcessBlobGas != nil {
+ parentExcessBlobGas = *parent.ExcessBlobGas
+ parentBlobGasUsed = *parent.BlobGasUsed
+ }
+ expectedExcessBlobGas := CalcExcessBlobGas(parentExcessBlobGas, parentBlobGasUsed)
+ if *header.ExcessBlobGas != expectedExcessBlobGas {
+ return fmt.Errorf("invalid excessBlobGas: have %d, want %d, parent excessBlobGas %d, parent blobDataUsed %d",
+ *header.ExcessBlobGas, expectedExcessBlobGas, parentExcessBlobGas, parentBlobGasUsed)
+ }
+ return nil
+}
+
+// CalcExcessBlobGas calculates the excess blob gas after applying the set of
+// blobs on top of the excess blob gas.
+func CalcExcessBlobGas(parentExcessBlobGas uint64, parentBlobGasUsed uint64) uint64 {
+ excessBlobGas := parentExcessBlobGas + parentBlobGasUsed
+ if excessBlobGas < params.BlobTxTargetBlobGasPerBlock {
+ return 0
+ }
+ return excessBlobGas - params.BlobTxTargetBlobGasPerBlock
+}
+
+// CalcBlobFee calculates the blobfee from the header's excess blob gas field.
+func CalcBlobFee(excessBlobGas uint64) *big.Int {
+ return fakeExponential(minBlobGasPrice, new(big.Int).SetUint64(excessBlobGas), blobGaspriceUpdateFraction)
+}
+
+// fakeExponential approximates factor * e ** (numerator / denominator) using
+// Taylor expansion.
+func fakeExponential(factor, numerator, denominator *big.Int) *big.Int {
+ var (
+ output = new(big.Int)
+ accum = new(big.Int).Mul(factor, denominator)
+ )
+ for i := 1; accum.Sign() > 0; i++ {
+ output.Add(output, accum)
+
+ accum.Mul(accum, numerator)
+ accum.Div(accum, denominator)
+ accum.Div(accum, big.NewInt(int64(i)))
+ }
+ return output.Div(output, denominator)
+}
diff --git a/consensus/misc/eip4844_test.go b/consensus/misc/eip4844/eip4844_test.go
similarity index 58%
rename from consensus/misc/eip4844_test.go
rename to consensus/misc/eip4844/eip4844_test.go
index 2d35f09747..d00301f799 100644
--- a/consensus/misc/eip4844_test.go
+++ b/consensus/misc/eip4844/eip4844_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package misc
+package eip4844
import (
"fmt"
@@ -24,9 +24,42 @@ import (
"github.com/ava-labs/coreth/params"
)
+func TestCalcExcessBlobGas(t *testing.T) {
+ var tests = []struct {
+ excess uint64
+ blobs uint64
+ want uint64
+ }{
+ // The excess blob gas should not increase from zero if the used blob
+ // slots are below - or equal - to the target.
+ {0, 0, 0},
+ {0, 1, 0},
+ {0, params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob, 0},
+
+ // If the target blob gas is exceeded, the excessBlobGas should increase
+ // by however much it was overshot
+ {0, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) + 1, params.BlobTxBlobGasPerBlob},
+ {1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) + 1, params.BlobTxBlobGasPerBlob + 1},
+ {1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) + 2, 2*params.BlobTxBlobGasPerBlob + 1},
+
+ // The excess blob gas should decrease by however much the target was
+ // under-shot, capped at zero.
+ {params.BlobTxTargetBlobGasPerBlock, params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob, params.BlobTxTargetBlobGasPerBlock},
+ {params.BlobTxTargetBlobGasPerBlock, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 1, params.BlobTxBlobGasPerBlob},
+ {params.BlobTxTargetBlobGasPerBlock, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 2, 0},
+ {params.BlobTxBlobGasPerBlob - 1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 1, 0},
+ }
+ for _, tt := range tests {
+ result := CalcExcessBlobGas(tt.excess, tt.blobs*params.BlobTxBlobGasPerBlob)
+ if result != tt.want {
+ t.Errorf("excess blob gas mismatch: have %v, want %v", result, tt.want)
+ }
+ }
+}
+
func TestCalcBlobFee(t *testing.T) {
tests := []struct {
- excessDataGas int64
+ excessBlobGas uint64
blobfee int64
}{
{0, 1},
@@ -34,12 +67,8 @@ func TestCalcBlobFee(t *testing.T) {
{1542707, 2},
{10 * 1024 * 1024, 111},
}
- have := CalcBlobFee(nil)
- if have.Int64() != params.BlobTxMinDataGasprice {
- t.Errorf("nil test: blobfee mismatch: have %v, want %v", have, params.BlobTxMinDataGasprice)
- }
for i, tt := range tests {
- have := CalcBlobFee(big.NewInt(tt.excessDataGas))
+ have := CalcBlobFee(tt.excessBlobGas)
if have.Int64() != tt.blobfee {
t.Errorf("test %d: blobfee mismatch: have %v want %v", i, have, tt.blobfee)
}
diff --git a/core/block_validator.go b/core/block_validator.go
index 5433c8c4ec..ba9b5b37e9 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -27,6 +27,7 @@
package core
import (
+ "errors"
"fmt"
"github.com/ava-labs/coreth/consensus"
@@ -77,6 +78,23 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash {
return fmt.Errorf("transaction root hash mismatch (header value %x, calculated %x)", header.TxHash, hash)
}
+ // Blob transactions may be present after the Cancun fork.
+ var blobs int
+ for _, tx := range block.Transactions() {
+ // Count the number of blobs to validate against the header's blobGasUsed
+ blobs += len(tx.BlobHashes())
+ // The individual checks for blob validity (version-check + not empty)
+ // happens in the state_transition check.
+ }
+ if header.BlobGasUsed != nil {
+ if want := *header.BlobGasUsed / params.BlobTxBlobGasPerBlob; uint64(blobs) != want { // div because the header is surely good vs the body might be bloated
+ return fmt.Errorf("blob gas used mismatch (header %v, calculated %v)", *header.BlobGasUsed, blobs*params.BlobTxBlobGasPerBlob)
+ }
+ } else {
+ if blobs > 0 {
+ return errors.New("data blobs present in block body")
+ }
+ }
if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
return consensus.ErrUnknownAncestor
diff --git a/core/blockchain.go b/core/blockchain.go
index 928c0e2e4b..74bacdfedb 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -32,6 +32,7 @@ import (
"errors"
"fmt"
"io"
+ "math/big"
"runtime"
"strings"
"sync"
@@ -39,6 +40,7 @@ import (
"time"
"github.com/ava-labs/coreth/consensus"
+ "github.com/ava-labs/coreth/consensus/misc/eip4844"
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/state/snapshot"
@@ -97,6 +99,9 @@ var (
errFutureBlockUnsupported = errors.New("future block insertion not supported")
errCacheConfigNotSpecified = errors.New("must specify cache config")
+
+ errInvalidOldChain = errors.New("invalid old chain")
+ errInvalidNewChain = errors.New("invalid new chain")
)
const (
@@ -771,7 +776,7 @@ func (bc *BlockChain) ExportCallback(callback func(block *types.Block) error, fi
return fmt.Errorf("export failed on #%d: not found", nr)
}
if nr > first && block.ParentHash() != parentHash {
- return fmt.Errorf("export failed: chain reorg during export")
+ return errors.New("export failed: chain reorg during export")
}
parentHash = block.Hash()
if err := callback(block); err != nil {
@@ -1160,9 +1165,9 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
// diff layer for the block.
var err error
if bc.snaps == nil {
- _, err = state.Commit(bc.chainConfig.IsEIP158(block.Number()), true)
+ _, err = state.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), true)
} else {
- _, err = state.CommitWithSnap(bc.chainConfig.IsEIP158(block.Number()), bc.snaps, block.Hash(), block.ParentHash(), true)
+ _, err = state.CommitWithSnap(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.snaps, block.Hash(), block.ParentHash(), true)
}
if err != nil {
return err
@@ -1391,20 +1396,25 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error {
// collectUnflattenedLogs collects the logs that were generated or removed during
// the processing of a block.
func (bc *BlockChain) collectUnflattenedLogs(b *types.Block, removed bool) [][]*types.Log {
+ var blobGasPrice *big.Int
+ excessBlobGas := b.ExcessBlobGas()
+ if excessBlobGas != nil {
+ blobGasPrice = eip4844.CalcBlobFee(*excessBlobGas)
+ }
receipts := rawdb.ReadRawReceipts(bc.db, b.Hash(), b.NumberU64())
- receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.Time(), b.BaseFee(), b.Transactions())
-
+ if err := receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.Time(), b.BaseFee(), blobGasPrice, b.Transactions()); err != nil {
+ log.Error("Failed to derive block receipts fields", "hash", b.Hash(), "number", b.NumberU64(), "err", err)
+ }
// Note: gross but this needs to be initialized here because returning nil will be treated specially as an incorrect
// error case downstream.
logs := make([][]*types.Log, len(receipts))
for i, receipt := range receipts {
receiptLogs := make([]*types.Log, len(receipt.Logs))
for i, log := range receipt.Logs {
- l := *log
if removed {
- l.Removed = true
+ log.Removed = true
}
- receiptLogs[i] = &l
+ receiptLogs[i] = log
}
logs[i] = receiptLogs
}
@@ -1446,10 +1456,10 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
}
}
if oldBlock == nil {
- return errors.New("invalid old chain")
+ return errInvalidOldChain
}
if newBlock == nil {
- return errors.New("invalid new chain")
+ return errInvalidNewChain
}
// Both sides of the reorg are at the same number, reduce both until the common
// ancestor is found
@@ -1466,11 +1476,11 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
// Step back with both chains
oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
if oldBlock == nil {
- return fmt.Errorf("invalid old chain")
+ return errInvalidOldChain
}
newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
if newBlock == nil {
- return fmt.Errorf("invalid new chain")
+ return errInvalidNewChain
}
}
@@ -1705,9 +1715,9 @@ func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block)
// If snapshots are enabled, call CommitWithSnaps to explicitly create a snapshot
// diff layer for the block.
if bc.snaps == nil {
- return statedb.Commit(bc.chainConfig.IsEIP158(current.Number()), false)
+ return statedb.Commit(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()), false)
}
- return statedb.CommitWithSnap(bc.chainConfig.IsEIP158(current.Number()), bc.snaps, current.Hash(), current.ParentHash(), false)
+ return statedb.CommitWithSnap(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()), bc.snaps, current.Hash(), current.ParentHash(), false)
}
// initSnapshot instantiates a Snapshot instance and adds it to [bc]
diff --git a/core/chain_indexer.go b/core/chain_indexer.go
index ca6032595b..51b135b03b 100644
--- a/core/chain_indexer.go
+++ b/core/chain_indexer.go
@@ -29,6 +29,7 @@ package core
import (
"context"
"encoding/binary"
+ "errors"
"fmt"
"sync"
"sync/atomic"
@@ -413,7 +414,7 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
if header == nil {
return common.Hash{}, fmt.Errorf("block #%d [%x..] not found", number, hash[:4])
} else if header.ParentHash != lastHead {
- return common.Hash{}, fmt.Errorf("chain reorged during section processing")
+ return common.Hash{}, errors.New("chain reorged during section processing")
}
if err := c.backend.Process(c.ctx, header); err != nil {
return common.Hash{}, err
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 349c8a418f..809ee6ce21 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -258,7 +258,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
}
// Write state changes to db
- root, err := statedb.Commit(config.IsEIP158(b.header.Number), false)
+ root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), false)
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
}
diff --git a/core/error.go b/core/error.go
index 0137b5dae2..461efa3e28 100644
--- a/core/error.go
+++ b/core/error.go
@@ -105,4 +105,8 @@ var (
// ErrSenderNoEOA is returned if the sender of a transaction is a contract.
ErrSenderNoEOA = errors.New("sender not an eoa")
+
+ // ErrBlobFeeCapTooLow is returned if the transaction fee cap is less than the
+ // blob gas fee of the block.
+ ErrBlobFeeCapTooLow = errors.New("max fee per blob gas less than block blob gas fee")
)
diff --git a/core/evm.go b/core/evm.go
index 7cce46f9ad..d54145812e 100644
--- a/core/evm.go
+++ b/core/evm.go
@@ -74,14 +74,16 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
Difficulty: new(big.Int).Set(header.Difficulty),
BaseFee: baseFee,
GasLimit: header.GasLimit,
+ ExcessBlobGas: header.ExcessBlobGas,
}
}
// NewEVMTxContext creates a new transaction context for a single transaction.
func NewEVMTxContext(msg *Message) vm.TxContext {
return vm.TxContext{
- Origin: msg.From,
- GasPrice: new(big.Int).Set(msg.GasPrice),
+ Origin: msg.From,
+ GasPrice: new(big.Int).Set(msg.GasPrice),
+ BlobHashes: msg.BlobHashes,
}
}
diff --git a/core/gen_genesis.go b/core/gen_genesis.go
index a4ec8f54dd..ac00615c16 100644
--- a/core/gen_genesis.go
+++ b/core/gen_genesis.go
@@ -18,19 +18,21 @@ var _ = (*genesisSpecMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (g Genesis) MarshalJSON() ([]byte, error) {
type Genesis struct {
- Config *params.ChainConfig `json:"config"`
- Nonce math.HexOrDecimal64 `json:"nonce"`
- Timestamp math.HexOrDecimal64 `json:"timestamp"`
- ExtraData hexutil.Bytes `json:"extraData"`
- GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
- Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
- Mixhash common.Hash `json:"mixHash"`
- Coinbase common.Address `json:"coinbase"`
- Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"`
- Number math.HexOrDecimal64 `json:"number"`
- GasUsed math.HexOrDecimal64 `json:"gasUsed"`
- ParentHash common.Hash `json:"parentHash"`
- BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
+ Config *params.ChainConfig `json:"config"`
+ Nonce math.HexOrDecimal64 `json:"nonce"`
+ Timestamp math.HexOrDecimal64 `json:"timestamp"`
+ ExtraData hexutil.Bytes `json:"extraData"`
+ GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
+ Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
+ Mixhash common.Hash `json:"mixHash"`
+ Coinbase common.Address `json:"coinbase"`
+ Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"`
+ Number math.HexOrDecimal64 `json:"number"`
+ GasUsed math.HexOrDecimal64 `json:"gasUsed"`
+ ParentHash common.Hash `json:"parentHash"`
+ BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
+ ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
+ BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
}
var enc Genesis
enc.Config = g.Config
@@ -51,25 +53,29 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
enc.GasUsed = math.HexOrDecimal64(g.GasUsed)
enc.ParentHash = g.ParentHash
enc.BaseFee = (*math.HexOrDecimal256)(g.BaseFee)
+ enc.ExcessBlobGas = (*math.HexOrDecimal64)(g.ExcessBlobGas)
+ enc.BlobGasUsed = (*math.HexOrDecimal64)(g.BlobGasUsed)
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (g *Genesis) UnmarshalJSON(input []byte) error {
type Genesis struct {
- Config *params.ChainConfig `json:"config"`
- Nonce *math.HexOrDecimal64 `json:"nonce"`
- Timestamp *math.HexOrDecimal64 `json:"timestamp"`
- ExtraData *hexutil.Bytes `json:"extraData"`
- GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
- Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
- Mixhash *common.Hash `json:"mixHash"`
- Coinbase *common.Address `json:"coinbase"`
- Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"`
- Number *math.HexOrDecimal64 `json:"number"`
- GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
- ParentHash *common.Hash `json:"parentHash"`
- BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
+ Config *params.ChainConfig `json:"config"`
+ Nonce *math.HexOrDecimal64 `json:"nonce"`
+ Timestamp *math.HexOrDecimal64 `json:"timestamp"`
+ ExtraData *hexutil.Bytes `json:"extraData"`
+ GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
+ Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
+ Mixhash *common.Hash `json:"mixHash"`
+ Coinbase *common.Address `json:"coinbase"`
+ Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"`
+ Number *math.HexOrDecimal64 `json:"number"`
+ GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
+ ParentHash *common.Hash `json:"parentHash"`
+ BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
+ ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
+ BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
}
var dec Genesis
if err := json.Unmarshal(input, &dec); err != nil {
@@ -120,5 +126,11 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
if dec.BaseFee != nil {
g.BaseFee = (*big.Int)(dec.BaseFee)
}
+ if dec.ExcessBlobGas != nil {
+ g.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
+ }
+ if dec.BlobGasUsed != nil {
+ g.BlobGasUsed = (*uint64)(dec.BlobGasUsed)
+ }
return nil
}
diff --git a/core/genesis.go b/core/genesis.go
index d5c2fe386d..ff67c96fd6 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -66,10 +66,12 @@ type Genesis struct {
// These fields are used for consensus tests. Please don't use them
// in actual genesis blocks.
- Number uint64 `json:"number"`
- GasUsed uint64 `json:"gasUsed"`
- ParentHash common.Hash `json:"parentHash"`
- BaseFee *big.Int `json:"baseFeePerGas"`
+ Number uint64 `json:"number"`
+ GasUsed uint64 `json:"gasUsed"`
+ ParentHash common.Hash `json:"parentHash"`
+ BaseFee *big.Int `json:"baseFeePerGas"`
+ ExcessBlobGas *uint64 `json:"excessBlobGas"` // EIP-4844
+ BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844
}
// GenesisAlloc specifies the initial state that is part of the genesis block.
@@ -101,15 +103,17 @@ type GenesisAccount struct {
// field type overrides for gencodec
type genesisSpecMarshaling struct {
- Nonce math.HexOrDecimal64
- Timestamp math.HexOrDecimal64
- ExtraData hexutil.Bytes
- GasLimit math.HexOrDecimal64
- GasUsed math.HexOrDecimal64
- Number math.HexOrDecimal64
- Difficulty *math.HexOrDecimal256
- BaseFee *math.HexOrDecimal256
- Alloc map[common.UnprefixedAddress]GenesisAccount
+ Nonce math.HexOrDecimal64
+ Timestamp math.HexOrDecimal64
+ ExtraData hexutil.Bytes
+ GasLimit math.HexOrDecimal64
+ GasUsed math.HexOrDecimal64
+ Number math.HexOrDecimal64
+ Difficulty *math.HexOrDecimal256
+ Alloc map[common.UnprefixedAddress]GenesisAccount
+ BaseFee *math.HexOrDecimal256
+ ExcessBlobGas *math.HexOrDecimal64
+ BlobGasUsed *math.HexOrDecimal64
}
type genesisAccountMarshaling struct {
@@ -218,7 +222,7 @@ func SetupGenesisBlock(
// when we start syncing from scratch, the last accepted block
// will be genesis block
if lastBlock == nil {
- return newcfg, common.Hash{}, fmt.Errorf("missing last accepted block")
+ return newcfg, common.Hash{}, errors.New("missing last accepted block")
}
height := lastBlock.NumberU64()
timestamp := lastBlock.Time()
@@ -290,14 +294,26 @@ func (g *Genesis) toBlock(db ethdb.Database, triedb *trie.Database) *types.Block
if g.Difficulty == nil {
head.Difficulty = params.GenesisDifficulty
}
- if g.Config != nil && g.Config.IsApricotPhase3(0) {
- if g.BaseFee != nil {
- head.BaseFee = g.BaseFee
- } else {
- head.BaseFee = big.NewInt(params.ApricotPhase3InitialBaseFee)
+ if conf := g.Config; conf != nil {
+ if conf.IsApricotPhase3(0) {
+ if g.BaseFee != nil {
+ head.BaseFee = g.BaseFee
+ } else {
+ head.BaseFee = big.NewInt(params.ApricotPhase3InitialBaseFee)
+ }
+ }
+ if conf.IsCancun(g.Timestamp) {
+ head.ExcessBlobGas = g.ExcessBlobGas
+ head.BlobGasUsed = g.BlobGasUsed
+ if head.ExcessBlobGas == nil {
+ head.ExcessBlobGas = new(uint64)
+ }
+ if head.BlobGasUsed == nil {
+ head.BlobGasUsed = new(uint64)
+ }
}
}
- statedb.Commit(false, false)
+ statedb.Commit(0, false, false)
// Commit newly generated states into disk if it's not empty.
if root != types.EmptyRootHash {
if err := triedb.Commit(root, true); err != nil {
diff --git a/core/mkalloc.go b/core/mkalloc.go
index e7bdf8f1c0..e65bc859e7 100644
--- a/core/mkalloc.go
+++ b/core/mkalloc.go
@@ -40,32 +40,28 @@ import (
"fmt"
"math/big"
"os"
- "sort"
"strconv"
"github.com/ava-labs/coreth/core"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/exp/slices"
)
type allocItem struct{ Addr, Balance *big.Int }
-type allocList []allocItem
-
-func (a allocList) Len() int { return len(a) }
-func (a allocList) Less(i, j int) bool { return a[i].Addr.Cmp(a[j].Addr) < 0 }
-func (a allocList) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-func makelist(g *core.Genesis) allocList {
- a := make(allocList, 0, len(g.Alloc))
+func makelist(g *core.Genesis) []allocItem {
+ items := make([]allocItem, 0, len(g.Alloc))
for addr, account := range g.Alloc {
if len(account.Storage) > 0 || len(account.Code) > 0 || account.Nonce != 0 {
panic(fmt.Sprintf("can't encode account %x", addr))
}
bigAddr := new(big.Int).SetBytes(addr.Bytes())
- a = append(a, allocItem{bigAddr, account.Balance})
+ items = append(items, allocItem{bigAddr, account.Balance})
}
- sort.Sort(a)
- return a
+ slices.SortFunc(items, func(a, b allocItem) bool {
+ return a.Addr.Cmp(b.Addr) < 0
+ })
+ return items
}
func makealloc(g *core.Genesis) string {
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index cd44e9d743..c286b97da6 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -32,6 +32,7 @@ import (
"errors"
"math/big"
+ "github.com/ava-labs/coreth/consensus/misc/eip4844"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/ethdb"
"github.com/ava-labs/coreth/params"
@@ -397,13 +398,19 @@ func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, time uint64,
return nil
}
header := ReadHeader(db, hash, number)
+
var baseFee *big.Int
if header == nil {
baseFee = big.NewInt(0)
} else {
baseFee = header.BaseFee
}
- if err := receipts.DeriveFields(config, hash, number, time, baseFee, body.Transactions); err != nil {
+ // Compute effective blob gas price.
+ var blobGasPrice *big.Int
+ if header != nil && header.ExcessBlobGas != nil {
+ blobGasPrice = eip4844.CalcBlobFee(*header.ExcessBlobGas)
+ }
+ if err := receipts.DeriveFields(config, hash, number, time, baseFee, blobGasPrice, body.Transactions); err != nil {
log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
return nil
}
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
index 02e36e7567..27c7cf4a95 100644
--- a/core/rawdb/accessors_chain_test.go
+++ b/core/rawdb/accessors_chain_test.go
@@ -83,7 +83,7 @@ func TestBodyStorage(t *testing.T) {
WriteBody(db, hash, 0, body)
if entry := ReadBody(db, hash, 0); entry == nil {
t.Fatalf("Stored body not found")
- } else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
+ } else if types.DeriveSha(types.Transactions(entry.Transactions), newTestHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newTestHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body)
}
if entry := ReadBodyRLP(db, hash, 0); entry == nil {
@@ -137,7 +137,7 @@ func TestBlockStorage(t *testing.T) {
}
if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry == nil {
t.Fatalf("Stored body not found")
- } else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(block.Transactions(), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
+ } else if types.DeriveSha(types.Transactions(entry.Transactions), newTestHasher()) != types.DeriveSha(block.Transactions(), newTestHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body())
}
// Delete the block and verify the execution
diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go
index 8cf9d83a0d..f2128cbbfe 100644
--- a/core/rawdb/accessors_indexes_test.go
+++ b/core/rawdb/accessors_indexes_test.go
@@ -18,41 +18,17 @@ package rawdb
import (
"bytes"
- "hash"
"math/big"
"testing"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/ethdb"
+ "github.com/ava-labs/coreth/internal/blocktest"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
- "golang.org/x/crypto/sha3"
)
-// testHasher is the helper tool for transaction/receipt list hashing.
-// The original hasher is trie, in order to get rid of import cycle,
-// use the testing hasher instead.
-type testHasher struct {
- hasher hash.Hash
-}
-
-func newHasher() *testHasher {
- return &testHasher{hasher: sha3.NewLegacyKeccak256()}
-}
-
-func (h *testHasher) Reset() {
- h.hasher.Reset()
-}
-
-func (h *testHasher) Update(key, val []byte) error {
- h.hasher.Write(key)
- h.hasher.Write(val)
- return nil
-}
-
-func (h *testHasher) Hash() common.Hash {
- return common.BytesToHash(h.hasher.Sum(nil))
-}
+var newTestHasher = blocktest.NewHasher
// Tests that positional lookup metadata can be stored and retrieved.
func TestLookupStorage(t *testing.T) {
@@ -99,7 +75,7 @@ func TestLookupStorage(t *testing.T) {
tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33})
txs := []*types.Transaction{tx1, tx2, tx3}
- block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newHasher(), nil, true)
+ block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newTestHasher(), nil, true)
// Check that no transactions entries are in a pristine database
for i, tx := range txs {
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
index 329f765ec5..cf0a4e921a 100644
--- a/core/rawdb/accessors_state.go
+++ b/core/rawdb/accessors_state.go
@@ -27,6 +27,8 @@
package rawdb
import (
+ "encoding/binary"
+
"github.com/ava-labs/coreth/ethdb"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
@@ -77,3 +79,68 @@ func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
log.Crit("Failed to delete contract code", "err", err)
}
}
+
+// ReadStateID retrieves the state id with the provided state root.
+func ReadStateID(db ethdb.KeyValueReader, root common.Hash) *uint64 {
+ data, err := db.Get(stateIDKey(root))
+ if err != nil || len(data) == 0 {
+ return nil
+ }
+ number := binary.BigEndian.Uint64(data)
+ return &number
+}
+
+// WriteStateID writes the provided state lookup to database.
+func WriteStateID(db ethdb.KeyValueWriter, root common.Hash, id uint64) {
+ var buff [8]byte
+ binary.BigEndian.PutUint64(buff[:], id)
+ if err := db.Put(stateIDKey(root), buff[:]); err != nil {
+ log.Crit("Failed to store state ID", "err", err)
+ }
+}
+
+// DeleteStateID deletes the specified state lookup from the database.
+func DeleteStateID(db ethdb.KeyValueWriter, root common.Hash) {
+ if err := db.Delete(stateIDKey(root)); err != nil {
+ log.Crit("Failed to delete state ID", "err", err)
+ }
+}
+
+// ReadPersistentStateID retrieves the id of the persistent state from the database.
+func ReadPersistentStateID(db ethdb.KeyValueReader) uint64 {
+ data, _ := db.Get(persistentStateIDKey)
+ if len(data) != 8 {
+ return 0
+ }
+ return binary.BigEndian.Uint64(data)
+}
+
+// WritePersistentStateID stores the id of the persistent state into database.
+func WritePersistentStateID(db ethdb.KeyValueWriter, number uint64) {
+ if err := db.Put(persistentStateIDKey, encodeBlockNumber(number)); err != nil {
+ log.Crit("Failed to store the persistent state ID", "err", err)
+ }
+}
+
+// ReadTrieJournal retrieves the serialized in-memory trie nodes of layers saved at
+// the last shutdown.
+func ReadTrieJournal(db ethdb.KeyValueReader) []byte {
+ data, _ := db.Get(trieJournalKey)
+ return data
+}
+
+// WriteTrieJournal stores the serialized in-memory trie nodes of layers to save at
+// shutdown.
+func WriteTrieJournal(db ethdb.KeyValueWriter, journal []byte) {
+ if err := db.Put(trieJournalKey, journal); err != nil {
+ log.Crit("Failed to store tries journal", "err", err)
+ }
+}
+
+// DeleteTrieJournal deletes the serialized in-memory trie nodes of layers saved at
+// the last shutdown.
+func DeleteTrieJournal(db ethdb.KeyValueWriter) {
+ if err := db.Delete(trieJournalKey); err != nil {
+ log.Crit("Failed to remove tries journal", "err", err)
+ }
+}
diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go
index d81af9c3a5..65e13bc37f 100644
--- a/core/rawdb/accessors_trie.go
+++ b/core/rawdb/accessors_trie.go
@@ -56,21 +56,23 @@ const HashScheme = "hashScheme"
// on extra state diffs to survive deep reorg.
const PathScheme = "pathScheme"
-// nodeHasher used to derive the hash of trie node.
-type nodeHasher struct{ sha crypto.KeccakState }
+// hasher is used to compute the sha256 hash of the provided data.
+type hasher struct{ sha crypto.KeccakState }
var hasherPool = sync.Pool{
- New: func() interface{} { return &nodeHasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
+ New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
}
-func newNodeHasher() *nodeHasher { return hasherPool.Get().(*nodeHasher) }
-func returnHasherToPool(h *nodeHasher) { hasherPool.Put(h) }
+func newHasher() *hasher {
+ return hasherPool.Get().(*hasher)
+}
+
+func (h *hasher) hash(data []byte) common.Hash {
+ return crypto.HashData(h.sha, data)
+}
-func (h *nodeHasher) hashData(data []byte) (n common.Hash) {
- h.sha.Reset()
- h.sha.Write(data)
- h.sha.Read(n[:])
- return n
+func (h *hasher) release() {
+ hasherPool.Put(h)
}
// ReadAccountTrieNode retrieves the account trie node and the associated node
@@ -80,9 +82,9 @@ func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.H
if err != nil {
return nil, common.Hash{}
}
- hasher := newNodeHasher()
- defer returnHasherToPool(hasher)
- return data, hasher.hashData(data)
+ h := newHasher()
+ defer h.release()
+ return data, h.hash(data)
}
// HasAccountTrieNode checks the account trie node presence with the specified
@@ -92,9 +94,9 @@ func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash)
if err != nil {
return false
}
- hasher := newNodeHasher()
- defer returnHasherToPool(hasher)
- return hasher.hashData(data) == hash
+ h := newHasher()
+ defer h.release()
+ return h.hash(data) == hash
}
// WriteAccountTrieNode writes the provided account trie node into database.
@@ -118,9 +120,9 @@ func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path
if err != nil {
return nil, common.Hash{}
}
- hasher := newNodeHasher()
- defer returnHasherToPool(hasher)
- return data, hasher.hashData(data)
+ h := newHasher()
+ defer h.release()
+ return data, h.hash(data)
}
// HasStorageTrieNode checks the storage trie node presence with the provided
@@ -130,9 +132,9 @@ func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path [
if err != nil {
return false
}
- hasher := newNodeHasher()
- defer returnHasherToPool(hasher)
- return hasher.hashData(data) == hash
+ h := newHasher()
+ defer h.release()
+ return h.hash(data) == hash
}
// WriteStorageTrieNode writes the provided storage trie node into database.
diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go
index 282849c600..a14188e76b 100644
--- a/core/rawdb/chain_iterator_test.go
+++ b/core/rawdb/chain_iterator_test.go
@@ -44,7 +44,7 @@ func TestChainIterator(t *testing.T) {
var block *types.Block
var txs []*types.Transaction
to := common.BytesToAddress([]byte{0x11})
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher(), nil, true) // Empty genesis block
+ block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher(), nil, true) // Empty genesis block
WriteBlock(chainDb, block)
WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
for i := uint64(1); i <= 10; i++ {
@@ -70,7 +70,7 @@ func TestChainIterator(t *testing.T) {
})
}
txs = append(txs, tx)
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher(), nil, true)
+ block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher(), nil, true)
WriteBlock(chainDb, block)
WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
}
@@ -121,7 +121,7 @@ func TestIndexTransactions(t *testing.T) {
to := common.BytesToAddress([]byte{0x11})
// Write empty genesis block
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher(), nil, true)
+ block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher(), nil, true)
WriteBlock(chainDb, block)
WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
@@ -148,7 +148,7 @@ func TestIndexTransactions(t *testing.T) {
})
}
txs = append(txs, tx)
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher(), nil, true)
+ block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher(), nil, true)
WriteBlock(chainDb, block)
WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
}
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index 2f070f424b..70df0a647b 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -45,6 +45,9 @@ var (
// headHeaderKey tracks the latest known header's hash.
headHeaderKey = []byte("LastHeader")
+ // persistentStateIDKey tracks the id of latest stored state(for path-based only).
+ persistentStateIDKey = []byte("LastStateID")
+
// headBlockKey tracks the latest known full block's hash.
headBlockKey = []byte("LastBlock")
@@ -57,6 +60,9 @@ var (
// snapshotGeneratorKey tracks the snapshot generation marker across restarts.
snapshotGeneratorKey = []byte("SnapshotGenerator")
+ // trieJournalKey tracks the in-memory trie node layers across restarts.
+ trieJournalKey = []byte("TrieJournal")
+
// txIndexTailKey tracks the oldest block whose transactions have been indexed.
txIndexTailKey = []byte("TransactionIndexTail")
@@ -93,6 +99,7 @@ var (
// Path-based storage scheme of merkle patricia trie.
trieNodeAccountPrefix = []byte("A") // trieNodeAccountPrefix + hexPath -> trie node
trieNodeStoragePrefix = []byte("O") // trieNodeStoragePrefix + accountHash + hexPath -> trie node
+ stateIDPrefix = []byte("L") // stateIDPrefix + state root -> state id
PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
configPrefix = []byte("ethereum-config-") // config prefix for the db
@@ -218,6 +225,11 @@ func configKey(hash common.Hash) []byte {
return append(configPrefix, hash.Bytes()...)
}
+// stateIDKey = stateIDPrefix + root (32 bytes)
+func stateIDKey(root common.Hash) []byte {
+ return append(stateIDPrefix, root.Bytes()...)
+}
+
// accountTrieNodeKey = trieNodeAccountPrefix + nodePath.
func accountTrieNodeKey(path []byte) []byte {
return append(trieNodeAccountPrefix, path...)
diff --git a/core/state/database.go b/core/state/database.go
index 420fec75dd..ff5611c453 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -37,6 +37,7 @@ import (
"github.com/ava-labs/coreth/trie/trienode"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru"
+ "github.com/ethereum/go-ethereum/crypto"
)
const (
@@ -53,16 +54,16 @@ type Database interface {
OpenTrie(root common.Hash) (Trie, error)
// OpenStorageTrie opens the storage trie of an account.
- OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error)
+ OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash) (Trie, error)
// CopyTrie returns an independent copy of the given trie.
CopyTrie(Trie) Trie
// ContractCode retrieves a particular contract's code.
- ContractCode(addrHash, codeHash common.Hash) ([]byte, error)
+ ContractCode(addr common.Address, codeHash common.Hash) ([]byte, error)
// ContractCodeSize retrieves a particular contracts code's size.
- ContractCodeSize(addrHash, codeHash common.Hash) (int, error)
+ ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error)
// DiskDB returns the underlying key-value disk database.
DiskDB() ethdb.KeyValueStore
@@ -103,6 +104,10 @@ type Trie interface {
// in the trie with provided address.
UpdateAccount(address common.Address, account *types.StateAccount) error
+ // UpdateContractCode abstracts code write to the trie. It is expected
+ // to be moved to the stateWriter interface when the latter is ready.
+ UpdateContractCode(address common.Address, codeHash common.Hash, code []byte) error
+
// DeleteStorage removes any existing value for key from the trie. If a node
// was not found in the database, a trie.MissingNodeError is returned.
DeleteStorage(addr common.Address, key []byte) error
@@ -120,11 +125,12 @@ type Trie interface {
// The returned nodeset can be nil if the trie is clean(nothing to commit).
// Once the trie is committed, it's not usable anymore. A new trie must
// be created with new root and updated trie database for following usage
- Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet)
+ Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error)
// NodeIterator returns an iterator that returns nodes of the trie. Iteration
- // starts at the key after the given start key.
- NodeIterator(startKey []byte) trie.NodeIterator
+ // starts at the key after the given start key. And error will be returned
+ // if fails to create node iterator.
+ NodeIterator(startKey []byte) (trie.NodeIterator, error)
// Prove constructs a Merkle proof for key. The result contains all encoded nodes
// on the path to the value at key. The value itself is also included in the last
@@ -133,7 +139,7 @@ type Trie interface {
// If the trie does not contain a value for key, the returned proof contains all
// nodes of the longest existing prefix of the key (at least the root), ending
// with the node that proves the absence of the key.
- Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error
+ Prove(key []byte, proofDb ethdb.KeyValueWriter) error
}
// NewDatabase creates a backing store for state. The returned database is safe for
@@ -182,8 +188,8 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
}
// OpenStorageTrie opens the storage trie of an account.
-func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error) {
- tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, addrHash, root), db.triedb)
+func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash) (Trie, error) {
+ tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, crypto.Keccak256Hash(address.Bytes()), root), db.triedb)
if err != nil {
return nil, err
}
@@ -201,7 +207,7 @@ func (db *cachingDB) CopyTrie(t Trie) Trie {
}
// ContractCode retrieves a particular contract's code.
-func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) {
+func (db *cachingDB) ContractCode(address common.Address, codeHash common.Hash) ([]byte, error) {
code, _ := db.codeCache.Get(codeHash)
if len(code) > 0 {
return code, nil
@@ -216,11 +222,11 @@ func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error
}
// ContractCodeSize retrieves a particular contracts code's size.
-func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, error) {
+func (db *cachingDB) ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error) {
if cached, ok := db.codeSizeCache.Get(codeHash); ok {
return cached, nil
}
- code, err := db.ContractCode(addrHash, codeHash)
+ code, err := db.ContractCode(addr, codeHash)
return len(code), err
}
diff --git a/core/state/dump.go b/core/state/dump.go
index cb5e0df783..596b2740c4 100644
--- a/core/state/dump.go
+++ b/core/state/dump.go
@@ -152,7 +152,11 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []
log.Info("Trie dumping started", "root", s.trie.Hash())
c.OnRoot(s.trie.Hash())
- it := trie.NewIterator(s.trie.NodeIterator(conf.Start))
+ trieIt, err := s.trie.NodeIterator(conf.Start)
+ if err != nil {
+ return nil
+ }
+ it := trie.NewIterator(trieIt)
for it.Next() {
var data types.StateAccount
if err := rlp.DecodeBytes(it.Value, &data); err != nil {
@@ -180,18 +184,23 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []
} else {
address = &addr
}
- obj := newObject(s, addr, data)
+ obj := newObject(s, addr, &data)
if !conf.SkipCode {
- account.Code = obj.Code(s.db)
+ account.Code = obj.Code()
}
if !conf.SkipStorage {
account.Storage = make(map[common.Hash]string)
- tr, err := obj.getTrie(s.db)
+ tr, err := obj.getTrie()
if err != nil {
log.Error("Failed to load storage trie", "err", err)
continue
}
- storageIt := trie.NewIterator(tr.NodeIterator(nil))
+ trieIt, err := tr.NodeIterator(nil)
+ if err != nil {
+ log.Error("Failed to create trie iterator", "err", err)
+ continue
+ }
+ storageIt := trie.NewIterator(trieIt)
for storageIt.Next() {
_, content, _, err := rlp.Split(storageIt.Value)
if err != nil {
diff --git a/core/state/iterator.go b/core/state/iterator.go
index 4d6e2ec4d8..5888ad1919 100644
--- a/core/state/iterator.go
+++ b/core/state/iterator.go
@@ -28,6 +28,7 @@ package state
import (
"bytes"
+ "errors"
"fmt"
"github.com/ava-labs/coreth/core/types"
@@ -37,7 +38,8 @@ import (
)
// nodeIterator is an iterator to traverse the entire state trie post-order,
-// including all of the contract code and contract state tries.
+// including all of the contract code and contract state tries. Preimage is
+// required in order to resolve the contract address.
type nodeIterator struct {
state *StateDB // State being iterated
@@ -84,8 +86,12 @@ func (it *nodeIterator) step() error {
return nil
}
// Initialize the iterator if we've just started
+ var err error
if it.stateIt == nil {
- it.stateIt = it.state.trie.NodeIterator(nil)
+ it.stateIt, err = it.state.trie.NodeIterator(nil)
+ if err != nil {
+ return err
+ }
}
// If we had data nodes previously, we surely have at least state nodes
if it.dataIt != nil {
@@ -119,18 +125,28 @@ func (it *nodeIterator) step() error {
if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil {
return err
}
- dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, common.BytesToHash(it.stateIt.LeafKey()), account.Root)
+ // Lookup the preimage of account hash
+ preimage := it.state.trie.GetKey(it.stateIt.LeafKey())
+ if preimage == nil {
+ return errors.New("account address is not available")
+ }
+ address := common.BytesToAddress(preimage)
+
+ // Traverse the storage slots belong to the account
+ dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, address, account.Root)
+ if err != nil {
+ return err
+ }
+ it.dataIt, err = dataTrie.NodeIterator(nil)
if err != nil {
return err
}
- it.dataIt = dataTrie.NodeIterator(nil)
if !it.dataIt.Next(true) {
it.dataIt = nil
}
if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) {
it.codeHash = common.BytesToHash(account.CodeHash)
- addrHash := common.BytesToHash(it.stateIt.LeafKey())
- it.code, err = it.state.db.ContractCode(addrHash, common.BytesToHash(account.CodeHash))
+ it.code, err = it.state.db.ContractCode(address, common.BytesToHash(account.CodeHash))
if err != nil {
return fmt.Errorf("code %x: %v", account.CodeHash, err)
}
diff --git a/core/state/journal.go b/core/state/journal.go
index eb87a72598..5a1ee1aef6 100644
--- a/core/state/journal.go
+++ b/core/state/journal.go
@@ -100,12 +100,19 @@ type (
account *common.Address
}
resetObjectChange struct {
+ account *common.Address
prev *stateObject
prevdestruct bool
+ prevAccount []byte
+ prevStorage map[common.Hash][]byte
+
+ prevAccountOriginExist bool
+ prevAccountOrigin []byte
+ prevStorageOrigin map[common.Hash][]byte
}
- suicideChange struct {
+ selfDestructChange struct {
account *common.Address
- prev bool // whether account had already suicided
+ prev bool // whether account had already self-destructed
prevbalance *big.Int
}
@@ -172,21 +179,33 @@ func (ch resetObjectChange) revert(s *StateDB) {
if !ch.prevdestruct {
delete(s.stateObjectsDestruct, ch.prev.address)
}
+ if ch.prevAccount != nil {
+ s.accounts[ch.prev.addrHash] = ch.prevAccount
+ }
+ if ch.prevStorage != nil {
+ s.storages[ch.prev.addrHash] = ch.prevStorage
+ }
+ if ch.prevAccountOriginExist {
+ s.accountsOrigin[ch.prev.address] = ch.prevAccountOrigin
+ }
+ if ch.prevStorageOrigin != nil {
+ s.storagesOrigin[ch.prev.address] = ch.prevStorageOrigin
+ }
}
func (ch resetObjectChange) dirtied() *common.Address {
- return nil
+ return ch.account
}
-func (ch suicideChange) revert(s *StateDB) {
+func (ch selfDestructChange) revert(s *StateDB) {
obj := s.getStateObject(*ch.account)
if obj != nil {
- obj.suicided = ch.prev
+ obj.selfDestructed = ch.prev
obj.setBalance(ch.prevbalance)
}
}
-func (ch suicideChange) dirtied() *common.Address {
+func (ch selfDestructChange) dirtied() *common.Address {
return ch.account
}
diff --git a/core/state/metrics.go b/core/state/metrics.go
index f06d0ed265..5e2f060c3a 100644
--- a/core/state/metrics.go
+++ b/core/state/metrics.go
@@ -37,4 +37,11 @@ var (
storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil)
accountTrieDeletedMeter = metrics.NewRegisteredMeter("state/delete/accountnodes", nil)
storageTriesDeletedMeter = metrics.NewRegisteredMeter("state/delete/storagenodes", nil)
+
+ slotDeletionMaxCount = metrics.NewRegisteredGauge("state/delete/storage/max/slot", nil)
+ slotDeletionMaxSize = metrics.NewRegisteredGauge("state/delete/storage/max/size", nil)
+ slotDeletionTimer = metrics.NewRegisteredResettingTimer("state/delete/storage/timer", nil)
+ slotDeletionCount = metrics.NewRegisteredMeter("state/delete/storage/slot", nil)
+ slotDeletionSize = metrics.NewRegisteredMeter("state/delete/storage/size", nil)
+ slotDeletionSkip = metrics.NewRegisteredGauge("state/delete/storage/skip", nil)
)
diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go
index d763a3b279..a0589b4a0d 100644
--- a/core/state/pruner/pruner.go
+++ b/core/state/pruner/pruner.go
@@ -358,7 +358,10 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
if err != nil {
return err
}
- accIter := t.NodeIterator(nil)
+ accIter, err := t.NodeIterator(nil)
+ if err != nil {
+ return err
+ }
for accIter.Next(true) {
hash := accIter.Hash()
@@ -379,7 +382,10 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
if err != nil {
return err
}
- storageIter := storageTrie.NodeIterator(nil)
+ storageIter, err := storageTrie.NodeIterator(nil)
+ if err != nil {
+ return err
+ }
for storageIter.Next(true) {
hash := storageIter.Hash()
if hash != (common.Hash{}) {
diff --git a/core/state/snapshot/account.go b/core/state/snapshot/account.go
deleted file mode 100644
index 35aa33a959..0000000000
--- a/core/state/snapshot/account.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package snapshot
-
-import (
- "bytes"
- "math/big"
-
- "github.com/ava-labs/coreth/core/types"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/rlp"
-)
-
-// Account is a modified version of a state.Account, where the root is replaced
-// with a byte slice. This format can be used to represent full-consensus format
-// or slim-snapshot format which replaces the empty root and code hash as nil
-// byte slice.
-type Account struct {
- Nonce uint64
- Balance *big.Int
- Root []byte
- CodeHash []byte
- IsMultiCoin bool
-}
-
-// SlimAccount converts a state.Account content into a slim snapshot account
-func SlimAccount(nonce uint64, balance *big.Int, root common.Hash, codehash []byte, isMultiCoin bool) Account {
- slim := Account{
- Nonce: nonce,
- Balance: balance,
- IsMultiCoin: isMultiCoin,
- }
- if root != types.EmptyRootHash {
- slim.Root = root[:]
- }
- if !bytes.Equal(codehash, types.EmptyCodeHash[:]) {
- slim.CodeHash = codehash
- }
- return slim
-}
-
-// SlimAccountRLP converts a state.Account content into a slim snapshot
-// version RLP encoded.
-func SlimAccountRLP(nonce uint64, balance *big.Int, root common.Hash, codehash []byte, isMultiCoin bool) []byte {
- data, err := rlp.EncodeToBytes(SlimAccount(nonce, balance, root, codehash, isMultiCoin))
- if err != nil {
- panic(err)
- }
- return data
-}
-
-// FullAccount decodes the data on the 'slim RLP' format and return
-// the consensus format account.
-func FullAccount(data []byte) (Account, error) {
- var account Account
- if err := rlp.DecodeBytes(data, &account); err != nil {
- return Account{}, err
- }
- if len(account.Root) == 0 {
- account.Root = types.EmptyRootHash[:]
- }
- if len(account.CodeHash) == 0 {
- account.CodeHash = types.EmptyCodeHash[:]
- }
- return account, nil
-}
-
-// FullAccountRLP converts data on the 'slim RLP' format into the full RLP-format.
-func FullAccountRLP(data []byte) ([]byte, error) {
- account, err := FullAccount(data)
- if err != nil {
- return nil, err
- }
- return rlp.EncodeToBytes(account)
-}
diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go
index 2f79ffd5d7..5195e78a9b 100644
--- a/core/state/snapshot/conversion.go
+++ b/core/state/snapshot/conversion.go
@@ -27,7 +27,6 @@
package snapshot
import (
- "bytes"
"encoding/binary"
"errors"
"fmt"
@@ -311,7 +310,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou
fullData []byte
)
if leafCallback == nil {
- fullData, err = FullAccountRLP(it.(AccountIterator).Account())
+ fullData, err = types.FullAccountRLP(it.(AccountIterator).Account())
if err != nil {
return stop(err)
}
@@ -323,7 +322,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou
return stop(err)
}
// Fetch the next account and process it concurrently
- account, err := FullAccount(it.(AccountIterator).Account())
+ account, err := types.FullAccount(it.(AccountIterator).Account())
if err != nil {
return stop(err)
}
@@ -333,7 +332,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou
results <- err
return
}
- if !bytes.Equal(account.Root, subroot.Bytes()) {
+ if account.Root != subroot {
results <- fmt.Errorf("invalid subroot(path %x), want %x, have %x", hash, account.Root, subroot)
return
}
diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go
index 74dcfc92d5..0674839fe4 100644
--- a/core/state/snapshot/difflayer.go
+++ b/core/state/snapshot/difflayer.go
@@ -31,14 +31,15 @@ import (
"fmt"
"math"
"math/rand"
- "sort"
"sync"
"sync/atomic"
"time"
+ "github.com/ava-labs/coreth/core/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
bloomfilter "github.com/holiman/bloomfilter/v2"
+ "golang.org/x/exp/slices"
)
var (
@@ -289,7 +290,7 @@ func (dl *diffLayer) Stale() bool {
// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
-func (dl *diffLayer) Account(hash common.Hash) (*Account, error) {
+func (dl *diffLayer) Account(hash common.Hash) (*types.SlimAccount, error) {
data, err := dl.AccountRLP(hash)
if err != nil {
return nil, err
@@ -297,7 +298,7 @@ func (dl *diffLayer) Account(hash common.Hash) (*Account, error) {
if len(data) == 0 { // can be both nil and []byte{}
return nil, nil
}
- account := new(Account)
+ account := new(types.SlimAccount)
if err := rlp.DecodeBytes(data, account); err != nil {
panic(err)
}
@@ -309,8 +310,8 @@ func (dl *diffLayer) Account(hash common.Hash) (*Account, error) {
//
// Note the returned account is not a copy, please don't modify it.
func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) {
- dl.lock.RLock()
// Check staleness before reaching further.
+ dl.lock.RLock()
if dl.Stale() {
dl.lock.RUnlock()
return nil, ErrSnapshotStale
@@ -541,7 +542,7 @@ func (dl *diffLayer) AccountList() []common.Hash {
dl.accountList = append(dl.accountList, hash)
}
}
- sort.Sort(hashes(dl.accountList))
+ slices.SortFunc(dl.accountList, common.Hash.Cmp)
dl.memory += uint64(len(dl.accountList) * common.HashLength)
return dl.accountList
}
@@ -579,7 +580,7 @@ func (dl *diffLayer) StorageList(accountHash common.Hash) ([]common.Hash, bool)
for k := range storageMap {
storageList = append(storageList, k)
}
- sort.Sort(hashes(storageList))
+ slices.SortFunc(storageList, common.Hash.Cmp)
dl.storageList[accountHash] = storageList
dl.memory += uint64(len(dl.storageList)*common.HashLength + common.HashLength)
return storageList, destructed
diff --git a/core/state/snapshot/disklayer.go b/core/state/snapshot/disklayer.go
index 051a4ac5be..e6fec75e80 100644
--- a/core/state/snapshot/disklayer.go
+++ b/core/state/snapshot/disklayer.go
@@ -32,6 +32,7 @@ import (
"time"
"github.com/ava-labs/coreth/core/rawdb"
+ "github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/ethdb"
"github.com/ava-labs/coreth/trie"
"github.com/ava-labs/coreth/utils"
@@ -88,7 +89,7 @@ func (dl *diskLayer) Stale() bool {
// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
-func (dl *diskLayer) Account(hash common.Hash) (*Account, error) {
+func (dl *diskLayer) Account(hash common.Hash) (*types.SlimAccount, error) {
data, err := dl.AccountRLP(hash)
if err != nil {
return nil, err
@@ -96,7 +97,7 @@ func (dl *diskLayer) Account(hash common.Hash) (*Account, error) {
if len(data) == 0 { // can be both nil and []byte{}
return nil, nil
}
- account := new(Account)
+ account := new(types.SlimAccount)
if err := rlp.DecodeBytes(data, account); err != nil {
panic(err)
}
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index b531c0b1a5..faccba0efb 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -30,7 +30,6 @@ import (
"bytes"
"encoding/binary"
"fmt"
- "math/big"
"time"
"github.com/ava-labs/coreth/core/rawdb"
@@ -287,26 +286,27 @@ func (dl *diskLayer) generate(stats *generatorStats) {
if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that
accMarker = dl.genMarker[:common.HashLength]
}
- accIt := trie.NewIterator(accTrie.NodeIterator(accMarker))
+ accNodeIt, err := accTrie.NodeIterator(accMarker)
+ if err != nil {
+ log.Error("Generator failed to iterate account trie", "root", dl.root, "err", err)
+ abort := <-dl.genAbort
+ dl.genStats = stats
+ close(abort)
+ return
+ }
+ accIt := trie.NewIterator(accNodeIt)
batch := dl.diskdb.NewBatch()
// Iterate from the previous marker and continue generating the state snapshot
dl.logged = time.Now()
for accIt.Next() {
// Retrieve the current account and flatten it into the internal format
+ var acc types.StateAccount
accountHash := common.BytesToHash(accIt.Key)
-
- var acc struct {
- Nonce uint64
- Balance *big.Int
- Root common.Hash
- CodeHash []byte
- IsMultiCoin bool
- }
if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
log.Crit("Invalid account encountered during snapshot creation", "err", err)
}
- data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash, acc.IsMultiCoin)
+ data := types.SlimAccountRLP(acc)
// If the account is not yet in-progress, write it out
if accMarker == nil || !bytes.Equal(accountHash[:], accMarker) {
@@ -340,7 +340,15 @@ func (dl *diskLayer) generate(stats *generatorStats) {
if accMarker != nil && bytes.Equal(accountHash[:], accMarker) && len(dl.genMarker) > common.HashLength {
storeMarker = dl.genMarker[common.HashLength:]
}
- storeIt := trie.NewIterator(storeTrie.NodeIterator(storeMarker))
+ storeNodeIt, err := storeTrie.NodeIterator(storeMarker)
+ if err != nil {
+ log.Error("Generator failed to iterate storage trie", "accroot", dl.root, "acchash", accountHash, "stroot", acc.Root, "err", err)
+ abort := <-dl.genAbort
+ dl.genStats = stats
+ close(abort)
+ return
+ }
+ storeIt := trie.NewIterator(storeNodeIt)
for storeIt.Next() {
rawdb.WriteStorageSnapshot(batch, accountHash, common.BytesToHash(storeIt.Key), storeIt.Value)
stats.storage += common.StorageSize(1 + 2*common.HashLength + len(storeIt.Value))
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
index 40c359880a..9a541b3711 100644
--- a/core/state/snapshot/generate_test.go
+++ b/core/state/snapshot/generate_test.go
@@ -62,9 +62,9 @@ func TestGeneration(t *testing.T) {
var helper = newHelper()
stRoot := helper.makeStorageTrie(common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, false)
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
@@ -97,16 +97,16 @@ func TestGenerateExistentState(t *testing.T) {
var helper = newHelper()
stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
- helper.addSnapAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
- helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
- helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
root, snap := helper.CommitAndGenerate()
@@ -170,18 +170,18 @@ func newHelper() *testHelper {
}
}
-func (t *testHelper) addTrieAccount(acckey string, acc *Account) {
+func (t *testHelper) addTrieAccount(acckey string, acc *types.StateAccount) {
val, _ := rlp.EncodeToBytes(acc)
t.accTrie.MustUpdate([]byte(acckey), val)
}
-func (t *testHelper) addSnapAccount(acckey string, acc *Account) {
+func (t *testHelper) addSnapAccount(acckey string, acc *types.StateAccount) {
val, _ := rlp.EncodeToBytes(acc)
key := hashData([]byte(acckey))
rawdb.WriteAccountSnapshot(t.diskdb, key, val)
}
-func (t *testHelper) addAccount(acckey string, acc *Account) {
+func (t *testHelper) addAccount(acckey string, acc *types.StateAccount) {
t.addTrieAccount(acckey, acc)
t.addSnapAccount(acckey, acc)
}
@@ -193,28 +193,28 @@ func (t *testHelper) addSnapStorage(accKey string, keys []string, vals []string)
}
}
-func (t *testHelper) makeStorageTrie(owner common.Hash, keys []string, vals []string, commit bool) []byte {
+func (t *testHelper) makeStorageTrie(owner common.Hash, keys []string, vals []string, commit bool) common.Hash {
id := trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash)
stTrie, _ := trie.NewStateTrie(id, t.triedb)
for i, k := range keys {
stTrie.MustUpdate([]byte(k), []byte(vals[i]))
}
if !commit {
- return stTrie.Hash().Bytes()
+ return stTrie.Hash()
}
- root, nodes := stTrie.Commit(false)
+ root, nodes, _ := stTrie.Commit(false)
if nodes != nil {
t.nodes.Merge(nodes)
}
- return root.Bytes()
+ return root
}
func (t *testHelper) Commit() common.Hash {
- root, nodes := t.accTrie.Commit(true)
+ root, nodes, _ := t.accTrie.Commit(true)
if nodes != nil {
t.nodes.Merge(nodes)
}
- t.triedb.Update(root, types.EmptyRootHash, t.nodes)
+ t.triedb.Update(root, types.EmptyRootHash, 0, t.nodes, nil)
t.triedb.Commit(root, false)
return root
}
@@ -247,28 +247,28 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
helper := newHelper()
// Account one, empty root but non-empty database
- helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
// Account two, non empty root but empty database
stRoot := helper.makeStorageTrie(hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
// Miss slots
{
// Account three, non empty root but misses slots in the beginning
helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-3", []string{"key-2", "key-3"}, []string{"val-2", "val-3"})
// Account four, non empty root but misses slots in the middle
helper.makeStorageTrie(hashData([]byte("acc-4")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-4", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-4", []string{"key-1", "key-3"}, []string{"val-1", "val-3"})
// Account five, non empty root but misses slots in the end
helper.makeStorageTrie(hashData([]byte("acc-5")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-5", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-5", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-5", []string{"key-1", "key-2"}, []string{"val-1", "val-2"})
}
@@ -276,22 +276,22 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
{
// Account six, non empty root but wrong slots in the beginning
helper.makeStorageTrie(hashData([]byte("acc-6")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-6", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-6", []string{"key-1", "key-2", "key-3"}, []string{"badval-1", "val-2", "val-3"})
// Account seven, non empty root but wrong slots in the middle
helper.makeStorageTrie(hashData([]byte("acc-7")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-7", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-7", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-7", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "badval-2", "val-3"})
// Account eight, non empty root but wrong slots in the end
helper.makeStorageTrie(hashData([]byte("acc-8")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-8", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-8", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-8", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "badval-3"})
// Account 9, non empty root but rotated slots
helper.makeStorageTrie(hashData([]byte("acc-9")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-9", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-9", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-9", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-3", "val-2"})
}
@@ -299,17 +299,17 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
{
// Account 10, non empty root but extra slots in the beginning
helper.makeStorageTrie(hashData([]byte("acc-10")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-10", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-10", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-10", []string{"key-0", "key-1", "key-2", "key-3"}, []string{"val-0", "val-1", "val-2", "val-3"})
// Account 11, non empty root but extra slots in the middle
helper.makeStorageTrie(hashData([]byte("acc-11")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-11", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-11", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-11", []string{"key-1", "key-2", "key-2-1", "key-3"}, []string{"val-1", "val-2", "val-2-1", "val-3"})
// Account 12, non empty root but extra slots in the end
helper.makeStorageTrie(hashData([]byte("acc-12")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-12", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-12", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-12", []string{"key-1", "key-2", "key-3", "key-4"}, []string{"val-1", "val-2", "val-3", "val-4"})
}
@@ -349,25 +349,25 @@ func TestGenerateExistentStateWithWrongAccounts(t *testing.T) {
// Missing accounts, only in the trie
{
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Beginning
- helper.addTrieAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Middle
- helper.addTrieAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // End
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Beginning
+ helper.addTrieAccount("acc-4", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Middle
+ helper.addTrieAccount("acc-6", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // End
}
// Wrong accounts
{
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
- helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")})
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")})
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
- helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addSnapAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
}
// Extra accounts, only in the snap
{
- helper.addSnapAccount("acc-0", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // before the beginning
- helper.addSnapAccount("acc-5", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: common.Hex2Bytes("0x1234")}) // Middle
- helper.addSnapAccount("acc-7", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // after the end
+ helper.addSnapAccount("acc-0", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // before the beginning
+ helper.addSnapAccount("acc-5", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: common.Hex2Bytes("0x1234")}) // Middle
+ helper.addSnapAccount("acc-7", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // after the end
}
root, snap := helper.CommitAndGenerate()
@@ -396,9 +396,9 @@ func TestGenerateCorruptAccountTrie(t *testing.T) {
// without any storage slots to keep the test smaller.
helper := newHelper()
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x7dd654835190324640832972b7c4c6eaa0c50541e36766d054ed57721f1dc7eb
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0xf73118e0254ce091588d66038744a0afae5f65a194de67cff310c683ae43329e
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x515d3de35e143cd976ad476398d910aa7bf8a02e8fd7eb9e3baacddbbcbfcb41
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x7dd654835190324640832972b7c4c6eaa0c50541e36766d054ed57721f1dc7eb
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0xf73118e0254ce091588d66038744a0afae5f65a194de67cff310c683ae43329e
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x515d3de35e143cd976ad476398d910aa7bf8a02e8fd7eb9e3baacddbbcbfcb41
root := helper.Commit() // Root: 0xfa04f652e8bd3938971bf7d71c3c688574af334ca8bc20e64b01ba610ae93cad
@@ -430,16 +430,16 @@ func TestGenerateMissingStorageTrie(t *testing.T) {
// two of which also has the same 3-slot storage trie attached.
helper := newHelper()
- stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+ stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x70da4ebd7602dd313c936b39000ed9ab7f849986a90ea934f0c3ec4cc9840441
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x70da4ebd7602dd313c936b39000ed9ab7f849986a90ea934f0c3ec4cc9840441
root := helper.Commit()
// Delete a storage trie root and ensure the generator chokes
- helper.diskdb.Delete(stRoot) // We can only corrupt the disk database, so flush the tries out
+ helper.diskdb.Delete(stRoot.Bytes()) // We can only corrupt the disk database, so flush the tries out
snap := generateSnapshot(helper.diskdb, helper.triedb, 16, testBlockHash, root, nil)
select {
@@ -464,11 +464,11 @@ func TestGenerateCorruptStorageTrie(t *testing.T) {
// two of which also has the same 3-slot storage trie attached.
helper := newHelper()
- stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+ stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x70da4ebd7602dd313c936b39000ed9ab7f849986a90ea934f0c3ec4cc9840441
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x70da4ebd7602dd313c936b39000ed9ab7f849986a90ea934f0c3ec4cc9840441
root := helper.Commit()
@@ -500,7 +500,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) {
[]string{"val-1", "val-2", "val-3", "val-4", "val-5"},
true,
)
- acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
+ acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
@@ -520,7 +520,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) {
[]string{"val-1", "val-2", "val-3", "val-4", "val-5"},
true,
)
- acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
+ acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
key := hashData([]byte("acc-2"))
rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
@@ -571,7 +571,7 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) {
[]string{"val-1", "val-2", "val-3"},
true,
)
- acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
+ acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x547b07c3a71669c00eda14077d85c7fd14575b92d459572540b25b9a11914dcb
@@ -585,7 +585,7 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) {
{
// 100 accounts exist only in snapshot
for i := 0; i < 1000; i++ {
- acc := &Account{Balance: big.NewInt(int64(i)), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}
+ acc := &types.StateAccount{Balance: big.NewInt(int64(i)), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
key := hashData([]byte(fmt.Sprintf("acc-%d", i)))
rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
@@ -621,7 +621,7 @@ func TestGenerateWithExtraBeforeAndAfter(t *testing.T) {
}
helper := newHelper()
{
- acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}
+ acc := &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val)
helper.accTrie.MustUpdate(common.HexToHash("0x07").Bytes(), val)
@@ -657,7 +657,7 @@ func TestGenerateWithMalformedSnapdata(t *testing.T) {
}
helper := newHelper()
{
- acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}
+ acc := &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val)
@@ -694,7 +694,7 @@ func TestGenerateFromEmptySnap(t *testing.T) {
for i := 0; i < 400; i++ {
stRoot := helper.makeStorageTrie(hashData([]byte(fmt.Sprintf("acc-%d", i))), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addTrieAccount(fmt.Sprintf("acc-%d", i),
- &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
}
root, snap := helper.CommitAndGenerate()
t.Logf("Root: %#x\n", root) // Root: 0x2609234ce43f5e471202c87e017ffb4dfecdb3163cfcbaa55de04baa59cad42d
@@ -730,7 +730,7 @@ func TestGenerateWithIncompleteStorage(t *testing.T) {
for i := 0; i < 8; i++ {
accKey := fmt.Sprintf("acc-%d", i)
stRoot := helper.makeStorageTrie(hashData([]byte(accKey)), stKeys, stVals, true)
- helper.addAccount(accKey, &Account{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount(accKey, &types.StateAccount{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
var moddedKeys []string
var moddedVals []string
for ii := 0; ii < 8; ii++ {
@@ -822,11 +822,11 @@ func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) {
var helper = newHelper()
stRoot := helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
- helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
@@ -857,11 +857,11 @@ func TestGenerateBrokenSnapshotWithDanglingStorage(t *testing.T) {
var helper = newHelper()
stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
populateDangling(helper.diskdb)
diff --git a/core/state/snapshot/iterator_fast.go b/core/state/snapshot/iterator_fast.go
index 04a61d4a12..4e324ee28b 100644
--- a/core/state/snapshot/iterator_fast.go
+++ b/core/state/snapshot/iterator_fast.go
@@ -32,6 +32,7 @@ import (
"sort"
"github.com/ethereum/go-ethereum/common"
+ "golang.org/x/exp/slices"
)
// weightedIterator is a iterator with an assigned weight. It is used to prioritise
@@ -42,32 +43,25 @@ type weightedIterator struct {
priority int
}
-// weightedIterators is a set of iterators implementing the sort.Interface.
-type weightedIterators []*weightedIterator
-
-// Len implements sort.Interface, returning the number of active iterators.
-func (its weightedIterators) Len() int { return len(its) }
-
-// Less implements sort.Interface, returning which of two iterators in the stack
-// is before the other.
-func (its weightedIterators) Less(i, j int) bool {
+func (it *weightedIterator) Cmp(other *weightedIterator) int {
// Order the iterators primarily by the account hashes
- hashI := its[i].it.Hash()
- hashJ := its[j].it.Hash()
+ hashI := it.it.Hash()
+ hashJ := other.it.Hash()
switch bytes.Compare(hashI[:], hashJ[:]) {
case -1:
- return true
+ return -1
case 1:
- return false
+ return 1
}
// Same account/storage-slot in multiple layers, split by priority
- return its[i].priority < its[j].priority
-}
-
-// Swap implements sort.Interface, swapping two entries in the iterator stack.
-func (its weightedIterators) Swap(i, j int) {
- its[i], its[j] = its[j], its[i]
+ if it.priority < other.priority {
+ return -1
+ }
+ if it.priority > other.priority {
+ return 1
+ }
+ return 0
}
// fastIterator is a more optimized multi-layer iterator which maintains a
@@ -79,7 +73,7 @@ type fastIterator struct {
curAccount []byte
curSlot []byte
- iterators weightedIterators
+ iterators []*weightedIterator
initiated bool
account bool
fail error
@@ -176,7 +170,7 @@ func (fi *fastIterator) init() {
}
}
// Re-sort the entire list
- sort.Sort(fi.iterators)
+ slices.SortFunc(fi.iterators, func(a, b *weightedIterator) int { return a.Cmp(b) })
fi.initiated = false
}
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index c59906a2f2..7658f0c108 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -61,7 +61,7 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
// is present in the database (or crashed mid-update).
baseBlockHash := rawdb.ReadSnapshotBlockHash(diskdb)
if baseBlockHash == (common.Hash{}) {
- return nil, false, fmt.Errorf("missing or corrupted snapshot, no snapshot block hash")
+ return nil, false, errors.New("missing or corrupted snapshot, no snapshot block hash")
}
if baseBlockHash != blockHash {
return nil, false, fmt.Errorf("block hash stored on disk (%#x) does not match last accepted (%#x)", baseBlockHash, blockHash)
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index ad7c9d6d41..703724bae3 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -35,6 +35,7 @@ import (
"time"
"github.com/ava-labs/coreth/core/rawdb"
+ "github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/ethdb"
"github.com/ava-labs/coreth/metrics"
"github.com/ava-labs/coreth/trie"
@@ -124,7 +125,7 @@ type Snapshot interface {
// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
- Account(hash common.Hash) (*Account, error)
+ Account(hash common.Hash) (*types.SlimAccount, error)
// AccountRLP directly retrieves the account RLP associated with a particular
// hash in the snapshot slim data format.
diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go
index 6fab030a87..7ca901c241 100644
--- a/core/state/snapshot/snapshot_test.go
+++ b/core/state/snapshot/snapshot_test.go
@@ -51,11 +51,10 @@ func randomHash() common.Hash {
// randomAccount generates a random account and returns it RLP encoded.
func randomAccount() []byte {
- root := randomHash()
- a := Account{
+ a := &types.StateAccount{
Balance: big.NewInt(rand.Int63()),
Nonce: rand.Uint64(),
- Root: root[:],
+ Root: randomHash(),
CodeHash: types.EmptyCodeHash[:],
}
data, _ := rlp.EncodeToBytes(a)
@@ -696,7 +695,7 @@ func TestReadStateDuringFlattening(t *testing.T) {
snap := snaps.Snapshot(diffRootC)
// Register the testing hook to access the state after flattening
- var result = make(chan *Account)
+ var result = make(chan *types.SlimAccount)
snaps.onFlatten = func() {
// Spin up a thread to read the account from the pre-created
// snapshot handler. It's expected to be blocked.
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 02ca670035..f1d95eec8e 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -68,33 +68,43 @@ func (s Storage) Copy() Storage {
// stateObject represents an Ethereum account which is being modified.
//
// The usage pattern is as follows:
-// First you need to obtain a state object.
-// Account values can be accessed and modified through the object.
-// Finally, call commitTrie to write the modified storage trie into a database.
+// - First you need to obtain a state object.
+// - Account values as well as storages can be accessed and modified through the object.
+// - Finally, call commit to return the changes of storage trie and update account data.
type stateObject struct {
- address common.Address
- addrHash common.Hash // hash of ethereum address of the account
+ db *StateDB
+ address common.Address // address of ethereum account
+ addrHash common.Hash // hash of ethereum address of the account
+ origin *types.StateAccount // Account original data without any change applied, nil means it was not existent
+ data types.StateAccount // Account data with all mutations applied in the scope of block
+
// dataLock protects the [data] field to prevent a race condition
// in the transaction pool tests. TODO remove after re-implementing
// tx pool to be synchronous.
dataLock sync.RWMutex
- data types.StateAccount
- db *StateDB
// Write caches.
trie Trie // storage trie, which becomes non-nil on first access
code Code // contract bytecode, which gets set when code is loaded
- originStorage Storage // Storage cache of original entries to dedup rewrites, reset for every transaction
+ originStorage Storage // Storage cache of original entries to dedup rewrites
pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block
- dirtyStorage Storage // Storage entries that have been modified in the current transaction execution
+ dirtyStorage Storage // Storage entries that have been modified in the current transaction execution, reset for every transaction
// Cache flags.
- // When an object is marked suicided it will be deleted from the trie
- // during the "update" phase of the state transition.
dirtyCode bool // true if the code was updated
- suicided bool
- deleted bool
+
+ // Flag whether the account was marked as self-destructed. The self-destructed account
+ // is still accessible in the scope of same transaction.
+ selfDestructed bool
+
+ // Flag whether the account was marked as deleted. A self-destructed account
+ // or an account that is considered as empty will be marked as deleted at
+ // the end of transaction and no longer accessible anymore.
+ deleted bool
+
+ // Flag whether the object was created in the current transaction
+ created bool
}
// empty returns whether the account is considered empty.
@@ -103,21 +113,17 @@ func (s *stateObject) empty() bool {
}
// newObject creates a state object.
-func newObject(db *StateDB, address common.Address, data types.StateAccount) *stateObject {
- if data.Balance == nil {
- data.Balance = new(big.Int)
- }
- if data.CodeHash == nil {
- data.CodeHash = types.EmptyCodeHash.Bytes()
- }
- if data.Root == (common.Hash{}) {
- data.Root = types.EmptyRootHash
+func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *stateObject {
+ origin := acct
+ if acct == nil {
+ acct = types.NewEmptyStateAccount()
}
return &stateObject{
db: db,
address: address,
addrHash: crypto.Keccak256Hash(address[:]),
- data: data,
+ origin: origin,
+ data: *acct,
originStorage: make(Storage),
pendingStorage: make(Storage),
dirtyStorage: make(Storage),
@@ -129,8 +135,8 @@ func (s *stateObject) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, &s.data)
}
-func (s *stateObject) markSuicided() {
- s.suicided = true
+func (s *stateObject) markSelfdestructed() {
+ s.selfDestructed = true
}
func (s *stateObject) touch() {
@@ -147,17 +153,15 @@ func (s *stateObject) touch() {
// getTrie returns the associated storage trie. The trie will be opened
// if it's not loaded previously. An error will be returned if trie can't
// be loaded.
-func (s *stateObject) getTrie(db Database) (Trie, error) {
+func (s *stateObject) getTrie() (Trie, error) {
if s.trie == nil {
// Try fetching from prefetcher first
- // We don't prefetch empty tries
if s.data.Root != types.EmptyRootHash && s.db.prefetcher != nil {
- // When the miner is creating the pending state, there is no
- // prefetcher
+ // When the miner is creating the pending state, there is no prefetcher
s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root)
}
if s.trie == nil {
- tr, err := db.OpenStorageTrie(s.db.originalRoot, s.addrHash, s.data.Root)
+ tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root)
if err != nil {
return nil, err
}
@@ -168,18 +172,18 @@ func (s *stateObject) getTrie(db Database) (Trie, error) {
}
// GetState retrieves a value from the account storage trie.
-func (s *stateObject) GetState(db Database, key common.Hash) common.Hash {
+func (s *stateObject) GetState(key common.Hash) common.Hash {
// If we have a dirty value for this state entry, return it
value, dirty := s.dirtyStorage[key]
if dirty {
return value
}
// Otherwise return the entry's original value
- return s.GetCommittedState(db, key)
+ return s.GetCommittedState(key)
}
// GetCommittedState retrieves a value from the committed account storage trie.
-func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Hash {
+func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
// If we have a pending write or clean cached, return that
if value, pending := s.pendingStorage[key]; pending {
return value
@@ -198,8 +202,9 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
}
// If no live objects are available, attempt to use snapshots
var (
- enc []byte
- err error
+ enc []byte
+ err error
+ value common.Hash
)
if s.db.snap != nil {
start := time.Now()
@@ -207,16 +212,23 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
if metrics.EnabledExpensive {
s.db.SnapshotStorageReads += time.Since(start)
}
+ if len(enc) > 0 {
+ _, content, _, err := rlp.Split(enc)
+ if err != nil {
+ s.db.setError(err)
+ }
+ value.SetBytes(content)
+ }
}
// If the snapshot is unavailable or reading from it fails, load from the database.
if s.db.snap == nil || err != nil {
start := time.Now()
- tr, err := s.getTrie(db)
+ tr, err := s.getTrie()
if err != nil {
s.db.setError(err)
return common.Hash{}
}
- enc, err = tr.GetStorage(s.address, key.Bytes())
+ val, err := tr.GetStorage(s.address, key.Bytes())
if metrics.EnabledExpensive {
s.db.StorageReads += time.Since(start)
}
@@ -224,23 +236,16 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
s.db.setError(err)
return common.Hash{}
}
- }
- var value common.Hash
- if len(enc) > 0 {
- _, content, _, err := rlp.Split(enc)
- if err != nil {
- s.db.setError(err)
- }
- value.SetBytes(content)
+ value.SetBytes(val)
}
s.originStorage[key] = value
return value
}
// SetState updates a value in account storage.
-func (s *stateObject) SetState(db Database, key, value common.Hash) {
+func (s *stateObject) SetState(key, value common.Hash) {
// If the new value is the same as old, don't set
- prev := s.GetState(db, key)
+ prev := s.GetState(key)
if prev == value {
return
}
@@ -278,7 +283,7 @@ func (s *stateObject) finalise(prefetch bool) {
// updateTrie writes cached storage modifications into the object's storage trie.
// It will return nil if the trie has not been loaded and no changes have been
// made. An error will be returned if the trie can't be loaded/updated correctly.
-func (s *stateObject) updateTrie(db Database) (Trie, error) {
+func (s *stateObject) updateTrie() (Trie, error) {
// Make sure all dirty slots are finalized into the pending storage area
s.finalise(false) // Don't prefetch anymore, pull directly if need be
if len(s.pendingStorage) == 0 {
@@ -291,9 +296,10 @@ func (s *stateObject) updateTrie(db Database) (Trie, error) {
// The snapshot storage map for the object
var (
storage map[common.Hash][]byte
+ origin map[common.Hash][]byte
hasher = s.db.hasher
)
- tr, err := s.getTrie(db)
+ tr, err := s.getTrie()
if err != nil {
s.db.setError(err)
return nil, err
@@ -305,9 +311,11 @@ func (s *stateObject) updateTrie(db Database) (Trie, error) {
if value == s.originStorage[key] {
continue
}
+ prev := s.originStorage[key]
s.originStorage[key] = value
- var v []byte
+ // rlp-encoded value to be used by the snapshot
+ var snapshotVal []byte
if (value == common.Hash{}) {
if err := tr.DeleteStorage(s.address, key[:]); err != nil {
s.db.setError(err)
@@ -315,25 +323,43 @@ func (s *stateObject) updateTrie(db Database) (Trie, error) {
}
s.db.StorageDeleted += 1
} else {
+ trimmedVal := common.TrimLeftZeroes(value[:])
// Encoding []byte cannot fail, ok to ignore the error.
- v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
- if err := tr.UpdateStorage(s.address, key[:], v); err != nil {
+ snapshotVal, _ = rlp.EncodeToBytes(trimmedVal)
+ if err := tr.UpdateStorage(s.address, key[:], trimmedVal); err != nil {
s.db.setError(err)
return nil, err
}
s.db.StorageUpdated += 1
}
- // If state snapshotting is active, cache the data til commit
- if s.db.snap != nil {
- if storage == nil {
- // Retrieve the old storage map, if available, create a new one otherwise
- if storage = s.db.snapStorage[s.addrHash]; storage == nil {
- storage = make(map[common.Hash][]byte)
- s.db.snapStorage[s.addrHash] = storage
- }
+ // Cache the mutated storage slots until commit
+ if storage == nil {
+ if storage = s.db.storages[s.addrHash]; storage == nil {
+ storage = make(map[common.Hash][]byte)
+ s.db.storages[s.addrHash] = storage
+ }
+ }
+ khash := crypto.HashData(hasher, key[:])
+ storage[khash] = snapshotVal // snapshotVal will be nil if it's deleted
+
+ // Cache the original value of mutated storage slots
+ if origin == nil {
+ if origin = s.db.storagesOrigin[s.address]; origin == nil {
+ origin = make(map[common.Hash][]byte)
+ s.db.storagesOrigin[s.address] = origin
+ }
+ }
+ // Track the original value of slot only if it's mutated first time
+ if _, ok := origin[khash]; !ok {
+ if prev == (common.Hash{}) {
+ origin[khash] = nil // nil if it was not present previously
+ } else {
+ // Encoding []byte cannot fail, ok to ignore the error.
+ b, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(prev[:]))
+ origin[khash] = b
}
- storage[crypto.HashData(hasher, key[:])] = v // v will be nil if it's deleted
}
+ // Cache the items for preloading
usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure
}
if s.db.prefetcher != nil {
@@ -347,8 +373,8 @@ func (s *stateObject) updateTrie(db Database) (Trie, error) {
// UpdateRoot sets the trie root to the current root hash of. An error
// will be returned if trie root hash is not computed correctly.
-func (s *stateObject) updateRoot(db Database) {
- tr, err := s.updateTrie(db)
+func (s *stateObject) updateRoot() {
+ tr, err := s.updateTrie()
if err != nil {
return
}
@@ -363,23 +389,29 @@ func (s *stateObject) updateRoot(db Database) {
s.data.Root = tr.Hash()
}
-// commitTrie submits the storage changes into the storage trie and re-computes
-// the root. Besides, all trie changes will be collected in a nodeset and returned.
-func (s *stateObject) commitTrie(db Database) (*trienode.NodeSet, error) {
- tr, err := s.updateTrie(db)
+// commit returns the changes made in storage trie and updates the account data.
+func (s *stateObject) commit() (*trienode.NodeSet, error) {
+ tr, err := s.updateTrie()
if err != nil {
return nil, err
}
// If nothing changed, don't bother with committing anything
if tr == nil {
+ s.origin = s.data.Copy()
return nil, nil
}
// Track the amount of time wasted on committing the storage trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now())
}
- root, nodes := tr.Commit(false)
+ root, nodes, err := tr.Commit(false)
+ if err != nil {
+ return nil, err
+ }
s.data.Root = root
+
+ // Update original account data after commit
+ s.origin = s.data.Copy()
return nodes, nil
}
@@ -439,7 +471,7 @@ func (s *stateObject) SubBalanceMultiCoin(coinID common.Hash, amount *big.Int, d
func (s *stateObject) SetBalanceMultiCoin(coinID common.Hash, amount *big.Int, db Database) {
s.EnableMultiCoin()
NormalizeCoinID(&coinID)
- s.SetState(db, coinID, common.BigToHash(amount))
+ s.SetState(coinID, common.BigToHash(amount))
}
func (s *stateObject) setBalance(amount *big.Int) {
@@ -451,18 +483,24 @@ func (s *stateObject) enableMultiCoin() {
}
func (s *stateObject) deepCopy(db *StateDB) *stateObject {
- stateObject := newObject(db, s.address, s.data)
+ obj := &stateObject{
+ db: db,
+ address: s.address,
+ addrHash: s.addrHash,
+ origin: s.origin,
+ data: s.data,
+ }
if s.trie != nil {
- stateObject.trie = db.db.CopyTrie(s.trie)
+ obj.trie = db.db.CopyTrie(s.trie)
}
- stateObject.code = s.code
- stateObject.dirtyStorage = s.dirtyStorage.Copy()
- stateObject.originStorage = s.originStorage.Copy()
- stateObject.pendingStorage = s.pendingStorage.Copy()
- stateObject.suicided = s.suicided
- stateObject.dirtyCode = s.dirtyCode
- stateObject.deleted = s.deleted
- return stateObject
+ obj.code = s.code
+ obj.dirtyStorage = s.dirtyStorage.Copy()
+ obj.originStorage = s.originStorage.Copy()
+ obj.pendingStorage = s.pendingStorage.Copy()
+ obj.selfDestructed = s.selfDestructed
+ obj.dirtyCode = s.dirtyCode
+ obj.deleted = s.deleted
+ return obj
}
//
@@ -475,14 +513,14 @@ func (s *stateObject) Address() common.Address {
}
// Code returns the contract code associated with this object, if any.
-func (s *stateObject) Code(db Database) []byte {
+func (s *stateObject) Code() []byte {
if s.code != nil {
return s.code
}
if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) {
return nil
}
- code, err := db.ContractCode(s.addrHash, common.BytesToHash(s.CodeHash()))
+ code, err := s.db.db.ContractCode(s.address, common.BytesToHash(s.CodeHash()))
if err != nil {
s.db.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err))
}
@@ -493,14 +531,14 @@ func (s *stateObject) Code(db Database) []byte {
// CodeSize returns the size of the contract code associated with this object,
// or zero if none. This method is an almost mirror of Code, but uses a cache
// inside the database to avoid loading codes seen recently.
-func (s *stateObject) CodeSize(db Database) int {
+func (s *stateObject) CodeSize() int {
if s.code != nil {
return len(s.code)
}
if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) {
return 0
}
- size, err := db.ContractCodeSize(s.addrHash, common.BytesToHash(s.CodeHash()))
+ size, err := s.db.db.ContractCodeSize(s.address, common.BytesToHash(s.CodeHash()))
if err != nil {
s.db.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err))
}
@@ -508,7 +546,7 @@ func (s *stateObject) CodeSize(db Database) int {
}
func (s *stateObject) SetCode(codeHash common.Hash, code []byte) {
- prevcode := s.Code(s.db.db)
+ prevcode := s.Code()
s.db.journal.append(codeChange{
account: &s.address,
prevhash: s.CodeHash(),
@@ -563,7 +601,7 @@ func NormalizeStateKey(key *common.Hash) {
func (s *stateObject) BalanceMultiCoin(coinID common.Hash, db Database) *big.Int {
NormalizeCoinID(&coinID)
- return s.GetState(db, coinID).Big()
+ return s.GetState(coinID).Big()
}
func (s *stateObject) EnableMultiCoin() bool {
diff --git a/core/state/state_test.go b/core/state/state_test.go
index edd345a650..96dacdbfdc 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -40,21 +40,22 @@ import (
"github.com/ethereum/go-ethereum/crypto"
)
-type stateTest struct {
+type stateEnv struct {
db ethdb.Database
state *StateDB
}
-func newStateTest() *stateTest {
+func newStateEnv() *stateEnv {
db := rawdb.NewMemoryDatabase()
sdb, _ := New(types.EmptyRootHash, NewDatabase(db), nil)
- return &stateTest{db: db, state: sdb}
+ return &stateEnv{db: db, state: sdb}
}
func TestIterativeDump(t *testing.T) {
db := rawdb.NewMemoryDatabase()
- sdb, _ := New(types.EmptyRootHash, NewDatabaseWithConfig(db, &trie.Config{Preimages: true}), nil)
- s := &stateTest{db: db, state: sdb}
+ tdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true})
+ sdb, _ := New(types.EmptyRootHash, tdb, nil)
+ s := &stateEnv{db: db, state: sdb}
// generate a few entries
obj1 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01}))
@@ -69,7 +70,8 @@ func TestIterativeDump(t *testing.T) {
// write some of them to the trie
s.state.updateStateObject(obj1)
s.state.updateStateObject(obj2)
- s.state.Commit(false, false)
+ root, _ := s.state.Commit(0, false, false)
+ s.state, _ = New(root, tdb, nil)
b := &bytes.Buffer{}
s.state.IterativeDump(nil, json.NewEncoder(b))
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 6c116369b5..58dcddef93 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -41,6 +41,7 @@ import (
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/trie"
"github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ava-labs/coreth/trie/triestate"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
@@ -66,27 +67,38 @@ func (n *proofList) Delete(key []byte) error {
// StateDB structs within the ethereum protocol are used to store anything
// within the merkle trie. StateDBs take care of caching and storing
// nested states. It's the general query interface to retrieve:
+//
// * Contracts
// * Accounts
+//
+// Once the state is committed, tries cached in stateDB (including account
+// trie, storage tries) will no longer be functional. A new state instance
+// must be created with new root and updated database for accessing post-
+// commit states.
type StateDB struct {
db Database
prefetcher *triePrefetcher
trie Trie
hasher crypto.KeccakState
+ snap snapshot.Snapshot
// originalRoot is the pre-state root, before any changes were made.
// It will be updated when the Commit is called.
originalRoot common.Hash
- snap snapshot.Snapshot
- snapAccounts map[common.Hash][]byte
- snapStorage map[common.Hash]map[common.Hash][]byte
+ // These maps hold the state changes (including the corresponding
+ // original value) that occurred in this **block**.
+ accounts map[common.Hash][]byte // The mutated accounts in 'slim RLP' encoding
+ storages map[common.Hash]map[common.Hash][]byte // The mutated slots in prefix-zero trimmed rlp format
+ accountsOrigin map[common.Address][]byte // The original value of mutated accounts in 'slim RLP' encoding
+ storagesOrigin map[common.Address]map[common.Hash][]byte // The original value of mutated slots in prefix-zero trimmed rlp format
- // This map holds 'live' objects, which will get modified while processing a state transition.
+ // This map holds 'live' objects, which will get modified while processing
+ // a state transition.
stateObjects map[common.Address]*stateObject
- stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie
- stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution
- stateObjectsDestruct map[common.Address]struct{} // State objects destructed in the block
+ stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie
+ stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution
+ stateObjectsDestruct map[common.Address]*types.StateAccount // State objects destructed in the block
// DB error.
// State objects are used by the consensus core and VM which are
@@ -100,11 +112,13 @@ type StateDB struct {
// The refund counter, also used by state transitioning.
refund uint64
+ // The tx context and all logs that occurred in the scope of the transaction.
thash common.Hash
txIndex int
logs map[common.Hash][]*types.Log
logSize uint
+ // Preimages seen by the VM in the scope of the block.
preimages map[common.Hash][]byte
// Per-transaction access list
@@ -161,10 +175,14 @@ func NewWithSnapshot(root common.Hash, db Database, snap snapshot.Snapshot) (*St
db: db,
trie: tr,
originalRoot: root,
+ accounts: make(map[common.Hash][]byte),
+ storages: make(map[common.Hash]map[common.Hash][]byte),
+ accountsOrigin: make(map[common.Address][]byte),
+ storagesOrigin: make(map[common.Address]map[common.Hash][]byte),
stateObjects: make(map[common.Address]*stateObject),
stateObjectsPending: make(map[common.Address]struct{}),
stateObjectsDirty: make(map[common.Address]struct{}),
- stateObjectsDestruct: make(map[common.Address]struct{}),
+ stateObjectsDestruct: make(map[common.Address]*types.StateAccount),
logs: make(map[common.Hash][]*types.Log),
preimages: make(map[common.Hash][]byte),
journal: newJournal(),
@@ -177,8 +195,6 @@ func NewWithSnapshot(root common.Hash, db Database, snap snapshot.Snapshot) (*St
return nil, fmt.Errorf("cannot create new statedb for root: %s, using snapshot with mismatched root: %s", root, snap.Root().Hex())
}
sdb.snap = snap
- sdb.snapAccounts = make(map[common.Hash][]byte)
- sdb.snapStorage = make(map[common.Hash]map[common.Hash][]byte)
}
return sdb, nil
}
@@ -280,7 +296,7 @@ func (s *StateDB) SubRefund(gas uint64) {
}
// Exist reports whether the given account address exists in the state.
-// Notably this also returns true for suicided accounts.
+// Notably this also returns true for self-destructed accounts.
func (s *StateDB) Exist(addr common.Address) bool {
return s.getStateObject(addr) != nil
}
@@ -327,7 +343,7 @@ func (s *StateDB) TxIndex() int {
func (s *StateDB) GetCode(addr common.Address) []byte {
stateObject := s.getStateObject(addr)
if stateObject != nil {
- return stateObject.Code(s.db)
+ return stateObject.Code()
}
return nil
}
@@ -335,7 +351,7 @@ func (s *StateDB) GetCode(addr common.Address) []byte {
func (s *StateDB) GetCodeSize(addr common.Address) int {
stateObject := s.getStateObject(addr)
if stateObject != nil {
- return stateObject.CodeSize(s.db)
+ return stateObject.CodeSize()
}
return 0
}
@@ -353,7 +369,7 @@ func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
stateObject := s.getStateObject(addr)
if stateObject != nil {
NormalizeStateKey(&hash)
- return stateObject.GetState(s.db, hash)
+ return stateObject.GetState(hash)
}
return common.Hash{}
}
@@ -366,7 +382,7 @@ func (s *StateDB) GetProof(addr common.Address) ([][]byte, error) {
// GetProofByHash returns the Merkle proof for a given account.
func (s *StateDB) GetProofByHash(addrHash common.Hash) ([][]byte, error) {
var proof proofList
- err := s.trie.Prove(addrHash[:], 0, &proof)
+ err := s.trie.Prove(addrHash[:], &proof)
return proof, err
}
@@ -380,7 +396,7 @@ func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte,
return nil, errors.New("storage trie for requested address does not exist")
}
var proof proofList
- err = trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof)
+ err = trie.Prove(crypto.Keccak256(key.Bytes()), &proof)
if err != nil {
return nil, err
}
@@ -391,7 +407,7 @@ func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte,
func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
stateObject := s.getStateObject(addr)
if stateObject != nil {
- return stateObject.GetCommittedState(s.db, hash)
+ return stateObject.GetCommittedState(hash)
}
return common.Hash{}
}
@@ -401,7 +417,7 @@ func (s *StateDB) GetCommittedStateAP1(addr common.Address, hash common.Hash) co
stateObject := s.getStateObject(addr)
if stateObject != nil {
NormalizeStateKey(&hash)
- return stateObject.GetCommittedState(s.db, hash)
+ return stateObject.GetCommittedState(hash)
}
return common.Hash{}
}
@@ -420,16 +436,16 @@ func (s *StateDB) StorageTrie(addr common.Address) (Trie, error) {
return nil, nil
}
cpy := stateObject.deepCopy(s)
- if _, err := cpy.updateTrie(s.db); err != nil {
+ if _, err := cpy.updateTrie(); err != nil {
return nil, err
}
- return cpy.getTrie(s.db)
+ return cpy.getTrie()
}
-func (s *StateDB) HasSuicided(addr common.Address) bool {
+func (s *StateDB) HasSelfDestructed(addr common.Address) bool {
stateObject := s.getStateObject(addr)
if stateObject != nil {
- return stateObject.suicided
+ return stateObject.selfDestructed
}
return false
}
@@ -502,44 +518,59 @@ func (s *StateDB) SetState(addr common.Address, key, value common.Hash) {
stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
NormalizeStateKey(&key)
- stateObject.SetState(s.db, key, value)
+ stateObject.SetState(key, value)
}
}
// SetStorage replaces the entire storage for the specified account with given
-// storage. This function should only be used for debugging.
+// storage. This function should only be used for debugging and the mutations
+// must be discarded afterwards.
func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) {
// SetStorage needs to wipe existing storage. We achieve this by pretending
// that the account self-destructed earlier in this block, by flagging
// it in stateObjectsDestruct. The effect of doing so is that storage lookups
// will not hit disk, since it is assumed that the disk-data is belonging
// to a previous incarnation of the object.
- s.stateObjectsDestruct[addr] = struct{}{}
+ //
+ // TODO(rjl493456442) this function should only be supported by 'unwritable'
+ // state and all mutations made should be discarded afterwards.
+ if _, ok := s.stateObjectsDestruct[addr]; !ok {
+ s.stateObjectsDestruct[addr] = nil
+ }
stateObject := s.GetOrNewStateObject(addr)
for k, v := range storage {
- stateObject.SetState(s.db, k, v)
+ stateObject.SetState(k, v)
}
}
-// Suicide marks the given account as suicided.
+// SelfDestruct marks the given account as selfdestructed.
// This clears the account balance.
//
// The account's state object is still available until the state is committed,
-// getStateObject will return a non-nil account after Suicide.
-func (s *StateDB) Suicide(addr common.Address) bool {
+// getStateObject will return a non-nil account after SelfDestruct.
+func (s *StateDB) SelfDestruct(addr common.Address) {
stateObject := s.getStateObject(addr)
if stateObject == nil {
- return false
+ return
}
- s.journal.append(suicideChange{
+ s.journal.append(selfDestructChange{
account: &addr,
- prev: stateObject.suicided,
+ prev: stateObject.selfDestructed,
prevbalance: new(big.Int).Set(stateObject.Balance()),
})
- stateObject.markSuicided()
+ stateObject.markSelfdestructed()
stateObject.data.Balance = new(big.Int)
+}
+
+func (s *StateDB) Selfdestruct6780(addr common.Address) {
+ stateObject := s.getStateObject(addr)
+ if stateObject == nil {
+ return
+ }
- return true
+ if stateObject.created {
+ s.SelfDestruct(addr)
+ }
}
// SetTransientState sets transient storage for a given account. It
@@ -584,13 +615,24 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
if err := s.trie.UpdateAccount(addr, &obj.data); err != nil {
s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
}
-
- // If state snapshotting is active, cache the data til commit. Note, this
- // update mechanism is not symmetric to the deletion, because whereas it is
- // enough to track account updates at commit time, deletions need tracking
- // at transaction boundary level to ensure we capture state clearing.
- if s.snap != nil {
- s.snapAccounts[obj.addrHash] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash, obj.data.IsMultiCoin)
+ if obj.dirtyCode {
+ s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code)
+ }
+ // Cache the data until commit. Note, this update mechanism is not symmetric
+ // to the deletion, because whereas it is enough to track account updates
+ // at commit time, deletions need tracking at transaction boundary level to
+ // ensure we capture state clearing.
+ s.accounts[obj.addrHash] = types.SlimAccountRLP(obj.data)
+
+ // Track the original value of mutated account, nil means it was not present.
+ // Skip if it has been tracked (because updateStateObject may be called
+ // multiple times in a block).
+ if _, ok := s.accountsOrigin[obj.address]; !ok {
+ if obj.origin == nil {
+ s.accountsOrigin[obj.address] = nil
+ } else {
+ s.accountsOrigin[obj.address] = types.SlimAccountRLP(*obj.origin)
+ }
}
}
@@ -670,7 +712,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
}
}
// Insert into the live set
- obj := newObject(s, addr, *data)
+ obj := newObject(s, addr, data)
s.setStateObject(obj)
return obj
}
@@ -692,20 +734,40 @@ func (s *StateDB) GetOrNewStateObject(addr common.Address) *stateObject {
// the given address, it is overwritten and returned as the second return value.
func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) {
prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!
-
- var prevdestruct bool
- if prev != nil {
- _, prevdestruct = s.stateObjectsDestruct[prev.address]
- if !prevdestruct {
- s.stateObjectsDestruct[prev.address] = struct{}{}
- }
- }
- newobj = newObject(s, addr, types.StateAccount{})
+ newobj = newObject(s, addr, nil)
if prev == nil {
s.journal.append(createObjectChange{account: &addr})
} else {
- s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct})
+ // The original account should be marked as destructed and all cached
+ // account and storage data should be cleared as well. Note, it must
+ // be done here, otherwise the destruction event of "original account"
+ // will be lost.
+ _, prevdestruct := s.stateObjectsDestruct[prev.address]
+ if !prevdestruct {
+ s.stateObjectsDestruct[prev.address] = prev.origin
+ }
+ // There may be some cached account/storage data already since IntermediateRoot
+ // will be called for each transaction before byzantium fork which will always
+ // cache the latest account/storage data.
+ prevAccount, ok := s.accountsOrigin[prev.address]
+ s.journal.append(resetObjectChange{
+ account: &addr,
+ prev: prev,
+ prevdestruct: prevdestruct,
+ prevAccount: s.accounts[prev.addrHash],
+ prevStorage: s.storages[prev.addrHash],
+ prevAccountOriginExist: ok,
+ prevAccountOrigin: prevAccount,
+ prevStorageOrigin: s.storagesOrigin[prev.address],
+ })
+ delete(s.accounts, prev.addrHash)
+ delete(s.storages, prev.addrHash)
+ delete(s.accountsOrigin, prev.address)
+ delete(s.storagesOrigin, prev.address)
}
+
+ newobj.created = true
+
s.setStateObject(newobj)
if prev != nil && !prev.deleted {
return newobj, prev
@@ -730,19 +792,23 @@ func (s *StateDB) CreateAccount(addr common.Address) {
}
}
-func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error {
- so := db.getStateObject(addr)
+func (s *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error {
+ so := s.getStateObject(addr)
if so == nil {
return nil
}
- tr, err := so.getTrie(db.db)
+ tr, err := so.getTrie()
if err != nil {
return err
}
- it := trie.NewIterator(tr.NodeIterator(nil))
+ trieIt, err := tr.NodeIterator(nil)
+ if err != nil {
+ return err
+ }
+ it := trie.NewIterator(trieIt)
for it.Next() {
- key := common.BytesToHash(db.trie.GetKey(it.Key))
+ key := common.BytesToHash(s.trie.GetKey(it.Key))
if value, dirty := so.dirtyStorage[key]; dirty {
if !cb(key, value) {
return nil
@@ -771,16 +837,26 @@ func (s *StateDB) Copy() *StateDB {
db: s.db,
trie: s.db.CopyTrie(s.trie),
originalRoot: s.originalRoot,
+ accounts: make(map[common.Hash][]byte),
+ storages: make(map[common.Hash]map[common.Hash][]byte),
+ accountsOrigin: make(map[common.Address][]byte),
+ storagesOrigin: make(map[common.Address]map[common.Hash][]byte),
stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)),
stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)),
stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)),
- stateObjectsDestruct: make(map[common.Address]struct{}, len(s.stateObjectsDestruct)),
+ stateObjectsDestruct: make(map[common.Address]*types.StateAccount, len(s.stateObjectsDestruct)),
refund: s.refund,
logs: make(map[common.Hash][]*types.Log, len(s.logs)),
logSize: s.logSize,
preimages: make(map[common.Hash][]byte, len(s.preimages)),
journal: newJournal(),
hasher: crypto.NewKeccakState(),
+
+ // In order for the block producer to be able to use and make additions
+ // to the snapshot tree, we need to copy that as well. Otherwise, any
+ // block mined by ourselves will cause gaps in the tree, and force the
+ // miner to operate trie-backed only.
+ snap: s.snap,
}
// Copy the dirty states, logs, and preimages
for addr := range s.journal.dirties {
@@ -814,10 +890,18 @@ func (s *StateDB) Copy() *StateDB {
}
state.stateObjectsDirty[addr] = struct{}{}
}
- // Deep copy the destruction flag.
- for addr := range s.stateObjectsDestruct {
- state.stateObjectsDestruct[addr] = struct{}{}
+ // Deep copy the destruction markers.
+ for addr, value := range s.stateObjectsDestruct {
+ state.stateObjectsDestruct[addr] = value
}
+ // Deep copy the state changes made in the scope of block
+ // along with their original values.
+ state.accounts = copySet(s.accounts)
+ state.storages = copy2DSet(s.storages)
+ state.accountsOrigin = copySet(state.accountsOrigin)
+ state.storagesOrigin = copy2DSet(state.storagesOrigin)
+
+ // Deep copy the logs occurred in the scope of block
for hash, logs := range s.logs {
cpy := make([]*types.Log, len(logs))
for i, l := range logs {
@@ -826,6 +910,7 @@ func (s *StateDB) Copy() *StateDB {
}
state.logs[hash] = cpy
}
+ // Deep copy the preimages occurred in the scope of block
for hash, preimage := range s.preimages {
state.preimages[hash] = preimage
}
@@ -844,27 +929,6 @@ func (s *StateDB) Copy() *StateDB {
if s.prefetcher != nil {
state.prefetcher = s.prefetcher.copy()
}
- if s.snap != nil {
- // In order for the miner to be able to use and make additions
- // to the snapshot tree, we need to copy that as well.
- // Otherwise, any block mined by ourselves will cause gaps in the tree,
- // and force the miner to operate trie-backed only
- state.snap = s.snap
-
- // deep copy needed
- state.snapAccounts = make(map[common.Hash][]byte, len(s.snapAccounts))
- for k, v := range s.snapAccounts {
- state.snapAccounts[k] = v
- }
- state.snapStorage = make(map[common.Hash]map[common.Hash][]byte, len(s.snapStorage))
- for k, v := range s.snapStorage {
- temp := make(map[common.Hash][]byte, len(v))
- for kk, vv := range v {
- temp[kk] = vv
- }
- state.snapStorage[k] = temp
- }
- }
return state
}
@@ -913,24 +977,26 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
// Thus, we can safely ignore it here
continue
}
- if obj.suicided || (deleteEmptyObjects && obj.empty()) {
+ if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) {
obj.deleted = true
// We need to maintain account deletions explicitly (will remain
- // set indefinitely).
- s.stateObjectsDestruct[obj.address] = struct{}{}
-
- // If state snapshotting is active, also mark the destruction there.
+ // set indefinitely). Note only the first occurred self-destruct
+ // event is tracked.
+ if _, ok := s.stateObjectsDestruct[obj.address]; !ok {
+ s.stateObjectsDestruct[obj.address] = obj.origin
+ }
// Note, we can't do this only at the end of a block because multiple
// transactions within the same block might self destruct and then
// resurrect an account; but the snapshotter needs both events.
- if s.snap != nil {
- delete(s.snapAccounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect)
- delete(s.snapStorage, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect)
- }
+ delete(s.accounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect)
+ delete(s.storages, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect)
+ delete(s.accountsOrigin, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect)
+ delete(s.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect)
} else {
obj.finalise(true) // Prefetch slots in the background
}
+ obj.created = false
s.stateObjectsPending[addr] = struct{}{}
s.stateObjectsDirty[addr] = struct{}{}
@@ -974,7 +1040,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// to pull useful data from disk.
for addr := range s.stateObjectsPending {
if obj := s.stateObjects[addr]; !obj.deleted {
- obj.updateRoot(s.db)
+ obj.updateRoot()
}
}
// Now we're about to start to write changes to the trie. The trie is so far
@@ -1025,19 +1091,157 @@ func (s *StateDB) clearJournalAndRefund() {
s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries
}
+// deleteStorage iterates the storage trie belongs to the account and mark all
+// slots inside as deleted.
+func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) {
+ start := time.Now()
+ tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root)
+ if err != nil {
+ return false, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
+ }
+ it, err := tr.NodeIterator(nil)
+ if err != nil {
+ return false, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
+ }
+ var (
+ set = trienode.NewNodeSet(addrHash)
+ slots = make(map[common.Hash][]byte)
+ stateSize common.StorageSize
+ nodeSize common.StorageSize
+ )
+ for it.Next(true) {
+ // arbitrary stateSize limit, make it configurable
+ if stateSize+nodeSize > 512*1024*1024 {
+ log.Info("Skip large storage deletion", "address", addr.Hex(), "states", stateSize, "nodes", nodeSize)
+ if metrics.EnabledExpensive {
+ slotDeletionSkip.Inc(1)
+ }
+ return true, nil, nil, nil
+ }
+ if it.Leaf() {
+ slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob())
+ stateSize += common.StorageSize(common.HashLength + len(it.LeafBlob()))
+ continue
+ }
+ if it.Hash() == (common.Hash{}) {
+ continue
+ }
+ nodeSize += common.StorageSize(len(it.Path()))
+ set.AddNode(it.Path(), trienode.NewDeleted())
+ }
+ if err := it.Error(); err != nil {
+ return false, nil, nil, err
+ }
+ if metrics.EnabledExpensive {
+ if int64(len(slots)) > slotDeletionMaxCount.Value() {
+ slotDeletionMaxCount.Update(int64(len(slots)))
+ }
+ if int64(stateSize+nodeSize) > slotDeletionMaxSize.Value() {
+ slotDeletionMaxSize.Update(int64(stateSize + nodeSize))
+ }
+ slotDeletionTimer.UpdateSince(start)
+ slotDeletionCount.Mark(int64(len(slots)))
+ slotDeletionSize.Mark(int64(stateSize + nodeSize))
+ }
+ return false, slots, set, nil
+}
+
+// handleDestruction processes all destruction markers and deletes the account
+// and associated storage slots if necessary. There are four possible situations
+// here:
+//
+// - the account was not existent and be marked as destructed
+//
+// - the account was not existent and be marked as destructed,
+// however, it's resurrected later in the same block.
+//
+// - the account was existent and be marked as destructed
+//
+// - the account was existent and be marked as destructed,
+// however it's resurrected later in the same block.
+//
+// In case (a), nothing needs to be deleted; the nil-to-nil transition can be ignored.
+//
+// In case (b), nothing needs to be deleted; nil is used as the original value
+// for the newly created account and its storages.
+//
+// In case (c), the **original** account along with its storages should be deleted,
+// with their values tracked as the original values.
+//
+// In case (d), the **original** account along with its storages should be deleted,
+// with their values tracked as the original values.
+func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.Address]struct{}, error) {
+ incomplete := make(map[common.Address]struct{})
+ for addr, prev := range s.stateObjectsDestruct {
+ // The original account was non-existing, and it's marked as destructed
+ // in the scope of block. It can be case (a) or (b).
+ // - for (a), skip it without doing anything.
+ // - for (b), track account's original value as nil. It may overwrite
+ // the data cached in s.accountsOrigin set by 'updateStateObject'.
+ addrHash := crypto.Keccak256Hash(addr[:])
+ if prev == nil {
+ if _, ok := s.accounts[addrHash]; ok {
+ s.accountsOrigin[addr] = nil // case (b)
+ }
+ continue
+ }
+ // It can overwrite the data in s.accountsOrigin set by 'updateStateObject'.
+ s.accountsOrigin[addr] = types.SlimAccountRLP(*prev) // case (c) or (d)
+
+ // Short circuit if the storage was empty.
+ if prev.Root == types.EmptyRootHash {
+ continue
+ }
+ // Remove storage slots belong to the account.
+ aborted, slots, set, err := s.deleteStorage(addr, addrHash, prev.Root)
+ if err != nil {
+ return nil, fmt.Errorf("failed to delete storage, err: %w", err)
+ }
+ // The storage is too huge to handle, skip it but mark as incomplete.
+ // For case (d), the account might be resurrected with a few slots
+ // created. In this case, wipe the entire storage state diff because
+ // of aborted deletion.
+ if aborted {
+ incomplete[addr] = struct{}{}
+ delete(s.storagesOrigin, addr)
+ continue
+ }
+ if s.storagesOrigin[addr] == nil {
+ s.storagesOrigin[addr] = slots
+ } else {
+ // It can overwrite the data in s.storagesOrigin[addr] set by
+ // 'object.updateTrie'.
+ for key, val := range slots {
+ s.storagesOrigin[addr][key] = val
+ }
+ }
+ if err := nodes.Merge(set); err != nil {
+ return nil, err
+ }
+ }
+ return incomplete, nil
+}
+
// Commit writes the state to the underlying in-memory trie database.
-func (s *StateDB) Commit(deleteEmptyObjects bool, referenceRoot bool) (common.Hash, error) {
- return s.commit(deleteEmptyObjects, nil, common.Hash{}, common.Hash{}, referenceRoot)
+// Once the state is committed, tries cached in stateDB (including account
+// trie, storage tries) will no longer be functional. A new state instance
+// must be created with new root and updated database for accessing post-
+// commit states.
+//
+// The associated block number of the state transition is also provided
+// for more chain context.
+func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, referenceRoot bool) (common.Hash, error) {
+ return s.commit(block, deleteEmptyObjects, nil, common.Hash{}, common.Hash{}, referenceRoot)
}
// CommitWithSnap writes the state to the underlying in-memory trie database and
// generates a snapshot layer for the newly committed state.
-func (s *StateDB) CommitWithSnap(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) {
- return s.commit(deleteEmptyObjects, snaps, blockHash, parentHash, referenceRoot)
+func (s *StateDB) CommitWithSnap(block uint64, deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) {
+ return s.commit(block, deleteEmptyObjects, snaps, blockHash, parentHash, referenceRoot)
}
// Commit writes the state to the underlying in-memory trie database.
-func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) {
+func (s *StateDB) commit(block uint64, deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) {
// Short circuit in case any database failure occurred earlier.
if s.dbErr != nil {
return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
@@ -1054,37 +1258,38 @@ func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHas
nodes = trienode.NewMergedNodeSet()
codeWriter = s.db.DiskDB().NewBatch()
)
+ // Handle all state deletions first
+ incomplete, err := s.handleDestruction(nodes)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ // Handle all state updates afterwards
for addr := range s.stateObjectsDirty {
- if obj := s.stateObjects[addr]; !obj.deleted {
- // Write any contract code associated with the state object
- if obj.code != nil && obj.dirtyCode {
- rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
- obj.dirtyCode = false
- }
- // Write any storage changes in the state object to its storage trie
- set, err := obj.commitTrie(s.db)
- if err != nil {
+ obj := s.stateObjects[addr]
+ if obj.deleted {
+ continue
+ }
+ // Write any contract code associated with the state object
+ if obj.code != nil && obj.dirtyCode {
+ rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
+ obj.dirtyCode = false
+ }
+ // Write any storage changes in the state object to its storage trie
+ set, err := obj.commit()
+ if err != nil {
+ return common.Hash{}, err
+ }
+ // Merge the dirty nodes of storage trie into global set. It is possible
+ // that the account was destructed and then resurrected in the same block.
+ // In this case, the node set is shared by both accounts.
+ if set != nil {
+ if err := nodes.Merge(set); err != nil {
return common.Hash{}, err
}
- // Merge the dirty nodes of storage trie into global set.
- if set != nil {
- if err := nodes.Merge(set); err != nil {
- return common.Hash{}, err
- }
- updates, deleted := set.Size()
- storageTrieNodesUpdated += updates
- storageTrieNodesDeleted += deleted
- }
+ updates, deleted := set.Size()
+ storageTrieNodesUpdated += updates
+ storageTrieNodesDeleted += deleted
}
- // If the contract is destructed, the storage is still left in the
- // database as dangling data. Theoretically it's should be wiped from
- // database as well, but in hash-based-scheme it's extremely hard to
- // determine that if the trie nodes are also referenced by other storage,
- // and in path-based-scheme some technical challenges are still unsolved.
- // Although it won't affect the correctness but please fix it TODO(rjl493456442).
- }
- if len(s.stateObjectsDirty) > 0 {
- s.stateObjectsDirty = make(map[common.Address]struct{})
}
if codeWriter.ValueSize() > 0 {
if err := codeWriter.Write(); err != nil {
@@ -1096,7 +1301,10 @@ func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHas
if metrics.EnabledExpensive {
start = time.Now()
}
- root, set := s.trie.Commit(true)
+ root, set, err := s.trie.Commit(true)
+ if err != nil {
+ return common.Hash{}, err
+ }
// Merge the dirty nodes of account trie into global set
if set != nil {
if err := nodes.Merge(set); err != nil {
@@ -1124,16 +1332,13 @@ func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHas
if s.snap == nil {
log.Error(fmt.Sprintf("cannot commit with snaps without a pre-existing snap layer, parentHash: %s, blockHash: %s", parentHash, blockHash))
}
- if err := snaps.Update(blockHash, root, parentHash, s.convertAccountSet(s.stateObjectsDestruct), s.snapAccounts, s.snapStorage); err != nil {
+ if err := snaps.Update(blockHash, root, parentHash, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages); err != nil {
log.Warn("Failed to update snapshot tree", "to", root, "err", err)
}
if metrics.EnabledExpensive {
s.SnapshotCommits += time.Since(start)
}
- s.snap, s.snapAccounts, s.snapStorage = nil, nil, nil
- }
- if len(s.stateObjectsDestruct) > 0 {
- s.stateObjectsDestruct = make(map[common.Address]struct{})
+ s.snap = nil
}
if root == (common.Hash{}) {
root = types.EmptyRootHash
@@ -1145,11 +1350,11 @@ func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHas
if root != origin {
start := time.Now()
if referenceRoot {
- if err := s.db.TrieDB().UpdateAndReferenceRoot(root, origin, nodes); err != nil {
+ if err := s.db.TrieDB().UpdateAndReferenceRoot(root, origin, block, nodes, triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete)); err != nil {
return common.Hash{}, err
}
} else {
- if err := s.db.TrieDB().Update(root, origin, nodes); err != nil {
+ if err := s.db.TrieDB().Update(root, origin, block, nodes, triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete)); err != nil {
return common.Hash{}, err
}
}
@@ -1158,6 +1363,13 @@ func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHas
s.TrieDBCommits += time.Since(start)
}
}
+ // Clear all internal flags at the end of commit operation.
+ s.accounts = make(map[common.Hash][]byte)
+ s.storages = make(map[common.Hash]map[common.Hash][]byte)
+ s.accountsOrigin = make(map[common.Address][]byte)
+ s.storagesOrigin = make(map[common.Address]map[common.Hash][]byte)
+ s.stateObjectsDirty = make(map[common.Address]struct{})
+ s.stateObjectsDestruct = make(map[common.Address]*types.StateAccount)
return root, nil
}
@@ -1238,7 +1450,7 @@ func (s *StateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addre
}
// convertAccountSet converts a provided account set from address keyed to hash keyed.
-func (s *StateDB) convertAccountSet(set map[common.Address]struct{}) map[common.Hash]struct{} {
+func (s *StateDB) convertAccountSet(set map[common.Address]*types.StateAccount) map[common.Hash]struct{} {
ret := make(map[common.Hash]struct{}, len(set))
for addr := range set {
obj, exist := s.stateObjects[addr]
@@ -1250,3 +1462,24 @@ func (s *StateDB) convertAccountSet(set map[common.Address]struct{}) map[common.
}
return ret
}
+
+// copySet returns a deep-copied set.
+func copySet[k comparable](set map[k][]byte) map[k][]byte {
+ copied := make(map[k][]byte, len(set))
+ for key, val := range set {
+ copied[key] = common.CopyBytes(val)
+ }
+ return copied
+}
+
+// copy2DSet returns a two-dimensional deep-copied set.
+func copy2DSet[k comparable](set map[k]map[common.Hash][]byte) map[k]map[common.Hash][]byte {
+ copied := make(map[k]map[common.Hash][]byte, len(set))
+ for addr, subset := range set {
+ copied[addr] = make(map[common.Hash][]byte, len(subset))
+ for key, val := range subset {
+ copied[addr][key] = common.CopyBytes(val)
+ }
+ }
+ return copied
+}
diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go
new file mode 100644
index 0000000000..90889334bb
--- /dev/null
+++ b/core/state/statedb_fuzz_test.go
@@ -0,0 +1,386 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "math/big"
+ "math/rand"
+ "reflect"
+ "strings"
+ "testing"
+ "testing/quick"
+
+ "github.com/ava-labs/coreth/core/rawdb"
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/trie"
+ "github.com/ava-labs/coreth/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+// A stateTest checks that the state changes are correctly captured. Instances
+// of this test with pseudorandom content are created by Generate.
+//
+// The test works as follows:
+//
+// A list of states is created by applying actions. The state changes between
+// each state instance are tracked and verified.
+type stateTest struct {
+ addrs []common.Address // all account addresses
+ actions [][]testAction // modifications to the state, grouped by block
+ chunk int // The number of actions per chunk
+ err error // failure details are reported through this field
+}
+
+// newStateTestAction creates a random action that changes state.
+func newStateTestAction(addr common.Address, r *rand.Rand, index int) testAction {
+ actions := []testAction{
+ {
+ name: "SetBalance",
+ fn: func(a testAction, s *StateDB) {
+ s.SetBalance(addr, big.NewInt(a.args[0]))
+ },
+ args: make([]int64, 1),
+ },
+ {
+ name: "SetNonce",
+ fn: func(a testAction, s *StateDB) {
+ s.SetNonce(addr, uint64(a.args[0]))
+ },
+ args: make([]int64, 1),
+ },
+ {
+ name: "SetState",
+ fn: func(a testAction, s *StateDB) {
+ var key, val common.Hash
+ binary.BigEndian.PutUint16(key[:], uint16(a.args[0]))
+ binary.BigEndian.PutUint16(val[:], uint16(a.args[1]))
+ s.SetState(addr, key, val)
+ },
+ args: make([]int64, 2),
+ },
+ {
+ name: "SetCode",
+ fn: func(a testAction, s *StateDB) {
+ code := make([]byte, 16)
+ binary.BigEndian.PutUint64(code, uint64(a.args[0]))
+ binary.BigEndian.PutUint64(code[8:], uint64(a.args[1]))
+ s.SetCode(addr, code)
+ },
+ args: make([]int64, 2),
+ },
+ {
+ name: "CreateAccount",
+ fn: func(a testAction, s *StateDB) {
+ s.CreateAccount(addr)
+ },
+ },
+ {
+ name: "Selfdestruct",
+ fn: func(a testAction, s *StateDB) {
+ s.SelfDestruct(addr)
+ },
+ },
+ }
+ var nonRandom = index != -1
+ if index == -1 {
+ index = r.Intn(len(actions))
+ }
+ action := actions[index]
+ var names []string
+ if !action.noAddr {
+ names = append(names, addr.Hex())
+ }
+ for i := range action.args {
+ if nonRandom {
+ action.args[i] = rand.Int63n(10000) + 1 // set balance to non-zero
+ } else {
+ action.args[i] = rand.Int63n(10000)
+ }
+ names = append(names, fmt.Sprint(action.args[i]))
+ }
+ action.name += " " + strings.Join(names, ", ")
+ return action
+}
+
+// Generate returns a new state test of the given size. All randomness is
+// derived from r.
+func (*stateTest) Generate(r *rand.Rand, size int) reflect.Value {
+ addrs := make([]common.Address, 5)
+ for i := range addrs {
+ addrs[i][0] = byte(i)
+ }
+ actions := make([][]testAction, rand.Intn(5)+1)
+
+ for i := 0; i < len(actions); i++ {
+ actions[i] = make([]testAction, size)
+ for j := range actions[i] {
+ if j == 0 {
+ // Always include a set balance action to make sure
+ // the state changes are not empty.
+ actions[i][j] = newStateTestAction(common.HexToAddress("0xdeadbeef"), r, 0)
+ continue
+ }
+ actions[i][j] = newStateTestAction(addrs[r.Intn(len(addrs))], r, -1)
+ }
+ }
+ chunk := int(math.Sqrt(float64(size)))
+ if size > 0 && chunk == 0 {
+ chunk = 1
+ }
+ return reflect.ValueOf(&stateTest{
+ addrs: addrs,
+ actions: actions,
+ chunk: chunk,
+ })
+}
+
+func (test *stateTest) String() string {
+ out := new(bytes.Buffer)
+ for i, actions := range test.actions {
+ fmt.Fprintf(out, "---- block %d ----\n", i)
+ for j, action := range actions {
+ if j%test.chunk == 0 {
+ fmt.Fprintf(out, "---- transaction %d ----\n", j/test.chunk)
+ }
+ fmt.Fprintf(out, "%4d: %s\n", j%test.chunk, action.name)
+ }
+ }
+ return out.String()
+}
+
+func (test *stateTest) run() bool {
+ var (
+ roots []common.Hash
+ accountList []map[common.Address][]byte
+ storageList []map[common.Address]map[common.Hash][]byte
+ onCommit = func(states *triestate.Set) {
+ accountList = append(accountList, copySet(states.Accounts))
+ storageList = append(storageList, copy2DSet(states.Storages))
+ }
+ disk = rawdb.NewMemoryDatabase()
+ tdb = trie.NewDatabaseWithConfig(disk, &trie.Config{OnCommit: onCommit})
+ sdb = NewDatabaseWithNodeDB(disk, tdb)
+ byzantium = rand.Intn(2) == 0
+ )
+ for i, actions := range test.actions {
+ root := types.EmptyRootHash
+ if i != 0 {
+ root = roots[len(roots)-1]
+ }
+ state, err := New(root, sdb, nil)
+ if err != nil {
+ panic(err)
+ }
+ for i, action := range actions {
+ if i%test.chunk == 0 && i != 0 {
+ if byzantium {
+ state.Finalise(true) // call finalise at the transaction boundary
+ } else {
+ state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary
+ }
+ }
+ action.fn(action, state)
+ }
+ if byzantium {
+ state.Finalise(true) // call finalise at the transaction boundary
+ } else {
+ state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary
+ }
+ nroot, err := state.Commit(0, true, false) // call commit at the block boundary
+ if err != nil {
+ panic(err)
+ }
+ if nroot == root {
+ return true // filter out non-change state transition
+ }
+ roots = append(roots, nroot)
+ }
+ for i := 0; i < len(test.actions); i++ {
+ root := types.EmptyRootHash
+ if i != 0 {
+ root = roots[i-1]
+ }
+ test.err = test.verify(root, roots[i], tdb, accountList[i], storageList[i])
+ if test.err != nil {
+ return false
+ }
+ }
+ return true
+}
+
+// verifyAccountCreation is called once the state diff says that a
+// specific account was not present. A series of checks will be performed to
+// ensure the state diff is correct, includes:
+//
+// - the account was indeed not present in trie
+// - the account is present in new trie, nil->nil is regarded as invalid
+// - the slots transition is correct
+func (test *stateTest) verifyAccountCreation(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addr common.Address, slots map[common.Hash][]byte) error {
+ // Verify account change
+ addrHash := crypto.Keccak256Hash(addr.Bytes())
+ oBlob, err := otr.Get(addrHash.Bytes())
+ if err != nil {
+ return err
+ }
+ nBlob, err := ntr.Get(addrHash.Bytes())
+ if err != nil {
+ return err
+ }
+ if len(oBlob) != 0 {
+ return fmt.Errorf("unexpected account in old trie, %x", addrHash)
+ }
+ if len(nBlob) == 0 {
+ return fmt.Errorf("missing account in new trie, %x", addrHash)
+ }
+
+ // Verify storage changes
+ var nAcct types.StateAccount
+ if err := rlp.DecodeBytes(nBlob, &nAcct); err != nil {
+ return err
+ }
+ // Account has no slot, empty slot set is expected
+ if nAcct.Root == types.EmptyRootHash {
+ if len(slots) != 0 {
+ return fmt.Errorf("unexpected slot changes %x", addrHash)
+ }
+ return nil
+ }
+ // Account has slots, ensure all new slots are contained
+ st, err := trie.New(trie.StorageTrieID(next, addrHash, nAcct.Root), db)
+ if err != nil {
+ return err
+ }
+ for key, val := range slots {
+ st.Update(key.Bytes(), val)
+ }
+ if st.Hash() != types.EmptyRootHash {
+ return errors.New("invalid slot changes")
+ }
+ return nil
+}
+
+// verifyAccountUpdate is called once the state diff says that a
+// specific account was present. A series of checks will be performed to
+// ensure the state diff is correct, includes:
+//
+// - the account was indeed present in trie
+// - the account in old trie matches the provided value
+// - the slots transition is correct
+func (test *stateTest) verifyAccountUpdate(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addr common.Address, origin []byte, slots map[common.Hash][]byte) error {
+ // Verify account change
+ addrHash := crypto.Keccak256Hash(addr.Bytes())
+ oBlob, err := otr.Get(addrHash.Bytes())
+ if err != nil {
+ return err
+ }
+ nBlob, err := ntr.Get(addrHash.Bytes())
+ if err != nil {
+ return err
+ }
+ if len(oBlob) == 0 {
+ return fmt.Errorf("missing account in old trie, %x", addrHash)
+ }
+ full, err := types.FullAccountRLP(origin)
+ if err != nil {
+ return err
+ }
+ if !bytes.Equal(full, oBlob) {
+ return fmt.Errorf("account value is not matched, %x", addrHash)
+ }
+
+ // Decode accounts
+ var (
+ oAcct types.StateAccount
+ nAcct types.StateAccount
+ nRoot common.Hash
+ )
+ if err := rlp.DecodeBytes(oBlob, &oAcct); err != nil {
+ return err
+ }
+ if len(nBlob) == 0 {
+ nRoot = types.EmptyRootHash
+ } else {
+ if err := rlp.DecodeBytes(nBlob, &nAcct); err != nil {
+ return err
+ }
+ nRoot = nAcct.Root
+ }
+
+ // Verify storage
+ st, err := trie.New(trie.StorageTrieID(next, addrHash, nRoot), db)
+ if err != nil {
+ return err
+ }
+ for key, val := range slots {
+ st.Update(key.Bytes(), val)
+ }
+ if st.Hash() != oAcct.Root {
+ return errors.New("invalid slot changes")
+ }
+ return nil
+}
+
+func (test *stateTest) verify(root common.Hash, next common.Hash, db *trie.Database, accountsOrigin map[common.Address][]byte, storagesOrigin map[common.Address]map[common.Hash][]byte) error {
+ otr, err := trie.New(trie.StateTrieID(root), db)
+ if err != nil {
+ return err
+ }
+ ntr, err := trie.New(trie.StateTrieID(next), db)
+ if err != nil {
+ return err
+ }
+ for addr, account := range accountsOrigin {
+ var err error
+ if len(account) == 0 {
+ err = test.verifyAccountCreation(next, db, otr, ntr, addr, storagesOrigin[addr])
+ } else {
+ err = test.verifyAccountUpdate(next, db, otr, ntr, addr, accountsOrigin[addr], storagesOrigin[addr])
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func TestStateChanges(t *testing.T) {
+ config := &quick.Config{MaxCount: 1000}
+ err := quick.Check((*stateTest).run, config)
+ if cerr, ok := err.(*quick.CheckError); ok {
+ test := cerr.In[0].(*stateTest)
+ t.Errorf("%v:\n%s", test.err, test)
+ } else if err != nil {
+ t.Error(err)
+ }
+}
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index 7d9928a413..1deb24e107 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -29,6 +29,7 @@ package state
import (
"bytes"
"encoding/binary"
+ "errors"
"fmt"
"math"
"math/big"
@@ -42,6 +43,7 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/state/snapshot"
"github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/trie"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
@@ -114,7 +116,7 @@ func TestIntermediateLeaks(t *testing.T) {
}
// Commit and cross check the databases.
- transRoot, err := transState.Commit(false, false)
+ transRoot, err := transState.Commit(0, false, false)
if err != nil {
t.Fatalf("failed to commit transition state: %v", err)
}
@@ -122,7 +124,7 @@ func TestIntermediateLeaks(t *testing.T) {
t.Errorf("can not commit trie %v to persistent database", transRoot.Hex())
}
- finalRoot, err := finalState.Commit(false, false)
+ finalRoot, err := finalState.Commit(0, false, false)
if err != nil {
t.Fatalf("failed to commit final state: %v", err)
}
@@ -309,9 +311,9 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction {
},
},
{
- name: "Suicide",
+ name: "SelfDestruct",
fn: func(a testAction, s *StateDB) {
- s.Suicide(addr)
+ s.SelfDestruct(addr)
},
},
{
@@ -461,7 +463,7 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
}
// Check basic accessor methods.
checkeq("Exist", state.Exist(addr), checkstate.Exist(addr))
- checkeq("HasSuicided", state.HasSuicided(addr), checkstate.HasSuicided(addr))
+ checkeq("HasSelfdestructed", state.HasSelfDestructed(addr), checkstate.HasSelfDestructed(addr))
checkeq("GetBalance", state.GetBalance(addr), checkstate.GetBalance(addr))
checkeq("GetNonce", state.GetNonce(addr), checkstate.GetNonce(addr))
checkeq("GetCode", state.GetCode(addr), checkstate.GetCode(addr))
@@ -493,9 +495,9 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
}
func TestTouchDelete(t *testing.T) {
- s := newStateTest()
+ s := newStateEnv()
s.state.GetOrNewStateObject(common.Address{})
- root, _ := s.state.Commit(false, false)
+ root, _ := s.state.Commit(0, false, false)
s.state, _ = NewWithSnapshot(root, s.state.db, s.state.snap)
snapshot := s.state.Snapshot()
@@ -530,7 +532,8 @@ func TestCopyOfCopy(t *testing.T) {
//
// See https://github.com/ethereum/go-ethereum/issues/20106.
func TestCopyCommitCopy(t *testing.T) {
- state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ tdb := NewDatabase(rawdb.NewMemoryDatabase())
+ state, _ := New(types.EmptyRootHash, tdb, nil)
// Create an account and check if the retrieved balance is correct
addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
@@ -567,20 +570,6 @@ func TestCopyCommitCopy(t *testing.T) {
if val := copyOne.GetCommittedState(addr, skey); val != (common.Hash{}) {
t.Fatalf("first copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{})
}
-
- copyOne.Commit(false, false)
- if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
- t.Fatalf("first copy post-commit balance mismatch: have %v, want %v", balance, 42)
- }
- if code := copyOne.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
- t.Fatalf("first copy post-commit code mismatch: have %x, want %x", code, []byte("hello"))
- }
- if val := copyOne.GetState(addr, skey); val != sval {
- t.Fatalf("first copy post-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
- }
- if val := copyOne.GetCommittedState(addr, skey); val != sval {
- t.Fatalf("first copy post-commit committed storage slot mismatch: have %x, want %x", val, sval)
- }
// Copy the copy and check the balance once more
copyTwo := copyOne.Copy()
if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
@@ -592,8 +581,23 @@ func TestCopyCommitCopy(t *testing.T) {
if val := copyTwo.GetState(addr, skey); val != sval {
t.Fatalf("second copy non-committed storage slot mismatch: have %x, want %x", val, sval)
}
- if val := copyTwo.GetCommittedState(addr, skey); val != sval {
- t.Fatalf("second copy post-commit committed storage slot mismatch: have %x, want %x", val, sval)
+ if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) {
+ t.Fatalf("second copy committed storage slot mismatch: have %x, want %x", val, sval)
+ }
+ // Commit state, ensure states can be loaded from disk
+ root, _ := state.Commit(0, false, false)
+ state, _ = New(root, tdb, nil)
+ if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
+ t.Fatalf("state post-commit balance mismatch: have %v, want %v", balance, 42)
+ }
+ if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
+ t.Fatalf("state post-commit code mismatch: have %x, want %x", code, []byte("hello"))
+ }
+ if val := state.GetState(addr, skey); val != sval {
+ t.Fatalf("state post-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
+ }
+ if val := state.GetCommittedState(addr, skey); val != sval {
+ t.Fatalf("state post-commit committed storage slot mismatch: have %x, want %x", val, sval)
}
}
@@ -653,19 +657,6 @@ func TestCopyCopyCommitCopy(t *testing.T) {
if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) {
t.Fatalf("second copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{})
}
- copyTwo.Commit(false, false)
- if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
- t.Fatalf("second copy post-commit balance mismatch: have %v, want %v", balance, 42)
- }
- if code := copyTwo.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
- t.Fatalf("second copy post-commit code mismatch: have %x, want %x", code, []byte("hello"))
- }
- if val := copyTwo.GetState(addr, skey); val != sval {
- t.Fatalf("second copy post-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
- }
- if val := copyTwo.GetCommittedState(addr, skey); val != sval {
- t.Fatalf("second copy post-commit committed storage slot mismatch: have %x, want %x", val, sval)
- }
// Copy the copy-copy and check the balance once more
copyThree := copyTwo.Copy()
if balance := copyThree.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
@@ -677,11 +668,56 @@ func TestCopyCopyCommitCopy(t *testing.T) {
if val := copyThree.GetState(addr, skey); val != sval {
t.Fatalf("third copy non-committed storage slot mismatch: have %x, want %x", val, sval)
}
- if val := copyThree.GetCommittedState(addr, skey); val != sval {
+ if val := copyThree.GetCommittedState(addr, skey); val != (common.Hash{}) {
t.Fatalf("third copy committed storage slot mismatch: have %x, want %x", val, sval)
}
}
+// TestCommitCopy tests that a copy created from a committed state is not functional.
+func TestCommitCopy(t *testing.T) {
+ state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
+
+ // Create an account and check if the retrieved balance is correct
+ addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
+ skey := common.HexToHash("aaa")
+ sval := common.HexToHash("bbb")
+
+ state.SetBalance(addr, big.NewInt(42)) // Change the account trie
+ state.SetCode(addr, []byte("hello")) // Change an external metadata
+ state.SetState(addr, skey, sval) // Change the storage trie
+
+ if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
+ t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42)
+ }
+ if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
+ t.Fatalf("initial code mismatch: have %x, want %x", code, []byte("hello"))
+ }
+ if val := state.GetState(addr, skey); val != sval {
+ t.Fatalf("initial non-committed storage slot mismatch: have %x, want %x", val, sval)
+ }
+ if val := state.GetCommittedState(addr, skey); val != (common.Hash{}) {
+ t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
+ }
+ // Copy the committed state database, the copied one is not functional.
+ state.Commit(0, true, false)
+ copied := state.Copy()
+ if balance := copied.GetBalance(addr); balance.Cmp(big.NewInt(0)) != 0 {
+ t.Fatalf("unexpected balance: have %v", balance)
+ }
+ if code := copied.GetCode(addr); code != nil {
+ t.Fatalf("unexpected code: have %x", code)
+ }
+ if val := copied.GetState(addr, skey); val != (common.Hash{}) {
+ t.Fatalf("unexpected storage slot: have %x", val)
+ }
+ if val := copied.GetCommittedState(addr, skey); val != (common.Hash{}) {
+ t.Fatalf("unexpected storage slot: have %x", val)
+ }
+ if !errors.Is(copied.Error(), trie.ErrCommitted) {
+ t.Fatalf("unexpected state error, %v", copied.Error())
+ }
+}
+
// TestDeleteCreateRevert tests a weird state transition corner case that we hit
// while changing the internals of StateDB. The workflow is that a contract is
// self-destructed, then in a follow-up transaction (but same block) it's created
@@ -697,11 +733,11 @@ func TestDeleteCreateRevert(t *testing.T) {
addr := common.BytesToAddress([]byte("so"))
state.SetBalance(addr, big.NewInt(1))
- root, _ := state.Commit(false, false)
+ root, _ := state.Commit(0, false, false)
state, _ = NewWithSnapshot(root, state.db, state.snap)
// Simulate self-destructing in one transaction, then create-reverting in another
- state.Suicide(addr)
+ state.SelfDestruct(addr)
state.Finalise(true)
id := state.Snapshot()
@@ -709,7 +745,7 @@ func TestDeleteCreateRevert(t *testing.T) {
state.RevertToSnapshot(id)
// Commit the entire state and make sure we don't crash and have the correct state
- root, _ = state.Commit(true, false)
+ root, _ = state.Commit(0, true, false)
state, _ = NewWithSnapshot(root, state.db, state.snap)
if state.getStateObject(addr) != nil {
@@ -733,7 +769,7 @@ func TestMissingTrieNodes(t *testing.T) {
a2 := common.BytesToAddress([]byte("another"))
state.SetBalance(a2, big.NewInt(100))
state.SetCode(a2, []byte{1, 2, 4})
- root, _ = state.Commit(false, false)
+ root, _ = state.Commit(0, false, false)
t.Logf("root: %x", root)
// force-flush
state.Database().TrieDB().Cap(0)
@@ -757,7 +793,7 @@ func TestMissingTrieNodes(t *testing.T) {
}
// Modify the state
state.SetBalance(addr, big.NewInt(2))
- root, err := state.Commit(false, false)
+ root, err := state.Commit(0, false, false)
if err == nil {
t.Fatalf("expected error, got root :%x", root)
}
@@ -938,12 +974,12 @@ func TestStateDBAccessList(t *testing.T) {
}
func TestMultiCoinOperations(t *testing.T) {
- s := newStateTest()
+ s := newStateEnv()
addr := common.Address{1}
assetID := common.Hash{2}
s.state.GetOrNewStateObject(addr)
- root, _ := s.state.Commit(false, false)
+ root, _ := s.state.Commit(0, false, false)
s.state, _ = NewWithSnapshot(root, s.state.db, s.state.snap)
s.state.AddBalance(addr, new(big.Int))
@@ -968,9 +1004,8 @@ func TestMultiCoinSnapshot(t *testing.T) {
sdb := NewDatabase(db)
// Create empty snapshot.Tree and StateDB
- root := common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
- // Use the root as both the stateRoot and blockHash for this test.
- snapTree := snapshot.NewTestTree(db, root, root)
+ root := types.EmptyRootHash
+ snapTree := snapshot.NewTestTree(db, common.Hash{}, root)
addr := common.Address{1}
assetID1 := common.Hash{1}
@@ -978,6 +1013,8 @@ func TestMultiCoinSnapshot(t *testing.T) {
var stateDB *StateDB
assertBalances := func(regular, multicoin1, multicoin2 int64) {
+ t.Helper()
+
balance := stateDB.GetBalance(addr)
if balance.Cmp(big.NewInt(regular)) != 0 {
t.Fatal("incorrect non-multicoin balance")
@@ -1000,31 +1037,32 @@ func TestMultiCoinSnapshot(t *testing.T) {
assertBalances(10, 0, 0)
// Commit and get the new root
- root, _ = stateDB.Commit(false, false)
+ root, _ = stateDB.Commit(0, false, false)
+ stateDB, _ = New(root, sdb, snapTree)
assertBalances(10, 0, 0)
// Create a new state from the latest root, add a multicoin balance, and
// commit it to the tree.
- stateDB, _ = New(root, sdb, snapTree)
stateDB.AddBalanceMultiCoin(addr, assetID1, big.NewInt(10))
- root, _ = stateDB.Commit(false, false)
+ root, _ = stateDB.Commit(0, false, false)
+ stateDB, _ = New(root, sdb, snapTree)
assertBalances(10, 10, 0)
// Add more layers than the cap and ensure the balances and layers are correct
for i := 0; i < 256; i++ {
- stateDB, _ = New(root, sdb, snapTree)
stateDB.AddBalanceMultiCoin(addr, assetID1, big.NewInt(1))
stateDB.AddBalanceMultiCoin(addr, assetID2, big.NewInt(2))
- root, _ = stateDB.Commit(false, false)
+ root, _ = stateDB.Commit(0, false, false)
+ stateDB, _ = New(root, sdb, snapTree)
}
assertBalances(10, 266, 512)
// Do one more add, including the regular balance which is now in the
// collapsed snapshot
- stateDB, _ = New(root, sdb, snapTree)
stateDB.AddBalance(addr, big.NewInt(1))
stateDB.AddBalanceMultiCoin(addr, assetID1, big.NewInt(1))
- _, _ = stateDB.Commit(false, false)
+ root, _ = stateDB.Commit(0, false, false)
+ stateDB, _ = New(root, sdb, snapTree)
assertBalances(11, 267, 512)
}
@@ -1045,7 +1083,7 @@ func TestGenerateMultiCoinAccounts(t *testing.T) {
t.Fatal(err)
}
stateDB.SetBalanceMultiCoin(addr, assetID, assetBalance)
- root, err := stateDB.Commit(false, false)
+ root, err := stateDB.Commit(0, false, false)
if err != nil {
t.Fatal(err)
}
@@ -1104,7 +1142,7 @@ func TestFlushOrderDataLoss(t *testing.T) {
state.SetState(common.Address{a}, common.Hash{a, s}, common.Hash{a, s})
}
}
- root, err := state.Commit(false, false)
+ root, err := state.Commit(0, false, false)
if err != nil {
t.Fatalf("failed to commit state trie: %v", err)
}
@@ -1162,3 +1200,37 @@ func TestStateDBTransientStorage(t *testing.T) {
t.Fatalf("transient storage mismatch: have %x, want %x", got, value)
}
}
+
+func TestResetObject(t *testing.T) {
+ var (
+ disk = rawdb.NewMemoryDatabase()
+ tdb = trie.NewDatabase(disk)
+ db = NewDatabaseWithNodeDB(disk, tdb)
+ snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, common.Hash{}, types.EmptyRootHash)
+ state, _ = New(types.EmptyRootHash, db, snaps)
+ addr = common.HexToAddress("0x1")
+ slotA = common.HexToHash("0x1")
+ slotB = common.HexToHash("0x2")
+ )
+ // Initialize account with balance and storage in first transaction.
+ state.SetBalance(addr, big.NewInt(1))
+ state.SetState(addr, slotA, common.BytesToHash([]byte{0x1}))
+ state.IntermediateRoot(true)
+
+ // Reset account and mutate balance and storages
+ state.CreateAccount(addr)
+ state.SetBalance(addr, big.NewInt(2))
+ state.SetState(addr, slotB, common.BytesToHash([]byte{0x2}))
+ root, _ := state.CommitWithSnap(0, true, snaps, common.Hash{}, common.Hash{}, true)
+
+ // Ensure the original account is wiped properly
+ snap := snaps.Snapshot(root)
+ slot, _ := snap.Storage(crypto.Keccak256Hash(addr.Bytes()), crypto.Keccak256Hash(slotA.Bytes()))
+ if len(slot) != 0 {
+ t.Fatalf("Unexpected storage slot")
+ }
+ slot, _ = snap.Storage(crypto.Keccak256Hash(addr.Bytes()), crypto.Keccak256Hash(slotB.Bytes()))
+ if !bytes.Equal(slot, []byte{0x2}) {
+ t.Fatalf("Unexpected storage slot value %v", slot)
+ }
+}
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index 1237f3a497..9930be6e97 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -22,6 +22,7 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/ethdb"
+ "github.com/ava-labs/coreth/trie"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
@@ -38,7 +39,7 @@ type testAccount struct {
func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) {
// Create an empty state
db := rawdb.NewMemoryDatabase()
- sdb := NewDatabase(db)
+ sdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true})
state, _ := New(types.EmptyRootHash, sdb, nil)
// Fill it with some arbitrary data
@@ -60,13 +61,13 @@ func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) {
if i%5 == 0 {
for j := byte(0); j < 5; j++ {
hash := crypto.Keccak256Hash([]byte{i, i, i, i, i, j, j})
- obj.SetState(sdb, hash, hash)
+ obj.SetState(hash, hash)
}
}
state.updateStateObject(obj)
accounts = append(accounts, acc)
}
- root, _ := state.Commit(false, false)
+ root, _ := state.Commit(0, false, false)
// Return the generated state
return db, sdb, root, accounts
diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go
index fcd751aae4..1694f95949 100644
--- a/core/state/trie_prefetcher.go
+++ b/core/state/trie_prefetcher.go
@@ -321,7 +321,7 @@ func (sf *subfetcher) loop() {
}
sf.trie = trie
} else {
- trie, err := sf.db.OpenStorageTrie(sf.state, sf.owner, sf.root)
+ trie, err := sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root)
if err != nil {
log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
return
diff --git a/core/state_processor.go b/core/state_processor.go
index 67c52c3af6..26d873e59d 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -162,6 +162,6 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
}
// Create a new context to be used in the EVM environment
blockContext := NewEVMBlockContext(header, bc, author)
- vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg)
+ vmenv := vm.NewEVM(blockContext, vm.TxContext{BlobHashes: tx.BlobHashes()}, statedb, config, cfg)
return applyTransaction(msg, config, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv)
}
diff --git a/core/state_processor_test.go b/core/state_processor_test.go
index 9cba07bba4..38a1d7ea51 100644
--- a/core/state_processor_test.go
+++ b/core/state_processor_test.go
@@ -32,6 +32,7 @@ import (
"github.com/ava-labs/coreth/consensus"
"github.com/ava-labs/coreth/consensus/dummy"
+ "github.com/ava-labs/coreth/consensus/misc/eip4844"
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/core/vm"
@@ -40,11 +41,18 @@ import (
"github.com/ava-labs/coreth/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/holiman/uint256"
"golang.org/x/crypto/sha3"
)
+func mkConfig() *params.ChainConfig {
+ config := *params.TestChainConfig
+ config.CancunTime = utils.NewUint64(0) // Enable Cancun for blobTx support
+ return &config
+}
+
var (
- config = params.TestChainConfig
+ config = mkConfig()
signer = types.LatestSigner(config)
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
testAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
@@ -84,6 +92,22 @@ func mkDynamicCreationTx(nonce uint64, gasLimit uint64, gasTipCap, gasFeeCap *bi
return tx
}
+func mkBlobTx(t testing.TB, nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap *big.Int, hashes []common.Hash) *types.Transaction {
+ tx, err := types.SignTx(types.NewTx(&types.BlobTx{
+ Nonce: nonce,
+ GasTipCap: uint256.MustFromBig(gasTipCap),
+ GasFeeCap: uint256.MustFromBig(gasFeeCap),
+ Gas: gasLimit,
+ To: to,
+ BlobHashes: hashes,
+ Value: new(uint256.Int),
+ }), signer, testKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return tx
+}
+
func u64(val uint64) *uint64 { return &val }
// TestStateProcessorErrors tests the output from the 'core' errors
@@ -104,8 +128,10 @@ func TestStateProcessorErrors(t *testing.T) {
},
GasLimit: params.CortinaGasLimit,
}
- blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false)
+ blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false)
+ tooBigInitCode = [params.MaxInitCodeSize + 1]byte{}
)
+
defer blockchain.Stop()
bigNumber := new(big.Int).SetBytes(common.FromHex("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"))
tooBigNumber := new(big.Int).Set(bigNumber)
@@ -202,6 +228,24 @@ func TestStateProcessorErrors(t *testing.T) {
},
want: "could not apply tx 0 [0xd82a0c2519acfeac9a948258c47e784acd20651d9d80f9a1c67b4137651c3a24]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 4000000000000000000 want 2431633873983640103894990685182446064918669677978451844828609264166175722438635000",
},
+ { // ErrMaxInitCodeSizeExceeded
+ txs: []*types.Transaction{
+ mkDynamicCreationTx(0, 500000, common.Big0, big.NewInt(params.ApricotPhase3InitialBaseFee), tooBigInitCode[:]),
+ },
+ want: "could not apply tx 0 [0x18a05f40f29ff16d5287f6f88b21c9f3c7fbc268f707251144996294552c4cd6]: max initcode size exceeded: code size 49153 limit 49152",
+ },
+ { // ErrIntrinsicGas: Not enough gas to cover init code
+ txs: []*types.Transaction{
+ mkDynamicCreationTx(0, 54299, common.Big0, big.NewInt(params.ApricotPhase3InitialBaseFee), make([]byte, 320)),
+ },
+ want: "could not apply tx 0 [0x849278f616d51ab56bba399551317213ce7a10e4d9cbc3d14bb663e50cb7ab99]: intrinsic gas too low: have 54299, want 54300",
+ },
+ { // ErrBlobFeeCapTooLow
+ txs: []*types.Transaction{
+ mkBlobTx(t, 0, common.Address{}, params.TxGas, big.NewInt(1), big.NewInt(1), []common.Hash{(common.Hash{1})}),
+ },
+ want: "could not apply tx 0 [0x6c11015985ce82db691d7b2d017acda296db88b811c3c60dc71449c76256c716]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 1 baseFee: 225000000000",
+ },
} {
block := GenerateBadBlock(gspec.ToBlock(), dummy.NewFaker(), tt.txs, gspec.Config)
_, err := blockchain.InsertChain(types.Blocks{block})
@@ -305,77 +349,6 @@ func TestStateProcessorErrors(t *testing.T) {
}
}
}
-
- // ErrMaxInitCodeSizeExceeded, for this we need extra Shanghai (DUpgrade/EIP-3860) enabled.
- {
- var (
- db = rawdb.NewMemoryDatabase()
- gspec = &Genesis{
- Config: &params.ChainConfig{
- ChainID: big.NewInt(1),
- HomesteadBlock: big.NewInt(0),
- DAOForkBlock: big.NewInt(0),
- DAOForkSupport: true,
- EIP150Block: big.NewInt(0),
- EIP155Block: big.NewInt(0),
- EIP158Block: big.NewInt(0),
- ByzantiumBlock: big.NewInt(0),
- ConstantinopleBlock: big.NewInt(0),
- PetersburgBlock: big.NewInt(0),
- IstanbulBlock: big.NewInt(0),
- MuirGlacierBlock: big.NewInt(0),
- ApricotPhase1BlockTimestamp: utils.NewUint64(0),
- ApricotPhase2BlockTimestamp: utils.NewUint64(0),
- ApricotPhase3BlockTimestamp: utils.NewUint64(0),
- ApricotPhase4BlockTimestamp: utils.NewUint64(0),
- ApricotPhase5BlockTimestamp: utils.NewUint64(0),
- ApricotPhasePre6BlockTimestamp: utils.NewUint64(0),
- ApricotPhase6BlockTimestamp: utils.NewUint64(0),
- ApricotPhasePost6BlockTimestamp: utils.NewUint64(0),
- BanffBlockTimestamp: utils.NewUint64(0),
- CortinaBlockTimestamp: utils.NewUint64(0),
- DUpgradeBlockTimestamp: utils.NewUint64(0),
- },
- Alloc: GenesisAlloc{
- common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{
- Balance: big.NewInt(1000000000000000000), // 1 ether
- Nonce: 0,
- },
- },
- GasLimit: params.CortinaGasLimit,
- }
- blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false)
- tooBigInitCode = [params.MaxInitCodeSize + 1]byte{}
- smallInitCode = [320]byte{}
- )
- defer blockchain.Stop()
- for i, tt := range []struct {
- txs []*types.Transaction
- want string
- }{
- { // ErrMaxInitCodeSizeExceeded
- txs: []*types.Transaction{
- mkDynamicCreationTx(0, 500000, common.Big0, big.NewInt(params.ApricotPhase3InitialBaseFee), tooBigInitCode[:]),
- },
- want: "could not apply tx 0 [0x18a05f40f29ff16d5287f6f88b21c9f3c7fbc268f707251144996294552c4cd6]: max initcode size exceeded: code size 49153 limit 49152",
- },
- { // ErrIntrinsicGas: Not enough gas to cover init code
- txs: []*types.Transaction{
- mkDynamicCreationTx(0, 54299, common.Big0, big.NewInt(params.ApricotPhase3InitialBaseFee), smallInitCode[:]),
- },
- want: "could not apply tx 0 [0x849278f616d51ab56bba399551317213ce7a10e4d9cbc3d14bb663e50cb7ab99]: intrinsic gas too low: have 54299, want 54300",
- },
- } {
- block := GenerateBadBlock(gspec.ToBlock(), dummy.NewFaker(), tt.txs, gspec.Config)
- _, err := blockchain.InsertChain(types.Blocks{block})
- if err == nil {
- t.Fatal("block imported without errors")
- }
- if have, want := err.Error(), tt.want; have != want {
- t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want)
- }
- }
- }
}
// GenerateBadBlock constructs a "block" which contains the transactions. The transactions are not expected to be
@@ -410,6 +383,7 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr
hasher := sha3.NewLegacyKeccak256()
hasher.Write(header.Number.Bytes())
var cumulativeGas uint64
+ var nBlobs int
for _, tx := range txs {
txh := tx.Hash()
hasher.Write(txh[:])
@@ -418,8 +392,20 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr
receipt.GasUsed = tx.Gas()
receipts = append(receipts, receipt)
cumulativeGas += tx.Gas()
+ nBlobs += len(tx.BlobHashes())
}
header.Root = common.BytesToHash(hasher.Sum(nil))
+ if config.IsCancun(header.Time) {
+ var pExcess, pUsed = uint64(0), uint64(0)
+ if parent.ExcessBlobGas() != nil {
+ pExcess = *parent.ExcessBlobGas()
+ pUsed = *parent.BlobGasUsed()
+ }
+ excess := eip4844.CalcExcessBlobGas(pExcess, pUsed)
+ used := uint64(nBlobs * params.BlobTxBlobGasPerBlob)
+ header.ExcessBlobGas = &excess
+ header.BlobGasUsed = &used
+ }
// Assemble and return the final block for sealing
return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil), nil, true)
}
diff --git a/core/state_transition.go b/core/state_transition.go
index 527b7c383b..1a41b4c4a1 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -27,10 +27,12 @@
package core
import (
+ "errors"
"fmt"
"math"
"math/big"
+ "github.com/ava-labs/coreth/consensus/misc/eip4844"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/core/vm"
"github.com/ava-labs/coreth/params"
@@ -136,16 +138,18 @@ func toWordSize(size uint64) uint64 {
// A Message contains the data derived from a single transaction that is relevant to state
// processing.
type Message struct {
- To *common.Address
- From common.Address
- Nonce uint64
- Value *big.Int
- GasLimit uint64
- GasPrice *big.Int
- GasFeeCap *big.Int
- GasTipCap *big.Int
- Data []byte
- AccessList types.AccessList
+ To *common.Address
+ From common.Address
+ Nonce uint64
+ Value *big.Int
+ GasLimit uint64
+ GasPrice *big.Int
+ GasFeeCap *big.Int
+ GasTipCap *big.Int
+ Data []byte
+ AccessList types.AccessList
+ BlobGasFeeCap *big.Int
+ BlobHashes []common.Hash
// When SkipAccountChecks is true, the message nonce is not checked against the
// account nonce in state. It also disables checking that the sender is an EOA.
@@ -166,6 +170,8 @@ func TransactionToMessage(tx *types.Transaction, s types.Signer, baseFee *big.In
Data: tx.Data(),
AccessList: tx.AccessList(),
SkipAccountChecks: false,
+ BlobHashes: tx.BlobHashes(),
+ BlobGasFeeCap: tx.BlobGasFeeCap(),
}
// If baseFee provided, set gasPrice to effectiveGasPrice.
if baseFee != nil {
@@ -239,12 +245,24 @@ func (st *StateTransition) to() common.Address {
func (st *StateTransition) buyGas() error {
mgval := new(big.Int).SetUint64(st.msg.GasLimit)
mgval = mgval.Mul(mgval, st.msg.GasPrice)
- balanceCheck := mgval
+ balanceCheck := new(big.Int).Set(mgval)
if st.msg.GasFeeCap != nil {
- balanceCheck = new(big.Int).SetUint64(st.msg.GasLimit)
- balanceCheck.Mul(balanceCheck, st.msg.GasFeeCap)
+ balanceCheck.SetUint64(st.msg.GasLimit)
+ balanceCheck = balanceCheck.Mul(balanceCheck, st.msg.GasFeeCap)
balanceCheck.Add(balanceCheck, st.msg.Value)
}
+ if st.evm.ChainConfig().IsCancun(st.evm.Context.Time) {
+ if blobGas := st.blobGasUsed(); blobGas > 0 {
+ // Check that the user has enough funds to cover blobGasUsed * tx.BlobGasFeeCap
+ blobBalanceCheck := new(big.Int).SetUint64(blobGas)
+ blobBalanceCheck.Mul(blobBalanceCheck, st.msg.BlobGasFeeCap)
+ balanceCheck.Add(balanceCheck, blobBalanceCheck)
+ // Pay for blobGasUsed * actual blob fee
+ blobFee := new(big.Int).SetUint64(blobGas)
+ blobFee.Mul(blobFee, eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas))
+ mgval.Add(mgval, blobFee)
+ }
+ }
if have, want := st.state.GetBalance(st.msg.From), balanceCheck; have.Cmp(want) < 0 {
return fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, st.msg.From.Hex(), have, want)
}
@@ -310,6 +328,29 @@ func (st *StateTransition) preCheck() error {
}
}
}
+ // Check the blob version validity
+ if msg.BlobHashes != nil {
+ if len(msg.BlobHashes) == 0 {
+ return errors.New("blob transaction missing blob hashes")
+ }
+ for i, hash := range msg.BlobHashes {
+ if hash[0] != params.BlobTxHashVersion {
+ return fmt.Errorf("blob %d hash version mismatch (have %d, supported %d)",
+ i, hash[0], params.BlobTxHashVersion)
+ }
+ }
+ }
+
+ if st.evm.ChainConfig().IsCancun(st.evm.Context.Time) {
+ if st.blobGasUsed() > 0 {
+ // Check that the user is paying at least the current blob fee
+ blobFee := eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas)
+ if st.msg.BlobGasFeeCap.Cmp(blobFee) < 0 {
+ return fmt.Errorf("%w: address %v have %v want %v", ErrBlobFeeCapTooLow, st.msg.From.Hex(), st.msg.BlobGasFeeCap, blobFee)
+ }
+ }
+ }
+
return st.buyGas()
}
@@ -423,3 +464,8 @@ func (st *StateTransition) refundGas(apricotPhase1 bool) {
func (st *StateTransition) gasUsed() uint64 {
return st.initialGas - st.gasRemaining
}
+
+// blobGasUsed returns the amount of blob gas used by the message.
+func (st *StateTransition) blobGasUsed() uint64 {
+ return uint64(len(st.msg.BlobHashes) * params.BlobTxBlobGasPerBlob)
+}
diff --git a/core/txpool/errors.go b/core/txpool/errors.go
new file mode 100644
index 0000000000..975e7ed896
--- /dev/null
+++ b/core/txpool/errors.go
@@ -0,0 +1,63 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package txpool
+
+import "errors"
+
+var (
+ // ErrAlreadyKnown is returned if the transaction is already contained
+ // within the pool.
+ ErrAlreadyKnown = errors.New("already known")
+
+ // ErrInvalidSender is returned if the transaction contains an invalid signature.
+ ErrInvalidSender = errors.New("invalid sender")
+
+ // ErrUnderpriced is returned if a transaction's gas price is below the minimum
+ // configured for the transaction pool.
+ ErrUnderpriced = errors.New("transaction underpriced")
+
+ // ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
+ // with a different one without the required price bump.
+ ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")
+
+ // ErrGasLimit is returned if a transaction's requested gas limit exceeds the
+ // maximum allowance of the current block.
+ ErrGasLimit = errors.New("exceeds block gas limit")
+
+ // ErrNegativeValue is a sanity error to ensure no one is able to specify a
+ // transaction with a negative value.
+ ErrNegativeValue = errors.New("negative value")
+
+ // ErrOversizedData is returned if the input data of a transaction is greater
+ // than some meaningful limit a user might use. This is not a consensus error
+ // making the transaction invalid, rather a DOS protection.
+ ErrOversizedData = errors.New("oversized data")
+
+ // ErrFutureReplacePending is returned if a future transaction replaces a pending
+ // transaction. Future transactions should only be able to replace other future transactions.
+ ErrFutureReplacePending = errors.New("future transaction tries to replace pending")
+)
diff --git a/core/txpool/journal.go b/core/txpool/legacypool/journal.go
similarity index 99%
rename from core/txpool/journal.go
rename to core/txpool/legacypool/journal.go
index 73302baa72..68769933bd 100644
--- a/core/txpool/journal.go
+++ b/core/txpool/legacypool/journal.go
@@ -24,7 +24,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package txpool
+package legacypool
import (
"errors"
diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go
new file mode 100644
index 0000000000..a87c83f95c
--- /dev/null
+++ b/core/txpool/legacypool/legacypool.go
@@ -0,0 +1,1902 @@
+// (c) 2019-2020, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package legacypool implements the normal EVM execution transaction pool.
+package legacypool
+
+import (
+ "errors"
+ "math"
+ "math/big"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ava-labs/coreth/consensus/dummy"
+ "github.com/ava-labs/coreth/core"
+ "github.com/ava-labs/coreth/core/state"
+ "github.com/ava-labs/coreth/core/txpool"
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/metrics"
+ "github.com/ava-labs/coreth/params"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/prque"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+const (
+ // txSlotSize is used to calculate how many data slots a single transaction
+ // takes up based on its size. The slots are used as DoS protection, ensuring
+ // that validating a new transaction remains a constant operation (in reality
+ // O(maxslots), where max slots are 4 currently).
+ txSlotSize = 32 * 1024
+
+ // txMaxSize is the maximum size a single transaction can have. This field has
+ // non-trivial consequences: larger transactions are significantly harder and
+ // more expensive to propagate; larger transactions also take more resources
+ // to validate whether they fit into the pool or not.
+ //
+ // Note: the max contract size is 24KB
+ txMaxSize = 4 * txSlotSize // 128KB
+)
+
+var (
+ // ErrAlreadyKnown is returned if the transaction is already contained
+ // within the pool.
+ ErrAlreadyKnown = errors.New("already known")
+
+ // ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
+ // another remote transaction.
+ ErrTxPoolOverflow = errors.New("txpool is full")
+)
+
+var (
+ evictionInterval = time.Minute // Time interval to check for evictable transactions
+ statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
+ baseFeeUpdateInterval = 10 * time.Second // Time interval at which to schedule a base fee update for the tx pool after Apricot Phase 3 is enabled
+)
+
+var (
+ // Metrics for the pending pool
+ pendingDiscardMeter = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
+ pendingReplaceMeter = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
+ pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
+ pendingNofundsMeter = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil) // Dropped due to out-of-funds
+
+ // Metrics for the queued pool
+ queuedDiscardMeter = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
+ queuedReplaceMeter = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
+ queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
+ queuedNofundsMeter = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds
+ queuedEvictionMeter = metrics.NewRegisteredMeter("txpool/queued/eviction", nil) // Dropped due to lifetime
+
+ // General tx metrics
+ knownTxMeter = metrics.NewRegisteredMeter("txpool/known", nil)
+ validTxMeter = metrics.NewRegisteredMeter("txpool/valid", nil)
+ invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil)
+ underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
+ overflowedTxMeter = metrics.NewRegisteredMeter("txpool/overflowed", nil)
+
+ // throttleTxMeter counts how many transactions are rejected due to too-many-changes between
+ // txpool reorgs.
+ throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil)
+ // reorgDurationTimer measures how long time a txpool reorg takes.
+ reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil)
+ // dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected
+ // that this number is pretty low, since txpool reorgs happen very frequently.
+ dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015))
+
+ pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
+ queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil)
+ localGauge = metrics.NewRegisteredGauge("txpool/local", nil)
+ slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil)
+
+ reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
+)
+
+// BlockChain defines the minimal set of methods needed to back a tx pool with
+// a chain. Exists to allow mocking the live chain out of tests.
+type BlockChain interface {
+ // Config retrieves the chain's fork configuration.
+ Config() *params.ChainConfig
+
+ // CurrentBlock returns the current head of the chain.
+ CurrentBlock() *types.Header
+
+ // GetBlock retrieves a specific block, used during pool resets.
+ GetBlock(hash common.Hash, number uint64) *types.Block
+
+ // StateAt returns a state database for a given root hash (generally the head).
+ StateAt(root common.Hash) (*state.StateDB, error)
+
+ // SenderCacher returns the sender cacher of the chain.
+ SenderCacher() *core.TxSenderCacher
+}
+
+// Config are the configuration parameters of the transaction pool.
+type Config struct {
+ Locals []common.Address // Addresses that should be treated by default as local
+ NoLocals bool // Whether local transaction handling should be disabled
+ Journal string // Journal of local transactions to survive node restarts
+ Rejournal time.Duration // Time interval to regenerate the local transaction journal
+
+ PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
+ PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
+
+ AccountSlots uint64 // Number of executable transaction slots guaranteed per account
+ GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts
+ AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
+ GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts
+
+ Lifetime time.Duration // Maximum amount of time non-executable transaction are queued
+}
+
+// DefaultConfig contains the default configurations for the transaction pool.
+var DefaultConfig = Config{
+ Journal: "transactions.rlp",
+ Rejournal: time.Hour,
+
+ PriceLimit: 1,
+ PriceBump: 10,
+
+ AccountSlots: 16,
+ GlobalSlots: 4096 + 1024, // urgent + floating queue capacity with 4:1 ratio
+ AccountQueue: 64,
+ GlobalQueue: 1024,
+
+ Lifetime: 3 * time.Hour,
+}
+
+// sanitize checks the provided user configurations and changes anything that's
+// unreasonable or unworkable.
+func (config *Config) sanitize() Config {
+ conf := *config
+ if conf.Rejournal < time.Second {
+ log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
+ conf.Rejournal = time.Second
+ }
+ if conf.PriceLimit < 1 {
+ log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit)
+ conf.PriceLimit = DefaultConfig.PriceLimit
+ }
+ if conf.PriceBump < 1 {
+ log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump)
+ conf.PriceBump = DefaultConfig.PriceBump
+ }
+ if conf.AccountSlots < 1 {
+ log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultConfig.AccountSlots)
+ conf.AccountSlots = DefaultConfig.AccountSlots
+ }
+ if conf.GlobalSlots < 1 {
+ log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultConfig.GlobalSlots)
+ conf.GlobalSlots = DefaultConfig.GlobalSlots
+ }
+ if conf.AccountQueue < 1 {
+ log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultConfig.AccountQueue)
+ conf.AccountQueue = DefaultConfig.AccountQueue
+ }
+ if conf.GlobalQueue < 1 {
+ log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultConfig.GlobalQueue)
+ conf.GlobalQueue = DefaultConfig.GlobalQueue
+ }
+ if conf.Lifetime < 1 {
+ log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultConfig.Lifetime)
+ conf.Lifetime = DefaultConfig.Lifetime
+ }
+ return conf
+}
+
+// LegacyPool contains all currently known transactions. Transactions
+// enter the pool when they are received from the network or submitted
+// locally. They exit the pool when they are included in the blockchain.
+//
+// The pool separates processable transactions (which can be applied to the
+// current state) and future transactions. Transactions move between those
+// two states over time as they are received and processed.
+type LegacyPool struct {
+ config Config
+ chainconfig *params.ChainConfig
+ chain BlockChain
+ gasTip atomic.Pointer[big.Int]
+ txFeed event.Feed
+ scope event.SubscriptionScope
+ signer types.Signer
+ mu sync.RWMutex
+
+ currentHead atomic.Pointer[types.Header] // Current head of the blockchain
+ currentState *state.StateDB // Current state in the blockchain head
+ pendingNonces *noncer // Pending state tracking virtual nonces
+
+ locals *accountSet // Set of local transaction to exempt from eviction rules
+ journal *journal // Journal of local transaction to back up to disk
+
+ pending map[common.Address]*list // All currently processable transactions
+ queue map[common.Address]*list // Queued but non-processable transactions
+ beats map[common.Address]time.Time // Last heartbeat from each known account
+ all *lookup // All transactions to allow lookups
+ priced *pricedList // All transactions sorted by price
+
+ reqResetCh chan *txpoolResetRequest
+ reqPromoteCh chan *accountSet
+ queueTxEventCh chan *types.Transaction
+ reorgDoneCh chan chan struct{}
+ reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop
+ wg sync.WaitGroup // tracks loop, scheduleReorgLoop
+ initDoneCh chan struct{} // is closed once the pool is initialized (for tests)
+
+ changesSinceReorg int // A counter for how many drops we've performed in-between reorg.
+
+ // additional fields compared to go-ethereum
+ minimumFee *big.Int // minimum fee to accept a transaction
+ currentStateLock sync.Mutex // lock for currentState
+
+ // Any goroutine can listen to this to be notified if it should shut down.
+ generalShutdownChan chan struct{} // closed when the transaction pool is stopped.
+}
+
+type txpoolResetRequest struct {
+ oldHead, newHead *types.Header
+}
+
+// New creates a new transaction pool to gather, sort and filter inbound
+// transactions from the network.
+func New(config Config, chain BlockChain) *LegacyPool {
+ // Sanitize the input to ensure no vulnerable gas prices are set
+ config = (&config).sanitize()
+
+ // Create the transaction pool with its initial settings
+ pool := &LegacyPool{
+ config: config,
+ chain: chain,
+ chainconfig: chain.Config(),
+ signer: types.LatestSigner(chain.Config()),
+ pending: make(map[common.Address]*list),
+ queue: make(map[common.Address]*list),
+ beats: make(map[common.Address]time.Time),
+ all: newLookup(),
+ reqResetCh: make(chan *txpoolResetRequest),
+ reqPromoteCh: make(chan *accountSet),
+ queueTxEventCh: make(chan *types.Transaction),
+ reorgDoneCh: make(chan chan struct{}),
+ reorgShutdownCh: make(chan struct{}),
+ initDoneCh: make(chan struct{}),
+
+ // additional fields compared to go-ethereum
+ generalShutdownChan: make(chan struct{}),
+ }
+ pool.locals = newAccountSet(pool.signer)
+ for _, addr := range config.Locals {
+ log.Info("Setting new local account", "address", addr)
+ pool.locals.add(addr)
+ }
+ pool.priced = newPricedList(pool.all)
+
+ if !config.NoLocals && config.Journal != "" {
+ pool.journal = newTxJournal(config.Journal)
+ }
+ return pool
+}
+
+// Filter returns whether the given transaction can be consumed by the legacy
+// pool, specifically, whether it is a Legacy, AccessList or Dynamic transaction.
+func (pool *LegacyPool) Filter(tx *types.Transaction) bool {
+ switch tx.Type() {
+ case types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType:
+ return true
+ default:
+ return false
+ }
+}
+
+// Init sets the gas price needed to keep a transaction in the pool and the chain
+// head to allow balance / nonce checks. The transaction journal will be loaded
+// from disk and filtered based on the provided starting settings. The internal
+// goroutines will be spun up and the pool deemed operational afterwards.
+func (pool *LegacyPool) Init(gasTip *big.Int, head *types.Header) error {
+ // Set the basic pool parameters
+ pool.gasTip.Store(gasTip)
+ pool.reset(nil, head)
+
+ // Start the reorg loop early, so it can handle requests generated during
+ // journal loading.
+ pool.wg.Add(1)
+ go pool.scheduleReorgLoop()
+
+ // If local transactions and journaling is enabled, load from disk
+ if pool.journal != nil {
+ if err := pool.journal.load(pool.addLocals); err != nil {
+ log.Warn("Failed to load transaction journal", "err", err)
+ }
+ if err := pool.journal.rotate(pool.local()); err != nil {
+ log.Warn("Failed to rotate transaction journal", "err", err)
+ }
+ }
+ pool.wg.Add(1)
+ go pool.loop()
+
+ pool.startPeriodicFeeUpdate()
+ return nil
+}
+
// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events.
func (pool *LegacyPool) loop() {
	defer pool.wg.Done()

	var (
		// Previously reported figures, used to suppress no-change log lines
		prevPending, prevQueued, prevStales int

		// Start the stats reporting and transaction eviction tickers
		report = time.NewTicker(statsReportInterval)
		evict = time.NewTicker(evictionInterval)
		journal = time.NewTicker(pool.config.Rejournal)
	)
	defer report.Stop()
	defer evict.Stop()
	defer journal.Stop()

	// Notify tests that the init phase is done
	close(pool.initDoneCh)
	for {
		select {
		// Handle pool shutdown
		case <-pool.reorgShutdownCh:
			return

		// Handle stats reporting ticks
		case <-report.C:
			pool.mu.RLock()
			pending, queued := pool.stats()
			pool.mu.RUnlock()
			stales := int(pool.priced.stales.Load())

			// Only emit a report if something actually changed
			if pending != prevPending || queued != prevQueued || stales != prevStales {
				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
				prevPending, prevQueued, prevStales = pending, queued, stales
			}

		// Handle inactive account transaction eviction
		case <-evict.C:
			pool.mu.Lock()
			for addr := range pool.queue {
				// Skip local transactions from the eviction mechanism
				if pool.locals.contains(addr) {
					continue
				}
				// Any non-locals old enough should be removed
				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
					list := pool.queue[addr].Flatten()
					for _, tx := range list {
						pool.removeTx(tx.Hash(), true)
					}
					queuedEvictionMeter.Mark(int64(len(list)))
				}
			}
			pool.mu.Unlock()

		// Handle local transaction journal rotation
		case <-journal.C:
			if pool.journal != nil {
				pool.mu.Lock()
				if err := pool.journal.rotate(pool.local()); err != nil {
					log.Warn("Failed to rotate local tx journal", "err", err)
				}
				pool.mu.Unlock()
			}
		}
	}
}
+
// Close terminates the transaction pool: it unsubscribes all registered event
// subscriptions, signals both shutdown channels, waits for every internal
// goroutine to exit and finally closes the journal (if enabled).
// It always returns nil.
func (pool *LegacyPool) Close() error {
	// Unsubscribe all subscriptions registered from txpool
	pool.scope.Close()

	close(pool.generalShutdownChan)
	// Terminate the pool reorger and return
	close(pool.reorgShutdownCh)
	pool.wg.Wait()

	if pool.journal != nil {
		pool.journal.close()
	}
	log.Info("Transaction pool stopped")
	return nil
}
+
// Reset implements txpool.SubPool, allowing the legacy pool's internal state to be
// kept in sync with the main transaction pool's internal state. It blocks until
// the reorg loop has fully processed the reset request.
func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) {
	wait := pool.requestReset(oldHead, newHead)
	<-wait
}
+
// SubscribeTransactions registers a subscription of NewTxsEvent and
// starts sending events to the given channel. The subscription is tracked by
// the pool's scope and is torn down when the pool is closed.
func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription {
	return pool.scope.Track(pool.txFeed.Subscribe(ch))
}
+
// SetGasTip updates the minimum gas tip required by the transaction pool for a
// new transaction, and drops all transactions below this threshold. Dropping
// only happens when the tip is raised; lowering it merely admits cheaper
// transactions going forward.
func (pool *LegacyPool) SetGasTip(tip *big.Int) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	old := pool.gasTip.Load()
	// Store a copy so later mutation of the caller's big.Int cannot affect the pool
	pool.gasTip.Store(new(big.Int).Set(tip))

	// If the min miner fee increased, remove transactions below the new threshold
	if tip.Cmp(old) > 0 {
		// pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
		drop := pool.all.RemotesBelowTip(tip)
		for _, tx := range drop {
			pool.removeTx(tx.Hash(), false)
		}
		pool.priced.Removed(len(drop))
	}
	log.Info("Legacy pool tip threshold updated", "tip", tip)
}
+
+// Nonce returns the next nonce of an account, with all transactions executable
+// by the pool already applied on top.
+func (pool *LegacyPool) Nonce(addr common.Address) uint64 {
+ pool.mu.RLock()
+ defer pool.mu.RUnlock()
+
+ return pool.pendingNonces.get(addr)
+}
+
+// Stats retrieves the current pool stats, namely the number of pending and the
+// number of queued (non-executable) transactions.
+func (pool *LegacyPool) Stats() (int, int) {
+ pool.mu.RLock()
+ defer pool.mu.RUnlock()
+
+ return pool.stats()
+}
+
+// stats retrieves the current pool stats, namely the number of pending and the
+// number of queued (non-executable) transactions.
+func (pool *LegacyPool) stats() (int, int) {
+ pending := 0
+ for _, list := range pool.pending {
+ pending += list.Len()
+ }
+ queued := 0
+ for _, list := range pool.queue {
+ queued += list.Len()
+ }
+ return pending, queued
+}
+
+// Content retrieves the data content of the transaction pool, returning all the
+// pending as well as queued transactions, grouped by account and sorted by nonce.
+func (pool *LegacyPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
+ pool.mu.Lock()
+ defer pool.mu.Unlock()
+
+ pending := make(map[common.Address][]*types.Transaction, len(pool.pending))
+ for addr, list := range pool.pending {
+ pending[addr] = list.Flatten()
+ }
+ queued := make(map[common.Address][]*types.Transaction, len(pool.queue))
+ for addr, list := range pool.queue {
+ queued[addr] = list.Flatten()
+ }
+ return pending, queued
+}
+
+// ContentFrom retrieves the data content of the transaction pool, returning the
+// pending as well as queued transactions of this address, grouped by nonce.
+func (pool *LegacyPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
+ pool.mu.RLock()
+ defer pool.mu.RUnlock()
+
+ var pending []*types.Transaction
+ if list, ok := pool.pending[addr]; ok {
+ pending = list.Flatten()
+ }
+ var queued []*types.Transaction
+ if list, ok := pool.queue[addr]; ok {
+ queued = list.Flatten()
+ }
+ return pending, queued
+}
+
// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
//
// The enforceTips parameter can be used to do an extra filtering on the pending
// transactions and only return those whose **effective** tip is large enough in
// the next pending execution environment. Local accounts are exempt from this
// filtering.
func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*types.Transaction {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pending := make(map[common.Address][]*types.Transaction, len(pool.pending))
	for addr, list := range pool.pending {
		txs := list.Flatten()

		// If the miner requests tip enforcement, cap the lists now
		if enforceTips && !pool.locals.contains(addr) {
			for i, tx := range txs {
				// Transactions are nonce-sorted, so everything after the first
				// underpriced one cannot execute anyway: truncate at that point.
				if tx.EffectiveGasTipIntCmp(pool.gasTip.Load(), pool.priced.urgent.baseFee) < 0 {
					txs = txs[:i]
					break
				}
			}
		}
		if len(txs) > 0 {
			pending[addr] = txs
		}
	}
	return pending
}
+
+// Locals retrieves the accounts currently considered local by the pool.
+func (pool *LegacyPool) Locals() []common.Address {
+ pool.mu.Lock()
+ defer pool.mu.Unlock()
+
+ return pool.locals.flatten()
+}
+
+// local retrieves all currently known local transactions, grouped by origin
+// account and sorted by nonce. The returned transaction set is a copy and can be
+// freely modified by calling code.
+func (pool *LegacyPool) local() map[common.Address]types.Transactions {
+ txs := make(map[common.Address]types.Transactions)
+ for addr := range pool.locals.accounts {
+ if pending := pool.pending[addr]; pending != nil {
+ txs[addr] = append(txs[addr], pending.Flatten()...)
+ }
+ if queued := pool.queue[addr]; queued != nil {
+ txs[addr] = append(txs[addr], queued.Flatten()...)
+ }
+ }
+ return txs
+}
+
+// validateTxBasics checks whether a transaction is valid according to the consensus
+// rules, but does not check state-dependent validation such as sufficient balance.
+// This check is meant as an early check which only needs to be performed once,
+// and does not require the pool mutex to be held.
+func (pool *LegacyPool) validateTxBasics(tx *types.Transaction, local bool) error {
+ opts := &txpool.ValidationOptions{
+ Config: pool.chainconfig,
+ Accept: 0 |
+ 1< pool.config.GlobalSlots+pool.config.GlobalQueue {
+ // If the new transaction is underpriced, don't accept it
+ if !isLocal && pool.priced.Underpriced(tx) {
+ log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
+ underpricedTxMeter.Mark(1)
+ return false, txpool.ErrUnderpriced
+ }
+
+ // We're about to replace a transaction. The reorg does a more thorough
+ // analysis of what to remove and how, but it runs async. We don't want to
+ // do too many replacements between reorg-runs, so we cap the number of
+ // replacements to 25% of the slots
+ if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
+ throttleTxMeter.Mark(1)
+ return false, ErrTxPoolOverflow
+ }
+
+ // New transaction is better than our worse ones, make room for it.
+ // If it's a local transaction, forcibly discard all available transactions.
+ // Otherwise if we can't make enough room for new one, abort the operation.
+ drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)
+
+ // Special case, we still can't make the room for the new remote one.
+ if !isLocal && !success {
+ log.Trace("Discarding overflown transaction", "hash", hash)
+ overflowedTxMeter.Mark(1)
+ return false, ErrTxPoolOverflow
+ }
+
+ // If the new transaction is a future transaction it should never churn pending transactions
+ if !isLocal && pool.isGapped(from, tx) {
+ var replacesPending bool
+ for _, dropTx := range drop {
+ dropSender, _ := types.Sender(pool.signer, dropTx)
+ if list := pool.pending[dropSender]; list != nil && list.Contains(dropTx.Nonce()) {
+ replacesPending = true
+ break
+ }
+ }
+ // Add all transactions back to the priced queue
+ if replacesPending {
+ for _, dropTx := range drop {
+ pool.priced.Put(dropTx, false)
+ }
+ log.Trace("Discarding future transaction replacing pending tx", "hash", hash)
+ return false, txpool.ErrFutureReplacePending
+ }
+ }
+
+ // Kick out the underpriced remote transactions.
+ for _, tx := range drop {
+ log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
+ underpricedTxMeter.Mark(1)
+ dropped := pool.removeTx(tx.Hash(), false)
+ pool.changesSinceReorg += dropped
+ }
+ }
+
+ // Try to replace an existing transaction in the pending pool
+ if list := pool.pending[from]; list != nil && list.Contains(tx.Nonce()) {
+ // Nonce already pending, check if required price bump is met
+ inserted, old := list.Add(tx, pool.config.PriceBump)
+ if !inserted {
+ pendingDiscardMeter.Mark(1)
+ return false, txpool.ErrReplaceUnderpriced
+ }
+ // New transaction is better, replace old one
+ if old != nil {
+ pool.all.Remove(old.Hash())
+ pool.priced.Removed(1)
+ pendingReplaceMeter.Mark(1)
+ }
+ pool.all.Add(tx, isLocal)
+ pool.priced.Put(tx, isLocal)
+ pool.journalTx(from, tx)
+ pool.queueTxEvent(tx)
+ log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
+
+ // Successful promotion, bump the heartbeat
+ pool.beats[from] = time.Now()
+ return old != nil, nil
+ }
+ // New transaction isn't replacing a pending one, push into queue
+ replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
+ if err != nil {
+ return false, err
+ }
+ // Mark local addresses and journal local transactions
+ if local && !pool.locals.contains(from) {
+ log.Info("Setting new local account", "address", from)
+ pool.locals.add(from)
+ pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
+ }
+ if isLocal {
+ localGauge.Inc(1)
+ }
+ pool.journalTx(from, tx)
+
+ log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
+ return replaced, nil
+}
+
// isGapped reports whether the given transaction would leave a nonce gap with
// the pending state that the queued transactions cannot fill, i.e. it returns
// true when the transaction is NOT (and cannot become) immediately executable.
func (pool *LegacyPool) isGapped(from common.Address, tx *types.Transaction) bool {
	// Short circuit if transaction falls within the scope of the pending list
	// or matches the next pending nonce which can be promoted as an executable
	// transaction afterwards. Note, the tx staleness is already checked in
	// 'validateTx' function previously.
	next := pool.pendingNonces.get(from)
	if tx.Nonce() <= next {
		return false
	}
	// The transaction has a nonce gap with pending list, it's only considered
	// as executable if transactions in queue can fill up the nonce gap.
	queue, ok := pool.queue[from]
	if !ok {
		return true
	}
	for nonce := next; nonce < tx.Nonce(); nonce++ {
		if !queue.Contains(nonce) {
			return true // txs in queue can't fill up the nonce gap
		}
	}
	return false
}
+
// enqueueTx inserts a new transaction into the non-executable transaction queue.
// The local flag routes the transaction through the local pricing exemptions;
// addAll controls whether it is also registered in the global lookup and priced
// sets (false for internal shuffles where it already is). It returns whether an
// existing transaction with the same nonce was replaced.
//
// Note, this method assumes the pool lock is held!
func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
	// Try to insert the transaction into the future queue
	from, _ := types.Sender(pool.signer, tx) // already validated
	if pool.queue[from] == nil {
		pool.queue[from] = newList(false)
	}
	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		queuedDiscardMeter.Mark(1)
		return false, txpool.ErrReplaceUnderpriced
	}
	// Discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		queuedReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the queued counter
		queuedGauge.Inc(1)
	}
	// If the transaction isn't in lookup set but it's expected to be there,
	// show the error log.
	if pool.all.Get(hash) == nil && !addAll {
		log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
	}
	if addAll {
		pool.all.Add(tx, local)
		pool.priced.Put(tx, local)
	}
	// If we never record the heartbeat, do it right now.
	if _, exist := pool.beats[from]; !exist {
		pool.beats[from] = time.Now()
	}
	return old != nil, nil
}
+
+// journalTx adds the specified transaction to the local disk journal if it is
+// deemed to have been sent from a local account.
+func (pool *LegacyPool) journalTx(from common.Address, tx *types.Transaction) {
+ // Only journal if it's enabled and the transaction is local
+ if pool.journal == nil || !pool.locals.contains(from) {
+ return
+ }
+ if err := pool.journal.insert(tx); err != nil {
+ log.Warn("Failed to journal local transaction", "err", err)
+ }
+}
+
// promoteTx adds a transaction to the pending (processable) list of transactions
// and returns whether it was inserted or an older was better. On successful
// insertion the account's pending nonce and heartbeat are updated; on rejection
// the transaction is also purged from the global lookup and priced sets.
//
// Note, this method assumes the pool lock is held!
func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
	// Try to insert the transaction into the pending queue
	if pool.pending[addr] == nil {
		pool.pending[addr] = newList(true)
	}
	list := pool.pending[addr]

	inserted, old := list.Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		pool.all.Remove(hash)
		pool.priced.Removed(1)
		pendingDiscardMeter.Mark(1)
		return false
	}
	// Otherwise discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		pendingReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the pending counter
		pendingGauge.Inc(1)
	}
	// Set the potentially new pending nonce and notify any subsystems of the new tx
	pool.pendingNonces.set(addr, tx.Nonce()+1)

	// Successful promotion, bump the heartbeat
	pool.beats[addr] = time.Now()
	return true
}
+
// Add enqueues a batch of transactions into the pool if they are valid. Depending
// on the local flag, full pricing constraints will or will not be applied.
//
// If sync is set, the method will block until all internal maintenance related
// to the add is finished. Only use this during tests for determinism!
func (pool *LegacyPool) Add(txs []*txpool.Transaction, local bool, sync bool) []error {
	// Unwrap the generic pool transactions into the legacy pool's native type
	unwrapped := make([]*types.Transaction, len(txs))
	for i, tx := range txs {
		unwrapped[i] = tx.Tx
	}
	return pool.addTxs(unwrapped, local, sync)
}
+
// addLocals enqueues a batch of transactions into the pool if they are valid, marking the
// senders as local ones, ensuring they go around the local pricing constraints.
//
// This method is used to add transactions from the RPC API and performs synchronous pool
// reorganization and event propagation. Note that the local marking is skipped
// entirely when the pool is configured with NoLocals.
func (pool *LegacyPool) addLocals(txs []*types.Transaction) []error {
	return pool.addTxs(txs, !pool.config.NoLocals, true)
}
+
+// addLocal enqueues a single local transaction into the pool if it is valid. This is
+// a convenience wrapper around addLocals.
+func (pool *LegacyPool) addLocal(tx *types.Transaction) error {
+ errs := pool.addLocals([]*types.Transaction{tx})
+ return errs[0]
+}
+
+// addRemotes enqueues a batch of transactions into the pool if they are valid. If the
+// senders are not among the locally tracked ones, full pricing constraints will apply.
+//
+// This method is used to add transactions from the p2p network and does not wait for pool
+// reorganization and internal event propagation.
+func (pool *LegacyPool) addRemotes(txs []*types.Transaction) []error {
+ return pool.addTxs(txs, false, false)
+}
+
+// addRemote enqueues a single transaction into the pool if it is valid. This is a convenience
+// wrapper around addRemotes.
+func (pool *LegacyPool) addRemote(tx *types.Transaction) error {
+ errs := pool.addRemotes([]*types.Transaction{tx})
+ return errs[0]
+}
+
+// addRemotesSync is like addRemotes, but waits for pool reorganization. Tests use this method.
+func (pool *LegacyPool) addRemotesSync(txs []*types.Transaction) []error {
+ return pool.addTxs(txs, false, true)
+}
+
+// This is like addRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
+func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error {
+ return pool.addTxs([]*types.Transaction{tx}, false, true)[0]
+}
+
+// addTxs attempts to queue a batch of transactions if they are valid.
+func (pool *LegacyPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
+ // Filter out known ones without obtaining the pool lock or recovering signatures
+ var (
+ errs = make([]error, len(txs))
+ news = make([]*types.Transaction, 0, len(txs))
+ )
+ for i, tx := range txs {
+ // If the transaction is known, pre-set the error slot
+ if pool.all.Get(tx.Hash()) != nil {
+ errs[i] = ErrAlreadyKnown
+ knownTxMeter.Mark(1)
+ continue
+ }
+ // Exclude transactions with basic errors, e.g invalid signatures and
+ // insufficient intrinsic gas as soon as possible and cache senders
+ // in transactions before obtaining lock
+ if err := pool.validateTxBasics(tx, local); err != nil {
+ errs[i] = err
+ invalidTxMeter.Mark(1)
+ continue
+ }
+ // Accumulate all unknown transactions for deeper processing
+ news = append(news, tx)
+ }
+ if len(news) == 0 {
+ return errs
+ }
+
+ // Process all the new transaction and merge any errors into the original slice
+ pool.mu.Lock()
+ newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
+ pool.mu.Unlock()
+
+ var nilSlot = 0
+ for _, err := range newErrs {
+ for errs[nilSlot] != nil {
+ nilSlot++
+ }
+ errs[nilSlot] = err
+ nilSlot++
+ }
+ // Reorg the pool internals if needed and return
+ done := pool.requestPromoteExecutables(dirtyAddrs)
+ if sync {
+ <-done
+ }
+ return errs
+}
+
// addTxsLocked attempts to queue a batch of transactions if they are valid.
// It returns an index-aligned error slice and the set of accounts that gained
// a genuinely new (non-replacement) transaction and thus need promotion checks.
// The transaction pool lock must be held.
func (pool *LegacyPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
	dirty := newAccountSet(pool.signer)
	errs := make([]error, len(txs))
	for i, tx := range txs {
		replaced, err := pool.add(tx, local)
		errs[i] = err
		if err == nil && !replaced {
			dirty.addTx(tx)
		}
	}
	validTxMeter.Mark(int64(len(dirty.accounts)))
	return errs, dirty
}
+
// Status returns the status (unknown/pending/queued) of the transaction
// identified by the given hash.
func (pool *LegacyPool) Status(hash common.Hash) txpool.TxStatus {
	tx := pool.get(hash)
	if tx == nil {
		return txpool.TxStatusUnknown
	}
	from, _ := types.Sender(pool.signer, tx) // already validated

	pool.mu.RLock()
	defer pool.mu.RUnlock()

	if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
		return txpool.TxStatusPending
	} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
		return txpool.TxStatusQueued
	}
	return txpool.TxStatusUnknown
}
+
+// Get returns a transaction if it is contained in the pool and nil otherwise.
+func (pool *LegacyPool) Get(hash common.Hash) *txpool.Transaction {
+ tx := pool.get(hash)
+ if tx == nil {
+ return nil
+ }
+ return &txpool.Transaction{Tx: tx}
+}
+
// get returns a transaction if it is contained in the pool and nil otherwise.
// Unlike Get, it returns the bare types.Transaction without the txpool wrapper.
func (pool *LegacyPool) get(hash common.Hash) *types.Transaction {
	return pool.all.Get(hash)
}
+
+// Has returns an indicator whether txpool has a transaction cached with the
+// given hash.
+func (pool *LegacyPool) Has(hash common.Hash) bool {
+ return pool.all.Get(hash) != nil
+}
+
// removeTx removes a single transaction from the queue, moving all subsequent
// transactions back to the future queue. The outofbound flag indicates whether
// the priced set should be notified of the removal (false when the caller
// handles the priced bookkeeping itself).
// Returns the number of transactions removed from the pending queue; removals
// that only touch the future queue (or unknown hashes) return 0.
//
// Note, this method assumes the pool lock is held!
func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool) int {
	// Fetch the transaction we wish to delete
	tx := pool.all.Get(hash)
	if tx == nil {
		return 0
	}
	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion

	// Remove it from the list of known transactions
	pool.all.Remove(hash)
	if outofbound {
		pool.priced.Removed(1)
	}
	if pool.locals.contains(addr) {
		localGauge.Dec(1)
	}
	// Remove the transaction from the pending lists and reset the account nonce
	if pending := pool.pending[addr]; pending != nil {
		if removed, invalids := pending.Remove(tx); removed {
			// If no more pending transactions are left, remove the list
			if pending.Empty() {
				delete(pool.pending, addr)
			}
			// Postpone any invalidated transactions
			for _, tx := range invalids {
				// Internal shuffle shouldn't touch the lookup set.
				pool.enqueueTx(tx.Hash(), tx, false, false)
			}
			// Update the account nonce if needed
			pool.pendingNonces.setIfLower(addr, tx.Nonce())
			// Reduce the pending counter
			pendingGauge.Dec(int64(1 + len(invalids)))
			return 1 + len(invalids)
		}
	}
	// Transaction is in the future queue
	if future := pool.queue[addr]; future != nil {
		if removed, _ := future.Remove(tx); removed {
			// Reduce the queued counter
			queuedGauge.Dec(1)
		}
		if future.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
	return 0
}
+
// requestReset requests a pool reset to the new head block.
// The returned channel is closed when the reset has occurred. If the pool is
// shutting down, the (closed or closing) shutdown channel is returned instead,
// so callers never block forever.
func (pool *LegacyPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
	select {
	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}
+
// requestPromoteExecutables requests transaction promotion checks for the given addresses.
// The returned channel is closed when the promotion checks have occurred. If the
// pool is shutting down, the shutdown channel is returned instead, so callers
// never block forever.
func (pool *LegacyPool) requestPromoteExecutables(set *accountSet) chan struct{} {
	select {
	case pool.reqPromoteCh <- set:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}
+
// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
// The event is silently dropped if the pool is shutting down.
func (pool *LegacyPool) queueTxEvent(tx *types.Transaction) {
	select {
	case pool.queueTxEventCh <- tx:
	case <-pool.reorgShutdownCh:
	}
}
+
// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
// call those methods directly, but request them being run using requestReset and
// requestPromoteExecutables instead. At most one runReorg is active at a time;
// requests arriving while one is running are coalesced into the next run.
func (pool *LegacyPool) scheduleReorgLoop() {
	defer pool.wg.Done()

	var (
		curDone chan struct{} // non-nil while runReorg is active
		nextDone = make(chan struct{})
		launchNextRun bool
		reset *txpoolResetRequest
		dirtyAccounts *accountSet
		queuedEvents = make(map[common.Address]*sortedMap)
	)
	for {
		// Launch next background reorg if needed
		if curDone == nil && launchNextRun {
			// Run the background reorg and announcements
			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)

			// Prepare everything for the next round of reorg
			curDone, nextDone = nextDone, make(chan struct{})
			launchNextRun = false

			reset, dirtyAccounts = nil, nil
			queuedEvents = make(map[common.Address]*sortedMap)
		}

		select {
		case req := <-pool.reqResetCh:
			// Reset request: update head if request is already pending.
			if reset == nil {
				reset = req
			} else {
				reset.newHead = req.newHead
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case req := <-pool.reqPromoteCh:
			// Promote request: update address set if request is already pending.
			if dirtyAccounts == nil {
				dirtyAccounts = req
			} else {
				dirtyAccounts.merge(req)
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case tx := <-pool.queueTxEventCh:
			// Queue up the event, but don't schedule a reorg. It's up to the caller to
			// request one later if they want the events sent.
			addr, _ := types.Sender(pool.signer, tx)
			if _, ok := queuedEvents[addr]; !ok {
				queuedEvents[addr] = newSortedMap()
			}
			queuedEvents[addr].Put(tx)

		case <-curDone:
			// Active runReorg finished, allow the next one to launch
			curDone = nil

		case <-pool.reorgShutdownCh:
			// Wait for current run to finish.
			if curDone != nil {
				<-curDone
			}
			close(nextDone)
			return
		}
	}
}
+
// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
// It closes done when finished, allowing the scheduler to launch the next run.
// Accumulated transaction events are delivered on the tx feed after the pool
// lock has been released.
func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*sortedMap) {
	defer func(t0 time.Time) {
		reorgDurationTimer.Update(time.Since(t0))
	}(time.Now())
	defer close(done)

	var promoteAddrs []common.Address
	if dirtyAccounts != nil && reset == nil {
		// Only dirty accounts need to be promoted, unless we're resetting.
		// For resets, all addresses in the tx queue will be promoted and
		// the flatten operation can be avoided.
		promoteAddrs = dirtyAccounts.flatten()
	}
	pool.mu.Lock()
	if reset != nil {
		// Reset from the old head to the new, rescheduling any reorged transactions
		pool.reset(reset.oldHead, reset.newHead)

		// Nonces were reset, discard any events that became stale
		for addr := range events {
			events[addr].Forward(pool.pendingNonces.get(addr))
			if events[addr].Len() == 0 {
				delete(events, addr)
			}
		}
		// Reset needs promote for all addresses
		promoteAddrs = make([]common.Address, 0, len(pool.queue))
		for addr := range pool.queue {
			promoteAddrs = append(promoteAddrs, addr)
		}
	}
	// Check for pending transactions for every account that sent new ones
	promoted := pool.promoteExecutables(promoteAddrs)

	// If a new block appeared, validate the pool of pending transactions. This will
	// remove any transaction that has been included in the block or was invalidated
	// because of another transaction (e.g. higher gas price).
	if reset != nil {
		pool.demoteUnexecutables()
		if reset.newHead != nil {
			if pool.chainconfig.IsApricotPhase3(reset.newHead.Time) {
				// Dynamic-fee era: re-anchor the priced heap on the estimated next base fee
				_, baseFeeEstimate, err := dummy.EstimateNextBaseFee(pool.chainconfig, reset.newHead, uint64(time.Now().Unix()))
				if err == nil {
					pool.priced.SetBaseFee(baseFeeEstimate)
				}
			} else {
				pool.priced.Reheap()
			}
		}
		// Update all accounts to the latest known pending nonce
		nonces := make(map[common.Address]uint64, len(pool.pending))
		for addr, list := range pool.pending {
			highestPending := list.LastElement()
			nonces[addr] = highestPending.Nonce() + 1
		}
		pool.pendingNonces.setAll(nonces)
	}
	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
	pool.truncatePending()
	pool.truncateQueue()

	dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
	pool.changesSinceReorg = 0 // Reset change counter
	pool.mu.Unlock()

	// Notify subsystems for newly added transactions
	for _, tx := range promoted {
		addr, _ := types.Sender(pool.signer, tx)
		if _, ok := events[addr]; !ok {
			events[addr] = newSortedMap()
		}
		events[addr].Put(tx)
	}
	if len(events) > 0 {
		var txs []*types.Transaction
		for _, set := range events {
			txs = append(txs, set.Flatten()...)
		}
		pool.txFeed.Send(core.NewTxsEvent{Txs: txs})
	}
}
+
// reset retrieves the current state of the blockchain and ensures the content
// of the transaction pool is valid with regard to the chain state. For shallow
// reorgs it walks both the old and new chain segments to find transactions that
// were dropped from the canonical chain and reinjects them into the pool.
//
// Note, this method assumes the pool lock is held!
func (pool *LegacyPool) reset(oldHead, newHead *types.Header) {
	// If we're reorging an old state, reinject all dropped transactions
	var reinject types.Transactions

	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
		// If the reorg is too deep, avoid doing it (will happen during fast sync)
		oldNum := oldHead.Number.Uint64()
		newNum := newHead.Number.Uint64()

		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
			log.Debug("Skipping deep transaction reorg", "depth", depth)
		} else {
			// Reorg seems shallow enough to pull in all transactions into memory
			var (
				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
			)
			if rem == nil {
				// This can happen if a setHead is performed, where we simply discard the old
				// head from the chain.
				// If that is the case, we don't have the lost transactions anymore, and
				// there's nothing to add
				if newNum >= oldNum {
					// If we reorged to a same or higher number, then it's not a case of setHead
					log.Warn("Transaction pool reset with missing oldhead",
						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
					return
				}
				// If the reorg ended up on a lower number, it's indicative of setHead being the cause
				log.Debug("Skipping transaction reset caused by setHead",
					"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
				// We still need to update the current state s.th. the lost transactions can be readded by the user
			} else {
				if add == nil {
					// if the new head is nil, it means that something happened between
					// the firing of newhead-event and _now_: most likely a
					// reorg caused by sync-reversion or explicit sethead back to an
					// earlier block.
					log.Warn("New head missing in txpool reset", "number", newHead.Number, "hash", newHead.Hash())
					return
				}
				// Walk both chain segments down to a common ancestor, collecting
				// the transactions dropped (old side) and included (new side).
				var discarded, included types.Transactions
				for rem.NumberU64() > add.NumberU64() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
				}
				for add.NumberU64() > rem.NumberU64() {
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				for rem.Hash() != add.Hash() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				// Only transactions dropped on the old side and not re-included
				// on the new side need to be reinjected
				reinject = types.TxDifference(discarded, included)
			}
		}
	}
	// Initialize the internal state to the current head
	if newHead == nil {
		newHead = pool.chain.CurrentBlock() // Special case during testing
	}
	statedb, err := pool.chain.StateAt(newHead.Root)
	if err != nil {
		log.Error("Failed to reset txpool state", "err", err, "root", newHead.Root)
		return
	}
	pool.currentHead.Store(newHead)
	pool.currentStateLock.Lock()
	pool.currentState = statedb
	pool.currentStateLock.Unlock()
	pool.pendingNonces = newNoncer(statedb)

	// Inject any transactions discarded due to reorgs
	log.Debug("Reinjecting stale transactions", "count", len(reinject))
	pool.chain.SenderCacher().Recover(pool.signer, reinject)
	pool.addTxsLocked(reinject, false)
}
+
+// promoteExecutables moves transactions that have become processable from the
+// future queue to the set of pending transactions. During this process, all
+// invalidated transactions (low nonce, low balance) are deleted.
+//
+// NOTE(review): pool.queue and pool.config are read here without taking pool.mu,
+// so callers presumably hold the pool mutex already — confirm at call sites.
+func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
+	pool.currentStateLock.Lock()
+	defer pool.currentStateLock.Unlock()
+
+	// Track the promoted transactions to broadcast them at once
+	var promoted []*types.Transaction
+
+	// Iterate over all accounts and promote any executable transactions
+	gasLimit := pool.currentHead.Load().GasLimit
+	for _, addr := range accounts {
+		list := pool.queue[addr]
+		if list == nil {
+			continue // Just in case someone calls with a non-existing account
+		}
+		// Drop all transactions that are deemed too old (low nonce)
+		forwards := list.Forward(pool.currentState.GetNonce(addr))
+		for _, tx := range forwards {
+			hash := tx.Hash()
+			pool.all.Remove(hash)
+		}
+		log.Trace("Removed old queued transactions", "count", len(forwards))
+		// Drop all transactions that are too costly (low balance or out of gas)
+		drops, _ := list.Filter(pool.currentState.GetBalance(addr), gasLimit)
+		for _, tx := range drops {
+			hash := tx.Hash()
+			pool.all.Remove(hash)
+		}
+		log.Trace("Removed unpayable queued transactions", "count", len(drops))
+		queuedNofundsMeter.Mark(int64(len(drops)))
+
+		// Gather all executable transactions and promote them
+		readies := list.Ready(pool.pendingNonces.get(addr))
+		for _, tx := range readies {
+			hash := tx.Hash()
+			if pool.promoteTx(addr, hash, tx) {
+				promoted = append(promoted, tx)
+			}
+		}
+		// Note: len(promoted) is cumulative across all accounts processed so far.
+		log.Trace("Promoted queued transactions", "count", len(promoted))
+		queuedGauge.Dec(int64(len(readies)))
+
+		// Drop all transactions over the allowed limit
+		var caps types.Transactions
+		if !pool.locals.contains(addr) {
+			caps = list.Cap(int(pool.config.AccountQueue))
+			for _, tx := range caps {
+				hash := tx.Hash()
+				pool.all.Remove(hash)
+				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
+			}
+			queuedRateLimitMeter.Mark(int64(len(caps)))
+		}
+		// Mark all the items dropped as removed
+		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
+		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
+		if pool.locals.contains(addr) {
+			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
+		}
+		// Delete the entire queue entry if it became empty.
+		if list.Empty() {
+			delete(pool.queue, addr)
+			delete(pool.beats, addr)
+		}
+	}
+	return promoted
+}
+
+// truncatePending removes transactions from the pending queue if the pool is above the
+// pending limit. The algorithm tries to reduce transaction counts by an approximately
+// equal number for all accounts with many pending transactions.
+func (pool *LegacyPool) truncatePending() {
+	pending := uint64(0)
+	for _, list := range pool.pending {
+		pending += uint64(list.Len())
+	}
+	if pending <= pool.config.GlobalSlots {
+		return
+	}
+
+	pendingBeforeCap := pending
+	// Assemble a spam order to penalize large transactors first
+	spammers := prque.New[int64, common.Address](nil)
+	for addr, list := range pool.pending {
+		// Only evict transactions from high rollers
+		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
+			spammers.Push(addr, int64(list.Len()))
+		}
+	}
+	// Gradually drop transactions from offenders
+	offenders := []common.Address{}
+	for pending > pool.config.GlobalSlots && !spammers.Empty() {
+		// Retrieve the next offender if not local address
+		offender, _ := spammers.Pop()
+		offenders = append(offenders, offender)
+
+		// Equalize transaction counts until all the same or below threshold
+		if len(offenders) > 1 {
+			// Calculate the equalization threshold for all current offenders
+			threshold := pool.pending[offender].Len()
+
+			// Iteratively reduce all offenders until below limit or threshold reached
+			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
+				for i := 0; i < len(offenders)-1; i++ {
+					list := pool.pending[offenders[i]]
+
+					// Cap the list to one fewer transaction, collecting the dropped tx (if any)
+					caps := list.Cap(list.Len() - 1)
+					for _, tx := range caps {
+						// Drop the transaction from the global pools too
+						hash := tx.Hash()
+						pool.all.Remove(hash)
+
+						// Update the account nonce to the dropped transaction
+						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
+						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
+					}
+					pool.priced.Removed(len(caps))
+					pendingGauge.Dec(int64(len(caps)))
+					if pool.locals.contains(offenders[i]) {
+						localGauge.Dec(int64(len(caps)))
+					}
+					pending--
+				}
+			}
+		}
+	}
+
+	// If still above threshold, reduce to limit or min allowance
+	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
+		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
+			for _, addr := range offenders {
+				list := pool.pending[addr]
+
+				caps := list.Cap(list.Len() - 1)
+				for _, tx := range caps {
+					// Drop the transaction from the global pools too
+					hash := tx.Hash()
+					pool.all.Remove(hash)
+
+					// Update the account nonce to the dropped transaction
+					pool.pendingNonces.setIfLower(addr, tx.Nonce())
+					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
+				}
+				pool.priced.Removed(len(caps))
+				pendingGauge.Dec(int64(len(caps)))
+				if pool.locals.contains(addr) {
+					localGauge.Dec(int64(len(caps)))
+				}
+				pending--
+			}
+		}
+	}
+	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
+}
+
+// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
+func (pool *LegacyPool) truncateQueue() {
+	queued := uint64(0)
+	for _, list := range pool.queue {
+		queued += uint64(list.Len())
+	}
+	if queued <= pool.config.GlobalQueue {
+		return
+	}
+
+	// Sort all accounts with queued transactions by heartbeat
+	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
+	for addr := range pool.queue {
+		if !pool.locals.contains(addr) { // don't drop locals
+			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
+		}
+	}
+	// The reverse sort puts the most recently active accounts first, so popping
+	// from the tail below evicts the least recently active account first.
+	sort.Sort(sort.Reverse(addresses))
+
+	// Drop transactions until the total is below the limit or only locals remain
+	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
+		addr := addresses[len(addresses)-1]
+		list := pool.queue[addr.address]
+
+		addresses = addresses[:len(addresses)-1]
+
+		// Drop all transactions if they are less than the overflow
+		if size := uint64(list.Len()); size <= drop {
+			for _, tx := range list.Flatten() {
+				pool.removeTx(tx.Hash(), true)
+			}
+			drop -= size
+			queuedRateLimitMeter.Mark(int64(size))
+			continue
+		}
+		// Otherwise drop only last few transactions
+		txs := list.Flatten()
+		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
+			pool.removeTx(txs[i].Hash(), true)
+			drop--
+			queuedRateLimitMeter.Mark(1)
+		}
+	}
+}
+
+// demoteUnexecutables removes invalid and processed transactions from the pools
+// executable/pending queue and any subsequent transactions that become unexecutable
+// are moved back into the future queue.
+//
+// Note: transactions are not marked as removed in the priced list because re-heaping
+// is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful
+// to trigger a re-heap in this function
+func (pool *LegacyPool) demoteUnexecutables() {
+	pool.currentStateLock.Lock()
+	defer pool.currentStateLock.Unlock()
+
+	// Iterate over all accounts and demote any non-executable transactions
+	gasLimit := pool.currentHead.Load().GasLimit
+	for addr, list := range pool.pending {
+		nonce := pool.currentState.GetNonce(addr)
+
+		// Drop all transactions that are deemed too old (low nonce)
+		olds := list.Forward(nonce)
+		for _, tx := range olds {
+			hash := tx.Hash()
+			pool.all.Remove(hash)
+			log.Trace("Removed old pending transaction", "hash", hash)
+		}
+		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
+		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), gasLimit)
+		for _, tx := range drops {
+			hash := tx.Hash()
+			log.Trace("Removed unpayable pending transaction", "hash", hash)
+			pool.all.Remove(hash)
+		}
+		pendingNofundsMeter.Mark(int64(len(drops)))
+
+		for _, tx := range invalids {
+			hash := tx.Hash()
+			log.Trace("Demoting pending transaction", "hash", hash)
+
+			// Internal shuffle shouldn't touch the lookup set.
+			pool.enqueueTx(hash, tx, false, false)
+		}
+		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
+		if pool.locals.contains(addr) {
+			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
+		}
+		// If there's a gap in front, alert (should never happen) and postpone all transactions
+		if list.Len() > 0 && list.txs.Get(nonce) == nil {
+			gapped := list.Cap(0)
+			for _, tx := range gapped {
+				hash := tx.Hash()
+				log.Error("Demoting invalidated transaction", "hash", hash)
+
+				// Internal shuffle shouldn't touch the lookup set.
+				pool.enqueueTx(hash, tx, false, false)
+			}
+			pendingGauge.Dec(int64(len(gapped)))
+		}
+		// Delete the entire pending entry if it became empty.
+		if list.Empty() {
+			delete(pool.pending, addr)
+		}
+	}
+}
+
+// addressByHeartbeat is an account address tagged with its last activity timestamp.
+type addressByHeartbeat struct {
+	address   common.Address
+	heartbeat time.Time
+}
+
+// addressesByHeartbeat implements sort.Interface over addressByHeartbeat entries,
+// ordering accounts with the oldest heartbeat (least recent activity) first.
+type addressesByHeartbeat []addressByHeartbeat
+
+func (a addressesByHeartbeat) Len() int           { return len(a) }
+func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
+func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+// accountSet is simply a set of addresses to check for existence, and a signer
+// capable of deriving addresses from transactions.
+type accountSet struct {
+	accounts map[common.Address]struct{}
+	signer   types.Signer
+	cache    *[]common.Address // lazily built flat list of addresses; nil when stale
+}
+
+// newAccountSet creates a new address set with an associated signer for sender
+// derivations.
+func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
+	as := &accountSet{
+		accounts: make(map[common.Address]struct{}, len(addrs)),
+		signer:   signer,
+	}
+	for _, addr := range addrs {
+		as.add(addr)
+	}
+	return as
+}
+
+// contains checks if a given address is contained within the set.
+func (as *accountSet) contains(addr common.Address) bool {
+	_, exist := as.accounts[addr]
+	return exist
+}
+
+// containsTx checks if the sender of a given tx is within the set. If the sender
+// cannot be derived, this method returns false.
+func (as *accountSet) containsTx(tx *types.Transaction) bool {
+	if addr, err := types.Sender(as.signer, tx); err == nil {
+		return as.contains(addr)
+	}
+	return false
+}
+
+// add inserts a new address into the set to track.
+func (as *accountSet) add(addr common.Address) {
+	as.accounts[addr] = struct{}{}
+	as.cache = nil // invalidate the flattened cache
+}
+
+// addTx adds the sender of tx into the set.
+func (as *accountSet) addTx(tx *types.Transaction) {
+	if addr, err := types.Sender(as.signer, tx); err == nil {
+		as.add(addr)
+	}
+}
+
+// flatten returns the list of addresses within this set, also caching it for later
+// reuse. The returned slice should not be changed!
+func (as *accountSet) flatten() []common.Address {
+	if as.cache == nil {
+		accounts := make([]common.Address, 0, len(as.accounts))
+		for account := range as.accounts {
+			accounts = append(accounts, account)
+		}
+		as.cache = &accounts
+	}
+	return *as.cache
+}
+
+// merge adds all addresses from the 'other' set into 'as'.
+func (as *accountSet) merge(other *accountSet) {
+	for addr := range other.accounts {
+		as.accounts[addr] = struct{}{}
+	}
+	as.cache = nil // invalidate the flattened cache
+}
+
+// lookup is used internally by LegacyPool to track transactions while allowing
+// lookup without mutex contention.
+//
+// Note, although this type is properly protected against concurrent access, it
+// is **not** a type that should ever be mutated or even exposed outside of the
+// transaction pool, since its internal state is tightly coupled with the pools
+// internal mechanisms. The sole purpose of the type is to permit out-of-bound
+// peeking into the pool in LegacyPool.Get without having to acquire the widely scoped
+// LegacyPool.mu mutex.
+//
+// This lookup set combines the notion of "local transactions", which is useful
+// to build upper-level structure.
+//
+// The zero value is not usable (the maps are nil); construct instances via newLookup.
+type lookup struct {
+	slots   int
+	lock    sync.RWMutex
+	locals  map[common.Hash]*types.Transaction
+	remotes map[common.Hash]*types.Transaction
+}
+
+// newLookup returns a new lookup structure.
+func newLookup() *lookup {
+	return &lookup{
+		locals:  make(map[common.Hash]*types.Transaction),
+		remotes: make(map[common.Hash]*types.Transaction),
+	}
+}
+
+// Range calls f on each key and value present in the map. The callback passed
+// should return the indicator whether the iteration needs to be continued.
+// Callers need to specify which set (or both) to be iterated.
+func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	if local {
+		for key, value := range t.locals {
+			if !f(key, value, true) {
+				return
+			}
+		}
+	}
+	if remote {
+		for key, value := range t.remotes {
+			if !f(key, value, false) {
+				return
+			}
+		}
+	}
+}
+
+// Get returns a transaction if it exists in the lookup, or nil if not found.
+// Local transactions are consulted before remote ones.
+func (t *lookup) Get(hash common.Hash) *types.Transaction {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	if tx := t.locals[hash]; tx != nil {
+		return tx
+	}
+	return t.remotes[hash]
+}
+
+// GetLocal returns a transaction if it exists in the lookup, or nil if not found.
+func (t *lookup) GetLocal(hash common.Hash) *types.Transaction {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return t.locals[hash]
+}
+
+// GetRemote returns a transaction if it exists in the lookup, or nil if not found.
+func (t *lookup) GetRemote(hash common.Hash) *types.Transaction {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return t.remotes[hash]
+}
+
+// Count returns the current number of transactions in the lookup.
+func (t *lookup) Count() int {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return len(t.locals) + len(t.remotes)
+}
+
+// LocalCount returns the current number of local transactions in the lookup.
+func (t *lookup) LocalCount() int {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return len(t.locals)
+}
+
+// RemoteCount returns the current number of remote transactions in the lookup.
+func (t *lookup) RemoteCount() int {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return len(t.remotes)
+}
+
+// Slots returns the current number of slots used in the lookup.
+func (t *lookup) Slots() int {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return t.slots
+}
+
+// Add adds a transaction to the lookup, accounting for its slot usage.
+// The transaction is stored in exactly one of the two maps, chosen by 'local'.
+func (t *lookup) Add(tx *types.Transaction, local bool) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	t.slots += numSlots(tx)
+	slotsGauge.Update(int64(t.slots))
+
+	if local {
+		t.locals[tx.Hash()] = tx
+	} else {
+		t.remotes[tx.Hash()] = tx
+	}
+}
+
+// Remove removes a transaction from the lookup, releasing its slot usage.
+func (t *lookup) Remove(hash common.Hash) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	tx, ok := t.locals[hash]
+	if !ok {
+		tx, ok = t.remotes[hash]
+	}
+	if !ok {
+		log.Error("No transaction found to be deleted", "hash", hash)
+		return
+	}
+	t.slots -= numSlots(tx)
+	slotsGauge.Update(int64(t.slots))
+
+	// A hash lives in at most one of the two maps, so blindly deleting from
+	// both is safe and avoids a second membership check.
+	delete(t.locals, hash)
+	delete(t.remotes, hash)
+}
+
+// RemoteToLocals migrates the transactions belonging to the given locals to the
+// locals set, returning the number of transactions migrated. It is assumed that
+// the locals set is thread-safe to be used.
+func (t *lookup) RemoteToLocals(locals *accountSet) int {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	var migrated int
+	for hash, tx := range t.remotes {
+		if locals.containsTx(tx) {
+			t.locals[hash] = tx
+			delete(t.remotes, hash)
+			migrated += 1
+		}
+	}
+	return migrated
+}
+
+// RemotesBelowTip finds all remote transactions below the given tip threshold.
+func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
+	found := make(types.Transactions, 0, 128)
+	t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
+		if tx.GasTipCapIntCmp(threshold) < 0 {
+			found = append(found, tx)
+		}
+		return true
+	}, false, true) // Only iterate remotes
+	return found
+}
+
+// numSlots calculates the number of slots needed for a single transaction,
+// rounding the transaction's size up to the nearest whole slot.
+func numSlots(tx *types.Transaction) int {
+	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
+}
diff --git a/core/txpool/txpool2_test.go b/core/txpool/legacypool/legacypool2_test.go
similarity index 85%
rename from core/txpool/txpool2_test.go
rename to core/txpool/legacypool/legacypool2_test.go
index 330ed60a94..dc26295cc3 100644
--- a/core/txpool/txpool2_test.go
+++ b/core/txpool/legacypool/legacypool2_test.go
@@ -23,7 +23,7 @@
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-package txpool
+package legacypool
import (
"crypto/ecdsa"
@@ -43,7 +43,7 @@ func pricedValuedTransaction(nonce uint64, value int64, gaslimit uint64, gaspric
return tx
}
-func count(t *testing.T, pool *TxPool) (pending int, queued int) {
+func count(t *testing.T, pool *LegacyPool) (pending int, queued int) {
t.Helper()
pending, queued = pool.stats()
if err := validatePoolInternals(pool); err != nil {
@@ -52,7 +52,7 @@ func count(t *testing.T, pool *TxPool) (pending int, queued int) {
return pending, queued
}
-func fillPool(t testing.TB, pool *TxPool) {
+func fillPool(t testing.TB, pool *LegacyPool) {
t.Helper()
// Create a number of test accounts, fund them and make transactions
executableTxs := types.Transactions{}
@@ -66,8 +66,8 @@ func fillPool(t testing.TB, pool *TxPool) {
}
}
// Import the batch and verify that limits have been enforced
- pool.AddRemotesSync(executableTxs)
- pool.AddRemotesSync(nonExecutableTxs)
+ pool.addRemotesSync(executableTxs)
+ pool.addRemotesSync(nonExecutableTxs)
pending, queued := pool.Stats()
slots := pool.all.Slots()
// sanity-check that the test prerequisites are ok (pending full)
@@ -89,12 +89,13 @@ func TestTransactionFutureAttack(t *testing.T) {
// Create the pool to test the limit enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.GlobalQueue = 100
config.GlobalSlots = 100
- pool := NewTxPool(config, eip1559Config, blockchain)
- defer pool.Stop()
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
+ defer pool.Close()
fillPool(t, pool)
pending, _ := pool.Stats()
// Now, future transaction attack starts, let's add a bunch of expensive non-executables, and see if the pending-count drops
@@ -106,7 +107,7 @@ func TestTransactionFutureAttack(t *testing.T) {
futureTxs = append(futureTxs, pricedTransaction(1000+uint64(j), 100000, big.NewInt(500), key))
}
for i := 0; i < 5; i++ {
- pool.AddRemotesSync(futureTxs)
+ pool.addRemotesSync(futureTxs)
newPending, newQueued := count(t, pool)
t.Logf("pending: %d queued: %d, all: %d\n", newPending, newQueued, pool.all.Slots())
}
@@ -125,9 +126,10 @@ func TestTransactionFuture1559(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain)
- defer pool.Stop()
+ blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
+ pool := New(testTxPoolConfig, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ defer pool.Close()
// Create a number of test accounts, fund them and make transactions
fillPool(t, pool)
@@ -141,7 +143,7 @@ func TestTransactionFuture1559(t *testing.T) {
for j := 0; j < int(pool.config.GlobalSlots+pool.config.GlobalQueue); j++ {
futureTxs = append(futureTxs, dynamicFeeTx(1000+uint64(j), 100000, big.NewInt(200), big.NewInt(101), key))
}
- pool.AddRemotesSync(futureTxs)
+ pool.addRemotesSync(futureTxs)
}
newPending, _ := pool.Stats()
// Pending should not have been touched
@@ -157,9 +159,10 @@ func TestTransactionZAttack(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain)
- defer pool.Stop()
+ blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
+ pool := New(testTxPoolConfig, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ defer pool.Close()
// Create a number of test accounts, fund them and make transactions
fillPool(t, pool)
@@ -191,7 +194,7 @@ func TestTransactionZAttack(t *testing.T) {
key, _ := crypto.GenerateKey()
pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000))
futureTxs = append(futureTxs, pricedTransaction(1000+uint64(j), 21000, big.NewInt(500), key))
- pool.AddRemotesSync(futureTxs)
+ pool.addRemotesSync(futureTxs)
}
overDraftTxs := types.Transactions{}
@@ -202,11 +205,11 @@ func TestTransactionZAttack(t *testing.T) {
overDraftTxs = append(overDraftTxs, pricedValuedTransaction(uint64(j), 600000000000, 21000, big.NewInt(500), key))
}
}
- pool.AddRemotesSync(overDraftTxs)
- pool.AddRemotesSync(overDraftTxs)
- pool.AddRemotesSync(overDraftTxs)
- pool.AddRemotesSync(overDraftTxs)
- pool.AddRemotesSync(overDraftTxs)
+ pool.addRemotesSync(overDraftTxs)
+ pool.addRemotesSync(overDraftTxs)
+ pool.addRemotesSync(overDraftTxs)
+ pool.addRemotesSync(overDraftTxs)
+ pool.addRemotesSync(overDraftTxs)
newPending, newQueued := count(t, pool)
newIvPending := countInvalidPending()
@@ -224,12 +227,13 @@ func TestTransactionZAttack(t *testing.T) {
func BenchmarkFutureAttack(b *testing.B) {
// Create the pool to test the limit enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.GlobalQueue = 100
config.GlobalSlots = 100
- pool := NewTxPool(config, eip1559Config, blockchain)
- defer pool.Stop()
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ defer pool.Close()
fillPool(b, pool)
key, _ := crypto.GenerateKey()
@@ -241,6 +245,6 @@ func BenchmarkFutureAttack(b *testing.B) {
}
b.ResetTimer()
for i := 0; i < 5; i++ {
- pool.AddRemotesSync(futureTxs)
+ pool.addRemotesSync(futureTxs)
}
}
diff --git a/core/txpool/legacypool/legacypool_ext.go b/core/txpool/legacypool/legacypool_ext.go
new file mode 100644
index 0000000000..46ca4ed127
--- /dev/null
+++ b/core/txpool/legacypool/legacypool_ext.go
@@ -0,0 +1,91 @@
+// (c) 2021-2022, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package legacypool
+
+import (
+ "math/big"
+ "time"
+
+ "github.com/ava-labs/coreth/consensus/dummy"
+ "github.com/ava-labs/coreth/utils"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// HasLocal returns an indicator whether txpool has a local transaction cached with
+// the given hash.
+func (pool *LegacyPool) HasLocal(hash common.Hash) bool {
+	return pool.all.GetLocal(hash) != nil
+}
+
+// RemoveTx removes the transaction with the given hash from the pool, if present.
+func (pool *LegacyPool) RemoveTx(hash common.Hash) {
+	pool.mu.Lock()
+	defer pool.mu.Unlock()
+
+	pool.removeTx(hash, true)
+}
+
+// GasTip returns the current gas tip enforced by the transaction pool.
+func (pool *LegacyPool) GasTip() *big.Int {
+	pool.mu.RLock()
+	defer pool.mu.RUnlock()
+
+	return new(big.Int).Set(pool.gasTip.Load())
+}
+
+// SetMinFee updates the minimum fee enforced by the transaction pool.
+func (pool *LegacyPool) SetMinFee(minFee *big.Int) {
+	pool.mu.Lock()
+	defer pool.mu.Unlock()
+
+	pool.minimumFee = minFee
+}
+
+// startPeriodicFeeUpdate launches the background base fee updater; it returns
+// immediately when ApricotPhase3 is not configured.
+func (pool *LegacyPool) startPeriodicFeeUpdate() {
+	if pool.chainconfig.ApricotPhase3BlockTimestamp == nil {
+		return
+	}
+
+	// Call updateBaseFee here to ensure that there is not a [baseFeeUpdateInterval] delay
+	// when starting up in ApricotPhase3 before the base fee is updated.
+	if time.Now().After(utils.Uint64ToTime(pool.chainconfig.ApricotPhase3BlockTimestamp)) {
+		pool.updateBaseFee()
+	}
+
+	pool.wg.Add(1)
+	go pool.periodicBaseFeeUpdate()
+}
+
+// periodicBaseFeeUpdate periodically refreshes the base fee estimate until the
+// pool shuts down.
+func (pool *LegacyPool) periodicBaseFeeUpdate() {
+	defer pool.wg.Done()
+
+	// Sleep until it's time to start the periodic base fee update or the tx pool is shutting down
+	select {
+	case <-time.After(time.Until(utils.Uint64ToTime(pool.chainconfig.ApricotPhase3BlockTimestamp))):
+	case <-pool.generalShutdownChan:
+		return // Return early if shutting down
+	}
+
+	// Update the base fee every [baseFeeUpdateInterval]
+	// and shutdown when [generalShutdownChan] is closed by Stop()
+	for {
+		select {
+		case <-time.After(baseFeeUpdateInterval):
+			pool.updateBaseFee()
+		case <-pool.generalShutdownChan:
+			return
+		}
+	}
+}
+
+// updateBaseFee estimates the next base fee from the current head and applies it
+// to the priced list; estimation failures are logged and otherwise ignored.
+func (pool *LegacyPool) updateBaseFee() {
+	pool.mu.Lock()
+	defer pool.mu.Unlock()
+
+	_, baseFeeEstimate, err := dummy.EstimateNextBaseFee(pool.chainconfig, pool.currentHead.Load(), uint64(time.Now().Unix()))
+	if err == nil {
+		pool.priced.SetBaseFee(baseFeeEstimate)
+	} else {
+		log.Error("failed to update base fee", "currentHead", pool.currentHead.Load().Hash(), "err", err)
+	}
+}
diff --git a/core/txpool/txpool_test.go b/core/txpool/legacypool/legacypool_test.go
similarity index 86%
rename from core/txpool/txpool_test.go
rename to core/txpool/legacypool/legacypool_test.go
index a2f39a2447..b5b39f6b2c 100644
--- a/core/txpool/txpool_test.go
+++ b/core/txpool/legacypool/legacypool_test.go
@@ -24,7 +24,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-package txpool
+package legacypool
import (
"crypto/ecdsa"
@@ -34,8 +34,6 @@ import (
"math/big"
"math/rand"
"os"
- "strings"
- "sync"
"sync/atomic"
"testing"
"time"
@@ -43,6 +41,7 @@ import (
"github.com/ava-labs/coreth/core"
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/state"
+ "github.com/ava-labs/coreth/core/txpool"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/trie"
@@ -72,31 +71,23 @@ func init() {
}
type testBlockChain struct {
- statedb *state.StateDB
+ config *params.ChainConfig
gasLimit atomic.Uint64
+ statedb *state.StateDB
chainHeadFeed *event.Feed
- lock sync.Mutex
}
-func newTestBlockChain(gasLimit uint64, statedb *state.StateDB, chainHeadFeed *event.Feed) *testBlockChain {
- bc := testBlockChain{statedb: statedb, chainHeadFeed: chainHeadFeed}
+func newTestBlockChain(config *params.ChainConfig, gasLimit uint64, statedb *state.StateDB, chainHeadFeed *event.Feed) *testBlockChain {
+ bc := testBlockChain{config: config, statedb: statedb, chainHeadFeed: chainHeadFeed}
bc.gasLimit.Store(gasLimit)
return &bc
}
-func (bc *testBlockChain) reset(statedb *state.StateDB, gasLimit uint64, chainHeadFeed *event.Feed) {
- bc.lock.Lock()
- defer bc.lock.Unlock()
-
- bc.statedb = statedb
- bc.gasLimit.Store(gasLimit)
- bc.chainHeadFeed = chainHeadFeed
+func (bc *testBlockChain) Config() *params.ChainConfig {
+ return bc.config
}
func (bc *testBlockChain) CurrentBlock() *types.Header {
- bc.lock.Lock()
- defer bc.lock.Unlock()
-
return &types.Header{
Number: new(big.Int),
GasLimit: bc.gasLimit.Load(),
@@ -108,16 +99,10 @@ func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block
}
func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) {
- bc.lock.Lock()
- defer bc.lock.Unlock()
-
return bc.statedb, nil
}
func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
- bc.lock.Lock()
- defer bc.lock.Unlock()
-
return bc.chainHeadFeed.Subscribe(ch)
}
@@ -158,24 +143,26 @@ func dynamicFeeTx(nonce uint64, gaslimit uint64, gasFee *big.Int, tip *big.Int,
return tx
}
-func setupPool() (*TxPool, *ecdsa.PrivateKey) {
+func setupPool() (*LegacyPool, *ecdsa.PrivateKey) {
return setupPoolWithConfig(params.TestChainConfig)
}
-func setupPoolWithConfig(config *params.ChainConfig) (*TxPool, *ecdsa.PrivateKey) {
+func setupPoolWithConfig(config *params.ChainConfig) (*LegacyPool, *ecdsa.PrivateKey) {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(10000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(config, 10000000, statedb, new(event.Feed))
key, _ := crypto.GenerateKey()
- pool := NewTxPool(testTxPoolConfig, config, blockchain)
-
+ pool := New(testTxPoolConfig, blockchain)
+ if err := pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock()); err != nil {
+ panic(err)
+ }
// wait for the pool to initialize
<-pool.initDoneCh
return pool, key
}
// validatePoolInternals checks various consistency invariants within the pool.
-func validatePoolInternals(pool *TxPool) error {
+func validatePoolInternals(pool *LegacyPool) error {
pool.mu.RLock()
defer pool.mu.RUnlock()
@@ -279,20 +266,21 @@ func TestStateChangeDuringReset(t *testing.T) {
// setup pool with 2 transaction in it
statedb.SetBalance(address, new(big.Int).SetUint64(params.Ether))
- blockchain := &testChain{newTestBlockChain(1000000000, statedb, new(event.Feed)), address, &trigger}
+ blockchain := &testChain{newTestBlockChain(params.TestChainConfig, 1000000000, statedb, new(event.Feed)), address, &trigger}
tx0 := transaction(0, 100000, key)
tx1 := transaction(1, 100000, key)
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(testTxPoolConfig, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ defer pool.Close()
nonce := pool.Nonce(address)
if nonce != 0 {
t.Fatalf("Invalid nonce, want 0, got %d", nonce)
}
- pool.AddRemotesSync([]*types.Transaction{tx0, tx1})
+ pool.addRemotesSync([]*types.Transaction{tx0, tx1})
nonce = pool.Nonce(address)
if nonce != 2 {
@@ -309,13 +297,13 @@ func TestStateChangeDuringReset(t *testing.T) {
}
}
-func testAddBalance(pool *TxPool, addr common.Address, amount *big.Int) {
+func testAddBalance(pool *LegacyPool, addr common.Address, amount *big.Int) {
pool.mu.Lock()
pool.currentState.AddBalance(addr, amount)
pool.mu.Unlock()
}
-func testSetNonce(pool *TxPool, addr common.Address, nonce uint64) {
+func testSetNonce(pool *LegacyPool, addr common.Address, nonce uint64) {
pool.mu.Lock()
pool.currentState.SetNonce(addr, nonce)
pool.mu.Unlock()
@@ -325,36 +313,36 @@ func TestInvalidTransactions(t *testing.T) {
t.Parallel()
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
tx := transaction(0, 100, key)
from, _ := deriveSender(tx)
// Intrinsic gas too low
testAddBalance(pool, from, big.NewInt(1))
- if err, want := pool.AddRemote(tx), core.ErrIntrinsicGas; !errors.Is(err, want) {
+ if err, want := pool.addRemote(tx), core.ErrIntrinsicGas; !errors.Is(err, want) {
t.Errorf("want %v have %v", want, err)
}
// Insufficient funds
tx = transaction(0, 100000, key)
- if err, want := pool.AddRemote(tx), core.ErrInsufficientFunds; !errors.Is(err, want) {
+ if err, want := pool.addRemote(tx), core.ErrInsufficientFunds; !errors.Is(err, want) {
t.Errorf("want %v have %v", want, err)
}
testSetNonce(pool, from, 1)
testAddBalance(pool, from, big.NewInt(0xffffffffffffff))
tx = transaction(0, 100000, key)
- if err, want := pool.AddRemote(tx), core.ErrNonceTooLow; !errors.Is(err, want) {
+ if err, want := pool.addRemote(tx), core.ErrNonceTooLow; !errors.Is(err, want) {
t.Errorf("want %v have %v", want, err)
}
tx = transaction(1, 100000, key)
- pool.gasPrice = big.NewInt(1000)
- if err, want := pool.AddRemote(tx), ErrUnderpriced; !errors.Is(err, want) {
+ pool.gasTip.Store(big.NewInt(1000))
+ if err, want := pool.addRemote(tx), txpool.ErrUnderpriced; !errors.Is(err, want) {
t.Errorf("want %v have %v", want, err)
}
- if err := pool.AddLocal(tx); err != nil {
+ if err := pool.addLocal(tx); err != nil {
t.Error("expected", nil, "got", err)
}
}
@@ -363,7 +351,7 @@ func TestQueue(t *testing.T) {
t.Parallel()
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
tx := transaction(0, 100, key)
from, _ := deriveSender(tx)
@@ -394,7 +382,7 @@ func TestQueue2(t *testing.T) {
t.Parallel()
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
tx1 := transaction(0, 100, key)
tx2 := transaction(10, 100, key)
@@ -420,13 +408,13 @@ func TestNegativeValue(t *testing.T) {
t.Parallel()
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(-1), 100, big.NewInt(1), nil), types.HomesteadSigner{}, key)
from, _ := deriveSender(tx)
testAddBalance(pool, from, big.NewInt(1))
- if err := pool.AddRemote(tx); err != ErrNegativeValue {
- t.Error("expected", ErrNegativeValue, "got", err)
+ if err := pool.addRemote(tx); err != txpool.ErrNegativeValue {
+ t.Error("expected", txpool.ErrNegativeValue, "got", err)
}
}
@@ -434,11 +422,11 @@ func TestTipAboveFeeCap(t *testing.T) {
t.Parallel()
pool, key := setupPoolWithConfig(eip1559Config)
- defer pool.Stop()
+ defer pool.Close()
tx := dynamicFeeTx(0, 100, big.NewInt(1), big.NewInt(2), key)
- if err := pool.AddRemote(tx); err != core.ErrTipAboveFeeCap {
+ if err := pool.addRemote(tx); err != core.ErrTipAboveFeeCap {
t.Error("expected", core.ErrTipAboveFeeCap, "got", err)
}
}
@@ -447,18 +435,18 @@ func TestVeryHighValues(t *testing.T) {
t.Parallel()
pool, key := setupPoolWithConfig(eip1559Config)
- defer pool.Stop()
+ defer pool.Close()
veryBigNumber := big.NewInt(1)
veryBigNumber.Lsh(veryBigNumber, 300)
tx := dynamicFeeTx(0, 100, big.NewInt(1), veryBigNumber, key)
- if err := pool.AddRemote(tx); err != core.ErrTipVeryHigh {
+ if err := pool.addRemote(tx); err != core.ErrTipVeryHigh {
t.Error("expected", core.ErrTipVeryHigh, "got", err)
}
tx2 := dynamicFeeTx(0, 100, veryBigNumber, big.NewInt(1), key)
- if err := pool.AddRemote(tx2); err != core.ErrFeeCapVeryHigh {
+ if err := pool.addRemote(tx2); err != core.ErrFeeCapVeryHigh {
t.Error("expected", core.ErrFeeCapVeryHigh, "got", err)
}
}
@@ -467,14 +455,14 @@ func TestChainFork(t *testing.T) {
t.Parallel()
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
addr := crypto.PubkeyToAddress(key.PublicKey)
resetState := func() {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
statedb.AddBalance(addr, big.NewInt(100000000000000))
- pool.chain.(*testBlockChain).reset(statedb, 1000000, new(event.Feed))
+ pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed))
<-pool.requestReset(nil, nil)
}
resetState()
@@ -496,14 +484,14 @@ func TestDoubleNonce(t *testing.T) {
t.Parallel()
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
addr := crypto.PubkeyToAddress(key.PublicKey)
resetState := func() {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
statedb.AddBalance(addr, big.NewInt(100000000000000))
- pool.chain.(*testBlockChain).reset(statedb, 1000000, new(event.Feed))
+ pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed))
<-pool.requestReset(nil, nil)
}
resetState()
@@ -547,7 +535,7 @@ func TestMissingNonce(t *testing.T) {
t.Parallel()
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
addr := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, addr, big.NewInt(100000000000000))
@@ -571,7 +559,7 @@ func TestNonceRecovery(t *testing.T) {
const n = 10
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
addr := crypto.PubkeyToAddress(key.PublicKey)
testSetNonce(pool, addr, n)
@@ -579,7 +567,7 @@ func TestNonceRecovery(t *testing.T) {
<-pool.requestReset(nil, nil)
tx := transaction(n, 100000, key)
- if err := pool.AddRemote(tx); err != nil {
+ if err := pool.addRemote(tx); err != nil {
t.Error(err)
}
// simulate some weird re-order of transactions and missing nonce(s)
@@ -597,7 +585,7 @@ func TestDropping(t *testing.T) {
// Create a test account and fund it
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, account, big.NewInt(1000))
@@ -674,7 +662,7 @@ func TestDropping(t *testing.T) {
}
// Reduce the block gas limit, check that invalidated transactions are dropped
tbc := pool.chain.(*testBlockChain)
- tbc.reset(tbc.statedb, 100, tbc.chainHeadFeed)
+ pool.chain = newTestBlockChain(pool.chainconfig, 100, tbc.statedb, tbc.chainHeadFeed)
<-pool.requestReset(nil, nil)
if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
@@ -702,10 +690,11 @@ func TestPostponing(t *testing.T) {
// Create the pool to test the postponing with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(testTxPoolConfig, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ defer pool.Close()
// Create two test accounts to produce different gap profiles with
keys := make([]*ecdsa.PrivateKey, 2)
@@ -730,7 +719,7 @@ func TestPostponing(t *testing.T) {
txs = append(txs, tx)
}
}
- for i, err := range pool.AddRemotesSync(txs) {
+ for i, err := range pool.addRemotesSync(txs) {
if err != nil {
t.Fatalf("tx %d: failed to add transactions: %v", i, err)
}
@@ -815,7 +804,7 @@ func TestGapFilling(t *testing.T) {
// Create a test account and fund it
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, account, big.NewInt(1000000))
@@ -826,7 +815,7 @@ func TestGapFilling(t *testing.T) {
defer sub.Unsubscribe()
// Create a pending and a queued transaction with a nonce-gap in between
- pool.AddRemotesSync([]*types.Transaction{
+ pool.addRemotesSync([]*types.Transaction{
transaction(0, 100000, key),
transaction(2, 100000, key),
})
@@ -869,7 +858,7 @@ func TestQueueAccountLimiting(t *testing.T) {
// Create a test account and fund it
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, account, big.NewInt(1000000))
@@ -914,14 +903,15 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
// Create the pool to test the limit enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.NoLocals = nolocals
config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible)
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ defer pool.Close()
// Create a number of test accounts and fund them (last one will be the local)
keys := make([]*ecdsa.PrivateKey, 5)
@@ -943,7 +933,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
nonces[addr]++
}
// Import the batch and verify that limits have been enforced
- pool.AddRemotesSync(txs)
+ pool.addRemotesSync(txs)
queued := 0
for addr, list := range pool.queue {
@@ -960,7 +950,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
for i := uint64(0); i < 3*config.GlobalQueue; i++ {
txs = append(txs, transaction(i+1, 100000, local))
}
- pool.AddLocals(txs)
+ pool.addLocals(txs)
// If locals are disabled, the previous eviction algorithm should apply here too
if nolocals {
@@ -1006,14 +996,15 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
// Create the pool to test the non-expiration enforcement
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.Lifetime = time.Second
config.NoLocals = nolocals
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
+ defer pool.Close()
// Create two test accounts to ensure remotes expire but locals do not
local, _ := crypto.GenerateKey()
@@ -1023,10 +1014,10 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000))
// Add the two transactions and ensure they both are queued up
- if err := pool.AddLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil {
+ if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil {
t.Fatalf("failed to add local transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), remote)); err != nil {
+ if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), remote)); err != nil {
t.Fatalf("failed to add remote transaction: %v", err)
}
pending, queued := pool.Stats()
@@ -1093,7 +1084,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
}
// Queue gapped transactions
- if err := pool.AddLocal(pricedTransaction(4, 100000, big.NewInt(1), local)); err != nil {
+ if err := pool.addLocal(pricedTransaction(4, 100000, big.NewInt(1), local)); err != nil {
t.Fatalf("failed to add remote transaction: %v", err)
}
if err := pool.addRemoteSync(pricedTransaction(4, 100000, big.NewInt(1), remote)); err != nil {
@@ -1102,7 +1093,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
time.Sleep(5 * evictionInterval) // A half lifetime pass
// Queue executable transactions, the life cycle should be restarted.
- if err := pool.AddLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil {
+ if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil {
t.Fatalf("failed to add remote transaction: %v", err)
}
if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1), remote)); err != nil {
@@ -1116,7 +1107,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
}
if queued != 2 {
- t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
}
if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
@@ -1150,7 +1141,7 @@ func TestPendingLimiting(t *testing.T) {
// Create a test account and fund it
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, account, big.NewInt(1000000000000))
@@ -1191,13 +1182,14 @@ func TestPendingGlobalLimiting(t *testing.T) {
// Create the pool to test the limit enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.GlobalSlots = config.AccountSlots * 10
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
+ defer pool.Close()
// Create a number of test accounts and fund them
keys := make([]*ecdsa.PrivateKey, 5)
@@ -1217,7 +1209,7 @@ func TestPendingGlobalLimiting(t *testing.T) {
}
}
// Import the batch and verify that limits have been enforced
- pool.AddRemotesSync(txs)
+ pool.addRemotesSync(txs)
pending := 0
for _, list := range pool.pending {
@@ -1239,7 +1231,7 @@ func TestAllowedTxSize(t *testing.T) {
// Create a test account and fund it
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, account, big.NewInt(1000000000))
@@ -1248,7 +1240,7 @@ func TestAllowedTxSize(t *testing.T) {
//
// It is assumed the fields in the transaction (except of the data) are:
// - nonce <= 32 bytes
- // - gasPrice <= 32 bytes
+ // - gasTip <= 32 bytes
// - gasLimit <= 32 bytes
// - recipient == 20 bytes
// - value <= 32 bytes
@@ -1256,22 +1248,21 @@ func TestAllowedTxSize(t *testing.T) {
// All those fields are summed up to at most 213 bytes.
baseSize := uint64(213)
dataSize := txMaxSize - baseSize
- maxGas := pool.currentMaxGas.Load()
// Try adding a transaction with maximal allowed size
- tx := pricedDataTransaction(0, maxGas, big.NewInt(1), key, dataSize)
+ tx := pricedDataTransaction(0, pool.currentHead.Load().GasLimit, big.NewInt(1), key, dataSize)
if err := pool.addRemoteSync(tx); err != nil {
t.Fatalf("failed to add transaction of size %d, close to maximal: %v", int(tx.Size()), err)
}
// Try adding a transaction with random allowed size
- if err := pool.addRemoteSync(pricedDataTransaction(1, maxGas, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil {
+ if err := pool.addRemoteSync(pricedDataTransaction(1, pool.currentHead.Load().GasLimit, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil {
t.Fatalf("failed to add transaction of random allowed size: %v", err)
}
// Try adding a transaction of minimal not allowed size
- if err := pool.addRemoteSync(pricedDataTransaction(2, maxGas, big.NewInt(1), key, txMaxSize)); err == nil {
+ if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, txMaxSize)); err == nil {
t.Fatalf("expected rejection on slightly oversize transaction")
}
// Try adding a transaction of random not allowed size
- if err := pool.addRemoteSync(pricedDataTransaction(2, maxGas, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil {
+ if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil {
t.Fatalf("expected rejection on oversize transaction")
}
// Run some sanity checks on the pool internals
@@ -1293,15 +1284,16 @@ func TestCapClearsFromAll(t *testing.T) {
// Create the pool to test the limit enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.AccountSlots = 2
config.AccountQueue = 2
config.GlobalSlots = 8
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
+ defer pool.Close()
// Create a number of test accounts and fund them
key, _ := crypto.GenerateKey()
@@ -1313,7 +1305,7 @@ func TestCapClearsFromAll(t *testing.T) {
txs = append(txs, transaction(uint64(j), 100000, key))
}
// Import the batch and verify that limits have been enforced
- pool.AddRemotes(txs)
+ pool.addRemotes(txs)
if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
@@ -1327,13 +1319,14 @@ func TestPendingMinimumAllowance(t *testing.T) {
// Create the pool to test the limit enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.GlobalSlots = 1
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
+ defer pool.Close()
// Create a number of test accounts and fund them
keys := make([]*ecdsa.PrivateKey, 5)
@@ -1353,7 +1346,7 @@ func TestPendingMinimumAllowance(t *testing.T) {
}
}
// Import the batch and verify that limits have been enforced
- pool.AddRemotesSync(txs)
+ pool.addRemotesSync(txs)
for addr, list := range pool.pending {
if list.Len() != int(config.AccountSlots) {
@@ -1375,10 +1368,11 @@ func TestRepricing(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(testTxPoolConfig, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ defer pool.Close()
// Keep track of transaction events to ensure all executables get announced
events := make(chan core.NewTxsEvent, 32)
@@ -1409,8 +1403,8 @@ func TestRepricing(t *testing.T) {
ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[3])
// Import the batch and that both pending and queued transactions match up
- pool.AddRemotesSync(txs)
- pool.AddLocal(ltx)
+ pool.addRemotesSync(txs)
+ pool.addLocal(ltx)
pending, queued := pool.Stats()
if pending != 7 {
@@ -1426,7 +1420,7 @@ func TestRepricing(t *testing.T) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Reprice the pool and check that underpriced transactions get dropped
- pool.SetGasPrice(big.NewInt(2))
+ pool.SetGasTip(big.NewInt(2))
pending, queued = pool.Stats()
if pending != 2 {
@@ -1442,14 +1436,14 @@ func TestRepricing(t *testing.T) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Check that we can't add the old transactions back
- if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); !strings.Contains(err.Error(), ErrUnderpriced.Error()) {
- t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want error to conain %v", err, ErrUnderpriced)
+ if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want error to conain %v", err, txpool.ErrUnderpriced)
}
- if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !strings.Contains(err.Error(), ErrUnderpriced.Error()) {
- t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want error to conain %v", err, ErrUnderpriced)
+ if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want error to conain %v", err, txpool.ErrUnderpriced)
}
- if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); !strings.Contains(err.Error(), ErrUnderpriced.Error()) {
- t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want error to conain %v", err, ErrUnderpriced)
+ if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want error to conain %v", err, txpool.ErrUnderpriced)
}
if err := validateEvents(events, 0); err != nil {
t.Fatalf("post-reprice event firing failed: %v", err)
@@ -1459,7 +1453,7 @@ func TestRepricing(t *testing.T) {
}
// However we can add local underpriced transactions
tx := pricedTransaction(1, 100000, big.NewInt(1), keys[3])
- if err := pool.AddLocal(tx); err != nil {
+ if err := pool.addLocal(tx); err != nil {
t.Fatalf("failed to add underpriced local transaction: %v", err)
}
if pending, _ = pool.Stats(); pending != 3 {
@@ -1472,13 +1466,13 @@ func TestRepricing(t *testing.T) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// And we can fill gaps with properly priced transactions
- if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil {
+ if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil {
t.Fatalf("failed to add pending transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil {
+ if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil {
t.Fatalf("failed to add pending transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil {
+ if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil {
t.Fatalf("failed to add queued transaction: %v", err)
}
if err := validateEvents(events, 5); err != nil {
@@ -1499,7 +1493,7 @@ func TestRepricingDynamicFee(t *testing.T) {
// Create the pool to test the pricing enforcement with
pool, _ := setupPoolWithConfig(eip1559Config)
- defer pool.Stop()
+ defer pool.Close()
// Keep track of transaction events to ensure all executables get announced
events := make(chan core.NewTxsEvent, 32)
@@ -1530,8 +1524,8 @@ func TestRepricingDynamicFee(t *testing.T) {
ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[3])
// Import the batch and that both pending and queued transactions match up
- pool.AddRemotesSync(txs)
- pool.AddLocal(ltx)
+ pool.addRemotesSync(txs)
+ pool.addLocal(ltx)
pending, queued := pool.Stats()
if pending != 7 {
@@ -1547,7 +1541,7 @@ func TestRepricingDynamicFee(t *testing.T) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Reprice the pool and check that underpriced transactions get dropped
- pool.SetGasPrice(big.NewInt(2))
+ pool.SetGasTip(big.NewInt(2))
pending, queued = pool.Stats()
if pending != 2 {
@@ -1564,16 +1558,16 @@ func TestRepricingDynamicFee(t *testing.T) {
}
// Check that we can't add the old transactions back
tx := pricedTransaction(1, 100000, big.NewInt(1), keys[0])
- if err := pool.AddRemote(tx); !strings.Contains(err.Error(), ErrUnderpriced.Error()) {
- t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
+ if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
}
tx = dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1])
- if err := pool.AddRemote(tx); !strings.Contains(err.Error(), ErrUnderpriced.Error()) {
- t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
+ if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
}
tx = dynamicFeeTx(2, 100000, big.NewInt(1), big.NewInt(1), keys[2])
- if err := pool.AddRemote(tx); !strings.Contains(err.Error(), ErrUnderpriced.Error()) {
- t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
+ if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
}
if err := validateEvents(events, 0); err != nil {
t.Fatalf("post-reprice event firing failed: %v", err)
@@ -1583,7 +1577,7 @@ func TestRepricingDynamicFee(t *testing.T) {
}
// However we can add local underpriced transactions
tx = dynamicFeeTx(1, 100000, big.NewInt(1), big.NewInt(1), keys[3])
- if err := pool.AddLocal(tx); err != nil {
+ if err := pool.addLocal(tx); err != nil {
t.Fatalf("failed to add underpriced local transaction: %v", err)
}
if pending, _ = pool.Stats(); pending != 3 {
@@ -1597,15 +1591,15 @@ func TestRepricingDynamicFee(t *testing.T) {
}
// And we can fill gaps with properly priced transactions
tx = pricedTransaction(1, 100000, big.NewInt(2), keys[0])
- if err := pool.AddRemote(tx); err != nil {
+ if err := pool.addRemote(tx); err != nil {
t.Fatalf("failed to add pending transaction: %v", err)
}
tx = dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[1])
- if err := pool.AddRemote(tx); err != nil {
+ if err := pool.addRemote(tx); err != nil {
t.Fatalf("failed to add pending transaction: %v", err)
}
tx = dynamicFeeTx(2, 100000, big.NewInt(2), big.NewInt(2), keys[2])
- if err := pool.AddRemote(tx); err != nil {
+ if err := pool.addRemoteSync(tx); err != nil {
t.Fatalf("failed to add queued transaction: %v", err)
}
if err := validateEvents(events, 5); err != nil {
@@ -1623,10 +1617,11 @@ func TestRepricingKeepsLocals(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain)
- defer pool.Stop()
+ pool := New(testTxPoolConfig, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ defer pool.Close()
// Create a number of test accounts and fund them
keys := make([]*ecdsa.PrivateKey, 3)
@@ -1638,23 +1633,23 @@ func TestRepricingKeepsLocals(t *testing.T) {
for i := uint64(0); i < 500; i++ {
// Add pending transaction.
pendingTx := pricedTransaction(i, 100000, big.NewInt(int64(i)), keys[2])
- if err := pool.AddLocal(pendingTx); err != nil {
+ if err := pool.addLocal(pendingTx); err != nil {
t.Fatal(err)
}
// Add queued transaction.
queuedTx := pricedTransaction(i+501, 100000, big.NewInt(int64(i)), keys[2])
- if err := pool.AddLocal(queuedTx); err != nil {
+ if err := pool.addLocal(queuedTx); err != nil {
t.Fatal(err)
}
// Add pending dynamic fee transaction.
pendingTx = dynamicFeeTx(i, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1])
- if err := pool.AddLocal(pendingTx); err != nil {
+ if err := pool.addLocal(pendingTx); err != nil {
t.Fatal(err)
}
// Add queued dynamic fee transaction.
queuedTx = dynamicFeeTx(i+501, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1])
- if err := pool.AddLocal(queuedTx); err != nil {
+ if err := pool.addLocal(queuedTx); err != nil {
t.Fatal(err)
}
}
@@ -1676,13 +1671,13 @@ func TestRepricingKeepsLocals(t *testing.T) {
validate()
// Reprice the pool and check that nothing is dropped
- pool.SetGasPrice(big.NewInt(2))
+ pool.SetGasTip(big.NewInt(2))
validate()
- pool.SetGasPrice(big.NewInt(2))
- pool.SetGasPrice(big.NewInt(4))
- pool.SetGasPrice(big.NewInt(8))
- pool.SetGasPrice(big.NewInt(100))
+ pool.SetGasTip(big.NewInt(2))
+ pool.SetGasTip(big.NewInt(4))
+ pool.SetGasTip(big.NewInt(8))
+ pool.SetGasTip(big.NewInt(100))
validate()
}
@@ -1696,14 +1691,15 @@ func TestUnderpricing(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.GlobalSlots = 2
config.GlobalQueue = 2
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
+ defer pool.Close()
// Keep track of transaction events to ensure all executables get announced
events := make(chan core.NewTxsEvent, 32)
@@ -1727,8 +1723,8 @@ func TestUnderpricing(t *testing.T) {
ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[2])
// Import the batch and that both pending and queued transactions match up
- pool.AddRemotesSync(txs)
- pool.AddLocal(ltx)
+ pool.addRemotes(txs)
+ pool.addLocal(ltx)
pending, queued := pool.Stats()
if pending != 3 {
@@ -1744,8 +1740,8 @@ func TestUnderpricing(t *testing.T) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Ensure that adding an underpriced transaction on block limit fails
- if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); err != ErrUnderpriced {
- t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
+ if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
}
// Replace a future transaction with a future transaction
if err := pool.addRemoteSync(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1
@@ -1762,8 +1758,8 @@ func TestUnderpricing(t *testing.T) {
t.Fatalf("failed to add well priced transaction: %v", err)
}
// Ensure that replacing a pending transaction with a future transaction fails
- if err := pool.addRemoteSync(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); err != ErrFutureReplacePending {
- t.Fatalf("adding future replace transaction error mismatch: have %v, want %v", err, ErrFutureReplacePending)
+ if err := pool.addRemoteSync(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); err != txpool.ErrFutureReplacePending {
+ t.Fatalf("adding future replace transaction error mismatch: have %v, want %v", err, txpool.ErrFutureReplacePending)
}
pending, queued = pool.Stats()
if pending != 2 {
@@ -1780,11 +1776,11 @@ func TestUnderpricing(t *testing.T) {
}
// Ensure that adding local transactions can push out even higher priced ones
ltx = pricedTransaction(1, 100000, big.NewInt(0), keys[2])
- if err := pool.AddLocal(ltx); err != nil {
+ if err := pool.addLocal(ltx); err != nil {
t.Fatalf("failed to append underpriced local transaction: %v", err)
}
ltx = pricedTransaction(0, 100000, big.NewInt(0), keys[3])
- if err := pool.AddLocal(ltx); err != nil {
+ if err := pool.addLocal(ltx); err != nil {
t.Fatalf("failed to add new underpriced local transaction: %v", err)
}
pending, queued = pool.Stats()
@@ -1810,14 +1806,15 @@ func TestStableUnderpricing(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.GlobalSlots = 128
config.GlobalQueue = 0
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
+ defer pool.Close()
// Keep track of transaction events to ensure all executables get announced
events := make(chan core.NewTxsEvent, 32)
@@ -1835,7 +1832,7 @@ func TestStableUnderpricing(t *testing.T) {
for i := uint64(0); i < config.GlobalSlots; i++ {
txs = append(txs, pricedTransaction(i, 100000, big.NewInt(1), keys[0]))
}
- pool.AddRemotesSync(txs)
+ pool.addRemotesSync(txs)
pending, queued := pool.Stats()
if pending != int(config.GlobalSlots) {
@@ -1878,7 +1875,7 @@ func TestUnderpricingDynamicFee(t *testing.T) {
t.Parallel()
pool, _ := setupPoolWithConfig(eip1559Config)
- defer pool.Stop()
+ defer pool.Close()
pool.config.GlobalSlots = 2
pool.config.GlobalQueue = 2
@@ -1905,8 +1902,8 @@ func TestUnderpricingDynamicFee(t *testing.T) {
ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[2])
// Import the batch and that both pending and queued transactions match up
- pool.AddRemotesSync(txs) // Pend K0:0, K0:1; Que K1:1
- pool.AddLocal(ltx) // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1
+ pool.addRemotes(txs) // Pend K0:0, K0:1; Que K1:1
+ pool.addLocal(ltx) // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1
pending, queued := pool.Stats()
if pending != 3 {
@@ -1924,8 +1921,8 @@ func TestUnderpricingDynamicFee(t *testing.T) {
// Ensure that adding an underpriced transaction fails
tx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1])
- if err := pool.addRemoteSync(tx); err != ErrUnderpriced { // Pend K0:0, K0:1, K2:0; Que K1:1
- t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
+ if err := pool.addRemoteSync(tx); !errors.Is(err, txpool.ErrUnderpriced) { // Pend K0:0, K0:1, K2:0; Que K1:1
+ t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
}
// Ensure that adding high priced transactions drops cheap ones, but not own
@@ -1957,11 +1954,11 @@ func TestUnderpricingDynamicFee(t *testing.T) {
}
// Ensure that adding local transactions can push out even higher priced ones
ltx = dynamicFeeTx(1, 100000, big.NewInt(0), big.NewInt(0), keys[2])
- if err := pool.AddLocal(ltx); err != nil {
+ if err := pool.addLocal(ltx); err != nil {
t.Fatalf("failed to append underpriced local transaction: %v", err)
}
ltx = dynamicFeeTx(0, 100000, big.NewInt(0), big.NewInt(0), keys[3])
- if err := pool.AddLocal(ltx); err != nil {
+ if err := pool.addLocal(ltx); err != nil {
t.Fatalf("failed to add new underpriced local transaction: %v", err)
}
pending, queued = pool.Stats()
@@ -1985,7 +1982,7 @@ func TestDualHeapEviction(t *testing.T) {
t.Parallel()
pool, _ := setupPoolWithConfig(eip1559Config)
- defer pool.Stop()
+ defer pool.Close()
pool.config.GlobalSlots = 10
pool.config.GlobalQueue = 10
@@ -2014,7 +2011,7 @@ func TestDualHeapEviction(t *testing.T) {
tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+200+i)), big.NewInt(1), key)
highCap = tx
}
- pool.AddRemotesSync([]*types.Transaction{tx})
+ pool.addRemotesSync([]*types.Transaction{tx})
}
pending, queued := pool.Stats()
if pending+queued != 20 {
@@ -2044,10 +2041,11 @@ func TestDeduplication(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(testTxPoolConfig, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ defer pool.Close()
// Create a test account to add transactions with
key, _ := crypto.GenerateKey()
@@ -2062,7 +2060,7 @@ func TestDeduplication(t *testing.T) {
for i := 0; i < len(txs); i += 2 {
firsts = append(firsts, txs[i])
}
- errs := pool.AddRemotesSync(firsts)
+ errs := pool.addRemotesSync(firsts)
if len(errs) != len(firsts) {
t.Fatalf("first add mismatching result count: have %d, want %d", len(errs), len(firsts))
}
@@ -2079,7 +2077,7 @@ func TestDeduplication(t *testing.T) {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, len(txs)/2-1)
}
// Try to add all of them now and ensure previous ones error out as knowns
- errs = pool.AddRemotesSync(txs)
+ errs = pool.addRemotesSync(txs)
if len(errs) != len(txs) {
t.Fatalf("all add mismatching result count: have %d, want %d", len(errs), len(txs))
}
@@ -2110,10 +2108,11 @@ func TestReplacement(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(testTxPoolConfig, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ defer pool.Close()
// Keep track of transaction events to ensure all executables get announced
events := make(chan core.NewTxsEvent, 32)
@@ -2131,10 +2130,10 @@ func TestReplacement(t *testing.T) {
if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), key)); err != nil {
t.Fatalf("failed to add original cheap pending transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced {
- t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
+ if err := pool.addRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced)
}
- if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(2), key)); err != nil {
+ if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(2), key)); err != nil {
t.Fatalf("failed to replace original cheap pending transaction: %v", err)
}
if err := validateEvents(events, 2); err != nil {
@@ -2144,10 +2143,10 @@ func TestReplacement(t *testing.T) {
if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(price), key)); err != nil {
t.Fatalf("failed to add original proper pending transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced {
- t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
+ if err := pool.addRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced)
}
- if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(threshold), key)); err != nil {
+ if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(threshold), key)); err != nil {
t.Fatalf("failed to replace original proper pending transaction: %v", err)
}
if err := validateEvents(events, 2); err != nil {
@@ -2155,23 +2154,23 @@ func TestReplacement(t *testing.T) {
}
// Add queued transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too)
- if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), key)); err != nil {
+ if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(1), key)); err != nil {
t.Fatalf("failed to add original cheap queued transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced {
- t.Fatalf("original cheap queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
+ if err := pool.addRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original cheap queued transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced)
}
- if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(2), key)); err != nil {
+ if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(2), key)); err != nil {
t.Fatalf("failed to replace original cheap queued transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(price), key)); err != nil {
+ if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(price), key)); err != nil {
t.Fatalf("failed to add original proper queued transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced {
- t.Fatalf("original proper queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
+ if err := pool.addRemote(pricedTransaction(2, 100001, big.NewInt(threshold-1), key)); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original proper queued transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced)
}
- if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(threshold), key)); err != nil {
+ if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(threshold), key)); err != nil {
t.Fatalf("failed to replace original proper queued transaction: %v", err)
}
@@ -2190,7 +2189,7 @@ func TestReplacementDynamicFee(t *testing.T) {
// Create the pool to test the pricing enforcement with
pool, key := setupPoolWithConfig(eip1559Config)
- defer pool.Stop()
+ defer pool.Close()
testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000))
// Keep track of transaction events to ensure all executables get announced
@@ -2232,12 +2231,12 @@ func TestReplacementDynamicFee(t *testing.T) {
}
// 2. Don't bump tip or feecap => discard
tx = dynamicFeeTx(nonce, 100001, big.NewInt(2), big.NewInt(1), key)
- if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced {
- t.Fatalf("original cheap %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced)
+ if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original cheap %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
}
// 3. Bump both more than min => accept
tx = dynamicFeeTx(nonce, 100000, big.NewInt(3), big.NewInt(2), key)
- if err := pool.AddRemote(tx); err != nil {
+ if err := pool.addRemote(tx); err != nil {
t.Fatalf("failed to replace original cheap %s transaction: %v", stage, err)
}
// 4. Check events match expected (2 new executable txs during pending, 0 during queue)
@@ -2255,27 +2254,27 @@ func TestReplacementDynamicFee(t *testing.T) {
}
// 6. Bump tip max allowed so it's still underpriced => discard
tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold-1), key)
- if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced {
- t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced)
+ if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
}
// 7. Bump fee cap max allowed so it's still underpriced => discard
tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold-1), big.NewInt(gasTipCap), key)
- if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced {
- t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced)
+ if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
}
// 8. Bump tip min for acceptance => accept
tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold), key)
- if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced {
- t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced)
+ if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
}
// 9. Bump fee cap min for acceptance => accept
tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(gasTipCap), key)
- if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced {
- t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced)
+ if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
}
// 10. Check events match expected (3 new executable txs during pending, 0 during queue)
tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(tipThreshold), key)
- if err := pool.AddRemote(tx); err != nil {
+ if err := pool.addRemote(tx); err != nil {
t.Fatalf("failed to replace original cheap %s transaction: %v", stage, err)
}
// 11. Check events match expected (3 new executable txs during pending, 0 during queue)
@@ -2315,14 +2314,15 @@ func testJournaling(t *testing.T, nolocals bool) {
// Create the original pool to inject transaction into the journal
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.NoLocals = nolocals
config.Journal = journal
config.Rejournal = time.Second
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
+ pool := New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
// Create two test accounts to ensure remotes expire but locals do not
local, _ := crypto.GenerateKey()
@@ -2332,13 +2332,13 @@ func testJournaling(t *testing.T, nolocals bool) {
testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000))
// Add three local and a remote transactions and ensure they are queued up
- if err := pool.AddLocal(pricedTransaction(0, 100000, big.NewInt(1), local)); err != nil {
+ if err := pool.addLocal(pricedTransaction(0, 100000, big.NewInt(1), local)); err != nil {
t.Fatalf("failed to add local transaction: %v", err)
}
- if err := pool.AddLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil {
+ if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil {
t.Fatalf("failed to add local transaction: %v", err)
}
- if err := pool.AddLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil {
+ if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil {
t.Fatalf("failed to add local transaction: %v", err)
}
if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), remote)); err != nil {
@@ -2355,11 +2355,12 @@ func testJournaling(t *testing.T, nolocals bool) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Terminate the old pool, bump the local nonce, create a new pool and ensure relevant transaction survive
- pool.Stop()
+ pool.Close()
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
- blockchain = newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool = NewTxPool(config, params.TestChainConfig, blockchain)
+ pool = New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
pending, queued = pool.Stats()
if queued != 0 {
@@ -2381,11 +2382,12 @@ func testJournaling(t *testing.T, nolocals bool) {
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2)
<-pool.requestReset(nil, nil)
time.Sleep(2 * config.Rejournal)
- pool.Stop()
+ pool.Close()
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
- blockchain = newTestBlockChain(1000000, statedb, new(event.Feed))
- pool = NewTxPool(config, params.TestChainConfig, blockchain)
+ blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+ pool = New(config, blockchain)
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
pending, queued = pool.Stats()
if pending != 0 {
@@ -2403,7 +2405,7 @@ func testJournaling(t *testing.T, nolocals bool) {
if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
- pool.Stop()
+ pool.Close()
}
// TestStatusCheck tests that the pool can correctly retrieve the
@@ -2413,10 +2415,11 @@ func TestStatusCheck(t *testing.T) {
// Create the pool to test the status retrievals with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
- defer pool.Stop()
+ pool := New(testTxPoolConfig, blockchain)
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ defer pool.Close()
// Create the test accounts to check various transaction statuses with
keys := make([]*ecdsa.PrivateKey, 3)
@@ -2433,7 +2436,7 @@ func TestStatusCheck(t *testing.T) {
txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[2])) // Queued only
// Import the transaction and ensure they are correctly added
- pool.AddRemotesSync(txs)
+ pool.addRemotesSync(txs)
pending, queued := pool.Stats()
if pending != 2 {
@@ -2451,13 +2454,11 @@ func TestStatusCheck(t *testing.T) {
hashes[i] = tx.Hash()
}
hashes = append(hashes, common.Hash{})
+ expect := []txpool.TxStatus{txpool.TxStatusPending, txpool.TxStatusPending, txpool.TxStatusQueued, txpool.TxStatusQueued, txpool.TxStatusUnknown}
- statuses := pool.Status(hashes)
- expect := []TxStatus{TxStatusPending, TxStatusPending, TxStatusQueued, TxStatusQueued, TxStatusUnknown}
-
- for i := 0; i < len(statuses); i++ {
- if statuses[i] != expect[i] {
- t.Errorf("transaction %d: status mismatch: have %v, want %v", i, statuses[i], expect[i])
+ for i := 0; i < len(hashes); i++ {
+ if status := pool.Status(hashes[i]); status != expect[i] {
+ t.Errorf("transaction %d: status mismatch: have %v, want %v", i, status, expect[i])
}
}
}
@@ -2489,7 +2490,7 @@ func BenchmarkPendingDemotion10000(b *testing.B) { benchmarkPendingDemotion(b, 1
func benchmarkPendingDemotion(b *testing.B, size int) {
// Add a batch of transactions to a pool one by one
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, account, big.NewInt(1000000))
@@ -2514,7 +2515,7 @@ func BenchmarkFuturePromotion10000(b *testing.B) { benchmarkFuturePromotion(b, 1
func benchmarkFuturePromotion(b *testing.B, size int) {
// Add a batch of transactions to a pool one by one
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, account, big.NewInt(1000000))
@@ -2542,7 +2543,7 @@ func BenchmarkBatchLocalInsert10000(b *testing.B) { benchmarkBatchInsert(b, 1000
func benchmarkBatchInsert(b *testing.B, size int, local bool) {
// Generate a batch of transactions to enqueue into the pool
pool, key := setupPool()
- defer pool.Stop()
+ defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, account, big.NewInt(1000000000000000000))
@@ -2558,9 +2559,9 @@ func benchmarkBatchInsert(b *testing.B, size int, local bool) {
b.ResetTimer()
for _, batch := range batches {
if local {
- pool.AddLocals(batch)
+ pool.addLocals(batch)
} else {
- pool.AddRemotes(batch)
+ pool.addRemotes(batch)
}
}
}
@@ -2588,15 +2589,15 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) {
pool, _ := setupPool()
testAddBalance(pool, account, big.NewInt(100000000))
for _, local := range locals {
- pool.AddLocal(local)
+ pool.addLocal(local)
}
b.StartTimer()
// Assign a high enough balance for testing
testAddBalance(pool, remoteAddr, big.NewInt(100000000))
for i := 0; i < len(remotes); i++ {
- pool.AddRemotes([]*types.Transaction{remotes[i]})
+ pool.addRemotes([]*types.Transaction{remotes[i]})
}
- pool.Stop()
+ pool.Close()
}
}
@@ -2604,7 +2605,7 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) {
func BenchmarkMultiAccountBatchInsert(b *testing.B) {
// Generate a batch of transactions to enqueue into the pool
pool, _ := setupPool()
- defer pool.Stop()
+ defer pool.Close()
b.ReportAllocs()
batches := make(types.Transactions, b.N)
for i := 0; i < b.N; i++ {
@@ -2617,6 +2618,6 @@ func BenchmarkMultiAccountBatchInsert(b *testing.B) {
// Benchmark importing the transactions into the queue
b.ResetTimer()
for _, tx := range batches {
- pool.AddRemotesSync([]*types.Transaction{tx})
+ pool.addRemotesSync([]*types.Transaction{tx})
}
}
diff --git a/core/txpool/list.go b/core/txpool/legacypool/list.go
similarity index 99%
rename from core/txpool/list.go
rename to core/txpool/legacypool/list.go
index 96f655a672..40e46cb419 100644
--- a/core/txpool/list.go
+++ b/core/txpool/legacypool/list.go
@@ -24,7 +24,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package txpool
+package legacypool
import (
"container/heap"
@@ -600,7 +600,7 @@ func (l *pricedList) underpricedFor(h *priceHeap, tx *types.Transaction) bool {
func (l *pricedList) Discard(slots int, force bool) (types.Transactions, bool) {
drop := make(types.Transactions, 0, slots) // Remote underpriced transactions to drop
for slots > 0 {
- if len(l.urgent.list)*floatingRatio > len(l.floating.list)*urgentRatio || floatingRatio == 0 {
+ if len(l.urgent.list)*floatingRatio > len(l.floating.list)*urgentRatio {
// Discard stale transactions if found during cleanup
tx := heap.Pop(&l.urgent).(*types.Transaction)
if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated
diff --git a/core/txpool/list_test.go b/core/txpool/legacypool/list_test.go
similarity index 99%
rename from core/txpool/list_test.go
rename to core/txpool/legacypool/list_test.go
index 8c2a3e0571..d8aaa31644 100644
--- a/core/txpool/list_test.go
+++ b/core/txpool/legacypool/list_test.go
@@ -24,7 +24,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package txpool
+package legacypool
import (
"math/big"
diff --git a/core/txpool/noncer.go b/core/txpool/legacypool/noncer.go
similarity index 99%
rename from core/txpool/noncer.go
rename to core/txpool/legacypool/noncer.go
index 828717e2bf..dd880a6ba3 100644
--- a/core/txpool/noncer.go
+++ b/core/txpool/legacypool/noncer.go
@@ -24,7 +24,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package txpool
+package legacypool
import (
"sync"
diff --git a/core/txpool/subpool.go b/core/txpool/subpool.go
new file mode 100644
index 0000000000..89a1dee9d2
--- /dev/null
+++ b/core/txpool/subpool.go
@@ -0,0 +1,127 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package txpool
+
+import (
+ "math/big"
+
+ "github.com/ava-labs/coreth/core"
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
+ "github.com/ethereum/go-ethereum/event"
+)
+
+// Transaction is a helper struct to group together a canonical transaction with
+// satellite data items that are needed by the pool but are not part of the chain.
+type Transaction struct {
+ Tx *types.Transaction // Canonical transaction
+
+ BlobTxBlobs []kzg4844.Blob // Blobs needed by the blob pool
+ BlobTxCommits []kzg4844.Commitment // Commitments needed by the blob pool
+ BlobTxProofs []kzg4844.Proof // Proofs needed by the blob pool
+}
+
+// SubPool represents a specialized transaction pool that lives on its own (e.g.
+// blob pool). Since independent of how many specialized pools we have, they do
+// need to be updated in lockstep and assemble into one coherent view for block
+// production, this interface defines the common methods that allow the primary
+// transaction pool to manage the subpools.
+type SubPool interface {
+ // Filter is a selector used to decide whether a transaction should be added
+ // to this particular subpool.
+ Filter(tx *types.Transaction) bool
+
+ // Init sets the base parameters of the subpool, allowing it to load any saved
+ // transactions from disk and also permitting internal maintenance routines to
+ // start up.
+ //
+ // These should not be passed as a constructor argument - nor should the pools
+ // start by themselves - in order to keep multiple subpools in lockstep with
+ // one another.
+ Init(gasTip *big.Int, head *types.Header) error
+
+ // Close terminates any background processing threads and releases any held
+ // resources.
+ Close() error
+
+ // Reset retrieves the current state of the blockchain and ensures the content
+ // of the transaction pool is valid with regard to the chain state.
+ Reset(oldHead, newHead *types.Header)
+
+ // SetGasTip updates the minimum price required by the subpool for a new
+ // transaction, and drops all transactions below this threshold.
+ SetGasTip(tip *big.Int)
+ GasTip() *big.Int
+
+ // SetMinFee updates the minimum fee required by the subpool for a new
+ // transaction, and drops all transactions below this threshold.
+ SetMinFee(tip *big.Int)
+
+ // Has returns an indicator whether subpool has a transaction cached with the
+ // given hash.
+ Has(hash common.Hash) bool
+ HasLocal(hash common.Hash) bool
+
+ // Get returns a transaction if it is contained in the pool, or nil otherwise.
+ Get(hash common.Hash) *Transaction
+
+ // Add enqueues a batch of transactions into the pool if they are valid. Due
+ // to the large transaction churn, add may postpone fully integrating the tx
+ // to a later point to batch multiple ones together.
+ Add(txs []*Transaction, local bool, sync bool) []error
+
+ // Pending retrieves all currently processable transactions, grouped by origin
+ // account and sorted by nonce.
+ Pending(enforceTips bool) map[common.Address][]*types.Transaction
+
+ // SubscribeTransactions subscribes to new transaction events.
+ SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription
+
+ // Nonce returns the next nonce of an account, with all transactions executable
+ // by the pool already applied on top.
+ Nonce(addr common.Address) uint64
+
+ // Stats retrieves the current pool stats, namely the number of pending and the
+ // number of queued (non-executable) transactions.
+ Stats() (int, int)
+
+ // Content retrieves the data content of the transaction pool, returning all the
+ // pending as well as queued transactions, grouped by account and sorted by nonce.
+ Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction)
+
+ // ContentFrom retrieves the data content of the transaction pool, returning the
+ // pending as well as queued transactions of this address, grouped by nonce.
+ ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction)
+
+ // Locals retrieves the accounts currently considered local by the pool.
+ Locals() []common.Address
+
+ // Status returns the known status (unknown/pending/queued) of a transaction
+ // identified by their hashes.
+ Status(hash common.Hash) TxStatus
+}
diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go
index bc3dd52eb6..b85332cddf 100644
--- a/core/txpool/txpool.go
+++ b/core/txpool/txpool.go
@@ -8,7 +8,7 @@
//
// Much love to the original authors for their work.
// **********
-// Copyright 2014 The go-ethereum Authors
+// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@@ -27,132 +27,13 @@
package txpool
import (
- "errors"
"fmt"
- "math"
"math/big"
- "sort"
- "sync"
- "sync/atomic"
- "time"
- "github.com/ava-labs/coreth/consensus/dummy"
"github.com/ava-labs/coreth/core"
- "github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/metrics"
- "github.com/ava-labs/coreth/params"
- "github.com/ava-labs/coreth/utils"
- "github.com/ava-labs/coreth/vmerrs"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/prque"
"github.com/ethereum/go-ethereum/event"
- "github.com/ethereum/go-ethereum/log"
-)
-
-const (
- // chainHeadChanSize is the size of channel listening to ChainHeadEvent.
- chainHeadChanSize = 10
-
- // txSlotSize is used to calculate how many data slots a single transaction
- // takes up based on its size. The slots are used as DoS protection, ensuring
- // that validating a new transaction remains a constant operation (in reality
- // O(maxslots), where max slots are 4 currently).
- txSlotSize = 32 * 1024
-
- // txMaxSize is the maximum size a single transaction can have. This field has
- // non-trivial consequences: larger transactions are significantly harder and
- // more expensive to propagate; larger transactions also take more resources
- // to validate whether they fit into the pool or not.
- //
- // Note: the max contract size is 24KB
- txMaxSize = 4 * txSlotSize // 128KB
-)
-
-var (
- // ErrAlreadyKnown is returned if the transactions is already contained
- // within the pool.
- ErrAlreadyKnown = errors.New("already known")
-
- // ErrInvalidSender is returned if the transaction contains an invalid signature.
- ErrInvalidSender = errors.New("invalid sender")
-
- // ErrUnderpriced is returned if a transaction's gas price is below the minimum
- // configured for the transaction pool.
- ErrUnderpriced = errors.New("transaction underpriced")
-
- // ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
- // another remote transaction.
- ErrTxPoolOverflow = errors.New("txpool is full")
-
- // ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
- // with a different one without the required price bump.
- ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")
-
- // ErrGasLimit is returned if a transaction's requested gas limit exceeds the
- // maximum allowance of the current block.
- ErrGasLimit = errors.New("exceeds block gas limit")
-
- // ErrNegativeValue is a sanity error to ensure no one is able to specify a
- // transaction with a negative value.
- ErrNegativeValue = errors.New("negative value")
-
- // ErrOversizedData is returned if the input data of a transaction is greater
- // than some meaningful limit a user might use. This is not a consensus error
- // making the transaction invalid, rather a DOS protection.
- ErrOversizedData = errors.New("oversized data")
-
- // ErrFutureReplacePending is returned if a future transaction replaces a pending
- // transaction. Future transactions should only be able to replace other future transactions.
- ErrFutureReplacePending = errors.New("future transaction tries to replace pending")
-
- // ErrOverdraft is returned if a transaction would cause the senders balance to go negative
- // thus invalidating a potential large number of transactions.
- ErrOverdraft = errors.New("transaction would cause overdraft")
-)
-
-var (
- evictionInterval = time.Minute // Time interval to check for evictable transactions
- statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
- baseFeeUpdateInterval = 10 * time.Second // Time interval at which to schedule a base fee update for the tx pool after Apricot Phase 3 is enabled
-)
-
-var (
- // Metrics for the pending pool
- pendingDiscardMeter = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
- pendingReplaceMeter = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
- pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
- pendingNofundsMeter = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil) // Dropped due to out-of-funds
-
- // Metrics for the queued pool
- queuedDiscardMeter = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
- queuedReplaceMeter = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
- queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
- queuedNofundsMeter = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds
- queuedEvictionMeter = metrics.NewRegisteredMeter("txpool/queued/eviction", nil) // Dropped due to lifetime
-
- // General tx metrics
- knownTxMeter = metrics.NewRegisteredMeter("txpool/known", nil)
- validTxMeter = metrics.NewRegisteredMeter("txpool/valid", nil)
- invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil)
- underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
- overflowedTxMeter = metrics.NewRegisteredMeter("txpool/overflowed", nil)
-
- // throttleTxMeter counts how many transactions are rejected due to too-many-changes between
- // txpool reorgs.
- throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil)
- // reorgDurationTimer measures how long time a txpool reorg takes.
- reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil)
- // dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected
- // that this number is pretty low, since txpool reorgs happen very frequently.
- dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015))
-
- pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
- queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil)
- localGauge = metrics.NewRegisteredGauge("txpool/local", nil)
- slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil)
-
- reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
)
// TxStatus is the current status of a transaction as seen by the pool.
@@ -164,1935 +45,326 @@ const (
TxStatusPending
)
-// blockChain provides the state of blockchain and current gas limit to do
-// some pre checks in tx pool and event subscribers.
-type blockChain interface {
+// BlockChain defines the minimal set of methods needed to back a tx pool with
+// a chain. Exists to allow mocking the live chain out of tests.
+type BlockChain interface {
+ // CurrentBlock returns the current head of the chain.
CurrentBlock() *types.Header
- GetBlock(hash common.Hash, number uint64) *types.Block
- StateAt(root common.Hash) (*state.StateDB, error)
- SenderCacher() *core.TxSenderCacher
+ // SubscribeChainHeadEvent subscribes to new blocks being added to the chain.
SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
}
-// Config are the configuration parameters of the transaction pool.
-type Config struct {
- Locals []common.Address // Addresses that should be treated by default as local
- NoLocals bool // Whether local transaction handling should be disabled
- Journal string // Journal of local transactions to survive node restarts
- Rejournal time.Duration // Time interval to regenerate the local transaction journal
-
- PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
- PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
-
- AccountSlots uint64 // Number of executable transaction slots guaranteed per account
- GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts
- AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
- GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts
-
- Lifetime time.Duration // Maximum amount of time non-executable transaction are queued
+// TxPool is an aggregator for various transaction specific pools, collectively
+// tracking all the transactions deemed interesting by the node. Transactions
+// enter the pool when they are received from the network or submitted locally.
+// They exit the pool when they are included in the blockchain or evicted due to
+// resource constraints.
+type TxPool struct {
+ subpools []SubPool // List of subpools for specialized transaction handling
+ subs event.SubscriptionScope // Subscription scope to unsubscribe all on shutdown
+ quit chan chan error // Quit channel to tear down the head updater
+ resetFeed event.Feed // Feed of processed chain heads
}
-// DefaultConfig contains the default configurations for the transaction
-// pool.
-var DefaultConfig = Config{
- Journal: "transactions.rlp",
- Rejournal: time.Hour,
-
- PriceLimit: 1,
- PriceBump: 10,
-
- AccountSlots: 16,
- GlobalSlots: 4096 + 1024, // urgent + floating queue capacity with 4:1 ratio
- AccountQueue: 64,
- GlobalQueue: 1024,
-
- Lifetime: 3 * time.Hour,
-}
+// New creates a new transaction pool to gather, sort and filter inbound
+// transactions from the network.
+func New(gasTip *big.Int, chain BlockChain, subpools []SubPool) (*TxPool, error) {
+ // Retrieve the current head so that all subpools and this main coordinator
+ // pool will have the same starting state, even if the chain moves forward
+ // during initialization.
+ head := chain.CurrentBlock()
-// sanitize checks the provided user configurations and changes anything that's
-// unreasonable or unworkable.
-func (config *Config) sanitize() Config {
- conf := *config
- if conf.Rejournal < time.Second {
- log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
- conf.Rejournal = time.Second
- }
- if conf.PriceLimit < 1 {
- log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit)
- conf.PriceLimit = DefaultConfig.PriceLimit
- }
- if conf.PriceBump < 1 {
- log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump)
- conf.PriceBump = DefaultConfig.PriceBump
- }
- if conf.AccountSlots < 1 {
- log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultConfig.AccountSlots)
- conf.AccountSlots = DefaultConfig.AccountSlots
- }
- if conf.GlobalSlots < 1 {
- log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultConfig.GlobalSlots)
- conf.GlobalSlots = DefaultConfig.GlobalSlots
- }
- if conf.AccountQueue < 1 {
- log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultConfig.AccountQueue)
- conf.AccountQueue = DefaultConfig.AccountQueue
- }
- if conf.GlobalQueue < 1 {
- log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultConfig.GlobalQueue)
- conf.GlobalQueue = DefaultConfig.GlobalQueue
+ pool := &TxPool{
+ subpools: subpools,
+ quit: make(chan chan error),
}
- if conf.Lifetime < 1 {
- log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultConfig.Lifetime)
- conf.Lifetime = DefaultConfig.Lifetime
+ for i, subpool := range subpools {
+ if err := subpool.Init(gasTip, head); err != nil {
+ for j := i - 1; j >= 0; j-- {
+ subpools[j].Close()
+ }
+ return nil, err
+ }
}
- return conf
-}
-
-// TxPool contains all currently known transactions. Transactions
-// enter the pool when they are received from the network or submitted
-// locally. They exit the pool when they are included in the blockchain.
-//
-// The pool separates processable transactions (which can be applied to the
-// current state) and future transactions. Transactions move between those
-// two states over time as they are received and processed.
-type TxPool struct {
- config Config
- chainconfig *params.ChainConfig
- chain blockChain
- gasPrice *big.Int
- minimumFee *big.Int
- txFeed event.Feed
- headFeed event.Feed
- reorgFeed event.Feed
- scope event.SubscriptionScope
- signer types.Signer
- mu sync.RWMutex
- istanbul atomic.Bool // Fork indicator whether we are in the istanbul stage.
- eip2718 atomic.Bool // Fork indicator whether we are using EIP-2718 type transactions.
- eip1559 atomic.Bool // Fork indicator whether we are using EIP-1559 type transactions.
- eip3860 atomic.Bool // Fork indicator whether EIP-3860 is activated. (activated in Shanghai Upgrade in Ethereum)
-
- currentHead *types.Header
- // [currentState] is the state of the blockchain head. It is reset whenever
- // head changes.
- currentState *state.StateDB
- // [currentStateLock] is required to allow concurrent access to address nonces
- // and balances during reorgs and gossip handling.
- currentStateLock sync.Mutex
-
- pendingNonces *noncer // Pending state tracking virtual nonces
- currentMaxGas atomic.Uint64 // Current gas limit for transaction caps
-
- locals *accountSet // Set of local transaction to exempt from eviction rules
- journal *journal // Journal of local transaction to back up to disk
-
- pending map[common.Address]*list // All currently processable transactions
- queue map[common.Address]*list // Queued but non-processable transactions
- beats map[common.Address]time.Time // Last heartbeat from each known account
- all *lookup // All transactions to allow lookups
- priced *pricedList // All transactions sorted by price
-
- chainHeadCh chan core.ChainHeadEvent
- chainHeadSub event.Subscription
- reqResetCh chan *txpoolResetRequest
- reqPromoteCh chan *accountSet
- queueTxEventCh chan *types.Transaction
- reorgDoneCh chan chan struct{}
- reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop
- generalShutdownChan chan struct{} // closed when the transaction pool is stopped. Any goroutine can listen
- // to this to be notified if it should shut down.
- wg sync.WaitGroup // tracks loop, scheduleReorgLoop
- initDoneCh chan struct{} // is closed once the pool is initialized (for tests)
-
- changesSinceReorg int // A counter for how many drops we've performed in-between reorg.
-}
+ // Subscribe to chain head events to trigger subpool resets
+ var (
+ newHeadCh = make(chan core.ChainHeadEvent)
+ newHeadSub = chain.SubscribeChainHeadEvent(newHeadCh)
+ )
+ go func() {
+ defer newHeadSub.Unsubscribe()
+ pool.loop(head, chain, newHeadCh)
+ }()
-type txpoolResetRequest struct {
- oldHead, newHead *types.Header
+ return pool, nil
}
-// NewTxPool creates a new transaction pool to gather, sort and filter inbound
-// transactions from the network.
-func NewTxPool(config Config, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
- // Sanitize the input to ensure no vulnerable gas prices are set
- config = (&config).sanitize()
+// Close terminates the transaction pool and all its subpools.
+func (p *TxPool) Close() error {
+ var errs []error
- // Create the transaction pool with its initial settings
- pool := &TxPool{
- config: config,
- chainconfig: chainconfig,
- chain: chain,
- signer: types.LatestSigner(chainconfig),
- pending: make(map[common.Address]*list),
- queue: make(map[common.Address]*list),
- beats: make(map[common.Address]time.Time),
- all: newLookup(),
- chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
- reqResetCh: make(chan *txpoolResetRequest),
- reqPromoteCh: make(chan *accountSet),
- queueTxEventCh: make(chan *types.Transaction),
- reorgDoneCh: make(chan chan struct{}),
- reorgShutdownCh: make(chan struct{}),
- initDoneCh: make(chan struct{}),
- generalShutdownChan: make(chan struct{}),
- gasPrice: new(big.Int).SetUint64(config.PriceLimit),
- }
- pool.locals = newAccountSet(pool.signer)
- for _, addr := range config.Locals {
- log.Info("Setting new local account", "address", addr)
- pool.locals.add(addr)
+ // Terminate the reset loop and wait for it to finish
+ errc := make(chan error)
+ p.quit <- errc
+ if err := <-errc; err != nil {
+ errs = append(errs, err)
}
- pool.priced = newPricedList(pool.all)
- pool.reset(nil, chain.CurrentBlock())
- // Start the reorg loop early so it can handle requests generated during journal loading.
- pool.wg.Add(1)
- go pool.scheduleReorgLoop()
-
- // If local transactions and journaling is enabled, load from disk
- if !config.NoLocals && config.Journal != "" {
- pool.journal = newTxJournal(config.Journal)
-
- if err := pool.journal.load(pool.AddLocals); err != nil {
- log.Warn("Failed to load transaction journal", "err", err)
- }
- if err := pool.journal.rotate(pool.local()); err != nil {
- log.Warn("Failed to rotate transaction journal", "err", err)
+ // Terminate each subpool
+ for _, subpool := range p.subpools {
+ if err := subpool.Close(); err != nil {
+ errs = append(errs, err)
}
}
-
- // Subscribe events from blockchain and start the main event loop.
- pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
- pool.wg.Add(1)
- go pool.loop()
-
- pool.startPeriodicFeeUpdate()
-
- return pool
+ if len(errs) > 0 {
+ return fmt.Errorf("subpool close errors: %v", errs)
+ }
+ return nil
}
// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events.
-func (pool *TxPool) loop() {
- defer pool.wg.Done()
-
+func (p *TxPool) loop(head *types.Header, chain BlockChain, newHeadCh <-chan core.ChainHeadEvent) {
+ // Track the previous and current head to feed to an idle reset
var (
- prevPending, prevQueued, prevStales int
- // Start the stats reporting and transaction eviction tickers
- report = time.NewTicker(statsReportInterval)
- evict = time.NewTicker(evictionInterval)
- journal = time.NewTicker(pool.config.Rejournal)
- // Track the previous head headers for transaction reorgs
- head = pool.chain.CurrentBlock()
+ oldHead = head
+ newHead = oldHead
)
- defer report.Stop()
- defer evict.Stop()
- defer journal.Stop()
-
- // Notify tests that the init phase is done
- close(pool.initDoneCh)
- for {
- select {
- // Handle ChainHeadEvent
- case ev := <-pool.chainHeadCh:
- if ev.Block != nil {
- pool.requestReset(head, ev.Block.Header())
- head = ev.Block.Header()
- pool.headFeed.Send(core.NewTxPoolHeadEvent{Head: head})
- }
-
- // System shutdown.
- case <-pool.chainHeadSub.Err():
- close(pool.reorgShutdownCh)
- return
-
- // Handle stats reporting ticks
- case <-report.C:
- pool.mu.RLock()
- pending, queued := pool.stats()
- pool.mu.RUnlock()
- stales := int(pool.priced.stales.Load())
-
- if pending != prevPending || queued != prevQueued || stales != prevStales {
- log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
- prevPending, prevQueued, prevStales = pending, queued, stales
- }
-
- // Handle inactive account transaction eviction
- case <-evict.C:
- pool.mu.Lock()
- for addr := range pool.queue {
- // Skip local transactions from the eviction mechanism
- if pool.locals.contains(addr) {
- continue
- }
- // Any non-locals old enough should be removed
- if time.Since(pool.beats[addr]) > pool.config.Lifetime {
- list := pool.queue[addr].Flatten()
- for _, tx := range list {
- pool.removeTx(tx.Hash(), true)
+ // Consume chain head events and start resets when none is running
+ var (
+ resetBusy = make(chan struct{}, 1) // Allow 1 reset to run concurrently
+ resetDone = make(chan *types.Header)
+ )
+ var errc chan error
+ for errc == nil {
+ // Something interesting might have happened, run a reset if there is
+ // one needed but none is running. The resetter will run on its own
+ // goroutine to allow chain head events to be consumed contiguously.
+ if newHead != oldHead {
+ // Try to inject a busy marker and start a reset if successful
+ select {
+ case resetBusy <- struct{}{}:
+ // Busy marker injected, start a new subpool reset
+ go func(oldHead, newHead *types.Header) {
+ for _, subpool := range p.subpools {
+ subpool.Reset(oldHead, newHead)
}
- queuedEvictionMeter.Mark(int64(len(list)))
- }
- }
- pool.mu.Unlock()
+ p.resetFeed.Send(core.NewTxPoolReorgEvent{Head: newHead})
+ resetDone <- newHead
+ }(oldHead, newHead)
- // Handle local transaction journal rotation
- case <-journal.C:
- if pool.journal != nil {
- pool.mu.Lock()
- if err := pool.journal.rotate(pool.local()); err != nil {
- log.Warn("Failed to rotate local tx journal", "err", err)
- }
- pool.mu.Unlock()
+ default:
+ // Reset already running, wait until it finishes
}
}
- }
-}
-
-// Stop terminates the transaction pool.
-func (pool *TxPool) Stop() {
- // Unsubscribe all subscriptions registered from txpool
- pool.scope.Close()
-
- close(pool.generalShutdownChan)
- // Unsubscribe subscriptions registered from blockchain
- pool.chainHeadSub.Unsubscribe()
- pool.wg.Wait()
-
- if pool.journal != nil {
- pool.journal.close()
- }
- log.Info("Transaction pool stopped")
-}
-
-// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
-// starts sending event to the given channel.
-func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
- return pool.scope.Track(pool.txFeed.Subscribe(ch))
-}
-
-// SubscribeNewHeadEvent registers a subscription of NewHeadEvent and
-// starts sending event to the given channel.
-func (pool *TxPool) SubscribeNewHeadEvent(ch chan<- core.NewTxPoolHeadEvent) event.Subscription {
- return pool.scope.Track(pool.headFeed.Subscribe(ch))
-}
-
-// SubscribeNewReorgEvent registers a subscription of NewReorgEvent and
-// starts sending event to the given channel.
-func (pool *TxPool) SubscribeNewReorgEvent(ch chan<- core.NewTxPoolReorgEvent) event.Subscription {
- return pool.scope.Track(pool.reorgFeed.Subscribe(ch))
-}
-
-// GasPrice returns the current gas price enforced by the transaction pool.
-func (pool *TxPool) GasPrice() *big.Int {
- pool.mu.RLock()
- defer pool.mu.RUnlock()
-
- return new(big.Int).Set(pool.gasPrice)
-}
+ // Wait for the next chain head event or a previous reset finish
+ select {
+ case event := <-newHeadCh:
+ // Chain moved forward, store the head for later consumption
+ newHead = event.Block.Header()
-// SetGasPrice updates the minimum price required by the transaction pool for a
-// new transaction, and drops all transactions below this threshold.
-func (pool *TxPool) SetGasPrice(price *big.Int) {
- pool.mu.Lock()
- defer pool.mu.Unlock()
+ case head := <-resetDone:
+ // Previous reset finished, update the old head and allow a new reset
+ oldHead = head
+ <-resetBusy
- old := pool.gasPrice
- pool.gasPrice = price
- // if the min miner fee increased, remove transactions below the new threshold
- if price.Cmp(old) > 0 {
- // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
- drop := pool.all.RemotesBelowTip(price)
- for _, tx := range drop {
- pool.removeTx(tx.Hash(), false)
+ case errc = <-p.quit:
+ // Termination requested, break out on the next loop round
}
- pool.priced.Removed(len(drop))
}
-
- log.Info("Transaction pool price threshold updated", "price", price)
-}
-
-func (pool *TxPool) SetMinFee(minFee *big.Int) {
- pool.mu.Lock()
- defer pool.mu.Unlock()
-
- pool.minimumFee = minFee
-}
-
-// Nonce returns the next nonce of an account, with all transactions executable
-// by the pool already applied on top.
-func (pool *TxPool) Nonce(addr common.Address) uint64 {
- pool.mu.RLock()
- defer pool.mu.RUnlock()
-
- return pool.pendingNonces.get(addr)
-}
-
-// Stats retrieves the current pool stats, namely the number of pending and the
-// number of queued (non-executable) transactions.
-func (pool *TxPool) Stats() (int, int) {
- pool.mu.RLock()
- defer pool.mu.RUnlock()
-
- return pool.stats()
+ // Notify the closer of termination (no error possible for now)
+ errc <- nil
}
-// stats retrieves the current pool stats, namely the number of pending and the
-// number of queued (non-executable) transactions.
-func (pool *TxPool) stats() (int, int) {
- pending := 0
- for _, list := range pool.pending {
- pending += list.Len()
- }
- queued := 0
- for _, list := range pool.queue {
- queued += list.Len()
+// SetGasTip updates the minimum gas tip required by the transaction pool for a
+// new transaction, and drops all transactions below this threshold.
+func (p *TxPool) SetGasTip(tip *big.Int) {
+ for _, subpool := range p.subpools {
+ subpool.SetGasTip(tip)
}
- return pending, queued
}
-// Content retrieves the data content of the transaction pool, returning all the
-// pending as well as queued transactions, grouped by account and sorted by nonce.
-func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
- pool.mu.Lock()
- defer pool.mu.Unlock()
-
- pending := make(map[common.Address]types.Transactions, len(pool.pending))
- for addr, list := range pool.pending {
- pending[addr] = list.Flatten()
- }
- queued := make(map[common.Address]types.Transactions, len(pool.queue))
- for addr, list := range pool.queue {
- queued[addr] = list.Flatten()
+// Has returns an indicator whether the pool has a transaction cached with the
+// given hash.
+func (p *TxPool) Has(hash common.Hash) bool {
+ for _, subpool := range p.subpools {
+ if subpool.Has(hash) {
+ return true
+ }
}
- return pending, queued
+ return false
}
-// ContentFrom retrieves the data content of the transaction pool, returning the
-// pending as well as queued transactions of this address, grouped by nonce.
-func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) {
- pool.mu.RLock()
- defer pool.mu.RUnlock()
-
- var pending types.Transactions
- if list, ok := pool.pending[addr]; ok {
- pending = list.Flatten()
- }
- var queued types.Transactions
- if list, ok := pool.queue[addr]; ok {
- queued = list.Flatten()
+// Get returns a transaction if it is contained in the pool, or nil otherwise.
+func (p *TxPool) Get(hash common.Hash) *Transaction {
+ for _, subpool := range p.subpools {
+ if tx := subpool.Get(hash); tx != nil {
+ return tx
+ }
}
- return pending, queued
+ return nil
}
-// Pending retrieves all currently processable transactions, grouped by origin
-// account and sorted by nonce. The returned transaction set is a copy and can be
-// freely modified by calling code.
-//
-// The enforceTips parameter can be used to do an extra filtering on the pending
-// transactions and only return those whose **effective** tip is large enough in
-// the next pending execution environment.
-func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transactions {
- pool.mu.Lock()
- defer pool.mu.Unlock()
-
- pending := make(map[common.Address]types.Transactions, len(pool.pending))
- for addr, list := range pool.pending {
- txs := list.Flatten()
+// Add enqueues a batch of transactions into the pool if they are valid. Due
+// to the large transaction churn, add may postpone fully integrating the tx
+// to a later point to batch multiple ones together.
+func (p *TxPool) Add(txs []*Transaction, local bool, sync bool) []error {
+ // Split the input transactions between the subpools. It shouldn't really
+ // happen that we receive merged batches, but better graceful than strange
+ // errors.
+ //
+ // We also need to track how the transactions were split across the subpools,
+ // so we can piece back the returned errors into the original order.
+ txsets := make([][]*Transaction, len(p.subpools))
+ splits := make([]int, len(txs))
- // If the miner requests tip enforcement, cap the lists now
- if enforceTips && !pool.locals.contains(addr) {
- for i, tx := range txs {
- if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 {
- txs = txs[:i]
- break
- }
+ for i, tx := range txs {
+ // Mark this transaction belonging to no-subpool
+ splits[i] = -1
+
+ // Try to find a subpool that accepts the transaction
+ for j, subpool := range p.subpools {
+ if subpool.Filter(tx.Tx) {
+ txsets[j] = append(txsets[j], tx)
+ splits[i] = j
+ break
}
}
- if len(txs) > 0 {
- pending[addr] = txs
- }
}
- return pending
-}
-
-// PendingSize returns the number of pending txs in the tx pool.
-func (pool *TxPool) PendingSize() int {
- pending := pool.Pending(true)
- count := 0
- for _, txs := range pending {
- count += len(txs)
+ // Add the transactions split apart to the individual subpools and piece
+ // back the errors into the original sort order.
+ errsets := make([][]error, len(p.subpools))
+ for i := 0; i < len(p.subpools); i++ {
+ errsets[i] = p.subpools[i].Add(txsets[i], local, sync)
}
- return count
-}
-
-// IteratePending iterates over [pool.pending] until [f] returns false.
-// The caller must not modify [tx].
-func (pool *TxPool) IteratePending(f func(tx *types.Transaction) bool) {
- pool.mu.RLock()
- defer pool.mu.RUnlock()
-
- for _, list := range pool.pending {
- for _, tx := range list.txs.items {
- if !f(tx) {
- return
- }
+ errs := make([]error, len(txs))
+ for i, split := range splits {
+ // If the transaction was rejected by all subpools, mark it unsupported
+ if split == -1 {
+ errs[i] = core.ErrTxTypeNotSupported
+ continue
}
+ // Find which subpool handled it and pull in the corresponding error
+ errs[i] = errsets[split][0]
+ errsets[split] = errsets[split][1:]
}
+ return errs
}
-// Locals retrieves the accounts currently considered local by the pool.
-func (pool *TxPool) Locals() []common.Address {
- pool.mu.Lock()
- defer pool.mu.Unlock()
-
- return pool.locals.flatten()
-}
-
-// local retrieves all currently known local transactions, grouped by origin
-// account and sorted by nonce. The returned transaction set is a copy and can be
-// freely modified by calling code.
-func (pool *TxPool) local() map[common.Address]types.Transactions {
- txs := make(map[common.Address]types.Transactions)
- for addr := range pool.locals.accounts {
- if pending := pool.pending[addr]; pending != nil {
- txs[addr] = append(txs[addr], pending.Flatten()...)
- }
- if queued := pool.queue[addr]; queued != nil {
- txs[addr] = append(txs[addr], queued.Flatten()...)
+// Pending retrieves all currently processable transactions, grouped by origin
+// account and sorted by nonce.
+func (p *TxPool) Pending(enforceTips bool) map[common.Address][]*types.Transaction {
+ txs := make(map[common.Address][]*types.Transaction)
+ for _, subpool := range p.subpools {
+ for addr, set := range subpool.Pending(enforceTips) {
+ txs[addr] = set
}
}
return txs
}
-// checks transaction validity against the current state.
-func (pool *TxPool) checkTxState(from common.Address, tx *types.Transaction) error {
- pool.currentStateLock.Lock()
- defer pool.currentStateLock.Unlock()
-
- txNonce := tx.Nonce()
- // Ensure the transaction adheres to nonce ordering
- if currentNonce := pool.currentState.GetNonce(from); currentNonce > txNonce {
- return fmt.Errorf("%w: address %s current nonce (%d) > tx nonce (%d)",
- core.ErrNonceTooLow, from.Hex(), currentNonce, txNonce)
- }
-
- // cost == V + GP * GL
- balance := pool.currentState.GetBalance(from)
- if balance.Cmp(tx.Cost()) < 0 {
- return fmt.Errorf("%w: address %s have (%d) want (%d)", core.ErrInsufficientFunds, from.Hex(), balance, tx.Cost())
- }
-
- // Verify that replacing transactions will not result in overdraft
- list := pool.pending[from]
- if list != nil { // Sender already has pending txs
- sum := new(big.Int).Add(tx.Cost(), list.totalcost)
- if repl := list.txs.Get(tx.Nonce()); repl != nil {
- // Deduct the cost of a transaction replaced by this
- sum.Sub(sum, repl.Cost())
- }
- if balance.Cmp(sum) < 0 {
- log.Trace("Replacing transactions would overdraft", "sender", from, "balance", pool.currentState.GetBalance(from), "required", sum)
- return ErrOverdraft
+// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and starts sending
+// events to the given channel.
+func (p *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
+ subs := make([]event.Subscription, 0, len(p.subpools))
+ for _, subpool := range p.subpools {
+ sub := subpool.SubscribeTransactions(ch)
+ if sub != nil {
+ // Note: This avoids a race condition in shutting down the pool
+ // where the subpool is closed before we can subscribe to it.
+ subs = append(subs, sub)
}
}
-
- return nil
+ return p.subs.Track(event.JoinSubscriptions(subs...))
}
-// validateTxBasics checks whether a transaction is valid according to the consensus
-// rules, but does not check state-dependent validation such as sufficient balance.
-// This check is meant as an early check which only needs to be performed once,
-// and does not require the pool mutex to be held.
-func (pool *TxPool) validateTxBasics(tx *types.Transaction, local bool) error {
- // Accept only legacy transactions until EIP-2718/2930 activates.
- if !pool.eip2718.Load() && tx.Type() != types.LegacyTxType {
- return core.ErrTxTypeNotSupported
- }
- // Reject dynamic fee transactions until EIP-1559 activates.
- if !pool.eip1559.Load() && tx.Type() == types.DynamicFeeTxType {
- return core.ErrTxTypeNotSupported
- }
- // Reject blob transactions forever, those will have their own pool.
- if tx.Type() == types.BlobTxType {
- return core.ErrTxTypeNotSupported
- }
- // Reject transactions over defined size to prevent DOS attacks
- if tx.Size() > txMaxSize {
- return fmt.Errorf("%w tx size %d > max size %d", ErrOversizedData, tx.Size(), txMaxSize)
- }
- // Check whether the init code size has been exceeded.
- if pool.eip3860.Load() && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize {
- return fmt.Errorf("%w: code size %v limit %v", vmerrs.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize)
- }
- // Transactions can't be negative. This may never happen using RLP decoded
- // transactions but may occur if you create a transaction using the RPC.
- if tx.Value().Sign() < 0 {
- return ErrNegativeValue
- }
- // Ensure the transaction doesn't exceed the current block limit gas.
- if txGas := tx.Gas(); pool.currentMaxGas.Load() < txGas {
- return fmt.Errorf(
- "%w: tx gas (%d) > current max gas (%d)",
- ErrGasLimit,
- txGas,
- pool.currentMaxGas.Load(),
- )
- }
- // Sanity check for extremely large numbers
- if tx.GasFeeCap().BitLen() > 256 {
- return core.ErrFeeCapVeryHigh
- }
- if tx.GasTipCap().BitLen() > 256 {
- return core.ErrTipVeryHigh
- }
- // Ensure gasFeeCap is greater than or equal to gasTipCap.
- if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 {
- return core.ErrTipAboveFeeCap
- }
- // Make sure the transaction is signed properly.
- from, err := types.Sender(pool.signer, tx)
- if err != nil {
- return ErrInvalidSender
- }
- // Drop non-local transactions under our own minimal accepted gas price or tip
- if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 {
- return fmt.Errorf("%w: address %s have gas tip cap (%d) < pool gas tip cap (%d)", ErrUnderpriced, from.Hex(), tx.GasTipCap(), pool.gasPrice)
- }
- // Ensure the transaction has more gas than the basic tx fee.
- intrGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul.Load(), pool.eip3860.Load())
- if err != nil {
- return err
- }
- if txGas := tx.Gas(); txGas < intrGas {
- return fmt.Errorf("%w: address %v tx gas (%v) < intrinsic gas (%v)", core.ErrIntrinsicGas, from.Hex(), tx.Gas(), intrGas)
+// Nonce returns the next nonce of an account, with all transactions executable
+// by the pool already applied on top.
+func (p *TxPool) Nonce(addr common.Address) uint64 {
+ // Since (for now) accounts are unique to subpools, only one pool will have
+ // (at max) a non-state nonce. To avoid stateful lookups, just return the
+ // highest nonce for now.
+ var nonce uint64
+ for _, subpool := range p.subpools {
+ if next := subpool.Nonce(addr); nonce < next {
+ nonce = next
+ }
}
- return nil
+ return nonce
}
-// validateTx checks whether a transaction is valid according to the consensus
-// rules and adheres to some heuristic limits of the local node (price and size).
-func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
- // Signature has been checked already, this cannot error.
- from, _ := types.Sender(pool.signer, tx)
- // Drop the transaction if the gas fee cap is below the pool's minimum fee
- if pool.minimumFee != nil && tx.GasFeeCapIntCmp(pool.minimumFee) < 0 {
- return fmt.Errorf("%w: address %s have gas fee cap (%d) < pool minimum fee cap (%d)", ErrUnderpriced, from.Hex(), tx.GasFeeCap(), pool.minimumFee)
- }
+// Stats retrieves the current pool stats, namely the number of pending and the
+// number of queued (non-executable) transactions.
+func (p *TxPool) Stats() (int, int) {
+ var runnable, blocked int
+ for _, subpool := range p.subpools {
+ run, block := subpool.Stats()
- // Ensure the transaction adheres to nonce ordering
- // Transactor should have enough funds to cover the costs
- if err := pool.checkTxState(from, tx); err != nil {
- return err
+ runnable += run
+ blocked += block
}
- return nil
+ return runnable, blocked
}
-// add validates a transaction and inserts it into the non-executable queue for later
-// pending promotion and execution. If the transaction is a replacement for an already
-// pending or queued one, it overwrites the previous transaction if its price is higher.
-//
-// If a newly added transaction is marked as local, its sending account will be
-// be added to the allowlist, preventing any associated transaction from being dropped
-// out of the pool due to pricing constraints.
-func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
- // If the transaction is already known, discard it
- hash := tx.Hash()
- if pool.all.Get(hash) != nil {
- log.Trace("Discarding already known transaction", "hash", hash)
- knownTxMeter.Mark(1)
- return false, ErrAlreadyKnown
- }
- // Make the local flag. If it's from local source or it's from the network but
- // the sender is marked as local previously, treat it as the local transaction.
- isLocal := local || pool.locals.containsTx(tx)
-
- // If the transaction fails basic validation, discard it
- if err := pool.validateTx(tx, isLocal); err != nil {
- log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
- invalidTxMeter.Mark(1)
- return false, err
- }
-
- // already validated by this point
- from, _ := types.Sender(pool.signer, tx)
-
- // If the transaction pool is full, discard underpriced transactions
- if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
- // If the new transaction is underpriced, don't accept it
- if !isLocal && pool.priced.Underpriced(tx) {
- log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
- underpricedTxMeter.Mark(1)
- return false, ErrUnderpriced
- }
-
- // We're about to replace a transaction. The reorg does a more thorough
- // analysis of what to remove and how, but it runs async. We don't want to
- // do too many replacements between reorg-runs, so we cap the number of
- // replacements to 25% of the slots
- if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
- throttleTxMeter.Mark(1)
- return false, ErrTxPoolOverflow
- }
-
- // New transaction is better than our worse ones, make room for it.
- // If it's a local transaction, forcibly discard all available transactions.
- // Otherwise if we can't make enough room for new one, abort the operation.
- drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)
-
- // Special case, we still can't make the room for the new remote one.
- if !isLocal && !success {
- log.Trace("Discarding overflown transaction", "hash", hash)
- overflowedTxMeter.Mark(1)
- return false, ErrTxPoolOverflow
- }
-
- // If the new transaction is a future transaction it should never churn pending transactions
- if !isLocal && pool.isGapped(from, tx) {
- var replacesPending bool
- for _, dropTx := range drop {
- dropSender, _ := types.Sender(pool.signer, dropTx)
- if list := pool.pending[dropSender]; list != nil && list.Contains(dropTx.Nonce()) {
- replacesPending = true
- break
- }
- }
- // Add all transactions back to the priced queue
- if replacesPending {
- for _, dropTx := range drop {
- pool.priced.Put(dropTx, false)
- }
- log.Trace("Discarding future transaction replacing pending tx", "hash", hash)
- return false, ErrFutureReplacePending
- }
- }
-
- // Kick out the underpriced remote transactions.
- for _, tx := range drop {
- log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
- underpricedTxMeter.Mark(1)
- dropped := pool.removeTx(tx.Hash(), false)
- pool.changesSinceReorg += dropped
- }
- }
+// Content retrieves the data content of the transaction pool, returning all the
+// pending as well as queued transactions, grouped by account and sorted by nonce.
+func (p *TxPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
+ var (
+ runnable = make(map[common.Address][]*types.Transaction)
+ blocked = make(map[common.Address][]*types.Transaction)
+ )
+ for _, subpool := range p.subpools {
+ run, block := subpool.Content()
- // Try to replace an existing transaction in the pending pool
- if list := pool.pending[from]; list != nil && list.Contains(tx.Nonce()) {
- // Nonce already pending, check if required price bump is met
- inserted, old := list.Add(tx, pool.config.PriceBump)
- if !inserted {
- pendingDiscardMeter.Mark(1)
- return false, ErrReplaceUnderpriced
+ for addr, txs := range run {
+ runnable[addr] = txs
}
- // New transaction is better, replace old one
- if old != nil {
- pool.all.Remove(old.Hash())
- pool.priced.Removed(1)
- pendingReplaceMeter.Mark(1)
+ for addr, txs := range block {
+ blocked[addr] = txs
}
- pool.all.Add(tx, isLocal)
- pool.priced.Put(tx, isLocal)
- pool.journalTx(from, tx)
- pool.queueTxEvent(tx)
- log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
-
- // Successful promotion, bump the heartbeat
- pool.beats[from] = time.Now()
- return old != nil, nil
- }
- // New transaction isn't replacing a pending one, push into queue
- replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
- if err != nil {
- return false, err
- }
- // Mark local addresses and journal local transactions
- if local && !pool.locals.contains(from) {
- log.Info("Setting new local account", "address", from)
- pool.locals.add(from)
- pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
}
- if isLocal {
- localGauge.Inc(1)
- }
- pool.journalTx(from, tx)
-
- log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
- return replaced, nil
+ return runnable, blocked
}
-// isGapped reports whether the given transaction is immediately executable.
-func (pool *TxPool) isGapped(from common.Address, tx *types.Transaction) bool {
- // Short circuit if transaction matches pending nonce and can be promoted
- // to pending list as an executable transaction.
- next := pool.pendingNonces.get(from)
- if tx.Nonce() == next {
- return false
- }
- // The transaction has a nonce gap with pending list, it's only considered
- // as executable if transactions in queue can fill up the nonce gap.
- queue, ok := pool.queue[from]
- if !ok {
- return true
- }
- for nonce := next; nonce < tx.Nonce(); nonce++ {
- if !queue.Contains(nonce) {
- return true // txs in queue can't fill up the nonce gap
+// ContentFrom retrieves the data content of the transaction pool, returning the
+// pending as well as queued transactions of this address, grouped by nonce.
+func (p *TxPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
+ for _, subpool := range p.subpools {
+ run, block := subpool.ContentFrom(addr)
+ if len(run) != 0 || len(block) != 0 {
+ return run, block
}
}
- return false
-}
-
-// enqueueTx inserts a new transaction into the non-executable transaction queue.
-//
-// Note, this method assumes the pool lock is held!
-func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
- // Try to insert the transaction into the future queue
- from, _ := types.Sender(pool.signer, tx) // already validated
- if pool.queue[from] == nil {
- pool.queue[from] = newList(false)
- }
- inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
- if !inserted {
- // An older transaction was better, discard this
- queuedDiscardMeter.Mark(1)
- return false, ErrReplaceUnderpriced
- }
- // Discard any previous transaction and mark this
- if old != nil {
- pool.all.Remove(old.Hash())
- pool.priced.Removed(1)
- queuedReplaceMeter.Mark(1)
- } else {
- // Nothing was replaced, bump the queued counter
- queuedGauge.Inc(1)
- }
- // If the transaction isn't in lookup set but it's expected to be there,
- // show the error log.
- if pool.all.Get(hash) == nil && !addAll {
- log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
- }
- if addAll {
- pool.all.Add(tx, local)
- pool.priced.Put(tx, local)
- }
- // If we never record the heartbeat, do it right now.
- if _, exist := pool.beats[from]; !exist {
- pool.beats[from] = time.Now()
- }
- return old != nil, nil
-}
-
-// journalTx adds the specified transaction to the local disk journal if it is
-// deemed to have been sent from a local account.
-func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
- // Only journal if it's enabled and the transaction is local
- if pool.journal == nil || !pool.locals.contains(from) {
- return
- }
- if err := pool.journal.insert(tx); err != nil {
- log.Warn("Failed to journal local transaction", "err", err)
- }
+ return []*types.Transaction{}, []*types.Transaction{}
}
-// promoteTx adds a transaction to the pending (processable) list of transactions
-// and returns whether it was inserted or an older was better.
-//
-// Note, this method assumes the pool lock is held!
-func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
- // Try to insert the transaction into the pending queue
- if pool.pending[addr] == nil {
- pool.pending[addr] = newList(true)
- }
- list := pool.pending[addr]
-
- inserted, old := list.Add(tx, pool.config.PriceBump)
- if !inserted {
- // An older transaction was better, discard this
- pool.all.Remove(hash)
- pool.priced.Removed(1)
- pendingDiscardMeter.Mark(1)
- return false
+// Locals retrieves the accounts currently considered local by the pool.
+func (p *TxPool) Locals() []common.Address {
+ // Retrieve the locals from each subpool and deduplicate them
+ locals := make(map[common.Address]struct{})
+ for _, subpool := range p.subpools {
+ for _, local := range subpool.Locals() {
+ locals[local] = struct{}{}
+ }
}
- // Otherwise discard any previous transaction and mark this
- if old != nil {
- pool.all.Remove(old.Hash())
- pool.priced.Removed(1)
- pendingReplaceMeter.Mark(1)
- } else {
- // Nothing was replaced, bump the pending counter
- pendingGauge.Inc(1)
+ // Flatten and return the deduplicated local set
+ flat := make([]common.Address, 0, len(locals))
+ for local := range locals {
+ flat = append(flat, local)
}
- // Set the potentially new pending nonce and notify any subsystems of the new tx
- pool.pendingNonces.set(addr, tx.Nonce()+1)
-
- // Successful promotion, bump the heartbeat
- pool.beats[addr] = time.Now()
- return true
-}
-
-// AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
-// senders as a local ones, ensuring they go around the local pricing constraints.
-//
-// This method is used to add transactions from the RPC API and performs synchronous pool
-// reorganization and event propagation.
-func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
- return pool.addTxs(txs, !pool.config.NoLocals, true)
-}
-
-// AddLocal enqueues a single local transaction into the pool if it is valid. This is
-// a convenience wrapper around AddLocals.
-func (pool *TxPool) AddLocal(tx *types.Transaction) error {
- errs := pool.AddLocals([]*types.Transaction{tx})
- return errs[0]
-}
-
-// AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
-// senders are not among the locally tracked ones, full pricing constraints will apply.
-//
-// This method is used to add transactions from the p2p network and does not wait for pool
-// reorganization and internal event propagation.
-func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
- return pool.addTxs(txs, false, false)
-}
-
-// AddRemotesSync is like AddRemotes, but waits for pool reorganization. Tests use this method.
-func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
- return pool.addTxs(txs, false, true)
+ return flat
}
-// This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
-func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
- errs := pool.AddRemotesSync([]*types.Transaction{tx})
- return errs[0]
-}
-
-// AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
-// wrapper around AddRemotes.
-//
-// Deprecated: use AddRemotes
-func (pool *TxPool) AddRemote(tx *types.Transaction) error {
- errs := pool.AddRemotes([]*types.Transaction{tx})
- return errs[0]
-}
-
-// addTxs attempts to queue a batch of transactions if they are valid.
-func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
- // Filter out known ones without obtaining the pool lock or recovering signatures
- var (
- errs = make([]error, len(txs))
- news = make([]*types.Transaction, 0, len(txs))
- )
- for i, tx := range txs {
- // If the transaction is known, pre-set the error slot
- if pool.all.Get(tx.Hash()) != nil {
- errs[i] = ErrAlreadyKnown
- knownTxMeter.Mark(1)
- continue
- }
- // Exclude transactions with basic errors, e.g invalid signatures and
- // insufficient intrinsic gas as soon as possible and cache senders
- // in transactions before obtaining lock
-
- if err := pool.validateTxBasics(tx, local); err != nil {
- errs[i] = err
- invalidTxMeter.Mark(1)
- continue
+// Status returns the known status (unknown/pending/queued) of a transaction
+// identified by its hash.
+func (p *TxPool) Status(hash common.Hash) TxStatus {
+ for _, subpool := range p.subpools {
+ if status := subpool.Status(hash); status != TxStatusUnknown {
+ return status
}
- // Accumulate all unknown transactions for deeper processing
- news = append(news, tx)
- }
- if len(news) == 0 {
- return errs
}
-
- // Process all the new transaction and merge any errors into the original slice
- pool.mu.Lock()
- newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
- pool.mu.Unlock()
-
- var nilSlot = 0
- for _, err := range newErrs {
- for errs[nilSlot] != nil {
- nilSlot++
- }
- errs[nilSlot] = err
- nilSlot++
- }
- // Reorg the pool internals if needed and return
- done := pool.requestPromoteExecutables(dirtyAddrs)
- if sync {
- <-done
- }
- return errs
-}
-
-// addTxsLocked attempts to queue a batch of transactions if they are valid.
-// The transaction pool lock must be held.
-func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
- dirty := newAccountSet(pool.signer)
- errs := make([]error, len(txs))
- for i, tx := range txs {
- replaced, err := pool.add(tx, local)
- errs[i] = err
- if err == nil && !replaced {
- dirty.addTx(tx)
- }
- }
- validTxMeter.Mark(int64(len(dirty.accounts)))
- return errs, dirty
-}
-
-// Status returns the status (unknown/pending/queued) of a batch of transactions
-// identified by their hashes.
-func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
- status := make([]TxStatus, len(hashes))
- for i, hash := range hashes {
- tx := pool.Get(hash)
- if tx == nil {
- continue
- }
- from, _ := types.Sender(pool.signer, tx) // already validated
- pool.mu.RLock()
- if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
- status[i] = TxStatusPending
- } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
- status[i] = TxStatusQueued
- }
- // implicit else: the tx may have been included into a block between
- // checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct
- pool.mu.RUnlock()
- }
- return status
-}
-
-// Get returns a transaction if it is contained in the pool and nil otherwise.
-func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
- return pool.all.Get(hash)
-}
-
-// Has returns an indicator whether txpool has a transaction cached with the
-// given hash.
-func (pool *TxPool) Has(hash common.Hash) bool {
- return pool.all.Get(hash) != nil
-}
-
-// Has returns an indicator whether txpool has a local transaction cached with
-// the given hash.
-func (pool *TxPool) HasLocal(hash common.Hash) bool {
- return pool.all.GetLocal(hash) != nil
-}
-
-func (pool *TxPool) RemoveTx(hash common.Hash) {
- pool.mu.Lock()
- defer pool.mu.Unlock()
-
- pool.removeTx(hash, true)
-}
-
-// removeTx removes a single transaction from the queue, moving all subsequent
-// transactions back to the future queue.
-// Returns the number of transactions removed from the pending queue.
-func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) int {
- // Fetch the transaction we wish to delete
- tx := pool.all.Get(hash)
- if tx == nil {
- return 0
- }
- addr, _ := types.Sender(pool.signer, tx) // already validated during insertion
-
- // Remove it from the list of known transactions
- pool.all.Remove(hash)
- if outofbound {
- pool.priced.Removed(1)
- }
- if pool.locals.contains(addr) {
- localGauge.Dec(1)
- }
- // Remove the transaction from the pending lists and reset the account nonce
- if pending := pool.pending[addr]; pending != nil {
- if removed, invalids := pending.Remove(tx); removed {
- // If no more pending transactions are left, remove the list
- if pending.Empty() {
- delete(pool.pending, addr)
- }
- // Postpone any invalidated transactions
- for _, tx := range invalids {
- // Internal shuffle shouldn't touch the lookup set.
- pool.enqueueTx(tx.Hash(), tx, false, false)
- }
- // Update the account nonce if needed
- pool.pendingNonces.setIfLower(addr, tx.Nonce())
- // Reduce the pending counter
- pendingGauge.Dec(int64(1 + len(invalids)))
- return 1 + len(invalids)
- }
- }
- // Transaction is in the future queue
- if future := pool.queue[addr]; future != nil {
- if removed, _ := future.Remove(tx); removed {
- // Reduce the queued counter
- queuedGauge.Dec(1)
- }
- if future.Empty() {
- delete(pool.queue, addr)
- delete(pool.beats, addr)
- }
- }
- return 0
-}
-
-// requestReset requests a pool reset to the new head block.
-// The returned channel is closed when the reset has occurred.
-func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
- select {
- case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
- return <-pool.reorgDoneCh
- case <-pool.reorgShutdownCh:
- return pool.reorgShutdownCh
- }
-}
-
-// requestPromoteExecutables requests transaction promotion checks for the given addresses.
-// The returned channel is closed when the promotion checks have occurred.
-func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
- select {
- case pool.reqPromoteCh <- set:
- return <-pool.reorgDoneCh
- case <-pool.reorgShutdownCh:
- return pool.reorgShutdownCh
- }
-}
-
-// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
-func (pool *TxPool) queueTxEvent(tx *types.Transaction) {
- select {
- case pool.queueTxEventCh <- tx:
- case <-pool.reorgShutdownCh:
- }
-}
-
-// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
-// call those methods directly, but request them being run using requestReset and
-// requestPromoteExecutables instead.
-func (pool *TxPool) scheduleReorgLoop() {
- defer pool.wg.Done()
-
- var (
- curDone chan struct{} // non-nil while runReorg is active
- nextDone = make(chan struct{})
- launchNextRun bool
- reset *txpoolResetRequest
- dirtyAccounts *accountSet
- queuedEvents = make(map[common.Address]*sortedMap)
- )
- for {
- // Launch next background reorg if needed
- if curDone == nil && launchNextRun {
- // Run the background reorg and announcements
- go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)
-
- // Prepare everything for the next round of reorg
- curDone, nextDone = nextDone, make(chan struct{})
- launchNextRun = false
-
- reset, dirtyAccounts = nil, nil
- queuedEvents = make(map[common.Address]*sortedMap)
- }
-
- select {
- case req := <-pool.reqResetCh:
- // Reset request: update head if request is already pending.
- if reset == nil {
- reset = req
- } else {
- reset.newHead = req.newHead
- }
- launchNextRun = true
- pool.reorgDoneCh <- nextDone
-
- case req := <-pool.reqPromoteCh:
- // Promote request: update address set if request is already pending.
- if dirtyAccounts == nil {
- dirtyAccounts = req
- } else {
- dirtyAccounts.merge(req)
- }
- launchNextRun = true
- pool.reorgDoneCh <- nextDone
-
- case tx := <-pool.queueTxEventCh:
- // Queue up the event, but don't schedule a reorg. It's up to the caller to
- // request one later if they want the events sent.
- addr, _ := types.Sender(pool.signer, tx)
- if _, ok := queuedEvents[addr]; !ok {
- queuedEvents[addr] = newSortedMap()
- }
- queuedEvents[addr].Put(tx)
-
- case <-curDone:
- curDone = nil
-
- case <-pool.reorgShutdownCh:
- // Wait for current run to finish.
- if curDone != nil {
- <-curDone
- }
- close(nextDone)
- return
- }
- }
-}
-
-// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
-func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*sortedMap) {
- defer func(t0 time.Time) {
- reorgDurationTimer.Update(time.Since(t0))
- }(time.Now())
- defer close(done)
-
- var promoteAddrs []common.Address
- if dirtyAccounts != nil && reset == nil {
- // Only dirty accounts need to be promoted, unless we're resetting.
- // For resets, all addresses in the tx queue will be promoted and
- // the flatten operation can be avoided.
- promoteAddrs = dirtyAccounts.flatten()
- }
- pool.mu.Lock()
- if reset != nil {
- // Reset from the old head to the new, rescheduling any reorged transactions
- pool.reset(reset.oldHead, reset.newHead)
-
- // Nonces were reset, discard any events that became stale
- for addr := range events {
- events[addr].Forward(pool.pendingNonces.get(addr))
- if events[addr].Len() == 0 {
- delete(events, addr)
- }
- }
- // Reset needs promote for all addresses
- promoteAddrs = make([]common.Address, 0, len(pool.queue))
- for addr := range pool.queue {
- promoteAddrs = append(promoteAddrs, addr)
- }
- }
- // Check for pending transactions for every account that sent new ones
- promoted := pool.promoteExecutables(promoteAddrs)
-
- // If a new block appeared, validate the pool of pending transactions. This will
- // remove any transaction that has been included in the block or was invalidated
- // because of another transaction (e.g. higher gas price).
- if reset != nil {
- pool.demoteUnexecutables()
- if reset.newHead != nil && pool.chainconfig.IsApricotPhase3(reset.newHead.Time) {
- _, baseFeeEstimate, err := dummy.EstimateNextBaseFee(pool.chainconfig, reset.newHead, uint64(time.Now().Unix()))
- if err == nil {
- pool.priced.SetBaseFee(baseFeeEstimate)
- }
- }
-
- // Update all accounts to the latest known pending nonce
- nonces := make(map[common.Address]uint64, len(pool.pending))
- for addr, list := range pool.pending {
- highestPending := list.LastElement()
- nonces[addr] = highestPending.Nonce() + 1
- }
- pool.pendingNonces.setAll(nonces)
- }
- // Ensure pool.queue and pool.pending sizes stay within the configured limits.
- pool.truncatePending()
- pool.truncateQueue()
-
- dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
- pool.changesSinceReorg = 0 // Reset change counter
- pool.mu.Unlock()
-
- if reset != nil && reset.newHead != nil {
- pool.reorgFeed.Send(core.NewTxPoolReorgEvent{Head: reset.newHead})
- }
-
- // Notify subsystems for newly added transactions
- for _, tx := range promoted {
- addr, _ := types.Sender(pool.signer, tx)
- if _, ok := events[addr]; !ok {
- events[addr] = newSortedMap()
- }
- events[addr].Put(tx)
- }
- if len(events) > 0 {
- var txs []*types.Transaction
- for _, set := range events {
- txs = append(txs, set.Flatten()...)
- }
- pool.txFeed.Send(core.NewTxsEvent{Txs: txs})
- }
-}
-
-// reset retrieves the current state of the blockchain and ensures the content
-// of the transaction pool is valid with regard to the chain state.
-func (pool *TxPool) reset(oldHead, newHead *types.Header) {
- // If we're reorging an old state, reinject all dropped transactions
- var reinject types.Transactions
-
- if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
- // If the reorg is too deep, avoid doing it (will happen during fast sync)
- oldNum := oldHead.Number.Uint64()
- newNum := newHead.Number.Uint64()
-
- if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
- log.Debug("Skipping deep transaction reorg", "depth", depth)
- } else {
- // Reorg seems shallow enough to pull in all transactions into memory
- var discarded, included types.Transactions
- var (
- rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
- add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
- )
- if rem == nil {
- // This can happen if a setHead is performed, where we simply discard the old
- // head from the chain.
- // If that is the case, we don't have the lost transactions anymore, and
- // there's nothing to add
- if newNum >= oldNum {
- // If we reorged to a same or higher number, then it's not a case of setHead
- log.Warn("Transaction pool reset with missing oldhead",
- "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
- return
- }
- // If the reorg ended up on a lower number, it's indicative of setHead being the cause
- log.Debug("Skipping transaction reset caused by setHead",
- "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
- // We still need to update the current state s.th. the lost transactions can be readded by the user
- } else {
- for rem.NumberU64() > add.NumberU64() {
- discarded = append(discarded, rem.Transactions()...)
- if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
- log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
- return
- }
- }
- for add.NumberU64() > rem.NumberU64() {
- included = append(included, add.Transactions()...)
- if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
- log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
- return
- }
- }
- for rem.Hash() != add.Hash() {
- discarded = append(discarded, rem.Transactions()...)
- if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
- log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
- return
- }
- included = append(included, add.Transactions()...)
- if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
- log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
- return
- }
- }
- reinject = types.TxDifference(discarded, included)
- }
- }
- }
- // Initialize the internal state to the current head
- if newHead == nil {
- newHead = pool.chain.CurrentBlock() // Special case during testing
- }
- statedb, err := pool.chain.StateAt(newHead.Root)
- if err != nil {
- log.Error("Failed to reset txpool state", "err", err, "root", newHead.Root)
- return
- }
- pool.currentHead = newHead
- pool.currentStateLock.Lock()
- pool.currentState = statedb
- pool.currentStateLock.Unlock()
- pool.pendingNonces = newNoncer(statedb)
- pool.currentMaxGas.Store(newHead.GasLimit)
-
- // Inject any transactions discarded due to reorgs
- log.Debug("Reinjecting stale transactions", "count", len(reinject))
- pool.chain.SenderCacher().Recover(pool.signer, reinject)
- pool.addTxsLocked(reinject, false)
-
- // Update all fork indicator by next pending block number.
- next := new(big.Int).Add(newHead.Number, big.NewInt(1))
- pool.istanbul.Store(pool.chainconfig.IsIstanbul(next))
- pool.eip2718.Store(pool.chainconfig.IsApricotPhase2(newHead.Time))
- pool.eip1559.Store(pool.chainconfig.IsApricotPhase3(newHead.Time))
- pool.eip3860.Store(pool.chainconfig.IsDUpgrade(newHead.Time))
-}
-
-// promoteExecutables moves transactions that have become processable from the
-// future queue to the set of pending transactions. During this process, all
-// invalidated transactions (low nonce, low balance) are deleted.
-func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
- pool.currentStateLock.Lock()
- defer pool.currentStateLock.Unlock()
-
- // Track the promoted transactions to broadcast them at once
- var promoted []*types.Transaction
-
- // Iterate over all accounts and promote any executable transactions
- for _, addr := range accounts {
- list := pool.queue[addr]
- if list == nil {
- continue // Just in case someone calls with a non existing account
- }
- // Drop all transactions that are deemed too old (low nonce)
- forwards := list.Forward(pool.currentState.GetNonce(addr))
- for _, tx := range forwards {
- hash := tx.Hash()
- pool.all.Remove(hash)
- }
- log.Trace("Removed old queued transactions", "count", len(forwards))
- // Drop all transactions that are too costly (low balance or out of gas)
- drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas.Load())
- for _, tx := range drops {
- hash := tx.Hash()
- pool.all.Remove(hash)
- }
- log.Trace("Removed unpayable queued transactions", "count", len(drops))
- queuedNofundsMeter.Mark(int64(len(drops)))
-
- // Gather all executable transactions and promote them
- readies := list.Ready(pool.pendingNonces.get(addr))
- for _, tx := range readies {
- hash := tx.Hash()
- if pool.promoteTx(addr, hash, tx) {
- promoted = append(promoted, tx)
- }
- }
- log.Trace("Promoted queued transactions", "count", len(promoted))
- queuedGauge.Dec(int64(len(readies)))
-
- // Drop all transactions over the allowed limit
- var caps types.Transactions
- if !pool.locals.contains(addr) {
- caps = list.Cap(int(pool.config.AccountQueue))
- for _, tx := range caps {
- hash := tx.Hash()
- pool.all.Remove(hash)
- log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
- }
- queuedRateLimitMeter.Mark(int64(len(caps)))
- }
- // Mark all the items dropped as removed
- pool.priced.Removed(len(forwards) + len(drops) + len(caps))
- queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
- if pool.locals.contains(addr) {
- localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
- }
- // Delete the entire queue entry if it became empty.
- if list.Empty() {
- delete(pool.queue, addr)
- delete(pool.beats, addr)
- }
- }
- return promoted
-}
-
-// truncatePending removes transactions from the pending queue if the pool is above the
-// pending limit. The algorithm tries to reduce transaction counts by an approximately
-// equal number for all for accounts with many pending transactions.
-func (pool *TxPool) truncatePending() {
- pending := uint64(0)
- for _, list := range pool.pending {
- pending += uint64(list.Len())
- }
- if pending <= pool.config.GlobalSlots {
- return
- }
-
- pendingBeforeCap := pending
- // Assemble a spam order to penalize large transactors first
- spammers := prque.New[int64, common.Address](nil)
- for addr, list := range pool.pending {
- // Only evict transactions from high rollers
- if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
- spammers.Push(addr, int64(list.Len()))
- }
- }
- // Gradually drop transactions from offenders
- offenders := []common.Address{}
- for pending > pool.config.GlobalSlots && !spammers.Empty() {
- // Retrieve the next offender if not local address
- offender, _ := spammers.Pop()
- offenders = append(offenders, offender)
-
- // Equalize balances until all the same or below threshold
- if len(offenders) > 1 {
- // Calculate the equalization threshold for all current offenders
- threshold := pool.pending[offender].Len()
-
- // Iteratively reduce all offenders until below limit or threshold reached
- for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
- for i := 0; i < len(offenders)-1; i++ {
- list := pool.pending[offenders[i]]
-
- caps := list.Cap(list.Len() - 1)
- for _, tx := range caps {
- // Drop the transaction from the global pools too
- hash := tx.Hash()
- pool.all.Remove(hash)
-
- // Update the account nonce to the dropped transaction
- pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
- log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
- }
- pool.priced.Removed(len(caps))
- pendingGauge.Dec(int64(len(caps)))
- if pool.locals.contains(offenders[i]) {
- localGauge.Dec(int64(len(caps)))
- }
- pending--
- }
- }
- }
- }
-
- // If still above threshold, reduce to limit or min allowance
- if pending > pool.config.GlobalSlots && len(offenders) > 0 {
- for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
- for _, addr := range offenders {
- list := pool.pending[addr]
-
- caps := list.Cap(list.Len() - 1)
- for _, tx := range caps {
- // Drop the transaction from the global pools too
- hash := tx.Hash()
- pool.all.Remove(hash)
-
- // Update the account nonce to the dropped transaction
- pool.pendingNonces.setIfLower(addr, tx.Nonce())
- log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
- }
- pool.priced.Removed(len(caps))
- pendingGauge.Dec(int64(len(caps)))
- if pool.locals.contains(addr) {
- localGauge.Dec(int64(len(caps)))
- }
- pending--
- }
- }
- }
- pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
-}
-
-// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
-func (pool *TxPool) truncateQueue() {
- queued := uint64(0)
- for _, list := range pool.queue {
- queued += uint64(list.Len())
- }
- if queued <= pool.config.GlobalQueue {
- return
- }
-
- // Sort all accounts with queued transactions by heartbeat
- addresses := make(addressesByHeartbeat, 0, len(pool.queue))
- for addr := range pool.queue {
- if !pool.locals.contains(addr) { // don't drop locals
- addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
- }
- }
- sort.Sort(sort.Reverse(addresses))
-
- // Drop transactions until the total is below the limit or only locals remain
- for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
- addr := addresses[len(addresses)-1]
- list := pool.queue[addr.address]
-
- addresses = addresses[:len(addresses)-1]
-
- // Drop all transactions if they are less than the overflow
- if size := uint64(list.Len()); size <= drop {
- for _, tx := range list.Flatten() {
- pool.removeTx(tx.Hash(), true)
- }
- drop -= size
- queuedRateLimitMeter.Mark(int64(size))
- continue
- }
- // Otherwise drop only last few transactions
- txs := list.Flatten()
- for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
- pool.removeTx(txs[i].Hash(), true)
- drop--
- queuedRateLimitMeter.Mark(1)
- }
- }
-}
-
-// demoteUnexecutables removes invalid and processed transactions from the pools
-// executable/pending queue and any subsequent transactions that become unexecutable
-// are moved back into the future queue.
-//
-// Note: transactions are not marked as removed in the priced list because re-heaping
-// is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful
-// to trigger a re-heap is this function
-func (pool *TxPool) demoteUnexecutables() {
- pool.currentStateLock.Lock()
- defer pool.currentStateLock.Unlock()
-
- // Iterate over all accounts and demote any non-executable transactions
- for addr, list := range pool.pending {
- nonce := pool.currentState.GetNonce(addr)
-
- // Drop all transactions that are deemed too old (low nonce)
- olds := list.Forward(nonce)
- for _, tx := range olds {
- hash := tx.Hash()
- pool.all.Remove(hash)
- log.Trace("Removed old pending transaction", "hash", hash)
- }
- // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
- drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas.Load())
- for _, tx := range drops {
- hash := tx.Hash()
- log.Trace("Removed unpayable pending transaction", "hash", hash)
- pool.all.Remove(hash)
- }
- pendingNofundsMeter.Mark(int64(len(drops)))
-
- for _, tx := range invalids {
- hash := tx.Hash()
- log.Trace("Demoting pending transaction", "hash", hash)
-
- // Internal shuffle shouldn't touch the lookup set.
- pool.enqueueTx(hash, tx, false, false)
- }
- pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
- if pool.locals.contains(addr) {
- localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
- }
- // If there's a gap in front, alert (should never happen) and postpone all transactions
- if list.Len() > 0 && list.txs.Get(nonce) == nil {
- gapped := list.Cap(0)
- for _, tx := range gapped {
- hash := tx.Hash()
- log.Error("Demoting invalidated transaction", "hash", hash)
-
- // Internal shuffle shouldn't touch the lookup set.
- pool.enqueueTx(hash, tx, false, false)
- }
- pendingGauge.Dec(int64(len(gapped)))
- }
- // Delete the entire pending entry if it became empty.
- if list.Empty() {
- delete(pool.pending, addr)
- }
- }
-}
-
-func (pool *TxPool) startPeriodicFeeUpdate() {
- if pool.chainconfig.ApricotPhase3BlockTimestamp == nil {
- return
- }
-
- // Call updateBaseFee here to ensure that there is not a [baseFeeUpdateInterval] delay
- // when starting up in ApricotPhase3 before the base fee is updated.
- if time.Now().After(utils.Uint64ToTime(pool.chainconfig.ApricotPhase3BlockTimestamp)) {
- pool.updateBaseFee()
- }
-
- pool.wg.Add(1)
- go pool.periodicBaseFeeUpdate()
-}
-
-func (pool *TxPool) periodicBaseFeeUpdate() {
- defer pool.wg.Done()
-
- // Sleep until its time to start the periodic base fee update or the tx pool is shutting down
- select {
- case <-time.After(time.Until(utils.Uint64ToTime(pool.chainconfig.ApricotPhase3BlockTimestamp))):
- case <-pool.generalShutdownChan:
- return // Return early if shutting down
- }
-
- // Update the base fee every [baseFeeUpdateInterval]
- // and shutdown when [generalShutdownChan] is closed by Stop()
- for {
- select {
- case <-time.After(baseFeeUpdateInterval):
- pool.updateBaseFee()
- case <-pool.generalShutdownChan:
- return
- }
- }
-}
-
-func (pool *TxPool) updateBaseFee() {
- pool.mu.Lock()
- defer pool.mu.Unlock()
-
- _, baseFeeEstimate, err := dummy.EstimateNextBaseFee(pool.chainconfig, pool.currentHead, uint64(time.Now().Unix()))
- if err == nil {
- pool.priced.SetBaseFee(baseFeeEstimate)
- } else {
- log.Error("failed to update base fee", "currentHead", pool.currentHead.Hash(), "err", err)
- }
-}
-
-// addressByHeartbeat is an account address tagged with its last activity timestamp.
-type addressByHeartbeat struct {
- address common.Address
- heartbeat time.Time
-}
-
-type addressesByHeartbeat []addressByHeartbeat
-
-func (a addressesByHeartbeat) Len() int { return len(a) }
-func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
-func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-// accountSet is simply a set of addresses to check for existence, and a signer
-// capable of deriving addresses from transactions.
-type accountSet struct {
- accounts map[common.Address]struct{}
- signer types.Signer
- cache *[]common.Address
-}
-
-// newAccountSet creates a new address set with an associated signer for sender
-// derivations.
-func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
- as := &accountSet{
- accounts: make(map[common.Address]struct{}, len(addrs)),
- signer: signer,
- }
- for _, addr := range addrs {
- as.add(addr)
- }
- return as
-}
-
-// contains checks if a given address is contained within the set.
-func (as *accountSet) contains(addr common.Address) bool {
- _, exist := as.accounts[addr]
- return exist
-}
-
-// containsTx checks if the sender of a given tx is within the set. If the sender
-// cannot be derived, this method returns false.
-func (as *accountSet) containsTx(tx *types.Transaction) bool {
- if addr, err := types.Sender(as.signer, tx); err == nil {
- return as.contains(addr)
- }
- return false
-}
-
-// add inserts a new address into the set to track.
-func (as *accountSet) add(addr common.Address) {
- as.accounts[addr] = struct{}{}
- as.cache = nil
-}
-
-// addTx adds the sender of tx into the set.
-func (as *accountSet) addTx(tx *types.Transaction) {
- if addr, err := types.Sender(as.signer, tx); err == nil {
- as.add(addr)
- }
-}
-
-// flatten returns the list of addresses within this set, also caching it for later
-// reuse. The returned slice should not be changed!
-func (as *accountSet) flatten() []common.Address {
- if as.cache == nil {
- accounts := make([]common.Address, 0, len(as.accounts))
- for account := range as.accounts {
- accounts = append(accounts, account)
- }
- as.cache = &accounts
- }
- return *as.cache
-}
-
-// merge adds all addresses from the 'other' set into 'as'.
-func (as *accountSet) merge(other *accountSet) {
- for addr := range other.accounts {
- as.accounts[addr] = struct{}{}
- }
- as.cache = nil
-}
-
-// lookup is used internally by TxPool to track transactions while allowing
-// lookup without mutex contention.
-//
-// Note, although this type is properly protected against concurrent access, it
-// is **not** a type that should ever be mutated or even exposed outside of the
-// transaction pool, since its internal state is tightly coupled with the pools
-// internal mechanisms. The sole purpose of the type is to permit out-of-bound
-// peeking into the pool in TxPool.Get without having to acquire the widely scoped
-// TxPool.mu mutex.
-//
-// This lookup set combines the notion of "local transactions", which is useful
-// to build upper-level structure.
-type lookup struct {
- slots int
- lock sync.RWMutex
- locals map[common.Hash]*types.Transaction
- remotes map[common.Hash]*types.Transaction
-}
-
-// newLookup returns a new lookup structure.
-func newLookup() *lookup {
- return &lookup{
- locals: make(map[common.Hash]*types.Transaction),
- remotes: make(map[common.Hash]*types.Transaction),
- }
-}
-
-// Range calls f on each key and value present in the map. The callback passed
-// should return the indicator whether the iteration needs to be continued.
-// Callers need to specify which set (or both) to be iterated.
-func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) {
- t.lock.RLock()
- defer t.lock.RUnlock()
-
- if local {
- for key, value := range t.locals {
- if !f(key, value, true) {
- return
- }
- }
- }
- if remote {
- for key, value := range t.remotes {
- if !f(key, value, false) {
- return
- }
- }
- }
-}
-
-// Get returns a transaction if it exists in the lookup, or nil if not found.
-func (t *lookup) Get(hash common.Hash) *types.Transaction {
- t.lock.RLock()
- defer t.lock.RUnlock()
-
- if tx := t.locals[hash]; tx != nil {
- return tx
- }
- return t.remotes[hash]
-}
-
-// GetLocal returns a transaction if it exists in the lookup, or nil if not found.
-func (t *lookup) GetLocal(hash common.Hash) *types.Transaction {
- t.lock.RLock()
- defer t.lock.RUnlock()
-
- return t.locals[hash]
-}
-
-// GetRemote returns a transaction if it exists in the lookup, or nil if not found.
-func (t *lookup) GetRemote(hash common.Hash) *types.Transaction {
- t.lock.RLock()
- defer t.lock.RUnlock()
-
- return t.remotes[hash]
-}
-
-// Count returns the current number of transactions in the lookup.
-func (t *lookup) Count() int {
- t.lock.RLock()
- defer t.lock.RUnlock()
-
- return len(t.locals) + len(t.remotes)
-}
-
-// LocalCount returns the current number of local transactions in the lookup.
-func (t *lookup) LocalCount() int {
- t.lock.RLock()
- defer t.lock.RUnlock()
-
- return len(t.locals)
-}
-
-// RemoteCount returns the current number of remote transactions in the lookup.
-func (t *lookup) RemoteCount() int {
- t.lock.RLock()
- defer t.lock.RUnlock()
-
- return len(t.remotes)
-}
-
-// Slots returns the current number of slots used in the lookup.
-func (t *lookup) Slots() int {
- t.lock.RLock()
- defer t.lock.RUnlock()
-
- return t.slots
-}
-
-// Add adds a transaction to the lookup.
-func (t *lookup) Add(tx *types.Transaction, local bool) {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- t.slots += numSlots(tx)
- slotsGauge.Update(int64(t.slots))
-
- if local {
- t.locals[tx.Hash()] = tx
- } else {
- t.remotes[tx.Hash()] = tx
- }
-}
-
-// Remove removes a transaction from the lookup.
-func (t *lookup) Remove(hash common.Hash) {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- tx, ok := t.locals[hash]
- if !ok {
- tx, ok = t.remotes[hash]
- }
- if !ok {
- log.Error("No transaction found to be deleted", "hash", hash)
- return
- }
- t.slots -= numSlots(tx)
- slotsGauge.Update(int64(t.slots))
-
- delete(t.locals, hash)
- delete(t.remotes, hash)
-}
-
-// RemoteToLocals migrates the transactions belongs to the given locals to locals
-// set. The assumption is held the locals set is thread-safe to be used.
-func (t *lookup) RemoteToLocals(locals *accountSet) int {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- var migrated int
- for hash, tx := range t.remotes {
- if locals.containsTx(tx) {
- t.locals[hash] = tx
- delete(t.remotes, hash)
- migrated += 1
- }
- }
- return migrated
-}
-
-// RemotesBelowTip finds all remote transactions below the given tip threshold.
-func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
- found := make(types.Transactions, 0, 128)
- t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
- if tx.GasTipCapIntCmp(threshold) < 0 {
- found = append(found, tx)
- }
- return true
- }, false, true) // Only iterate remotes
- return found
-}
-
-// numSlots calculates the number of slots needed for a single transaction.
-func numSlots(tx *types.Transaction) int {
- return int((tx.Size() + txSlotSize - 1) / txSlotSize)
+ return TxStatusUnknown
}
diff --git a/core/txpool/txpool_ext.go b/core/txpool/txpool_ext.go
new file mode 100644
index 0000000000..66ca1e235e
--- /dev/null
+++ b/core/txpool/txpool_ext.go
@@ -0,0 +1,83 @@
+// (c) 2021-2022, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package txpool
+
+import (
+ "math/big"
+
+ "github.com/ava-labs/coreth/core"
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/event"
+)
+
+// PendingSize returns the number of pending txs in the tx pool.
+func (pool *TxPool) PendingSize() int {
+ pending := pool.Pending(true)
+ count := 0
+ for _, txs := range pending {
+ count += len(txs)
+ }
+ return count
+}
+
+// IteratePending iterates over [pool.pending] until [f] returns false.
+// The caller must not modify [tx].
+func (pool *TxPool) IteratePending(f func(tx *types.Transaction) bool) {
+ pending := pool.Pending(false)
+ for _, list := range pending {
+ for _, tx := range list {
+ if !f(tx) {
+ return
+ }
+ }
+ }
+}
+
+// SetMinFee updates the minimum fee required by the transaction pool for a
+// new transaction, and drops all transactions below this threshold.
+func (p *TxPool) SetMinFee(tip *big.Int) {
+ for _, subpool := range p.subpools {
+ subpool.SetMinFee(tip)
+ }
+}
+
+func (p *TxPool) GasTip() *big.Int {
+ return p.subpools[0].GasTip()
+}
+
+func (p *TxPool) HasLocal(hash common.Hash) bool {
+ for _, subpool := range p.subpools {
+ if subpool.HasLocal(hash) {
+ return true
+ }
+ }
+ return false
+}
+
+func (pool *TxPool) SubscribeNewReorgEvent(ch chan<- core.NewTxPoolReorgEvent) event.Subscription {
+ return pool.subs.Track(pool.resetFeed.Subscribe(ch))
+}
+
+// TODO: consider removing these wrappers
+
+func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
+ return pool.Add(WrapTxs(txs), true, true)
+}
+
+func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
+ return pool.Add(WrapTxs(txs), false, false)
+}
+
+func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
+ return pool.Add(WrapTxs(txs), false, true)
+}
+
+func WrapTxs(txs []*types.Transaction) []*Transaction {
+ wrapped := make([]*Transaction, len(txs))
+ for i, tx := range txs {
+ wrapped[i] = &Transaction{Tx: tx}
+ }
+ return wrapped
+}
diff --git a/core/txpool/validation.go b/core/txpool/validation.go
new file mode 100644
index 0000000000..73f09b7c09
--- /dev/null
+++ b/core/txpool/validation.go
@@ -0,0 +1,242 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package txpool
+
+import (
+ "crypto/sha256"
+ "fmt"
+ "math/big"
+
+ "github.com/ava-labs/coreth/core"
+ "github.com/ava-labs/coreth/core/state"
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/params"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// ValidationOptions define certain differences between transaction validation
+// across the different pools without having to duplicate those checks.
+type ValidationOptions struct {
+ Config *params.ChainConfig // Chain configuration to selectively validate based on current fork rules
+
+ Accept uint8 // Bitmap of transaction types that should be accepted for the calling pool
+ MaxSize uint64 // Maximum size of a transaction that the caller can meaningfully handle
+ MinTip *big.Int // Minimum gas tip needed to allow a transaction into the caller pool
+}
+
+// ValidateTransaction is a helper method to check whether a transaction is valid
+// according to the consensus rules, but does not check state-dependent validation
+// (balance, nonce, etc).
+//
+// This check is public to allow different transaction pools to check the basic
+// rules without duplicating code and running the risk of missed updates.
+func ValidateTransaction(tx *types.Transaction, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof, head *types.Header, signer types.Signer, opts *ValidationOptions) error {
+ // Ensure transactions not implemented by the calling pool are rejected
+ if opts.Accept&(1<<tx.Type()) == 0 {
+ return fmt.Errorf("%w: tx type %v not supported by this pool", core.ErrTxTypeNotSupported, tx.Type())
+ }
+ // Before performing any expensive validations, sanity check that the tx is
+ // smaller than the maximum limit the pool can meaningfully handle
+ if tx.Size() > opts.MaxSize {
+ return fmt.Errorf("%w: transaction size %v, limit %v", ErrOversizedData, tx.Size(), opts.MaxSize)
+ }
+ // Ensure only transactions that have been enabled are accepted
+ if !opts.Config.IsApricotPhase2(head.Time) && tx.Type() != types.LegacyTxType {
+ return fmt.Errorf("%w: type %d rejected, pool not yet in Berlin", core.ErrTxTypeNotSupported, tx.Type())
+ }
+ if !opts.Config.IsApricotPhase3(head.Time) && tx.Type() == types.DynamicFeeTxType {
+ return fmt.Errorf("%w: type %d rejected, pool not yet in London", core.ErrTxTypeNotSupported, tx.Type())
+ }
+ if !opts.Config.IsCancun(head.Time) && tx.Type() == types.BlobTxType {
+ return fmt.Errorf("%w: type %d rejected, pool not yet in Cancun", core.ErrTxTypeNotSupported, tx.Type())
+ }
+ // Check whether the init code size has been exceeded
+ if opts.Config.IsDUpgrade(head.Time) && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize {
+ return fmt.Errorf("%w: code size %v, limit %v", core.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize)
+ }
+ // Transactions can't be negative. This may never happen using RLP decoded
+ // transactions but may occur for transactions created using the RPC.
+ if tx.Value().Sign() < 0 {
+ return ErrNegativeValue
+ }
+ // Ensure the transaction doesn't exceed the current block limit gas
+ if head.GasLimit < tx.Gas() {
+ return fmt.Errorf("%w: tx gas (%d) > current max gas (%d)", ErrGasLimit, tx.Gas(), head.GasLimit)
+ }
+ // Sanity check for extremely large numbers (supported by RLP or RPC)
+ if tx.GasFeeCap().BitLen() > 256 {
+ return core.ErrFeeCapVeryHigh
+ }
+ if tx.GasTipCap().BitLen() > 256 {
+ return core.ErrTipVeryHigh
+ }
+ // Ensure gasFeeCap is greater than or equal to gasTipCap
+ if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 {
+ return core.ErrTipAboveFeeCap
+ }
+ // Make sure the transaction is signed properly
+ from, err := types.Sender(signer, tx)
+ if err != nil {
+ return ErrInvalidSender
+ }
+ // Ensure the transaction has more gas than the bare minimum needed to cover
+ // the transaction metadata
+ intrGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, opts.Config.IsIstanbul(head.Number), opts.Config.IsDUpgrade(head.Time))
+ if err != nil {
+ return err
+ }
+ if tx.Gas() < intrGas {
+ return fmt.Errorf("%w: address %s needed %v, allowed %v", core.ErrIntrinsicGas, from.Hex(), intrGas, tx.Gas())
+ }
+ // Ensure the gasprice is high enough to cover the requirement of the calling
+ // pool and/or block producer
+ if tx.GasTipCapIntCmp(opts.MinTip) < 0 {
+ return fmt.Errorf("%w: address %s tip needed %v, tip permitted %v", ErrUnderpriced, from.Hex(), opts.MinTip, tx.GasTipCap())
+ }
+ // Ensure blob transactions have valid commitments
+ if tx.Type() == types.BlobTxType {
+ // Ensure the number of items in the blob transaction and various side
+ // data match up before doing any expensive validations
+ hashes := tx.BlobHashes()
+ if len(hashes) == 0 {
+ return fmt.Errorf("blobless blob transaction")
+ }
+ if len(hashes) > params.BlobTxMaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob {
+ return fmt.Errorf("too many blobs in transaction: have %d, permitted %d", len(hashes), params.BlobTxMaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob)
+ }
+ if len(blobs) != len(hashes) {
+ return fmt.Errorf("invalid number of %d blobs compared to %d blob hashes", len(blobs), len(hashes))
+ }
+ if len(commits) != len(hashes) {
+ return fmt.Errorf("invalid number of %d blob commitments compared to %d blob hashes", len(commits), len(hashes))
+ }
+ if len(proofs) != len(hashes) {
+ return fmt.Errorf("invalid number of %d blob proofs compared to %d blob hashes", len(proofs), len(hashes))
+ }
+ // Blob quantities match up, validate that the provers match with the
+ // transaction hash before getting to the cryptography
+ hasher := sha256.New()
+ for i, want := range hashes {
+ hasher.Write(commits[i][:])
+ hash := hasher.Sum(nil)
+ hasher.Reset()
+
+ var vhash common.Hash
+ vhash[0] = params.BlobTxHashVersion
+ copy(vhash[1:], hash[1:])
+
+ if vhash != want {
+ return fmt.Errorf("blob %d: computed hash %#x mismatches transaction one %#x", i, vhash, want)
+ }
+ }
+ // Blob commitments match with the hashes in the transaction, verify the
+ // blobs themselves via KZG
+ for i := range blobs {
+ if err := kzg4844.VerifyBlobProof(blobs[i], commits[i], proofs[i]); err != nil {
+ return fmt.Errorf("invalid blob %d: %v", i, err)
+ }
+ }
+ }
+ return nil
+}
+
+// ValidationOptionsWithState define certain differences between stateful transaction
+// validation across the different pools without having to duplicate those checks.
+type ValidationOptionsWithState struct {
+ State *state.StateDB // State database to check nonces and balances against
+ MinimumFee *big.Int // Minimum gas fee cap needed to allow a transaction into the caller pool
+
+ // FirstNonceGap is an optional callback to retrieve the first nonce gap in
+ // the list of pooled transactions of a specific account. If this method is
+ // set, nonce gaps will be checked and forbidden. If this method is not set,
+ // nonce gaps will be ignored and permitted.
+ FirstNonceGap func(addr common.Address) uint64
+
+ // ExistingExpenditure is a mandatory callback to retrieve the cumulative
+ // cost of the already pooled transactions to check for overdrafts.
+ ExistingExpenditure func(addr common.Address) *big.Int
+
+ // ExistingCost is a mandatory callback to retrieve an already pooled
+ // transaction's cost with the given nonce to check for overdrafts.
+ ExistingCost func(addr common.Address, nonce uint64) *big.Int
+}
+
+// ValidateTransactionWithState is a helper method to check whether a transaction
+// is valid according to the pool's internal state checks (balance, nonce, gaps).
+//
+// This check is public to allow different transaction pools to check the stateful
+// rules without duplicating code and running the risk of missed updates.
+func ValidateTransactionWithState(tx *types.Transaction, signer types.Signer, opts *ValidationOptionsWithState) error {
+ // Ensure the transaction adheres to nonce ordering
+ from, err := signer.Sender(tx) // already validated (and cached), but cleaner to check
+ if err != nil {
+ log.Error("Transaction sender recovery failed", "err", err)
+ return err
+ }
+ // Drop the transaction if the gas fee cap is below the pool's minimum fee
+ if opts.MinimumFee != nil && tx.GasFeeCapIntCmp(opts.MinimumFee) < 0 {
+ return fmt.Errorf("%w: address %s have gas fee cap (%d) < pool minimum fee cap (%d)", ErrUnderpriced, from.Hex(), tx.GasFeeCap(), opts.MinimumFee)
+ }
+
+ next := opts.State.GetNonce(from)
+ if next > tx.Nonce() {
+ return fmt.Errorf("%w: address %s next nonce %v, tx nonce %v", core.ErrNonceTooLow, from.Hex(), next, tx.Nonce())
+ }
+ // Ensure the transaction doesn't produce a nonce gap in pools that do not
+ // support arbitrary orderings
+ if opts.FirstNonceGap != nil {
+ if gap := opts.FirstNonceGap(from); gap < tx.Nonce() {
+ return fmt.Errorf("%w: address %s tx nonce %v, gapped nonce %v", core.ErrNonceTooHigh, from.Hex(), tx.Nonce(), gap)
+ }
+ }
+ // Ensure the transactor has enough funds to cover the transaction costs
+ var (
+ balance = opts.State.GetBalance(from)
+ cost = tx.Cost()
+ )
+ if balance.Cmp(cost) < 0 {
+ return fmt.Errorf("%w: address %s balance %v, tx cost %v, overshot %v", core.ErrInsufficientFunds, from.Hex(), balance, cost, new(big.Int).Sub(cost, balance))
+ }
+ // Ensure the transactor has enough funds to cover for replacements or nonce
+ // expansions without overdrafts
+ spent := opts.ExistingExpenditure(from)
+ if prev := opts.ExistingCost(from, tx.Nonce()); prev != nil {
+ bump := new(big.Int).Sub(cost, prev)
+ need := new(big.Int).Add(spent, bump)
+ if balance.Cmp(need) < 0 {
+ return fmt.Errorf("%w: address %s balance %v, queued cost %v, tx bumped %v, overshot %v", core.ErrInsufficientFunds, from.Hex(), balance, spent, bump, new(big.Int).Sub(need, balance))
+ }
+ } else {
+ need := new(big.Int).Add(spent, cost)
+ if balance.Cmp(need) < 0 {
+ return fmt.Errorf("%w: address %s balance %v, queued cost %v, tx cost %v, overshot %v", core.ErrInsufficientFunds, from.Hex(), balance, spent, cost, new(big.Int).Sub(need, balance))
+ }
+ }
+ return nil
+}
diff --git a/core/types/block.go b/core/types/block.go
index 4cc44bc84d..f6cb1776dd 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -102,8 +102,11 @@ type Header struct {
// headers.
BlockGasCost *big.Int `json:"blockGasCost" rlp:"optional"`
- // ExcessDataGas was added by EIP-4844 and is ignored in legacy headers.
- ExcessDataGas *big.Int `json:"excessDataGas" rlp:"optional"`
+ // BlobGasUsed was added by EIP-4844 and is ignored in legacy headers.
+ BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"`
+
+ // ExcessBlobGas was added by EIP-4844 and is ignored in legacy headers.
+ ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"`
}
// field type overrides for gencodec
@@ -118,6 +121,8 @@ type headerMarshaling struct {
ExtDataGasUsed *hexutil.Big
BlockGasCost *hexutil.Big
Hash common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON
+ BlobGasUsed *hexutil.Uint64
+ ExcessBlobGas *hexutil.Uint64
}
// Hash returns the block hash of the header, which is simply the keccak256 hash of its
@@ -158,7 +163,23 @@ type Body struct {
ExtData *[]byte `rlp:"nil"`
}
-// Block represents an entire block in the Ethereum blockchain.
+// Block represents an Ethereum block.
+//
+// Note the Block type tries to be 'immutable', and contains certain caches that rely
+// on that. The rules around block immutability are as follows:
+//
+// - We copy all data when the block is constructed. This makes references held inside
+// the block independent of whatever value was passed in.
+//
+// - We copy all header data on access. This is because any change to the header would mess
+// up the cached hash and size values in the block. Calling code is expected to take
+// advantage of this to avoid over-allocating!
+//
+// - When new body data is attached to the block, a shallow copy of the block is returned.
+// This ensures block modifications are race-free.
+//
+// - We do not copy body data on access because it does not affect the caches, and also
+// because it would be too expensive.
type Block struct {
header *Header
uncles []*Header
@@ -182,9 +203,8 @@ type extblock struct {
ExtData *[]byte `rlp:"nil"`
}
-// NewBlock creates a new block. The input data is copied,
-// changes to header and to the field values will not affect the
-// block.
+// NewBlock creates a new block. The input data is copied, changes to header and to the
+// field values will not affect the block.
//
// The values of TxHash, UncleHash, ReceiptHash and Bloom in header
// are ignored and set to values derived from the given txs, uncles
@@ -225,15 +245,7 @@ func NewBlock(
return b
}
-// NewBlockWithHeader creates a block with the given header data. The
-// header data is copied, changes to header and to the field values
-// will not affect the block.
-func NewBlockWithHeader(header *Header) *Block {
- return &Block{header: CopyHeader(header)}
-}
-
-// CopyHeader creates a deep copy of a block header to prevent side effects from
-// modifying a header variable.
+// CopyHeader creates a deep copy of a block header.
func CopyHeader(h *Header) *Header {
cpy := *h
if cpy.Difficulty = new(big.Int); h.Difficulty != nil {
@@ -255,10 +267,18 @@ func CopyHeader(h *Header) *Header {
cpy.Extra = make([]byte, len(h.Extra))
copy(cpy.Extra, h.Extra)
}
+ if h.ExcessBlobGas != nil {
+ cpy.ExcessBlobGas = new(uint64)
+ *cpy.ExcessBlobGas = *h.ExcessBlobGas
+ }
+ if h.BlobGasUsed != nil {
+ cpy.BlobGasUsed = new(uint64)
+ *cpy.BlobGasUsed = *h.BlobGasUsed
+ }
return &cpy
}
-// DecodeRLP decodes the Ethereum
+// DecodeRLP decodes a block from RLP.
func (b *Block) DecodeRLP(s *rlp.Stream) error {
var eb extblock
_, size, _ := s.Kind()
@@ -302,9 +322,9 @@ func (b *Block) Version() uint32 {
return b.version
}
-// EncodeRLP serializes b into the Ethereum RLP block format.
+// EncodeRLP serializes a block as RLP.
func (b *Block) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, extblock{
+ return rlp.Encode(w, &extblock{
Header: b.header,
Txs: b.transactions,
Uncles: b.uncles,
@@ -313,7 +333,14 @@ func (b *Block) EncodeRLP(w io.Writer) error {
})
}
-// TODO: copies
+// Body returns the non-header content of the block.
+// Note the returned data is not an independent copy.
+func (b *Block) Body() *Body {
+ return &Body{b.transactions, b.uncles, b.version, b.extdata}
+}
+
+// Accessors for body data. These do not return a copy because the content
+// of the body slices does not affect the cached hash/size in block.
func (b *Block) Uncles() []*Header { return b.uncles }
func (b *Block) Transactions() Transactions { return b.transactions }
@@ -327,6 +354,13 @@ func (b *Block) Transaction(hash common.Hash) *Transaction {
return nil
}
+// Header returns the block header (as a copy).
+func (b *Block) Header() *Header {
+ return CopyHeader(b.header)
+}
+
+// Header value accessors. These do copy!
+
func (b *Block) Number() *big.Int { return new(big.Int).Set(b.header.Number) }
func (b *Block) GasLimit() uint64 { return b.header.GasLimit }
func (b *Block) GasUsed() uint64 { return b.header.GasUsed }
@@ -367,10 +401,23 @@ func (b *Block) BlockGasCost() *big.Int {
return new(big.Int).Set(b.header.BlockGasCost)
}
-func (b *Block) Header() *Header { return CopyHeader(b.header) }
+func (b *Block) ExcessBlobGas() *uint64 {
+ var excessBlobGas *uint64
+ if b.header.ExcessBlobGas != nil {
+ excessBlobGas = new(uint64)
+ *excessBlobGas = *b.header.ExcessBlobGas
+ }
+ return excessBlobGas
+}
-// Body returns the non-header content of the block.
-func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles, b.version, b.extdata} }
+func (b *Block) BlobGasUsed() *uint64 {
+ var blobGasUsed *uint64
+ if b.header.BlobGasUsed != nil {
+ blobGasUsed = new(uint64)
+ *blobGasUsed = *b.header.BlobGasUsed
+ }
+ return blobGasUsed
+}
// Size returns the true RLP encoded storage size of the block, either by encoding
// and returning it, or returning a previously cached value.
@@ -384,6 +431,13 @@ func (b *Block) Size() uint64 {
return uint64(c)
}
+// NewBlockWithHeader creates a block with the given header data. The
+// header data is copied, changes to header and to the field values
+// will not affect the block.
+func NewBlockWithHeader(header *Header) *Block {
+ return &Block{header: CopyHeader(header)}
+}
+
type writeCounter uint64
func (c *writeCounter) Write(b []byte) (int, error) {
@@ -408,19 +462,17 @@ func CalcUncleHash(uncles []*Header) common.Hash {
// WithSeal returns a new block with the data from b but the header replaced with
// the sealed one.
func (b *Block) WithSeal(header *Header) *Block {
- cpy := *header
-
return &Block{
- header: &cpy,
+ header: CopyHeader(header),
transactions: b.transactions,
uncles: b.uncles,
}
}
-// WithBody returns a new block with the given transaction and uncle contents.
+// WithBody returns a copy of the block with the given transaction and uncle contents.
func (b *Block) WithBody(transactions []*Transaction, uncles []*Header, version uint32, extdata *[]byte) *Block {
block := &Block{
- header: CopyHeader(b.header),
+ header: b.header,
transactions: make([]*Transaction, len(transactions)),
uncles: make([]*Header, len(uncles)),
version: version,
diff --git a/core/types/block_test.go b/core/types/block_test.go
index ace763be0b..3467989283 100644
--- a/core/types/block_test.go
+++ b/core/types/block_test.go
@@ -28,17 +28,16 @@ package types
import (
"bytes"
- "hash"
"math/big"
"reflect"
"testing"
+ "github.com/ava-labs/coreth/internal/blocktest"
"github.com/ava-labs/coreth/params"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
- "golang.org/x/crypto/sha3"
)
// This test has been modified from https://github.com/ethereum/go-ethereum/blob/v1.9.21/core/types/block_test.go#L35 to fit
@@ -300,31 +299,6 @@ func BenchmarkEncodeBlock(b *testing.B) {
}
}
-// testHasher is the helper tool for transaction/receipt list hashing.
-// The original hasher is trie, in order to get rid of import cycle,
-// use the testing hasher instead.
-type testHasher struct {
- hasher hash.Hash
-}
-
-func newHasher() *testHasher {
- return &testHasher{hasher: sha3.NewLegacyKeccak256()}
-}
-
-func (h *testHasher) Reset() {
- h.hasher.Reset()
-}
-
-func (h *testHasher) Update(key, val []byte) error {
- h.hasher.Write(key)
- h.hasher.Write(val)
- return nil
-}
-
-func (h *testHasher) Hash() common.Hash {
- return common.BytesToHash(h.hasher.Sum(nil))
-}
-
func makeBenchBlock() *Block {
var (
key, _ = crypto.GenerateKey()
@@ -363,7 +337,7 @@ func makeBenchBlock() *Block {
Extra: []byte("benchmark uncle"),
}
}
- return NewBlock(header, txs, uncles, receipts, newHasher(), nil, true)
+ return NewBlock(header, txs, uncles, receipts, blocktest.NewHasher(), nil, true)
}
func TestAP4BlockEncoding(t *testing.T) {
diff --git a/core/types/gen_header_json.go b/core/types/gen_header_json.go
index 8ee37232ab..d67f9bea67 100644
--- a/core/types/gen_header_json.go
+++ b/core/types/gen_header_json.go
@@ -16,27 +16,28 @@ var _ = (*headerMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (h Header) MarshalJSON() ([]byte, error) {
type Header struct {
- ParentHash common.Hash `json:"parentHash" gencodec:"required"`
- UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"`
- Coinbase common.Address `json:"miner" gencodec:"required"`
- Root common.Hash `json:"stateRoot" gencodec:"required"`
- TxHash common.Hash `json:"transactionsRoot" gencodec:"required"`
- ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"`
- Bloom Bloom `json:"logsBloom" gencodec:"required"`
- Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
- Number *hexutil.Big `json:"number" gencodec:"required"`
- GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
- GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
- Time hexutil.Uint64 `json:"timestamp" gencodec:"required"`
- Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
- MixDigest common.Hash `json:"mixHash"`
- Nonce BlockNonce `json:"nonce"`
- ExtDataHash common.Hash `json:"extDataHash" gencodec:"required"`
- BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
- ExtDataGasUsed *hexutil.Big `json:"extDataGasUsed" rlp:"optional"`
- BlockGasCost *hexutil.Big `json:"blockGasCost" rlp:"optional"`
- ExcessDataGas *big.Int `json:"excessDataGas" rlp:"optional"`
- Hash common.Hash `json:"hash"`
+ ParentHash common.Hash `json:"parentHash" gencodec:"required"`
+ UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"`
+ Coinbase common.Address `json:"miner" gencodec:"required"`
+ Root common.Hash `json:"stateRoot" gencodec:"required"`
+ TxHash common.Hash `json:"transactionsRoot" gencodec:"required"`
+ ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"`
+ Bloom Bloom `json:"logsBloom" gencodec:"required"`
+ Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
+ Number *hexutil.Big `json:"number" gencodec:"required"`
+ GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
+ GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
+ Time hexutil.Uint64 `json:"timestamp" gencodec:"required"`
+ Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
+ MixDigest common.Hash `json:"mixHash"`
+ Nonce BlockNonce `json:"nonce"`
+ ExtDataHash common.Hash `json:"extDataHash" gencodec:"required"`
+ BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
+ ExtDataGasUsed *hexutil.Big `json:"extDataGasUsed" rlp:"optional"`
+ BlockGasCost *hexutil.Big `json:"blockGasCost" rlp:"optional"`
+ BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"`
+ ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
+ Hash common.Hash `json:"hash"`
}
var enc Header
enc.ParentHash = h.ParentHash
@@ -58,7 +59,8 @@ func (h Header) MarshalJSON() ([]byte, error) {
enc.BaseFee = (*hexutil.Big)(h.BaseFee)
enc.ExtDataGasUsed = (*hexutil.Big)(h.ExtDataGasUsed)
enc.BlockGasCost = (*hexutil.Big)(h.BlockGasCost)
- enc.ExcessDataGas = h.ExcessDataGas
+ enc.BlobGasUsed = (*hexutil.Uint64)(h.BlobGasUsed)
+ enc.ExcessBlobGas = (*hexutil.Uint64)(h.ExcessBlobGas)
enc.Hash = h.Hash()
return json.Marshal(&enc)
}
@@ -85,7 +87,8 @@ func (h *Header) UnmarshalJSON(input []byte) error {
BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
ExtDataGasUsed *hexutil.Big `json:"extDataGasUsed" rlp:"optional"`
BlockGasCost *hexutil.Big `json:"blockGasCost" rlp:"optional"`
- ExcessDataGas *big.Int `json:"excessDataGas" rlp:"optional"`
+ BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"`
+ ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
}
var dec Header
if err := json.Unmarshal(input, &dec); err != nil {
@@ -162,8 +165,11 @@ func (h *Header) UnmarshalJSON(input []byte) error {
if dec.BlockGasCost != nil {
h.BlockGasCost = (*big.Int)(dec.BlockGasCost)
}
- if dec.ExcessDataGas != nil {
- h.ExcessDataGas = dec.ExcessDataGas
+ if dec.BlobGasUsed != nil {
+ h.BlobGasUsed = (*uint64)(dec.BlobGasUsed)
+ }
+ if dec.ExcessBlobGas != nil {
+ h.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
}
return nil
}
diff --git a/core/types/gen_header_rlp.go b/core/types/gen_header_rlp.go
index 74f0feb76e..2e04b00cd3 100644
--- a/core/types/gen_header_rlp.go
+++ b/core/types/gen_header_rlp.go
@@ -44,8 +44,9 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
_tmp1 := obj.BaseFee != nil
_tmp2 := obj.ExtDataGasUsed != nil
_tmp3 := obj.BlockGasCost != nil
- _tmp4 := obj.ExcessDataGas != nil
- if _tmp1 || _tmp2 || _tmp3 || _tmp4 {
+ _tmp4 := obj.BlobGasUsed != nil
+ _tmp5 := obj.ExcessBlobGas != nil
+ if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 {
if obj.BaseFee == nil {
w.Write(rlp.EmptyString)
} else {
@@ -55,7 +56,7 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
w.WriteBigInt(obj.BaseFee)
}
}
- if _tmp2 || _tmp3 || _tmp4 {
+ if _tmp2 || _tmp3 || _tmp4 || _tmp5 {
if obj.ExtDataGasUsed == nil {
w.Write(rlp.EmptyString)
} else {
@@ -65,7 +66,7 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
w.WriteBigInt(obj.ExtDataGasUsed)
}
}
- if _tmp3 || _tmp4 {
+ if _tmp3 || _tmp4 || _tmp5 {
if obj.BlockGasCost == nil {
w.Write(rlp.EmptyString)
} else {
@@ -75,14 +76,18 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
w.WriteBigInt(obj.BlockGasCost)
}
}
- if _tmp4 {
- if obj.ExcessDataGas == nil {
- w.Write(rlp.EmptyString)
+ if _tmp4 || _tmp5 {
+ if obj.BlobGasUsed == nil {
+ w.Write([]byte{0x80})
} else {
- if obj.ExcessDataGas.Sign() == -1 {
- return rlp.ErrNegativeBigInt
- }
- w.WriteBigInt(obj.ExcessDataGas)
+ w.WriteUint64((*obj.BlobGasUsed))
+ }
+ }
+ if _tmp5 {
+ if obj.ExcessBlobGas == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.WriteUint64((*obj.ExcessBlobGas))
}
}
w.ListEnd(_tmp0)
diff --git a/core/types/gen_receipt_json.go b/core/types/gen_receipt_json.go
index d83be14477..4c641a9727 100644
--- a/core/types/gen_receipt_json.go
+++ b/core/types/gen_receipt_json.go
@@ -26,6 +26,8 @@ func (r Receipt) MarshalJSON() ([]byte, error) {
ContractAddress common.Address `json:"contractAddress"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
EffectiveGasPrice *hexutil.Big `json:"effectiveGasPrice"`
+ BlobGasUsed hexutil.Uint64 `json:"blobGasUsed,omitempty"`
+ BlobGasPrice *hexutil.Big `json:"blobGasPrice,omitempty"`
BlockHash common.Hash `json:"blockHash,omitempty"`
BlockNumber *hexutil.Big `json:"blockNumber,omitempty"`
TransactionIndex hexutil.Uint `json:"transactionIndex"`
@@ -41,6 +43,8 @@ func (r Receipt) MarshalJSON() ([]byte, error) {
enc.ContractAddress = r.ContractAddress
enc.GasUsed = hexutil.Uint64(r.GasUsed)
enc.EffectiveGasPrice = (*hexutil.Big)(r.EffectiveGasPrice)
+ enc.BlobGasUsed = hexutil.Uint64(r.BlobGasUsed)
+ enc.BlobGasPrice = (*hexutil.Big)(r.BlobGasPrice)
enc.BlockHash = r.BlockHash
enc.BlockNumber = (*hexutil.Big)(r.BlockNumber)
enc.TransactionIndex = hexutil.Uint(r.TransactionIndex)
@@ -60,6 +64,8 @@ func (r *Receipt) UnmarshalJSON(input []byte) error {
ContractAddress *common.Address `json:"contractAddress"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
EffectiveGasPrice *hexutil.Big `json:"effectiveGasPrice"`
+ BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed,omitempty"`
+ BlobGasPrice *hexutil.Big `json:"blobGasPrice,omitempty"`
BlockHash *common.Hash `json:"blockHash,omitempty"`
BlockNumber *hexutil.Big `json:"blockNumber,omitempty"`
TransactionIndex *hexutil.Uint `json:"transactionIndex"`
@@ -103,6 +109,12 @@ func (r *Receipt) UnmarshalJSON(input []byte) error {
if dec.EffectiveGasPrice != nil {
r.EffectiveGasPrice = (*big.Int)(dec.EffectiveGasPrice)
}
+ if dec.BlobGasUsed != nil {
+ r.BlobGasUsed = uint64(*dec.BlobGasUsed)
+ }
+ if dec.BlobGasPrice != nil {
+ r.BlobGasPrice = (*big.Int)(dec.BlobGasPrice)
+ }
if dec.BlockHash != nil {
r.BlockHash = *dec.BlockHash
}
diff --git a/core/types/hashes.go b/core/types/hashes.go
index 41631821f3..6f77a106b9 100644
--- a/core/types/hashes.go
+++ b/core/types/hashes.go
@@ -29,6 +29,7 @@ package types
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/log"
)
var (
@@ -50,3 +51,13 @@ var (
// EmptyExtDataHash is the known hash of empty extdata bytes.
EmptyExtDataHash = rlpHash([]byte(nil))
)
+
+// TrieRootHash returns the hash itself if it's non-empty or the predefined
+// emptyHash one instead.
+func TrieRootHash(hash common.Hash) common.Hash {
+ if hash == (common.Hash{}) {
+ log.Error("Zero trie root hash!")
+ return EmptyRootHash
+ }
+ return hash
+}
diff --git a/core/types/receipt.go b/core/types/receipt.go
index beca87e995..e7ab11505d 100644
--- a/core/types/receipt.go
+++ b/core/types/receipt.go
@@ -73,6 +73,8 @@ type Receipt struct {
ContractAddress common.Address `json:"contractAddress"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
EffectiveGasPrice *big.Int `json:"effectiveGasPrice"` // required, but tag omitted for backwards compatibility
+ BlobGasUsed uint64 `json:"blobGasUsed,omitempty"`
+ BlobGasPrice *big.Int `json:"blobGasPrice,omitempty"`
// Inclusion information: These fields provide information about the inclusion of the
// transaction corresponding to this receipt.
@@ -88,6 +90,8 @@ type receiptMarshaling struct {
CumulativeGasUsed hexutil.Uint64
GasUsed hexutil.Uint64
EffectiveGasPrice *hexutil.Big
+ BlobGasUsed hexutil.Uint64
+ BlobGasPrice *hexutil.Big
BlockNumber *hexutil.Big
TransactionIndex hexutil.Uint
}
@@ -204,7 +208,7 @@ func (r *Receipt) decodeTyped(b []byte) error {
return errShortTypedReceipt
}
switch b[0] {
- case DynamicFeeTxType, AccessListTxType:
+ case DynamicFeeTxType, AccessListTxType, BlobTxType:
var data receiptRLP
err := rlp.DecodeBytes(b[1:], &data)
if err != nil {
@@ -306,14 +310,13 @@ func (rs Receipts) Len() int { return len(rs) }
func (rs Receipts) EncodeIndex(i int, w *bytes.Buffer) {
r := rs[i]
data := &receiptRLP{r.statusEncoding(), r.CumulativeGasUsed, r.Bloom, r.Logs}
- switch r.Type {
- case LegacyTxType:
- rlp.Encode(w, data)
- case AccessListTxType:
- w.WriteByte(AccessListTxType)
+ if r.Type == LegacyTxType {
rlp.Encode(w, data)
- case DynamicFeeTxType:
- w.WriteByte(DynamicFeeTxType)
+ return
+ }
+ w.WriteByte(r.Type)
+ switch r.Type {
+ case AccessListTxType, DynamicFeeTxType, BlobTxType:
rlp.Encode(w, data)
default:
// For unsupported types, write nothing. Since this is for
@@ -324,7 +327,7 @@ func (rs Receipts) EncodeIndex(i int, w *bytes.Buffer) {
// DeriveFields fills the receipts with their computed fields based on consensus
// data and contextual infos like containing block and transactions.
-func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, time uint64, baseFee *big.Int, txs []*Transaction) error {
+func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, time uint64, baseFee *big.Int, blobGasPrice *big.Int, txs []*Transaction) error {
signer := MakeSigner(config, new(big.Int).SetUint64(number), time)
logIndex := uint(0)
@@ -335,9 +338,14 @@ func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, nu
// The transaction type and hash can be retrieved from the transaction itself
rs[i].Type = txs[i].Type()
rs[i].TxHash = txs[i].Hash()
-
rs[i].EffectiveGasPrice = txs[i].inner.effectiveGasPrice(new(big.Int), baseFee)
+ // EIP-4844 blob transaction fields
+ if txs[i].Type() == BlobTxType {
+ rs[i].BlobGasUsed = txs[i].BlobGas()
+ rs[i].BlobGasPrice = blobGasPrice
+ }
+
// block location fields
rs[i].BlockHash = hash
rs[i].BlockNumber = new(big.Int).SetUint64(number)
diff --git a/core/types/receipt_test.go b/core/types/receipt_test.go
index b73fd99794..32da193c60 100644
--- a/core/types/receipt_test.go
+++ b/core/types/receipt_test.go
@@ -140,22 +140,24 @@ var (
}),
// EIP-4844 transactions.
NewTx(&BlobTx{
- To: &to6,
+ To: to6,
Nonce: 6,
Value: uint256.NewInt(6),
Gas: 6,
GasTipCap: uint256.NewInt(66),
GasFeeCap: uint256.NewInt(1066),
BlobFeeCap: uint256.NewInt(100066),
+ BlobHashes: []common.Hash{{}},
}),
NewTx(&BlobTx{
- To: &to7,
+ To: to7,
Nonce: 7,
Value: uint256.NewInt(7),
Gas: 7,
GasTipCap: uint256.NewInt(77),
GasFeeCap: uint256.NewInt(1077),
BlobFeeCap: uint256.NewInt(100077),
+ BlobHashes: []common.Hash{{}, {}, {}},
}),
}
@@ -280,6 +282,8 @@ var (
TxHash: txs[5].Hash(),
GasUsed: 6,
EffectiveGasPrice: big.NewInt(1066),
+ BlobGasUsed: params.BlobTxBlobGasPerBlob,
+ BlobGasPrice: big.NewInt(920),
BlockHash: blockHash,
BlockNumber: blockNumber,
TransactionIndex: 5,
@@ -293,6 +297,8 @@ var (
TxHash: txs[6].Hash(),
GasUsed: 7,
EffectiveGasPrice: big.NewInt(1077),
+ BlobGasUsed: 3 * params.BlobTxBlobGasPerBlob,
+ BlobGasPrice: big.NewInt(920),
BlockHash: blockHash,
BlockNumber: blockNumber,
TransactionIndex: 6,
@@ -313,8 +319,9 @@ func TestDecodeEmptyTypedReceipt(t *testing.T) {
func TestDeriveFields(t *testing.T) {
// Re-derive receipts.
basefee := big.NewInt(1000)
+ blobGasPrice := big.NewInt(920)
derivedReceipts := clearComputedFieldsOnReceipts(receipts)
- err := Receipts(derivedReceipts).DeriveFields(params.TestChainConfig, blockHash, blockNumber.Uint64(), blockTime, basefee, txs)
+ err := Receipts(derivedReceipts).DeriveFields(params.TestChainConfig, blockHash, blockNumber.Uint64(), blockTime, basefee, blobGasPrice, txs)
if err != nil {
t.Fatalf("DeriveFields(...) = %v, want ", err)
}
@@ -511,6 +518,9 @@ func clearComputedFieldsOnReceipt(receipt *Receipt) *Receipt {
cpy.ContractAddress = common.Address{0xff, 0xff, 0x33}
cpy.GasUsed = 0xffffffff
cpy.Logs = clearComputedFieldsOnLogs(receipt.Logs)
+ cpy.EffectiveGasPrice = big.NewInt(0)
+ cpy.BlobGasUsed = 0
+ cpy.BlobGasPrice = nil
return &cpy
}
diff --git a/core/types/state_account.go b/core/types/state_account.go
index ca41dc3dc6..0118721fda 100644
--- a/core/types/state_account.go
+++ b/core/types/state_account.go
@@ -27,9 +27,11 @@
package types
import (
+ "bytes"
"math/big"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/rlp"
)
//go:generate go run github.com/ethereum/go-ethereum/rlp/rlpgen -type StateAccount -out gen_account_rlp.go
@@ -43,3 +45,91 @@ type StateAccount struct {
CodeHash []byte
IsMultiCoin bool
}
+
+// NewEmptyStateAccount constructs an empty state account.
+func NewEmptyStateAccount() *StateAccount {
+ return &StateAccount{
+ Balance: new(big.Int),
+ Root: EmptyRootHash,
+ CodeHash: EmptyCodeHash.Bytes(),
+ }
+}
+
+// Copy returns a deep-copied state account object.
+func (acct *StateAccount) Copy() *StateAccount {
+ var balance *big.Int
+ if acct.Balance != nil {
+ balance = new(big.Int).Set(acct.Balance)
+ }
+ return &StateAccount{
+ Nonce: acct.Nonce,
+ Balance: balance,
+ Root: acct.Root,
+ CodeHash: common.CopyBytes(acct.CodeHash),
+ }
+}
+
+// SlimAccount is a modified version of an Account, where the root is replaced
+// with a byte slice. This format can be used to represent full-consensus format
+// or slim format which replaces the empty root and code hash as nil byte slice.
+type SlimAccount struct {
+ Nonce uint64
+ Balance *big.Int
+ Root []byte // Nil if root equals to types.EmptyRootHash
+ CodeHash []byte // Nil if hash equals to types.EmptyCodeHash
+ IsMultiCoin bool
+}
+
+// SlimAccountRLP encodes the state account in 'slim RLP' format.
+func SlimAccountRLP(account StateAccount) []byte {
+ slim := SlimAccount{
+ Nonce: account.Nonce,
+ Balance: account.Balance,
+ IsMultiCoin: account.IsMultiCoin,
+ }
+ if account.Root != EmptyRootHash {
+ slim.Root = account.Root[:]
+ }
+ if !bytes.Equal(account.CodeHash, EmptyCodeHash[:]) {
+ slim.CodeHash = account.CodeHash
+ }
+ data, err := rlp.EncodeToBytes(slim)
+ if err != nil {
+ panic(err)
+ }
+ return data
+}
+
+// FullAccount decodes the data on the 'slim RLP' format and return
+// the consensus format account.
+func FullAccount(data []byte) (*StateAccount, error) {
+ var slim SlimAccount
+ if err := rlp.DecodeBytes(data, &slim); err != nil {
+ return nil, err
+ }
+ var account StateAccount
+ account.Nonce, account.Balance = slim.Nonce, slim.Balance
+ account.IsMultiCoin = slim.IsMultiCoin
+
+ // Interpret the storage root and code hash in slim format.
+ if len(slim.Root) == 0 {
+ account.Root = EmptyRootHash
+ } else {
+ account.Root = common.BytesToHash(slim.Root)
+ }
+ if len(slim.CodeHash) == 0 {
+ account.CodeHash = EmptyCodeHash[:]
+ } else {
+ account.CodeHash = slim.CodeHash
+ }
+ return &account, nil
+}
+
+// FullAccountRLP converts data on the 'slim RLP' format into the full RLP-format.
+func FullAccountRLP(data []byte) ([]byte, error) {
+ account, err := FullAccount(data)
+ if err != nil {
+ return nil, err
+ }
+ return rlp.EncodeToBytes(account)
+}
diff --git a/core/types/transaction.go b/core/types/transaction.go
index 34c185a7b7..34c888a4c4 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -299,10 +299,10 @@ func (tx *Transaction) GasTipCap() *big.Int { return new(big.Int).Set(tx.inner.g
// GasFeeCap returns the fee cap per gas of the transaction.
func (tx *Transaction) GasFeeCap() *big.Int { return new(big.Int).Set(tx.inner.gasFeeCap()) }
-// BlobGas returns the data gas limit of the transaction for blob transactions, 0 otherwise.
+// BlobGas returns the blob gas limit of the transaction for blob transactions, 0 otherwise.
func (tx *Transaction) BlobGas() uint64 { return tx.inner.blobGas() }
-// BlobGasFeeCap returns the data gas fee cap per data gas of the transaction for blob transactions, nil otherwise.
+// BlobGasFeeCap returns the blob gas fee cap per blob gas of the transaction for blob transactions, nil otherwise.
func (tx *Transaction) BlobGasFeeCap() *big.Int { return tx.inner.blobGasFeeCap() }
// BlobHashes returns the hases of the blob commitments for blob transactions, nil otherwise.
@@ -575,10 +575,10 @@ func (s *TxByPriceAndTime) Pop() interface{} {
// transactions in a profit-maximizing sorted order, while supporting removing
// entire batches of transactions for non-executable accounts.
type TransactionsByPriceAndNonce struct {
- txs map[common.Address]Transactions // Per account nonce-sorted list of transactions
- heads TxByPriceAndTime // Next transaction for each unique account (price heap)
- signer Signer // Signer for the set of transactions
- baseFee *big.Int // Current base fee
+ txs map[common.Address][]*Transaction // Per account nonce-sorted list of transactions
+ heads TxByPriceAndTime // Next transaction for each unique account (price heap)
+ signer Signer // Signer for the set of transactions
+ baseFee *big.Int // Current base fee
}
// NewTransactionsByPriceAndNonce creates a transaction set that can retrieve
@@ -586,7 +586,7 @@ type TransactionsByPriceAndNonce struct {
//
// Note, the input map is reowned so the caller should not interact any more with
// if after providing it to the constructor.
-func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transactions, baseFee *big.Int) *TransactionsByPriceAndNonce {
+func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address][]*Transaction, baseFee *big.Int) *TransactionsByPriceAndNonce {
// Initialize a price and received time based heap with the head transactions
heads := make(TxByPriceAndTime, 0, len(txs))
for from, accTxs := range txs {
diff --git a/core/types/transaction_marshalling.go b/core/types/transaction_marshalling.go
index 4d0dd2331f..2437a5b2f3 100644
--- a/core/types/transaction_marshalling.go
+++ b/core/types/transaction_marshalling.go
@@ -47,7 +47,7 @@ type txJSON struct {
GasPrice *hexutil.Big `json:"gasPrice"`
MaxPriorityFeePerGas *hexutil.Big `json:"maxPriorityFeePerGas"`
MaxFeePerGas *hexutil.Big `json:"maxFeePerGas"`
- MaxFeePerDataGas *hexutil.Big `json:"maxFeePerDataGas,omitempty"`
+ MaxFeePerBlobGas *hexutil.Big `json:"maxFeePerBlobGas,omitempty"`
Value *hexutil.Big `json:"value"`
Input *hexutil.Bytes `json:"input"`
AccessList *AccessList `json:"accessList,omitempty"`
@@ -55,11 +55,32 @@ type txJSON struct {
V *hexutil.Big `json:"v"`
R *hexutil.Big `json:"r"`
S *hexutil.Big `json:"s"`
+ YParity *hexutil.Uint64 `json:"yParity,omitempty"`
// Only used for encoding:
Hash common.Hash `json:"hash"`
}
+// yParityValue returns the YParity value from JSON. For backwards-compatibility reasons,
+// this can be given in the 'v' field or the 'yParity' field. If both exist, they must match.
+func (tx *txJSON) yParityValue() (*big.Int, error) {
+ if tx.YParity != nil {
+ val := uint64(*tx.YParity)
+ if val != 0 && val != 1 {
+ return nil, errors.New("'yParity' field must be 0 or 1")
+ }
+ bigval := new(big.Int).SetUint64(val)
+ if tx.V != nil && tx.V.ToInt().Cmp(bigval) != 0 {
+ return nil, errors.New("'v' and 'yParity' fields do not match")
+ }
+ return bigval, nil
+ }
+ if tx.V != nil {
+ return tx.V.ToInt(), nil
+ }
+ return nil, errors.New("missing 'yParity' or 'v' field in transaction")
+}
+
// MarshalJSON marshals as JSON with a hash.
func (tx *Transaction) MarshalJSON() ([]byte, error) {
var enc txJSON
@@ -79,6 +100,9 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) {
enc.V = (*hexutil.Big)(itx.V)
enc.R = (*hexutil.Big)(itx.R)
enc.S = (*hexutil.Big)(itx.S)
+ if tx.Protected() {
+ enc.ChainID = (*hexutil.Big)(tx.ChainId())
+ }
case *AccessListTx:
enc.ChainID = (*hexutil.Big)(itx.ChainID)
@@ -92,6 +116,8 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) {
enc.V = (*hexutil.Big)(itx.V)
enc.R = (*hexutil.Big)(itx.R)
enc.S = (*hexutil.Big)(itx.S)
+ yparity := itx.V.Uint64()
+ enc.YParity = (*hexutil.Uint64)(&yparity)
case *DynamicFeeTx:
enc.ChainID = (*hexutil.Big)(itx.ChainID)
@@ -106,6 +132,8 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) {
enc.V = (*hexutil.Big)(itx.V)
enc.R = (*hexutil.Big)(itx.R)
enc.S = (*hexutil.Big)(itx.S)
+ yparity := itx.V.Uint64()
+ enc.YParity = (*hexutil.Uint64)(&yparity)
case *BlobTx:
enc.ChainID = (*hexutil.Big)(itx.ChainID.ToBig())
@@ -113,7 +141,7 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) {
enc.Gas = (*hexutil.Uint64)(&itx.Gas)
enc.MaxFeePerGas = (*hexutil.Big)(itx.GasFeeCap.ToBig())
enc.MaxPriorityFeePerGas = (*hexutil.Big)(itx.GasTipCap.ToBig())
- enc.MaxFeePerDataGas = (*hexutil.Big)(itx.BlobFeeCap.ToBig())
+ enc.MaxFeePerBlobGas = (*hexutil.Big)(itx.BlobFeeCap.ToBig())
enc.Value = (*hexutil.Big)(itx.Value.ToBig())
enc.Input = (*hexutil.Bytes)(&itx.Data)
enc.AccessList = &itx.AccessList
@@ -122,6 +150,8 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) {
enc.V = (*hexutil.Big)(itx.V.ToBig())
enc.R = (*hexutil.Big)(itx.R.ToBig())
enc.S = (*hexutil.Big)(itx.S.ToBig())
+ yparity := itx.V.Uint64()
+ enc.YParity = (*hexutil.Uint64)(&yparity)
}
return json.Marshal(&enc)
}
@@ -129,7 +159,8 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON.
func (tx *Transaction) UnmarshalJSON(input []byte) error {
var dec txJSON
- if err := json.Unmarshal(input, &dec); err != nil {
+ err := json.Unmarshal(input, &dec)
+ if err != nil {
return err
}
@@ -162,20 +193,23 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'input' in transaction")
}
itx.Data = *dec.Input
- if dec.V == nil {
- return errors.New("missing required field 'v' in transaction")
- }
- itx.V = (*big.Int)(dec.V)
+
+ // signature R
if dec.R == nil {
return errors.New("missing required field 'r' in transaction")
}
itx.R = (*big.Int)(dec.R)
+ // signature S
if dec.S == nil {
return errors.New("missing required field 's' in transaction")
}
itx.S = (*big.Int)(dec.S)
- withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0
- if withSignature {
+ // signature V
+ if dec.V == nil {
+ return errors.New("missing required field 'v' in transaction")
+ }
+ itx.V = (*big.Int)(dec.V)
+ if itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 {
if err := sanityCheckSignature(itx.V, itx.R, itx.S, true); err != nil {
return err
}
@@ -211,23 +245,26 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'input' in transaction")
}
itx.Data = *dec.Input
- if dec.V == nil {
- return errors.New("missing required field 'v' in transaction")
- }
if dec.AccessList != nil {
itx.AccessList = *dec.AccessList
}
- itx.V = (*big.Int)(dec.V)
+
+ // signature R
if dec.R == nil {
return errors.New("missing required field 'r' in transaction")
}
itx.R = (*big.Int)(dec.R)
+ // signature S
if dec.S == nil {
return errors.New("missing required field 's' in transaction")
}
itx.S = (*big.Int)(dec.S)
- withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0
- if withSignature {
+ // signature V
+ itx.V, err = dec.yParityValue()
+ if err != nil {
+ return err
+ }
+ if itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 {
if err := sanityCheckSignature(itx.V, itx.R, itx.S, false); err != nil {
return err
}
@@ -273,17 +310,23 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
if dec.AccessList != nil {
itx.AccessList = *dec.AccessList
}
- itx.V = (*big.Int)(dec.V)
+
+ // signature R
if dec.R == nil {
return errors.New("missing required field 'r' in transaction")
}
itx.R = (*big.Int)(dec.R)
+ // signature S
if dec.S == nil {
return errors.New("missing required field 's' in transaction")
}
itx.S = (*big.Int)(dec.S)
- withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0
- if withSignature {
+ // signature V
+ itx.V, err = dec.yParityValue()
+ if err != nil {
+ return err
+ }
+ if itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 {
if err := sanityCheckSignature(itx.V, itx.R, itx.S, false); err != nil {
return err
}
@@ -300,9 +343,10 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'nonce' in transaction")
}
itx.Nonce = uint64(*dec.Nonce)
- if dec.To != nil {
- itx.To = dec.To
+ if dec.To == nil {
+ return errors.New("missing required field 'to' in transaction")
}
+ itx.To = *dec.To
if dec.Gas == nil {
return errors.New("missing required field 'gas' for txdata")
}
@@ -315,10 +359,10 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'maxFeePerGas' for txdata")
}
itx.GasFeeCap = uint256.MustFromBig((*big.Int)(dec.MaxFeePerGas))
- if dec.MaxFeePerDataGas == nil {
- return errors.New("missing required field 'maxFeePerDataGas' for txdata")
+ if dec.MaxFeePerBlobGas == nil {
+ return errors.New("missing required field 'maxFeePerBlobGas' for txdata")
}
- itx.BlobFeeCap = uint256.MustFromBig((*big.Int)(dec.MaxFeePerDataGas))
+ itx.BlobFeeCap = uint256.MustFromBig((*big.Int)(dec.MaxFeePerBlobGas))
if dec.Value == nil {
return errors.New("missing required field 'value' in transaction")
}
@@ -337,18 +381,35 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'blobVersionedHashes' in transaction")
}
itx.BlobHashes = dec.BlobVersionedHashes
- itx.V = uint256.MustFromBig((*big.Int)(dec.V))
+
+ // signature R
+ var ok bool
if dec.R == nil {
return errors.New("missing required field 'r' in transaction")
}
- itx.R = uint256.MustFromBig((*big.Int)(dec.R))
+ itx.R, ok = uint256.FromBig((*big.Int)(dec.R))
+ if !ok {
+ return errors.New("'r' value overflows uint256")
+ }
+ // signature S
if dec.S == nil {
return errors.New("missing required field 's' in transaction")
}
- itx.S = uint256.MustFromBig((*big.Int)(dec.S))
- withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0
- if withSignature {
- if err := sanityCheckSignature(itx.V.ToBig(), itx.R.ToBig(), itx.S.ToBig(), false); err != nil {
+ itx.S, ok = uint256.FromBig((*big.Int)(dec.S))
+ if !ok {
+ return errors.New("'s' value overflows uint256")
+ }
+ // signature V
+ vbig, err := dec.yParityValue()
+ if err != nil {
+ return err
+ }
+ itx.V, ok = uint256.FromBig(vbig)
+ if !ok {
+ return errors.New("'v' value overflows uint256")
+ }
+ if itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 {
+ if err := sanityCheckSignature(vbig, itx.R.ToBig(), itx.S.ToBig(), false); err != nil {
return err
}
}
diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go
index 09f6c97476..3307e20171 100644
--- a/core/types/transaction_signing.go
+++ b/core/types/transaction_signing.go
@@ -65,9 +65,9 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime uint
}
// LatestSigner returns the 'most permissive' Signer available for the given chain
-// configuration. Specifically, this enables support of EIP-155 replay protection and
-// EIP-2930 access list transactions when their respective forks are scheduled to occur at
-// any block number in the chain config.
+// configuration. Specifically, this enables support of all types of transactions
+// when their respective forks are scheduled to occur at any block number (or time)
+// in the chain config.
//
// Use this in transaction-handling code where the current block number is unknown. If you
// have the current block number available, use MakeSigner instead.
diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go
index 995afbb9a0..2e789b340e 100644
--- a/core/types/transaction_test.go
+++ b/core/types/transaction_test.go
@@ -291,7 +291,7 @@ func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) {
signer := LatestSignerForChainID(common.Big1)
// Generate a batch of transactions with overlapping values, but shifted nonces
- groups := map[common.Address]Transactions{}
+ groups := map[common.Address][]*Transaction{}
expectedCount := 0
for start, key := range keys {
addr := crypto.PubkeyToAddress(key.PublicKey)
@@ -378,7 +378,7 @@ func TestTransactionTimeSort(t *testing.T) {
signer := HomesteadSigner{}
// Generate a batch of transactions with overlapping prices, but different creation times
- groups := map[common.Address]Transactions{}
+ groups := map[common.Address][]*Transaction{}
for start, key := range keys {
addr := crypto.PubkeyToAddress(key.PublicKey)
@@ -536,7 +536,7 @@ func assertEqual(orig *Transaction, cpy *Transaction) error {
}
if orig.AccessList() != nil {
if !reflect.DeepEqual(orig.AccessList(), cpy.AccessList()) {
- return fmt.Errorf("access list wrong!")
+ return errors.New("access list wrong!")
}
}
return nil
diff --git a/core/types/tx_blob.go b/core/types/tx_blob.go
index 3141749be0..e168fa3a77 100644
--- a/core/types/tx_blob.go
+++ b/core/types/tx_blob.go
@@ -31,11 +31,11 @@ type BlobTx struct {
GasTipCap *uint256.Int // a.k.a. maxPriorityFeePerGas
GasFeeCap *uint256.Int // a.k.a. maxFeePerGas
Gas uint64
- To *common.Address `rlp:"nil"` // nil means contract creation
+ To common.Address
Value *uint256.Int
Data []byte
AccessList AccessList
- BlobFeeCap *uint256.Int // a.k.a. maxFeePerDataGas
+ BlobFeeCap *uint256.Int // a.k.a. maxFeePerBlobGas
BlobHashes []common.Hash
// Signature values
@@ -48,7 +48,7 @@ type BlobTx struct {
func (tx *BlobTx) copy() TxData {
cpy := &BlobTx{
Nonce: tx.Nonce,
- To: copyAddressPtr(tx.To),
+ To: tx.To,
Data: common.CopyBytes(tx.Data),
Gas: tx.Gas,
// These are copied below.
@@ -104,8 +104,8 @@ func (tx *BlobTx) gasTipCap() *big.Int { return tx.GasTipCap.ToBig() }
func (tx *BlobTx) gasPrice() *big.Int { return tx.GasFeeCap.ToBig() }
func (tx *BlobTx) value() *big.Int { return tx.Value.ToBig() }
func (tx *BlobTx) nonce() uint64 { return tx.Nonce }
-func (tx *BlobTx) to() *common.Address { return tx.To }
-func (tx *BlobTx) blobGas() uint64 { return params.BlobTxDataGasPerBlob * uint64(len(tx.BlobHashes)) }
+func (tx *BlobTx) to() *common.Address { tmp := tx.To; return &tmp }
+func (tx *BlobTx) blobGas() uint64 { return params.BlobTxBlobGasPerBlob * uint64(len(tx.BlobHashes)) }
func (tx *BlobTx) blobGasFeeCap() *big.Int { return tx.BlobFeeCap.ToBig() }
func (tx *BlobTx) blobHashes() []common.Hash { return tx.BlobHashes }
diff --git a/core/vm/contracts.go b/core/vm/contracts.go
index 0f9b2c8b59..863d807d63 100644
--- a/core/vm/contracts.go
+++ b/core/vm/contracts.go
@@ -43,6 +43,7 @@ import (
"github.com/ethereum/go-ethereum/crypto/blake2b"
"github.com/ethereum/go-ethereum/crypto/bls12381"
"github.com/ethereum/go-ethereum/crypto/bn256"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
big2 "github.com/holiman/big"
"golang.org/x/crypto/ripemd160"
)
@@ -159,7 +160,26 @@ var PrecompiledContractsBanff = map[common.Address]precompile.StatefulPrecompile
NativeAssetCallAddr: &deprecatedContract{},
}
+// PrecompiledContractsCancun contains the default set of pre-compiled Ethereum
+// contracts used in the Cancun release.
+var PrecompiledContractsCancun = map[common.Address]precompile.StatefulPrecompiledContract{
+ common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}),
+ common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}),
+ common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}),
+ common.BytesToAddress([]byte{4}): newWrappedPrecompiledContract(&dataCopy{}),
+ common.BytesToAddress([]byte{5}): newWrappedPrecompiledContract(&bigModExp{eip2565: true}),
+ common.BytesToAddress([]byte{6}): newWrappedPrecompiledContract(&bn256AddIstanbul{}),
+ common.BytesToAddress([]byte{7}): newWrappedPrecompiledContract(&bn256ScalarMulIstanbul{}),
+ common.BytesToAddress([]byte{8}): newWrappedPrecompiledContract(&bn256PairingIstanbul{}),
+ common.BytesToAddress([]byte{9}): newWrappedPrecompiledContract(&blake2F{}),
+ common.BytesToAddress([]byte{0x0a}): newWrappedPrecompiledContract(&kzgPointEvaluation{}),
+ genesisContractAddr: &deprecatedContract{},
+ NativeAssetBalanceAddr: &deprecatedContract{},
+ NativeAssetCallAddr: &deprecatedContract{},
+}
+
var (
+ PrecompiledAddressesCancun []common.Address
PrecompiledAddressesBanff []common.Address
PrecompiledAddressesApricotPhase6 []common.Address
PrecompiledAddressesApricotPhasePre6 []common.Address
@@ -192,6 +212,9 @@ func init() {
for k := range PrecompiledContractsBanff {
PrecompiledAddressesBanff = append(PrecompiledAddressesBanff, k)
}
+ for k := range PrecompiledContractsCancun {
+ PrecompiledAddressesCancun = append(PrecompiledAddressesCancun, k)
+ }
// Set of all native precompile addresses that are in use
// Note: this will repeat some addresses, but this is cheap and makes the code clearer.
@@ -233,6 +256,8 @@ func init() {
// ActivePrecompiles returns the precompiles enabled with the current configuration.
func ActivePrecompiles(rules params.Rules) []common.Address {
switch {
+ case rules.IsCancun:
+ return PrecompiledAddressesCancun
case rules.IsBanff:
return PrecompiledAddressesBanff
case rules.IsApricotPhase2:
@@ -1154,3 +1179,67 @@ func (c *bls12381MapG2) Run(input []byte) ([]byte, error) {
// Encode the G2 point to 256 bytes
return g.EncodePoint(r), nil
}
+
+// kzgPointEvaluation implements the EIP-4844 point evaluation precompile.
+type kzgPointEvaluation struct{}
+
+// RequiredGas estimates the gas required for running the point evaluation precompile.
+func (b *kzgPointEvaluation) RequiredGas(input []byte) uint64 {
+ return params.BlobTxPointEvaluationPrecompileGas
+}
+
+const (
+ blobVerifyInputLength = 192 // Max input length for the point evaluation precompile.
+ blobCommitmentVersionKZG uint8 = 0x01 // Version byte for the point evaluation precompile.
+ blobPrecompileReturnValue = "000000000000000000000000000000000000000000000000000000000000100073eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001"
+)
+
+var (
+ errBlobVerifyInvalidInputLength = errors.New("invalid input length")
+ errBlobVerifyMismatchedVersion = errors.New("mismatched versioned hash")
+ errBlobVerifyKZGProof = errors.New("error verifying kzg proof")
+)
+
+// Run executes the point evaluation precompile.
+func (b *kzgPointEvaluation) Run(input []byte) ([]byte, error) {
+ if len(input) != blobVerifyInputLength {
+ return nil, errBlobVerifyInvalidInputLength
+ }
+ // versioned hash: first 32 bytes
+ var versionedHash common.Hash
+ copy(versionedHash[:], input[:])
+
+ var (
+ point kzg4844.Point
+ claim kzg4844.Claim
+ )
+ // Evaluation point: next 32 bytes
+ copy(point[:], input[32:])
+ // Expected output: next 32 bytes
+ copy(claim[:], input[64:])
+
+ // input kzg point: next 48 bytes
+ var commitment kzg4844.Commitment
+ copy(commitment[:], input[96:])
+ if kZGToVersionedHash(commitment) != versionedHash {
+ return nil, errBlobVerifyMismatchedVersion
+ }
+
+ // Proof: next 48 bytes
+ var proof kzg4844.Proof
+ copy(proof[:], input[144:])
+
+ if err := kzg4844.VerifyProof(commitment, point, claim, proof); err != nil {
+ return nil, fmt.Errorf("%w: %v", errBlobVerifyKZGProof, err)
+ }
+
+ return common.Hex2Bytes(blobPrecompileReturnValue), nil
+}
+
+// kZGToVersionedHash implements kzg_to_versioned_hash from EIP-4844
+func kZGToVersionedHash(kzg kzg4844.Commitment) common.Hash {
+ h := sha256.Sum256(kzg[:])
+ h[0] = blobCommitmentVersionKZG
+
+ return h
+}
diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go
index e4114a046e..8608fdf36d 100644
--- a/core/vm/contracts_test.go
+++ b/core/vm/contracts_test.go
@@ -66,15 +66,17 @@ var allPrecompiles = map[common.Address]PrecompiledContract{
common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{},
common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{},
common.BytesToAddress([]byte{9}): &blake2F{},
- common.BytesToAddress([]byte{10}): &bls12381G1Add{},
- common.BytesToAddress([]byte{11}): &bls12381G1Mul{},
- common.BytesToAddress([]byte{12}): &bls12381G1MultiExp{},
- common.BytesToAddress([]byte{13}): &bls12381G2Add{},
- common.BytesToAddress([]byte{14}): &bls12381G2Mul{},
- common.BytesToAddress([]byte{15}): &bls12381G2MultiExp{},
- common.BytesToAddress([]byte{16}): &bls12381Pairing{},
- common.BytesToAddress([]byte{17}): &bls12381MapG1{},
- common.BytesToAddress([]byte{18}): &bls12381MapG2{},
+ common.BytesToAddress([]byte{0x0a}): &kzgPointEvaluation{},
+
+ common.BytesToAddress([]byte{0x0f, 0x0a}): &bls12381G1Add{},
+ common.BytesToAddress([]byte{0x0f, 0x0b}): &bls12381G1Mul{},
+ common.BytesToAddress([]byte{0x0f, 0x0c}): &bls12381G1MultiExp{},
+ common.BytesToAddress([]byte{0x0f, 0x0d}): &bls12381G2Add{},
+ common.BytesToAddress([]byte{0x0f, 0x0e}): &bls12381G2Mul{},
+ common.BytesToAddress([]byte{0x0f, 0x0f}): &bls12381G2MultiExp{},
+ common.BytesToAddress([]byte{0x0f, 0x10}): &bls12381Pairing{},
+ common.BytesToAddress([]byte{0x0f, 0x11}): &bls12381MapG1{},
+ common.BytesToAddress([]byte{0x0f, 0x12}): &bls12381MapG2{},
}
// EIP-152 test vectors
@@ -312,36 +314,38 @@ func benchJson(name, addr string, b *testing.B) {
}
}
-func TestPrecompiledBLS12381G1Add(t *testing.T) { testJson("blsG1Add", "0a", t) }
-func TestPrecompiledBLS12381G1Mul(t *testing.T) { testJson("blsG1Mul", "0b", t) }
-func TestPrecompiledBLS12381G1MultiExp(t *testing.T) { testJson("blsG1MultiExp", "0c", t) }
-func TestPrecompiledBLS12381G2Add(t *testing.T) { testJson("blsG2Add", "0d", t) }
-func TestPrecompiledBLS12381G2Mul(t *testing.T) { testJson("blsG2Mul", "0e", t) }
-func TestPrecompiledBLS12381G2MultiExp(t *testing.T) { testJson("blsG2MultiExp", "0f", t) }
-func TestPrecompiledBLS12381Pairing(t *testing.T) { testJson("blsPairing", "10", t) }
-func TestPrecompiledBLS12381MapG1(t *testing.T) { testJson("blsMapG1", "11", t) }
-func TestPrecompiledBLS12381MapG2(t *testing.T) { testJson("blsMapG2", "12", t) }
-
-func BenchmarkPrecompiledBLS12381G1Add(b *testing.B) { benchJson("blsG1Add", "0a", b) }
-func BenchmarkPrecompiledBLS12381G1Mul(b *testing.B) { benchJson("blsG1Mul", "0b", b) }
-func BenchmarkPrecompiledBLS12381G1MultiExp(b *testing.B) { benchJson("blsG1MultiExp", "0c", b) }
-func BenchmarkPrecompiledBLS12381G2Add(b *testing.B) { benchJson("blsG2Add", "0d", b) }
-func BenchmarkPrecompiledBLS12381G2Mul(b *testing.B) { benchJson("blsG2Mul", "0e", b) }
-func BenchmarkPrecompiledBLS12381G2MultiExp(b *testing.B) { benchJson("blsG2MultiExp", "0f", b) }
-func BenchmarkPrecompiledBLS12381Pairing(b *testing.B) { benchJson("blsPairing", "10", b) }
-func BenchmarkPrecompiledBLS12381MapG1(b *testing.B) { benchJson("blsMapG1", "11", b) }
-func BenchmarkPrecompiledBLS12381MapG2(b *testing.B) { benchJson("blsMapG2", "12", b) }
+func TestPrecompiledBLS12381G1Add(t *testing.T) { testJson("blsG1Add", "f0a", t) }
+func TestPrecompiledBLS12381G1Mul(t *testing.T) { testJson("blsG1Mul", "f0b", t) }
+func TestPrecompiledBLS12381G1MultiExp(t *testing.T) { testJson("blsG1MultiExp", "f0c", t) }
+func TestPrecompiledBLS12381G2Add(t *testing.T) { testJson("blsG2Add", "f0d", t) }
+func TestPrecompiledBLS12381G2Mul(t *testing.T) { testJson("blsG2Mul", "f0e", t) }
+func TestPrecompiledBLS12381G2MultiExp(t *testing.T) { testJson("blsG2MultiExp", "f0f", t) }
+func TestPrecompiledBLS12381Pairing(t *testing.T) { testJson("blsPairing", "f10", t) }
+func TestPrecompiledBLS12381MapG1(t *testing.T) { testJson("blsMapG1", "f11", t) }
+func TestPrecompiledBLS12381MapG2(t *testing.T) { testJson("blsMapG2", "f12", t) }
+
+func TestPrecompiledPointEvaluation(t *testing.T) { testJson("pointEvaluation", "0a", t) }
+
+func BenchmarkPrecompiledBLS12381G1Add(b *testing.B) { benchJson("blsG1Add", "f0a", b) }
+func BenchmarkPrecompiledBLS12381G1Mul(b *testing.B) { benchJson("blsG1Mul", "f0b", b) }
+func BenchmarkPrecompiledBLS12381G1MultiExp(b *testing.B) { benchJson("blsG1MultiExp", "f0c", b) }
+func BenchmarkPrecompiledBLS12381G2Add(b *testing.B) { benchJson("blsG2Add", "f0d", b) }
+func BenchmarkPrecompiledBLS12381G2Mul(b *testing.B) { benchJson("blsG2Mul", "f0e", b) }
+func BenchmarkPrecompiledBLS12381G2MultiExp(b *testing.B) { benchJson("blsG2MultiExp", "f0f", b) }
+func BenchmarkPrecompiledBLS12381Pairing(b *testing.B) { benchJson("blsPairing", "f10", b) }
+func BenchmarkPrecompiledBLS12381MapG1(b *testing.B) { benchJson("blsMapG1", "f11", b) }
+func BenchmarkPrecompiledBLS12381MapG2(b *testing.B) { benchJson("blsMapG2", "f12", b) }
// Failure tests
-func TestPrecompiledBLS12381G1AddFail(t *testing.T) { testJsonFail("blsG1Add", "0a", t) }
-func TestPrecompiledBLS12381G1MulFail(t *testing.T) { testJsonFail("blsG1Mul", "0b", t) }
-func TestPrecompiledBLS12381G1MultiExpFail(t *testing.T) { testJsonFail("blsG1MultiExp", "0c", t) }
-func TestPrecompiledBLS12381G2AddFail(t *testing.T) { testJsonFail("blsG2Add", "0d", t) }
-func TestPrecompiledBLS12381G2MulFail(t *testing.T) { testJsonFail("blsG2Mul", "0e", t) }
-func TestPrecompiledBLS12381G2MultiExpFail(t *testing.T) { testJsonFail("blsG2MultiExp", "0f", t) }
-func TestPrecompiledBLS12381PairingFail(t *testing.T) { testJsonFail("blsPairing", "10", t) }
-func TestPrecompiledBLS12381MapG1Fail(t *testing.T) { testJsonFail("blsMapG1", "11", t) }
-func TestPrecompiledBLS12381MapG2Fail(t *testing.T) { testJsonFail("blsMapG2", "12", t) }
+func TestPrecompiledBLS12381G1AddFail(t *testing.T) { testJsonFail("blsG1Add", "f0a", t) }
+func TestPrecompiledBLS12381G1MulFail(t *testing.T) { testJsonFail("blsG1Mul", "f0b", t) }
+func TestPrecompiledBLS12381G1MultiExpFail(t *testing.T) { testJsonFail("blsG1MultiExp", "f0c", t) }
+func TestPrecompiledBLS12381G2AddFail(t *testing.T) { testJsonFail("blsG2Add", "f0d", t) }
+func TestPrecompiledBLS12381G2MulFail(t *testing.T) { testJsonFail("blsG2Mul", "f0e", t) }
+func TestPrecompiledBLS12381G2MultiExpFail(t *testing.T) { testJsonFail("blsG2MultiExp", "f0f", t) }
+func TestPrecompiledBLS12381PairingFail(t *testing.T) { testJsonFail("blsPairing", "f10", t) }
+func TestPrecompiledBLS12381MapG1Fail(t *testing.T) { testJsonFail("blsMapG1", "f11", t) }
+func TestPrecompiledBLS12381MapG2Fail(t *testing.T) { testJsonFail("blsMapG2", "f12", t) }
func loadJson(name string) ([]precompiledTest, error) {
data, err := os.ReadFile(fmt.Sprintf("testdata/precompiles/%v.json", name))
diff --git a/core/vm/eips.go b/core/vm/eips.go
index f7e3f7e35d..738f4bfecf 100644
--- a/core/vm/eips.go
+++ b/core/vm/eips.go
@@ -37,6 +37,8 @@ import (
)
var activators = map[int]func(*JumpTable){
+ 5656: enable5656,
+ 6780: enable6780,
3855: enable3855,
3860: enable3860,
3198: enable3198,
@@ -250,9 +252,69 @@ func opPush0(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
return nil, nil
}
-// ebnable3860 enables "EIP-3860: Limit and meter initcode"
+// enable3860 enables "EIP-3860: Limit and meter initcode"
// https://eips.ethereum.org/EIPS/eip-3860
func enable3860(jt *JumpTable) {
jt[CREATE].dynamicGas = gasCreateEip3860
jt[CREATE2].dynamicGas = gasCreate2Eip3860
}
+
+// enable5656 enables EIP-5656 (MCOPY opcode)
+// https://eips.ethereum.org/EIPS/eip-5656
+func enable5656(jt *JumpTable) {
+ jt[MCOPY] = &operation{
+ execute: opMcopy,
+ constantGas: GasFastestStep,
+ dynamicGas: gasMcopy,
+ minStack: minStack(3, 0),
+ maxStack: maxStack(3, 0),
+ memorySize: memoryMcopy,
+ }
+}
+
+// opMcopy implements the MCOPY opcode (https://eips.ethereum.org/EIPS/eip-5656)
+func opMcopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+ var (
+ dst = scope.Stack.pop()
+ src = scope.Stack.pop()
+ length = scope.Stack.pop()
+ )
+ // These values are checked for overflow during memory expansion calculation
+ // (the memorySize function on the opcode).
+ scope.Memory.Copy(dst.Uint64(), src.Uint64(), length.Uint64())
+ return nil, nil
+}
+
+// opBlobHash implements the BLOBHASH opcode
+func opBlobHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+ index := scope.Stack.peek()
+ if index.LtUint64(uint64(len(interpreter.evm.TxContext.BlobHashes))) {
+ blobHash := interpreter.evm.TxContext.BlobHashes[index.Uint64()]
+ index.SetBytes32(blobHash[:])
+ } else {
+ index.Clear()
+ }
+ return nil, nil
+}
+
+// enable4844 applies EIP-4844 (DATAHASH opcode)
+func enable4844(jt *JumpTable) {
+ // New opcode
+ jt[BLOBHASH] = &operation{
+ execute: opBlobHash,
+ constantGas: GasFastestStep,
+ minStack: minStack(1, 1),
+ maxStack: maxStack(1, 1),
+ }
+}
+
+// enable6780 applies EIP-6780 (deactivate SELFDESTRUCT)
+func enable6780(jt *JumpTable) {
+ jt[SELFDESTRUCT] = &operation{
+ execute: opSelfdestruct6780,
+ dynamicGas: gasSelfdestructEIP3529,
+ constantGas: params.SelfdestructGasEIP150,
+ minStack: minStack(1, 0),
+ maxStack: maxStack(1, 0),
+ }
+}
diff --git a/core/vm/evm.go b/core/vm/evm.go
index d0b861ad46..b978cf782f 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -32,6 +32,7 @@ import (
"time"
"github.com/ava-labs/coreth/constants"
+ "github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/precompile"
"github.com/ava-labs/coreth/vmerrs"
@@ -59,10 +60,6 @@ func IsProhibited(addr common.Address) bool {
return false
}
-// emptyCodeHash is used by create to ensure deployment is disallowed to already
-// deployed contract addresses (relevant after the account abstraction).
-var emptyCodeHash = crypto.Keccak256Hash(nil)
-
type (
// CanTransferFunc is the signature of a transfer guard function
CanTransferFunc func(StateDB, common.Address, *big.Int) bool
@@ -78,6 +75,8 @@ type (
func (evm *EVM) precompile(addr common.Address) (precompile.StatefulPrecompiledContract, bool) {
var precompiles map[common.Address]precompile.StatefulPrecompiledContract
switch {
+ case evm.chainRules.IsCancun:
+ precompiles = PrecompiledContractsCancun
case evm.chainRules.IsBanff:
precompiles = PrecompiledContractsBanff
case evm.chainRules.IsApricotPhase6:
@@ -122,12 +121,13 @@ type BlockContext struct {
GetHash GetHashFunc
// Block information
- Coinbase common.Address // Provides information for COINBASE
- GasLimit uint64 // Provides information for GASLIMIT
- BlockNumber *big.Int // Provides information for NUMBER
- Time uint64 // Provides information for TIME
- Difficulty *big.Int // Provides information for DIFFICULTY
- BaseFee *big.Int // Provides information for BASEFEE
+ Coinbase common.Address // Provides information for COINBASE
+ GasLimit uint64 // Provides information for GASLIMIT
+ BlockNumber *big.Int // Provides information for NUMBER
+ Time uint64 // Provides information for TIME
+ Difficulty *big.Int // Provides information for DIFFICULTY
+ BaseFee *big.Int // Provides information for BASEFEE
+ ExcessBlobGas *uint64 // ExcessBlobGas field in the header, needed to compute the data
}
func (b *BlockContext) Number() *big.Int {
@@ -142,8 +142,9 @@ func (b *BlockContext) Timestamp() uint64 {
// All fields can change between transactions.
type TxContext struct {
// Message information
- Origin common.Address // Provides information for ORIGIN
- GasPrice *big.Int // Provides information for GASPRICE
+ Origin common.Address // Provides information for ORIGIN
+ GasPrice *big.Int // Provides information for GASPRICE
+ BlobHashes []common.Hash // Provides information for BLOBHASH
}
// EVM is the Ethereum Virtual Machine base object and provides
@@ -597,7 +598,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
}
// Ensure there's no existing contract already at the designated address
contractHash := evm.StateDB.GetCodeHash(address)
- if evm.StateDB.GetNonce(address) != 0 || (contractHash != (common.Hash{}) && contractHash != emptyCodeHash) {
+ if evm.StateDB.GetNonce(address) != 0 || (contractHash != (common.Hash{}) && contractHash != types.EmptyCodeHash) {
return nil, common.Address{}, 0, vmerrs.ErrContractAddressCollision
}
// Create a new account on the state
diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go
index 7201c57781..7095f9fdc7 100644
--- a/core/vm/gas_table.go
+++ b/core/vm/gas_table.go
@@ -71,6 +71,7 @@ func memoryGasCost(mem *Memory, newMemSize uint64) (uint64, error) {
// as argument:
// CALLDATACOPY (stack position 2)
// CODECOPY (stack position 2)
+// MCOPY (stack position 2)
// EXTCODECOPY (stack position 3)
// RETURNDATACOPY (stack position 2)
func memoryCopierGas(stackpos int) gasFunc {
@@ -100,6 +101,7 @@ func memoryCopierGas(stackpos int) gasFunc {
var (
gasCallDataCopy = memoryCopierGas(2)
gasCodeCopy = memoryCopierGas(2)
+ gasMcopy = memoryCopierGas(2)
gasExtCodeCopy = memoryCopierGas(3)
gasReturnDataCopy = memoryCopierGas(2)
)
@@ -555,7 +557,7 @@ func gasSelfdestruct(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me
}
}
- if !evm.StateDB.HasSuicided(contract.Address()) {
+ if !evm.StateDB.HasSelfDestructed(contract.Address()) {
evm.StateDB.AddRefund(params.SelfdestructRefundGas)
}
return gas, nil
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index 4c4198acf3..35f85b4f77 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -432,7 +432,7 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
// emptyCodeHash. If the precompile account is not transferred any amount on a private or
// customized chain, the return value will be zero.
//
-// 5. Caller tries to get the code hash for an account which is marked as suicided
+// 5. Caller tries to get the code hash for an account which is marked as self-destructed
// in the current transaction, the code hash of this account should be returned.
//
// 6. Caller tries to get the code hash for an account which is marked as deleted, this
@@ -886,7 +886,23 @@ func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
beneficiary := scope.Stack.pop()
balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address())
interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance)
- interpreter.evm.StateDB.Suicide(scope.Contract.Address())
+ interpreter.evm.StateDB.SelfDestruct(scope.Contract.Address())
+ if tracer := interpreter.evm.Config.Tracer; tracer != nil {
+ tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance)
+ tracer.CaptureExit([]byte{}, 0, nil)
+ }
+ return nil, errStopToken
+}
+
+func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+ if interpreter.readOnly {
+ return nil, vmerrs.ErrWriteProtection
+ }
+ beneficiary := scope.Stack.pop()
+ balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address())
+ interpreter.evm.StateDB.SubBalance(scope.Contract.Address(), balance)
+ interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance)
+ interpreter.evm.StateDB.Selfdestruct6780(scope.Contract.Address())
if tracer := interpreter.evm.Config.Tracer; tracer != nil {
tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance)
tracer.CaptureExit([]byte{}, 0, nil)
diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go
index 187617eb97..1c6c874c37 100644
--- a/core/vm/instructions_test.go
+++ b/core/vm/instructions_test.go
@@ -32,13 +32,16 @@ import (
"fmt"
"math/big"
"os"
+ "strings"
"testing"
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/state"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/params"
+ "github.com/ava-labs/coreth/vmerrs"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
"github.com/holiman/uint256"
)
@@ -736,7 +739,7 @@ func TestRandom(t *testing.T) {
for _, tt := range []testcase{
{name: "empty hash", random: common.Hash{}},
{name: "1", random: common.Hash{0}},
- {name: "emptyCodeHash", random: emptyCodeHash},
+ {name: "emptyCodeHash", random: types.EmptyCodeHash},
{name: "hash(0x010203)", random: crypto.Keccak256Hash([]byte{0x01, 0x02, 0x03})},
} {
var (
@@ -760,3 +763,183 @@ func TestRandom(t *testing.T) {
}
}
}
+
+func TestBlobHash(t *testing.T) {
+ type testcase struct {
+ name string
+ idx uint64
+ expect common.Hash
+ hashes []common.Hash
+ }
+ var (
+ zero = common.Hash{0}
+ one = common.Hash{1}
+ two = common.Hash{2}
+ three = common.Hash{3}
+ )
+ for _, tt := range []testcase{
+ {name: "[{1}]", idx: 0, expect: one, hashes: []common.Hash{one}},
+ {name: "[1,{2},3]", idx: 2, expect: three, hashes: []common.Hash{one, two, three}},
+ {name: "out-of-bounds (empty)", idx: 10, expect: zero, hashes: []common.Hash{}},
+ {name: "out-of-bounds", idx: 25, expect: zero, hashes: []common.Hash{one, two, three}},
+ {name: "out-of-bounds (nil)", idx: 25, expect: zero, hashes: nil},
+ } {
+ var (
+ env = NewEVM(BlockContext{}, TxContext{BlobHashes: tt.hashes}, nil, params.TestChainConfig, Config{})
+ stack = newstack()
+ pc = uint64(0)
+ evmInterpreter = env.interpreter
+ )
+ stack.push(uint256.NewInt(tt.idx))
+ opBlobHash(&pc, evmInterpreter, &ScopeContext{nil, stack, nil})
+ if len(stack.data) != 1 {
+ t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data))
+ }
+ actual := stack.pop()
+ expected, overflow := uint256.FromBig(new(big.Int).SetBytes(tt.expect.Bytes()))
+ if overflow {
+ t.Errorf("Testcase %v: invalid overflow", tt.name)
+ }
+ if actual.Cmp(expected) != 0 {
+ t.Errorf("Testcase %v: expected %x, got %x", tt.name, expected, actual)
+ }
+ }
+}
+
+func TestOpMCopy(t *testing.T) {
+ // Test cases from https://eips.ethereum.org/EIPS/eip-5656#test-cases
+ for i, tc := range []struct {
+ dst, src, len string
+ pre string
+ want string
+ wantGas uint64
+ }{
+ { // MCOPY 0 32 32 - copy 32 bytes from offset 32 to offset 0.
+ dst: "0x0", src: "0x20", len: "0x20",
+ pre: "0000000000000000000000000000000000000000000000000000000000000000 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f",
+ want: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f",
+ wantGas: 6,
+ },
+
+ { // MCOPY 0 0 32 - copy 32 bytes from offset 0 to offset 0.
+ dst: "0x0", src: "0x0", len: "0x20",
+ pre: "0101010101010101010101010101010101010101010101010101010101010101",
+ want: "0101010101010101010101010101010101010101010101010101010101010101",
+ wantGas: 6,
+ },
+ { // MCOPY 0 1 8 - copy 8 bytes from offset 1 to offset 0 (overlapping).
+ dst: "0x0", src: "0x1", len: "0x8",
+ pre: "000102030405060708 000000000000000000000000000000000000000000000000",
+ want: "010203040506070808 000000000000000000000000000000000000000000000000",
+ wantGas: 6,
+ },
+ { // MCOPY 1 0 8 - copy 8 bytes from offset 0 to offset 1 (overlapping).
+ dst: "0x1", src: "0x0", len: "0x8",
+ pre: "000102030405060708 000000000000000000000000000000000000000000000000",
+ want: "000001020304050607 000000000000000000000000000000000000000000000000",
+ wantGas: 6,
+ },
+ // Tests below are not in the EIP, but maybe should be added
+ { // MCOPY 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF 0 - copy zero bytes from out-of-bounds index(overlapping).
+ dst: "0xFFFFFFFFFFFF", src: "0xFFFFFFFFFFFF", len: "0x0",
+ pre: "11",
+ want: "11",
+ wantGas: 3,
+ },
+ { // MCOPY 0xFFFFFFFFFFFF 0 0 - copy zero bytes from start of mem to out-of-bounds.
+ dst: "0xFFFFFFFFFFFF", src: "0x0", len: "0x0",
+ pre: "11",
+ want: "11",
+ wantGas: 3,
+ },
+ { // MCOPY 0 0xFFFFFFFFFFFF 0 - copy zero bytes from out-of-bounds to start of mem
+ dst: "0x0", src: "0xFFFFFFFFFFFF", len: "0x0",
+ pre: "11",
+ want: "11",
+ wantGas: 3,
+ },
+ { // MCOPY - copy 1 from space outside of uint64 space
+ dst: "0x0", src: "0x10000000000000000", len: "0x1",
+ pre: "0",
+ },
+ { // MCOPY - copy 1 from 0 to space outside of uint64
+ dst: "0x10000000000000000", src: "0x0", len: "0x1",
+ pre: "0",
+ },
+ { // MCOPY - copy nothing from 0 to space outside of uint64
+ dst: "0x10000000000000000", src: "0x0", len: "0x0",
+ pre: "",
+ want: "",
+ wantGas: 3,
+ },
+ { // MCOPY - copy 1 from 0x20 to 0x10, with no prior allocated mem
+ dst: "0x10", src: "0x20", len: "0x1",
+ pre: "",
+ // 64 bytes
+ want: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ wantGas: 12,
+ },
+ { // MCOPY - copy 1 from 0x19 to 0x10, with no prior allocated mem
+ dst: "0x10", src: "0x19", len: "0x1",
+ pre: "",
+ // 32 bytes
+ want: "0x0000000000000000000000000000000000000000000000000000000000000000",
+ wantGas: 9,
+ },
+ } {
+ var (
+ env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{})
+ stack = newstack()
+ pc = uint64(0)
+ evmInterpreter = env.interpreter
+ )
+ data := common.FromHex(strings.ReplaceAll(tc.pre, " ", ""))
+ // Set pre
+ mem := NewMemory()
+ mem.Resize(uint64(len(data)))
+ mem.Set(0, uint64(len(data)), data)
+ // Push stack args
+ len, _ := uint256.FromHex(tc.len)
+ src, _ := uint256.FromHex(tc.src)
+ dst, _ := uint256.FromHex(tc.dst)
+
+ stack.push(len)
+ stack.push(src)
+ stack.push(dst)
+ wantErr := (tc.wantGas == 0)
+ // Calc mem expansion
+ var memorySize uint64
+ if memSize, overflow := memoryMcopy(stack); overflow {
+ if wantErr {
+ continue
+ }
+ t.Errorf("overflow")
+ } else {
+ var overflow bool
+ if memorySize, overflow = math.SafeMul(toWordSize(memSize), 32); overflow {
+ t.Error(vmerrs.ErrGasUintOverflow)
+ }
+ }
+ // and the dynamic cost
+ var haveGas uint64
+ if dynamicCost, err := gasMcopy(env, nil, stack, mem, memorySize); err != nil {
+ t.Error(err)
+ } else {
+ haveGas = GasFastestStep + dynamicCost
+ }
+ // Expand mem
+ if memorySize > 0 {
+ mem.Resize(memorySize)
+ }
+ // Do the copy
+ opMcopy(&pc, evmInterpreter, &ScopeContext{mem, stack, nil})
+ want := common.FromHex(strings.ReplaceAll(tc.want, " ", ""))
+ if have := mem.store; !bytes.Equal(want, have) {
+ t.Errorf("case %d: \nwant: %#x\nhave: %#x\n", i, want, have)
+ }
+ wantGas := tc.wantGas
+ if haveGas != wantGas {
+ t.Errorf("case %d: gas wrong, want %d have %d\n", i, wantGas, haveGas)
+ }
+ }
+}
diff --git a/core/vm/interface.go b/core/vm/interface.go
index 8546aea534..2956ca8a8c 100644
--- a/core/vm/interface.go
+++ b/core/vm/interface.go
@@ -66,11 +66,13 @@ type StateDB interface {
GetTransientState(addr common.Address, key common.Hash) common.Hash
SetTransientState(addr common.Address, key, value common.Hash)
- Suicide(common.Address) bool
- HasSuicided(common.Address) bool
+ SelfDestruct(common.Address)
+ HasSelfDestructed(common.Address) bool
+
+ Selfdestruct6780(common.Address)
// Exist reports whether the given account exists in state.
- // Notably this should also return true for suicided accounts.
+ // Notably this should also return true for self-destructed accounts.
Exist(common.Address) bool
// Empty returns whether the given account is empty. Empty
// is defined according to EIP161 (balance = nonce = code = 0).
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index 10b225b1f8..ccf3c3f10a 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -77,6 +77,8 @@ func NewEVMInterpreter(evm *EVM) *EVMInterpreter {
// If jump table was not initialised we set the default one.
var table *JumpTable
switch {
+ case evm.chainRules.IsCancun:
+ table = &cancunInstructionSet
case evm.chainRules.IsDUpgrade:
table = &dUpgradeInstructionSet
case evm.chainRules.IsApricotPhase3:
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index c13dbe150d..48eb560d81 100644
--- a/core/vm/jump_table.go
+++ b/core/vm/jump_table.go
@@ -66,6 +66,7 @@ var (
apricotPhase2InstructionSet = newApricotPhase2InstructionSet()
apricotPhase3InstructionSet = newApricotPhase3InstructionSet()
dUpgradeInstructionSet = newDUpgradeInstructionSet()
+ cancunInstructionSet = newCancunInstructionSet()
)
// JumpTable contains the EVM opcodes supported at a given fork.
@@ -89,10 +90,20 @@ func validate(jt JumpTable) JumpTable {
return jt
}
+func newCancunInstructionSet() JumpTable {
+ instructionSet := newDUpgradeInstructionSet()
+ enable4844(&instructionSet) // EIP-4844 (BLOBHASH opcode)
+ enable1153(&instructionSet) // EIP-1153 "Transient Storage"
+ enable5656(&instructionSet) // EIP-5656 (MCOPY opcode)
+ enable6780(&instructionSet) // EIP-6780 SELFDESTRUCT only in same transaction
+ return validate(instructionSet)
+}
+
func newDUpgradeInstructionSet() JumpTable {
instructionSet := newApricotPhase3InstructionSet()
enable3855(&instructionSet) // PUSH0 instruction
enable3860(&instructionSet) // Limit and meter initcode
+
return validate(instructionSet)
}
diff --git a/core/vm/jump_table_export.go b/core/vm/jump_table_export.go
index 7800f8c3cb..ff986dabd8 100644
--- a/core/vm/jump_table_export.go
+++ b/core/vm/jump_table_export.go
@@ -24,6 +24,8 @@ import (
// the rules.
func LookupInstructionSet(rules params.Rules) (JumpTable, error) {
switch {
+ case rules.IsCancun:
+ return newCancunInstructionSet(), nil
case rules.IsDUpgrade:
return newDUpgradeInstructionSet(), nil
case rules.IsApricotPhase3, rules.IsApricotPhase4,
diff --git a/core/vm/memory.go b/core/vm/memory.go
index eb6bc89078..259b7bf463 100644
--- a/core/vm/memory.go
+++ b/core/vm/memory.go
@@ -113,3 +113,14 @@ func (m *Memory) Len() int {
func (m *Memory) Data() []byte {
return m.store
}
+
+// Copy copies data from the src position slice into the dst position.
+// The source and destination may overlap.
+// OBS: This operation assumes that any necessary memory expansion has already been performed,
+// and this method may panic otherwise.
+func (m *Memory) Copy(dst, src, len uint64) {
+ if len == 0 {
+ return
+ }
+ copy(m.store[dst:], m.store[src:src+len])
+}
diff --git a/core/vm/memory_table.go b/core/vm/memory_table.go
index 4af8c93cf0..8d0d80406c 100644
--- a/core/vm/memory_table.go
+++ b/core/vm/memory_table.go
@@ -58,6 +58,14 @@ func memoryMStore(stack *Stack) (uint64, bool) {
return calcMemSize64WithUint(stack.Back(0), 32)
}
+func memoryMcopy(stack *Stack) (uint64, bool) {
+ mStart := stack.Back(0) // stack[0]: dest
+ if stack.Back(1).Gt(mStart) {
+ mStart = stack.Back(1) // stack[1]: source
+ }
+ return calcMemSize64(mStart, stack.Back(2)) // stack[2]: length
+}
+
func memoryCreate(stack *Stack) (uint64, bool) {
return calcMemSize64(stack.Back(1), stack.Back(2))
}
diff --git a/core/vm/memory_test.go b/core/vm/memory_test.go
new file mode 100644
index 0000000000..5acb6903eb
--- /dev/null
+++ b/core/vm/memory_test.go
@@ -0,0 +1,79 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+package vm
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+func TestMemoryCopy(t *testing.T) {
+ // Test cases from https://eips.ethereum.org/EIPS/eip-5656#test-cases
+ for i, tc := range []struct {
+ dst, src, len uint64
+ pre string
+ want string
+ }{
+ { // MCOPY 0 32 32 - copy 32 bytes from offset 32 to offset 0.
+ 0, 32, 32,
+ "0000000000000000000000000000000000000000000000000000000000000000 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f",
+ "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f",
+ },
+
+ { // MCOPY 0 0 32 - copy 32 bytes from offset 0 to offset 0.
+ 0, 0, 32,
+ "0101010101010101010101010101010101010101010101010101010101010101",
+ "0101010101010101010101010101010101010101010101010101010101010101",
+ },
+ { // MCOPY 0 1 8 - copy 8 bytes from offset 1 to offset 0 (overlapping).
+ 0, 1, 8,
+ "000102030405060708 000000000000000000000000000000000000000000000000",
+ "010203040506070808 000000000000000000000000000000000000000000000000",
+ },
+ { // MCOPY 1 0 8 - copy 8 bytes from offset 0 to offset 1 (overlapping).
+ 1, 0, 8,
+ "000102030405060708 000000000000000000000000000000000000000000000000",
+ "000001020304050607 000000000000000000000000000000000000000000000000",
+ },
+ // Tests below are not in the EIP, but maybe should be added
+ { // MCOPY 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF 0 - copy zero bytes from out-of-bounds index(overlapping).
+ 0xFFFFFFFFFFFF, 0xFFFFFFFFFFFF, 0,
+ "11",
+ "11",
+ },
+ { // MCOPY 0xFFFFFFFFFFFF 0 0 - copy zero bytes from start of mem to out-of-bounds.
+ 0xFFFFFFFFFFFF, 0, 0,
+ "11",
+ "11",
+ },
+ { // MCOPY 0 0xFFFFFFFFFFFF 0 - copy zero bytes from out-of-bounds to start of mem
+ 0, 0xFFFFFFFFFFFF, 0,
+ "11",
+ "11",
+ },
+ } {
+ m := NewMemory()
+ // Clean spaces
+ data := common.FromHex(strings.ReplaceAll(tc.pre, " ", ""))
+ // Set pre
+ m.Resize(uint64(len(data)))
+ m.Set(0, uint64(len(data)), data)
+ // Do the copy
+ m.Copy(tc.dst, tc.src, tc.len)
+ want := common.FromHex(strings.ReplaceAll(tc.want, " ", ""))
+ if have := m.store; !bytes.Equal(want, have) {
+ t.Errorf("case %d: want: %#x\nhave: %#x\n", i, want, have)
+ }
+ }
+}
diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go
index f8a86784ef..fbaeecb2f3 100644
--- a/core/vm/opcodes.go
+++ b/core/vm/opcodes.go
@@ -108,6 +108,7 @@ const (
CHAINID OpCode = 0x46
SELFBALANCE OpCode = 0x47
BASEFEE OpCode = 0x48
+ BLOBHASH OpCode = 0x49
)
// 0x50 range - 'storage' and execution.
@@ -124,6 +125,9 @@ const (
MSIZE OpCode = 0x59
GAS OpCode = 0x5a
JUMPDEST OpCode = 0x5b
+ TLOAD OpCode = 0x5c
+ TSTORE OpCode = 0x5d
+ MCOPY OpCode = 0x5e
PUSH0 OpCode = 0x5f
)
@@ -212,12 +216,6 @@ const (
LOG4
)
-// 0xb0 range.
-const (
- TLOAD OpCode = 0xb3
- TSTORE OpCode = 0xb4
-)
-
const (
BALANCEMC = 0xcd
CALLEX = 0xcf
@@ -302,6 +300,7 @@ var opCodeToString = map[OpCode]string{
CHAINID: "CHAINID",
SELFBALANCE: "SELFBALANCE",
BASEFEE: "BASEFEE",
+ BLOBHASH: "BLOBHASH",
// 0x50 range - 'storage' and execution.
POP: "POP",
@@ -316,6 +315,9 @@ var opCodeToString = map[OpCode]string{
MSIZE: "MSIZE",
GAS: "GAS",
JUMPDEST: "JUMPDEST",
+ TLOAD: "TLOAD",
+ TSTORE: "TSTORE",
+ MCOPY: "MCOPY",
PUSH0: "PUSH0",
// 0x60 range - pushes.
@@ -395,10 +397,6 @@ var opCodeToString = map[OpCode]string{
LOG3: "LOG3",
LOG4: "LOG4",
- // 0xb0 range.
- TLOAD: "TLOAD",
- TSTORE: "TSTORE",
-
// 0xf0 range - closures.
CREATE: "CREATE",
CALL: "CALL",
@@ -461,6 +459,7 @@ var stringToOp = map[string]OpCode{
"CALLDATACOPY": CALLDATACOPY,
"CHAINID": CHAINID,
"BASEFEE": BASEFEE,
+ "BLOBHASH": BLOBHASH,
"DELEGATECALL": DELEGATECALL,
"STATICCALL": STATICCALL,
"CODESIZE": CODESIZE,
@@ -490,6 +489,9 @@ var stringToOp = map[string]OpCode{
"MSIZE": MSIZE,
"GAS": GAS,
"JUMPDEST": JUMPDEST,
+ "TLOAD": TLOAD,
+ "TSTORE": TSTORE,
+ "MCOPY": MCOPY,
"PUSH0": PUSH0,
"PUSH1": PUSH1,
"PUSH2": PUSH2,
@@ -560,8 +562,6 @@ var stringToOp = map[string]OpCode{
"LOG2": LOG2,
"LOG3": LOG3,
"LOG4": LOG4,
- "TLOAD": TLOAD,
- "TSTORE": TSTORE,
"CREATE": CREATE,
"CREATE2": CREATE2,
"CALL": CALL,
diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go
index 7b36e8c1ec..16df124448 100644
--- a/core/vm/operations_acl.go
+++ b/core/vm/operations_acl.go
@@ -194,22 +194,31 @@ var (
gasDelegateCallEIP2929 = makeCallVariantGasCallEIP2929(gasDelegateCall)
gasStaticCallEIP2929 = makeCallVariantGasCallEIP2929(gasStaticCall)
gasCallCodeEIP2929 = makeCallVariantGasCallEIP2929(gasCallCode)
+ gasSelfdestructEIP2929 = makeSelfdestructGasFn(true)
+ // gasSelfdestructEIP3529 implements the changes in EIP-3529 (no refunds)
+ gasSelfdestructEIP3529 = makeSelfdestructGasFn(false)
)
-func gasSelfdestructEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
- var (
- gas uint64
- address = common.Address(stack.peek().Bytes20())
- )
- if !evm.StateDB.AddressInAccessList(address) {
- // If the caller cannot afford the cost, this change will be rolled back
- evm.StateDB.AddAddressToAccessList(address)
- gas = params.ColdAccountAccessCostEIP2929
- }
- // if empty and transfers value
- if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 {
- gas += params.CreateBySelfdestructGas
+// makeSelfdestructGasFn can create the selfdestruct dynamic gas function for EIP-2929 and EIP-3529
+func makeSelfdestructGasFn(refundsEnabled bool) gasFunc {
+ gasFunc := func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+ var (
+ gas uint64
+ address = common.Address(stack.peek().Bytes20())
+ )
+ if !evm.StateDB.AddressInAccessList(address) {
+ // If the caller cannot afford the cost, this change will be rolled back
+ evm.StateDB.AddAddressToAccessList(address)
+ gas = params.ColdAccountAccessCostEIP2929
+ }
+ // if empty and transfers value
+ if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 {
+ gas += params.CreateBySelfdestructGas
+ }
+ if refundsEnabled && !evm.StateDB.HasSelfDestructed(contract.Address()) {
+ evm.StateDB.AddRefund(params.SelfdestructRefundGas)
+ }
+ return gas, nil
}
-
- return gas, nil
+ return gasFunc
}
diff --git a/core/vm/runtime/env.go b/core/vm/runtime/env.go
index 5293c40846..9fe4e87689 100644
--- a/core/vm/runtime/env.go
+++ b/core/vm/runtime/env.go
@@ -33,8 +33,9 @@ import (
func NewEnv(cfg *Config) *vm.EVM {
txContext := vm.TxContext{
- Origin: cfg.Origin,
- GasPrice: cfg.GasPrice,
+ Origin: cfg.Origin,
+ GasPrice: cfg.GasPrice,
+ BlobHashes: cfg.BlobHashes,
}
blockContext := vm.BlockContext{
CanTransfer: core.CanTransfer,
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index 81ea6792d4..36efb454e1 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -54,6 +54,7 @@ type Config struct {
Debug bool
EVMConfig vm.Config
BaseFee *big.Int
+ BlobHashes []common.Hash
State *state.StateDB
GetHashFn func(n uint64) common.Hash
diff --git a/core/vm/testdata/precompiles/pointEvaluation.json b/core/vm/testdata/precompiles/pointEvaluation.json
new file mode 100644
index 0000000000..93fc66d836
--- /dev/null
+++ b/core/vm/testdata/precompiles/pointEvaluation.json
@@ -0,0 +1,9 @@
+[
+ {
+ "Input": "01d18459b334ffe8e2226eef1db874fda6db2bdd9357268b39220af2d59464fb564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d3630624d25032e67a7e6a4910df5834b8fe70e6bcfeeac0352434196bdf4b2485d5a1978a0d595c823c05947b1156175e72634a377808384256e9921ebf72181890be2d6b58d4a73a880541d1656875654806942307f266e636553e94006d11423f2688945ff3bdf515859eba1005c1a7708d620a94d91a1c0c285f9584e75ec2f82a",
+ "Expected": "000000000000000000000000000000000000000000000000000000000000100073eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001",
+ "Name": "pointEvaluation1",
+ "Gas": 50000,
+ "NoBenchmark": false
+ }
+]
diff --git a/eth/api.go b/eth/api.go
index 8aa03fa1c8..5842cb5625 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -27,26 +27,7 @@
package eth
import (
- "compress/gzip"
- "context"
- "errors"
- "fmt"
- "io"
- "os"
- "strings"
- "time"
-
- "github.com/ava-labs/coreth/core"
- "github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/state"
- "github.com/ava-labs/coreth/core/types"
- "github.com/ava-labs/coreth/internal/ethapi"
- "github.com/ava-labs/coreth/rpc"
- "github.com/ava-labs/coreth/trie"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/rlp"
)
// EthereumAPI provides an API to access Ethereum full node-related information.
@@ -59,419 +40,12 @@ func NewEthereumAPI(e *Ethereum) *EthereumAPI {
return &EthereumAPI{e}
}
-// Etherbase is the address that mining rewards will be send to.
+// Etherbase is the address that mining rewards will be sent to.
func (api *EthereumAPI) Etherbase() (common.Address, error) {
return api.e.Etherbase()
}
-// Coinbase is the address that mining rewards will be send to (alias for Etherbase).
+// Coinbase is the address that mining rewards will be sent to (alias for Etherbase).
func (api *EthereumAPI) Coinbase() (common.Address, error) {
return api.Etherbase()
}
-
-// AdminAPI is the collection of Ethereum full node related APIs for node
-// administration.
-type AdminAPI struct {
- eth *Ethereum
-}
-
-// NewAdminAPI creates a new instance of AdminAPI.
-func NewAdminAPI(eth *Ethereum) *AdminAPI {
- return &AdminAPI{eth: eth}
-}
-
-// ExportChain exports the current blockchain into a local file,
-// or a range of blocks if first and last are non-nil.
-func (api *AdminAPI) ExportChain(file string, first *uint64, last *uint64) (bool, error) {
- if first == nil && last != nil {
- return false, errors.New("last cannot be specified without first")
- }
- if first != nil && last == nil {
- head := api.eth.BlockChain().CurrentHeader().Number.Uint64()
- last = &head
- }
- if _, err := os.Stat(file); err == nil {
- // File already exists. Allowing overwrite could be a DoS vector,
- // since the 'file' may point to arbitrary paths on the drive.
- return false, errors.New("location would overwrite an existing file")
- }
- // Make sure we can create the file to export into
- out, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
- if err != nil {
- return false, err
- }
- defer out.Close()
-
- var writer io.Writer = out
- if strings.HasSuffix(file, ".gz") {
- writer = gzip.NewWriter(writer)
- defer writer.(*gzip.Writer).Close()
- }
-
- // Export the blockchain
- if first != nil {
- if err := api.eth.BlockChain().ExportN(writer, *first, *last); err != nil {
- return false, err
- }
- } else if err := api.eth.BlockChain().Export(writer); err != nil {
- return false, err
- }
- return true, nil
-}
-
-func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool {
- for _, b := range bs {
- if !chain.HasBlock(b.Hash(), b.NumberU64()) {
- return false
- }
- }
-
- return true
-}
-
-// ImportChain imports a blockchain from a local file.
-func (api *AdminAPI) ImportChain(file string) (bool, error) {
- // Make sure the can access the file to import
- in, err := os.Open(file)
- if err != nil {
- return false, err
- }
- defer in.Close()
-
- var reader io.Reader = in
- if strings.HasSuffix(file, ".gz") {
- if reader, err = gzip.NewReader(reader); err != nil {
- return false, err
- }
- }
-
- // Run actual the import in pre-configured batches
- stream := rlp.NewStream(reader, 0)
-
- blocks, index := make([]*types.Block, 0, 2500), 0
- for batch := 0; ; batch++ {
- // Load a batch of blocks from the input file
- for len(blocks) < cap(blocks) {
- block := new(types.Block)
- if err := stream.Decode(block); err == io.EOF {
- break
- } else if err != nil {
- return false, fmt.Errorf("block %d: failed to parse: %v", index, err)
- }
- blocks = append(blocks, block)
- index++
- }
- if len(blocks) == 0 {
- break
- }
-
- if hasAllBlocks(api.eth.BlockChain(), blocks) {
- blocks = blocks[:0]
- continue
- }
- // Import the batch and reset the buffer
- if _, err := api.eth.BlockChain().InsertChain(blocks); err != nil {
- return false, fmt.Errorf("batch %d: failed to insert: %v", batch, err)
- }
- blocks = blocks[:0]
- }
- return true, nil
-}
-
-// DebugAPI is the collection of Ethereum full node APIs for debugging the
-// protocol.
-type DebugAPI struct {
- eth *Ethereum
-}
-
-// NewDebugAPI creates a new DebugAPI instance.
-func NewDebugAPI(eth *Ethereum) *DebugAPI {
- return &DebugAPI{eth: eth}
-}
-
-// DumpBlock retrieves the entire state of the database at a given block.
-func (api *DebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error) {
- opts := &state.DumpConfig{
- OnlyWithAddresses: true,
- Max: AccountRangeMaxResults, // Sanity limit over RPC
- }
- var header *types.Header
- if blockNr.IsAccepted() {
- header = api.eth.LastAcceptedBlock().Header()
- } else {
- block := api.eth.blockchain.GetBlockByNumber(uint64(blockNr))
- if block == nil {
- return state.Dump{}, fmt.Errorf("block #%d not found", blockNr)
- }
- header = block.Header()
- }
- if header == nil {
- return state.Dump{}, fmt.Errorf("block #%d not found", blockNr)
- }
- stateDb, err := api.eth.BlockChain().StateAt(header.Root)
- if err != nil {
- return state.Dump{}, err
- }
- return stateDb.RawDump(opts), nil
-}
-
-// Preimage is a debug API function that returns the preimage for a sha3 hash, if known.
-func (api *DebugAPI) Preimage(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) {
- if preimage := rawdb.ReadPreimage(api.eth.ChainDb(), hash); preimage != nil {
- return preimage, nil
- }
- return nil, errors.New("unknown preimage")
-}
-
-// GetBadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
-// and returns them as a JSON list of block hashes.
-func (api *DebugAPI) GetBadBlocks(ctx context.Context) ([]*ethapi.BadBlockArgs, error) {
- internalAPI := ethapi.NewBlockChainAPI(api.eth.APIBackend)
- return internalAPI.GetBadBlocks(ctx)
-}
-
-// AccountRangeMaxResults is the maximum number of results to be returned per call
-const AccountRangeMaxResults = 256
-
-// AccountRange enumerates all accounts in the given block and start point in paging request
-func (api *DebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, start hexutil.Bytes, maxResults int, nocode, nostorage, incompletes bool) (state.IteratorDump, error) {
- var stateDb *state.StateDB
- var err error
-
- if number, ok := blockNrOrHash.Number(); ok {
- var header *types.Header
- if number.IsAccepted() {
- header = api.eth.LastAcceptedBlock().Header()
- } else {
- block := api.eth.blockchain.GetBlockByNumber(uint64(number))
- if block == nil {
- return state.IteratorDump{}, fmt.Errorf("block #%d not found", number)
- }
- header = block.Header()
- }
- if header == nil {
- return state.IteratorDump{}, fmt.Errorf("block #%d not found", number)
- }
- stateDb, err = api.eth.BlockChain().StateAt(header.Root)
- if err != nil {
- return state.IteratorDump{}, err
- }
- } else if hash, ok := blockNrOrHash.Hash(); ok {
- block := api.eth.blockchain.GetBlockByHash(hash)
- if block == nil {
- return state.IteratorDump{}, fmt.Errorf("block %s not found", hash.Hex())
- }
- stateDb, err = api.eth.BlockChain().StateAt(block.Root())
- if err != nil {
- return state.IteratorDump{}, err
- }
- } else {
- return state.IteratorDump{}, errors.New("either block number or block hash must be specified")
- }
-
- opts := &state.DumpConfig{
- SkipCode: nocode,
- SkipStorage: nostorage,
- OnlyWithAddresses: !incompletes,
- Start: start,
- Max: uint64(maxResults),
- }
- if maxResults > AccountRangeMaxResults || maxResults <= 0 {
- opts.Max = AccountRangeMaxResults
- }
- return stateDb.IteratorDump(opts), nil
-}
-
-// StorageRangeResult is the result of a debug_storageRangeAt API call.
-type StorageRangeResult struct {
- Storage storageMap `json:"storage"`
- NextKey *common.Hash `json:"nextKey"` // nil if Storage includes the last key in the trie.
-}
-
-type storageMap map[common.Hash]storageEntry
-
-type storageEntry struct {
- Key *common.Hash `json:"key"`
- Value common.Hash `json:"value"`
-}
-
-// StorageRangeAt returns the storage at the given block height and transaction index.
-func (api *DebugAPI) StorageRangeAt(ctx context.Context, blockHash common.Hash, txIndex int, contractAddress common.Address, keyStart hexutil.Bytes, maxResult int) (StorageRangeResult, error) {
- // Retrieve the block
- block := api.eth.blockchain.GetBlockByHash(blockHash)
- if block == nil {
- return StorageRangeResult{}, fmt.Errorf("block %#x not found", blockHash)
- }
- _, _, statedb, release, err := api.eth.stateAtTransaction(ctx, block, txIndex, 0)
- if err != nil {
- return StorageRangeResult{}, err
- }
- defer release()
-
- st, err := statedb.StorageTrie(contractAddress)
- if err != nil {
- return StorageRangeResult{}, err
- }
- if st == nil {
- return StorageRangeResult{}, fmt.Errorf("account %x doesn't exist", contractAddress)
- }
- return storageRangeAt(st, keyStart, maxResult)
-}
-
-func storageRangeAt(st state.Trie, start []byte, maxResult int) (StorageRangeResult, error) {
- it := trie.NewIterator(st.NodeIterator(start))
- result := StorageRangeResult{Storage: storageMap{}}
- for i := 0; i < maxResult && it.Next(); i++ {
- _, content, _, err := rlp.Split(it.Value)
- if err != nil {
- return StorageRangeResult{}, err
- }
- e := storageEntry{Value: common.BytesToHash(content)}
- if preimage := st.GetKey(it.Key); preimage != nil {
- preimage := common.BytesToHash(preimage)
- e.Key = &preimage
- }
- result.Storage[common.BytesToHash(it.Key)] = e
- }
- // Add the 'next key' so clients can continue downloading.
- if it.Next() {
- next := common.BytesToHash(it.Key)
- result.NextKey = &next
- }
- return result, nil
-}
-
-// GetModifiedAccountsByNumber returns all accounts that have changed between the
-// two blocks specified. A change is defined as a difference in nonce, balance,
-// code hash, or storage hash.
-//
-// With one parameter, returns the list of accounts modified in the specified block.
-func (api *DebugAPI) GetModifiedAccountsByNumber(startNum uint64, endNum *uint64) ([]common.Address, error) {
- var startBlock, endBlock *types.Block
-
- startBlock = api.eth.blockchain.GetBlockByNumber(startNum)
- if startBlock == nil {
- return nil, fmt.Errorf("start block %x not found", startNum)
- }
-
- if endNum == nil {
- endBlock = startBlock
- startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash())
- if startBlock == nil {
- return nil, fmt.Errorf("block %x has no parent", endBlock.Number())
- }
- } else {
- endBlock = api.eth.blockchain.GetBlockByNumber(*endNum)
- if endBlock == nil {
- return nil, fmt.Errorf("end block %d not found", *endNum)
- }
- }
- return api.getModifiedAccounts(startBlock, endBlock)
-}
-
-// GetModifiedAccountsByHash returns all accounts that have changed between the
-// two blocks specified. A change is defined as a difference in nonce, balance,
-// code hash, or storage hash.
-//
-// With one parameter, returns the list of accounts modified in the specified block.
-func (api *DebugAPI) GetModifiedAccountsByHash(startHash common.Hash, endHash *common.Hash) ([]common.Address, error) {
- var startBlock, endBlock *types.Block
- startBlock = api.eth.blockchain.GetBlockByHash(startHash)
- if startBlock == nil {
- return nil, fmt.Errorf("start block %x not found", startHash)
- }
-
- if endHash == nil {
- endBlock = startBlock
- startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash())
- if startBlock == nil {
- return nil, fmt.Errorf("block %x has no parent", endBlock.Number())
- }
- } else {
- endBlock = api.eth.blockchain.GetBlockByHash(*endHash)
- if endBlock == nil {
- return nil, fmt.Errorf("end block %x not found", *endHash)
- }
- }
- return api.getModifiedAccounts(startBlock, endBlock)
-}
-
-func (api *DebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]common.Address, error) {
- if startBlock.Number().Uint64() >= endBlock.Number().Uint64() {
- return nil, fmt.Errorf("start block height (%d) must be less than end block height (%d)", startBlock.Number().Uint64(), endBlock.Number().Uint64())
- }
- triedb := api.eth.BlockChain().StateCache().TrieDB()
-
- oldTrie, err := trie.NewStateTrie(trie.StateTrieID(startBlock.Root()), triedb)
- if err != nil {
- return nil, err
- }
- newTrie, err := trie.NewStateTrie(trie.StateTrieID(endBlock.Root()), triedb)
- if err != nil {
- return nil, err
- }
- diff, _ := trie.NewDifferenceIterator(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}))
- iter := trie.NewIterator(diff)
-
- var dirty []common.Address
- for iter.Next() {
- key := newTrie.GetKey(iter.Key)
- if key == nil {
- return nil, fmt.Errorf("no preimage found for hash %x", iter.Key)
- }
- dirty = append(dirty, common.BytesToAddress(key))
- }
- return dirty, nil
-}
-
-// GetAccessibleState returns the first number where the node has accessible
-// state on disk. Note this being the post-state of that block and the pre-state
-// of the next block.
-// The (from, to) parameters are the sequence of blocks to search, which can go
-// either forwards or backwards
-func (api *DebugAPI) GetAccessibleState(from, to rpc.BlockNumber) (uint64, error) {
- var resolveNum = func(num rpc.BlockNumber) (uint64, error) {
- // We don't have state for pending (-2), so treat it as latest
- if num.Int64() < 0 {
- block := api.eth.blockchain.CurrentBlock()
- if block == nil {
- return 0, errors.New("current block missing")
- }
- return block.Number.Uint64(), nil
- }
- return uint64(num.Int64()), nil
- }
- var (
- start uint64
- end uint64
- delta = int64(1)
- lastLog time.Time
- err error
- )
- if start, err = resolveNum(from); err != nil {
- return 0, err
- }
- if end, err = resolveNum(to); err != nil {
- return 0, err
- }
- if start == end {
- return 0, errors.New("from and to needs to be different")
- }
- if start > end {
- delta = -1
- }
- for i := int64(start); i != int64(end); i += delta {
- if time.Since(lastLog) > 8*time.Second {
- log.Info("Finding roots", "from", start, "to", end, "at", i)
- lastLog = time.Now()
- }
- h := api.eth.BlockChain().GetHeaderByNumber(uint64(i))
- if h == nil {
- return 0, fmt.Errorf("missing header %d", i)
- }
- if ok, _ := api.eth.ChainDb().Has(h.Root[:]); ok {
- return uint64(i), nil
- }
- }
- return 0, errors.New("no state found")
-}
diff --git a/eth/api_admin.go b/eth/api_admin.go
new file mode 100644
index 0000000000..c37560923d
--- /dev/null
+++ b/eth/api_admin.go
@@ -0,0 +1,149 @@
+// (c) 2019-2020, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package eth
+
+import (
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/ava-labs/coreth/core"
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+// AdminAPI is the collection of Ethereum full node related APIs for node
+// administration.
+type AdminAPI struct {
+ eth *Ethereum
+}
+
+// NewAdminAPI creates a new instance of AdminAPI.
+func NewAdminAPI(eth *Ethereum) *AdminAPI {
+ return &AdminAPI{eth: eth}
+}
+
+// ExportChain exports the current blockchain into a local file,
+// or a range of blocks if first and last are non-nil.
+func (api *AdminAPI) ExportChain(file string, first *uint64, last *uint64) (bool, error) {
+ if first == nil && last != nil {
+ return false, errors.New("last cannot be specified without first")
+ }
+ if first != nil && last == nil {
+ head := api.eth.BlockChain().CurrentHeader().Number.Uint64()
+ last = &head
+ }
+ if _, err := os.Stat(file); err == nil {
+ // File already exists. Allowing overwrite could be a DoS vector,
+ // since the 'file' may point to arbitrary paths on the drive.
+ return false, errors.New("location would overwrite an existing file")
+ }
+ // Make sure we can create the file to export into
+ out, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
+ if err != nil {
+ return false, err
+ }
+ defer out.Close()
+
+ var writer io.Writer = out
+ if strings.HasSuffix(file, ".gz") {
+ writer = gzip.NewWriter(writer)
+ defer writer.(*gzip.Writer).Close()
+ }
+
+ // Export the blockchain
+ if first != nil {
+ if err := api.eth.BlockChain().ExportN(writer, *first, *last); err != nil {
+ return false, err
+ }
+ } else if err := api.eth.BlockChain().Export(writer); err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
+func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool {
+ for _, b := range bs {
+ if !chain.HasBlock(b.Hash(), b.NumberU64()) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// ImportChain imports a blockchain from a local file.
+func (api *AdminAPI) ImportChain(file string) (bool, error) {
+ // Make sure we can access the file to import
+ in, err := os.Open(file)
+ if err != nil {
+ return false, err
+ }
+ defer in.Close()
+
+ var reader io.Reader = in
+ if strings.HasSuffix(file, ".gz") {
+ if reader, err = gzip.NewReader(reader); err != nil {
+ return false, err
+ }
+ }
+
+ // Run the actual import in pre-configured batches
+ stream := rlp.NewStream(reader, 0)
+
+ blocks, index := make([]*types.Block, 0, 2500), 0
+ for batch := 0; ; batch++ {
+ // Load a batch of blocks from the input file
+ for len(blocks) < cap(blocks) {
+ block := new(types.Block)
+ if err := stream.Decode(block); err == io.EOF {
+ break
+ } else if err != nil {
+ return false, fmt.Errorf("block %d: failed to parse: %v", index, err)
+ }
+ blocks = append(blocks, block)
+ index++
+ }
+ if len(blocks) == 0 {
+ break
+ }
+
+ if hasAllBlocks(api.eth.BlockChain(), blocks) {
+ blocks = blocks[:0]
+ continue
+ }
+ // Import the batch and reset the buffer
+ if _, err := api.eth.BlockChain().InsertChain(blocks); err != nil {
+ return false, fmt.Errorf("batch %d: failed to insert: %v", batch, err)
+ }
+ blocks = blocks[:0]
+ }
+ return true, nil
+}
diff --git a/eth/api_backend.go b/eth/api_backend.go
index fc4aba5eb3..5e873db5bf 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -39,6 +39,7 @@ import (
"github.com/ava-labs/coreth/core/bloombits"
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/state"
+ "github.com/ava-labs/coreth/core/txpool"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/core/vm"
"github.com/ava-labs/coreth/eth/gasprice"
@@ -52,7 +53,7 @@ import (
var ErrUnfinalizedData = errors.New("cannot query unfinalized data")
-// EthAPIBackend implements ethapi.Backend for full nodes
+// EthAPIBackend implements ethapi.Backend and tracers.Backend for full nodes
type EthAPIBackend struct {
extRPCEnabled bool
allowUnprotectedTxs bool
@@ -319,7 +320,7 @@ func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction)
if err := ctx.Err(); err != nil {
return err
}
- return b.eth.txPool.AddLocal(signedTx)
+ return b.eth.txPool.Add([]*txpool.Transaction{{Tx: signedTx}}, true, false)[0]
}
func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) {
@@ -332,7 +333,10 @@ func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) {
}
func (b *EthAPIBackend) GetPoolTransaction(hash common.Hash) *types.Transaction {
- return b.eth.txPool.Get(hash)
+ if tx := b.eth.txPool.Get(hash); tx != nil {
+ return tx.Tx
+ }
+ return nil
}
func (b *EthAPIBackend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
@@ -358,15 +362,15 @@ func (b *EthAPIBackend) GetPoolNonce(ctx context.Context, addr common.Address) (
return b.eth.txPool.Nonce(addr), nil
}
-func (b *EthAPIBackend) Stats() (pending int, queued int) {
+func (b *EthAPIBackend) Stats() (runnable int, blocked int) {
return b.eth.txPool.Stats()
}
-func (b *EthAPIBackend) TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
+func (b *EthAPIBackend) TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
return b.eth.txPool.Content()
}
-func (b *EthAPIBackend) TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions) {
+func (b *EthAPIBackend) TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
return b.eth.txPool.ContentFrom(addr)
}
diff --git a/eth/api_debug.go b/eth/api_debug.go
new file mode 100644
index 0000000000..6032d83328
--- /dev/null
+++ b/eth/api_debug.go
@@ -0,0 +1,360 @@
+// (c) 2019-2020, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package eth
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/ava-labs/coreth/core/rawdb"
+ "github.com/ava-labs/coreth/core/state"
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/internal/ethapi"
+ "github.com/ava-labs/coreth/rpc"
+ "github.com/ava-labs/coreth/trie"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+// DebugAPI is the collection of Ethereum full node APIs for debugging the
+// protocol.
+type DebugAPI struct {
+ eth *Ethereum
+}
+
+// NewDebugAPI creates a new DebugAPI instance.
+func NewDebugAPI(eth *Ethereum) *DebugAPI {
+ return &DebugAPI{eth: eth}
+}
+
+// DumpBlock retrieves the entire state of the database at a given block.
+func (api *DebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error) {
+ opts := &state.DumpConfig{
+ OnlyWithAddresses: true,
+ Max: AccountRangeMaxResults, // Sanity limit over RPC
+ }
+ var header *types.Header
+ if blockNr.IsAccepted() {
+ header = api.eth.LastAcceptedBlock().Header()
+ } else {
+ block := api.eth.blockchain.GetBlockByNumber(uint64(blockNr))
+ if block == nil {
+ return state.Dump{}, fmt.Errorf("block #%d not found", blockNr)
+ }
+ header = block.Header()
+ }
+ if header == nil {
+ return state.Dump{}, fmt.Errorf("block #%d not found", blockNr)
+ }
+ stateDb, err := api.eth.BlockChain().StateAt(header.Root)
+ if err != nil {
+ return state.Dump{}, err
+ }
+ return stateDb.RawDump(opts), nil
+}
+
+// Preimage is a debug API function that returns the preimage for a sha3 hash, if known.
+func (api *DebugAPI) Preimage(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) {
+ if preimage := rawdb.ReadPreimage(api.eth.ChainDb(), hash); preimage != nil {
+ return preimage, nil
+ }
+ return nil, errors.New("unknown preimage")
+}
+
+// GetBadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
+// and returns them as a JSON list of block hashes.
+func (api *DebugAPI) GetBadBlocks(ctx context.Context) ([]*ethapi.BadBlockArgs, error) {
+ internalAPI := ethapi.NewBlockChainAPI(api.eth.APIBackend)
+ return internalAPI.GetBadBlocks(ctx)
+}
+
+// AccountRangeMaxResults is the maximum number of results to be returned per call
+const AccountRangeMaxResults = 256
+
+// AccountRange enumerates all accounts in the given block and start point in paging request
+func (api *DebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, start hexutil.Bytes, maxResults int, nocode, nostorage, incompletes bool) (state.IteratorDump, error) {
+ var stateDb *state.StateDB
+ var err error
+
+ if number, ok := blockNrOrHash.Number(); ok {
+ var header *types.Header
+ if number.IsAccepted() {
+ header = api.eth.LastAcceptedBlock().Header()
+ } else {
+ block := api.eth.blockchain.GetBlockByNumber(uint64(number))
+ if block == nil {
+ return state.IteratorDump{}, fmt.Errorf("block #%d not found", number)
+ }
+ header = block.Header()
+ }
+ if header == nil {
+ return state.IteratorDump{}, fmt.Errorf("block #%d not found", number)
+ }
+ stateDb, err = api.eth.BlockChain().StateAt(header.Root)
+ if err != nil {
+ return state.IteratorDump{}, err
+ }
+ } else if hash, ok := blockNrOrHash.Hash(); ok {
+ block := api.eth.blockchain.GetBlockByHash(hash)
+ if block == nil {
+ return state.IteratorDump{}, fmt.Errorf("block %s not found", hash.Hex())
+ }
+ stateDb, err = api.eth.BlockChain().StateAt(block.Root())
+ if err != nil {
+ return state.IteratorDump{}, err
+ }
+ } else {
+ return state.IteratorDump{}, errors.New("either block number or block hash must be specified")
+ }
+
+ opts := &state.DumpConfig{
+ SkipCode: nocode,
+ SkipStorage: nostorage,
+ OnlyWithAddresses: !incompletes,
+ Start: start,
+ Max: uint64(maxResults),
+ }
+ if maxResults > AccountRangeMaxResults || maxResults <= 0 {
+ opts.Max = AccountRangeMaxResults
+ }
+ return stateDb.IteratorDump(opts), nil
+}
+
+// StorageRangeResult is the result of a debug_storageRangeAt API call.
+type StorageRangeResult struct {
+ Storage storageMap `json:"storage"`
+ NextKey *common.Hash `json:"nextKey"` // nil if Storage includes the last key in the trie.
+}
+
+type storageMap map[common.Hash]storageEntry
+
+type storageEntry struct {
+ Key *common.Hash `json:"key"`
+ Value common.Hash `json:"value"`
+}
+
+// StorageRangeAt returns the storage at the given block height and transaction index.
+func (api *DebugAPI) StorageRangeAt(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, txIndex int, contractAddress common.Address, keyStart hexutil.Bytes, maxResult int) (StorageRangeResult, error) {
+ var block *types.Block
+
+ block, err := api.eth.APIBackend.BlockByNumberOrHash(ctx, blockNrOrHash)
+ if err != nil {
+ return StorageRangeResult{}, err
+ }
+
+ if block == nil {
+ return StorageRangeResult{}, fmt.Errorf("block %v not found", blockNrOrHash)
+ }
+ _, _, statedb, release, err := api.eth.stateAtTransaction(ctx, block, txIndex, 0)
+ if err != nil {
+ return StorageRangeResult{}, err
+ }
+ defer release()
+
+ st, err := statedb.StorageTrie(contractAddress)
+ if err != nil {
+ return StorageRangeResult{}, err
+ }
+ if st == nil {
+ return StorageRangeResult{}, fmt.Errorf("account %x doesn't exist", contractAddress)
+ }
+ return storageRangeAt(st, keyStart, maxResult)
+}
+
+func storageRangeAt(st state.Trie, start []byte, maxResult int) (StorageRangeResult, error) {
+ trieIt, err := st.NodeIterator(start)
+ if err != nil {
+ return StorageRangeResult{}, err
+ }
+ it := trie.NewIterator(trieIt)
+ result := StorageRangeResult{Storage: storageMap{}}
+ for i := 0; i < maxResult && it.Next(); i++ {
+ _, content, _, err := rlp.Split(it.Value)
+ if err != nil {
+ return StorageRangeResult{}, err
+ }
+ e := storageEntry{Value: common.BytesToHash(content)}
+ if preimage := st.GetKey(it.Key); preimage != nil {
+ preimage := common.BytesToHash(preimage)
+ e.Key = &preimage
+ }
+ result.Storage[common.BytesToHash(it.Key)] = e
+ }
+ // Add the 'next key' so clients can continue downloading.
+ if it.Next() {
+ next := common.BytesToHash(it.Key)
+ result.NextKey = &next
+ }
+ return result, nil
+}
+
+// GetModifiedAccountsByNumber returns all accounts that have changed between the
+// two blocks specified. A change is defined as a difference in nonce, balance,
+// code hash, or storage hash.
+//
+// With one parameter, returns the list of accounts modified in the specified block.
+func (api *DebugAPI) GetModifiedAccountsByNumber(startNum uint64, endNum *uint64) ([]common.Address, error) {
+ var startBlock, endBlock *types.Block
+
+ startBlock = api.eth.blockchain.GetBlockByNumber(startNum)
+ if startBlock == nil {
+ return nil, fmt.Errorf("start block %x not found", startNum)
+ }
+
+ if endNum == nil {
+ endBlock = startBlock
+ startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash())
+ if startBlock == nil {
+ return nil, fmt.Errorf("block %x has no parent", endBlock.Number())
+ }
+ } else {
+ endBlock = api.eth.blockchain.GetBlockByNumber(*endNum)
+ if endBlock == nil {
+ return nil, fmt.Errorf("end block %d not found", *endNum)
+ }
+ }
+ return api.getModifiedAccounts(startBlock, endBlock)
+}
+
+// GetModifiedAccountsByHash returns all accounts that have changed between the
+// two blocks specified. A change is defined as a difference in nonce, balance,
+// code hash, or storage hash.
+//
+// With one parameter, returns the list of accounts modified in the specified block.
+func (api *DebugAPI) GetModifiedAccountsByHash(startHash common.Hash, endHash *common.Hash) ([]common.Address, error) {
+ var startBlock, endBlock *types.Block
+ startBlock = api.eth.blockchain.GetBlockByHash(startHash)
+ if startBlock == nil {
+ return nil, fmt.Errorf("start block %x not found", startHash)
+ }
+
+ if endHash == nil {
+ endBlock = startBlock
+ startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash())
+ if startBlock == nil {
+ return nil, fmt.Errorf("block %x has no parent", endBlock.Number())
+ }
+ } else {
+ endBlock = api.eth.blockchain.GetBlockByHash(*endHash)
+ if endBlock == nil {
+ return nil, fmt.Errorf("end block %x not found", *endHash)
+ }
+ }
+ return api.getModifiedAccounts(startBlock, endBlock)
+}
+
+func (api *DebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]common.Address, error) {
+ if startBlock.Number().Uint64() >= endBlock.Number().Uint64() {
+ return nil, fmt.Errorf("start block height (%d) must be less than end block height (%d)", startBlock.Number().Uint64(), endBlock.Number().Uint64())
+ }
+ triedb := api.eth.BlockChain().StateCache().TrieDB()
+
+ oldTrie, err := trie.NewStateTrie(trie.StateTrieID(startBlock.Root()), triedb)
+ if err != nil {
+ return nil, err
+ }
+ newTrie, err := trie.NewStateTrie(trie.StateTrieID(endBlock.Root()), triedb)
+ if err != nil {
+ return nil, err
+ }
+ oldIt, err := oldTrie.NodeIterator([]byte{})
+ if err != nil {
+ return nil, err
+ }
+ newIt, err := newTrie.NodeIterator([]byte{})
+ if err != nil {
+ return nil, err
+ }
+ diff, _ := trie.NewDifferenceIterator(oldIt, newIt)
+ iter := trie.NewIterator(diff)
+
+ var dirty []common.Address
+ for iter.Next() {
+ key := newTrie.GetKey(iter.Key)
+ if key == nil {
+ return nil, fmt.Errorf("no preimage found for hash %x", iter.Key)
+ }
+ dirty = append(dirty, common.BytesToAddress(key))
+ }
+ return dirty, nil
+}
+
+// GetAccessibleState returns the first number where the node has accessible
+// state on disk. Note this being the post-state of that block and the pre-state
+// of the next block.
+// The (from, to) parameters are the sequence of blocks to search, which can go
+// either forwards or backwards
+func (api *DebugAPI) GetAccessibleState(from, to rpc.BlockNumber) (uint64, error) {
+ var resolveNum = func(num rpc.BlockNumber) (uint64, error) {
+ // We don't have state for pending (-2), so treat it as latest
+ if num.Int64() < 0 {
+ block := api.eth.blockchain.CurrentBlock()
+ if block == nil {
+ return 0, errors.New("current block missing")
+ }
+ return block.Number.Uint64(), nil
+ }
+ return uint64(num.Int64()), nil
+ }
+ var (
+ start uint64
+ end uint64
+ delta = int64(1)
+ lastLog time.Time
+ err error
+ )
+ if start, err = resolveNum(from); err != nil {
+ return 0, err
+ }
+ if end, err = resolveNum(to); err != nil {
+ return 0, err
+ }
+ if start == end {
+ return 0, errors.New("from and to needs to be different")
+ }
+ if start > end {
+ delta = -1
+ }
+ for i := int64(start); i != int64(end); i += delta {
+ if time.Since(lastLog) > 8*time.Second {
+ log.Info("Finding roots", "from", start, "to", end, "at", i)
+ lastLog = time.Now()
+ }
+ h := api.eth.BlockChain().GetHeaderByNumber(uint64(i))
+ if h == nil {
+ return 0, fmt.Errorf("missing header %d", i)
+ }
+ if ok, _ := api.eth.ChainDb().Has(h.Root[:]); ok {
+ return uint64(i), nil
+ }
+ }
+ return 0, errors.New("no state found")
+}
diff --git a/eth/backend.go b/eth/backend.go
index 7cabb9333f..e0eb37f667 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -30,6 +30,7 @@ package eth
import (
"errors"
"fmt"
+ "math/big"
"sync"
"time"
@@ -42,6 +43,7 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/state/pruner"
"github.com/ava-labs/coreth/core/txpool"
+ "github.com/ava-labs/coreth/core/txpool/legacypool"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/core/vm"
"github.com/ava-labs/coreth/eth/ethconfig"
@@ -75,7 +77,8 @@ type Ethereum struct {
config *Config
// Handlers
- txPool *txpool.TxPool
+ txPool *txpool.TxPool
+
blockchain *core.BlockChain
// DB interfaces
@@ -224,7 +227,12 @@ func New(
eth.bloomIndexer.Start(eth.blockchain)
config.TxPool.Journal = ""
- eth.txPool = txpool.NewTxPool(config.TxPool, eth.blockchain.Config(), eth.blockchain)
+ legacyPool := legacypool.New(config.TxPool, eth.blockchain)
+
+ eth.txPool, err = txpool.New(new(big.Int).SetUint64(config.TxPool.PriceLimit), eth.blockchain, []txpool.SubPool{legacyPool})
+ if err != nil {
+ return nil, err
+ }
eth.miner = miner.New(eth, &config.Miner, eth.blockchain.Config(), eth.EventMux(), eth.engine, clock)
@@ -350,7 +358,7 @@ func (s *Ethereum) Start() {
func (s *Ethereum) Stop() error {
s.bloomIndexer.Close()
close(s.closeBloomHandler)
- s.txPool.Stop()
+ s.txPool.Close()
s.blockchain.Stop()
s.engine.Close()
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 49dc251ed5..0436b345d6 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -30,7 +30,7 @@ import (
"time"
"github.com/ava-labs/coreth/core"
- "github.com/ava-labs/coreth/core/txpool"
+ "github.com/ava-labs/coreth/core/txpool/legacypool"
"github.com/ava-labs/coreth/eth/gasprice"
"github.com/ava-labs/coreth/miner"
"github.com/ethereum/go-ethereum/common"
@@ -60,7 +60,7 @@ func NewDefaultConfig() Config {
SnapshotCache: 256,
AcceptedCacheSize: 32,
Miner: miner.Config{},
- TxPool: txpool.DefaultConfig,
+ TxPool: legacypool.DefaultConfig,
RPCGasCap: 25000000,
RPCEVMTimeout: 5 * time.Second,
GPO: DefaultFullGPOConfig,
@@ -110,7 +110,7 @@ type Config struct {
Miner miner.Config
// Transaction pool options
- TxPool txpool.Config
+ TxPool legacypool.Config
// Gas Price Oracle options
GPO gasprice.Config
diff --git a/eth/filters/filter.go b/eth/filters/filter.go
index e8e0c1b370..d725bf971e 100644
--- a/eth/filters/filter.go
+++ b/eth/filters/filter.go
@@ -130,33 +130,45 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
}
return f.blockLogs(ctx, header)
}
+
+ var (
+ beginPending = f.begin == rpc.PendingBlockNumber.Int64()
+ endPending = f.end == rpc.PendingBlockNumber.Int64()
+ )
+
+ // special case for pending logs
+ if beginPending && !endPending {
+ return nil, errors.New("invalid block range")
+ }
+
// Short-cut if all we care about is pending logs
- if f.begin == rpc.PendingBlockNumber.Int64() {
- if f.end != rpc.PendingBlockNumber.Int64() {
- return nil, errors.New("invalid block range")
- }
+ if beginPending && endPending {
// There is no pending block, if the request specifies only the pending block, then return nil.
return nil, nil
}
- // Figure out the limits of the filter range
- // LatestBlockNumber is transformed into the last accepted block in HeaderByNumber
- // so it is left in place here.
- header, err := f.sys.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
- if err != nil {
- return nil, err
- }
- if header == nil {
- return nil, nil
+
+ resolveSpecial := func(number int64) (int64, error) {
+ var hdr *types.Header
+ switch {
+ case number < 0:
+ // we should return head here since we've already captured
+ // that we need to get the pending logs in the pending boolean above
+ hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
+ if hdr == nil {
+ return 0, errors.New("latest header not found")
+ }
+ default:
+ return number, nil
+ }
+ return hdr.Number.Int64(), nil
}
- var (
- head = header.Number.Uint64()
- end = uint64(f.end)
- )
- if f.begin < 0 {
- f.begin = int64(head)
+ var err error
+ // range query need to resolve the special begin/end block number
+ if f.begin, err = resolveSpecial(f.begin); err != nil {
+ return nil, err
}
- if f.end < 0 {
- end = head
+ if f.end, err = resolveSpecial(f.end); err != nil {
+ return nil, err
}
// When querying unfinalized data without a populated end block, it is
@@ -165,52 +177,87 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
// We error in this case to prevent a bad UX where the caller thinks there
// are no logs from the specified beginning to end (when in reality there may
// be some).
- if end < uint64(f.begin) {
- return nil, fmt.Errorf("begin block %d is greater than end block %d", f.begin, end)
+ if f.end < f.begin {
+ return nil, fmt.Errorf("begin block %d is greater than end block %d", f.begin, f.end)
}
// If the requested range of blocks exceeds the maximum number of blocks allowed by the backend
// return an error instead of searching for the logs.
- if maxBlocks := f.sys.backend.GetMaxBlocksPerRequest(); int64(end)-f.begin >= maxBlocks && maxBlocks > 0 {
- return nil, fmt.Errorf("requested too many blocks from %d to %d, maximum is set to %d", f.begin, int64(end), maxBlocks)
+ if maxBlocks := f.sys.backend.GetMaxBlocksPerRequest(); f.end-f.begin >= maxBlocks && maxBlocks > 0 {
+ return nil, fmt.Errorf("requested too many blocks from %d to %d, maximum is set to %d", f.begin, f.end, maxBlocks)
}
- // Gather all indexed logs, and finish with non indexed ones
+
+ logChan, errChan := f.rangeLogsAsync(ctx)
+ var logs []*types.Log
+ for {
+ select {
+ case log := <-logChan:
+ logs = append(logs, log)
+ case err := <-errChan:
+ if err != nil {
+ // if an error occurs during extraction, we do return the extracted data
+ return logs, err
+ }
+ return logs, nil
+ }
+ }
+}
+
+// rangeLogsAsync retrieves block-range logs that match the filter criteria asynchronously,
+// it creates and returns two channels: one for delivering log data, and one for reporting errors.
+func (f *Filter) rangeLogsAsync(ctx context.Context) (chan *types.Log, chan error) {
var (
- logs []*types.Log
- size, sections = f.sys.backend.BloomStatus()
+ logChan = make(chan *types.Log)
+ errChan = make(chan error)
)
- if indexed := sections * size; indexed > uint64(f.begin) {
- if indexed > end {
- logs, err = f.indexedLogs(ctx, end)
- } else {
- logs, err = f.indexedLogs(ctx, indexed-1)
+
+ go func() {
+ defer func() {
+ close(errChan)
+ close(logChan)
+ }()
+
+ // Gather all indexed logs, and finish with non indexed ones
+ var (
+ end = uint64(f.end)
+ size, sections = f.sys.backend.BloomStatus()
+ err error
+ )
+ if indexed := sections * size; indexed > uint64(f.begin) {
+ if indexed > end {
+ indexed = end + 1
+ }
+ if err = f.indexedLogs(ctx, indexed-1, logChan); err != nil {
+ errChan <- err
+ return
+ }
}
- if err != nil {
- return logs, err
+
+ if err := f.unindexedLogs(ctx, end, logChan); err != nil {
+ errChan <- err
+ return
}
- }
- rest, err := f.unindexedLogs(ctx, end)
- logs = append(logs, rest...)
- return logs, err
+
+ errChan <- nil
+ }()
+
+ return logChan, errChan
}
// indexedLogs returns the logs matching the filter criteria based on the bloom
// bits indexed available locally or via the network.
-func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) {
+func (f *Filter) indexedLogs(ctx context.Context, end uint64, logChan chan *types.Log) error {
// Create a matcher session and request servicing from the backend
matches := make(chan uint64, 64)
session, err := f.matcher.Start(ctx, uint64(f.begin), end, matches)
if err != nil {
- return nil, err
+ return err
}
defer session.Close()
f.sys.backend.ServiceFilter(ctx, session)
- // Iterate over the matches until exhausted or context closed
- var logs []*types.Log
-
for {
select {
case number, ok := <-matches:
@@ -220,47 +267,50 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, err
if err == nil {
f.begin = int64(end) + 1
}
- return logs, err
+ return err
}
f.begin = int64(number) + 1
// Retrieve the suggested block and pull any truly matching logs
header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(number))
if header == nil || err != nil {
- return logs, err
+ return err
}
found, err := f.checkMatches(ctx, header)
if err != nil {
- return logs, err
+ return err
+ }
+ for _, log := range found {
+ logChan <- log
}
- logs = append(logs, found...)
case <-ctx.Done():
- return logs, ctx.Err()
+ return ctx.Err()
}
}
}
// unindexedLogs returns the logs matching the filter criteria based on raw block
// iteration and bloom matching.
-func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) {
- var logs []*types.Log
-
+func (f *Filter) unindexedLogs(ctx context.Context, end uint64, logChan chan *types.Log) error {
for ; f.begin <= int64(end); f.begin++ {
- if f.begin%10 == 0 && ctx.Err() != nil {
- return logs, ctx.Err()
- }
header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin))
if header == nil || err != nil {
- return logs, err
+ return err
}
found, err := f.blockLogs(ctx, header)
if err != nil {
- return logs, err
+ return err
+ }
+ for _, log := range found {
+ select {
+ case logChan <- log:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
}
- logs = append(logs, found...)
}
- return logs, nil
+ return nil
}
// blockLogs returns the logs matching the filter criteria within a single block.
diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go
index ababe85555..0e64d51723 100644
--- a/eth/filters/filter_system.go
+++ b/eth/filters/filter_system.go
@@ -519,15 +519,6 @@ func (es *EventSystem) handlePendingLogs(filters filterIndex, ev []*types.Log) {
}
}
-func (es *EventSystem) handleRemovedLogs(filters filterIndex, ev core.RemovedLogsEvent) {
- for _, f := range filters[LogsSubscription] {
- matchedLogs := filterLogs(ev.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
- if len(matchedLogs) > 0 {
- f.logs <- matchedLogs
- }
- }
-}
-
func (es *EventSystem) handleTxsEvent(filters filterIndex, ev core.NewTxsEvent, accepted bool) {
for _, f := range filters[PendingTransactionsSubscription] {
f.txs <- ev.Txs
@@ -579,7 +570,7 @@ func (es *EventSystem) eventLoop() {
case ev := <-es.logsAcceptedCh:
es.handleAcceptedLogs(index, ev)
case ev := <-es.rmLogsCh:
- es.handleRemovedLogs(index, ev)
+ es.handleLogs(index, ev.Logs)
case ev := <-es.pendingLogsCh:
es.handlePendingLogs(index, ev)
case ev := <-es.chainCh:
diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go
index ccfcfab8b0..7666da34c0 100644
--- a/eth/filters/filter_test.go
+++ b/eth/filters/filter_test.go
@@ -28,16 +28,21 @@ package filters
import (
"context"
+ "encoding/json"
"math/big"
- "reflect"
+ "strings"
"testing"
+ "time"
+ "github.com/ava-labs/coreth/accounts/abi"
"github.com/ava-labs/coreth/consensus/dummy"
"github.com/ava-labs/coreth/core"
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/core/vm"
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/rpc"
+ "github.com/ava-labs/coreth/trie"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/stretchr/testify/require"
@@ -115,10 +120,48 @@ func BenchmarkFilters(b *testing.B) {
func TestFilters(t *testing.T) {
var (
- db, _ = rawdb.NewLevelDBDatabase(t.TempDir(), 0, 0, "", false)
- _, sys = newTestFilterSystem(t, db, Config{})
+ db = rawdb.NewMemoryDatabase()
+ _, sys = newTestFilterSystem(t, db, Config{})
+ // Sender account
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr = crypto.PubkeyToAddress(key1.PublicKey)
+ signer = types.NewLondonSigner(big.NewInt(1))
+ // Logging contract
+ contract = common.Address{0xfe}
+ contract2 = common.Address{0xff}
+ abiStr = `[{"inputs":[],"name":"log0","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"}],"name":"log1","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"},{"internalType":"uint256","name":"t2","type":"uint256"}],"name":"log2","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"},{"internalType":"uint256","name":"t2","type":"uint256"},{"internalType":"uint256","name":"t3","type":"uint256"}],"name":"log3","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"},{"internalType":"uint256","name":"t2","type":"uint256"},{"internalType":"uint256","name":"t3","type":"uint256"},{"internalType":"uint256","name":"t4","type":"uint256"}],"name":"log4","outputs":[],"stateMutability":"nonpayable","type":"function"}]`
+ /*
+ // SPDX-License-Identifier: GPL-3.0
+ pragma solidity >=0.7.0 <0.9.0;
+ contract Logger {
+ function log0() external {
+ assembly {
+ log0(0, 0)
+ }
+ }
+ function log1(uint t1) external {
+ assembly {
+ log1(0, 0, t1)
+ }
+ }
+ function log2(uint t1, uint t2) external {
+ assembly {
+ log2(0, 0, t1, t2)
+ }
+ }
+ function log3(uint t1, uint t2, uint t3) external {
+ assembly {
+ log3(0, 0, t1, t2, t3)
+ }
+ }
+ function log4(uint t1, uint t2, uint t3, uint t4) external {
+ assembly {
+ log4(0, 0, t1, t2, t3, t4)
+ }
+ }
+ }
+ */
+ bytecode = common.FromHex("608060405234801561001057600080fd5b50600436106100575760003560e01c80630aa731851461005c5780632a4c08961461006657806378b9a1f314610082578063c670f8641461009e578063c683d6a3146100ba575b600080fd5b6100646100d6565b005b610080600480360381019061007b9190610143565b6100dc565b005b61009c60048036038101906100979190610196565b6100e8565b005b6100b860048036038101906100b391906101d6565b6100f2565b005b6100d460048036038101906100cf9190610203565b6100fa565b005b600080a0565b808284600080a3505050565b8082600080a25050565b80600080a150565b80828486600080a450505050565b600080fd5b6000819050919050565b6101208161010d565b811461012b57600080fd5b50565b60008135905061013d81610117565b92915050565b60008060006060848603121561015c5761015b610108565b5b600061016a8682870161012e565b935050602061017b8682870161012e565b925050604061018c8682870161012e565b9150509250925092565b600080604083850312156101ad576101ac610108565b5b60006101bb8582860161012e565b92505060206101cc8582860161012e565b9150509250929050565b6000602082840312156101ec576101eb610108565b5b60006101fa8482850161012e565b91505092915050565b6000806000806080858703121561021d5761021c610108565b5b600061022b8782880161012e565b945050602061023c8782880161012e565b935050604061024d8782880161012e565b925050606061025e8782880161012e565b9150509295919450925056fea264697066735822122073a4b156f487e59970dc1ef449cc0d51467268f676033a17188edafcee861f9864736f6c63430008110033")
hash1 = common.BytesToHash([]byte("topic1"))
hash2 = common.BytesToHash([]byte("topic2"))
@@ -126,139 +169,199 @@ func TestFilters(t *testing.T) {
hash4 = common.BytesToHash([]byte("topic4"))
gspec = &core.Genesis{
- Config: params.TestChainConfig,
- Alloc: core.GenesisAlloc{addr: {Balance: big.NewInt(1000000)}},
+ Config: params.TestChainConfig,
+ Alloc: core.GenesisAlloc{
+ addr: {Balance: big.NewInt(0).Mul(big.NewInt(100), big.NewInt(params.Ether))},
+ contract: {Balance: big.NewInt(0), Code: bytecode},
+ contract2: {Balance: big.NewInt(0), Code: bytecode},
+ },
BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee),
}
)
- defer db.Close()
- _, chain, receipts, err := core.GenerateChainWithGenesis(gspec, dummy.NewFaker(), 1000, 10, func(i int, gen *core.BlockGen) {
+ contractABI, err := abi.JSON(strings.NewReader(abiStr))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Hack: GenerateChainWithGenesis creates a new db.
+ // Commit the genesis manually and use GenerateChain.
+ genesis, err := gspec.Commit(db, trie.NewDatabase(db))
+ if err != nil {
+ t.Fatal(err)
+ }
+ chain, _, err := core.GenerateChain(gspec.Config, gspec.ToBlock(), dummy.NewFaker(), db, 1000, 10, func(i int, gen *core.BlockGen) {
switch i {
case 1:
- receipt := types.NewReceipt(nil, false, 0)
- receipt.Logs = []*types.Log{
- {
- Address: addr,
- Topics: []common.Hash{hash1},
- },
+ data, err := contractABI.Pack("log1", hash1.Big())
+ if err != nil {
+ t.Fatal(err)
}
- gen.AddUncheckedReceipt(receipt)
- gen.AddUncheckedTx(types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, gen.BaseFee(), nil))
+ tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
+ Nonce: 0,
+ GasPrice: gen.BaseFee(),
+ Gas: 30000,
+ To: &contract,
+ Data: data,
+ }), signer, key1)
+ gen.AddTx(tx)
+ tx2, _ := types.SignTx(types.NewTx(&types.LegacyTx{
+ Nonce: 1,
+ GasPrice: gen.BaseFee(),
+ Gas: 30000,
+ To: &contract2,
+ Data: data,
+ }), signer, key1)
+ gen.AddTx(tx2)
case 2:
- receipt := types.NewReceipt(nil, false, 0)
- receipt.Logs = []*types.Log{
- {
- Address: addr,
- Topics: []common.Hash{hash2},
- },
+ data, err := contractABI.Pack("log2", hash2.Big(), hash1.Big())
+ if err != nil {
+ t.Fatal(err)
}
- gen.AddUncheckedReceipt(receipt)
- gen.AddUncheckedTx(types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, gen.BaseFee(), nil))
-
+ tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
+ Nonce: 2,
+ GasPrice: gen.BaseFee(),
+ Gas: 30000,
+ To: &contract,
+ Data: data,
+ }), signer, key1)
+ gen.AddTx(tx)
case 998:
- receipt := types.NewReceipt(nil, false, 0)
- receipt.Logs = []*types.Log{
- {
- Address: addr,
- Topics: []common.Hash{hash3},
- },
+ data, err := contractABI.Pack("log1", hash3.Big())
+ if err != nil {
+ t.Fatal(err)
}
- gen.AddUncheckedReceipt(receipt)
- gen.AddUncheckedTx(types.NewTransaction(998, common.HexToAddress("0x998"), big.NewInt(998), 998, gen.BaseFee(), nil))
+ tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
+ Nonce: 3,
+ GasPrice: gen.BaseFee(),
+ Gas: 30000,
+ To: &contract2,
+ Data: data,
+ }), signer, key1)
+ gen.AddTx(tx)
case 999:
- receipt := types.NewReceipt(nil, false, 0)
- receipt.Logs = []*types.Log{
- {
- Address: addr,
- Topics: []common.Hash{hash4},
- },
+ data, err := contractABI.Pack("log1", hash4.Big())
+ if err != nil {
+ t.Fatal(err)
}
- gen.AddUncheckedReceipt(receipt)
- gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil))
+ tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
+ Nonce: 4,
+ GasPrice: gen.BaseFee(),
+ Gas: 30000,
+ To: &contract,
+ Data: data,
+ }), signer, key1)
+ gen.AddTx(tx)
}
})
require.NoError(t, err)
- // The test txs are not properly signed, can't simply create a chain
- // and then import blocks. TODO(rjl493456442) try to get rid of the
- // manual database writes.
- gspec.MustCommit(db)
- for i, block := range chain {
- rawdb.WriteBlock(db, block)
- rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
- rawdb.WriteHeadBlockHash(db, block.Hash())
- rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i])
+ bc, err := core.NewBlockChain(db, core.DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, genesis.Hash(), false)
+ if err != nil {
+ t.Fatal(err)
}
-
- filter, err := sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}})
- require.NoError(t, err)
- logs, _ := filter.Logs(context.Background())
- if len(logs) != 4 {
- t.Error("expected 4 log, got", len(logs))
+ defer bc.Stop()
+ _, err = bc.InsertChain(chain)
+ if err != nil {
+ t.Fatal(err)
}
for i, tc := range []struct {
- f *Filter
- wantHashes []common.Hash
+ f *Filter
+ want string
+ err string
}{
{
- mustNewRangeFilter(t, sys, 900, 999, []common.Address{addr}, [][]common.Hash{{hash3}}),
- []common.Hash{hash3},
+ f: sys.NewBlockFilter(chain[2].Hash(), []common.Address{contract}, nil),
+ want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0x5d5342957475422c666fe7675770c7d27cabe33a0350f7f696b6d43e8f596186","transactionIndex":"0x0","blockHash":"0xce06191cc3a417a7dda8e7086e62db1f14b6dd6dae87184a8fafd9100d798ae7","logIndex":"0x0","removed":false}]`,
+ }, {
+ f: mustNewRangeFilter(t, sys, 0, int64(rpc.LatestBlockNumber), []common.Address{contract}, [][]common.Hash{{hash1, hash2, hash3, hash4}}),
+ want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0x59e5726208cfe27fc80d1074333b71ed6762ef241810c6ed5d1f0f01c8a21903","transactionIndex":"0x0","blockHash":"0x18eb08bf73a9115f521d053cf4a6d65ea869d4774bb64ce762abb43c266aa399","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0x5d5342957475422c666fe7675770c7d27cabe33a0350f7f696b6d43e8f596186","transactionIndex":"0x0","blockHash":"0xce06191cc3a417a7dda8e7086e62db1f14b6dd6dae87184a8fafd9100d798ae7","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x1d934f91812ca2a5223d9d35eef0a4c8f1a837d7882ac510ddab40efc216f611","transactionIndex":"0x0","blockHash":"0x7653f439a5f9f6acef0fc36c2a0e5fc4d0046a0b1fbc59ab2ffeebae80c6ca31","logIndex":"0x0","removed":false}]`,
+ }, {
+ f: mustNewRangeFilter(t, sys, 900, 999, []common.Address{contract}, [][]common.Hash{{hash3}}),
}, {
- mustNewRangeFilter(t, sys, 990, int64(rpc.LatestBlockNumber), []common.Address{addr}, [][]common.Hash{{hash3}}),
- []common.Hash{hash3},
+ f: mustNewRangeFilter(t, sys, 990, int64(rpc.LatestBlockNumber), []common.Address{contract2}, [][]common.Hash{{hash3}}),
+ want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0xa1de840b4d4f0fcdf52025d485914ea18ca832759483875b88765637509c9433","transactionIndex":"0x0","blockHash":"0xb9ad02847cc5298618df86bb3749efb240f2abb8cdff068a2f49c56cd2323bba","logIndex":"0x0","removed":false}]`,
}, {
- mustNewRangeFilter(t, sys, 1, 10, nil, [][]common.Hash{{hash1, hash2}}),
- []common.Hash{hash1, hash2},
+ f: mustNewRangeFilter(t, sys, 1, 10, []common.Address{contract}, [][]common.Hash{{hash2}, {hash1}}),
+ want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0x5d5342957475422c666fe7675770c7d27cabe33a0350f7f696b6d43e8f596186","transactionIndex":"0x0","blockHash":"0xce06191cc3a417a7dda8e7086e62db1f14b6dd6dae87184a8fafd9100d798ae7","logIndex":"0x0","removed":false}]`,
}, {
- mustNewRangeFilter(t, sys, 0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}}),
- nil,
+ f: mustNewRangeFilter(t, sys, 1, 10, nil, [][]common.Hash{{hash1, hash2}}),
+ want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0x59e5726208cfe27fc80d1074333b71ed6762ef241810c6ed5d1f0f01c8a21903","transactionIndex":"0x0","blockHash":"0x18eb08bf73a9115f521d053cf4a6d65ea869d4774bb64ce762abb43c266aa399","logIndex":"0x0","removed":false},{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xf19672f1a7855ed5cec48b529e223341fbce7c9422ea6d547f2fc29fba8371c1","transactionIndex":"0x1","blockHash":"0x18eb08bf73a9115f521d053cf4a6d65ea869d4774bb64ce762abb43c266aa399","logIndex":"0x1","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0x5d5342957475422c666fe7675770c7d27cabe33a0350f7f696b6d43e8f596186","transactionIndex":"0x0","blockHash":"0xce06191cc3a417a7dda8e7086e62db1f14b6dd6dae87184a8fafd9100d798ae7","logIndex":"0x0","removed":false}]`,
}, {
- mustNewRangeFilter(t, sys, 0, int64(rpc.LatestBlockNumber), []common.Address{common.BytesToAddress([]byte("failmenow"))}, nil),
- nil,
+ f: mustNewRangeFilter(t, sys, 0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}}),
}, {
- mustNewRangeFilter(t, sys, 0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}, {hash1}}),
- nil,
+ f: mustNewRangeFilter(t, sys, 0, int64(rpc.LatestBlockNumber), []common.Address{common.BytesToAddress([]byte("failmenow"))}, nil),
}, {
- mustNewRangeFilter(t, sys, int64(rpc.LatestBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), []common.Hash{hash4},
+ f: mustNewRangeFilter(t, sys, 0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}, {hash1}}),
+ }, {
+ f: mustNewRangeFilter(t, sys, int64(rpc.LatestBlockNumber), int64(rpc.LatestBlockNumber), nil, nil),
+ want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x1d934f91812ca2a5223d9d35eef0a4c8f1a837d7882ac510ddab40efc216f611","transactionIndex":"0x0","blockHash":"0x7653f439a5f9f6acef0fc36c2a0e5fc4d0046a0b1fbc59ab2ffeebae80c6ca31","logIndex":"0x0","removed":false}]`,
}, {
// Note: modified from go-ethereum since we don't have FinalizedBlock
- mustNewRangeFilter(t, sys, int64(rpc.AcceptedBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), []common.Hash{hash4},
+ f: mustNewRangeFilter(t, sys, int64(rpc.AcceptedBlockNumber), int64(rpc.LatestBlockNumber), nil, nil),
+ want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x1d934f91812ca2a5223d9d35eef0a4c8f1a837d7882ac510ddab40efc216f611","transactionIndex":"0x0","blockHash":"0x7653f439a5f9f6acef0fc36c2a0e5fc4d0046a0b1fbc59ab2ffeebae80c6ca31","logIndex":"0x0","removed":false}]`,
}, {
// Note: modified from go-ethereum since we don't have FinalizedBlock
- mustNewRangeFilter(t, sys, int64(rpc.AcceptedBlockNumber), int64(rpc.AcceptedBlockNumber), nil, nil), []common.Hash{hash4},
+ f: mustNewRangeFilter(t, sys, int64(rpc.AcceptedBlockNumber), int64(rpc.AcceptedBlockNumber), nil, nil),
+ want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x1d934f91812ca2a5223d9d35eef0a4c8f1a837d7882ac510ddab40efc216f611","transactionIndex":"0x0","blockHash":"0x7653f439a5f9f6acef0fc36c2a0e5fc4d0046a0b1fbc59ab2ffeebae80c6ca31","logIndex":"0x0","removed":false}]`,
}, {
// Note: modified from go-ethereum since we don't have FinalizedBlock
- mustNewRangeFilter(t, sys, int64(rpc.LatestBlockNumber), -3, nil, nil), []common.Hash{hash4},
+ f: mustNewRangeFilter(t, sys, int64(rpc.LatestBlockNumber), int64(rpc.AcceptedBlockNumber), nil, nil),
+ want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x1d934f91812ca2a5223d9d35eef0a4c8f1a837d7882ac510ddab40efc216f611","transactionIndex":"0x0","blockHash":"0x7653f439a5f9f6acef0fc36c2a0e5fc4d0046a0b1fbc59ab2ffeebae80c6ca31","logIndex":"0x0","removed":false}]`,
}, {
- // Note: modified from go-ethereum since we don't have SafeBlock
- mustNewRangeFilter(t, sys, int64(rpc.AcceptedBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), []common.Hash{hash4},
+ // Note: modified from go-ethereum since we don't have FinalizedBlock
+ f: mustNewRangeFilter(t, sys, int64(rpc.AcceptedBlockNumber), int64(rpc.LatestBlockNumber), nil, nil),
+ want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x1d934f91812ca2a5223d9d35eef0a4c8f1a837d7882ac510ddab40efc216f611","transactionIndex":"0x0","blockHash":"0x7653f439a5f9f6acef0fc36c2a0e5fc4d0046a0b1fbc59ab2ffeebae80c6ca31","logIndex":"0x0","removed":false}]`,
}, {
- // Note: modified from go-ethereum since we don't have SafeBlock
- mustNewRangeFilter(t, sys, int64(rpc.AcceptedBlockNumber), int64(rpc.AcceptedBlockNumber), nil, nil), []common.Hash{hash4},
+ // Note: modified from go-ethereum since we don't have FinalizedBlock
+ f: mustNewRangeFilter(t, sys, int64(rpc.AcceptedBlockNumber), int64(rpc.AcceptedBlockNumber), nil, nil),
+ want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x1d934f91812ca2a5223d9d35eef0a4c8f1a837d7882ac510ddab40efc216f611","transactionIndex":"0x0","blockHash":"0x7653f439a5f9f6acef0fc36c2a0e5fc4d0046a0b1fbc59ab2ffeebae80c6ca31","logIndex":"0x0","removed":false}]`,
}, {
- // Note: modified from go-ethereum since we don't have SafeBlock
- mustNewRangeFilter(t, sys, int64(rpc.LatestBlockNumber), int64(rpc.AcceptedBlockNumber), nil, nil), []common.Hash{hash4},
- },
- {
- mustNewRangeFilter(t, sys, int64(rpc.PendingBlockNumber), int64(rpc.PendingBlockNumber), nil, nil), nil,
+ // Note: modified from go-ethereum since we don't have FinalizedBlock
+ f: mustNewRangeFilter(t, sys, int64(rpc.LatestBlockNumber), int64(rpc.AcceptedBlockNumber), nil, nil),
+ want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x1d934f91812ca2a5223d9d35eef0a4c8f1a837d7882ac510ddab40efc216f611","transactionIndex":"0x0","blockHash":"0x7653f439a5f9f6acef0fc36c2a0e5fc4d0046a0b1fbc59ab2ffeebae80c6ca31","logIndex":"0x0","removed":false}]`,
+ }, {
+ // Note: modified from go-ethereum since we don't have a pending block
+ f: mustNewRangeFilter(t, sys, int64(rpc.PendingBlockNumber), int64(rpc.PendingBlockNumber), nil, nil),
+ }, {
+ // Note: modified from go-ethereum since we don't have a pending block
+ f: mustNewRangeFilter(t, sys, int64(rpc.LatestBlockNumber), int64(rpc.PendingBlockNumber), nil, nil),
+ want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x1d934f91812ca2a5223d9d35eef0a4c8f1a837d7882ac510ddab40efc216f611","transactionIndex":"0x0","blockHash":"0x7653f439a5f9f6acef0fc36c2a0e5fc4d0046a0b1fbc59ab2ffeebae80c6ca31","logIndex":"0x0","removed":false}]`,
+ }, {
+ f: mustNewRangeFilter(t, sys, int64(rpc.PendingBlockNumber), int64(rpc.LatestBlockNumber), nil, nil),
+ err: "invalid block range",
},
} {
- logs, _ := tc.f.Logs(context.Background())
- var haveHashes []common.Hash
- for _, l := range logs {
- haveHashes = append(haveHashes, l.Topics[0])
- }
- if have, want := len(haveHashes), len(tc.wantHashes); have != want {
- t.Fatalf("test %d, have %d logs, want %d", i, have, want)
+ logs, err := tc.f.Logs(context.Background())
+ if err == nil && tc.err != "" {
+ t.Fatalf("test %d, expected error %q, got nil", i, tc.err)
+ } else if err != nil && err.Error() != tc.err {
+ t.Fatalf("test %d, expected error %q, got %q", i, tc.err, err.Error())
}
- if len(haveHashes) == 0 {
+ if tc.want == "" && len(logs) == 0 {
continue
}
- if !reflect.DeepEqual(tc.wantHashes, haveHashes) {
- t.Fatalf("test %d, have %v want %v", i, haveHashes, tc.wantHashes)
+ have, err := json.Marshal(logs)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(have) != tc.want {
+ t.Fatalf("test %d, have:\n%s\nwant:\n%s", i, have, tc.want)
}
}
+
+ t.Run("timeout", func(t *testing.T) {
+ f := mustNewRangeFilter(t, sys, 0, -1, nil, nil)
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-time.Hour))
+ defer cancel()
+ _, err := f.Logs(ctx)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if err != context.DeadlineExceeded {
+ t.Fatalf("expected context.DeadlineExceeded, got %v", err)
+ }
+ })
}
func mustNewRangeFilter(t *testing.T, sys *FilterSystem, begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter {
diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go
index 8940640a6e..94de3e2792 100644
--- a/eth/gasprice/feehistory.go
+++ b/eth/gasprice/feehistory.go
@@ -31,12 +31,12 @@ import (
"errors"
"fmt"
"math/big"
- "sort"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/rpc"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
+ "golang.org/x/exp/slices"
)
var (
@@ -46,26 +46,16 @@ var (
)
// txGasAndReward is sorted in ascending order based on reward
-type (
- txGasAndReward struct {
- gasUsed uint64
- reward *big.Int
- }
- sortGasAndReward []txGasAndReward
- slimBlock struct {
- GasUsed uint64
- GasLimit uint64
- BaseFee *big.Int
- Txs []txGasAndReward
- }
-)
-
-func (s sortGasAndReward) Len() int { return len(s) }
-func (s sortGasAndReward) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
+type txGasAndReward struct {
+ gasUsed uint64
+ reward *big.Int
}
-func (s sortGasAndReward) Less(i, j int) bool {
- return s[i].reward.Cmp(s[j].reward) < 0
+
+type slimBlock struct {
+ GasUsed uint64
+ GasLimit uint64
+ BaseFee *big.Int
+ Txs []txGasAndReward
}
// processBlock prepares a [slimBlock] from a retrieved block and list of
@@ -77,12 +67,14 @@ func processBlock(block *types.Block, receipts types.Receipts) *slimBlock {
}
sb.GasUsed = block.GasUsed()
sb.GasLimit = block.GasLimit()
- sorter := make(sortGasAndReward, len(block.Transactions()))
+ sorter := make([]txGasAndReward, len(block.Transactions()))
for i, tx := range block.Transactions() {
reward, _ := tx.EffectiveGasTip(sb.BaseFee)
sorter[i] = txGasAndReward{gasUsed: receipts[i].GasUsed, reward: reward}
}
- sort.Stable(sorter)
+ slices.SortStableFunc(sorter, func(a, b txGasAndReward) int {
+ return a.reward.Cmp(b.reward)
+ })
sb.Txs = sorter
return &sb
}
diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go
index 2c652c07da..31a2c7b554 100644
--- a/eth/gasprice/gasprice.go
+++ b/eth/gasprice/gasprice.go
@@ -29,7 +29,6 @@ package gasprice
import (
"context"
"math/big"
- "sort"
"sync"
"github.com/ava-labs/avalanchego/utils/timer/mockable"
@@ -43,6 +42,7 @@ import (
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
+ "golang.org/x/exp/slices"
)
const (
@@ -356,12 +356,12 @@ func (oracle *Oracle) suggestDynamicFees(ctx context.Context) (*big.Int, *big.In
price := lastPrice
baseFee := lastBaseFee
if len(tipResults) > 0 {
- sort.Sort(bigIntArray(tipResults))
+ slices.SortFunc(tipResults, func(a, b *big.Int) int { return a.Cmp(b) })
price = tipResults[(len(tipResults)-1)*oracle.percentile/100]
}
if len(baseFeeResults) > 0 {
- sort.Sort(bigIntArray(baseFeeResults))
+ slices.SortFunc(baseFeeResults, func(a, b *big.Int) int { return a.Cmp(b) })
baseFee = baseFeeResults[(len(baseFeeResults)-1)*oracle.percentile/100]
}
if price.Cmp(oracle.maxPrice) > 0 {
@@ -394,9 +394,3 @@ func (oracle *Oracle) getFeeInfo(ctx context.Context, number uint64) (*feeInfo,
}
return oracle.feeInfoProvider.addHeader(ctx, header)
}
-
-type bigIntArray []*big.Int
-
-func (s bigIntArray) Len() int { return len(s) }
-func (s bigIntArray) Less(i, j int) bool { return s[i].Cmp(s[j]) < 0 }
-func (s bigIntArray) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
diff --git a/eth/state_accessor.go b/eth/state_accessor.go
index 72ea7f01cf..029fdb859f 100644
--- a/eth/state_accessor.go
+++ b/eth/state_accessor.go
@@ -175,7 +175,7 @@ func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexe
return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err)
}
// Finalize the state so any modifications are written to the trie
- root, err := statedb.Commit(eth.blockchain.Config().IsEIP158(current.Number()), true)
+ root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number()), true)
if err != nil {
return nil, nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w",
current.NumberU64(), current.Root().Hex(), err)
diff --git a/eth/tracers/api.go b/eth/tracers/api.go
index 597008dda2..81b7417268 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/api.go
@@ -1085,6 +1085,10 @@ func overrideConfig(original *params.ChainConfig, override *params.ChainConfig)
copy.CancunTime = timestamp
canon = false
}
+ if timestamp := override.VerkleTime; timestamp != nil {
+ copy.VerkleTime = timestamp
+ canon = false
+ }
return copy, canon
}
diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go
index fd460fc338..0cde24ca5f 100644
--- a/eth/tracers/api_test.go
+++ b/eth/tracers/api_test.go
@@ -27,7 +27,6 @@
package tracers
import (
- "bytes"
"context"
"crypto/ecdsa"
"encoding/json"
@@ -35,7 +34,6 @@ import (
"fmt"
"math/big"
"reflect"
- "sort"
"sync/atomic"
"testing"
@@ -54,6 +52,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
+ "golang.org/x/exp/slices"
)
var (
@@ -804,19 +803,13 @@ type Account struct {
addr common.Address
}
-type Accounts []Account
-
-func (a Accounts) Len() int { return len(a) }
-func (a Accounts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a Accounts) Less(i, j int) bool { return bytes.Compare(a[i].addr.Bytes(), a[j].addr.Bytes()) < 0 }
-
-func newAccounts(n int) (accounts Accounts) {
+func newAccounts(n int) (accounts []Account) {
for i := 0; i < n; i++ {
key, _ := crypto.GenerateKey()
addr := crypto.PubkeyToAddress(key.PublicKey)
accounts = append(accounts, Account{key: key, addr: addr})
}
- sort.Sort(accounts)
+ slices.SortFunc(accounts, func(a, b Account) int { return a.addr.Cmp(b.addr) })
return accounts
}
diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go
index ff100fb7e2..57ea83ef24 100644
--- a/eth/tracers/internal/tracetest/calltrace_test.go
+++ b/eth/tracers/internal/tracetest/calltrace_test.go
@@ -312,13 +312,13 @@ func TestInternals(t *testing.T) {
byte(vm.CALL),
},
tracer: mkTracer("callTracer", nil),
- want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0xc350","gasUsed":"0x54d8","to":"0x00000000000000000000000000000000deadbeef","input":"0x","calls":[{"from":"0x00000000000000000000000000000000deadbeef","gas":"0x6cbf","gasUsed":"0x0","to":"0x00000000000000000000000000000000000000ff","input":"0x","value":"0x0","type":"CALL"}],"value":"0x0","type":"CALL"}`,
+ want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0x13880","gasUsed":"0x54d8","to":"0x00000000000000000000000000000000deadbeef","input":"0x","calls":[{"from":"0x00000000000000000000000000000000deadbeef","gas":"0xe01a","gasUsed":"0x0","to":"0x00000000000000000000000000000000000000ff","input":"0x","value":"0x0","type":"CALL"}],"value":"0x0","type":"CALL"}`,
},
{
name: "Stack depletion in LOG0",
code: []byte{byte(vm.LOG3)},
tracer: mkTracer("callTracer", json.RawMessage(`{ "withLog": true }`)),
- want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0xc350","gasUsed":"0xc350","to":"0x00000000000000000000000000000000deadbeef","input":"0x","error":"stack underflow (0 \u003c=\u003e 5)","value":"0x0","type":"CALL"}`,
+ want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0x13880","gasUsed":"0x13880","to":"0x00000000000000000000000000000000deadbeef","input":"0x","error":"stack underflow (0 \u003c=\u003e 5)","value":"0x0","type":"CALL"}`,
},
{
name: "Mem expansion in LOG0",
@@ -331,11 +331,11 @@ func TestInternals(t *testing.T) {
byte(vm.LOG0),
},
tracer: mkTracer("callTracer", json.RawMessage(`{ "withLog": true }`)),
- want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0xc350","gasUsed":"0x5b9e","to":"0x00000000000000000000000000000000deadbeef","input":"0x","logs":[{"address":"0x00000000000000000000000000000000deadbeef","topics":[],"data":"0x000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}],"value":"0x0","type":"CALL"}`,
+ want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0x13880","gasUsed":"0x5b9e","to":"0x00000000000000000000000000000000deadbeef","input":"0x","logs":[{"address":"0x00000000000000000000000000000000deadbeef","topics":[],"data":"0x000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}],"value":"0x0","type":"CALL"}`,
},
{
// Leads to OOM on the prestate tracer
- name: "Prestate-tracer - mem expansion in CREATE2",
+ name: "Prestate-tracer - CREATE2 OOM",
code: []byte{
byte(vm.PUSH1), 0x1,
byte(vm.PUSH1), 0x0,
@@ -349,41 +349,62 @@ func TestInternals(t *testing.T) {
byte(vm.PUSH1), 0x0,
byte(vm.LOG0),
},
- tracer: mkTracer("prestateTracer", json.RawMessage(`{ "withLog": true }`)),
- want: `{"0x0000000000000000000000000000000000000000":{"balance":"0x0"},"0x000000000000000000000000000000000000feed":{"balance":"0x1c6bf52640350"},"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600164ffffffffff60016000f560ff6000a0"}}`,
+ tracer: mkTracer("prestateTracer", nil),
+ want: `{"0x0000000000000000000000000000000000000000":{"balance":"0x0"},"0x000000000000000000000000000000000000feed":{"balance":"0x1c6bf52647880"},"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600164ffffffffff60016000f560ff6000a0"}}`,
+ },
+ {
+ // CREATE2 whose initcode read requires the prestate tracer to pad memory
+ name: "Prestate-tracer - CREATE2 Memory padding",
+ code: []byte{
+ byte(vm.PUSH1), 0x1,
+ byte(vm.PUSH1), 0x0,
+ byte(vm.MSTORE),
+ byte(vm.PUSH1), 0x1,
+ byte(vm.PUSH1), 0xff,
+ byte(vm.PUSH1), 0x1,
+ byte(vm.PUSH1), 0x0,
+ byte(vm.CREATE2),
+ byte(vm.PUSH1), 0xff,
+ byte(vm.PUSH1), 0x0,
+ byte(vm.LOG0),
+ },
+ tracer: mkTracer("prestateTracer", nil),
+ want: `{"0x0000000000000000000000000000000000000000":{"balance":"0x0"},"0x000000000000000000000000000000000000feed":{"balance":"0x1c6bf52647880"},"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600160ff60016000f560ff6000a0"},"0x91ff9a805d36f54e3e272e230f3e3f5c1b330804":{"balance":"0x0"}}`,
},
} {
- _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(),
- core.GenesisAlloc{
- to: core.GenesisAccount{
- Code: tc.code,
- },
- origin: core.GenesisAccount{
- Balance: big.NewInt(500000000000000),
- },
- }, false)
- evm := vm.NewEVM(context, txContext, statedb, params.AvalancheMainnetChainConfig, vm.Config{Tracer: tc.tracer})
- msg := &core.Message{
- To: &to,
- From: origin,
- Value: big.NewInt(0),
- GasLimit: 50000,
- GasPrice: big.NewInt(0),
- GasFeeCap: big.NewInt(0),
- GasTipCap: big.NewInt(0),
- SkipAccountChecks: false,
- }
- st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(msg.GasLimit))
- if _, err := st.TransitionDb(); err != nil {
- t.Fatalf("test %v: failed to execute transaction: %v", tc.name, err)
- }
- // Retrieve the trace result and compare against the expected
- res, err := tc.tracer.GetResult()
- if err != nil {
- t.Fatalf("test %v: failed to retrieve trace result: %v", tc.name, err)
- }
- if string(res) != tc.want {
- t.Fatalf("test %v: trace mismatch\n have: %v\n want: %v\n", tc.name, string(res), tc.want)
- }
+ t.Run(tc.name, func(t *testing.T) {
+ _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(),
+ core.GenesisAlloc{
+ to: core.GenesisAccount{
+ Code: tc.code,
+ },
+ origin: core.GenesisAccount{
+ Balance: big.NewInt(500000000000000),
+ },
+ }, false)
+ evm := vm.NewEVM(context, txContext, statedb, params.AvalancheMainnetChainConfig, vm.Config{Tracer: tc.tracer})
+ msg := &core.Message{
+ To: &to,
+ From: origin,
+ Value: big.NewInt(0),
+ GasLimit: 80000,
+ GasPrice: big.NewInt(0),
+ GasFeeCap: big.NewInt(0),
+ GasTipCap: big.NewInt(0),
+ SkipAccountChecks: false,
+ }
+ st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(msg.GasLimit))
+ if _, err := st.TransitionDb(); err != nil {
+ t.Fatalf("test %v: failed to execute transaction: %v", tc.name, err)
+ }
+ // Retrieve the trace result and compare against the expected
+ res, err := tc.tracer.GetResult()
+ if err != nil {
+ t.Fatalf("test %v: failed to retrieve trace result: %v", tc.name, err)
+ }
+ if string(res) != tc.want {
+ t.Fatalf("test %v: trace mismatch\n have: %v\n want: %v\n", tc.name, string(res), tc.want)
+ }
+ })
}
}
diff --git a/eth/tracers/js/goja.go b/eth/tracers/js/goja.go
index b82bfea3d9..cf5d4b184b 100644
--- a/eth/tracers/js/goja.go
+++ b/eth/tracers/js/goja.go
@@ -96,7 +96,7 @@ func fromBuf(vm *goja.Runtime, bufType goja.Value, buf goja.Value, allowString b
if !obj.Get("constructor").SameAs(bufType) {
break
}
- b := obj.Get("buffer").Export().(goja.ArrayBuffer).Bytes()
+ b := obj.Export().([]byte)
return b, nil
}
return nil, errors.New("invalid buffer type")
diff --git a/eth/tracers/logger/logger.go b/eth/tracers/logger/logger.go
index 2173db790f..b1a3c7f8e0 100644
--- a/eth/tracers/logger/logger.go
+++ b/eth/tracers/logger/logger.go
@@ -418,6 +418,7 @@ type StructLogRes struct {
Depth int `json:"depth"`
Error string `json:"error,omitempty"`
Stack *[]string `json:"stack,omitempty"`
+ ReturnData string `json:"returnData,omitempty"`
Memory *[]string `json:"memory,omitempty"`
Storage *map[string]string `json:"storage,omitempty"`
RefundCounter uint64 `json:"refund,omitempty"`
@@ -443,6 +444,9 @@ func formatLogs(logs []StructLog) []StructLogRes {
}
formatted[index].Stack = &stack
}
+ if len(trace.ReturnData) > 0 {
+ formatted[index].ReturnData = hexutil.Bytes(trace.ReturnData).String()
+ }
if trace.Memory != nil {
memory := make([]string, 0, (len(trace.Memory)+31)/32)
for i := 0; i+32 <= len(trace.Memory); i += 32 {
diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go
index 53f95c47a0..95fd7c64a6 100644
--- a/eth/tracers/native/call.go
+++ b/eth/tracers/native/call.go
@@ -38,6 +38,7 @@ import (
"github.com/ava-labs/coreth/vmerrs"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/log"
)
//go:generate go run github.com/fjl/gencodec -type callFrame -field-override callFrameMarshaling -out gen_callframe_json.go
@@ -194,6 +195,7 @@ func (t *callTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, sco
data, err := tracers.GetMemoryCopyPadded(scope.Memory, int64(mStart.Uint64()), int64(mSize.Uint64()))
if err != nil {
// mSize was unrealistically large
+ log.Warn("failed to copy CREATE2 input", "err", err, "tracer", "callTracer", "offset", mStart, "size", mSize)
return
}
diff --git a/eth/tracers/native/call_flat.go b/eth/tracers/native/call_flat.go
index 36704ee412..26d520661f 100644
--- a/eth/tracers/native/call_flat.go
+++ b/eth/tracers/native/call_flat.go
@@ -259,7 +259,7 @@ func flatFromNested(input *callFrame, traceAddress []int, convertErrs bool, ctx
case vm.CREATE, vm.CREATE2:
frame = newFlatCreate(input)
case vm.SELFDESTRUCT:
- frame = newFlatSuicide(input)
+ frame = newFlatSelfdestruct(input)
case vm.CALL, vm.STATICCALL, vm.CALLCODE, vm.DELEGATECALL:
frame = newFlatCall(input)
default:
@@ -341,7 +341,7 @@ func newFlatCall(input *callFrame) *flatCallFrame {
}
}
-func newFlatSuicide(input *callFrame) *flatCallFrame {
+func newFlatSelfdestruct(input *callFrame) *flatCallFrame {
return &flatCallFrame{
Type: "suicide",
Action: flatCallAction{
diff --git a/eth/tracers/native/prestate.go b/eth/tracers/native/prestate.go
index 59b860c5b1..5c800646c7 100644
--- a/eth/tracers/native/prestate.go
+++ b/eth/tracers/native/prestate.go
@@ -37,6 +37,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/log"
)
//go:generate go run github.com/fjl/gencodec -type account -field-override accountMarshaling -out gen_account_json.go
@@ -175,7 +176,11 @@ func (t *prestateTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64,
case stackLen >= 4 && op == vm.CREATE2:
offset := stackData[stackLen-2]
size := stackData[stackLen-3]
- init := scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64()))
+ init, err := tracers.GetMemoryCopyPadded(scope.Memory, int64(offset.Uint64()), int64(size.Uint64()))
+ if err != nil {
+ log.Warn("failed to copy CREATE2 input", "err", err, "tracer", "prestateTracer", "offset", offset, "size", size)
+ return
+ }
inithash := crypto.Keccak256(init)
salt := stackData[stackLen-4]
addr := crypto.CreateAddress2(caller, salt.Bytes32(), inithash)
diff --git a/ethdb/dbtest/testsuite.go b/ethdb/dbtest/testsuite.go
index 8757d9b6af..e878c81ad1 100644
--- a/ethdb/dbtest/testsuite.go
+++ b/ethdb/dbtest/testsuite.go
@@ -34,6 +34,7 @@ import (
"testing"
"github.com/ava-labs/coreth/ethdb"
+ "golang.org/x/exp/slices"
)
// TestDatabaseSuite runs a suite of tests against a KeyValueStore database
@@ -473,7 +474,7 @@ func makeDataset(size, ksize, vsize int, order bool) ([][]byte, [][]byte) {
vals = append(vals, randBytes(vsize))
}
if order {
- sort.Slice(keys, func(i, j int) bool { return bytes.Compare(keys[i], keys[j]) < 0 })
+ slices.SortFunc(keys, func(a, b []byte) int { return bytes.Compare(a, b) })
}
return keys, vals
}
diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go
index 5b48558888..0df2d7b3f6 100644
--- a/ethdb/leveldb/leveldb.go
+++ b/ethdb/leveldb/leveldb.go
@@ -32,8 +32,6 @@ package leveldb
import (
"fmt"
- "strconv"
- "strings"
"sync"
"time"
@@ -87,6 +85,8 @@ type Database struct {
seekCompGauge metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt
manualMemAllocGauge metrics.Gauge // Gauge to track the amount of memory that has been manually allocated (not a part of runtime/GC)
+ levelsGauge []metrics.Gauge // Gauge for tracking the number of tables in levels
+
quitLock sync.Mutex // Mutex protecting the quit channel access
quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
@@ -157,7 +157,7 @@ func NewCustom(file string, namespace string, customize func(options *opt.Option
ldb.manualMemAllocGauge = metrics.NewRegisteredGauge(namespace+"memory/manualalloc", nil)
// Start up the metrics gathering and return
- go ldb.meter(metricsGatheringInterval)
+ go ldb.meter(metricsGatheringInterval, namespace)
return ldb, nil
}
@@ -263,122 +263,63 @@ func (db *Database) Path() string {
// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
-//
-// This is how a LevelDB stats table looks like (currently):
-//
-// Compactions
-// Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)
-// -------+------------+---------------+---------------+---------------+---------------
-// 0 | 0 | 0.00000 | 1.27969 | 0.00000 | 12.31098
-// 1 | 85 | 109.27913 | 28.09293 | 213.92493 | 214.26294
-// 2 | 523 | 1000.37159 | 7.26059 | 66.86342 | 66.77884
-// 3 | 570 | 1113.18458 | 0.00000 | 0.00000 | 0.00000
-//
-// This is how the write delay look like (currently):
-// DelayN:5 Delay:406.604657ms Paused: false
-//
-// This is how the iostats look like (currently):
-// Read(MB):3895.04860 Write(MB):3654.64712
-func (db *Database) meter(refresh time.Duration) {
+func (db *Database) meter(refresh time.Duration, namespace string) {
// Create the counters to store current and previous compaction values
- compactions := make([][]float64, 2)
+ compactions := make([][]int64, 2)
for i := 0; i < 2; i++ {
- compactions[i] = make([]float64, 4)
+ compactions[i] = make([]int64, 4)
}
- // Create storage for iostats.
- var iostats [2]float64
-
- // Create storage and warning log tracer for write delay.
- var (
- delaystats [2]int64
- lastWritePaused time.Time
- )
-
+ // Create storages for states and warning log tracer.
var (
errc chan error
merr error
- )
+ stats leveldb.DBStats
+ iostats [2]int64
+ delaystats [2]int64
+ lastWritePaused time.Time
+ )
timer := time.NewTimer(refresh)
defer timer.Stop()
// Iterate ad infinitum and collect the stats
for i := 1; errc == nil && merr == nil; i++ {
// Retrieve the database stats
- stats, err := db.db.GetProperty("leveldb.stats")
+ // The Stats method resets its internal buffers, so it is safe to reuse the struct across iterations.
+ err := db.db.Stats(&stats)
if err != nil {
db.log.Error("Failed to read database stats", "err", err)
merr = err
continue
}
- // Find the compaction table, skip the header
- lines := strings.Split(stats, "\n")
- for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
- lines = lines[1:]
- }
- if len(lines) <= 3 {
- db.log.Error("Compaction leveldbTable not found")
- merr = errors.New("compaction leveldbTable not found")
- continue
- }
- lines = lines[3:]
-
// Iterate over all the leveldbTable rows, and accumulate the entries
for j := 0; j < len(compactions[i%2]); j++ {
compactions[i%2][j] = 0
}
- for _, line := range lines {
- parts := strings.Split(line, "|")
- if len(parts) != 6 {
- break
- }
- for idx, counter := range parts[2:] {
- value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
- if err != nil {
- db.log.Error("Compaction entry parsing failed", "err", err)
- merr = err
- continue
- }
- compactions[i%2][idx] += value
- }
+ compactions[i%2][0] = stats.LevelSizes.Sum()
+ for _, t := range stats.LevelDurations {
+ compactions[i%2][1] += t.Nanoseconds()
}
+ compactions[i%2][2] = stats.LevelRead.Sum()
+ compactions[i%2][3] = stats.LevelWrite.Sum()
// Update all the requested meters
if db.diskSizeGauge != nil {
- db.diskSizeGauge.Update(int64(compactions[i%2][0] * 1024 * 1024))
+ db.diskSizeGauge.Update(compactions[i%2][0])
}
if db.compTimeMeter != nil {
- db.compTimeMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1000 * 1000 * 1000))
+ db.compTimeMeter.Mark(compactions[i%2][1] - compactions[(i-1)%2][1])
}
if db.compReadMeter != nil {
- db.compReadMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
+ db.compReadMeter.Mark(compactions[i%2][2] - compactions[(i-1)%2][2])
}
if db.compWriteMeter != nil {
- db.compWriteMeter.Mark(int64((compactions[i%2][3] - compactions[(i-1)%2][3]) * 1024 * 1024))
- }
- // Retrieve the write delay statistic
- writedelay, err := db.db.GetProperty("leveldb.writedelay")
- if err != nil {
- db.log.Error("Failed to read database write delay statistic", "err", err)
- merr = err
- continue
+ db.compWriteMeter.Mark(compactions[i%2][3] - compactions[(i-1)%2][3])
}
var (
- delayN int64
- delayDuration string
- duration time.Duration
- paused bool
+ delayN = int64(stats.WriteDelayCount)
+ duration = stats.WriteDelayDuration
+ paused = stats.WritePaused
)
- if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
- db.log.Error("Write delay statistic not found")
- merr = err
- continue
- }
- duration, err = time.ParseDuration(delayDuration)
- if err != nil {
- db.log.Error("Failed to parse delay duration", "err", err)
- merr = err
- continue
- }
if db.writeDelayNMeter != nil {
db.writeDelayNMeter.Mark(delayN - delaystats[0])
}
@@ -394,60 +335,30 @@ func (db *Database) meter(refresh time.Duration) {
}
delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()
- // Retrieve the database iostats.
- ioStats, err := db.db.GetProperty("leveldb.iostats")
- if err != nil {
- db.log.Error("Failed to read database iostats", "err", err)
- merr = err
- continue
- }
- var nRead, nWrite float64
- parts := strings.Split(ioStats, " ")
- if len(parts) < 2 {
- db.log.Error("Bad syntax of ioStats", "ioStats", ioStats)
- merr = fmt.Errorf("bad syntax of ioStats %s", ioStats)
- continue
- }
- if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
- db.log.Error("Bad syntax of read entry", "entry", parts[0])
- merr = err
- continue
- }
- if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
- db.log.Error("Bad syntax of write entry", "entry", parts[1])
- merr = err
- continue
- }
+ var (
+ nRead = int64(stats.IORead)
+ nWrite = int64(stats.IOWrite)
+ )
if db.diskReadMeter != nil {
- db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
+ db.diskReadMeter.Mark(nRead - iostats[0])
}
if db.diskWriteMeter != nil {
- db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
+ db.diskWriteMeter.Mark(nWrite - iostats[1])
}
iostats[0], iostats[1] = nRead, nWrite
- compCount, err := db.db.GetProperty("leveldb.compcount")
- if err != nil {
- db.log.Error("Failed to read database iostats", "err", err)
- merr = err
- continue
- }
+ db.memCompGauge.Update(int64(stats.MemComp))
+ db.level0CompGauge.Update(int64(stats.Level0Comp))
+ db.nonlevel0CompGauge.Update(int64(stats.NonLevel0Comp))
+ db.seekCompGauge.Update(int64(stats.SeekComp))
- var (
- memComp uint32
- level0Comp uint32
- nonLevel0Comp uint32
- seekComp uint32
- )
- if n, err := fmt.Sscanf(compCount, "MemComp:%d Level0Comp:%d NonLevel0Comp:%d SeekComp:%d", &memComp, &level0Comp, &nonLevel0Comp, &seekComp); n != 4 || err != nil {
- db.log.Error("Compaction count statistic not found")
- merr = err
- continue
+ for i, tables := range stats.LevelTablesCounts {
+ // Register gauges for any additional levels on first sight
+ if i >= len(db.levelsGauge) {
+ db.levelsGauge = append(db.levelsGauge, metrics.NewRegisteredGauge(namespace+fmt.Sprintf("tables/level%v", i), nil))
+ }
+ db.levelsGauge[i].Update(int64(tables))
}
- db.memCompGauge.Update(int64(memComp))
- db.level0CompGauge.Update(int64(level0Comp))
- db.nonlevel0CompGauge.Update(int64(nonLevel0Comp))
- db.seekCompGauge.Update(int64(seekComp))
// Sleep a bit, then repeat the stats collection
select {
diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go
index 5f4e6a593c..4a3f833ae7 100644
--- a/ethdb/pebble/pebble.go
+++ b/ethdb/pebble/pebble.go
@@ -289,7 +289,7 @@ func (d *Database) Put(key []byte, value []byte) error {
if d.closed {
return pebble.ErrClosed
}
- return d.db.Set(key, value, pebble.NoSync)
+ return d.db.Set(key, value, pebble.Sync)
}
// Delete removes the key from the key-value store.
@@ -545,7 +545,7 @@ func (b *batch) Write() error {
if b.db.closed {
return pebble.ErrClosed
}
- return b.b.Commit(pebble.NoSync)
+ return b.b.Commit(pebble.Sync)
}
// Reset resets the batch for reuse.
diff --git a/go.mod b/go.mod
index b4ee923abf..c631eabc7c 100644
--- a/go.mod
+++ b/go.mod
@@ -4,13 +4,13 @@ go 1.20
require (
github.com/VictoriaMetrics/fastcache v1.10.0
- github.com/ava-labs/avalanchego v1.10.10-rc.2
+ github.com/ava-labs/avalanchego v1.10.10-rc.1.0.20230913011517-f71418f73c64
github.com/cespare/cp v0.1.0
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811
github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set/v2 v2.1.0
github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3
- github.com/ethereum/go-ethereum v1.12.0
+ github.com/ethereum/go-ethereum v1.12.2
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5
github.com/fsnotify/fsnotify v1.6.0
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08
@@ -21,7 +21,7 @@ require (
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e
github.com/holiman/bloomfilter/v2 v2.0.3
- github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c
+ github.com/holiman/uint256 v1.2.3
github.com/kylelemons/godebug v1.1.0
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.16
@@ -36,15 +36,15 @@ require (
github.com/stretchr/testify v1.8.1
github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a
github.com/tyler-smith/go-bip39 v1.1.0
- github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
+ github.com/urfave/cli/v2 v2.24.1
go.uber.org/goleak v1.2.1
go.uber.org/mock v0.2.0
- golang.org/x/crypto v0.1.0
- golang.org/x/exp v0.0.0-20230206171751-46f607a40771
- golang.org/x/sync v0.1.0
- golang.org/x/sys v0.8.0
- golang.org/x/text v0.8.0
- golang.org/x/time v0.0.0-20220922220347-f3bd1da661af
+ golang.org/x/crypto v0.9.0
+ golang.org/x/exp v0.0.0-20230810033253-352e893a4cad
+ golang.org/x/sync v0.3.0
+ golang.org/x/sys v0.9.0
+ golang.org/x/text v0.9.0
+ golang.org/x/time v0.3.0
google.golang.org/protobuf v1.30.0
gopkg.in/natefinch/lumberjack.v2 v2.0.0
)
@@ -63,11 +63,11 @@ require (
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.10.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
- github.com/crate-crypto/go-kzg-4844 v0.2.0 // indirect
+ github.com/crate-crypto/go-kzg-4844 v0.3.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/dlclark/regexp2 v1.7.0 // indirect
- github.com/ethereum/c-kzg-4844 v0.2.0 // indirect
+ github.com/ethereum/c-kzg-4844 v0.3.1 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -84,6 +84,7 @@ require (
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
+ github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 // indirect
github.com/huin/goupnp v1.0.3 // indirect
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect
@@ -130,8 +131,8 @@ require (
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.24.0 // indirect
- golang.org/x/net v0.8.0 // indirect
- golang.org/x/term v0.7.0 // indirect
+ golang.org/x/net v0.10.0 // indirect
+ golang.org/x/term v0.8.0 // indirect
gonum.org/v1/gonum v0.11.0 // indirect
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
google.golang.org/grpc v1.55.0 // indirect
diff --git a/go.sum b/go.sum
index a5c8ec88a7..2650a9b827 100644
--- a/go.sum
+++ b/go.sum
@@ -38,7 +38,7 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
+github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
@@ -55,8 +55,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/ava-labs/avalanchego v1.10.10-rc.2 h1:nlHc1JwKb5TEc9oqPU2exvOpazhxr11N2ym/LzYxv4k=
-github.com/ava-labs/avalanchego v1.10.10-rc.2/go.mod h1:BN97sZppDSvIMIfEjrLTjdPTFkGLkb0ISJHEcoxMMNk=
+github.com/ava-labs/avalanchego v1.10.10-rc.1.0.20230913011517-f71418f73c64 h1:Dd2EUuV01kudkKSKUKUxp79xqMygMWgzRIcSPsa9Nag=
+github.com/ava-labs/avalanchego v1.10.10-rc.1.0.20230913011517-f71418f73c64/go.mod h1:Dc19+p/9PcI9E1WAaXWxjqGtefDzBhpjRFN1Lg+KNLY=
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -134,8 +134,8 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/crate-crypto/go-kzg-4844 v0.2.0 h1:UVuHOE+5tIWrim4zf/Xaa43+MIsDCPyW76QhUpiMGj4=
-github.com/crate-crypto/go-kzg-4844 v0.2.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4=
+github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A=
+github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -177,10 +177,10 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
-github.com/ethereum/c-kzg-4844 v0.2.0 h1:+cUvymlnoDDQgMInp25Bo3OmLajmmY8mLJ/tLjqd77Q=
-github.com/ethereum/c-kzg-4844 v0.2.0/go.mod h1:WI2Nd82DMZAAZI1wV2neKGost9EKjvbpQR9OqE5Qqa8=
-github.com/ethereum/go-ethereum v1.12.0 h1:bdnhLPtqETd4m3mS8BGMNvBTf36bO5bx/hxE2zljOa0=
-github.com/ethereum/go-ethereum v1.12.0/go.mod h1:/oo2X/dZLJjf2mJ6YT9wcWxa4nNJDBKDBU6sFIpx1Gs=
+github.com/ethereum/c-kzg-4844 v0.3.1 h1:sR65+68+WdnMKxseNWxSJuAv2tsUrihTpVBTfM/U5Zg=
+github.com/ethereum/c-kzg-4844 v0.3.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
+github.com/ethereum/go-ethereum v1.12.2 h1:eGHJ4ij7oyVqUQn48LBz3B7pvQ8sV0wGJiIE6gDq/6Y=
+github.com/ethereum/go-ethereum v1.12.2/go.mod h1:1cRAEV+rp/xX0zraSCBnu9Py3HQ+geRMj3HdR+k0wfI=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
@@ -340,10 +340,12 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw=
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU=
+github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw=
+github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
-github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c h1:DZfsyhDK1hnSS5lH8l+JggqzEleHteTYfutAiVlSUM8=
-github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
+github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o=
+github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
@@ -575,8 +577,8 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
-github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
-github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
+github.com/urfave/cli/v2 v2.24.1 h1:/QYYr7g0EhwXEML8jO+8OYt5trPnLHS0p3mrgExJ5NU=
+github.com/urfave/cli/v2 v2.24.1/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
@@ -650,8 +652,8 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
-golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
+golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -662,8 +664,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
-golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20230810033253-352e893a4cad h1:g0bG7Z4uG+OgH2QDODnjp6ggkk1bJDsINcuWmJN1iJU=
+golang.org/x/exp v0.0.0-20230810033253-352e893a4cad/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -735,8 +737,8 @@ golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
-golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -759,8 +761,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -828,13 +830,13 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
-golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
+golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
-golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -845,15 +847,15 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
-golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y=
-golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
diff --git a/core/state/snapshot/sort.go b/internal/blocktest/test_hash.go
similarity index 50%
rename from core/state/snapshot/sort.go
rename to internal/blocktest/test_hash.go
index 6254d37943..30834ee2ba 100644
--- a/core/state/snapshot/sort.go
+++ b/internal/blocktest/test_hash.go
@@ -1,4 +1,4 @@
-// (c) 2019-2020, Ava Labs, Inc.
+// (c) 2023, Ava Labs, Inc.
//
// This file is a derived work, based on the go-ethereum library whose original
// notices appear below.
@@ -8,7 +8,7 @@
//
// Much love to the original authors for their work.
// **********
-// Copyright 2019 The go-ethereum Authors
+// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@@ -24,23 +24,46 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-package snapshot
+// Package blocktest provides utilities for testing blocks and
+// block-related data structures.
+//
+// This package exists to hold test helpers, such as a hasher, that
+// would otherwise create import cycles with their natural packages.
+
+package blocktest
import (
- "bytes"
+ "hash"
"github.com/ethereum/go-ethereum/common"
+ "golang.org/x/crypto/sha3"
)
-// hashes is a helper to implement sort.Interface.
-type hashes []common.Hash
+// testHasher is the helper tool for transaction/receipt list hashing.
+// The original hasher is trie, in order to get rid of import cycle,
+// use the testing hasher instead.
+type testHasher struct {
+ hasher hash.Hash
+}
+
+// NewHasher returns a new testHasher instance.
+func NewHasher() *testHasher {
+ return &testHasher{hasher: sha3.NewLegacyKeccak256()}
+}
-// Len is the number of elements in the collection.
-func (hs hashes) Len() int { return len(hs) }
+// Reset resets the hash state.
+func (h *testHasher) Reset() {
+ h.hasher.Reset()
+}
-// Less reports whether the element with index i should sort before the element
-// with index j.
-func (hs hashes) Less(i, j int) bool { return bytes.Compare(hs[i][:], hs[j][:]) < 0 }
+// Update updates the hash state with the given key and value.
+func (h *testHasher) Update(key, val []byte) error {
+ h.hasher.Write(key)
+ h.hasher.Write(val)
+ return nil
+}
-// Swap swaps the elements with indexes i and j.
-func (hs hashes) Swap(i, j int) { hs[i], hs[j] = hs[j], hs[i] }
+// Hash returns the hash value.
+func (h *testHasher) Hash() common.Hash {
+ return common.BytesToHash(h.hasher.Sum(nil))
+}
diff --git a/internal/debug/flags.go b/internal/debug/flags.go
index a8c7bb2492..96b8f3fe14 100644
--- a/internal/debug/flags.go
+++ b/internal/debug/flags.go
@@ -29,6 +29,7 @@ package debug
import (
"fmt"
"io"
+ "net"
"net/http"
_ "net/http/pprof"
"os"
@@ -316,7 +317,7 @@ func Setup(ctx *cli.Context) error {
port := ctx.Int(pprofPortFlag.Name)
- address := fmt.Sprintf("%s:%d", listenHost, port)
+ address := net.JoinHostPort(listenHost, fmt.Sprintf("%d", port))
StartPProf(address)
}
if len(logFile) > 0 || rotation {
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index f044fa89e3..d068c3d89e 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -665,46 +665,78 @@ type StorageResult struct {
Proof []string `json:"proof"`
}
+// proofList implements ethdb.KeyValueWriter and collects the proofs as
+// hex-strings for delivery to rpc-caller.
+type proofList []string
+
+func (n *proofList) Put(key []byte, value []byte) error {
+ *n = append(*n, hexutil.Encode(value))
+ return nil
+}
+
+func (n *proofList) Delete(key []byte) error {
+ panic("not supported")
+}
+
// GetProof returns the Merkle-proof for a given account and optionally some storage keys.
func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, storageKeys []string, blockNrOrHash rpc.BlockNumberOrHash) (*AccountResult, error) {
+ var (
+ keys = make([]common.Hash, len(storageKeys))
+ keyLengths = make([]int, len(storageKeys))
+ storageProof = make([]StorageResult, len(storageKeys))
+ storageTrie state.Trie
+ storageHash = types.EmptyRootHash
+ codeHash = types.EmptyCodeHash
+ )
+ // Deserialize all keys. This prevents state access on invalid input.
+ for i, hexKey := range storageKeys {
+ var err error
+ keys[i], keyLengths[i], err = decodeHash(hexKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
if state == nil || err != nil {
return nil, err
}
- storageTrie, err := state.StorageTrie(address)
- if err != nil {
+ if storageTrie, err = state.StorageTrie(address); err != nil {
return nil, err
}
- storageHash := types.EmptyRootHash
- codeHash := state.GetCodeHash(address)
- storageProof := make([]StorageResult, len(storageKeys))
- // if we have a storageTrie, (which means the account exists), we can update the storagehash
+ // If we have a storageTrie, the account exists and we must update
+ // the storage root hash and the code hash.
if storageTrie != nil {
storageHash = storageTrie.Hash()
- } else {
- // no storageTrie means the account does not exist, so the codeHash is the hash of an empty bytearray.
- codeHash = crypto.Keccak256Hash(nil)
- }
+ codeHash = state.GetCodeHash(address)
+ }
+ // Create the proofs for the storageKeys.
+ for i, key := range keys {
+ // Output key encoding is a bit special: if the input was a 32-byte hash, it is
+ // returned as such. Otherwise, we apply the QUANTITY encoding mandated by the
+ // JSON-RPC spec for getProof. This behavior exists to preserve backwards
+ // compatibility with older client versions.
+ var outputKey string
+ if keyLengths[i] != 32 {
+ outputKey = hexutil.EncodeBig(key.Big())
+ } else {
+ outputKey = hexutil.Encode(key[:])
+ }
- // create the proof for the storageKeys
- for i, hexKey := range storageKeys {
- key, err := decodeHash(hexKey)
- if err != nil {
- return nil, err
+ if storageTrie == nil {
+ storageProof[i] = StorageResult{outputKey, &hexutil.Big{}, []string{}}
+ continue
}
- if storageTrie != nil {
- proof, storageError := state.GetStorageProof(address, key)
- if storageError != nil {
- return nil, storageError
- }
- storageProof[i] = StorageResult{hexKey, (*hexutil.Big)(state.GetState(address, key).Big()), toHexSlice(proof)}
- } else {
- storageProof[i] = StorageResult{hexKey, &hexutil.Big{}, []string{}}
+ var proof proofList
+ if err := storageTrie.Prove(crypto.Keccak256(key.Bytes()), &proof); err != nil {
+ return nil, err
}
+ value := (*hexutil.Big)(state.GetState(address, key).Big())
+ storageProof[i] = StorageResult{outputKey, value, proof}
}
- // create the accountProof
+ // Create the accountProof.
accountProof, proofErr := state.GetProof(address)
if proofErr != nil {
return nil, proofErr
@@ -723,7 +755,7 @@ func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, st
// decodeHash parses a hex-encoded 32-byte hash. The input may optionally
// be prefixed by 0x and can have a byte length up to 32.
-func decodeHash(s string) (common.Hash, error) {
+func decodeHash(s string) (h common.Hash, inputLength int, err error) {
if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") {
s = s[2:]
}
@@ -732,17 +764,19 @@ func decodeHash(s string) (common.Hash, error) {
}
b, err := hex.DecodeString(s)
if err != nil {
- return common.Hash{}, errors.New("hex string invalid")
+ return common.Hash{}, 0, errors.New("hex string invalid")
}
if len(b) > 32 {
- return common.Hash{}, errors.New("hex string too long, want at most 32 bytes")
+ return common.Hash{}, len(b), errors.New("hex string too long, want at most 32 bytes")
}
- return common.BytesToHash(b), nil
+ return common.BytesToHash(b), len(b), nil
}
// GetHeaderByNumber returns the requested canonical block header.
-// * When blockNr is -1 the chain head is returned.
-// * When blockNr is -2 the pending chain head is returned.
+// - When blockNr is -1 the chain pending header is returned.
+// - When blockNr is -2 the chain latest header is returned.
+// - When blockNr is -3 the chain finalized header is returned.
+// - When blockNr is -4 the chain safe header is returned.
func (s *BlockChainAPI) GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (map[string]interface{}, error) {
header, err := s.b.HeaderByNumber(ctx, number)
if header != nil && err == nil {
@@ -769,8 +803,10 @@ func (s *BlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.Hash) m
}
// GetBlockByNumber returns the requested canonical block.
-// - When blockNr is -1 the chain head is returned.
-// - When blockNr is -2 the pending chain head is returned.
+// - When blockNr is -1 the chain pending block is returned.
+// - When blockNr is -2 the chain latest block is returned.
+// - When blockNr is -3 the chain finalized block is returned.
+// - When blockNr is -4 the chain safe block is returned.
// - When fullTx is true all transactions in the block are returned, otherwise
// only the transaction hash is returned.
func (s *BlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) {
@@ -865,7 +901,7 @@ func (s *BlockChainAPI) GetStorageAt(ctx context.Context, address common.Address
if state == nil || err != nil {
return nil, err
}
- key, err := decodeHash(hexKey)
+ key, _, err := decodeHash(hexKey)
if err != nil {
return nil, fmt.Errorf("unable to decode storage key: %s", err)
}
@@ -996,18 +1032,11 @@ func (context *ChainContext) GetHeader(hash common.Hash, number uint64) *types.H
return header
}
-func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) {
- defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
-
- state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
- if state == nil || err != nil {
- return nil, err
- }
- if err := overrides.Apply(state); err != nil {
- return nil, err
- }
- // If the request is for the pending block, override the block timestamp, number, and estimated
- // base fee, so that the check runs as if it were run on a newly generated block.
+// applyPendingBlockHeaderOverrides overrides the block timestamp, number, and
+// estimated base fee, if blockNrOrHash represents the pending block (so that
+// the check runs as if it were run on a newly generated block). For other
+// blocks it returns the header unmodified.
+func applyPendingBlockHeaderOverrides(ctx context.Context, b Backend, header *types.Header, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) {
if blkNumber, isNum := blockNrOrHash.Number(); isNum && blkNumber == rpc.PendingBlockNumber {
// Override header with a copy to ensure the original header is not modified
header = types.CopyHeader(header)
@@ -1024,6 +1053,13 @@ func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash
header.BaseFee = estimatedBaseFee
}
+ return header, nil
+}
+
+func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.StateDB, header *types.Header, overrides *StateOverride, blockOverrides *BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) {
+ if err := overrides.Apply(state); err != nil {
+ return nil, err
+ }
// Setup context so it may be cancelled the call has completed
// or, in case of unmetered gas, setup a context with a timeout.
var cancel context.CancelFunc
@@ -1071,6 +1107,21 @@ func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash
return result, nil
}
+func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) {
+ defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
+
+ state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
+ if state == nil || err != nil {
+ return nil, err
+ }
+ header, err = applyPendingBlockHeaderOverrides(ctx, b, header, blockNrOrHash)
+ if err != nil {
+ return nil, err
+ }
+
+ return doCall(ctx, b, args, state, header, overrides, blockOverrides, timeout, globalGasCap)
+}
+
func newRevertError(result *core.ExecutionResult) *revertError {
reason, errUnpack := abi.UnpackRevert(result.Revert())
err := errors.New("execution reverted")
@@ -1152,7 +1203,7 @@ func (s *BlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrO
return result.Return(), result.Err
}
-func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, gasCap uint64) (hexutil.Uint64, error) {
+func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, gasCap uint64) (hexutil.Uint64, error) {
// Binary search the gas requirement, as it may be higher than the amount used
var (
lo uint64 = params.TxGas - 1
@@ -1194,6 +1245,10 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr
if err != nil {
return 0, err
}
+ err = overrides.Apply(state)
+ if err != nil {
+ return 0, err
+ }
balance := state.GetBalance(*args.From) // from can't be nil
available := new(big.Int).Set(balance)
if args.Value != nil {
@@ -1223,10 +1278,10 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr
cap = hi
// Create a helper to check if a gas allowance results in an executable transaction
- executable := func(gas uint64) (bool, *core.ExecutionResult, error) {
+ executable := func(gas uint64, state *state.StateDB, header *types.Header) (bool, *core.ExecutionResult, error) {
args.Gas = (*hexutil.Uint64)(&gas)
- result, err := DoCall(ctx, b, args, blockNrOrHash, nil, nil, 0, gasCap)
+ result, err := doCall(ctx, b, args, state, header, nil, nil, 0, gasCap)
if err != nil {
if errors.Is(err, core.ErrIntrinsicGas) {
return true, nil, nil // Special case, raise gas limit
@@ -1235,10 +1290,23 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr
}
return result.Failed(), result, nil
}
+ state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
+ if state == nil || err != nil {
+ return 0, err
+ }
+ err = overrides.Apply(state)
+ if err != nil {
+ return 0, err
+ }
+ header, err = applyPendingBlockHeaderOverrides(ctx, b, header, blockNrOrHash)
+ if err != nil {
+ return 0, err
+ }
// Execute the binary search and hone in on an executable gas limit
for lo+1 < hi {
+ s := state.Copy()
mid := (hi + lo) / 2
- failed, _, err := executable(mid)
+ failed, _, err := executable(mid, s, header)
// If the error is not nil(consensus error), it means the provided message
// call or transaction will never be accepted no matter how much gas it is
@@ -1254,7 +1322,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr
}
// Reject the transaction as invalid if it still fails at the highest allowance
if hi == cap {
- failed, result, err := executable(hi)
+ failed, result, err := executable(hi, state, header)
if err != nil {
return 0, err
}
@@ -1274,12 +1342,12 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr
// EstimateGas returns an estimate of the amount of gas needed to execute the
// given transaction against the current pending block.
-func (s *BlockChainAPI) EstimateGas(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash) (hexutil.Uint64, error) {
+func (s *BlockChainAPI) EstimateGas(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Uint64, error) {
bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
if blockNrOrHash != nil {
bNrOrHash = *blockNrOrHash
}
- return DoEstimateGas(ctx, s.b, args, bNrOrHash, s.b.RPCGasCap())
+ return DoEstimateGas(ctx, s.b, args, bNrOrHash, overrides, s.b.RPCGasCap())
}
// RPCMarshalHeader converts the given header to the RPC output .
@@ -1296,7 +1364,6 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} {
"miner": head.Coinbase,
"difficulty": (*hexutil.Big)(head.Difficulty),
"extraData": hexutil.Bytes(head.Extra),
- "size": hexutil.Uint64(head.Size()),
"gasLimit": hexutil.Uint64(head.GasLimit),
"gasUsed": hexutil.Uint64(head.GasUsed),
"timestamp": hexutil.Uint64(head.Time),
@@ -1321,7 +1388,7 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} {
// RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are
// returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain
// transaction hashes.
-func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *params.ChainConfig) (map[string]interface{}, error) {
+func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *params.ChainConfig) map[string]interface{} {
fields := RPCMarshalHeader(block.Header())
fields["size"] = hexutil.Uint64(block.Size())
fields["blockExtraData"] = hexutil.Bytes(block.ExtData())
@@ -1349,7 +1416,7 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *param
}
fields["uncles"] = uncleHashes
- return fields, nil
+ return fields
}
// rpcMarshalHeader uses the generalized output filler, then adds the total difficulty field, which requires
@@ -1365,16 +1432,13 @@ func (s *BlockChainAPI) rpcMarshalHeader(ctx context.Context, header *types.Head
// rpcMarshalBlock uses the generalized output filler, then adds the total difficulty field, which requires
// a `BlockchainAPI`.
func (s *BlockChainAPI) rpcMarshalBlock(ctx context.Context, b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) {
- fields, err := RPCMarshalBlock(b, inclTx, fullTx, s.b.ChainConfig())
- if err != nil {
- return nil, err
- }
+ fields := RPCMarshalBlock(b, inclTx, fullTx, s.b.ChainConfig())
if inclTx {
// Note: Coreth enforces that the difficulty of a block is always 1, such that the total difficulty of a block
// will be equivalent to its height.
fields["totalDifficulty"] = (*hexutil.Big)(b.Number())
}
- return fields, err
+ return fields, nil
}
// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction
@@ -1398,6 +1462,7 @@ type RPCTransaction struct {
V *hexutil.Big `json:"v"`
R *hexutil.Big `json:"r"`
S *hexutil.Big `json:"s"`
+ YParity *hexutil.Uint64 `json:"yParity,omitempty"`
}
// newRPCTransaction returns a transaction that will serialize to the RPC
@@ -1425,25 +1490,32 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber
result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber))
result.TransactionIndex = (*hexutil.Uint64)(&index)
}
+
switch tx.Type() {
case types.LegacyTxType:
// if a legacy transaction has an EIP-155 chain id, include it explicitly
if id := tx.ChainId(); id.Sign() != 0 {
result.ChainID = (*hexutil.Big)(id)
}
+
case types.AccessListTxType:
al := tx.AccessList()
+ yparity := hexutil.Uint64(v.Sign())
result.Accesses = &al
result.ChainID = (*hexutil.Big)(tx.ChainId())
+ result.YParity = &yparity
+
case types.DynamicFeeTxType:
al := tx.AccessList()
+ yparity := hexutil.Uint64(v.Sign())
result.Accesses = &al
result.ChainID = (*hexutil.Big)(tx.ChainId())
+ result.YParity = &yparity
result.GasFeeCap = (*hexutil.Big)(tx.GasFeeCap())
result.GasTipCap = (*hexutil.Big)(tx.GasTipCap())
// if the transaction has been mined, compute the effective gas price
if baseFee != nil && blockHash != (common.Hash{}) {
- // price = min(tip, gasFeeCap - baseFee) + baseFee
+ // price = min(gasTipCap + baseFee, gasFeeCap)
price := math.BigMin(new(big.Int).Add(tx.GasTipCap(), baseFee), tx.GasFeeCap())
result.GasPrice = (*hexutil.Big)(price)
} else {
@@ -1593,7 +1665,6 @@ type BadBlockArgs struct {
// and returns them as a JSON list of block hashes.
func (s *BlockChainAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs, error) {
var (
- err error
badBlocks, reasons = s.b.BadBlocks()
results = make([]*BadBlockArgs, 0, len(badBlocks))
)
@@ -1607,9 +1678,7 @@ func (s *BlockChainAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs, erro
} else {
blockRlp = fmt.Sprintf("%#x", rlpBytes)
}
- if blockJSON, err = RPCMarshalBlock(block, true, true, s.b.ChainConfig()); err != nil {
- blockJSON = map[string]interface{}{"error": err.Error()}
- }
+ blockJSON = RPCMarshalBlock(block, true, true, s.b.ChainConfig())
results = append(results, &BadBlockArgs{
Hash: block.Hash(),
RLP: blockRlp,
@@ -1748,7 +1817,7 @@ func (s *TransactionAPI) GetRawTransactionByHash(ctx context.Context, hash commo
// GetTransactionReceipt returns the transaction receipt for the given transaction hash.
func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common.Hash) (map[string]interface{}, error) {
tx, blockHash, blockNumber, index, err := s.b.GetTransaction(ctx, hash)
- if err != nil {
+ if tx == nil || err != nil {
// When the transaction doesn't exist, the RPC method should return JSON null
// as per specification.
return nil, nil
diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go
index 7b041fa07b..121a8b9819 100644
--- a/internal/ethapi/api_test.go
+++ b/internal/ethapi/api_test.go
@@ -27,15 +27,13 @@
package ethapi
import (
- "bytes"
"context"
"crypto/ecdsa"
"encoding/json"
"errors"
- "hash"
+ "fmt"
"math/big"
"reflect"
- "sort"
"testing"
"time"
@@ -49,6 +47,7 @@ import (
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/core/vm"
"github.com/ava-labs/coreth/ethdb"
+ "github.com/ava-labs/coreth/internal/blocktest"
"github.com/ava-labs/coreth/params"
"github.com/ava-labs/coreth/rpc"
"github.com/ethereum/go-ethereum"
@@ -56,12 +55,18 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/event"
- "golang.org/x/crypto/sha3"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"
)
func TestTransaction_RoundTripRpcJSON(t *testing.T) {
+ // Note: Update the ChainID here to match the one in go-ethereum.
+ testChainConfig := *params.TestChainConfig
+ testChainConfig.ChainID = big.NewInt(1337)
+
var (
- config = params.TestChainConfig
+ config = &testChainConfig
signer = types.LatestSigner(config)
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
tests = allTransactionTypes(common.Address{0xde, 0xad}, config)
@@ -69,7 +74,7 @@ func TestTransaction_RoundTripRpcJSON(t *testing.T) {
t.Parallel()
for i, tt := range tests {
var tx2 types.Transaction
- tx, err := types.SignNewTx(key, signer, tt)
+ tx, err := types.SignNewTx(key, signer, tt.Tx)
if err != nil {
t.Fatalf("test %d: signing failed: %v", i, err)
}
@@ -82,7 +87,7 @@ func TestTransaction_RoundTripRpcJSON(t *testing.T) {
t.Fatalf("test %d: stx changed, want %x have %x", i, want, have)
}
- // rpcTransaction
+ // rpcTransaction
rpcTx := newRPCTransaction(tx, common.Hash{}, 0, 0, 0, nil, config)
if data, err := json.Marshal(rpcTx); err != nil {
t.Fatalf("test %d: marshalling failed; %v", i, err)
@@ -90,117 +95,272 @@ func TestTransaction_RoundTripRpcJSON(t *testing.T) {
t.Fatalf("test %d: unmarshal failed: %v", i, err)
} else if want, have := tx.Hash(), tx2.Hash(); want != have {
t.Fatalf("test %d: tx changed, want %x have %x", i, want, have)
+ } else {
+ want, have := tt.Want, string(data)
+ require.JSONEqf(t, want, have, "test %d: rpc json not match, want %s have %s", i, want, have)
}
}
}
-func allTransactionTypes(addr common.Address, config *params.ChainConfig) []types.TxData {
- return []types.TxData{
- &types.LegacyTx{
- Nonce: 5,
- GasPrice: big.NewInt(6),
- Gas: 7,
- To: &addr,
- Value: big.NewInt(8),
- Data: []byte{0, 1, 2, 3, 4},
- V: big.NewInt(9),
- R: big.NewInt(10),
- S: big.NewInt(11),
- },
- &types.LegacyTx{
- Nonce: 5,
- GasPrice: big.NewInt(6),
- Gas: 7,
- To: nil,
- Value: big.NewInt(8),
- Data: []byte{0, 1, 2, 3, 4},
- V: big.NewInt(32),
- R: big.NewInt(10),
- S: big.NewInt(11),
- },
- &types.AccessListTx{
- ChainID: config.ChainID,
- Nonce: 5,
- GasPrice: big.NewInt(6),
- Gas: 7,
- To: &addr,
- Value: big.NewInt(8),
- Data: []byte{0, 1, 2, 3, 4},
- AccessList: types.AccessList{
- types.AccessTuple{
- Address: common.Address{0x2},
- StorageKeys: []common.Hash{types.EmptyRootHash},
+type txData struct {
+ Tx types.TxData
+ Want string
+}
+
+func allTransactionTypes(addr common.Address, config *params.ChainConfig) []txData {
+ return []txData{
+ {
+ Tx: &types.LegacyTx{
+ Nonce: 5,
+ GasPrice: big.NewInt(6),
+ Gas: 7,
+ To: &addr,
+ Value: big.NewInt(8),
+ Data: []byte{0, 1, 2, 3, 4},
+ V: big.NewInt(9),
+ R: big.NewInt(10),
+ S: big.NewInt(11),
+ },
+ Want: `{
+ "blockHash": null,
+ "blockNumber": null,
+ "from": "0x71562b71999873db5b286df957af199ec94617f7",
+ "gas": "0x7",
+ "gasPrice": "0x6",
+ "hash": "0x5f3240454cd09a5d8b1c5d651eefae7a339262875bcd2d0e6676f3d989967008",
+ "input": "0x0001020304",
+ "nonce": "0x5",
+ "to": "0xdead000000000000000000000000000000000000",
+ "transactionIndex": null,
+ "value": "0x8",
+ "type": "0x0",
+ "chainId": "0x539",
+ "v": "0xa96",
+ "r": "0xbc85e96592b95f7160825d837abb407f009df9ebe8f1b9158a4b8dd093377f75",
+ "s": "0x1b55ea3af5574c536967b039ba6999ef6c89cf22fc04bcb296e0e8b0b9b576f5"
+ }`,
+ }, {
+ Tx: &types.LegacyTx{
+ Nonce: 5,
+ GasPrice: big.NewInt(6),
+ Gas: 7,
+ To: nil,
+ Value: big.NewInt(8),
+ Data: []byte{0, 1, 2, 3, 4},
+ V: big.NewInt(32),
+ R: big.NewInt(10),
+ S: big.NewInt(11),
+ },
+ Want: `{
+ "blockHash": null,
+ "blockNumber": null,
+ "from": "0x71562b71999873db5b286df957af199ec94617f7",
+ "gas": "0x7",
+ "gasPrice": "0x6",
+ "hash": "0x806e97f9d712b6cb7e781122001380a2837531b0fc1e5f5d78174ad4cb699873",
+ "input": "0x0001020304",
+ "nonce": "0x5",
+ "to": null,
+ "transactionIndex": null,
+ "value": "0x8",
+ "type": "0x0",
+ "chainId": "0x539",
+ "v": "0xa96",
+ "r": "0x9dc28b267b6ad4e4af6fe9289668f9305c2eb7a3241567860699e478af06835a",
+ "s": "0xa0b51a071aa9bed2cd70aedea859779dff039e3630ea38497d95202e9b1fec7"
+ }`,
+ },
+ {
+ Tx: &types.AccessListTx{
+ ChainID: config.ChainID,
+ Nonce: 5,
+ GasPrice: big.NewInt(6),
+ Gas: 7,
+ To: &addr,
+ Value: big.NewInt(8),
+ Data: []byte{0, 1, 2, 3, 4},
+ AccessList: types.AccessList{
+ types.AccessTuple{
+ Address: common.Address{0x2},
+ StorageKeys: []common.Hash{types.EmptyRootHash},
+ },
},
+ V: big.NewInt(32),
+ R: big.NewInt(10),
+ S: big.NewInt(11),
},
- V: big.NewInt(32),
- R: big.NewInt(10),
- S: big.NewInt(11),
- },
- &types.AccessListTx{
- ChainID: config.ChainID,
- Nonce: 5,
- GasPrice: big.NewInt(6),
- Gas: 7,
- To: nil,
- Value: big.NewInt(8),
- Data: []byte{0, 1, 2, 3, 4},
- AccessList: types.AccessList{
- types.AccessTuple{
- Address: common.Address{0x2},
- StorageKeys: []common.Hash{types.EmptyRootHash},
+ Want: `{
+ "blockHash": null,
+ "blockNumber": null,
+ "from": "0x71562b71999873db5b286df957af199ec94617f7",
+ "gas": "0x7",
+ "gasPrice": "0x6",
+ "hash": "0x121347468ee5fe0a29f02b49b4ffd1c8342bc4255146bb686cd07117f79e7129",
+ "input": "0x0001020304",
+ "nonce": "0x5",
+ "to": "0xdead000000000000000000000000000000000000",
+ "transactionIndex": null,
+ "value": "0x8",
+ "type": "0x1",
+ "accessList": [
+ {
+ "address": "0x0200000000000000000000000000000000000000",
+ "storageKeys": [
+ "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
+ ]
+ }
+ ],
+ "chainId": "0x539",
+ "v": "0x0",
+ "r": "0xf372ad499239ae11d91d34c559ffc5dab4daffc0069e03afcabdcdf231a0c16b",
+ "s": "0x28573161d1f9472fa0fd4752533609e72f06414f7ab5588699a7141f65d2abf",
+ "yParity": "0x0"
+ }`,
+ }, {
+ Tx: &types.AccessListTx{
+ ChainID: config.ChainID,
+ Nonce: 5,
+ GasPrice: big.NewInt(6),
+ Gas: 7,
+ To: nil,
+ Value: big.NewInt(8),
+ Data: []byte{0, 1, 2, 3, 4},
+ AccessList: types.AccessList{
+ types.AccessTuple{
+ Address: common.Address{0x2},
+ StorageKeys: []common.Hash{types.EmptyRootHash},
+ },
},
+ V: big.NewInt(32),
+ R: big.NewInt(10),
+ S: big.NewInt(11),
},
- V: big.NewInt(32),
- R: big.NewInt(10),
- S: big.NewInt(11),
- },
- &types.DynamicFeeTx{
- ChainID: config.ChainID,
- Nonce: 5,
- GasTipCap: big.NewInt(6),
- GasFeeCap: big.NewInt(9),
- Gas: 7,
- To: &addr,
- Value: big.NewInt(8),
- Data: []byte{0, 1, 2, 3, 4},
- AccessList: types.AccessList{
- types.AccessTuple{
- Address: common.Address{0x2},
- StorageKeys: []common.Hash{types.EmptyRootHash},
+ Want: `{
+ "blockHash": null,
+ "blockNumber": null,
+ "from": "0x71562b71999873db5b286df957af199ec94617f7",
+ "gas": "0x7",
+ "gasPrice": "0x6",
+ "hash": "0x067c3baebede8027b0f828a9d933be545f7caaec623b00684ac0659726e2055b",
+ "input": "0x0001020304",
+ "nonce": "0x5",
+ "to": null,
+ "transactionIndex": null,
+ "value": "0x8",
+ "type": "0x1",
+ "accessList": [
+ {
+ "address": "0x0200000000000000000000000000000000000000",
+ "storageKeys": [
+ "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
+ ]
+ }
+ ],
+ "chainId": "0x539",
+ "v": "0x1",
+ "r": "0x542981b5130d4613897fbab144796cb36d3cb3d7807d47d9c7f89ca7745b085c",
+ "s": "0x7425b9dd6c5deaa42e4ede35d0c4570c4624f68c28d812c10d806ffdf86ce63",
+ "yParity": "0x1"
+ }`,
+ }, {
+ Tx: &types.DynamicFeeTx{
+ ChainID: config.ChainID,
+ Nonce: 5,
+ GasTipCap: big.NewInt(6),
+ GasFeeCap: big.NewInt(9),
+ Gas: 7,
+ To: &addr,
+ Value: big.NewInt(8),
+ Data: []byte{0, 1, 2, 3, 4},
+ AccessList: types.AccessList{
+ types.AccessTuple{
+ Address: common.Address{0x2},
+ StorageKeys: []common.Hash{types.EmptyRootHash},
+ },
},
+ V: big.NewInt(32),
+ R: big.NewInt(10),
+ S: big.NewInt(11),
},
- V: big.NewInt(32),
- R: big.NewInt(10),
- S: big.NewInt(11),
- },
- &types.DynamicFeeTx{
- ChainID: config.ChainID,
- Nonce: 5,
- GasTipCap: big.NewInt(6),
- GasFeeCap: big.NewInt(9),
- Gas: 7,
- To: nil,
- Value: big.NewInt(8),
- Data: []byte{0, 1, 2, 3, 4},
- AccessList: types.AccessList{},
- V: big.NewInt(32),
- R: big.NewInt(10),
- S: big.NewInt(11),
+ Want: `{
+ "blockHash": null,
+ "blockNumber": null,
+ "from": "0x71562b71999873db5b286df957af199ec94617f7",
+ "gas": "0x7",
+ "gasPrice": "0x9",
+ "maxFeePerGas": "0x9",
+ "maxPriorityFeePerGas": "0x6",
+ "hash": "0xb63e0b146b34c3e9cb7fbabb5b3c081254a7ded6f1b65324b5898cc0545d79ff",
+ "input": "0x0001020304",
+ "nonce": "0x5",
+ "to": "0xdead000000000000000000000000000000000000",
+ "transactionIndex": null,
+ "value": "0x8",
+ "type": "0x2",
+ "accessList": [
+ {
+ "address": "0x0200000000000000000000000000000000000000",
+ "storageKeys": [
+ "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
+ ]
+ }
+ ],
+ "chainId": "0x539",
+ "v": "0x1",
+ "r": "0x3b167e05418a8932cd53d7578711fe1a76b9b96c48642402bb94978b7a107e80",
+ "s": "0x22f98a332d15ea2cc80386c1ebaa31b0afebfa79ebc7d039a1e0074418301fef",
+ "yParity": "0x1"
+ }`,
+ }, {
+ Tx: &types.DynamicFeeTx{
+ ChainID: config.ChainID,
+ Nonce: 5,
+ GasTipCap: big.NewInt(6),
+ GasFeeCap: big.NewInt(9),
+ Gas: 7,
+ To: nil,
+ Value: big.NewInt(8),
+ Data: []byte{0, 1, 2, 3, 4},
+ AccessList: types.AccessList{},
+ V: big.NewInt(32),
+ R: big.NewInt(10),
+ S: big.NewInt(11),
+ },
+ Want: `{
+ "blockHash": null,
+ "blockNumber": null,
+ "from": "0x71562b71999873db5b286df957af199ec94617f7",
+ "gas": "0x7",
+ "gasPrice": "0x9",
+ "maxFeePerGas": "0x9",
+ "maxPriorityFeePerGas": "0x6",
+ "hash": "0xcbab17ee031a9d5b5a09dff909f0a28aedb9b295ac0635d8710d11c7b806ec68",
+ "input": "0x0001020304",
+ "nonce": "0x5",
+ "to": null,
+ "transactionIndex": null,
+ "value": "0x8",
+ "type": "0x2",
+ "accessList": [],
+ "chainId": "0x539",
+ "v": "0x0",
+ "r": "0x6446b8a682db7e619fc6b4f6d1f708f6a17351a41c7fbd63665f469bc78b41b9",
+ "s": "0x7626abc15834f391a117c63450047309dbf84c5ce3e8e609b607062641e2de43",
+ "yParity": "0x0"
+ }`,
},
}
}
type testBackend struct {
- db ethdb.Database
- chain *core.BlockChain
+ db ethdb.Database
+ chain *core.BlockChain
+ pending *types.Block
}
func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend {
var (
- engine = dummy.NewETHFaker()
- backend = &testBackend{
- db: rawdb.NewMemoryDatabase(),
- }
+ engine = dummy.NewETHFaker()
cacheConfig = &core.CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
@@ -209,18 +369,30 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i i
}
)
// Generate blocks for testing
- _, blocks, _, _ := core.GenerateChainWithGenesis(gspec, engine, n, 10, generator)
- chain, err := core.NewBlockChain(backend.db, cacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false)
+ db, blocks, _, _ := core.GenerateChainWithGenesis(gspec, engine, n, 10, generator)
+ lastAccepted := gspec.ToBlock().Hash()
+ chain, err := core.NewBlockChain(db, cacheConfig, gspec, engine, vm.Config{}, lastAccepted, false)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
if n, err := chain.InsertChain(blocks); err != nil {
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
}
- backend.chain = chain
+ // Note: Added this compared to go-ethereum so receipts are accessible by
+ // transaction hash.
+ for _, block := range blocks {
+ require.NoError(t, chain.Accept(block))
+ }
+ chain.DrainAcceptorQueue()
+
+ backend := &testBackend{db: db, chain: chain}
return backend
}
+func (b *testBackend) setPendingBlock(block *types.Block) {
+ b.pending = block
+}
+
func (b testBackend) SyncProgress() ethereum.SyncProgress { return ethereum.SyncProgress{} }
func (b testBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) {
return big.NewInt(0), nil
@@ -240,31 +412,46 @@ func (b testBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber)
if number == rpc.LatestBlockNumber {
return b.chain.CurrentBlock(), nil
}
+ if number == rpc.PendingBlockNumber && b.pending != nil {
+ return b.pending.Header(), nil
+ }
return b.chain.GetHeaderByNumber(uint64(number)), nil
}
func (b testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
- panic("implement me")
+ return b.chain.GetHeaderByHash(hash), nil
}
func (b testBackend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) {
- panic("implement me")
+ if blockNr, ok := blockNrOrHash.Number(); ok {
+ return b.HeaderByNumber(ctx, blockNr)
+ }
+ if blockHash, ok := blockNrOrHash.Hash(); ok {
+ return b.HeaderByHash(ctx, blockHash)
+ }
+ panic("unknown type rpc.BlockNumberOrHash")
}
-func (b testBackend) CurrentHeader() *types.Header { panic("implement me") }
-func (b testBackend) CurrentBlock() *types.Header { panic("implement me") }
+func (b testBackend) CurrentHeader() *types.Header { return b.chain.CurrentBlock() }
+func (b testBackend) CurrentBlock() *types.Header { return b.chain.CurrentBlock() }
func (b testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) {
if number == rpc.LatestBlockNumber {
head := b.chain.CurrentBlock()
return b.chain.GetBlock(head.Hash(), head.Number.Uint64()), nil
}
+ if number == rpc.PendingBlockNumber {
+ return b.pending, nil
+ }
return b.chain.GetBlockByNumber(uint64(number)), nil
}
func (b testBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
- panic("implement me")
+ return b.chain.GetBlockByHash(hash), nil
}
func (b testBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) {
if blockNr, ok := blockNrOrHash.Number(); ok {
return b.BlockByNumber(ctx, blockNr)
}
- panic("implement me")
+ if blockHash, ok := blockNrOrHash.Hash(); ok {
+ return b.BlockByHash(ctx, blockHash)
+ }
+ panic("unknown type rpc.BlockNumberOrHash")
}
func (b testBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
return b.chain.GetBlock(hash, uint64(number.Int64())).Body(), nil
@@ -291,9 +478,19 @@ func (b testBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOr
}
func (b testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { panic("implement me") }
func (b testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
- panic("implement me")
+ header, err := b.HeaderByHash(ctx, hash)
+ if header == nil || err != nil {
+ return nil, err
+ }
+ receipts := rawdb.ReadReceipts(b.db, hash, header.Number.Uint64(), header.Time, b.chain.Config())
+ return receipts, nil
+}
+func (b testBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int {
+ if b.pending != nil && hash == b.pending.Hash() {
+ return nil
+ }
+ return big.NewInt(1)
}
-func (b testBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int { panic("implement me") }
func (b testBackend) GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockContext *vm.BlockContext) (*vm.EVM, func() error) {
vmError := func() error { return nil }
if vmConfig == nil {
@@ -319,7 +516,8 @@ func (b testBackend) SendTx(ctx context.Context, signedTx *types.Transaction) er
panic("implement me")
}
func (b testBackend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
- panic("implement me")
+ tx, blockHash, blockNumber, index := rawdb.ReadTransaction(b.db, txHash)
+ return tx, blockHash, blockNumber, index, nil
}
func (b testBackend) GetPoolTransactions() (types.Transactions, error) { panic("implement me") }
func (b testBackend) GetPoolTransaction(txHash common.Hash) *types.Transaction { panic("implement me") }
@@ -327,10 +525,10 @@ func (b testBackend) GetPoolNonce(ctx context.Context, addr common.Address) (uin
panic("implement me")
}
func (b testBackend) Stats() (pending int, queued int) { panic("implement me") }
-func (b testBackend) TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
+func (b testBackend) TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
panic("implement me")
}
-func (b testBackend) TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions) {
+func (b testBackend) TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
panic("implement me")
}
func (b testBackend) SubscribeNewTxsEvent(events chan<- core.NewTxsEvent) event.Subscription {
@@ -389,6 +587,7 @@ func TestEstimateGas(t *testing.T) {
var testSuite = []struct {
blockNumber rpc.BlockNumber
call TransactionArgs
+ overrides StateOverride
expectErr error
want uint64
}{
@@ -421,9 +620,30 @@ func TestEstimateGas(t *testing.T) {
expectErr: nil,
want: 53000,
},
+ {
+ blockNumber: rpc.LatestBlockNumber,
+ call: TransactionArgs{},
+ overrides: StateOverride{
+ randomAccounts[0].addr: OverrideAccount{Balance: newRPCBalance(new(big.Int).Mul(big.NewInt(1), big.NewInt(params.Ether)))},
+ },
+ expectErr: nil,
+ want: 53000,
+ },
+ {
+ blockNumber: rpc.LatestBlockNumber,
+ call: TransactionArgs{
+ From: &randomAccounts[0].addr,
+ To: &randomAccounts[1].addr,
+ Value: (*hexutil.Big)(big.NewInt(1000)),
+ },
+ overrides: StateOverride{
+ randomAccounts[0].addr: OverrideAccount{Balance: newRPCBalance(big.NewInt(0))},
+ },
+ expectErr: core.ErrInsufficientFunds,
+ },
}
for i, tc := range testSuite {
- result, err := api.EstimateGas(context.Background(), tc.call, &rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber})
+ result, err := api.EstimateGas(context.Background(), tc.call, &rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, &tc.overrides)
if tc.expectErr != nil {
if err == nil {
t.Errorf("test %d: want error %v, have nothing", i, tc.expectErr)
@@ -618,19 +838,13 @@ type Account struct {
addr common.Address
}
-type Accounts []Account
-
-func (a Accounts) Len() int { return len(a) }
-func (a Accounts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a Accounts) Less(i, j int) bool { return bytes.Compare(a[i].addr.Bytes(), a[j].addr.Bytes()) < 0 }
-
-func newAccounts(n int) (accounts Accounts) {
+func newAccounts(n int) (accounts []Account) {
for i := 0; i < n; i++ {
key, _ := crypto.GenerateKey()
addr := crypto.PubkeyToAddress(key.PublicKey)
accounts = append(accounts, Account{key: key, addr: addr})
}
- sort.Sort(accounts)
+ slices.SortFunc(accounts, func(a, b Account) int { return a.addr.Cmp(b.addr) })
return accounts
}
@@ -644,32 +858,8 @@ func hex2Bytes(str string) *hexutil.Bytes {
return &rpcBytes
}
-// testHasher is the helper tool for transaction/receipt list hashing.
-// The original hasher is trie, in order to get rid of import cycle,
-// use the testing hasher instead.
-type testHasher struct {
- hasher hash.Hash
-}
-
-func newHasher() *testHasher {
- return &testHasher{hasher: sha3.NewLegacyKeccak256()}
-}
-
-func (h *testHasher) Reset() {
- h.hasher.Reset()
-}
-
-func (h *testHasher) Update(key, val []byte) error {
- h.hasher.Write(key)
- h.hasher.Write(val)
- return nil
-}
-
-func (h *testHasher) Hash() common.Hash {
- return common.BytesToHash(h.hasher.Sum(nil))
-}
-
func TestRPCMarshalBlock(t *testing.T) {
+ t.Parallel()
var (
txs []*types.Transaction
to = common.BytesToAddress([]byte{0x11})
@@ -698,7 +888,7 @@ func TestRPCMarshalBlock(t *testing.T) {
}
txs = append(txs, tx)
}
- block := types.NewBlock(&types.Header{Number: big.NewInt(100)}, txs, nil, nil, newHasher(), nil, false)
+ block := types.NewBlock(&types.Header{Number: big.NewInt(100)}, txs, nil, nil, blocktest.NewHasher(), nil, false)
var testSuite = []struct {
inclTx bool
@@ -709,36 +899,1165 @@ func TestRPCMarshalBlock(t *testing.T) {
{
inclTx: false,
fullTx: false,
- want: `{"blockExtraData":"0x","difficulty":"0x0","extDataHash":"0x0000000000000000000000000000000000000000000000000000000000000000","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x64","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x2b9","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactionsRoot":"0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e","uncles":[]}`,
+ want: `{
+ "blockExtraData": "0x",
+ "difficulty": "0x0",
+ "extDataHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "extraData": "0x",
+ "gasLimit": "0x0",
+ "gasUsed": "0x0",
+ "hash": "0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x64",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x2b9",
+ "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "timestamp": "0x0",
+ "transactionsRoot": "0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e",
+ "uncles": []
+ }`,
},
// only tx hashes
{
inclTx: true,
fullTx: false,
- want: `{"blockExtraData":"0x","difficulty":"0x0","extDataHash":"0x0000000000000000000000000000000000000000000000000000000000000000","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x64","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x2b9","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactions":["0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605","0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4","0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5","0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1"],"transactionsRoot":"0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e","uncles":[]}`,
+ want: `{
+ "blockExtraData": "0x",
+ "difficulty": "0x0",
+ "extDataHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "extraData": "0x",
+ "gasLimit": "0x0",
+ "gasUsed": "0x0",
+ "hash": "0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x64",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x2b9",
+ "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "timestamp": "0x0",
+ "transactions": [
+ "0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605",
+ "0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4",
+ "0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5",
+ "0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1"
+ ],
+ "transactionsRoot": "0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e",
+ "uncles": []
+ }`,
},
-
// full tx details
{
inclTx: true,
fullTx: true,
- want: `{"blockExtraData":"0x","difficulty":"0x0","extDataHash":"0x0000000000000000000000000000000000000000000000000000000000000000","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x64","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x2b9","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactions":[{"blockHash":"0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605","input":"0x111111","nonce":"0x1","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x0","value":"0x6f","type":"0x1","accessList":[],"chainId":"0x539","v":"0x0","r":"0x0","s":"0x0"},{"blockHash":"0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","
hash":"0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4","input":"0x111111","nonce":"0x2","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x1","value":"0x6f","type":"0x0","chainId":"0x7fffffffffffffee","v":"0x0","r":"0x0","s":"0x0"},{"blockHash":"0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5","input":"0x111111","nonce":"0x3","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x2","value":"0x6f","type":"0x1","accessList":[],"chainId":"0x539","v":"0x0","r":"0x0","s":"0x0"},{"blockHash":"0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1","input":"0x111111","nonce":"0x4","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x3","value":"0x6f","type":"0x0","chainId":"0x7fffffffffffffee","v":"0x0","r":"0x0","s":"0x0"}],"transactionsRoot":"0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e","uncles":[]}`,
+ want: `{
+ "blockExtraData": "0x",
+ "difficulty": "0x0",
+ "extDataHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "extraData": "0x",
+ "gasLimit": "0x0",
+ "gasUsed": "0x0",
+ "hash": "0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x64",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x2b9",
+ "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "timestamp": "0x0",
+ "transactions": [
+ {
+ "blockHash": "0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757",
+ "blockNumber": "0x64",
+ "from": "0x0000000000000000000000000000000000000000",
+ "gas": "0x457",
+ "gasPrice": "0x2b67",
+ "hash": "0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605",
+ "input": "0x111111",
+ "nonce": "0x1",
+ "to": "0x0000000000000000000000000000000000000011",
+ "transactionIndex": "0x0",
+ "value": "0x6f",
+ "type": "0x1",
+ "accessList": [],
+ "chainId": "0x539",
+ "v": "0x0",
+ "r": "0x0",
+ "s": "0x0",
+ "yParity": "0x0"
+ },
+ {
+ "blockHash": "0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757",
+ "blockNumber": "0x64",
+ "from": "0x0000000000000000000000000000000000000000",
+ "gas": "0x457",
+ "gasPrice": "0x2b67",
+ "hash": "0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4",
+ "input": "0x111111",
+ "nonce": "0x2",
+ "to": "0x0000000000000000000000000000000000000011",
+ "transactionIndex": "0x1",
+ "value": "0x6f",
+ "type": "0x0",
+ "chainId": "0x7fffffffffffffee",
+ "v": "0x0",
+ "r": "0x0",
+ "s": "0x0"
+ },
+ {
+ "blockHash": "0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757",
+ "blockNumber": "0x64",
+ "from": "0x0000000000000000000000000000000000000000",
+ "gas": "0x457",
+ "gasPrice": "0x2b67",
+ "hash": "0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5",
+ "input": "0x111111",
+ "nonce": "0x3",
+ "to": "0x0000000000000000000000000000000000000011",
+ "transactionIndex": "0x2",
+ "value": "0x6f",
+ "type": "0x1",
+ "accessList": [],
+ "chainId": "0x539",
+ "v": "0x0",
+ "r": "0x0",
+ "s": "0x0",
+ "yParity": "0x0"
+ },
+ {
+ "blockHash": "0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757",
+ "blockNumber": "0x64",
+ "from": "0x0000000000000000000000000000000000000000",
+ "gas": "0x457",
+ "gasPrice": "0x2b67",
+ "hash": "0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1",
+ "input": "0x111111",
+ "nonce": "0x4",
+ "to": "0x0000000000000000000000000000000000000011",
+ "transactionIndex": "0x3",
+ "value": "0x6f",
+ "type": "0x0",
+ "chainId": "0x7fffffffffffffee",
+ "v": "0x0",
+ "r": "0x0",
+ "s": "0x0"
+ }
+ ],
+ "transactionsRoot": "0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e",
+ "uncles": []
+ }`,
},
}
for i, tc := range testSuite {
- resp, err := RPCMarshalBlock(block, tc.inclTx, tc.fullTx, params.TestChainConfig)
+ resp := RPCMarshalBlock(block, tc.inclTx, tc.fullTx, params.TestChainConfig)
+ out, err := json.Marshal(resp)
if err != nil {
- t.Errorf("test %d: got error %v", i, err)
+ t.Errorf("test %d: json marshal error: %v", i, err)
continue
}
- out, err := json.Marshal(resp)
+ assert.JSONEqf(t, tc.want, string(out), "test %d", i)
+ }
+}
+
+func TestRPCGetBlockOrHeader(t *testing.T) {
+ t.Parallel()
+
+ // Initialize test accounts
+ var (
+ acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
+ acc2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
+ acc1Addr = crypto.PubkeyToAddress(acc1Key.PublicKey)
+ acc2Addr = crypto.PubkeyToAddress(acc2Key.PublicKey)
+ genesis = &core.Genesis{
+ Config: params.TestChainConfig,
+ Alloc: core.GenesisAlloc{
+ acc1Addr: {Balance: big.NewInt(params.Ether)},
+ acc2Addr: {Balance: big.NewInt(params.Ether)},
+ },
+ }
+ genBlocks = 10
+ signer = types.HomesteadSigner{}
+ tx = types.NewTx(&types.LegacyTx{
+ Nonce: 11,
+ GasPrice: big.NewInt(11111),
+ Gas: 1111,
+ To: &acc2Addr,
+ Value: big.NewInt(111),
+ Data: []byte{0x11, 0x11, 0x11},
+ })
+ pending = types.NewBlock(&types.Header{Number: big.NewInt(11), Time: 42}, []*types.Transaction{tx}, nil, nil, blocktest.NewHasher(), nil, true)
+ )
+ backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
+ // Transfer from account[0] to account[1]
+ // value: 1000 wei
+ // fee: 0 wei
+ tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &acc2Addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), signer, acc1Key)
+ b.AddTx(tx)
+ })
+ backend.setPendingBlock(pending)
+ api := NewBlockChainAPI(backend)
+ blockHashes := make([]common.Hash, genBlocks+1)
+ ctx := context.Background()
+ for i := 0; i <= genBlocks; i++ {
+ header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(i))
if err != nil {
- t.Errorf("test %d: json marshal error: %v", i, err)
+ t.Errorf("failed to get block: %d err: %v", i, err)
+ }
+ blockHashes[i] = header.Hash()
+ }
+ pendingHash := pending.Hash()
+
+ var testSuite = []struct {
+ blockNumber rpc.BlockNumber
+ blockHash *common.Hash
+ fullTx bool
+ reqHeader bool
+ want string
+ expectErr error
+ }{
+ // 0. latest header
+ {
+ blockNumber: rpc.LatestBlockNumber,
+ reqHeader: true,
+ want: `{
+ "baseFeePerGas": "0x28a7a56427",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extDataGasUsed": "0x0",
+ "extDataHash": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0xe4e1c0",
+ "gasUsed": "0x5208",
+ "hash": "0x9e839b3ad2ecd76f842bb6891144e073f015c785f5aad0001968222334131d02",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0xa",
+ "parentHash": "0x3556a043faadc4f03ba18453a084ba91d49c6baec50d7792dab1f3f552d2c973",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "stateRoot": "0xdded7f848268f119f12c77fdf32520ac3f98d710164997b6adf038e76c5007fe",
+ "timestamp": "0x64",
+ "totalDifficulty": "0xa",
+ "transactionsRoot": "0xf578b0855e1c4509f5248387fcc5d3144552ade53be98825df0884f36fbc3ab9"
+ }`,
+ },
+ // 1. genesis header
+ {
+ blockNumber: rpc.BlockNumber(0),
+ reqHeader: true,
+ want: `{
+ "baseFeePerGas": "0x34630b8a00",
+ "difficulty": "0x20000",
+ "extDataHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "extraData": "0x",
+ "gasLimit": "0x47e7c4",
+ "gasUsed": "0x0",
+ "hash": "0x1509a989ede83d85f51c9a489ef5f9a1ef15e08ec50f9d569ad41b56ecc4dffd",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x0",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "stateRoot": "0xb5a65ee2c90afcbaccd940a768b2a719394755b7275bce8a4c0c742991e17131",
+ "timestamp": "0x0",
+ "totalDifficulty": "0x0",
+ "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
+ }`,
+ },
+ // 2. #1 header
+ {
+ blockNumber: rpc.BlockNumber(1),
+ reqHeader: true,
+ want: `{
+ "baseFeePerGas": "0x34630b8a00",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extDataGasUsed": "0x0",
+ "extDataHash": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0xe4e1c0",
+ "gasUsed": "0x5208",
+ "hash": "0x4729ded876444349cc71f0062017ccf35068c2831d27f75c1d35bc4d3eb0c3ba",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x1",
+ "parentHash": "0x1509a989ede83d85f51c9a489ef5f9a1ef15e08ec50f9d569ad41b56ecc4dffd",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "stateRoot": "0xe6a34faf8c7bd056de278fa20a0bf3236c8c157f6bbf40bced8e5961c51f3691",
+ "timestamp": "0xa",
+ "totalDifficulty": "0x1",
+ "transactionsRoot": "0x272d13afea9f2f2c9b9ab3d8bbdb492ce5f7b215c493adaac5d98abc9ad62352"
+ }`,
+ },
+ // 3. latest-1 header
+ {
+ blockNumber: rpc.BlockNumber(9),
+ reqHeader: true,
+ want: `{
+ "baseFeePerGas": "0x29d101e35b",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extDataGasUsed": "0x0",
+ "extDataHash": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0xe4e1c0",
+ "gasUsed": "0x5208",
+ "hash": "0x3556a043faadc4f03ba18453a084ba91d49c6baec50d7792dab1f3f552d2c973",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x9",
+ "parentHash": "0x2006bc7d2d25edaef78eb0dd7392262bd865a7a0ab4ec70c169ab02eeb3a53f9",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "stateRoot": "0x559db971f1bdcc4a39b612c81c26c3d91ca805d61eb614132352196f03d38192",
+ "timestamp": "0x5a",
+ "totalDifficulty": "0x9",
+ "transactionsRoot": "0x636f1c9b3e00b425fbab75c79989315b6f30c48f5a55ee636b1fc3805538c5f7"
+ }`,
+ },
+ // 4. latest+1 header
+ {
+ blockNumber: rpc.BlockNumber(11),
+ reqHeader: true,
+ want: "null",
+ },
+ // 5. pending header
+ {
+ blockNumber: rpc.PendingBlockNumber,
+ reqHeader: true,
+ want: `{
+ "difficulty": "0x0",
+ "extDataHash": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "extraData": "0x",
+ "gasLimit": "0x0",
+ "gasUsed": "0x0",
+ "hash": "0x7f667c54161c0e19e2a5193bf8c6ae6eeb53a0c429861fae948e0c463472cf67",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0xb",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "timestamp": "0x2a",
+ "totalDifficulty": "0xb",
+ "transactionsRoot": "0x98d9f6dd0aa479c0fb448f2627e9f1964aca699fccab8f6e95861547a4699e37"
+ }`,
+ },
+ // 6. latest block
+ {
+ blockNumber: rpc.LatestBlockNumber,
+ want: `{
+ "baseFeePerGas": "0x28a7a56427",
+ "blockExtraData": "0x",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extDataGasUsed": "0x0",
+ "extDataHash": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0xe4e1c0",
+ "gasUsed": "0x5208",
+ "hash": "0x9e839b3ad2ecd76f842bb6891144e073f015c785f5aad0001968222334131d02",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0xa",
+ "parentHash": "0x3556a043faadc4f03ba18453a084ba91d49c6baec50d7792dab1f3f552d2c973",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x2df",
+ "stateRoot": "0xdded7f848268f119f12c77fdf32520ac3f98d710164997b6adf038e76c5007fe",
+ "timestamp": "0x64",
+ "totalDifficulty": "0xa",
+ "transactions": [
+ "0x2947d62ddb16c5312dc40e5d9b29d75447bc011e1393c5f1544144bc764e16f8"
+ ],
+ "transactionsRoot": "0xf578b0855e1c4509f5248387fcc5d3144552ade53be98825df0884f36fbc3ab9",
+ "uncles": []
+ }`,
+ },
+ // 7. genesis block
+ {
+ blockNumber: rpc.BlockNumber(0),
+ want: `{
+ "baseFeePerGas": "0x34630b8a00",
+ "blockExtraData": "0x",
+ "difficulty": "0x20000",
+ "extDataHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "extraData": "0x",
+ "gasLimit": "0x47e7c4",
+ "gasUsed": "0x0",
+ "hash": "0x1509a989ede83d85f51c9a489ef5f9a1ef15e08ec50f9d569ad41b56ecc4dffd",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x0",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x224",
+ "stateRoot": "0xb5a65ee2c90afcbaccd940a768b2a719394755b7275bce8a4c0c742991e17131",
+ "timestamp": "0x0",
+ "totalDifficulty": "0x0",
+ "transactions": [],
+ "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncles": []
+ }`,
+ },
+ // 8. #1 block
+ {
+ blockNumber: rpc.BlockNumber(1),
+ want: `{
+ "baseFeePerGas": "0x34630b8a00",
+ "blockExtraData": "0x",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extDataGasUsed": "0x0",
+ "extDataHash": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0xe4e1c0",
+ "gasUsed": "0x5208",
+ "hash": "0x4729ded876444349cc71f0062017ccf35068c2831d27f75c1d35bc4d3eb0c3ba",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x1",
+ "parentHash": "0x1509a989ede83d85f51c9a489ef5f9a1ef15e08ec50f9d569ad41b56ecc4dffd",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x2df",
+ "stateRoot": "0xe6a34faf8c7bd056de278fa20a0bf3236c8c157f6bbf40bced8e5961c51f3691",
+ "timestamp": "0xa",
+ "totalDifficulty": "0x1",
+ "transactions": [
+ "0x09220a8629fd020cbb341ab146e6acb4dc4811ab5fdf021bec3d3219c5a29ab3"
+ ],
+ "transactionsRoot": "0x272d13afea9f2f2c9b9ab3d8bbdb492ce5f7b215c493adaac5d98abc9ad62352",
+ "uncles": []
+ }`,
+ },
+ // 9. latest-1 block
+ {
+ blockNumber: rpc.BlockNumber(9),
+ fullTx: true,
+ want: `{
+ "baseFeePerGas": "0x29d101e35b",
+ "blockExtraData": "0x",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extDataGasUsed": "0x0",
+ "extDataHash": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0xe4e1c0",
+ "gasUsed": "0x5208",
+ "hash": "0x3556a043faadc4f03ba18453a084ba91d49c6baec50d7792dab1f3f552d2c973",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x9",
+ "parentHash": "0x2006bc7d2d25edaef78eb0dd7392262bd865a7a0ab4ec70c169ab02eeb3a53f9",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x2df",
+ "stateRoot": "0x559db971f1bdcc4a39b612c81c26c3d91ca805d61eb614132352196f03d38192",
+ "timestamp": "0x5a",
+ "totalDifficulty": "0x9",
+ "transactions": [
+ {
+ "blockHash": "0x3556a043faadc4f03ba18453a084ba91d49c6baec50d7792dab1f3f552d2c973",
+ "blockNumber": "0x9",
+ "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
+ "gas": "0x5208",
+ "gasPrice": "0x29d101e35b",
+ "hash": "0xc187ad4e657c1a75234a6456f52bb6d8fe3a234729cec11afa46bea7ffbce0d7",
+ "input": "0x",
+ "nonce": "0x8",
+ "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e",
+ "transactionIndex": "0x0",
+ "value": "0x3e8",
+ "type": "0x0",
+ "v": "0x1b",
+ "r": "0xc3590d4884299ac2e6d6db2de1aa36caf8ce3630bd41a5dd862b7aa5820a8501",
+ "s": "0x72325946a27ab5b142405a4db54691fe00b2249eed6dd6667fe9d36f1412bd1"
+ }
+ ],
+ "transactionsRoot": "0x636f1c9b3e00b425fbab75c79989315b6f30c48f5a55ee636b1fc3805538c5f7",
+ "uncles": []
+ }`,
+ },
+ // 10. latest+1 block
+ {
+ blockNumber: rpc.BlockNumber(11),
+ fullTx: true,
+ want: "null",
+ },
+ // 11. pending block
+ {
+ blockNumber: rpc.PendingBlockNumber,
+ want: `{
+ "blockExtraData": "0x",
+ "difficulty": "0x0",
+ "extDataHash": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "extraData": "0x",
+ "gasLimit": "0x0",
+ "gasUsed": "0x0",
+ "hash": "0x7f667c54161c0e19e2a5193bf8c6ae6eeb53a0c429861fae948e0c463472cf67",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0xb",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x23d",
+ "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "timestamp": "0x2a",
+ "totalDifficulty": "0xb",
+ "transactions": [
+ "0x4afee081df5dff7a025964032871f7d4ba4d21baf5f6376a2f4a9f79fc506298"
+ ],
+ "transactionsRoot": "0x98d9f6dd0aa479c0fb448f2627e9f1964aca699fccab8f6e95861547a4699e37",
+ "uncles": []
+ }`,
+ },
+ // 12. pending block + fullTx
+ {
+ blockNumber: rpc.PendingBlockNumber,
+ fullTx: true,
+ want: `{
+ "blockExtraData": "0x",
+ "difficulty": "0x0",
+ "extDataHash": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "extraData": "0x",
+ "gasLimit": "0x0",
+ "gasUsed": "0x0",
+ "hash": "0x7f667c54161c0e19e2a5193bf8c6ae6eeb53a0c429861fae948e0c463472cf67",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0xb",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x23d",
+ "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "timestamp": "0x2a",
+ "totalDifficulty": "0xb",
+ "transactions": [
+ {
+ "blockHash": "0x7f667c54161c0e19e2a5193bf8c6ae6eeb53a0c429861fae948e0c463472cf67",
+ "blockNumber": "0xb",
+ "from": "0x0000000000000000000000000000000000000000",
+ "gas": "0x457",
+ "gasPrice": "0x2b67",
+ "hash": "0x4afee081df5dff7a025964032871f7d4ba4d21baf5f6376a2f4a9f79fc506298",
+ "input": "0x111111",
+ "nonce": "0xb",
+ "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e",
+ "transactionIndex": "0x0",
+ "value": "0x6f",
+ "type": "0x0",
+ "chainId": "0x7fffffffffffffee",
+ "v": "0x0",
+ "r": "0x0",
+ "s": "0x0"
+ }
+ ],
+ "transactionsRoot": "0x98d9f6dd0aa479c0fb448f2627e9f1964aca699fccab8f6e95861547a4699e37",
+ "uncles": []
+ }`,
+ },
+ // 13. latest header by hash
+ {
+ blockHash: &blockHashes[len(blockHashes)-1],
+ reqHeader: true,
+ want: `{
+ "baseFeePerGas": "0x28a7a56427",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extDataGasUsed": "0x0",
+ "extDataHash": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0xe4e1c0",
+ "gasUsed": "0x5208",
+ "hash": "0x9e839b3ad2ecd76f842bb6891144e073f015c785f5aad0001968222334131d02",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0xa",
+ "parentHash": "0x3556a043faadc4f03ba18453a084ba91d49c6baec50d7792dab1f3f552d2c973",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "stateRoot": "0xdded7f848268f119f12c77fdf32520ac3f98d710164997b6adf038e76c5007fe",
+ "timestamp": "0x64",
+ "totalDifficulty": "0xa",
+ "transactionsRoot": "0xf578b0855e1c4509f5248387fcc5d3144552ade53be98825df0884f36fbc3ab9"
+ }`,
+ },
+ // 14. genesis header by hash
+ {
+ blockHash: &blockHashes[0],
+ reqHeader: true,
+ want: `{
+ "baseFeePerGas": "0x34630b8a00",
+ "difficulty": "0x20000",
+ "extDataHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "extraData": "0x",
+ "gasLimit": "0x47e7c4",
+ "gasUsed": "0x0",
+ "hash": "0x1509a989ede83d85f51c9a489ef5f9a1ef15e08ec50f9d569ad41b56ecc4dffd",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x0",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "stateRoot": "0xb5a65ee2c90afcbaccd940a768b2a719394755b7275bce8a4c0c742991e17131",
+ "timestamp": "0x0",
+ "totalDifficulty": "0x0",
+ "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
+ }`,
+ },
+ // 15. #1 header
+ {
+ blockHash: &blockHashes[1],
+ reqHeader: true,
+ want: `{
+ "baseFeePerGas": "0x34630b8a00",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extDataGasUsed": "0x0",
+ "extDataHash": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0xe4e1c0",
+ "gasUsed": "0x5208",
+ "hash": "0x4729ded876444349cc71f0062017ccf35068c2831d27f75c1d35bc4d3eb0c3ba",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x1",
+ "parentHash": "0x1509a989ede83d85f51c9a489ef5f9a1ef15e08ec50f9d569ad41b56ecc4dffd",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "stateRoot": "0xe6a34faf8c7bd056de278fa20a0bf3236c8c157f6bbf40bced8e5961c51f3691",
+ "timestamp": "0xa",
+ "totalDifficulty": "0x1",
+ "transactionsRoot": "0x272d13afea9f2f2c9b9ab3d8bbdb492ce5f7b215c493adaac5d98abc9ad62352"
+ }`,
+ },
+ // 16. latest-1 header
+ {
+ blockHash: &blockHashes[len(blockHashes)-2],
+ reqHeader: true,
+ want: `{
+ "baseFeePerGas": "0x29d101e35b",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extDataGasUsed": "0x0",
+ "extDataHash": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0xe4e1c0",
+ "gasUsed": "0x5208",
+ "hash": "0x3556a043faadc4f03ba18453a084ba91d49c6baec50d7792dab1f3f552d2c973",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x9",
+ "parentHash": "0x2006bc7d2d25edaef78eb0dd7392262bd865a7a0ab4ec70c169ab02eeb3a53f9",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "stateRoot": "0x559db971f1bdcc4a39b612c81c26c3d91ca805d61eb614132352196f03d38192",
+ "timestamp": "0x5a",
+ "totalDifficulty": "0x9",
+ "transactionsRoot": "0x636f1c9b3e00b425fbab75c79989315b6f30c48f5a55ee636b1fc3805538c5f7"
+ }`,
+ },
+ // 17. empty hash
+ {
+ blockHash: &common.Hash{},
+ reqHeader: true,
+ want: "null",
+ },
+ // 18. pending hash
+ {
+ blockHash: &pendingHash,
+ reqHeader: true,
+ want: `null`,
+ },
+ // 19. latest block
+ {
+ blockHash: &blockHashes[len(blockHashes)-1],
+ want: `{
+ "baseFeePerGas": "0x28a7a56427",
+ "blockExtraData": "0x",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extDataGasUsed": "0x0",
+ "extDataHash": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0xe4e1c0",
+ "gasUsed": "0x5208",
+ "hash": "0x9e839b3ad2ecd76f842bb6891144e073f015c785f5aad0001968222334131d02",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0xa",
+ "parentHash": "0x3556a043faadc4f03ba18453a084ba91d49c6baec50d7792dab1f3f552d2c973",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x2df",
+ "stateRoot": "0xdded7f848268f119f12c77fdf32520ac3f98d710164997b6adf038e76c5007fe",
+ "timestamp": "0x64",
+ "totalDifficulty": "0xa",
+ "transactions": [
+ "0x2947d62ddb16c5312dc40e5d9b29d75447bc011e1393c5f1544144bc764e16f8"
+ ],
+ "transactionsRoot": "0xf578b0855e1c4509f5248387fcc5d3144552ade53be98825df0884f36fbc3ab9",
+ "uncles": []
+ }`,
+ },
+ // 20. genesis block
+ {
+ blockHash: &blockHashes[0],
+ want: `{
+ "baseFeePerGas": "0x34630b8a00",
+ "blockExtraData": "0x",
+ "difficulty": "0x20000",
+ "extDataHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "extraData": "0x",
+ "gasLimit": "0x47e7c4",
+ "gasUsed": "0x0",
+ "hash": "0x1509a989ede83d85f51c9a489ef5f9a1ef15e08ec50f9d569ad41b56ecc4dffd",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x0",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x224",
+ "stateRoot": "0xb5a65ee2c90afcbaccd940a768b2a719394755b7275bce8a4c0c742991e17131",
+ "timestamp": "0x0",
+ "totalDifficulty": "0x0",
+ "transactions": [],
+ "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncles": []
+ }`,
+ },
+ // 21. #1 block
+ {
+ blockHash: &blockHashes[1],
+ want: `{
+ "baseFeePerGas": "0x34630b8a00",
+ "blockExtraData": "0x",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extDataGasUsed": "0x0",
+ "extDataHash": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0xe4e1c0",
+ "gasUsed": "0x5208",
+ "hash": "0x4729ded876444349cc71f0062017ccf35068c2831d27f75c1d35bc4d3eb0c3ba",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x1",
+ "parentHash": "0x1509a989ede83d85f51c9a489ef5f9a1ef15e08ec50f9d569ad41b56ecc4dffd",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x2df",
+ "stateRoot": "0xe6a34faf8c7bd056de278fa20a0bf3236c8c157f6bbf40bced8e5961c51f3691",
+ "timestamp": "0xa",
+ "totalDifficulty": "0x1",
+ "transactions": [
+ "0x09220a8629fd020cbb341ab146e6acb4dc4811ab5fdf021bec3d3219c5a29ab3"
+ ],
+ "transactionsRoot": "0x272d13afea9f2f2c9b9ab3d8bbdb492ce5f7b215c493adaac5d98abc9ad62352",
+ "uncles": []
+ }`,
+ },
+ // 22. latest-1 block
+ {
+ blockHash: &blockHashes[len(blockHashes)-2],
+ fullTx: true,
+ want: `{
+ "baseFeePerGas": "0x29d101e35b",
+ "blockExtraData": "0x",
+ "blockGasCost": "0x0",
+ "difficulty": "0x1",
+ "extDataGasUsed": "0x0",
+ "extDataHash": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "gasLimit": "0xe4e1c0",
+ "gasUsed": "0x5208",
+ "hash": "0x3556a043faadc4f03ba18453a084ba91d49c6baec50d7792dab1f3f552d2c973",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "number": "0x9",
+ "parentHash": "0x2006bc7d2d25edaef78eb0dd7392262bd865a7a0ab4ec70c169ab02eeb3a53f9",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "size": "0x2df",
+ "stateRoot": "0x559db971f1bdcc4a39b612c81c26c3d91ca805d61eb614132352196f03d38192",
+ "timestamp": "0x5a",
+ "totalDifficulty": "0x9",
+ "transactions": [
+ {
+ "blockHash": "0x3556a043faadc4f03ba18453a084ba91d49c6baec50d7792dab1f3f552d2c973",
+ "blockNumber": "0x9",
+ "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
+ "gas": "0x5208",
+ "gasPrice": "0x29d101e35b",
+ "hash": "0xc187ad4e657c1a75234a6456f52bb6d8fe3a234729cec11afa46bea7ffbce0d7",
+ "input": "0x",
+ "nonce": "0x8",
+ "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e",
+ "transactionIndex": "0x0",
+ "value": "0x3e8",
+ "type": "0x0",
+ "v": "0x1b",
+ "r": "0xc3590d4884299ac2e6d6db2de1aa36caf8ce3630bd41a5dd862b7aa5820a8501",
+ "s": "0x72325946a27ab5b142405a4db54691fe00b2249eed6dd6667fe9d36f1412bd1"
+ }
+ ],
+ "transactionsRoot": "0x636f1c9b3e00b425fbab75c79989315b6f30c48f5a55ee636b1fc3805538c5f7",
+ "uncles": []
+ }`,
+ },
+ // 23. empty hash + body
+ {
+ blockHash: &common.Hash{},
+ fullTx: true,
+ want: "null",
+ },
+ // 24. pending block
+ {
+ blockHash: &pendingHash,
+ want: `null`,
+ },
+ // 25. pending block + fullTx
+ {
+ blockHash: &pendingHash,
+ fullTx: true,
+ want: `null`,
+ },
+ }
+
+ for i, tt := range testSuite {
+ var (
+ result map[string]interface{}
+ err error
+ )
+ if tt.blockHash != nil {
+ if tt.reqHeader {
+ result = api.GetHeaderByHash(context.Background(), *tt.blockHash)
+ } else {
+ result, err = api.GetBlockByHash(context.Background(), *tt.blockHash, tt.fullTx)
+ }
+ } else {
+ if tt.reqHeader {
+ result, err = api.GetHeaderByNumber(context.Background(), tt.blockNumber)
+ } else {
+ result, err = api.GetBlockByNumber(context.Background(), tt.blockNumber, tt.fullTx)
+ }
+ }
+ if tt.expectErr != nil {
+ if err == nil {
+ t.Errorf("test %d: want error %v, have nothing", i, tt.expectErr)
+ continue
+ }
+ if !errors.Is(err, tt.expectErr) {
+ t.Errorf("test %d: error mismatch, want %v, have %v", i, tt.expectErr, err)
+ }
continue
}
- if have := string(out); have != tc.want {
- t.Errorf("test %d: want: %s have: %s", i, tc.want, have)
+ if err != nil {
+ t.Errorf("test %d: want no error, have %v", i, err)
+ continue
+ }
+ data, err := json.Marshal(result)
+ if err != nil {
+ t.Errorf("test %d: json marshal error", i)
+ continue
+ }
+ want, have := tt.want, string(data)
+ require.JSONEqf(t, want, have, "test %d: json not match, want: %s, have: %s", i, want, have)
+ }
+}
+
+func TestRPCGetTransactionReceipt(t *testing.T) {
+ t.Parallel()
+
+ // Initialize test accounts
+ var (
+ acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
+ acc2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
+ acc1Addr = crypto.PubkeyToAddress(acc1Key.PublicKey)
+ acc2Addr = crypto.PubkeyToAddress(acc2Key.PublicKey)
+ contract = common.HexToAddress("0000000000000000000000000000000000031ec7")
+ genesis = &core.Genesis{
+ Config: params.TestChainConfig,
+ Alloc: core.GenesisAlloc{
+ acc1Addr: {Balance: big.NewInt(params.Ether)},
+ acc2Addr: {Balance: big.NewInt(params.Ether)},
+ // // SPDX-License-Identifier: GPL-3.0
+ // pragma solidity >=0.7.0 <0.9.0;
+ //
+ // contract Token {
+ // event Transfer(address indexed from, address indexed to, uint256 value);
+ // function transfer(address to, uint256 value) public returns (bool) {
+ // emit Transfer(msg.sender, to, value);
+ // return true;
+ // }
+ // }
+ contract: {Balance: big.NewInt(params.Ether), Code: common.FromHex("0x608060405234801561001057600080fd5b506004361061002b5760003560e01c8063a9059cbb14610030575b600080fd5b61004a6004803603810190610045919061016a565b610060565b60405161005791906101c5565b60405180910390f35b60008273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040516100bf91906101ef565b60405180910390a36001905092915050565b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000610101826100d6565b9050919050565b610111816100f6565b811461011c57600080fd5b50565b60008135905061012e81610108565b92915050565b6000819050919050565b61014781610134565b811461015257600080fd5b50565b6000813590506101648161013e565b92915050565b60008060408385031215610181576101806100d1565b5b600061018f8582860161011f565b92505060206101a085828601610155565b9150509250929050565b60008115159050919050565b6101bf816101aa565b82525050565b60006020820190506101da60008301846101b6565b92915050565b6101e981610134565b82525050565b600060208201905061020460008301846101e0565b9291505056fea2646970667358221220b469033f4b77b9565ee84e0a2f04d496b18160d26034d54f9487e57788fd36d564736f6c63430008120033")},
+ },
+ }
+ genBlocks = 5
+ signer = types.LatestSignerForChainID(params.TestChainConfig.ChainID)
+ txHashes = make([]common.Hash, genBlocks)
+ )
+ backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
+ var (
+ tx *types.Transaction
+ err error
+ )
+ switch i {
+ case 0:
+ // transfer 1000wei
+ tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &acc2Addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), types.HomesteadSigner{}, acc1Key)
+ case 1:
+ // create contract
+ tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: nil, Gas: 53100, GasPrice: b.BaseFee(), Data: common.FromHex("0x60806040")}), signer, acc1Key)
+ case 2:
+ // with logs
+ // transfer(address to, uint256 value)
+ data := fmt.Sprintf("0xa9059cbb%s%s", common.HexToHash(common.BigToAddress(big.NewInt(int64(i + 1))).Hex()).String()[2:], common.BytesToHash([]byte{byte(i + 11)}).String()[2:])
+ tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &contract, Gas: 60000, GasPrice: b.BaseFee(), Data: common.FromHex(data)}), signer, acc1Key)
+ case 3:
+ // dynamic fee with logs
+ // transfer(address to, uint256 value)
+ data := fmt.Sprintf("0xa9059cbb%s%s", common.HexToHash(common.BigToAddress(big.NewInt(int64(i + 1))).Hex()).String()[2:], common.BytesToHash([]byte{byte(i + 11)}).String()[2:])
+ fee := big.NewInt(500)
+ fee.Add(fee, b.BaseFee())
+ tx, err = types.SignTx(types.NewTx(&types.DynamicFeeTx{Nonce: uint64(i), To: &contract, Gas: 60000, Value: big.NewInt(1), GasTipCap: big.NewInt(500), GasFeeCap: fee, Data: common.FromHex(data)}), signer, acc1Key)
+ case 4:
+ // access list with contract create
+ accessList := types.AccessList{{
+ Address: contract,
+ StorageKeys: []common.Hash{{0}},
+ }}
+ tx, err = types.SignTx(types.NewTx(&types.AccessListTx{Nonce: uint64(i), To: nil, Gas: 58100, GasPrice: b.BaseFee(), Data: common.FromHex("0x60806040"), AccessList: accessList}), signer, acc1Key)
+ }
+ if err != nil {
+ t.Errorf("failed to sign tx: %v", err)
+ }
+ if tx != nil {
+ b.AddTx(tx)
+ txHashes[i] = tx.Hash()
+ }
+ })
+ api := NewTransactionAPI(backend, new(AddrLocker))
+ blockHashes := make([]common.Hash, genBlocks+1)
+ ctx := context.Background()
+ for i := 0; i <= genBlocks; i++ {
+ header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(i))
+ if err != nil {
+ t.Errorf("failed to get block: %d err: %v", i, err)
+ }
+ blockHashes[i] = header.Hash()
+ }
+
+ var testSuite = []struct {
+ txHash common.Hash
+ want string
+ }{
+ // 0. normal success
+ {
+ txHash: txHashes[0],
+ want: `{
+ "blockHash": "0x1db4db39bd2505db96090946cfabd2d1d16fb02fd40af3cf353ee7c24886d38d",
+ "blockNumber": "0x1",
+ "contractAddress": null,
+ "cumulativeGasUsed": "0x5208",
+ "effectiveGasPrice": "0x34630b8a00",
+ "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
+ "gasUsed": "0x5208",
+ "logs": [],
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "status": "0x1",
+ "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e",
+ "transactionHash": "0x09220a8629fd020cbb341ab146e6acb4dc4811ab5fdf021bec3d3219c5a29ab3",
+ "transactionIndex": "0x0",
+ "type": "0x0"
+ }`,
+ },
+ // 1. create contract
+ {
+ txHash: txHashes[1],
+ want: `{
+ "blockHash": "0x7e7958ad3c28186f4422d83426582145f66c6bcf1c6bb97d5350a5e56503de91",
+ "blockNumber": "0x2",
+ "contractAddress": "0xae9bea628c4ce503dcfd7e305cab4e29e7476592",
+ "cumulativeGasUsed": "0xcf50",
+ "effectiveGasPrice": "0x32ee841b80",
+ "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
+ "gasUsed": "0xcf50",
+ "logs": [],
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "status": "0x1",
+ "to": null,
+ "transactionHash": "0x517f3174bd4501d55f0f93589ef0102152ab808f51bf595f2779461f04871a32",
+ "transactionIndex": "0x0",
+ "type": "0x0"
+ }`,
+ },
+ // 2. with logs success
+ {
+ txHash: txHashes[2],
+ want: `{
+ "blockHash": "0x931e848eb68753a8332bee071a17c34870edfceb2a4f7edc019db79ee74cc924",
+ "blockNumber": "0x3",
+ "contractAddress": null,
+ "cumulativeGasUsed": "0x5e28",
+ "effectiveGasPrice": "0x318455c568",
+ "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
+ "gasUsed": "0x5e28",
+ "logs": [
+ {
+ "address": "0x0000000000000000000000000000000000031ec7",
+ "topics": [
+ "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
+ "0x000000000000000000000000703c4b2bd70c169f5717101caee543299fc946c7",
+ "0x0000000000000000000000000000000000000000000000000000000000000003"
+ ],
+ "data": "0x000000000000000000000000000000000000000000000000000000000000000d",
+ "blockNumber": "0x3",
+ "transactionHash": "0x0e9c460065fee166157eaadf702a01fb6ac1ce27b651e32850a8b09f71f93937",
+ "transactionIndex": "0x0",
+ "blockHash": "0x931e848eb68753a8332bee071a17c34870edfceb2a4f7edc019db79ee74cc924",
+ "logIndex": "0x0",
+ "removed": false
+ }
+ ],
+ "logsBloom": "0x00000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000800000000000000008000000000000000000000000000000000020000000080000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000400000000002000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000",
+ "status": "0x1",
+ "to": "0x0000000000000000000000000000000000031ec7",
+ "transactionHash": "0x0e9c460065fee166157eaadf702a01fb6ac1ce27b651e32850a8b09f71f93937",
+ "transactionIndex": "0x0",
+ "type": "0x0"
+ }`,
+ },
+ // 3. dynamic tx with logs success
+ {
+ txHash: txHashes[3],
+ want: `{
+ "blockHash": "0x682de8741b70a2fd77d9b1bb553cae3c994ebe5b6fb61daf11d559e130ad1db8",
+ "blockNumber": "0x4",
+ "contractAddress": null,
+ "cumulativeGasUsed": "0x538d",
+ "effectiveGasPrice": "0x302436f3a8",
+ "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
+ "gasUsed": "0x538d",
+ "logs": [],
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "status": "0x0",
+ "to": "0x0000000000000000000000000000000000031ec7",
+ "transactionHash": "0xcdd1122456f8ea113309e2ba5ecc8f389bbdc2e6bcced8eb103c6fdef201bf1a",
+ "transactionIndex": "0x0",
+ "type": "0x2"
+ }`,
+ },
+ // 4. access list tx with create contract
+ {
+ txHash: txHashes[4],
+ want: `{
+ "blockHash": "0xea3014b7aaa2b451f024d393c6a258876660dfe670d244a827ac3a73fb29676b",
+ "blockNumber": "0x5",
+ "contractAddress": "0xfdaa97661a584d977b4d3abb5370766ff5b86a18",
+ "cumulativeGasUsed": "0xe01c",
+ "effectiveGasPrice": "0x2ecde015a8",
+ "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
+ "gasUsed": "0xe01c",
+ "logs": [],
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "status": "0x1",
+ "to": null,
+ "transactionHash": "0xa9616380994fd7502d0350ee57882bb6e95d6678fa6c4782f179c9f5f3529c48",
+ "transactionIndex": "0x0",
+ "type": "0x1"
+ }`,
+ },
+ // 5. txhash empty
+ {
+ txHash: common.Hash{},
+ want: `null`,
+ },
+ // 6. txhash not found
+ {
+ txHash: common.HexToHash("deadbeef"),
+ want: `null`,
+ },
+ }
+
+ for i, tt := range testSuite {
+ var (
+ result interface{}
+ err error
+ )
+ result, err = api.GetTransactionReceipt(context.Background(), tt.txHash)
+ if err != nil {
+ t.Errorf("test %d: want no error, have %v", i, err)
+ continue
+ }
+ data, err := json.Marshal(result)
+ if err != nil {
+ t.Errorf("test %d: json marshal error", i)
+ continue
}
+ want, have := tt.want, string(data)
+ require.JSONEqf(t, want, have, "test %d: json not match, want: %s, have: %s", i, want, have)
}
}
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index 1ea48b3415..0b8c29aeaa 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -87,8 +87,8 @@ type Backend interface {
GetPoolTransaction(txHash common.Hash) *types.Transaction
GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error)
Stats() (pending int, queued int)
- TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions)
- TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions)
+ TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction)
+ TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction)
SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
ChainConfig() *params.ChainConfig
diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go
index b5adc73bb5..39ddc94e69 100644
--- a/internal/ethapi/transaction_args.go
+++ b/internal/ethapi/transaction_args.go
@@ -123,7 +123,7 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
AccessList: args.AccessList,
}
pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)
- estimated, err := DoEstimateGas(ctx, b, callArgs, pendingBlockNr, b.RPCGasCap())
+ estimated, err := DoEstimateGas(ctx, b, callArgs, pendingBlockNr, nil, b.RPCGasCap())
if err != nil {
return err
}
diff --git a/internal/flags/categories.go b/internal/flags/categories.go
index 02d063a65a..d7500157e3 100644
--- a/internal/flags/categories.go
+++ b/internal/flags/categories.go
@@ -33,7 +33,8 @@ const (
LightCategory = "LIGHT CLIENT"
DevCategory = "DEVELOPER CHAIN"
EthashCategory = "ETHASH"
- TxPoolCategory = "TRANSACTION POOL"
+ TxPoolCategory = "TRANSACTION POOL (EVM)"
+ BlobPoolCategory = "TRANSACTION POOL (BLOB)"
PerfCategory = "PERFORMANCE TUNING"
AccountCategory = "ACCOUNT"
APICategory = "API AND CONSOLE"
diff --git a/metrics/resetting_timer.go b/metrics/resetting_timer.go
index e5327d3bd3..8e23c8eeea 100644
--- a/metrics/resetting_timer.go
+++ b/metrics/resetting_timer.go
@@ -2,9 +2,10 @@ package metrics
import (
"math"
- "sort"
"sync"
"time"
+
+ "golang.org/x/exp/slices"
)
// Initial slice capacity for the values stored in a ResettingTimer
@@ -65,7 +66,7 @@ func (NilResettingTimer) Snapshot() ResettingTimer {
}
// Time is a no-op.
-func (NilResettingTimer) Time(func()) {}
+func (NilResettingTimer) Time(f func()) { f() }
// Update is a no-op.
func (NilResettingTimer) Update(time.Duration) {}
@@ -186,7 +187,7 @@ func (t *ResettingTimerSnapshot) Mean() float64 {
}
func (t *ResettingTimerSnapshot) calc(percentiles []float64) {
- sort.Sort(Int64Slice(t.values))
+ slices.Sort(t.values)
count := len(t.values)
if count > 0 {
@@ -232,10 +233,3 @@ func (t *ResettingTimerSnapshot) calc(percentiles []float64) {
t.calculated = true
}
-
-// Int64Slice attaches the methods of sort.Interface to []int64, sorting in increasing order.
-type Int64Slice []int64
-
-func (s Int64Slice) Len() int { return len(s) }
-func (s Int64Slice) Less(i, j int) bool { return s[i] < s[j] }
-func (s Int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
diff --git a/metrics/sample.go b/metrics/sample.go
index afcaa21184..252a878f58 100644
--- a/metrics/sample.go
+++ b/metrics/sample.go
@@ -3,9 +3,10 @@ package metrics
import (
"math"
"math/rand"
- "sort"
"sync"
"time"
+
+ "golang.org/x/exp/slices"
)
const rescaleThreshold = time.Hour
@@ -282,17 +283,17 @@ func SampleMin(values []int64) int64 {
}
// SamplePercentiles returns an arbitrary percentile of the slice of int64.
-func SamplePercentile(values int64Slice, p float64) float64 {
+func SamplePercentile(values []int64, p float64) float64 {
return SamplePercentiles(values, []float64{p})[0]
}
// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
// int64.
-func SamplePercentiles(values int64Slice, ps []float64) []float64 {
+func SamplePercentiles(values []int64, ps []float64) []float64 {
scores := make([]float64, len(ps))
size := len(values)
if size > 0 {
- sort.Sort(values)
+ slices.Sort(values)
for i, p := range ps {
pos := p * float64(size+1)
if pos < 1.0 {
@@ -633,9 +634,3 @@ func (h *expDecaySampleHeap) down(i, n int) {
i = j
}
}
-
-type int64Slice []int64
-
-func (p int64Slice) Len() int { return len(p) }
-func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
diff --git a/metrics/timer.go b/metrics/timer.go
index a63c9dfb6c..2e1a9be472 100644
--- a/metrics/timer.go
+++ b/metrics/timer.go
@@ -123,7 +123,7 @@ func (NilTimer) Stop() {}
func (NilTimer) Sum() int64 { return 0 }
// Time is a no-op.
-func (NilTimer) Time(func()) {}
+func (NilTimer) Time(f func()) { f() }
// Update is a no-op.
func (NilTimer) Update(time.Duration) {}
diff --git a/metrics/writer.go b/metrics/writer.go
index 256fbd14c9..82434e9d1d 100644
--- a/metrics/writer.go
+++ b/metrics/writer.go
@@ -3,8 +3,10 @@ package metrics
import (
"fmt"
"io"
- "sort"
+ "strings"
"time"
+
+ "golang.org/x/exp/slices"
)
// Write sorts writes each metric in the given registry periodically to the
@@ -18,12 +20,11 @@ func Write(r Registry, d time.Duration, w io.Writer) {
// WriteOnce sorts and writes metrics in the given registry to the given
// io.Writer.
func WriteOnce(r Registry, w io.Writer) {
- var namedMetrics namedMetricSlice
+ var namedMetrics []namedMetric
r.Each(func(name string, i interface{}) {
namedMetrics = append(namedMetrics, namedMetric{name, i})
})
-
- sort.Sort(namedMetrics)
+ slices.SortFunc(namedMetrics, namedMetric.cmp)
for _, namedMetric := range namedMetrics {
switch metric := namedMetric.m.(type) {
case Counter:
@@ -91,13 +92,6 @@ type namedMetric struct {
m interface{}
}
-// namedMetricSlice is a slice of namedMetrics that implements sort.Interface.
-type namedMetricSlice []namedMetric
-
-func (nms namedMetricSlice) Len() int { return len(nms) }
-
-func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] }
-
-func (nms namedMetricSlice) Less(i, j int) bool {
- return nms[i].name < nms[j].name
+func (m namedMetric) cmp(other namedMetric) int {
+ return strings.Compare(m.name, other.name)
}
diff --git a/metrics/writer_test.go b/metrics/writer_test.go
index 1aacc28712..8376bf8975 100644
--- a/metrics/writer_test.go
+++ b/metrics/writer_test.go
@@ -1,19 +1,20 @@
package metrics
import (
- "sort"
"testing"
+
+ "golang.org/x/exp/slices"
)
func TestMetricsSorting(t *testing.T) {
- var namedMetrics = namedMetricSlice{
+ var namedMetrics = []namedMetric{
{name: "zzz"},
{name: "bbb"},
{name: "fff"},
{name: "ggg"},
}
- sort.Sort(namedMetrics)
+ slices.SortFunc(namedMetrics, namedMetric.cmp)
for i, name := range []string{"bbb", "fff", "ggg", "zzz"} {
if namedMetrics[i].name != name {
t.Fail()
diff --git a/miner/worker.go b/miner/worker.go
index 3569382bb5..38a2ac52e9 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -57,8 +57,7 @@ const (
// environment is the worker's current environment and holds all of the current state information.
type environment struct {
- signer types.Signer
-
+ signer types.Signer
state *state.StateDB // apply state changes here
tcount int // tx count in cycle
gasPool *core.GasPool // available gas used to pack transactions
@@ -173,8 +172,7 @@ func (w *worker) commitNewWork() (*types.Block, error) {
pending := w.eth.TxPool().Pending(true)
// Split the pending transactions into locals and remotes
- localTxs := make(map[common.Address]types.Transactions)
- remoteTxs := pending
+ localTxs, remoteTxs := make(map[common.Address][]*types.Transaction), pending
for _, account := range w.eth.TxPool().Locals() {
if txs := remoteTxs[account]; len(txs) > 0 {
delete(remoteTxs, account)
diff --git a/params/config.go b/params/config.go
index 86aef28504..eb94c5d01a 100644
--- a/params/config.go
+++ b/params/config.go
@@ -535,6 +535,8 @@ type ChainConfig struct {
DUpgradeBlockTimestamp *uint64 `json:"dUpgradeBlockTimestamp,omitempty"`
// Cancun activates the Cancun upgrade from Ethereum. (nil = no fork, 0 = already activated)
CancunTime *uint64 `json:"cancunTime,omitempty"`
+ // Verkle activates the Verkle upgrade from Ethereum. (nil = no fork, 0 = already activated)
+ VerkleTime *uint64 `json:"verkleTime,omitempty"`
}
// AvalancheContext provides Avalanche specific context directly into the EVM.
@@ -709,6 +711,11 @@ func (c *ChainConfig) IsCancun(time uint64) bool {
return utils.IsTimestampForked(c.CancunTime, time)
}
+// IsVerkle returns whether time is either equal to the Verkle fork time or greater.
+func (c *ChainConfig) IsVerkle(time uint64) bool {
+ return utils.IsTimestampForked(c.VerkleTime, time)
+}
+
// CheckCompatible checks whether scheduled fork transitions have been imported
// with a mismatching chain configuration.
func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64, time uint64) *ConfigCompatError {
@@ -798,6 +805,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
{name: "cortinaBlockTimestamp", timestamp: c.CortinaBlockTimestamp},
{name: "dUpgradeBlockTimestamp", timestamp: c.DUpgradeBlockTimestamp},
{name: "cancunTime", timestamp: c.CancunTime},
+ {name: "verkleTime", timestamp: c.VerkleTime},
} {
if lastFork.name != "" {
// Next one must be higher number
@@ -901,6 +909,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, height *big.Int, time
if isForkTimestampIncompatible(c.CancunTime, newcfg.CancunTime, time) {
return newTimestampCompatError("Cancun fork block timestamp", c.DUpgradeBlockTimestamp, newcfg.DUpgradeBlockTimestamp)
}
+ if isForkTimestampIncompatible(c.VerkleTime, newcfg.VerkleTime, time) {
+ return newTimestampCompatError("Verkle fork timestamp", c.VerkleTime, newcfg.VerkleTime)
+ }
return nil
}
@@ -1016,6 +1027,7 @@ type Rules struct {
IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool
IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool
IsCancun bool
+ IsVerkle bool
// Rules for Avalanche releases
IsApricotPhase1, IsApricotPhase2, IsApricotPhase3, IsApricotPhase4, IsApricotPhase5 bool
@@ -1048,6 +1060,7 @@ func (c *ChainConfig) rules(num *big.Int, timestamp uint64) Rules {
IsPetersburg: c.IsPetersburg(num),
IsIstanbul: c.IsIstanbul(num),
IsCancun: c.IsCancun(timestamp),
+ IsVerkle: c.IsVerkle(timestamp),
}
}
diff --git a/params/protocol_params.go b/params/protocol_params.go
index 8c598d050e..9062963da3 100644
--- a/params/protocol_params.go
+++ b/params/protocol_params.go
@@ -163,9 +163,15 @@ const (
Bls12381MapG1Gas uint64 = 5500 // Gas price for BLS12-381 mapping field element to G1 operation
Bls12381MapG2Gas uint64 = 110000 // Gas price for BLS12-381 mapping field element to G2 operation
- BlobTxDataGasPerBlob = 1 << 17 // Gas consumption of a single data blob (== blob byte size)
- BlobTxMinDataGasprice = 1 // Minimum gas price for data blobs
- BlobTxDataGaspriceUpdateFraction = 2225652 // Controls the maximum rate of change for data gas price
+ BlobTxBytesPerFieldElement = 32 // Size in bytes of a field element
+ BlobTxFieldElementsPerBlob = 4096 // Number of field elements stored in a single data blob
+ BlobTxHashVersion = 0x01 // Version byte of the commitment hash
+ BlobTxMaxBlobGasPerBlock = 1 << 19 // Maximum consumable blob gas for data blobs per block
+ BlobTxTargetBlobGasPerBlock = 1 << 18 // Target consumable blob gas for data blobs per block (for 1559-like pricing)
+ BlobTxBlobGasPerBlob = 1 << 17 // Gas consumption of a single data blob (== blob byte size)
+ BlobTxMinBlobGasprice = 1 // Minimum gas price for data blobs
+ BlobTxBlobGaspriceUpdateFraction = 2225652 // Controls the maximum rate of change for blob gas price
+ BlobTxPointEvaluationPrecompileGas = 50000 // Gas price for the point evaluation precompile.
// Avalanche Stateful Precompile Params
// Gas price for native asset balance lookup. Based on the cost of an SLOAD operation since native
diff --git a/params/version.go b/params/version.go
index b63bd113a8..f7ce5902fe 100644
--- a/params/version.go
+++ b/params/version.go
@@ -33,7 +33,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 12 // Minor version component of the current release
- VersionPatch = 0 // Patch version component of the current release
+ VersionPatch = 2 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)
diff --git a/plugin/evm/atomic_backend.go b/plugin/evm/atomic_backend.go
index f3ea4b88f9..2ae68f710e 100644
--- a/plugin/evm/atomic_backend.go
+++ b/plugin/evm/atomic_backend.go
@@ -168,7 +168,10 @@ func (a *atomicBackend) initialize(lastAcceptedHeight uint64) error {
if err := a.atomicTrie.UpdateTrie(tr, height, combinedOps); err != nil {
return err
}
- root, nodes := tr.Commit(false)
+ root, nodes, err := tr.Commit(false)
+ if err != nil {
+ return err
+ }
if err := a.atomicTrie.InsertTrie(nodes, root); err != nil {
return err
}
@@ -181,6 +184,11 @@ func (a *atomicBackend) initialize(lastAcceptedHeight uint64) error {
return err
}
}
+ // open the atomic trie again, since committed tries cannot be updated
+ tr, err = a.atomicTrie.OpenTrie(root)
+ if err != nil {
+ return err
+ }
heightsIndexed++
if time.Since(lastUpdate) > progressLogFrequency {
@@ -387,7 +395,10 @@ func (a *atomicBackend) InsertTxs(blockHash common.Hash, blockHeight uint64, par
}
// get the new root and pin the atomic trie changes in memory.
- root, nodes := tr.Commit(false)
+ root, nodes, err := tr.Commit(false)
+ if err != nil {
+ return common.Hash{}, err
+ }
if err := a.atomicTrie.InsertTrie(nodes, root); err != nil {
return common.Hash{}, err
}
diff --git a/plugin/evm/atomic_syncer.go b/plugin/evm/atomic_syncer.go
index 45003219dc..1c558c4850 100644
--- a/plugin/evm/atomic_syncer.go
+++ b/plugin/evm/atomic_syncer.go
@@ -92,7 +92,10 @@ func (s *atomicSyncer) onLeafs(keys [][]byte, values [][]byte) error {
if height > lastHeight {
// If this key belongs to a new height, we commit
// the trie at the previous height before adding this key.
- root, nodes := s.trie.Commit(false)
+ root, nodes, err := s.trie.Commit(false)
+ if err != nil {
+ return err
+ }
if err := s.atomicTrie.InsertTrie(nodes, root); err != nil {
return err
}
@@ -109,6 +112,11 @@ func (s *atomicSyncer) onLeafs(keys [][]byte, values [][]byte) error {
return err
}
}
+ // open the atomic trie again, since committed tries cannot be updated
+ s.trie, err = s.atomicTrie.OpenTrie(root)
+ if err != nil {
+ return err
+ }
lastHeight = height
}
@@ -123,7 +131,10 @@ func (s *atomicSyncer) onLeafs(keys [][]byte, values [][]byte) error {
// commit the trie to disk and perform the final checks that we synced the target root correctly.
func (s *atomicSyncer) onFinish() error {
// commit the trie on finish
- root, nodes := s.trie.Commit(false)
+ root, nodes, err := s.trie.Commit(false)
+ if err != nil {
+ return err
+ }
if err := s.atomicTrie.InsertTrie(nodes, root); err != nil {
return err
}
diff --git a/plugin/evm/atomic_trie.go b/plugin/evm/atomic_trie.go
index a434f5ef40..8815e7ef31 100644
--- a/plugin/evm/atomic_trie.go
+++ b/plugin/evm/atomic_trie.go
@@ -266,7 +266,11 @@ func (a *atomicTrie) Iterator(root common.Hash, cursor []byte) (AtomicTrieIterat
return nil, err
}
- iter := trie.NewIterator(t.NodeIterator(cursor))
+ nodeIt, err := t.NodeIterator(cursor)
+ if err != nil {
+ return nil, err
+ }
+ iter := trie.NewIterator(nodeIt)
return NewAtomicTrieIterator(iter, a.codec), iter.Err
}
@@ -310,7 +314,7 @@ func (a *atomicTrie) LastAcceptedRoot() common.Hash {
func (a *atomicTrie) InsertTrie(nodes *trienode.NodeSet, root common.Hash) error {
if nodes != nil {
- if err := a.trieDB.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil {
+ if err := a.trieDB.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
return err
}
}
diff --git a/plugin/evm/atomic_trie_iterator_test.go b/plugin/evm/atomic_trie_iterator_test.go
index 494302c1bb..f55e07d20f 100644
--- a/plugin/evm/atomic_trie_iterator_test.go
+++ b/plugin/evm/atomic_trie_iterator_test.go
@@ -94,7 +94,8 @@ func TestIteratorHandlesInvalidData(t *testing.T) {
require.NoError(err)
require.NoError(atomicTrieSnapshot.Update(utils.RandomBytes(50), utils.RandomBytes(50)))
- nextRoot, nodes := atomicTrieSnapshot.Commit(false)
+ nextRoot, nodes, err := atomicTrieSnapshot.Commit(false)
+ require.NoError(err)
err = atomicTrie.InsertTrie(nodes, nextRoot)
require.NoError(err)
isCommit, err := atomicTrie.AcceptTrie(lastCommittedHeight+commitInterval, nextRoot)
diff --git a/plugin/evm/atomic_trie_test.go b/plugin/evm/atomic_trie_test.go
index 609bb089f5..05e94cc72f 100644
--- a/plugin/evm/atomic_trie_test.go
+++ b/plugin/evm/atomic_trie_test.go
@@ -44,7 +44,10 @@ func indexAtomicTxs(tr AtomicTrie, height uint64, atomicOps map[ids.ID]*atomic.R
if err := tr.UpdateTrie(snapshot, height, atomicOps); err != nil {
return err
}
- root, nodes := snapshot.Commit(false)
+ root, nodes, err := snapshot.Commit(false)
+ if err != nil {
+ return err
+ }
if err := tr.InsertTrie(nodes, root); err != nil {
return err
}
diff --git a/plugin/evm/atomic_tx_repository.go b/plugin/evm/atomic_tx_repository.go
index ef2cf0c601..115125c70d 100644
--- a/plugin/evm/atomic_tx_repository.go
+++ b/plugin/evm/atomic_tx_repository.go
@@ -273,8 +273,15 @@ func (a *atomicTxRepository) write(height uint64, txs []*Tx, bonus bool) error {
// with txs initialized from the txID index.
copyTxs := make([]*Tx, len(txs))
copy(copyTxs, txs)
- slices.SortFunc(copyTxs, func(i, j *Tx) bool {
- return i.Less(j)
+ slices.SortFunc(copyTxs, func(i, j *Tx) int {
+ switch {
+ case i.Less(j):
+ return -1
+ case j.Less(i):
+ return 1
+ default:
+ return 0
+ }
})
txs = copyTxs
}
diff --git a/plugin/evm/atomic_tx_repository_test.go b/plugin/evm/atomic_tx_repository_test.go
index b17b93d0ba..b491839991 100644
--- a/plugin/evm/atomic_tx_repository_test.go
+++ b/plugin/evm/atomic_tx_repository_test.go
@@ -104,8 +104,15 @@ func verifyTxs(t testing.TB, repo AtomicTxRepository, txMap map[uint64][]*Tx) {
assert.NoErrorf(t, err, "unexpected error on GetByHeight at height=%d", height)
assert.Lenf(t, txs, len(expectedTxs), "wrong len of txs at height=%d", height)
// txs should be stored in order of txID
- slices.SortFunc(expectedTxs, func(i, j *Tx) bool {
- return i.Less(j)
+ slices.SortFunc(expectedTxs, func(i, j *Tx) int {
+ switch {
+ case i.Less(j):
+ return -1
+ case j.Less(i):
+ return 1
+ default:
+ return 0
+ }
})
txIDs := set.Set[ids.ID]{}
diff --git a/plugin/evm/block_verification.go b/plugin/evm/block_verification.go
index 392e76070b..482f5e7fe9 100644
--- a/plugin/evm/block_verification.go
+++ b/plugin/evm/block_verification.go
@@ -249,12 +249,15 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error {
}
}
- // Verify the existence / non-existence of excessDataGas
- if rules.IsCancun && ethHeader.ExcessDataGas == nil {
- return errors.New("missing excessDataGas")
+ // Verify the existence / non-existence of excessBlobGas
+ if rules.IsCancun && ethHeader.ExcessBlobGas == nil {
+ return errors.New("missing excessBlobGas")
}
- if !rules.IsCancun && ethHeader.ExcessDataGas != nil {
- return fmt.Errorf("invalid excessDataGas: have %d, expected nil", ethHeader.ExcessDataGas)
+ if !rules.IsCancun && ethHeader.ExcessBlobGas != nil {
+ return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", ethHeader.ExcessBlobGas)
+ }
+ if !rules.IsCancun && ethHeader.BlobGasUsed != nil {
+ return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", *ethHeader.BlobGasUsed)
}
return nil
diff --git a/plugin/evm/config.go b/plugin/evm/config.go
index 7fa3eb33a0..3c49156750 100644
--- a/plugin/evm/config.go
+++ b/plugin/evm/config.go
@@ -8,7 +8,7 @@ import (
"fmt"
"time"
- "github.com/ava-labs/coreth/core/txpool"
+ "github.com/ava-labs/coreth/core/txpool/legacypool"
"github.com/ava-labs/coreth/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/spf13/cast"
@@ -213,14 +213,14 @@ func (c *Config) SetDefaults() {
c.RPCTxFeeCap = defaultRpcTxFeeCap
c.MetricsExpensiveEnabled = defaultMetricsExpensiveEnabled
- c.TxPoolJournal = txpool.DefaultConfig.Journal
- c.TxPoolRejournal = Duration{txpool.DefaultConfig.Rejournal}
- c.TxPoolPriceLimit = txpool.DefaultConfig.PriceLimit
- c.TxPoolPriceBump = txpool.DefaultConfig.PriceBump
- c.TxPoolAccountSlots = txpool.DefaultConfig.AccountSlots
- c.TxPoolGlobalSlots = txpool.DefaultConfig.GlobalSlots
- c.TxPoolAccountQueue = txpool.DefaultConfig.AccountQueue
- c.TxPoolGlobalQueue = txpool.DefaultConfig.GlobalQueue
+ c.TxPoolJournal = legacypool.DefaultConfig.Journal
+ c.TxPoolRejournal = Duration{legacypool.DefaultConfig.Rejournal}
+ c.TxPoolPriceLimit = legacypool.DefaultConfig.PriceLimit
+ c.TxPoolPriceBump = legacypool.DefaultConfig.PriceBump
+ c.TxPoolAccountSlots = legacypool.DefaultConfig.AccountSlots
+ c.TxPoolGlobalSlots = legacypool.DefaultConfig.GlobalSlots
+ c.TxPoolAccountQueue = legacypool.DefaultConfig.AccountQueue
+ c.TxPoolGlobalQueue = legacypool.DefaultConfig.GlobalQueue
c.APIMaxDuration.Duration = defaultApiMaxDuration
c.WSCPURefillRate.Duration = defaultWsCpuRefillRate
diff --git a/plugin/evm/gasprice_update.go b/plugin/evm/gasprice_update.go
index 04dad40a43..85dea012c1 100644
--- a/plugin/evm/gasprice_update.go
+++ b/plugin/evm/gasprice_update.go
@@ -21,7 +21,7 @@ type gasPriceUpdater struct {
}
type gasPriceSetter interface {
- SetGasPrice(price *big.Int)
+ SetGasTip(price *big.Int)
SetMinFee(price *big.Int)
}
@@ -39,15 +39,15 @@ func (vm *VM) handleGasPriceUpdates() {
// start handles the appropriate gas price and minimum fee updates required by [gpu.chainConfig]
func (gpu *gasPriceUpdater) start() {
- // Sets the initial gas price to the launch minimum gas price
- gpu.setter.SetGasPrice(big.NewInt(params.LaunchMinGasPrice))
+ // Sets the initial gas tip to the launch minimum gas price
+ gpu.setter.SetGasTip(big.NewInt(params.LaunchMinGasPrice))
- // Updates to the minimum gas price as of ApricotPhase1 if it's already in effect or starts a goroutine to enable it at the correct time
- if disabled := gpu.handleUpdate(gpu.setter.SetGasPrice, gpu.chainConfig.ApricotPhase1BlockTimestamp, big.NewInt(params.ApricotPhase1MinGasPrice)); disabled {
+ // Updates to the minimum gas tip as of ApricotPhase1 if it's already in effect or starts a goroutine to enable it at the correct time
+ if disabled := gpu.handleUpdate(gpu.setter.SetGasTip, gpu.chainConfig.ApricotPhase1BlockTimestamp, big.NewInt(params.ApricotPhase1MinGasPrice)); disabled {
return
}
- // Updates to the minimum gas price as of ApricotPhase3 if it's already in effect or starts a goroutine to enable it at the correct time
- if disabled := gpu.handleUpdate(gpu.setter.SetGasPrice, gpu.chainConfig.ApricotPhase3BlockTimestamp, big.NewInt(0)); disabled {
+ // Updates to the minimum gas tip as of ApricotPhase3 if it's already in effect or starts a goroutine to enable it at the correct time
+ if disabled := gpu.handleUpdate(gpu.setter.SetGasTip, gpu.chainConfig.ApricotPhase3BlockTimestamp, big.NewInt(0)); disabled {
return
}
if disabled := gpu.handleUpdate(gpu.setter.SetMinFee, gpu.chainConfig.ApricotPhase3BlockTimestamp, big.NewInt(params.ApricotPhase3MinBaseFee)); disabled {
diff --git a/plugin/evm/gasprice_update_test.go b/plugin/evm/gasprice_update_test.go
index d9ee265d98..d5acc4a208 100644
--- a/plugin/evm/gasprice_update_test.go
+++ b/plugin/evm/gasprice_update_test.go
@@ -14,15 +14,15 @@ import (
)
type mockGasPriceSetter struct {
- lock sync.Mutex
- price, minFee *big.Int
+ lock sync.Mutex
+ tip, minFee *big.Int
}
-func (m *mockGasPriceSetter) SetGasPrice(price *big.Int) {
+func (m *mockGasPriceSetter) SetGasTip(tip *big.Int) {
m.lock.Lock()
defer m.lock.Unlock()
- m.price = price
+ m.tip = tip
}
func (m *mockGasPriceSetter) SetMinFee(minFee *big.Int) {
@@ -36,7 +36,7 @@ func (m *mockGasPriceSetter) GetStatus() (*big.Int, *big.Int) {
m.lock.Lock()
defer m.lock.Unlock()
- return m.price, m.minFee
+ return m.tip, m.minFee
}
func attemptAwait(t *testing.T, wg *sync.WaitGroup, delay time.Duration) {
@@ -65,7 +65,7 @@ func TestUpdateGasPriceShutsDown(t *testing.T) {
// create a goroutine waiting for an hour before updating the gas price
config.ApricotPhase3BlockTimestamp = utils.TimeToNewUint64(time.Now().Add(time.Hour))
gpu := &gasPriceUpdater{
- setter: &mockGasPriceSetter{price: big.NewInt(1)},
+ setter: &mockGasPriceSetter{tip: big.NewInt(1)},
chainConfig: &config,
shutdownChan: shutdownChan,
wg: wg,
@@ -82,7 +82,7 @@ func TestUpdateGasPriceInitializesPrice(t *testing.T) {
shutdownChan := make(chan struct{})
wg := &sync.WaitGroup{}
gpu := &gasPriceUpdater{
- setter: &mockGasPriceSetter{price: big.NewInt(1)},
+ setter: &mockGasPriceSetter{tip: big.NewInt(1)},
chainConfig: params.TestChainConfig,
shutdownChan: shutdownChan,
wg: wg,
@@ -93,7 +93,7 @@ func TestUpdateGasPriceInitializesPrice(t *testing.T) {
// should be created when all prices should be set from the start
attemptAwait(t, wg, time.Millisecond)
- if gpu.setter.(*mockGasPriceSetter).price.Cmp(big.NewInt(0)) != 0 {
+ if gpu.setter.(*mockGasPriceSetter).tip.Cmp(big.NewInt(0)) != 0 {
t.Fatalf("Expected price to match minimum base fee for apricot phase3")
}
if minFee := gpu.setter.(*mockGasPriceSetter).minFee; minFee == nil || minFee.Cmp(big.NewInt(params.ApricotPhase4MinBaseFee)) != 0 {
@@ -110,7 +110,7 @@ func TestUpdateGasPriceUpdatesPrice(t *testing.T) {
config.ApricotPhase3BlockTimestamp = utils.TimeToNewUint64(time.Now().Add(250 * time.Millisecond))
config.ApricotPhase4BlockTimestamp = utils.TimeToNewUint64(time.Now().Add(3 * time.Second))
gpu := &gasPriceUpdater{
- setter: &mockGasPriceSetter{price: big.NewInt(1)},
+ setter: &mockGasPriceSetter{tip: big.NewInt(1)},
chainConfig: &config,
shutdownChan: shutdownChan,
wg: wg,
diff --git a/plugin/evm/gossiper.go b/plugin/evm/gossiper.go
index 0f7d1a287d..1df559ef84 100644
--- a/plugin/evm/gossiper.go
+++ b/plugin/evm/gossiper.go
@@ -107,7 +107,7 @@ func (vm *VM) createGossiper(stats GossipStats) Gossiper {
// We assume that [txs] contains an array of nonce-ordered transactions for a given
// account. This array of transactions can have gaps and start at a nonce lower
// than the current state of an account.
-func (n *pushGossiper) queueExecutableTxs(state *state.StateDB, baseFee *big.Int, txs map[common.Address]types.Transactions, maxTxs int) types.Transactions {
+func (n *pushGossiper) queueExecutableTxs(state *state.StateDB, baseFee *big.Int, txs map[common.Address][]*types.Transaction, maxTxs int) types.Transactions {
// Setup heap for transactions
heads := make(types.TxByPriceAndTime, 0, len(txs))
for addr, accountTxs := range txs {
@@ -177,8 +177,7 @@ func (n *pushGossiper) queueRegossipTxs() types.Transactions {
pending := n.txPool.Pending(true)
// Split the pending transactions into locals and remotes
- localTxs := make(map[common.Address]types.Transactions)
- remoteTxs := pending
+ localTxs, remoteTxs := make(map[common.Address][]*types.Transaction), pending
for _, account := range n.txPool.Locals() {
if txs := remoteTxs[account]; len(txs) > 0 {
delete(remoteTxs, account)
@@ -344,7 +343,7 @@ func (n *pushGossiper) gossipEthTxs(force bool) (int, error) {
selectedTxs := make([]*types.Transaction, 0)
for _, tx := range txs {
txHash := tx.Hash()
- txStatus := n.txPool.Status([]common.Hash{txHash})[0]
+ txStatus := n.txPool.Status(txHash)
if txStatus != txpool.TxStatusPending {
continue
}
diff --git a/plugin/evm/gossiper_eth_gossiping_test.go b/plugin/evm/gossiper_eth_gossiping_test.go
index ae36e793e8..459224c6b9 100644
--- a/plugin/evm/gossiper_eth_gossiping_test.go
+++ b/plugin/evm/gossiper_eth_gossiping_test.go
@@ -102,7 +102,7 @@ func TestMempoolEthTxsAddedTxsGossipedAfterActivation(t *testing.T) {
err := vm.Shutdown(context.Background())
assert.NoError(err)
}()
- vm.txPool.SetGasPrice(common.Big1)
+ vm.txPool.SetGasTip(common.Big1)
vm.txPool.SetMinFee(common.Big0)
// create eth txes
@@ -190,7 +190,7 @@ func TestMempoolEthTxsAddedTxsGossipedAfterActivationChunking(t *testing.T) {
err := vm.Shutdown(context.Background())
assert.NoError(err)
}()
- vm.txPool.SetGasPrice(common.Big1)
+ vm.txPool.SetGasTip(common.Big1)
vm.txPool.SetMinFee(common.Big0)
// create eth txes
@@ -252,7 +252,7 @@ func TestMempoolEthTxsAppGossipHandling(t *testing.T) {
err := vm.Shutdown(context.Background())
assert.NoError(err)
}()
- vm.txPool.SetGasPrice(common.Big1)
+ vm.txPool.SetGasTip(common.Big1)
vm.txPool.SetMinFee(common.Big0)
var (
@@ -307,7 +307,7 @@ func TestMempoolEthTxsRegossipSingleAccount(t *testing.T) {
err := vm.Shutdown(context.Background())
assert.NoError(err)
}()
- vm.txPool.SetGasPrice(common.Big1)
+ vm.txPool.SetGasTip(common.Big1)
vm.txPool.SetMinFee(common.Big0)
// create eth txes
@@ -347,7 +347,7 @@ func TestMempoolEthTxsRegossip(t *testing.T) {
err := vm.Shutdown(context.Background())
assert.NoError(err)
}()
- vm.txPool.SetGasPrice(common.Big1)
+ vm.txPool.SetGasTip(common.Big1)
vm.txPool.SetMinFee(common.Big0)
// create eth txes
diff --git a/plugin/evm/import_tx.go b/plugin/evm/import_tx.go
index 8b034182c4..625c5cc0f9 100644
--- a/plugin/evm/import_tx.go
+++ b/plugin/evm/import_tx.go
@@ -116,8 +116,15 @@ func (utx *UnsignedImportTx) Verify(
return errOutputsNotSortedUnique
}
} else if rules.IsApricotPhase1 {
- if !slices.IsSortedFunc(utx.Outs, func(i, j EVMOutput) bool {
- return i.Less(j)
+ if !slices.IsSortedFunc(utx.Outs, func(i, j EVMOutput) int {
+ switch {
+ case i.Less(j):
+ return -1
+ case j.Less(i):
+ return 1
+ default:
+ return 0
+ }
}) {
return errOutputsNotSorted
}
diff --git a/plugin/evm/tx.go b/plugin/evm/tx.go
index 1f0ba6112b..210c7e37d0 100644
--- a/plugin/evm/tx.go
+++ b/plugin/evm/tx.go
@@ -267,8 +267,15 @@ func mergeAtomicOps(txs []*Tx) (map[ids.ID]*atomic.Requests, error) {
// with txs initialized from the txID index.
copyTxs := make([]*Tx, len(txs))
copy(copyTxs, txs)
- slices.SortFunc(copyTxs, func(i, j *Tx) bool {
- return i.Less(j)
+ slices.SortFunc(copyTxs, func(i, j *Tx) int {
+ switch {
+ case i.Less(j):
+ return -1
+ case j.Less(i):
+ return 1
+ default:
+ return 0
+ }
})
txs = copyTxs
}
diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go
index ac3872a03a..ef844e0d65 100644
--- a/plugin/evm/tx_gossip_test.go
+++ b/plugin/evm/tx_gossip_test.go
@@ -44,8 +44,8 @@ func TestEthTxGossip(t *testing.T) {
require.NoError(vm.Shutdown(context.Background()))
}()
- txPoolNewHeads := make(chan core.NewTxPoolHeadEvent)
- vm.txPool.SubscribeNewHeadEvent(txPoolNewHeads)
+ txPoolNewHeads := make(chan core.NewTxPoolReorgEvent, 1)
+ vm.txPool.SubscribeNewReorgEvent(txPoolNewHeads)
importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]})
require.NoError(err)
diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go
index bd3792df26..4d20e178ad 100644
--- a/plugin/evm/vm_test.go
+++ b/plugin/evm/vm_test.go
@@ -531,72 +531,72 @@ func TestVMContinuosProfiler(t *testing.T) {
func TestVMUpgrades(t *testing.T) {
genesisTests := []struct {
- name string
- genesis string
- expectedGasPrice *big.Int
+ name string
+ genesis string
+ expectedGasTip *big.Int
}{
{
- name: "Apricot Phase 0",
- genesis: genesisJSONApricotPhase0,
- expectedGasPrice: big.NewInt(params.LaunchMinGasPrice),
+ name: "Apricot Phase 0",
+ genesis: genesisJSONApricotPhase0,
+ expectedGasTip: big.NewInt(params.LaunchMinGasPrice),
},
{
- name: "Apricot Phase 1",
- genesis: genesisJSONApricotPhase1,
- expectedGasPrice: big.NewInt(params.ApricotPhase1MinGasPrice),
+ name: "Apricot Phase 1",
+ genesis: genesisJSONApricotPhase1,
+ expectedGasTip: big.NewInt(params.ApricotPhase1MinGasPrice),
},
{
- name: "Apricot Phase 2",
- genesis: genesisJSONApricotPhase2,
- expectedGasPrice: big.NewInt(params.ApricotPhase1MinGasPrice),
+ name: "Apricot Phase 2",
+ genesis: genesisJSONApricotPhase2,
+ expectedGasTip: big.NewInt(params.ApricotPhase1MinGasPrice),
},
{
- name: "Apricot Phase 3",
- genesis: genesisJSONApricotPhase3,
- expectedGasPrice: big.NewInt(0),
+ name: "Apricot Phase 3",
+ genesis: genesisJSONApricotPhase3,
+ expectedGasTip: big.NewInt(0),
},
{
- name: "Apricot Phase 4",
- genesis: genesisJSONApricotPhase4,
- expectedGasPrice: big.NewInt(0),
+ name: "Apricot Phase 4",
+ genesis: genesisJSONApricotPhase4,
+ expectedGasTip: big.NewInt(0),
},
{
- name: "Apricot Phase 5",
- genesis: genesisJSONApricotPhase5,
- expectedGasPrice: big.NewInt(0),
+ name: "Apricot Phase 5",
+ genesis: genesisJSONApricotPhase5,
+ expectedGasTip: big.NewInt(0),
},
{
- name: "Apricot Phase Pre 6",
- genesis: genesisJSONApricotPhasePre6,
- expectedGasPrice: big.NewInt(0),
+ name: "Apricot Phase Pre 6",
+ genesis: genesisJSONApricotPhasePre6,
+ expectedGasTip: big.NewInt(0),
},
{
- name: "Apricot Phase 6",
- genesis: genesisJSONApricotPhase6,
- expectedGasPrice: big.NewInt(0),
+ name: "Apricot Phase 6",
+ genesis: genesisJSONApricotPhase6,
+ expectedGasTip: big.NewInt(0),
},
{
- name: "Apricot Phase Post 6",
- genesis: genesisJSONApricotPhasePost6,
- expectedGasPrice: big.NewInt(0),
+ name: "Apricot Phase Post 6",
+ genesis: genesisJSONApricotPhasePost6,
+ expectedGasTip: big.NewInt(0),
},
{
- name: "Banff",
- genesis: genesisJSONBanff,
- expectedGasPrice: big.NewInt(0),
+ name: "Banff",
+ genesis: genesisJSONBanff,
+ expectedGasTip: big.NewInt(0),
},
{
- name: "Cortina",
- genesis: genesisJSONCortina,
- expectedGasPrice: big.NewInt(0),
+ name: "Cortina",
+ genesis: genesisJSONCortina,
+ expectedGasTip: big.NewInt(0),
},
}
for _, test := range genesisTests {
t.Run(test.name, func(t *testing.T) {
_, vm, _, _, _ := GenesisVM(t, true, test.genesis, "", "")
- if gasPrice := vm.txPool.GasPrice(); gasPrice.Cmp(test.expectedGasPrice) != 0 {
- t.Fatalf("Expected pool gas price to be %d but found %d", test.expectedGasPrice, gasPrice)
+ if gasTip := vm.txPool.GasTip(); gasTip.Cmp(test.expectedGasTip) != 0 {
+ t.Fatalf("Expected pool gas price to be %d but found %d", test.expectedGasTip, gasTip)
}
defer func() {
shutdownChan := make(chan error, 1)
diff --git a/rpc/client.go b/rpc/client.go
index 989441a6f8..7f972188ae 100644
--- a/rpc/client.go
+++ b/rpc/client.go
@@ -43,14 +43,15 @@ import (
var (
ErrBadResult = errors.New("bad result in JSON-RPC response")
ErrClientQuit = errors.New("client is closed")
- ErrNoResult = errors.New("no result in JSON-RPC response")
+ ErrNoResult = errors.New("JSON-RPC response has no result")
+ ErrMissingBatchResponse = errors.New("response batch did not contain a response to this call")
ErrSubscriptionQueueOverflow = errors.New("subscription queue overflow")
errClientReconnected = errors.New("client reconnected")
errDead = errors.New("connection lost")
)
+// Timeouts
const (
- // Timeouts
defaultDialTimeout = 10 * time.Second // used if context has no deadline
subscribeTimeout = 10 * time.Second // overall timeout eth_subscribe, rpc_modules calls
)
@@ -93,6 +94,10 @@ type Client struct {
// This function, if non-nil, is called when the connection is lost.
reconnectFunc reconnectFunc
+ // config fields
+ batchItemLimit int
+ batchResponseMaxSize int
+
// writeConn is used for writing to the connection on the caller's goroutine. It should
// only be accessed outside of dispatch, with the write lock held. The write lock is
// taken by sending on reqInit and released by sending on reqSent.
@@ -123,7 +128,7 @@ func (c *Client) newClientConn(conn ServerCodec, apiMaxDuration, refillRate, max
ctx := context.Background()
ctx = context.WithValue(ctx, clientContextKey{}, c)
ctx = context.WithValue(ctx, peerInfoContextKey{}, conn.peerInfo())
- handler := newHandler(ctx, conn, c.idgen, c.services)
+ handler := newHandler(ctx, conn, c.idgen, c.services, c.batchItemLimit, c.batchResponseMaxSize)
// When [apiMaxDuration] or [refillRate]/[maxStored] is 0 (as is the case for
// all client invocations of this function), it is ignored.
@@ -142,14 +147,17 @@ type readOp struct {
batch bool
}
+// requestOp represents a pending request. This is used for both batch and non-batch
+// requests.
type requestOp struct {
- ids []json.RawMessage
- err error
- resp chan *jsonrpcMessage // receives up to len(ids) responses
- sub *ClientSubscription // only set for EthSubscribe requests
+ ids []json.RawMessage
+ err error
+ resp chan []*jsonrpcMessage // the response goes here
+ sub *ClientSubscription // set for Subscribe requests.
+ hadResponse bool // true when the request was responded to
}
-func (op *requestOp) wait(ctx context.Context, c *Client) (*jsonrpcMessage, error) {
+func (op *requestOp) wait(ctx context.Context, c *Client) ([]*jsonrpcMessage, error) {
select {
case <-ctx.Done():
// Send the timeout to dispatch so it can remove the request IDs.
@@ -225,7 +233,7 @@ func DialOptions(ctx context.Context, rawurl string, options ...ClientOption) (*
return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
}
- return newClient(ctx, reconnect)
+ return newClient(ctx, cfg, reconnect)
}
// ClientFromContext retrieves the client from the context, if any. This can be used to perform
@@ -235,33 +243,42 @@ func ClientFromContext(ctx context.Context) (*Client, bool) {
return client, ok
}
-func newClient(initctx context.Context, connect reconnectFunc) (*Client, error) {
+func newClient(initctx context.Context, cfg *clientConfig, connect reconnectFunc) (*Client, error) {
conn, err := connect(initctx)
if err != nil {
return nil, err
}
- c := initClient(conn, randomIDGenerator(), new(serviceRegistry), 0, 0, 0)
+ c := initClient(conn, new(serviceRegistry), cfg, 0, 0, 0)
c.reconnectFunc = connect
return c, nil
}
-func initClient(conn ServerCodec, idgen func() ID, services *serviceRegistry, apiMaxDuration, refillRate, maxStored time.Duration) *Client {
+func initClient(conn ServerCodec, services *serviceRegistry, cfg *clientConfig, apiMaxDuration, refillRate, maxStored time.Duration) *Client {
_, isHTTP := conn.(*httpConn)
c := &Client{
- idgen: idgen,
- isHTTP: isHTTP,
- services: services,
- writeConn: conn,
- close: make(chan struct{}),
- closing: make(chan struct{}),
- didClose: make(chan struct{}),
- reconnected: make(chan ServerCodec),
- readOp: make(chan readOp),
- readErr: make(chan error),
- reqInit: make(chan *requestOp),
- reqSent: make(chan error, 1),
- reqTimeout: make(chan *requestOp),
- }
+ isHTTP: isHTTP,
+ services: services,
+ idgen: cfg.idgen,
+ batchItemLimit: cfg.batchItemLimit,
+ batchResponseMaxSize: cfg.batchResponseLimit,
+ writeConn: conn,
+ close: make(chan struct{}),
+ closing: make(chan struct{}),
+ didClose: make(chan struct{}),
+ reconnected: make(chan ServerCodec),
+ readOp: make(chan readOp),
+ readErr: make(chan error),
+ reqInit: make(chan *requestOp),
+ reqSent: make(chan error, 1),
+ reqTimeout: make(chan *requestOp),
+ }
+
+ // Set defaults.
+ if c.idgen == nil {
+ c.idgen = randomIDGenerator()
+ }
+
+ // Launch the main loop.
if !c.isHTTP {
go c.dispatch(conn, apiMaxDuration, refillRate, maxStored)
}
@@ -339,7 +356,10 @@ func (c *Client) CallContext(ctx context.Context, result interface{}, method str
if err != nil {
return err
}
- op := &requestOp{ids: []json.RawMessage{msg.ID}, resp: make(chan *jsonrpcMessage, 1)}
+ op := &requestOp{
+ ids: []json.RawMessage{msg.ID},
+ resp: make(chan []*jsonrpcMessage, 1),
+ }
if c.isHTTP {
err = c.sendHTTP(ctx, op, msg)
@@ -351,9 +371,12 @@ func (c *Client) CallContext(ctx context.Context, result interface{}, method str
}
// dispatch has accepted the request and will close the channel when it quits.
- switch resp, err := op.wait(ctx, c); {
- case err != nil:
+ batchresp, err := op.wait(ctx, c)
+ if err != nil {
return err
+ }
+ resp := batchresp[0]
+ switch {
case resp.Error != nil:
return resp.Error
case len(resp.Result) == 0:
@@ -394,7 +417,7 @@ func (c *Client) BatchCallContext(ctx context.Context, b []BatchElem) error {
)
op := &requestOp{
ids: make([]json.RawMessage, len(b)),
- resp: make(chan *jsonrpcMessage, len(b)),
+ resp: make(chan []*jsonrpcMessage, 1),
}
for i, elem := range b {
msg, err := c.newMessage(elem.Method, elem.Args...)
@@ -412,28 +435,48 @@ func (c *Client) BatchCallContext(ctx context.Context, b []BatchElem) error {
} else {
err = c.send(ctx, op, msgs)
}
+ if err != nil {
+ return err
+ }
+
+ batchresp, err := op.wait(ctx, c)
+ if err != nil {
+ return err
+ }
// Wait for all responses to come back.
- for n := 0; n < len(b) && err == nil; n++ {
- var resp *jsonrpcMessage
- resp, err = op.wait(ctx, c)
- if err != nil {
- break
+ for n := 0; n < len(batchresp) && err == nil; n++ {
+ resp := batchresp[n]
+ if resp == nil {
+ // Ignore null responses. These can happen for batches sent via HTTP.
+ continue
}
+
// Find the element corresponding to this response.
- // The element is guaranteed to be present because dispatch
- // only sends valid IDs to our channel.
- elem := &b[byID[string(resp.ID)]]
- if resp.Error != nil {
- elem.Error = resp.Error
+ index, ok := byID[string(resp.ID)]
+ if !ok {
continue
}
- if len(resp.Result) == 0 {
+ delete(byID, string(resp.ID))
+
+ // Assign result and error.
+ elem := &b[index]
+ switch {
+ case resp.Error != nil:
+ elem.Error = resp.Error
+ case resp.Result == nil:
elem.Error = ErrNoResult
- continue
+ default:
+ elem.Error = json.Unmarshal(resp.Result, elem.Result)
}
- elem.Error = json.Unmarshal(resp.Result, elem.Result)
}
+
+ // Check that all expected responses have been received.
+ for _, index := range byID {
+ elem := &b[index]
+ elem.Error = ErrMissingBatchResponse
+ }
+
return err
}
@@ -494,7 +537,7 @@ func (c *Client) Subscribe(ctx context.Context, namespace string, channel interf
}
op := &requestOp{
ids: []json.RawMessage{msg.ID},
- resp: make(chan *jsonrpcMessage),
+ resp: make(chan []*jsonrpcMessage, 1),
sub: newClientSubscription(c, namespace, chanVal),
}
@@ -509,6 +552,13 @@ func (c *Client) Subscribe(ctx context.Context, namespace string, channel interf
return op.sub, nil
}
+// SupportsSubscriptions reports whether subscriptions are supported by the client
+// transport. When this returns false, Subscribe and related methods will return
+// ErrNotificationsUnsupported.
+func (c *Client) SupportsSubscriptions() bool {
+ return !c.isHTTP
+}
+
func (c *Client) newMessage(method string, paramsIn ...interface{}) (*jsonrpcMessage, error) {
msg := &jsonrpcMessage{Version: vsn, ID: c.nextID(), Method: method}
if paramsIn != nil { // prevent sending "params":null
diff --git a/rpc/client_opt.go b/rpc/client_opt.go
index c1b9931253..dfbef66b86 100644
--- a/rpc/client_opt.go
+++ b/rpc/client_opt.go
@@ -38,11 +38,18 @@ type ClientOption interface {
}
type clientConfig struct {
+ // HTTP settings
httpClient *http.Client
httpHeaders http.Header
httpAuth HTTPAuth
+ // WebSocket options
wsDialer *websocket.Dialer
+
+ // RPC handler options
+ idgen func() ID
+ batchItemLimit int
+ batchResponseLimit int
}
func (cfg *clientConfig) initHeaders() {
@@ -114,3 +121,25 @@ func WithHTTPAuth(a HTTPAuth) ClientOption {
// Usually, HTTPAuth functions will call h.Set("authorization", "...") to add
// auth information to the request.
type HTTPAuth func(h http.Header) error
+
+// WithBatchItemLimit changes the maximum number of items allowed in batch requests.
+//
+// Note: this option applies when processing incoming batch requests. It does not affect
+// batch requests sent by the client.
+func WithBatchItemLimit(limit int) ClientOption {
+ return optionFunc(func(cfg *clientConfig) {
+ cfg.batchItemLimit = limit
+ })
+}
+
+// WithBatchResponseSizeLimit changes the maximum number of response bytes that can be
+// generated for batch requests. When this limit is reached, further calls in the batch
+// will not be processed.
+//
+// Note: this option applies when processing incoming batch requests. It does not affect
+// batch requests sent by the client.
+func WithBatchResponseSizeLimit(sizeLimit int) ClientOption {
+ return optionFunc(func(cfg *clientConfig) {
+ cfg.batchResponseLimit = sizeLimit
+ })
+}
diff --git a/rpc/client_test.go b/rpc/client_test.go
index 4ec0ef4122..057d8f24be 100644
--- a/rpc/client_test.go
+++ b/rpc/client_test.go
@@ -177,10 +177,12 @@ func TestClientBatchRequest(t *testing.T) {
}
}
+// This checks that, for HTTP connections, the length of batch responses is validated to
+// match the request exactly.
func TestClientBatchRequest_len(t *testing.T) {
b, err := json.Marshal([]jsonrpcMessage{
- {Version: "2.0", ID: json.RawMessage("1"), Method: "foo", Result: json.RawMessage(`"0x1"`)},
- {Version: "2.0", ID: json.RawMessage("2"), Method: "bar", Result: json.RawMessage(`"0x2"`)},
+ {Version: "2.0", ID: json.RawMessage("1"), Result: json.RawMessage(`"0x1"`)},
+ {Version: "2.0", ID: json.RawMessage("2"), Result: json.RawMessage(`"0x2"`)},
})
if err != nil {
t.Fatal("failed to encode jsonrpc message:", err)
@@ -193,37 +195,102 @@ func TestClientBatchRequest_len(t *testing.T) {
}))
t.Cleanup(s.Close)
- client, err := Dial(s.URL)
- if err != nil {
- t.Fatal("failed to dial test server:", err)
- }
- defer client.Close()
-
t.Run("too-few", func(t *testing.T) {
+ client, err := Dial(s.URL)
+ if err != nil {
+ t.Fatal("failed to dial test server:", err)
+ }
+ defer client.Close()
+
batch := []BatchElem{
- {Method: "foo"},
- {Method: "bar"},
- {Method: "baz"},
+ {Method: "foo", Result: new(string)},
+ {Method: "bar", Result: new(string)},
+ {Method: "baz", Result: new(string)},
}
ctx, cancelFn := context.WithTimeout(context.Background(), time.Second)
defer cancelFn()
- if err := client.BatchCallContext(ctx, batch); !errors.Is(err, ErrBadResult) {
- t.Errorf("expected %q but got: %v", ErrBadResult, err)
+
+ if err := client.BatchCallContext(ctx, batch); err != nil {
+ t.Fatal("error:", err)
+ }
+ for i, elem := range batch[:2] {
+ if elem.Error != nil {
+ t.Errorf("expected no error for batch element %d, got %q", i, elem.Error)
+ }
+ }
+ for i, elem := range batch[2:] {
+ if elem.Error != ErrMissingBatchResponse {
+ t.Errorf("wrong error %q for batch element %d", elem.Error, i+2)
+ }
}
})
t.Run("too-many", func(t *testing.T) {
+ client, err := Dial(s.URL)
+ if err != nil {
+ t.Fatal("failed to dial test server:", err)
+ }
+ defer client.Close()
+
batch := []BatchElem{
- {Method: "foo"},
+ {Method: "foo", Result: new(string)},
}
ctx, cancelFn := context.WithTimeout(context.Background(), time.Second)
defer cancelFn()
- if err := client.BatchCallContext(ctx, batch); !errors.Is(err, ErrBadResult) {
- t.Errorf("expected %q but got: %v", ErrBadResult, err)
+
+ if err := client.BatchCallContext(ctx, batch); err != nil {
+ t.Fatal("error:", err)
+ }
+ for i, elem := range batch[:1] {
+ if elem.Error != nil {
+ t.Errorf("expected no error for batch element %d, got %q", i, elem.Error)
+ }
+ }
+ for i, elem := range batch[1:] {
+ if elem.Error != ErrMissingBatchResponse {
+ t.Errorf("wrong error %q for batch element %d", elem.Error, i+1)
+ }
}
})
}
+// This checks that the client can handle the case where the server doesn't
+// respond to all requests in a batch.
+func TestClientBatchRequestLimit(t *testing.T) {
+ server := newTestServer()
+ defer server.Stop()
+ server.SetBatchLimits(2, 100000)
+ client := DialInProc(server)
+
+ batch := []BatchElem{
+ {Method: "foo"},
+ {Method: "bar"},
+ {Method: "baz"},
+ }
+ err := client.BatchCall(batch)
+ if err != nil {
+ t.Fatal("unexpected error:", err)
+ }
+
+ // Check that the first response indicates an error with batch size.
+ var err0 Error
+ if !errors.As(batch[0].Error, &err0) {
+ t.Log("error zero:", batch[0].Error)
+ t.Fatalf("batch elem 0 has wrong error type: %T", batch[0].Error)
+ } else {
+ if err0.ErrorCode() != -32600 || err0.Error() != errMsgBatchTooLarge {
+ t.Fatalf("wrong error on batch elem zero: %v", err0)
+ }
+ }
+
+ // Check that remaining response batch elements are reported as absent.
+ for i, elem := range batch[1:] {
+ if elem.Error != ErrMissingBatchResponse {
+ t.Fatalf("batch elem %d has unexpected error: %v", i+1, elem.Error)
+ }
+ }
+}
+
func TestClientNotify(t *testing.T) {
server := newTestServer()
defer server.Stop()
@@ -333,7 +400,7 @@ func testClientCancel(transport string, t *testing.T) {
// _, hasDeadline := ctx.Deadline()
// t.Errorf("no error for call with %v wait time (deadline: %v)", timeout, hasDeadline)
// // default:
-// // t.Logf("got expected error with %v wait time: %v", timeout, err)
+// // t.Logf("got expected error with %v wait time: %v", timeout, err)
// }
// cancel()
// }
@@ -510,7 +577,8 @@ func TestClientSubscriptionUnsubscribeServer(t *testing.T) {
defer srv.Stop()
// Create the client on the other end of the pipe.
- client, _ := newClient(context.Background(), func(context.Context) (ServerCodec, error) {
+ cfg := new(clientConfig)
+ client, _ := newClient(context.Background(), cfg, func(context.Context) (ServerCodec, error) {
return NewCodec(p2), nil
})
defer client.Close()
diff --git a/rpc/errors.go b/rpc/errors.go
index 7e39510087..44094715e1 100644
--- a/rpc/errors.go
+++ b/rpc/errors.go
@@ -68,15 +68,19 @@ var (
)
const (
- errcodeDefault = -32000
- errcodeNotificationsUnsupported = -32001
- errcodeTimeout = -32002
- errcodePanic = -32603
- errcodeMarshalError = -32603
+ errcodeDefault = -32000
+ errcodeTimeout = -32002
+ errcodeResponseTooLarge = -32003
+ errcodePanic = -32603
+ errcodeMarshalError = -32603
+
+ legacyErrcodeNotificationsUnsupported = -32001
)
const (
- errMsgTimeout = "request timed out"
+ errMsgTimeout = "request timed out"
+ errMsgResponseTooLarge = "response too large"
+ errMsgBatchTooLarge = "batch too large"
)
type methodNotFoundError struct{ method string }
@@ -87,6 +91,34 @@ func (e *methodNotFoundError) Error() string {
return fmt.Sprintf("the method %s does not exist/is not available", e.method)
}
+type notificationsUnsupportedError struct{}
+
+func (e notificationsUnsupportedError) Error() string {
+ return "notifications not supported"
+}
+
+func (e notificationsUnsupportedError) ErrorCode() int { return -32601 }
+
+// Is checks for equivalence to another error. Here we define that all errors with code
+// -32601 (method not found) are equivalent to notificationsUnsupportedError. This is
+// done to enable the following pattern:
+//
+// sub, err := client.Subscribe(...)
+// if errors.Is(err, rpc.ErrNotificationsUnsupported) {
+// // server doesn't support subscriptions
+// }
+func (e notificationsUnsupportedError) Is(other error) bool {
+ if other == (notificationsUnsupportedError{}) {
+ return true
+ }
+ rpcErr, ok := other.(Error)
+ if ok {
+ code := rpcErr.ErrorCode()
+ return code == -32601 || code == legacyErrcodeNotificationsUnsupported
+ }
+ return false
+}
+
type subscriptionNotFoundError struct{ namespace, subscription string }
func (e *subscriptionNotFoundError) ErrorCode() int { return -32601 }
diff --git a/rpc/handler.go b/rpc/handler.go
index 595b68b7ba..2a5aee5244 100644
--- a/rpc/handler.go
+++ b/rpc/handler.go
@@ -61,17 +61,19 @@ import (
// h.removeRequestOp(op) // timeout, etc.
// }
type handler struct {
- reg *serviceRegistry
- unsubscribeCb *callback
- idgen func() ID // subscription ID generator
- respWait map[string]*requestOp // active client requests
- clientSubs map[string]*ClientSubscription // active client subscriptions
- callWG sync.WaitGroup // pending call goroutines
- rootCtx context.Context // canceled by close()
- cancelRoot func() // cancel function for rootCtx
- conn jsonWriter // where responses will be sent
- log log.Logger
- allowSubscribe bool
+ reg *serviceRegistry
+ unsubscribeCb *callback
+ idgen func() ID // subscription ID generator
+ respWait map[string]*requestOp // active client requests
+ clientSubs map[string]*ClientSubscription // active client subscriptions
+ callWG sync.WaitGroup // pending call goroutines
+ rootCtx context.Context // canceled by close()
+ cancelRoot func() // cancel function for rootCtx
+ conn jsonWriter // where responses will be sent
+ log log.Logger
+ allowSubscribe bool
+ batchRequestLimit int
+ batchResponseMaxSize int
subLock sync.Mutex
serverSubs map[ID]*Subscription
@@ -87,19 +89,21 @@ type callProc struct {
procStart time.Time
}
-func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry) *handler {
+func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry, batchRequestLimit, batchResponseMaxSize int) *handler {
rootCtx, cancelRoot := context.WithCancel(connCtx)
h := &handler{
- reg: reg,
- idgen: idgen,
- conn: conn,
- respWait: make(map[string]*requestOp),
- clientSubs: make(map[string]*ClientSubscription),
- rootCtx: rootCtx,
- cancelRoot: cancelRoot,
- allowSubscribe: true,
- serverSubs: make(map[ID]*Subscription),
- log: log.Root(),
+ reg: reg,
+ idgen: idgen,
+ conn: conn,
+ respWait: make(map[string]*requestOp),
+ clientSubs: make(map[string]*ClientSubscription),
+ rootCtx: rootCtx,
+ cancelRoot: cancelRoot,
+ allowSubscribe: true,
+ serverSubs: make(map[ID]*Subscription),
+ log: log.Root(),
+ batchRequestLimit: batchRequestLimit,
+ batchResponseMaxSize: batchResponseMaxSize,
}
if conn.remoteAddr() != "" {
h.log = h.log.New("conn", conn.remoteAddr())
@@ -151,16 +155,15 @@ func (b *batchCallBuffer) write(ctx context.Context, conn jsonWriter) {
b.doWrite(ctx, conn, false)
}
-// timeout sends the responses added so far. For the remaining unanswered call
-// messages, it sends a timeout error response.
-func (b *batchCallBuffer) timeout(ctx context.Context, conn jsonWriter) {
+// respondWithError sends the responses added so far. For the remaining unanswered call
+// messages, it responds with the given error.
+func (b *batchCallBuffer) respondWithError(ctx context.Context, conn jsonWriter, err error) {
b.mutex.Lock()
defer b.mutex.Unlock()
for _, msg := range b.calls {
if !msg.isNotification() {
- resp := msg.errorResponse(&internalServerError{errcodeTimeout, errMsgTimeout})
- b.resp = append(b.resp, resp)
+ b.resp = append(b.resp, msg.errorResponse(err))
}
}
b.doWrite(ctx, conn, true)
@@ -200,17 +203,24 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) {
})
return
}
+ // Apply limit on total number of requests.
+ if h.batchRequestLimit != 0 && len(msgs) > h.batchRequestLimit {
+ h.startCallProc(func(cp *callProc) {
+ h.respondWithBatchTooLarge(cp, msgs)
+ })
+ return
+ }
- // Handle non-call messages first:
+ // Handle non-call messages first.
+ // Here we need to find the requestOp that sent the request batch.
calls := make([]*jsonrpcMessage, 0, len(msgs))
- for _, msg := range msgs {
- if handled := h.handleImmediate(msg); !handled {
- calls = append(calls, msg)
- }
- }
+ h.handleResponses(msgs, func(msg *jsonrpcMessage) {
+ calls = append(calls, msg)
+ })
if len(calls) == 0 {
return
}
+
// Process calls on a goroutine because they may block indefinitely:
h.startCallProc(func(cp *callProc) {
var (
@@ -228,10 +238,12 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) {
if timeout, ok := ContextRequestTimeout(cp.ctx); ok {
timer = time.AfterFunc(timeout, func() {
cancel()
- callBuffer.timeout(cp.ctx, h.conn)
+ err := &internalServerError{errcodeTimeout, errMsgTimeout}
+ callBuffer.respondWithError(cp.ctx, h.conn, err)
})
}
+ responseBytes := 0
for {
// No need to handle rest of calls if timed out.
if cp.ctx.Err() != nil {
@@ -243,59 +255,86 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) {
}
resp := h.handleCallMsg(cp, msg)
callBuffer.pushResponse(resp)
+ if resp != nil && h.batchResponseMaxSize != 0 {
+ responseBytes += len(resp.Result)
+ if responseBytes > h.batchResponseMaxSize {
+ err := &internalServerError{errcodeResponseTooLarge, errMsgResponseTooLarge}
+ callBuffer.respondWithError(cp.ctx, h.conn, err)
+ break
+ }
+ }
}
if timer != nil {
timer.Stop()
}
- callBuffer.write(cp.ctx, h.conn)
+
h.addSubscriptions(cp.notifiers)
+ callBuffer.write(cp.ctx, h.conn)
for _, n := range cp.notifiers {
n.activate()
}
})
}
-// handleMsg handles a single message.
-func (h *handler) handleMsg(msg *jsonrpcMessage) {
- if ok := h.handleImmediate(msg); ok {
- return
+func (h *handler) respondWithBatchTooLarge(cp *callProc, batch []*jsonrpcMessage) {
+ resp := errorMessage(&invalidRequestError{errMsgBatchTooLarge})
+ // Find the first call and add its "id" field to the error.
+ // This is the best we can do, given that the protocol doesn't have a way
+ // of reporting an error for the entire batch.
+ for _, msg := range batch {
+ if msg.isCall() {
+ resp.ID = msg.ID
+ break
+ }
}
- h.startCallProc(func(cp *callProc) {
- var (
- responded sync.Once
- timer *time.Timer
- cancel context.CancelFunc
- )
- cp.ctx, cancel = context.WithCancel(cp.ctx)
- defer cancel()
+ h.conn.writeJSONSkipDeadline(cp.ctx, []*jsonrpcMessage{resp}, true, h.deadlineContext > 0)
+}
- // Cancel the request context after timeout and send an error response. Since the
- // running method might not return immediately on timeout, we must wait for the
- // timeout concurrently with processing the request.
- if timeout, ok := ContextRequestTimeout(cp.ctx); ok {
- timer = time.AfterFunc(timeout, func() {
- cancel()
- responded.Do(func() {
- resp := msg.errorResponse(&internalServerError{errcodeTimeout, errMsgTimeout})
- h.conn.writeJSONSkipDeadline(cp.ctx, resp, true, h.deadlineContext > 0)
- })
- })
- }
+// handleMsg handles a single non-batch message.
+func (h *handler) handleMsg(msg *jsonrpcMessage) {
+ msgs := []*jsonrpcMessage{msg}
+ h.handleResponses(msgs, func(msg *jsonrpcMessage) {
+ h.startCallProc(func(cp *callProc) {
+ h.handleNonBatchCall(cp, msg)
+ })
+ })
+}
- answer := h.handleCallMsg(cp, msg)
- if timer != nil {
- timer.Stop()
- }
- h.addSubscriptions(cp.notifiers)
- if answer != nil {
+func (h *handler) handleNonBatchCall(cp *callProc, msg *jsonrpcMessage) {
+ var (
+ responded sync.Once
+ timer *time.Timer
+ cancel context.CancelFunc
+ )
+ cp.ctx, cancel = context.WithCancel(cp.ctx)
+ defer cancel()
+
+ // Cancel the request context after timeout and send an error response. Since the
+ // running method might not return immediately on timeout, we must wait for the
+ // timeout concurrently with processing the request.
+ if timeout, ok := ContextRequestTimeout(cp.ctx); ok {
+ timer = time.AfterFunc(timeout, func() {
+ cancel()
responded.Do(func() {
- h.conn.writeJSONSkipDeadline(cp.ctx, answer, false, h.deadlineContext > 0)
+ resp := msg.errorResponse(&internalServerError{errcodeTimeout, errMsgTimeout})
+ h.conn.writeJSONSkipDeadline(cp.ctx, resp, true, h.deadlineContext > 0)
})
- }
- for _, n := range cp.notifiers {
- n.activate()
- }
- })
+ })
+ }
+
+ answer := h.handleCallMsg(cp, msg)
+ if timer != nil {
+ timer.Stop()
+ }
+ h.addSubscriptions(cp.notifiers)
+ if answer != nil {
+ responded.Do(func() {
+ h.conn.writeJSONSkipDeadline(cp.ctx, answer, false, h.deadlineContext > 0)
+ })
+ }
+ for _, n := range cp.notifiers {
+ n.activate()
+ }
}
// close cancels all requests except for inflightReq and waits for
@@ -437,23 +476,60 @@ func (h *handler) startCallProc(fn func(*callProc)) {
}
}
-// handleImmediate executes non-call messages. It returns false if the message is a
-// call or requires a reply.
-func (h *handler) handleImmediate(msg *jsonrpcMessage) bool {
- execStart := time.Now()
- switch {
- case msg.isNotification():
- if strings.HasSuffix(msg.Method, notificationMethodSuffix) {
- h.handleSubscriptionResult(msg)
- return true
+// handleResponses processes method call responses.
+func (h *handler) handleResponses(batch []*jsonrpcMessage, handleCall func(*jsonrpcMessage)) {
+ var resolvedops []*requestOp
+ handleResp := func(msg *jsonrpcMessage) {
+ op := h.respWait[string(msg.ID)]
+ if op == nil {
+ h.log.Debug("Unsolicited RPC response", "reqid", idForLog{msg.ID})
+ return
+ }
+ resolvedops = append(resolvedops, op)
+ delete(h.respWait, string(msg.ID))
+
+ // For subscription responses, start the subscription if the server
+ // indicates success. EthSubscribe gets unblocked in either case through
+ // the op.resp channel.
+ if op.sub != nil {
+ if msg.Error != nil {
+ op.err = msg.Error
+ } else {
+ op.err = json.Unmarshal(msg.Result, &op.sub.subid)
+ if op.err == nil {
+ go op.sub.run()
+ h.clientSubs[op.sub.subid] = op.sub
+ }
+ }
+ }
+
+ if !op.hadResponse {
+ op.hadResponse = true
+ op.resp <- batch
}
- return false
- case msg.isResponse():
- h.handleResponse(msg)
- h.log.Trace("Handled RPC response", "reqid", idForLog{msg.ID}, "duration", time.Since(execStart))
- return true
- default:
- return false
+ }
+
+ for _, msg := range batch {
+ start := time.Now()
+ switch {
+ case msg.isResponse():
+ handleResp(msg)
+ h.log.Trace("Handled RPC response", "reqid", idForLog{msg.ID}, "duration", time.Since(start))
+
+ case msg.isNotification():
+ if strings.HasSuffix(msg.Method, notificationMethodSuffix) {
+ h.handleSubscriptionResult(msg)
+ continue
+ }
+ handleCall(msg)
+
+ default:
+ handleCall(msg)
+ }
+ }
+
+ for _, op := range resolvedops {
+ h.removeRequestOp(op)
}
}
@@ -469,33 +545,6 @@ func (h *handler) handleSubscriptionResult(msg *jsonrpcMessage) {
}
}
-// handleResponse processes method call responses.
-func (h *handler) handleResponse(msg *jsonrpcMessage) {
- op := h.respWait[string(msg.ID)]
- if op == nil {
- h.log.Debug("Unsolicited RPC response", "reqid", idForLog{msg.ID})
- return
- }
- delete(h.respWait, string(msg.ID))
- // For normal responses, just forward the reply to Call/BatchCall.
- if op.sub == nil {
- op.resp <- msg
- return
- }
- // For subscription responses, start the subscription if the server
- // indicates success. EthSubscribe gets unblocked in either case through
- // the op.resp channel.
- defer close(op.resp)
- if msg.Error != nil {
- op.err = msg.Error
- return
- }
- if op.err = json.Unmarshal(msg.Result, &op.sub.subid); op.err == nil {
- go op.sub.run()
- h.clientSubs[op.sub.subid] = op.sub
- }
-}
-
// handleCallMsg executes a call message and returns the answer.
func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage) *jsonrpcMessage {
// [callStart] is the time the message was enqueued for handler processing
@@ -514,6 +563,7 @@ func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage) *jsonrpcMess
h.handleCall(ctx, msg)
h.log.Debug("Served "+msg.Method, "execTime", time.Since(execStart), "procTime", time.Since(procStart), "totalTime", time.Since(callStart))
return nil
+
case msg.isCall():
resp := h.handleCall(ctx, msg)
var ctx []interface{}
@@ -528,8 +578,10 @@ func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage) *jsonrpcMess
h.log.Debug("Served "+msg.Method, ctx...)
}
return resp
+
case msg.hasValidID():
return msg.errorResponse(&invalidRequestError{"invalid request"})
+
default:
return errorMessage(&invalidRequestError{"invalid request"})
}
@@ -549,12 +601,14 @@ func (h *handler) handleCall(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage
if callb == nil {
return msg.errorResponse(&methodNotFoundError{method: msg.Method})
}
+
args, err := parsePositionalArguments(msg.Params, callb.argTypes)
if err != nil {
return msg.errorResponse(&invalidParamsError{err.Error()})
}
start := time.Now()
answer := h.runMethod(cp.ctx, msg, callb, args)
+
// Collect the statistics for RPC calls if metrics is enabled.
// We only care about pure rpc call. Filter out subscription.
if callb != h.unsubscribeCb {
@@ -569,16 +623,14 @@ func (h *handler) handleCall(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage
updateServeTimeHistogram(msg.Method, answer.Error == nil, time.Since(start))
}
}
+
return answer
}
// handleSubscribe processes *_subscribe method calls.
func (h *handler) handleSubscribe(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage {
if !h.allowSubscribe {
- return msg.errorResponse(&internalServerError{
- code: errcodeNotificationsUnsupported,
- message: ErrNotificationsUnsupported.Error(),
- })
+ return msg.errorResponse(ErrNotificationsUnsupported)
}
// Subscription method name is first argument.
diff --git a/rpc/http.go b/rpc/http.go
index 56fea59f2c..7fb42c556f 100644
--- a/rpc/http.go
+++ b/rpc/http.go
@@ -153,7 +153,7 @@ func DialHTTPWithClient(endpoint string, client *http.Client) (*Client, error) {
var cfg clientConfig
cfg.httpClient = client
fn := newClientTransportHTTP(endpoint, &cfg)
- return newClient(context.Background(), fn)
+ return newClient(context.Background(), &cfg, fn)
}
func newClientTransportHTTP(endpoint string, cfg *clientConfig) reconnectFunc {
@@ -190,11 +190,12 @@ func (c *Client) sendHTTP(ctx context.Context, op *requestOp, msg interface{}) e
}
defer respBody.Close()
- var respmsg jsonrpcMessage
- if err := json.NewDecoder(respBody).Decode(&respmsg); err != nil {
+ var resp jsonrpcMessage
+ batch := [1]*jsonrpcMessage{&resp}
+ if err := json.NewDecoder(respBody).Decode(&resp); err != nil {
return err
}
- op.resp <- &respmsg
+ op.resp <- batch[:]
return nil
}
@@ -205,16 +206,12 @@ func (c *Client) sendBatchHTTP(ctx context.Context, op *requestOp, msgs []*jsonr
return err
}
defer respBody.Close()
- var respmsgs []jsonrpcMessage
+
+ var respmsgs []*jsonrpcMessage
if err := json.NewDecoder(respBody).Decode(&respmsgs); err != nil {
return err
}
- if len(respmsgs) != len(msgs) {
- return fmt.Errorf("batch has %d requests but response has %d: %w", len(msgs), len(respmsgs), ErrBadResult)
- }
- for i := 0; i < len(respmsgs); i++ {
- op.resp <- &respmsgs[i]
- }
+ op.resp <- respmsgs
return nil
}
diff --git a/rpc/inproc.go b/rpc/inproc.go
index e008fd8804..6165af0a96 100644
--- a/rpc/inproc.go
+++ b/rpc/inproc.go
@@ -34,7 +34,8 @@ import (
// DialInProc attaches an in-process connection to the given RPC server.
func DialInProc(handler *Server) *Client {
initctx := context.Background()
- c, _ := newClient(initctx, func(context.Context) (ServerCodec, error) {
+ cfg := new(clientConfig)
+ c, _ := newClient(initctx, cfg, func(context.Context) (ServerCodec, error) {
p1, p2 := net.Pipe()
go handler.ServeCodec(NewCodec(p1), 0, 0, 0, 0)
return NewCodec(p2), nil
diff --git a/rpc/server.go b/rpc/server.go
index 13adf8112c..a993fbe96e 100644
--- a/rpc/server.go
+++ b/rpc/server.go
@@ -57,9 +57,11 @@ type Server struct {
idgen func() ID
maximumDuration time.Duration
- mutex sync.Mutex
- codecs map[ServerCodec]struct{}
- run atomic.Bool
+ mutex sync.Mutex
+ codecs map[ServerCodec]struct{}
+ run atomic.Bool
+ batchItemLimit int
+ batchResponseLimit int
}
// NewServer creates a new server instance with no registered handlers.
@@ -81,6 +83,17 @@ func NewServer(maximumDuration time.Duration) *Server {
return server
}
+// SetBatchLimits sets limits applied to batch requests. There are two limits: 'itemLimit'
+// is the maximum number of items in a batch. 'maxResponseSize' is the maximum number of
+// response bytes across all requests in a batch.
+//
+// This method should be called before processing any requests via ServeCodec, ServeHTTP,
+// ServeListener etc.
+func (s *Server) SetBatchLimits(itemLimit, maxResponseSize int) {
+ s.batchItemLimit = itemLimit
+ s.batchResponseLimit = maxResponseSize
+}
+
// RegisterName creates a service for the given receiver type under the given name. When no
// methods on the given receiver match the criteria to be either a RPC method or a
// subscription an error is returned. Otherwise a new service is created and added to the
@@ -102,7 +115,12 @@ func (s *Server) ServeCodec(codec ServerCodec, options CodecOption, apiMaxDurati
}
defer s.untrackCodec(codec)
- c := initClient(codec, s.idgen, &s.services, apiMaxDuration, refillRate, maxStored)
+ cfg := &clientConfig{
+ idgen: s.idgen,
+ batchItemLimit: s.batchItemLimit,
+ batchResponseLimit: s.batchResponseLimit,
+ }
+ c := initClient(codec, &s.services, cfg, apiMaxDuration, refillRate, maxStored)
<-codec.closed()
c.Close()
}
@@ -134,7 +152,7 @@ func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec) {
return
}
- h := newHandler(ctx, codec, s.idgen, &s.services)
+ h := newHandler(ctx, codec, s.idgen, &s.services, s.batchItemLimit, s.batchResponseLimit)
h.deadlineContext = s.maximumDuration
h.allowSubscribe = false
defer h.close(io.EOF, nil)
diff --git a/rpc/server_test.go b/rpc/server_test.go
index e3b26623e1..7702002085 100644
--- a/rpc/server_test.go
+++ b/rpc/server_test.go
@@ -79,6 +79,7 @@ func TestServer(t *testing.T) {
func runTestScript(t *testing.T, file string) {
server := newTestServer()
+ server.SetBatchLimits(4, 100000)
content, err := os.ReadFile(file)
if err != nil {
t.Fatal(err)
@@ -160,3 +161,41 @@ func runTestScript(t *testing.T, file string) {
// }
// }
// }
+
+func TestServerBatchResponseSizeLimit(t *testing.T) {
+ server := newTestServer()
+ defer server.Stop()
+ server.SetBatchLimits(100, 60)
+ var (
+ batch []BatchElem
+ client = DialInProc(server)
+ )
+ for i := 0; i < 5; i++ {
+ batch = append(batch, BatchElem{
+ Method: "test_echo",
+ Args: []any{"x", 1},
+ Result: new(echoResult),
+ })
+ }
+ if err := client.BatchCall(batch); err != nil {
+ t.Fatal("error sending batch:", err)
+ }
+ for i := range batch {
+ // We expect the first two queries to be ok, but after that the size limit takes effect.
+ if i < 2 {
+ if batch[i].Error != nil {
+ t.Fatalf("batch elem %d has unexpected error: %v", i, batch[i].Error)
+ }
+ continue
+ }
+ // After two, we expect an error.
+ re, ok := batch[i].Error.(Error)
+ if !ok {
+ t.Fatalf("batch elem %d has wrong error: %v", i, batch[i].Error)
+ }
+ wantedCode := errcodeResponseTooLarge
+ if re.ErrorCode() != wantedCode {
+ t.Errorf("batch elem %d wrong error code, have %d want %d", i, re.ErrorCode(), wantedCode)
+ }
+ }
+}
diff --git a/rpc/subscription.go b/rpc/subscription.go
index 3544a69ffb..1174e7e2c0 100644
--- a/rpc/subscription.go
+++ b/rpc/subscription.go
@@ -42,8 +42,17 @@ import (
)
var (
- // ErrNotificationsUnsupported is returned when the connection doesn't support notifications
- ErrNotificationsUnsupported = errors.New("notifications not supported")
+ // ErrNotificationsUnsupported is returned by the client when the connection doesn't
+ // support notifications. You can use this error value to check for subscription
+ // support like this:
+ //
+ // sub, err := client.EthSubscribe(ctx, channel, "newHeads", true)
+ // if errors.Is(err, rpc.ErrNotificationsUnsupported) {
+ // // Server does not support subscriptions, fall back to polling.
+ // }
+ //
+ ErrNotificationsUnsupported = notificationsUnsupportedError{}
+
// ErrSubscriptionNotFound is returned when the notification for the given id is not found
ErrSubscriptionNotFound = errors.New("subscription not found")
)
diff --git a/rpc/testdata/invalid-batch-toolarge.js b/rpc/testdata/invalid-batch-toolarge.js
new file mode 100644
index 0000000000..218fea58aa
--- /dev/null
+++ b/rpc/testdata/invalid-batch-toolarge.js
@@ -0,0 +1,13 @@
+// This file checks the behavior of the batch item limit code.
+// In tests, the batch item limit is set to 4. So to trigger the error,
+// all batches in this file have 5 elements.
+
+// For batches that do not contain any calls, a response message with "id" == null
+// is returned.
+
+--> [{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]}]
+<-- [{"jsonrpc":"2.0","id":null,"error":{"code":-32600,"message":"batch too large"}}]
+
+// For batches with at least one call, the call's "id" is used.
+--> [{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","id":3,"method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]}]
+<-- [{"jsonrpc":"2.0","id":3,"error":{"code":-32600,"message":"batch too large"}}]
diff --git a/rpc/websocket.go b/rpc/websocket.go
index d753d2667b..b43b1b5a1a 100644
--- a/rpc/websocket.go
+++ b/rpc/websocket.go
@@ -211,7 +211,7 @@ func DialWebsocketWithDialer(ctx context.Context, endpoint, origin string, diale
if err != nil {
return nil, err
}
- return newClient(ctx, connect)
+ return newClient(ctx, cfg, connect)
}
// DialWebsocket creates a new RPC client that communicates with a JSON-RPC server
@@ -228,7 +228,7 @@ func DialWebsocket(ctx context.Context, endpoint, origin string) (*Client, error
if err != nil {
return nil, err
}
- return newClient(ctx, connect)
+ return newClient(ctx, cfg, connect)
}
func newClientTransportWS(endpoint string, cfg *clientConfig) (reconnectFunc, error) {
diff --git a/scripts/build_test.sh b/scripts/build_test.sh
index 848d538acf..84c80f3359 100755
--- a/scripts/build_test.sh
+++ b/scripts/build_test.sh
@@ -17,4 +17,4 @@ source "$CORETH_PATH"/scripts/constants.sh
# We pass in the arguments to this script directly to enable easily passing parameters such as enabling race detection,
# parallelism, and test coverage.
-go test -coverprofile=coverage.out -covermode=atomic -timeout="30m" ./... $@
+go test -coverprofile=coverage.out -covermode=atomic -timeout="30m" ./... $@
\ No newline at end of file
diff --git a/scripts/geth-allowed-packages.txt b/scripts/geth-allowed-packages.txt
index b697ede5cf..cf32723607 100644
--- a/scripts/geth-allowed-packages.txt
+++ b/scripts/geth-allowed-packages.txt
@@ -11,6 +11,7 @@
"github.com/ethereum/go-ethereum/crypto/blake2b"
"github.com/ethereum/go-ethereum/crypto/bls12381"
"github.com/ethereum/go-ethereum/crypto/bn256"
+"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
diff --git a/scripts/versions.sh b/scripts/versions.sh
index 7b3a476121..74fc0f8fbc 100644
--- a/scripts/versions.sh
+++ b/scripts/versions.sh
@@ -1,4 +1,4 @@
#!/usr/bin/env bash
# Don't export them as they're used in the context of other calls
-avalanche_version=${AVALANCHE_VERSION:-'v1.10.10-rc.2'}
+avalanche_version=${AVALANCHE_VERSION:-'update-cmp-slices'}
diff --git a/sync/handlers/leafs_request.go b/sync/handlers/leafs_request.go
index d57f567571..2a8efeffe4 100644
--- a/sync/handlers/leafs_request.go
+++ b/sync/handlers/leafs_request.go
@@ -102,7 +102,9 @@ func (lrh *LeafsRequestHandler) OnLeafsRequest(ctx context.Context, nodeID ids.N
// TODO: We should know the state root that accounts correspond to,
// as this information will be necessary to access storage tries when
// the trie is path based.
- stateRoot := common.Hash{}
+ // Since providing a state root that is in the database is mandatory, we
+ // always provide the root of the requested trie even if it is an account trie.
+ stateRoot := leafsRequest.Root
t, err := trie.New(trie.StorageTrieID(stateRoot, leafsRequest.Account, leafsRequest.Root), lrh.trieDB)
if err != nil {
log.Debug("error opening trie when processing request, dropping request", "nodeID", nodeID, "requestID", requestID, "root", leafsRequest.Root, "err", err)
@@ -342,14 +344,14 @@ func (rb *responseBuilder) generateRangeProof(start []byte, keys [][]byte) (*mem
start = bytes.Repeat([]byte{0x00}, rb.keyLength)
}
- if err := rb.t.Prove(start, 0, proof); err != nil {
+ if err := rb.t.Prove(start, proof); err != nil {
_ = proof.Close() // closing memdb does not error
return nil, err
}
if len(keys) > 0 {
// If there is a non-zero number of keys, set [end] for the range proof to the last key.
end := keys[len(keys)-1]
- if err := rb.t.Prove(end, 0, proof); err != nil {
+ if err := rb.t.Prove(end, proof); err != nil {
_ = proof.Close() // closing memdb does not error
return nil, err
}
@@ -432,7 +434,11 @@ func (rb *responseBuilder) fillFromTrie(ctx context.Context, end []byte) (bool,
defer func() { rb.trieReadTime += time.Since(startTime) }()
// create iterator to iterate the trie
- it := trie.NewIterator(rb.t.NodeIterator(rb.nextKey()))
+ nodeIt, err := rb.t.NodeIterator(rb.nextKey())
+ if err != nil {
+ return false, err
+ }
+ it := trie.NewIterator(nodeIt)
more := false
for it.Next() {
// if we're at the end, break this loop
diff --git a/sync/handlers/leafs_request_test.go b/sync/handlers/leafs_request_test.go
index 37d7f4e135..42e30735bd 100644
--- a/sync/handlers/leafs_request_test.go
+++ b/sync/handlers/leafs_request_test.go
@@ -489,7 +489,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) {
}
// modify one entry of 1 in 4 segments
if i%(segmentLen*4) == 0 {
- var acc snapshot.Account
+ var acc types.SlimAccount
if err := rlp.DecodeBytes(it.Account(), &acc); err != nil {
t.Fatalf("could not parse snapshot account: %v", err)
}
diff --git a/sync/statesync/sync_helpers.go b/sync/statesync/sync_helpers.go
index f5c51d6bad..312c6ae64d 100644
--- a/sync/statesync/sync_helpers.go
+++ b/sync/statesync/sync_helpers.go
@@ -5,7 +5,6 @@ package statesync
import (
"github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/state/snapshot"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/ethdb"
"github.com/ava-labs/coreth/trie"
@@ -15,14 +14,18 @@ import (
// writeAccountSnapshot stores the account represented by [acc] to the snapshot at [accHash], using
// SlimAccountRLP format (omitting empty code/storage).
func writeAccountSnapshot(db ethdb.KeyValueWriter, accHash common.Hash, acc types.StateAccount) {
- slimAccount := snapshot.SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash, acc.IsMultiCoin)
+ slimAccount := types.SlimAccountRLP(acc)
rawdb.WriteAccountSnapshot(db, accHash, slimAccount)
}
// writeAccountStorageSnapshotFromTrie iterates the trie at [storageTrie] and copies all entries
// to the storage snapshot for [accountHash].
func writeAccountStorageSnapshotFromTrie(batch ethdb.Batch, batchSize int, accountHash common.Hash, storageTrie *trie.Trie) error {
- it := trie.NewIterator(storageTrie.NodeIterator(nil))
+ nodeIt, err := storageTrie.NodeIterator(nil)
+ if err != nil {
+ return err
+ }
+ it := trie.NewIterator(nodeIt)
for it.Next() {
rawdb.WriteStorageSnapshot(batch, accountHash, common.BytesToHash(it.Key), it.Value)
if batch.ValueSize() > batchSize {
diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go
index 12cbd5d8df..4e7b9b14d7 100644
--- a/sync/statesync/sync_test.go
+++ b/sync/statesync/sync_test.go
@@ -441,7 +441,11 @@ func TestResyncNewRootAfterDeletes(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- it := trie.NewIterator(tr.NodeIterator(nil))
+ nodeIt, err := tr.NodeIterator(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ it := trie.NewIterator(nodeIt)
accountsWithStorage := 0
// keep track of storage tries we delete trie nodes from
diff --git a/sync/statesync/test_sync.go b/sync/statesync/test_sync.go
index 74a3686a02..48f4b0faf7 100644
--- a/sync/statesync/test_sync.go
+++ b/sync/statesync/test_sync.go
@@ -10,7 +10,6 @@ import (
"github.com/ava-labs/coreth/accounts/keystore"
"github.com/ava-labs/coreth/core/rawdb"
- "github.com/ava-labs/coreth/core/state/snapshot"
"github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/ethdb"
"github.com/ava-labs/coreth/trie"
@@ -47,7 +46,7 @@ func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database
}
// check snapshot consistency
snapshotVal := rawdb.ReadAccountSnapshot(clientDB, accHash)
- expectedSnapshotVal := snapshot.SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash, acc.IsMultiCoin)
+ expectedSnapshotVal := types.SlimAccountRLP(acc)
assert.Equal(t, expectedSnapshotVal, snapshotVal)
// check code consistency
diff --git a/sync/syncutils/iterators.go b/sync/syncutils/iterators.go
index 751f874f54..ab06a7ef87 100644
--- a/sync/syncutils/iterators.go
+++ b/sync/syncutils/iterators.go
@@ -5,6 +5,7 @@ package syncutils
import (
"github.com/ava-labs/coreth/core/state/snapshot"
+ "github.com/ava-labs/coreth/core/types"
"github.com/ava-labs/coreth/ethdb"
)
@@ -26,7 +27,7 @@ func (it *AccountIterator) Next() bool {
return false
}
for it.AccountIterator.Next() {
- it.val, it.err = snapshot.FullAccountRLP(it.Account())
+ it.val, it.err = types.FullAccountRLP(it.Account())
return it.err == nil
}
it.val = nil
diff --git a/tests/init.go b/tests/init.go
index afc1be4a3f..e8bdc6714e 100644
--- a/tests/init.go
+++ b/tests/init.go
@@ -292,6 +292,26 @@ var Forks = map[string]*params.ChainConfig{
CortinaBlockTimestamp: utils.NewUint64(0),
DUpgradeBlockTimestamp: utils.NewUint64(0),
},
+ "Cancun": {
+ ChainID: big.NewInt(1),
+ HomesteadBlock: big.NewInt(0),
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ ApricotPhase1BlockTimestamp: utils.NewUint64(0),
+ ApricotPhase2BlockTimestamp: utils.NewUint64(0),
+ ApricotPhase3BlockTimestamp: utils.NewUint64(0),
+ ApricotPhase4BlockTimestamp: utils.NewUint64(0),
+ ApricotPhase5BlockTimestamp: utils.NewUint64(0),
+ BanffBlockTimestamp: utils.NewUint64(0),
+ CortinaBlockTimestamp: utils.NewUint64(0),
+ DUpgradeBlockTimestamp: utils.NewUint64(0),
+ CancunTime: utils.NewUint64(0),
+ },
}
// AvailableForks returns the set of defined fork names
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index c018a19e10..f3fa6dec4f 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -48,7 +48,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo
}
}
// Commit and re-open to start with a clean state.
- root, _ := statedb.Commit(false, false)
+ root, _ := statedb.Commit(0, false, false)
var snaps *snapshot.Tree
if snapshotter {
diff --git a/trie/committer.go b/trie/committer.go
index 1cb318c460..ac317ecb33 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -141,22 +141,15 @@ func (c *committer) store(path []byte, n node) node {
// The node is embedded in its parent, in other words, this node
// will not be stored in the database independently, mark it as
// deleted only if the node was existent in database before.
- prev, ok := c.tracer.accessList[string(path)]
+ _, ok := c.tracer.accessList[string(path)]
if ok {
- c.nodes.AddNode(path, trienode.NewWithPrev(common.Hash{}, nil, prev))
+ c.nodes.AddNode(path, trienode.NewDeleted())
}
return n
}
// Collect the dirty node to nodeset for return.
- var (
- nhash = common.BytesToHash(hash)
- node = trienode.NewWithPrev(
- nhash,
- nodeToBytes(n),
- c.tracer.accessList[string(path)],
- )
- )
- c.nodes.AddNode(path, node)
+ nhash := common.BytesToHash(hash)
+ c.nodes.AddNode(path, trienode.New(nhash, nodeToBytes(n)))
// Collect the corresponding leaf node if it's required. We don't check
// full node since it's impossible to store value in fullNode. The key
diff --git a/trie/database_wrap.go b/trie/database.go
similarity index 81%
rename from trie/database_wrap.go
rename to trie/database.go
index 70e19c9feb..fa207d63f4 100644
--- a/trie/database_wrap.go
+++ b/trie/database.go
@@ -23,7 +23,9 @@ import (
"github.com/ava-labs/coreth/ethdb"
"github.com/ava-labs/coreth/trie/triedb/hashdb"
+ "github.com/ava-labs/coreth/trie/triedb/pathdb"
"github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ava-labs/coreth/trie/triestate"
"github.com/ava-labs/coreth/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
@@ -35,10 +37,14 @@ const (
// Config defines all necessary options for database.
type Config struct {
- Cache int // Memory allowance (MB) to use for caching trie nodes in memory
- Journal string // Journal of clean cache to survive node restarts
- Preimages bool // Flag whether the preimage of trie key is recorded
- StatsPrefix string // Prefix for cache stats (disabled if empty)
+ Cache int // Memory allowance (MB) to use for caching trie nodes in memory
+ Journal string // Journal of clean cache to survive node restarts
+ Preimages bool // Flag whether the preimage of trie key is recorded
+ PathDB *pathdb.Config // Configs for experimental path-based scheme, not used yet.
+ StatsPrefix string // Prefix for cache stats (disabled if empty)
+
+ // Testing hooks
+ OnCommit func(states *triestate.Set) // Hook invoked when commit is performed
}
// backend defines the methods needed to access/update trie nodes in different
@@ -58,8 +64,11 @@ type backend interface {
// Update performs a state transition by committing dirty nodes contained
// in the given set in order to update state from the specified parent to
// the specified root.
- Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error
- UpdateAndReferenceRoot(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error
+ //
+ // The passed-in maps (nodes, states) will be retained to avoid copying
+ // everything. Therefore, these maps must not be changed afterwards.
+ Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error
+ UpdateAndReferenceRoot(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error
// Commit writes all relevant trie nodes belonging to the specified state
// to disk. Report specifies whether logs will be displayed in info level.
@@ -122,27 +131,42 @@ func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database {
}
// Reader returns a reader for accessing all trie nodes with provided state root.
-// Nil is returned in case the state is not available.
-func (db *Database) Reader(blockRoot common.Hash) Reader {
- return db.backend.(*hashdb.Database).Reader(blockRoot)
+// An error will be returned if the requested state is not available.
+func (db *Database) Reader(blockRoot common.Hash) (Reader, error) {
+ switch b := db.backend.(type) {
+ case *hashdb.Database:
+ return b.Reader(blockRoot)
+ case *pathdb.Database:
+ return b.Reader(blockRoot)
+ }
+ return nil, errors.New("unknown backend")
}
// Update performs a state transition by committing dirty nodes contained in the
// given set in order to update state from the specified parent to the specified
// root. The held pre-images accumulated up to this point will be flushed in case
// the size exceeds the threshold.
-func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
+//
+// The passed-in maps (nodes, states) will be retained to avoid copying everything.
+// Therefore, these maps must not be changed afterwards.
+func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
+ if db.config != nil && db.config.OnCommit != nil {
+ db.config.OnCommit(states)
+ }
if db.preimages != nil {
db.preimages.commit(false)
}
- return db.backend.Update(root, parent, nodes)
+ return db.backend.Update(root, parent, block, nodes, states)
}
-func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
+func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
+ if db.config != nil && db.config.OnCommit != nil {
+ db.config.OnCommit(states)
+ }
if db.preimages != nil {
db.preimages.commit(false)
}
- return db.backend.UpdateAndReferenceRoot(root, parent, nodes)
+ return db.backend.UpdateAndReferenceRoot(root, parent, block, nodes, states)
}
// Commit iterates over all the children of a particular node, writes them out
@@ -184,10 +208,15 @@ func (db *Database) Scheme() string {
// It is meant to be called when closing the blockchain object, so that all
// resources held can be released correctly.
func (db *Database) Close() error {
+ db.WritePreimages()
+ return db.backend.Close()
+}
+
+// WritePreimages flushes all accumulated preimages to disk forcibly.
+func (db *Database) WritePreimages() {
if db.preimages != nil {
db.preimages.commit(true)
}
- return db.backend.Close()
}
// saveCache saves clean state cache to given directory path
diff --git a/trie/database_test.go b/trie/database_test.go
index 3c3b2cc54a..ab1957e4a6 100644
--- a/trie/database_test.go
+++ b/trie/database_test.go
@@ -30,16 +30,16 @@ import (
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/ethdb"
"github.com/ava-labs/coreth/trie/triedb/hashdb"
+ "github.com/ava-labs/coreth/trie/triedb/pathdb"
)
// newTestDatabase initializes the trie database with specified scheme.
func newTestDatabase(diskdb ethdb.Database, scheme string) *Database {
db := prepare(diskdb, nil)
if scheme == rawdb.HashScheme {
- db.backend = hashdb.New(diskdb, db.cleans, mptResolver{})
+ db.backend = hashdb.New(diskdb, nil, mptResolver{})
+ } else {
+ db.backend = pathdb.New(diskdb, &pathdb.Config{}) // disable clean/dirty cache
}
- //} else {
- // db.backend = snap.New(diskdb, db.cleans, nil)
- //}
return db
}
diff --git a/trie/errors.go b/trie/errors.go
index b6f90132b6..307a5f8747 100644
--- a/trie/errors.go
+++ b/trie/errors.go
@@ -27,11 +27,17 @@
package trie
import (
+ "errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
)
+// ErrCommitted is returned when an already committed trie is requested for usage.
+// The potential usages can be `Get`, `Update`, `Delete`, `NodeIterator`, `Prove`
+// and so on.
+var ErrCommitted = errors.New("trie is already committed")
+
// MissingNodeError is returned by the trie functions (Get, Update, Delete)
// in the case where a trie node is not present in the local database. It contains
// information necessary for retrieving the missing node.
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index f544738fa1..d6e67bbf34 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -44,7 +44,7 @@ import (
func TestEmptyIterator(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
- iter := trie.NodeIterator(nil)
+ iter := trie.MustNodeIterator(nil)
seen := make(map[string]struct{})
for iter.Next(true) {
@@ -72,12 +72,12 @@ func TestIterator(t *testing.T) {
all[val.k] = val.v
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
- root, nodes := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
found := make(map[string]string)
- it := NewIterator(trie.NodeIterator(nil))
+ it := NewIterator(trie.MustNodeIterator(nil))
for it.Next() {
found[string(it.Key)] = string(it.Value)
}
@@ -94,6 +94,10 @@ type kv struct {
t bool
}
+func (k *kv) cmp(other *kv) int {
+ return bytes.Compare(k.k, other.k)
+}
+
func TestIteratorLargeData(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
vals := make(map[string]*kv)
@@ -107,7 +111,7 @@ func TestIteratorLargeData(t *testing.T) {
vals[string(value2.k)] = value2
}
- it := NewIterator(trie.NodeIterator(nil))
+ it := NewIterator(trie.MustNodeIterator(nil))
for it.Next() {
vals[string(it.Key)].t = true
}
@@ -136,7 +140,7 @@ type iterationElement struct {
// Tests that the node iterator indeed walks over the entire database contents.
func TestNodeIteratorCoverage(t *testing.T) {
testNodeIteratorCoverage(t, rawdb.HashScheme)
- //testNodeIteratorCoverage(t, rawdb.PathScheme)
+ testNodeIteratorCoverage(t, rawdb.PathScheme)
}
func testNodeIteratorCoverage(t *testing.T, scheme string) {
@@ -145,7 +149,7 @@ func testNodeIteratorCoverage(t *testing.T, scheme string) {
// Gather all the node hashes found by the iterator
var elements = make(map[common.Hash]iterationElement)
- for it := trie.NodeIterator(nil); it.Next(true); {
+ for it := trie.MustNodeIterator(nil); it.Next(true); {
if it.Hash() != (common.Hash{}) {
elements[it.Hash()] = iterationElement{
hash: it.Hash(),
@@ -155,8 +159,12 @@ func testNodeIteratorCoverage(t *testing.T, scheme string) {
}
}
// Cross check the hashes and the database itself
+ reader, err := nodeDb.Reader(trie.Hash())
+ if err != nil {
+ t.Fatalf("state is not available %x", trie.Hash())
+ }
for _, element := range elements {
- if blob, err := nodeDb.Reader(trie.Hash()).Node(common.Hash{}, element.path, element.hash); err != nil {
+ if blob, err := reader.Node(common.Hash{}, element.path, element.hash); err != nil {
t.Errorf("failed to retrieve reported node %x: %v", element.hash, err)
} else if !bytes.Equal(blob, element.blob) {
t.Errorf("node blob is different, want %v got %v", element.blob, blob)
@@ -216,19 +224,19 @@ func TestIteratorSeek(t *testing.T) {
}
// Seek to the middle.
- it := NewIterator(trie.NodeIterator([]byte("fab")))
+ it := NewIterator(trie.MustNodeIterator([]byte("fab")))
if err := checkIteratorOrder(testdata1[4:], it); err != nil {
t.Fatal(err)
}
// Seek to a non-existent key.
- it = NewIterator(trie.NodeIterator([]byte("barc")))
+ it = NewIterator(trie.MustNodeIterator([]byte("barc")))
if err := checkIteratorOrder(testdata1[1:], it); err != nil {
t.Fatal(err)
}
// Seek beyond the end.
- it = NewIterator(trie.NodeIterator([]byte("z")))
+ it = NewIterator(trie.MustNodeIterator([]byte("z")))
if err := checkIteratorOrder(nil, it); err != nil {
t.Fatal(err)
}
@@ -256,8 +264,8 @@ func TestDifferenceIterator(t *testing.T) {
for _, val := range testdata1 {
triea.MustUpdate([]byte(val.k), []byte(val.v))
}
- rootA, nodesA := triea.Commit(false)
- dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA))
+ rootA, nodesA, _ := triea.Commit(false)
+ dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil)
triea, _ = New(TrieID(rootA), dba)
dbb := NewDatabase(rawdb.NewMemoryDatabase())
@@ -265,12 +273,12 @@ func TestDifferenceIterator(t *testing.T) {
for _, val := range testdata2 {
trieb.MustUpdate([]byte(val.k), []byte(val.v))
}
- rootB, nodesB := trieb.Commit(false)
- dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB))
+ rootB, nodesB, _ := trieb.Commit(false)
+ dbb.Update(rootB, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesB), nil)
trieb, _ = New(TrieID(rootB), dbb)
found := make(map[string]string)
- di, _ := NewDifferenceIterator(triea.NodeIterator(nil), trieb.NodeIterator(nil))
+ di, _ := NewDifferenceIterator(triea.MustNodeIterator(nil), trieb.MustNodeIterator(nil))
it := NewIterator(di)
for it.Next() {
found[string(it.Key)] = string(it.Value)
@@ -298,8 +306,8 @@ func TestUnionIterator(t *testing.T) {
for _, val := range testdata1 {
triea.MustUpdate([]byte(val.k), []byte(val.v))
}
- rootA, nodesA := triea.Commit(false)
- dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA))
+ rootA, nodesA, _ := triea.Commit(false)
+ dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil)
triea, _ = New(TrieID(rootA), dba)
dbb := NewDatabase(rawdb.NewMemoryDatabase())
@@ -307,11 +315,11 @@ func TestUnionIterator(t *testing.T) {
for _, val := range testdata2 {
trieb.MustUpdate([]byte(val.k), []byte(val.v))
}
- rootB, nodesB := trieb.Commit(false)
- dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB))
+ rootB, nodesB, _ := trieb.Commit(false)
+ dbb.Update(rootB, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesB), nil)
trieb, _ = New(TrieID(rootB), dbb)
- di, _ := NewUnionIterator([]NodeIterator{triea.NodeIterator(nil), trieb.NodeIterator(nil)})
+ di, _ := NewUnionIterator([]NodeIterator{triea.MustNodeIterator(nil), trieb.MustNodeIterator(nil)})
it := NewIterator(di)
all := []struct{ k, v string }{
@@ -350,15 +358,15 @@ func TestIteratorNoDups(t *testing.T) {
for _, val := range testdata1 {
tr.MustUpdate([]byte(val.k), []byte(val.v))
}
- checkIteratorNoDups(t, tr.NodeIterator(nil), nil)
+ checkIteratorNoDups(t, tr.MustNodeIterator(nil), nil)
}
// This test checks that nodeIterator.Next can be retried after inserting missing trie nodes.
func TestIteratorContinueAfterError(t *testing.T) {
testIteratorContinueAfterError(t, false, rawdb.HashScheme)
testIteratorContinueAfterError(t, true, rawdb.HashScheme)
- // testIteratorContinueAfterError(t, false, rawdb.PathScheme)
- // testIteratorContinueAfterError(t, true, rawdb.PathScheme)
+ testIteratorContinueAfterError(t, false, rawdb.PathScheme)
+ testIteratorContinueAfterError(t, true, rawdb.PathScheme)
}
func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
@@ -369,13 +377,13 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
for _, val := range testdata1 {
tr.MustUpdate([]byte(val.k), []byte(val.v))
}
- root, nodes := tr.Commit(false)
- tdb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := tr.Commit(false)
+ tdb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
if !memonly {
tdb.Commit(root, false)
}
tr, _ = New(TrieID(root), tdb)
- wantNodeCount := checkIteratorNoDups(t, tr.NodeIterator(nil), nil)
+ wantNodeCount := checkIteratorNoDups(t, tr.MustNodeIterator(nil), nil)
var (
paths [][]byte
@@ -434,7 +442,7 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
}
// Iterate until the error is hit.
seen := make(map[string]bool)
- it := tr.NodeIterator(nil)
+ it := tr.MustNodeIterator(nil)
checkIteratorNoDups(t, it, seen)
missing, ok := it.Error().(*MissingNodeError)
if !ok || missing.NodeHash != rhash {
@@ -463,8 +471,8 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
func TestIteratorContinueAfterSeekError(t *testing.T) {
testIteratorContinueAfterSeekError(t, false, rawdb.HashScheme)
testIteratorContinueAfterSeekError(t, true, rawdb.HashScheme)
- // testIteratorContinueAfterSeekError(t, false, rawdb.PathScheme)
- // testIteratorContinueAfterSeekError(t, true, rawdb.PathScheme)
+ testIteratorContinueAfterSeekError(t, false, rawdb.PathScheme)
+ testIteratorContinueAfterSeekError(t, true, rawdb.PathScheme)
}
func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme string) {
@@ -479,14 +487,14 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme strin
for _, val := range testdata1 {
ctr.MustUpdate([]byte(val.k), []byte(val.v))
}
- root, nodes := ctr.Commit(false)
+ root, nodes, _ := ctr.Commit(false)
for path, n := range nodes.Nodes {
if n.Hash == barNodeHash {
barNodePath = []byte(path)
break
}
}
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
if !memonly {
triedb.Commit(root, false)
}
@@ -502,7 +510,7 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme strin
}
// Create a new iterator that seeks to "bars". Seeking can't proceed because
// the node is missing.
- it := tr.NodeIterator([]byte("bars"))
+ it := tr.MustNodeIterator([]byte("bars"))
missing, ok := it.Error().(*MissingNodeError)
if !ok {
t.Fatal("want MissingNodeError, got", it.Error())
@@ -536,7 +544,7 @@ func checkIteratorNoDups(t *testing.T, it NodeIterator, seen map[string]bool) in
func TestIteratorNodeBlob(t *testing.T) {
testIteratorNodeBlob(t, rawdb.HashScheme)
- //testIteratorNodeBlob(t, rawdb.PathScheme)
+ testIteratorNodeBlob(t, rawdb.PathScheme)
}
type loggingDb struct {
@@ -602,9 +610,12 @@ func makeLargeTestTrie() (*Database, *StateTrie, *loggingDb) {
val = crypto.Keccak256(val)
trie.MustUpdate(key, val)
}
- root, nodes := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ triedb.Commit(root, false)
+
// Return the generated trie
+ trie, _ = NewStateTrie(TrieID(root), triedb)
return triedb, trie, logDb
}
@@ -616,8 +627,8 @@ func TestNodeIteratorLargeTrie(t *testing.T) {
// Do a seek operation
trie.NodeIterator(common.FromHex("0x77667766776677766778855885885885"))
// master: 24 get operations
- // this pr: 5 get operations
- if have, want := logDb.getCount, uint64(5); have != want {
+ // this pr: 6 get operations
+ if have, want := logDb.getCount, uint64(6); have != want {
t.Fatalf("Too many lookups during seek, have %d want %d", have, want)
}
}
@@ -642,13 +653,13 @@ func testIteratorNodeBlob(t *testing.T, scheme string) {
all[val.k] = val.v
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
- root, nodes := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
triedb.Commit(root, false)
var found = make(map[common.Hash][]byte)
trie, _ = New(TrieID(root), triedb)
- it := trie.NodeIterator(nil)
+ it := trie.MustNodeIterator(nil)
for it.Next(true) {
if it.Hash() == (common.Hash{}) {
continue
diff --git a/trie/proof.go b/trie/proof.go
index e127cc4d87..72eabf340f 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -43,7 +43,11 @@ import (
// If the trie does not contain a value for key, the returned proof contains all
// nodes of the longest existing prefix of the key (at least the root node), ending
// with the node that proves the absence of the key.
-func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error {
+func (t *Trie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
+ // Short circuit if the trie is already committed and not usable.
+ if t.committed {
+ return ErrCommitted
+ }
// Collect all nodes on the path to key.
var (
prefix []byte
@@ -91,10 +95,6 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) e
defer returnHasherToPool(hasher)
for i, n := range nodes {
- if fromLevel > 0 {
- fromLevel--
- continue
- }
var hn node
n, hn = hasher.proofHash(n)
if hash, ok := hn.(hashNode); ok || i == 0 {
@@ -117,8 +117,8 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) e
// If the trie does not contain a value for key, the returned proof contains all
// nodes of the longest existing prefix of the key (at least the root node), ending
// with the node that proves the absence of the key.
-func (t *StateTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error {
- return t.trie.Prove(key, fromLevel, proofDb)
+func (t *StateTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
+ return t.trie.Prove(key, proofDb)
}
// VerifyProof checks merkle proofs. The given proof must contain the value for
diff --git a/trie/proof_test.go b/trie/proof_test.go
index 93f5f5739d..abb7df4598 100644
--- a/trie/proof_test.go
+++ b/trie/proof_test.go
@@ -32,13 +32,13 @@ import (
"encoding/binary"
"fmt"
mrand "math/rand"
- "sort"
"testing"
"github.com/ava-labs/coreth/core/rawdb"
"github.com/ava-labs/coreth/ethdb/memorydb"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
+ "golang.org/x/exp/slices"
)
// Prng is a pseudo random number generator seeded by strong randomness.
@@ -67,13 +67,13 @@ func makeProvers(trie *Trie) []func(key []byte) *memorydb.Database {
// Create a direct trie based Merkle prover
provers = append(provers, func(key []byte) *memorydb.Database {
proof := memorydb.New()
- trie.Prove(key, 0, proof)
+ trie.Prove(key, proof)
return proof
})
// Create a leaf iterator based Merkle prover
provers = append(provers, func(key []byte) *memorydb.Database {
proof := memorydb.New()
- if it := NewIterator(trie.NodeIterator(key)); it.Next() && bytes.Equal(key, it.Key) {
+ if it := NewIterator(trie.MustNodeIterator(key)); it.Next() && bytes.Equal(key, it.Key) {
for _, p := range it.Prove() {
proof.Put(crypto.Keccak256(p), p)
}
@@ -160,7 +160,7 @@ func TestMissingKeyProof(t *testing.T) {
for i, key := range []string{"a", "j", "l", "z"} {
proof := memorydb.New()
- trie.Prove([]byte(key), 0, proof)
+ trie.Prove([]byte(key), proof)
if proof.Len() != 1 {
t.Errorf("test %d: proof should have one element", i)
@@ -175,30 +175,24 @@ func TestMissingKeyProof(t *testing.T) {
}
}
-type entrySlice []*kv
-
-func (p entrySlice) Len() int { return len(p) }
-func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
-func (p entrySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-
// TestRangeProof tests normal range proof with both edge proofs
// as the existent proof. The test cases are generated randomly.
func TestRangeProof(t *testing.T) {
trie, vals := randomTrie(4096)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
for i := 0; i < 500; i++ {
start := mrand.Intn(len(entries))
end := mrand.Intn(len(entries)-start) + start + 1
proof := memorydb.New()
- if err := trie.Prove(entries[start].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[start].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(entries[end-1].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[end-1].k, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
var keys [][]byte
@@ -218,11 +212,11 @@ func TestRangeProof(t *testing.T) {
// The test cases are generated randomly.
func TestRangeProofWithNonExistentProof(t *testing.T) {
trie, vals := randomTrie(4096)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
for i := 0; i < 500; i++ {
start := mrand.Intn(len(entries))
end := mrand.Intn(len(entries)-start) + start + 1
@@ -246,10 +240,10 @@ func TestRangeProofWithNonExistentProof(t *testing.T) {
if bytes.Compare(last, entries[end-1].k) < 0 {
continue
}
- if err := trie.Prove(first, 0, proof); err != nil {
+ if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, 0, proof); err != nil {
+ if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
var keys [][]byte
@@ -267,10 +261,10 @@ func TestRangeProofWithNonExistentProof(t *testing.T) {
proof := memorydb.New()
first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes()
- if err := trie.Prove(first, 0, proof); err != nil {
+ if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, 0, proof); err != nil {
+ if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
var k [][]byte
@@ -290,21 +284,21 @@ func TestRangeProofWithNonExistentProof(t *testing.T) {
// - There exists a gap between the last element and the right edge proof
func TestRangeProofWithInvalidNonExistentProof(t *testing.T) {
trie, vals := randomTrie(4096)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
// Case 1
start, end := 100, 200
first := decreaseKey(common.CopyBytes(entries[start].k))
proof := memorydb.New()
- if err := trie.Prove(first, 0, proof); err != nil {
+ if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(entries[end-1].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[end-1].k, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
start = 105 // Gap created
@@ -323,10 +317,10 @@ func TestRangeProofWithInvalidNonExistentProof(t *testing.T) {
start, end = 100, 200
last := increaseKey(common.CopyBytes(entries[end-1].k))
proof = memorydb.New()
- if err := trie.Prove(entries[start].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[start].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, 0, proof); err != nil {
+ if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
end = 195 // Capped slice
@@ -347,17 +341,17 @@ func TestRangeProofWithInvalidNonExistentProof(t *testing.T) {
// non-existent one.
func TestOneElementRangeProof(t *testing.T) {
trie, vals := randomTrie(4096)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
// One element with existent edge proof, both edge proofs
// point to the SAME key.
start := 1000
proof := memorydb.New()
- if err := trie.Prove(entries[start].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[start].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
_, err := VerifyRangeProof(trie.Hash(), entries[start].k, entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
@@ -369,10 +363,10 @@ func TestOneElementRangeProof(t *testing.T) {
start = 1000
first := decreaseKey(common.CopyBytes(entries[start].k))
proof = memorydb.New()
- if err := trie.Prove(first, 0, proof); err != nil {
+ if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(entries[start].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[start].k, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
_, err = VerifyRangeProof(trie.Hash(), first, entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
@@ -384,10 +378,10 @@ func TestOneElementRangeProof(t *testing.T) {
start = 1000
last := increaseKey(common.CopyBytes(entries[start].k))
proof = memorydb.New()
- if err := trie.Prove(entries[start].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[start].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, 0, proof); err != nil {
+ if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
_, err = VerifyRangeProof(trie.Hash(), entries[start].k, last, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
@@ -399,10 +393,10 @@ func TestOneElementRangeProof(t *testing.T) {
start = 1000
first, last = decreaseKey(common.CopyBytes(entries[start].k)), increaseKey(common.CopyBytes(entries[start].k))
proof = memorydb.New()
- if err := trie.Prove(first, 0, proof); err != nil {
+ if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, 0, proof); err != nil {
+ if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
_, err = VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
@@ -418,10 +412,10 @@ func TestOneElementRangeProof(t *testing.T) {
first = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
last = entry.k
proof = memorydb.New()
- if err := tinyTrie.Prove(first, 0, proof); err != nil {
+ if err := tinyTrie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := tinyTrie.Prove(last, 0, proof); err != nil {
+ if err := tinyTrie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
_, err = VerifyRangeProof(tinyTrie.Hash(), first, last, [][]byte{entry.k}, [][]byte{entry.v}, proof)
@@ -434,11 +428,11 @@ func TestOneElementRangeProof(t *testing.T) {
// The edge proofs can be nil.
func TestAllElementsProof(t *testing.T) {
trie, vals := randomTrie(4096)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
var k [][]byte
var v [][]byte
@@ -453,10 +447,10 @@ func TestAllElementsProof(t *testing.T) {
// With edge proofs, it should still work.
proof := memorydb.New()
- if err := trie.Prove(entries[0].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[0].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(entries[len(entries)-1].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[len(entries)-1].k, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
_, err = VerifyRangeProof(trie.Hash(), k[0], k[len(k)-1], k, v, proof)
@@ -468,10 +462,10 @@ func TestAllElementsProof(t *testing.T) {
proof = memorydb.New()
first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes()
- if err := trie.Prove(first, 0, proof); err != nil {
+ if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, 0, proof); err != nil {
+ if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
_, err = VerifyRangeProof(trie.Hash(), first, last, k, v, proof)
@@ -484,21 +478,21 @@ func TestAllElementsProof(t *testing.T) {
func TestSingleSideRangeProof(t *testing.T) {
for i := 0; i < 64; i++ {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
- var entries entrySlice
+ var entries []*kv
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
trie.MustUpdate(value.k, value.v)
entries = append(entries, value)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
var cases = []int{0, 1, 50, 100, 1000, 2000, len(entries) - 1}
for _, pos := range cases {
proof := memorydb.New()
- if err := trie.Prove(common.Hash{}.Bytes(), 0, proof); err != nil {
+ if err := trie.Prove(common.Hash{}.Bytes(), proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(entries[pos].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[pos].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
k := make([][]byte, 0)
@@ -519,22 +513,22 @@ func TestSingleSideRangeProof(t *testing.T) {
func TestReverseSingleSideRangeProof(t *testing.T) {
for i := 0; i < 64; i++ {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
- var entries entrySlice
+ var entries []*kv
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
trie.MustUpdate(value.k, value.v)
entries = append(entries, value)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
var cases = []int{0, 1, 50, 100, 1000, 2000, len(entries) - 1}
for _, pos := range cases {
proof := memorydb.New()
- if err := trie.Prove(entries[pos].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[pos].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
- if err := trie.Prove(last.Bytes(), 0, proof); err != nil {
+ if err := trie.Prove(last.Bytes(), proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
k := make([][]byte, 0)
@@ -555,20 +549,20 @@ func TestReverseSingleSideRangeProof(t *testing.T) {
// The prover is expected to detect the error.
func TestBadRangeProof(t *testing.T) {
trie, vals := randomTrie(4096)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
for i := 0; i < 500; i++ {
start := mrand.Intn(len(entries))
end := mrand.Intn(len(entries)-start) + start + 1
proof := memorydb.New()
- if err := trie.Prove(entries[start].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[start].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(entries[end-1].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[end-1].k, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
var keys [][]byte
@@ -634,10 +628,10 @@ func TestGappedRangeProof(t *testing.T) {
}
first, last := 2, 8
proof := memorydb.New()
- if err := trie.Prove(entries[first].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[first].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(entries[last-1].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[last-1].k, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
var keys [][]byte
@@ -658,11 +652,11 @@ func TestGappedRangeProof(t *testing.T) {
// TestSameSideProofs tests the element is not in the range covered by proofs
func TestSameSideProofs(t *testing.T) {
trie, vals := randomTrie(4096)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
pos := 1000
first := decreaseKey(common.CopyBytes(entries[pos].k))
@@ -670,10 +664,10 @@ func TestSameSideProofs(t *testing.T) {
last := decreaseKey(common.CopyBytes(entries[pos].k))
proof := memorydb.New()
- if err := trie.Prove(first, 0, proof); err != nil {
+ if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, 0, proof); err != nil {
+ if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
_, err := VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof)
@@ -686,10 +680,10 @@ func TestSameSideProofs(t *testing.T) {
last = increaseKey(last)
proof = memorydb.New()
- if err := trie.Prove(first, 0, proof); err != nil {
+ if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, 0, proof); err != nil {
+ if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
_, err = VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof)
@@ -700,13 +694,13 @@ func TestSameSideProofs(t *testing.T) {
func TestHasRightElement(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
- var entries entrySlice
+ var entries []*kv
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
trie.MustUpdate(value.k, value.v)
entries = append(entries, value)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
var cases = []struct {
start int
@@ -734,23 +728,23 @@ func TestHasRightElement(t *testing.T) {
)
if c.start == -1 {
firstKey, start = common.Hash{}.Bytes(), 0
- if err := trie.Prove(firstKey, 0, proof); err != nil {
+ if err := trie.Prove(firstKey, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
} else {
firstKey = entries[c.start].k
- if err := trie.Prove(entries[c.start].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[c.start].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
}
if c.end == -1 {
lastKey, end = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes(), len(entries)
- if err := trie.Prove(lastKey, 0, proof); err != nil {
+ if err := trie.Prove(lastKey, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
} else {
lastKey = entries[c.end-1].k
- if err := trie.Prove(entries[c.end-1].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[c.end-1].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
}
@@ -774,11 +768,11 @@ func TestHasRightElement(t *testing.T) {
// The first edge proof must be a non-existent proof.
func TestEmptyRangeProof(t *testing.T) {
trie, vals := randomTrie(4096)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
var cases = []struct {
pos int
@@ -790,7 +784,7 @@ func TestEmptyRangeProof(t *testing.T) {
for _, c := range cases {
proof := memorydb.New()
first := increaseKey(common.CopyBytes(entries[c.pos].k))
- if err := trie.Prove(first, 0, proof); err != nil {
+ if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
_, err := VerifyRangeProof(trie.Hash(), first, nil, nil, nil, proof)
@@ -809,11 +803,11 @@ func TestEmptyRangeProof(t *testing.T) {
func TestBloatedProof(t *testing.T) {
// Use a small trie
trie, kvs := nonRandomTrie(100)
- var entries entrySlice
+ var entries []*kv
for _, kv := range kvs {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
var keys [][]byte
var vals [][]byte
@@ -821,7 +815,7 @@ func TestBloatedProof(t *testing.T) {
// In the 'malicious' case, we add proofs for every single item
// (but only one key/value pair used as leaf)
for i, entry := range entries {
- trie.Prove(entry.k, 0, proof)
+ trie.Prove(entry.k, proof)
if i == 50 {
keys = append(keys, entry.k)
vals = append(vals, entry.v)
@@ -830,8 +824,8 @@ func TestBloatedProof(t *testing.T) {
// For reference, we use the same function, but _only_ prove the first
// and last element
want := memorydb.New()
- trie.Prove(keys[0], 0, want)
- trie.Prove(keys[len(keys)-1], 0, want)
+ trie.Prove(keys[0], want)
+ trie.Prove(keys[len(keys)-1], want)
if _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof); err != nil {
t.Fatalf("expected bloated proof to succeed, got %v", err)
@@ -843,11 +837,11 @@ func TestBloatedProof(t *testing.T) {
// noop technically, but practically should be rejected.
func TestEmptyValueRangeProof(t *testing.T) {
trie, values := randomTrie(512)
- var entries entrySlice
+ var entries []*kv
for _, kv := range values {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
// Create a new entry with a slightly modified key
mid := len(entries) / 2
@@ -864,10 +858,10 @@ func TestEmptyValueRangeProof(t *testing.T) {
start, end := 1, len(entries)-1
proof := memorydb.New()
- if err := trie.Prove(entries[start].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[start].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(entries[end-1].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[end-1].k, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
var keys [][]byte
@@ -887,11 +881,11 @@ func TestEmptyValueRangeProof(t *testing.T) {
// practically should be rejected.
func TestAllElementsEmptyValueRangeProof(t *testing.T) {
trie, values := randomTrie(512)
- var entries entrySlice
+ var entries []*kv
for _, kv := range values {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
// Create a new entry with a slightly modified key
mid := len(entries) / 2
@@ -959,7 +953,7 @@ func BenchmarkProve(b *testing.B) {
for i := 0; i < b.N; i++ {
kv := vals[keys[i%len(keys)]]
proofs := memorydb.New()
- if trie.Prove(kv.k, 0, proofs); proofs.Len() == 0 {
+ if trie.Prove(kv.k, proofs); proofs.Len() == 0 {
b.Fatalf("zero length proof for %x", kv.k)
}
}
@@ -973,7 +967,7 @@ func BenchmarkVerifyProof(b *testing.B) {
for k := range vals {
keys = append(keys, k)
proof := memorydb.New()
- trie.Prove([]byte(k), 0, proof)
+ trie.Prove([]byte(k), proof)
proofs = append(proofs, proof)
}
@@ -993,19 +987,19 @@ func BenchmarkVerifyRangeProof5000(b *testing.B) { benchmarkVerifyRangeProof(b,
func benchmarkVerifyRangeProof(b *testing.B, size int) {
trie, vals := randomTrie(8192)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
start := 2
end := start + size
proof := memorydb.New()
- if err := trie.Prove(entries[start].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[start].k, proof); err != nil {
b.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(entries[end-1].k, 0, proof); err != nil {
+ if err := trie.Prove(entries[end-1].k, proof); err != nil {
b.Fatalf("Failed to prove the last node %v", err)
}
var keys [][]byte
@@ -1030,11 +1024,11 @@ func BenchmarkVerifyRangeNoProof1000(b *testing.B) { benchmarkVerifyRangeNoProof
func benchmarkVerifyRangeNoProof(b *testing.B, size int) {
trie, vals := randomTrie(size)
- var entries entrySlice
+ var entries []*kv
for _, kv := range vals {
entries = append(entries, kv)
}
- sort.Sort(entries)
+ slices.SortFunc(entries, (*kv).cmp)
var keys [][]byte
var values [][]byte
@@ -1104,10 +1098,10 @@ func TestRangeProofKeysWithSharedPrefix(t *testing.T) {
proof := memorydb.New()
start := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")
end := common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
- if err := trie.Prove(start, 0, proof); err != nil {
+ if err := trie.Prove(start, proof); err != nil {
t.Fatalf("failed to prove start: %v", err)
}
- if err := trie.Prove(end, 0, proof); err != nil {
+ if err := trie.Prove(end, proof); err != nil {
t.Fatalf("failed to prove end: %v", err)
}
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index 98132b8bd6..bb81f85819 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -96,7 +96,12 @@ func (t *StateTrie) MustGet(key []byte) []byte {
// If the specified storage slot is not in the trie, nil will be returned.
// If a trie node is not found in the database, a MissingNodeError is returned.
func (t *StateTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) {
- return t.trie.Get(t.hashKey(key))
+ enc, err := t.trie.Get(t.hashKey(key))
+ if err != nil || len(enc) == 0 {
+ return nil, err
+ }
+ _, content, _, err := rlp.Split(enc)
+ return content, err
}
// GetAccount attempts to retrieve an account with provided account address.
@@ -158,7 +163,8 @@ func (t *StateTrie) MustUpdate(key, value []byte) {
// If a node is not found in the database, a MissingNodeError is returned.
func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error {
hk := t.hashKey(key)
- err := t.trie.Update(hk, value)
+ v, _ := rlp.EncodeToBytes(value)
+ err := t.trie.Update(hk, v)
if err != nil {
return err
}
@@ -180,6 +186,10 @@ func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccoun
return nil
}
+func (t *StateTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error {
+ return nil
+}
+
// MustDelete removes any existing value for key from the trie. This function
// will omit any encountered error but just print out an error message.
func (t *StateTrie) MustDelete(key []byte) {
@@ -223,7 +233,7 @@ func (t *StateTrie) GetKey(shaKey []byte) []byte {
// All cached preimages will be also flushed if preimages recording is enabled.
// Once the trie is committed, it's not usable anymore. A new trie must
// be created with new root and updated trie database for following usage
-func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
+func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
// Write all the pre-images to the actual disk database
if len(t.getSecKeyCache()) > 0 {
if t.preimages != nil {
@@ -254,12 +264,18 @@ func (t *StateTrie) Copy() *StateTrie {
}
}
-// NodeIterator returns an iterator that returns nodes of the underlying trie. Iteration
-// starts at the key after the given start key.
-func (t *StateTrie) NodeIterator(start []byte) NodeIterator {
+// NodeIterator returns an iterator that returns nodes of the underlying trie.
+// Iteration starts at the key after the given start key.
+func (t *StateTrie) NodeIterator(start []byte) (NodeIterator, error) {
return t.trie.NodeIterator(start)
}
+// MustNodeIterator is a wrapper of NodeIterator and will omit any encountered
+// error but just print out an error message.
+func (t *StateTrie) MustNodeIterator(start []byte) NodeIterator {
+ return t.trie.MustNodeIterator(start)
+}
+
// hashKey returns the hash of key as an ephemeral buffer.
// The caller must not hold onto the return value because it will become
// invalid on the next call to hashKey or secKey.
diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go
index 2935c3bc48..4bf32b2b74 100644
--- a/trie/secure_trie_test.go
+++ b/trie/secure_trie_test.go
@@ -70,8 +70,8 @@ func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) {
trie.MustUpdate(key, val)
}
}
- root, nodes := trie.Commit(false)
- if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil {
+ root, nodes, _ := trie.Commit(false)
+ if err := triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
panic(fmt.Errorf("failed to commit db %v", err))
}
// Re-create the trie based on the new state
diff --git a/trie/sync_test.go b/trie/sync_test.go
index 7e6b79b497..532f278790 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -62,8 +62,8 @@ func makeTestTrie(scheme string) (ethdb.Database, *Database, *StateTrie, map[str
trie.MustUpdate(key, val)
}
}
- root, nodes := trie.Commit(false)
- if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil {
+ root, nodes, _ := trie.Commit(false)
+ if err := triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
panic(fmt.Errorf("failed to commit db %v", err))
}
if err := triedb.Commit(root, false); err != nil {
diff --git a/trie/test_trie.go b/trie/test_trie.go
index 3df5c58ac0..fb68896975 100644
--- a/trie/test_trie.go
+++ b/trie/test_trie.go
@@ -33,8 +33,9 @@ func GenerateTrie(t *testing.T, trieDB *Database, numKeys int, keySize int) (com
keys, values := FillTrie(t, numKeys, keySize, testTrie)
// Commit the root to [trieDB]
- root, nodes := testTrie.Commit(false)
- err := trieDB.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, err := testTrie.Commit(false)
+ assert.NoError(t, err)
+ err = trieDB.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
assert.NoError(t, err)
err = trieDB.Commit(root, false)
assert.NoError(t, err)
@@ -80,8 +81,17 @@ func AssertTrieConsistency(t testing.TB, root common.Hash, a, b *Database, onLea
t.Fatalf("error creating trieB, root=%s, err=%v", root, err)
}
- itA := NewIterator(trieA.NodeIterator(nil))
- itB := NewIterator(trieB.NodeIterator(nil))
+ nodeItA, err := trieA.NodeIterator(nil)
+ if err != nil {
+ t.Fatalf("error creating node iterator for trieA, root=%s, err=%v", root, err)
+ }
+ itA := NewIterator(nodeItA)
+
+ nodeItB, err := trieB.NodeIterator(nil)
+ if err != nil {
+ t.Fatalf("error creating node iterator for trieB, root=%s, err=%v", root, err)
+ }
+ itB := NewIterator(nodeItB)
count := 0
for itA.Next() && itB.Next() {
count++
@@ -110,7 +120,10 @@ func CorruptTrie(t *testing.T, trieDB *Database, root common.Hash, n int) {
t.Fatal(err)
}
- nodeIt := tr.NodeIterator(nil)
+ nodeIt, err := tr.NodeIterator(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
count := 0
for nodeIt.Next(true) {
count++
@@ -172,8 +185,11 @@ func FillAccounts(
accounts[key] = &acc
}
- newRoot, nodes := tr.Commit(false)
- if err := trieDB.Update(newRoot, root, trienode.NewWithNodeSet(nodes)); err != nil {
+ newRoot, nodes, err := tr.Commit(false)
+ if err != nil {
+ t.Fatalf("error committing trie: %v", err)
+ }
+ if err := trieDB.Update(newRoot, root, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
t.Fatalf("error updating trieDB: %v", err)
}
if err := trieDB.Commit(newRoot, false); err != nil {
diff --git a/trie/testutil/utils.go b/trie/testutil/utils.go
new file mode 100644
index 0000000000..722f62669d
--- /dev/null
+++ b/trie/testutil/utils.go
@@ -0,0 +1,71 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package testutil
+
+import (
+ crand "crypto/rand"
+ "encoding/binary"
+ mrand "math/rand"
+
+ "github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+)
+
+// prng is a pseudo-random number generator seeded with strong randomness
+// from crypto/rand at package initialization, so runs are non-deterministic.
+var prng = initRand()
+
+func initRand() *mrand.Rand {
+ var seed [8]byte
+ crand.Read(seed[:])
+ rnd := mrand.New(mrand.NewSource(int64(binary.LittleEndian.Uint64(seed[:]))))
+ return rnd
+}
+
+// RandBytes generates a random byte slice with specified length.
+func RandBytes(n int) []byte {
+ r := make([]byte, n)
+ prng.Read(r)
+ return r
+}
+
+// RandomHash generates a random blob of data and returns it as a hash.
+func RandomHash() common.Hash {
+ return common.BytesToHash(RandBytes(common.HashLength))
+}
+
+// RandomAddress generates a random blob of data and returns it as an address.
+func RandomAddress() common.Address {
+ return common.BytesToAddress(RandBytes(common.AddressLength))
+}
+
+// RandomNode generates a random node.
+func RandomNode() *trienode.Node {
+ val := RandBytes(100)
+ return trienode.New(crypto.Keccak256Hash(val), val)
+}
diff --git a/trie/tracer.go b/trie/tracer.go
index 51079149ef..5786af4d3e 100644
--- a/trie/tracer.go
+++ b/trie/tracer.go
@@ -17,7 +17,6 @@
package trie
import (
- "github.com/ava-labs/coreth/trie/trienode"
"github.com/ethereum/go-ethereum/common"
)
@@ -114,16 +113,18 @@ func (t *tracer) copy() *tracer {
}
}
-// markDeletions puts all tracked deletions into the provided nodeset.
-func (t *tracer) markDeletions(set *trienode.NodeSet) {
+// deletedNodes returns a list of node paths which are deleted from the trie.
+func (t *tracer) deletedNodes() []string {
+ var paths []string
for path := range t.deletes {
// It's possible a few deleted nodes were embedded
// in their parent before, the deletions can be no
// effect by deleting nothing, filter them out.
- prev, ok := t.accessList[path]
+ _, ok := t.accessList[path]
if !ok {
continue
}
- set.AddNode([]byte(path), trienode.NewWithPrev(common.Hash{}, nil, prev))
+ paths = append(paths, path)
}
+ return paths
}
diff --git a/trie/tracer_test.go b/trie/tracer_test.go
index e11348c23d..8f4ed2af9e 100644
--- a/trie/tracer_test.go
+++ b/trie/tracer_test.go
@@ -70,8 +70,8 @@ func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
}
insertSet := copySet(trie.tracer.inserts) // copy before commit
deleteSet := copySet(trie.tracer.deletes) // copy before commit
- root, nodes := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
seen := setKeys(iterNodes(db, root))
if !compareSet(insertSet, seen) {
@@ -136,8 +136,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
for _, val := range vals {
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
- root, nodes := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -151,8 +151,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
for _, val := range vals {
trie.MustUpdate([]byte(val.k), randBytes(32))
}
- root, nodes = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ = trie.Commit(false)
+ db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -169,8 +169,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
keys = append(keys, string(key))
trie.MustUpdate(key, randBytes(32))
}
- root, nodes = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ = trie.Commit(false)
+ db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -184,8 +184,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
for _, key := range keys {
trie.MustUpdate([]byte(key), nil)
}
- root, nodes = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ = trie.Commit(false)
+ db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -199,8 +199,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
for _, val := range vals {
trie.MustUpdate([]byte(val.k), nil)
}
- root, nodes = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ = trie.Commit(false)
+ db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -218,22 +218,22 @@ func TestAccessListLeak(t *testing.T) {
for _, val := range standard {
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
- root, nodes := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
var cases = []struct {
op func(tr *Trie)
}{
{
func(tr *Trie) {
- it := tr.NodeIterator(nil)
+ it := tr.MustNodeIterator(nil)
for it.Next(true) {
}
},
},
{
func(tr *Trie) {
- it := NewIterator(tr.NodeIterator(nil))
+ it := NewIterator(tr.MustNodeIterator(nil))
for it.Next() {
}
},
@@ -241,7 +241,7 @@ func TestAccessListLeak(t *testing.T) {
{
func(tr *Trie) {
for _, val := range standard {
- tr.Prove([]byte(val.k), 0, rawdb.NewMemoryDatabase())
+ tr.Prove([]byte(val.k), rawdb.NewMemoryDatabase())
}
},
},
@@ -268,8 +268,8 @@ func TestTinyTree(t *testing.T) {
for _, val := range tiny {
trie.MustUpdate([]byte(val.k), randBytes(32))
}
- root, set := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(set))
+ root, set, _ := trie.Commit(false)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(set), nil)
parent := root
trie, _ = New(TrieID(root), db)
@@ -277,8 +277,8 @@ func TestTinyTree(t *testing.T) {
for _, val := range tiny {
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
- root, set = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(set))
+ root, set, _ = trie.Commit(false)
+ db.Update(root, parent, 0, trienode.NewWithNodeSet(set), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, set); err != nil {
@@ -300,7 +300,7 @@ func compareSet(setA, setB map[string]struct{}) bool {
func forNodes(tr *Trie) map[string][]byte {
var (
- it = tr.NodeIterator(nil)
+ it = tr.MustNodeIterator(nil)
nodes = make(map[string][]byte)
)
for it.Next(true) {
@@ -319,7 +319,7 @@ func iterNodes(db *Database, root common.Hash) map[string][]byte {
func forHashedNodes(tr *Trie) map[string][]byte {
var (
- it = tr.NodeIterator(nil)
+ it = tr.MustNodeIterator(nil)
nodes = make(map[string][]byte)
)
for it.Next(true) {
diff --git a/trie/trie.go b/trie/trie.go
index f079863026..66c3d60627 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -49,6 +49,10 @@ type Trie struct {
root node
owner common.Hash
+ // Flag whether the commit operation has already been performed. If so,
+ // the trie is no longer usable (the latest state is invisible).
+
// Keep track of the number leaves which have been inserted since the last
// hashing operation. This number will not directly map to the number of
// actually unhashed nodes.
@@ -70,11 +74,12 @@ func (t *Trie) newFlag() nodeFlag {
// Copy returns a copy of Trie.
func (t *Trie) Copy() *Trie {
return &Trie{
- root: t.root,
- owner: t.owner,
- unhashed: t.unhashed,
- reader: t.reader,
- tracer: t.tracer.copy(),
+ root: t.root,
+ owner: t.owner,
+ committed: t.committed,
+ unhashed: t.unhashed,
+ reader: t.reader,
+ tracer: t.tracer.copy(),
}
}
@@ -84,7 +89,7 @@ func (t *Trie) Copy() *Trie {
// zero hash or the sha3 hash of an empty string, then trie is initially
// empty, otherwise, the root node must be present in database or returns
// a MissingNodeError if not.
-func New(id *ID, db NodeReader) (*Trie, error) {
+func New(id *ID, db *Database) (*Trie, error) {
reader, err := newTrieReader(id.StateRoot, id.Owner, db)
if err != nil {
return nil, err
@@ -110,10 +115,24 @@ func NewEmpty(db *Database) *Trie {
return tr
}
+// MustNodeIterator is a wrapper of NodeIterator and will omit any encountered
+// error but just print out an error message.
+func (t *Trie) MustNodeIterator(start []byte) NodeIterator {
+ it, err := t.NodeIterator(start)
+ if err != nil {
+ log.Error("Unhandled trie error in Trie.NodeIterator", "err", err)
+ }
+ return it
+}
+
// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at
// the key after the given start key.
-func (t *Trie) NodeIterator(start []byte) NodeIterator {
- return newNodeIterator(t, start)
+func (t *Trie) NodeIterator(start []byte) (NodeIterator, error) {
+ // Short circuit if the trie is already committed and not usable.
+ if t.committed {
+ return nil, ErrCommitted
+ }
+ return newNodeIterator(t, start), nil
}
// MustGet is a wrapper of Get and will omit any encountered error but just
@@ -132,6 +151,10 @@ func (t *Trie) MustGet(key []byte) []byte {
// If the requested node is not present in trie, no error will be returned.
// If the trie is corrupted, a MissingNodeError is returned.
func (t *Trie) Get(key []byte) ([]byte, error) {
+ // Short circuit if the trie is already committed and not usable.
+ if t.committed {
+ return nil, ErrCommitted
+ }
value, newroot, didResolve, err := t.get(t.root, keybytesToHex(key), 0)
if err == nil && didResolve {
t.root = newroot
@@ -191,6 +214,10 @@ func (t *Trie) MustGetNode(path []byte) ([]byte, int) {
// If the requested node is not present in trie, no error will be returned.
// If the trie is corrupted, a MissingNodeError is returned.
func (t *Trie) GetNode(path []byte) ([]byte, int, error) {
+ // Short circuit if the trie is already committed and not usable.
+ if t.committed {
+ return nil, 0, ErrCommitted
+ }
item, newroot, resolved, err := t.getNode(t.root, compactToHex(path), 0)
if err != nil {
return nil, resolved, err
@@ -283,6 +310,10 @@ func (t *Trie) MustUpdate(key, value []byte) {
// If the requested node is not present in trie, no error will be returned.
// If the trie is corrupted, a MissingNodeError is returned.
func (t *Trie) Update(key, value []byte) error {
+ // Short circuit if the trie is already committed and not usable.
+ if t.committed {
+ return ErrCommitted
+ }
return t.update(key, value)
}
@@ -397,6 +428,10 @@ func (t *Trie) MustDelete(key []byte) {
// If the requested node is not present in trie, no error will be returned.
// If the trie is corrupted, a MissingNodeError is returned.
func (t *Trie) Delete(key []byte) error {
+ // Short circuit if the trie is already committed and not usable.
+ if t.committed {
+ return ErrCommitted
+ }
t.unhashed++
k := keybytesToHex(key)
_, n, err := t.delete(t.root, nil, k)
@@ -582,17 +617,25 @@ func (t *Trie) Hash() common.Hash {
// The returned nodeset can be nil if the trie is clean (nothing to commit).
// Once the trie is committed, it's not usable anymore. A new trie must
// be created with new root and updated trie database for following usage
-func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
+func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
defer t.tracer.reset()
-
- nodes := trienode.NewNodeSet(t.owner)
- t.tracer.markDeletions(nodes)
-
+ defer func() {
+ t.committed = true
+ }()
// Trie is empty and can be classified into two types of situations:
- // - The trie was empty and no update happens
- // - The trie was non-empty and all nodes are dropped
+ // (a) The trie was empty and no update happens => return nil
+ // (b) The trie was non-empty and all nodes are dropped => return
+ // a node set including all deleted nodes
if t.root == nil {
- return types.EmptyRootHash, nodes
+ paths := t.tracer.deletedNodes()
+ if len(paths) == 0 {
+ return types.EmptyRootHash, nil, nil // case (a)
+ }
+ nodes := trienode.NewNodeSet(t.owner)
+ for _, path := range paths {
+ nodes.AddNode([]byte(path), trienode.NewDeleted())
+ }
+ return types.EmptyRootHash, nodes, nil // case (b)
}
// Derive the hash for all dirty nodes first. We hold the assumption
// in the following procedure that all nodes are hashed.
@@ -604,10 +647,14 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
// Replace the root node with the origin hash in order to
// ensure all resolved nodes are dropped after the commit.
t.root = hashedNode
- return rootHash, nil
+ return rootHash, nil, nil
+ }
+ nodes := trienode.NewNodeSet(t.owner)
+ for _, path := range t.tracer.deletedNodes() {
+ nodes.AddNode([]byte(path), trienode.NewDeleted())
}
t.root = newCommitter(nodes, t.tracer, collectLeaf).Commit(t.root)
- return rootHash, nodes
+ return rootHash, nodes, nil
}
// hashRoot calculates the root hash of the given trie
@@ -631,4 +678,5 @@ func (t *Trie) Reset() {
t.owner = common.Hash{}
t.unhashed = 0
t.tracer.reset()
+ t.committed = false
}
diff --git a/trie/trie_reader.go b/trie/trie_reader.go
index 1112f9d245..48c8708197 100644
--- a/trie/trie_reader.go
+++ b/trie/trie_reader.go
@@ -27,26 +27,24 @@
package trie
import (
- "fmt"
-
+ "github.com/ava-labs/coreth/core/types"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
)
// Reader wraps the Node method of a backing trie store.
type Reader interface {
- // Node retrieves the RLP-encoded trie node blob with the provided trie
- // identifier, node path and the corresponding node hash. No error will
- // be returned if the node is not found.
+ // Node retrieves the trie node blob with the provided trie identifier, node path and
+ // the corresponding node hash. No error will be returned if the node is not found.
+ //
+ // When looking up nodes in the account trie, 'owner' is the zero hash. For contract
+ // storage trie nodes, 'owner' is the hash of the account address containing the
+ // storage.
+ //
+ // TODO(rjl493456442): remove the 'hash' parameter, it's redundant in PBSS.
Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
}
-// NodeReader wraps all the necessary functions for accessing trie node.
-type NodeReader interface {
- // Reader returns a reader for accessing all trie nodes with provided
- // state root. Nil is returned in case the state is not available.
- Reader(root common.Hash) Reader
-}
-
// trieReader is a wrapper of the underlying node reader. It's not safe
// for concurrent usage.
type trieReader struct {
@@ -56,10 +54,16 @@ type trieReader struct {
}
// newTrieReader initializes the trie reader with the given node reader.
-func newTrieReader(stateRoot, owner common.Hash, db NodeReader) (*trieReader, error) {
- reader := db.Reader(stateRoot)
- if reader == nil {
- return nil, fmt.Errorf("state not found #%x", stateRoot)
+func newTrieReader(stateRoot, owner common.Hash, db *Database) (*trieReader, error) {
+ if stateRoot == (common.Hash{}) || stateRoot == types.EmptyRootHash {
+ if stateRoot == (common.Hash{}) {
+ log.Error("Zero state root hash!")
+ }
+ return &trieReader{owner: owner}, nil
+ }
+ reader, err := db.Reader(stateRoot)
+ if err != nil {
+ return nil, &MissingNodeError{Owner: owner, NodeHash: stateRoot, err: err}
}
return &trieReader{owner: owner, reader: reader}, nil
}
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 50e2e0f0a7..b26750ded6 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -87,9 +87,9 @@ func TestMissingRoot(t *testing.T) {
func TestMissingNode(t *testing.T) {
testMissingNode(t, false, rawdb.HashScheme)
- //testMissingNode(t, false, rawdb.PathScheme)
+ testMissingNode(t, false, rawdb.PathScheme)
testMissingNode(t, true, rawdb.HashScheme)
- //testMissingNode(t, true, rawdb.PathScheme)
+ testMissingNode(t, true, rawdb.PathScheme)
}
func testMissingNode(t *testing.T, memonly bool, scheme string) {
@@ -99,8 +99,8 @@ func testMissingNode(t *testing.T, memonly bool, scheme string) {
trie := NewEmpty(triedb)
updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer")
updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf")
- root, nodes := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
if !memonly {
require.NoError(t, triedb.Commit(root, false))
@@ -188,7 +188,7 @@ func TestInsert(t *testing.T) {
updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
exp = common.HexToHash("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab")
- root, _ = trie.Commit(false)
+ root, _, _ = trie.Commit(false)
if root != exp {
t.Errorf("case 2: exp %x got %x", exp, root)
}
@@ -213,8 +213,8 @@ func TestGet(t *testing.T) {
if i == 1 {
return
}
- root, nodes := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
}
}
@@ -285,8 +285,8 @@ func TestReplication(t *testing.T) {
for _, val := range vals {
updateString(trie, val.k, val.v)
}
- root, nodes := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
// create a new trie on top of the database and check that lookups work.
trie2, err := New(TrieID(root), db)
@@ -298,14 +298,14 @@ func TestReplication(t *testing.T) {
t.Errorf("trie2 doesn't have %q => %q", kv.k, kv.v)
}
}
- hash, nodes := trie2.Commit(false)
+ hash, nodes, _ := trie2.Commit(false)
if hash != root {
t.Errorf("root failure. expected %x got %x", root, hash)
}
// recreate the trie after commit
if nodes != nil {
- db.Update(hash, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(hash, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
}
trie2, err = New(TrieID(hash), db)
if err != nil {
@@ -433,44 +433,44 @@ func verifyAccessList(old *Trie, new *Trie, set *trienode.NodeSet) error {
if !ok || n.IsDeleted() {
return errors.New("expect new node")
}
- if len(n.Prev) > 0 {
- return errors.New("unexpected origin value")
- }
+ //if len(n.Prev) > 0 {
+ // return errors.New("unexpected origin value")
+ //}
}
// Check deletion set
- for path, blob := range deletes {
+ for path := range deletes {
n, ok := set.Nodes[path]
if !ok || !n.IsDeleted() {
return errors.New("expect deleted node")
}
- if len(n.Prev) == 0 {
- return errors.New("expect origin value")
- }
- if !bytes.Equal(n.Prev, blob) {
- return errors.New("invalid origin value")
- }
+ //if len(n.Prev) == 0 {
+ // return errors.New("expect origin value")
+ //}
+ //if !bytes.Equal(n.Prev, blob) {
+ // return errors.New("invalid origin value")
+ //}
}
// Check update set
- for path, blob := range updates {
+ for path := range updates {
n, ok := set.Nodes[path]
if !ok || n.IsDeleted() {
return errors.New("expect updated node")
}
- if len(n.Prev) == 0 {
- return errors.New("expect origin value")
- }
- if !bytes.Equal(n.Prev, blob) {
- return errors.New("invalid origin value")
- }
+ //if len(n.Prev) == 0 {
+ // return errors.New("expect origin value")
+ //}
+ //if !bytes.Equal(n.Prev, blob) {
+ // return errors.New("invalid origin value")
+ //}
}
return nil
}
func runRandTest(rt randTest) bool {
var scheme = rawdb.HashScheme
- //if rand.Intn(2) == 0 {
- // scheme = rawdb.PathScheme
- //}
+ if rand.Intn(2) == 0 {
+ scheme = rawdb.PathScheme
+ }
var (
origin = types.EmptyRootHash
triedb = newTestDatabase(rawdb.NewMemoryDatabase(), scheme)
@@ -500,7 +500,7 @@ func runRandTest(rt randTest) bool {
continue
}
proofDb := rawdb.NewMemoryDatabase()
- err := tr.Prove(step.key, 0, proofDb)
+ err := tr.Prove(step.key, proofDb)
if err != nil {
rt[i].err = fmt.Errorf("failed for proving key %#x, %v", step.key, err)
}
@@ -511,9 +511,9 @@ func runRandTest(rt randTest) bool {
case opHash:
tr.Hash()
case opCommit:
- root, nodes := tr.Commit(true)
+ root, nodes, _ := tr.Commit(true)
if nodes != nil {
- triedb.Update(root, origin, trienode.NewWithNodeSet(nodes))
+ triedb.Update(root, origin, 0, trienode.NewWithNodeSet(nodes), nil)
}
newtr, err := New(TrieID(root), triedb)
if err != nil {
@@ -531,7 +531,7 @@ func runRandTest(rt randTest) bool {
origin = root
case opItercheckhash:
checktr := NewEmpty(triedb)
- it := NewIterator(tr.NodeIterator(nil))
+ it := NewIterator(tr.MustNodeIterator(nil))
for it.Next() {
checktr.MustUpdate(it.Key, it.Value)
}
@@ -540,8 +540,8 @@ func runRandTest(rt randTest) bool {
}
case opNodeDiff:
var (
- origIter = origTrie.NodeIterator(nil)
- curIter = tr.NodeIterator(nil)
+ origIter = origTrie.MustNodeIterator(nil)
+ curIter = tr.MustNodeIterator(nil)
origSeen = make(map[string]struct{})
curSeen = make(map[string]struct{})
)
@@ -727,7 +727,7 @@ func TestTinyTrie(t *testing.T) {
t.Errorf("3: got %x, exp %x", root, exp)
}
checktr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
- it := NewIterator(trie.NodeIterator(nil))
+ it := NewIterator(trie.MustNodeIterator(nil))
for it.Next() {
checktr.MustUpdate(it.Key, it.Value)
}
@@ -751,7 +751,7 @@ func TestCommitAfterHash(t *testing.T) {
if exp != root {
t.Errorf("got %x, exp %x", root, exp)
}
- root, _ = trie.Commit(false)
+ root, _, _ = trie.Commit(false)
if exp != root {
t.Errorf("got %x, exp %x", root, exp)
}
@@ -853,8 +853,8 @@ func TestCommitSequence(t *testing.T) {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
// Flush trie -> database
- root, nodes := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
// Flush memdb -> disk (sponge)
db.Commit(root, false)
if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
@@ -894,8 +894,8 @@ func TestCommitSequenceRandomBlobs(t *testing.T) {
trie.MustUpdate(key, val)
}
// Flush trie -> database
- root, nodes := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
// Flush memdb -> disk (sponge)
db.Commit(root, false)
if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
@@ -933,9 +933,9 @@ func TestCommitSequenceStackTrie(t *testing.T) {
stTrie.Update(key, val)
}
// Flush trie -> database
- root, nodes := trie.Commit(false)
+ root, nodes, _ := trie.Commit(false)
// Flush memdb -> disk (sponge)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
db.Commit(root, false)
// And flush stacktrie -> disk
stRoot, err := stTrie.Commit()
@@ -981,9 +981,9 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
trie.Update(key, []byte{0x1})
stTrie.Update(key, []byte{0x1})
// Flush trie -> database
- root, nodes := trie.Commit(false)
+ root, nodes, _ := trie.Commit(false)
// Flush memdb -> disk (sponge)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
db.Commit(root, false)
// And flush stacktrie -> disk
stRoot, err := stTrie.Commit()
@@ -1154,8 +1154,8 @@ func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts []
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
h := trie.Hash()
- root, nodes := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
b.StartTimer()
triedb.Dereference(h)
b.StopTimer()
diff --git a/trie/triedb/hashdb/database.go b/trie/triedb/hashdb/database.go
index c31388af14..c0fcbb939f 100644
--- a/trie/triedb/hashdb/database.go
+++ b/trie/triedb/hashdb/database.go
@@ -28,6 +28,7 @@ package hashdb
import (
"errors"
+ "fmt"
"reflect"
"sync"
"time"
@@ -37,40 +38,41 @@ import (
"github.com/ava-labs/coreth/ethdb"
"github.com/ava-labs/coreth/metrics"
"github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ava-labs/coreth/trie/triestate"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
var (
- memcacheCleanHitMeter = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
- memcacheCleanMissMeter = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
- memcacheCleanReadMeter = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
- memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)
-
- memcacheDirtyHitMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil)
- memcacheDirtyMissMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil)
- memcacheDirtyReadMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil)
- memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil)
- memcacheDirtySizeGauge = metrics.NewRegisteredGaugeFloat64("trie/memcache/dirty/size", nil)
- memcacheDirtyChildSizeGauge = metrics.NewRegisteredGaugeFloat64("trie/memcache/dirty/childsize", nil)
- memcacheDirtyNodesGauge = metrics.NewRegisteredGauge("trie/memcache/dirty/nodes", nil)
-
- memcacheFlushMeter = metrics.NewRegisteredMeter("trie/memcache/flush/count", nil)
- memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
- memcacheFlushLockTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/locktime", nil)
- memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
- memcacheFlushSizeMeter = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)
-
- memcacheGCTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)
- memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)
- memcacheGCSizeMeter = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)
-
- memcacheCommitMeter = metrics.NewRegisteredMeter("trie/memcache/commit/count", nil)
- memcacheCommitTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)
- memcacheCommitLockTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/commit/locktime", nil)
- memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)
- memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
+ memcacheCleanHitMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/hit", nil)
+ memcacheCleanMissMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/miss", nil)
+ memcacheCleanReadMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/read", nil)
+ memcacheCleanWriteMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/write", nil)
+
+ memcacheDirtyHitMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/hit", nil)
+ memcacheDirtyMissMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/miss", nil)
+ memcacheDirtyReadMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/read", nil)
+ memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/write", nil)
+ memcacheDirtyBytesGauge = metrics.NewRegisteredGaugeFloat64("hashdb/memcache/dirty/bytes", nil)
+ memcacheDirtyChildBytesGauge = metrics.NewRegisteredGaugeFloat64("hashdb/memcache/dirty/childbytes", nil)
+ memcacheDirtyNodesGauge = metrics.NewRegisteredGauge("hashdb/memcache/dirty/nodes", nil)
+
+ memcacheFlushMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/count", nil)
+ memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/flush/time", nil)
+ memcacheFlushLockTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/flush/locktime", nil)
+ memcacheFlushNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/nodes", nil)
+ memcacheFlushBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/bytes", nil)
+
+ memcacheGCTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/gc/time", nil)
+ memcacheGCNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/gc/nodes", nil)
+ memcacheGCBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/gc/bytes", nil)
+
+ memcacheCommitMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/count", nil)
+ memcacheCommitTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/commit/time", nil)
+ memcacheCommitLockTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/commit/locktime", nil)
+ memcacheCommitNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/nodes", nil)
+ memcacheCommitBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/bytes", nil)
)
// ChildResolver defines the required method to decode the provided
@@ -297,12 +299,12 @@ func (db *Database) Dereference(root common.Hash) {
db.gcsize += storage - db.dirtiesSize
db.gctime += time.Since(start)
- memcacheDirtySizeGauge.Update(float64(db.dirtiesSize))
- memcacheDirtyChildSizeGauge.Update(float64(db.childrenSize))
+ memcacheDirtyBytesGauge.Update(float64(db.dirtiesSize))
+ memcacheDirtyChildBytesGauge.Update(float64(db.childrenSize))
memcacheDirtyNodesGauge.Update(int64(len(db.dirties)))
memcacheGCTimeTimer.Update(time.Since(start))
- memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
+ memcacheGCBytesMeter.Mark(int64(storage - db.dirtiesSize))
memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))
log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
@@ -452,14 +454,14 @@ func (db *Database) Cap(limit common.StorageSize) error {
db.flushsize += storage - db.dirtiesSize
db.flushtime += time.Since(start)
- memcacheDirtySizeGauge.Update(float64(db.dirtiesSize))
- memcacheDirtyChildSizeGauge.Update(float64(db.childrenSize))
+ memcacheDirtyBytesGauge.Update(float64(db.dirtiesSize))
+ memcacheDirtyChildBytesGauge.Update(float64(db.childrenSize))
memcacheDirtyNodesGauge.Update(int64(len(db.dirties)))
memcacheFlushMeter.Mark(1)
memcacheFlushTimeTimer.Update(time.Since(start))
memcacheFlushLockTimeTimer.Update(lockTime + time.Since(lockStart))
- memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize))
+ memcacheFlushBytesMeter.Mark(int64(storage - db.dirtiesSize))
memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))
log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
@@ -502,14 +504,14 @@ func (db *Database) Commit(node common.Hash, report bool) error {
db.removeFromDirties(item.hash, item.rlp)
}
- memcacheDirtySizeGauge.Update(float64(db.dirtiesSize))
- memcacheDirtyChildSizeGauge.Update(float64(db.childrenSize))
+ memcacheDirtyBytesGauge.Update(float64(db.dirtiesSize))
+ memcacheDirtyChildBytesGauge.Update(float64(db.childrenSize))
memcacheDirtyNodesGauge.Update(int64(len(db.dirties)))
memcacheCommitMeter.Mark(1)
memcacheCommitTimeTimer.Update(time.Since(start))
memcacheCommitLockTimeTimer.Update(lockTime + time.Since(lockStart))
- memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
+ memcacheCommitBytesMeter.Mark(int64(storage - db.dirtiesSize))
memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))
logger := log.Info
@@ -606,7 +608,7 @@ func (db *Database) Initialized(genesisRoot common.Hash) bool {
// Update inserts the dirty nodes in provided nodeset into database and link the
// account trie with multiple storage tries if necessary.
-func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
+func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
// Ensure the parent state is present and signal a warning if not.
if parent != types.EmptyRootHash {
if blob, _ := db.Node(parent); len(blob) == 0 {
@@ -622,7 +624,7 @@ func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode
// UpdateAndReferenceRoot inserts the dirty nodes in provided nodeset into
// database and links the account trie with multiple storage tries if necessary,
// then adds a reference [from] root to the metaroot while holding the db's lock.
-func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
+func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
// Ensure the parent state is present and signal a warning if not.
if parent != types.EmptyRootHash {
if blob, _ := db.Node(parent); len(blob) == 0 {
@@ -703,8 +705,12 @@ func (db *Database) Scheme() string {
}
// Reader retrieves a node reader belonging to the given state root.
-func (db *Database) Reader(root common.Hash) *reader {
- return &reader{db: db}
+// An error will be returned if the requested state is not available.
+func (db *Database) Reader(root common.Hash) (*reader, error) {
+ if _, err := db.Node(root); err != nil {
+ return nil, fmt.Errorf("state %#x is not available, %v", root, err)
+ }
+ return &reader{db: db}, nil
}
// reader is a state reader of Database which implements the Reader interface.
diff --git a/trie/triedb/pathdb/database.go b/trie/triedb/pathdb/database.go
new file mode 100644
index 0000000000..31b586813e
--- /dev/null
+++ b/trie/triedb/pathdb/database.go
@@ -0,0 +1,372 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/ava-labs/coreth/core/rawdb"
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/ethdb"
+ "github.com/ava-labs/coreth/params"
+ "github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ava-labs/coreth/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// maxDiffLayers is the maximum diff layers allowed in the layer tree.
+const maxDiffLayers = 128
+
+// layer is the interface implemented by all state layers which includes some
+// public methods and some additional methods for internal usage.
+type layer interface {
+ // Node retrieves the trie node with the node info. An error will be returned
+ // if the read operation exits abnormally. For example, if the layer is already
+ // stale, or the associated state is regarded as corrupted. Notably, no error
+ // will be returned if the requested node is not found in database.
+ Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
+
+ // rootHash returns the root hash for which this layer was made.
+ rootHash() common.Hash
+
+ // stateID returns the associated state id of layer.
+ stateID() uint64
+
+ // parentLayer returns the subsequent layer of it, or nil if the disk was reached.
+ parentLayer() layer
+
+ // update creates a new layer on top of the existing layer diff tree with
+ // the provided dirty trie nodes along with the state change set.
+ //
+ // Note, the maps are retained by the method to avoid copying everything.
+ update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer
+
+ // journal commits an entire diff hierarchy to disk into a single journal entry.
+ // This is meant to be used during shutdown to persist the layer without
+ // flattening everything down (bad for reorgs).
+ journal(w io.Writer) error
+}
+
+// Config contains the settings for database.
+type Config struct {
+ StateLimit uint64 // Number of recent blocks to maintain state history for
+ CleanSize int // Maximum memory allowance (in bytes) for caching clean nodes
+ DirtySize int // Maximum memory allowance (in bytes) for caching dirty nodes
+ ReadOnly bool // Flag whether the database is opened in read only mode.
+}
+
+var (
+ // defaultCleanSize is the default memory allowance of clean cache.
+ defaultCleanSize = 16 * 1024 * 1024
+
+ // defaultBufferSize is the default memory allowance of node buffer
+ // that aggregates the writes from above until it's flushed into the
+ // disk. Do not increase the buffer size arbitrarily, otherwise the
+ // system pause time will increase when the database writes happen.
+ defaultBufferSize = 128 * 1024 * 1024
+)
+
+// Defaults contains default settings for Ethereum mainnet.
+var Defaults = &Config{
+ StateLimit: params.FullImmutabilityThreshold,
+ CleanSize: defaultCleanSize,
+ DirtySize: defaultBufferSize,
+}
+
+// Database is a multiple-layered structure for maintaining in-memory trie nodes.
+// It consists of one persistent base layer backed by a key-value store, on top
+// of which arbitrarily many in-memory diff layers are stacked. The memory diffs
+// can form a tree with branching, but the disk layer is singleton and common to
+// all. If a reorg goes deeper than the disk layer, a batch of reverse diffs can
+// be applied to rollback. The deepest reorg that can be handled depends on the
+// amount of state histories tracked in the disk.
+//
+// At most one readable and writable database can be opened at the same time in
+// the whole system which ensures that only one database writer can operate disk
+// state. Unexpected open operations can cause the system to panic.
+type Database struct {
+ // readOnly is the flag whether the mutation is allowed to be applied.
+ // It will be set automatically when the database is journaled during
+ // the shutdown to reject all following unexpected mutations.
+ readOnly bool // Indicator if database is opened in read only mode
+ bufferSize int // Memory allowance (in bytes) for caching dirty nodes
+ config *Config // Configuration for database
+ diskdb ethdb.Database // Persistent storage for matured trie nodes
+ tree *layerTree // The group for all known layers
+ lock sync.RWMutex // Lock to prevent mutations from happening at the same time
+
+ // freezer *rawdb.ResettableFreezer // Freezer for storing trie histories, nil possible in tests
+}
+
+// New attempts to load an already existing layer from a persistent key-value
+// store (with a number of memory layers from a journal). If the journal is not
+// matched with the base persistent layer, all the recorded diff layers are discarded.
+func New(diskdb ethdb.Database, config *Config) *Database {
+ if config == nil {
+ config = Defaults
+ }
+ db := &Database{
+ readOnly: config.ReadOnly,
+ bufferSize: config.DirtySize,
+ config: config,
+ diskdb: diskdb,
+ }
+ // Construct the layer tree by resolving the in-disk singleton state
+ // and in-memory layer journal.
+ db.tree = newLayerTree(db.loadLayers())
+
+ // TODO(freezer): enable this after trie history is implemented
+ // Open the freezer for state history if the passed database contains an
+ // ancient store. Otherwise, all the relevant functionalities are disabled.
+ //
+ // Because the freezer can only be opened once at the same time, this
+ // mechanism also ensures that at most one **non-readOnly** database
+ // is opened at the same time to prevent accidental mutation.
+ // if ancient, err := diskdb.AncientDatadir(); err == nil && ancient != "" && !db.readOnly {
+ // freezer, err := rawdb.NewStateHistoryFreezer(ancient, false)
+ // if err != nil {
+ // log.Crit("Failed to open state history freezer", "err", err)
+ // }
+ // db.freezer = freezer
+
+ // // Truncate the extra state histories above in freezer in case
+ // // it's not aligned with the disk layer.
+ // pruned, err := truncateFromHead(db.diskdb, freezer, db.tree.bottom().stateID())
+ // if err != nil {
+ // log.Crit("Failed to truncate extra state histories", "err", err)
+ // }
+ // if pruned != 0 {
+ // log.Warn("Truncated extra state histories", "number", pruned)
+ // }
+ // }
+ log.Warn("Path-based state scheme is an experimental feature")
+ return db
+}
+
+// Reader retrieves a layer belonging to the given state root.
+func (db *Database) Reader(root common.Hash) (layer, error) {
+ l := db.tree.get(root)
+ if l == nil {
+ return nil, fmt.Errorf("state %#x is not available", root)
+ }
+ return l, nil
+}
+
+// Update adds a new layer into the tree, if that can be linked to an existing
+// old parent. It is disallowed to insert a disk layer (the origin of all). Apart
+// from that this function will flatten the extra diff layers at bottom into disk
+// to only keep 128 diff layers in memory by default.
+//
+// The passed in maps(nodes, states) will be retained to avoid copying everything.
+// Therefore, these maps must not be changed afterwards.
+func (db *Database) Update(root common.Hash, parentRoot common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
+ // Hold the lock to prevent concurrent mutations.
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ // Short circuit if the database is in read only mode.
+ if db.readOnly {
+ return errSnapshotReadOnly
+ }
+ if err := db.tree.add(root, parentRoot, block, nodes, states); err != nil {
+ return err
+ }
+ // Keep 128 diff layers in the memory, persistent layer is 129th.
+ // - head layer is paired with HEAD state
+ // - head-1 layer is paired with HEAD-1 state
+ // - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state
+ // - head-128 layer(disk layer) is paired with HEAD-128 state
+ return db.tree.cap(root, maxDiffLayers)
+}
+
+// Commit traverses downwards the layer tree from a specified layer with the
+// provided state root and all the layers below are flattened downwards. It
+// can be used alone and mostly for test purposes.
+func (db *Database) Commit(root common.Hash, report bool) error {
+ // Hold the lock to prevent concurrent mutations.
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ // Short circuit if the database is in read only mode.
+ if db.readOnly {
+ return errSnapshotReadOnly
+ }
+ return db.tree.cap(root, 0)
+}
+
+// Reset rebuilds the database with the specified state as the base.
+//
+// - if target state is empty, clear the stored state and all layers on top
+// - if target state is non-empty, ensure the stored state matches with it
+// and clear all other layers on top.
+func (db *Database) Reset(root common.Hash) error {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ // Short circuit if the database is in read only mode.
+ if db.readOnly {
+ return errSnapshotReadOnly
+ }
+ batch := db.diskdb.NewBatch()
+ root = types.TrieRootHash(root)
+ if root == types.EmptyRootHash {
+ // Empty state is requested as the target, nuke out
+ // the root node and leave all others as dangling.
+ rawdb.DeleteAccountTrieNode(batch, nil)
+ } else {
+ // Ensure the requested state is existent before any
+ // action is applied.
+ _, hash := rawdb.ReadAccountTrieNode(db.diskdb, nil)
+ if hash != root {
+ return fmt.Errorf("state is mismatched, local: %x, target: %x", hash, root)
+ }
+ }
+ // Mark the disk layer as stale before applying any mutation.
+ db.tree.bottom().markStale()
+
+ // Drop the stale state journal in persistent database and
+ // reset the persistent state id back to zero.
+ rawdb.DeleteTrieJournal(batch)
+ rawdb.WritePersistentStateID(batch, 0)
+ if err := batch.Write(); err != nil {
+ return err
+ }
+ // TODO(freezer): enable this after trie history is implemented
+ // Clean up all state histories in freezer. Theoretically
+ // all root->id mappings should be removed as well. Since
+ // mappings can be huge and might take a while to clear
+ // them, just leave them in disk and wait for overwriting.
+ // if db.freezer != nil {
+ // if err := db.freezer.Reset(); err != nil {
+ // return err
+ // }
+ // }
+ // Re-construct a new disk layer backed by persistent state
+ // with **empty clean cache and node buffer**.
+ dl := newDiskLayer(root, 0, db, nil, newNodeBuffer(db.bufferSize, nil, 0))
+ db.tree.reset(dl)
+ log.Info("Rebuilt trie database", "root", root)
+ return nil
+}
+
+// Recover rollbacks the database to a specified historical point.
+// The state is supported as the rollback destination only if it's
+// canonical state and the corresponding trie histories are existent.
+func (db *Database) Recover(root common.Hash, loader triestate.TrieLoader) error {
+ // TODO(freezer): enable this after trie history is implemented
+ return errors.New("state rollback is non-supported")
+}
+
+// Recoverable returns the indicator if the specified state is recoverable.
+func (db *Database) Recoverable(root common.Hash) bool {
+ // Ensure the requested state is a known state.
+ root = types.TrieRootHash(root)
+ id := rawdb.ReadStateID(db.diskdb, root)
+ if id == nil {
+ return false
+ }
+ // Recoverable state must below the disk layer. The recoverable
+ // state only refers the state that is currently not available,
+ // but can be restored by applying state history.
+ dl := db.tree.bottom()
+ if *id >= dl.stateID() {
+ return false
+ }
+ return false
+ // TODO(freezer): enable this after trie history is implemented
+ // Ensure the requested state is a canonical state and all state
+ // histories in range [id+1, disklayer.ID] are present and complete.
+ // parent := root
+ // return checkHistories(db.freezer, *id+1, dl.stateID()-*id, func(m *meta) error {
+ // if m.parent != parent {
+ // return errors.New("unexpected state history")
+ // }
+ // if len(m.incomplete) > 0 {
+ // return errors.New("incomplete state history")
+ // }
+ // parent = m.root
+ // return nil
+ // }) == nil
+}
+
+// Close closes the trie database and the held freezer.
+func (db *Database) Close() error {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ db.readOnly = true
+ return nil
+ // TODO(freezer): enable this after trie history is implemented
+ // if db.freezer == nil {
+ // return nil
+ // }
+ // return db.freezer.Close()
+}
+
+// Size returns the current storage size of the memory cache in front of the
+// persistent database layer.
+func (db *Database) Size() (size common.StorageSize) {
+ db.tree.forEach(func(layer layer) {
+ if diff, ok := layer.(*diffLayer); ok {
+ size += common.StorageSize(diff.memory)
+ }
+ if disk, ok := layer.(*diskLayer); ok {
+ size += disk.size()
+ }
+ })
+ return size
+}
+
+// Initialized returns an indicator if the state data is already
+// initialized in path-based scheme.
+func (db *Database) Initialized(genesisRoot common.Hash) bool {
+ var inited bool
+ db.tree.forEach(func(layer layer) {
+ if layer.rootHash() != types.EmptyRootHash {
+ inited = true
+ }
+ })
+ return inited
+}
+
+// SetBufferSize sets the node buffer size to the provided value(in bytes).
+func (db *Database) SetBufferSize(size int) error {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ db.bufferSize = size
+ return db.tree.bottom().setBufferSize(db.bufferSize)
+}
+
+// Scheme returns the node scheme used in the database.
+func (db *Database) Scheme() string {
+ return rawdb.PathScheme
+}
diff --git a/trie/triedb/pathdb/database_ext.go b/trie/triedb/pathdb/database_ext.go
new file mode 100644
index 0000000000..a5f8d507e7
--- /dev/null
+++ b/trie/triedb/pathdb/database_ext.go
@@ -0,0 +1,14 @@
+// (c) 2023, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package pathdb
+
+import (
+ "github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ava-labs/coreth/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+func (db *Database) UpdateAndReferenceRoot(root common.Hash, parentRoot common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
+ return db.Update(root, parentRoot, block, nodes, states)
+}
diff --git a/trie/triedb/pathdb/database_test.go b/trie/triedb/pathdb/database_test.go
new file mode 100644
index 0000000000..33ef926015
--- /dev/null
+++ b/trie/triedb/pathdb/database_test.go
@@ -0,0 +1,556 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/big"
+ "math/rand"
+ "testing"
+
+ "github.com/ava-labs/coreth/core/rawdb"
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/trie/testutil"
+ "github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ava-labs/coreth/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/stretchr/testify/require"
+)
+
+func updateTrie(addrHash common.Hash, root common.Hash, dirties, cleans map[common.Hash][]byte) (common.Hash, *trienode.NodeSet) {
+ h, err := newTestHasher(addrHash, root, cleans)
+ if err != nil {
+ panic(fmt.Errorf("failed to create hasher, err: %w", err))
+ }
+ for key, val := range dirties {
+ if len(val) == 0 {
+ h.Delete(key.Bytes())
+ } else {
+ h.Update(key.Bytes(), val)
+ }
+ }
+ return h.Commit(false)
+}
+
+func generateAccount(storageRoot common.Hash) types.StateAccount {
+ return types.StateAccount{
+ Nonce: uint64(rand.Intn(100)),
+ Balance: big.NewInt(rand.Int63()),
+ CodeHash: testutil.RandBytes(32),
+ Root: storageRoot,
+ }
+}
+
+const (
+ createAccountOp int = iota
+ modifyAccountOp
+ deleteAccountOp
+ opLen
+)
+
+type genctx struct {
+ accounts map[common.Hash][]byte
+ storages map[common.Hash]map[common.Hash][]byte
+ accountOrigin map[common.Address][]byte
+ storageOrigin map[common.Address]map[common.Hash][]byte
+ nodes *trienode.MergedNodeSet
+}
+
+func newCtx() *genctx {
+ return &genctx{
+ accounts: make(map[common.Hash][]byte),
+ storages: make(map[common.Hash]map[common.Hash][]byte),
+ accountOrigin: make(map[common.Address][]byte),
+ storageOrigin: make(map[common.Address]map[common.Hash][]byte),
+ nodes: trienode.NewMergedNodeSet(),
+ }
+}
+
+type tester struct {
+ db *Database
+ roots []common.Hash
+ preimages map[common.Hash]common.Address
+ accounts map[common.Hash][]byte
+ storages map[common.Hash]map[common.Hash][]byte
+
+ // state snapshots
+ snapAccounts map[common.Hash]map[common.Hash][]byte
+ snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte
+}
+
+func newTester(t *testing.T) *tester {
+ var (
+ // disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
+ disk = rawdb.NewMemoryDatabase()
+ db = New(disk, &Config{CleanSize: 256 * 1024, DirtySize: 256 * 1024})
+ obj = &tester{
+ db: db,
+ preimages: make(map[common.Hash]common.Address),
+ accounts: make(map[common.Hash][]byte),
+ storages: make(map[common.Hash]map[common.Hash][]byte),
+ snapAccounts: make(map[common.Hash]map[common.Hash][]byte),
+ snapStorages: make(map[common.Hash]map[common.Hash]map[common.Hash][]byte),
+ }
+ )
+ for i := 0; i < 2*128; i++ {
+ var parent = types.EmptyRootHash
+ if len(obj.roots) != 0 {
+ parent = obj.roots[len(obj.roots)-1]
+ }
+ root, nodes, states := obj.generate(parent)
+ if err := db.Update(root, parent, uint64(i), nodes, states); err != nil {
+ panic(fmt.Errorf("failed to update state changes, err: %w", err))
+ }
+ obj.roots = append(obj.roots, root)
+ }
+ return obj
+}
+
+func (t *tester) release() {
+ t.db.Close()
+ t.db.diskdb.Close()
+}
+
+func (t *tester) randAccount() (common.Address, []byte) {
+ for addrHash, account := range t.accounts {
+ return t.preimages[addrHash], account
+ }
+ return common.Address{}, nil
+}
+
+func (t *tester) generateStorage(ctx *genctx, addr common.Address) common.Hash {
+ var (
+ addrHash = crypto.Keccak256Hash(addr.Bytes())
+ storage = make(map[common.Hash][]byte)
+ origin = make(map[common.Hash][]byte)
+ )
+ for i := 0; i < 10; i++ {
+ v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testutil.RandBytes(32)))
+ hash := testutil.RandomHash()
+
+ storage[hash] = v
+ origin[hash] = nil
+ }
+ root, set := updateTrie(addrHash, types.EmptyRootHash, storage, nil)
+
+ ctx.storages[addrHash] = storage
+ ctx.storageOrigin[addr] = origin
+ ctx.nodes.Merge(set)
+ return root
+}
+
+func (t *tester) mutateStorage(ctx *genctx, addr common.Address, root common.Hash) common.Hash {
+ var (
+ addrHash = crypto.Keccak256Hash(addr.Bytes())
+ storage = make(map[common.Hash][]byte)
+ origin = make(map[common.Hash][]byte)
+ )
+ for hash, val := range t.storages[addrHash] {
+ origin[hash] = val
+ storage[hash] = nil
+
+ if len(origin) == 3 {
+ break
+ }
+ }
+ for i := 0; i < 3; i++ {
+ v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testutil.RandBytes(32)))
+ hash := testutil.RandomHash()
+
+ storage[hash] = v
+ origin[hash] = nil
+ }
+ root, set := updateTrie(crypto.Keccak256Hash(addr.Bytes()), root, storage, t.storages[addrHash])
+
+ ctx.storages[addrHash] = storage
+ ctx.storageOrigin[addr] = origin
+ ctx.nodes.Merge(set)
+ return root
+}
+
+func (t *tester) clearStorage(ctx *genctx, addr common.Address, root common.Hash) common.Hash {
+ var (
+ addrHash = crypto.Keccak256Hash(addr.Bytes())
+ storage = make(map[common.Hash][]byte)
+ origin = make(map[common.Hash][]byte)
+ )
+ for hash, val := range t.storages[addrHash] {
+ origin[hash] = val
+ storage[hash] = nil
+ }
+ root, set := updateTrie(addrHash, root, storage, t.storages[addrHash])
+ if root != types.EmptyRootHash {
+ panic("failed to clear storage trie")
+ }
+ ctx.storages[addrHash] = storage
+ ctx.storageOrigin[addr] = origin
+ ctx.nodes.Merge(set)
+ return root
+}
+
+func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNodeSet, *triestate.Set) {
+ var (
+ ctx = newCtx()
+ dirties = make(map[common.Hash]struct{})
+ )
+ for i := 0; i < 20; i++ {
+ switch rand.Intn(opLen) {
+ case createAccountOp:
+ // account creation
+ addr := testutil.RandomAddress()
+ addrHash := crypto.Keccak256Hash(addr.Bytes())
+ if _, ok := t.accounts[addrHash]; ok {
+ continue
+ }
+ if _, ok := dirties[addrHash]; ok {
+ continue
+ }
+ dirties[addrHash] = struct{}{}
+
+ root := t.generateStorage(ctx, addr)
+ ctx.accounts[addrHash] = types.SlimAccountRLP(generateAccount(root))
+ ctx.accountOrigin[addr] = nil
+ t.preimages[addrHash] = addr
+
+ case modifyAccountOp:
+ // account mutation
+ addr, account := t.randAccount()
+ if addr == (common.Address{}) {
+ continue
+ }
+ addrHash := crypto.Keccak256Hash(addr.Bytes())
+ if _, ok := dirties[addrHash]; ok {
+ continue
+ }
+ dirties[addrHash] = struct{}{}
+
+ acct, _ := types.FullAccount(account)
+ stRoot := t.mutateStorage(ctx, addr, acct.Root)
+ newAccount := types.SlimAccountRLP(generateAccount(stRoot))
+
+ ctx.accounts[addrHash] = newAccount
+ ctx.accountOrigin[addr] = account
+
+ case deleteAccountOp:
+ // account deletion
+ addr, account := t.randAccount()
+ if addr == (common.Address{}) {
+ continue
+ }
+ addrHash := crypto.Keccak256Hash(addr.Bytes())
+ if _, ok := dirties[addrHash]; ok {
+ continue
+ }
+ dirties[addrHash] = struct{}{}
+
+ acct, _ := types.FullAccount(account)
+ if acct.Root != types.EmptyRootHash {
+ t.clearStorage(ctx, addr, acct.Root)
+ }
+ ctx.accounts[addrHash] = nil
+ ctx.accountOrigin[addr] = account
+ }
+ }
+ root, set := updateTrie(common.Hash{}, parent, ctx.accounts, t.accounts)
+ ctx.nodes.Merge(set)
+
+ // Save state snapshot before commit
+ t.snapAccounts[parent] = copyAccounts(t.accounts)
+ t.snapStorages[parent] = copyStorages(t.storages)
+
+ // Commit all changes to live state set
+ for addrHash, account := range ctx.accounts {
+ if len(account) == 0 {
+ delete(t.accounts, addrHash)
+ } else {
+ t.accounts[addrHash] = account
+ }
+ }
+ for addrHash, slots := range ctx.storages {
+ if _, ok := t.storages[addrHash]; !ok {
+ t.storages[addrHash] = make(map[common.Hash][]byte)
+ }
+ for sHash, slot := range slots {
+ if len(slot) == 0 {
+ delete(t.storages[addrHash], sHash)
+ } else {
+ t.storages[addrHash][sHash] = slot
+ }
+ }
+ }
+ return root, ctx.nodes, triestate.New(ctx.accountOrigin, ctx.storageOrigin, nil)
+}
+
+// lastHash returns the latest root hash, or an empty hash if nothing is cached.
+func (t *tester) lastHash() common.Hash {
+ if len(t.roots) == 0 {
+ return common.Hash{}
+ }
+ return t.roots[len(t.roots)-1]
+}
+
+func (t *tester) verifyState(root common.Hash) error {
+ reader, err := t.db.Reader(root)
+ if err != nil {
+ return err
+ }
+ _, err = reader.Node(common.Hash{}, nil, root)
+ if err != nil {
+ return errors.New("root node is not available")
+ }
+ for addrHash, account := range t.snapAccounts[root] {
+ blob, err := reader.Node(common.Hash{}, addrHash.Bytes(), crypto.Keccak256Hash(account))
+ if err != nil || !bytes.Equal(blob, account) {
+ return fmt.Errorf("account is mismatched: %w", err)
+ }
+ }
+ for addrHash, slots := range t.snapStorages[root] {
+ for hash, slot := range slots {
+ blob, err := reader.Node(addrHash, hash.Bytes(), crypto.Keccak256Hash(slot))
+ if err != nil || !bytes.Equal(blob, slot) {
+ return fmt.Errorf("slot is mismatched: %w", err)
+ }
+ }
+ }
+ return nil
+}
+
+// bottomIndex returns the index of current disk layer.
+func (t *tester) bottomIndex() int {
+ bottom := t.db.tree.bottom()
+ for i := 0; i < len(t.roots); i++ {
+ if t.roots[i] == bottom.rootHash() {
+ return i
+ }
+ }
+ return -1
+}
+
+func TestDatabaseRollback(t *testing.T) {
+ // Verify state histories
+ tester := newTester(t)
+ defer tester.release()
+
+ // Revert database from top to bottom
+ for i := tester.bottomIndex(); i >= 0; i-- {
+ root := tester.roots[i]
+ parent := types.EmptyRootHash
+ if i > 0 {
+ parent = tester.roots[i-1]
+ }
+ loader := newHashLoader(tester.snapAccounts[root], tester.snapStorages[root])
+ require.ErrorContains(t, tester.db.Recover(parent, loader), "state rollback is non-supported")
+ //if err := tester.db.Recover(parent, loader); err != nil {
+ // t.Fatalf("Failed to revert db, err: %v", err)
+ //}
+ tester.verifyState(parent)
+ }
+ // if tester.db.tree.len() != 1 {
+ // t.Fatal("Only disk layer is expected")
+ // }
+}
+
+func TestDatabaseRecoverable(t *testing.T) {
+ var (
+ tester = newTester(t)
+ index = tester.bottomIndex()
+ )
+ defer tester.release()
+
+ var cases = []struct {
+ root common.Hash
+ expect bool
+ }{
+ // Unknown state should be unrecoverable
+ {common.Hash{0x1}, false},
+
+ // Initial state should be recoverable
+ {types.EmptyRootHash, true},
+
+ // Initial state should be recoverable
+ {common.Hash{}, true},
+
+ // Layers below current disk layer are recoverable
+ {tester.roots[index-1], true},
+
+ // Disklayer itself is not recoverable, since it's
+ // available for accessing.
+ {tester.roots[index], false},
+
+ // Layers above current disk layer are not recoverable
+ // since they are available for accessing.
+ {tester.roots[index+1], false},
+ }
+ for i, c := range cases {
+ result := tester.db.Recoverable(c.root)
+ // TODO(freezer): should be result != c.expect
+ // for now always expect false.
+ if result != false {
+ t.Fatalf("case: %d, unexpected result, want %t, got %t", i, c.expect, result)
+ }
+ }
+}
+
+func TestReset(t *testing.T) {
+ var (
+ tester = newTester(t)
+ // index = tester.bottomIndex()
+ )
+ defer tester.release()
+
+ // Reset database to unknown target, should reject it
+ if err := tester.db.Reset(testutil.RandomHash()); err == nil {
+ t.Fatal("Failed to reject invalid reset")
+ }
+ // Reset database to state persisted in the disk
+ if err := tester.db.Reset(types.EmptyRootHash); err != nil {
+ t.Fatalf("Failed to reset database %v", err)
+ }
+ // Ensure journal is deleted from disk
+ if blob := rawdb.ReadTrieJournal(tester.db.diskdb); len(blob) != 0 {
+ t.Fatal("Failed to clean journal")
+ }
+ // Ensure all trie histories are removed
+ // TODO(freezer): enable this after trie history is implemented
+ // for i := 0; i <= index; i++ {
+ // _, err := readHistory(tester.db.freezer, uint64(i+1))
+ // if err == nil {
+ // t.Fatalf("Failed to clean state history, index %d", i+1)
+ // }
+ // }
+ // Verify layer tree structure, single disk layer is expected
+ if tester.db.tree.len() != 1 {
+ t.Fatalf("Extra layer kept %d", tester.db.tree.len())
+ }
+ if tester.db.tree.bottom().rootHash() != types.EmptyRootHash {
+ t.Fatalf("Root hash is not matched exp %x got %x", types.EmptyRootHash, tester.db.tree.bottom().rootHash())
+ }
+}
+
+func TestCommit(t *testing.T) {
+ tester := newTester(t)
+ defer tester.release()
+
+ if err := tester.db.Commit(tester.lastHash(), false); err != nil {
+ t.Fatalf("Failed to cap database, err: %v", err)
+ }
+ // Verify layer tree structure, single disk layer is expected
+ if tester.db.tree.len() != 1 {
+ t.Fatal("Layer tree structure is invalid")
+ }
+ if tester.db.tree.bottom().rootHash() != tester.lastHash() {
+ t.Fatal("Layer tree structure is invalid")
+ }
+ // Verify states
+ if err := tester.verifyState(tester.lastHash()); err != nil {
+ t.Fatalf("State is invalid, err: %v", err)
+ }
+ // Verify state histories
+ // TODO(freezer): enable this after trie history is implemented
+ // if err := tester.verifyHistory(); err != nil {
+ // t.Fatalf("State history is invalid, err: %v", err)
+ // }
+}
+
+func TestJournal(t *testing.T) {
+ tester := newTester(t)
+ defer tester.release()
+
+ if err := tester.db.Journal(tester.lastHash()); err != nil {
+ t.Errorf("Failed to journal, err: %v", err)
+ }
+ tester.db.Close()
+ tester.db = New(tester.db.diskdb, nil)
+
+ // Verify states including disk layer and all diff on top.
+ for i := 0; i < len(tester.roots); i++ {
+ if i >= tester.bottomIndex() {
+ if err := tester.verifyState(tester.roots[i]); err != nil {
+ t.Fatalf("Invalid state, err: %v", err)
+ }
+ continue
+ }
+ if err := tester.verifyState(tester.roots[i]); err == nil {
+ t.Fatal("Unexpected state")
+ }
+ }
+}
+
+func TestCorruptedJournal(t *testing.T) {
+ tester := newTester(t)
+ defer tester.release()
+
+ if err := tester.db.Journal(tester.lastHash()); err != nil {
+ t.Errorf("Failed to journal, err: %v", err)
+ }
+ tester.db.Close()
+ _, root := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil)
+
+ // Mutate the journal in disk, it should be regarded as invalid
+ blob := rawdb.ReadTrieJournal(tester.db.diskdb)
+ blob[0] = 1
+ rawdb.WriteTrieJournal(tester.db.diskdb, blob)
+
+ // Verify states, all not-yet-written states should be discarded
+ tester.db = New(tester.db.diskdb, nil)
+ for i := 0; i < len(tester.roots); i++ {
+ if tester.roots[i] == root {
+ if err := tester.verifyState(root); err != nil {
+ t.Fatalf("Disk state is corrupted, err: %v", err)
+ }
+ continue
+ }
+ if err := tester.verifyState(tester.roots[i]); err == nil {
+ t.Fatal("Unexpected state")
+ }
+ }
+}
+
+// copyAccounts returns a deep-copied account set of the provided one.
+func copyAccounts(set map[common.Hash][]byte) map[common.Hash][]byte {
+ copied := make(map[common.Hash][]byte, len(set))
+ for key, val := range set {
+ copied[key] = common.CopyBytes(val)
+ }
+ return copied
+}
+
+// copyStorages returns a deep-copied storage set of the provided one.
+func copyStorages(set map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
+ copied := make(map[common.Hash]map[common.Hash][]byte, len(set))
+ for addrHash, subset := range set {
+ copied[addrHash] = make(map[common.Hash][]byte, len(subset))
+ for key, val := range subset {
+ copied[addrHash][key] = common.CopyBytes(val)
+ }
+ }
+ return copied
+}
diff --git a/trie/triedb/pathdb/difflayer.go b/trie/triedb/pathdb/difflayer.go
new file mode 100644
index 0000000000..88b18d46f0
--- /dev/null
+++ b/trie/triedb/pathdb/difflayer.go
@@ -0,0 +1,184 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ava-labs/coreth/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// diffLayer represents a collection of modifications made to the in-memory tries
+// along with associated state changes after running a block on top.
+//
+// The goal of a diff layer is to act as a journal, tracking recent modifications
+// made to the state, that have not yet graduated into a semi-immutable state.
+type diffLayer struct {
+ // Immutables
+ root common.Hash // Root hash to which this layer diff belongs to
+ id uint64 // Corresponding state id
+ block uint64 // Associated block number
+ nodes map[common.Hash]map[string]*trienode.Node // Cached trie nodes indexed by owner and path
+ states *triestate.Set // Associated state change set for building history
+ memory uint64 // Approximate guess as to how much memory we use
+
+ parent layer // Parent layer modified by this one, never nil, **can be changed**
+ lock sync.RWMutex // Lock used to protect parent
+}
+
+// newDiffLayer creates a new diff layer on top of an existing layer.
+func newDiffLayer(parent layer, root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer {
+ var (
+ size int64
+ count int
+ )
+ dl := &diffLayer{
+ root: root,
+ id: id,
+ block: block,
+ nodes: nodes,
+ states: states,
+ parent: parent,
+ }
+ for _, subset := range nodes {
+ for path, n := range subset {
+ dl.memory += uint64(n.Size() + len(path))
+ size += int64(len(n.Blob) + len(path))
+ }
+ count += len(subset)
+ }
+ if states != nil {
+ dl.memory += uint64(states.Size())
+ }
+ dirtyWriteMeter.Mark(size)
+ diffLayerNodesMeter.Mark(int64(count))
+ diffLayerBytesMeter.Mark(int64(dl.memory))
+ log.Debug("Created new diff layer", "id", id, "block", block, "nodes", count, "size", common.StorageSize(dl.memory))
+ return dl
+}
+
+// rootHash implements the layer interface, returning the root hash of
+// corresponding state.
+func (dl *diffLayer) rootHash() common.Hash {
+ return dl.root
+}
+
+// stateID implements the layer interface, returning the state id of the layer.
+func (dl *diffLayer) stateID() uint64 {
+ return dl.id
+}
+
+// parentLayer implements the layer interface, returning the subsequent
+// layer of the diff layer.
+func (dl *diffLayer) parentLayer() layer {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ return dl.parent
+}
+
+// node retrieves the node with provided node information. It's the internal
+// version of Node function with additional accessed layer tracked. No error
+// will be returned if node is not found.
+func (dl *diffLayer) node(owner common.Hash, path []byte, hash common.Hash, depth int) ([]byte, error) {
+ // Hold the lock, ensure the parent won't be changed during the
+ // state accessing.
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ // If the trie node is known locally, return it
+ subset, ok := dl.nodes[owner]
+ if ok {
+ n, ok := subset[string(path)]
+ if ok {
+ // If the trie node is not hash matched, or marked as removed,
+ // bubble up an error here. It shouldn't happen at all.
+ if n.Hash != hash {
+ dirtyFalseMeter.Mark(1)
+ log.Error("Unexpected trie node in diff layer", "owner", owner, "path", path, "expect", hash, "got", n.Hash)
+ return nil, newUnexpectedNodeError("diff", hash, n.Hash, owner, path)
+ }
+ dirtyHitMeter.Mark(1)
+ dirtyNodeHitDepthHist.Update(int64(depth))
+ dirtyReadMeter.Mark(int64(len(n.Blob)))
+ return n.Blob, nil
+ }
+ }
+ // Trie node unknown to this layer, resolve from parent
+ if diff, ok := dl.parent.(*diffLayer); ok {
+ return diff.node(owner, path, hash, depth+1)
+ }
+ // Failed to resolve through diff layers, fallback to disk layer
+ return dl.parent.Node(owner, path, hash)
+}
+
+// Node implements the layer interface, retrieving the trie node blob with the
+// provided node information. No error will be returned if the node is not found.
+func (dl *diffLayer) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
+ return dl.node(owner, path, hash, 0)
+}
+
+// update implements the layer interface, creating a new layer on top of the
+// existing layer tree with the specified data items.
+func (dl *diffLayer) update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer {
+ return newDiffLayer(dl, root, id, block, nodes, states)
+}
+
+// persist flushes the diff layer and all its parent layers to disk layer.
+func (dl *diffLayer) persist(force bool) (layer, error) {
+ if parent, ok := dl.parentLayer().(*diffLayer); ok {
+ // Hold the lock to prevent any read operation until the new
+ // parent is linked correctly.
+ dl.lock.Lock()
+
+ // The merging of diff layers starts at the bottom-most layer,
+ // therefore we recurse down here, flattening on the way up
+ // (diffToDisk).
+ result, err := parent.persist(force)
+ if err != nil {
+ dl.lock.Unlock()
+ return nil, err
+ }
+ dl.parent = result
+ dl.lock.Unlock()
+ }
+ return diffToDisk(dl, force)
+}
+
+// diffToDisk merges a bottom-most diff into the persistent disk layer underneath
+// it. The method will panic if called onto a non-bottom-most diff layer.
+func diffToDisk(layer *diffLayer, force bool) (layer, error) {
+ disk, ok := layer.parentLayer().(*diskLayer)
+ if !ok {
+ panic(fmt.Sprintf("unknown layer type: %T", layer.parentLayer()))
+ }
+ return disk.commit(layer, force)
+}
diff --git a/trie/triedb/pathdb/difflayer_test.go b/trie/triedb/pathdb/difflayer_test.go
new file mode 100644
index 0000000000..ee1b00d6aa
--- /dev/null
+++ b/trie/triedb/pathdb/difflayer_test.go
@@ -0,0 +1,180 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/ava-labs/coreth/core/rawdb"
+ "github.com/ava-labs/coreth/trie/testutil"
+ "github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+func emptyLayer() *diskLayer {
+ return &diskLayer{
+ db: New(rawdb.NewMemoryDatabase(), nil),
+ buffer: newNodeBuffer(defaultBufferSize, nil, 0),
+ }
+}
+
+// goos: darwin
+// goarch: arm64
+// pkg: github.com/ethereum/go-ethereum/trie
+// BenchmarkSearch128Layers
+// BenchmarkSearch128Layers-8 243826 4755 ns/op
+func BenchmarkSearch128Layers(b *testing.B) { benchmarkSearch(b, 0, 128) }
+
+// goos: darwin
+// goarch: arm64
+// pkg: github.com/ethereum/go-ethereum/trie
+// BenchmarkSearch512Layers
+// BenchmarkSearch512Layers-8 49686 24256 ns/op
+func BenchmarkSearch512Layers(b *testing.B) { benchmarkSearch(b, 0, 512) }
+
+// goos: darwin
+// goarch: arm64
+// pkg: github.com/ethereum/go-ethereum/trie
+// BenchmarkSearch1Layer
+// BenchmarkSearch1Layer-8 14062725 88.40 ns/op
+func BenchmarkSearch1Layer(b *testing.B) { benchmarkSearch(b, 127, 128) }
+
+func benchmarkSearch(b *testing.B, depth int, total int) {
+ var (
+ npath []byte
+ nhash common.Hash
+ nblob []byte
+ )
+ // First, we set up 128 diff layers, with 3K items each
+ fill := func(parent layer, index int) *diffLayer {
+ nodes := make(map[common.Hash]map[string]*trienode.Node)
+ nodes[common.Hash{}] = make(map[string]*trienode.Node)
+ for i := 0; i < 3000; i++ {
+ var (
+ path = testutil.RandBytes(32)
+ node = testutil.RandomNode()
+ )
+ nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob)
+ if npath == nil && depth == index {
+ npath = common.CopyBytes(path)
+ nblob = common.CopyBytes(node.Blob)
+ nhash = node.Hash
+ }
+ }
+ return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil)
+ }
+ var layer layer
+ layer = emptyLayer()
+ for i := 0; i < total; i++ {
+ layer = fill(layer, i)
+ }
+ b.ResetTimer()
+
+ var (
+ have []byte
+ err error
+ )
+ for i := 0; i < b.N; i++ {
+ have, err = layer.Node(common.Hash{}, npath, nhash)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ if !bytes.Equal(have, nblob) {
+ b.Fatalf("have %x want %x", have, nblob)
+ }
+}
+
+// goos: darwin
+// goarch: arm64
+// pkg: github.com/ethereum/go-ethereum/trie
+// BenchmarkPersist
+// BenchmarkPersist-8 10 111252975 ns/op
+func BenchmarkPersist(b *testing.B) {
+ // First, we set up 128 diff layers, with 3K items each
+ fill := func(parent layer) *diffLayer {
+ nodes := make(map[common.Hash]map[string]*trienode.Node)
+ nodes[common.Hash{}] = make(map[string]*trienode.Node)
+ for i := 0; i < 3000; i++ {
+ var (
+ path = testutil.RandBytes(32)
+ node = testutil.RandomNode()
+ )
+ nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob)
+ }
+ return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil)
+ }
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ var layer layer
+ layer = emptyLayer()
+ for i := 1; i < 128; i++ {
+ layer = fill(layer)
+ }
+ b.StartTimer()
+
+ dl, ok := layer.(*diffLayer)
+ if !ok {
+ break
+ }
+ dl.persist(false)
+ }
+}
+
+// BenchmarkJournal benchmarks the performance for journaling the layers.
+//
+// BenchmarkJournal
+// BenchmarkJournal-8 10 110969279 ns/op
+func BenchmarkJournal(b *testing.B) {
+ b.SkipNow()
+
+ // First, we set up 128 diff layers, with 3K items each
+ fill := func(parent layer) *diffLayer {
+ nodes := make(map[common.Hash]map[string]*trienode.Node)
+ nodes[common.Hash{}] = make(map[string]*trienode.Node)
+ for i := 0; i < 3000; i++ {
+ var (
+ path = testutil.RandBytes(32)
+ node = testutil.RandomNode()
+ )
+ nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob)
+ }
+ // TODO(rjl493456442) a non-nil state set is expected.
+ return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil)
+ }
+ var layer layer
+ layer = emptyLayer()
+ for i := 0; i < 128; i++ {
+ layer = fill(layer)
+ }
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ layer.journal(new(bytes.Buffer))
+ }
+}
diff --git a/trie/triedb/pathdb/disklayer.go b/trie/triedb/pathdb/disklayer.go
new file mode 100644
index 0000000000..88df043ed6
--- /dev/null
+++ b/trie/triedb/pathdb/disklayer.go
@@ -0,0 +1,309 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/VictoriaMetrics/fastcache"
+ "github.com/ava-labs/coreth/core/rawdb"
+ "github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ava-labs/coreth/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/log"
+ "golang.org/x/crypto/sha3"
+)
+
+// diskLayer is a low level persistent layer built on top of a key-value store.
+type diskLayer struct {
+ root common.Hash // Immutable, root hash to which this layer was made for
+ id uint64 // Immutable, corresponding state id
+ db *Database // Path-based trie database
+ cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs
+ buffer *nodebuffer // Node buffer to aggregate writes
+ stale bool // Signals that the layer became stale (state progressed)
+ lock sync.RWMutex // Lock used to protect stale flag
+}
+
+// newDiskLayer creates a new disk layer based on the passing arguments.
+func newDiskLayer(root common.Hash, id uint64, db *Database, cleans *fastcache.Cache, buffer *nodebuffer) *diskLayer {
+ // Initialize a clean cache if the memory allowance is not zero
+ // or reuse the provided cache if it is not nil (inherited from
+ // the original disk layer).
+ if cleans == nil && db.config.CleanSize != 0 {
+ cleans = fastcache.New(db.config.CleanSize)
+ }
+ return &diskLayer{
+ root: root,
+ id: id,
+ db: db,
+ cleans: cleans,
+ buffer: buffer,
+ }
+}
+
+// rootHash implements the layer interface, returning root hash of corresponding state.
+func (dl *diskLayer) rootHash() common.Hash {
+ return dl.root
+}
+
+// stateID implements the layer interface, returning the state id of disk layer.
+func (dl *diskLayer) stateID() uint64 {
+ return dl.id
+}
+
+// parentLayer implements the layer interface, returning nil as there's no layer
+// below the disk.
+func (dl *diskLayer) parentLayer() layer {
+ return nil
+}
+
+// isStale returns whether this layer has become stale (was flattened across) or if
+// it's still live.
+func (dl *diskLayer) isStale() bool {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ return dl.stale
+}
+
+// markStale sets the stale flag as true.
+func (dl *diskLayer) markStale() {
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+
+ if dl.stale {
+ panic("triedb disk layer is stale") // we've committed into the same base from two children, boom
+ }
+ dl.stale = true
+}
+
+// Node implements the layer interface, retrieving the trie node with the
+// provided node info. No error will be returned if the node is not found.
+func (dl *diskLayer) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ if dl.stale {
+ return nil, errSnapshotStale
+ }
+ // Try to retrieve the trie node from the not-yet-written
+ // node buffer first. Note the buffer is lock free since
+ // it's impossible to mutate the buffer before tagging the
+ // layer as stale.
+ n, err := dl.buffer.node(owner, path, hash)
+ if err != nil {
+ return nil, err
+ }
+ if n != nil {
+ dirtyHitMeter.Mark(1)
+ dirtyReadMeter.Mark(int64(len(n.Blob)))
+ return n.Blob, nil
+ }
+ dirtyMissMeter.Mark(1)
+
+ // Try to retrieve the trie node from the clean memory cache
+ key := cacheKey(owner, path)
+ if dl.cleans != nil {
+ if blob := dl.cleans.Get(nil, key); len(blob) > 0 {
+ h := newHasher()
+ defer h.release()
+
+ got := h.hash(blob)
+ if got == hash {
+ cleanHitMeter.Mark(1)
+ cleanReadMeter.Mark(int64(len(blob)))
+ return blob, nil
+ }
+ cleanFalseMeter.Mark(1)
+ log.Error("Unexpected trie node in clean cache", "owner", owner, "path", path, "expect", hash, "got", got)
+ }
+ cleanMissMeter.Mark(1)
+ }
+ // Try to retrieve the trie node from the disk.
+ var (
+ nBlob []byte
+ nHash common.Hash
+ )
+ if owner == (common.Hash{}) {
+ nBlob, nHash = rawdb.ReadAccountTrieNode(dl.db.diskdb, path)
+ } else {
+ nBlob, nHash = rawdb.ReadStorageTrieNode(dl.db.diskdb, owner, path)
+ }
+ if nHash != hash {
+ diskFalseMeter.Mark(1)
+ log.Error("Unexpected trie node in disk", "owner", owner, "path", path, "expect", hash, "got", nHash)
+ return nil, newUnexpectedNodeError("disk", hash, nHash, owner, path)
+ }
+ if dl.cleans != nil && len(nBlob) > 0 {
+ dl.cleans.Set(key, nBlob)
+ cleanWriteMeter.Mark(int64(len(nBlob)))
+ }
+ return nBlob, nil
+}
+
+// update implements the layer interface, returning a new diff layer on top
+// with the given state set.
+func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer {
+ return newDiffLayer(dl, root, id, block, nodes, states)
+}
+
+// commit merges the given bottom-most diff layer into the node buffer
+// and returns a newly constructed disk layer. Note the current disk
+// layer must be tagged as stale first to prevent re-access.
+func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+
+ // TODO(freezer): enable this after trie history is implemented
+ // Construct and store the state history first. If crash happens
+ // after storing the state history but without flushing the
+ // corresponding states(journal), the stored state history will
+ // be truncated in the next restart.
+ // if dl.db.freezer != nil {
+ // return nil, errors.New("state history is not supported - requires freezer")
+ // err := writeHistory(dl.db.diskdb, dl.db.freezer, bottom, dl.db.config.StateLimit)
+ // if err != nil {
+ // return nil, err
+ // }
+ // }
+ // Mark the diskLayer as stale before applying any mutations on top.
+ dl.stale = true
+
+ // Store the root->id lookup afterwards. All stored lookups are
+ // identified by the **unique** state root. It's impossible that
+ // in the same chain blocks are not adjacent but have the same
+ // root.
+ if dl.id == 0 {
+ rawdb.WriteStateID(dl.db.diskdb, dl.root, 0)
+ }
+ rawdb.WriteStateID(dl.db.diskdb, bottom.rootHash(), bottom.stateID())
+
+ // Construct a new disk layer by merging the nodes from the provided
+ // diff layer, and flush the content in disk layer if there are too
+ // many nodes cached. The clean cache is inherited from the original
+ // disk layer for reusing.
+ ndl := newDiskLayer(bottom.root, bottom.stateID(), dl.db, dl.cleans, dl.buffer.commit(bottom.nodes))
+ err := ndl.buffer.flush(ndl.db.diskdb, ndl.cleans, ndl.id, force)
+ if err != nil {
+ return nil, err
+ }
+ return ndl, nil
+}
+
+// nolint:unused
+// revert applies the given state history and return a reverted disk layer.
+func (dl *diskLayer) revert(h *history, loader triestate.TrieLoader) (*diskLayer, error) {
+ if h.meta.root != dl.rootHash() {
+ return nil, errUnexpectedHistory
+ }
+ // Reject if the provided state history is incomplete. It's due to
+ // a large construct SELF-DESTRUCT which can't be handled because
+ // of memory limitation.
+ if len(h.meta.incomplete) > 0 {
+ return nil, errors.New("incomplete state history")
+ }
+ if dl.id == 0 {
+ return nil, fmt.Errorf("%w: zero state id", errStateUnrecoverable)
+ }
+ // Apply the reverse state changes upon the current state. This must
+ // be done before holding the lock in order to access state in "this"
+ // layer.
+ nodes, err := triestate.Apply(h.meta.parent, h.meta.root, h.accounts, h.storages, loader)
+ if err != nil {
+ return nil, err
+ }
+ // Mark the diskLayer as stale before applying any mutations on top.
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+
+ dl.stale = true
+
+ // State change may be applied to node buffer, or the persistent
+ // state, depends on if node buffer is empty or not. If the node
+ // buffer is not empty, it means that the state transition that
+ // needs to be reverted is not yet flushed and cached in node
+ // buffer, otherwise, manipulate persistent state directly.
+ if !dl.buffer.empty() {
+ err := dl.buffer.revert(dl.db.diskdb, nodes)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ batch := dl.db.diskdb.NewBatch()
+ writeNodes(batch, nodes, dl.cleans)
+ rawdb.WritePersistentStateID(batch, dl.id-1)
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write states", "err", err)
+ }
+ }
+ return newDiskLayer(h.meta.parent, dl.id-1, dl.db, dl.cleans, dl.buffer), nil
+}
+
+// setBufferSize sets the node buffer size to the provided value.
+func (dl *diskLayer) setBufferSize(size int) error {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ if dl.stale {
+ return errSnapshotStale
+ }
+ return dl.buffer.setSize(size, dl.db.diskdb, dl.cleans, dl.id)
+}
+
+// size returns the approximate size of cached nodes in the disk layer.
+func (dl *diskLayer) size() common.StorageSize {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ if dl.stale {
+ return 0
+ }
+ return common.StorageSize(dl.buffer.size)
+}
+
+// hasher is used to compute the keccak256 hash of the provided data.
+type hasher struct{ sha crypto.KeccakState }
+
+var hasherPool = sync.Pool{
+ New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
+}
+
+func newHasher() *hasher {
+ return hasherPool.Get().(*hasher)
+}
+
+func (h *hasher) hash(data []byte) common.Hash {
+ return crypto.HashData(h.sha, data)
+}
+
+func (h *hasher) release() {
+ hasherPool.Put(h)
+}
diff --git a/trie/triedb/pathdb/errors.go b/trie/triedb/pathdb/errors.go
new file mode 100644
index 0000000000..4882429e7b
--- /dev/null
+++ b/trie/triedb/pathdb/errors.go
@@ -0,0 +1,63 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+var (
+ // errSnapshotReadOnly is returned if the database is opened in read only mode
+ // and mutation is requested.
+ errSnapshotReadOnly = errors.New("read only")
+
+ // errSnapshotStale is returned from data accessors if the underlying layer
+ // layer had been invalidated due to the chain progressing forward far enough
+ // to not maintain the layer's original state.
+ errSnapshotStale = errors.New("layer stale")
+
+ // nolint:unused
+ // errUnexpectedHistory is returned if an unmatched state history is applied
+ // to the database for state rollback.
+ errUnexpectedHistory = errors.New("unexpected state history")
+
+ // nolint:unused
+ // errStateUnrecoverable is returned if state is required to be reverted to
+ // a destination without associated state history available.
+ errStateUnrecoverable = errors.New("state is unrecoverable")
+
+ // errUnexpectedNode is returned if the requested node with specified path is
+ // not hash matched with expectation.
+ errUnexpectedNode = errors.New("unexpected node")
+)
+
+func newUnexpectedNodeError(loc string, expHash common.Hash, gotHash common.Hash, owner common.Hash, path []byte) error {
+ return fmt.Errorf("%w, loc: %s, node: (%x %v), %x!=%x", errUnexpectedNode, loc, owner, path, expHash, gotHash)
+}
diff --git a/trie/triedb/pathdb/history.go b/trie/triedb/pathdb/history.go
new file mode 100644
index 0000000000..c261c53ea6
--- /dev/null
+++ b/trie/triedb/pathdb/history.go
@@ -0,0 +1,496 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+
+ "github.com/ava-labs/coreth/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+ "golang.org/x/exp/slices"
+)
+
+// State history records the state changes involved in executing a block. The
+// state can be reverted to the previous version by applying the associated
+// history object (state reverse diff). State history objects are kept to
+// guarantee that the system can perform state rollbacks in case of deep reorg.
+//
+// Each state transition will generate a state history object. Note that not
+// every block has a corresponding state history object. If a block performs
+// no state changes whatsoever, no state is created for it. Each state history
+// will have a sequentially increasing number acting as its unique identifier.
+//
+// The state history is written to disk (ancient store) when the corresponding
+// diff layer is merged into the disk layer. At the same time, system can prune
+// the oldest histories according to config.
+//
+// Disk State
+// ^
+// |
+// +------------+ +---------+ +---------+ +---------+
+// | Init State |---->| State 1 |---->| ... |---->| State n |
+// +------------+ +---------+ +---------+ +---------+
+//
+// +-----------+ +------+ +-----------+
+// | History 1 |----> | ... |---->| History n |
+// +-----------+ +------+ +-----------+
+//
+// # Rollback
+//
+// If the system wants to roll back to a previous state n, it needs to ensure
+// all history objects from n+1 up to the current disk layer are existent. The
+// history objects are applied to the state in reverse order, starting from the
+// current disk layer.
+
+const (
+	accountIndexSize = common.AddressLength + 13 // The length of encoded account index (address + 1B length + 3*4B big-endian fields)
+	slotIndexSize    = common.HashLength + 5     // The length of encoded slot index (hash + 1B length + 4B big-endian offset)
+	historyMetaSize  = 9 + 2*common.HashLength   // The length of fixed size part of meta object (1B version + 8B block + two roots)
+
+	stateHistoryVersion = uint8(0) // initial version of state history structure.
+)
+
+// Each state history entry is consisted of five elements:
+//
+// # metadata
+// This object contains a few meta fields, such as the associated state root,
+// block number, version tag and so on. This object may contain an extra
+// accountHash list which means the storage changes belong to these accounts
+// are not complete due to large contract destruction. The incomplete history
+// can not be used for rollback and serving archive state request.
+//
+// # account index
+// This object contains some index information of account. For example, offset
+// and length indicate the location of the data belonging to the account. Besides,
+// storageOffset and storageSlots indicate the storage modification location
+// belonging to the account.
+//
+// The size of each account index is *fixed*, and all indexes are sorted
+// lexicographically. Thus binary search can be performed to quickly locate a
+// specific account.
+//
+// # account data
+// Account data is a concatenated byte stream composed of all account data.
+// The account data can be solved by the offset and length info indicated
+// by corresponding account index.
+//
+// fixed size
+// ^ ^
+// / \
+// +-----------------+-----------------+----------------+-----------------+
+// | Account index 1 | Account index 2 | ... | Account index N |
+// +-----------------+-----------------+----------------+-----------------+
+// |
+// | length
+// offset |----------------+
+// v v
+// +----------------+----------------+----------------+----------------+
+// | Account data 1 | Account data 2 | ... | Account data N |
+// +----------------+----------------+----------------+----------------+
+//
+// # storage index
+// This object is similar with account index. It's also fixed size and contains
+// the location info of storage slot data.
+//
+// # storage data
+// Storage data is a concatenated byte stream composed of all storage slot data.
+// The storage slot data can be solved by the location info indicated by
+// corresponding account index and storage slot index.
+//
+// fixed size
+// ^ ^
+// / \
+// +-----------------+-----------------+----------------+-----------------+
+// | Account index 1 | Account index 2 | ... | Account index N |
+// +-----------------+-----------------+----------------+-----------------+
+// |
+// | storage slots
+// storage offset |-----------------------------------------------------+
+// v v
+// +-----------------+-----------------+-----------------+
+// | storage index 1 | storage index 2 | storage index 3 |
+// +-----------------+-----------------+-----------------+
+// | length
+// offset |-------------+
+// v v
+// +-------------+
+// | slot data 1 |
+// +-------------+
+
+// accountIndex describes the metadata belonging to an account. Its encoded
+// form occupies exactly accountIndexSize bytes, which allows binary search
+// over a concatenated index buffer.
+type accountIndex struct {
+	address       common.Address // The address of account
+	length        uint8          // The length of account data, size limited by 255
+	offset        uint32         // The offset of item in account data table
+	storageOffset uint32         // The offset of storage index in storage index table
+	storageSlots  uint32         // The number of mutated storage slots belonging to the account
+}
+
+// encode packs account index into byte stream. The layout is the address,
+// followed by the data length, data offset, storage index offset and storage
+// slot count, with all multi-byte integers in big-endian order.
+func (i *accountIndex) encode() []byte {
+	var buf [accountIndexSize]byte
+	copy(buf[:], i.address.Bytes())
+	buf[common.AddressLength] = i.length
+	binary.BigEndian.PutUint32(buf[common.AddressLength+1:], i.offset)
+	binary.BigEndian.PutUint32(buf[common.AddressLength+5:], i.storageOffset)
+	binary.BigEndian.PutUint32(buf[common.AddressLength+9:], i.storageSlots)
+	return buf[:]
+}
+
+// decode unpacks account index from byte stream. The blob must contain at
+// least accountIndexSize bytes laid out as produced by encode; the caller is
+// responsible for length validation.
+func (i *accountIndex) decode(blob []byte) {
+	i.address = common.BytesToAddress(blob[:common.AddressLength])
+	i.length = blob[common.AddressLength]
+	i.offset = binary.BigEndian.Uint32(blob[common.AddressLength+1:])
+	i.storageOffset = binary.BigEndian.Uint32(blob[common.AddressLength+5:])
+	i.storageSlots = binary.BigEndian.Uint32(blob[common.AddressLength+9:])
+}
+
+// slotIndex describes the metadata belonging to a storage slot. Its encoded
+// form occupies exactly slotIndexSize bytes.
+type slotIndex struct {
+	hash   common.Hash // The hash of slot key
+	length uint8       // The length of storage slot, up to 32 bytes defined in protocol
+	offset uint32      // The offset of item in storage slot data table
+}
+
+// encode packs slot index into byte stream: the slot key hash, followed by
+// the data length and the big-endian data offset.
+func (i *slotIndex) encode() []byte {
+	var buf [slotIndexSize]byte
+	copy(buf[:common.HashLength], i.hash.Bytes())
+	buf[common.HashLength] = i.length
+	binary.BigEndian.PutUint32(buf[common.HashLength+1:], i.offset)
+	return buf[:]
+}
+
+// decode unpacks slot index from the byte stream. The blob must contain at
+// least slotIndexSize bytes laid out as produced by encode.
+func (i *slotIndex) decode(blob []byte) {
+	i.hash = common.BytesToHash(blob[:common.HashLength])
+	i.length = blob[common.HashLength]
+	i.offset = binary.BigEndian.Uint32(blob[common.HashLength+1:])
+}
+
+// meta describes the meta data of state history object. It is serialized by
+// encode and parsed back by decode; the fixed-size header is followed by an
+// optional list of addresses with incomplete storage sets.
+type meta struct {
+	version    uint8            // version tag of history object
+	parent     common.Hash      // prev-state root before the state transition
+	root       common.Hash      // post-state root after the state transition
+	block      uint64           // associated block number
+	incomplete []common.Address // list of address whose storage set is incomplete
+}
+
+// encode packs the meta object into byte stream.
+//
+// Layout: | version (1B) | parent root (32B) | post root (32B) |
+// block number (8B, big-endian) | incomplete addresses (20B each) |.
+func (m *meta) encode() []byte {
+	buf := make([]byte, historyMetaSize+len(m.incomplete)*common.AddressLength)
+	buf[0] = m.version
+	copy(buf[1:1+common.HashLength], m.parent.Bytes())
+	copy(buf[1+common.HashLength:1+2*common.HashLength], m.root.Bytes())
+	binary.BigEndian.PutUint64(buf[1+2*common.HashLength:historyMetaSize], m.block)
+	for i, h := range m.incomplete {
+		copy(buf[i*common.AddressLength+historyMetaSize:], h.Bytes())
+	}
+	// buf is already a []byte; re-slicing it with buf[:] is redundant.
+	return buf
+}
+
+// decode unpacks the meta object from byte stream. An error is returned if
+// the blob is truncated, the trailing address list is misaligned, or the
+// version tag is unknown.
+func (m *meta) decode(blob []byte) error {
+	if len(blob) < 1 {
+		return errors.New("no version tag")
+	}
+	switch blob[0] {
+	case stateHistoryVersion:
+		if len(blob) < historyMetaSize {
+			return fmt.Errorf("invalid state history meta, len: %d", len(blob))
+		}
+		if (len(blob)-historyMetaSize)%common.AddressLength != 0 {
+			return fmt.Errorf("corrupted state history meta, len: %d", len(blob))
+		}
+		m.version = blob[0]
+		m.parent = common.BytesToHash(blob[1 : 1+common.HashLength])
+		m.root = common.BytesToHash(blob[1+common.HashLength : 1+2*common.HashLength])
+		m.block = binary.BigEndian.Uint64(blob[1+2*common.HashLength : historyMetaSize])
+		for pos := historyMetaSize; pos < len(blob); {
+			m.incomplete = append(m.incomplete, common.BytesToAddress(blob[pos:pos+common.AddressLength]))
+			pos += common.AddressLength
+		}
+		return nil
+	default:
+		return fmt.Errorf("unknown version %d", blob[0])
+	}
+}
+
+// history represents a set of state changes belong to a block along with
+// the metadata including the state roots involved in the state transition.
+// State history objects in disk are linked with each other by a unique id
+// (8-bytes integer), the oldest state history object can be pruned on demand
+// in order to control the storage size.
+type history struct {
+	meta        *meta                                     // Meta data of history
+	accounts    map[common.Address][]byte                 // Account data keyed by its address
+	accountList []common.Address                          // Sorted account address list
+	storages    map[common.Address]map[common.Hash][]byte // Storage data keyed by its address and slot hash
+	storageList map[common.Address][]common.Hash          // Sorted slot hash list per account
+}
+
+// newHistory constructs the state history object with provided state change set.
+//
+// Parameters:
+//   - root: post-state root after the state transition
+//   - parent: prev-state root before the state transition
+//   - block: associated block number
+//   - states: state change set produced by the transition
+func newHistory(root common.Hash, parent common.Hash, block uint64, states *triestate.Set) *history {
+	var (
+		accountList []common.Address
+		storageList = make(map[common.Address][]common.Hash)
+		incomplete  []common.Address
+	)
+	// Sort the mutated accounts lexicographically, so the encoded account
+	// index supports binary search.
+	for addr := range states.Accounts {
+		accountList = append(accountList, addr)
+	}
+	slices.SortFunc(accountList, common.Address.Cmp)
+
+	// Sort each account's mutated slots for the same reason.
+	for addr, slots := range states.Storages {
+		slist := make([]common.Hash, 0, len(slots))
+		for slotHash := range slots {
+			slist = append(slist, slotHash)
+		}
+		slices.SortFunc(slist, common.Hash.Cmp)
+		storageList[addr] = slist
+	}
+	for addr := range states.Incomplete {
+		incomplete = append(incomplete, addr)
+	}
+	slices.SortFunc(incomplete, common.Address.Cmp)
+
+	return &history{
+		meta: &meta{
+			version:    stateHistoryVersion,
+			parent:     parent,
+			root:       root,
+			block:      block,
+			incomplete: incomplete,
+		},
+		accounts:    states.Accounts,
+		accountList: accountList,
+		storages:    states.Storages,
+		storageList: storageList,
+	}
+}
+
+// encode serializes the state history and returns four byte streams represent
+// concatenated account/storage data, account/storage indexes respectively.
+// Accounts (and each account's slots) are emitted in the pre-sorted list
+// order, so the produced index buffers remain binary-searchable and gap-free.
+func (h *history) encode() ([]byte, []byte, []byte, []byte) {
+	var (
+		slotNumber     uint32 // the number of processed slots
+		accountData    []byte // the buffer for concatenated account data
+		storageData    []byte // the buffer for concatenated storage data
+		accountIndexes []byte // the buffer for concatenated account index
+		storageIndexes []byte // the buffer for concatenated storage index
+	)
+	for _, addr := range h.accountList {
+		accIndex := accountIndex{
+			address: addr,
+			length:  uint8(len(h.accounts[addr])),
+			offset:  uint32(len(accountData)),
+		}
+		slots, exist := h.storages[addr]
+		if exist {
+			// Encode storage slots in order
+			for _, slotHash := range h.storageList[addr] {
+				sIndex := slotIndex{
+					hash:   slotHash,
+					length: uint8(len(slots[slotHash])),
+					offset: uint32(len(storageData)),
+				}
+				storageData = append(storageData, slots[slotHash]...)
+				storageIndexes = append(storageIndexes, sIndex.encode()...)
+			}
+			// Fill up the storage meta in account index; storageOffset is a
+			// slot count (not a byte offset) into the storage index table.
+			accIndex.storageOffset = slotNumber
+			accIndex.storageSlots = uint32(len(slots))
+			slotNumber += uint32(len(slots))
+		}
+		accountData = append(accountData, h.accounts[addr]...)
+		accountIndexes = append(accountIndexes, accIndex.encode()...)
+	}
+	return accountData, storageData, accountIndexes, storageIndexes
+}
+
+// decoder wraps the byte streams for decoding with extra meta fields. The
+// read cursors advance monotonically, which lets the readers verify that the
+// encoded data is sorted and gap-free.
+type decoder struct {
+	accountData    []byte // the buffer for concatenated account data
+	storageData    []byte // the buffer for concatenated storage data
+	accountIndexes []byte // the buffer for concatenated account index
+	storageIndexes []byte // the buffer for concatenated storage index
+
+	lastAccount       *common.Address // the address of last resolved account
+	lastAccountRead   uint32          // the read-cursor position of account data
+	lastSlotIndexRead uint32          // the read-cursor position of storage slot index
+	lastSlotDataRead  uint32          // the read-cursor position of storage slot data
+}
+
+// verify performs sanity checks on the byte streams before decoding the state
+// history, rejecting input that is obviously corrupted:
+//
+//   - empty account index buffer (an empty state set is invalid)
+//   - account/storage index buffer whose size is not a whole multiple of
+//     the fixed per-entry size
+//
+// Note, these situations are allowed:
+//
+//   - empty account data: all accounts were not present
+//   - empty storage set: no slots are modified
+func (r *decoder) verify() error {
+	switch {
+	case len(r.accountIndexes) == 0 || len(r.accountIndexes)%accountIndexSize != 0:
+		return fmt.Errorf("invalid account index, len: %d", len(r.accountIndexes))
+	case len(r.storageIndexes)%slotIndexSize != 0:
+		return fmt.Errorf("invalid storage index, len: %d", len(r.storageIndexes))
+	default:
+		return nil
+	}
+}
+
+// readAccount parses the account from the byte stream with specified position.
+// It returns the decoded index entry along with the account data blob it
+// references, and advances the account read cursors.
+func (r *decoder) readAccount(pos int) (accountIndex, []byte, error) {
+	// Decode account index from the index byte stream.
+	var index accountIndex
+	if (pos+1)*accountIndexSize > len(r.accountIndexes) {
+		return accountIndex{}, nil, errors.New("account data buffer is corrupted")
+	}
+	index.decode(r.accountIndexes[pos*accountIndexSize : (pos+1)*accountIndexSize])
+
+	// Perform validation before parsing account data, ensure
+	// - account is sorted in order in byte stream
+	// - account data is strictly encoded with no gap inside
+	// - account data is not out-of-slice
+	if r.lastAccount != nil { // zero address is possible
+		if bytes.Compare(r.lastAccount.Bytes(), index.address.Bytes()) >= 0 {
+			return accountIndex{}, nil, errors.New("account is not in order")
+		}
+	}
+	if index.offset != r.lastAccountRead {
+		return accountIndex{}, nil, errors.New("account data buffer is gaped")
+	}
+	last := index.offset + uint32(index.length)
+	if uint32(len(r.accountData)) < last {
+		return accountIndex{}, nil, errors.New("account data buffer is corrupted")
+	}
+	data := r.accountData[index.offset:last]
+
+	// Advance the cursors for the next account.
+	r.lastAccount = &index.address
+	r.lastAccountRead = last
+
+	return index, data, nil
+}
+
+// readStorage parses the storage slots from the byte stream with specified account.
+// It returns the sorted slot hash list and the slot data map, advancing the
+// storage read cursors as it goes.
+func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.Hash][]byte, error) {
+	var (
+		last    common.Hash
+		list    []common.Hash
+		storage = make(map[common.Hash][]byte)
+	)
+	for j := 0; j < int(accIndex.storageSlots); j++ {
+		var (
+			index slotIndex
+			// storageOffset/j are slot counts; multiply by the entry size to
+			// obtain byte positions in the storage index buffer.
+			start = (accIndex.storageOffset + uint32(j)) * uint32(slotIndexSize)
+			end   = (accIndex.storageOffset + uint32(j+1)) * uint32(slotIndexSize)
+		)
+		// Perform validation before parsing storage slot data, ensure
+		// - slot index is not out-of-slice
+		// - slot data is not out-of-slice
+		// - slot is sorted in order in byte stream
+		// - slot indexes is strictly encoded with no gap inside
+		// - slot data is strictly encoded with no gap inside
+		if start != r.lastSlotIndexRead {
+			return nil, nil, errors.New("storage index buffer is gapped")
+		}
+		if uint32(len(r.storageIndexes)) < end {
+			return nil, nil, errors.New("storage index buffer is corrupted")
+		}
+		index.decode(r.storageIndexes[start:end])
+
+		if bytes.Compare(last.Bytes(), index.hash.Bytes()) >= 0 {
+			return nil, nil, errors.New("storage slot is not in order")
+		}
+		if index.offset != r.lastSlotDataRead {
+			return nil, nil, errors.New("storage data buffer is gapped")
+		}
+		sEnd := index.offset + uint32(index.length)
+		if uint32(len(r.storageData)) < sEnd {
+			return nil, nil, errors.New("storage data buffer is corrupted")
+		}
+		storage[index.hash] = r.storageData[r.lastSlotDataRead:sEnd]
+		list = append(list, index.hash)
+
+		// Advance the cursors for the next slot.
+		last = index.hash
+		r.lastSlotIndexRead = end
+		r.lastSlotDataRead = sEnd
+	}
+	return list, storage, nil
+}
+
+// decode deserializes the account and storage data from the provided byte stream,
+// populating the history's accounts, storages and the corresponding sorted
+// lists. The four buffers must be the streams produced by encode.
+func (h *history) decode(accountData, storageData, accountIndexes, storageIndexes []byte) error {
+	var (
+		accounts    = make(map[common.Address][]byte)
+		storages    = make(map[common.Address]map[common.Hash][]byte)
+		accountList []common.Address
+		storageList = make(map[common.Address][]common.Hash)
+
+		r = &decoder{
+			accountData:    accountData,
+			storageData:    storageData,
+			accountIndexes: accountIndexes,
+			storageIndexes: storageIndexes,
+		}
+	)
+	if err := r.verify(); err != nil {
+		return err
+	}
+	// One account index entry per accountIndexSize bytes; verify above
+	// guarantees the buffer is aligned.
+	for i := 0; i < len(accountIndexes)/accountIndexSize; i++ {
+		// Resolve account first
+		accIndex, accData, err := r.readAccount(i)
+		if err != nil {
+			return err
+		}
+		accounts[accIndex.address] = accData
+		accountList = append(accountList, accIndex.address)
+
+		// Resolve storage slots
+		slotList, slotData, err := r.readStorage(accIndex)
+		if err != nil {
+			return err
+		}
+		if len(slotList) > 0 {
+			storageList[accIndex.address] = slotList
+			storages[accIndex.address] = slotData
+		}
+	}
+	h.accounts = accounts
+	h.accountList = accountList
+	h.storages = storages
+	h.storageList = storageList
+	return nil
+}
diff --git a/trie/triedb/pathdb/history_test.go b/trie/triedb/pathdb/history_test.go
new file mode 100644
index 0000000000..906e0f18e4
--- /dev/null
+++ b/trie/triedb/pathdb/history_test.go
@@ -0,0 +1,171 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/trie/testutil"
+ "github.com/ava-labs/coreth/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+// randomStateSet generates a random state change set with n mutated accounts,
+// each carrying three random storage slot mutations. The incomplete set is
+// left empty.
+func randomStateSet(n int) *triestate.Set {
+	var (
+		accounts = make(map[common.Address][]byte)
+		storages = make(map[common.Address]map[common.Hash][]byte)
+	)
+	for i := 0; i < n; i++ {
+		addr := testutil.RandomAddress()
+		storages[addr] = make(map[common.Hash][]byte)
+		for j := 0; j < 3; j++ {
+			v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testutil.RandBytes(32)))
+			storages[addr][testutil.RandomHash()] = v
+		}
+		account := generateAccount(types.EmptyRootHash)
+		accounts[addr] = types.SlimAccountRLP(account)
+	}
+	return triestate.New(accounts, storages, nil)
+}
+
+// makeHistory builds a single state history with a random post-state root,
+// the empty root as parent and three randomly mutated accounts.
+func makeHistory() *history {
+	return newHistory(testutil.RandomHash(), types.EmptyRootHash, 0, randomStateSet(3))
+}
+
+// nolint:unused
+// makeHistories assembles a chain of n state histories; each history uses a
+// fresh random root, with the previous history's root as its parent.
+func makeHistories(n int) []*history {
+	var (
+		parent = types.EmptyRootHash
+		result []*history
+	)
+	for i := 0; i < n; i++ {
+		root := testutil.RandomHash()
+		h := newHistory(root, parent, uint64(i), randomStateSet(3))
+		parent = root
+		result = append(result, h)
+	}
+	return result
+}
+
+// TestEncodeDecodeHistory checks that a state history's metadata and its
+// account/storage payloads survive an encode/decode round trip unchanged.
+func TestEncodeDecodeHistory(t *testing.T) {
+	var (
+		m   meta
+		dec history
+		obj = makeHistory()
+	)
+	// Check if meta data can be correctly encoded/decoded.
+	blob := obj.meta.encode()
+	if err := m.decode(blob); err != nil {
+		t.Fatalf("Failed to decode %v", err)
+	}
+	if !reflect.DeepEqual(&m, obj.meta) {
+		t.Fatal("meta is mismatched")
+	}
+
+	// Check if account/storage data can be correctly encoded/decoded.
+	accountData, storageData, accountIndexes, storageIndexes := obj.encode()
+	if err := dec.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil {
+		t.Fatalf("Failed to decode, err: %v", err)
+	}
+	if !compareSet(dec.accounts, obj.accounts) {
+		t.Fatal("account data is mismatched")
+	}
+	if !compareStorages(dec.storages, obj.storages) {
+		t.Fatal("storage data is mismatched")
+	}
+	if !compareList(dec.accountList, obj.accountList) {
+		t.Fatal("account list is mismatched")
+	}
+	if !compareStorageList(dec.storageList, obj.storageList) {
+		t.Fatal("storage list is mismatched")
+	}
+}
+
+// compareSet reports whether two byte-slice maps contain exactly the same
+// key set with byte-equal values.
+func compareSet[k comparable](a, b map[k][]byte) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for key, want := range a {
+		if got, ok := b[key]; !ok || !bytes.Equal(want, got) {
+			return false
+		}
+	}
+	return true
+}
+
+// compareList reports whether two slices hold the same elements in the
+// same order.
+func compareList[k comparable](a, b []k) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, v := range a {
+		if v != b[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// compareStorages reports whether two storage sets contain the same accounts
+// with byte-equal slot data.
+func compareStorages(a, b map[common.Address]map[common.Hash][]byte) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for h, subA := range a {
+		subB, ok := b[h]
+		if !ok {
+			return false
+		}
+		if !compareSet(subA, subB) {
+			return false
+		}
+	}
+	return true
+}
+
+// compareStorageList reports whether two per-account slot hash lists contain
+// the same accounts with identically ordered hashes.
+func compareStorageList(a, b map[common.Address][]common.Hash) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for h, la := range a {
+		lb, ok := b[h]
+		if !ok {
+			return false
+		}
+		if !compareList(la, lb) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/trie/triedb/pathdb/journal.go b/trie/triedb/pathdb/journal.go
new file mode 100644
index 0000000000..be69c6074c
--- /dev/null
+++ b/trie/triedb/pathdb/journal.go
@@ -0,0 +1,388 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/ava-labs/coreth/core/rawdb"
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ava-labs/coreth/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+var (
+	// errMissJournal is returned if the layer journal is not found in disk.
+	errMissJournal = errors.New("journal not found")
+
+	// errMissVersion is returned if the journal carries no version tag.
+	errMissVersion = errors.New("version not found")
+
+	// errUnexpectedVersion is returned if the journal version does not match
+	// the supported journalVersion.
+	errUnexpectedVersion = errors.New("unexpected journal version")
+
+	// errMissDiskRoot is returned if the disk layer root is absent from the
+	// journal.
+	errMissDiskRoot = errors.New("disk layer root not found")
+
+	// errUnmatchedJournal is returned if the journal root does not match the
+	// persistent state root.
+	errUnmatchedJournal = errors.New("unmatched journal")
+)
+
+// journalVersion is the version tag expected at the head of a layer journal;
+// loadJournal rejects journals carrying any other version.
+const journalVersion uint64 = 0
+
+// journalNode represents a trie node persisted in the journal.
+type journalNode struct {
+	Path []byte // Path of the node in the trie
+	Blob []byte // RLP-encoded trie node blob, nil means the node is deleted
+}
+
+// journalNodes represents a list of trie nodes belong to a single account
+// or the main account trie.
+type journalNodes struct {
+	Owner common.Hash
+	Nodes []journalNode
+}
+
+// journalAccounts represents a list of accounts belong to the layer, with
+// Addresses and Accounts aligned by index.
+type journalAccounts struct {
+	Addresses []common.Address
+	Accounts  [][]byte
+}
+
+// journalStorage represents a list of storage slots belong to an account,
+// with Hashes and Slots aligned by index.
+type journalStorage struct {
+	Incomplete bool
+	Account    common.Address
+	Hashes     []common.Hash
+	Slots      [][]byte
+}
+
+// loadJournal tries to parse the layer journal from the disk. On success it
+// returns the topmost layer of the reconstructed stack (disk layer plus any
+// journaled diff layers); otherwise an error describing why the journal is
+// unusable.
+func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) {
+	journal := rawdb.ReadTrieJournal(db.diskdb)
+	if len(journal) == 0 {
+		return nil, errMissJournal
+	}
+	r := rlp.NewStream(bytes.NewReader(journal), 0)
+
+	// Firstly, resolve the first element as the journal version
+	version, err := r.Uint64()
+	if err != nil {
+		return nil, errMissVersion
+	}
+	if version != journalVersion {
+		return nil, fmt.Errorf("%w want %d got %d", errUnexpectedVersion, journalVersion, version)
+	}
+	// Secondly, resolve the disk layer root, ensure it's continuous
+	// with disk layer. Note now we can ensure it's the layer journal
+	// correct version, so we expect everything can be resolved properly.
+	var root common.Hash
+	if err := r.Decode(&root); err != nil {
+		return nil, errMissDiskRoot
+	}
+	// The journal is not matched with persistent state, discard them.
+	// It can happen that geth crashes without persisting the journal.
+	if !bytes.Equal(root.Bytes(), diskRoot.Bytes()) {
+		return nil, fmt.Errorf("%w want %x got %x", errUnmatchedJournal, root, diskRoot)
+	}
+	// Load the disk layer from the journal
+	base, err := db.loadDiskLayer(r)
+	if err != nil {
+		return nil, err
+	}
+	// Load all the diff layers from the journal
+	head, err := db.loadDiffLayer(base, r)
+	if err != nil {
+		return nil, err
+	}
+	log.Debug("Loaded layer journal", "diskroot", diskRoot, "diffhead", head.rootHash())
+	return head, nil
+}
+
+// loadLayers loads a pre-existing state layer backed by a key-value store.
+// If the journal cannot be loaded (missing, corrupted or mismatched), it
+// falls back to a single disk layer built from the persistent state.
+func (db *Database) loadLayers() layer {
+	// Retrieve the root node of persistent state.
+	_, root := rawdb.ReadAccountTrieNode(db.diskdb, nil)
+	root = types.TrieRootHash(root)
+
+	// Load the layers by resolving the journal
+	head, err := db.loadJournal(root)
+	if err == nil {
+		return head
+	}
+	// journal is not matched(or missing) with the persistent state, discard
+	// it. Display log for discarding journal, but try to avoid showing
+	// useless information when the db is created from scratch.
+	if !(root == types.EmptyRootHash && errors.Is(err, errMissJournal)) {
+		log.Info("Failed to load journal, discard it", "err", err)
+	}
+	// Return single layer with persistent state.
+	return newDiskLayer(root, rawdb.ReadPersistentStateID(db.diskdb), db, nil, newNodeBuffer(db.bufferSize, nil, 0))
+}
+
+// loadDiskLayer reads the binary blob from the layer journal, reconstructing
+// a new disk layer on it. The stream must be positioned at the disk layer
+// section, i.e. right after the journal version and root.
+func (db *Database) loadDiskLayer(r *rlp.Stream) (layer, error) {
+	// Resolve disk layer root
+	var root common.Hash
+	if err := r.Decode(&root); err != nil {
+		return nil, fmt.Errorf("load disk root: %v", err)
+	}
+	// Resolve the state id of disk layer, it can be different
+	// with the persistent id tracked in disk, the id distance
+	// is the number of transitions aggregated in disk layer.
+	var id uint64
+	if err := r.Decode(&id); err != nil {
+		return nil, fmt.Errorf("load state id: %v", err)
+	}
+	// The journaled id must never be behind the persisted one.
+	stored := rawdb.ReadPersistentStateID(db.diskdb)
+	if stored > id {
+		return nil, fmt.Errorf("invalid state id: stored %d resolved %d", stored, id)
+	}
+	// Resolve nodes cached in node buffer
+	var encoded []journalNodes
+	if err := r.Decode(&encoded); err != nil {
+		return nil, fmt.Errorf("load disk nodes: %v", err)
+	}
+	nodes := make(map[common.Hash]map[string]*trienode.Node)
+	for _, entry := range encoded {
+		subset := make(map[string]*trienode.Node)
+		for _, n := range entry.Nodes {
+			// An empty blob marks a deleted node.
+			if len(n.Blob) > 0 {
+				subset[string(n.Path)] = trienode.New(crypto.Keccak256Hash(n.Blob), n.Blob)
+			} else {
+				subset[string(n.Path)] = trienode.NewDeleted()
+			}
+		}
+		nodes[entry.Owner] = subset
+	}
+	// Calculate the internal state transitions by id difference.
+	base := newDiskLayer(root, id, db, nil, newNodeBuffer(db.bufferSize, nodes, id-stored))
+	return base, nil
+}
+
+// loadDiffLayer reads the next sections of a layer journal, reconstructing a new
+// diff and verifying that it can be linked to the requested parent. It recurses
+// until the journal stream is exhausted, returning the topmost diff layer.
+func (db *Database) loadDiffLayer(parent layer, r *rlp.Stream) (layer, error) {
+	// Read the next diff journal entry
+	var root common.Hash
+	if err := r.Decode(&root); err != nil {
+		// The first read may fail with EOF, marking the end of the journal
+		if err == io.EOF {
+			return parent, nil
+		}
+		return nil, fmt.Errorf("load diff root: %v", err)
+	}
+	var block uint64
+	if err := r.Decode(&block); err != nil {
+		return nil, fmt.Errorf("load block number: %v", err)
+	}
+	// Read in-memory trie nodes from journal
+	var encoded []journalNodes
+	if err := r.Decode(&encoded); err != nil {
+		return nil, fmt.Errorf("load diff nodes: %v", err)
+	}
+	nodes := make(map[common.Hash]map[string]*trienode.Node)
+	for _, entry := range encoded {
+		subset := make(map[string]*trienode.Node)
+		for _, n := range entry.Nodes {
+			// An empty blob marks a deleted node.
+			if len(n.Blob) > 0 {
+				subset[string(n.Path)] = trienode.New(crypto.Keccak256Hash(n.Blob), n.Blob)
+			} else {
+				subset[string(n.Path)] = trienode.NewDeleted()
+			}
+		}
+		nodes[entry.Owner] = subset
+	}
+	// Read state changes from journal
+	var (
+		jaccounts  journalAccounts
+		jstorages  []journalStorage
+		accounts   = make(map[common.Address][]byte)
+		storages   = make(map[common.Address]map[common.Hash][]byte)
+		incomplete = make(map[common.Address]struct{})
+	)
+	if err := r.Decode(&jaccounts); err != nil {
+		return nil, fmt.Errorf("load diff accounts: %v", err)
+	}
+	for i, addr := range jaccounts.Addresses {
+		accounts[addr] = jaccounts.Accounts[i]
+	}
+	if err := r.Decode(&jstorages); err != nil {
+		return nil, fmt.Errorf("load diff storages: %v", err)
+	}
+	for _, entry := range jstorages {
+		set := make(map[common.Hash][]byte)
+		for i, h := range entry.Hashes {
+			// Normalize empty slot values to nil (slot deletion).
+			if len(entry.Slots[i]) > 0 {
+				set[h] = entry.Slots[i]
+			} else {
+				set[h] = nil
+			}
+		}
+		if entry.Incomplete {
+			incomplete[entry.Account] = struct{}{}
+		}
+		storages[entry.Account] = set
+	}
+	// Link the reconstructed diff on top of the parent and recurse for the
+	// next journal entry.
+	return db.loadDiffLayer(newDiffLayer(parent, root, parent.stateID()+1, block, nodes, triestate.New(accounts, storages, incomplete)), r)
+}
+
+// journal implements the layer interface, marshaling the un-flushed trie nodes
+// along with layer meta data into provided byte buffer.
+func (dl *diskLayer) journal(w io.Writer) error {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ // Ensure the layer didn't get stale
+ if dl.stale {
+ return errSnapshotStale
+ }
+ // Step one, write the disk root into the journal.
+ if err := rlp.Encode(w, dl.root); err != nil {
+ return err
+ }
+ // Step two, write the corresponding state id into the journal
+ if err := rlp.Encode(w, dl.id); err != nil {
+ return err
+ }
+ // Step three, write all unwritten nodes into the journal
+ nodes := make([]journalNodes, 0, len(dl.buffer.nodes))
+ for owner, subset := range dl.buffer.nodes {
+ entry := journalNodes{Owner: owner}
+ for path, node := range subset {
+ entry.Nodes = append(entry.Nodes, journalNode{Path: []byte(path), Blob: node.Blob})
+ }
+ nodes = append(nodes, entry)
+ }
+ if err := rlp.Encode(w, nodes); err != nil {
+ return err
+ }
+ log.Debug("Journaled pathdb disk layer", "root", dl.root, "nodes", len(dl.buffer.nodes))
+ return nil
+}
+
+// journal implements the layer interface, writing the memory layer contents
+// into a buffer to be stored in the database as the layer journal.
+func (dl *diffLayer) journal(w io.Writer) error {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ // journal the parent first
+ if err := dl.parent.journal(w); err != nil {
+ return err
+ }
+ // Everything below was journaled, persist this layer too
+ if err := rlp.Encode(w, dl.root); err != nil {
+ return err
+ }
+ if err := rlp.Encode(w, dl.block); err != nil {
+ return err
+ }
+ // Write the accumulated trie nodes into buffer
+ nodes := make([]journalNodes, 0, len(dl.nodes))
+ for owner, subset := range dl.nodes {
+ entry := journalNodes{Owner: owner}
+ for path, node := range subset {
+ entry.Nodes = append(entry.Nodes, journalNode{Path: []byte(path), Blob: node.Blob})
+ }
+ nodes = append(nodes, entry)
+ }
+ if err := rlp.Encode(w, nodes); err != nil {
+ return err
+ }
+ // Write the accumulated state changes into buffer
+ var jacct journalAccounts
+ for addr, account := range dl.states.Accounts {
+ jacct.Addresses = append(jacct.Addresses, addr)
+ jacct.Accounts = append(jacct.Accounts, account)
+ }
+ if err := rlp.Encode(w, jacct); err != nil {
+ return err
+ }
+ storage := make([]journalStorage, 0, len(dl.states.Storages))
+ for addr, slots := range dl.states.Storages {
+ entry := journalStorage{Account: addr}
+ if _, ok := dl.states.Incomplete[addr]; ok {
+ entry.Incomplete = true
+ }
+ for slotHash, slot := range slots {
+ entry.Hashes = append(entry.Hashes, slotHash)
+ entry.Slots = append(entry.Slots, slot)
+ }
+ storage = append(storage, entry)
+ }
+ if err := rlp.Encode(w, storage); err != nil {
+ return err
+ }
+ log.Debug("Journaled pathdb diff layer", "root", dl.root, "parent", dl.parent.rootHash(), "id", dl.stateID(), "block", dl.block, "nodes", len(dl.nodes))
+ return nil
+}
+
+// Journal commits an entire diff hierarchy to disk into a single journal entry.
+// This is meant to be used during shutdown to persist the layer without
+// flattening everything down (bad for reorgs). And this function will mark the
+// database as read-only to prevent all following mutations to disk.
+func (db *Database) Journal(root common.Hash) error {
+ // Retrieve the head layer to journal from.
+ l := db.tree.get(root)
+ if l == nil {
+ return fmt.Errorf("triedb layer [%#x] missing", root)
+ }
+ // Run the journaling
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ // Short circuit if the database is in read only mode.
+ if db.readOnly {
+ return errSnapshotReadOnly
+ }
+ // Firstly write out the metadata of journal
+ journal := new(bytes.Buffer)
+ if err := rlp.Encode(journal, journalVersion); err != nil {
+ return err
+ }
+ // The stored state in disk might be empty, convert the
+ // root to emptyRoot in this case.
+ _, diskroot := rawdb.ReadAccountTrieNode(db.diskdb, nil)
+ diskroot = types.TrieRootHash(diskroot)
+
+ // Secondly write out the state root in disk, ensure all layers
+ // on top are continuous with disk.
+ if err := rlp.Encode(journal, diskroot); err != nil {
+ return err
+ }
+ // Finally write out the journal of each layer in reverse order.
+ if err := l.journal(journal); err != nil {
+ return err
+ }
+ // Store the journal into the database and return
+ rawdb.WriteTrieJournal(db.diskdb, journal.Bytes())
+
+ // Set the db in read only mode to reject all following mutations
+ db.readOnly = true
+ log.Info("Stored journal in triedb", "disk", diskroot, "size", common.StorageSize(journal.Len()))
+ return nil
+}
diff --git a/trie/triedb/pathdb/layertree.go b/trie/triedb/pathdb/layertree.go
new file mode 100644
index 0000000000..b3a16329f5
--- /dev/null
+++ b/trie/triedb/pathdb/layertree.go
@@ -0,0 +1,224 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ava-labs/coreth/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// layerTree is a group of state layers identified by the state root.
+// This structure defines a few basic operations for manipulating
+// state layers linked with each other in a tree structure. It's
+// thread-safe to use. However, callers need to ensure the thread-safety
+// of the referenced layer by themselves.
+type layerTree struct {
+ lock sync.RWMutex
+ layers map[common.Hash]layer
+}
+
+// newLayerTree constructs the layerTree with the given head layer.
+func newLayerTree(head layer) *layerTree {
+ tree := new(layerTree)
+ tree.reset(head)
+ return tree
+}
+
+// reset initializes the layerTree by the given head layer.
+// All the ancestors will be iterated out and linked in the tree.
+func (tree *layerTree) reset(head layer) {
+ tree.lock.Lock()
+ defer tree.lock.Unlock()
+
+ var layers = make(map[common.Hash]layer)
+ for head != nil {
+ layers[head.rootHash()] = head
+ head = head.parentLayer()
+ }
+ tree.layers = layers
+}
+
+// get retrieves a layer belonging to the given state root.
+func (tree *layerTree) get(root common.Hash) layer {
+ tree.lock.RLock()
+ defer tree.lock.RUnlock()
+
+ return tree.layers[types.TrieRootHash(root)]
+}
+
+// forEach iterates the stored layers inside and applies the
+// given callback on them.
+func (tree *layerTree) forEach(onLayer func(layer)) {
+ tree.lock.RLock()
+ defer tree.lock.RUnlock()
+
+ for _, layer := range tree.layers {
+ onLayer(layer)
+ }
+}
+
+// len returns the number of layers cached.
+func (tree *layerTree) len() int {
+ tree.lock.RLock()
+ defer tree.lock.RUnlock()
+
+ return len(tree.layers)
+}
+
+// add inserts a new layer into the tree if it can be linked to an existing old parent.
+func (tree *layerTree) add(root common.Hash, parentRoot common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
+ // Reject noop updates to avoid self-loops. This is a special case that can
+ // happen for clique networks and proof-of-stake networks where empty blocks
+ // don't modify the state (0 block subsidy).
+ //
+ // Although we could silently ignore this internally, it should be the caller's
+ // responsibility to avoid even attempting to insert such a layer.
+ root, parentRoot = types.TrieRootHash(root), types.TrieRootHash(parentRoot)
+ if root == parentRoot {
+ return errors.New("layer cycle")
+ }
+ parent := tree.get(parentRoot)
+ if parent == nil {
+ return fmt.Errorf("triedb parent [%#x] layer missing", parentRoot)
+ }
+ l := parent.update(root, parent.stateID()+1, block, nodes.Flatten(), states)
+
+ tree.lock.Lock()
+ tree.layers[l.rootHash()] = l
+ tree.lock.Unlock()
+ return nil
+}
+
+// cap traverses downwards the diff tree until the number of allowed diff layers
+// are crossed. All diffs beyond the permitted number are flattened downwards.
+func (tree *layerTree) cap(root common.Hash, layers int) error {
+ // Retrieve the head layer to cap from
+ root = types.TrieRootHash(root)
+ l := tree.get(root)
+ if l == nil {
+ return fmt.Errorf("triedb layer [%#x] missing", root)
+ }
+ diff, ok := l.(*diffLayer)
+ if !ok {
+ return fmt.Errorf("triedb layer [%#x] is disk layer", root)
+ }
+ tree.lock.Lock()
+ defer tree.lock.Unlock()
+
+ // If full commit was requested, flatten the diffs and merge onto disk
+ if layers == 0 {
+ base, err := diff.persist(true)
+ if err != nil {
+ return err
+ }
+ // Replace the entire layer tree with the flat base
+ tree.layers = map[common.Hash]layer{base.rootHash(): base}
+ return nil
+ }
+ // Dive until we run out of layers or reach the persistent database
+ for i := 0; i < layers-1; i++ {
+ // If we still have diff layers below, continue down
+ if parent, ok := diff.parentLayer().(*diffLayer); ok {
+ diff = parent
+ } else {
+ // Diff stack too shallow, return without modifications
+ return nil
+ }
+ }
+ // We're out of layers, flatten anything below, stopping if it's the disk or if
+ // the memory limit is not yet exceeded.
+ switch parent := diff.parentLayer().(type) {
+ case *diskLayer:
+ return nil
+
+ case *diffLayer:
+ // Hold the lock to prevent any read operations until the new
+ // parent is linked correctly.
+ diff.lock.Lock()
+
+ base, err := parent.persist(false)
+ if err != nil {
+ diff.lock.Unlock()
+ return err
+ }
+ tree.layers[base.rootHash()] = base
+ diff.parent = base
+
+ diff.lock.Unlock()
+
+ default:
+ panic(fmt.Sprintf("unknown data layer in triedb: %T", parent))
+ }
+ // Remove any layer that is stale or links into a stale layer
+ children := make(map[common.Hash][]common.Hash)
+ for root, layer := range tree.layers {
+ if dl, ok := layer.(*diffLayer); ok {
+ parent := dl.parentLayer().rootHash()
+ children[parent] = append(children[parent], root)
+ }
+ }
+ var remove func(root common.Hash)
+ remove = func(root common.Hash) {
+ delete(tree.layers, root)
+ for _, child := range children[root] {
+ remove(child)
+ }
+ delete(children, root)
+ }
+ for root, layer := range tree.layers {
+ if dl, ok := layer.(*diskLayer); ok && dl.isStale() {
+ remove(root)
+ }
+ }
+ return nil
+}
+
+// bottom returns the bottom-most disk layer in this tree.
+func (tree *layerTree) bottom() *diskLayer {
+ tree.lock.RLock()
+ defer tree.lock.RUnlock()
+
+ if len(tree.layers) == 0 {
+ return nil // Shouldn't happen, empty tree
+ }
+ // pick a random one as the entry point
+ var current layer
+ for _, layer := range tree.layers {
+ current = layer
+ break
+ }
+ for current.parentLayer() != nil {
+ current = current.parentLayer()
+ }
+ return current.(*diskLayer)
+}
diff --git a/trie/triedb/pathdb/metrics.go b/trie/triedb/pathdb/metrics.go
new file mode 100644
index 0000000000..795cf1abba
--- /dev/null
+++ b/trie/triedb/pathdb/metrics.go
@@ -0,0 +1,63 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import "github.com/ava-labs/coreth/metrics"
+
+var (
+ cleanHitMeter = metrics.NewRegisteredMeter("pathdb/clean/hit", nil)
+ cleanMissMeter = metrics.NewRegisteredMeter("pathdb/clean/miss", nil)
+ cleanReadMeter = metrics.NewRegisteredMeter("pathdb/clean/read", nil)
+ cleanWriteMeter = metrics.NewRegisteredMeter("pathdb/clean/write", nil)
+
+ dirtyHitMeter = metrics.NewRegisteredMeter("pathdb/dirty/hit", nil)
+ dirtyMissMeter = metrics.NewRegisteredMeter("pathdb/dirty/miss", nil)
+ dirtyReadMeter = metrics.NewRegisteredMeter("pathdb/dirty/read", nil)
+ dirtyWriteMeter = metrics.NewRegisteredMeter("pathdb/dirty/write", nil)
+ dirtyNodeHitDepthHist = metrics.NewRegisteredHistogram("pathdb/dirty/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
+
+ cleanFalseMeter = metrics.NewRegisteredMeter("pathdb/clean/false", nil)
+ dirtyFalseMeter = metrics.NewRegisteredMeter("pathdb/dirty/false", nil)
+ diskFalseMeter = metrics.NewRegisteredMeter("pathdb/disk/false", nil)
+
+ commitTimeTimer = metrics.NewRegisteredTimer("pathdb/commit/time", nil)
+ commitNodesMeter = metrics.NewRegisteredMeter("pathdb/commit/nodes", nil)
+ commitBytesMeter = metrics.NewRegisteredMeter("pathdb/commit/bytes", nil)
+
+ gcNodesMeter = metrics.NewRegisteredMeter("pathdb/gc/nodes", nil)
+ gcBytesMeter = metrics.NewRegisteredMeter("pathdb/gc/bytes", nil)
+
+ diffLayerBytesMeter = metrics.NewRegisteredMeter("pathdb/diff/bytes", nil)
+ diffLayerNodesMeter = metrics.NewRegisteredMeter("pathdb/diff/nodes", nil)
+
+ // nolint:unused
+ historyBuildTimeMeter = metrics.NewRegisteredTimer("pathdb/history/time", nil)
+ // nolint:unused
+ historyDataBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/data", nil)
+ // nolint:unused
+ historyIndexBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/index", nil)
+)
diff --git a/trie/triedb/pathdb/nodebuffer.go b/trie/triedb/pathdb/nodebuffer.go
new file mode 100644
index 0000000000..b3b3105bf6
--- /dev/null
+++ b/trie/triedb/pathdb/nodebuffer.go
@@ -0,0 +1,287 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/VictoriaMetrics/fastcache"
+ "github.com/ava-labs/coreth/core/rawdb"
+ "github.com/ava-labs/coreth/ethdb"
+ "github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// nodebuffer is a collection of modified trie nodes to aggregate the disk
+// write. The content of the nodebuffer must be checked before diving into
+// disk (since it basically is not-yet-written data).
+type nodebuffer struct {
+ layers uint64 // The number of diff layers aggregated inside
+ size uint64 // The size of aggregated writes
+ limit uint64 // The maximum memory allowance in bytes
+ nodes map[common.Hash]map[string]*trienode.Node // The dirty node set, mapped by owner and path
+}
+
+// newNodeBuffer initializes the node buffer with the provided nodes.
+func newNodeBuffer(limit int, nodes map[common.Hash]map[string]*trienode.Node, layers uint64) *nodebuffer {
+ if nodes == nil {
+ nodes = make(map[common.Hash]map[string]*trienode.Node)
+ }
+ var size uint64
+ for _, subset := range nodes {
+ for path, n := range subset {
+ size += uint64(len(n.Blob) + len(path))
+ }
+ }
+ return &nodebuffer{
+ layers: layers,
+ nodes: nodes,
+ size: size,
+ limit: uint64(limit),
+ }
+}
+
+// node retrieves the trie node with given node info.
+func (b *nodebuffer) node(owner common.Hash, path []byte, hash common.Hash) (*trienode.Node, error) {
+ subset, ok := b.nodes[owner]
+ if !ok {
+ return nil, nil
+ }
+ n, ok := subset[string(path)]
+ if !ok {
+ return nil, nil
+ }
+ if n.Hash != hash {
+ dirtyFalseMeter.Mark(1)
+ log.Error("Unexpected trie node in node buffer", "owner", owner, "path", path, "expect", hash, "got", n.Hash)
+ return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path)
+ }
+ return n, nil
+}
+
+// commit merges the dirty nodes into the nodebuffer. This operation won't take
+// the ownership of the nodes map which belongs to the bottom-most diff layer.
+// It will just hold the node references from the given map which are safe to
+// copy.
+func (b *nodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node) *nodebuffer {
+ var (
+ delta int64
+ overwrite int64
+ overwriteSize int64
+ )
+ for owner, subset := range nodes {
+ current, exist := b.nodes[owner]
+ if !exist {
+ // Allocate a new map for the subset instead of claiming it directly
+ // from the passed map to avoid potential concurrent map read/write.
+			// The nodes belonging to the original diff layer are still accessible
+			// even after merging; thus the ownership of the nodes map should remain
+			// with the original layer and any mutation of it must be prevented.
+ current = make(map[string]*trienode.Node)
+ for path, n := range subset {
+ current[path] = n
+ delta += int64(len(n.Blob) + len(path))
+ }
+ b.nodes[owner] = current
+ continue
+ }
+ for path, n := range subset {
+ if orig, exist := current[path]; !exist {
+ delta += int64(len(n.Blob) + len(path))
+ } else {
+ delta += int64(len(n.Blob) - len(orig.Blob))
+ overwrite++
+ overwriteSize += int64(len(orig.Blob) + len(path))
+ }
+ current[path] = n
+ }
+ b.nodes[owner] = current
+ }
+ b.updateSize(delta)
+ b.layers++
+ gcNodesMeter.Mark(overwrite)
+ gcBytesMeter.Mark(overwriteSize)
+ return b
+}
+
+// nolint:unused
+// revert is the reverse operation of commit. It also merges the provided nodes
+// into the nodebuffer, the difference is that the provided node set should
+// revert the changes made by the last state transition.
+func (b *nodebuffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) error {
+ // Short circuit if no embedded state transition to revert.
+ if b.layers == 0 {
+ return errStateUnrecoverable
+ }
+ b.layers--
+
+ // Reset the entire buffer if only a single transition left.
+ if b.layers == 0 {
+ b.reset()
+ return nil
+ }
+ var delta int64
+ for owner, subset := range nodes {
+ current, ok := b.nodes[owner]
+ if !ok {
+ panic(fmt.Sprintf("non-existent subset (%x)", owner))
+ }
+ for path, n := range subset {
+ orig, ok := current[path]
+ if !ok {
+ // There is a special case in MPT that one child is removed from
+ // a fullNode which only has two children, and then a new child
+ // with different position is immediately inserted into the fullNode.
+ // In this case, the clean child of the fullNode will also be
+ // marked as dirty because of node collapse and expansion.
+ //
+ // In case of database rollback, don't panic if this "clean"
+ // node occurs which is not present in buffer.
+ var nhash common.Hash
+ if owner == (common.Hash{}) {
+ _, nhash = rawdb.ReadAccountTrieNode(db, []byte(path))
+ } else {
+ _, nhash = rawdb.ReadStorageTrieNode(db, owner, []byte(path))
+ }
+ // Ignore the clean node in the case described above.
+ if nhash == n.Hash {
+ continue
+ }
+ panic(fmt.Sprintf("non-existent node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex()))
+ }
+ current[path] = n
+ delta += int64(len(n.Blob)) - int64(len(orig.Blob))
+ }
+ }
+ b.updateSize(delta)
+ return nil
+}
+
+// updateSize updates the total cache size by the given delta.
+func (b *nodebuffer) updateSize(delta int64) {
+ size := int64(b.size) + delta
+ if size >= 0 {
+ b.size = uint64(size)
+ return
+ }
+ s := b.size
+ b.size = 0
+ log.Error("Invalid pathdb buffer size", "prev", common.StorageSize(s), "delta", common.StorageSize(delta))
+}
+
+// reset cleans up the disk cache.
+func (b *nodebuffer) reset() {
+ b.layers = 0
+ b.size = 0
+ b.nodes = make(map[common.Hash]map[string]*trienode.Node)
+}
+
+// nolint:unused
+// empty returns an indicator if nodebuffer contains any state transition inside.
+func (b *nodebuffer) empty() bool {
+ return b.layers == 0
+}
+
+// setSize sets the buffer size to the provided number, and invokes a flush
+// operation if the current memory usage exceeds the new limit.
+func (b *nodebuffer) setSize(size int, db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64) error {
+ b.limit = uint64(size)
+ return b.flush(db, clean, id, false)
+}
+
+// flush persists the in-memory dirty trie node into the disk if the configured
+// memory threshold is reached. Note, all data must be written atomically.
+func (b *nodebuffer) flush(db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64, force bool) error {
+ if b.size <= b.limit && !force {
+ return nil
+ }
+ // Ensure the target state id is aligned with the internal counter.
+ head := rawdb.ReadPersistentStateID(db)
+ if head+b.layers != id {
+ return fmt.Errorf("buffer layers (%d) cannot be applied on top of persisted state id (%d) to reach requested state id (%d)", b.layers, head, id)
+ }
+ var (
+ start = time.Now()
+ batch = db.NewBatchWithSize(int(b.size))
+ )
+ nodes := writeNodes(batch, b.nodes, clean)
+ rawdb.WritePersistentStateID(batch, id)
+
+ // Flush all mutations in a single batch
+ size := batch.ValueSize()
+ if err := batch.Write(); err != nil {
+ return err
+ }
+ commitBytesMeter.Mark(int64(size))
+ commitNodesMeter.Mark(int64(nodes))
+ commitTimeTimer.UpdateSince(start)
+ log.Debug("Persisted pathdb nodes", "nodes", len(b.nodes), "bytes", common.StorageSize(size), "elapsed", common.PrettyDuration(time.Since(start)))
+ b.reset()
+ return nil
+}
+
+// writeNodes writes the trie nodes into the provided database batch.
+// Note this function will also inject all the newly written nodes
+// into clean cache.
+func writeNodes(batch ethdb.Batch, nodes map[common.Hash]map[string]*trienode.Node, clean *fastcache.Cache) (total int) {
+ for owner, subset := range nodes {
+ for path, n := range subset {
+ if n.IsDeleted() {
+ if owner == (common.Hash{}) {
+ rawdb.DeleteAccountTrieNode(batch, []byte(path))
+ } else {
+ rawdb.DeleteStorageTrieNode(batch, owner, []byte(path))
+ }
+ if clean != nil {
+ clean.Del(cacheKey(owner, []byte(path)))
+ }
+ } else {
+ if owner == (common.Hash{}) {
+ rawdb.WriteAccountTrieNode(batch, []byte(path), n.Blob)
+ } else {
+ rawdb.WriteStorageTrieNode(batch, owner, []byte(path), n.Blob)
+ }
+ if clean != nil {
+ clean.Set(cacheKey(owner, []byte(path)), n.Blob)
+ }
+ }
+ }
+ total += len(subset)
+ }
+ return total
+}
+
+// cacheKey constructs the unique key of clean cache.
+func cacheKey(owner common.Hash, path []byte) []byte {
+ if owner == (common.Hash{}) {
+ return path
+ }
+ return append(owner.Bytes(), path...)
+}
diff --git a/trie/triedb/pathdb/testutils.go b/trie/triedb/pathdb/testutils.go
new file mode 100644
index 0000000000..be8def9b11
--- /dev/null
+++ b/trie/triedb/pathdb/testutils.go
@@ -0,0 +1,166 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ava-labs/coreth/trie/triestate"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "golang.org/x/exp/slices"
+)
+
+// testHasher is a test utility for computing root hash of a batch of state
+// elements. The hash algorithm is to sort all the elements in lexicographical
+// order, concat the key and value in turn, and perform hash calculation on
+// the concatenated bytes. Except the root hash, a nodeset will be returned
+// once Commit is called, which contains all the changes made to hasher.
+type testHasher struct {
+ owner common.Hash // owner identifier
+ root common.Hash // original root
+ dirties map[common.Hash][]byte // dirty states
+ cleans map[common.Hash][]byte // clean states
+}
+
+// newTestHasher constructs a hasher object with provided states.
+func newTestHasher(owner common.Hash, root common.Hash, cleans map[common.Hash][]byte) (*testHasher, error) {
+ if cleans == nil {
+ cleans = make(map[common.Hash][]byte)
+ }
+ if got, _ := hash(cleans); got != root {
+ return nil, fmt.Errorf("state root mismatched, want: %x, got: %x", root, got)
+ }
+ return &testHasher{
+ owner: owner,
+ root: root,
+ dirties: make(map[common.Hash][]byte),
+ cleans: cleans,
+ }, nil
+}
+
+// Get returns the value for key stored in the trie.
+func (h *testHasher) Get(key []byte) ([]byte, error) {
+ hash := common.BytesToHash(key)
+ val, ok := h.dirties[hash]
+ if ok {
+ return val, nil
+ }
+ return h.cleans[hash], nil
+}
+
+// Update associates key with value in the trie.
+func (h *testHasher) Update(key, value []byte) error {
+ h.dirties[common.BytesToHash(key)] = common.CopyBytes(value)
+ return nil
+}
+
+// Delete removes any existing value for key from the trie.
+func (h *testHasher) Delete(key []byte) error {
+ h.dirties[common.BytesToHash(key)] = nil
+ return nil
+}
+
+// Commit computes the new hash of the states and returns the set with all
+// state changes.
+func (h *testHasher) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
+ var (
+ nodes = make(map[common.Hash][]byte)
+ set = trienode.NewNodeSet(h.owner)
+ )
+ for hash, val := range h.cleans {
+ nodes[hash] = val
+ }
+ for hash, val := range h.dirties {
+ nodes[hash] = val
+ if bytes.Equal(val, h.cleans[hash]) {
+ continue
+ }
+ if len(val) == 0 {
+ set.AddNode(hash.Bytes(), trienode.NewDeleted())
+ } else {
+ set.AddNode(hash.Bytes(), trienode.New(crypto.Keccak256Hash(val), val))
+ }
+ }
+ root, blob := hash(nodes)
+
+ // Include the dirty root node as well.
+ if root != types.EmptyRootHash && root != h.root {
+ set.AddNode(nil, trienode.New(root, blob))
+ }
+ if root == types.EmptyRootHash && h.root != types.EmptyRootHash {
+ set.AddNode(nil, trienode.NewDeleted())
+ }
+ return root, set
+}
+
+// hash performs the hash computation upon the provided states.
+func hash(states map[common.Hash][]byte) (common.Hash, []byte) {
+ var hs []common.Hash
+ for hash := range states {
+ hs = append(hs, hash)
+ }
+ slices.SortFunc(hs, common.Hash.Cmp)
+
+ var input []byte
+ for _, hash := range hs {
+ if len(states[hash]) == 0 {
+ continue
+ }
+ input = append(input, hash.Bytes()...)
+ input = append(input, states[hash]...)
+ }
+ if len(input) == 0 {
+ return types.EmptyRootHash, nil
+ }
+ return crypto.Keccak256Hash(input), input
+}
+
+type hashLoader struct {
+ accounts map[common.Hash][]byte
+ storages map[common.Hash]map[common.Hash][]byte
+}
+
+func newHashLoader(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte) *hashLoader {
+ return &hashLoader{
+ accounts: accounts,
+ storages: storages,
+ }
+}
+
+// OpenTrie opens the main account trie.
+func (l *hashLoader) OpenTrie(root common.Hash) (triestate.Trie, error) {
+ return newTestHasher(common.Hash{}, root, l.accounts)
+}
+
+// OpenStorageTrie opens the storage trie of an account.
+func (l *hashLoader) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (triestate.Trie, error) {
+ return newTestHasher(addrHash, root, l.storages[addrHash])
+}
diff --git a/trie/trienode/node.go b/trie/trienode/node.go
index 8152eab6c0..98d5588b6d 100644
--- a/trie/trienode/node.go
+++ b/trie/trienode/node.go
@@ -25,8 +25,8 @@ import (
)
// Node is a wrapper which contains the encoded blob of the trie node and its
-// unique hash identifier. It is general enough that can be used to represent
-// trie nodes corresponding to different trie implementations.
+// node hash. It is general enough that can be used to represent trie node
+// corresponding to different trie implementations.
type Node struct {
Hash common.Hash // Node hash, empty for deleted node
Blob []byte // Encoded node blob, nil for the deleted node
@@ -42,35 +42,13 @@ func (n *Node) IsDeleted() bool {
return n.Hash == (common.Hash{})
}
-// WithPrev wraps the Node with the previous node value attached.
-type WithPrev struct {
- *Node
- Prev []byte // Encoded original value, nil means it's non-existent
-}
-
-// Unwrap returns the internal Node object.
-func (n *WithPrev) Unwrap() *Node {
- return n.Node
-}
-
-// Size returns the total memory size used by this node. It overloads
-// the function in Node by counting the size of previous value as well.
-func (n *WithPrev) Size() int {
- return n.Node.Size() + len(n.Prev)
-}
-
// New constructs a node with provided node information.
func New(hash common.Hash, blob []byte) *Node {
return &Node{Hash: hash, Blob: blob}
}
-// NewWithPrev constructs a node with provided node information.
-func NewWithPrev(hash common.Hash, blob []byte, prev []byte) *WithPrev {
- return &WithPrev{
- Node: New(hash, blob),
- Prev: prev,
- }
-}
+// NewDeleted constructs a node which is deleted.
+func NewDeleted() *Node { return New(common.Hash{}, nil) }
// leaf represents a trie leaf node
type leaf struct {
@@ -83,7 +61,7 @@ type leaf struct {
type NodeSet struct {
Owner common.Hash
Leaves []*leaf
- Nodes map[string]*WithPrev
+ Nodes map[string]*Node
updates int // the count of updated and inserted nodes
deletes int // the count of deleted nodes
}
@@ -93,26 +71,26 @@ type NodeSet struct {
func NewNodeSet(owner common.Hash) *NodeSet {
return &NodeSet{
Owner: owner,
- Nodes: make(map[string]*WithPrev),
+ Nodes: make(map[string]*Node),
}
}
// ForEachWithOrder iterates the nodes with the order from bottom to top,
// right to left, nodes with the longest path will be iterated first.
func (set *NodeSet) ForEachWithOrder(callback func(path string, n *Node)) {
- var paths sort.StringSlice
+ var paths []string
for path := range set.Nodes {
paths = append(paths, path)
}
- // Bottom-up, longest path first
- sort.Sort(sort.Reverse(paths))
+ // Bottom-up, the longest path first
+ sort.Sort(sort.Reverse(sort.StringSlice(paths)))
for _, path := range paths {
- callback(path, set.Nodes[path].Unwrap())
+ callback(path, set.Nodes[path])
}
}
// AddNode adds the provided node into set.
-func (set *NodeSet) AddNode(path []byte, n *WithPrev) {
+func (set *NodeSet) AddNode(path []byte, n *Node) {
if n.IsDeleted() {
set.deletes += 1
} else {
@@ -121,6 +99,26 @@ func (set *NodeSet) AddNode(path []byte, n *WithPrev) {
set.Nodes[string(path)] = n
}
+// Merge adds a set of nodes into the set.
+func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*Node) error {
+ if set.Owner != owner {
+ return fmt.Errorf("nodesets belong to different owner are not mergeable %x-%x", set.Owner, owner)
+ }
+ for path, node := range nodes {
+ prev, ok := set.Nodes[path]
+ if ok {
+ // overwrite happens, revoke the counter
+ if prev.IsDeleted() {
+ set.deletes -= 1
+ } else {
+ set.updates -= 1
+ }
+ }
+ set.AddNode([]byte(path), node)
+ }
+ return nil
+}
+
// AddLeaf adds the provided leaf node into set. TODO(rjl493456442) how can
// we get rid of it?
func (set *NodeSet) AddLeaf(parent common.Hash, blob []byte) {
@@ -150,16 +148,11 @@ func (set *NodeSet) Summary() string {
for path, n := range set.Nodes {
// Deletion
if n.IsDeleted() {
- fmt.Fprintf(out, " [-]: %x prev: %x\n", path, n.Prev)
+ fmt.Fprintf(out, " [-]: %x\n", path)
continue
}
- // Insertion
- if len(n.Prev) == 0 {
- fmt.Fprintf(out, " [+]: %x -> %v\n", path, n.Hash)
- continue
- }
- // Update
- fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", path, n.Hash, n.Prev)
+ // Insertion or update
+ fmt.Fprintf(out, " [+/*]: %x -> %v \n", path, n.Hash)
}
}
for _, n := range set.Leaves {
@@ -188,10 +181,19 @@ func NewWithNodeSet(set *NodeSet) *MergedNodeSet {
// Merge merges the provided dirty nodes of a trie into the set. The assumption
// is held that no duplicated set belonging to the same trie will be merged twice.
func (set *MergedNodeSet) Merge(other *NodeSet) error {
- _, present := set.Sets[other.Owner]
+ subset, present := set.Sets[other.Owner]
if present {
- return fmt.Errorf("duplicate trie for owner %#x", other.Owner)
+ return subset.Merge(other.Owner, other.Nodes)
}
set.Sets[other.Owner] = other
return nil
}
+
+// Flatten returns a two-dimensional map for internal nodes.
+func (set *MergedNodeSet) Flatten() map[common.Hash]map[string]*Node {
+ nodes := make(map[common.Hash]map[string]*Node)
+ for owner, set := range set.Sets {
+ nodes[owner] = set.Nodes
+ }
+ return nodes
+}
diff --git a/trie/triestate/state.go b/trie/triestate/state.go
new file mode 100644
index 0000000000..3069e6e2af
--- /dev/null
+++ b/trie/triestate/state.go
@@ -0,0 +1,277 @@
+// (c) 2023, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see
+
+package triestate
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/ava-labs/coreth/core/types"
+ "github.com/ava-labs/coreth/trie/trienode"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
+)
+
+// Trie is an Ethereum state trie, can be implemented by Ethereum Merkle Patricia
+// tree or Verkle tree.
+type Trie interface {
+ // Get returns the value for key stored in the trie.
+ Get(key []byte) ([]byte, error)
+
+ // Update associates key with value in the trie.
+ Update(key, value []byte) error
+
+ // Delete removes any existing value for key from the trie.
+ Delete(key []byte) error
+
+ // Commit the trie and returns a set of dirty nodes generated along with
+ // the new root hash.
+ Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet)
+}
+
+// TrieLoader wraps functions to load tries.
+type TrieLoader interface {
+ // OpenTrie opens the main account trie.
+ OpenTrie(root common.Hash) (Trie, error)
+
+ // OpenStorageTrie opens the storage trie of an account.
+ OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error)
+}
+
+// Set represents a collection of mutated states during a state transition.
+// The value refers to the original content of state before the transition
+// is made. Nil means that the state was not present previously.
+type Set struct {
+ Accounts map[common.Address][]byte // Mutated account set, nil means the account was not present
+ Storages map[common.Address]map[common.Hash][]byte // Mutated storage set, nil means the slot was not present
+ Incomplete map[common.Address]struct{} // Indicator whether the storage is incomplete due to large deletion
+ size common.StorageSize // Approximate size of set
+}
+
+// New constructs the state set with provided data.
+func New(accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, incomplete map[common.Address]struct{}) *Set {
+ return &Set{
+ Accounts: accounts,
+ Storages: storages,
+ Incomplete: incomplete,
+ }
+}
+
+// Size returns the approximate memory size occupied by the set.
+func (s *Set) Size() common.StorageSize {
+ if s.size != 0 {
+ return s.size
+ }
+ for _, account := range s.Accounts {
+ s.size += common.StorageSize(common.AddressLength + len(account))
+ }
+ for _, slots := range s.Storages {
+ for _, val := range slots {
+ s.size += common.StorageSize(common.HashLength + len(val))
+ }
+ s.size += common.StorageSize(common.AddressLength)
+ }
+ s.size += common.StorageSize(common.AddressLength * len(s.Incomplete))
+ return s.size
+}
+
+// context wraps all fields for executing state diffs.
+type context struct {
+ prevRoot common.Hash
+ postRoot common.Hash
+ accounts map[common.Address][]byte
+ storages map[common.Address]map[common.Hash][]byte
+ accountTrie Trie
+ nodes *trienode.MergedNodeSet
+}
+
+// Apply traverses the provided state diffs, apply them in the associated
+// post-state and return the generated dirty trie nodes. The state can be
+// loaded via the provided trie loader.
+func Apply(prevRoot common.Hash, postRoot common.Hash, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, loader TrieLoader) (map[common.Hash]map[string]*trienode.Node, error) {
+ tr, err := loader.OpenTrie(postRoot)
+ if err != nil {
+ return nil, err
+ }
+ ctx := &context{
+ prevRoot: prevRoot,
+ postRoot: postRoot,
+ accounts: accounts,
+ storages: storages,
+ accountTrie: tr,
+ nodes: trienode.NewMergedNodeSet(),
+ }
+ for addr, account := range accounts {
+ var err error
+ if len(account) == 0 {
+ err = deleteAccount(ctx, loader, addr)
+ } else {
+ err = updateAccount(ctx, loader, addr)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("failed to revert state, err: %w", err)
+ }
+ }
+ root, result := tr.Commit(false)
+ if root != prevRoot {
+ return nil, fmt.Errorf("failed to revert state, want %#x, got %#x", prevRoot, root)
+ }
+ if err := ctx.nodes.Merge(result); err != nil {
+ return nil, err
+ }
+ return ctx.nodes.Flatten(), nil
+}
+
+// updateAccount handles the case where the account was present in prev-state,
+// and may or may not exist in post-state. Apply the reverse diff and verify
+// that the storage root matches the one in the prev-state account.
+func updateAccount(ctx *context, loader TrieLoader, addr common.Address) error {
+ // The account was present in prev-state, decode it from the
+ // 'slim-rlp' format bytes.
+ h := newHasher()
+ defer h.release()
+
+ addrHash := h.hash(addr.Bytes())
+ prev, err := types.FullAccount(ctx.accounts[addr])
+ if err != nil {
+ return err
+ }
+// The account may or may not exist in post-state; try to
+// load it and decode it if found.
+ blob, err := ctx.accountTrie.Get(addrHash.Bytes())
+ if err != nil {
+ return err
+ }
+ post := types.NewEmptyStateAccount()
+ if len(blob) != 0 {
+ if err := rlp.DecodeBytes(blob, &post); err != nil {
+ return err
+ }
+ }
+ // Apply all storage changes into the post-state storage trie.
+ st, err := loader.OpenStorageTrie(ctx.postRoot, addrHash, post.Root)
+ if err != nil {
+ return err
+ }
+ for key, val := range ctx.storages[addr] {
+ var err error
+ if len(val) == 0 {
+ err = st.Delete(key.Bytes())
+ } else {
+ err = st.Update(key.Bytes(), val)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ root, result := st.Commit(false)
+ if root != prev.Root {
+ return errors.New("failed to reset storage trie")
+ }
+ // The returned set can be nil if storage trie is not changed
+ // at all.
+ if result != nil {
+ if err := ctx.nodes.Merge(result); err != nil {
+ return err
+ }
+ }
+ // Write the prev-state account into the main trie
+ full, err := rlp.EncodeToBytes(prev)
+ if err != nil {
+ return err
+ }
+ return ctx.accountTrie.Update(addrHash.Bytes(), full)
+}
+
+// deleteAccount handles the case where the account was not present in
+// prev-state but is expected to exist in post-state. Apply the reverse diff
+// and verify that the account and storage are wiped out correctly.
+func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error {
+ // The account must be existent in post-state, load the account.
+ h := newHasher()
+ defer h.release()
+
+ addrHash := h.hash(addr.Bytes())
+ blob, err := ctx.accountTrie.Get(addrHash.Bytes())
+ if err != nil {
+ return err
+ }
+ if len(blob) == 0 {
+ return fmt.Errorf("account is non-existent %#x", addrHash)
+ }
+ var post types.StateAccount
+ if err := rlp.DecodeBytes(blob, &post); err != nil {
+ return err
+ }
+ st, err := loader.OpenStorageTrie(ctx.postRoot, addrHash, post.Root)
+ if err != nil {
+ return err
+ }
+ for key, val := range ctx.storages[addr] {
+ if len(val) != 0 {
+ return errors.New("expect storage deletion")
+ }
+ if err := st.Delete(key.Bytes()); err != nil {
+ return err
+ }
+ }
+ root, result := st.Commit(false)
+ if root != types.EmptyRootHash {
+ return errors.New("failed to clear storage trie")
+ }
+ // The returned set can be nil if storage trie is not changed
+ // at all.
+ if result != nil {
+ if err := ctx.nodes.Merge(result); err != nil {
+ return err
+ }
+ }
+ // Delete the post-state account from the main trie.
+ return ctx.accountTrie.Delete(addrHash.Bytes())
+}
+
+// hasher is used to compute the keccak256 hash of the provided data.
+type hasher struct{ sha crypto.KeccakState }
+
+var hasherPool = sync.Pool{
+ New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
+}
+
+func newHasher() *hasher {
+ return hasherPool.Get().(*hasher)
+}
+
+func (h *hasher) hash(data []byte) common.Hash {
+ return crypto.HashData(h.sha, data)
+}
+
+func (h *hasher) release() {
+ hasherPool.Put(h)
+}