diff --git a/.github/workflows/evm-tests.yml b/.github/workflows/evm-tests.yml
deleted file mode 100644
index a7a025d8c1..0000000000
--- a/.github/workflows/evm-tests.yml
+++ /dev/null
@@ -1,56 +0,0 @@
-name: EVM Test
-
-on:
- push:
- branches:
- - master
- - develop
-
- pull_request:
- branches:
- - master
- - develop
-
-jobs:
- evm-test:
- strategy:
- matrix:
- go-version: [1.21.x]
- os: [ubuntu-latest]
- runs-on: ${{ matrix.os }}
- steps:
- - name: Install Go
- uses: actions/setup-go@v3
- with:
- go-version: ${{ matrix.go-version }}
-
- - name: Checkout code
- uses: actions/checkout@v3
-
- - uses: actions/cache@v3
- with:
- # In order:
- # * Module download cache
- # * Build cache (Linux)
- # * Build cache (Mac)
- # * Build cache (Windows)
- path: |
- ~/go/pkg/mod
- ~/.cache/go-build
- ~/Library/Caches/go-build
- ~\AppData\Local\go-build
- key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
- restore-keys: |
- ${{ runner.os }}-go-
-
- - name: EVM Test
- env:
- CGO_CFLAGS: "-O -D__BLST_PORTABLE__"
- CGO_CFLAGS_ALLOW: "-O -D__BLST_PORTABLE__"
- ANDROID_HOME: "" # Skip android test
- run: |
- git submodule update --init --depth 1 --recursive
- go mod download
- cd tests
- sed -i -e 's/\/\/ bt.skipLoad/bt.skipLoad/g' block_test.go
- bash -x run-evm-tests.sh
diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml
deleted file mode 100644
index ff12eb4bec..0000000000
--- a/.github/workflows/integration-test.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-name: Integration Test
-
-on:
- push:
- branches:
- - master
- - develop
-
- pull_request:
- branches:
- - master
- - develop
-
-jobs:
- truffle-test:
- strategy:
- matrix:
- os: [ubuntu-latest]
- runs-on: ${{ matrix.os }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v3
-
- - name: Truffle test
- run: |
- make truffle-test
diff --git a/cmd/geth/pruneblock_test.go b/cmd/geth/pruneblock_test.go
deleted file mode 100644
index b2a93f65aa..0000000000
--- a/cmd/geth/pruneblock_test.go
+++ /dev/null
@@ -1,253 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of go-ethereum.
-//
-// go-ethereum is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// go-ethereum is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
-
-package main
-
-import (
- "bytes"
- "encoding/hex"
- "fmt"
- "math/big"
- "os"
- "path/filepath"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/cmd/utils"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state/pruner"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/eth"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/ethdb/leveldb"
- "github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/triedb"
-)
-
-var (
- canonicalSeed = 1
- blockPruneBackUpBlockNumber = 128
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- balance = big.NewInt(100000000000000000)
- gspec = &core.Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{address: {Balance: balance}}, BaseFee: big.NewInt(params.InitialBaseFee)}
- signer = types.LatestSigner(gspec.Config)
- config = &core.CacheConfig{
- TrieCleanLimit: 256,
- TrieDirtyLimit: 256,
- TrieTimeLimit: 5 * time.Minute,
- SnapshotLimit: 0, // Disable snapshot
- TriesInMemory: 128,
- }
- engine = ethash.NewFullFaker()
-)
-
-func TestOfflineBlockPrune(t *testing.T) {
- // Corner case for 0 blocks remaining in the ancientStore.
- testOfflineBlockPruneWithAmountReserved(t, 0)
- // General case.
- testOfflineBlockPruneWithAmountReserved(t, 100)
-}
-
-func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, ancient string, namespace string, readonly, disableFreeze, isLastOffset, pruneAncientData bool) (ethdb.Database, error) {
- kvdb, err := leveldb.New(file, cache, handles, namespace, readonly)
- if err != nil {
- return nil, err
- }
- frdb, err := rawdb.NewDatabaseWithFreezer(kvdb, ancient, namespace, readonly, disableFreeze, isLastOffset, pruneAncientData, false)
- if err != nil {
- kvdb.Close()
- return nil, err
- }
- return frdb, nil
-}
-
-func testOfflineBlockPruneWithAmountReserved(t *testing.T, amountReserved uint64) {
- datadir := t.TempDir()
-
- chaindbPath := filepath.Join(datadir, "chaindata")
- oldAncientPath := filepath.Join(chaindbPath, "ancient")
- newAncientPath := filepath.Join(chaindbPath, "ancient_back")
-
- db, blocks, blockList, receiptsList, externTdList, startBlockNumber, _ := BlockchainCreator(t, chaindbPath, oldAncientPath, amountReserved)
- node, _ := startEthService(t, gspec, blocks, chaindbPath)
- defer node.Close()
-
- // Initialize a block pruner for pruning, retaining only amountReserved blocks backward.
- testBlockPruner := pruner.NewBlockPruner(db, node, oldAncientPath, newAncientPath, amountReserved)
- if err := testBlockPruner.BlockPruneBackUp(chaindbPath, 512, utils.MakeDatabaseHandles(0), "", false, false); err != nil {
- t.Fatalf("Failed to back up block: %v", err)
- }
-
- dbBack, err := NewLevelDBDatabaseWithFreezer(chaindbPath, 0, 0, newAncientPath, "", false, true, false, false)
- if err != nil {
- t.Fatalf("failed to create database with ancient backend")
- }
- defer dbBack.Close()
-
- // Check whether the backed-up data matches the original.
- for blockNumber := startBlockNumber; blockNumber < startBlockNumber+amountReserved; blockNumber++ {
- blockHash := rawdb.ReadCanonicalHash(dbBack, blockNumber)
- block := rawdb.ReadBlock(dbBack, blockHash, blockNumber)
-
- if block.Hash() != blockHash {
- t.Fatalf("block data did not match between oldDb and backupDb")
- }
- if blockList[blockNumber-startBlockNumber].Hash() != blockHash {
- t.Fatalf("block data did not match between oldDb and backupDb")
- }
-
- receipts := rawdb.ReadRawReceipts(dbBack, blockHash, blockNumber)
- if err := checkReceiptsRLP(receipts, receiptsList[blockNumber-startBlockNumber]); err != nil {
- t.Fatalf("receipts did not match between oldDb and backupDb")
- }
- // Calculate the total difficulty of the block
- td := rawdb.ReadTd(dbBack, blockHash, blockNumber)
- if td == nil {
- t.Fatalf("Failed to ReadTd: %v", consensus.ErrUnknownAncestor)
- }
- if td.Cmp(externTdList[blockNumber-startBlockNumber]) != 0 {
- t.Fatalf("externTd did not match between oldDb and backupDb")
- }
- }
-
- // Check whether the ancientDb freezer was replaced successfully
- testBlockPruner.AncientDbReplacer()
- if _, err := os.Stat(newAncientPath); err != nil {
- if !os.IsNotExist(err) {
- t.Fatalf("ancientDb replaced unsuccessfully")
- }
- }
- if _, err := os.Stat(oldAncientPath); err != nil {
- t.Fatalf("ancientDb replaced unsuccessfully")
- }
-}
-
-func BlockchainCreator(t *testing.T, chaindbPath, AncientPath string, blockRemain uint64) (ethdb.Database, []*types.Block, []*types.Block, []types.Receipts, []*big.Int, uint64, *core.BlockChain) {
- // Create a database with an ancient freezer
- db, err := NewLevelDBDatabaseWithFreezer(chaindbPath, 0, 0, AncientPath, "", false, false, false, false)
- if err != nil {
- t.Fatalf("failed to create database with ancient backend")
- }
- defer db.Close()
-
- triedb := triedb.NewDatabase(db, nil)
- defer triedb.Close()
-
- genesis := gspec.MustCommit(db, triedb)
- // Initialize a fresh chain with only a genesis block
- blockchain, err := core.NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to create chain: %v", err)
- }
-
- // Make chain starting from genesis
- blocks, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 500, func(i int, block *core.BlockGen) {
- block.SetCoinbase(common.Address{0: byte(canonicalSeed), 19: byte(i)})
- tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, key)
- if err != nil {
- panic(err)
- }
- block.AddTx(tx)
- block.SetDifficulty(big.NewInt(1000000))
- })
- if _, err := blockchain.InsertChain(blocks); err != nil {
- t.Fatalf("Failed to import canonical chain start: %v", err)
- }
-
- // Force run a freeze cycle
- type freezer interface {
- Freeze(threshold uint64) error
- Ancients() (uint64, error)
- }
- db.(freezer).Freeze(10)
-
- frozen, err := db.Ancients()
- // Make sure there are frozen items
- if err != nil || frozen == 0 {
- t.Fatalf("Failed to import canonical chain start: %v", err)
- }
- if frozen < blockRemain {
- t.Fatalf("block amount is not enough for pruning: %v", err)
- }
-
- oldOffSet := rawdb.ReadOffSetOfCurrentAncientFreezer(db)
- // Get the actual start block number.
- startBlockNumber := frozen - blockRemain + oldOffSet
- // Initialize the slice to buffer the block data left.
- blockList := make([]*types.Block, 0, blockPruneBackUpBlockNumber)
- receiptsList := make([]types.Receipts, 0, blockPruneBackUpBlockNumber)
- externTdList := make([]*big.Int, 0, blockPruneBackUpBlockNumber)
- // All ancient data within the most recent 128 blocks is written into a memory buffer for later use in the new ancient_back directory.
- for blockNumber := startBlockNumber; blockNumber < frozen+oldOffSet; blockNumber++ {
- blockHash := rawdb.ReadCanonicalHash(db, blockNumber)
- block := rawdb.ReadBlock(db, blockHash, blockNumber)
- blockList = append(blockList, block)
- receipts := rawdb.ReadRawReceipts(db, blockHash, blockNumber)
- receiptsList = append(receiptsList, receipts)
- // Calculate the total difficulty of the block
- td := rawdb.ReadTd(db, blockHash, blockNumber)
- if td == nil {
- t.Fatalf("Failed to ReadTd: %v", consensus.ErrUnknownAncestor)
- }
- externTdList = append(externTdList, td)
- }
-
- return db, blocks, blockList, receiptsList, externTdList, startBlockNumber, blockchain
-}
-
-func checkReceiptsRLP(have, want types.Receipts) error {
- if len(have) != len(want) {
- return fmt.Errorf("receipts sizes mismatch: have %d, want %d", len(have), len(want))
- }
- for i := 0; i < len(want); i++ {
- rlpHave, err := rlp.EncodeToBytes(have[i])
- if err != nil {
- return err
- }
- rlpWant, err := rlp.EncodeToBytes(want[i])
- if err != nil {
- return err
- }
- if !bytes.Equal(rlpHave, rlpWant) {
- return fmt.Errorf("receipt #%d: receipt mismatch: have %s, want %s", i, hex.EncodeToString(rlpHave), hex.EncodeToString(rlpWant))
- }
- }
- return nil
-}
-
-// startEthService creates a full node instance for testing.
-func startEthService(t *testing.T, genesis *core.Genesis, blocks []*types.Block, chaindbPath string) (*node.Node, *eth.Ethereum) {
- t.Helper()
- n, err := node.New(&node.Config{DataDir: chaindbPath})
- if err != nil {
- t.Fatal("can't create node:", err)
- }
-
- if err := n.Start(); err != nil {
- t.Fatal("can't start node:", err)
- }
-
- return n, nil
-}
diff --git a/cmd/utils/history_test.go b/cmd/utils/history_test.go
deleted file mode 100644
index ba206e9823..0000000000
--- a/cmd/utils/history_test.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2023 The go-ethereum Authors
-// This file is part of go-ethereum.
-//
-// go-ethereum is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// go-ethereum is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
-
-package utils
-
-import (
- "bytes"
- "crypto/sha256"
- "io"
- "math/big"
- "os"
- "path"
- "strings"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/internal/era"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie"
- "github.com/ethereum/go-ethereum/triedb"
-)
-
-var (
- count uint64 = 128
- step uint64 = 16
-)
-
-func TestHistoryImportAndExport(t *testing.T) {
- var (
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- genesis = &core.Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{address: {Balance: big.NewInt(1000000000000000000)}},
- }
- signer = types.LatestSigner(genesis.Config)
- )
-
- // Generate chain.
- db, blocks, _ := core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), int(count), func(i int, g *core.BlockGen) {
- if i == 0 {
- return
- }
- tx, err := types.SignNewTx(key, signer, &types.DynamicFeeTx{
- ChainID: genesis.Config.ChainID,
- Nonce: uint64(i - 1),
- GasTipCap: common.Big0,
- GasFeeCap: g.PrevBlock(0).BaseFee(),
- Gas: 50000,
- To: &common.Address{0xaa},
- Value: big.NewInt(int64(i)),
- Data: nil,
- AccessList: nil,
- })
- if err != nil {
- t.Fatalf("error creating tx: %v", err)
- }
- g.AddTx(tx)
- })
-
- // Initialize BlockChain.
- chain, err := core.NewBlockChain(db, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("unable to initialize chain: %v", err)
- }
- if _, err := chain.InsertChain(blocks); err != nil {
- t.Fatalf("error inserting chain: %v", err)
- }
-
- // Make temp directory for era files.
- dir, err := os.MkdirTemp("", "history-export-test")
- if err != nil {
- t.Fatalf("error creating temp test directory: %v", err)
- }
- defer os.RemoveAll(dir)
-
- // Export history to temp directory.
- if err := ExportHistory(chain, dir, 0, count, step); err != nil {
- t.Fatalf("error exporting history: %v", err)
- }
-
- // Read checksums.
- b, err := os.ReadFile(path.Join(dir, "checksums.txt"))
- if err != nil {
- t.Fatalf("failed to read checksums: %v", err)
- }
- checksums := strings.Split(string(b), "\n")
-
- // Verify each Era.
- entries, _ := era.ReadDir(dir, "mainnet")
- for i, filename := range entries {
- func() {
- f, err := os.Open(path.Join(dir, filename))
- if err != nil {
- t.Fatalf("error opening era file: %v", err)
- }
- var (
- h = sha256.New()
- buf = bytes.NewBuffer(nil)
- )
- if _, err := io.Copy(h, f); err != nil {
- t.Fatalf("unable to recalculate checksum: %v", err)
- }
- if got, want := common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex(), checksums[i]; got != want {
- t.Fatalf("checksum %d does not match: got %s, want %s", i, got, want)
- }
- e, err := era.From(f)
- if err != nil {
- t.Fatalf("error opening era: %v", err)
- }
- defer e.Close()
- it, err := era.NewIterator(e)
- if err != nil {
- t.Fatalf("error making era reader: %v", err)
- }
- for j := 0; it.Next(); j++ {
- n := i*int(step) + j
- if it.Error() != nil {
- t.Fatalf("error reading block entry %d: %v", n, it.Error())
- }
- block, receipts, err := it.BlockAndReceipts()
- if err != nil {
- t.Fatalf("error reading block entry %d: %v", n, err)
- }
- want := chain.GetBlockByNumber(uint64(n))
- if want, got := uint64(n), block.NumberU64(); want != got {
- t.Fatalf("blocks out of order: want %d, got %d", want, got)
- }
- if want.Hash() != block.Hash() {
- t.Fatalf("block hash mismatch %d: want %s, got %s", n, want.Hash().Hex(), block.Hash().Hex())
- }
- if got := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); got != want.TxHash() {
- t.Fatalf("tx hash %d mismatch: want %s, got %s", n, want.TxHash(), got)
- }
- if got := types.CalcUncleHash(block.Uncles()); got != want.UncleHash() {
- t.Fatalf("uncle hash %d mismatch: want %s, got %s", n, want.UncleHash(), got)
- }
- if got := types.DeriveSha(receipts, trie.NewStackTrie(nil)); got != want.ReceiptHash() {
- t.Fatalf("receipt root %d mismatch: want %s, got %s", n, want.ReceiptHash(), got)
- }
- }
- }()
- }
-
- // Now import Era.
- freezer := t.TempDir()
- db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false, false, false, false, false)
- if err != nil {
- panic(err)
- }
- t.Cleanup(func() {
- db2.Close()
- })
-
- genesis.MustCommit(db2, triedb.NewDatabase(db, triedb.HashDefaults))
- imported, err := core.NewBlockChain(db2, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("unable to initialize chain: %v", err)
- }
- if err := ImportHistory(imported, db2, dir, "mainnet"); err != nil {
- t.Fatalf("failed to import chain: %v", err)
- }
- if have, want := imported.CurrentHeader(), chain.CurrentHeader(); have.Hash() != want.Hash() {
- t.Fatalf("imported chain does not match expected, have (%d, %s) want (%d, %s)", have.Number, have.Hash(), want.Number, want.Hash())
- }
-}
diff --git a/core/bench_test.go b/core/bench_test.go
deleted file mode 100644
index 97713868a5..0000000000
--- a/core/bench_test.go
+++ /dev/null
@@ -1,331 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "crypto/ecdsa"
- "math/big"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/math"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/params"
-)
-
-func BenchmarkInsertChain_empty_memdb(b *testing.B) {
- benchInsertChain(b, false, nil)
-}
-func BenchmarkInsertChain_empty_diskdb(b *testing.B) {
- benchInsertChain(b, true, nil)
-}
-func BenchmarkInsertChain_valueTx_memdb(b *testing.B) {
- benchInsertChain(b, false, genValueTx(0))
-}
-func BenchmarkInsertChain_valueTx_diskdb(b *testing.B) {
- benchInsertChain(b, true, genValueTx(0))
-}
-func BenchmarkInsertChain_valueTx_100kB_memdb(b *testing.B) {
- benchInsertChain(b, false, genValueTx(100*1024))
-}
-func BenchmarkInsertChain_valueTx_100kB_diskdb(b *testing.B) {
- benchInsertChain(b, true, genValueTx(100*1024))
-}
-func BenchmarkInsertChain_uncles_memdb(b *testing.B) {
- benchInsertChain(b, false, genUncles)
-}
-func BenchmarkInsertChain_uncles_diskdb(b *testing.B) {
- benchInsertChain(b, true, genUncles)
-}
-func BenchmarkInsertChain_ring200_memdb(b *testing.B) {
- benchInsertChain(b, false, genTxRing(200))
-}
-func BenchmarkInsertChain_ring200_diskdb(b *testing.B) {
- benchInsertChain(b, true, genTxRing(200))
-}
-func BenchmarkInsertChain_ring1000_memdb(b *testing.B) {
- benchInsertChain(b, false, genTxRing(1000))
-}
-func BenchmarkInsertChain_ring1000_diskdb(b *testing.B) {
- benchInsertChain(b, true, genTxRing(1000))
-}
-
-var (
- // This is the content of the genesis block used by the benchmarks.
- benchRootKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- benchRootAddr = crypto.PubkeyToAddress(benchRootKey.PublicKey)
- benchRootFunds = math.BigPow(2, 200)
-)
-
-// genValueTx returns a block generator that includes a single
-// value-transfer transaction with n bytes of extra data in each
-// block.
-func genValueTx(nbytes int) func(int, *BlockGen) {
- return func(i int, gen *BlockGen) {
- toaddr := common.Address{}
- data := make([]byte, nbytes)
- gas, _ := IntrinsicGas(data, nil, false, false, false, false)
- signer := gen.Signer()
- gasPrice := big.NewInt(0)
- if gen.header.BaseFee != nil {
- gasPrice = gen.header.BaseFee
- }
- tx, _ := types.SignNewTx(benchRootKey, signer, &types.LegacyTx{
- Nonce: gen.TxNonce(benchRootAddr),
- To: &toaddr,
- Value: big.NewInt(1),
- Gas: gas,
- Data: data,
- GasPrice: gasPrice,
- })
- gen.AddTx(tx)
- }
-}
-
-var (
- ringKeys = make([]*ecdsa.PrivateKey, 1000)
- ringAddrs = make([]common.Address, len(ringKeys))
-)
-
-func init() {
- ringKeys[0] = benchRootKey
- ringAddrs[0] = benchRootAddr
- for i := 1; i < len(ringKeys); i++ {
- ringKeys[i], _ = crypto.GenerateKey()
- ringAddrs[i] = crypto.PubkeyToAddress(ringKeys[i].PublicKey)
- }
-}
-
-// genTxRing returns a block generator that sends ether in a ring
-// among n accounts. This is creates n entries in the state database
-// and fills the blocks with many small transactions.
-func genTxRing(naccounts int) func(int, *BlockGen) {
- from := 0
- availableFunds := new(big.Int).Set(benchRootFunds)
- return func(i int, gen *BlockGen) {
- block := gen.PrevBlock(i - 1)
- gas := block.GasLimit()
- gasPrice := big.NewInt(0)
- if gen.header.BaseFee != nil {
- gasPrice = gen.header.BaseFee
- }
- signer := gen.Signer()
- for {
- gas -= params.TxGas
- if gas < params.TxGas {
- break
- }
- to := (from + 1) % naccounts
- burn := new(big.Int).SetUint64(params.TxGas)
- burn.Mul(burn, gen.header.BaseFee)
- availableFunds.Sub(availableFunds, burn)
- if availableFunds.Cmp(big.NewInt(1)) < 0 {
- panic("not enough funds")
- }
- tx, err := types.SignNewTx(ringKeys[from], signer,
- &types.LegacyTx{
- Nonce: gen.TxNonce(ringAddrs[from]),
- To: &ringAddrs[to],
- Value: availableFunds,
- Gas: params.TxGas,
- GasPrice: gasPrice,
- })
- if err != nil {
- panic(err)
- }
- gen.AddTx(tx)
- from = to
- }
- }
-}
-
-// genUncles generates blocks with two uncle headers.
-func genUncles(i int, gen *BlockGen) {
- if i >= 7 {
- b2 := gen.PrevBlock(i - 6).Header()
- b2.Extra = []byte("foo")
- gen.AddUncle(b2)
- b3 := gen.PrevBlock(i - 6).Header()
- b3.Extra = []byte("bar")
- gen.AddUncle(b3)
- }
-}
-
-func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
- // Create the database in memory or in a temporary directory.
- var db ethdb.Database
- var err error
- if !disk {
- db = rawdb.NewMemoryDatabase()
- } else {
- dir := b.TempDir()
- db, err = rawdb.NewLevelDBDatabase(dir, 128, 128, "", false)
- if err != nil {
- b.Fatalf("cannot create temporary database: %v", err)
- }
- defer db.Close()
- }
-
- // Generate a chain of b.N blocks using the supplied block
- // generator function.
- gspec := &Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{benchRootAddr: {Balance: benchRootFunds}},
- }
- _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), b.N, gen)
-
- // Time the insertion of the new chain.
- // State and blocks are stored in the same DB.
- chainman, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer chainman.Stop()
- b.ReportAllocs()
- b.ResetTimer()
- if i, err := chainman.InsertChain(chain); err != nil {
- b.Fatalf("insert error (block %d): %v\n", i, err)
- }
-}
-
-func BenchmarkChainRead_header_10k(b *testing.B) {
- benchReadChain(b, false, 10000)
-}
-func BenchmarkChainRead_full_10k(b *testing.B) {
- benchReadChain(b, true, 10000)
-}
-func BenchmarkChainRead_header_100k(b *testing.B) {
- benchReadChain(b, false, 100000)
-}
-func BenchmarkChainRead_full_100k(b *testing.B) {
- benchReadChain(b, true, 100000)
-}
-func BenchmarkChainRead_header_500k(b *testing.B) {
- benchReadChain(b, false, 500000)
-}
-func BenchmarkChainRead_full_500k(b *testing.B) {
- benchReadChain(b, true, 500000)
-}
-func BenchmarkChainWrite_header_10k(b *testing.B) {
- benchWriteChain(b, false, 10000)
-}
-func BenchmarkChainWrite_full_10k(b *testing.B) {
- benchWriteChain(b, true, 10000)
-}
-func BenchmarkChainWrite_header_100k(b *testing.B) {
- benchWriteChain(b, false, 100000)
-}
-func BenchmarkChainWrite_full_100k(b *testing.B) {
- benchWriteChain(b, true, 100000)
-}
-func BenchmarkChainWrite_header_500k(b *testing.B) {
- benchWriteChain(b, false, 500000)
-}
-func BenchmarkChainWrite_full_500k(b *testing.B) {
- benchWriteChain(b, true, 500000)
-}
-
-// makeChainForBench writes a given number of headers or empty blocks/receipts
-// into a database.
-func makeChainForBench(db ethdb.Database, genesis *Genesis, full bool, count uint64) {
- var hash common.Hash
- for n := uint64(0); n < count; n++ {
- header := &types.Header{
- Coinbase: common.Address{},
- Number: big.NewInt(int64(n)),
- ParentHash: hash,
- Difficulty: big.NewInt(1),
- UncleHash: types.EmptyUncleHash,
- TxHash: types.EmptyTxsHash,
- ReceiptHash: types.EmptyReceiptsHash,
- }
- if n == 0 {
- header = genesis.ToBlock().Header()
- }
- hash = header.Hash()
-
- rawdb.WriteHeader(db, header)
- rawdb.WriteCanonicalHash(db, hash, n)
- rawdb.WriteTd(db, hash, n, big.NewInt(int64(n+1)))
-
- if n == 0 {
- rawdb.WriteChainConfig(db, hash, genesis.Config)
- }
- rawdb.WriteHeadHeaderHash(db, hash)
-
- if full || n == 0 {
- block := types.NewBlockWithHeader(header)
- rawdb.WriteBody(db, hash, n, block.Body())
- rawdb.WriteReceipts(db, hash, n, nil)
- rawdb.WriteHeadBlockHash(db, hash)
- }
- }
-}
-
-func benchWriteChain(b *testing.B, full bool, count uint64) {
- genesis := &Genesis{Config: params.AllEthashProtocolChanges}
- for i := 0; i < b.N; i++ {
- dir := b.TempDir()
- db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false)
- if err != nil {
- b.Fatalf("error opening database at %v: %v", dir, err)
- }
- makeChainForBench(db, genesis, full, count)
- db.Close()
- }
-}
-
-func benchReadChain(b *testing.B, full bool, count uint64) {
- dir := b.TempDir()
-
- db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false)
- if err != nil {
- b.Fatalf("error opening database at %v: %v", dir, err)
- }
- genesis := &Genesis{Config: params.AllEthashProtocolChanges}
- makeChainForBench(db, genesis, full, count)
- db.Close()
- cacheConfig := *defaultCacheConfig
- cacheConfig.TrieDirtyDisabled = true
-
- b.ReportAllocs()
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false)
- if err != nil {
- b.Fatalf("error opening database at %v: %v", dir, err)
- }
- chain, err := NewBlockChain(db, &cacheConfig, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- if err != nil {
- b.Fatalf("error creating chain: %v", err)
- }
-
- for n := uint64(0); n < count; n++ {
- header := chain.GetHeaderByNumber(n)
- if full {
- hash := header.Hash()
- rawdb.ReadBody(db, hash, n)
- rawdb.ReadReceipts(db, hash, n, header.Time, chain.Config())
- }
- }
- chain.Stop()
- db.Close()
- }
-}
diff --git a/core/vote/vote_pool_test.go b/core/vote/vote_pool_test.go
index bb8374e90f..b94e73c797 100644
--- a/core/vote/vote_pool_test.go
+++ b/core/vote/vote_pool_test.go
@@ -17,12 +17,10 @@
package vote
import (
- "container/heap"
"context"
"encoding/json"
"errors"
"fmt"
- "math/big"
"os"
"path/filepath"
"testing"
@@ -37,15 +35,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/event"
- "github.com/ethereum/go-ethereum/params"
)
var (
@@ -133,273 +125,6 @@ func (journal *VoteJournal) verifyJournal(size, lastLatestVoteNumber int) bool {
return false
}
-func TestValidVotePool(t *testing.T) {
- testVotePool(t, true)
-}
-
-func TestInvalidVotePool(t *testing.T) {
- testVotePool(t, false)
-}
-
-func testVotePool(t *testing.T, isValidRules bool) {
- walletPasswordDir, walletDir := setUpKeyManager(t)
-
- genesis := &core.Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}},
- }
-
- mux := new(event.TypeMux)
- db := rawdb.NewMemoryDatabase()
- chain, _ := core.NewBlockChain(db, nil, genesis, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
-
- var mockEngine consensus.PoSA
- if isValidRules {
- mockEngine = &mockPOSA{}
- } else {
- mockEngine = &mockInvalidPOSA{}
- }
-
- // Create vote pool
- votePool := NewVotePool(chain, mockEngine)
-
- // Create vote manager
- // Create a temporary file for the votes journal
- file, err := os.CreateTemp("", "")
- if err != nil {
- t.Fatalf("failed to create temporary file path: %v", err)
- }
- journal := file.Name()
- defer os.Remove(journal)
-
- // Clean up the temporary file; we only need the path for now
- file.Close()
- os.Remove(journal)
-
- voteManager, err := NewVoteManager(newTestBackend(), chain, votePool, journal, walletPasswordDir, walletDir, mockEngine)
- if err != nil {
- t.Fatalf("failed to create vote managers")
- }
-
- voteJournal := voteManager.journal
-
- // Send the done event of downloader
- time.Sleep(10 * time.Millisecond)
- mux.Post(downloader.DoneEvent{})
-
- bs, _ := core.GenerateChain(params.TestChainConfig, chain.Genesis(), ethash.NewFaker(), db, 1, nil)
- if _, err := chain.InsertChain(bs); err != nil {
- panic(err)
- }
- for i := 0; i < 10+blocksNumberSinceMining; i++ {
- bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil)
- if _, err := chain.InsertChain(bs); err != nil {
- panic(err)
- }
- }
-
- if !isValidRules {
- if votePool.verifyStructureSizeOfVotePool(11, 11, 0, 11, 0) {
- t.Fatalf("put vote failed")
- }
- return
- }
-
- if !votePool.verifyStructureSizeOfVotePool(11, 11, 0, 11, 0) {
- t.Fatalf("put vote failed")
- }
-
- // Verify if votesPq is min heap
- votesPq := votePool.curVotesPq
- pqBuffer := make([]*types.VoteData, 0)
- lastVotedBlockNumber := uint64(0)
- for votesPq.Len() > 0 {
- voteData := heap.Pop(votesPq).(*types.VoteData)
- if voteData.TargetNumber < lastVotedBlockNumber {
- t.Fatalf("votesPq verification failed")
- }
- lastVotedBlockNumber = voteData.TargetNumber
- pqBuffer = append(pqBuffer, voteData)
- }
- for _, voteData := range pqBuffer {
- heap.Push(votesPq, voteData)
- }
-
- // Verify journal
- if !voteJournal.verifyJournal(11, 11) {
- t.Fatalf("journal failed")
- }
-
- bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil)
- if _, err := chain.InsertChain(bs); err != nil {
- panic(err)
- }
-
- if !votePool.verifyStructureSizeOfVotePool(12, 12, 0, 12, 0) {
- t.Fatalf("put vote failed")
- }
-
- // Verify journal
- if !voteJournal.verifyJournal(12, 12) {
- t.Fatalf("journal failed")
- }
-
- for i := 0; i < 256; i++ {
- bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil)
- if _, err := chain.InsertChain(bs); err != nil {
- panic(err)
- }
- }
-
- // Verify journal
- if !voteJournal.verifyJournal(268, 268) {
- t.Fatalf("journal failed")
- }
-
- // currently chain size is 268, and votePool should be pruned, so vote pool size should be 256!
- if !votePool.verifyStructureSizeOfVotePool(256, 256, 0, 256, 0) {
- t.Fatalf("put vote failed")
- }
-
- // Test an invalid vote whose number is larger than latestHeader + 13
- invalidVote := &types.VoteEnvelope{
- Data: &types.VoteData{
- TargetNumber: 1000,
- },
- }
- voteManager.pool.PutVote(invalidVote)
-
- if !votePool.verifyStructureSizeOfVotePool(256, 256, 0, 256, 0) {
- t.Fatalf("put vote failed")
- }
-
- votes := votePool.GetVotes()
- if len(votes) != 256 {
- t.Fatalf("get votes failed")
- }
-
- // Verify journal
- if !voteJournal.verifyJournal(268, 268) {
- t.Fatalf("journal failed")
- }
-
- // Test the future-votes scenario: vote numbers within latestBlockHeader ~ latestBlockHeader + 13
- futureVote := &types.VoteEnvelope{
- Data: &types.VoteData{
- TargetNumber: 279,
- },
- }
- if err := voteManager.signer.SignVote(futureVote); err != nil {
- t.Fatalf("sign vote failed")
- }
- voteManager.pool.PutVote(futureVote)
-
- if !votePool.verifyStructureSizeOfVotePool(257, 256, 1, 256, 1) {
- t.Fatalf("put vote failed")
- }
-
- // Verify journal
- if !voteJournal.verifyJournal(268, 268) {
- t.Fatalf("journal failed")
- }
-
- // Test the duplicate vote case; it shouldn't be put into the vote pool
- duplicateVote := &types.VoteEnvelope{
- Data: &types.VoteData{
- TargetNumber: 279,
- },
- }
- if err := voteManager.signer.SignVote(duplicateVote); err != nil {
- t.Fatalf("sign vote failed")
- }
- voteManager.pool.PutVote(duplicateVote)
-
- if !votePool.verifyStructureSizeOfVotePool(257, 256, 1, 256, 1) {
- t.Fatalf("put vote failed")
- }
-
- // Verify journal
- if !voteJournal.verifyJournal(268, 268) {
- t.Fatalf("journal failed")
- }
-
- // Test that future votes larger than latestBlockNumber + 13 are rejected
- futureVote = &types.VoteEnvelope{
- Data: &types.VoteData{
- TargetNumber: 282,
- TargetHash: common.Hash{},
- },
- }
- voteManager.pool.PutVote(futureVote)
- if !votePool.verifyStructureSizeOfVotePool(257, 256, 1, 256, 1) {
- t.Fatalf("put vote failed")
- }
-
- // Test transferring votes from future to cur; the latest block header is #288 after the following generation.
- // The vote for block number 279 above had no blockHash; it is assigned below as well.
- curNumber := 268
- var futureBlockHash common.Hash
- for i := 0; i < 20; i++ {
- bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil)
- curNumber += 1
- if curNumber == 279 {
- futureBlockHash = bs[0].Hash()
- futureVotesMap := votePool.futureVotes
- voteBox := futureVotesMap[common.Hash{}]
- futureVotesMap[futureBlockHash] = voteBox
- delete(futureVotesMap, common.Hash{})
- futureVotesPq := votePool.futureVotesPq
- futureVotesPq.Peek().TargetHash = futureBlockHash
- }
- if _, err := chain.InsertChain(bs); err != nil {
- panic(err)
- }
- }
-
- for i := 0; i < timeThreshold; i++ {
- time.Sleep(1 * time.Second)
- _, ok := votePool.curVotes[futureBlockHash]
- if ok && len(votePool.curVotes[futureBlockHash].voteMessages) == 2 {
- break
- }
- }
- if votePool.curVotes[futureBlockHash] == nil || len(votePool.curVotes[futureBlockHash].voteMessages) != 2 {
- t.Fatalf("transfer vote failed")
- }
-
- // The pruner keeps the votePool within latestBlockHeader-255 ~ latestBlockHeader, so the final size should be 256!
- if !votePool.verifyStructureSizeOfVotePool(257, 256, 0, 256, 0) {
- t.Fatalf("put vote failed")
- }
-
- // Verify journal
- if !voteJournal.verifyJournal(288, 288) {
- t.Fatalf("journal failed")
- }
-
- for i := 0; i < 224; i++ {
- bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil)
- if _, err := chain.InsertChain(bs); err != nil {
- panic(err)
- }
- }
-
- // Verify journal
- if !voteJournal.verifyJournal(512, 512) {
- t.Fatalf("journal failed")
- }
-
- bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil)
- if _, err := chain.InsertChain(bs); err != nil {
- panic(err)
- }
-
- // Verify that the journal is no longer than 512
- if !voteJournal.verifyJournal(512, 513) {
- t.Fatalf("journal failed")
- }
-}
-
func setUpKeyManager(t *testing.T) (string, string) {
walletDir := filepath.Join(t.TempDir(), "wallet")
opts := []accounts.Option{}
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
deleted file mode 100644
index 3c113b9134..0000000000
--- a/eth/downloader/downloader_test.go
+++ /dev/null
@@ -1,1335 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package downloader
-
-import (
- "fmt"
- "math/big"
- "os"
- "strings"
- "sync"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/eth/protocols/eth"
- "github.com/ethereum/go-ethereum/eth/protocols/snap"
- "github.com/ethereum/go-ethereum/event"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/trie"
-)
-
-// downloadTester is a test simulator for mocking out local block chain.
-type downloadTester struct {
- freezer string
- chain *core.BlockChain
- downloader *Downloader
-
- peers map[string]*downloadTesterPeer
- lock sync.RWMutex
-}
-
-// newTester creates a new downloader test mocker.
-func newTester(t *testing.T) *downloadTester {
- return newTesterWithNotification(t, nil)
-}
-
-// newTesterWithNotification creates a new downloader test mocker with a success callback.
-func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
- freezer := t.TempDir()
- db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false, false, false, false, false)
- if err != nil {
- panic(err)
- }
- t.Cleanup(func() {
- db.Close()
- })
- gspec := &core.Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- chain, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- if err != nil {
- panic(err)
- }
- tester := &downloadTester{
- freezer: freezer,
- chain: chain,
- peers: make(map[string]*downloadTesterPeer),
- }
- tester.downloader = New(db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, success)
- return tester
-}
-
-// terminate aborts any operations on the embedded downloader and releases all
-// held resources.
-func (dl *downloadTester) terminate() {
- dl.downloader.Terminate()
- dl.chain.Stop()
-
- os.RemoveAll(dl.freezer)
-}
-
-// sync starts synchronizing with a remote peer, blocking until it completes.
-func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
- head := dl.peers[id].chain.CurrentBlock()
- if td == nil {
- // If no particular TD was requested, load from the peer's blockchain
- td = dl.peers[id].chain.GetTd(head.Hash(), head.Number.Uint64())
- }
- // Synchronise with the chosen peer and ensure proper cleanup afterwards
- err := dl.downloader.synchronise(id, head.Hash(), td, nil, mode, false, nil)
- select {
- case <-dl.downloader.cancelCh:
- // Ok, downloader fully cancelled after sync cycle
- default:
- // Downloader is still accepting packets, can block a peer up
- panic("downloader active post sync cycle") // panic will be caught by tester
- }
- return err
-}
-
-// newPeer registers a new block download source into the downloader.
-func (dl *downloadTester) newPeer(id string, version uint, blocks []*types.Block) *downloadTesterPeer {
- dl.lock.Lock()
- defer dl.lock.Unlock()
-
- peer := &downloadTesterPeer{
- dl: dl,
- id: id,
- chain: newTestBlockchain(blocks),
- withholdHeaders: make(map[common.Hash]struct{}),
- }
- dl.peers[id] = peer
-
- if err := dl.downloader.RegisterPeer(id, version, peer); err != nil {
- panic(err)
- }
- if err := dl.downloader.SnapSyncer.Register(peer); err != nil {
- panic(err)
- }
- return peer
-}
-
-// dropPeer simulates a hard peer removal from the connection pool.
-func (dl *downloadTester) dropPeer(id string) {
- dl.lock.Lock()
- defer dl.lock.Unlock()
-
- delete(dl.peers, id)
- dl.downloader.SnapSyncer.Unregister(id)
- dl.downloader.UnregisterPeer(id)
-}
-
-type downloadTesterPeer struct {
- dl *downloadTester
- id string
- chain *core.BlockChain
-
- withholdHeaders map[common.Hash]struct{}
-}
-
-func (dlp *downloadTesterPeer) MarkLagging() {
-}
-
-// Head constructs a function to retrieve a peer's current head hash
-// and total difficulty.
-func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
- head := dlp.chain.CurrentBlock()
- return head.Hash(), dlp.chain.GetTd(head.Hash(), head.Number.Uint64())
-}
-
-func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header {
- var headers = make([]*types.Header, len(rlpdata))
- for i, data := range rlpdata {
- var h types.Header
- if err := rlp.DecodeBytes(data, &h); err != nil {
- panic(err)
- }
- headers[i] = &h
- }
- return headers
-}
-
-// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed
-// origin; associated with a particular peer in the download tester. The returned
-// function can be used to retrieve batches of headers from the particular peer.
-func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
- // Service the header query via the live handler code
- rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersRequest{
- Origin: eth.HashOrNumber{
- Hash: origin,
- },
- Amount: uint64(amount),
- Skip: uint64(skip),
- Reverse: reverse,
- }, nil)
- headers := unmarshalRlpHeaders(rlpHeaders)
- // If a malicious peer is simulated withholding headers, delete them
- for hash := range dlp.withholdHeaders {
- for i, header := range headers {
- if header.Hash() == hash {
- headers = append(headers[:i], headers[i+1:]...)
- break
- }
- }
- }
- hashes := make([]common.Hash, len(headers))
- for i, header := range headers {
- hashes[i] = header.Hash()
- }
- // Deliver the headers to the downloader
- req := &eth.Request{
- Peer: dlp.id,
- }
- res := &eth.Response{
- Req: req,
- Res: (*eth.BlockHeadersRequest)(&headers),
- Meta: hashes,
- Time: 1,
- Done: make(chan error, 1), // Ignore the returned status
- }
- go func() {
- sink <- res
- }()
- return req, nil
-}
-
-// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
-// origin; associated with a particular peer in the download tester. The returned
-// function can be used to retrieve batches of headers from the particular peer.
-func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
- // Service the header query via the live handler code
- rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersRequest{
- Origin: eth.HashOrNumber{
- Number: origin,
- },
- Amount: uint64(amount),
- Skip: uint64(skip),
- Reverse: reverse,
- }, nil)
- headers := unmarshalRlpHeaders(rlpHeaders)
- // If a malicious peer is simulated withholding headers, delete them
- for hash := range dlp.withholdHeaders {
- for i, header := range headers {
- if header.Hash() == hash {
- headers = append(headers[:i], headers[i+1:]...)
- break
- }
- }
- }
- hashes := make([]common.Hash, len(headers))
- for i, header := range headers {
- hashes[i] = header.Hash()
- }
- // Deliver the headers to the downloader
- req := &eth.Request{
- Peer: dlp.id,
- }
- res := &eth.Response{
- Req: req,
- Res: (*eth.BlockHeadersRequest)(&headers),
- Meta: hashes,
- Time: 1,
- Done: make(chan error, 1), // Ignore the returned status
- }
- go func() {
- sink <- res
- }()
- return req, nil
-}
-
-// RequestBodies constructs a getBlockBodies method associated with a particular
-// peer in the download tester. The returned function can be used to retrieve
-// batches of block bodies from the particularly requested peer.
-func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) {
- blobs := eth.ServiceGetBlockBodiesQuery(dlp.chain, hashes)
-
- bodies := make([]*eth.BlockBody, len(blobs))
- for i, blob := range blobs {
- bodies[i] = new(eth.BlockBody)
- rlp.DecodeBytes(blob, bodies[i])
- }
- var (
- txsHashes = make([]common.Hash, len(bodies))
- uncleHashes = make([]common.Hash, len(bodies))
- withdrawalHashes = make([]common.Hash, len(bodies))
- )
- hasher := trie.NewStackTrie(nil)
- for i, body := range bodies {
- txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher)
- uncleHashes[i] = types.CalcUncleHash(body.Uncles)
- }
- req := &eth.Request{
- Peer: dlp.id,
- }
- res := &eth.Response{
- Req: req,
- Res: (*eth.BlockBodiesResponse)(&bodies),
- Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes},
- Time: 1,
- Done: make(chan error, 1), // Ignore the returned status
- }
- go func() {
- sink <- res
- }()
- return req, nil
-}
-
-// RequestReceipts constructs a getReceipts method associated with a particular
-// peer in the download tester. The returned function can be used to retrieve
-// batches of block receipts from the particularly requested peer.
-func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) {
- blobs := eth.ServiceGetReceiptsQuery(dlp.chain, hashes)
-
- receipts := make([][]*types.Receipt, len(blobs))
- for i, blob := range blobs {
- rlp.DecodeBytes(blob, &receipts[i])
- }
- hasher := trie.NewStackTrie(nil)
- hashes = make([]common.Hash, len(receipts))
- for i, receipt := range receipts {
- hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher)
- }
- req := &eth.Request{
- Peer: dlp.id,
- }
- res := &eth.Response{
- Req: req,
- Res: (*eth.ReceiptsResponse)(&receipts),
- Meta: hashes,
- Time: 1,
- Done: make(chan error, 1), // Ignore the returned status
- }
- go func() {
- sink <- res
- }()
- return req, nil
-}
-
-// ID retrieves the peer's unique identifier.
-func (dlp *downloadTesterPeer) ID() string {
- return dlp.id
-}
-
-// RequestAccountRange fetches a batch of accounts rooted in a specific account
-// trie, starting with the origin.
-func (dlp *downloadTesterPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
- // Create the request and service it
- req := &snap.GetAccountRangePacket{
- ID: id,
- Root: root,
- Origin: origin,
- Limit: limit,
- Bytes: bytes,
- }
- slimaccs, proofs := snap.ServiceGetAccountRangeQuery(dlp.chain, req)
-
- // We need to convert to non-slim format, delegate to the packet code
- res := &snap.AccountRangePacket{
- ID: id,
- Accounts: slimaccs,
- Proof: proofs,
- }
- hashes, accounts, _ := res.Unpack()
-
- go dlp.dl.downloader.SnapSyncer.OnAccounts(dlp, id, hashes, accounts, proofs)
- return nil
-}
-
-// RequestStorageRanges fetches a batch of storage slots belonging to one or
-// more accounts. If slots from only one account is requested, an origin marker
-// may also be used to retrieve from there.
-func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
- // Create the request and service it
- req := &snap.GetStorageRangesPacket{
- ID: id,
- Accounts: accounts,
- Root: root,
- Origin: origin,
- Limit: limit,
- Bytes: bytes,
- }
- storage, proofs := snap.ServiceGetStorageRangesQuery(dlp.chain, req)
-
- // We need to demultiplex the response; delegate to the packet code
- res := &snap.StorageRangesPacket{
- ID: id,
- Slots: storage,
- Proof: proofs,
- }
- hashes, slots := res.Unpack()
-
- go dlp.dl.downloader.SnapSyncer.OnStorage(dlp, id, hashes, slots, proofs)
- return nil
-}
-
-// RequestByteCodes fetches a batch of bytecodes by hash.
-func (dlp *downloadTesterPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
- req := &snap.GetByteCodesPacket{
- ID: id,
- Hashes: hashes,
- Bytes: bytes,
- }
- codes := snap.ServiceGetByteCodesQuery(dlp.chain, req)
- go dlp.dl.downloader.SnapSyncer.OnByteCodes(dlp, id, codes)
- return nil
-}
-
-// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
-// a specific state trie.
-func (dlp *downloadTesterPeer) RequestTrieNodes(id uint64, root common.Hash, paths []snap.TrieNodePathSet, bytes uint64) error {
- req := &snap.GetTrieNodesPacket{
- ID: id,
- Root: root,
- Paths: paths,
- Bytes: bytes,
- }
- nodes, _ := snap.ServiceGetTrieNodesQuery(dlp.chain, req, time.Now())
- go dlp.dl.downloader.SnapSyncer.OnTrieNodes(dlp, id, nodes)
- return nil
-}
-
-// Log retrieves the peer's own contextual logger.
-func (dlp *downloadTesterPeer) Log() log.Logger {
- return log.New("peer", dlp.id)
-}
-
-// assertOwnChain checks if the local chain contains the correct number of items
-// of the various chain components.
-func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
- // Mark this method as a helper to report errors at callsite, not in here
- t.Helper()
-
- headers, blocks, receipts := length, length, length
- if tester.downloader.getMode() == LightSync {
- blocks, receipts = 1, 1
- }
- if hs := int(tester.chain.CurrentHeader().Number.Uint64()) + 1; hs != headers {
- t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
- }
- if bs := int(tester.chain.CurrentBlock().Number.Uint64()) + 1; bs != blocks {
- t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
- }
- if rs := int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1; rs != receipts {
- t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
- }
-}
-
-func TestCanonicalSynchronisation68Full(t *testing.T) { testCanonSync(t, eth.ETH68, FullSync) }
-func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ETH68, SnapSync) }
-func TestCanonicalSynchronisation68Light(t *testing.T) { testCanonSync(t, eth.ETH68, LightSync) }
-
-func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
- tester := newTester(t)
- defer tester.terminate()
-
- // Create a small enough block chain to download
- chain := testChainBase.shorten(blockCacheMaxItems - 15)
- tester.newPeer("peer", protocol, chain.blocks[1:])
-
- // Synchronise with the peer and make sure all relevant data was retrieved
- if err := tester.sync("peer", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, len(chain.blocks))
-}
-
-// Tests that if a large batch of blocks are being downloaded, it is throttled
-// until the cached blocks are retrieved.
-func TestThrottling68Full(t *testing.T) { testThrottling(t, eth.ETH68, FullSync) }
-func TestThrottling68Snap(t *testing.T) { testThrottling(t, eth.ETH68, SnapSync) }
-
-func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
- tester := newTester(t)
- defer tester.terminate()
-
- // Create a long block chain to download and the tester
- targetBlocks := len(testChainBase.blocks) - 1
- tester.newPeer("peer", protocol, testChainBase.blocks[1:])
-
- // Wrap the importer to allow stepping
- var blocked atomic.Uint32
- proceed := make(chan struct{})
- tester.downloader.chainInsertHook = func(results []*fetchResult, _ chan struct{}) {
- blocked.Store(uint32(len(results)))
- <-proceed
- }
- // Start a synchronisation concurrently
- errc := make(chan error, 1)
- go func() {
- errc <- tester.sync("peer", nil, mode)
- }()
- // Iteratively take some blocks, always checking the retrieval count
- for {
- // Check the retrieval count synchronously (! reason for this ugly block)
- tester.lock.RLock()
- retrieved := int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
- tester.lock.RUnlock()
- if retrieved >= targetBlocks+1 {
- break
- }
- // Wait a bit for sync to throttle itself
- var cached, frozen int
- for start := time.Now(); time.Since(start) < 3*time.Second; {
- time.Sleep(25 * time.Millisecond)
-
- tester.lock.Lock()
- tester.downloader.queue.lock.Lock()
- tester.downloader.queue.resultCache.lock.Lock()
- {
- cached = tester.downloader.queue.resultCache.countCompleted()
- frozen = int(blocked.Load())
- retrieved = int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
- }
- tester.downloader.queue.resultCache.lock.Unlock()
- tester.downloader.queue.lock.Unlock()
- tester.lock.Unlock()
-
- if cached == blockCacheMaxItems ||
- cached == blockCacheMaxItems-reorgProtHeaderDelay ||
- retrieved+cached+frozen == targetBlocks+1 ||
- retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
- break
- }
- }
- // Make sure we filled up the cache, then exhaust it
- time.Sleep(25 * time.Millisecond) // give it a chance to screw up
- tester.lock.RLock()
- retrieved = int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
- tester.lock.RUnlock()
- if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
- t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
- }
- // Permit the blocked blocks to import
- if blocked.Load() > 0 {
- blocked.Store(uint32(0))
- proceed <- struct{}{}
- }
- }
- // Check that we haven't pulled more blocks than available
- assertOwnChain(t, tester, targetBlocks+1)
- if err := <-errc; err != nil {
- t.Fatalf("block synchronization failed: %v", err)
- }
-}
-
-// Tests that simple synchronization against a forked chain works correctly. In
-// this test common ancestor lookup should *not* be short circuited, and a full
-// binary search should be executed.
-func TestForkedSync68Full(t *testing.T) { testForkedSync(t, eth.ETH68, FullSync) }
-func TestForkedSync68Snap(t *testing.T) { testForkedSync(t, eth.ETH68, SnapSync) }
-func TestForkedSync68Light(t *testing.T) { testForkedSync(t, eth.ETH68, LightSync) }
-
-func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
- tester := newTester(t)
- defer tester.terminate()
-
- chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80)
- chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + 81)
- tester.newPeer("fork A", protocol, chainA.blocks[1:])
- tester.newPeer("fork B", protocol, chainB.blocks[1:])
- // Synchronise with the peer and make sure all blocks were retrieved
- if err := tester.sync("fork A", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, len(chainA.blocks))
-
- // Synchronise with the second peer and make sure that fork is pulled too
- if err := tester.sync("fork B", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, len(chainB.blocks))
-}
-
-// Tests that synchronising against a much shorter but much heavier fork works
-// correctly and is not dropped.
-func TestHeavyForkedSync68Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, FullSync) }
-func TestHeavyForkedSync68Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, SnapSync) }
-func TestHeavyForkedSync68Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, LightSync) }
-
-func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
- tester := newTester(t)
- defer tester.terminate()
-
- chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80)
- chainB := testChainForkHeavy.shorten(len(testChainBase.blocks) + 79)
- tester.newPeer("light", protocol, chainA.blocks[1:])
- tester.newPeer("heavy", protocol, chainB.blocks[1:])
-
- // Synchronise with the peer and make sure all blocks were retrieved
- if err := tester.sync("light", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, len(chainA.blocks))
-
- // Synchronise with the second peer and make sure that fork is pulled too
- if err := tester.sync("heavy", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, len(chainB.blocks))
-}
-
-// Tests that chain forks are contained within a certain interval of the current
-// chain head, ensuring that malicious peers cannot waste resources by feeding
-// long dead chains.
-func TestBoundedForkedSync68Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, FullSync) }
-func TestBoundedForkedSync68Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, SnapSync) }
-func TestBoundedForkedSync68Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, LightSync) }
-
-func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
- tester := newTester(t)
- defer tester.terminate()
-
- chainA := testChainForkLightA
- chainB := testChainForkLightB
- tester.newPeer("original", protocol, chainA.blocks[1:])
- tester.newPeer("rewriter", protocol, chainB.blocks[1:])
-
- // Synchronise with the peer and make sure all blocks were retrieved
- if err := tester.sync("original", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, len(chainA.blocks))
-
- // Synchronise with the second peer and ensure that the fork is rejected for being too old
- if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
- t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
- }
-}
-
-// Tests that chain forks are contained within a certain interval of the current
-// chain head for short but heavy forks too. These are a bit special because they
-// take different ancestor lookup paths.
-func TestBoundedHeavyForkedSync68Full(t *testing.T) {
- testBoundedHeavyForkedSync(t, eth.ETH68, FullSync)
-}
-func TestBoundedHeavyForkedSync68Snap(t *testing.T) {
- testBoundedHeavyForkedSync(t, eth.ETH68, SnapSync)
-}
-func TestBoundedHeavyForkedSync68Light(t *testing.T) {
- testBoundedHeavyForkedSync(t, eth.ETH68, LightSync)
-}
-
-func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
- tester := newTester(t)
- defer tester.terminate()
-
- // Create a long enough forked chain
- chainA := testChainForkLightA
- chainB := testChainForkHeavy
- tester.newPeer("original", protocol, chainA.blocks[1:])
-
- // Synchronise with the peer and make sure all blocks were retrieved
- if err := tester.sync("original", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, len(chainA.blocks))
-
- tester.newPeer("heavy-rewriter", protocol, chainB.blocks[1:])
- // Synchronise with the second peer and ensure that the fork is rejected for being too old
- if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
- t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
- }
-}
-
-// Tests that a canceled download wipes all previously accumulated state.
-func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) }
-func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) }
-func TestCancel68Light(t *testing.T) { testCancel(t, eth.ETH68, LightSync) }
-
-func testCancel(t *testing.T, protocol uint, mode SyncMode) {
- tester := newTester(t)
- defer tester.terminate()
-
- chain := testChainBase.shorten(MaxHeaderFetch)
- tester.newPeer("peer", protocol, chain.blocks[1:])
-
- // Make sure canceling works with a pristine downloader
- tester.downloader.Cancel()
- if !tester.downloader.queue.Idle() {
- t.Errorf("download queue not idle")
- }
- // Synchronise with the peer, but cancel afterwards
- if err := tester.sync("peer", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- tester.downloader.Cancel()
- if !tester.downloader.queue.Idle() {
- t.Errorf("download queue not idle")
- }
-}
-
-// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
-func TestMultiSynchronisation68Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, FullSync) }
-func TestMultiSynchronisation68Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, SnapSync) }
-func TestMultiSynchronisation68Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, LightSync) }
-
-func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
- tester := newTester(t)
- defer tester.terminate()
-
- // Create various peers with various parts of the chain
- targetPeers := 8
- chain := testChainBase.shorten(targetPeers * 100)
-
- for i := 0; i < targetPeers; i++ {
- id := fmt.Sprintf("peer #%d", i)
- tester.newPeer(id, protocol, chain.shorten(len(chain.blocks) / (i + 1)).blocks[1:])
- }
- if err := tester.sync("peer #0", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, len(chain.blocks))
-}
-
-// Tests that synchronisations behave well in multi-version protocol environments
-// and do not wreak havoc on other nodes in the network.
-func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) }
-func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH68, SnapSync) }
-func TestMultiProtoSynchronisation68Light(t *testing.T) { testMultiProtoSync(t, eth.ETH68, LightSync) }
-
-func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
- tester := newTester(t)
- defer tester.terminate()
-
- // Create a small enough block chain to download
- chain := testChainBase.shorten(blockCacheMaxItems - 15)
-
- // Create peers of every type
- tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:])
-
- // Synchronise with the requested peer and make sure all blocks were retrieved
- if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, len(chain.blocks))
-
- // Check that no peers have been dropped off
- for _, version := range []int{68} {
- peer := fmt.Sprintf("peer %d", version)
- if _, ok := tester.peers[peer]; !ok {
- t.Errorf("%s dropped", peer)
- }
- }
-}
-
-// Tests that if a block is empty (e.g. header only), no body request should be
-// made, and instead the header should be assembled into a whole block by itself.
-func TestEmptyShortCircuit68Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, FullSync) }
-func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, SnapSync) }
-func TestEmptyShortCircuit68Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, LightSync) }
-
-func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
- tester := newTester(t)
- defer tester.terminate()
-
- // Create a block chain to download
- chain := testChainBase
- tester.newPeer("peer", protocol, chain.blocks[1:])
-
- // Instrument the downloader to signal body and receipt requests
- var bodiesHave, receiptsHave atomic.Int32
- tester.downloader.bodyFetchHook = func(headers []*types.Header) {
- bodiesHave.Add(int32(len(headers)))
- }
- tester.downloader.receiptFetchHook = func(headers []*types.Header) {
- receiptsHave.Add(int32(len(headers)))
- }
- // Synchronise with the peer and make sure all blocks were retrieved
- if err := tester.sync("peer", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, len(chain.blocks))
-
- // Validate the number of block bodies that should have been requested
- bodiesNeeded, receiptsNeeded := 0, 0
- for _, block := range chain.blocks[1:] {
- if mode != LightSync && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
- bodiesNeeded++
- }
- }
- for _, block := range chain.blocks[1:] {
- if mode == SnapSync && len(block.Transactions()) > 0 {
- receiptsNeeded++
- }
- }
- if int(bodiesHave.Load()) != bodiesNeeded {
- t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave.Load(), bodiesNeeded)
- }
- if int(receiptsHave.Load()) != receiptsNeeded {
- t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave.Load(), receiptsNeeded)
- }
-}
-
-// Tests that headers are enqueued continuously, preventing malicious nodes from
-// stalling the downloader by feeding gapped header chains.
-func TestMissingHeaderAttack68Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, FullSync) }
-func TestMissingHeaderAttack68Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, SnapSync) }
-func TestMissingHeaderAttack68Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, LightSync) }
-
-func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
- tester := newTester(t)
- defer tester.terminate()
-
- chain := testChainBase.shorten(blockCacheMaxItems - 15)
-
- attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
- attacker.withholdHeaders[chain.blocks[len(chain.blocks)/2-1].Hash()] = struct{}{}
-
- if err := tester.sync("attack", nil, mode); err == nil {
- t.Fatalf("succeeded attacker synchronisation")
- }
- // Synchronise with the valid peer and make sure sync succeeds
- tester.newPeer("valid", protocol, chain.blocks[1:])
- if err := tester.sync("valid", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, len(chain.blocks))
-}
-
-// Tests that if requested headers are shifted (i.e. first is missing), the queue
-// detects the invalid numbering.
-func TestShiftedHeaderAttack68Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, FullSync) }
-func TestShiftedHeaderAttack68Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, SnapSync) }
-func TestShiftedHeaderAttack68Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, LightSync) }
-
-func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
- tester := newTester(t)
- defer tester.terminate()
-
- chain := testChainBase.shorten(blockCacheMaxItems - 15)
-
- // Attempt a full sync with an attacker feeding shifted headers
- attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
- attacker.withholdHeaders[chain.blocks[1].Hash()] = struct{}{}
-
- if err := tester.sync("attack", nil, mode); err == nil {
- t.Fatalf("succeeded attacker synchronisation")
- }
- // Synchronise with the valid peer and make sure sync succeeds
- tester.newPeer("valid", protocol, chain.blocks[1:])
- if err := tester.sync("valid", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, len(chain.blocks))
-}
-
-// Tests that a peer advertising a high TD doesn't get to stall the downloader
-// afterwards by not sending any useful hashes.
-func TestHighTDStarvationAttack68Full(t *testing.T) {
- testHighTDStarvationAttack(t, eth.ETH68, FullSync)
-}
-func TestHighTDStarvationAttack68Snap(t *testing.T) {
- testHighTDStarvationAttack(t, eth.ETH68, SnapSync)
-}
-func TestHighTDStarvationAttack68Light(t *testing.T) {
- testHighTDStarvationAttack(t, eth.ETH68, LightSync)
-}
-
-func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
- tester := newTester(t)
- defer tester.terminate()
-
- chain := testChainBase.shorten(1)
- tester.newPeer("attack", protocol, chain.blocks[1:])
- if err := tester.sync("attack", big.NewInt(1000000), mode); err != errLaggingPeer {
- t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errLaggingPeer)
- }
-}
-
-// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
-func TestBlockHeaderAttackerDropping68(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH68) }
-
-func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
- // Define the disconnection requirement for individual hash fetch errors
- tests := []struct {
- result error
- drop bool
- }{
- {nil, false}, // Sync succeeded, all is well
- {errBusy, false}, // Sync is already in progress, no problem
- {errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop
- {errBadPeer, true}, // Peer was deemed bad for some reason, drop it
- {errStallingPeer, true}, // Peer was detected to be stalling, drop it
- {errUnsyncedPeer, true}, // Peer was detected to be unsynced, drop it
- {errNoPeers, false}, // No peers to download from, soft race, no issue
- {errTimeout, true}, // No hashes received in due time, drop the peer
- {errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end
- {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
- {errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter
- {errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop
- {errInvalidBody, false}, // A bad peer was detected, but not the sync origin
- {errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin
- {errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
- }
- // Run the tests and check disconnection status
- tester := newTester(t)
- defer tester.terminate()
- chain := testChainBase.shorten(1)
-
- for i, tt := range tests {
- // Register a new peer and ensure its presence
- id := fmt.Sprintf("test %d", i)
- tester.newPeer(id, protocol, chain.blocks[1:])
- if _, ok := tester.peers[id]; !ok {
- t.Fatalf("test %d: registered peer not found", i)
- }
- // Simulate a synchronisation and check the required result
- tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
-
- tester.downloader.LegacySync(id, tester.chain.Genesis().Hash(), big.NewInt(1000), nil, FullSync)
- if _, ok := tester.peers[id]; !ok != tt.drop {
- t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
- }
- }
-}
-
-// Tests that synchronisation progress (origin block number, current block number
-// and highest block number) is tracked and updated correctly.
-func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) }
-func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) }
-func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) }
-
-func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
- tester := newTester(t)
- defer tester.terminate()
-
- chain := testChainBase.shorten(blockCacheMaxItems - 15)
-
- // Set a sync init hook to catch progress changes
- starting := make(chan struct{})
- progress := make(chan struct{})
-
- tester.downloader.syncInitHook = func(origin, latest uint64) {
- starting <- struct{}{}
- <-progress
- }
- checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
-
- // Synchronise half the blocks and check initial progress
- tester.newPeer("peer-half", protocol, chain.shorten(len(chain.blocks) / 2).blocks[1:])
- pending := new(sync.WaitGroup)
- pending.Add(1)
-
- go func() {
- defer pending.Done()
- if err := tester.sync("peer-half", nil, mode); err != nil {
- panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
- }
- }()
- <-starting
- checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
- HighestBlock: uint64(len(chain.blocks)/2 - 1),
- })
- progress <- struct{}{}
- pending.Wait()
-
- // Synchronise all the blocks and check continuation progress
- tester.newPeer("peer-full", protocol, chain.blocks[1:])
- pending.Add(1)
- go func() {
- defer pending.Done()
- if err := tester.sync("peer-full", nil, mode); err != nil {
- panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
- }
- }()
- <-starting
- checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
- StartingBlock: uint64(len(chain.blocks)/2 - 1),
- CurrentBlock: uint64(len(chain.blocks)/2 - 1),
- HighestBlock: uint64(len(chain.blocks) - 1),
- })
-
- // Check final progress after successful sync
- progress <- struct{}{}
- pending.Wait()
- checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
- StartingBlock: uint64(len(chain.blocks)/2 - 1),
- CurrentBlock: uint64(len(chain.blocks) - 1),
- HighestBlock: uint64(len(chain.blocks) - 1),
- })
-}
-
-func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
- // Mark this method as a helper to report errors at callsite, not in here
- t.Helper()
-
- p := d.Progress()
- if p.StartingBlock != want.StartingBlock || p.CurrentBlock != want.CurrentBlock || p.HighestBlock != want.HighestBlock {
- t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
- }
-}
-
-// Tests that synchronisation progress (origin block number and highest block
-// number) is tracked and updated correctly in case of a fork (or manual head
-// reversion).
-func TestForkedSyncProgress68Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, FullSync) }
-func TestForkedSyncProgress68Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, SnapSync) }
-func TestForkedSyncProgress68Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, LightSync) }
-
-func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
- tester := newTester(t)
- defer tester.terminate()
-
- chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch)
- chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch)
-
- // Set a sync init hook to catch progress changes
- starting := make(chan struct{})
- progress := make(chan struct{})
-
- tester.downloader.syncInitHook = func(origin, latest uint64) {
- starting <- struct{}{}
- <-progress
- }
- checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
-
- // Synchronise with one of the forks and check progress
- tester.newPeer("fork A", protocol, chainA.blocks[1:])
- pending := new(sync.WaitGroup)
- pending.Add(1)
- go func() {
- defer pending.Done()
- if err := tester.sync("fork A", nil, mode); err != nil {
- panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
- }
- }()
- <-starting
-
- checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
- HighestBlock: uint64(len(chainA.blocks) - 1),
- })
- progress <- struct{}{}
- pending.Wait()
-
- // Simulate a successful sync above the fork
- tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
-
- // Synchronise with the second fork and check progress resets
- tester.newPeer("fork B", protocol, chainB.blocks[1:])
- pending.Add(1)
- go func() {
- defer pending.Done()
- if err := tester.sync("fork B", nil, mode); err != nil {
- panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
- }
- }()
- <-starting
- checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
- StartingBlock: uint64(len(testChainBase.blocks)) - 1,
- CurrentBlock: uint64(len(chainA.blocks) - 1),
- HighestBlock: uint64(len(chainB.blocks) - 1),
- })
-
- // Check final progress after successful sync
- progress <- struct{}{}
- pending.Wait()
- checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
- StartingBlock: uint64(len(testChainBase.blocks)) - 1,
- CurrentBlock: uint64(len(chainB.blocks) - 1),
- HighestBlock: uint64(len(chainB.blocks) - 1),
- })
-}
-
-// Tests that if synchronisation is aborted due to some failure, then the progress
-// origin is not updated in the next sync cycle, as it should be considered the
-// continuation of the previous sync and not a new instance.
-func TestFailedSyncProgress68Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, FullSync) }
-func TestFailedSyncProgress68Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, SnapSync) }
-func TestFailedSyncProgress68Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, LightSync) }
-
-func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
- tester := newTester(t)
- defer tester.terminate()
-
- chain := testChainBase.shorten(blockCacheMaxItems - 15)
-
- // Set a sync init hook to catch progress changes
- starting := make(chan struct{})
- progress := make(chan struct{})
-
- tester.downloader.syncInitHook = func(origin, latest uint64) {
- starting <- struct{}{}
- <-progress
- }
- checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
-
- // Attempt a full sync with a faulty peer
- missing := len(chain.blocks)/2 - 1
-
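- // The faulty peer withholds the header in the middle of the chain, so the sync fails partway through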
- faulter := tester.newPeer("faulty", protocol, chain.blocks[1:])
- faulter.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}
-
- pending := new(sync.WaitGroup)
- pending.Add(1)
- go func() {
- defer pending.Done()
- if err := tester.sync("faulty", nil, mode); err == nil {
- panic("succeeded faulty synchronisation")
- }
- }()
- <-starting
- checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
- HighestBlock: uint64(len(chain.blocks) - 1),
- })
- progress <- struct{}{}
- pending.Wait()
- afterFailedSync := tester.downloader.Progress()
-
- // Synchronise with a good peer and check that the progress origin remains the same
- // after a failure
- tester.newPeer("valid", protocol, chain.blocks[1:])
- pending.Add(1)
- go func() {
- defer pending.Done()
- if err := tester.sync("valid", nil, mode); err != nil {
- panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
- }
- }()
- <-starting
- checkProgress(t, tester.downloader, "completing", afterFailedSync)
-
- // Check final progress after successful sync
- progress <- struct{}{}
- pending.Wait()
- checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
- CurrentBlock: uint64(len(chain.blocks) - 1),
- HighestBlock: uint64(len(chain.blocks) - 1),
- })
-}
-
-// Tests that if an attacker fakes a chain height, after the attack is detected,
-// the progress height is successfully reduced at the next sync invocation.
-func TestFakedSyncProgress68Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, FullSync) }
-func TestFakedSyncProgress68Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, SnapSync) }
-func TestFakedSyncProgress68Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, LightSync) }
-
-func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
- tester := newTester(t)
- defer tester.terminate()
-
- chain := testChainBase.shorten(blockCacheMaxItems - 15)
-
- // Set a sync init hook to catch progress changes
- starting := make(chan struct{})
- progress := make(chan struct{})
- tester.downloader.syncInitHook = func(origin, latest uint64) {
- starting <- struct{}{}
- <-progress
- }
- checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
-
- // Create and sync with an attacker that promises a higher chain than available.
- attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
- numMissing := 5
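- // Withhold a few headers just below the advertised head so the promised chain can never be fully delivered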
- for i := len(chain.blocks) - 2; i > len(chain.blocks)-numMissing; i-- {
- attacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{}
- }
- pending := new(sync.WaitGroup)
- pending.Add(1)
- go func() {
- defer pending.Done()
- if err := tester.sync("attack", nil, mode); err == nil {
- panic("succeeded attacker synchronisation")
- }
- }()
- <-starting
- checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
- HighestBlock: uint64(len(chain.blocks) - 1),
- })
- progress <- struct{}{}
- pending.Wait()
- afterFailedSync := tester.downloader.Progress()
-
- // it is no longer valid to sync to a lagging peer
- laggingChain := chain.shorten(800 / 2)
- tester.newPeer("lagging", protocol, laggingChain.blocks[1:])
- pending.Add(1)
- go func() {
- defer pending.Done()
- if err := tester.sync("lagging", nil, mode); err != errLaggingPeer {
- panic(fmt.Sprintf("unexpected lagging synchronisation err:%v", err))
- }
- }()
- // lagging peer will return before syncInitHook, skip <-starting and progress <- struct{}{}
- checkProgress(t, tester.downloader, "lagging", ethereum.SyncProgress{
- CurrentBlock: afterFailedSync.CurrentBlock,
- HighestBlock: uint64(len(chain.blocks) - 1),
- })
- pending.Wait()
-
- // Synchronise with a good peer and check that the progress height has been increased to
- // the true value.
- validChain := chain.shorten(len(chain.blocks))
- tester.newPeer("valid", protocol, validChain.blocks[1:])
- pending.Add(1)
-
- go func() {
- defer pending.Done()
- if err := tester.sync("valid", nil, mode); err != nil {
- panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
- }
- }()
- <-starting
- checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
- CurrentBlock: afterFailedSync.CurrentBlock,
- HighestBlock: uint64(len(validChain.blocks) - 1),
- })
- // Check final progress after successful sync.
- progress <- struct{}{}
- pending.Wait()
- checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
- CurrentBlock: uint64(len(validChain.blocks) - 1),
- HighestBlock: uint64(len(validChain.blocks) - 1),
- })
-}
-
-func TestRemoteHeaderRequestSpan(t *testing.T) {
- testCases := []struct {
- remoteHeight uint64
- localHeight uint64
- expected []int
- }{
- // Remote is way higher. We should ask for the remote head and go backwards
- {1500, 1000,
- []int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
- },
- {15000, 13006,
- []int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
- },
- // Remote is pretty close to us. We don't have to fetch as many
- {1200, 1150,
- []int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
- },
- // Remote is equal to us (so on a fork with higher td)
- // We should get the closest couple of ancestors
- {1500, 1500,
- []int{1497, 1499},
- },
- // We're higher than the remote! Odd
- {1000, 1500,
- []int{997, 999},
- },
- // Check some weird edge cases to make sure the span calculation behaves rationally
- {0, 1500,
- []int{0, 2},
- },
- {6000000, 0,
- []int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
- },
- {0, 0,
- []int{0, 2},
- },
- }
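- // reqs reconstructs the block numbers that a header request with the given start, count and span would cover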
- reqs := func(from, count, span int) []int {
- var r []int
- num := from
- for len(r) < count {
- r = append(r, num)
- num += span + 1
- }
- return r
- }
- for i, tt := range testCases {
- from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
- data := reqs(int(from), count, span)
-
- if max != uint64(data[len(data)-1]) {
- t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
- }
- failed := false
- if len(data) != len(tt.expected) {
- failed = true
- t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
- } else {
- for j, n := range data {
- if n != tt.expected[j] {
- failed = true
- break
- }
- }
- }
- if failed {
- res := strings.ReplaceAll(fmt.Sprint(data), " ", ",")
- exp := strings.ReplaceAll(fmt.Sprint(tt.expected), " ", ",")
- t.Logf("got: %v\n", res)
- t.Logf("exp: %v\n", exp)
- t.Errorf("test %d: wrong values", i)
- }
- }
-}
-
-/*
-// Tests that beacon sync (driven by a trusted post-merge head) works correctly
-// against local chains of various lengths.
-func TestBeaconSync68Full(t *testing.T) { testBeaconSync(t, eth.ETH68, FullSync) }
-func TestBeaconSync68Snap(t *testing.T) { testBeaconSync(t, eth.ETH68, SnapSync) }
-
-func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) {
- //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
-
- var cases = []struct {
- name string // The name of the testing scenario
- local int // The length of the local chain (canonical chain assumed); 0 means genesis is the head
- }{
- {name: "Beacon sync since genesis", local: 0},
- {name: "Beacon sync with short local chain", local: 1},
- {name: "Beacon sync with long local chain", local: blockCacheMaxItems - 15 - fsMinFullBlocks/2},
- {name: "Beacon sync with full local chain", local: blockCacheMaxItems - 15 - 1},
- }
- for _, c := range cases {
- t.Run(c.name, func(t *testing.T) {
- success := make(chan struct{})
- tester := newTesterWithNotification(t, func() {
- close(success)
- })
- defer tester.terminate()
-
- chain := testChainBase.shorten(blockCacheMaxItems - 15)
- tester.newPeer("peer", protocol, chain.blocks[1:])
-
- // Build the local chain segment if it's required
- if c.local > 0 {
- tester.chain.InsertChain(chain.blocks[1 : c.local+1])
- }
- if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
- t.Fatalf("Failed to beacon sync chain %v %v", c.name, err)
- }
- select {
- case <-success:
- // Ok, downloader fully cancelled after sync cycle
- if bs := int(tester.chain.CurrentBlock().Number.Uint64()) + 1; bs != len(chain.blocks) {
- t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(chain.blocks))
- }
- case <-time.NewTimer(time.Second * 3).C:
- t.Fatalf("Failed to sync chain in three seconds")
- }
- })
- }
-}
-*/
diff --git a/eth/downloader/testchain_test.go b/eth/downloader/testchain_test.go
deleted file mode 100644
index 52a8cedf0a..0000000000
--- a/eth/downloader/testchain_test.go
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package downloader
-
-import (
- "fmt"
- "math/big"
- "sync"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/triedb"
-)
-
-// Test chain parameters.
-var (
- testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
- testDB = rawdb.NewMemoryDatabase()
-
- testGspec = &core.Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- testGenesis = testGspec.MustCommit(testDB, triedb.NewDatabase(testDB, triedb.HashDefaults))
-)
-
-// The common prefix of all test chains:
-var testChainBase *testChain
-
-// Different forks on top of the base chain:
-var testChainForkLightA, testChainForkLightB, testChainForkHeavy *testChain
-
-var pregenerated bool
-
-func init() {
- // Reduce some of the parameters to make the tester faster
- FullMaxForkAncestry = 10000
- lightMaxForkAncestry = 10000
- blockCacheMaxItems = 1024
- fsHeaderSafetyNet = 256
- fsHeaderContCheck = 500 * time.Millisecond
-
- testChainBase = newTestChain(blockCacheMaxItems+200, testGenesis)
-
- var forkLen = int(FullMaxForkAncestry + 50)
- var wg sync.WaitGroup
-
- // Generate the test chains to seed the peers with
- wg.Add(3)
- go func() { testChainForkLightA = testChainBase.makeFork(forkLen, false, 1); wg.Done() }()
- go func() { testChainForkLightB = testChainBase.makeFork(forkLen, false, 2); wg.Done() }()
- go func() { testChainForkHeavy = testChainBase.makeFork(forkLen, true, 3); wg.Done() }()
- wg.Wait()
-
- // Pre-generate the blockchains used by the test peers to avoid overloading
- // during testing. These seemingly random chains are used in various downloader
- // tests. We're just pre-generating them here.
- chains := []*testChain{
- testChainBase,
- testChainForkLightA,
- testChainForkLightB,
- testChainForkHeavy,
- testChainBase.shorten(1),
- testChainBase.shorten(blockCacheMaxItems - 15),
- testChainBase.shorten((blockCacheMaxItems - 15) / 2),
- testChainBase.shorten(blockCacheMaxItems - 15 - 5),
- testChainBase.shorten(MaxHeaderFetch),
- testChainBase.shorten(800),
- testChainBase.shorten(800 / 2),
- testChainBase.shorten(800 / 3),
- testChainBase.shorten(800 / 4),
- testChainBase.shorten(800 / 5),
- testChainBase.shorten(800 / 6),
- testChainBase.shorten(800 / 7),
- testChainBase.shorten(800 / 8),
- testChainBase.shorten(3*fsHeaderSafetyNet + 256 + fsMinFullBlocks),
- testChainBase.shorten(fsMinFullBlocks + 256 - 1),
- testChainForkLightA.shorten(len(testChainBase.blocks) + 80),
- testChainForkLightB.shorten(len(testChainBase.blocks) + 81),
- testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch),
- testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch),
- testChainForkHeavy.shorten(len(testChainBase.blocks) + 79),
- }
- wg.Add(len(chains))
- for _, chain := range chains {
- go func(blocks []*types.Block) {
- newTestBlockchain(blocks)
- wg.Done()
- }(chain.blocks[1:])
- }
- wg.Wait()
-
- // Mark the chains pregenerated. Generating a new one will lead to a panic.
- pregenerated = true
-}
-
-type testChain struct {
- blocks []*types.Block
-}
-
-// newTestChain creates a blockchain of the given length.
-func newTestChain(length int, genesis *types.Block) *testChain {
- tc := &testChain{
- blocks: []*types.Block{genesis},
- }
- tc.generate(length-1, 0, genesis, false)
- return tc
-}
-
-// makeFork creates a fork on top of the test chain.
-func (tc *testChain) makeFork(length int, heavy bool, seed byte) *testChain {
- fork := tc.copy(len(tc.blocks) + length)
- fork.generate(length, seed, tc.blocks[len(tc.blocks)-1], heavy)
- return fork
-}
-
-// shorten creates a copy of the chain with the given length. It panics if the
-// length is longer than the number of available blocks.
-func (tc *testChain) shorten(length int) *testChain {
- if length > len(tc.blocks) {
- panic(fmt.Errorf("can't shorten test chain to %d blocks, it's only %d blocks long", length, len(tc.blocks)))
- }
- return tc.copy(length)
-}
-
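-// copy returns a copy of the chain truncated to newlen blocks, capped at the number of available blocks.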
-func (tc *testChain) copy(newlen int) *testChain {
- if newlen > len(tc.blocks) {
- newlen = len(tc.blocks)
- }
- cpy := &testChain{
- blocks: append([]*types.Block{}, tc.blocks[:newlen]...),
- }
- return cpy
-}
-
-// generate creates a chain of n blocks starting at and including parent. In
-// addition, every 22nd block contains a transaction and every 5th an uncle to
-// allow testing correct block reassembly.
-func (tc *testChain) generate(n int, seed byte, parent *types.Block, heavy bool) {
- blocks, _ := core.GenerateChain(testGspec.Config, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) {
- block.SetCoinbase(common.Address{seed})
- // If a heavy chain is requested, delay blocks to raise difficulty
- if heavy {
- block.OffsetTime(-9)
- }
- // Include transactions sent to the miner to make the blocks more interesting.
- if parent == tc.blocks[0] && i%22 == 0 {
- signer := types.MakeSigner(params.TestChainConfig, block.Number(), block.Timestamp())
- tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey)
- if err != nil {
- panic(err)
- }
- block.AddTx(tx)
- }
- // if the block number is a multiple of 5, add a bonus uncle to the block
- if i > 0 && i%5 == 0 {
- block.AddUncle(&types.Header{
- ParentHash: block.PrevBlock(i - 2).Hash(),
- Number: big.NewInt(block.Number().Int64() - 1),
- })
- }
- })
- tc.blocks = append(tc.blocks, blocks...)
-}
-
-var (
- testBlockchains = make(map[common.Hash]*testBlockchain)
- testBlockchainsLock sync.Mutex
-)
-
-type testBlockchain struct {
- chain *core.BlockChain
- gen sync.Once
-}
-
-// newTestBlockchain creates a blockchain database built by running the given blocks,
-// either actually running them, or reusing a previously created one. The returned
-// chains are *shared*, so *do not* mutate them.
-func newTestBlockchain(blocks []*types.Block) *core.BlockChain {
- // Retrieve an existing database, or create a new one
- head := testGenesis.Hash()
- if len(blocks) > 0 {
- head = blocks[len(blocks)-1].Hash()
- }
- testBlockchainsLock.Lock()
- if _, ok := testBlockchains[head]; !ok {
- testBlockchains[head] = new(testBlockchain)
- }
- tbc := testBlockchains[head]
- testBlockchainsLock.Unlock()
-
- // Ensure that the database is generated
- tbc.gen.Do(func() {
- if pregenerated {
- panic("Requested chain generation outside of init")
- }
- chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, testGspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- if err != nil {
- panic(err)
- }
- if n, err := chain.InsertChain(blocks); err != nil {
- panic(fmt.Sprintf("block %d: %v", n, err))
- }
- tbc.chain = chain
- })
- return tbc.chain
-}
diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go
deleted file mode 100644
index 59b6165863..0000000000
--- a/eth/filters/filter_test.go
+++ /dev/null
@@ -1,392 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package filters
-
-import (
- "context"
- "encoding/json"
- "math/big"
- "strings"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/accounts/abi"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/rpc"
- "github.com/ethereum/go-ethereum/triedb"
-)
-
-func makeReceipt(addr common.Address) *types.Receipt {
- receipt := types.NewReceipt(nil, false, 0)
- receipt.Logs = []*types.Log{
- {Address: addr},
- }
- receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
- return receipt
-}
-
-func BenchmarkFilters(b *testing.B) {
- var (
- db, _ = rawdb.NewLevelDBDatabase(b.TempDir(), 0, 0, "", false)
- _, sys = newTestFilterSystem(b, db, Config{})
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- addr2 = common.BytesToAddress([]byte("jeff"))
- addr3 = common.BytesToAddress([]byte("ethereum"))
- addr4 = common.BytesToAddress([]byte("random addresses please"))
-
- gspec = &core.Genesis{
- Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}},
- BaseFee: big.NewInt(params.InitialBaseFee),
- Config: params.TestChainConfig,
- }
- )
- defer db.Close()
- _, chain, receipts := core.GenerateChainWithGenesis(gspec, ethash.NewFaker(), 100010, func(i int, gen *core.BlockGen) {
- switch i {
- case 2403:
- receipt := makeReceipt(addr1)
- gen.AddUncheckedReceipt(receipt)
- gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil))
- case 1034:
- receipt := makeReceipt(addr2)
- gen.AddUncheckedReceipt(receipt)
- gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil))
- case 34:
- receipt := makeReceipt(addr3)
- gen.AddUncheckedReceipt(receipt)
- gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil))
- case 99999:
- receipt := makeReceipt(addr4)
- gen.AddUncheckedReceipt(receipt)
- gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil))
- }
- })
- // The test txs are not properly signed, so we can't simply create a chain
- // and then import the blocks. TODO(rjl493456442) try to get rid of the
- // manual database writes.
- gspec.MustCommit(db, triedb.NewDatabase(db, triedb.HashDefaults))
-
- for i, block := range chain {
- rawdb.WriteBlock(db, block)
- rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
- rawdb.WriteHeadBlockHash(db, block.Hash())
- rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i])
- }
- b.ResetTimer()
-
- filter := sys.NewRangeFilter(0, -1, []common.Address{addr1, addr2, addr3, addr4}, nil, false)
-
- for i := 0; i < b.N; i++ {
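- // Reset the filter's start block so each iteration scans the entire range again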
- filter.begin = 0
- logs, _ := filter.Logs(context.Background())
- if len(logs) != 4 {
- b.Fatal("expected 4 logs, got", len(logs))
- }
- }
-}
-
-func TestFilters(t *testing.T) {
- var (
- db = rawdb.NewMemoryDatabase()
- _, sys = newTestFilterSystem(t, db, Config{})
- // Sender account
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr = crypto.PubkeyToAddress(key1.PublicKey)
- signer = types.NewLondonSigner(big.NewInt(1))
- // Logging contract
- contract = common.Address{0xfe}
- contract2 = common.Address{0xff}
- abiStr = `[{"inputs":[],"name":"log0","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"}],"name":"log1","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"},{"internalType":"uint256","name":"t2","type":"uint256"}],"name":"log2","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"},{"internalType":"uint256","name":"t2","type":"uint256"},{"internalType":"uint256","name":"t3","type":"uint256"}],"name":"log3","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"},{"internalType":"uint256","name":"t2","type":"uint256"},{"internalType":"uint256","name":"t3","type":"uint256"},{"internalType":"uint256","name":"t4","type":"uint256"}],"name":"log4","outputs":[],"stateMutability":"nonpayable","type":"function"}]`
- /*
- // SPDX-License-Identifier: GPL-3.0
- pragma solidity >=0.7.0 <0.9.0;
-
- contract Logger {
- function log0() external {
- assembly {
- log0(0, 0)
- }
- }
-
- function log1(uint t1) external {
- assembly {
- log1(0, 0, t1)
- }
- }
-
- function log2(uint t1, uint t2) external {
- assembly {
- log2(0, 0, t1, t2)
- }
- }
-
- function log3(uint t1, uint t2, uint t3) external {
- assembly {
- log3(0, 0, t1, t2, t3)
- }
- }
-
- function log4(uint t1, uint t2, uint t3, uint t4) external {
- assembly {
- log4(0, 0, t1, t2, t3, t4)
- }
- }
- }
- */
- bytecode = common.FromHex("608060405234801561001057600080fd5b50600436106100575760003560e01c80630aa731851461005c5780632a4c08961461006657806378b9a1f314610082578063c670f8641461009e578063c683d6a3146100ba575b600080fd5b6100646100d6565b005b610080600480360381019061007b9190610143565b6100dc565b005b61009c60048036038101906100979190610196565b6100e8565b005b6100b860048036038101906100b391906101d6565b6100f2565b005b6100d460048036038101906100cf9190610203565b6100fa565b005b600080a0565b808284600080a3505050565b8082600080a25050565b80600080a150565b80828486600080a450505050565b600080fd5b6000819050919050565b6101208161010d565b811461012b57600080fd5b50565b60008135905061013d81610117565b92915050565b60008060006060848603121561015c5761015b610108565b5b600061016a8682870161012e565b935050602061017b8682870161012e565b925050604061018c8682870161012e565b9150509250925092565b600080604083850312156101ad576101ac610108565b5b60006101bb8582860161012e565b92505060206101cc8582860161012e565b9150509250929050565b6000602082840312156101ec576101eb610108565b5b60006101fa8482850161012e565b91505092915050565b6000806000806080858703121561021d5761021c610108565b5b600061022b8782880161012e565b945050602061023c8782880161012e565b935050604061024d8782880161012e565b925050606061025e8782880161012e565b9150509295919450925056fea264697066735822122073a4b156f487e59970dc1ef449cc0d51467268f676033a17188edafcee861f9864736f6c63430008110033")
-
- hash1 = common.BytesToHash([]byte("topic1"))
- hash2 = common.BytesToHash([]byte("topic2"))
- hash3 = common.BytesToHash([]byte("topic3"))
- hash4 = common.BytesToHash([]byte("topic4"))
- hash5 = common.BytesToHash([]byte("topic5"))
-
- gspec = &core.Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{
- addr: {Balance: big.NewInt(0).Mul(big.NewInt(100), big.NewInt(params.Ether))},
- contract: {Balance: big.NewInt(0), Code: bytecode},
- contract2: {Balance: big.NewInt(0), Code: bytecode},
- },
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- )
-
- contractABI, err := abi.JSON(strings.NewReader(abiStr))
- if err != nil {
- t.Fatal(err)
- }
-
- // Hack: GenerateChainWithGenesis creates a new db.
- // Commit the genesis manually and use GenerateChain.
- _, err = gspec.Commit(db, triedb.NewDatabase(db, nil))
- if err != nil {
- t.Fatal(err)
- }
- chain, _ := core.GenerateChain(gspec.Config, gspec.ToBlock(), ethash.NewFaker(), db, 1000, func(i int, gen *core.BlockGen) {
- switch i {
- case 1:
- data, err := contractABI.Pack("log1", hash1.Big())
- if err != nil {
- t.Fatal(err)
- }
- tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
- Nonce: 0,
- GasPrice: gen.BaseFee(),
- Gas: 30000,
- To: &contract,
- Data: data,
- }), signer, key1)
- gen.AddTx(tx)
- tx2, _ := types.SignTx(types.NewTx(&types.LegacyTx{
- Nonce: 1,
- GasPrice: gen.BaseFee(),
- Gas: 30000,
- To: &contract2,
- Data: data,
- }), signer, key1)
- gen.AddTx(tx2)
- case 2:
- data, err := contractABI.Pack("log2", hash2.Big(), hash1.Big())
- if err != nil {
- t.Fatal(err)
- }
- tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
- Nonce: 2,
- GasPrice: gen.BaseFee(),
- Gas: 30000,
- To: &contract,
- Data: data,
- }), signer, key1)
- gen.AddTx(tx)
- case 998:
- data, err := contractABI.Pack("log1", hash3.Big())
- if err != nil {
- t.Fatal(err)
- }
- tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
- Nonce: 3,
- GasPrice: gen.BaseFee(),
- Gas: 30000,
- To: &contract2,
- Data: data,
- }), signer, key1)
- gen.AddTx(tx)
- case 999:
- data, err := contractABI.Pack("log1", hash4.Big())
- if err != nil {
- t.Fatal(err)
- }
- tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
- Nonce: 4,
- GasPrice: gen.BaseFee(),
- Gas: 30000,
- To: &contract,
- Data: data,
- }), signer, key1)
- gen.AddTx(tx)
- }
- })
- var l uint64
- bc, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
- if err != nil {
- t.Fatal(err)
- }
- _, err = bc.InsertChain(chain)
- if err != nil {
- t.Fatal(err)
- }
-
- // Set block 998 as Finalized (-3)
- // bc.SetFinalized(chain[998].Header())
-
- // Generate pending block
- pchain, preceipts := core.GenerateChain(gspec.Config, chain[len(chain)-1], ethash.NewFaker(), db, 1, func(i int, gen *core.BlockGen) {
- data, err := contractABI.Pack("log1", hash5.Big())
- if err != nil {
- t.Fatal(err)
- }
- tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
- Nonce: 5,
- GasPrice: gen.BaseFee(),
- Gas: 30000,
- To: &contract,
- Data: data,
- }), signer, key1)
- gen.AddTx(tx)
- })
- sys.backend.(*testBackend).pendingBlock = pchain[0]
- sys.backend.(*testBackend).pendingReceipts = preceipts[0]
-
- for i, tc := range []struct {
- f *Filter
- want string
- err string
- }{
- {
- f: sys.NewBlockFilter(chain[2].Hash(), []common.Address{contract}, nil),
- want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false}]`,
- },
- {
- f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{contract}, [][]common.Hash{{hash1, hash2, hash3, hash4}}, false),
- want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xa8028c655b6423204c8edfbc339f57b042d6bec2b6a61145d76b7c08b4cccd42","transactionIndex":"0x0","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`,
- },
- {
- f: sys.NewRangeFilter(900, 999, []common.Address{contract}, [][]common.Hash{{hash3}}, false),
- },
- {
- f: sys.NewRangeFilter(990, int64(rpc.LatestBlockNumber), []common.Address{contract2}, [][]common.Hash{{hash3}}, false),
- want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","logIndex":"0x0","removed":false}]`,
- },
- {
- f: sys.NewRangeFilter(1, 10, []common.Address{contract}, [][]common.Hash{{hash2}, {hash1}}, false),
- want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false}]`,
- },
- {
- f: sys.NewRangeFilter(1, 10, nil, [][]common.Hash{{hash1, hash2}}, false),
- want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xa8028c655b6423204c8edfbc339f57b042d6bec2b6a61145d76b7c08b4cccd42","transactionIndex":"0x0","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","logIndex":"0x0","removed":false},{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xdba3e2ea9a7d690b722d70ee605fd67ba4c00d1d3aecd5cf187a7b92ad8eb3df","transactionIndex":"0x1","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","logIndex":"0x1","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false}]`,
- },
- {
- f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}}, false),
- },
- {
- f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{common.BytesToAddress([]byte("failmenow"))}, nil, false),
- },
- {
- f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}, {hash1}}, false),
- },
- {
- f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.LatestBlockNumber), nil, nil, false),
- want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`,
- },
- /*
- {
- f: sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.LatestBlockNumber), nil, nil, false),
- want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`,
- },
- {
- f: sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil, false),
- want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","logIndex":"0x0","removed":false}]`,
- },
- {
- f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil),
- },
- {
- f: sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.LatestBlockNumber), nil, nil),
- err: "safe header not found",
- },
- {
- f: sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.SafeBlockNumber), nil, nil),
- err: "safe header not found",
- },
- {
- f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.SafeBlockNumber), nil, nil),
- err: "safe header not found",
- },
- */
- {
- f: sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.PendingBlockNumber), nil, nil, false),
- want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696335"],"data":"0x","blockNumber":"0x3e9","transactionHash":"0x4110587c1b8d86edc85dce929a34127f1cb8809515a9f177c91c866de3eb0638","transactionIndex":"0x0","blockHash":"0xd5e8d4e4eb51a2a2a6ec20ef68a4c2801240743c8deb77a6a1d118ac3eefb725","logIndex":"0x0","removed":false}]`,
- },
- {
- f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.PendingBlockNumber), nil, nil, false),
- want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696335"],"data":"0x","blockNumber":"0x3e9","transactionHash":"0x4110587c1b8d86edc85dce929a34127f1cb8809515a9f177c91c866de3eb0638","transactionIndex":"0x0","blockHash":"0xd5e8d4e4eb51a2a2a6ec20ef68a4c2801240743c8deb77a6a1d118ac3eefb725","logIndex":"0x0","removed":false}]`,
- },
- {
- f: sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.LatestBlockNumber), nil, nil, false),
- err: errInvalidBlockRange.Error(),
- },
- } {
- logs, err := tc.f.Logs(context.Background())
- if err == nil && tc.err != "" {
- t.Fatalf("test %d, expected error %q, got nil", i, tc.err)
- } else if err != nil && err.Error() != tc.err {
- t.Fatalf("test %d, expected error %q, got %q", i, tc.err, err.Error())
- }
- if tc.want == "" && len(logs) == 0 {
- continue
- }
- have, err := json.Marshal(logs)
- if err != nil {
- t.Fatal(err)
- }
- if string(have) != tc.want {
- t.Fatalf("test %d, have:\n%s\nwant:\n%s", i, have, tc.want)
- }
- }
-
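-	// An expired context must abort the range filter and surface context.DeadlineExceeded.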
- t.Run("timeout", func(t *testing.T) {
- f := sys.NewRangeFilter(0, -1, nil, nil, false)
- ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-time.Hour))
- defer cancel()
- _, err := f.Logs(ctx)
- if err == nil {
- t.Fatal("expected error")
- }
- if err != context.DeadlineExceeded {
- t.Fatalf("expected context.DeadlineExceeded, got %v", err)
- }
- })
-}
diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go
deleted file mode 100644
index c6ce443a06..0000000000
--- a/eth/gasprice/gasprice_test.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package gasprice
-
-import (
- "context"
- "errors"
- "math"
- "math/big"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/event"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/rpc"
-)
-
-const testHead = 32
-
-type testBackend struct {
- chain *core.BlockChain
- pending bool // pending block available
-}
-
-func (b *testBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) {
- if number > testHead {
- return nil, nil
- }
- if number == rpc.EarliestBlockNumber {
- number = 0
- }
- if number == rpc.FinalizedBlockNumber {
- header := b.chain.CurrentFinalBlock()
- if header == nil {
- return nil, errors.New("finalized block not found")
- }
- number = rpc.BlockNumber(header.Number.Uint64())
- }
- if number == rpc.SafeBlockNumber {
- header := b.chain.CurrentSafeBlock()
- if header == nil {
- return nil, errors.New("safe block not found")
- }
- number = rpc.BlockNumber(header.Number.Uint64())
- }
- if number == rpc.LatestBlockNumber {
- number = testHead
- }
- if number == rpc.PendingBlockNumber {
- if b.pending {
- number = testHead + 1
- } else {
- return nil, nil
- }
- }
- return b.chain.GetHeaderByNumber(uint64(number)), nil
-}
-
-func (b *testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) {
- if number > testHead {
- return nil, nil
- }
- if number == rpc.EarliestBlockNumber {
- number = 0
- }
- if number == rpc.FinalizedBlockNumber {
- number = rpc.BlockNumber(b.chain.CurrentFinalBlock().Number.Uint64())
- }
- if number == rpc.SafeBlockNumber {
- number = rpc.BlockNumber(b.chain.CurrentSafeBlock().Number.Uint64())
- }
- if number == rpc.LatestBlockNumber {
- number = testHead
- }
- if number == rpc.PendingBlockNumber {
- if b.pending {
- number = testHead + 1
- } else {
- return nil, nil
- }
- }
- return b.chain.GetBlockByNumber(uint64(number)), nil
-}
-
-func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
- return b.chain.GetReceiptsByHash(hash), nil
-}
-
-func (b *testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
- if b.pending {
- block := b.chain.GetBlockByNumber(testHead + 1)
- return block, b.chain.GetReceiptsByHash(block.Hash())
- }
- return nil, nil
-}
-
-func (b *testBackend) ChainConfig() *params.ChainConfig {
- return b.chain.Config()
-}
-
-func (b *testBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
- return nil
-}
-
-func (b *testBackend) teardown() {
- b.chain.Stop()
-}
-
-// newTestBackend creates a test backend. OBS: don't forget to invoke teardown
-// after use, otherwise the blockchain instance will mem-leak via goroutines.
-func newTestBackend(t *testing.T, londonBlock *big.Int, pending bool) *testBackend {
- var (
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr = crypto.PubkeyToAddress(key.PublicKey)
- config = *params.TestChainConfig // needs copy because it is modified below
- gspec = &core.Genesis{
- Config: &config,
- Alloc: types.GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}},
- }
- signer = types.LatestSigner(gspec.Config)
- )
- config.LondonBlock = londonBlock
- config.ArrowGlacierBlock = londonBlock
- config.GrayGlacierBlock = londonBlock
- config.GibbsBlock = nil
- config.LubanBlock = nil
- config.PlatoBlock = nil
- config.HertzBlock = nil
- config.HertzfixBlock = nil
- config.TerminalTotalDifficulty = common.Big0
- engine := ethash.NewFaker()
-
- // Generate testing blocks
- _, blocks, _ := core.GenerateChainWithGenesis(gspec, engine, testHead+1, func(i int, b *core.BlockGen) {
- b.SetCoinbase(common.Address{1})
-
- var txdata types.TxData
- if londonBlock != nil && b.Number().Cmp(londonBlock) >= 0 {
- txdata = &types.DynamicFeeTx{
- ChainID: gspec.Config.ChainID,
- Nonce: b.TxNonce(addr),
- To: &common.Address{},
- Gas: 30000,
- GasFeeCap: big.NewInt(100 * params.GWei),
- GasTipCap: big.NewInt(int64(i+1) * params.GWei),
- Data: []byte{},
- }
- } else {
- txdata = &types.LegacyTx{
- Nonce: b.TxNonce(addr),
- To: &common.Address{},
- Gas: 21000,
- GasPrice: big.NewInt(int64(i+1) * params.GWei),
- Value: big.NewInt(100),
- Data: []byte{},
- }
- }
- b.AddTx(types.MustSignNewTx(key, signer, txdata))
- })
- // Construct testing chain
- chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), &core.CacheConfig{TrieCleanNoPrefetch: true}, gspec, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to create local chain, %v", err)
- }
- _, err = chain.InsertChain(blocks)
- if err != nil {
- t.Fatalf("Failed to insert blocks, %v", err)
- }
- return &testBackend{chain: chain, pending: pending}
-}
-
-func (b *testBackend) CurrentHeader() *types.Header {
- return b.chain.CurrentHeader()
-}
-
-func (b *testBackend) GetBlockByNumber(number uint64) *types.Block {
- return b.chain.GetBlockByNumber(number)
-}
-
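-// TestSuggestTipCap checks that the oracle suggests the expected tip (the 60th
-// percentile of the sampled tips) regardless of where the London fork activates.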
-func TestSuggestTipCap(t *testing.T) {
- config := Config{
- Blocks: 3,
- Percentile: 60,
- Default: big.NewInt(params.GWei),
- }
- var cases = []struct {
- fork *big.Int // London fork number
- expect *big.Int // Expected gasprice suggestion
- }{
- {nil, big.NewInt(params.GWei * int64(30))},
- {big.NewInt(0), big.NewInt(params.GWei * int64(30))}, // Fork point in genesis
- {big.NewInt(1), big.NewInt(params.GWei * int64(30))}, // Fork point in first block
- {big.NewInt(32), big.NewInt(params.GWei * int64(30))}, // Fork point in last block
- {big.NewInt(33), big.NewInt(params.GWei * int64(30))}, // Fork point in the future
- }
- for _, c := range cases {
- backend := newTestBackend(t, c.fork, false)
- oracle := NewOracle(backend, config)
-
- // The gas price sampled is: 32G, 31G, 30G, 29G, 28G, 27G
- got, err := oracle.SuggestTipCap(context.Background())
- backend.teardown()
- if err != nil {
- t.Fatalf("Failed to retrieve recommended gas price: %v", err)
- }
- if got.Cmp(c.expect) != 0 {
- t.Fatalf("Gas price mismatch, want %d, got %d", c.expect, got)
- }
- }
-}
diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go
deleted file mode 100644
index de49912e57..0000000000
--- a/eth/protocols/eth/handler_test.go
+++ /dev/null
@@ -1,656 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package eth
-
-import (
- rand2 "crypto/rand"
- "io"
- "math"
- "math/big"
- "math/rand"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus"
- "github.com/ethereum/go-ethereum/consensus/beacon"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/txpool"
- "github.com/ethereum/go-ethereum/core/txpool/legacypool"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/crypto/kzg4844"
- "github.com/ethereum/go-ethereum/eth/protocols/bsc"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/p2p"
- "github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/rlp"
- "github.com/holiman/uint256"
-)
-
-var (
- // testKey is a private key to use for funding a tester account.
- testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
-
- // testAddr is the Ethereum address of the tester account.
- testAddr = crypto.PubkeyToAddress(testKey.PublicKey)
-)
-
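-// u64 returns a pointer to the given uint64; handy for optional chain config fields.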
-func u64(val uint64) *uint64 { return &val }
-
-// testBackend is a mock implementation of the live Ethereum message handler. Its
-// purpose is to allow testing the request/reply workflows and wire serialization
-// in the `eth` protocol without actually doing any data processing.
-type testBackend struct {
- db ethdb.Database
- chain *core.BlockChain
- txpool *txpool.TxPool
-}
-
-// newTestBackend creates an empty chain and wraps it into a mock backend.
-func newTestBackend(blocks int) *testBackend {
- return newTestBackendWithGenerator(blocks, false, nil)
-}
-
-// newTestBackendWithGenerator creates a chain with a number of explicitly defined blocks and
-// wraps it into a mock backend.
-func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int, *core.BlockGen)) *testBackend {
- var (
- // Create a database pre-initialize with a genesis block
- db = rawdb.NewMemoryDatabase()
- config = params.TestChainConfig
- engine consensus.Engine = ethash.NewFaker()
- )
-
- if shanghai {
-		config = &params.ChainConfig{
- ChainID: big.NewInt(1),
- HomesteadBlock: big.NewInt(0),
- DAOForkBlock: nil,
- DAOForkSupport: true,
- EIP150Block: big.NewInt(0),
- EIP155Block: big.NewInt(0),
- EIP158Block: big.NewInt(0),
- ByzantiumBlock: big.NewInt(0),
- ConstantinopleBlock: big.NewInt(0),
- PetersburgBlock: big.NewInt(0),
- IstanbulBlock: big.NewInt(0),
- MuirGlacierBlock: big.NewInt(0),
- BerlinBlock: big.NewInt(0),
- LondonBlock: big.NewInt(0),
- ArrowGlacierBlock: big.NewInt(0),
- GrayGlacierBlock: big.NewInt(0),
- MergeNetsplitBlock: big.NewInt(0),
- ShanghaiTime: u64(0),
- TerminalTotalDifficulty: big.NewInt(0),
- TerminalTotalDifficultyPassed: true,
- Ethash: new(params.EthashConfig),
- }
- engine = beacon.NewFaker()
- }
-
- gspec := &core.Genesis{
- Config: config,
- Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(100_000_000_000_000_000)}},
- }
- chain, _ := core.NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
-
- _, bs, _ := core.GenerateChainWithGenesis(gspec, engine, blocks, generator)
- if _, err := chain.InsertChain(bs); err != nil {
- panic(err)
- }
- for _, block := range bs {
- chain.TrieDB().Commit(block.Root(), false)
- }
- txconfig := legacypool.DefaultConfig
- txconfig.Journal = "" // Don't litter the disk with test journals
-
- pool := legacypool.New(txconfig, chain)
- txpool, _ := txpool.New(txconfig.PriceLimit, chain, []txpool.SubPool{pool})
-
- return &testBackend{
- db: db,
- chain: chain,
- txpool: txpool,
- }
-}
-
-// close tears down the transaction pool and chain behind the mock backend.
-func (b *testBackend) close() {
- b.txpool.Close()
- b.chain.Stop()
-}
-
-func (b *testBackend) Chain() *core.BlockChain { return b.chain }
-func (b *testBackend) TxPool() TxPool { return b.txpool }
-
-func (b *testBackend) RunPeer(peer *Peer, handler Handler) error {
- // Normally the backend would do peer maintenance and handshakes. All that
- // is omitted, and we will just give control back to the handler.
- return handler(peer)
-}
-func (b *testBackend) PeerInfo(enode.ID) interface{} { panic("not implemented") }
-
-func (b *testBackend) AcceptTxs() bool {
- panic("data processing tests should be done in the handler package")
-}
-func (b *testBackend) Handle(*Peer, Packet) error {
- return nil
-}
-
-// Tests that block headers can be retrieved from a remote chain based on user queries.
-func TestGetBlockHeaders68(t *testing.T) { testGetBlockHeaders(t, ETH68) }
-
-func testGetBlockHeaders(t *testing.T, protocol uint) {
- t.Parallel()
-
- backend := newTestBackend(maxHeadersServe + 15)
- defer backend.close()
-
- peer, _ := newTestPeer("peer", protocol, backend)
- defer peer.close()
-
- // Create a "random" unknown hash for testing
- var unknown common.Hash
- for i := range unknown {
- unknown[i] = byte(i)
- }
- getHashes := func(from, limit uint64) (hashes []common.Hash) {
- for i := uint64(0); i < limit; i++ {
- hashes = append(hashes, backend.chain.GetCanonicalHash(from-1-i))
- }
- return hashes
- }
- // Create a batch of tests for various scenarios
- limit := uint64(maxHeadersServe)
- tests := []struct {
- query *GetBlockHeadersRequest // The query to execute for header retrieval
-		expect []common.Hash            // The hashes of the blocks whose headers are expected
- }{
- // A single random block should be retrievable by hash
- {
- &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
- []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()},
- },
- // A single random block should be retrievable by number
- {
- &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 1},
- []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()},
- },
- // Multiple headers should be retrievable in both directions
- {
- &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3},
- []common.Hash{
- backend.chain.GetBlockByNumber(limit / 2).Hash(),
- backend.chain.GetBlockByNumber(limit/2 + 1).Hash(),
- backend.chain.GetBlockByNumber(limit/2 + 2).Hash(),
- },
- }, {
- &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
- []common.Hash{
- backend.chain.GetBlockByNumber(limit / 2).Hash(),
- backend.chain.GetBlockByNumber(limit/2 - 1).Hash(),
- backend.chain.GetBlockByNumber(limit/2 - 2).Hash(),
- },
- },
- // Multiple headers with skip lists should be retrievable
- {
- &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
- []common.Hash{
- backend.chain.GetBlockByNumber(limit / 2).Hash(),
- backend.chain.GetBlockByNumber(limit/2 + 4).Hash(),
- backend.chain.GetBlockByNumber(limit/2 + 8).Hash(),
- },
- }, {
- &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
- []common.Hash{
- backend.chain.GetBlockByNumber(limit / 2).Hash(),
- backend.chain.GetBlockByNumber(limit/2 - 4).Hash(),
- backend.chain.GetBlockByNumber(limit/2 - 8).Hash(),
- },
- },
- // The chain endpoints should be retrievable
- {
- &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 0}, Amount: 1},
- []common.Hash{backend.chain.GetBlockByNumber(0).Hash()},
- },
- {
- &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1},
- []common.Hash{backend.chain.CurrentBlock().Hash()},
- },
- { // If the peer requests a bit into the future, we deliver what we have
- &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10},
- []common.Hash{backend.chain.CurrentBlock().Hash()},
- },
- // Ensure protocol limits are honored
- {
- &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true},
- getHashes(backend.chain.CurrentBlock().Number.Uint64(), limit),
- },
- // Check that requesting more than available is handled gracefully
- {
- &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3},
- []common.Hash{
- backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(),
- backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64()).Hash(),
- },
- }, {
- &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
- []common.Hash{
- backend.chain.GetBlockByNumber(4).Hash(),
- backend.chain.GetBlockByNumber(0).Hash(),
- },
- },
- // Check that requesting more than available is handled gracefully, even if mid skip
- {
- &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3},
- []common.Hash{
- backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(),
- backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 1).Hash(),
- },
- }, {
- &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
- []common.Hash{
- backend.chain.GetBlockByNumber(4).Hash(),
- backend.chain.GetBlockByNumber(1).Hash(),
- },
- },
- // Check a corner case where requesting more can iterate past the endpoints
- {
- &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true},
- []common.Hash{
- backend.chain.GetBlockByNumber(2).Hash(),
- backend.chain.GetBlockByNumber(1).Hash(),
- backend.chain.GetBlockByNumber(0).Hash(),
- },
- },
- // Check a corner case where skipping overflow loops back into the chain start
- {
- &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1},
- []common.Hash{
- backend.chain.GetBlockByNumber(3).Hash(),
- },
- },
- // Check a corner case where skipping overflow loops back to the same header
- {
- &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64},
- []common.Hash{
- backend.chain.GetBlockByNumber(1).Hash(),
- },
- },
- // Check that non-existing headers aren't returned
- {
- &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: unknown}, Amount: 1},
- []common.Hash{},
- }, {
- &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1},
- []common.Hash{},
- },
- }
- // Run each of the tests and verify the results against the chain
- for i, tt := range tests {
- // Collect the headers to expect in the response
- var headers []*types.Header
- for _, hash := range tt.expect {
- headers = append(headers, backend.chain.GetBlockByHash(hash).Header())
- }
- // Send the hash request and verify the response
- p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{
- RequestId: 123,
- GetBlockHeadersRequest: tt.query,
- })
- if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket{
- RequestId: 123,
- BlockHeadersRequest: headers,
- }); err != nil {
- t.Errorf("test %d: headers mismatch: %v", i, err)
- }
-		// If the test used number origins, repeat with hashes as well
- if tt.query.Origin.Hash == (common.Hash{}) {
- if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil {
- tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0
-
- p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{
- RequestId: 456,
- GetBlockHeadersRequest: tt.query,
- })
- expected := &BlockHeadersPacket{RequestId: 456, BlockHeadersRequest: headers}
- if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, expected); err != nil {
- t.Errorf("test %d by hash: headers mismatch: %v", i, err)
- }
- }
- }
- }
-}
-
-// Tests that block contents can be retrieved from a remote chain based on their hashes.
-func TestGetBlockBodies68(t *testing.T) { testGetBlockBodies(t, ETH68) }
-
-func testGetBlockBodies(t *testing.T, protocol uint) {
- t.Parallel()
-
- gen := func(n int, g *core.BlockGen) {
- if n%2 == 0 {
- w := &types.Withdrawal{
- Address: common.Address{0xaa},
- Amount: 42,
- }
- g.AddWithdrawal(w)
- }
- }
-
- backend := newTestBackendWithGenerator(maxBodiesServe+15, true, gen)
- defer backend.close()
-
- peer, _ := newTestPeer("peer", protocol, backend)
- defer peer.close()
-
- // Create a batch of tests for various scenarios
- limit := maxBodiesServe
- tests := []struct {
- random int // Number of blocks to fetch randomly from the chain
- explicit []common.Hash // Explicitly requested blocks
- available []bool // Availability of explicitly requested blocks
- expected int // Total number of existing blocks to expect
- }{
- {1, nil, nil, 1}, // A single random block should be retrievable
- {10, nil, nil, 10}, // Multiple random blocks should be retrievable
- {limit, nil, nil, limit}, // The maximum possible blocks should be retrievable
- {limit + 1, nil, nil, limit}, // No more than the possible block count should be returned
- {0, []common.Hash{backend.chain.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable
-		{0, []common.Hash{backend.chain.CurrentBlock().Hash()}, []bool{true}, 1}, // The chain's head block should be retrievable
-		{0, []common.Hash{{}}, []bool{false}, 0},                                 // A non-existent block should not be returned
-
- // Existing and non-existing blocks interleaved should not cause problems
- {0, []common.Hash{
- {},
- backend.chain.GetBlockByNumber(1).Hash(),
- {},
- backend.chain.GetBlockByNumber(10).Hash(),
- {},
- backend.chain.GetBlockByNumber(100).Hash(),
- {},
- }, []bool{false, true, false, true, false, true, false}, 3},
- }
- // Run each of the tests and verify the results against the chain
- for i, tt := range tests {
- // Collect the hashes to request, and the response to expect
- var (
- hashes []common.Hash
- bodies []*BlockBody
- seen = make(map[int64]bool)
- )
- for j := 0; j < tt.random; j++ {
- for {
- num := rand.Int63n(int64(backend.chain.CurrentBlock().Number.Uint64()))
- if !seen[num] {
- seen[num] = true
-
- block := backend.chain.GetBlockByNumber(uint64(num))
- hashes = append(hashes, block.Hash())
- if len(bodies) < tt.expected {
- bodies = append(bodies, &BlockBody{Transactions: block.Transactions(), Uncles: block.Uncles(), Withdrawals: block.Withdrawals()})
- }
- break
- }
- }
- }
- for j, hash := range tt.explicit {
- hashes = append(hashes, hash)
- if tt.available[j] && len(bodies) < tt.expected {
- block := backend.chain.GetBlockByHash(hash)
- bodies = append(bodies, &BlockBody{Transactions: block.Transactions(), Uncles: block.Uncles(), Withdrawals: block.Withdrawals()})
- }
- }
-
- // Send the hash request and verify the response
- p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket{
- RequestId: 123,
- GetBlockBodiesRequest: hashes,
- })
- if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket{
- RequestId: 123,
- BlockBodiesResponse: bodies,
- }); err != nil {
- t.Fatalf("test %d: bodies mismatch: %v", i, err)
- }
- }
-}
-
-// Tests that the transaction receipts can be retrieved based on hashes.
-func TestGetBlockReceipts68(t *testing.T) { testGetBlockReceipts(t, ETH68) }
-
-func testGetBlockReceipts(t *testing.T, protocol uint) {
- t.Parallel()
-
- // Define three accounts to simulate transactions with
- acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
- acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
- acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)
- acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)
-
- signer := types.HomesteadSigner{}
- // Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_markets_test)
- generator := func(i int, block *core.BlockGen) {
- switch i {
- case 0:
- // In block 1, the test bank sends account #1 some ether.
- tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey)
- block.AddTx(tx)
- case 1:
- // In block 2, the test bank sends some more ether to account #1.
- // acc1Addr passes it on to account #2.
- tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey)
- tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, acc1Key)
- block.AddTx(tx1)
- block.AddTx(tx2)
- case 2:
- // Block 3 is empty but was mined by account #2.
- block.SetCoinbase(acc2Addr)
- block.SetExtra([]byte("yeehaw"))
- case 3:
- // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
- b2 := block.PrevBlock(1).Header()
- b2.Extra = []byte("foo")
- block.AddUncle(b2)
- b3 := block.PrevBlock(2).Header()
- b3.Extra = []byte("foo")
- block.AddUncle(b3)
- }
- }
- // Assemble the test environment
- backend := newTestBackendWithGenerator(4, false, generator)
- defer backend.close()
-
- peer, _ := newTestPeer("peer", protocol, backend)
- defer peer.close()
-
- // Collect the hashes to request, and the response to expect
- var (
- hashes []common.Hash
- receipts [][]*types.Receipt
- )
- for i := uint64(0); i <= backend.chain.CurrentBlock().Number.Uint64(); i++ {
- block := backend.chain.GetBlockByNumber(i)
-
- hashes = append(hashes, block.Hash())
- receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash()))
- }
- // Send the hash request and verify the response
- p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket{
- RequestId: 123,
- GetReceiptsRequest: hashes,
- })
- if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket{
- RequestId: 123,
- ReceiptsResponse: receipts,
- }); err != nil {
- t.Errorf("receipts mismatch: %v", err)
- }
-}
-
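-// TestHandleNewBlock checks that NewBlock announcements decode and are handled
-// without error, both with and without blob sidecars attached.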
-func TestHandleNewBlock(t *testing.T) {
- t.Parallel()
-
- gen := func(n int, g *core.BlockGen) {
- if n%2 == 0 {
- w := &types.Withdrawal{
- Address: common.Address{0xaa},
- Amount: 42,
- }
- g.AddWithdrawal(w)
- }
- }
-
- backend := newTestBackendWithGenerator(maxBodiesServe+15, true, gen)
- defer backend.close()
-
- peer, _ := newTestPeer("peer", ETH68, backend)
- defer peer.close()
-
- v := new(uint32)
- *v = 1
- genBlobs := makeBlkBlobs(1, 2)
- tx1 := types.NewTx(&types.BlobTx{
- ChainID: new(uint256.Int).SetUint64(1),
- GasTipCap: new(uint256.Int),
- GasFeeCap: new(uint256.Int),
- Gas: 0,
- Value: new(uint256.Int),
- Data: nil,
- BlobFeeCap: new(uint256.Int),
- BlobHashes: []common.Hash{common.HexToHash("0x34ec6e64f9cda8fe0451a391e4798085a3ef51a65ed1bfb016e34fc1a2028f8f"), common.HexToHash("0xb9a412e875f29fac436acde234f954e91173c4cf79814f6dcf630d8a6345747f")},
- Sidecar: genBlobs[0],
- V: new(uint256.Int),
- R: new(uint256.Int),
- S: new(uint256.Int),
- })
- block := types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(0),
- Extra: []byte("test block"),
- UncleHash: types.EmptyUncleHash,
- TxHash: types.EmptyTxsHash,
- ReceiptHash: types.EmptyReceiptsHash,
- })
- sidecars := types.BlobSidecars{types.NewBlobSidecarFromTx(tx1)}
- for _, s := range sidecars {
- s.BlockNumber = block.Number()
- s.BlockHash = block.Hash()
- }
- dataNil := NewBlockPacket{
- Block: block,
- TD: big.NewInt(1),
- Sidecars: nil,
- }
- dataNonNil := NewBlockPacket{
- Block: block,
- TD: big.NewInt(1),
- Sidecars: sidecars,
- }
- sizeNonNil, rNonNil, _ := rlp.EncodeToReader(dataNonNil)
- sizeNil, rNil, _ := rlp.EncodeToReader(dataNil)
-
- // Define the test cases
- testCases := []struct {
- name string
- msg p2p.Msg
- err error
- }{
- {
- name: "Valid block",
- msg: p2p.Msg{
- Code: 1,
- Size: uint32(sizeNonNil),
- Payload: rNonNil,
- },
- err: nil,
- },
- {
- name: "Nil sidecars",
- msg: p2p.Msg{
- Code: 2,
- Size: uint32(sizeNil),
- Payload: rNil,
- },
- err: nil,
- },
- }
-
- protos := []p2p.Protocol{
- {
- Name: "eth",
- Version: ETH68,
- },
- {
- Name: "bsc",
- Version: bsc.Bsc1,
- },
- }
- caps := []p2p.Cap{
- {
- Name: "eth",
- Version: ETH68,
- },
- {
- Name: "bsc",
- Version: bsc.Bsc1,
- },
- }
- // Create a source handler to send messages through and a sink peer to receive them
- p2pEthSrc, p2pEthSink := p2p.MsgPipe()
- defer p2pEthSrc.Close()
- defer p2pEthSink.Close()
-
- localEth := NewPeer(ETH68, p2p.NewPeerWithProtocols(enode.ID{1}, protos, "", caps), p2pEthSrc, nil)
-
- // Run the tests
- for _, tc := range testCases {
- tc := tc
- t.Run(tc.name, func(t *testing.T) {
- err := handleNewBlock(backend, tc.msg, localEth)
- if err != tc.err {
- t.Errorf("expected error %v, got %v", tc.err, err)
- }
- })
- }
-}
-
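-// makeBlkBlobs builds n blob sidecars, each carrying nPerTx random blobs together
-// with their KZG commitments and proofs.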
-func makeBlkBlobs(n, nPerTx int) []*types.BlobTxSidecar {
- if n <= 0 {
- return nil
- }
- ret := make([]*types.BlobTxSidecar, n)
- for i := 0; i < n; i++ {
- blobs := make([]kzg4844.Blob, nPerTx)
- commitments := make([]kzg4844.Commitment, nPerTx)
- proofs := make([]kzg4844.Proof, nPerTx)
- for i := 0; i < nPerTx; i++ {
- io.ReadFull(rand2.Reader, blobs[i][:])
- commitments[i], _ = kzg4844.BlobToCommitment(blobs[i])
- proofs[i], _ = kzg4844.ComputeBlobProof(blobs[i], commitments[i])
- }
- ret[i] = &types.BlobTxSidecar{
- Blobs: blobs,
- Commitments: commitments,
- Proofs: proofs,
- }
- }
- return ret
-}
diff --git a/eth/protocols/snap/handler_fuzzing_test.go b/eth/protocols/snap/handler_fuzzing_test.go
deleted file mode 100644
index 4e234ad21b..0000000000
--- a/eth/protocols/snap/handler_fuzzing_test.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2021 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package snap
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "math/big"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/p2p"
- "github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/rlp"
- fuzz "github.com/google/gofuzz"
-)
-
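-// The fuzz targets below feed arbitrary bytes into the snap protocol request
-// packets and check that the handler answers every well-formed request exactly once.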
-func FuzzARange(f *testing.F) {
- f.Fuzz(func(t *testing.T, data []byte) {
- doFuzz(data, &GetAccountRangePacket{}, GetAccountRangeMsg)
- })
-}
-
-func FuzzSRange(f *testing.F) {
- f.Fuzz(func(t *testing.T, data []byte) {
- doFuzz(data, &GetStorageRangesPacket{}, GetStorageRangesMsg)
- })
-}
-
-func FuzzByteCodes(f *testing.F) {
- f.Fuzz(func(t *testing.T, data []byte) {
- doFuzz(data, &GetByteCodesPacket{}, GetByteCodesMsg)
- })
-}
-
-func FuzzTrieNodes(f *testing.F) {
- f.Fuzz(func(t *testing.T, data []byte) {
- doFuzz(data, &GetTrieNodesPacket{}, GetTrieNodesMsg)
- })
-}
-
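-// doFuzz decodes the fuzz input into the given packet type, hands it to the snap
-// message handler and panics if the number of written responses does not match the
-// handler outcome.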
-func doFuzz(input []byte, obj interface{}, code int) {
- bc := getChain()
- defer bc.Stop()
- fuzz.NewFromGoFuzz(input).Fuzz(obj)
- var data []byte
- switch p := obj.(type) {
- case *GetTrieNodesPacket:
- p.Root = trieRoot
- data, _ = rlp.EncodeToBytes(obj)
- default:
- data, _ = rlp.EncodeToBytes(obj)
- }
- cli := &dummyRW{
- code: uint64(code),
- data: data,
- }
- peer := NewFakePeer(65, "gazonk01", cli)
- err := HandleMessage(&dummyBackend{bc}, peer)
- switch {
- case err == nil && cli.writeCount != 1:
- panic(fmt.Sprintf("Expected 1 response, got %d", cli.writeCount))
- case err != nil && cli.writeCount != 0:
- panic(fmt.Sprintf("Expected 0 response, got %d", cli.writeCount))
- }
-}
-
-var trieRoot common.Hash
-
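-// getChain assembles a small in-memory chain with 1000 pre-funded accounts (every
-// other one carrying storage) and records the resulting state root in trieRoot.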
-func getChain() *core.BlockChain {
- ga := make(types.GenesisAlloc, 1000)
- var a = make([]byte, 20)
- var mkStorage = func(k, v int) (common.Hash, common.Hash) {
- var kB = make([]byte, 32)
- var vB = make([]byte, 32)
- binary.LittleEndian.PutUint64(kB, uint64(k))
- binary.LittleEndian.PutUint64(vB, uint64(v))
- return common.BytesToHash(kB), common.BytesToHash(vB)
- }
- storage := make(map[common.Hash]common.Hash)
- for i := 0; i < 10; i++ {
- k, v := mkStorage(i, i)
- storage[k] = v
- }
- for i := 0; i < 1000; i++ {
- binary.LittleEndian.PutUint64(a, uint64(i+0xff))
- acc := types.Account{Balance: big.NewInt(int64(i))}
- if i%2 == 1 {
- acc.Storage = storage
- }
- ga[common.BytesToAddress(a)] = acc
- }
- gspec := &core.Genesis{
- Config: params.TestChainConfig,
- Alloc: ga,
- }
- _, blocks, _ := core.GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *core.BlockGen) {})
- cacheConf := &core.CacheConfig{
- TrieCleanLimit: 0,
- TrieDirtyLimit: 0,
- TrieTimeLimit: 5 * time.Minute,
- TrieCleanNoPrefetch: true,
- SnapshotLimit: 100,
- SnapshotWait: true,
- }
- trieRoot = blocks[len(blocks)-1].Root()
- bc, _ := core.NewBlockChain(rawdb.NewMemoryDatabase(), cacheConf, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- if _, err := bc.InsertChain(blocks); err != nil {
- panic(err)
- }
- return bc
-}
-
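-// dummyBackend is a minimal snap backend that only exposes the test chain.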
-type dummyBackend struct {
- chain *core.BlockChain
-}
-
-func (d *dummyBackend) Chain() *core.BlockChain { return d.chain }
-func (d *dummyBackend) RunPeer(*Peer, Handler) error { return nil }
-func (d *dummyBackend) PeerInfo(enode.ID) interface{} { return "Foo" }
-func (d *dummyBackend) Handle(*Peer, Packet) error { return nil }
-
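-// dummyRW replays a single canned message on every read and counts the responses
-// written back by the handler.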
-type dummyRW struct {
- code uint64
- data []byte
- writeCount int
-}
-
-func (d *dummyRW) ReadMsg() (p2p.Msg, error) {
- return p2p.Msg{
- Code: d.code,
- Payload: bytes.NewReader(d.data),
- ReceivedAt: time.Now(),
- Size: uint32(len(d.data)),
- }, nil
-}
-
-func (d *dummyRW) WriteMsg(msg p2p.Msg) error {
- d.writeCount++
- return nil
-}
diff --git a/eth/protocols/trust/handler_test.go b/eth/protocols/trust/handler_test.go
deleted file mode 100644
index 187b29c932..0000000000
--- a/eth/protocols/trust/handler_test.go
+++ /dev/null
@@ -1,273 +0,0 @@
-package trust
-
-import (
- "math/big"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus/clique"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/txpool/legacypool"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/p2p"
- "github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/triedb"
-)
-
-var (
- // testKey is a private key to use for funding a tester account.
- testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
-
- // testAddr is the Ethereum address of the tester account.
- testAddr = crypto.PubkeyToAddress(testKey.PublicKey)
-)
-
-// testBackend is a mock implementation of the live Ethereum message handler. Its
-// purpose is to allow testing the request/reply workflows and wire serialization
-// in the `trust` protocol without actually doing any data processing.
-type testBackend struct {
- db ethdb.Database
- chain *core.BlockChain
- txpool *legacypool.LegacyPool
-}
-
-// newTestBackend creates an empty chain and wraps it into a mock backend.
-func newTestBackend(blocks int) *testBackend {
- return newTestBackendWithGenerator(blocks)
-}
-
-// newTestBackendWithGenerator creates a chain with a number of explicitly defined blocks and
-// wraps it into a mock backend.
-func newTestBackendWithGenerator(blocks int) *testBackend {
- signer := types.HomesteadSigner{}
- db := rawdb.NewMemoryDatabase()
- engine := clique.New(params.AllCliqueProtocolChanges.Clique, db)
- genspec := &core.Genesis{
- Config: params.AllCliqueProtocolChanges,
- ExtraData: make([]byte, 32+common.AddressLength+65),
- Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(100000000000000000)}},
- BaseFee: big.NewInt(0),
- }
- copy(genspec.ExtraData[32:], testAddr[:])
- genesis := genspec.MustCommit(db, triedb.NewDatabase(db, nil))
-
- chain, _ := core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil, nil)
- generator := func(i int, block *core.BlockGen) {
- // The chain maker doesn't have access to a chain, so the difficulty will be
-		// left unset (nil). Set it here to the correct value.
- // block.SetCoinbase(testAddr)
- block.SetDifficulty(big.NewInt(2))
-
- // We want to simulate an empty middle block, having the same state as the
-		// first one. The last one needs a state change again to force a reorg.
- tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), common.Address{0x01}, big.NewInt(1), params.TxGas, nil, nil), signer, testKey)
- if err != nil {
- panic(err)
- }
- block.AddTxWithChain(chain, tx)
- }
-
- bs, _ := core.GenerateChain(params.AllCliqueProtocolChanges, genesis, engine, db, blocks, generator)
- for i, block := range bs {
- header := block.Header()
- if i > 0 {
- header.ParentHash = bs[i-1].Hash()
- }
- header.Extra = make([]byte, 32+65)
- header.Difficulty = big.NewInt(2)
-
- sig, _ := crypto.Sign(clique.SealHash(header).Bytes(), testKey)
- copy(header.Extra[len(header.Extra)-65:], sig)
- bs[i] = block.WithSeal(header)
- }
-
- if _, err := chain.InsertChain(bs); err != nil {
- panic(err)
- }
-
- txconfig := legacypool.DefaultConfig
- txconfig.Journal = "" // Don't litter the disk with test journals
-
- return &testBackend{
- db: db,
- chain: chain,
- txpool: legacypool.New(txconfig, chain),
- }
-}
-
-// close tears down the transaction pool and chain behind the mock backend.
-func (b *testBackend) close() {
- b.txpool.Close()
- b.chain.Stop()
-}
-
-func (b *testBackend) Chain() *core.BlockChain { return b.chain }
-
-func (b *testBackend) RunPeer(peer *Peer, handler Handler) error {
-	// Normally the backend would do peer maintenance and handshakes. All that
-	// is omitted, and we will just give control back to the handler.
- return handler(peer)
-}
-func (b *testBackend) PeerInfo(enode.ID) interface{} { panic("not implemented") }
-
-func (b *testBackend) Handle(*Peer, Packet) error {
- panic("data processing tests should be done in the handler package")
-}
-
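-// TestRequestRoot exercises the trust protocol's root requests against the cached
-// diff layers, covering verified, mismatched, forked, newer and too-new blocks.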
-func TestRequestRoot(t *testing.T) { testRequestRoot(t, Trust1) }
-
-func testRequestRoot(t *testing.T, protocol uint) {
- t.Parallel()
-
- blockNum := 1032 // The latest 1024 blocks' DiffLayer will be cached.
- backend := newTestBackend(blockNum)
- defer backend.close()
-
- peer, _ := newTestPeer("peer", protocol, backend)
- defer peer.close()
-
- pairs := []struct {
- req RootRequestPacket
- res RootResponsePacket
- }{
- {
- req: RootRequestPacket{
- RequestId: 1,
- BlockNumber: 1,
- },
- res: RootResponsePacket{
- RequestId: 1,
- Status: types.StatusPartiallyVerified,
- BlockNumber: 1,
- Extra: defaultExtra,
- },
- },
- {
- req: RootRequestPacket{
- RequestId: 2,
- BlockNumber: 128,
- },
- res: RootResponsePacket{
- RequestId: 2,
- Status: types.StatusFullVerified,
- BlockNumber: 128,
- Extra: defaultExtra,
- },
- },
- {
- req: RootRequestPacket{
- RequestId: 3,
- BlockNumber: 128,
- BlockHash: types.EmptyRootHash,
- DiffHash: types.EmptyRootHash,
- },
- res: RootResponsePacket{
- RequestId: 3,
- Status: types.StatusImpossibleFork,
- BlockNumber: 128,
- BlockHash: types.EmptyRootHash,
- Root: common.Hash{},
- Extra: defaultExtra,
- },
- },
- {
- req: RootRequestPacket{
- RequestId: 4,
- BlockNumber: 128,
- DiffHash: types.EmptyRootHash,
- },
- res: RootResponsePacket{
- RequestId: 4,
- Status: types.StatusDiffHashMismatch,
- BlockNumber: 128,
- Root: common.Hash{},
- Extra: defaultExtra,
- },
- },
- {
- req: RootRequestPacket{
- RequestId: 5,
- BlockNumber: 1024,
- },
- res: RootResponsePacket{
- RequestId: 5,
- Status: types.StatusFullVerified,
- BlockNumber: 1024,
- Extra: defaultExtra,
- },
- },
- {
- req: RootRequestPacket{
- RequestId: 6,
- BlockNumber: 1024,
- BlockHash: types.EmptyRootHash,
- DiffHash: types.EmptyRootHash,
- },
- res: RootResponsePacket{
- RequestId: 6,
- Status: types.StatusPossibleFork,
- BlockNumber: 1024,
- BlockHash: types.EmptyRootHash,
- Root: common.Hash{},
- Extra: defaultExtra,
- },
- },
- {
- req: RootRequestPacket{
- RequestId: 7,
- BlockNumber: 1033,
- BlockHash: types.EmptyRootHash,
- DiffHash: types.EmptyRootHash,
- },
- res: RootResponsePacket{
- RequestId: 7,
- Status: types.StatusBlockNewer,
- BlockNumber: 1033,
- BlockHash: types.EmptyRootHash,
- Root: common.Hash{},
- Extra: defaultExtra,
- },
- },
- {
- req: RootRequestPacket{
- RequestId: 8,
- BlockNumber: 1044,
- BlockHash: types.EmptyRootHash,
- DiffHash: types.EmptyRootHash,
- },
- res: RootResponsePacket{
- RequestId: 8,
- Status: types.StatusBlockTooNew,
- BlockNumber: 1044,
- BlockHash: types.EmptyRootHash,
- Root: common.Hash{},
- Extra: defaultExtra,
- },
- },
- }
-
- for idx, pair := range pairs {
- header := backend.Chain().GetHeaderByNumber(pair.req.BlockNumber)
- if header != nil {
- if pair.res.Status.Code&0xFF00 == types.StatusVerified.Code {
- pair.req.BlockHash = header.Hash()
- pair.req.DiffHash, _ = core.CalculateDiffHash(backend.Chain().GetTrustedDiffLayer(header.Hash()))
- pair.res.BlockHash = pair.req.BlockHash
- pair.res.Root = header.Root
- } else if pair.res.Status.Code == types.StatusDiffHashMismatch.Code {
- pair.req.BlockHash = header.Hash()
- pair.res.BlockHash = pair.req.BlockHash
- }
- }
-
- p2p.Send(peer.app, RequestRootMsg, pair.req)
- if err := p2p.ExpectMsg(peer.app, RespondRootMsg, pair.res); err != nil {
- t.Errorf("test %d: root response not expected: %v", idx, err)
- }
- }
-}
diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go
deleted file mode 100644
index cd07b0638b..0000000000
--- a/eth/tracers/api_test.go
+++ /dev/null
@@ -1,998 +0,0 @@
-// Copyright 2021 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package tracers
-
-import (
- "context"
- "crypto/ecdsa"
- "encoding/json"
- "errors"
- "fmt"
- "math/big"
- "reflect"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/consensus"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/eth/tracers/logger"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/internal/ethapi"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/rpc"
- "golang.org/x/exp/slices"
-)
-
-var (
- errStateNotFound = errors.New("state not found")
- errBlockNotFound = errors.New("block not found")
-)
-
-type testBackend struct {
- chainConfig *params.ChainConfig
- engine consensus.Engine
- chaindb ethdb.Database
- chain *core.BlockChain
-
- refHook func() // Hook is invoked when the requested state is referenced
- relHook func() // Hook is invoked when the requested state is released
-}
-
-// newTestBackend creates a new test backend. OBS: after the test is done, teardown must be
-// invoked in order to release associated resources.
-func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend {
- backend := &testBackend{
- chainConfig: gspec.Config,
- engine: ethash.NewFaker(),
- chaindb: rawdb.NewMemoryDatabase(),
- }
- // Generate blocks for testing
- _, blocks, _ := core.GenerateChainWithGenesis(gspec, backend.engine, n, generator)
-
- // Import the canonical chain
- cacheConfig := &core.CacheConfig{
- TrieCleanLimit: 256,
- TrieDirtyLimit: 256,
- TrieTimeLimit: 5 * time.Minute,
- SnapshotLimit: 0,
- TriesInMemory: 128,
- TrieDirtyDisabled: true, // Archive mode
- }
- chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, nil, backend.engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- if n, err := chain.InsertChain(blocks); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", n, err)
- }
- backend.chain = chain
- return backend
-}
-
-func (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
- return b.chain.GetHeaderByHash(hash), nil
-}
-
-func (b *testBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) {
- if number == rpc.PendingBlockNumber || number == rpc.LatestBlockNumber {
- return b.chain.CurrentHeader(), nil
- }
- return b.chain.GetHeaderByNumber(uint64(number)), nil
-}
-
-func (b *testBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
- return b.chain.GetBlockByHash(hash), nil
-}
-
-func (b *testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) {
- if number == rpc.PendingBlockNumber || number == rpc.LatestBlockNumber {
- return b.chain.GetBlockByNumber(b.chain.CurrentBlock().Number.Uint64()), nil
- }
- return b.chain.GetBlockByNumber(uint64(number)), nil
-}
-
-func (b *testBackend) GetTransaction(ctx context.Context, txHash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64, error) {
- tx, hash, blockNumber, index := rawdb.ReadTransaction(b.chaindb, txHash)
- return tx != nil, tx, hash, blockNumber, index, nil
-}
-
-func (b *testBackend) RPCGasCap() uint64 {
- return 25000000
-}
-
-func (b *testBackend) ChainConfig() *params.ChainConfig {
- return b.chainConfig
-}
-
-func (b *testBackend) Engine() consensus.Engine {
- return b.engine
-}
-
-func (b *testBackend) ChainDb() ethdb.Database {
- return b.chaindb
-}
-
-// teardown releases the associated resources.
-func (b *testBackend) teardown() {
- b.chain.Stop()
-}
-
-func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, StateReleaseFunc, error) {
- statedb, err := b.chain.StateAt(block.Root())
- if err != nil {
- return nil, nil, errStateNotFound
- }
- if b.refHook != nil {
- b.refHook()
- }
- release := func() {
- if b.relHook != nil {
- b.relHook()
- }
- }
- return statedb, release, nil
-}
-
-func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, StateReleaseFunc, error) {
- parent := b.chain.GetBlock(block.ParentHash(), block.NumberU64()-1)
- if parent == nil {
- return nil, vm.BlockContext{}, nil, nil, errBlockNotFound
- }
- statedb, release, err := b.StateAtBlock(ctx, parent, reexec, nil, true, false)
- if err != nil {
- return nil, vm.BlockContext{}, nil, nil, errStateNotFound
- }
- if txIndex == 0 && len(block.Transactions()) == 0 {
- return nil, vm.BlockContext{}, statedb, release, nil
- }
- // Recompute transactions up to the target index.
- signer := types.MakeSigner(b.chainConfig, block.Number(), block.Time())
- for idx, tx := range block.Transactions() {
- msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
- txContext := core.NewEVMTxContext(msg)
- context := core.NewEVMBlockContext(block.Header(), b.chain, nil)
- if idx == txIndex {
- return msg, context, statedb, release, nil
- }
- vmenv := vm.NewEVM(context, txContext, statedb, b.chainConfig, vm.Config{})
- if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
- return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
- }
- statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))
- }
- return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash())
-}
-
-func TestTraceCall(t *testing.T) {
- t.Parallel()
-
- // Initialize test accounts
- accounts := newAccounts(3)
- genesis := &core.Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{
- accounts[0].addr: {Balance: big.NewInt(params.Ether)},
- accounts[1].addr: {Balance: big.NewInt(params.Ether)},
- accounts[2].addr: {Balance: big.NewInt(params.Ether)},
- },
- }
- genBlocks := 10
- signer := types.HomesteadSigner{}
- nonce := uint64(0)
- backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
- // Transfer from account[0] to account[1]
- // value: 1000 wei
- // fee: 0 wei
- tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
- Nonce: nonce,
- To: &accounts[1].addr,
- Value: big.NewInt(1000),
- Gas: params.TxGas,
- GasPrice: b.BaseFee(),
- Data: nil}),
- signer, accounts[0].key)
- b.AddTx(tx)
- nonce++
-
- if i == genBlocks-2 {
- // Transfer from account[0] to account[2]
- tx, _ = types.SignTx(types.NewTx(&types.LegacyTx{
- Nonce: nonce,
- To: &accounts[2].addr,
- Value: big.NewInt(1000),
- Gas: params.TxGas,
- GasPrice: b.BaseFee(),
- Data: nil}),
- signer, accounts[0].key)
- b.AddTx(tx)
- nonce++
-
- // Transfer from account[0] to account[1] again
- tx, _ = types.SignTx(types.NewTx(&types.LegacyTx{
- Nonce: nonce,
- To: &accounts[1].addr,
- Value: big.NewInt(1000),
- Gas: params.TxGas,
- GasPrice: b.BaseFee(),
- Data: nil}),
- signer, accounts[0].key)
- b.AddTx(tx)
- nonce++
- }
- })
-
- uintPtr := func(i int) *hexutil.Uint { x := hexutil.Uint(i); return &x }
-
- defer backend.teardown()
- api := NewAPI(backend)
- var testSuite = []struct {
- blockNumber rpc.BlockNumber
- call ethapi.TransactionArgs
- config *TraceCallConfig
- expectErr error
- expect string
- }{
- // Standard JSON trace upon the genesis, plain transfer.
- {
- blockNumber: rpc.BlockNumber(0),
- call: ethapi.TransactionArgs{
- From: &accounts[0].addr,
- To: &accounts[1].addr,
- Value: (*hexutil.Big)(big.NewInt(1000)),
- },
- config: nil,
- expectErr: nil,
- expect: `{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}`,
- },
- // Standard JSON trace upon the head, plain transfer.
- {
- blockNumber: rpc.BlockNumber(genBlocks),
- call: ethapi.TransactionArgs{
- From: &accounts[0].addr,
- To: &accounts[1].addr,
- Value: (*hexutil.Big)(big.NewInt(1000)),
- },
- config: nil,
- expectErr: nil,
- expect: `{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}`,
- },
-		// Upon the last state, default to the post-block state
- {
- blockNumber: rpc.BlockNumber(genBlocks - 1),
- call: ethapi.TransactionArgs{
- From: &accounts[2].addr,
- To: &accounts[0].addr,
- Value: (*hexutil.Big)(new(big.Int).Add(big.NewInt(params.Ether), big.NewInt(100))),
- },
- config: nil,
- expect: `{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}`,
- },
-		// Before the first transaction, the call should fail
- {
- blockNumber: rpc.BlockNumber(genBlocks - 1),
- call: ethapi.TransactionArgs{
- From: &accounts[2].addr,
- To: &accounts[0].addr,
- Value: (*hexutil.Big)(new(big.Int).Add(big.NewInt(params.Ether), big.NewInt(100))),
- },
- config: &TraceCallConfig{TxIndex: uintPtr(0)},
- expectErr: fmt.Errorf("tracing failed: insufficient funds for gas * price + value: address %s have 1000000000000000000 want 1000000000000000100", accounts[2].addr),
- },
-		// Before the target transaction, the call should fail
- {
- blockNumber: rpc.BlockNumber(genBlocks - 1),
- call: ethapi.TransactionArgs{
- From: &accounts[2].addr,
- To: &accounts[0].addr,
- Value: (*hexutil.Big)(new(big.Int).Add(big.NewInt(params.Ether), big.NewInt(100))),
- },
- config: &TraceCallConfig{TxIndex: uintPtr(1)},
- expectErr: fmt.Errorf("tracing failed: insufficient funds for gas * price + value: address %s have 1000000000000000000 want 1000000000000000100", accounts[2].addr),
- },
-		// After the target transaction, the call should succeed
- {
- blockNumber: rpc.BlockNumber(genBlocks - 1),
- call: ethapi.TransactionArgs{
- From: &accounts[2].addr,
- To: &accounts[0].addr,
- Value: (*hexutil.Big)(new(big.Int).Add(big.NewInt(params.Ether), big.NewInt(100))),
- },
- config: &TraceCallConfig{TxIndex: uintPtr(2)},
- expectErr: nil,
- expect: `{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}`,
- },
-		// Standard JSON trace upon a non-existent block, an error is expected
- {
- blockNumber: rpc.BlockNumber(genBlocks + 1),
- call: ethapi.TransactionArgs{
- From: &accounts[0].addr,
- To: &accounts[1].addr,
- Value: (*hexutil.Big)(big.NewInt(1000)),
- },
- config: nil,
- expectErr: fmt.Errorf("block #%d not found", genBlocks+1),
- // expect: nil,
- },
- // Standard JSON trace upon the latest block
- {
- blockNumber: rpc.LatestBlockNumber,
- call: ethapi.TransactionArgs{
- From: &accounts[0].addr,
- To: &accounts[1].addr,
- Value: (*hexutil.Big)(big.NewInt(1000)),
- },
- config: nil,
- expectErr: nil,
- expect: `{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}`,
- },
- // Tracing on 'pending' should fail:
- {
- blockNumber: rpc.PendingBlockNumber,
- call: ethapi.TransactionArgs{
- From: &accounts[0].addr,
- To: &accounts[1].addr,
- Value: (*hexutil.Big)(big.NewInt(1000)),
- },
- config: nil,
- expectErr: errors.New("tracing on top of pending is not supported"),
- },
- {
- blockNumber: rpc.LatestBlockNumber,
- call: ethapi.TransactionArgs{
- From: &accounts[0].addr,
- Input: &hexutil.Bytes{0x43}, // blocknumber
- },
- config: &TraceCallConfig{
-				BlockOverrides: &ethapi.BlockOverrides{Number: (*hexutil.Big)(big.NewInt(0x1337))},
- },
- expectErr: nil,
- expect: ` {"gas":53018,"failed":false,"returnValue":"","structLogs":[
- {"pc":0,"op":"NUMBER","gas":24946984,"gasCost":2,"depth":1,"stack":[]},
- {"pc":1,"op":"STOP","gas":24946982,"gasCost":0,"depth":1,"stack":["0x1337"]}]}`,
- },
- }
- for i, testspec := range testSuite {
- result, err := api.TraceCall(context.Background(), testspec.call, rpc.BlockNumberOrHash{BlockNumber: &testspec.blockNumber}, testspec.config)
- if testspec.expectErr != nil {
- if err == nil {
- t.Errorf("test %d: expect error %v, got nothing", i, testspec.expectErr)
- continue
- }
- if !reflect.DeepEqual(err.Error(), testspec.expectErr.Error()) {
- t.Errorf("test %d: error mismatch, want '%v', got '%v'", i, testspec.expectErr, err)
- }
- } else {
- if err != nil {
- t.Errorf("test %d: expect no error, got %v", i, err)
- continue
- }
- var have *logger.ExecutionResult
- if err := json.Unmarshal(result.(json.RawMessage), &have); err != nil {
- t.Errorf("test %d: failed to unmarshal result %v", i, err)
- }
- var want *logger.ExecutionResult
- if err := json.Unmarshal([]byte(testspec.expect), &want); err != nil {
- t.Errorf("test %d: failed to unmarshal result %v", i, err)
- }
- if !reflect.DeepEqual(have, want) {
- t.Errorf("test %d: result mismatch, want %v, got %v", i, testspec.expect, string(result.(json.RawMessage)))
- }
- }
- }
-}
-
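-// TestTraceTransaction traces a plain value transfer and checks that the result
-// reports 21000 gas with no structured logs; it also verifies that tracing an
-// unknown transaction hash returns errTxNotFound.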
-func TestTraceTransaction(t *testing.T) {
- t.Parallel()
-
- // Initialize test accounts
- accounts := newAccounts(2)
- genesis := &core.Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{
- accounts[0].addr: {Balance: big.NewInt(params.Ether)},
- accounts[1].addr: {Balance: big.NewInt(params.Ether)},
- },
- }
- target := common.Hash{}
- signer := types.HomesteadSigner{}
- backend := newTestBackend(t, 1, genesis, func(i int, b *core.BlockGen) {
- // Transfer from account[0] to account[1]
- // value: 1000 wei
- // fee: 0 wei
- tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
- Nonce: uint64(i),
- To: &accounts[1].addr,
- Value: big.NewInt(1000),
- Gas: params.TxGas,
- GasPrice: b.BaseFee(),
- Data: nil}),
- signer, accounts[0].key)
- b.AddTx(tx)
- target = tx.Hash()
- })
- defer backend.chain.Stop()
- api := NewAPI(backend)
- result, err := api.TraceTransaction(context.Background(), target, nil)
- if err != nil {
- t.Errorf("Failed to trace transaction %v", err)
- }
- var have *logger.ExecutionResult
- if err := json.Unmarshal(result.(json.RawMessage), &have); err != nil {
- t.Errorf("failed to unmarshal result %v", err)
- }
- if !reflect.DeepEqual(have, &logger.ExecutionResult{
- Gas: params.TxGas,
- Failed: false,
- ReturnValue: "",
- StructLogs: []logger.StructLogRes{},
- }) {
- t.Error("Transaction tracing result is different")
- }
-
- // Test non-existent transaction
- _, err = api.TraceTransaction(context.Background(), common.Hash{42}, nil)
- if !errors.Is(err, errTxNotFound) {
- t.Fatalf("want %v, have %v", errTxNotFound, err)
- }
-}
-
-func TestTraceBlock(t *testing.T) {
- t.Parallel()
-
- // Initialize test accounts
- accounts := newAccounts(3)
- genesis := &core.Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{
- accounts[0].addr: {Balance: big.NewInt(params.Ether)},
- accounts[1].addr: {Balance: big.NewInt(params.Ether)},
- accounts[2].addr: {Balance: big.NewInt(params.Ether)},
- },
- }
- genBlocks := 10
- signer := types.HomesteadSigner{}
- var txHash common.Hash
- backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
- // Transfer from account[0] to account[1]
- // value: 1000 wei
- // fee: 0 wei
- tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
- Nonce: uint64(i),
- To: &accounts[1].addr,
- Value: big.NewInt(1000),
- Gas: params.TxGas,
- GasPrice: b.BaseFee(),
- Data: nil}),
- signer, accounts[0].key)
- b.AddTx(tx)
- txHash = tx.Hash()
- })
- defer backend.chain.Stop()
- api := NewAPI(backend)
-
- var testSuite = []struct {
- blockNumber rpc.BlockNumber
- config *TraceConfig
- want string
- expectErr error
- }{
- // Trace genesis block, expect error
- {
- blockNumber: rpc.BlockNumber(0),
- expectErr: errors.New("genesis is not traceable"),
- },
- // Trace head block
- {
- blockNumber: rpc.BlockNumber(genBlocks),
- want: fmt.Sprintf(`[{"txHash":"%v","result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`, txHash),
- },
- // Trace non-existent block
- {
- blockNumber: rpc.BlockNumber(genBlocks + 1),
- expectErr: fmt.Errorf("block #%d not found", genBlocks+1),
- },
- // Trace latest block
- {
- blockNumber: rpc.LatestBlockNumber,
- want: fmt.Sprintf(`[{"txHash":"%v","result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`, txHash),
- },
- // Trace pending block
- {
- blockNumber: rpc.PendingBlockNumber,
- want: fmt.Sprintf(`[{"txHash":"%v","result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`, txHash),
- },
- }
- for i, tc := range testSuite {
- result, err := api.TraceBlockByNumber(context.Background(), tc.blockNumber, tc.config)
- if tc.expectErr != nil {
- if err == nil {
- t.Errorf("test %d, want error %v", i, tc.expectErr)
- continue
- }
- if !reflect.DeepEqual(err, tc.expectErr) {
- t.Errorf("test %d: error mismatch, want %v, get %v", i, tc.expectErr, err)
- }
- continue
- }
- if err != nil {
- t.Errorf("test %d, want no error, have %v", i, err)
- continue
- }
- have, _ := json.Marshal(result)
- want := tc.want
- if string(have) != want {
- t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, string(have), want)
- }
- }
-}
-
-func TestTracingWithOverrides(t *testing.T) {
- t.Parallel()
- // Initialize test accounts
- accounts := newAccounts(3)
- storageAccount := common.Address{0x13, 37}
- genesis := &core.Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{
- accounts[0].addr: {Balance: big.NewInt(params.Ether)},
- accounts[1].addr: {Balance: big.NewInt(params.Ether)},
- accounts[2].addr: {Balance: big.NewInt(params.Ether)},
- // An account with existing storage
- storageAccount: {
- Balance: new(big.Int),
- Storage: map[common.Hash]common.Hash{
- common.HexToHash("0x03"): common.HexToHash("0x33"),
- common.HexToHash("0x04"): common.HexToHash("0x44"),
- },
- },
- },
- }
- genBlocks := 10
- signer := types.HomesteadSigner{}
- backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
- // Transfer from account[0] to account[1]
- // value: 1000 wei
- // fee: 0 wei
- tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
- Nonce: uint64(i),
- To: &accounts[1].addr,
- Value: big.NewInt(1000),
- Gas: params.TxGas,
- GasPrice: b.BaseFee(),
- Data: nil}),
- signer, accounts[0].key)
- b.AddTx(tx)
- })
- defer backend.chain.Stop()
- api := NewAPI(backend)
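-	// Fresh accounts with no genesis allocation; calls from them only succeed
-	// when state overrides supply a balance or code.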
- randomAccounts := newAccounts(3)
- type res struct {
- Gas int
- Failed bool
- ReturnValue string
- }
- var testSuite = []struct {
- blockNumber rpc.BlockNumber
- call ethapi.TransactionArgs
- config *TraceCallConfig
- expectErr error
- want string
- }{
-		// Call which can only succeed if the state is overridden
- {
- blockNumber: rpc.LatestBlockNumber,
- call: ethapi.TransactionArgs{
- From: &randomAccounts[0].addr,
- To: &randomAccounts[1].addr,
- Value: (*hexutil.Big)(big.NewInt(1000)),
- },
- config: &TraceCallConfig{
- StateOverrides: ðapi.StateOverride{
- randomAccounts[0].addr: ethapi.OverrideAccount{Balance: newRPCBalance(new(big.Int).Mul(big.NewInt(1), big.NewInt(params.Ether)))},
- },
- },
- want: `{"gas":21000,"failed":false,"returnValue":""}`,
- },
- // Invalid call without state overriding
- {
- blockNumber: rpc.LatestBlockNumber,
- call: ethapi.TransactionArgs{
- From: &randomAccounts[0].addr,
- To: &randomAccounts[1].addr,
- Value: (*hexutil.Big)(big.NewInt(1000)),
- },
- config: &TraceCallConfig{},
- expectErr: core.ErrInsufficientFunds,
- },
- // Successful simple contract call
- //
- // // SPDX-License-Identifier: GPL-3.0
- //
- // pragma solidity >=0.7.0 <0.8.0;
- //
- // /**
- // * @title Storage
- // * @dev Store & retrieve value in a variable
- // */
- // contract Storage {
- // uint256 public number;
- // constructor() {
- // number = block.number;
- // }
- // }
- {
- blockNumber: rpc.LatestBlockNumber,
- call: ethapi.TransactionArgs{
- From: &randomAccounts[0].addr,
- To: &randomAccounts[2].addr,
- Data: newRPCBytes(common.Hex2Bytes("8381f58a")), // call number()
- },
- config: &TraceCallConfig{
- // Tracer: &tracer,
- StateOverrides: ðapi.StateOverride{
- randomAccounts[2].addr: ethapi.OverrideAccount{
- Code: newRPCBytes(common.Hex2Bytes("6080604052348015600f57600080fd5b506004361060285760003560e01c80638381f58a14602d575b600080fd5b60336049565b6040518082815260200191505060405180910390f35b6000548156fea2646970667358221220eab35ffa6ab2adfe380772a48b8ba78e82a1b820a18fcb6f59aa4efb20a5f60064736f6c63430007040033")),
- StateDiff: newStates([]common.Hash{{}}, []common.Hash{common.BigToHash(big.NewInt(123))}),
- },
- },
- },
- want: `{"gas":23347,"failed":false,"returnValue":"000000000000000000000000000000000000000000000000000000000000007b"}`,
- },
- { // Override blocknumber
- blockNumber: rpc.LatestBlockNumber,
- call: ethapi.TransactionArgs{
- From: &accounts[0].addr,
-				// NUMBER; PUSH1 0x00; MSTORE; PUSH1 0x20; PUSH1 0x00; RETURN (return the block number)
- Input: newRPCBytes(common.Hex2Bytes("4360005260206000f3")),
- // &hexutil.Bytes{0x43}, // blocknumber
- },
- config: &TraceCallConfig{
- BlockOverrides: ðapi.BlockOverrides{Number: (*hexutil.Big)(big.NewInt(0x1337))},
- },
- want: `{"gas":59537,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000001337"}`,
- },
- { // Override blocknumber, and query a blockhash
- blockNumber: rpc.LatestBlockNumber,
- call: ethapi.TransactionArgs{
- From: &accounts[0].addr,
- Input: &hexutil.Bytes{
- 0x60, 0x00, 0x40, // BLOCKHASH(0)
-					0x60, 0x00, 0x52, // MSTORE at memory offset 0
-					0x61, 0x13, 0x36, 0x40, // BLOCKHASH(0x1336)
-					0x60, 0x20, 0x52, // MSTORE at memory offset 32
-					0x61, 0x13, 0x37, 0x40, // BLOCKHASH(0x1337)
-					0x60, 0x40, 0x52, // MSTORE at memory offset 64
-					0x60, 0x60, 0x60, 0x00, 0xf3, // RETURN (memory 0-96)
-
-				},
- },
- config: &TraceCallConfig{
- BlockOverrides: ðapi.BlockOverrides{Number: (*hexutil.Big)(big.NewInt(0x1337))},
- },
- want: `{"gas":72666,"failed":false,"returnValue":"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}`,
- },
- /*
- pragma solidity =0.8.12;
-
- contract Test {
- uint private x;
-
- function test2() external {
- x = 1337;
- revert();
- }
-
- function test() external returns (uint) {
- x = 1;
- try this.test2() {} catch (bytes memory) {}
- return x;
- }
- }
- */
- { // First with only code override, not storage override
- blockNumber: rpc.LatestBlockNumber,
- call: ethapi.TransactionArgs{
- From: &randomAccounts[0].addr,
- To: &randomAccounts[2].addr,
-				Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // call test()
- },
- config: &TraceCallConfig{
- StateOverrides: ðapi.StateOverride{
- randomAccounts[2].addr: ethapi.OverrideAccount{
- Code: newRPCBytes(common.Hex2Bytes("6080604052348015600f57600080fd5b506004361060325760003560e01c806366e41cb7146037578063f8a8fd6d14603f575b600080fd5b603d6057565b005b60456062565b60405190815260200160405180910390f35b610539600090815580fd5b60006001600081905550306001600160a01b03166366e41cb76040518163ffffffff1660e01b8152600401600060405180830381600087803b15801560a657600080fd5b505af192505050801560b6575060015b60e9573d80801560e1576040519150601f19603f3d011682016040523d82523d6000602084013e60e6565b606091505b50505b506000549056fea26469706673582212205ce45de745a5308f713cb2f448589177ba5a442d1a2eff945afaa8915961b4d064736f6c634300080c0033")),
- },
- },
- },
- want: `{"gas":44100,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000001"}`,
- },
- { // Same again, this time with storage override
- blockNumber: rpc.LatestBlockNumber,
- call: ethapi.TransactionArgs{
- From: &randomAccounts[0].addr,
- To: &randomAccounts[2].addr,
-				Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // call test()
- },
- config: &TraceCallConfig{
- StateOverrides: ðapi.StateOverride{
- randomAccounts[2].addr: ethapi.OverrideAccount{
- Code: newRPCBytes(common.Hex2Bytes("6080604052348015600f57600080fd5b506004361060325760003560e01c806366e41cb7146037578063f8a8fd6d14603f575b600080fd5b603d6057565b005b60456062565b60405190815260200160405180910390f35b610539600090815580fd5b60006001600081905550306001600160a01b03166366e41cb76040518163ffffffff1660e01b8152600401600060405180830381600087803b15801560a657600080fd5b505af192505050801560b6575060015b60e9573d80801560e1576040519150601f19603f3d011682016040523d82523d6000602084013e60e6565b606091505b50505b506000549056fea26469706673582212205ce45de745a5308f713cb2f448589177ba5a442d1a2eff945afaa8915961b4d064736f6c634300080c0033")),
- State: newStates([]common.Hash{{}}, []common.Hash{{}}),
- },
- },
- },
- // want: `{"gas":46900,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000539"}`,
- want: `{"gas":44100,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000001"}`,
- },
- { // No state override
- blockNumber: rpc.LatestBlockNumber,
- call: ethapi.TransactionArgs{
- From: &randomAccounts[0].addr,
- To: &storageAccount,
-				Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // test() selector (ignored by the overridden code)
- },
- config: &TraceCallConfig{
- StateOverrides: ðapi.StateOverride{
- storageAccount: ethapi.OverrideAccount{
- Code: newRPCBytes([]byte{
- // SLOAD(3) + SLOAD(4) (which is 0x77)
- byte(vm.PUSH1), 0x04,
- byte(vm.SLOAD),
- byte(vm.PUSH1), 0x03,
- byte(vm.SLOAD),
- byte(vm.ADD),
- // 0x77 -> MSTORE(0)
- byte(vm.PUSH1), 0x00,
- byte(vm.MSTORE),
- // RETURN (0, 32)
- byte(vm.PUSH1), 32,
- byte(vm.PUSH1), 00,
- byte(vm.RETURN),
- }),
- },
- },
- },
- want: `{"gas":25288,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000077"}`,
- },
- { // Full state override
- // The original storage is
- // 3: 0x33
- // 4: 0x44
-			// With a full override, where we set 3:0x11, slot 4 should be
-			// removed. So SLOAD(3)+SLOAD(4) should be 0x11.
- blockNumber: rpc.LatestBlockNumber,
- call: ethapi.TransactionArgs{
- From: &randomAccounts[0].addr,
- To: &storageAccount,
-				Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // test() selector (ignored by the overridden code)
- },
- config: &TraceCallConfig{
- StateOverrides: ðapi.StateOverride{
- storageAccount: ethapi.OverrideAccount{
- Code: newRPCBytes([]byte{
- // SLOAD(3) + SLOAD(4) (which is now 0x11 + 0x00)
- byte(vm.PUSH1), 0x04,
- byte(vm.SLOAD),
- byte(vm.PUSH1), 0x03,
- byte(vm.SLOAD),
- byte(vm.ADD),
- // 0x11 -> MSTORE(0)
- byte(vm.PUSH1), 0x00,
- byte(vm.MSTORE),
- // RETURN (0, 32)
- byte(vm.PUSH1), 32,
- byte(vm.PUSH1), 00,
- byte(vm.RETURN),
- }),
- State: newStates(
- []common.Hash{common.HexToHash("0x03")},
- []common.Hash{common.HexToHash("0x11")}),
- },
- },
- },
- want: `{"gas":25288,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000011"}`,
- },
- { // Partial state override
- // The original storage is
- // 3: 0x33
- // 4: 0x44
-			// With a partial override, where we set 3:0x11, slot 4 stays as before.
-			// So SLOAD(3)+SLOAD(4) should be 0x55.
- blockNumber: rpc.LatestBlockNumber,
- call: ethapi.TransactionArgs{
- From: &randomAccounts[0].addr,
- To: &storageAccount,
-				Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // test() selector (ignored by the overridden code)
- },
- config: &TraceCallConfig{
- StateOverrides: ðapi.StateOverride{
- storageAccount: ethapi.OverrideAccount{
- Code: newRPCBytes([]byte{
- // SLOAD(3) + SLOAD(4) (which is now 0x11 + 0x44)
- byte(vm.PUSH1), 0x04,
- byte(vm.SLOAD),
- byte(vm.PUSH1), 0x03,
- byte(vm.SLOAD),
- byte(vm.ADD),
- // 0x55 -> MSTORE(0)
- byte(vm.PUSH1), 0x00,
- byte(vm.MSTORE),
- // RETURN (0, 32)
- byte(vm.PUSH1), 32,
- byte(vm.PUSH1), 00,
- byte(vm.RETURN),
- }),
- StateDiff: &map[common.Hash]common.Hash{
- common.HexToHash("0x03"): common.HexToHash("0x11"),
- },
- },
- },
- },
- want: `{"gas":25288,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000055"}`,
- },
- }
- for i, tc := range testSuite {
- result, err := api.TraceCall(context.Background(), tc.call, rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, tc.config)
- if tc.expectErr != nil {
- if err == nil {
- t.Errorf("test %d: want error %v, have nothing", i, tc.expectErr)
- continue
- }
- if !errors.Is(err, tc.expectErr) {
- t.Errorf("test %d: error mismatch, want %v, have %v", i, tc.expectErr, err)
- }
- continue
- }
- if err != nil {
- t.Errorf("test %d: want no error, have %v", i, err)
- continue
- }
- // Turn result into res-struct
- var (
- have res
- want res
- )
- resBytes, _ := json.Marshal(result)
- json.Unmarshal(resBytes, &have)
- json.Unmarshal([]byte(tc.want), &want)
- if !reflect.DeepEqual(have, want) {
- t.Logf("result: %v\n", string(resBytes))
- t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, have, want)
- }
- }
-}
-
-type Account struct {
- key *ecdsa.PrivateKey
- addr common.Address
-}
-
-func newAccounts(n int) (accounts []Account) {
- for i := 0; i < n; i++ {
- key, _ := crypto.GenerateKey()
- addr := crypto.PubkeyToAddress(key.PublicKey)
- accounts = append(accounts, Account{key: key, addr: addr})
- }
- slices.SortFunc(accounts, func(a, b Account) int { return a.addr.Cmp(b.addr) })
- return accounts
-}
-
-func newRPCBalance(balance *big.Int) **hexutil.Big {
- rpcBalance := (*hexutil.Big)(balance)
- return &rpcBalance
-}
-
-func newRPCBytes(bytes []byte) *hexutil.Bytes {
- rpcBytes := hexutil.Bytes(bytes)
- return &rpcBytes
-}
-
-func newStates(keys []common.Hash, vals []common.Hash) *map[common.Hash]common.Hash {
- if len(keys) != len(vals) {
- panic("invalid input")
- }
- m := make(map[common.Hash]common.Hash)
- for i := 0; i < len(keys); i++ {
- m[keys[i]] = vals[i]
- }
- return &m
-}
-
-func TestTraceChain(t *testing.T) {
- // Initialize test accounts
- accounts := newAccounts(3)
- genesis := &core.Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{
- accounts[0].addr: {Balance: big.NewInt(params.Ether)},
- accounts[1].addr: {Balance: big.NewInt(params.Ether)},
- accounts[2].addr: {Balance: big.NewInt(params.Ether)},
- },
- }
- genBlocks := 50
- signer := types.HomesteadSigner{}
-
- var (
-		ref   atomic.Uint32 // total number of refs made
-		rel   atomic.Uint32 // total number of rels made
- nonce uint64
- )
- backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
- // Transfer from account[0] to account[1]
- // value: 1000 wei
- // fee: 0 wei
- for j := 0; j < i+1; j++ {
- tx, _ := types.SignTx(types.NewTransaction(nonce, accounts[1].addr, big.NewInt(1000), params.TxGas, b.BaseFee(), nil), signer, accounts[0].key)
- b.AddTx(tx)
- nonce += 1
- }
- })
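-	// Count every state reference and release made during tracing; the two
-	// counters must match at the end of each case.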
- backend.refHook = func() { ref.Add(1) }
- backend.relHook = func() { rel.Add(1) }
- api := NewAPI(backend)
-
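-	// Expected JSON for a single plain transfer; trace tx hashes are zeroed
-	// below before comparing against it.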
- single := `{"txHash":"0x0000000000000000000000000000000000000000000000000000000000000000","result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}`
- var cases = []struct {
- start uint64
- end uint64
- config *TraceConfig
- }{
- {0, 50, nil}, // the entire chain range, blocks [1, 50]
- {10, 20, nil}, // the middle chain range, blocks [11, 20]
- }
- for _, c := range cases {
- ref.Store(0)
- rel.Store(0)
-
- from, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.start))
- to, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.end))
- resCh := api.traceChain(from, to, c.config, nil)
-
- next := c.start + 1
- for result := range resCh {
- if have, want := uint64(result.Block), next; have != want {
- t.Fatalf("unexpected tracing block, have %d want %d", have, want)
- }
- if have, want := len(result.Traces), int(next); have != want {
- t.Fatalf("unexpected result length, have %d want %d", have, want)
- }
- for _, trace := range result.Traces {
- trace.TxHash = common.Hash{}
- blob, _ := json.Marshal(trace)
- if have, want := string(blob), single; have != want {
- t.Fatalf("unexpected tracing result, have\n%v\nwant:\n%v", have, want)
- }
- }
- next += 1
- }
- if next != c.end+1 {
- t.Error("Missing tracing block")
- }
-
- if nref, nrel := ref.Load(), rel.Load(); nref != nrel {
- t.Errorf("Ref and deref actions are not equal, ref %d rel %d", nref, nrel)
- }
- }
-}
diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go
deleted file mode 100644
index 54ce597b09..0000000000
--- a/ethclient/ethclient_test.go
+++ /dev/null
@@ -1,780 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package ethclient
-
-import (
- "context"
- "errors"
- "math/big"
- "reflect"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/eth"
- "github.com/ethereum/go-ethereum/eth/ethconfig"
- "github.com/ethereum/go-ethereum/internal/ethapi"
- "github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/rpc"
- "github.com/ethereum/go-ethereum/triedb"
-)
-
-// Verify that Client implements the ethereum interfaces.
-var (
- _ = ethereum.ChainReader(&Client{})
- _ = ethereum.TransactionReader(&Client{})
- _ = ethereum.ChainStateReader(&Client{})
- _ = ethereum.ChainSyncReader(&Client{})
- _ = ethereum.ContractCaller(&Client{})
- _ = ethereum.GasEstimator(&Client{})
- _ = ethereum.GasPricer(&Client{})
- _ = ethereum.LogFilterer(&Client{})
- _ = ethereum.PendingStateReader(&Client{})
- // _ = ethereum.PendingStateEventer(&Client{})
- _ = ethereum.PendingContractCaller(&Client{})
-)
-
-func TestToFilterArg(t *testing.T) {
- blockHashErr := errors.New("cannot specify both BlockHash and FromBlock/ToBlock")
- addresses := []common.Address{
- common.HexToAddress("0xD36722ADeC3EdCB29c8e7b5a47f352D701393462"),
- }
- blockHash := common.HexToHash(
- "0xeb94bb7d78b73657a9d7a99792413f50c0a45c51fc62bdcb08a53f18e9a2b4eb",
- )
-
- for _, testCase := range []struct {
- name string
- input ethereum.FilterQuery
- output interface{}
- err error
- }{
- {
- "without BlockHash",
- ethereum.FilterQuery{
- Addresses: addresses,
- FromBlock: big.NewInt(1),
- ToBlock: big.NewInt(2),
- Topics: [][]common.Hash{},
- },
- map[string]interface{}{
- "address": addresses,
- "fromBlock": "0x1",
- "toBlock": "0x2",
- "topics": [][]common.Hash{},
- },
- nil,
- },
- {
- "with nil fromBlock and nil toBlock",
- ethereum.FilterQuery{
- Addresses: addresses,
- Topics: [][]common.Hash{},
- },
- map[string]interface{}{
- "address": addresses,
- "fromBlock": "0x0",
- "toBlock": "latest",
- "topics": [][]common.Hash{},
- },
- nil,
- },
- {
- "with negative fromBlock and negative toBlock",
- ethereum.FilterQuery{
- Addresses: addresses,
- FromBlock: big.NewInt(-1),
- ToBlock: big.NewInt(-1),
- Topics: [][]common.Hash{},
- },
- map[string]interface{}{
- "address": addresses,
- "fromBlock": "pending",
- "toBlock": "pending",
- "topics": [][]common.Hash{},
- },
- nil,
- },
- {
- "with blockhash",
- ethereum.FilterQuery{
- Addresses: addresses,
- BlockHash: &blockHash,
- Topics: [][]common.Hash{},
- },
- map[string]interface{}{
- "address": addresses,
- "blockHash": blockHash,
- "topics": [][]common.Hash{},
- },
- nil,
- },
- {
- "with blockhash and from block",
- ethereum.FilterQuery{
- Addresses: addresses,
- BlockHash: &blockHash,
- FromBlock: big.NewInt(1),
- Topics: [][]common.Hash{},
- },
- nil,
- blockHashErr,
- },
- {
- "with blockhash and to block",
- ethereum.FilterQuery{
- Addresses: addresses,
- BlockHash: &blockHash,
- ToBlock: big.NewInt(1),
- Topics: [][]common.Hash{},
- },
- nil,
- blockHashErr,
- },
- {
- "with blockhash and both from / to block",
- ethereum.FilterQuery{
- Addresses: addresses,
- BlockHash: &blockHash,
- FromBlock: big.NewInt(1),
- ToBlock: big.NewInt(2),
- Topics: [][]common.Hash{},
- },
- nil,
- blockHashErr,
- },
- } {
- t.Run(testCase.name, func(t *testing.T) {
- output, err := toFilterArg(testCase.input)
- if (testCase.err == nil) != (err == nil) {
- t.Fatalf("expected error %v but got %v", testCase.err, err)
- }
- if testCase.err != nil {
- if testCase.err.Error() != err.Error() {
- t.Fatalf("expected error %v but got %v", testCase.err, err)
- }
- } else if !reflect.DeepEqual(testCase.output, output) {
- t.Fatalf("expected filter arg %v but got %v", testCase.output, output)
- }
- })
- }
-}
-
-var (
- testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- testAddr = crypto.PubkeyToAddress(testKey.PublicKey)
- testBalance = big.NewInt(2e18)
- testGasPrice = big.NewInt(3e9) // 3Gwei
- testBlockNum = 128
- testBlocks = []testBlockParam{
- {
- blockNr: 1,
- txs: []testTransactionParam{
- {
- to: common.Address{0x10},
- value: big.NewInt(0),
- gasPrice: testGasPrice,
- data: nil,
- },
- {
- to: common.Address{0x11},
- value: big.NewInt(0),
- gasPrice: testGasPrice,
- data: nil,
- },
- },
- },
- {
-		// These tx params are also used for the default block.
- blockNr: 10,
- txs: []testTransactionParam{},
- },
- {
- blockNr: 11,
- txs: []testTransactionParam{
- {
- to: common.Address{0x01},
- value: big.NewInt(1),
- gasPrice: big.NewInt(params.InitialBaseFee),
- data: nil,
- },
- },
- },
- {
- blockNr: 12,
- txs: []testTransactionParam{
- {
- to: common.Address{0x01},
- value: big.NewInt(1),
- gasPrice: big.NewInt(params.InitialBaseFee),
- data: nil,
- },
- {
- to: common.Address{0x02},
- value: big.NewInt(2),
- gasPrice: big.NewInt(params.InitialBaseFee),
- data: nil,
- },
- },
- },
- {
- blockNr: 13,
- txs: []testTransactionParam{
- {
- to: common.Address{0x01},
- value: big.NewInt(1),
- gasPrice: big.NewInt(params.InitialBaseFee),
- data: nil,
- },
- {
- to: common.Address{0x02},
- value: big.NewInt(2),
- gasPrice: big.NewInt(params.InitialBaseFee),
- data: nil,
- },
- {
- to: common.Address{0x03},
- value: big.NewInt(3),
- gasPrice: big.NewInt(params.InitialBaseFee),
- data: nil,
- },
- },
- },
- }
-)
-
-var genesis = &core.Genesis{
- Config: params.AllEthashProtocolChanges,
- Alloc: types.GenesisAlloc{testAddr: {Balance: testBalance}},
- ExtraData: []byte("test genesis"),
- Timestamp: 9000,
- BaseFee: big.NewInt(params.InitialBaseFeeForBSC),
-}
-
-var testTx1 = types.MustSignNewTx(testKey, types.LatestSigner(genesis.Config), &types.LegacyTx{
- Nonce: 254,
- Value: big.NewInt(12),
- GasPrice: testGasPrice,
- Gas: params.TxGas,
- To: &common.Address{2},
-})
-
-var testTx2 = types.MustSignNewTx(testKey, types.LatestSigner(genesis.Config), &types.LegacyTx{
- Nonce: 255,
- Value: big.NewInt(8),
- GasPrice: testGasPrice,
- Gas: params.TxGas,
- To: &common.Address{2},
-})
-
-type testTransactionParam struct {
- to common.Address
- value *big.Int
- gasPrice *big.Int
- data []byte
-}
-
-type testBlockParam struct {
- blockNr int
- txs []testTransactionParam
-}
-
-func newTestBackend(t *testing.T) (*node.Node, []*types.Block) {
- // Generate test chain.
- blocks := generateTestChain()
-
- // Create node
- n, err := node.New(&node.Config{})
- if err != nil {
- t.Fatalf("can't create new node: %v", err)
- }
- // Create Ethereum Service
- config := ðconfig.Config{Genesis: genesis}
- config.SnapshotCache = 256
- config.TriesInMemory = 128
- ethservice, err := eth.New(n, config)
- if err != nil {
- t.Fatalf("can't create new ethereum service: %v", err)
- }
- // Import the test chain.
- if err := n.Start(); err != nil {
- t.Fatalf("can't start test node: %v", err)
- }
- if _, err := ethservice.BlockChain().InsertChain(blocks[1:]); err != nil {
- t.Fatalf("can't import test blocks: %v", err)
- }
- // Ensure the tx indexing is fully generated
- for ; ; time.Sleep(time.Millisecond * 100) {
- progress, err := ethservice.BlockChain().TxIndexProgress()
- if err == nil && progress.Done() {
- break
- }
- }
- return n, blocks
-}
-
-func generateTestChain() []*types.Block {
- signer := types.HomesteadSigner{}
-	// Create a database pre-initialized with a genesis block
- db := rawdb.NewMemoryDatabase()
- genesis.MustCommit(db, triedb.NewDatabase(db, nil))
- chain, _ := core.NewBlockChain(db, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil, core.EnablePersistDiff(860000))
- generate := func(i int, block *core.BlockGen) {
- block.OffsetTime(5)
- block.SetExtra([]byte("test"))
- //block.SetCoinbase(testAddr)
-
- for idx, testBlock := range testBlocks {
-			// Specific block setting; the index in this generator is offset by 1 from the specified blockNr.
- if i+1 == testBlock.blockNr {
- for _, testTransaction := range testBlock.txs {
- tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), testTransaction.to,
- testTransaction.value, params.TxGas, testTransaction.gasPrice, testTransaction.data), signer, testKey)
- if err != nil {
- panic(err)
- }
- block.AddTxWithChain(chain, tx)
- }
- break
- }
-
- // Default block setting.
- if idx == len(testBlocks)-1 {
- // We want to simulate an empty middle block, having the same state as the
-				// first one. The last one needs a state change again to force a reorg.
- for _, testTransaction := range testBlocks[0].txs {
- tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), testTransaction.to,
- testTransaction.value, params.TxGas, testTransaction.gasPrice, testTransaction.data), signer, testKey)
- if err != nil {
- panic(err)
- }
- block.AddTxWithChain(chain, tx)
- }
- }
- }
- // for testTransactionInBlock
- if i+1 == testBlockNum {
- block.AddTxWithChain(chain, testTx1)
- block.AddTxWithChain(chain, testTx2)
- }
- }
- gblock := genesis.MustCommit(db, triedb.NewDatabase(db, nil))
- engine := ethash.NewFaker()
- blocks, _ := core.GenerateChain(genesis.Config, gblock, engine, db, testBlockNum, generate)
- blocks = append([]*types.Block{gblock}, blocks...)
- return blocks
-}
-
-func TestEthClient(t *testing.T) {
- backend, chain := newTestBackend(t)
- client := backend.Attach()
- defer backend.Close()
- defer client.Close()
-
- tests := map[string]struct {
- test func(t *testing.T)
- }{
- "Header": {
- func(t *testing.T) { testHeader(t, chain, client) },
- },
- "BalanceAt": {
- func(t *testing.T) { testBalanceAt(t, client) },
- },
- "TxInBlockInterrupted": {
- func(t *testing.T) { testTransactionInBlock(t, client) },
- },
- "ChainID": {
- func(t *testing.T) { testChainID(t, client) },
- },
- "GetBlock": {
- func(t *testing.T) { testGetBlock(t, client) },
- },
- "StatusFunctions": {
- func(t *testing.T) { testStatusFunctions(t, client) },
- },
- "CallContract": {
- func(t *testing.T) { testCallContract(t, client) },
- },
- "CallContractAtHash": {
- func(t *testing.T) { testCallContractAtHash(t, client) },
- },
-		// No AtFunctions test for now, because there is no pending block
- // "AtFunctions": {
- // func(t *testing.T) { testAtFunctions(t, client) },
- // },
- "TestSendTransactionConditional": {
- func(t *testing.T) { testSendTransactionConditional(t, client) },
- },
- }
-
- t.Parallel()
- for name, tt := range tests {
- t.Run(name, tt.test)
- }
-}
-
-func testHeader(t *testing.T, chain []*types.Block, client *rpc.Client) {
- tests := map[string]struct {
- block *big.Int
- want *types.Header
- wantErr error
- }{
- "genesis": {
- block: big.NewInt(0),
- want: chain[0].Header(),
- },
- "first_block": {
- block: big.NewInt(1),
- want: chain[1].Header(),
- },
- "future_block": {
- block: big.NewInt(1000000000),
- want: nil,
- wantErr: ethereum.NotFound,
- },
- }
- for name, tt := range tests {
- t.Run(name, func(t *testing.T) {
- ec := NewClient(client)
- ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
- defer cancel()
-
- got, err := ec.HeaderByNumber(ctx, tt.block)
- if !errors.Is(err, tt.wantErr) {
- t.Fatalf("HeaderByNumber(%v) error = %q, want %q", tt.block, err, tt.wantErr)
- }
-
- gotBytes, err := rlp.EncodeToBytes(got)
- if err != nil {
- t.Fatalf("Error serializing received block header.")
- }
- wantBytes, err := rlp.EncodeToBytes(tt.want)
- if err != nil {
- t.Fatalf("Error serializing wanted block header.")
- }
-
- // Instead of comparing the Header's compare the serialized bytes,
- // because reflect.DeepEqual(*types.Header, *types.Header) sometimes
- // returns false even though the underlying field values are exactly the same.
- if !reflect.DeepEqual(gotBytes, wantBytes) {
- t.Fatalf("HeaderByNumber(%v) got = %v, want %v", tt.block, got, tt.want)
- }
- })
- }
-}
-
-func testBalanceAt(t *testing.T, client *rpc.Client) {
- tests := map[string]struct {
- account common.Address
- block *big.Int
- want *big.Int
- wantErr error
- }{
- "valid_account_genesis": {
- account: testAddr,
- block: big.NewInt(0),
- want: testBalance,
- },
- "valid_account": {
- account: testAddr,
- block: big.NewInt(1),
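-			// genesis balance minus the gas paid by the two transfers in block 1 (2 * 21000 * testGasPrice)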
- want: big.NewInt(0).Sub(testBalance, big.NewInt(0).Mul(big.NewInt(2*21000), testGasPrice)),
- },
- "non_existent_account": {
- account: common.Address{1},
- block: big.NewInt(1),
- want: big.NewInt(0),
- },
- "future_block": {
- account: testAddr,
- block: big.NewInt(1000000000),
- want: big.NewInt(0),
- wantErr: errors.New("header not found"),
- },
- }
- for name, tt := range tests {
- t.Run(name, func(t *testing.T) {
- ec := NewClient(client)
- ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
- defer cancel()
-
- got, err := ec.BalanceAt(ctx, tt.account, tt.block)
- if tt.wantErr != nil && (err == nil || err.Error() != tt.wantErr.Error()) {
- t.Fatalf("BalanceAt(%x, %v) error = %q, want %q", tt.account, tt.block, err, tt.wantErr)
- }
- if got.Cmp(tt.want) != 0 {
- t.Fatalf("BalanceAt(%x, %v) = %v, want %v", tt.account, tt.block, got, tt.want)
- }
- })
- }
-}
-
-func testTransactionInBlock(t *testing.T, client *rpc.Client) {
- ec := NewClient(client)
-
- // Get current block by number.
- block, err := ec.BlockByNumber(context.Background(), nil)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
-
- // Test tx in block not found.
- if _, err := ec.TransactionInBlock(context.Background(), block.Hash(), 20); err != ethereum.NotFound {
- t.Fatal("error should be ethereum.NotFound")
- }
-
- // Test tx in block found.
- tx, err := ec.TransactionInBlock(context.Background(), block.Hash(), 2)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if tx.Hash() != testTx1.Hash() {
- t.Fatalf("unexpected transaction: %v", tx)
- }
-
- tx, err = ec.TransactionInBlock(context.Background(), block.Hash(), 3)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if tx.Hash() != testTx2.Hash() {
- t.Fatalf("unexpected transaction: %v", tx)
- }
-}
-
-func testChainID(t *testing.T, client *rpc.Client) {
- ec := NewClient(client)
- id, err := ec.ChainID(context.Background())
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if id == nil || id.Cmp(params.AllEthashProtocolChanges.ChainID) != 0 {
- t.Fatalf("ChainID returned wrong number: %+v", id)
- }
-}
-
-func testGetBlock(t *testing.T, client *rpc.Client) {
- ec := NewClient(client)
-
- // Get current block number
- blockNumber, err := ec.BlockNumber(context.Background())
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if blockNumber != uint64(testBlockNum) {
- t.Fatalf("BlockNumber returned wrong number: %d", blockNumber)
- }
- // Get current block by number
- block, err := ec.BlockByNumber(context.Background(), new(big.Int).SetUint64(blockNumber))
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if block.NumberU64() != blockNumber {
- t.Fatalf("BlockByNumber returned wrong block: want %d got %d", blockNumber, block.NumberU64())
- }
- // Get current block by hash
- blockH, err := ec.BlockByHash(context.Background(), block.Hash())
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if block.Hash() != blockH.Hash() {
- t.Fatalf("BlockByHash returned wrong block: want %v got %v", block.Hash().Hex(), blockH.Hash().Hex())
- }
- // Get header by number
- header, err := ec.HeaderByNumber(context.Background(), new(big.Int).SetUint64(blockNumber))
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if block.Header().Hash() != header.Hash() {
- t.Fatalf("HeaderByNumber returned wrong header: want %v got %v", block.Header().Hash().Hex(), header.Hash().Hex())
- }
- // Get header by hash
- headerH, err := ec.HeaderByHash(context.Background(), block.Hash())
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if block.Header().Hash() != headerH.Hash() {
- t.Fatalf("HeaderByHash returned wrong header: want %v got %v", block.Header().Hash().Hex(), headerH.Hash().Hex())
- }
-}
-
-func testStatusFunctions(t *testing.T, client *rpc.Client) {
- ec := NewClient(client)
-
- // Sync progress
- progress, err := ec.SyncProgress(context.Background())
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if progress != nil {
- t.Fatalf("unexpected progress: %v", progress)
- }
-
- // NetworkID
- networkID, err := ec.NetworkID(context.Background())
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if networkID.Cmp(big.NewInt(1337)) != 0 {
- t.Fatalf("unexpected networkID: %v", networkID)
- }
-
- // SuggestGasPrice
- gasPrice, err := ec.SuggestGasPrice(context.Background())
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if gasPrice.Cmp(testGasPrice) != 0 {
- t.Fatalf("unexpected gas price: %v", gasPrice)
- }
-
- // SuggestGasTipCap
- gasTipCap, err := ec.SuggestGasTipCap(context.Background())
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if gasTipCap.Cmp(testGasPrice) != 0 {
- t.Fatalf("unexpected gas tip cap: %v", gasTipCap)
- }
-
- // FeeHistory
- history, err := ec.FeeHistory(context.Background(), 1, big.NewInt(2), []float64{95, 99})
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- want := ðereum.FeeHistory{
- OldestBlock: big.NewInt(2),
- Reward: [][]*big.Int{
- {
- testGasPrice,
- testGasPrice,
- },
- },
- BaseFee: []*big.Int{
- big.NewInt(params.InitialBaseFeeForBSC),
- big.NewInt(params.InitialBaseFeeForBSC),
- },
- GasUsedRatio: []float64{0.008912678667376286},
- }
- if !reflect.DeepEqual(history, want) {
- t.Fatalf("FeeHistory result doesn't match expected: (got: %v, want: %v)", history, want)
- }
-}
-
-func testCallContractAtHash(t *testing.T, client *rpc.Client) {
- ec := NewClient(client)
-
- // EstimateGas
- msg := ethereum.CallMsg{
- From: testAddr,
- To: &common.Address{},
- Gas: 21000,
- Value: big.NewInt(1),
- }
- gas, err := ec.EstimateGas(context.Background(), msg)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if gas != 21000 {
-		t.Fatalf("unexpected gas: %v", gas)
- }
- block, err := ec.HeaderByNumber(context.Background(), big.NewInt(1))
- if err != nil {
-		t.Fatalf("HeaderByNumber error: %v", err)
- }
- // CallContract
- if _, err := ec.CallContractAtHash(context.Background(), msg, block.Hash()); err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
-}
-
-func testCallContract(t *testing.T, client *rpc.Client) {
- ec := NewClient(client)
-
- // EstimateGas
- msg := ethereum.CallMsg{
- From: testAddr,
- To: &common.Address{},
- Gas: 21000,
- Value: big.NewInt(1),
- }
- gas, err := ec.EstimateGas(context.Background(), msg)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if gas != 21000 {
-		t.Fatalf("unexpected gas: %v", gas)
- }
- // CallContract
- if _, err := ec.CallContract(context.Background(), msg, big.NewInt(1)); err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- // PendingCallContract
- if _, err := ec.PendingCallContract(context.Background(), msg); err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
-}
-
-func testSendTransactionConditional(t *testing.T, client *rpc.Client) {
- ec := NewClient(client)
-
- if err := sendTransactionConditional(ec); err != nil {
- t.Fatalf("error: %v", err)
- }
-}
-
-func sendTransactionConditional(ec *Client) error {
- chainID, err := ec.ChainID(context.Background())
- if err != nil {
- return err
- }
-
- nonce, err := ec.PendingNonceAt(context.Background(), testAddr)
- if err != nil {
- return err
- }
-
- signer := types.LatestSignerForChainID(chainID)
-
- tx, err := types.SignNewTx(testKey, signer, &types.LegacyTx{
- Nonce: nonce,
- To: &common.Address{2},
- Value: big.NewInt(1),
- Gas: 22000,
- GasPrice: big.NewInt(params.InitialBaseFee),
- })
- if err != nil {
- return err
- }
-
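-	// 0x56e81f... is the empty trie root; the conditional below requires
-	// testAddr's storage root to still match it when the tx is included.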
- root := common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
- return ec.SendTransactionConditional(context.Background(), tx, ethapi.TransactionOpts{
- KnownAccounts: map[common.Address]ethapi.AccountStorage{
- testAddr: ethapi.AccountStorage{
- StorageRoot: &root,
- },
- },
- })
-}
diff --git a/miner/miner_test.go b/miner/miner_test.go
deleted file mode 100644
index 5907fb4464..0000000000
--- a/miner/miner_test.go
+++ /dev/null
@@ -1,337 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// Package miner implements Ethereum block creation and mining.
-package miner
-
-import (
- "errors"
- "math/big"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus/clique"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state"
- "github.com/ethereum/go-ethereum/core/txpool"
- "github.com/ethereum/go-ethereum/core/txpool/legacypool"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/eth/downloader"
- "github.com/ethereum/go-ethereum/event"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie"
- "github.com/ethereum/go-ethereum/triedb"
-)
-
-type mockBackend struct {
- bc *core.BlockChain
- txPool *txpool.TxPool
-}
-
-func NewMockBackend(bc *core.BlockChain, txPool *txpool.TxPool) *mockBackend {
- return &mockBackend{
- bc: bc,
- txPool: txPool,
- }
-}
-
-func (m *mockBackend) BlockChain() *core.BlockChain {
- return m.bc
-}
-
-func (m *mockBackend) TxPool() *txpool.TxPool {
- return m.txPool
-}
-
-func (m *mockBackend) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) {
- return nil, errors.New("not supported")
-}
-
-type testBlockChain struct {
- root common.Hash
- config *params.ChainConfig
- statedb *state.StateDB
- gasLimit uint64
- chainHeadFeed *event.Feed
-}
-
-func (bc *testBlockChain) Config() *params.ChainConfig {
- return bc.config
-}
-
-func (bc *testBlockChain) CurrentBlock() *types.Header {
- return &types.Header{
- Number: new(big.Int),
- GasLimit: bc.gasLimit,
- }
-}
-
-func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
- return types.NewBlock(bc.CurrentBlock(), nil, nil, nil, trie.NewStackTrie(nil))
-}
-
-func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) {
- return bc.statedb, nil
-}
-
-func (bc *testBlockChain) HasState(root common.Hash) bool {
- return bc.root == root
-}
-
-func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
- return bc.chainHeadFeed.Subscribe(ch)
-}
-
-func TestMiner(t *testing.T) {
- t.Parallel()
- miner, mux, cleanup := createMiner(t)
- defer cleanup(false)
-
- miner.Start()
- waitForMiningState(t, miner, true)
- // Start the downloader
- mux.Post(downloader.StartEvent{})
- waitForMiningState(t, miner, false)
- // Stop the downloader and wait for the update loop to run
- mux.Post(downloader.DoneEvent{})
- waitForMiningState(t, miner, true)
-
- // Subsequent downloader events after a successful DoneEvent should not cause the
- // miner to start or stop. This prevents a security vulnerability
- // that would allow entities to present fake high blocks that would
- // stop mining operations by causing a downloader sync
- // until it was discovered they were invalid, whereon mining would resume.
- mux.Post(downloader.StartEvent{})
- waitForMiningState(t, miner, true)
-
- mux.Post(downloader.FailedEvent{})
- waitForMiningState(t, miner, true)
-}
-
-// TestMinerDownloaderFirstFails tests that mining is only
-// permitted to run indefinitely once the downloader sees a DoneEvent (success).
-// An initial FailedEvent should allow mining to stop on a subsequent
-// downloader StartEvent.
-func TestMinerDownloaderFirstFails(t *testing.T) {
- t.Parallel()
- miner, mux, cleanup := createMiner(t)
- defer cleanup(false)
-
- miner.Start()
- waitForMiningState(t, miner, true)
- // Start the downloader
- mux.Post(downloader.StartEvent{})
- waitForMiningState(t, miner, false)
-
- // Stop the downloader and wait for the update loop to run
- mux.Post(downloader.FailedEvent{})
- waitForMiningState(t, miner, true)
-
- // Since the downloader hasn't yet emitted a successful DoneEvent,
- // we expect the miner to stop on next StartEvent.
- mux.Post(downloader.StartEvent{})
- waitForMiningState(t, miner, false)
-
- // Downloader finally succeeds.
- mux.Post(downloader.DoneEvent{})
- waitForMiningState(t, miner, true)
-
- // Downloader starts again.
- // Since it has achieved a DoneEvent once, we expect miner
- // state to be unchanged.
- mux.Post(downloader.StartEvent{})
- waitForMiningState(t, miner, true)
-
- mux.Post(downloader.FailedEvent{})
- waitForMiningState(t, miner, true)
-}
-
-func TestMinerStartStopAfterDownloaderEvents(t *testing.T) {
- t.Parallel()
- miner, mux, cleanup := createMiner(t)
- defer cleanup(false)
-
- miner.Start()
- waitForMiningState(t, miner, true)
- // Start the downloader
- mux.Post(downloader.StartEvent{})
- waitForMiningState(t, miner, false)
-
- // Downloader finally succeeds.
- mux.Post(downloader.DoneEvent{})
- waitForMiningState(t, miner, true)
-
- miner.Stop()
- waitForMiningState(t, miner, false)
-
- miner.Start()
- waitForMiningState(t, miner, true)
-
- miner.Stop()
- waitForMiningState(t, miner, false)
-}
-
-func TestStartWhileDownload(t *testing.T) {
- t.Parallel()
- miner, mux, cleanup := createMiner(t)
- defer cleanup(false)
- waitForMiningState(t, miner, false)
- miner.Start()
- waitForMiningState(t, miner, true)
-	// Start the downloader and wait for the update loop to run
- mux.Post(downloader.StartEvent{})
- waitForMiningState(t, miner, false)
- // Starting the miner after the downloader should not work
- miner.Start()
- waitForMiningState(t, miner, false)
-}
-
-func TestStartStopMiner(t *testing.T) {
- t.Parallel()
- miner, _, cleanup := createMiner(t)
- defer cleanup(false)
- waitForMiningState(t, miner, false)
- miner.Start()
- waitForMiningState(t, miner, true)
- miner.Stop()
- waitForMiningState(t, miner, false)
-}
-
-func TestCloseMiner(t *testing.T) {
- t.Parallel()
- miner, _, cleanup := createMiner(t)
- defer cleanup(true)
- waitForMiningState(t, miner, false)
- miner.Start()
- waitForMiningState(t, miner, true)
- // Terminate the miner and wait for the update loop to run
- miner.Close()
- waitForMiningState(t, miner, false)
-}
-
-// TestMinerSetEtherbase checks that etherbase becomes set even if mining isn't
-// possible at the moment
-func TestMinerSetEtherbase(t *testing.T) {
- t.Parallel()
- miner, mux, cleanup := createMiner(t)
- defer cleanup(false)
- miner.Start()
- waitForMiningState(t, miner, true)
- // Start the downloader
- mux.Post(downloader.StartEvent{})
- waitForMiningState(t, miner, false)
-	// Now the user tries to configure a proper mining address
- miner.Start()
- // Stop the downloader and wait for the update loop to run
- mux.Post(downloader.DoneEvent{})
- waitForMiningState(t, miner, true)
-
- coinbase := common.HexToAddress("0xdeedbeef")
- miner.SetEtherbase(coinbase)
- if addr := miner.worker.etherbase(); addr != coinbase {
- t.Fatalf("Unexpected etherbase want %x got %x", coinbase, addr)
- }
-}
-
-// waitForMiningState waits until either
-// * the desired mining state was reached
-// * a timeout was reached which fails the test
-func waitForMiningState(t *testing.T, m *Miner, mining bool) {
- t.Helper()
-
- var state bool
- for i := 0; i < 100; i++ {
- time.Sleep(10 * time.Millisecond)
- if state = m.Mining(); state == mining {
- return
- }
- }
- t.Fatalf("Mining() == %t, want %t", state, mining)
-}
-
-func minerTestGenesisBlock(period uint64, gasLimit uint64, faucet common.Address) *core.Genesis {
- config := *params.AllCliqueProtocolChanges
- config.Clique = ¶ms.CliqueConfig{
- Period: period,
- Epoch: config.Clique.Epoch,
- }
-
- // Assemble and return the genesis with the precompiles and faucet pre-funded
- return &core.Genesis{
- Config: &config,
- ExtraData: append(append(make([]byte, 32), faucet[:]...), make([]byte, crypto.SignatureLength)...),
- GasLimit: gasLimit,
- BaseFee: big.NewInt(params.InitialBaseFee),
- Difficulty: big.NewInt(1),
- Alloc: map[common.Address]types.Account{
- common.BytesToAddress([]byte{1}): {Balance: big.NewInt(1)}, // ECRecover
- common.BytesToAddress([]byte{2}): {Balance: big.NewInt(1)}, // SHA256
- common.BytesToAddress([]byte{3}): {Balance: big.NewInt(1)}, // RIPEMD
- common.BytesToAddress([]byte{4}): {Balance: big.NewInt(1)}, // Identity
- common.BytesToAddress([]byte{5}): {Balance: big.NewInt(1)}, // ModExp
- common.BytesToAddress([]byte{6}): {Balance: big.NewInt(1)}, // ECAdd
- common.BytesToAddress([]byte{7}): {Balance: big.NewInt(1)}, // ECScalarMul
- common.BytesToAddress([]byte{8}): {Balance: big.NewInt(1)}, // ECPairing
- common.BytesToAddress([]byte{9}): {Balance: big.NewInt(1)}, // BLAKE2b
- faucet: {Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))},
- },
- }
-}
-func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) {
- // Create Ethash config
- config := Config{
- Etherbase: common.HexToAddress("123456789"),
- }
- // Create chainConfig
- chainDB := rawdb.NewMemoryDatabase()
- triedb := triedb.NewDatabase(chainDB, nil)
- genesis := minerTestGenesisBlock(15, 11_500_000, common.HexToAddress("12345"))
- chainConfig, _, err := core.SetupGenesisBlock(chainDB, triedb, genesis)
- if err != nil {
- t.Fatalf("can't create new chain config: %v", err)
- }
- // Create consensus engine
- engine := clique.New(chainConfig.Clique, chainDB)
- // Create Ethereum backend
- bc, err := core.NewBlockChain(chainDB, nil, genesis, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("can't create new chain %v", err)
- }
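-	// Back the transaction pool with a lightweight testBlockChain built on the genesis state.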
- statedb, _ := state.New(bc.Genesis().Root(), bc.StateCache(), nil)
- blockchain := &testBlockChain{bc.Genesis().Root(), chainConfig, statedb, 10000000, new(event.Feed)}
-
- pool := legacypool.New(testTxPoolConfig, blockchain)
- txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, blockchain, []txpool.SubPool{pool})
-
- backend := NewMockBackend(bc, txpool)
- // Create event Mux
- mux := new(event.TypeMux)
- // Create Miner
- miner := New(backend, &config, chainConfig, mux, engine, nil)
- cleanup := func(skipMiner bool) {
- bc.Stop()
- engine.Close()
- txpool.Close()
- if !skipMiner {
- miner.Close()
- }
- }
- return miner, mux, cleanup
-}