Merge pull request #2629 from xanimo/1.14.5-pruning

qa: fixes pruning test
This commit is contained in:
Ross Nicoll 2021-10-22 00:29:28 +01:00 committed by GitHub
commit c909ac2e09
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
3 changed files with 170 additions and 111 deletions

View file

@ -7,22 +7,76 @@
# Test pruning code # Test pruning code
# ******** # ********
# WARNING: # WARNING:
# This test uses 4GB of disk space. # This test uses 21GB of disk space.
# This test takes 30 mins or more (up to 2 hours) # This test takes 20 mins or more (up to 2 hours)
# ******** # ********
from test_framework.blocktools import create_coinbase
from test_framework.mininode import CBlock
from test_framework.script import (
CScript,
OP_NOP,
OP_RETURN,
)
from test_framework.test_framework import BitcoinTestFramework from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import * from test_framework.util import (
start_node,
connect_nodes,
sync_blocks,
assert_equal,
assert_greater_than,
assert_raises_jsonrpc,
)
import time import time
import os import os
MIN_BLOCKS_TO_KEEP = 288
# Rescans start at the earliest block up to 2 hours before a key timestamp, so # Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be # the manual prune RPC avoids pruning blocks in the same window to be
# compatible with pruning based on key creation time. # compatible with pruning based on key creation time.
RESCAN_WINDOW = 2 * 60 * 60 RESCAN_WINDOW = 2 * 60 * 60
def mine_large_blocks(node, n):
    """Mine n consecutive large blocks on `node`.

    Each block carries a coinbase whose scriptPubKey is OP_RETURN followed by
    950k OP_NOPs (~950KB). This would be non-standard in a non-coinbase
    transaction but is consensus valid, so block files fill up quickly for
    the pruning tests.
    """
    # Set the nTime if this is the first time this function has been called.
    # A function attribute acts as a static variable, ensuring that time is
    # monotonically increasing and is therefore different for each block
    # created => blockhash is unique.
    # NOTE: the guard must test "nTime" (the attribute actually assigned);
    # testing "nTimes" would reset the timestamp to 0 on every call.
    if "nTime" not in mine_large_blocks.__dict__:
        mine_large_blocks.nTime = 0

    # Make a large scriptPubKey for the coinbase transaction.
    big_script = CScript([OP_RETURN] + [OP_NOP] * 950000)

    # Get the block parameters for the first block from the current tip.
    best_block = node.getblock(node.getbestblockhash())
    height = int(best_block["height"]) + 1
    # Never reuse or go below the tip's timestamp, even across calls.
    mine_large_blocks.nTime = max(mine_large_blocks.nTime, int(best_block["time"])) + 1
    previousblockhash = int(best_block["hash"], 16)

    for _ in range(n):
        # Build the coinbase transaction (with large scriptPubKey)
        coinbase_tx = create_coinbase(height)
        coinbase_tx.vin[0].nSequence = 2 ** 32 - 1
        coinbase_tx.vout[0].scriptPubKey = big_script
        coinbase_tx.rehash()

        # Build the block
        block = CBlock()
        block.nVersion = 0x620004
        block.hashPrevBlock = previousblockhash
        block.nTime = mine_large_blocks.nTime
        block.nBits = int('207fffff', 16)
        block.nNonce = 0
        block.vtx = [coinbase_tx]
        block.hashMerkleRoot = block.calc_merkle_root()
        block.calc_sha256()
        block.solve()

        # Submit to the node
        node.submitblock(block.serialize().hex())

        previousblockhash = block.sha256
        height += 1
        mine_large_blocks.nTime += 1
def calc_usage(blockdir): def calc_usage(blockdir):
return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.) return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.)
@ -34,28 +88,24 @@ class PruneTest(BitcoinTestFramework):
self.setup_clean_chain = True self.setup_clean_chain = True
self.num_nodes = 6 self.num_nodes = 6
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache_0 = []
self.utxo_cache_1 = []
def setup_network(self): def setup_network(self):
self.nodes = [] self.nodes = []
self.is_network_split = False self.is_network_split = False
# Create nodes 0 and 1 to mine # Create nodes 0 and 1 to mine
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)) self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxreceivebuffer=20000"], timewait=1200))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)) self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-maxreceivebuffer=20000"], timewait=1200))
# Create node 2 to test pruning # Create node 2 to test pruning
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900)) self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-maxreceivebuffer=20000", "-prune=2200"], timewait=1200))
self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/" self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"
# Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later) # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
self.nodes.append(start_node(3, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900)) self.nodes.append(start_node(3, self.options.tmpdir, ["-debug=0", "-maxreceivebuffer=20000"], timewait=1200))
self.nodes.append(start_node(4, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900)) self.nodes.append(start_node(4, self.options.tmpdir, ["-debug=0", "-maxreceivebuffer=20000"], timewait=1200))
# Create nodes 5 to test wallet in prune mode, but do not connect # Create nodes 5 to test wallet in prune mode, but do not connect
self.nodes.append(start_node(5, self.options.tmpdir, ["-debug=0", "-prune=550"])) self.nodes.append(start_node(5, self.options.tmpdir, ["-debug=0", "-prune=2200"]))
# Determine default relay fee # Determine default relay fee
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
@ -63,8 +113,7 @@ class PruneTest(BitcoinTestFramework):
connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2) connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[2], 0) connect_nodes(self.nodes[2], 0)
connect_nodes(self.nodes[0], 3) connect_nodes(self.nodes[3], 4)
connect_nodes(self.nodes[0], 4)
sync_blocks(self.nodes[0:5]) sync_blocks(self.nodes[0:5])
def create_big_chain(self): def create_big_chain(self):
@ -72,21 +121,22 @@ class PruneTest(BitcoinTestFramework):
self.nodes[1].generate(200) self.nodes[1].generate(200)
sync_blocks(self.nodes[0:2]) sync_blocks(self.nodes[0:2])
self.nodes[0].generate(150) self.nodes[0].generate(150)
# Then mine enough full blocks to create more than 550MiB of data # Then mine enough full blocks to create more than 2200MiB of data
for i in range(645): mine_large_blocks(self.nodes[0], 2395)
mine_large_block(self.nodes[0], self.utxo_cache_0) sync_blocks(self.nodes[0:2])
# Note: Separated the manual testing from the main test
sync_blocks(self.nodes[0:5]) # This can and should be improved in the future
mine_large_blocks(self.nodes[3], 995)
mine_large_blocks(self.nodes[4], 995)
def test_height_min(self): def test_height_min(self):
if not os.path.isfile(self.prunedir+"blk00000.dat"): if not os.path.isfile(self.prunedir+"blk00000.dat"):
raise AssertionError("blk00000.dat is missing, pruning too early") raise AssertionError("blk00000.dat is missing, pruning too early")
print("Success") print("Success")
print("Though we're already using more than 550MiB, current usage:", calc_usage(self.prunedir)) print("Though we're already using more than 2200MiB, current usage:", calc_usage(self.prunedir))
print("Mining 25 more blocks should cause the first block file to be pruned") print("Mining 20 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
for i in range(25): mine_large_blocks(self.nodes[0], 20)
mine_large_block(self.nodes[0], self.utxo_cache_0)
waitstart = time.time() waitstart = time.time()
while os.path.isfile(self.prunedir+"blk00000.dat"): while os.path.isfile(self.prunedir+"blk00000.dat"):
@ -97,29 +147,24 @@ class PruneTest(BitcoinTestFramework):
print("Success") print("Success")
usage = calc_usage(self.prunedir) usage = calc_usage(self.prunedir)
print("Usage should be below target:", usage) print("Usage should be below target:", usage)
if (usage > 550): if (usage > 2200):
raise AssertionError("Pruning target not being met") raise AssertionError("Pruning target not being met")
def create_chain_with_staleblocks(self): def create_chain_with_staleblocks(self):
# Create stale blocks in manageable sized chunks # Create stale blocks in manageable sized chunks
print("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds") print("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
for j in range(12): for j in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
# Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
# Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
self.stop_node(0) self.stop_node(0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900) self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000", "-checkblocks=6"], timewait=1200)
# Mine 24 blocks in node 1 # Mine 24 blocks in node 1
for i in range(24): mine_large_blocks(self.nodes[1], 24)
if j == 0:
mine_large_block(self.nodes[1], self.utxo_cache_1)
else:
self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
# Reorg back with 25 block chain from node 0 # Reorg back with 25 block chain from node 0
for i in range(25): mine_large_blocks(self.nodes[0], 25)
mine_large_block(self.nodes[0], self.utxo_cache_0)
# Create connections in the order so both nodes can see the reorg at the same time # Create connections in the order so both nodes can see the reorg at the same time
connect_nodes(self.nodes[1], 0) connect_nodes(self.nodes[1], 0)
@ -129,25 +174,26 @@ class PruneTest(BitcoinTestFramework):
print("Usage can be over target because of high stale rate:", calc_usage(self.prunedir)) print("Usage can be over target because of high stale rate:", calc_usage(self.prunedir))
def reorg_test(self): def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip # Node 1 will mine a 1441 block chain starting 1439 blocks back from Node 0 and Node 2's tip
# This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain # This will cause Node 2 to do a reorg requiring 1440 blocks of undo data to the reorg_test chain
# Reboot node 1 to clear its mempool (hopefully make the invalidate faster) # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
# Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks) # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
self.stop_node(1) self.stop_node(1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900) self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug", "-maxreceivebuffer=20000", "-blockmaxsize=5000", "-blockmaxweight=20000", "-checkblocks=6", "-disablesafemode"], timewait=1200)
height = self.nodes[1].getblockcount() height = self.nodes[1].getblockcount()
print("Current block height:", height) print("Current block height:", height)
invalidheight = height-287 invalidheight = height-1439
badhash = self.nodes[1].getblockhash(invalidheight) badhash = self.nodes[1].getblockhash(invalidheight)
print("Invalidating block at height:",invalidheight,badhash) print("Invalidating block at height:",invalidheight,badhash)
self.nodes[1].invalidateblock(badhash) self.nodes[1].invalidateblock(badhash)
# We've now switched to our previously mined-24 block fork on node 1, but that's not what we want # We've now switched to our previously mined-24 block fork on node 1, but that's not what we want
# So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago) # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 1440 blocks ago)
mainchainhash = self.nodes[0].getblockhash(invalidheight - 1) mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
curhash = self.nodes[1].getblockhash(invalidheight - 1) curhash = self.nodes[1].getblockhash(invalidheight - 1)
print('curhash != mainchainhash: ', curhash != mainchainhash)
while curhash != mainchainhash: while curhash != mainchainhash:
self.nodes[1].invalidateblock(curhash) self.nodes[1].invalidateblock(curhash)
curhash = self.nodes[1].getblockhash(invalidheight - 1) curhash = self.nodes[1].getblockhash(invalidheight - 1)
@ -157,32 +203,33 @@ class PruneTest(BitcoinTestFramework):
# Reboot node1 to clear those giant tx's from mempool # Reboot node1 to clear those giant tx's from mempool
self.stop_node(1) self.stop_node(1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900) self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000", "-blockmaxsize=5000", "-blockmaxweight=20000", "-checkblocks=6", "-disablesafemode"], timewait=1200)
print("Generating new longer chain of 300 more blocks") print("Generating new longer chain of 1441 more blocks")
self.nodes[1].generate(300) self.nodes[1].generate(1441)
print("Reconnect nodes") print("Reconnect nodes")
connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[2], 1) connect_nodes(self.nodes[2], 1)
sync_blocks(self.nodes[0:3], timeout=120) sync_blocks(self.nodes[0:3], timeout=300)
print("Verify height on node 2:",self.nodes[2].getblockcount()) print("Verify height on node 2:",self.nodes[2].getblockcount())
print("Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir)) print("Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir))
print("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)") print("Mine 992 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
for i in range(22): # Get node0's wallet transactions back in its mempool, to avoid the
# This can be slow, so do this in multiple RPC calls to avoid # mined blocks from being too small.
# RPC timeouts. self.nodes[0].resendwallettransactions()
self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects mine_large_blocks(self.nodes[0], 992)
sync_blocks(self.nodes[0:3], timeout=300)
sync_blocks(self.nodes[0:3], timeout=120)
usage = calc_usage(self.prunedir) usage = calc_usage(self.prunedir)
print("Usage should be below target:", usage) print("Usage should be below target:", usage)
if (usage > 550): if (usage > 2200):
raise AssertionError("Pruning target not being met") raise AssertionError("Pruning target not being met")
return invalidheight,badhash return invalidheight, badhash
def reorg_back(self): def reorg_back(self):
# Verify that a block on the old main chain fork has been pruned away # Verify that a block on the old main chain fork has been pruned away
@ -190,8 +237,8 @@ class PruneTest(BitcoinTestFramework):
print("Will need to redownload block",self.forkheight) print("Will need to redownload block",self.forkheight)
# Verify that we have enough history to reorg back to the fork point # Verify that we have enough history to reorg back to the fork point
# Although this is more than 288 blocks, because this chain was written more recently # Although this is more than 1440 blocks, because this chain was written more recently
# and only its other 299 small and 220 large block are in the block files after it, # and only its other 1441 small and 992 large block are in the block files after it,
# its expected to still be retained # its expected to still be retained
self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight)) self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
@ -208,7 +255,7 @@ class PruneTest(BitcoinTestFramework):
# because it has all the block data. # because it has all the block data.
# However it must mine enough blocks to have a more work chain than the reorg_test chain in order # However it must mine enough blocks to have a more work chain than the reorg_test chain in order
# to trigger node 2's block download logic. # to trigger node 2's block download logic.
# At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg # At this point node 2 is within 1440 blocks of the fork point so it will preserve its ability to reorg
if self.nodes[2].getblockcount() < self.mainchainheight: if self.nodes[2].getblockcount() < self.mainchainheight:
blocks_to_mine = first_reorg_height + 1 - self.mainchainheight blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
print("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed:", blocks_to_mine) print("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed:", blocks_to_mine)
@ -283,34 +330,36 @@ class PruneTest(BitcoinTestFramework):
if not has_block(0): if not has_block(0):
raise AssertionError("blk00000.dat is missing when should still be there") raise AssertionError("blk00000.dat is missing when should still be there")
# height=500 should prune first file # height=141 should prune first file
prune(500) prune(141)
if has_block(0): if has_block(0):
raise AssertionError("blk00000.dat is still there, should be pruned by now") raise AssertionError("blk00000.dat is still there, should be pruned by now")
if not has_block(1): if not has_block(1):
raise AssertionError("blk00001.dat is missing when should still be there") raise AssertionError("blk00001.dat is missing when should still be there")
# height=650 should prune second file # height=282 should prune second file
prune(650) prune(282)
if has_block(1): if has_block(1):
raise AssertionError("blk00001.dat is still there, should be pruned by now") raise AssertionError("blk00001.dat is still there, should be pruned by now")
# height=1000 should not prune anything more, because tip-288 is in blk00002.dat. # height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
prune(1000, 1001 - MIN_BLOCKS_TO_KEEP) prune(1000)
if not has_block(2): if has_block(2):
raise AssertionError("blk00002.dat is still there, should be pruned by now") raise AssertionError("blk00002.dat is still there, should be pruned by now")
# advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat) # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
node.generate(288) node.generate(288)
prune(1000) prune(1000)
if has_block(2):
raise AssertionError("blk00002.dat is still there, should be pruned by now")
if has_block(3):
raise AssertionError("blk00003.dat is still there, should be pruned by now")
# stop node, start back up with auto-prune at 550MB, make sure still runs for fnum in range(7):
if has_block(fnum):
raise AssertionError(f"blk0000{fnum}.dat is still there, should be pruned by now")
if not has_block(7):
raise AssertionError("blk00007.dat is missing when should still be there")
# stop node, start back up with auto-prune at 2200MB, make sure still runs
self.stop_node(node_number) self.stop_node(node_number)
self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0","-prune=550"], timewait=900) self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0","-prune=2200"], timewait=900)
print("Success") print("Success")
@ -318,22 +367,22 @@ class PruneTest(BitcoinTestFramework):
# check that the pruning node's wallet is still in good shape # check that the pruning node's wallet is still in good shape
print("Stop and start pruning node to trigger wallet rescan") print("Stop and start pruning node to trigger wallet rescan")
self.stop_node(2) self.stop_node(2)
start_node(2, self.options.tmpdir, ["-debug=1","-prune=550"]) start_node(2, self.options.tmpdir, ["-debug=1","-prune=2200"])
print("Success") print("Success")
# check that wallet loads loads successfully when restarting a pruned node after IBD. # check that wallet loads successfully when restarting a pruned node after IBD.
# this was reported to fail in #7494. # this was reported to fail in #7494.
print ("Syncing node 5 to test wallet") print ("Syncing node 5 to test wallet")
connect_nodes(self.nodes[0], 5) connect_nodes(self.nodes[0], 5)
nds = [self.nodes[0], self.nodes[5]] nds = [self.nodes[0], self.nodes[5]]
sync_blocks(nds, wait=5, timeout=300) sync_blocks(nds, wait=5, timeout=300)
self.stop_node(5) #stop and start to trigger rescan self.stop_node(5) #stop and start to trigger rescan
start_node(5, self.options.tmpdir, ["-debug=1","-prune=550"]) start_node(5, self.options.tmpdir, ["-debug=1","-prune=2200"])
print ("Success") print ("Success")
def run_test(self): def run_test(self):
print("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)") print("Warning! This test requires 21GB of disk space and takes over 20 mins (up to 2 hours)")
print("Mining a big blockchain of 995 blocks") print("Mining a big blockchain of 2745 blocks")
self.create_big_chain() self.create_big_chain()
# Chain diagram key: # Chain diagram key:
# * blocks on main chain # * blocks on main chain
@ -341,94 +390,94 @@ class PruneTest(BitcoinTestFramework):
# X invalidated block # X invalidated block
# N1 Node 1 # N1 Node 1
# #
# Start by mining a simple chain that all nodes have # Start by mining a simple chain that nodes 0-2 have
# N0=N1=N2 **...*(995) # N0=N1=N2 **...*(2745)
# stop manual-pruning node with 995 blocks # stop manual-pruning nodes with 995 blocks
self.stop_node(3) self.stop_node(3)
self.stop_node(4) self.stop_node(4)
print("Check that we haven't started pruning yet because we're below PruneAfterHeight") print("Check that we haven't started pruning yet because we're below PruneAfterHeight")
self.test_height_min() self.test_height_min()
# Extend this chain past the PruneAfterHeight # Extend this chain past the PruneAfterHeight
# N0=N1=N2 **...*(1020) # N0=N1=N2 **...*(2765)
print("Check that we'll exceed disk space target if we have a very high stale block rate") print("Check that we'll exceed disk space target if we have a very high stale block rate")
self.create_chain_with_staleblocks() self.create_chain_with_staleblocks()
# Disconnect N0 # Disconnect N0
# And mine a 24 block chain on N1 and a separate 25 block chain on N0 # And mine a 24 block chain on N1 and a separate 25 block chain on N0
# N1=N2 **...*+...+(1044) # N1=N2 **...*+...+(2789)
# N0 **...**...**(1045) # N0 **...**...**(2790)
# #
# reconnect nodes causing reorg on N1 and N2 # reconnect nodes causing reorg on N1 and N2
# N1=N2 **...*(1020) *...**(1045) # N1=N2 **...*(2765) *...**(2790)
# \ # \
# +...+(1044) # +...+(2789)
# #
# repeat this process until you have 12 stale forks hanging off the # repeat this process until you have 12 stale forks hanging off the
# main chain on N1 and N2 # main chain on N1 and N2
# N0 *************************...***************************(1320) # N0 *************************...***************************(3065)
# #
# N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320) # N1=N2 **...*(2765) *...**(2790) *.. ..**(3040) *...**(3065)
# \ \ \ # \ \ \
# +...+(1044) &.. $...$(1319) # +...+(2789) &.. $...$(3064)
# Save some current chain state for later use # Save some current chain state for later use
self.mainchainheight = self.nodes[2].getblockcount() #1320 self.mainchainheight = self.nodes[2].getblockcount() #3065
self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight) self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
print("Check that we can survive a 288 block reorg still") print("Check that we can survive a 1440 block reorg still")
(self.forkheight,self.forkhash) = self.reorg_test() #(1033, ) (self.forkheight,self.forkhash) = self.reorg_test() #(1626, )
# Now create a 288 block reorg by mining a longer chain on N1 # Now create a 1440 block reorg by mining a longer chain on N1
# First disconnect N1 # First disconnect N1
# Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain # Then invalidate 1626 on main chain and 1625 on fork so height is 1625 on main chain
# N1 **...*(1020) **...**(1032)X.. # N1 **...*(2765) **...**(1625)X..
# \ # \
# ++...+(1031)X.. # ++...+(1624)X..
# #
# Now mine 300 more blocks on N1 # Now mine 1441 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@(1332) # N1 **...*(2765) **...**(1625) @@...@(3066)
# \ \ # \ \
# \ X... # \ X...
# \ \ # \ \
# ++...+(1031)X.. .. # ++...+(1624)X.. ..
# #
# Reconnect nodes and mine 220 more blocks on N1 # Reconnect nodes and mine 992 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@@@(1552) # N1 **...*(2765) **...**(1625) @@...@@@(4058)
# \ \ # \ \
# \ X... # \ X...
# \ \ # \ \
# ++...+(1031)X.. .. # ++...+(1624)X.. ..
# #
# N2 **...*(1020) **...**(1032) @@...@@@(1552) # N2 **...*(2765) **...**(1625) @@...@@@(4058)
# \ \ # \ \
# \ *...**(1320) # \ *...**(3065)
# \ \ # \ \
# ++...++(1044) .. # ++...++(2789) ..
# #
# N0 ********************(1032) @@...@@@(1552) # N0 ********************(1625) @@...@@@(4058)
# \ # \
# *...**(1320) # *...**(3065)
print("Test that we can rerequest a block we previously pruned if needed for a reorg") print("Test that we can rerequest a block we previously pruned if needed for a reorg")
self.reorg_back() self.reorg_back()
# Verify that N2 still has block 1033 on current chain (@), but not on main chain (*) # Verify that N2 still has block 1626 on current chain (@), but not on main chain (*)
# Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to # Invalidate 1626 on current chain (@) on N2 and we should be able to reorg to
# original main chain (*), but will require redownload of some blocks # original main chain (*), but will require redownload of some blocks
# In order to have a peer we think we can download from, must also perform this invalidation # In order to have a peer we think we can download from, must also perform this invalidation
# on N0 and mine a new longest chain to trigger. # on N0 and mine a new longest chain to trigger.
# Final result: # Final result:
# N0 ********************(1032) **...****(1553) # N0 ********************(1625) **...****(4059)
# \ # \
# X@...@@@(1552) # X@...@@@(4058)
# #
# N2 **...*(1020) **...**(1032) **...****(1553) # N2 **...*(2765) **...**(1625) **...****(4059)
# \ \ # \ \
# \ X@...@@@(1552) # \ X@...@@@(4058)
# \ # \
# +.. # +..
# #
# N1 doesn't change because 1033 on main chain (*) is invalid # N1 doesn't change because 1626 on main chain (*) is invalid
print("Test manual pruning with block indices") print("Test manual pruning with block indices")
self.manual_test(3, use_timestamp=False) self.manual_test(3, use_timestamp=False)

View file

@ -3250,7 +3250,12 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
if (state.vBlocksInFlight.size() > 0) { if (state.vBlocksInFlight.size() > 0) {
QueuedBlock &queuedBlock = state.vBlocksInFlight.front(); QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0); int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) { int64_t nCalculatedDlWindow = std::max(consensusParams.nPowTargetSpacing, MIN_BLOCK_DOWNLOAD_MULTIPLIER) *
(BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads);
if (nNow > state.nDownloadingSince + nCalculatedDlWindow) {
LogPrint("net", "Timeout downloading block: window=%d; inFlight=%d; validHeaders=%d; otherDlPeers=%d;",
nCalculatedDlWindow, state.vBlocksInFlight.size(),
state.nBlocksInFlightValidHeaders, nOtherPeersWithValidatedDownloads);
LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->id); LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->id);
pto->fDisconnect = true; pto->fDisconnect = true;
return true; return true;

View file

@ -21,6 +21,11 @@ static const unsigned int DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN = 100;
* Timeout = base + per_header * (expected number of headers) */ * Timeout = base + per_header * (expected number of headers) */
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000; // 15 minutes static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000; // 15 minutes
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000; // 1ms/header static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000; // 1ms/header
/** Sets a hard minimum on the block download timeout multiplier
 * (normally consensusParams.nPowTargetSpacing); only takes effect
 * on regtest, where nPowTargetSpacing is set to 1 second.
 */
static constexpr int64_t MIN_BLOCK_DOWNLOAD_MULTIPLIER = 10; // 10 seconds
/** Register with a network node to receive its signals */ /** Register with a network node to receive its signals */
void RegisterNodeSignals(CNodeSignals& nodeSignals); void RegisterNodeSignals(CNodeSignals& nodeSignals);
/** Unregister a network node */ /** Unregister a network node */