Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix #254 - reloadxfield/ reindex crash #275

Merged
merged 3 commits into from
Jul 21, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 5 additions & 11 deletions src/validation.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4167,14 +4167,11 @@ bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp, CXFieldHistoryMap*
int64_t nStart = GetTimeMillis();

int nLoaded = 0;
bool outOfOrder = false;
try {
// This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
CBufferedFile blkdat(fileIn, 2*GetCurrentMaxBlockSize(), GetCurrentMaxBlockSize()+8, SER_DISK, CLIENT_VERSION);
uint64_t nRewind = blkdat.GetPos();
while (!blkdat.eof()) {
LogPrint(BCLog::REINDEX, "%s: nRewind : %d (%s out of order)\n", __func__, nRewind,
outOfOrder ? "after" : "before");
blkdat.SetPos(nRewind);
nRewind++; // start one byte further next time, in case of failure
blkdat.SetLimit(); // remove former limit
Expand Down Expand Up @@ -4203,11 +4200,9 @@ bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp, CXFieldHistoryMap*
blkdat.SetLimit(nBlockPos + nSize);
blkdat.SetPos(nBlockPos);
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
LogPrint(BCLog::REINDEX, "%s: trying to read block %s\n", __func__, pblock->GetHash().ToString());
CBlock& block = *pblock;
blkdat >> block;
nRewind = blkdat.GetPos();
LogPrint(BCLog::REINDEX, "%s: Read ok. nRewind = %d\n", __func__, nRewind);

uint256 hash = block.GetHash();
{
Expand All @@ -4216,29 +4211,24 @@ bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp, CXFieldHistoryMap*
if (hash != FederationParams().GenesisBlock().GetHash() && !LookupBlockIndex(block.hashPrevBlock)) {
LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
block.hashPrevBlock.ToString());
outOfOrder = true;
if (dbp)
mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
continue;
}

// process in case the block isn't known yet
LogPrint(BCLog::REINDEX, "%s: before LookupBlockIndex\n", __func__);
CBlockIndex* pindex = LookupBlockIndex(hash);
if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) {
CValidationState state;
if (g_chainstate.AcceptBlock(pblock, state, nullptr, true, dbp, nullptr, pxfieldHistory)) {
nLoaded++;
}
if (state.IsError()) {
LogPrint(BCLog::REINDEX, "%s: AcceptBlock state: %s-%s\n", __func__, state.GetRejectReason(), state.GetDebugMessage());
break;
}
} else if (hash != FederationParams().GenesisBlock().GetHash() && pindex->nHeight % 1000 == 0) {
LogPrint(BCLog::REINDEX, "%s Block Import: already had block %s at height %d\n", __func__, hash.ToString(), pindex->nHeight);
}
else
LogPrint(BCLog::REINDEX, "%s: LookupBlockIndex returned null\n", __func__);
}

// Activate the genesis block so normal node progress can continue
Expand All @@ -4262,7 +4252,11 @@ bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp, CXFieldHistoryMap*
std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();

if (ReadBlockFromDisk(*pblockrecursive, it->second, pblockrecursive->GetHeight()))
BlockMap::iterator miSelf = mapBlockIndex.find(hash);
CBlockIndex *pindex = nullptr;
pindex = miSelf->second;

if (ReadBlockFromDisk(*pblockrecursive, it->second, pindex->nHeight))
{
LogPrint(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
head.ToString());
Expand Down
100 changes: 96 additions & 4 deletions test/functional/p2p_sendheaders.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,8 @@
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]

Part 4: Test direct fetch behavior
Part 4a: Test direct fetch behavior
Part 4b: Test direct fetch behavior after federation block
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expand Down Expand Up @@ -107,6 +108,7 @@
sync_blocks,
wait_until,
)
from time import sleep

DIRECT_FETCH_RESPONSE_TIME = 0.05

Expand Down Expand Up @@ -238,6 +240,34 @@ def mine_reorg(self, length):
sync_blocks(self.nodes, wait=0.1)
return [int(x, 16) for x in all_hashes]

def mine_federation_block(self, test_node):
    """Regression test helper for #254: mine a federation block that changes
    the aggpubkey, then restart the node with -reloadxfield.

    After this call, self.signblockpubkey/self.signblockprivkey(_wif) hold the
    NEW key pair, which must sign all subsequent blocks.

    Returns the hex hash of the mined federation block.
    """
    # New aggregate public key carried in the federation block's xfield.
    newaggpubkey = "02bf2027c8455800c7626542219e6208b5fe787483689f1391d6d443ec85673ecf"

    # Build a block on top of the current tip that switches the aggpubkey,
    # signed with the CURRENT (old) aggregate private key.
    tip = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())
    block = create_block(int(tip["hash"], 16), create_coinbase(tip["height"] + 1), tip["mediantime"] + 1, newaggpubkey)
    block.solve(self.signblockprivkey)
    test_node.send_header_for_blocks([block])
    test_node.clear_block_announcements()
    test_node.send_get_headers(locator=[], hashstop=int(block.hash, 16))
    test_node.sync_with_ping()
    # A header-only announcement must not be treated as a block announcement.
    assert_equal(test_node.block_announced, False)
    test_node.send_message(msg_block(block))

    # Restart the node with -reloadxfield so it rebuilds the xfield history
    # (the code path that crashed in #254).
    self.stop_node(0)
    self.start_node(0, ["-reloadxfield"])

    # The new aggprivkey is used to sign the rest of the blocks; reuse the
    # variable above so the key pair cannot silently drift apart.
    self.signblockpubkey = newaggpubkey
    self.signblockprivkey = "aa2c70c4b85a09be514292d04b27bbb0cc3f86d306d58fe87743d10a095ada07"
    self.signblockprivkey_wif = "cTHVmjaAwKtU75t89fg42SLx43nRxhsri6YY1Eynvs1V1tPRCfae"

    return block.hash


def run_test(self):
# Setup the p2p connections
inv_node = self.nodes[0].add_p2p_connection(BaseNode(self.nodes[0].time_to_connect))
Expand Down Expand Up @@ -450,7 +480,7 @@ def test_nonnull_locators(self, test_node, inv_node):

self.log.info("Part 3: success!")

self.log.info("Part 4: Testing direct fetch behavior...")
self.log.info("Part 4a: Testing direct fetch behavior...")
tip = self.mine_blocks(1)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
Expand Down Expand Up @@ -496,7 +526,7 @@ def test_nonnull_locators(self, test_node, inv_node):
height = self.nodes[0].getblockcount() - 1
blocks = []

self.log.info("Part 4: Now announce a header that forks the last two blocks!")
self.log.info("Part 4a: Now announce a header that forks the last two blocks!")
# Create extra blocks for later
for b in range(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
Expand Down Expand Up @@ -531,7 +561,69 @@ def test_nonnull_locators(self, test_node, inv_node):
with mininode_lock:
assert "getdata" not in test_node.last_message

self.log.info("Part 4: success!")
self.log.info("Part 4a: success!")

self.log.info("Part 4b: Testing direct fetch behavior after federation block...")

#mine a federation block
tip = self.mine_federation_block(test_node)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1

# Setup the p2p connections again
inv_node = self.nodes[0].add_p2p_connection(BaseNode(self.nodes[0].time_to_connect))
inv_node.sync_with_ping(timeout=100)

test_node = self.nodes[0].add_p2p_connection(BaseNode(self.nodes[0].time_to_connect))
test_node.sync_with_ping(timeout=100)

#wait for the federation block to sync
while self.nodes[0].getbestblockhash() != tip:
sleep(1)

#repeat sequence in 4a
tip = self.mine_blocks(1)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1

# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
for b in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve(self.signblockprivkey)
tip = blocks[-1].sha256
block_time += 1
height += 1
inv_node.send_message(msg_block(blocks[-1]))

inv_node.sync_with_ping() # Make sure blocks are processed
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
# should not have received any getdata messages
with mininode_lock:
assert "getdata" not in test_node.last_message

# This time, direct fetch should work
blocks = []
for b in range(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve(self.signblockprivkey)
tip = blocks[-1].sha256
block_time += 1
height += 1

test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=DIRECT_FETCH_RESPONSE_TIME)

[test_node.send_message(msg_block(x)) for x in blocks]

test_node.sync_with_ping()

self.log.info("Part 4b: success!")

# Now deliver all those blocks we announced.
[test_node.send_message(msg_block(x)) for x in blocks]
Expand Down