test: chain: unit tests for the syncer & sync manager #8072
@@ -1098,3 +1098,158 @@ func TestInvalidHeight(t *testing.T) {

	tu.mineOnBlock(base, 0, nil, false, true, nil, -1, true)
}

// TestIncomingBlocks mines new blocks and checks if the incoming channel streams new block headers properly
func TestIncomingBlocks(t *testing.T) {
	H := 50
	tu := prepSyncTest(t, H)

	client := tu.addClientNode()
	require.NoError(t, tu.mn.LinkAll())

	clientNode := tu.nds[client]
	//stm: @CHAIN_SYNCER_INCOMING_BLOCKS_001
	incoming, err := clientNode.SyncIncomingBlocks(tu.ctx)
	require.NoError(tu.t, err)

	tu.connect(client, 0)
	tu.waitUntilSync(0, client)
	tu.compareSourceState(client)

	timeout := time.After(10 * time.Second)

	for i := 0; i < 5; i++ {
		tu.mineNewBlock(0, nil)
		tu.waitUntilSync(0, client)
		tu.compareSourceState(client)

		// just in case, so we don't get deadlocked
		select {
		case <-incoming:
		case <-timeout:
			tu.t.Fatal("TestIncomingBlocks timeout")
		}
	}
}

// TestSyncManualBadTS tests manually marking and unmarking blocks in the bad TS cache
func TestSyncManualBadTS(t *testing.T) {
	// Test setup:
	// - source node is fully synced
	// - client node is unsynced
	// - client manually marks source's head and its parent as bad
	H := 50
	tu := prepSyncTest(t, H)

	client := tu.addClientNode()
	require.NoError(t, tu.mn.LinkAll())

	sourceHead, err := tu.nds[source].ChainHead(tu.ctx)
	require.NoError(tu.t, err)

	clientHead, err := tu.nds[client].ChainHead(tu.ctx)
	require.NoError(tu.t, err)

	require.True(tu.t, !sourceHead.Equals(clientHead), "source and client should be out of sync in test setup")

	//stm: @CHAIN_SYNCER_MARK_BAD_001
	err = tu.nds[client].SyncMarkBad(tu.ctx, sourceHead.Cids()[0])
	require.NoError(tu.t, err)

	sourceHeadParent := sourceHead.Parents().Cids()[0]
	err = tu.nds[client].SyncMarkBad(tu.ctx, sourceHeadParent)
	require.NoError(tu.t, err)

	//stm: @CHAIN_SYNCER_CHECK_BAD_001
	reason, err := tu.nds[client].SyncCheckBad(tu.ctx, sourceHead.Cids()[0])
	require.NoError(tu.t, err)
	require.NotEqual(tu.t, "", reason, "block is not bad after manually marking")

	reason, err = tu.nds[client].SyncCheckBad(tu.ctx, sourceHeadParent)
	require.NoError(tu.t, err)
	require.NotEqual(tu.t, "", reason, "block is not bad after manually marking")

	// Assertion 1:
	// - client shouldn't be synced after the timeout, because the source TS is marked bad
	// - the bad block is the first block that should be synced, so 1 sec should be enough
	tu.connect(1, 0)
	timeout := time.After(1 * time.Second)
	<-timeout

	clientHead, err = tu.nds[client].ChainHead(tu.ctx)
	require.NoError(tu.t, err)
	require.True(tu.t, !sourceHead.Equals(clientHead), "source and client should be out of sync if source head is bad")

	// Assertion 2:
	// - after unmarking blocks as bad and reconnecting, source & client should be in sync
	//stm: @CHAIN_SYNCER_UNMARK_BAD_001
	err = tu.nds[client].SyncUnmarkBad(tu.ctx, sourceHead.Cids()[0])
	require.NoError(tu.t, err)

	reason, err = tu.nds[client].SyncCheckBad(tu.ctx, sourceHead.Cids()[0])
	require.NoError(tu.t, err)
	require.Equal(tu.t, "", reason, "block is still bad after manually unmarking")

	err = tu.nds[client].SyncUnmarkAllBad(tu.ctx)
Review comment: I'm new to the lotus syncer so I'm curious: why do we need the explicit […]

Reply: I believe it's done intentionally, so as to check that both methods of unmarking a bad block are working as intended. In this way, if a change is committed to either […] While this test strays into the "integration" category, I think it's a valid choice.
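To illustrate the two unmarking paths discussed in this thread, here is a minimal, hypothetical sketch (not part of this diff). It assumes an already-connected lotus full-node API client named node and a block CID blk supplied by the caller; it exercises the same SyncMarkBad / SyncCheckBad / SyncUnmarkBad / SyncUnmarkAllBad calls the test uses.

package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api"
	"github.com/ipfs/go-cid"
)

// demoBadBlockMarking marks a block as bad, verifies the mark, and then clears
// it via both the per-CID unmark and the cache-wide purge, mirroring the test
// above. `node` and `blk` are assumed to be provided by the caller.
func demoBadBlockMarking(ctx context.Context, node api.FullNode, blk cid.Cid) error {
	// Manually mark the block as bad.
	if err := node.SyncMarkBad(ctx, blk); err != nil {
		return err
	}

	// SyncCheckBad returns a non-empty reason string while the block is marked bad.
	reason, err := node.SyncCheckBad(ctx, blk)
	if err != nil {
		return err
	}
	fmt.Println("bad-block reason:", reason)

	// Path 1: unmark just this CID.
	if err := node.SyncUnmarkBad(ctx, blk); err != nil {
		return err
	}

	// Path 2: purge the whole bad-block cache, covering any remaining entries.
	return node.SyncUnmarkAllBad(ctx)
}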
	require.NoError(tu.t, err)

	reason, err = tu.nds[client].SyncCheckBad(tu.ctx, sourceHeadParent)
	require.NoError(tu.t, err)
	require.Equal(tu.t, "", reason, "block is still bad after manually unmarking")

	tu.disconnect(1, 0)
	tu.connect(1, 0)

	tu.waitUntilSync(0, client)
	tu.compareSourceState(client)
}

// TestSyncState tests fetching the sync worker state before, during & after the sync
func TestSyncState(t *testing.T) {
	H := 50
	tu := prepSyncTest(t, H)

	client := tu.addClientNode()
	require.NoError(t, tu.mn.LinkAll())
	clientNode := tu.nds[client]
	sourceHead, err := tu.nds[source].ChainHead(tu.ctx)
	require.NoError(tu.t, err)

	// sync state should be empty before the sync
	state, err := clientNode.SyncState(tu.ctx)
	require.NoError(tu.t, err)
	require.Equal(tu.t, len(state.ActiveSyncs), 0)

	tu.connect(client, 0)

	// wait until sync starts, or at most `timeout` seconds
	timeout := time.After(5 * time.Second)
	activeSyncs := []api.ActiveSync{}

	for len(activeSyncs) == 0 {
		//stm: @CHAIN_SYNCER_STATE_001
		state, err = clientNode.SyncState(tu.ctx)
		require.NoError(tu.t, err)
		activeSyncs = state.ActiveSyncs

		sleep := time.After(100 * time.Millisecond)
		select {
		case <-sleep:
		case <-timeout:
			tu.t.Fatal("TestSyncState timeout")
		}
	}

	// check state during sync
	require.Equal(tu.t, len(activeSyncs), 1)
	require.True(tu.t, activeSyncs[0].Target.Equals(sourceHead))

	tu.waitUntilSync(0, client)
	tu.compareSourceState(client)

	// check state after sync
	state, err = clientNode.SyncState(tu.ctx)
	require.NoError(tu.t, err)
	require.Equal(tu.t, len(state.ActiveSyncs), 1)
	require.Equal(tu.t, state.ActiveSyncs[0].Stage, api.StageSyncComplete)
}
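As an aside for readers new to these APIs, the sketch below (hypothetical, not part of the diff) shows how a caller might use the same FullNode sync surface the new tests exercise: streaming incoming block headers as in TestIncomingBlocks, and polling SyncState until it reports StageSyncComplete as in TestSyncState. The node client is assumed to be connected already.

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/filecoin-project/lotus/api"
)

// watchSync streams newly observed block headers and polls the sync worker
// state until one of the active syncs reports completion. `node` is an
// assumed, already-connected full-node API client.
func watchSync(ctx context.Context, node api.FullNode) error {
	// Subscribe to incoming block headers, as TestIncomingBlocks does.
	incoming, err := node.SyncIncomingBlocks(ctx)
	if err != nil {
		return err
	}
	go func() {
		for hdr := range incoming {
			fmt.Println("incoming block at height", hdr.Height)
		}
	}()

	// Poll the sync state roughly every 100ms, as the loop in TestSyncState does.
	for {
		state, err := node.SyncState(ctx)
		if err != nil {
			return err
		}
		for _, as := range state.ActiveSyncs {
			if as.Stage == api.StageSyncComplete {
				return nil
			}
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(100 * time.Millisecond):
		}
	}
}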
Review comment: @arajasek you seem to be the last person who touched this (+ our resident DRAND expert). Can you confirm that we don't want this anymore?

Follow-up: Ping @arajasek

Reply: Sorry about the delay here -- Yes, this can definitely be removed.