Skip to content

Commit

Permalink
Re-enable logstore unit test.
Browse files Browse the repository at this point in the history
  • Loading branch information
szmyd committed Jan 28, 2025
1 parent 869ef8e commit bd47812
Show file tree
Hide file tree
Showing 2 changed files with 172 additions and 152 deletions.
28 changes: 24 additions & 4 deletions src/homelogstore/log_dev.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,6 @@ void LogDev::start(const bool format, JournalVirtualDev* blk_store) {
[[maybe_unused]] auto* const superblock{m_logdev_meta.create()};
m_blkstore->update_data_start_offset(0);
} else {
HS_LOG_ASSERT(!m_logdev_meta.is_empty(), "Expected meta data to be read already before loading");
const auto store_list{m_logdev_meta.load()};

// Notify to the caller that a new log store was reserved earlier and it is being loaded, with its meta info
Expand Down Expand Up @@ -694,6 +693,30 @@ void LogDevMetadata::rollback_super_blk_found(const sisl::byte_view& buf, void*
}

std::vector< std::pair< logstore_id_t, logstore_superblk > > LogDevMetadata::load() {
if (!m_sb) {
/// This code block is purely to make the UT work. The UT does not fully
// destruct the application, but does unregister the callbacks. We need
// to re-register here on the simulated "reboot"
MetaBlkMgrSI()->register_handler(
m_name,
[this](meta_blk* mblk, sisl::byte_view buf, size_t size) {
logdev_super_blk_found(std::move(buf), voidptr_cast(mblk));
},
nullptr);

MetaBlkMgrSI()->register_handler(
m_name + "_rollback_sb",
[this](meta_blk* mblk, sisl::byte_view buf, size_t size) {
rollback_super_blk_found(std::move(buf), voidptr_cast(mblk));
},
nullptr);
homestore::MetaBlkMgr::instance()->read_sub_sb(m_name);
homestore::MetaBlkMgr::instance()->read_sub_sb(m_name + "_rollback_sb");
}

HS_REL_ASSERT_NE(m_raw_logdev_buf->bytes, nullptr, "Load called without getting metadata");
HS_REL_ASSERT_LE(m_sb->get_version(), logdev_superblk::LOGDEV_SB_VERSION, "Logdev super blk version mismatch");

std::vector< std::pair< logstore_id_t, logstore_superblk > > ret_list;
ret_list.reserve(1024);
if (store_capacity()) {
Expand All @@ -703,9 +726,6 @@ std::vector< std::pair< logstore_id_t, logstore_superblk > > LogDevMetadata::loa
m_id_reserver = std::make_unique< sisl::IDReserver >();
}

HS_REL_ASSERT_NE(m_raw_logdev_buf->bytes, nullptr, "Load called without getting metadata");
HS_REL_ASSERT_LE(m_sb->get_version(), logdev_superblk::LOGDEV_SB_VERSION, "Logdev super blk version mismatch");

const logstore_superblk* const store_sb{m_sb->get_logstore_superblk()};
logstore_id_t idx{0};
decltype(m_sb->num_stores) n{0};
Expand Down
296 changes: 148 additions & 148 deletions src/homelogstore/tests/test_log_store.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -909,97 +909,97 @@ static uint64_t curr_trunc_start(std::shared_ptr< HomeLogStore >& _log_store) {
return std::max(0l, _log_store->truncated_upto());
}

//TEST_F(LogStoreTest, TruncateDurability) {
// auto _hs_log_store =
// SampleDB::instance()
// .log_store_clients()
// .emplace_back(std::make_unique< SampleLogStoreClient >(
// HomeLogStoreMgr::CTRL_LOG_FAMILY_IDX, [](logstore_family_id_t, logstore_seq_num_t, logdev_key) {}))
// .get()
// ->get_log_store();
//
// auto const start_index = [](std::shared_ptr< HomeLogStore >& _log_store) {
// return curr_trunc_start(_log_store) + 1;
// };
// auto const next_slot = [](std::shared_ptr< HomeLogStore >& _log_store) {
// auto const last_idx = _log_store->get_contiguous_issued_seq_num(curr_trunc_start(_log_store));
// EXPECT_TRUE(last_idx >= 0l);
// return static_cast< uint64_t >(last_idx) + 1;
// };
//
// uint64_t last_idx{100};
// for (auto i = 0u; i < last_idx; ++i) {
// bool io_memory{false};
// auto* const d{SampleLogStoreClient::prepare_data(i, io_memory)};
//
// EXPECT_TRUE(_hs_log_store->write_sync(next_slot(_hs_log_store),
// {reinterpret_cast< uint8_t* >(d), d->total_size(), false}));
//
// if (io_memory) {
// iomanager.iobuf_free(reinterpret_cast< uint8_t* >(d));
// } else {
// std::free(static_cast< void* >(d));
// }
// }
// // ls range should be [1, 100]
// EXPECT_EQ(start_index(_hs_log_store), 1ul);
// EXPECT_EQ(next_slot(_hs_log_store), last_idx + 1);
//
//// set flip to avoid logdev metablk persistence
//#ifdef _PRERELEASE
// flip::FlipClient* fc{HomeStoreFlip::client_instance()};
// flip::FlipFrequency freq;
// freq.set_count(1);
// freq.set_percent(100);
// fc->inject_noreturn_flip("logstore_test_skip_persist", {}, freq);
//#endif
//
// // fill gaps and truncate upto 150. ls is now [151, 151]
// uint64_t truncate_upto{150};
// for (auto curr_idx = next_slot(_hs_log_store); truncate_upto >= curr_idx; ++curr_idx) {
// _hs_log_store->fill_gap(curr_idx);
// }
// _hs_log_store->truncate(truncate_upto);
// // validate the start and end of the ls
// EXPECT_EQ(start_index(_hs_log_store), truncate_upto + 1);
// EXPECT_EQ(next_slot(_hs_log_store), truncate_upto + 1);
//
// // restart homestore
// const auto num_devs{SISL_OPTIONS["num_devs"].as< uint32_t >()}; // num devices
// const auto dev_size_bytes{SISL_OPTIONS["dev_size_mb"].as< uint64_t >() * 1024 * 1024};
// const auto num_threads{SISL_OPTIONS["num_threads"].as< uint32_t >()};
// const auto num_logstores{SISL_OPTIONS["num_logstores"].as< uint32_t >() + 1};
// SampleLogStoreClient::set_validate_on_log_found(false);
// SampleDB::instance().start_homestore(num_devs, dev_size_bytes, num_threads, num_logstores, true /* restart */);
//
// _hs_log_store = SampleDB::instance().log_store_clients().back()->get_log_store();
//#ifdef _PRERELEASE
// // We are simulating a crash by setting the logstore_test_skip_persist flip which does not persist metablk
// // This will invalidate truncate call above and set logstore to [1 100]
// EXPECT_EQ(1ul, start_index(_hs_log_store));
// EXPECT_EQ(last_idx + 1, next_slot(_hs_log_store));
//#endif
//
// // fast_forward should be resilient to crashes and should be able to recover
//
// uint64_t fast_forward_upto{350};
// _hs_log_store->sync_truncate(fast_forward_upto);
//#ifdef _PRERELEASE
// flip::FlipFrequency freq1;
// freq1.set_count(1);
// freq1.set_percent(100);
// fc->inject_noreturn_flip("logstore_test_skip_persist", {}, freq1);
//#endif
// EXPECT_EQ(start_index(_hs_log_store), fast_forward_upto + 1);
// EXPECT_EQ(next_slot(_hs_log_store), fast_forward_upto + 1);
//
// SampleDB::instance().start_homestore(num_devs, dev_size_bytes, num_threads, num_logstores, true /* restart */);
// EXPECT_EQ(start_index(_hs_log_store), fast_forward_upto + 1);
// EXPECT_EQ(next_slot(_hs_log_store), fast_forward_upto + 1);
//
// EXPECT_TRUE(SampleDB::instance().delete_log_store(_hs_log_store->get_store_id()));
// SampleLogStoreClient::set_validate_on_log_found(true);
//}
// Validates durability of log store truncation across a simulated crash.
// Flow: write 100 records, arm a flip that skips persisting the logdev
// metablk (simulating a crash before persistence), truncate, restart
// homestore, and verify the truncation was rolled back to the pre-crash
// range. Then verify sync_truncate (fast-forward) survives the same
// skip-persist treatment across another restart.
TEST_F(LogStoreTest, TruncateDurability) {
    auto _hs_log_store =
        SampleDB::instance()
            .log_store_clients()
            .emplace_back(std::make_unique< SampleLogStoreClient >(
                HomeLogStoreMgr::CTRL_LOG_FAMILY_IDX, [](logstore_family_id_t, logstore_seq_num_t, logdev_key) {}))
            .get()
            ->get_log_store();

    // First valid (non-truncated) sequence number of the store.
    auto const start_index = [](std::shared_ptr< HomeLogStore >& _log_store) {
        return curr_trunc_start(_log_store) + 1;
    };
    // Next sequence number to write: one past the highest contiguously issued seq num.
    auto const next_slot = [](std::shared_ptr< HomeLogStore >& _log_store) {
        auto const last_idx = _log_store->get_contiguous_issued_seq_num(curr_trunc_start(_log_store));
        EXPECT_TRUE(last_idx >= 0l);
        return static_cast< uint64_t >(last_idx) + 1;
    };

    uint64_t last_idx{100};
    for (auto i = 0u; i < last_idx; ++i) {
        bool io_memory{false};
        auto* const d{SampleLogStoreClient::prepare_data(i, io_memory)};

        EXPECT_TRUE(_hs_log_store->write_sync(next_slot(_hs_log_store),
                                              {reinterpret_cast< uint8_t* >(d), d->total_size(), false}));

        // prepare_data may hand back either iobuf or malloc'd memory; free accordingly
        if (io_memory) {
            iomanager.iobuf_free(reinterpret_cast< uint8_t* >(d));
        } else {
            std::free(static_cast< void* >(d));
        }
    }
    // ls range should be [1, 100]
    EXPECT_EQ(start_index(_hs_log_store), 1ul);
    EXPECT_EQ(next_slot(_hs_log_store), last_idx + 1);

    // set flip to avoid logdev metablk persistence
#ifdef _PRERELEASE
    flip::FlipClient* fc{HomeStoreFlip::client_instance()};
    flip::FlipFrequency freq;
    freq.set_count(1);
    freq.set_percent(100);
    fc->inject_noreturn_flip("logstore_test_skip_persist", {}, freq);
#endif

    // fill gaps and truncate upto 150. ls is now [151, 151]
    uint64_t truncate_upto{150};
    for (auto curr_idx = next_slot(_hs_log_store); truncate_upto >= curr_idx; ++curr_idx) {
        _hs_log_store->fill_gap(curr_idx);
    }
    _hs_log_store->truncate(truncate_upto);
    // validate the start and end of the ls
    EXPECT_EQ(start_index(_hs_log_store), truncate_upto + 1);
    EXPECT_EQ(next_slot(_hs_log_store), truncate_upto + 1);

    // restart homestore
    const auto num_devs{SISL_OPTIONS["num_devs"].as< uint32_t >()}; // num devices
    const auto dev_size_bytes{SISL_OPTIONS["dev_size_mb"].as< uint64_t >() * 1024 * 1024};
    const auto num_threads{SISL_OPTIONS["num_threads"].as< uint32_t >()};
    const auto num_logstores{SISL_OPTIONS["num_logstores"].as< uint32_t >() + 1};
    SampleLogStoreClient::set_validate_on_log_found(false);
    SampleDB::instance().start_homestore(num_devs, dev_size_bytes, num_threads, num_logstores, true /* restart */);

    _hs_log_store = SampleDB::instance().log_store_clients().back()->get_log_store();
#ifdef _PRERELEASE
    // We are simulating a crash by setting the logstore_test_skip_persist flip which does not persist metablk
    // This will invalidate truncate call above and set logstore to [1 100]
    EXPECT_EQ(1ul, start_index(_hs_log_store));
    EXPECT_EQ(last_idx + 1, next_slot(_hs_log_store));
#endif

    // fast_forward should be resilient to crashes and should be able to recover

    uint64_t fast_forward_upto{350};
    _hs_log_store->sync_truncate(fast_forward_upto);
    // NOTE(review): the flip is armed *after* sync_truncate here, presumably so
    // the next metablk persist attempt (before the restart below) is skipped —
    // confirm this ordering is intentional.
#ifdef _PRERELEASE
    flip::FlipFrequency freq1;
    freq1.set_count(1);
    freq1.set_percent(100);
    fc->inject_noreturn_flip("logstore_test_skip_persist", {}, freq1);
#endif
    EXPECT_EQ(start_index(_hs_log_store), fast_forward_upto + 1);
    EXPECT_EQ(next_slot(_hs_log_store), fast_forward_upto + 1);

    // NOTE(review): unlike the first restart, _hs_log_store is not re-fetched
    // from log_store_clients() after this restart — confirm the shared_ptr
    // remains a valid handle for the reopened store.
    SampleDB::instance().start_homestore(num_devs, dev_size_bytes, num_threads, num_logstores, true /* restart */);
    EXPECT_EQ(start_index(_hs_log_store), fast_forward_upto + 1);
    EXPECT_EQ(next_slot(_hs_log_store), fast_forward_upto + 1);

    EXPECT_TRUE(SampleDB::instance().delete_log_store(_hs_log_store->get_store_id()));
    SampleLogStoreClient::set_validate_on_log_found(true);
}

TEST_F(LogStoreTest, BurstRandInsertThenTruncate) {
const auto num_records{SISL_OPTIONS["num_records"].as< uint32_t >()};
Expand Down Expand Up @@ -1168,63 +1168,63 @@ TEST_F(LogStoreTest, VarRateInsertThenTruncate) {
}
}

//TEST_F(LogStoreTest, ThrottleSeqInsertThenRecover) {
// const auto num_devs{SISL_OPTIONS["num_devs"].as< uint32_t >()}; // num devices
// const auto dev_size_bytes{SISL_OPTIONS["dev_size_mb"].as< uint64_t >() * 1024 * 1024};
// const auto num_records{SISL_OPTIONS["num_records"].as< uint32_t >()};
// const auto num_threads{SISL_OPTIONS["num_threads"].as< uint32_t >()};
// const auto num_logstores{SISL_OPTIONS["num_logstores"].as< uint32_t >()};
// // somewhere between 4-15 iterations depending on if run with other tests or not this will fail
// const auto iterations = SISL_OPTIONS["iterations"].as< uint32_t >();
//
// for (uint32_t iteration{0}; iteration < iterations; ++iteration) {
// LOGINFO("Iteration {}", iteration);
// LOGINFO("Step 1: Reinit the num records to start sequential write test");
// this->init(num_records);
//
// LOGINFO("Step 2: Issue sequential inserts with q depth of 30");
// this->kickstart_inserts(1, 30);
//
// LOGINFO("Step 3: Wait for the Inserts to complete");
// this->wait_for_inserts();
//
// LOGINFO("Step 4: Read all the inserts one by one for each log store to validate if what is written is valid");
// this->read_validate(true);
//
// LOGINFO("Step 4.1: Iterate all inserts one by one for each log store and validate if what is written is valid");
// this->iterate_validate(true);
//
// LOGINFO("Step 5: Restart homestore");
// SampleDB::instance().start_homestore(num_devs, dev_size_bytes, num_threads, num_logstores, true /* restart */);
// this->recovery_validate();
// this->init(num_records);
//
// LOGINFO("Step 6: Restart homestore again to validate recovery on consecutive restarts");
// SampleDB::instance().start_homestore(num_devs, dev_size_bytes, num_threads, num_logstores, true /* restart */);
// this->recovery_validate();
// this->init(num_records);
//
// LOGINFO("Step 7: Issue more sequential inserts after restarts with q depth of 15");
// this->kickstart_inserts(1, 15);
//
// LOGINFO("Step 8: Wait for the previous Inserts to complete");
// this->wait_for_inserts();
//
// LOGINFO("Step 9: Read all the inserts one by one for each log store to validate if what is written is valid");
// this->read_validate(true);
//
// LOGINFO("Step 9.1: Iterate all inserts one by one for each log store and validate if what is written is valid");
// this->iterate_validate(true);
//
// LOGINFO("Step 10: Restart homestore again to validate recovery after inserts");
// SampleDB::instance().start_homestore(num_devs, dev_size_bytes, num_threads, num_logstores, true /* restart */);
// this->recovery_validate();
// this->init(num_records);
//
// LOGINFO("Step 11: Truncate");
// this->truncate_validate();
// }
//}
// Exercises recovery under repeated restarts: each iteration issues throttled
// sequential inserts, validates via read and iteration, restarts homestore
// multiple times (validating recovery each time), inserts more records, and
// finishes with a truncation pass.
TEST_F(LogStoreTest, ThrottleSeqInsertThenRecover) {
    const auto num_devs{SISL_OPTIONS["num_devs"].as< uint32_t >()}; // num devices
    const auto dev_size_bytes{SISL_OPTIONS["dev_size_mb"].as< uint64_t >() * 1024 * 1024};
    const auto num_records{SISL_OPTIONS["num_records"].as< uint32_t >()};
    const auto num_threads{SISL_OPTIONS["num_threads"].as< uint32_t >()};
    const auto num_logstores{SISL_OPTIONS["num_logstores"].as< uint32_t >()};
    // NOTE: historically flaky — this test failed somewhere between 4 and 15
    // iterations depending on whether it ran alongside other tests.
    const auto iterations = SISL_OPTIONS["iterations"].as< uint32_t >();

    for (uint32_t iteration{0}; iteration < iterations; ++iteration) {
        LOGINFO("Iteration {}", iteration);
        LOGINFO("Step 1: Reinit the num records to start sequential write test");
        this->init(num_records);

        LOGINFO("Step 2: Issue sequential inserts with q depth of 30");
        this->kickstart_inserts(1, 30);

        LOGINFO("Step 3: Wait for the Inserts to complete");
        this->wait_for_inserts();

        LOGINFO("Step 4: Read all the inserts one by one for each log store to validate if what is written is valid");
        this->read_validate(true);

        LOGINFO("Step 4.1: Iterate all inserts one by one for each log store and validate if what is written is valid");
        this->iterate_validate(true);

        LOGINFO("Step 5: Restart homestore");
        SampleDB::instance().start_homestore(num_devs, dev_size_bytes, num_threads, num_logstores, true /* restart */);
        this->recovery_validate();
        this->init(num_records);

        LOGINFO("Step 6: Restart homestore again to validate recovery on consecutive restarts");
        SampleDB::instance().start_homestore(num_devs, dev_size_bytes, num_threads, num_logstores, true /* restart */);
        this->recovery_validate();
        this->init(num_records);

        LOGINFO("Step 7: Issue more sequential inserts after restarts with q depth of 15");
        this->kickstart_inserts(1, 15);

        LOGINFO("Step 8: Wait for the previous Inserts to complete");
        this->wait_for_inserts();

        LOGINFO("Step 9: Read all the inserts one by one for each log store to validate if what is written is valid");
        this->read_validate(true);

        LOGINFO("Step 9.1: Iterate all inserts one by one for each log store and validate if what is written is valid");
        this->iterate_validate(true);

        LOGINFO("Step 10: Restart homestore again to validate recovery after inserts");
        SampleDB::instance().start_homestore(num_devs, dev_size_bytes, num_threads, num_logstores, true /* restart */);
        this->recovery_validate();
        this->init(num_records);

        LOGINFO("Step 11: Truncate");
        this->truncate_validate();
    }
}

TEST_F(LogStoreTest, DeleteMultipleLogStores) {
const auto nrecords{(SISL_OPTIONS["num_records"].as< uint32_t >() * 5) / 100};
Expand Down

0 comments on commit bd47812

Please sign in to comment.