Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix hashdb64 workflow test #650

Merged
merged 3 commits into from
Oct 17, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
379 changes: 67 additions & 312 deletions src/hashdb64/database_64.cpp

Large diffs are not rendered by default.

63 changes: 11 additions & 52 deletions src/hashdb64/database_64.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ using namespace std;

/*

A Tree (state) is made of a set of TreeChunks:
A Tree (state) is made of a set of TreeChunks, each of them stored in one 4kB page:

/\
/__\
Expand All @@ -35,12 +35,12 @@ A Tree (state) is made of a set of TreeChunks:
/__\

When we want to read [key, value] for a given root:
- we call db.read(treeChunk.hash, treeChunk.data) starting from the root until we reach the [key, value] leaf node
- we search for the right page starting from the root until we reach the [key, value] leaf node in the final page

When we want to write a new leaf node [key, newValue] on a given root and get the resulting newStateRoot
- we calculate the new position of [key, newValue], creating new chunks if needed
- we calculate the new position of [key, newValue], creating new pages if needed
- we recalculate the hashes of all the modified and new chunks
- we call db.write(treeChunk.hash, treeChunk.data) of all the modified and new chunks
- we write every resulting hash in the proper position of the proper page

Every time we write a [key, newValue] we are potentially creating a new Tree = SUM(TreeChunks) if newValue != oldValue
Every new Tree represents a newer version of the state
Expand All @@ -60,51 +60,21 @@ The Forest takes note of the latest Tree hash to keep track of the current state

*/

class DB64Query
{
public:
string key;
Goldilocks::Element keyFea[4];
string &value; // value can be an input in multiWrite(), or an output in multiRead()
DB64Query(const string &_key, const Goldilocks::Element (&_keyFea)[4], string &_value) : key(_key), value(_value)
{
keyFea[0] = _keyFea[0];
keyFea[1] = _keyFea[1];
keyFea[2] = _keyFea[2];
keyFea[3] = _keyFea[3];
}
};

class Database64
{
public:
Goldilocks &fr;
const Config &config;
PoseidonGoldilocks poseidon;

// Basic flags
bool bInitialized = false;
bool useRemoteDB = false;

uint64_t headerPageNumber;

public:
//sem_t senderSem; // Semaphore to wakeup database sender thread when flush() is called
//sem_t getFlushDataSem; // Semaphore to unblock getFlushData() callers when new data is available
private:
//pthread_t senderPthread; // Database sender thread
//pthread_t cacheSynchPthread; // Cache synchronization thread

// Tree64
zkresult CalculateHash (Child &result, std::vector<TreeChunk *> &chunks, vector<DB64Query> &dbQueries, int idChunk, int level, vector<HashValueGL> *hashValues);
bool bInitialized = false;
uint64_t headerPageNumber;
pthread_mutex_t mutex;
uint64_t currentFlushId;

public:

// Constructor and destructor
Database64(Goldilocks &fr, const Config &config);
~Database64();

public:
// Basic methods
void init(void);

Expand All @@ -124,24 +94,13 @@ class Database64
zkresult consolidateBlock (uint64_t blockNumber); // TODO: Who reports this block number?
zkresult revertBlock (uint64_t blockNumber);

public:
// Flush data pending to be stored permanently
zkresult flush(uint64_t &flushId, uint64_t &lastSentFlushId);
zkresult getFlushStatus(uint64_t &storedFlushId, uint64_t &storingFlushId, uint64_t &lastFlushId, uint64_t &pendingToFlushNodes, uint64_t &pendingToFlushProgram, uint64_t &storingNodes, uint64_t &storingProgram);

// Get flush data, written to database by dbSenderThread; it blocks
zkresult getFlushData(uint64_t flushId, uint64_t &lastSentFlushId, unordered_map<string, string> (&nodes), unordered_map<string, string> (&program), string &nodesStateRoot);

// Clear cache
void clearCache(void);


// Lock/Unlock
void Lock(void) { pthread_mutex_lock(&mutex); };
void Unlock(void) { pthread_mutex_unlock(&mutex); };
};

// Thread to send data to database
//void *dbSenderThread64(void *arg);

// Thread to synchronize cache from master hash DB server
//void *dbCacheSynchThread64(void *arg);

#endif
7 changes: 2 additions & 5 deletions src/hashdb64/page/header_page.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -180,13 +180,10 @@ zkresult HeaderPage::WriteVersionData (uint64_t &headerPageNumber, const uint64_
return KeyValuePage::Write(headerPage->versionDataPage, version2key(version), versionData2value(versionData), headerPageNumber);
}

zkresult HeaderPage::KeyValueHistoryRead (const uint64_t headerPageNumber, const string &key, const uint64_t version, mpz_class &value)
zkresult HeaderPage::KeyValueHistoryRead (const uint64_t keyValueHistoryPage, const string &key, const uint64_t version, mpz_class &value)
{
// Get header page
HeaderStruct * headerPage = (HeaderStruct *)pageManager.getPageAddress(headerPageNumber);

// Call the specific method
return KeyValueHistoryPage::Read(headerPage->keyValueHistoryPage, key, version, value);
return KeyValueHistoryPage::Read(keyValueHistoryPage, key, version, value);
}

zkresult HeaderPage::KeyValueHistoryWrite (uint64_t &headerPageNumber, const string &key, const uint64_t version, const mpz_class &value)
Expand Down
2 changes: 1 addition & 1 deletion src/hashdb64/page/header_page.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ class HeaderPage
static zkresult WriteVersionData ( uint64_t &headerPageNumber, const uint64_t &version, const VersionDataStruct &versionData);

// Key-Value-History methods
static zkresult KeyValueHistoryRead (const uint64_t headerPageNumber, const string &key, const uint64_t version, mpz_class &value);
static zkresult KeyValueHistoryRead (const uint64_t keyValueHistoryPage, const string &key, const uint64_t version, mpz_class &value);
static zkresult KeyValueHistoryWrite ( uint64_t &headerPageNumber, const string &key, const uint64_t version, const mpz_class &value);
static zkresult KeyValueHistoryCalculateHash (uint64_t &headerPageNumber, Goldilocks::Element (&hash)[4]);

Expand Down
22 changes: 3 additions & 19 deletions src/hashdb64/page/key_value_history_page.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,6 @@ zkresult KeyValueHistoryPage::Write (uint64_t &pageNumber, const string &key, co

uint64_t insertionRawDataPage = headerPage->rawDataPage;
uint64_t insertionRawDataOffset = RawDataPage::GetOffset(insertionRawDataPage);
uint64_t rawDataPage = insertionRawDataPage;

string keyAndValue = key + scalar2ba32(value); // TODO: Check that value size=32
zkr = RawDataPage::Write(headerPage->rawDataPage, keyAndValue);
Expand All @@ -183,9 +182,6 @@ zkresult KeyValueHistoryPage::Write (uint64_t &pageNumber, const string &key, co
page->keyValueEntry[index][1] = (insertionRawDataOffset << 48) + insertionRawDataPage;
page->keyValueEntry[index][2] = 0; // Mark as no hash has been calculated

// Update header
headerPage->rawDataPage = rawDataPage;

return ZKR_SUCCESS;
}
// Leaf node
Expand Down Expand Up @@ -254,10 +250,9 @@ zkresult KeyValueHistoryPage::Write (uint64_t &pageNumber, const string &key, co
// Get the current rawDataPage and offset
uint64_t insertionRawDataPage = headerPage->rawDataPage;
uint64_t insertionRawDataOffset = RawDataPage::GetOffset(headerPage->rawDataPage);
uint64_t rawDataPage = insertionRawDataPage;

string keyAndValue = key + valueBa;
zkr = RawDataPage::Write(rawDataPage, keyAndValue);
zkr = RawDataPage::Write(headerPage->rawDataPage, keyAndValue);
if (zkr != ZKR_SUCCESS)
{
zklog.error("KeyValueHistoryPage::Write() failed calling RawDataPage.Write result=" + zkresult2string(zkr) + " rawDataPage=" + to_string(rawDataPage) + " rawDataOffset=" + to_string(rawDataOffset) + " key=" + ba2string(key) + " version=" + to_string(version) + " level=" + to_string(level) + " index=" + to_string(index));
Expand All @@ -269,9 +264,6 @@ zkresult KeyValueHistoryPage::Write (uint64_t &pageNumber, const string &key, co
page->keyValueEntry[index][1] = (insertionRawDataOffset << 48) + (insertionRawDataPage & U64Mask48);
page->keyValueEntry[index][2] = 0;

// Update header
headerPage->rawDataPage = rawDataOffset;

return ZKR_SUCCESS;
}

Expand Down Expand Up @@ -418,13 +410,12 @@ zkresult KeyValueHistoryPage::calculatePageHash (const uint64_t pageNumber, cons
// Get the current rawDataPage and offset
uint64_t insertionRawDataPage = headerPage->rawDataPage;
uint64_t insertionRawDataOffset = RawDataPage::GetOffset(headerPage->rawDataPage);
uint64_t rawDataPage = insertionRawDataPage;

// Store the hash in raw page
string hashBa;
hashBa = string2ba(fea2string(fr, hash));
zkassert(hashBa.size() == 32);
zkr = RawDataPage::Write(rawDataPage, hashBa);
zkr = RawDataPage::Write(headerPage->rawDataPage, hashBa);
if (zkr != ZKR_SUCCESS)
{
zklog.error("KeyValueHistoryPage::calculatePageHash() failed calling RawDataPage.Write result=" + zkresult2string(zkr) + " insertionRawDataPage=" + to_string(insertionRawDataPage) + " insertionRawDataOffset=" + to_string(insertionRawDataOffset) + " level=" + to_string(level) + " index=" + to_string(index));
Expand All @@ -433,9 +424,6 @@ zkresult KeyValueHistoryPage::calculatePageHash (const uint64_t pageNumber, cons

// Record the new hash and its raw data
page->keyValueEntry[index][2] = (insertionRawDataOffset << 48) | (insertionRawDataPage & U64Mask48);

// Update the header
headerPage->rawDataPage = rawDataPage;
}
// If hash was calculated, get it from raw data
else
Expand Down Expand Up @@ -499,13 +487,12 @@ zkresult KeyValueHistoryPage::calculatePageHash (const uint64_t pageNumber, cons
// Get the current rawDataPage and offset
uint64_t insertionRawDataPage = headerPage->rawDataPage;
uint64_t insertionRawDataOffset = RawDataPage::GetOffset(headerPage->rawDataPage);
uint64_t rawDataPage = insertionRawDataPage;

// Store the hash in raw page
string hashBa;
hashBa = string2ba(fea2string(fr, hash));
zkassert(hashBa.size() == 32);
zkr = RawDataPage::Write(rawDataPage, hashBa);
zkr = RawDataPage::Write(headerPage->rawDataPage, hashBa);
if (zkr != ZKR_SUCCESS)
{
zklog.error("KeyValueHistoryPage::calculatePageHash() failed calling RawDataPage.Write result=" + zkresult2string(zkr) + " insertionRawDataPage=" + to_string(insertionRawDataPage) + " insertionRawDataOffset=" + to_string(insertionRawDataOffset) + " level=" + to_string(level) + " index=" + to_string(index));
Expand All @@ -514,9 +501,6 @@ zkresult KeyValueHistoryPage::calculatePageHash (const uint64_t pageNumber, cons

// Record the new hash and its raw data
page->keyValueEntry[index][2] = (insertionRawDataOffset << 48) | (insertionRawDataPage & U64Mask48);

// Update header page
headerPage->rawDataPage = rawDataPage;
}
}

Expand Down
7 changes: 4 additions & 3 deletions src/hashdb64/state_manager_64.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
#include "timer.hpp"
#include "persistence.hpp"
#include "definitions.hpp"
#include "zkglobals.hpp"

Goldilocks frSM64;
PoseidonGoldilocks poseidonSM64;
Expand Down Expand Up @@ -638,7 +639,7 @@ zkresult StateManager64::purge (const string &batchUUID, const string &_newState
continue;
}

zkr = purgeTxPersistence(txState.persistence[persistence], db.config);
zkr = purgeTxPersistence(txState.persistence[persistence], config);
if (zkr != ZKR_SUCCESS)
{
zklog.error("StateManager64::purge() failed calling purgeTxPersistence() zkr=" + zkresult2string(zkr) +
Expand Down Expand Up @@ -1019,7 +1020,7 @@ zkresult StateManager64::set (const string &batchUUID, uint64_t tx, Database64 &

zkresult zkr;

bool bUseStateManager = db.config.stateManager && (batchUUID.size() > 0);
bool bUseStateManager = config.stateManager && (batchUUID.size() > 0);

if (bUseStateManager)
{
Expand Down Expand Up @@ -1081,7 +1082,7 @@ zkresult StateManager64::get (const string &batchUUID, Database64 &db, const Gol
zklog.info("StateManager64::get() called with root=" + fea2string(fr,root) + " and key=" + fea2string(fr,key));
#endif

bool bUseStateManager = db.config.stateManager && (batchUUID.size() > 0);
bool bUseStateManager = config.stateManager && (batchUUID.size() > 0);

string keyString = fea2string(fr, key);
mpz_class value;
Expand Down
5 changes: 3 additions & 2 deletions src/service/hashdb/hashdb.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -399,7 +399,8 @@ zkresult HashDB::getFlushData(uint64_t flushId, uint64_t &lastSentFlushId, unord

if (config.hashDB64)
{
zkr = db64.getFlushData(flushId, lastSentFlushId, nodes, program, nodesStateRoot);
zklog.error("HashDB::getFlushData() called with config.hashDB64=true");
return ZKR_DB_ERROR;
}
else
{
Expand All @@ -413,7 +414,7 @@ void HashDB::clearCache(void)
{
if (config.hashDB64)
{
db64.clearCache();
// We don't use cache in HashDB64
}
else
{
Expand Down
1 change: 0 additions & 1 deletion test/service/executor/executor_client.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -196,7 +196,6 @@ bool ExecutorClient::ProcessBatch (void)
if (config.hashDB64)
{
Database64 &db = hashDB.db64;
db.clearCache();

CheckTreeCounters64 checkTreeCounters;

Expand Down
4 changes: 2 additions & 2 deletions test/service/hashdb/hashdb64_workflow_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,7 @@ uint64_t HashDB64WorkflowTest (const Config& config)
for (uint64_t i=0; i<4; i++) root[i] = consolidatedStateRoot[i];

// Wait for data to be sent
/*while (true)
while (true)
{
uint64_t storedFlushId, storingFlushId, lastFlushId, pendingToFlushNodes, pendingToFlushProgram, storingNodes, storingProgram;
string proverId;
Expand All @@ -141,7 +141,7 @@ uint64_t HashDB64WorkflowTest (const Config& config)
}
sleep(1);
}
zklog.info("FLUSHED");*/
zklog.info("FLUSHED");

// Call ReadTree with the old state root to get the hashes of the initial values of all read or written keys
/*vector<HashValueGL> oldHashValues;
Expand Down
5 changes: 3 additions & 2 deletions test/utils/check_tree_64.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
#include "zkmax.hpp"
#include "scalar.hpp"
#include "tree_chunk.hpp"
#include "zkglobals.hpp"

zkresult CheckTree64 (Database64 &db, const string &key, uint64_t level, CheckTreeCounters64 &checkTreeCounters)
{
Expand All @@ -11,7 +12,7 @@ zkresult CheckTree64 (Database64 &db, const string &key, uint64_t level, CheckTr

TreeChunk treeChunk(poseidon);
Goldilocks::Element keyFea[4];
string2fea(db.fr, key, keyFea);
string2fea(fr, key, keyFea);
zkresult result = ZKR_UNSPECIFIED; // = db.read(key, keyFea, treeChunk.data, NULL, false);
if (result != ZKR_SUCCESS)
{
Expand All @@ -38,7 +39,7 @@ zkresult CheckTree64 (Database64 &db, const string &key, uint64_t level, CheckTr
case INTERMEDIATE:
{
checkTreeCounters.intermediateNodes++;
result = CheckTree64(db, fea2string(db.fr, treeChunk.getChild(i).intermediate.hash), level+1, checkTreeCounters);
result = CheckTree64(db, fea2string(fr, treeChunk.getChild(i).intermediate.hash), level+1, checkTreeCounters);
if (zkr != ZKR_SUCCESS)
{
return zkr;
Expand Down
Loading