Skip to content

Commit

Permalink
Port config.hashDBSingleton changes from Elderberry 2
Browse files Browse the repository at this point in the history
  • Loading branch information
fractasy committed May 13, 2024
1 parent f6fee38 commit 486cf80
Show file tree
Hide file tree
Showing 10 changed files with 116 additions and 259 deletions.
4 changes: 1 addition & 3 deletions src/config/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -54,9 +54,6 @@ The configuration parameters can be of different uses:
|`saveProofToFile`|test|boolean|Saves generated proof to file, in JSON format|false|SAVE_PROOF_TO_FILE|
|`saveFilesInSubfolders`|test|boolean|Saves files in folders named as per hour, e.g. `output/2023/01/10/18`|false|SAVE_FILES_IN_SUBFOLDERS|
|`saveExecutorErrors`|test|boolean|Saves executor service errors file, in JSON format|false|SAVE_EXECUTOR_ERRORS|
|`loadDBToMemCache`|test|boolean|Fill database cache with content during initialization|false|LOAD_DB_TO_MEM_CACHE|
|`loadDBToMemCacheInParallel`|test|boolean|Fill database cache in parallel with the normal execution|false|LOAD_DB_TO_MEM_CACHE_IN_PARALLEL|
|`loadDBToMemTimeout`|test|u64|Fill database cache up to a certain time, in microseconds|30000000 (30 seconds)|LOAD_DB_TO_MEM_TIMEOUT|
|**`dbMTCacheSize`**|production|s64|Database MT cache size, in MB|8*1024 (8 GB)|DB_MT_CACHE_SIZE|
|**`useAssociativeCache`**|production|boolean|Use associative cache as Database MT cache, which is faster than regular cache|false|USE_ASSOCIATIVE_CACHE|
|`log2DbMTAssociativeCacheSize`|production|s64|log2 of the size in entries of the DatabaseMTAssociativeCache; note that 1 cache entry = 128 bytes|25|LOG2_DB_MT_ASSOCIATIVE_CACHE_SIZE|
Expand All @@ -81,6 +78,7 @@ The configuration parameters can be of different uses:
|`hashDBFileName`|test|string|Core name used for the hashDB files (path,numbering and extension not included). If hashDBFileName is empty in-memory version of the hashDB is used (only for DEBUG purposes). |""|HASHDB_FILE_NAME|
|`hashDBFileSize`|test|u64|HashDB files size in GB|128|HASHDB_FILE_SIZE|
|`hashDBFolder`|test|string|Folder containing the hashDB files|hashdb|HASHDB_FOLDER|
|`hashDBSingleton`|production|boolean|Use HashDB singleton. Set to false when databaseURL=local to get one dedicated cache per thread.|true|HASHDB_SINGLETON|
|`aggregatorServerPort`|test|u16|Aggregator server GRPC port|50081|AGGREGATOR_SERVER_PORT|
|**`aggregatorClientPort`**|production|u16|Aggregator client GRPC port to connect to|50081|AGGREGATOR_SERVER_PORT|
|**`aggregatorClientHost`**|production|string|Aggregator client GRPC host name to connect to, i.e. Aggregator server host name|"127.0.0.1"|AGGREGATOR_CLIENT_HOST|
Expand Down
20 changes: 9 additions & 11 deletions src/config/config.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -169,12 +169,6 @@ void Config::load(json &config)
ParseBool(config, "saveFilesInSubfolders", "SAVE_FILES_IN_SUBFOLDERS", saveFilesInSubfolders, false);
ParseBool(config, "saveExecutorErrors", "SAVE_EXECUTOR_ERRORS", saveExecutorErrors, false);

// Load DB to mem cache TODO: Discontinue this functionality
//ParseBool(config, "loadDBToMemCache", "LOAD_DB_TO_MEM_CACHE", loadDBToMemCache, false);
loadDBToMemCache = false;
ParseBool(config, "loadDBToMemCacheInParallel", "LOAD_DB_TO_MEM_CACHE_IN_PARALLEL", loadDBToMemCacheInParallel, false);
ParseU64(config, "loadDBToMemTimeout", "LOAD_DB_TO_MEM_TIMEOUT", loadDBToMemTimeout, 30*1000*1000); // Default = 30 seconds

// MT cache
ParseS64(config, "dbMTCacheSize", "DB_MT_CACHE_SIZE", dbMTCacheSize, 8*1024); // Default = 8 GB

Expand Down Expand Up @@ -207,6 +201,7 @@ void Config::load(json &config)
ParseString(config, "hashDBFileName", "HASHDB_FILE_NAME", hashDBFileName, "");
ParseU64(config, "hashDBFileSize", "HASHDB_FILE_SIZE", hashDBFileSize, 128);
ParseString(config, "hashDBFolder", "HASHDB_FOLDER", hashDBFolder, "hashdb");
ParseBool(config, "hashDBSingleton", "HASHDB_SINGLETON", hashDBSingleton, true);
ParseU16(config, "aggregatorServerPort", "AGGREGATOR_SERVER_PORT", aggregatorServerPort, 50081);
ParseU16(config, "aggregatorClientPort", "AGGREGATOR_CLIENT_PORT", aggregatorClientPort, 50081);
ParseString(config, "aggregatorClientHost", "AGGREGATOR_CLIENT_HOST", aggregatorClientHost, "127.0.0.1");
Expand Down Expand Up @@ -472,10 +467,7 @@ void Config::print(void)
zklog.info(" saveExecutorErrors=true");
if (saveResponseToFile)
zklog.info(" saveResponseToFile=true");
zklog.info(" loadDBToMemCache=" + to_string(loadDBToMemCache));
if (loadDBToMemCacheInParallel)
zklog.info(" loadDBToMemCacheInParallel=true");
if (opcodeTracer)
if (opcodeTracer)
zklog.info(" opcodeTracer=true");
if (logRemoteDbReads)
zklog.info(" logRemoteDbReads=true");
Expand Down Expand Up @@ -507,6 +499,7 @@ void Config::print(void)
zklog.info(" hashDBFileName=" + hashDBFileName);
zklog.info(" hashDBFileSize=" + to_string(hashDBFileSize));
zklog.info(" hastDBFolder=" + hashDBFolder);
zklog.info(" hashDBSingleton=" + to_string(hashDBSingleton));
zklog.info(" aggregatorServerPort=" + to_string(aggregatorServerPort));
zklog.info(" aggregatorClientPort=" + to_string(aggregatorClientPort));
zklog.info(" aggregatorClientHost=" + aggregatorClientHost);
Expand Down Expand Up @@ -635,7 +628,6 @@ void Config::print(void)
zklog.info(" log2DbVersionsAssociativeCacheSize=" + to_string(log2DbVersionsAssociativeCacheSize));
zklog.info(" log2DbVersionsAssociativeCacheIndexesSize=" + to_string(log2DbVersionsAssociativeCacheIndexesSize));
zklog.info(" dbProgramCacheSize=" + to_string(dbProgramCacheSize));
zklog.info(" loadDBToMemTimeout=" + to_string(loadDBToMemTimeout));
zklog.info(" fullTracerTraceReserveSize=" + to_string(fullTracerTraceReserveSize));
zklog.info(" ECRecoverPrecalc=" + to_string(ECRecoverPrecalc));
zklog.info(" ECRecoverPrecalcNThreads=" + to_string(ECRecoverPrecalcNThreads));
Expand Down Expand Up @@ -1056,5 +1048,11 @@ bool Config::check (void)
inputFile = "testvectors/diagnostic/input.json";
}

if (hashDBSingleton && (databaseURL != "local"))
{
zklog.error("hashDBSingleton=true but databaseURL!=local");
bError = true;
}

return bError;
}
4 changes: 1 addition & 3 deletions src/config/config.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -63,9 +63,6 @@ class Config
bool saveFilesInSubfolders; // Saves output files in folders per hour, e.g. output/2023/01/10/18
bool saveExecutorErrors; // Saves executor service errors in executor_errors.json

bool loadDBToMemCache;
bool loadDBToMemCacheInParallel;
uint64_t loadDBToMemTimeout;
int64_t dbMTCacheSize; // Size in MBytes for the cache to store MT records
bool useAssociativeCache; // Use the associative cache for MT records?
int64_t log2DbMTAssociativeCacheSize; // log2 of the size in entries of the DatabaseMTAssociativeCache. Note 1 cache entry = 128 bytes
Expand Down Expand Up @@ -94,6 +91,7 @@ class Config
string hashDBFileName;
uint64_t hashDBFileSize;
string hashDBFolder;
bool hashDBSingleton;

// Aggregator service (client)
uint16_t aggregatorServerPort;
Expand Down
201 changes: 24 additions & 177 deletions src/hashdb/database.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,22 +15,6 @@
#include "zkmax.hpp"
#include "hashdb_remote.hpp"

#ifdef DATABASE_USE_CACHE

// Create static Database::dbMTCache and DatabaseCacheProgram objects
// This will be used to store DB records in memory and it will be shared for all the instances of Database class
// DatabaseCacheMT and DatabaseCacheProgram classes are thread-safe
DatabaseMTAssociativeCache Database::dbMTACache;
DatabaseMTCache Database::dbMTCache;
DatabaseProgramCache Database::dbProgramCache;

string Database::dbStateRootKey("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); // 64 f's
Goldilocks::Element Database::dbStateRootvKey[4] = {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF};
bool Database::useAssociativeCache = false;


#endif

// Helper functions
string removeBSXIfExists(string s) {return ((s.at(0) == '\\') && (s.at(1) == 'x')) ? s.substr(2) : s;}

Expand All @@ -40,6 +24,30 @@ Database::Database (Goldilocks &fr, const Config &config) :
connectionsPool(NULL),
multiWrite(fr)
{
#ifdef DATABASE_USE_CACHE

// Init state root key
dbStateRootvKey[0].fe = 0xFFFFFFFFFFFFFFFF;
dbStateRootvKey[1].fe = 0xFFFFFFFFFFFFFFFF;
dbStateRootvKey[2].fe = 0xFFFFFFFFFFFFFFFF;
dbStateRootvKey[3].fe = 0xFFFFFFFFFFFFFFFF;

/* INIT DB CACHE */
if (config.useAssociativeCache)
{
useAssociativeCache = true;
dbMTACache.postConstruct(config.log2DbMTAssociativeCacheIndexesSize, config.log2DbMTAssociativeCacheSize, "MTACache");
}
else{
useAssociativeCache = false;
dbMTCache.setName("MTCache");
dbMTCache.setMaxSize(config.dbMTCacheSize*1024*1024);
}
dbProgramCache.setName("ProgramCache");
dbProgramCache.setMaxSize(config.dbProgramCacheSize*1024*1024);

#endif // DATABASE_USE_CACHE

// Init mutex
pthread_mutex_init(&connMutex, NULL);

Expand Down Expand Up @@ -1965,167 +1973,6 @@ void *dbCacheSynchThread (void *arg)
return NULL;
}

// Pre-loads the Merkle tree contents from the remote database into the in-memory
// cache. It walks the state tree breadth-first starting at the stored state root,
// and stops when the configured time budget (config.loadDBToMemTimeout, in
// microseconds) expires, when the MT cache is more than ~90% full, or when the
// whole tree has been visited.
// Must only be called with a remote database (config.databaseURL != "local").
void loadDb2MemCache(const Config &config)
{
    if (config.databaseURL == "local")
    {
        zklog.error("loadDb2MemCache() called with config.databaseURL==local");
        exitProcess();
    }

#ifdef DATABASE_USE_CACHE

    TimerStart(LOAD_DB_TO_CACHE);

    Goldilocks fr;
    HashDB * pHashDB = (HashDB *)hashDBSingleton.get();

    // Read the current state root, stored under the fixed out-of-range key
    vector<Goldilocks::Element> dbValue;
    zkresult zkr = pHashDB->db.read(Database::dbStateRootKey, Database::dbStateRootvKey, dbValue, NULL, true);

    if (zkr == ZKR_DB_KEY_NOT_FOUND)
    {
        zklog.warning("loadDb2MemCache() dbStateRootKey=" + Database::dbStateRootKey + " not found in database; normal only if database is empty");
        TimerStopAndLog(LOAD_DB_TO_CACHE);
        return;
    }
    else if (zkr != ZKR_SUCCESS)
    {
        zklog.error("loadDb2MemCache() failed calling db.read result=" + zkresult2string(zkr));
        TimerStopAndLog(LOAD_DB_TO_CACHE);
        return;
    }

    string stateRootKey = fea2string(fr, dbValue[0], dbValue[1], dbValue[2], dbValue[3]);
    zklog.info("loadDb2MemCache() found state root=" + stateRootKey);

    if (stateRootKey == "0")
    {
        zklog.warning("loadDb2MemCache() found an empty tree");
        TimerStopAndLog(LOAD_DB_TO_CACHE);
        return;
    }

    struct timeval loadCacheStartTime;
    gettimeofday(&loadCacheStartTime, NULL);

    // treeMap[level] contains the hashes of the nodes discovered at that tree depth
    unordered_map<uint64_t, vector<string>> treeMap;
    vector<string> emptyVector;
    string hash, leftHash, rightHash;
    uint64_t counter = 0; // number of database nodes read (and therefore cached)

    treeMap[0] = emptyVector;
    treeMap[0].push_back(stateRootKey);
    unordered_map<uint64_t, std::vector<std::string>>::iterator treeMapIterator;
    for (uint64_t level=0; level<256; level++)
    {
        // Stop when the configured time budget has been consumed
        if (TimeDiff(loadCacheStartTime) > config.loadDBToMemTimeout)
        {
            break;
        }

        treeMapIterator = treeMap.find(level);
        if (treeMapIterator == treeMap.end())
        {
            break;
        }

        if (treeMapIterator->second.size()==0)
        {
            break;
        }

        // Keep a reference to this level's hashes: inserting treeMap[level+1] below may
        // rehash the map and invalidate treeMapIterator, but references to the mapped
        // values remain valid across a rehash (the original code kept dereferencing the
        // iterator after the insertion, which is undefined behavior on a rehash)
        vector<string> &levelHashes = treeMapIterator->second;

        treeMap[level+1] = emptyVector;

        //zklog.info("loadDb2MemCache() searching at level=" + to_string(level) + " for elements=" + to_string(levelHashes.size()));

        for (uint64_t i=0; i<levelHashes.size(); i++)
        {
            // Stop when the configured time budget has been consumed
            if (TimeDiff(loadCacheStartTime) > config.loadDBToMemTimeout)
            {
                break;
            }

            hash = levelHashes[i];
            dbValue.clear();
            Goldilocks::Element vhash[4];
            string hashNorm = NormalizeToNFormat(hash, 64);
            if(pHashDB->db.usingAssociativeCache()) string2fea(fr, hashNorm, vhash);
            zkresult zkr = pHashDB->db.read(hash, vhash, dbValue, NULL, true);

            if (zkr != ZKR_SUCCESS)
            {
                zklog.error("loadDb2MemCache() failed calling db.read(" + hash + ") result=" + zkresult2string(zkr));
                TimerStopAndLog(LOAD_DB_TO_CACHE);
                return;
            }
            if (dbValue.size() != 12)
            {
                zklog.error("loadDb2MemCache() failed calling db.read(" + hash + ") dbValue.size()=" + to_string(dbValue.size()));
                TimerStopAndLog(LOAD_DB_TO_CACHE);
                return;
            }
            counter++;
            if(Database::dbMTCache.enabled()){
                // Stop filling once the cache is more than 90% full
                double sizePercentage = double(Database::dbMTCache.getCurrentSize())*100.0/double(Database::dbMTCache.getMaxSize());
                if ( sizePercentage > 90 )
                {
                    zklog.info("loadDb2MemCache() stopping since size percentage=" + to_string(sizePercentage));
                    break;
                }
            }
            // If capacity is X000
            if (fr.isZero(dbValue[9]) && fr.isZero(dbValue[10]) && fr.isZero(dbValue[11]))
            {
                // If capacity is 0000, this is an intermediate node that contains left and right hashes of its children
                if (fr.isZero(dbValue[8]))
                {
                    leftHash = fea2string(fr, dbValue[0], dbValue[1], dbValue[2], dbValue[3]);
                    if (leftHash != "0")
                    {
                        treeMap[level+1].push_back(leftHash);
                        //zklog.info("loadDb2MemCache() level=" + to_string(level) + " found leftHash=" + leftHash);
                    }
                    rightHash = fea2string(fr, dbValue[4], dbValue[5], dbValue[6], dbValue[7]);
                    if (rightHash != "0")
                    {
                        treeMap[level+1].push_back(rightHash);
                        //zklog.info("loadDb2MemCache() level=" + to_string(level) + " found rightHash=" + rightHash);
                    }
                }
                // If capacity is 1000, this is a leaf node that contains right hash of the value node
                else if (fr.isOne(dbValue[8]))
                {
                    rightHash = fea2string(fr, dbValue[4], dbValue[5], dbValue[6], dbValue[7]);
                    if (rightHash != "0")
                    {
                        //zklog.info("loadDb2MemCache() level=" + to_string(level) + " found value rightHash=" + rightHash);
                        // Capture the value-node key BEFORE clearing dbValue; the original
                        // code cleared the vector first and then read dbValue[4..7], i.e. it
                        // built the key from elements of an already-emptied vector (UB)
                        Goldilocks::Element vRightHash[4] = {dbValue[4], dbValue[5], dbValue[6], dbValue[7]};
                        dbValue.clear();
                        zkresult zkr = pHashDB->db.read(rightHash, vRightHash, dbValue, NULL, true);
                        if (zkr != ZKR_SUCCESS)
                        {
                            zklog.error("loadDb2MemCache() failed calling db.read(" + rightHash + ") result=" + zkresult2string(zkr));
                            TimerStopAndLog(LOAD_DB_TO_CACHE);
                            return;
                        }
                        counter++;
                    }
                }
            }
        }
    }

    if(Database::dbMTCache.enabled()){
        zklog.info("loadDb2MemCache() done counter=" + to_string(counter) + " cache at " + to_string((double(Database::dbMTCache.getCurrentSize())/double(Database::dbMTCache.getMaxSize()))*100) + "%");
    }
    TimerStopAndLog(LOAD_DB_TO_CACHE);

#endif
}

zkresult Database::resetDB(void)
{
#ifdef DEBUG
Expand Down
16 changes: 7 additions & 9 deletions src/hashdb/database.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -67,17 +67,17 @@ class Database

public:
#ifdef DATABASE_USE_CACHE
// Cache static instances
static bool useAssociativeCache;
static DatabaseMTAssociativeCache dbMTACache;
static DatabaseMTCache dbMTCache;
static DatabaseProgramCache dbProgramCache;
// Cache instances
bool useAssociativeCache;
DatabaseMTAssociativeCache dbMTACache;
DatabaseMTCache dbMTCache;
DatabaseProgramCache dbProgramCache;

// This is a fixed key to store the latest state root hash, used to load it to the cache
// This key is "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
// This key cannot be the result of a hash because it is out of the Goldilocks Element range
static string dbStateRootKey;
static Goldilocks::Element dbStateRootvKey[4];
string dbStateRootKey;
Goldilocks::Element dbStateRootvKey[4];

#endif

Expand Down Expand Up @@ -130,6 +130,4 @@ void *dbSenderThread(void *arg);
// Thread to synchronize cache from master hash DB server
void *dbCacheSynchThread(void *arg);

void loadDb2MemCache(const Config &config);

#endif
39 changes: 0 additions & 39 deletions src/main.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -437,45 +437,6 @@ int main(int argc, char **argv)
SHA256GenerateScript(config);
}

#ifdef DATABASE_USE_CACHE

/* INIT DB CACHE */
if(config.useAssociativeCache){
Database::useAssociativeCache = true;
//Database::dbMTACache.postConstruct(config.log2DbMTAssociativeCacheIndexesSize, config.log2DbMTAssociativeCacheSize, "MTACache");
Database::dbMTACache.postConstruct(uint64_t(config.dbProgramCacheSize)*uint64_t(1024)*uint64_t(1024), "MTCache");
}
else{
Database::useAssociativeCache = false;
Database::dbMTCache.setName("MTCache");
Database::dbMTCache.setMaxSize(config.dbMTCacheSize*uint64_t(1024)*uint64_t(1024));
}
Database::dbProgramCache.setName("ProgramCache");
Database::dbProgramCache.setMaxSize(config.dbProgramCacheSize*uint64_t(1024)*uint64_t(1024));

if (config.databaseURL != "local") // remote DB
{

if (config.loadDBToMemCache && (config.runAggregatorClient || config.runExecutorServer || config.runHashDBServer))
{
TimerStart(DB_CACHE_LOAD);
// if we have a db cache enabled
if ((Database::dbMTCache.enabled()) || (Database::dbProgramCache.enabled()) || (Database::dbMTACache.enabled()))
{
if (config.loadDBToMemCacheInParallel) {
// Run thread that loads the DB into the dbCache
std::thread loadDBThread (loadDb2MemCache, config);
loadDBThread.detach();
} else {
loadDb2MemCache(config);
}
}
TimerStopAndLog(DB_CACHE_LOAD);
}
}

#endif // DATABASE_USE_CACHE

/* TESTS */

// Test Keccak SM
Expand Down
Loading

0 comments on commit 486cf80

Please sign in to comment.