diff --git a/src/hashdb64/database_64.cpp b/src/hashdb64/database_64.cpp
index e43c28ba8..e8959d6be 100644
--- a/src/hashdb64/database_64.cpp
+++ b/src/hashdb64/database_64.cpp
@@ -26,8 +26,11 @@
 // Helper functions
 string removeBSXIfExists64(string s) {return ((s.at(0) == '\\') && (s.at(1) == 'x')) ? s.substr(2) : s;}
 
-Database64::Database64 (Goldilocks &fr, const Config &config) : headerPageNumber(0)
+Database64::Database64 (Goldilocks &fr, const Config &config) : headerPageNumber(0), currentFlushId(0)
 {
+    // Init mutex
+    pthread_mutex_init(&mutex, NULL);
+
     zkresult zkr;
     headerPageNumber = 0;
     zkr = HeaderPage::InitEmptyPage(headerPageNumber);
@@ -36,9 +39,6 @@ Database64::Database64 (Goldilocks &fr, const Config &config) : headerPageNumber
         zklog.error("Database64::Database64() failed calling HeaderPage::InitEmptyPage() result=" + zkresult2string(zkr));
         exitProcess();
     }
-    // Initialize semaphores
-    //sem_init(&senderSem, 0, 0);
-    //sem_init(&getFlushDataSem, 0, 0);
 };
 
 Database64::~Database64()
@@ -118,20 +118,6 @@ zkresult Database64::readKV(const Goldilocks::Element (&root)[4], const Goldiloc
     return zkr;
 }
 
-/*
-zkresult Database64::readKV(const Goldilocks::Element (&root)[4], vector &KVLs, DatabaseMap *dbReadLog){
-    zkresult zkr;
-    for (uint64_t i=0; i
-zkresult Database64::getFlushData(uint64_t flushId, uint64_t &lastSentFlushId, unordered_map<string, string> (&nodes), unordered_map<string, string> (&program), string &nodesStateRoot)
-{
-#if 0
-    //zklog.info("--> getFlushData()");
-
-    // Set the deadline to now + 60 seconds
-    struct timespec deadline;
-    clock_gettime(CLOCK_REALTIME, &deadline);
-    deadline.tv_sec += 60;
-
-    // Try to get the semaphore
-    int iResult;
-    iResult = sem_timedwait(&getFlushDataSem, &deadline);
-    if (iResult != 0)
-    {
-        zklog.info("Database64::getFlushData() timed out");
-        return ZKR_SUCCESS;
-    }
-
-    multiWrite.Lock();
-    MultiWriteData64 &data = multiWrite.data[multiWrite.synchronizingDataIndex];
-
-    zklog.info("Database64::getFlushData woke up: pendingToFlushDataIndex=" + to_string(multiWrite.pendingToFlushDataIndex) +
-        " storingDataIndex=" + to_string(multiWrite.storingDataIndex) +
-        " synchronizingDataIndex=" + to_string(multiWrite.synchronizingDataIndex) +
-        " nodes=" + to_string(data.nodes.size()) +
-        " program=" + to_string(data.program.size()) +
-        " nodesStateRoot=" + data.nodesStateRoot);
-
-    if (data.nodes.size() > 0)
-    {
-        nodes = data.nodes;
-    }
-
-    if (data.program.size() > 0)
-    {
-        program = data.program;
-    }
-
-    if (data.nodesStateRoot.size() > 0)
-    {
-        nodesStateRoot = data.nodesStateRoot;
-    }
-
-    multiWrite.Unlock();
-
-    //zklog.info("<-- getFlushData()");
-#endif
+    Lock();
+    storedFlushId = currentFlushId;
+    storingFlushId = currentFlushId;
+    lastFlushId = currentFlushId;
+    pendingToFlushNodes = 0;
+    pendingToFlushProgram = 0;
+    storingNodes = 0;
+    storingProgram = 0;
+    Unlock();
     return ZKR_SUCCESS;
 }
diff --git a/src/hashdb64/database_64.hpp b/src/hashdb64/database_64.hpp
index 5845198a0..d018859c0 100644
--- a/src/hashdb64/database_64.hpp
+++ b/src/hashdb64/database_64.hpp
@@ -66,6 +66,8 @@ class Database64
 
     bool bInitialized = false;
    uint64_t headerPageNumber;
 
+    pthread_mutex_t mutex;
+    uint64_t currentFlushId;
 
 public:
@@ -96,8 +98,9 @@ class Database64
     zkresult flush(uint64_t &flushId, uint64_t &lastSentFlushId);
     zkresult getFlushStatus(uint64_t &storedFlushId, uint64_t &storingFlushId, uint64_t &lastFlushId, uint64_t &pendingToFlushNodes, uint64_t &pendingToFlushProgram, uint64_t &storingNodes, uint64_t &storingProgram);
 
-    // Get flush data, written to database by dbSenderThread; it blocks
-    zkresult getFlushData(uint64_t flushId, uint64_t &lastSentFlushId, unordered_map<string, string> (&nodes), unordered_map<string, string> (&program), string &nodesStateRoot);
+    // Lock/Unlock
+    void Lock(void) { pthread_mutex_lock(&mutex); };
+    void Unlock(void) { pthread_mutex_unlock(&mutex); };
 };
 
 #endif
\ No newline at end of file
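With Database64::getFlushData() removed, a caller that needs to know when a flush has landed is expected to poll getFlushStatus() instead, as the re-enabled loop in hashdb64_workflow_test.cpp further below does. The sketch that follows is illustrative only and not part of the patch: the helper name waitForFlush and the break condition storedFlushId >= flushId are assumptions, while the getFlushStatus() signature matches the declaration kept in database_64.hpp. With the simplified implementation above, the call reports currentFlushId for all three ids and zero pending counters, so the loop exits on its first iteration.

// Illustrative sketch (not part of this patch): wait until a given flushId has
// been stored, by polling the simplified Database64::getFlushStatus().
// The helper name and the break condition are assumptions for illustration.
#include <unistd.h>
#include <cstdint>
#include "database_64.hpp"

void waitForFlush (Database64 &db64, uint64_t flushId)
{
    while (true)
    {
        uint64_t storedFlushId, storingFlushId, lastFlushId;
        uint64_t pendingToFlushNodes, pendingToFlushProgram, storingNodes, storingProgram;

        // After this patch, getFlushStatus() simply takes the lock and reports
        // currentFlushId for all three ids with zero pending/storing counters.
        db64.getFlushStatus(storedFlushId, storingFlushId, lastFlushId,
                            pendingToFlushNodes, pendingToFlushProgram,
                            storingNodes, storingProgram);

        if (storedFlushId >= flushId)
        {
            break;
        }
        sleep(1);
    }
}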
diff --git a/src/service/hashdb/hashdb.cpp b/src/service/hashdb/hashdb.cpp
index 468365b08..2f51866f0 100644
--- a/src/service/hashdb/hashdb.cpp
+++ b/src/service/hashdb/hashdb.cpp
@@ -399,7 +399,8 @@ zkresult HashDB::getFlushData(uint64_t flushId, uint64_t &lastSentFlushId, unord
 
     if (config.hashDB64)
     {
-        zkr = db64.getFlushData(flushId, lastSentFlushId, nodes, program, nodesStateRoot);
+        zklog.error("HashDB::getFlushData() called with config.hashDB64=true");
+        return ZKR_DB_ERROR;
     }
     else
     {
diff --git a/test/service/hashdb/hashdb64_workflow_test.cpp b/test/service/hashdb/hashdb64_workflow_test.cpp
index b21ab803c..137d2eb23 100644
--- a/test/service/hashdb/hashdb64_workflow_test.cpp
+++ b/test/service/hashdb/hashdb64_workflow_test.cpp
@@ -128,7 +128,7 @@ uint64_t HashDB64WorkflowTest (const Config& config)
         for (uint64_t i=0; i<4; i++) root[i] = consolidatedStateRoot[i];
 
         // Wait for data to be sent
-        /*while (true)
+        while (true)
         {
             uint64_t storedFlushId, storingFlushId, lastFlushId, pendingToFlushNodes, pendingToFlushProgram, storingNodes, storingProgram;
             string proverId;
@@ -141,7 +141,7 @@ uint64_t HashDB64WorkflowTest (const Config& config)
             }
             sleep(1);
         }
-        zklog.info("FLUSHED");*/
+        zklog.info("FLUSHED");
 
         // Call ReadTree with the old state root to get the hashes of the initial values of all read or written keys
         /*vector oldHashValues;