diff --git a/CMakeLists.txt b/CMakeLists.txt index 8412211ebb..d73d7e36c7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -14,9 +14,9 @@ set( CMAKE_CXX_EXTENSIONS ON ) set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 4) -set(VERSION_MINOR 0) +set(VERSION_MINOR 1) set(VERSION_PATCH 0) -set(VERSION_SUFFIX rc1) +set(VERSION_SUFFIX dev) if(VERSION_SUFFIX) set(VERSION_FULL "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}-${VERSION_SUFFIX}") diff --git a/README.md b/README.md index 436cc0b7c6..f16973bf5c 100644 --- a/README.md +++ b/README.md @@ -72,7 +72,7 @@ git clone --recursive https://github.com/AntelopeIO/leap.git git clone --recursive git@github.com:AntelopeIO/leap.git ``` -> ℹ️ **HTTPS vs. SSH Clone** ℹ️ +> ℹ️ **HTTPS vs. SSH Clone** ℹ️ Both an HTTPS or SSH git clone will yield the same result - a folder named `leap` containing our source code. It doesn't matter which type you use. Navigate into that folder: @@ -96,13 +96,13 @@ git submodule update --init --recursive ### Step 3 - Build Select build instructions below for a [pinned build](#pinned-build) (preferred) or an [unpinned build](#unpinned-build). -> ℹ️ **Pinned vs. Unpinned Build** ℹ️ +> ℹ️ **Pinned vs. Unpinned Build** ℹ️ We have two types of builds for Leap: "pinned" and "unpinned." The only difference is that pinned builds use specific versions for some dependencies hand-picked by the Leap engineers - they are "pinned" to those versions. In contrast, unpinned builds use the default dependency versions available on the build system at the time. We recommend performing a "pinned" build to ensure the compiler and boost versions remain the same between builds of different Leap versions. Leap requires these versions to remain the same, otherwise its state might need to be recovered from a portable snapshot or the chain needs to be replayed. 
-> ⚠️ **A Warning On Parallel Compilation Jobs (`-j` flag)** ⚠️ +> ⚠️ **A Warning On Parallel Compilation Jobs (`-j` flag)** ⚠️ When building C/C++ software, often the build is performed in parallel via a command such as `make -j "$(nproc)"` which uses all available CPU threads. However, be aware that some compilation units (`*.cpp` files) in Leap will consume nearly 4GB of memory. Failures due to memory exhaustion will typically, but not always, manifest as compiler crashes. Using all available CPU threads may also prevent you from doing other things on your computer during compilation. For these reasons, consider reducing this value. -> 🐋 **Docker and `sudo`** 🐋 +> 🐋 **Docker and `sudo`** 🐋 If you are in an Ubuntu docker container, omit `sudo` from all commands because you run as `root` by default. Most other docker containers also exclude `sudo`, especially Debian-family containers. If your shell prompt is a hash tag (`#`), omit `sudo`. #### Pinned Build @@ -111,12 +111,14 @@ Make sure you are in the root of the `leap` repo, then run the `install_depts.sh sudo scripts/install_deps.sh ``` -Next, run the pinned build script. You have to give it three arguments, in the following order: - - A temporary folder, for all dependencies that need to be built from source. - - A build folder, where the binaries you need to install will be built to. - - The number of jobs or CPU cores/threads to use (note the [jobs flag](#step-3---build) warning above). +Next, run the pinned build script. You have to give it three arguments in the following order: +1. A temporary folder, for all dependencies that need to be built from source. +1. A build folder, where the binaries you need to install will be built to. +1. The number of jobs or CPU cores/threads to use (note the [jobs flag](#step-3---build) warning above). 
-The following command runs the `pinned_build.sh` script, specifies a `deps` and `build` folder in the root of the Leap repo for the first two arguments, then builds the packages using all of your computer's CPU threads (Note: you don't need `sudo` for this command): +> 🔒 You do not need to run this script with `sudo` or as root. + +For example, the following command runs the `pinned_build.sh` script, specifies a `deps` and `build` folder in the root of the Leap repo for the first two arguments, then builds the packages using all of your computer's CPU threads: ```bash scripts/pinned_build.sh deps build "$(nproc)" ``` diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp index e8d1a04817..1cf6775c4c 100644 --- a/libraries/chain/block_log.cpp +++ b/libraries/chain/block_log.cpp @@ -1533,7 +1533,7 @@ namespace eosio { namespace chain { ilog("blocks.log and blocks.index agree on number of blocks"); if (interval == 0) { - interval = std::max((log_bundle.log_index.num_blocks() + 7) >> 3, 1); + interval = std::max((log_bundle.log_index.num_blocks() + 7u) >> 3, 1u); } uint32_t expected_block_num = log_bundle.log_data.first_block_num(); diff --git a/libraries/chain/include/eosio/chain/log_index.hpp b/libraries/chain/include/eosio/chain/log_index.hpp index e5bc92bf4c..170e68460b 100644 --- a/libraries/chain/include/eosio/chain/log_index.hpp +++ b/libraries/chain/include/eosio/chain/log_index.hpp @@ -32,7 +32,7 @@ class log_index { bool is_open() const { return file_.is_open(); } uint64_t back() { return nth_block_position(num_blocks()-1); } - int num_blocks() const { return num_blocks_; } + unsigned num_blocks() const { return num_blocks_; } uint64_t nth_block_position(uint32_t n) { file_.seek(n*sizeof(uint64_t)); uint64_t r; diff --git a/libraries/wasm-jit/Include/IR/Operators.h b/libraries/wasm-jit/Include/IR/Operators.h index b7bf6f632e..33f2335410 100644 --- a/libraries/wasm-jit/Include/IR/Operators.h +++ b/libraries/wasm-jit/Include/IR/Operators.h 
@@ -12,10 +12,11 @@ namespace IR struct NoImm {}; struct MemoryImm {}; + PACKED_STRUCT( struct ControlStructureImm { ResultType resultType{}; - }; + }); struct BranchImm { @@ -675,4 +676,19 @@ namespace IR }; IR_API const char* getOpcodeName(Opcode opcode); -} \ No newline at end of file +} + +//paranoia for future platforms +static_assert(sizeof(IR::OpcodeAndImm<IR::NoImm>) == 2); +static_assert(sizeof(IR::OpcodeAndImm<IR::MemoryImm>) == 2); +static_assert(sizeof(IR::OpcodeAndImm<IR::ControlStructureImm>) == 3); +static_assert(sizeof(IR::OpcodeAndImm<IR::BranchImm>) == 6); +static_assert(sizeof(IR::OpcodeAndImm<IR::BranchTableImm>) == 18); +static_assert(sizeof(IR::OpcodeAndImm<IR::LiteralImm<IR::I32>>) == 6); +static_assert(sizeof(IR::OpcodeAndImm<IR::LiteralImm<IR::I64>>) == 10); +static_assert(sizeof(IR::OpcodeAndImm<IR::LiteralImm<IR::F32>>) == 6); +static_assert(sizeof(IR::OpcodeAndImm<IR::LiteralImm<IR::F64>>) == 10); +static_assert(sizeof(IR::OpcodeAndImm<IR::GetOrSetVariableImm<false>>) == 6); +static_assert(sizeof(IR::OpcodeAndImm<IR::CallImm>) == 6); +static_assert(sizeof(IR::OpcodeAndImm<IR::CallIndirectImm>) == 6); +static_assert(sizeof(IR::OpcodeAndImm<IR::LoadOrStoreImm<0>>) == 10); diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index 33cf17dc90..1b4af397eb 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -1727,4 +1727,7 @@ def waitForTrxGeneratorsSpinup(self, nodeId: int, numGenerators: int, timeout: i for line in f: firstTrxs.append(line.rstrip('\n')) Utils.Print(f"first transactions: {firstTrxs}") - node.waitForTransactionsInBlock(firstTrxs) + status = node.waitForTransactionsInBlock(firstTrxs) + if status is None: + Utils.Print('ERROR: Failed to spin up transaction generators: never received first transactions') + return status diff --git a/tests/TestHarness/queries.py b/tests/TestHarness/queries.py index 80b402f14d..ffaf1dd115 100644 --- a/tests/TestHarness/queries.py +++ b/tests/TestHarness/queries.py @@ -118,10 +118,10 @@ def getTransBlockNum(trans): # could be a transaction response if cntxt.hasKey("processed"): cntxt.add("processed") - cntxt.add("action_traces") - cntxt.index(0) if not cntxt.isSectionNull("except"): return "no_block" + 
cntxt.add("action_traces") + cntxt.index(0) return cntxt.add("block_num") # or what the trace api plugin returns @@ -242,7 +242,7 @@ def getTransaction(self, transId, silentErrors=False, exitOnError=False, delayed assert(isinstance(transId, str)) exitOnErrorForDelayed=not delayedRetry and exitOnError timeout=3 - cmdDesc="get transaction_trace" + cmdDesc=self.fetchTransactionCommand() cmd="%s %s" % (cmdDesc, transId) msg="(transaction id=%s)" % (transId); for i in range(0,(int(60/timeout) - 1)): @@ -295,8 +295,8 @@ def getBlockNumByTransId(self, transId, exitOnError=True, delayedRetry=True, blo refBlockNum=None key="" try: - key="[transaction][transaction_header][ref_block_num]" - refBlockNum=trans["transaction_header"]["ref_block_num"] + key = self.fetchKeyCommand() + refBlockNum = self.fetchRefBlock(trans) refBlockNum=int(refBlockNum)+1 except (TypeError, ValueError, KeyError) as _: Utils.Print("transaction%s not found. Transaction: %s" % (key, trans)) @@ -346,9 +346,9 @@ def getEosAccount(self, name, exitOnError=False, returnType=ReturnType.json): return self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg, returnType=returnType) def getTable(self, contract, scope, table, exitOnError=False): - cmdDesc = "get table --time-limit 999" - cmd="%s %s %s %s" % (cmdDesc, contract, scope, table) - msg="contract=%s, scope=%s, table=%s" % (contract, scope, table); + cmdDesc = "get table" + cmd=f"{cmdDesc} {self.cleosLimit} {contract} {scope} {table}" + msg=f"contract={contract}, scope={scope}, table={table}" return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) def getTableAccountBalance(self, contract, scope): @@ -529,7 +529,7 @@ def getAccountCodeHash(self, account): return m.group(1) except subprocess.CalledProcessError as ex: end=time.perf_counter() - msg=ex.output.decode("utf-8") + msg=ex.stderr.decode("utf-8") Utils.Print("ERROR: Exception during code hash retrieval. cmd Duration: %.3f sec. 
%s" % (end-start, msg)) return None @@ -580,8 +580,9 @@ def processCleosCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, ex except subprocess.CalledProcessError as ex: if not silentErrors: end=time.perf_counter() - msg=ex.output.decode("utf-8") - errorMsg="Exception during \"%s\". Exception message: %s. cmd Duration=%.3f sec. %s" % (cmdDesc, msg, end-start, exitMsg) + out=ex.output.decode("utf-8") + msg=ex.stderr.decode("utf-8") + errorMsg="Exception during \"%s\". Exception message: %s. stdout: %s. cmd Duration=%.3f sec. %s" % (cmdDesc, msg, out, end-start, exitMsg) if exitOnError: Utils.cmdError(errorMsg) Utils.errorExit(errorMsg) diff --git a/tests/TestHarness/transactions.py b/tests/TestHarness/transactions.py index ea765de94d..4c16a17438 100644 --- a/tests/TestHarness/transactions.py +++ b/tests/TestHarness/transactions.py @@ -107,7 +107,7 @@ def transferFunds(self, source, destination, amountStr, memo="memo", force=False self.trackCmdTransaction(trans, reportStatus=reportStatus) except subprocess.CalledProcessError as ex: end=time.perf_counter() - msg=ex.output.decode("utf-8") + msg=ex.stderr.decode("utf-8") Utils.Print("ERROR: Exception during funds transfer. cmd Duration: %.3f sec. %s" % (end-start, msg)) if exitOnError: Utils.cmdError("could not transfer \"%s\" from %s to %s" % (amountStr, source, destination)) @@ -131,7 +131,7 @@ def transferFundsAsync(self, source, destination, amountStr, memo="memo", force= Utils.Print("cmd Duration: %.3f sec" % (end-start)) except subprocess.CalledProcessError as ex: end=time.perf_counter() - msg=ex.output.decode("utf-8") + msg=ex.stderr.decode("utf-8") Utils.Print("ERROR: Exception during spawn of funds transfer. cmd Duration: %.3f sec. 
%s" % (end-start, msg)) if exitOnError: Utils.cmdError("could not transfer \"%s\" from %s to %s" % (amountStr, source, destination)) @@ -158,15 +158,15 @@ def publishContract(self, account, contractDir, wasmFile, abiFile, waitForTransB except subprocess.CalledProcessError as ex: if not shouldFail: end=time.perf_counter() - msg=ex.output.decode("utf-8") - Utils.Print("ERROR: Exception during set contract. cmd Duration: %.3f sec. %s" % (end-start, msg)) + out=ex.output.decode("utf-8") + msg=ex.stderr.decode("utf-8") + Utils.Print("ERROR: Exception during set contract. stderr: %s. stdout: %s. cmd Duration: %.3f sec." % (msg, out, end-start)) return None else: retMap={} retMap["returncode"]=ex.returncode retMap["cmd"]=ex.cmd retMap["output"]=ex.output - retMap["stdout"]=ex.stdout retMap["stderr"]=ex.stderr return retMap @@ -213,7 +213,7 @@ def pushTransaction(self, trans, opts="", silentErrors=False, permissions=None): Utils.Print("cmd Duration: %.3f sec" % (end-start)) return (NodeosQueries.getTransStatus(retTrans) == 'executed', retTrans) except subprocess.CalledProcessError as ex: - msg=ex.output.decode("utf-8") + msg=ex.stderr.decode("utf-8") if not silentErrors: end=time.perf_counter() Utils.Print("ERROR: Exception during push transaction. cmd Duration=%.3f sec. %s" % (end - start, msg)) @@ -245,7 +245,6 @@ def pushMessage(self, account, action, data, opts, silentErrors=False, signature except subprocess.CalledProcessError as ex: msg=ex.stderr.decode("utf-8") output=ex.output.decode("utf-8") - msg=ex.output.decode("utf-8") if not silentErrors: end=time.perf_counter() Utils.Print("ERROR: Exception during push message. stderr: %s. stdout: %s. cmd Duration=%.3f sec." 
% (msg, output, end - start)) diff --git a/tests/nodeos_snapshot_diff_test.py b/tests/nodeos_snapshot_diff_test.py index 616ee214e2..72b8d57e1d 100755 --- a/tests/nodeos_snapshot_diff_test.py +++ b/tests/nodeos_snapshot_diff_test.py @@ -133,7 +133,8 @@ def waitForBlock(node, blockNum, blockType=BlockType.head, timeout=None, reportI acctPrivKeysList=[account1PrivKey,account2PrivKey], nodeId=snapshotNodeId, tpsPerGenerator=targetTpsPerGenerator, numGenerators=trxGeneratorCnt, durationSec=testTrxGenDurationSec, waitToComplete=False) - cluster.waitForTrxGeneratorsSpinup(nodeId=snapshotNodeId, numGenerators=trxGeneratorCnt) + status = cluster.waitForTrxGeneratorsSpinup(nodeId=snapshotNodeId, numGenerators=trxGeneratorCnt) + assert status is not None, "ERROR: Failed to spinup Transaction Generators" blockNum=node0.getBlockNum(BlockType.head) timePerBlock=500 diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index e9dedc14d7..364d4948a5 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -118,7 +118,8 @@ def waitForNodeStarted(node): tpsPerGenerator=targetTpsPerGenerator, numGenerators=trxGeneratorCnt, durationSec=testTrxGenDurationSec, waitToComplete=False) - cluster.waitForTrxGeneratorsSpinup(nodeId=node0.nodeId, numGenerators=trxGeneratorCnt) + status = cluster.waitForTrxGeneratorsSpinup(nodeId=node0.nodeId, numGenerators=trxGeneratorCnt) + assert status is not None, "ERROR: Failed to spinup Transaction Generators" blockNum=head(node0) timePerBlock=500