From 1cdab9da78937d82ae60656ef7e16cab2010ba7f Mon Sep 17 00:00:00 2001 From: Wizz Wallet <153743376+wizz-wallet-dev@users.noreply.github.com> Date: Fri, 17 May 2024 15:51:33 +0800 Subject: [PATCH 01/13] Split shared sessions --- contrib/query.py | 36 +- electrumx/__init__.py | 6 - electrumx/lib/atomicals_blueprint_builder.py | 14 +- electrumx/lib/coins.py | 32 +- electrumx/server/block_processor.py | 119 +- electrumx/server/controller.py | 9 +- electrumx/server/db.py | 27 +- electrumx/server/session/__init__.py | 4 + .../electrumx_session.py} | 2230 ++++------------- .../server/{ => session}/http_session.py | 767 +++--- electrumx/server/session/session_base.py | 193 ++ electrumx/server/session/session_manager.py | 1260 ++++++++++ electrumx/server/session/shared_session.py | 64 + electrumx/version.py | 3 + electrumx_compact_history | 2 +- electrumx_server | 3 +- tests/server/test_api.py | 5 +- 17 files changed, 2494 insertions(+), 2280 deletions(-) create mode 100644 electrumx/server/session/__init__.py rename electrumx/server/{session.py => session/electrumx_session.py} (50%) rename electrumx/server/{ => session}/http_session.py (76%) create mode 100644 electrumx/server/session/session_base.py create mode 100644 electrumx/server/session/session_manager.py create mode 100644 electrumx/server/session/shared_session.py create mode 100644 electrumx/version.py diff --git a/contrib/query.py b/contrib/query.py index 6cf8d58f..c960cf34 100755 --- a/contrib/query.py +++ b/contrib/query.py @@ -7,33 +7,34 @@ # See the file "LICENCE" for information about the copyright # and warranty status of this software. -'''Script to query the database for debugging purposes. +"""Script to query the database for debugging purposes. Not currently documented; might become easier to use in future. 
-''' +""" import argparse import asyncio -from electrumx import Env -from electrumx.server.db import DB from electrumx.lib.hash import hash_to_hex_str, Base58Error +from electrumx.server.db import DB +from electrumx.server.env import Env +from electrumx.server.history import History -async def print_stats(hist_db, utxo_db): - count = 0 - for key in utxo_db.iterator(prefix=b'u', include_value=False): - count += 1 - print(f'UTXO count: {utxos}') +async def print_stats(hist_db: History, utxo_db): + utxo_count = 0 + for _ in utxo_db.iterator(prefix=b'u', include_value=False): + utxo_count += 1 + print(f'UTXO count: {utxo_count}') - count = 0 - for key in utxo_db.iterator(prefix=b'h', include_value=False): - count += 1 - print(f'HashX count: {count}') + hash_count = 0 + for _ in utxo_db.iterator(prefix=b'h', include_value=False): + hash_count += 1 + print(f'HashX count: {hash_count}') hist = 0 hist_len = 0 - for key, value in hist_db.iterator(prefix=b'H'): + for key, value in hist_db.db.iterator(prefix=b'H'): hist += 1 hist_len += len(value) // 4 print(f'History rows {hist:,d} entries {hist_len:,d}') @@ -64,7 +65,7 @@ async def query(args): await db.open_for_serving() if not args.scripts: - await print_stats(db.hist_db, db.utxo_db) + await print_stats(db.history, db.utxo_db) return limit = args.limit for arg in args.scripts: @@ -97,15 +98,16 @@ def main(): parser = argparse.ArgumentParser( 'query.py', description='Invoke with COIN and DB_DIRECTORY set in the ' - 'environment as they would be invoking electrumx_server' + 'environment as they would be invoking electrumx_server' ) parser.add_argument('-l', '--limit', metavar='limit', type=int, default=10, help=f'maximum number of entries to ' - f'return (default: {default_limit})') + f'return (default: {default_limit})') parser.add_argument('scripts', nargs='*', default=[], type=str, help='hex scripts to query') args = parser.parse_args() asyncio.run(query(args)) + if __name__ == '__main__': main() diff --git a/electrumx/__init__.py b/electrumx/__init__.py index 88f1ae85..e69de29b 100644 --- a/electrumx/__init__.py +++ b/electrumx/__init__.py @@ -1,6 +0,0 @@ -__version__ = "1.16.0" -version = f'ElectrumX {__version__}' -version_short = __version__ - -from electrumx.server.controller import Controller -from electrumx.server.env import Env diff --git a/electrumx/lib/atomicals_blueprint_builder.py b/electrumx/lib/atomicals_blueprint_builder.py index 9e32fb23..a65fcfb5 100644 --- a/electrumx/lib/atomicals_blueprint_builder.py +++ b/electrumx/lib/atomicals_blueprint_builder.py @@ -23,7 +23,7 @@ def __init__(self, atomical_id_to_expected_outs_map, fts_burned, cleanly_assigne self.cleanly_assigned = cleanly_assigned self.fts_burned = fts_burned self.atomicals_list = atomicals_list - + def __repr__(self): return f'FtColoringSummary cleanly_assigned: {self.cleanly_assigned}, fts_burned: {self.fts_burned}, atomicals_list: {self.atomicals_list}' @@ -32,7 +32,7 @@ class ExpectedOutputSet: def __init__(self, expected_outputs, expected_values): self.expected_outputs = expected_outputs self.expected_values = expected_values - + def __repr__(self): return f'ExpectedOutputSet expected_outputs: {self.expected_outputs}, expected_values: {self.expected_values}' @@ -690,7 +690,15 @@ def validate_ft_transfer_has_no_inflation(self, atomical_id_to_expected_outs_map input_value = ft_info['atomical_value'] if sum_out_value and sum_out_value > input_value: atomical_id_compact = location_id_bytes_to_compact(atomical_id) - raise 
AtomicalsTransferBlueprintBuilderError(f'validate_ft_transfer_has_no_inflation: Fatal error the output sum of outputs is greater than input sum for Atomical: atomical_id={atomical_id_compact} input_value={input_value} sum_out_value={sum_out_value} ft_atomicals={ft_atomicals}') + raise AtomicalsTransferBlueprintBuilderError( + 'validate_ft_transfer_has_no_inflation: ' + 'Fatal error the output sum of outputs is greater than input sum for Atomical: ' + f'atomical_id={atomical_id_compact} ' + f'input_value={input_value} ' + f'sum_out_value={sum_out_value} ' + f'{hash_to_hex_str(self.tx_hash)} ' + f'ft_atomicals={ft_atomicals}' + ) def is_split_operation(self): return is_split_operation(self.operations_found_at_inputs) diff --git a/electrumx/lib/coins.py b/electrumx/lib/coins.py index 27eecf64..08769ba7 100644 --- a/electrumx/lib/coins.py +++ b/electrumx/lib/coins.py @@ -48,9 +48,9 @@ import electrumx.lib.tx_axe as lib_tx_axe import electrumx.server.block_processor as block_proc import electrumx.server.daemon as daemon -from electrumx.server.session import (ElectrumX, DashElectrumX, - SmartCashElectrumX, AuxPoWElectrumX, - NameIndexElectrumX, NameIndexAuxPoWElectrumX) +from electrumx.server.session.electrumx_session import (ElectrumX, DashElectrumX, + SmartCashElectrumX, AuxPoWElectrumX, + NameIndexElectrumX, NameIndexAuxPoWElectrumX) @dataclass @@ -65,8 +65,19 @@ class CoinError(Exception): '''Exception raised for coin-related errors.''' -class Coin: - '''Base class of coin hierarchy.''' +class CoinHeaderHashMixin: + @classmethod + def header_hash(cls, header): + """Given a header return hash""" + return double_sha256(header) + + +class CoinShortNameMixin: + SHORTNAME: str + + +class Coin(CoinHeaderHashMixin, CoinShortNameMixin): + """Base class of coin hierarchy.""" REORG_LIMIT = 200 # Not sure if these are coin-specific @@ -225,11 +236,6 @@ def privkey_WIF(cls, privkey_bytes, compressed): payload.append(0x01) return cls.ENCODE_CHECK(payload) - @classmethod - def header_hash(cls, header): - '''Given a header return hash''' - return double_sha256(header) - @classmethod def header_prevhash(cls, header): '''Given a header return previous hash''' @@ -329,7 +335,7 @@ def block_header(cls, block, height): return deserializer.read_header(cls.BASIC_HEADER_SIZE) -class ScryptMixin: +class ScryptMixin(CoinHeaderHashMixin): DESERIALIZER = lib_tx.DeserializerTxTime HEADER_HASH = None @@ -358,7 +364,7 @@ class KomodoMixin: DESERIALIZER = lib_tx.DeserializerZcash -class BitcoinMixin: +class BitcoinMixin(CoinShortNameMixin): SHORTNAME = "BTC" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488b21e") @@ -847,7 +853,7 @@ def hashX_from_script(cls, script): return super().hashX_from_script(address_script) -class BitcoinTestnetMixin: +class BitcoinTestnetMixin(CoinShortNameMixin): SHORTNAME = "XTN" NET = "testnet" XPUB_VERBYTES = bytes.fromhex("043587cf") diff --git a/electrumx/server/block_processor.py b/electrumx/server/block_processor.py index 93f7caf1..e6ee1eee 100644 --- a/electrumx/server/block_processor.py +++ b/electrumx/server/block_processor.py @@ -9,23 +9,19 @@ '''Block prefetcher and chain processor.''' import asyncio -import os import time from typing import Sequence, Tuple, List, Callable, Optional, TYPE_CHECKING, Type, Union from aiorpcx import run_in_thread, CancelledError -import electrumx -from electrumx.server.daemon import DaemonError, Daemon +from electrumx.lib.atomicals_blueprint_builder import AtomicalsTransferBlueprintBuilder from electrumx.lib.hash import hash_to_hex_str, HASHX_LEN, 
double_sha256 from electrumx.lib.script import SCRIPTHASH_LEN, is_unspendable_legacy, is_unspendable_genesis +from electrumx.lib.tx import Tx from electrumx.lib.util import ( - chunks, class_logger, pack_le_uint32, unpack_le_uint32, pack_le_uint64, unpack_le_uint64, pack_be_uint64, unpack_be_uint64, OldTaskGroup, pack_byte, pack_le_uint16, unpack_le_uint16_from + chunks, class_logger, pack_le_uint32, unpack_le_uint32, pack_le_uint64, unpack_le_uint64, pack_be_uint64, + OldTaskGroup, pack_le_uint16 ) -import math -from electrumx.lib.tx import Tx -from electrumx.server.db import FlushData, COMP_TXID_LEN, DB -from electrumx.server.history import TXNUM_LEN from electrumx.lib.util_atomicals import ( is_within_acceptable_blocks_for_general_reveal, auto_encode_bytes_elements, @@ -35,29 +31,27 @@ get_subname_request_candidate_status, is_within_acceptable_blocks_for_name_reveal, compact_to_location_id_bytes, - is_proof_of_work_prefix_match, format_name_type_candidates_to_rpc_for_subname, format_name_type_candidates_to_rpc, - pad_bytes_n, - has_requested_proof_of_work, - is_valid_container_string_name, + pad_bytes_n, + has_requested_proof_of_work, + is_valid_container_string_name, calculate_expected_bitwork, expand_spend_utxo_data, - encode_tx_hash_hex, SUBREALM_MINT_PATH, DMINT_PATH, MINT_SUBNAME_RULES_BECOME_EFFECTIVE_IN_BLOCKS, - MINT_REALM_CONTAINER_TICKER_COMMIT_REVEAL_DELAY_BLOCKS, - MINT_SUBNAME_COMMIT_PAYMENT_DELAY_BLOCKS, - is_valid_dmt_op_format, - is_compact_atomical_id, + MINT_REALM_CONTAINER_TICKER_COMMIT_REVEAL_DELAY_BLOCKS, + MINT_SUBNAME_COMMIT_PAYMENT_DELAY_BLOCKS, + is_valid_dmt_op_format, + is_compact_atomical_id, is_valid_regex, - unpack_mint_info, - parse_protocols_operations_from_witness_array, - location_id_bytes_to_compact, - is_valid_subrealm_string_name, - is_valid_realm_string_name, - is_valid_ticker_string, + unpack_mint_info, + parse_protocols_operations_from_witness_array, + location_id_bytes_to_compact, + is_valid_subrealm_string_name, + is_valid_realm_string_name, + is_valid_ticker_string, get_mint_info_op_factory, convert_db_mint_info_to_rpc_mint_info_format, calculate_latest_state_from_mod_history, @@ -72,21 +66,19 @@ is_txid_valid_for_perpetual_bitwork, auto_encode_bytes_items ) - -from electrumx.lib.atomicals_blueprint_builder import AtomicalsTransferBlueprintBuilder - -import copy +from electrumx.server.daemon import DaemonError, Daemon +from electrumx.server.db import FlushData, COMP_TXID_LEN, DB +from electrumx.server.history import TXNUM_LEN +from electrumx.version import electrumx_version if TYPE_CHECKING: from electrumx.lib.coins import Coin, AtomicalsCoinMixin from electrumx.server.env import Env from electrumx.server.controller import Notifications -from cbor2 import dumps, loads, CBORDecodeError -import pickle +from cbor2 import dumps, loads import pylru -import regex -import sys +import sys import re TX_HASH_LEN = 32 @@ -184,7 +176,7 @@ async def _prefetch_blocks(self): first = self.fetched_height + 1 # Try and catch up all blocks but limit to room in cache. 
cache_room = max(self.min_cache_size // self.ave_size, 1)
-            count = min(daemon_height - self.fetched_height, cache_room)
+            count: int = min(daemon_height - self.fetched_height, cache_room)
             # Don't make too large a request
             count = min(self.coin.max_fetch_blocks(first), max(count, 0))
             if not count:
@@ -193,8 +185,7 @@ async def _prefetch_blocks(self):

                 hex_hashes = await daemon.block_hex_hashes(first, count)
                 if self.caught_up:
-                    self.logger.info(f'new block height {first + count-1:,d} '
-                                     f'hash {hex_hashes[-1]}')
+                    self.logger.info(f'new block height {first + count - 1:,d} hash {hex_hashes[-1]}')
                 blocks = await daemon.raw_blocks(hex_hashes)

                 assert count == len(blocks)
@@ -934,9 +925,14 @@ def spend_atomicals_utxo(self, tx_hash: bytes, tx_idx: int, live_run) -> bytes:
             found_at_least_one = False
             for atomical_a_db_key, atomical_a_db_value in self.db.utxo_db.iterator(prefix=prefix):
                 found_at_least_one = True
-            # For live_run == True we must throw an exception since the b'a' record should always be there when we are spending
-            if live_run and found_at_least_one == False:
-                raise IndexError(f'Did not find expected at least one entry for atomicals table for atomical: {location_id_bytes_to_compact(atomical_id)} at location {location_id_bytes_to_compact(location_id)}')
+            # For live_run == True we must throw an exception since the b'a' record
+            # should always be there when we are spending
+            if live_run and not found_at_least_one:
+                raise IndexError(
+                    'Did not find expected at least one entry for atomicals table for atomical: '
+                    f'{location_id_bytes_to_compact(atomical_id)} at location '
+                    f'{location_id_bytes_to_compact(location_id)}'
+                )
             # Only do the db delete if this was a live run
             if live_run:
                 self.delete_general_data(b'a' + atomical_id + location_id)
@@ -965,14 +961,20 @@ def delete_state_data(self, db_key_prefix, db_key_suffix, expected_entry_value):
         if state_map:
             cached_value = state_map.pop(db_key_suffix, None)
             if cached_value != expected_entry_value:
-                raise IndexError(f'IndexError: delete_state_data cache data does not match expected value {expected_entry_value} {db_value}')
+                raise IndexError(
+                    'IndexError: delete_state_data cache data does not match expected value '
+                    f'{expected_entry_value} {cached_value}'
+                )
             # return intentionally fall through to catch in db just in case

         db_delete_key = db_key_prefix + db_key_suffix
         db_value = self.db.utxo_db.get(db_delete_key)
         if db_value:
             if db_value != expected_entry_value:
-                raise IndexError(f'IndexError: delete_state_data db data does not match expected atomical id {expected_entry_value} {db_value}')
+                raise IndexError(
+                    'IndexError: delete_state_data db data does not match expected atomical id '
+                    f'{expected_entry_value} {db_value}'
+                )
             self.delete_general_data(db_delete_key)
         return cached_value or db_value
@@ -1055,17 +1057,25 @@ def delete_pay_record(self, atomical_id, tx_num, expected_entry_value, db_prefix
         return cached_value or db_value

     # Delete the distributed mint data that is used to track how many mints were made
-    def delete_decentralized_mint_data(self, atomical_id, location_id) -> bytes:
+    def delete_decentralized_mint_data(self, atomical_id, location_id):
         cache_map = self.distmint_data_cache.get(atomical_id, None)
-        if cache_map != None:
+        if cache_map is not None:
             cache_map.pop(location_id, None)
-            self.logger.info(f'delete_decentralized_mint_data: distmint_data_cache. 
location_id={location_id_bytes_to_compact(location_id)}, atomical_id={location_id_bytes_to_compact(atomical_id)}') + self.logger.info( + 'delete_decentralized_mint_data: distmint_data_cache. ' + f'location_id={location_id_bytes_to_compact(location_id)}, ' + f'atomical_id={location_id_bytes_to_compact(atomical_id)}' + ) gi_key = b'gi' + atomical_id + location_id gi_value = self.db.utxo_db.get(gi_key) if gi_value: # not do the i entry beuse it's deleted elsewhere self.delete_general_data(gi_key) - self.logger.info(f'delete_decentralized_mint_data: db_deletes:. location_id={location_id_bytes_to_compact(location_id)}, atomical_id={location_id_bytes_to_compact(atomical_id)}') + self.logger.info( + 'delete_decentralized_mint_data: db_deletes:. ' + f'location_id={location_id_bytes_to_compact(location_id)}, ' + f'atomical_id={location_id_bytes_to_compact(atomical_id)}' + ) def log_subrealm_request(self, method, msg, status, subrealm, parent_realm_atomical_id, height): self.logger.info(f'{method} - {msg}, status={status} subrealm={subrealm}, parent_realm_atomical_id={parent_realm_atomical_id.hex()}, height={height}') @@ -3581,7 +3591,7 @@ def spend_utxo(self, tx_hash: bytes, tx_idx: int) -> bytes: ''' # Fast track is it being in the cache idx_packed = pack_le_uint32(tx_idx) - cache_value = self.utxo_cache.pop(tx_hash + idx_packed, None) + cache_value: bytes | None = self.utxo_cache.pop(tx_hash + idx_packed, None) if cache_value: return cache_value @@ -3641,8 +3651,7 @@ async def _first_caught_up(self): self.db.first_sync = False await self.flush(True) if first_sync: - self.logger.info(f'{electrumx.version} synced to ' - f'height {self.height:,d}') + self.logger.info(f'{electrumx_version} synced to height {self.height:,d}') # Reopen for serving await self.db.open_for_serving() @@ -3703,8 +3712,14 @@ async def calc_reorg_range(self, count): class NameIndexBlockProcessor(BlockProcessor): - def advance_txs(self, txs, is_unspendable): - result = super().advance_txs(txs, is_unspendable) + def advance_txs( + self, + txs: Sequence[Tuple[Tx, bytes]], + is_unspendable: Callable[[bytes], bool], + header, + height + ): + result = super().advance_txs(txs, is_unspendable, header, height) tx_num = self.tx_count - len(txs) script_name_hashX = self.coin.name_hashX_from_script @@ -3734,7 +3749,13 @@ def advance_txs(self, txs, is_unspendable): class LTORBlockProcessor(BlockProcessor): - def advance_txs(self, txs, is_unspendable): + def advance_txs( + self, + txs: Sequence[Tuple[Tx, bytes]], + is_unspendable: Callable[[bytes], bool], + header, + height + ): self.tx_hashes.append(b''.join(tx_hash for tx, tx_hash in txs)) # Use local vars for speed in the loops diff --git a/electrumx/server/controller.py b/electrumx/server/controller.py index 6e0283f1..ac1c8aef 100644 --- a/electrumx/server/controller.py +++ b/electrumx/server/controller.py @@ -9,12 +9,12 @@ from aiorpcx import _version as aiorpcx_version -import electrumx from electrumx.lib.server_base import ServerBase from electrumx.lib.util import version_string, OldTaskGroup from electrumx.server.db import DB +from electrumx.server.session.session_manager import SessionManager from electrumx.server.mempool import MemPool, MemPoolAPI -from electrumx.server.session import SessionManager +from electrumx.version import electrumx_version class Notifications: @@ -87,7 +87,7 @@ async def serve(self, shutdown_event): env = self.env min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings() - self.logger.info(f'software version: {electrumx.version}') + 
self.logger.info(f'software version: {electrumx_version}') self.logger.info(f'aiorpcX version: {version_string(aiorpcx_version)}') self.logger.info(f'supported protocol versions: {min_str}-{max_str}') self.logger.info(f'event loop policy: {env.loop_policy}') @@ -116,8 +116,7 @@ def get_db_height(): refresh_secs=env.daemon_poll_interval_mempool_msec/1000, ) - session_mgr = SessionManager(env, db, bp, daemon, mempool, - shutdown_event) + session_mgr = SessionManager(env, db, bp, daemon, mempool, shutdown_event) # Test daemon authentication, and also ensure it has a cached # height. Do this before entering the task group. diff --git a/electrumx/server/db.py b/electrumx/server/db.py index ad450712..250a4ca4 100644 --- a/electrumx/server/db.py +++ b/electrumx/server/db.py @@ -41,6 +41,7 @@ ATOMICAL_ID_LEN = 36 TX_HASH_LEN = 32 + @dataclass(order=True) class UTXO: __slots__ = 'tx_num', 'tx_pos', 'tx_hash', 'height', 'value' @@ -50,8 +51,8 @@ class UTXO: height: int # block height value: int # in satoshis -@attr.s(slots=True) +@attr.s(slots=True) class FlushData: height = attr.ib() tx_count = attr.ib() @@ -72,12 +73,12 @@ class FlushData: # atomicals_adds is used to track atomicals locations and unspent utxos with the b'i' and b'a' indexes # It uses a field 'deleted' to indicate whether to write the b'a' (active unspent utxo) or not - because it may have been spent before the cache flushed # Maps location_id to atomical_ids and the value/deleted entry - atomicals_adds = attr.ib() # type: Dict[bytes, Dict[bytes, { value: bytes, deleted: Boolean}] ] + atomicals_adds = attr.ib() # type: Dict[bytes, Dict[bytes, { value: bytes, deleted: bool }] ] # general_adds is a general purpose storage for key-value, used for the majority of atomicals data general_adds = attr.ib() # type: List[Tuple[Sequence[bytes], Sequence[bytes]]] # realm_adds map realm names to tx_num ints, which then map onto an atomical_id # The purpose is to track the earliest appearance of a realm name claim request in the order of the commit tx number - realm_adds = attr.ib() # type: Dict[bytes, Dict[int, bytes] + realm_adds = attr.ib() # type: Dict[bytes, Dict[int, bytes]] # container_adds map container names to tx_num ints, which then map onto an atomical_id # The purpose is to track the earliest appearance of a container name claim request in the order of the commit tx number container_adds = attr.ib() # type: List[Tuple[Sequence[bytes], Sequence[bytes]]] @@ -85,24 +86,26 @@ class FlushData: # The purpose is to track the earliest appearance of a ticker name claim request in the order of the commit tx number ticker_adds = attr.ib() # type: List[Tuple[Sequence[bytes], Sequence[bytes]]] # subrealm_adds maps parent_realm_id + subrealm name to tx_num ints, which then map onto an atomical_id - subrealm_adds = attr.ib() # type: Dict[bytes, Dict[int, bytes] + subrealm_adds = attr.ib() # type: Dict[bytes, Dict[int, bytes]] # subrealmpay_adds maps atomical_id to tx_num ints, which then map onto payment_outpoints - subrealmpay_adds = attr.ib() # type: Dict[bytes, Dict[int, bytes] + subrealmpay_adds = attr.ib() # type: Dict[bytes, Dict[int, bytes]] # dmitem_adds maps parent_realm_id + dmitem name to tx_num ints, which then map onto an atomical_id - dmitem_adds = attr.ib() # type: Dict[bytes, Dict[int, bytes] + dmitem_adds = attr.ib() # type: Dict[bytes, Dict[int, bytes]] # dmpay_adds maps atomical_id to tx_num ints, which then map onto payment_outpoints - dmpay_adds = attr.ib() # type: Dict[bytes, Dict[int, bytes] + dmpay_adds = attr.ib() # type: 
Dict[bytes, Dict[int, bytes]] # distmint_adds tracks the b'gi' which is the initial distributed mint location tracked to determine if any more mints are allowed # It maps atomical_id (of the dft deploy token mint) to location_ids and then the details of the scripthash+sat_value of the mint distmint_adds = attr.ib() # type: Dict[bytes, Dict[bytes, bytes] # state_adds is for evt, mod state updates # It maps atomical_id to the data of the state update - state_adds = attr.ib() # type: Dict[bytes, Dict[bytes, bytes] + state_adds = attr.ib() # type: Dict[bytes, Dict[bytes, bytes]] # op_adds is for record tx operation of one tx - op_adds = attr.ib() # type: Dict[bytes, Dict[bytes] - + op_adds = attr.ib() # type: Dict[bytes, Dict[bytes]] + + COMP_TXID_LEN = 4 + class DB: '''Simple wrapper of the backend database for querying. @@ -262,7 +265,7 @@ def __init__(self, env: 'Env'): # Value: paylaod_bytes of the operation found # "maps pow reveal prefix to height to non atomicals operation data" - self.utxo_db = None + self.utxo_db: Optional[Storage] = None self.utxo_flush_count = 0 self.fs_height = -1 self.fs_tx_count = 0 @@ -270,7 +273,7 @@ def __init__(self, env: 'Env'): self.db_height = -1 self.db_tx_count = 0 self.db_atomical_count = 0 - self.db_tip = None # type: Optional[bytes] + self.db_tip: Optional[bytes] = None self.tx_counts = None self.atomical_counts = None self.last_flush = time.time() diff --git a/electrumx/server/session/__init__.py b/electrumx/server/session/__init__.py new file mode 100644 index 00000000..1d63197b --- /dev/null +++ b/electrumx/server/session/__init__.py @@ -0,0 +1,4 @@ +BAD_REQUEST = 1 +DAEMON_ERROR = 2 +MAX_TX_QUERY = 50 +ATOMICALS_INVALID_TX = 800422 diff --git a/electrumx/server/session.py b/electrumx/server/session/electrumx_session.py similarity index 50% rename from electrumx/server/session.py rename to electrumx/server/session/electrumx_session.py index c24425de..2f30c92a 100644 --- a/electrumx/server/session.py +++ b/electrumx/server/session/electrumx_session.py @@ -1,1443 +1,19 @@ -# Copyright (c) 2016-2018, Neil Booth -# -# All rights reserved. -# -# See the file "LICENCE" for information about the copyright -# and warranty status of this software. 
- -'''Classes for local RPC server and remote client TCP/SSL servers.''' - import asyncio import codecs import datetime -import itertools -import math -import os -import ssl -import time -from collections import defaultdict -from functools import partial -from ipaddress import IPv4Address, IPv6Address, IPv4Network, IPv6Network -from typing import Optional, TYPE_CHECKING -import asyncio -import attr -import pylru -from aiohttp import web -from aiorpcx import (Event, JSONRPCAutoDetect, JSONRPCConnection, - ReplyAndDisconnect, Request, RPCError, RPCSession, - handler_invocation, serve_rs, serve_ws, sleep, - NewlineFramer, TaskTimeout, timeout_after, run_in_thread) +from aiorpcx import timeout_after, TaskTimeout -import electrumx -from electrumx.lib.atomicals_blueprint_builder import AtomicalsTransferBlueprintBuilder +from electrumx.lib import util from electrumx.lib.script2addr import get_address_from_output_script -import electrumx.lib.util as util -from electrumx.lib.util import OldTaskGroup, unpack_le_uint64 -from electrumx.lib.util_atomicals import ( - DFT_MINT_MAX_MAX_COUNT_DENSITY, - format_name_type_candidates_to_rpc, - SUBREALM_MINT_PATH, - MINT_SUBNAME_RULES_BECOME_EFFECTIVE_IN_BLOCKS, - DMINT_PATH, - convert_db_mint_info_to_rpc_mint_info_format, - compact_to_location_id_bytes, - location_id_bytes_to_compact, - is_compact_atomical_id, - format_name_type_candidates_to_rpc_for_subname, - calculate_latest_state_from_mod_history, - parse_protocols_operations_from_witness_array, - validate_rules_data, - AtomicalsValidationError, - auto_encode_bytes_elements, - validate_merkle_proof_dmint -) -from electrumx.lib.hash import (HASHX_LEN, Base58Error, hash_to_hex_str, - hex_str_to_hash, sha256, double_sha256) -from electrumx.lib.merkle import MerkleCache -from electrumx.lib.text import sessions_lines +from electrumx.lib.util_atomicals import * from electrumx.server.daemon import DaemonError -from electrumx.server.history import TXNUM_LEN -from electrumx.server.http_middleware import rate_limiter, cors_middleware, error_middleware, request_middleware -from electrumx.server.http_session import HttpHandler -from electrumx.server.peers import PeerManager -from electrumx.lib.script import SCRIPTHASH_LEN - -if TYPE_CHECKING: - from electrumx.server.db import DB - from electrumx.server.env import Env - from electrumx.server.block_processor import BlockProcessor - from electrumx.server.daemon import Daemon - from electrumx.server.mempool import MemPool - - -BAD_REQUEST = 1 -DAEMON_ERROR = 2 -ATOMICALS_INVALID_TX = 800422 - -def scripthash_to_hashX(scripthash): - try: - bin_hash = hex_str_to_hash(scripthash) - if len(bin_hash) == 32: - return bin_hash[:HASHX_LEN] - except (ValueError, TypeError): - pass - raise RPCError(BAD_REQUEST, f'{scripthash} is not a valid script hash') - -def non_negative_integer(value): - '''Return param value it is or can be converted to a non-negative - integer, otherwise raise an RPCError.''' - try: - value = int(value) - if value >= 0: - return value - except (ValueError, TypeError): - pass - raise RPCError(BAD_REQUEST, - f'{value} should be a non-negative integer') - -def assert_boolean(value): - '''Return param value it is boolean otherwise raise an RPCError.''' - if value in (False, True): - return value - raise RPCError(BAD_REQUEST, f'{value} should be a boolean value') - -def assert_tx_hash(value): - '''Raise an RPCError if the value is not a valid hexadecimal transaction hash. - - If it is valid, return it as 32-byte binary hash. 
- ''' - try: - raw_hash = hex_str_to_hash(value) - if len(raw_hash) == 32: - return raw_hash - except (ValueError, TypeError): - pass - raise RPCError(BAD_REQUEST, f'{value} should be a transaction hash') - -def assert_atomical_id(value): - '''Raise an RPCError if the value is not a valid atomical id - If it is valid, return it as 32-byte binary hash. - ''' - try: - if value == None or value == "": - raise RPCError(BAD_REQUEST, f'atomical_id required') - index_of_i = value.find("i") - if index_of_i != 64: - raise RPCError(BAD_REQUEST, f'{value} should be an atomical_id') - raw_hash = hex_str_to_hash(value[ : 64]) - if len(raw_hash) == 32: - return raw_hash - except (ValueError, TypeError): - pass - - raise RPCError(BAD_REQUEST, f'{value} should be an atomical_id') - -@attr.s(slots=True) -class SessionGroup: - name = attr.ib() - weight = attr.ib() - sessions = attr.ib() - retained_cost = attr.ib() - - def session_cost(self): - return sum(session.cost for session in self.sessions) - - def cost(self): - return self.retained_cost + self.session_cost() - - -@attr.s(slots=True) -class SessionReferences: - # All attributes are sets but groups is a list - sessions = attr.ib() - groups = attr.ib() - specials = attr.ib() # Lower-case strings - unknown = attr.ib() # Strings - - -class SessionManager: - '''Holds global state about all sessions.''' - - def __init__( - self, - env: 'Env', - db: 'DB', - bp: 'BlockProcessor', - daemon: 'Daemon', - mempool: 'MemPool', - shutdown_event: asyncio.Event, - ): - env.max_send = max(350000, env.max_send) - self.env = env - self.db = db - self.bp = bp - self.daemon = daemon - self.mempool = mempool - self.peer_mgr = PeerManager(env, db) - self.shutdown_event = shutdown_event - self.logger = util.class_logger(__name__, self.__class__.__name__) - self.servers = {} # service->server - self.sessions = {} # session->iterable of its SessionGroups - self.session_groups = {} # group name->SessionGroup instance - self.txs_sent = 0 - # Would use monotonic time, but aiorpcx sessions use Unix time: - self.start_time = time.time() - self._method_counts = defaultdict(int) - self._reorg_count = 0 - self._history_cache = pylru.lrucache(1000) - self._history_lookups = 0 - self._history_hits = 0 - self._history_op_cache = pylru.lrucache(1000) - self._tx_num_op_cache = pylru.lrucache(10000000) - self._tx_hashes_cache = pylru.lrucache(1000) - self._tx_hashes_lookups = 0 - self._tx_hashes_hits = 0 - # Really a MerkleCache cache - self._merkle_cache = pylru.lrucache(1000) - self._merkle_lookups = 0 - self._merkle_hits = 0 - self.estimatefee_cache = pylru.lrucache(1000) - self._tx_detail_cache = pylru.lrucache(1000000) - self.notified_height = None - self.hsub_results = None - self._task_group = OldTaskGroup() - self._sslc = None - # Event triggered when electrumx is listening for incoming requests. 
- self.server_listening = Event() - self.session_event = Event() - - # Set up the RPC request handlers - cmds = ('add_peer daemon_url disconnect getinfo groups log peers ' - 'query reorg sessions stop debug_memusage_list_all_objects ' - 'debug_memusage_get_random_backref_chain'.split()) - LocalRPC.request_handlers = {cmd: getattr(self, 'rpc_' + cmd) - for cmd in cmds} - - def _ssl_context(self): - if self._sslc is None: - self._sslc = ssl.SSLContext(ssl.PROTOCOL_TLS) - self._sslc.load_cert_chain(self.env.ssl_certfile, keyfile=self.env.ssl_keyfile) - return self._sslc - - async def _start_servers(self, services): - for service in services: - kind = service.protocol.upper() - if service.protocol == 'http': - host = None if service.host == 'all_interfaces' else str(service.host) - try: - app = web.Application(middlewares=[ - cors_middleware(self), - error_middleware(self), - request_middleware(self), - ]) - handler = HttpHandler(self, self.db, self.mempool, self.peer_mgr, kind) - # GET - app.router.add_get('/proxy', handler.proxy) - app.router.add_get('/proxy/health', handler.health) - app.router.add_get('/proxy/blockchain.block.header', handler.block_header) - app.router.add_get('/proxy/blockchain.block.headers', handler.block_headers) - app.router.add_get('/proxy/blockchain.estimatefee', handler.estimatefee) - # app.router.add_get('/proxy/headers.subscribe', handler.headers_subscribe) - # app.router.add_get('/proxy/relayfee', handler.relayfee) - app.router.add_get('/proxy/blockchain.scripthash.get_balance', handler.scripthash_get_balance) - app.router.add_get('/proxy/blockchain.scripthash.get_history', handler.scripthash_get_history) - app.router.add_get('/proxy/blockchain.scripthash.get_mempool', handler.scripthash_get_mempool) - app.router.add_get('/proxy/blockchain.scripthash.listunspent', handler.scripthash_listunspent) - app.router.add_get('/proxy/blockchain.scripthash.subscribe', handler.scripthash_subscribe) - app.router.add_get('/proxy/blockchain.transaction.broadcast', handler.transaction_broadcast) - app.router.add_get('/proxy/blockchain.transaction.get', handler.transaction_get) - app.router.add_get('/proxy/blockchain.transaction.get_merkle', handler.transaction_merkle) - app.router.add_get('/proxy/blockchain.transaction.id_from_pos', handler.transaction_id_from_pos) - # app.router.add_get('/proxy/server.add_peer', handler.add_peer) - # app.router.add_get('/proxy/server.banner', handler.banner) - app.router.add_get('/proxy/server.donation_address', handler.donation_address) - app.router.add_get('/proxy/server.features', handler.server_features_async) - app.router.add_get('/proxy/server.peers.subscribe', handler.peers_subscribe) - app.router.add_get('/proxy/server.ping', handler.ping) - # app.router.add_get('/proxy/server.version', handler.server_version) - app.router.add_get('/proxy/blockchain.atomicals.validate', handler.transaction_broadcast_validate) - app.router.add_get('/proxy/blockchain.atomicals.get_ft_balances_scripthash', handler.atomicals_get_ft_balances) - app.router.add_get('/proxy/blockchain.atomicals.get_nft_balances_scripthash', handler.atomicals_get_nft_balances) - app.router.add_get('/proxy/blockchain.atomicals.listscripthash', handler.atomicals_listscripthash) - app.router.add_get('/proxy/blockchain.atomicals.list', handler.atomicals_list) - app.router.add_get('/proxy/blockchain.atomicals.get_numbers', handler.atomicals_num_to_id) - app.router.add_get('/proxy/blockchain.atomicals.get_block_hash', handler.atomicals_block_hash) - 
app.router.add_get('/proxy/blockchain.atomicals.get_block_txs', handler.atomicals_block_txs) - app.router.add_get('/proxy/blockchain.atomicals.dump', handler.atomicals_dump) - app.router.add_get('/proxy/blockchain.atomicals.at_location', handler.atomicals_at_location) - app.router.add_get('/proxy/blockchain.atomicals.get_location', handler.atomicals_get_location) - app.router.add_get('/proxy/blockchain.atomicals.get', handler.atomicals_get) - app.router.add_get('/proxy/blockchain.atomicals.get_global', handler.atomicals_get_global) - app.router.add_get('/proxy/blockchain.atomicals.get_state', handler.atomical_get_state) - app.router.add_get('/proxy/blockchain.atomicals.get_state_history', handler.atomical_get_state_history) - app.router.add_get('/proxy/blockchain.atomicals.get_events', handler.atomical_get_events) - app.router.add_get('/proxy/blockchain.atomicals.get_tx_history', handler.atomicals_get_tx_history) - app.router.add_get('/proxy/blockchain.atomicals.get_realm_info', handler.atomicals_get_realm_info) - app.router.add_get('/proxy/blockchain.atomicals.get_by_realm', handler.atomicals_get_by_realm) - app.router.add_get('/proxy/blockchain.atomicals.get_by_subrealm', handler.atomicals_get_by_subrealm) - app.router.add_get('/proxy/blockchain.atomicals.get_by_dmitem', handler.atomicals_get_by_dmitem) - app.router.add_get('/proxy/blockchain.atomicals.get_by_ticker', handler.atomicals_get_by_ticker) - app.router.add_get('/proxy/blockchain.atomicals.get_by_container', handler.atomicals_get_by_container) - app.router.add_get('/proxy/blockchain.atomicals.get_by_container_item', handler.atomicals_get_by_container_item) - app.router.add_get('/proxy/blockchain.atomicals.get_by_container_item_validate', handler.atomicals_get_by_container_item_validation) - app.router.add_get('/proxy/blockchain.atomicals.get_container_items', handler.atomicals_get_container_items) - app.router.add_get('/proxy/blockchain.atomicals.get_ft_info', handler.atomicals_get_ft_info) - app.router.add_get('/proxy/blockchain.atomicals.get_dft_mints', handler.atomicals_get_dft_mints) - app.router.add_get('/proxy/blockchain.atomicals.find_tickers', handler.atomicals_search_tickers) - app.router.add_get('/proxy/blockchain.atomicals.find_realms', handler.atomicals_search_realms) - app.router.add_get('/proxy/blockchain.atomicals.find_subrealms', handler.atomicals_search_subrealms) - app.router.add_get('/proxy/blockchain.atomicals.find_containers', handler.atomicals_search_containers) - app.router.add_get('/proxy/blockchain.atomicals.get_holders', handler.atomicals_get_holders) - app.router.add_get('/proxy/blockchain.atomicals.transaction', handler.atomicals_transaction) - app.router.add_get('/proxy/blockchain.atomicals.transaction_by_height', handler.transaction_by_height) - app.router.add_get('/proxy/blockchain.atomicals.transaction_by_atomical_id', handler.transaction_by_atomical_id) - app.router.add_get('/proxy/blockchain.atomicals.transaction_by_scripthash', handler.transaction_by_scripthash) - app.router.add_get('/proxy/blockchain.atomicals.transaction_global', handler.transaction_global) - # POST - app.router.add_post('/proxy', handler.proxy) - app.router.add_post('/proxy/blockchain.block.header', handler.block_header) - app.router.add_post('/proxy/blockchain.block.headers', handler.block_headers) - app.router.add_post('/proxy/blockchain.estimatefee', handler.estimatefee) - # app.router.add_post('/proxy/headers.subscribe', handler.headers_subscribe) - # app.router.add_post('/proxy/relayfee', handler.relayfee) - 
app.router.add_post('/proxy/blockchain.scripthash.get_balance', handler.scripthash_get_balance) - app.router.add_post('/proxy/blockchain.scripthash.get_history', handler.scripthash_get_history) - app.router.add_post('/proxy/blockchain.scripthash.get_mempool', handler.scripthash_get_mempool) - app.router.add_post('/proxy/blockchain.scripthash.listunspent', handler.scripthash_listunspent) - app.router.add_post('/proxy/blockchain.scripthash.subscribe', handler.scripthash_subscribe) - app.router.add_post('/proxy/blockchain.transaction.broadcast', handler.transaction_broadcast) - app.router.add_post('/proxy/blockchain.transaction.get', handler.transaction_get) - app.router.add_post('/proxy/blockchain.transaction.get_merkle', handler.transaction_merkle) - app.router.add_post('/proxy/blockchain.transaction.id_from_pos', handler.transaction_id_from_pos) - # app.router.add_post('/proxy/server.add_peer', handler.add_peer) - # app.router.add_post('/proxy/server.banner', handler.banner) - app.router.add_post('/proxy/server.donation_address', handler.donation_address) - app.router.add_post('/proxy/server.features', handler.server_features_async) - app.router.add_post('/proxy/server.peers.subscribe', handler.peers_subscribe) - app.router.add_post('/proxy/server.ping', handler.ping) - # app.router.add_post('/proxy/server.version', handler.server_version) - app.router.add_post('/proxy/blockchain.atomicals.validate', handler.transaction_broadcast_validate) - app.router.add_post('/proxy/blockchain.atomicals.get_ft_balances_scripthash', handler.atomicals_get_ft_balances) - app.router.add_post('/proxy/blockchain.atomicals.get_nft_balances_scripthash', handler.atomicals_get_nft_balances) - app.router.add_post('/proxy/blockchain.atomicals.listscripthash', handler.atomicals_listscripthash) - app.router.add_post('/proxy/blockchain.atomicals.list', handler.atomicals_list) - app.router.add_post('/proxy/blockchain.atomicals.get_numbers', handler.atomicals_num_to_id) - app.router.add_post('/proxy/blockchain.atomicals.get_block_hash', handler.atomicals_block_hash) - app.router.add_post('/proxy/blockchain.atomicals.get_block_txs', handler.atomicals_block_txs) - app.router.add_post('/proxy/blockchain.atomicals.dump', handler.atomicals_dump) - app.router.add_post('/proxy/blockchain.atomicals.at_location', handler.atomicals_at_location) - app.router.add_post('/proxy/blockchain.atomicals.get_location', handler.atomicals_get_location) - app.router.add_post('/proxy/blockchain.atomicals.get', handler.atomicals_get) - app.router.add_post('/proxy/blockchain.atomicals.get_global', handler.atomicals_get_global) - app.router.add_post('/proxy/blockchain.atomicals.get_state', handler.atomical_get_state) - app.router.add_post('/proxy/blockchain.atomicals.get_state_history', handler.atomical_get_state_history) - app.router.add_post('/proxy/blockchain.atomicals.get_events', handler.atomical_get_events) - app.router.add_post('/proxy/blockchain.atomicals.get_tx_history', handler.atomicals_get_tx_history) - app.router.add_post('/proxy/blockchain.atomicals.get_realm_info', handler.atomicals_get_realm_info) - app.router.add_post('/proxy/blockchain.atomicals.get_by_realm', handler.atomicals_get_by_realm) - app.router.add_post('/proxy/blockchain.atomicals.get_by_subrealm', handler.atomicals_get_by_subrealm) - app.router.add_post('/proxy/blockchain.atomicals.get_by_dmitem', handler.atomicals_get_by_dmitem) - app.router.add_post('/proxy/blockchain.atomicals.get_by_ticker', handler.atomicals_get_by_ticker) - 
app.router.add_post('/proxy/blockchain.atomicals.get_by_container', handler.atomicals_get_by_container) - app.router.add_post('/proxy/blockchain.atomicals.get_by_container_item', handler.atomicals_get_by_container_item) - app.router.add_post('/proxy/blockchain.atomicals.get_by_container_item_validate', handler.atomicals_get_by_container_item_validation) - app.router.add_post('/proxy/blockchain.atomicals.get_container_items', handler.atomicals_get_container_items) - app.router.add_post('/proxy/blockchain.atomicals.get_ft_info', handler.atomicals_get_ft_info) - app.router.add_post('/proxy/blockchain.atomicals.get_dft_mints', handler.atomicals_get_dft_mints) - app.router.add_post('/proxy/blockchain.atomicals.find_tickers', handler.atomicals_search_tickers) - app.router.add_post('/proxy/blockchain.atomicals.find_realms', handler.atomicals_search_realms) - app.router.add_post('/proxy/blockchain.atomicals.find_subrealms', handler.atomicals_search_subrealms) - app.router.add_post('/proxy/blockchain.atomicals.find_containers', handler.atomicals_search_containers) - app.router.add_post('/proxy/blockchain.atomicals.get_holders', handler.atomicals_get_holders) - app.router.add_post('/proxy/blockchain.atomicals.transaction', handler.atomicals_transaction) - app.router.add_post('/proxy/blockchain.atomicals.transaction_by_height', handler.transaction_by_height) - app.router.add_post('/proxy/blockchain.atomicals.transaction_by_atomical_id', handler.transaction_by_atomical_id) - app.router.add_post('/proxy/blockchain.atomicals.transaction_by_scripthash', handler.transaction_by_scripthash) - app.router.add_post('/proxy/blockchain.atomicals.transaction_global', handler.transaction_global) - # common proxy - app.router.add_get('/proxy/{method}', handler.handle_get_method) - app.router.add_post('/proxy/{method}', handler.handle_post_method) - app['rate_limiter'] = rate_limiter - runner = web.AppRunner(app) - await runner.setup() - site = web.TCPSite(runner, host, service.port) - await site.start() - except Exception as e: - self.logger.error(f'{kind} server failed to listen on {service.address}: {e}') - else: - self.logger.info(f'{kind} server listening on {service.address}') - else: - if service.protocol in self.env.SSL_PROTOCOLS: - sslc = self._ssl_context() - else: - sslc = None - if service.protocol == 'rpc': - session_class = LocalRPC - else: - session_class = self.env.coin.SESSIONCLS - if service.protocol in ('ws', 'wss'): - serve = serve_ws - else: - serve = serve_rs - # FIXME: pass the service not the kind - session_factory = partial(session_class, self, self.db, self.mempool, - self.peer_mgr, kind) - host = None if service.host == 'all_interfaces' else str(service.host) - try: - self.servers[service] = await serve(session_factory, host, - service.port, ssl=sslc) - except OSError as e: # don't suppress CancelledError - self.logger.error(f'{kind} server failed to listen on {service.address}: {e}') - else: - self.logger.info(f'{kind} server listening on {service.address}') - - - async def _start_external_servers(self): - '''Start listening on TCP and SSL ports, but only if the respective - port was given in the environment. 
- ''' - await self._start_servers(service for service in self.env.services - if service.protocol != 'rpc') - self.server_listening.set() - - async def _stop_servers(self, services): - '''Stop the servers of the given protocols.''' - server_map = {service: self.servers.pop(service) - for service in set(services).intersection(self.servers)} - # Close all before waiting - for service, server in server_map.items(): - self.logger.info(f'closing down server for {service}') - server.close() - # No value in doing these concurrently - for server in server_map.values(): - await server.wait_closed() - - async def _manage_servers(self): - paused = False - max_sessions = self.env.max_sessions - low_watermark = max_sessions * 19 // 20 - while True: - await self.session_event.wait() - self.session_event.clear() - if not paused and len(self.sessions) >= max_sessions: - self.logger.info(f'maximum sessions {max_sessions:,d} ' - f'reached, stopping new connections until ' - f'count drops to {low_watermark:,d}') - await self._stop_servers(service for service in self.servers - if service.protocol != 'rpc') - paused = True - # Start listening for incoming connections if paused and - # session count has fallen - if paused and len(self.sessions) <= low_watermark: - self.logger.info('resuming listening for incoming connections') - await self._start_external_servers() - paused = False - - async def _log_sessions(self): - '''Periodically log sessions.''' - log_interval = self.env.log_sessions - if log_interval: - while True: - await sleep(log_interval) - data = self._session_data(for_log=True) - for line in sessions_lines(data): - self.logger.info(line) - self.logger.info(util.json_serialize(self._get_info())) - - async def _disconnect_sessions(self, sessions, reason, *, force_after=1.0): - if sessions: - session_ids = ', '.join(str(session.session_id) for session in sessions) - self.logger.info(f'{reason} session ids {session_ids}') - for session in sessions: - await self._task_group.spawn(session.close(force_after=force_after)) - - async def _clear_stale_sessions(self): - '''Cut off sessions that haven't done anything for 10 minutes.''' - while True: - await sleep(60) - stale_cutoff = time.time() - self.env.session_timeout - stale_sessions = [session for session in self.sessions - if session.last_recv < stale_cutoff] - await self._disconnect_sessions(stale_sessions, 'closing stale') - del stale_sessions - - async def _handle_chain_reorgs(self): - '''Clear certain caches on chain reorgs.''' - while True: - await self.bp.backed_up_event.wait() - self.logger.info(f'reorg signalled; clearing tx_hashes and merkle caches') - self._reorg_count += 1 - self._tx_hashes_cache.clear() - self._merkle_cache.clear() - - async def _recalc_concurrency(self): - '''Periodically recalculate session concurrency.''' - session_class = self.env.coin.SESSIONCLS - period = 300 - while True: - await sleep(period) - hard_limit = session_class.cost_hard_limit - - # Reduce retained group cost - refund = period * hard_limit / 5000 - dead_groups = [] - for group in self.session_groups.values(): - group.retained_cost = max(0.0, group.retained_cost - refund) - if group.retained_cost == 0 and not group.sessions: - dead_groups.append(group) - # Remove dead groups - for group in dead_groups: - self.session_groups.pop(group.name) - - # Recalc concurrency for sessions where cost is changing gradually, and update - # cost_decay_per_sec. 
- for session in self.sessions: - # Subs have an on-going cost so decay more slowly with more subs - session.cost_decay_per_sec = hard_limit / (10000 + 5 * session.sub_count()) - session.recalc_concurrency() - - def _get_info(self): - '''A summary of server state.''' - cache_fmt = '{:,d} lookups {:,d} hits {:,d} entries' - sessions = self.sessions - return { - 'coin': self.env.coin.__name__, - 'daemon': self.daemon.logged_url(), - 'daemon height': self.daemon.cached_height(), - 'db height': self.db.db_height, - 'db_flush_count': self.db.history.flush_count, - 'groups': len(self.session_groups), - 'history cache': cache_fmt.format( - self._history_lookups, self._history_hits, len(self._history_cache)), - 'merkle cache': cache_fmt.format( - self._merkle_lookups, self._merkle_hits, len(self._merkle_cache)), - 'pid': os.getpid(), - 'peers': self.peer_mgr.info(), - 'request counts': self._method_counts, - 'request total': sum(self._method_counts.values()), - 'sessions': { - 'count': len(sessions), - 'count with subs': sum(len(getattr(s, 'hashX_subs', ())) > 0 for s in sessions), - 'errors': sum(s.errors for s in sessions), - 'logged': len([s for s in sessions if s.log_me]), - 'pending requests': sum(s.unanswered_request_count() for s in sessions), - 'subs': sum(s.sub_count() for s in sessions), - }, - 'tx hashes cache': cache_fmt.format( - self._tx_hashes_lookups, self._tx_hashes_hits, len(self._tx_hashes_cache)), - 'txs sent': self.txs_sent, - 'uptime': util.formatted_time(time.time() - self.start_time), - 'version': electrumx.version, - } - - def _session_data(self, for_log): - '''Returned to the RPC 'sessions' call.''' - now = time.time() - sessions = sorted(self.sessions, key=lambda s: s.start_time) - return [(session.session_id, - session.flags(), - session.remote_address_string(for_log=for_log), - session.client, - session.protocol_version_string(), - session.cost, - session.extra_cost(), - session.unanswered_request_count(), - session.txs_sent, - session.sub_count(), - session.recv_count, session.recv_size, - session.send_count, session.send_size, - now - session.start_time) - for session in sessions] - - def _group_data(self): - '''Returned to the RPC 'groups' call.''' - result = [] - for name, group in self.session_groups.items(): - sessions = group.sessions - result.append([name, - len(sessions), - group.session_cost(), - group.retained_cost, - sum(s.unanswered_request_count() for s in sessions), - sum(s.txs_sent for s in sessions), - sum(s.sub_count() for s in sessions), - sum(s.recv_count for s in sessions), - sum(s.recv_size for s in sessions), - sum(s.send_count for s in sessions), - sum(s.send_size for s in sessions), - ]) - return result - - async def _refresh_hsub_results(self, height): - '''Refresh the cached header subscription responses to be for height, - and record that as notified_height. 
- ''' - # Paranoia: a reorg could race and leave db_height lower - height = min(height, self.db.db_height) - raw = await self.raw_header(height) - self.hsub_results = {'hex': raw.hex(), 'height': height} - self.notified_height = height - - def _session_references(self, items, special_strings): - '''Return a SessionReferences object.''' - if not isinstance(items, list) or not all(isinstance(item, str) for item in items): - raise RPCError(BAD_REQUEST, 'expected a list of session IDs') - - sessions_by_id = {session.session_id: session for session in self.sessions} - groups_by_name = self.session_groups - - sessions = set() - groups = set() # Names as groups are not hashable - specials = set() - unknown = set() - - for item in items: - if item.isdigit(): - session = sessions_by_id.get(int(item)) - if session: - sessions.add(session) - else: - unknown.add(item) - else: - lc_item = item.lower() - if lc_item in special_strings: - specials.add(lc_item) - else: - if lc_item in groups_by_name: - groups.add(lc_item) - else: - unknown.add(item) - - groups = [groups_by_name[group] for group in groups] - return SessionReferences(sessions, groups, specials, unknown) - - # --- LocalRPC command handlers - - async def rpc_add_peer(self, real_name): - '''Add a peer. - - real_name: "bch.electrumx.cash t50001 s50002" for example - ''' - await self.peer_mgr.add_localRPC_peer(real_name) - return f"peer '{real_name}' added" - - async def rpc_disconnect(self, session_ids): - '''Disconnect sesssions. - - session_ids: array of session IDs - ''' - refs = self._session_references(session_ids, {'all'}) - result = [] - - if 'all' in refs.specials: - sessions = self.sessions - result.append('disconnecting all sessions') - else: - sessions = refs.sessions - result.extend(f'disconnecting session {session.session_id}' for session in sessions) - for group in refs.groups: - result.append(f'disconnecting group {group.name}') - sessions.update(group.sessions) - result.extend(f'unknown: {item}' for item in refs.unknown) - - await self._disconnect_sessions(sessions, 'local RPC request to disconnect') - return result - - async def rpc_log(self, session_ids): - '''Toggle logging of sesssions. 
- - session_ids: array of session or group IDs, or 'all', 'none', 'new' - ''' - refs = self._session_references(session_ids, {'all', 'none', 'new'}) - result = [] - - def add_result(text, value): - result.append(f'logging {text}' if value else f'not logging {text}') - - if 'all' in refs.specials: - for session in self.sessions: - session.log_me = True - SessionBase.log_new = True - result.append('logging all sessions') - if 'none' in refs.specials: - for session in self.sessions: - session.log_me = False - SessionBase.log_new = False - result.append('logging no sessions') - if 'new' in refs.specials: - SessionBase.log_new = not SessionBase.log_new - add_result('new sessions', SessionBase.log_new) - - sessions = refs.sessions - for session in sessions: - session.log_me = not session.log_me - add_result(f'session {session.session_id}', session.log_me) - for group in refs.groups: - for session in group.sessions.difference(sessions): - sessions.add(session) - session.log_me = not session.log_me - add_result(f'session {session.session_id}', session.log_me) - - result.extend(f'unknown: {item}' for item in refs.unknown) - return result - - async def rpc_daemon_url(self, daemon_url): - '''Replace the daemon URL.''' - daemon_url = daemon_url or self.env.daemon_url - try: - self.daemon.set_url(daemon_url) - except Exception as e: - raise RPCError(BAD_REQUEST, f'an error occurred: {e!r}') - return f'now using daemon at {self.daemon.logged_url()}' - - async def rpc_stop(self): - '''Shut down the server cleanly.''' - self.shutdown_event.set() - return 'stopping' - - async def rpc_getinfo(self): - '''Return summary information about the server process.''' - return self._get_info() - - async def rpc_groups(self): - '''Return statistics about the session groups.''' - return self._group_data() - - async def rpc_peers(self): - '''Return a list of data about server peers.''' - return self.peer_mgr.rpc_data() - - async def rpc_query(self, items, limit): - '''Returns data about a script, address or name.''' - coin = self.env.coin - db = self.db - lines = [] - - def arg_to_hashX(arg): - try: - script = bytes.fromhex(arg) - lines.append(f'Script: {arg}') - return coin.hashX_from_script(script) - except ValueError: - pass - - try: - hashX = coin.address_to_hashX(arg) - lines.append(f'Address: {arg}') - return hashX - except Base58Error: - pass - - try: - script = coin.build_name_index_script(arg.encode("ascii")) - hashX = coin.name_hashX_from_script(script) - lines.append(f'Name: {arg}') - return hashX - except (AttributeError, UnicodeEncodeError): - pass - - return None - - for arg in items: - hashX = arg_to_hashX(arg) - if not hashX: - continue - n = None - history = await db.limited_history(hashX, limit=limit) - for n, (tx_hash, height) in enumerate(history): - lines.append(f'History #{n:,d}: height {height:,d} ' - f'tx_hash {hash_to_hex_str(tx_hash)}') - if n is None: - lines.append('No history found') - n = None - utxos = await db.all_utxos(hashX) - for n, utxo in enumerate(utxos, start=1): - lines.append(f'UTXO #{n:,d}: tx_hash ' - f'{hash_to_hex_str(utxo.tx_hash)} ' - f'tx_pos {utxo.tx_pos:,d} height ' - f'{utxo.height:,d} value {utxo.value:,d}') - if n == limit: - break - if n is None: - lines.append('No UTXOs found') - - balance = sum(utxo.value for utxo in utxos) - lines.append(f'Balance: {coin.decimal_value(balance):,f} ' - f'{coin.SHORTNAME}') - - return lines - - async def rpc_sessions(self): - '''Return statistics about connected sessions.''' - return self._session_data(for_log=False) - - async def 
rpc_reorg(self, count): - '''Force a reorg of the given number of blocks. - - count: number of blocks to reorg - ''' - count = non_negative_integer(count) - if not self.bp.force_chain_reorg(count): - raise RPCError(BAD_REQUEST, 'still catching up with daemon') - return f'scheduled a reorg of {count:,d} blocks' - - async def rpc_debug_memusage_list_all_objects(self, limit: int) -> str: - """Return a string listing the most common types in memory.""" - import objgraph # optional dependency - import io - with io.StringIO() as fd: - objgraph.show_most_common_types( - limit=limit, - shortnames=False, - file=fd) - return fd.getvalue() - - async def rpc_debug_memusage_get_random_backref_chain(self, objtype: str) -> str: - """Return a dotfile as text containing the backref chain - for a randomly selected object of type objtype. - - Warning: very slow! and it blocks the server. - - To convert to image: - $ dot -Tps filename.dot -o outfile.ps - """ - import objgraph # optional dependency - import random - import io - with io.StringIO() as fd: - await run_in_thread( - lambda: - objgraph.show_chain( - objgraph.find_backref_chain( - random.choice(objgraph.by_type(objtype)), - objgraph.is_proper_module), - output=fd)) - return fd.getvalue() - - # --- External Interface - - async def serve(self, notifications, event): - '''Start the RPC server if enabled. When the event is triggered, - start TCP and SSL servers.''' - try: - await self._start_servers(service for service in self.env.services - if service.protocol == 'rpc') - await event.wait() - - session_class = self.env.coin.SESSIONCLS - session_class.cost_soft_limit = self.env.cost_soft_limit - session_class.cost_hard_limit = self.env.cost_hard_limit - session_class.cost_decay_per_sec = session_class.cost_hard_limit / 10000 - session_class.bw_cost_per_byte = 1.0 / self.env.bw_unit_cost - session_class.cost_sleep = self.env.request_sleep / 1000 - session_class.initial_concurrent = self.env.initial_concurrent - session_class.processing_timeout = self.env.request_timeout - - self.logger.info(f'max session count: {self.env.max_sessions:,d}') - self.logger.info(f'session timeout: {self.env.session_timeout:,d} seconds') - self.logger.info(f'session cost hard limit {self.env.cost_hard_limit:,d}') - self.logger.info(f'session cost soft limit {self.env.cost_soft_limit:,d}') - self.logger.info(f'bandwidth unit cost {self.env.bw_unit_cost:,d}') - self.logger.info(f'request sleep {self.env.request_sleep:,d}ms') - self.logger.info(f'request timeout {self.env.request_timeout:,d}s') - self.logger.info(f'initial concurrent {self.env.initial_concurrent:,d}') - - self.logger.info(f'max response size {self.env.max_send:,d} bytes') - if self.env.drop_client is not None: - self.logger.info( - f'drop clients matching: {self.env.drop_client.pattern}' - ) - for service in self.env.report_services: - self.logger.info(f'advertising service {service}') - # Start notifications; initialize hsub_results - await notifications.start(self.db.db_height, self._notify_sessions) - await self._start_external_servers() - # Peer discovery should start after the external servers - # because we connect to ourself - async with self._task_group as group: - await group.spawn(self.peer_mgr.discover_peers()) - await group.spawn(self._clear_stale_sessions()) - await group.spawn(self._handle_chain_reorgs()) - await group.spawn(self._recalc_concurrency()) - await group.spawn(self._log_sessions()) - await group.spawn(self._manage_servers()) - finally: - # Close servers then sessions - await 
-            await self._stop_servers(self.servers.keys())
-            async with OldTaskGroup() as group:
-                for session in list(self.sessions):
-                    await group.spawn(session.close(force_after=1))
-
-    def extra_cost(self, session):
-        # Note there is no guarantee that session is still in self.sessions.  Example traceback:
-        # notify_sessions->notify->address_status->bump_cost->recalc_concurrency->extra_cost
-        # during which there are many places the session could be removed
-        groups = self.sessions.get(session)
-        if groups is None:
-            return 0
-        return sum((group.cost() - session.cost) * group.weight for group in groups)
-
-    async def _merkle_branch(self, height, tx_hashes, tx_pos):
-        tx_hash_count = len(tx_hashes)
-        cost = tx_hash_count
-
-        if tx_hash_count >= 200:
-            self._merkle_lookups += 1
-            merkle_cache = self._merkle_cache.get(height)
-            if merkle_cache:
-                self._merkle_hits += 1
-                cost = 10 * math.sqrt(tx_hash_count)
-            else:
-                async def tx_hashes_func(start, count):
-                    return tx_hashes[start: start + count]
-                merkle_cache = MerkleCache(self.db.merkle, tx_hashes_func)
-                self._merkle_cache[height] = merkle_cache
-                await merkle_cache.initialize(len(tx_hashes))
-            branch, _root = await merkle_cache.branch_and_root(tx_hash_count, tx_pos)
-        else:
-            branch, _root = self.db.merkle.branch_and_root(tx_hashes, tx_pos)
-
-        branch = [hash_to_hex_str(hash) for hash in branch]
-        return branch, cost / 2500
-
-    async def merkle_branch_for_tx_hash(self, height, tx_hash):
-        '''Return a triple (branch, tx_pos, cost).'''
-        tx_hashes, tx_hashes_cost = await self.tx_hashes_at_blockheight(height)
-        try:
-            tx_pos = tx_hashes.index(tx_hash)
-        except ValueError:
-            raise RPCError(BAD_REQUEST,
-                           f'tx {hash_to_hex_str(tx_hash)} not in block at height {height:,d}')
-        branch, merkle_cost = await self._merkle_branch(height, tx_hashes, tx_pos)
-        return branch, tx_pos, tx_hashes_cost + merkle_cost
-
-    async def merkle_branch_for_tx_pos(self, height, tx_pos):
-        '''Return a triple (branch, tx_hash_hex, cost).'''
-        tx_hashes, tx_hashes_cost = await self.tx_hashes_at_blockheight(height)
-        try:
-            tx_hash = tx_hashes[tx_pos]
-        except IndexError:
-            raise RPCError(BAD_REQUEST,
-                           f'no tx at position {tx_pos:,d} in block at height {height:,d}')
-        branch, merkle_cost = await self._merkle_branch(height, tx_hashes, tx_pos)
-        return branch, hash_to_hex_str(tx_hash), tx_hashes_cost + merkle_cost
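For intuition about what `_merkle_branch` delegates to the `Merkle`/`MerkleCache` machinery, here is a minimal, self-contained sketch of the underlying algorithm with double-SHA256. `merkle_branch` below is an illustrative stand-in, not ElectrumX's API:

    import hashlib

    def double_sha256(b: bytes) -> bytes:
        return hashlib.sha256(hashlib.sha256(b).digest()).digest()

    def merkle_branch(hashes, index):
        # Sibling hashes proving hashes[index] up to the merkle root.
        hashes = list(hashes)
        branch = []
        while len(hashes) > 1:
            if len(hashes) % 2:
                hashes.append(hashes[-1])        # duplicate the odd leaf out
            branch.append(hashes[index ^ 1])     # the sibling at this level
            hashes = [double_sha256(hashes[i] + hashes[i + 1])
                      for i in range(0, len(hashes), 2)]
            index >>= 1
        return branch

A branch for a ~2,000-transaction block is only 11 hashes, which is why a cache hit above is priced at 10 * math.sqrt(tx_hash_count) rather than the full tx_hash_count.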
-    async def tx_hashes_at_blockheight(self, height):
-        '''Returns a pair (tx_hashes, cost).
-
-        tx_hashes is an ordered list of binary hashes, cost is an estimated cost of
-        getting the hashes; cheaper if in-cache.  Raises RPCError.
-        '''
-        self._tx_hashes_lookups += 1
-        tx_hashes = self._tx_hashes_cache.get(height)
-        if tx_hashes:
-            self._tx_hashes_hits += 1
-            return tx_hashes, 0.1
-
-        # Ensure the tx_hashes are fresh before placing in the cache
-        while True:
-            reorg_count = self._reorg_count
-            try:
-                tx_hashes = await self.db.tx_hashes_at_blockheight(height)
-            except self.db.DBError as e:
-                raise RPCError(BAD_REQUEST, f'db error: {e!r}')
-            if reorg_count == self._reorg_count:
-                break
-
-        self._tx_hashes_cache[height] = tx_hashes
-
-        return tx_hashes, 0.25 + len(tx_hashes) * 0.0001
-
-    def session_count(self):
-        '''The number of connections that we've sent something to.'''
-        return len(self.sessions)
-
-    async def daemon_request(self, method, *args):
-        '''Catch a DaemonError and convert it to an RPCError.'''
-        try:
-            return await getattr(self.daemon, method)(*args)
-        except DaemonError as e:
-            raise RPCError(DAEMON_ERROR, f'daemon error: {e!r}') from None
-
-    async def raw_header(self, height):
-        '''Return the binary header at the given height.'''
-        try:
-            return await self.db.raw_header(height)
-        except IndexError:
-            raise RPCError(BAD_REQUEST, f'height {height:,d} '
-                           'out of range') from None
-
-    async def broadcast_transaction(self, raw_tx):
-        hex_hash = await self.daemon.broadcast_transaction(raw_tx)
-        self.txs_sent += 1
-        return hex_hash
-
-    async def broadcast_transaction_validated(self, raw_tx, live_run):
-        self.bp.validate_ft_rules_raw_tx(raw_tx)
-        if live_run:
-            hex_hash = await self.daemon.broadcast_transaction(raw_tx)
-            self.txs_sent += 1
-            return hex_hash
-        else:
-            tx, tx_hash = self.env.coin.DESERIALIZER(bytes.fromhex(raw_tx), 0).read_tx_and_hash()
-            return hash_to_hex_str(tx_hash)
-
-    async def limited_history(self, hashX):
-        '''Returns a pair (history, cost).
-
-        History is a sorted list of (tx_hash, height) tuples, or an RPCError.'''
-        # History DoS limit.  Each element of history is about 99 bytes when encoded
-        # as JSON.
-        limit = self.env.max_send // 99
-        cost = 0.1
-        self._history_lookups += 1
-        result = self._history_cache.get(hashX)
-        if result:
-            self._history_hits += 1
-        else:
-            result = await self.db.limited_history(hashX, limit=limit)
-            cost += 0.1 + len(result) * 0.001
-            if len(result) >= limit:
-                result = RPCError(BAD_REQUEST, f'history too large', cost=cost)
-            self._history_cache[hashX] = result
-
-        if isinstance(result, Exception):
-            raise result
-        return result, cost
-
-    async def get_history_op(self, hashX, limit=10, offset=0, op=None, reverse=True):
-        history_data = self._history_op_cache.get(hashX, [])
-        if not history_data:
-            history_data = []
-            txnum_padding = bytes(8 - TXNUM_LEN)
-            for _key, hist in self.db.history.db.iterator(prefix=hashX, reverse=reverse):
-                for tx_numb in util.chunks(hist, TXNUM_LEN):
-                    tx_num, = util.unpack_le_uint64(tx_numb + txnum_padding)
-                    op_data = self._tx_num_op_cache.get(tx_num)
-                    if not op_data:
-                        op_prefix_key = b'op' + util.pack_le_uint64(tx_num)
-                        tx_op = self.db.utxo_db.get(op_prefix_key)
-                        if tx_op:
-                            op_data, = util.unpack_le_uint32(tx_op)
-                            self._tx_num_op_cache[tx_num] = op_data
-                    history_data.append({"tx_num": tx_num, "op": op_data})
-            self._history_op_cache[hashX] = history_data
-        if reverse:
-            history_data.sort(key=lambda x: x['tx_num'], reverse=reverse)
-        if op:
-            history_data = list(filter(lambda x: x["op"] == op, history_data))
-        else:
-            history_data = list(filter(lambda x: x["op"], history_data))
-        return history_data[offset:limit + offset], len(history_data)
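In `get_history_op`, each stored tx number occupies TXNUM_LEN bytes and is widened to 8 bytes with zero padding before the little-endian unpack. A standalone sketch with the standard struct module; TXNUM_LEN = 5 here is an assumption about the on-disk width:

    import struct

    TXNUM_LEN = 5  # assumed on-disk width of a tx number

    def unpack_tx_nums(hist: bytes):
        # Widen each TXNUM_LEN-byte slice to 8 bytes, then unpack little-endian.
        padding = bytes(8 - TXNUM_LEN)
        for i in range(0, len(hist), TXNUM_LEN):
            tx_num, = struct.unpack('<Q', hist[i:i + TXNUM_LEN] + padding)
            yield tx_num

    packed = (1).to_bytes(TXNUM_LEN, 'little') + (258).to_bytes(TXNUM_LEN, 'little')
    assert list(unpack_tx_nums(packed)) == [1, 258]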
-    # Analyze the transaction detail by txid.
-    # See BlockProcessor.op_list for the complete op list.
-    async def get_transaction_detail(self, txid: str, height=None, tx_num=-1):
-        tx_hash = hex_str_to_hash(txid)
-        res = self._tx_detail_cache.get(tx_hash)
-        if res:
-            # the txid may be the same; this cache key should include the height as a prefix
-            self.logger.debug(f"read transaction detail from cache {txid}")
-            return res
-        if not height:
-            tx_num, height = self.db.get_tx_num_height_from_tx_hash(tx_hash)
-
-        raw_tx = self.db.get_raw_tx_by_tx_hash(tx_hash)
-        if not raw_tx:
-            raw_tx = await self.daemon_request('getrawtransaction', txid, False)
-            raw_tx = bytes.fromhex(raw_tx)
-        tx, _tx_hash = self.env.coin.DESERIALIZER(raw_tx, 0).read_tx_and_hash()
-        assert tx_hash == _tx_hash
-        ops = self.db.get_op_by_tx_num(tx_num)
-        op_raw = self.bp.op_list_vk[ops[0]] if ops else ""
-
-        operation_found_at_inputs = parse_protocols_operations_from_witness_array(tx, tx_hash, True)
-        atomicals_spent_at_inputs = self.bp.build_atomicals_spent_at_inputs_for_validation_only(tx)
-        atomicals_receive_at_outputs = self.bp.build_atomicals_receive_at_ouutput_for_validation_only(tx, tx_hash)
-        blueprint_builder = AtomicalsTransferBlueprintBuilder(
-            self.logger,
-            atomicals_spent_at_inputs,
-            operation_found_at_inputs,
-            tx_hash,
-            tx,
-            self.bp.get_atomicals_id_mint_info,
-            True
-        )
-        is_burned = blueprint_builder.are_fts_burned
-        is_cleanly_assigned = blueprint_builder.cleanly_assigned
-        # format burned_fts
-        raw_burned_fts = blueprint_builder.get_fts_burned()
-        burned_fts = {}
-        for ft_key, ft_value in raw_burned_fts.items():
-            burned_fts[location_id_bytes_to_compact(ft_key)] = ft_value
-
-        res = {
-            "txid": txid,
-            "height": height,
-            "tx_num": tx_num,
-            "info": {},
-            "transfers": {
-                "inputs": {},
-                "outputs": {},
-                "is_burned": is_burned,
-                "burned_fts": burned_fts,
-                "is_cleanly_assigned": is_cleanly_assigned
-            }
-        }
-        operation_type = operation_found_at_inputs.get("op", "") if operation_found_at_inputs else ""
-        if operation_found_at_inputs:
-            payload = operation_found_at_inputs.get("payload")
-            payload_not_none = payload or {}
-            res["info"]["payload"] = payload_not_none
-            if blueprint_builder.is_mint and operation_type in ["dmt", "ft"]:
-                expected_output_index = 0
-                txout = tx.outputs[expected_output_index]
-                location = tx_hash + util.pack_le_uint32(expected_output_index)
-                # if it was saved into the db, the mint succeeded
-                has_atomicals = self.db.get_atomicals_by_location_long_form(location)
-                if len(has_atomicals):
-                    ticker_name = payload_not_none.get("args", {}).get("mint_ticker", "")
-                    status, candidate_atomical_id, _ = self.bp.get_effective_ticker(ticker_name, self.bp.height)
-                    if status:
-                        atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
-                        res["info"] = {
-                            "atomical_id": atomical_id,
-                            "location_id": location_id_bytes_to_compact(location),
-                            "payload": payload,
-                            "outputs": {
-                                expected_output_index: [{
-                                    "address": get_address_from_output_script(txout.pk_script),
-                                    "atomical_id": atomical_id,
-                                    "type": "FT",
-                                    "index": expected_output_index,
-                                    "value": txout.value
-                                }]
-                            }
-                        }
-            elif operation_type == "nft":
-                if atomicals_receive_at_outputs:
-                    expected_output_index = 0
-                    location = tx_hash + util.pack_le_uint32(expected_output_index)
-                    txout = tx.outputs[expected_output_index]
-                    atomical_id = location_id_bytes_to_compact(
-                        atomicals_receive_at_outputs[expected_output_index][-1]["atomical_id"]
-                    )
-                    res["info"] = {
-                        "atomical_id": atomical_id,
-                        "location_id": location_id_bytes_to_compact(location),
-                        "payload": payload,
-                        "outputs": {
-                            expected_output_index: [{
-                                "address": get_address_from_output_script(txout.pk_script),
-                                "atomical_id": atomical_id,
-                                "type": "NFT",
-                                "index": expected_output_index,
-                                "value": txout.value
-                            }]
-                        }
-                    }
-        # If there is no operation_found_at_inputs, treat it as a transfer.
-        if blueprint_builder.ft_atomicals and atomicals_spent_at_inputs:
-            if not operation_type and not op_raw:
-                op_raw = "transfer"
-            for atomical_id, input_ft in blueprint_builder.ft_atomicals.items():
-                compact_atomical_id = location_id_bytes_to_compact(atomical_id)
-                for i in input_ft.input_indexes:
-                    prev_txid = hash_to_hex_str(tx.inputs[i.txin_index].prev_hash)
-                    prev_raw_tx = self.db.get_raw_tx_by_tx_hash(hex_str_to_hash(prev_txid))
-                    if not prev_raw_tx:
-                        prev_raw_tx = await self.daemon_request('getrawtransaction', prev_txid, False)
-                        prev_raw_tx = bytes.fromhex(prev_raw_tx)
-                        self.bp.general_data_cache[b'rtx' + hex_str_to_hash(prev_txid)] = prev_raw_tx
-                    prev_tx, _ = self.env.coin.DESERIALIZER(prev_raw_tx, 0).read_tx_and_hash()
-                    ft_data = {
-                        "address": get_address_from_output_script(prev_tx.outputs[tx.inputs[i.txin_index].prev_idx].pk_script),
-                        "atomical_id": compact_atomical_id,
-                        "type": "FT",
-                        "index": i.txin_index,
-                        "value": prev_tx.outputs[tx.inputs[i.txin_index].prev_idx].value
-                    }
-                    if i.txin_index not in res["transfers"]["inputs"]:
-                        res["transfers"]["inputs"][i.txin_index] = [ft_data]
-                    else:
-                        res["transfers"]["inputs"][i.txin_index].append(ft_data)
-            for k, v in blueprint_builder.ft_output_blueprint.outputs.items():
-                for atomical_id, output_ft in v['atomicals'].items():
-                    compact_atomical_id = location_id_bytes_to_compact(atomical_id)
-                    ft_data = {
-                        "address": get_address_from_output_script(tx.outputs[k].pk_script),
-                        "atomical_id": compact_atomical_id,
-                        "type": "FT",
-                        "index": k,
-                        "value": output_ft.satvalue
-                    }
-                    if k not in res["transfers"]["outputs"]:
-                        res["transfers"]["outputs"][k] = [ft_data]
-                    else:
-                        res["transfers"]["outputs"][k].append(ft_data)
-        if blueprint_builder.nft_atomicals and atomicals_spent_at_inputs:
-            if not operation_type and not op_raw:
-                op_raw = "transfer"
-            for atomical_id, input_nft in blueprint_builder.nft_atomicals.items():
-                compact_atomical_id = location_id_bytes_to_compact(atomical_id)
-                for i in input_nft.input_indexes:
-                    prev_txid = hash_to_hex_str(tx.inputs[i.txin_index].prev_hash)
-                    prev_raw_tx = self.db.get_raw_tx_by_tx_hash(hex_str_to_hash(prev_txid))
-                    if not prev_raw_tx:
-                        prev_raw_tx = await self.daemon_request('getrawtransaction', prev_txid, False)
-                        prev_raw_tx = bytes.fromhex(prev_raw_tx)
-                        self.bp.general_data_cache[b'rtx' + hex_str_to_hash(prev_txid)] = prev_raw_tx
-                    prev_tx, _ = self.env.coin.DESERIALIZER(prev_raw_tx, 0).read_tx_and_hash()
-                    nft_data = {
-                        "address": get_address_from_output_script(prev_tx.outputs[tx.inputs[i.txin_index].prev_idx].pk_script),
-                        "atomical_id": compact_atomical_id,
-                        "type": "NFT",
-                        "index": i.txin_index,
-                        "value": prev_tx.outputs[tx.inputs[i.txin_index].prev_idx].value
-                    }
-                    if i.txin_index not in res["transfers"]["inputs"]:
-                        res["transfers"]["inputs"][i.txin_index] = [nft_data]
-                    else:
-                        res["transfers"]["inputs"][i.txin_index].append(nft_data)
-            for k, v in blueprint_builder.nft_output_blueprint.outputs.items():
-                for atomical_id, output_nft in v['atomicals'].items():
-                    compact_atomical_id = location_id_bytes_to_compact(atomical_id)
-                    nft_data = {
-                        "address": get_address_from_output_script(tx.outputs[k].pk_script),
-                        "atomical_id": compact_atomical_id,
-                        "type": output_nft.type,
-                        "index": k,
-                        "value": output_nft.total_satsvalue
-                    }
-                    if k not in res["transfers"]["outputs"]:
-                        res["transfers"]["outputs"][k] = [nft_data]
-                    else:
-                        res["transfers"]["outputs"][k].append(nft_data)
-
-        atomical_id_for_payment, payment_marker_idx, _ = AtomicalsTransferBlueprintBuilder.get_atomical_id_for_payment_marker_if_found(tx)
-        if atomical_id_for_payment:
-            res["info"]["payment"] = {
-                "atomical_id": location_id_bytes_to_compact(atomical_id_for_payment),
-                "payment_marker_idx": payment_marker_idx
-            }
-
-        if op_raw and height:
-            self._tx_detail_cache[tx_hash] = res
-        res["op"] = op_raw
-
-        # Recursively encode the result.
-        return auto_encode_bytes_elements(res)
-
-    async def transaction_global(
-            self,
-            limit: int = 10,
-            offset: int = 0,
-            op_type: Optional[str] = None,
-            reverse: bool = True
-    ):
-        height = self.bp.height
-        res = []
-        count = 0
-        history_list = []
-        for current_height in range(height, self.env.coin.ATOMICALS_ACTIVATION_HEIGHT, -1):
-            txs = self.db.get_atomicals_block_txs(current_height)
-            for tx in txs:
-                tx_num, _ = self.db.get_tx_num_height_from_tx_hash(hex_str_to_hash(tx))
-                history_list.append({
-                    "tx_num": tx_num,
-                    "tx_hash": tx,
-                    "height": current_height
-                })
-                count += 1
-            if count >= offset + limit:
-                break
-        history_list.sort(key=lambda x: x['tx_num'], reverse=reverse)
-
-        for history in history_list:
-            data = await self.get_transaction_detail(history["tx_hash"], history["height"], history["tx_num"])
-            if (op_type and op_type == data["op"]) or (not op_type and data["op"]):
-                res.append(data)
-        total = len(res)
-        return {"result": res[offset:offset + limit], "total": total, "limit": limit, "offset": offset}
-
-    async def _notify_sessions(self, height, touched):
-        '''Notify sessions about height changes and touched addresses.'''
-        height_changed = height != self.notified_height
-        if height_changed:
-            await self._refresh_hsub_results(height)
-            # Invalidate all history caches since they rely on block heights
-            self._history_cache.clear()
-            # Invalidate our op cache for touched hashXs
-            op_cache = self._history_op_cache
-            for hashX in set(op_cache).intersection(touched):
-                op_cache.pop(hashX, None)
-                self.logger.info(f"refresh op cache {self.notified_height}")
-                time.sleep(2)
-                background_task = asyncio.create_task(self.get_history_op(hashX, 10, 0, None, True))
-                await background_task
-
-        for session in self.sessions:
-            if self._task_group.joined:  # this can happen during shutdown
-                self.logger.warning(f"task group already terminated. not notifying sessions.")
-                return
-            await self._task_group.spawn(session.notify, touched, height_changed)
-
-    def _ip_addr_group_name(self, session) -> Optional[str]:
-        host = session.remote_address().host
-        if isinstance(host, (IPv4Address, IPv6Address)):
-            if host.is_private:  # exempt private addresses
-                return None
-            if isinstance(host, IPv4Address):
-                subnet_size = self.env.session_group_by_subnet_ipv4
-                subnet = IPv4Network(host).supernet(prefixlen_diff=32 - subnet_size)
-                return str(subnet)
-            elif isinstance(host, IPv6Address):
-                subnet_size = self.env.session_group_by_subnet_ipv6
-                subnet = IPv6Network(host).supernet(prefixlen_diff=128 - subnet_size)
-                return str(subnet)
-        return 'unknown_addr'
-
-    def _session_group(self, name: Optional[str], weight: float) -> Optional[SessionGroup]:
-        if name is None:
-            return None
-        group = self.session_groups.get(name)
-        if not group:
-            group = SessionGroup(name, weight, set(), 0)
-            self.session_groups[name] = group
-        return group
-
-    def add_session(self, session):
-        self.session_event.set()
-        # Return the session groups
-        groups = (
-            self._session_group(self._ip_addr_group_name(session), 1.0),
-        )
-        groups = tuple(group for group in groups if group is not None)
-        self.sessions[session] = groups
-        for group in groups:
-            group.sessions.add(session)
-
-    def remove_session(self, session):
-        '''Remove a session from our sessions list if there.'''
-        self.session_event.set()
-        groups = self.sessions.pop(session)
-        for group in groups:
-            group.retained_cost += session.cost
-            group.sessions.remove(session)
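add_session groups connections by source subnet via _ip_addr_group_name, so one busy network shares a single cost pool. The supernet arithmetic can be reproduced with only the standard ipaddress module; subnet_size here is an illustrative stand-in for env.session_group_by_subnet_ipv4:

    from ipaddress import IPv4Address, IPv4Network

    host = IPv4Address('203.0.113.57')   # illustrative client address
    subnet_size = 24                     # stand-in for env.session_group_by_subnet_ipv4
    subnet = IPv4Network(host).supernet(prefixlen_diff=32 - subnet_size)
    print(subnet)                        # -> 203.0.113.0/24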
-
-
-class SessionBase(RPCSession):
-    '''Base class of ElectrumX JSON sessions.
-
-    Each session runs its tasks in asynchronous parallelism with other
-    sessions.
-    '''
-
-    MAX_CHUNK_SIZE = 2016
-    session_counter = itertools.count()
-    log_new = False
-
-    def __init__(
-            self,
-            session_mgr: 'SessionManager',
-            db: 'DB',
-            mempool: 'MemPool',
-            peer_mgr: 'PeerManager',
-            kind: str,
-            transport,
-    ):
-        connection = JSONRPCConnection(JSONRPCAutoDetect)
-        super().__init__(transport, connection=connection)
-        self.session_mgr = session_mgr
-        self.db = db
-        self.mempool = mempool
-        self.peer_mgr = peer_mgr
-        self.kind = kind  # 'RPC', 'TCP' etc.
-        self.env = session_mgr.env
-        self.coin = self.env.coin
-        self.client = 'unknown'
-        self.anon_logs = self.env.anon_logs
-        self.txs_sent = 0
-        self.log_me = SessionBase.log_new
-        self.session_id = None
-        self.daemon_request = self.session_mgr.daemon_request
-        self.session_id = next(self.session_counter)
-        context = {'conn_id': f'{self.session_id}'}
-        logger = util.class_logger(__name__, self.__class__.__name__)
-        self.logger = util.ConnectionLogger(logger, context)
-        self.logger.info(f'{self.kind} {self.remote_address_string()}, '
-                         f'{self.session_mgr.session_count():,d} total')
-        self.session_mgr.add_session(self)
-        self.recalc_concurrency()  # must be called after session_mgr.add_session
-
-    async def notify(self, touched, height_changed):
-        pass
-
-    def default_framer(self):
-        return NewlineFramer(max_size=self.env.max_recv)
-
-    def remote_address_string(self, *, for_log=True):
-        '''Returns the peer's IP address and port as a human-readable
-        string, respecting anon logs if the output is for a log.'''
-        if for_log and self.anon_logs:
-            return 'xx.xx.xx.xx:xx'
-        return str(self.remote_address())
-
-    def flags(self):
-        '''Status flags.'''
-        status = self.kind[0]
-        if self.is_closing():
-            status += 'C'
-        if self.log_me:
-            status += 'L'
-        status += str(self._incoming_concurrency.max_concurrent)
-        return status
-
-    async def connection_lost(self):
-        '''Handle client disconnection.'''
-        await super().connection_lost()
-        self.session_mgr.remove_session(self)
-        msg = ''
-        if self._incoming_concurrency.max_concurrent < self.initial_concurrent * 0.8:
-            msg += ' whilst throttled'
-        if self.send_size >= 1_000_000:
-            msg += f'.  Sent {self.send_size:,d} bytes in {self.send_count:,d} messages'
-        if msg:
-            msg = 'disconnected' + msg
-            self.logger.info(msg)
-
-    def sub_count(self):
-        return 0
-
-    async def handle_request(self, request):
-        """Handle an incoming request.  ElectrumX doesn't receive
-        notifications from client sessions.
-        """
-        if isinstance(request, Request):
-            handler = self.request_handlers.get(request.method)
-            method = request.method
-            args = request.args
-        else:
-            handler = None
-            method = 'invalid method'
-            args = None
-        self.logger.debug(f'Session request handling: [method] {method}, [args] {args}')
-
-        # If DROP_CLIENT_UNKNOWN is enabled, check whether the client identified
-        # itself by calling server.version previously.  If not, disconnect the session
-        if self.env.drop_client_unknown and method != 'server.version' and self.client == 'unknown':
-            self.logger.info(f'disconnecting because client is unknown')
-            raise ReplyAndDisconnect(
-                BAD_REQUEST, f'use server.version to identify client')
-
-        self.session_mgr._method_counts[method] += 1
-        coro = handler_invocation(handler, request)()
-        return await coro
+from electrumx.server.session.session_base import *
+from electrumx.version import electrumx_version, electrumx_version_short
 
 
 class ElectrumX(SessionBase):
-    '''A TCP server that handles incoming Electrum connections.'''
+    """A TCP server that handles incoming Electrum connections."""
 
     PROTOCOL_MIN = (1, 4)
     PROTOCOL_MAX = (1, 4, 3)
@@ -1449,9 +25,9 @@ def __init__(self, *args, **kwargs):
         self.hashX_subs = {}
         self.sv_seen = False
         self.mempool_statuses = {}
-        self.set_request_handlers(self.PROTOCOL_MIN)
+        self.set_request_handlers(self.PROTOCOL_MAX)
         self.is_peer = False
-        self.cost = 5.0 # Connection cost
+        self.cost = 5.0  # Connection cost
 
 @@ -1460,7 +36,7 @@ def protocol_min_max_strings(cls):
 
     @classmethod
     def server_features(cls, env):
-        '''Return the server features dictionary.'''
+        """Return the server features dictionary."""
         hosts_dict = {}
         for service in env.report_services:
             port_dict = hosts_dict.setdefault(str(service.host), {})
@@ -1471,7 +47,7 @@ def server_features(cls, env):
         return {
             'hosts': hosts_dict,
             'pruning': None,
-            'server_version': electrumx.version,
+            'server_version': electrumx_version,
             'protocol_min': min_str,
             'protocol_max': max_str,
             'genesis_hash': env.coin.GENESIS_HASH,
@@ -1480,13 +56,12 @@ def server_features(cls, env):
         }
 
     async def server_features_async(self):
-        self.bump_cost(0.2)
         return self.server_features(self.env)
 
     @classmethod
     def server_version_args(cls):
-        '''The arguments to a server.version RPC call to a peer.'''
-        return [electrumx.version, cls.protocol_min_max_strings()]
+        """The arguments to a server.version RPC call to a peer."""
+        return [electrumx_version, cls.protocol_min_max_strings()]
 
     def protocol_version_string(self):
         return util.version_string(self.protocol_tuple)
@@ -1509,7 +84,7 @@ def unsubscribe_hashX(self, hashX):
         return self.hashX_subs.pop(hashX, None)
 
     async def notify(self, touched, height_changed):
-        '''Wrap _notify_inner; websockets raises exceptions for unclear reasons.'''
+        """Wrap _notify_inner; websockets raises exceptions for unclear reasons."""
         try:
             async with timeout_after(30):
                 await self._notify_inner(touched, height_changed)
@@ -1520,11 +95,11 @@ async def notify(self, touched, height_changed):
             self.logger.exception('unexpected exception notifying client')
 
     async def _notify_inner(self, touched, height_changed):
-        '''Notify the client about changes to touched addresses (from mempool
+        """Notify the client about changes to touched addresses (from mempool
         updates or new blocks) and height.
-        '''
+        """
         if height_changed and self.subscribe_headers:
-            args = (await self.subscribe_headers_result(), )
+            args = (await self.subscribe_headers_result(),)
             await self.send_notification('blockchain.headers.subscribe', args)
 
         touched = touched.intersection(self.hashX_subs)
@@ -1555,33 +130,102 @@ async def _notify_inner(self, touched, height_changed):
             es = '' if len(changed) == 1 else 'es'
             self.logger.info(f'notified of {len(changed):,d} address{es}')
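set_request_handlers (next hunk) keys optional methods off the negotiated protocol tuple, so availability checks reduce to ordinary tuple comparisons. A small sketch of the pattern; ptuple is a simplified stand-in for ElectrumX's version parsing:

    def ptuple(version: str):
        # '1.4.2' -> (1, 4, 2)
        return tuple(int(part) for part in version.split('.'))

    protocol = ptuple('1.4.2')
    assert ptuple('1.4') <= protocol <= ptuple('1.4.3')
    if protocol >= (1, 4, 2):
        pass  # e.g. expose 'blockchain.scripthash.unsubscribe'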
+    def set_request_handlers(self, protocols):
+        self.protocol_tuple: Tuple[int, ...] = protocols
+        handlers = {
+            'blockchain.block.header': self.block_header,
+            'blockchain.block.headers': self.block_headers,
+            'blockchain.estimatefee': self.estimatefee,
+            'blockchain.headers.subscribe': self.headers_subscribe,
+            'blockchain.relayfee': self.relayfee,
+            'blockchain.scripthash.get_balance': self.scripthash_get_balance,
+            'blockchain.scripthash.get_history': self.scripthash_get_history,
+            'blockchain.scripthash.get_mempool': self.scripthash_get_mempool,
+            'blockchain.scripthash.listunspent': self.scripthash_listunspent,
+            'blockchain.scripthash.subscribe': self.scripthash_subscribe,
+            'blockchain.transaction.broadcast': self.transaction_broadcast,
+            'blockchain.transaction.broadcast_force': self.transaction_broadcast_force,
+            'blockchain.transaction.get': self.transaction_get,
+            'blockchain.transaction.get_merkle': self.transaction_merkle,
+            'blockchain.transaction.id_from_pos': self.transaction_id_from_pos,
+            'mempool.get_fee_histogram': self.compact_fee_histogram,
+            'server.add_peer': self.add_peer,
+            'server.banner': self.banner,
+            'server.donation_address': self.donation_address,
+            'server.features': self.server_features_async,
+            'server.peers.subscribe': self.peers_subscribe,
+            'server.ping': self.ping,
+            'server.version': self.server_version,
+            # The Atomicals era has begun #
+            'blockchain.atomicals.validate': self.transaction_broadcast_validate,
+            'blockchain.atomicals.get_ft_balances_scripthash': self.atomicals_get_ft_balances,
+            'blockchain.atomicals.get_nft_balances_scripthash': self.atomicals_get_nft_balances,
+            'blockchain.atomicals.listscripthash': self.atomicals_listscripthash,
+            'blockchain.atomicals.list': self.atomicals_list,
+            'blockchain.atomicals.get_numbers': self.atomicals_num_to_id,
+            'blockchain.atomicals.get_block_hash': self.atomicals_block_hash,
+            'blockchain.atomicals.get_block_txs': self.atomicals_block_txs,
+            'blockchain.atomicals.dump': self.atomicals_dump,
+            'blockchain.atomicals.at_location': self.atomicals_at_location,
+            'blockchain.atomicals.get_location': self.atomicals_get_location,
+            'blockchain.atomicals.get': self.atomicals_get,
+            'blockchain.atomicals.get_global': self.atomicals_get_global,
+            'blockchain.atomicals.get_state': self.atomical_get_state,
+            'blockchain.atomicals.get_state_history': self.atomical_get_state_history,
+            'blockchain.atomicals.get_events': self.atomical_get_events,
+            'blockchain.atomicals.get_tx_history': self.atomicals_get_tx_history,
+            'blockchain.atomicals.get_realm_info': self.atomicals_get_realm_info,
+            'blockchain.atomicals.get_by_realm': self.atomicals_get_by_realm,
+            'blockchain.atomicals.get_by_subrealm': self.atomicals_get_by_subrealm,
+            'blockchain.atomicals.get_by_dmitem': self.atomicals_get_by_dmitem,
+            'blockchain.atomicals.get_by_ticker': self.atomicals_get_by_ticker,
+            'blockchain.atomicals.get_by_container': self.atomicals_get_by_container,
+            'blockchain.atomicals.get_by_container_item': self.atomicals_get_by_container_item,
+            'blockchain.atomicals.get_by_container_item_validate': self.atomicals_get_by_container_item_validation,
+            'blockchain.atomicals.get_container_items': self.atomicals_get_container_items,
+            'blockchain.atomicals.get_ft_info': self.atomicals_get_ft_info,
+            'blockchain.atomicals.get_dft_mints': self.atomicals_get_dft_mints,
+            'blockchain.atomicals.find_tickers': self.atomicals_search_tickers,
+            'blockchain.atomicals.find_realms': self.atomicals_search_realms,
+            'blockchain.atomicals.find_subrealms': self.atomicals_search_subrealms,
+            'blockchain.atomicals.find_containers': self.atomicals_search_containers,
+            'blockchain.atomicals.get_holders': self.atomicals_get_holders,
+            'blockchain.atomicals.transaction': self.atomicals_transaction,
+            'blockchain.atomicals.transaction_by_height': self.transaction_by_height,
+            'blockchain.atomicals.transaction_by_atomical_id': self.transaction_by_atomical_id,
+            'blockchain.atomicals.transaction_by_scripthash': self.transaction_by_scripthash,
+        }
+        if protocols >= (1, 4, 2):
+            handlers['blockchain.scripthash.unsubscribe'] = self.scripthash_unsubscribe
+        self.request_handlers = handlers
+
     async def subscribe_headers_result(self):
-        '''The result of a header subscription or notification.'''
+        """The result of a header subscription or notification."""
         return self.session_mgr.hsub_results
 
     async def headers_subscribe(self):
-        '''Subscribe to get raw headers of new blocks.'''
+        """Subscribe to get raw headers of new blocks."""
         if not self.subscribe_headers:
             self.subscribe_headers = True
             self.bump_cost(0.25)
         return await self.subscribe_headers_result()
 
     async def add_peer(self, features):
-        '''Add a peer (but only if the peer resolves to the source).'''
+        """Add a peer (but only if the peer resolves to the source)."""
         self.is_peer = True
         self.bump_cost(100.0)
         return await self.peer_mgr.on_add_peer(features, self.remote_address())
 
     async def peers_subscribe(self):
-        '''Return the server peers as a list of (ip, host, details) tuples.'''
+        """Return the server peers as a list of (ip, host, details) tuples."""
         self.bump_cost(1.0)
         return self.peer_mgr.on_peers_subscribe(self.is_tor())
 
     async def address_status(self, hashX):
-        '''Returns an address status.
+        """Returns an address status.
 
         Status is a hex string, but must be None if there is no history.
-        '''
+        """
         # Note history is ordered and mempool unordered in electrum-server
         # For mempool, height is -1 if it has unconfirmed inputs, otherwise 0
         db_history, cost = await self.session_mgr.limited_history(hashX)
@@ -1610,8 +254,8 @@ async def address_status(self, hashX):
         return status
 
     async def subscription_address_status(self, hashX):
-        '''As for address_status, but if it can't be calculated the subscription is
-        discarded.'''
+        """As for address_status, but if it can't be calculated the subscription is
+        discarded."""
         try:
             return await self.address_status(hashX)
         except RPCError:
@@ -1619,8 +263,8 @@ async def subscription_address_status(self, hashX):
             return None
 
     async def hashX_listunspent(self, hashX):
-        '''Return the list of UTXOs of a script hash, including mempool
-        effects.'''
+        """Return the list of UTXOs of a script hash, including mempool
+        effects."""
         utxos = await self.db.all_utxos(hashX)
         utxos = sorted(utxos)
         utxos.extend(await self.mempool.unordered_UTXOs(hashX))
@@ -1640,13 +284,14 @@ async def hashX_listunspent(self, hashX):
                 atomical_id_compact = location_id_bytes_to_compact(atomical_id)
                 location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos)
                 atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id)
+
             returned_utxos.append({
                 'txid': hash_to_hex_str(utxo.tx_hash),
                 'tx_hash': hash_to_hex_str(utxo.tx_hash),
                 'index': utxo.tx_pos,
                 'tx_pos': utxo.tx_pos,
                 'vout': utxo.tx_pos,
-                'height': utxo.height, 
+                'height': utxo.height,
                 'value': utxo.value,
                 'atomicals': atomicals_basic_infos
             })
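address_status above hashes the ordered history into the status string. Per the Electrum protocol, the status is the sha256 hash, as hex, of the concatenated 'tx_hash:height:' pairs, or None when the history is empty; a minimal sketch:

    import hashlib

    def electrum_status(history):
        # history: ordered (tx_hash_hex, height) pairs; mempool entries come
        # last with height -1 (unconfirmed parents) or 0.
        if not history:
            return None
        joined = ''.join(f'{tx_hash}:{height}:' for tx_hash, height in history)
        return hashlib.sha256(joined.encode()).hexdigest()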
@@ -1730,7 +375,7 @@ async def atomical_id_get_location(self, compact_atomical_id):
 
     async def get_summary_info(self, atomical_hash_count=10):
         if atomical_hash_count and atomical_hash_count > 100000:
-            atomical_hash_count = 100000 
+            atomical_hash_count = 100000
 
         db_height = self.db.db_height
         last_block_hash = self.db.get_atomicals_block_hash(db_height)
@@ -1759,14 +404,14 @@ async def atomicals_list_get(self, limit, offset, asc):
         for atomical_id in atomicals:
             atomical = await self.atomical_id_get(location_id_bytes_to_compact(atomical_id))
             atomicals_populated.append(atomical)
-        return {'global': await self.get_summary_info(), 'result': atomicals_populated }
+        return {'global': await self.get_summary_info(), 'result': atomicals_populated}
 
     async def atomicals_num_to_id(self, limit, offset, asc):
         atomicals_num_to_id_map = await self.db.get_num_to_id(limit, offset, asc)
         atomicals_num_to_id_map_reformatted = {}
         for num, id in atomicals_num_to_id_map.items():
             atomicals_num_to_id_map_reformatted[num] = location_id_bytes_to_compact(id)
-        return {'global': await self.get_summary_info(), 'result': atomicals_num_to_id_map_reformatted }
+        return {'global': await self.get_summary_info(), 'result': atomicals_num_to_id_map_reformatted}
 
     async def atomicals_block_hash(self, height):
         if not height:
@@ -1776,7 +421,7 @@ async def atomicals_block_hash(self, height):
 
     async def atomicals_block_txs(self, height):
         tx_list = self.session_mgr.bp.get_atomicals_block_txs(height)
-        return {'global': await self.get_summary_info(), 'result': tx_list }
+        return {'global': await self.get_summary_info(), 'result': tx_list}
 
     async def hashX_subscribe(self, hashX, alias):
         # Store the subscription only after address_status succeeds
@@ -1792,7 +437,7 @@ async def get_balance(self, hashX):
         return {'confirmed': confirmed, 'unconfirmed': unconfirmed}
 
     async def scripthash_get_balance(self, scripthash):
-        '''Return the confirmed and unconfirmed balance of a scripthash.'''
+        """Return the confirmed and unconfirmed balance of a scripthash."""
         hashX = scripthash_to_hashX(scripthash)
         return await self.get_balance(hashX)
 
@@ -1814,13 +459,13 @@ async def confirmed_and_unconfirmed_history(self, hashX):
                 for tx_hash, height in history]
         return conf + await self.unconfirmed_history(hashX)
 
-    async def atomicals_listscripthash(self, scripthash, Verbose=False):
-        '''Return the list of Atomical UTXOs for an address'''
+    async def atomicals_listscripthash(self, scripthash, verbose=False):
+        """Return the list of Atomical UTXOs for an address"""
         hashX = scripthash_to_hashX(scripthash)
-        return await self.hashX_listscripthash_atomicals(hashX, Verbose)
+        return await self.hashX_listscripthash_atomicals(hashX, verbose)
 
     async def atomicals_list(self, offset, limit, asc):
-        '''Return the list of atomicals order by reverse atomical number'''
+        """Return the list of atomicals ordered by reverse atomical number"""
        return await self.atomicals_list_get(offset, limit, asc)
 
     async def atomicals_get(self, compact_atomical_id_or_atomical_number):
@@ -1831,8 +476,8 @@ async def atomicals_dump(self):
         if True:
             self.db.dump()
             return {'result': True}
-        else:
-            return {'result': False}
+        # else:
+        #     return {'result': False}
 
     async def atomicals_get_dft_mints(self, compact_atomical_id, limit=100, offset=0):
         atomical_id = compact_to_location_id_bytes(compact_atomical_id)
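scripthash_get_balance and the other scripthash endpoints above take an Electrum-protocol scripthash: the sha256 of the output script with its byte order reversed, hex-encoded. A self-contained sketch (the P2PKH script below is illustrative only):

    import hashlib

    def script_to_scripthash(script: bytes) -> str:
        # Electrum scripthash: sha256(script), reversed byte order, as hex.
        return hashlib.sha256(script).digest()[::-1].hex()

    # OP_DUP OP_HASH160 <20-byte hash> OP_EQUALVERIFY OP_CHECKSIG
    script = bytes.fromhex('76a914' + '00' * 20 + '88ac')
    print(script_to_scripthash(script))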
@@ -1841,30 +486,36 @@ async def atomicals_get_dft_mints(self, compact_atomical_id, limit=100, offset=0
 
     async def atomicals_get_ft_info(self, compact_atomical_id_or_atomical_number):
         compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number)
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_ft_info(compact_atomical_id)}
+        return {'global': await self.get_summary_info(),
+                'result': await self.atomical_id_get_ft_info(compact_atomical_id)}
 
     async def atomicals_get_global(self, hashes=10):
         return {'global': await self.get_summary_info(hashes)}
 
     async def atomicals_get_location(self, compact_atomical_id_or_atomical_number):
         compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number)
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_location(compact_atomical_id)}
+        return {'global': await self.get_summary_info(),
+                'result': await self.atomical_id_get_location(compact_atomical_id)}
 
     async def atomical_get_state(self, compact_atomical_id_or_atomical_number, Verbose=False):
         compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number)
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_state(compact_atomical_id, Verbose)}
+        return {'global': await self.get_summary_info(),
+                'result': await self.atomical_id_get_state(compact_atomical_id, Verbose)}
 
     async def atomical_get_state_history(self, compact_atomical_id_or_atomical_number):
         compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number)
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_state_history(compact_atomical_id)}
+        return {'global': await self.get_summary_info(),
+                'result': await self.atomical_id_get_state_history(compact_atomical_id)}
 
     async def atomical_get_events(self, compact_atomical_id_or_atomical_number):
         compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number)
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_events(compact_atomical_id)}
+        return {'global': await self.get_summary_info(),
+                'result': await self.atomical_id_get_events(compact_atomical_id)}
 
     def atomical_resolve_id(self, compact_atomical_id_or_atomical_number):
         compact_atomical_id = compact_atomical_id_or_atomical_number
-        if not isinstance(compact_atomical_id_or_atomical_number, int) and is_compact_atomical_id(compact_atomical_id_or_atomical_number):
+        if not isinstance(compact_atomical_id_or_atomical_number, int) and is_compact_atomical_id(
+                compact_atomical_id_or_atomical_number):
             assert_atomical_id(compact_atomical_id)
         else:
             found_atomical_id = self.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number)
@@ -1874,21 +525,26 @@ def atomical_resolve_id(self, compact_atomical_id_or_atomical_number):
         return compact_atomical_id
 
     async def atomicals_get_tx_history(self, compact_atomical_id_or_atomical_number):
-        '''Return the history of an Atomical```
+        """Return the history of an Atomical.
         atomical_id: the mint transaction hash + 'i' of the atomical id
         verbose: to determine whether to print extended information
-        '''
+        """
         compact_atomical_id = compact_atomical_id_or_atomical_number
-        if isinstance(compact_atomical_id_or_atomical_number, int) != True and is_compact_atomical_id(compact_atomical_id_or_atomical_number):
+        if isinstance(compact_atomical_id_or_atomical_number, int) != True and is_compact_atomical_id(
+                compact_atomical_id_or_atomical_number):
             assert_atomical_id(compact_atomical_id)
         else:
-            compact_atomical_id = location_id_bytes_to_compact(self.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number))
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_tx_history(compact_atomical_id)}
+            compact_atomical_id = location_id_bytes_to_compact(
+                self.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number))
+        return {'global': await self.get_summary_info(),
+                'result': await self.atomical_id_get_tx_history(compact_atomical_id)}
 
     async def atomicals_get_by_ticker(self, ticker):
         height = self.session_mgr.bp.height
         status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_ticker(ticker, height)
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries,
+                                                               self.session_mgr.bp.build_atomical_id_to_candidate_map(
+                                                                   all_entries))
 
         if candidate_atomical_id:
             candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
@@ -1907,12 +563,15 @@ async def atomicals_get_by_ticker(self, ticker):
         return {
             'result': return_result
         }
+
     async def atomicals_get_by_container(self, container):
         if not isinstance(container, str):
             raise RPCError(BAD_REQUEST, f'empty container')
         height = self.session_mgr.bp.height
         status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, height)
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries,
+                                                               self.session_mgr.bp.build_atomical_id_to_candidate_map(
+                                                                   all_entries))
 
         if candidate_atomical_id:
             candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
@@ -1954,7 +613,8 @@ def auto_populate_container_dmint_items_fields(self, items):
     async def atomicals_get_container_items(self, container, limit, offset):
         if not isinstance(container, str):
             raise RPCError(BAD_REQUEST, f'empty container')
-        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, self.session_mgr.bp.height)
+        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container,
                                                                                                  self.session_mgr.bp.height)
         found_atomical_id = None
         if status == 'verified':
             found_atomical_id = candidate_atomical_id
@@ -2006,15 +666,20 @@ async def atomicals_get_by_container_item(self, container, item_name):
         height = self.session_mgr.bp.height
         status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, height)
         found_atomical_id = None
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries,
+                                                               self.session_mgr.bp.build_atomical_id_to_candidate_map(
+                                                                   all_entries))
         if status == 'verified':
             found_atomical_id = candidate_atomical_id
         else:
             self.logger.info(f'formatted_entries {formatted_entries}')
             raise RPCError(BAD_REQUEST, f'Container does not exist')
-        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(found_atomical_id, item_name, height)
+        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(found_atomical_id,
+                                                                                              item_name, height)
         found_item_atomical_id = None
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries,
                                                               self.session_mgr.bp.build_atomical_id_to_candidate_map(
                                                                    all_entries))
         if candidate_atomical_id:
             candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
         if status == 'verified':
@@ -2030,13 +695,16 @@ async def atomicals_get_by_container_item(self, container, item_name):
             'result': return_result
         }
 
-    async def atomicals_get_by_container_item_validation(self, container, item_name, bitworkc, bitworkr, main_name, main_hash, proof, check_without_sealed):
+    async def atomicals_get_by_container_item_validation(self, container, item_name, bitworkc, bitworkr, main_name,
+                                                         main_hash, proof, check_without_sealed):
         if not isinstance(container, str):
             raise RPCError(BAD_REQUEST, f'empty container')
         height = self.session_mgr.bp.height
         status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, height)
         found_parent_atomical_id = None
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries,
                                                               self.session_mgr.bp.build_atomical_id_to_candidate_map(
                                                                    all_entries))
         if status == 'verified':
             found_parent_atomical_id = candidate_atomical_id
         else:
@@ -2056,9 +724,12 @@ async def atomicals_get_by_container_item_validation(self, container, item_name,
             raise RPCError(BAD_REQUEST, f'Container dmint status is invalid')
 
         dmint = container_dmint_status.get('dmint')
-        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(found_parent_atomical_id, item_name, height)
+        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(found_parent_atomical_id,
+                                                                                              item_name, height)
         found_item_atomical_id = None
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries,
                                                               self.session_mgr.bp.build_atomical_id_to_candidate_map(
                                                                    all_entries))
         if candidate_atomical_id:
             candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
         if status == 'verified':
@@ -2068,8 +739,12 @@ async def atomicals_get_by_container_item_validation(self, container, item_name,
         if not proof or not isinstance(proof, list) or len(proof) == 0:
             raise RPCError(BAD_REQUEST, f'Proof must be provided')
 
-        applicable_rule, state_at_height = self.session_mgr.bp.get_applicable_rule_by_height(found_parent_atomical_id, item_name, height - MINT_SUBNAME_RULES_BECOME_EFFECTIVE_IN_BLOCKS, DMINT_PATH)
-        proof_valid, target_vector, target_hash = validate_merkle_proof_dmint(dmint['merkle'], item_name, bitworkc, bitworkr, main_name, main_hash, proof)
+        applicable_rule, state_at_height = self.session_mgr.bp.get_applicable_rule_by_height(found_parent_atomical_id,
+                                                                                             item_name,
+                                                                                             height - MINT_SUBNAME_RULES_BECOME_EFFECTIVE_IN_BLOCKS,
+                                                                                             DMINT_PATH)
+        proof_valid, target_vector, target_hash = validate_merkle_proof_dmint(dmint['merkle'], item_name, bitworkc,
+                                                                              bitworkr, main_name, main_hash, proof)
         if applicable_rule and applicable_rule.get('matched_rule'):
             applicable_rule = applicable_rule.get('matched_rule')
 
@@ -2092,7 +767,9 @@ async def atomicals_get_by_container_item_validation(self, container, item_name,
     async def atomicals_get_by_realm(self, name):
         height = self.session_mgr.bp.height
         status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_realm(name, height)
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries,
                                                               self.session_mgr.bp.build_atomical_id_to_candidate_map(
                                                                    all_entries))
 
         if candidate_atomical_id:
             candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
@@ -2116,8 +793,11 @@ async def atomicals_get_by_subrealm(self, parent_compact_atomical_id_or_atomical
         height = self.session_mgr.bp.height
         compact_atomical_id_parent = self.atomical_resolve_id(parent_compact_atomical_id_or_atomical_number)
         atomical_id_parent = compact_to_location_id_bytes(compact_atomical_id_parent)
-        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_subrealm(atomical_id_parent, name, height)
+        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_subrealm(atomical_id_parent,
+                                                                                                name, height)
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries,
                                                               self.session_mgr.bp.build_atomical_id_to_candidate_map(
                                                                    all_entries))
 
         if candidate_atomical_id:
             candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
@@ -2141,8 +821,11 @@ async def atomicals_get_by_dmitem(self, parent_compact_atomical_id_or_atomical_n
         height = self.session_mgr.bp.height
         compact_atomical_id_parent = self.atomical_resolve_id(parent_compact_atomical_id_or_atomical_number)
         atomical_id_parent = compact_to_location_id_bytes(compact_atomical_id_parent)
-        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(atomical_id_parent, name, height)
+        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(atomical_id_parent, name,
+                                                                                              height)
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries,
                                                               self.session_mgr.bp.build_atomical_id_to_candidate_map(
                                                                    all_entries))
 
         if candidate_atomical_id:
             candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
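atomicals_get_realm_info (next hunk) resolves a full name like 'a.b.c' component by component: the first part through the top-level realm index, every later part as a subrealm of the previously verified parent, stopping at the first unverified component. The walk reduces to a loop of this shape; resolve_top and resolve_sub are hypothetical stand-ins for get_effective_realm and get_effective_subrealm:

    def resolve_realm_path(full_name, resolve_top, resolve_sub):
        # Returns the verified (name_part, atomical_id) pairs, outermost first.
        path, parent = [], None
        for level, part in enumerate(full_name.split('.')):
            status, found, _candidates = (resolve_top(part) if level == 0
                                          else resolve_sub(parent, part))
            if status != 'verified':
                break                    # the rest of the name is missing
            path.append((part, found))
            parent = found
        return path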
@@ -2177,10 +860,12 @@ async def atomicals_get_realm_info(self, full_name, Verbose=False):
         height = self.session_mgr.bp.height
         for name_part in split_names:
             if level == 0:
-                realm_status, last_found_realm, latest_all_entries_candidates = self.session_mgr.bp.get_effective_realm(name_part, height)
+                realm_status, last_found_realm, latest_all_entries_candidates = self.session_mgr.bp.get_effective_realm(
+                    name_part, height)
             else:
                 self.logger.info(f'atomicals_get_realm_info {last_found_realm} {name_part}')
-                realm_status, last_found_realm, latest_all_entries_candidates = self.session_mgr.bp.get_effective_subrealm(last_found_realm, name_part, height)
+                realm_status, last_found_realm, latest_all_entries_candidates = self.session_mgr.bp.get_effective_subrealm(
+                    last_found_realm, name_part, height)
             # stop when a realm component is not found
             if realm_status != 'verified':
                 break
@@ -2201,23 +886,26 @@ async def atomicals_get_realm_info(self, full_name, Verbose=False):
                 is_first_name_part = False
             else:
                 joined_name += '.'
-            joined_name += name_element['name_part'] 
+            joined_name += name_element['name_part']
         # Nothing was found
         realms_path_len = len(realms_path)
         if realms_path_len == 0:
             return {'result': {
-                    'atomical_id': None,
-                    'top_level_realm_atomical_id': None,
-                    'top_level_realm_name': None,
-                    'nearest_parent_realm_atomical_id': None,
-                    'nearest_parent_realm_name': None,
-                    'request_full_realm_name': full_name,
-                    'found_full_realm_name': None,
-                    'missing_name_parts': full_name,
-                    'candidates': format_name_type_candidates_to_rpc(latest_all_entries_candidates, self.session_mgr.bp.build_atomical_id_to_candidate_map(latest_all_entries_candidates)) }
-            }
+                'atomical_id': None,
+                'top_level_realm_atomical_id': None,
+                'top_level_realm_name': None,
+                'nearest_parent_realm_atomical_id': None,
+                'nearest_parent_realm_name': None,
+                'request_full_realm_name': full_name,
+                'found_full_realm_name': None,
+                'missing_name_parts': full_name,
+                'candidates': format_name_type_candidates_to_rpc(latest_all_entries_candidates,
+                                                                 self.session_mgr.bp.build_atomical_id_to_candidate_map(
+                                                                     latest_all_entries_candidates))}
+            }
         # Populate the subrealm minting rules for a parent atomical
         that = self
+
         def populate_rules_response_struct(parent_atomical_id, struct_to_populate, Verbose):
             current_height = that.session_mgr.bp.height
             subrealm_mint_mod_history = that.session_mgr.bp.get_mod_history(parent_atomical_id, current_height)
@@ -2231,7 +919,9 @@ def populate_rules_response_struct(parent_atomical_id, struct_to_populate, Verbo
             }
             if current_height_rules_list and len(current_height_rules_list) > 0:
                 nearest_parent_realm_subrealm_mint_allowed = True
-            struct_to_populate['nearest_parent_realm_subrealm_mint_allowed'] = nearest_parent_realm_subrealm_mint_allowed
+            struct_to_populate[
+                'nearest_parent_realm_subrealm_mint_allowed'] = nearest_parent_realm_subrealm_mint_allowed
+
         #
         #
         #
@@ -2251,7 +941,10 @@ def populate_rules_response_struct(parent_atomical_id, struct_to_populate, Verbo
             nearest_parent_realm_atomical_id = top_level_realm
             nearest_parent_realm_name = top_level_realm_name
         final_subrealm_name = split_names[-1]
-        applicable_rule_map = self.session_mgr.bp.build_applicable_rule_map(latest_all_entries_candidates, compact_to_location_id_bytes(nearest_parent_realm_atomical_id), final_subrealm_name)
+        applicable_rule_map = self.session_mgr.bp.build_applicable_rule_map(latest_all_entries_candidates,
+                                                                            compact_to_location_id_bytes(
+                                                                                nearest_parent_realm_atomical_id),
+                                                                            final_subrealm_name)
         return_struct = {
             'atomical_id': realms_path[-1]['atomical_id'],
             'top_level_realm_atomical_id': top_level_realm,
@@ -2261,9 +954,12 @@ def populate_rules_response_struct(parent_atomical_id, struct_to_populate, Verbo
             'request_full_realm_name': full_name,
             'found_full_realm_name': joined_name,
             'missing_name_parts': None,
-            'candidates': format_name_type_candidates_to_rpc(latest_all_entries_candidates, self.session_mgr.bp.build_atomical_id_to_candidate_map(latest_all_entries_candidates))
+            'candidates': format_name_type_candidates_to_rpc(latest_all_entries_candidates,
+                                                             self.session_mgr.bp.build_atomical_id_to_candidate_map(
+                                                                 latest_all_entries_candidates))
         }
-        populate_rules_response_struct(compact_to_location_id_bytes(nearest_parent_realm_atomical_id), return_struct, Verbose)
+        populate_rules_response_struct(compact_to_location_id_bytes(nearest_parent_realm_atomical_id),
+                                       return_struct, Verbose)
         return {'result': return_struct}
 
     # The number of realms and components do not match, because at least the top level realm or an intermediate subrealm was found
@@ -2281,9 +977,12 @@ def populate_rules_response_struct(parent_atomical_id, struct_to_populate, Verbo
             nearest_parent_realm_atomical_id = top_level_realm
             nearest_parent_realm_name = top_level_realm_name
 
-        missing_name_parts = '.'.join(split_names[ len(realms_path):])
+        missing_name_parts = '.'.join(split_names[len(realms_path):])
         final_subrealm_name = split_names[-1]
-        applicable_rule_map = self.session_mgr.bp.build_applicable_rule_map(latest_all_entries_candidates, compact_to_location_id_bytes(nearest_parent_realm_atomical_id), final_subrealm_name)
+        applicable_rule_map = self.session_mgr.bp.build_applicable_rule_map(latest_all_entries_candidates,
+                                                                            compact_to_location_id_bytes(
+                                                                                nearest_parent_realm_atomical_id),
+                                                                            final_subrealm_name)
         return_struct = {
             'atomical_id': None,
             'top_level_realm_atomical_id': top_level_realm,
@@ -2294,32 +993,44 @@ def populate_rules_response_struct(parent_atomical_id, struct_to_populate, Verbo
             'found_full_realm_name': joined_name,
             'missing_name_parts': missing_name_parts,
             'final_subrealm_name': final_subrealm_name,
-            'candidates': format_name_type_candidates_to_rpc_for_subname(latest_all_entries_candidates, self.session_mgr.bp.build_atomical_id_to_candidate_map(latest_all_entries_candidates))
+            'candidates': format_name_type_candidates_to_rpc_for_subname(latest_all_entries_candidates,
+                                                                         self.session_mgr.bp.build_atomical_id_to_candidate_map(
+                                                                             latest_all_entries_candidates))
         }
         if Verbose:
-            populate_rules_response_struct(compact_to_location_id_bytes(nearest_parent_realm_atomical_id), return_struct, Verbose)
+            populate_rules_response_struct(compact_to_location_id_bytes(nearest_parent_realm_atomical_id),
+                                           return_struct, Verbose)
         return {'result': return_struct}
 
     # Perform a search for tickers, containers, and realms
-    def atomicals_search_name_template(self, db_prefix, name_type_str, parent_prefix=None, prefix=None, Reverse=False, Limit=1000, Offset=0, is_verified_only=False):
+    def atomicals_search_name_template(self, db_prefix, name_type_str, parent_prefix=None, prefix=None, Reverse=False,
+                                       Limit=1000, Offset=0, is_verified_only=False):
         db_entries = self.db.get_name_entries_template_limited(db_prefix, parent_prefix, prefix, Reverse, Limit, Offset)
         formatted_results = []
         for item in db_entries:
+            status = None
             if name_type_str == "ticker":
-                status, _, _ = self.session_mgr.bp.get_effective_name_template(b'tick', item['name'], self.session_mgr.bp.height, self.session_mgr.bp.ticker_data_cache)
+                status, _, _ = self.session_mgr.bp.get_effective_name_template(b'tick', item['name'],
+                                                                               self.session_mgr.bp.height,
+                                                                               self.session_mgr.bp.ticker_data_cache)
             elif name_type_str == "realm":
-                status, _, _ = self.session_mgr.bp.get_effective_name_template(b'rlm', item['name'], self.session_mgr.bp.height, self.session_mgr.bp.realm_data_cache)
+                status, _, _ = self.session_mgr.bp.get_effective_name_template(b'rlm', item['name'],
                                                                               self.session_mgr.bp.height,
                                                                               self.session_mgr.bp.realm_data_cache)
             elif name_type_str == "collection":
-                status, _, _ = self.session_mgr.bp.get_effective_name_template(b'co', item['name'], self.session_mgr.bp.height, self.session_mgr.bp.container_data_cache)
+                status, _, _ = self.session_mgr.bp.get_effective_name_template(b'co', item['name'],
                                                                               self.session_mgr.bp.height,
                                                                               self.session_mgr.bp.container_data_cache)
             elif name_type_str == "subrealm":
-                status, _, _ = self.session_mgr.bp.get_effective_subrealm(parent_prefix, item['name'], self.session_mgr.bp.height)
+                status, _, _ = self.session_mgr.bp.get_effective_subrealm(parent_prefix, item['name'],
                                                                          self.session_mgr.bp.height)
             obj = {
                 'atomical_id': location_id_bytes_to_compact(item['atomical_id']),
-                'tx_num': item['tx_num']
+                'tx_num': item['tx_num'],
+                name_type_str + '_hex': item['name_hex'],
+                name_type_str: item['name'],
+                'status': status,
             }
-            obj[name_type_str + '_hex'] = item['name_hex']
-            obj[name_type_str] = item['name']
-            obj['status'] = status
             if is_verified_only and status == "verified":
                 formatted_results.append(obj)
             elif not is_verified_only:
@@ -2329,32 +1040,42 @@ def atomicals_search_name_template(self, db_prefix, name_type_str, parent_prefix
     async def atomicals_search_tickers(self, prefix=None, Reverse=False, Limit=100, Offset=0, is_verified_only=False):
         if isinstance(prefix, str):
             prefix = prefix.encode()
-        return self.atomicals_search_name_template(b'tick', 'ticker', None, prefix, Reverse, Limit, Offset, is_verified_only)
+        return self.atomicals_search_name_template(b'tick', 'ticker', None, prefix, Reverse, Limit, Offset,
+                                                   is_verified_only)
 
     async def atomicals_search_realms(self, prefix=None, Reverse=False, Limit=100, Offset=0, is_verified_only=False):
         if isinstance(prefix, str):
             prefix = prefix.encode()
-        return self.atomicals_search_name_template(b'rlm', 'realm', None, prefix, Reverse, Limit, Offset, is_verified_only)
+        return self.atomicals_search_name_template(b'rlm', 'realm', None, prefix, Reverse, Limit, Offset,
+                                                   is_verified_only)
 
-    async def atomicals_search_subrealms(self, parent_realm_id_compact, prefix=None, Reverse=False, Limit=100, Offset=0, is_verified_only=False):
+    async def atomicals_search_subrealms(self, parent_realm_id_compact, prefix=None, Reverse=False, Limit=100, Offset=0,
+                                         is_verified_only=False):
         parent_realm_id_long_form = compact_to_location_id_bytes(parent_realm_id_compact)
         if isinstance(prefix, str):
             prefix = prefix.encode()
-        return self.atomicals_search_name_template(b'srlm', 'subrealm', parent_realm_id_long_form, prefix, Reverse, Limit, Offset, is_verified_only)
+        return self.atomicals_search_name_template(b'srlm', 'subrealm', parent_realm_id_long_form, prefix, Reverse,
                                                   Limit, Offset, is_verified_only)
 
-    async def atomicals_search_containers(self, prefix=None, Reverse=False, Limit=100, Offset=0, is_verified_only=False):
+    async def atomicals_search_containers(self, prefix=None, Reverse=False, Limit=100, Offset=0,
+                                          is_verified_only=False):
         if isinstance(prefix, str):
             prefix = prefix.encode()
-        return self.atomicals_search_name_template(b'co', 'collection', None, prefix, Reverse, Limit, Offset, is_verified_only)
+        return self.atomicals_search_name_template(b'co', 'collection', None, prefix, Reverse, Limit, Offset,
                                                   is_verified_only)
compact_to_location_id_bytes(compact_location_id), + atomical_id + ) atomical_basic_infos.append(atomical_basic_info) return { 'location_info': atomicals_found_at_location['location_info'], @@ -2362,18 +1083,18 @@ async def atomicals_at_location(self, compact_location_id): } async def atomicals_get_ft_balances(self, scripthash): - '''Return the FT balances for a scripthash address''' + """Return the FT balances for a scripthash address""" hashX = scripthash_to_hashX(scripthash) return await self.hashX_ft_balances_atomicals(hashX) async def atomicals_get_nft_balances(self, scripthash): - '''Return the NFT balances for a scripthash address''' + """Return the NFT balances for a scripthash address""" hashX = scripthash_to_hashX(scripthash) return await self.hashX_nft_balances_atomicals(hashX) async def atomicals_get_holders(self, compact_atomical_id, limit=50, offset=0): - '''Return the holder by a specific location id``` - ''' + """Return the holder by a specific location id``` + """ formatted_results = [] atomical_id = compact_to_location_id_bytes(compact_atomical_id) atomical = await self.atomical_id_get(compact_atomical_id) @@ -2386,7 +1107,7 @@ async def atomicals_get_holders(self, compact_atomical_id, limit=50, offset=0): if max_supply < 0: mint_amount = atomical.get("mint_info", {}).get("args", {}).get("mint_amount") max_supply = DFT_MINT_MAX_MAX_COUNT_DENSITY * mint_amount - for holder in atomical.get("holders", [])[offset:offset+limit]: + for holder in atomical.get("holders", [])[offset:offset + limit]: percent = holder['holding'] / max_supply formatted_results.append({ "percent": percent, @@ -2394,7 +1115,7 @@ async def atomicals_get_holders(self, compact_atomical_id, limit=50, offset=0): "holding": holder["holding"] }) elif atomical["type"] == "NFT": - for holder in atomical.get("holders", [])[offset:offset+limit]: + for holder in atomical.get("holders", [])[offset:offset + limit]: formatted_results.append({ "address": get_address_from_output_script(bytes.fromhex(holder['script'])), "holding": holder["holding"] @@ -2449,16 +1170,18 @@ async def hashX_ft_balances_atomicals(self, hashX): return_struct['balances'][atomical_id_compact]['ticker'] = atomical_id_basic_info.get('$ticker') return_struct['balances'][atomical_id_compact]['confirmed'] = 0 if returned_utxo['height'] > 0: - return_struct['balances'][atomical_id_compact]['confirmed'] += returned_utxo['atomicals'][atomical_id_compact] + return_struct['balances'][atomical_id_compact]['confirmed'] += returned_utxo['atomicals'][ + atomical_id_compact] return return_struct async def hashX_nft_balances_atomicals(self, hashX): + Verbose = False utxos = await self.db.all_utxos(hashX) utxos = sorted(utxos) # Comment out the utxos for now and add it in later # utxos.extend(await self.mempool.unordered_UTXOs(hashX)) self.bump_cost(1.0 + len(utxos) / 50) - spends = [] # await self.mempool.potential_spends(hashX) + spends = [] # await self.mempool.potential_spends(hashX) returned_utxos = [] atomicals_id_map = {} for utxo in utxos: @@ -2466,11 +1189,11 @@ async def hashX_nft_balances_atomicals(self, hashX): continue atomicals = self.db.get_atomicals_by_utxo(utxo, True) atomicals_basic_infos = {} - for atomical_id in atomicals: + for atomical_id in atomicals: # This call is efficient in that it's cached underneath. # Now we only show the atomical id and its corresponding value # because it can always be fetched separately which is more efficient. 
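The FT branch of `atomicals_get_holders` below reconstructs the supply cap when `$max_supply` is negative (density-style mints) before computing each holder's share over the `offset`/`limit` window. A condensed sketch; `density_max_count` is a hypothetical placeholder for `DFT_MINT_MAX_MAX_COUNT_DENSITY`:

    def format_ft_holders(atomical: dict, limit=50, offset=0,
                          density_max_count=21_000_000):
        # Density-style mints report a negative $max_supply, so the cap is
        # reconstructed from the per-mint amount, as in the handler below.
        max_supply = atomical.get('$max_supply', 0)
        if max_supply < 0:
            mint_amount = atomical['mint_info']['args']['mint_amount']
            max_supply = density_max_count * mint_amount
        results = []
        for holder in atomical.get('holders', [])[offset:offset + limit]:
            results.append({
                'percent': holder['holding'] / max_supply,
                'script': holder['script'],
                'holding': holder['holding'],
            })
        return results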
- atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) + atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) atomical_id_compact = location_id_bytes_to_compact(atomical_id) atomicals_id_map[atomical_id_compact] = atomical_basic_info location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) @@ -2492,42 +1215,55 @@ async def hashX_nft_balances_atomicals(self, hashX): for atomical_id_entry_compact in returned_utxo['atomicals']: atomical_id_basic_info = atomicals_id_map[atomical_id_entry_compact] atomical_id_compact = atomical_id_basic_info['atomical_id'] - assert(atomical_id_compact == atomical_id_entry_compact) + assert (atomical_id_compact == atomical_id_entry_compact) if atomical_id_basic_info.get('type') == 'NFT': if return_struct['balances'].get(atomical_id_compact) is None: return_struct['balances'][atomical_id_compact] = {} return_struct['balances'][atomical_id_compact]['id'] = atomical_id_compact return_struct['balances'][atomical_id_compact]['confirmed'] = 0 if atomical_id_basic_info.get('subtype'): - return_struct['balances'][atomical_id_compact]['subtype'] = atomical_id_basic_info.get('subtype') + return_struct['balances'][atomical_id_compact]['subtype'] = atomical_id_basic_info.get( + 'subtype') if atomical_id_basic_info.get('$request_container'): - return_struct['balances'][atomical_id_compact]['request_container'] = atomical_id_basic_info.get('$request_container') + return_struct['balances'][atomical_id_compact][ + 'request_container'] = atomical_id_basic_info.get('$request_container') if atomical_id_basic_info.get('$container'): - return_struct['balances'][atomical_id_compact]['container'] = atomical_id_basic_info.get('$container') + return_struct['balances'][atomical_id_compact]['container'] = atomical_id_basic_info.get( + '$container') if atomical_id_basic_info.get('$dmitem'): return_struct['balances'][atomical_id_compact]['dmitem'] = atomical_id_basic_info.get('$dmitem') if atomical_id_basic_info.get('$request_dmitem'): - return_struct['balances'][atomical_id_compact]['request_dmitem'] = atomical_id_basic_info.get('$request_dmitem') + return_struct['balances'][atomical_id_compact]['request_dmitem'] = atomical_id_basic_info.get( + '$request_dmitem') if atomical_id_basic_info.get('$realm'): return_struct['balances'][atomical_id_compact]['realm'] = atomical_id_basic_info.get('$realm') if atomical_id_basic_info.get('$request_realm'): - return_struct['balances'][atomical_id_compact]['request_realm'] = atomical_id_basic_info.get('$request_realm') + return_struct['balances'][atomical_id_compact]['request_realm'] = atomical_id_basic_info.get( + '$request_realm') if atomical_id_basic_info.get('$subrealm'): - return_struct['balances'][atomical_id_compact]['subrealm'] = atomical_id_basic_info.get('$subrealm') + return_struct['balances'][atomical_id_compact]['subrealm'] = atomical_id_basic_info.get( + '$subrealm') if atomical_id_basic_info.get('$request_subrealm'): - return_struct['balances'][atomical_id_compact]['request_subrealm'] = atomical_id_basic_info.get('$request_subrealm') + return_struct['balances'][atomical_id_compact]['request_subrealm'] = atomical_id_basic_info.get( + '$request_subrealm') if atomical_id_basic_info.get('$full_realm_name'): - return_struct['balances'][atomical_id_compact]['full_realm_name'] = atomical_id_basic_info.get('$full_realm_name') + return_struct['balances'][atomical_id_compact]['full_realm_name'] = atomical_id_basic_info.get( + 
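The balance loops below repeatedly guard with `if return_struct['balances'].get(id) is None` before initialising an entry. A `defaultdict` sketch of the same accumulation, assuming each returned UTXO carries a `height` and an `atomicals` map of compact id to value, as in the surrounding code:

    from collections import defaultdict

    def accumulate_confirmed(returned_utxos, ):
        # Each atomical id is initialised exactly once by the factory,
        # removing the per-field existence checks.
        balances = defaultdict(lambda: {'confirmed': 0})
        for utxo in returned_utxos:
            if utxo['height'] <= 0:
                continue  # unconfirmed outputs do not add to 'confirmed'
            for atomical_id, value in utxo['atomicals'].items():
                entry = balances[atomical_id]
                entry['id'] = atomical_id
                entry['confirmed'] += value
        return dict(balances)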
'$full_realm_name') if atomical_id_basic_info.get('$parent_container'): - return_struct['balances'][atomical_id_compact]['parent_container'] = atomical_id_basic_info.get('$parent_container') + return_struct['balances'][atomical_id_compact]['parent_container'] = atomical_id_basic_info.get( + '$parent_container') if atomical_id_basic_info.get('$parent_realm'): - return_struct['balances'][atomical_id_compact]['parent_realm'] = atomical_id_basic_info.get('$parent_realm') + return_struct['balances'][atomical_id_compact]['parent_realm'] = atomical_id_basic_info.get( + '$parent_realm') if atomical_id_basic_info.get('$parent_container_name'): - return_struct['balances'][atomical_id_compact]['parent_container_name'] = atomical_id_basic_info.get('$parent_container_name') + return_struct['balances'][atomical_id_compact][ + 'parent_container_name'] = atomical_id_basic_info.get('$parent_container_name') if atomical_id_basic_info.get('$bitwork'): - return_struct['balances'][atomical_id_compact]['bitwork'] = atomical_id_basic_info.get('$bitwork') + return_struct['balances'][atomical_id_compact]['bitwork'] = atomical_id_basic_info.get( + '$bitwork') if atomical_id_basic_info.get('$parents'): - return_struct['balances'][atomical_id_compact]['parents'] = atomical_id_basic_info.get('$parents') + return_struct['balances'][atomical_id_compact]['parents'] = atomical_id_basic_info.get( + '$parents') if returned_utxo['height'] > 0: return_struct['balances'][atomical_id_compact]['confirmed'] += returned_utxo['atomicals'][atomical_id_compact] return return_struct @@ -2550,7 +1286,8 @@ async def hashX_listscripthash_atomicals(self, hashX, Verbose=False): # This call is efficient in that it's cached underneath. # Now we only show the atomical id and its corresponding value # because it can always be fetched separately which is more efficient. 
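These long if-chains all do the same thing: copy an optional `$`-prefixed field from the basic info into the response under its unprefixed name. A data-driven sketch of that copy, with the field list abbreviated to names appearing in this hunk:

    OPTIONAL_FIELDS = (
        'subtype', '$request_container', '$container', '$dmitem',
        '$request_dmitem', '$realm', '$request_realm', '$subrealm',
        '$request_subrealm', '$full_realm_name', '$parent_container',
        '$parent_realm', '$parent_container_name', '$bitwork', '$parents',
    )

    def copy_basic_info(basic_info: dict, out: dict) -> None:
        # Truthiness check matches the .get(...) guards above; the leading
        # '$' sigil is dropped in the response key.
        for field in OPTIONAL_FIELDS:
            value = basic_info.get(field)
            if value:
                out[field.lstrip('$')] = value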
- atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) + atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id( + atomical_id) atomical_id_compact = location_id_bytes_to_compact(atomical_id) atomicals_id_map[atomical_id_compact] = atomical_basic_info location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) @@ -2586,63 +1323,98 @@ async def hashX_listscripthash_atomicals(self, hashX, Verbose=False): } if atomical_id_basic_info.get('$realm'): return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['request_realm_status'] = atomical_id_basic_info.get('$request_realm_status') - return_struct['atomicals'][atomical_id_ref]['request_realm'] = atomical_id_basic_info.get('$request_realm') + return_struct['atomicals'][atomical_id_ref][ + 'request_realm_status'] = atomical_id_basic_info.get('$request_realm_status') + return_struct['atomicals'][atomical_id_ref]['request_realm'] = atomical_id_basic_info.get( + '$request_realm') return_struct['atomicals'][atomical_id_ref]['realm'] = atomical_id_basic_info.get('$realm') - return_struct['atomicals'][atomical_id_ref]['full_realm_name'] = atomical_id_basic_info.get('$full_realm_name') + return_struct['atomicals'][atomical_id_ref]['full_realm_name'] = atomical_id_basic_info.get( + '$full_realm_name') elif atomical_id_basic_info.get('$subrealm'): return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['request_subrealm_status'] = atomical_id_basic_info.get('$request_subrealm_status') - return_struct['atomicals'][atomical_id_ref]['request_subrealm'] = atomical_id_basic_info.get('$request_subrealm') - return_struct['atomicals'][atomical_id_ref]['parent_realm'] = atomical_id_basic_info.get('$parent_realm') - return_struct['atomicals'][atomical_id_ref]['subrealm'] = atomical_id_basic_info.get('$subrealm') - return_struct['atomicals'][atomical_id_ref]['full_realm_name'] = atomical_id_basic_info.get('$full_realm_name') + return_struct['atomicals'][atomical_id_ref][ + 'request_subrealm_status'] = atomical_id_basic_info.get('$request_subrealm_status') + return_struct['atomicals'][atomical_id_ref]['request_subrealm'] = atomical_id_basic_info.get( + '$request_subrealm') + return_struct['atomicals'][atomical_id_ref]['parent_realm'] = atomical_id_basic_info.get( + '$parent_realm') + return_struct['atomicals'][atomical_id_ref]['subrealm'] = atomical_id_basic_info.get( + '$subrealm') + return_struct['atomicals'][atomical_id_ref]['full_realm_name'] = atomical_id_basic_info.get( + '$full_realm_name') elif atomical_id_basic_info.get('$dmitem'): return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['request_dmitem_status'] = atomical_id_basic_info.get('$request_dmitem_status') - return_struct['atomicals'][atomical_id_ref]['request_dmitem'] = atomical_id_basic_info.get('$request_dmitem') - return_struct['atomicals'][atomical_id_ref]['parent_container'] = atomical_id_basic_info.get('$parent_container') + return_struct['atomicals'][atomical_id_ref][ + 'request_dmitem_status'] = atomical_id_basic_info.get('$request_dmitem_status') + return_struct['atomicals'][atomical_id_ref]['request_dmitem'] = atomical_id_basic_info.get( + '$request_dmitem') + return_struct['atomicals'][atomical_id_ref]['parent_container'] = 
atomical_id_basic_info.get( + '$parent_container') return_struct['atomicals'][atomical_id_ref]['dmitem'] = atomical_id_basic_info.get('$dmitem') elif atomical_id_basic_info.get('$ticker'): return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['ticker_candidates'] = atomical_id_basic_info.get('$ticker_candidates') - return_struct['atomicals'][atomical_id_ref]['request_ticker_status'] = atomical_id_basic_info.get('$request_ticker_status') - return_struct['atomicals'][atomical_id_ref]['request_ticker'] = atomical_id_basic_info.get('$request_ticker') + return_struct['atomicals'][atomical_id_ref]['ticker_candidates'] = atomical_id_basic_info.get( + '$ticker_candidates') + return_struct['atomicals'][atomical_id_ref][ + 'request_ticker_status'] = atomical_id_basic_info.get('$request_ticker_status') + return_struct['atomicals'][atomical_id_ref]['request_ticker'] = atomical_id_basic_info.get( + '$request_ticker') return_struct['atomicals'][atomical_id_ref]['ticker'] = atomical_id_basic_info.get('$ticker') elif atomical_id_basic_info.get('$container'): return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['request_container_status'] = atomical_id_basic_info.get('$request_container_status') - return_struct['atomicals'][atomical_id_ref]['container'] = atomical_id_basic_info.get('$container') - return_struct['atomicals'][atomical_id_ref]['request_container'] = atomical_id_basic_info.get('$request_container') + return_struct['atomicals'][atomical_id_ref][ + 'request_container_status'] = atomical_id_basic_info.get('$request_container_status') + return_struct['atomicals'][atomical_id_ref]['container'] = atomical_id_basic_info.get( + '$container') + return_struct['atomicals'][atomical_id_ref]['request_container'] = atomical_id_basic_info.get( + '$request_container') # Label them as candidates if they were candidates elif atomical_id_basic_info.get('subtype') == 'request_realm': return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['request_realm_status'] = atomical_id_basic_info.get('$request_realm_status') - return_struct['atomicals'][atomical_id_ref]['request_realm'] = atomical_id_basic_info.get('$request_realm') - return_struct['atomicals'][atomical_id_ref]['realm_candidates'] = atomical_id_basic_info.get('$realm_candidates') + return_struct['atomicals'][atomical_id_ref][ + 'request_realm_status'] = atomical_id_basic_info.get('$request_realm_status') + return_struct['atomicals'][atomical_id_ref]['request_realm'] = atomical_id_basic_info.get( + '$request_realm') + return_struct['atomicals'][atomical_id_ref]['realm_candidates'] = atomical_id_basic_info.get( + '$realm_candidates') elif atomical_id_basic_info.get('subtype') == 'request_subrealm': return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['subrealm_candidates'] = atomical_id_basic_info.get('$subrealm_candidates') - return_struct['atomicals'][atomical_id_ref]['request_subrealm_status'] = atomical_id_basic_info.get('$request_subrealm_status') - return_struct['atomicals'][atomical_id_ref]['request_full_realm_name'] = atomical_id_basic_info.get('$request_full_realm_name') - return_struct['atomicals'][atomical_id_ref]['request_subrealm'] = atomical_id_basic_info.get('$request_subrealm') - 
return_struct['atomicals'][atomical_id_ref]['parent_realm'] = atomical_id_basic_info.get('$parent_realm') + return_struct['atomicals'][atomical_id_ref]['subrealm_candidates'] = atomical_id_basic_info.get( + '$subrealm_candidates') + return_struct['atomicals'][atomical_id_ref][ + 'request_subrealm_status'] = atomical_id_basic_info.get('$request_subrealm_status') + return_struct['atomicals'][atomical_id_ref][ + 'request_full_realm_name'] = atomical_id_basic_info.get('$request_full_realm_name') + return_struct['atomicals'][atomical_id_ref]['request_subrealm'] = atomical_id_basic_info.get( + '$request_subrealm') + return_struct['atomicals'][atomical_id_ref]['parent_realm'] = atomical_id_basic_info.get( + '$parent_realm') elif atomical_id_basic_info.get('subtype') == 'request_dmitem': return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['dmitem_candidates'] = atomical_id_basic_info.get('$dmitem_candidates') - return_struct['atomicals'][atomical_id_ref]['request_dmitem_status'] = atomical_id_basic_info.get('$request_dmitem_status') - return_struct['atomicals'][atomical_id_ref]['request_dmitem'] = atomical_id_basic_info.get('$request_dmitem') - return_struct['atomicals'][atomical_id_ref]['parent_container'] = atomical_id_basic_info.get('$parent_container') + return_struct['atomicals'][atomical_id_ref]['dmitem_candidates'] = atomical_id_basic_info.get( + '$dmitem_candidates') + return_struct['atomicals'][atomical_id_ref][ + 'request_dmitem_status'] = atomical_id_basic_info.get('$request_dmitem_status') + return_struct['atomicals'][atomical_id_ref]['request_dmitem'] = atomical_id_basic_info.get( + '$request_dmitem') + return_struct['atomicals'][atomical_id_ref]['parent_container'] = atomical_id_basic_info.get( + '$parent_container') elif atomical_id_basic_info.get('subtype') == 'request_container': return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['container_candidates'] = atomical_id_basic_info.get('$container_candidates') - return_struct['atomicals'][atomical_id_ref]['request_container_status'] = atomical_id_basic_info.get('$request_container_status') - return_struct['atomicals'][atomical_id_ref]['request_container'] = atomical_id_basic_info.get('$request_container') + return_struct['atomicals'][atomical_id_ref][ + 'container_candidates'] = atomical_id_basic_info.get('$container_candidates') + return_struct['atomicals'][atomical_id_ref][ + 'request_container_status'] = atomical_id_basic_info.get('$request_container_status') + return_struct['atomicals'][atomical_id_ref]['request_container'] = atomical_id_basic_info.get( + '$request_container') elif atomical_id_basic_info.get('$request_ticker_status'): return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['ticker_candidates'] = atomical_id_basic_info.get('$ticker_candidates') - return_struct['atomicals'][atomical_id_ref]['request_ticker_status'] = atomical_id_basic_info.get('$request_ticker_status') - return_struct['atomicals'][atomical_id_ref]['request_ticker'] = atomical_id_basic_info.get('$request_ticker') + return_struct['atomicals'][atomical_id_ref]['ticker_candidates'] = atomical_id_basic_info.get( + '$ticker_candidates') + return_struct['atomicals'][atomical_id_ref][ + 'request_ticker_status'] = atomical_id_basic_info.get('$request_ticker_status') + 
return_struct['atomicals'][atomical_id_ref]['request_ticker'] = atomical_id_basic_info.get( + '$request_ticker') if returned_utxo['height'] <= 0: return_struct['atomicals'][atomical_id_ref]['unconfirmed'] += returned_utxo["atomicals"][atomical_id_ref] @@ -2651,33 +1423,30 @@ async def hashX_listscripthash_atomicals(self, hashX, Verbose=False): return return_struct - async def atomicals_get_tx(self, txids): - return await self.atomical_get_tx(txids) - async def scripthash_get_history(self, scripthash): - '''Return the confirmed and unconfirmed history of a scripthash.''' + """Return the confirmed and unconfirmed history of a scripthash.""" hashX = scripthash_to_hashX(scripthash) return await self.confirmed_and_unconfirmed_history(hashX) async def scripthash_get_mempool(self, scripthash): - '''Return the mempool transactions touching a scripthash.''' + """Return the mempool transactions touching a scripthash.""" hashX = scripthash_to_hashX(scripthash) return await self.unconfirmed_history(hashX) async def scripthash_listunspent(self, scripthash): - '''Return the list of UTXOs of a scripthash.''' + """Return the list of UTXOs of a scripthash.""" hashX = scripthash_to_hashX(scripthash) return await self.hashX_listunspent(hashX) async def scripthash_subscribe(self, scripthash): - '''Subscribe to a script hash. + """Subscribe to a script hash. - scripthash: the SHA256 hash of the script to subscribe to''' + scripthash: the SHA256 hash of the script to subscribe to""" hashX = scripthash_to_hashX(scripthash) return await self.hashX_subscribe(hashX, scripthash) async def scripthash_unsubscribe(self, scripthash): - '''Unsubscribe from a script hash.''' + """Unsubscribe from a script hash.""" self.bump_cost(0.1) hashX = scripthash_to_hashX(scripthash) return self.unsubscribe_hashX(hashX) is not None @@ -2697,8 +1466,8 @@ async def _merkle_proof(self, cp_height, height): } async def block_header(self, height, cp_height=0): - '''Return a raw block header as a hexadecimal string, or as a - dictionary with a merkle proof.''' + """Return a raw block header as a hexadecimal string, or as a + dictionary with a merkle proof.""" height = non_negative_integer(height) cp_height = non_negative_integer(cp_height) raw_header_hex = (await self.session_mgr.raw_header(height)).hex() @@ -2710,12 +1479,12 @@ async def block_header(self, height, cp_height=0): return result async def block_headers(self, start_height, count, cp_height=0): - '''Return count concatenated block headers as hex for the main chain; + """Return count concatenated block headers as hex for the main chain; starting at start_height. start_height and count must be non-negative integers. At most MAX_CHUNK_SIZE headers will be returned. 
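The `scripthash_*` handlers below all funnel through `scripthash_to_hashX`. For reference, a sketch of both directions of the conversion, assuming ElectrumX's usual `HASHX_LEN` of 11 and the byte-reversed hex convention of `hex_str_to_hash`:

    import hashlib

    HASHX_LEN = 11  # ElectrumX's truncated internal key length (lib/hash.py)

    def script_to_scripthash(script: bytes) -> str:
        """Electrum-protocol scripthash: sha256(script), hex in reversed order."""
        return hashlib.sha256(script).digest()[::-1].hex()

    def scripthash_to_hashX(scripthash: str) -> bytes:
        """Handler-side conversion: back to raw bytes, truncated for the DB."""
        bin_hash = bytes.fromhex(scripthash)[::-1]
        if len(bin_hash) != 32:
            raise ValueError(f'{scripthash} is not a valid script hash')
        return bin_hash[:HASHX_LEN]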
- ''' + """ start_height = non_negative_integer(start_height) count = non_negative_integer(count) cp_height = non_negative_integer(cp_height) @@ -2733,8 +1502,8 @@ async def block_headers(self, start_height, count, cp_height=0): return result def is_tor(self): - '''Try to detect if the connection is to a tor hidden service we are - running.''' + """Try to detect if the connection is to a tor hidden service we are + running.""" proxy_address = self.peer_mgr.proxy_address() if not proxy_address: return False @@ -2751,23 +1520,23 @@ async def replaced_banner(self, banner): revision //= 100 daemon_version = f'{major:d}.{minor:d}.{revision:d}' for pair in [ - ('$SERVER_VERSION', electrumx.version_short), - ('$SERVER_SUBVERSION', electrumx.version), - ('$DAEMON_VERSION', daemon_version), - ('$DAEMON_SUBVERSION', network_info['subversion']), - ('$DONATION_ADDRESS', self.env.donation_address), + ('$SERVER_VERSION', electrumx_version_short), + ('$SERVER_SUBVERSION', electrumx_version), + ('$DAEMON_VERSION', daemon_version), + ('$DAEMON_SUBVERSION', network_info['subversion']), + ('$DONATION_ADDRESS', self.env.donation_address), ]: banner = banner.replace(*pair) return banner async def donation_address(self): - '''Return the donation address as a string, empty if there is none.''' + """Return the donation address as a string, empty if there is none.""" self.bump_cost(0.1) return self.env.donation_address async def banner(self): - '''Return the server banner text.''' - banner = f'You are connected to an {electrumx.version} server.' + """Return the server banner text.""" + banner = f'You are connected to an {electrumx_version} server.' self.bump_cost(0.5) if self.is_tor(): @@ -2786,18 +1555,18 @@ async def banner(self): return banner async def relayfee(self): - '''The minimum fee a low-priority tx must pay in order to be accepted - to the daemon's memory pool.''' + """The minimum fee a low-priority tx must pay in order to be accepted + to the daemon's memory pool.""" self.bump_cost(1.0) return await self.daemon_request('relayfee') async def estimatefee(self, number, mode=None): - '''The estimated transaction fee per kilobyte to be paid for a + """The estimated transaction fee per kilobyte to be paid for a transaction to be included within a certain number of blocks. number: the number of blocks mode: CONSERVATIVE or ECONOMICAL estimation mode - ''' + """ number = non_negative_integer(number) # use whitelist for mode, otherwise it would be easy to force a cache miss: if mode not in self.coin.ESTIMATEFEE_MODES: @@ -2834,18 +1603,18 @@ async def estimatefee(self, number, mode=None): return feerate async def ping(self): - '''Serves as a connection keep-alive mechanism and for the client to + """Serves as a connection keep-alive mechanism and for the client to confirm the server is still responding. - ''' + """ self.bump_cost(0.1) return None async def server_version(self, client_name='', protocol_version=None): - '''Returns the server version as a string. + """Returns the server version as a string. 
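`estimatefee` whitelists `mode` precisely so that `(number, mode)` stays a bounded cache key, per the comment in the hunk above. A standalone sketch of that caching idea; the class name, TTL, and mode list are illustrative:

    import time

    class FeeEstimateCache:
        """Only whitelisted modes may form cache keys, so clients cannot
        force misses with arbitrary strings."""
        MODES = (None, 'CONSERVATIVE', 'ECONOMICAL')

        def __init__(self, fetch, ttl=60.0):
            self._fetch = fetch   # async callable (number, mode) -> feerate
            self._ttl = ttl
            self._cache = {}      # (number, mode) -> (timestamp, feerate)

        async def estimate(self, number: int, mode=None):
            if mode not in self.MODES:
                raise ValueError(f'unknown estimatefee mode: {mode}')
            key = (number, mode)
            cached = self._cache.get(key)
            now = time.monotonic()
            if cached and now - cached[0] < self._ttl:
                return cached[1]
            feerate = await self._fetch(number, mode)
            self._cache[key] = (now, feerate)
            return feerate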
client_name: a string identifying the client protocol_version: the protocol version spoken by the client - ''' + """ self.bump_cost(0.5) if self.sv_seen: raise RPCError(BAD_REQUEST, f'server.version already sent') @@ -2875,7 +1644,7 @@ async def server_version(self, client_name='', protocol_version=None): BAD_REQUEST, f'unsupported protocol version: {protocol_version}')) self.set_request_handlers(ptuple) - return electrumx.version, self.protocol_version_string() + return electrumx_version, self.protocol_version_string() async def crash_old_client(self, ptuple, crash_client_ver): if crash_client_ver: @@ -2890,54 +1659,19 @@ async def crash_old_client(self, ptuple, crash_client_ver): await self.send_notification('blockchain.estimatefee', ()) async def transaction_broadcast_validate(self, raw_tx): - '''Simulate a Broadcast a raw transaction to the network. - - raw_tx: the raw transaction as a hexadecimal string to validate for Atomicals FT rules''' self.bump_cost(0.25 + len(raw_tx) / 5000) - # This returns errors as JSON RPC errors, as is natural - try: - hex_hash = await self.session_mgr.broadcast_transaction_validated(raw_tx, False) - return hex_hash - except AtomicalsValidationError as e: - self.logger.info(f'error validating atomicals transaction: {e}') - raise RPCError(ATOMICALS_INVALID_TX, 'the transaction was rejected by ' - f'atomicals rules.\n\n{e}\n[{raw_tx}]') + return await self.ss.transaction_broadcast_validate() async def transaction_broadcast(self, raw_tx): - '''Broadcast a raw transaction to the network. + """Broadcast a raw transaction to the network. - raw_tx: the raw transaction as a hexadecimal string''' + raw_tx: the raw transaction as a hexadecimal string""" self.bump_cost(0.25 + len(raw_tx) / 5000) - # This returns errors as JSON RPC errors, as is natural - try: - hex_hash = await self.session_mgr.broadcast_transaction_validated(raw_tx, True) - except DaemonError as e: - error, = e.args - message = error['message'] - self.logger.info(f'error sending transaction: {message}') - raise RPCError(BAD_REQUEST, 'the transaction was rejected by ' - f'network rules.\n\n{message}\n[{raw_tx}]') - except AtomicalsValidationError as e: - self.logger.info(f'error validating atomicals transaction: {e}') - raise RPCError(ATOMICALS_INVALID_TX, 'the transaction was rejected by ' - f'atomicals rules.\n\n{e}\n[{raw_tx}]') - - else: - self.txs_sent += 1 - client_ver = util.protocol_tuple(self.client) - if client_ver != (0, ): - msg = self.coin.warn_old_client_on_tx_broadcast(client_ver) - if msg: - self.logger.info(f'sent tx: {hex_hash}. and warned user to upgrade their ' - f'client from {self.client}') - return msg - - self.logger.info(f'sent tx: {hex_hash}') - return hex_hash + return await self.ss.transaction_broadcast(raw_tx) async def transaction_broadcast_force(self, raw_tx): - '''Broadcast a raw transaction to the network. Force even if invalid FT transfer - raw_tx: the raw transaction as a hexadecimal string''' + """Broadcast a raw transaction to the network. 
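Note that the new `transaction_broadcast_validate` body below calls `self.ss.transaction_broadcast_validate()` without forwarding `raw_tx`, even though the cost bump still uses it; that looks like an oversight. A sketch of the delegated method as the removed inline code suggests it should read, with `raw_tx` passed through; `ATOMICALS_INVALID_TX` is the constant defined elsewhere in this patch:

    from aiorpcx import RPCError
    from electrumx.lib.util_atomicals import AtomicalsValidationError

    ATOMICALS_INVALID_TX = 800422  # as defined in this patch

    class SharedSession:
        """Reconstructed from the inline code this hunk removes."""

        def __init__(self, session_mgr, logger):
            self.session_mgr = session_mgr
            self.logger = logger

        async def transaction_broadcast_validate(self, raw_tx: str):
            # Validate against Atomicals FT rules without relaying (live=False).
            try:
                return await self.session_mgr.broadcast_transaction_validated(
                    raw_tx, False)
            except AtomicalsValidationError as e:
                self.logger.info(f'error validating atomicals transaction: {e}')
                raise RPCError(
                    ATOMICALS_INVALID_TX,
                    'the transaction was rejected by '
                    f'atomicals rules.\n\n{e}\n[{raw_tx}]')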
Force even if invalid FT transfer + raw_tx: the raw transaction as a hexadecimal string""" self.bump_cost(0.25 + len(raw_tx) / 5000) # This returns errors as JSON RPC errors, as is natural try: @@ -2947,11 +1681,11 @@ async def transaction_broadcast_force(self, raw_tx): message = error['message'] self.logger.info(f'error sending transaction: {message}') raise RPCError(BAD_REQUEST, 'the transaction was rejected by ' - f'network rules.\n\n{message}\n[{raw_tx}]') + f'network rules.\n\n{message}\n[{raw_tx}]') else: self.txs_sent += 1 client_ver = util.protocol_tuple(self.client) - if client_ver != (0, ): + if client_ver != (0,): msg = self.coin.warn_old_client_on_tx_broadcast(client_ver) if msg: self.logger.info(f'sent tx: {hex_hash}. and warned user to upgrade their ' @@ -2961,13 +1695,12 @@ async def transaction_broadcast_force(self, raw_tx): self.logger.info(f'sent tx: {hex_hash}') return hex_hash - async def transaction_get(self, tx_hash, verbose=False): - '''Return the serialized raw transaction given its hash + """Return the serialized raw transaction given its hash tx_hash: the transaction hash as a hexadecimal string verbose: passed on to the daemon - ''' + """ assert_tx_hash(tx_hash) if verbose not in (True, False): raise RPCError(BAD_REQUEST, '"verbose" must be a boolean') @@ -2976,12 +1709,12 @@ async def transaction_get(self, tx_hash, verbose=False): return await self.daemon_request('getrawtransaction', tx_hash, verbose) async def transaction_merkle(self, tx_hash, height): - '''Return the merkle branch to a confirmed transaction given its hash + """Return the merkle branch to a confirmed transaction given its hash and height. tx_hash: the transaction hash as a hexadecimal string height: the height of the block it is in - ''' + """ tx_hash = assert_tx_hash(tx_hash) height = non_negative_integer(height) @@ -2992,9 +1725,9 @@ async def transaction_merkle(self, tx_hash, height): return {"block_height": height, "merkle": branch, "pos": tx_pos} async def transaction_id_from_pos(self, height, tx_pos, merkle=False): - '''Return the txid and optionally a merkle proof, given + """Return the txid and optionally a merkle proof, given a block height and position in the block. 
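`transaction_merkle` below returns a branch proving a transaction's inclusion at `tx_pos`. A self-contained sketch of the underlying branch computation, kept in raw bytes (the protocol serialises each hash as byte-reversed hex):

    import hashlib

    def double_sha256(b: bytes) -> bytes:
        return hashlib.sha256(hashlib.sha256(b).digest()).digest()

    def merkle_branch(tx_hashes: list, index: int) -> list:
        """Branch of raw hashes proving tx_hashes[index] is in the tree;
        odd levels duplicate their last element, as in Bitcoin's merkle tree."""
        branch = []
        hashes = list(tx_hashes)
        while len(hashes) > 1:
            if len(hashes) % 2:
                hashes.append(hashes[-1])
            branch.append(hashes[index ^ 1])  # sibling at this level
            index >>= 1
            hashes = [double_sha256(hashes[i] + hashes[i + 1])
                      for i in range(0, len(hashes), 2)]
        return branch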
- ''' + """ tx_pos = non_negative_integer(tx_pos) height = non_negative_integer(height) if merkle not in (True, False): @@ -3018,7 +1751,7 @@ async def transaction_id_from_pos(self, height, tx_pos, merkle=False): async def compact_fee_histogram(self): self.bump_cost(1.0) return await self.mempool.compact_fee_histogram() - + async def atomicals_transaction(self, txid): return await self.session_mgr.get_transaction_detail(txid) @@ -3041,7 +1774,7 @@ async def get_transaction_detail_by_height(self, height, limit, offset, op_type, if (op_type and op_type == data["op"]) or (not op_type and data["op"]): res.append(data) total = len(res) - return res[offset:offset+limit], total + return res[offset:offset + limit], total # get the whole transaction by block height # return transaction detail @@ -3050,13 +1783,16 @@ async def transaction_by_height(self, height, limit=10, offset=0, op_type=None, return {"result": res, "total": total, "limit": limit, "offset": offset} # get transaction by atomical id - async def transaction_by_atomical_id(self, compact_atomical_id_or_atomical_number, limit=10, offset=0, op_type=None, reverse=True): + async def transaction_by_atomical_id(self, compact_atomical_id_or_atomical_number, limit=10, offset=0, op_type=None, + reverse=True): res = [] compact_atomical_id = compact_atomical_id_or_atomical_number - if isinstance(compact_atomical_id_or_atomical_number, int) != True and is_compact_atomical_id(compact_atomical_id_or_atomical_number): + if isinstance(compact_atomical_id_or_atomical_number, int) != True and is_compact_atomical_id( + compact_atomical_id_or_atomical_number): assert_atomical_id(compact_atomical_id) else: - compact_atomical_id = location_id_bytes_to_compact(self.db.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number)) + compact_atomical_id = location_id_bytes_to_compact( + self.db.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number)) atomical_id = compact_to_location_id_bytes(compact_atomical_id) hashX = double_sha256(atomical_id) @@ -3092,92 +1828,9 @@ async def transaction_by_scripthash(self, scripthash, limit=10, offset=0, op_typ res.append(data) return {"result": res, "total": total, "limit": limit, "offset": offset} - def set_request_handlers(self, ptuple): - self.protocol_tuple = ptuple - handlers = { - 'blockchain.block.header': self.block_header, - 'blockchain.block.headers': self.block_headers, - 'blockchain.estimatefee': self.estimatefee, - 'blockchain.headers.subscribe': self.headers_subscribe, - 'blockchain.relayfee': self.relayfee, - 'blockchain.scripthash.get_balance': self.scripthash_get_balance, - 'blockchain.scripthash.get_history': self.scripthash_get_history, - 'blockchain.scripthash.get_mempool': self.scripthash_get_mempool, - 'blockchain.scripthash.listunspent': self.scripthash_listunspent, - 'blockchain.scripthash.subscribe': self.scripthash_subscribe, - 'blockchain.transaction.broadcast': self.transaction_broadcast, - 'blockchain.transaction.broadcast_force': self.transaction_broadcast_force, - 'blockchain.transaction.get': self.transaction_get, - 'blockchain.transaction.get_merkle': self.transaction_merkle, - 'blockchain.transaction.id_from_pos': self.transaction_id_from_pos, - 'mempool.get_fee_histogram': self.compact_fee_histogram, - 'server.add_peer': self.add_peer, - 'server.banner': self.banner, - 'server.donation_address': self.donation_address, - 'server.features': self.server_features_async, - 'server.peers.subscribe': self.peers_subscribe, - 'server.ping': self.ping, - 
'server.version': self.server_version, - # The Atomicals era has begun # - 'blockchain.atomicals.validate': self.transaction_broadcast_validate, - 'blockchain.atomicals.get_ft_balances_scripthash': self.atomicals_get_ft_balances, - 'blockchain.atomicals.get_nft_balances_scripthash': self.atomicals_get_nft_balances, - 'blockchain.atomicals.listscripthash': self.atomicals_listscripthash, - 'blockchain.atomicals.list': self.atomicals_list, - 'blockchain.atomicals.get_numbers': self.atomicals_num_to_id, - 'blockchain.atomicals.get_block_hash': self.atomicals_block_hash, - 'blockchain.atomicals.get_block_txs': self.atomicals_block_txs, - 'blockchain.atomicals.dump': self.atomicals_dump, - 'blockchain.atomicals.at_location': self.atomicals_at_location, - 'blockchain.atomicals.get_location': self.atomicals_get_location, - 'blockchain.atomicals.get': self.atomicals_get, - 'blockchain.atomicals.get_global': self.atomicals_get_global, - 'blockchain.atomicals.get_state': self.atomical_get_state, - 'blockchain.atomicals.get_state_history': self.atomical_get_state_history, - 'blockchain.atomicals.get_events': self.atomical_get_events, - 'blockchain.atomicals.get_tx_history': self.atomicals_get_tx_history, - 'blockchain.atomicals.get_realm_info': self.atomicals_get_realm_info, - 'blockchain.atomicals.get_by_realm': self.atomicals_get_by_realm, - 'blockchain.atomicals.get_by_subrealm': self.atomicals_get_by_subrealm, - 'blockchain.atomicals.get_by_dmitem': self.atomicals_get_by_dmitem, - 'blockchain.atomicals.get_by_ticker': self.atomicals_get_by_ticker, - 'blockchain.atomicals.get_by_container': self.atomicals_get_by_container, - 'blockchain.atomicals.get_by_container_item': self.atomicals_get_by_container_item, - 'blockchain.atomicals.get_by_container_item_validate': self.atomicals_get_by_container_item_validation, - 'blockchain.atomicals.get_container_items': self.atomicals_get_container_items, - 'blockchain.atomicals.get_ft_info': self.atomicals_get_ft_info, - 'blockchain.atomicals.get_dft_mints': self.atomicals_get_dft_mints, - 'blockchain.atomicals.find_tickers': self.atomicals_search_tickers, - 'blockchain.atomicals.find_realms': self.atomicals_search_realms, - 'blockchain.atomicals.find_subrealms': self.atomicals_search_subrealms, - 'blockchain.atomicals.find_containers': self.atomicals_search_containers, - 'blockchain.atomicals.get_holders': self.atomicals_get_holders, - 'blockchain.atomicals.transaction': self.atomicals_transaction, - 'blockchain.atomicals.transaction_global': self.session_mgr.transaction_global, - 'blockchain.atomicals.transaction_by_height': self.transaction_by_height, - 'blockchain.atomicals.transaction_by_atomical_id': self.transaction_by_atomical_id, - 'blockchain.atomicals.transaction_by_scripthash': self.transaction_by_scripthash, - } - if ptuple >= (1, 4, 2): - handlers['blockchain.scripthash.unsubscribe'] = self.scripthash_unsubscribe - self.request_handlers = handlers - -class LocalRPC(SessionBase): - '''A local TCP RPC server session.''' - - processing_timeout = 10**9 # disable timeouts - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.client = 'RPC' - self.connection.max_response_size = 0 - - def protocol_version_string(self): - return 'RPC' - class DashElectrumX(ElectrumX): - '''A TCP server that handles incoming Electrum Dash connections.''' + """A TCP server that handles incoming Electrum Dash connections.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @@ -3189,7 +1842,7 @@ def 
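The handler table removed here (it moves to `session/electrumx_session.py` per this patch's renames) gates newer methods on the negotiated protocol tuple. A reduced sketch of that gating; `build_handlers` and its two-entry table are illustrative, not the real registry:

    def protocol_tuple(s: str) -> tuple:
        """'1.4.2' -> (1, 4, 2); mirrors util.protocol_tuple for valid input."""
        try:
            return tuple(int(part) for part in s.split('.'))
        except ValueError:
            return (0,)

    def build_handlers(session, ptuple: tuple) -> dict:
        handlers = {
            'blockchain.scripthash.subscribe': session.scripthash_subscribe,
            'server.ping': session.ping,
        }
        # scripthash.unsubscribe only exists from protocol 1.4.2 onward,
        # as in the table above.
        if ptuple >= (1, 4, 2):
            handlers['blockchain.scripthash.unsubscribe'] = \
                session.scripthash_unsubscribe
        return handlers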
set_request_handlers(self, ptuple): super().set_request_handlers(ptuple) self.request_handlers.update({ 'masternode.announce.broadcast': - self.masternode_announce_broadcast, + self.masternode_announce_broadcast, 'masternode.subscribe': self.masternode_subscribe, 'masternode.list': self.masternode_list, 'protx.diff': self.protx_diff, @@ -3197,7 +1850,7 @@ def set_request_handlers(self, ptuple): }) async def _notify_inner(self, touched, height_changed): - '''Notify the client about changes in masternode list.''' + """Notify the client about changes in masternode list.""" await super()._notify_inner(touched, height_changed) for mn in self.mns.copy(): status = await self.daemon_request('masternode_list', @@ -3207,10 +1860,10 @@ async def _notify_inner(self, touched, height_changed): # Masternode command handlers async def masternode_announce_broadcast(self, signmnb): - '''Pass through the masternode announce message to be broadcast + """Pass through the masternode announce message to be broadcast by the daemon. - signmnb: signed masternode broadcast message.''' + signmnb: signed masternode broadcast message.""" try: return await self.daemon_request('masternode_broadcast', ('relay', signmnb)) @@ -3219,13 +1872,13 @@ async def masternode_announce_broadcast(self, signmnb): message = error['message'] self.logger.info(f'masternode_broadcast: {message}') raise RPCError(BAD_REQUEST, 'the masternode broadcast was ' - f'rejected.\n\n{message}\n[{signmnb}]') + f'rejected.\n\n{message}\n[{signmnb}]') async def masternode_subscribe(self, collateral): - '''Returns the status of masternode. + """Returns the status of masternode. collateral: masternode collateral. - ''' + """ result = await self.daemon_request('masternode_list', ('status', collateral)) if result is not None: @@ -3234,21 +1887,21 @@ async def masternode_subscribe(self, collateral): return None async def masternode_list(self, payees): - ''' + """ Returns the list of masternodes. payees: a list of masternode payee addresses. - ''' + """ if not isinstance(payees, list): raise RPCError(BAD_REQUEST, 'expected a list of payees') def get_masternode_payment_queue(mns): - '''Returns the calculated position in the payment queue for all the + """Returns the calculated position in the payment queue for all the valid masterernodes in the given mns list. mns: a list of masternodes information. - ''' - now = int(datetime.datetime.utcnow().strftime("%s")) + """ + now = int(datetime.datetime.now(datetime.UTC).strftime("%s")) mn_queue = [] # Only ENABLED masternodes are considered for the list. @@ -3273,12 +1926,12 @@ def get_masternode_payment_queue(mns): return mn_queue def get_payment_position(payment_queue, address): - ''' + """ Returns the position of the payment list for the given address. payment_queue: position in the payment queue for the masternode. address: masternode payee address. - ''' + """ position = -1 for pos, mn in enumerate(payment_queue, start=1): if mn[2] == address: @@ -3289,10 +1942,9 @@ def get_payment_position(payment_queue, address): # Accordingly with the masternode payment queue, a custom list # with the masternode information including the payment # position is returned. 
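The replacement line above swaps the deprecated `utcnow()` for `datetime.now(datetime.UTC)` but keeps `strftime("%s")`, which is a platform extension (it fails on Windows) and needs a string-to-int round-trip; `datetime.UTC` also only exists on Python 3.11+. A portable equivalent:

    import datetime

    # timestamp() avoids the non-standard "%s" format code, and
    # timezone.utc works on Python < 3.11 where datetime.UTC does not.
    now = int(datetime.datetime.now(datetime.timezone.utc).timestamp())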
- cache = self.session_mgr.mn_cache + cache = self.mn_cache if not cache or self.session_mgr.mn_cache_height != self.db.db_height: - full_mn_list = await self.daemon_request('masternode_list', - ('full',)) + full_mn_list = await self.daemon_request('masternode_list', ('full',)) mn_payment_queue = get_masternode_payment_queue(full_mn_list) mn_payment_count = len(mn_payment_queue) mn_list = [] @@ -3313,7 +1965,7 @@ def get_payment_position(payment_queue, address): mn_payment_queue, mn_info['payee'] ) mn_info['inselection'] = ( - mn_info['paymentposition'] < mn_payment_count // 10 + mn_info['paymentposition'] < mn_payment_count // 10 ) hashX = self.coin.address_to_hashX(mn_info['payee']) balance = await self.get_balance(hashX) @@ -3331,13 +1983,13 @@ def get_payment_position(payment_queue, address): return cache async def protx_diff(self, base_height, height): - ''' + """ Calculates a diff between two deterministic masternode lists. The result also contains proof data. base_height: The starting block height (starting from 1). height: The ending block height. - ''' + """ if not isinstance(base_height, int) or not isinstance(height, int): raise RPCError(BAD_REQUEST, 'expected a int block heights') @@ -3353,11 +2005,11 @@ async def protx_diff(self, base_height, height): ('diff', base_height, height)) async def protx_info(self, protx_hash): - ''' + """ Returns detailed information about a deterministic masternode. protx_hash: The hash of the initial ProRegTx - ''' + """ if not isinstance(protx_hash, str): raise RPCError(BAD_REQUEST, 'expected protx hash string') @@ -3368,7 +2020,7 @@ async def protx_info(self, protx_hash): class SmartCashElectrumX(DashElectrumX): - '''A TCP server that handles incoming Electrum-SMART connections.''' + """A TCP server that handles incoming Electrum-SMART connections.""" def set_request_handlers(self, ptuple): super().set_request_handlers(ptuple) @@ -3378,18 +2030,18 @@ def set_request_handlers(self, ptuple): }) async def smartrewards_current(self): - '''Returns the current smartrewards info.''' + """Returns the current smartrewards info.""" result = await self.daemon_request('smartrewards', ('current',)) if result is not None: return result return None async def smartrewards_check(self, addr): - ''' + """ Returns the status of an address addr: a single smartcash address - ''' + """ result = await self.daemon_request('smartrewards', ('check', addr)) if result is not None: return result @@ -3434,7 +2086,7 @@ def truncate_auxpow(self, headers_full_hex, start_height): headers = bytearray() while cursor < len(headers_full): - headers += headers_full[cursor:cursor+self.coin.TRUNCATED_HEADER_SIZE] + headers += headers_full[cursor:cursor + self.coin.TRUNCATED_HEADER_SIZE] cursor += self.db.dynamic_header_len(height) height += 1 diff --git a/electrumx/server/http_session.py b/electrumx/server/session/http_session.py similarity index 76% rename from electrumx/server/http_session.py rename to electrumx/server/session/http_session.py index 38e19386..9e66418e 100644 --- a/electrumx/server/http_session.py +++ b/electrumx/server/session/http_session.py @@ -1,83 +1,23 @@ # -*- coding: utf-8 -*- import asyncio -import codecs import datetime import json -import time -import aiorpcx -from aiohttp import request, web -from aiorpcx import RPCError, ReplyAndDisconnect -from functools import reduce from decimal import Decimal -import electrumx -from electrumx.lib.atomicals_blueprint_builder import AtomicalsTransferBlueprintBuilder -from electrumx.lib.hash import HASHX_LEN, 
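`truncate_auxpow` above walks the concatenated headers, keeping only the fixed-size prefix of each while advancing by each header's dynamic on-disk length. A standalone sketch; the 80-byte default and the `header_lens` argument stand in for `coin.TRUNCATED_HEADER_SIZE` and `db.dynamic_header_len`:

    def truncate_auxpow(headers_full: bytes, header_lens: list,
                        truncated_size: int = 80) -> bytearray:
        """Keep the first `truncated_size` bytes of each (possibly
        AuxPoW-extended) header; 80 bytes is the classic Bitcoin header."""
        headers = bytearray()
        cursor = 0
        for header_len in header_lens:
            headers += headers_full[cursor:cursor + truncated_size]
            cursor += header_len  # skip the AuxPoW tail, if any
        return headers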
double_sha256, hash_to_hex_str, hex_str_to_hash, sha256 +from typing import Optional + +import aiorpcx +from aiohttp import web +from aiorpcx import RPCError + import electrumx.lib.util as util from electrumx.lib.script2addr import get_address_from_output_script -from electrumx.lib.util_atomicals import DFT_MINT_MAX_MAX_COUNT_DENSITY, DMINT_PATH, MINT_SUBNAME_RULES_BECOME_EFFECTIVE_IN_BLOCKS, SUBREALM_MINT_PATH, AtomicalsValidationError, auto_encode_bytes_elements, calculate_latest_state_from_mod_history, compact_to_location_id_bytes, format_name_type_candidates_to_rpc, format_name_type_candidates_to_rpc_for_subname, is_compact_atomical_id, location_id_bytes_to_compact, parse_protocols_operations_from_witness_array, validate_merkle_proof_dmint, validate_rules_data -from electrumx.server.daemon import DaemonError - - -BAD_REQUEST = 1 -DAEMON_ERROR = 2 -MAX_TX_QUERY = 50 -ATOMICALS_INVALID_TX = 800422 - -def scripthash_to_hashX(scripthash): - try: - bin_hash = hex_str_to_hash(scripthash) - if len(bin_hash) == 32: - return bin_hash[:HASHX_LEN] - except (ValueError, TypeError): - pass - raise RPCError(BAD_REQUEST, f'{scripthash} is not a valid script hash') - - -def assert_atomical_id(value): - '''Raise an RPCError if the value is not a valid atomical id - If it is valid, return it as 32-byte binary hash. - ''' - try: - if value == None or value == "": - raise RPCError(BAD_REQUEST, f'atomical_id required') - index_of_i = value.find("i") - if index_of_i != 64: - raise RPCError(BAD_REQUEST, f'{value} should be an atomical_id') - raw_hash = hex_str_to_hash(value[ : 64]) - if len(raw_hash) == 32: - return raw_hash - except (ValueError, TypeError): - pass - - raise RPCError(BAD_REQUEST, f'{value} should be an atomical_id') - - -def assert_tx_hash(value): - '''Raise an RPCError if the value is not a valid hexadecimal transaction hash. - - If it is valid, return it as 32-byte binary hash. 
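`assert_atomical_id` and the other request validators move to `session/session_base.py` in this patch. A condensed, self-contained version of the atomical-id validator as removed here; the byte reversal is an assumption matching `hex_str_to_hash`:

    from aiorpcx import RPCError

    BAD_REQUEST = 1

    def assert_atomical_id(value: str) -> bytes:
        """'<64-hex-txid>i<index>' -> 32 raw bytes, else RPCError."""
        if not value:
            raise RPCError(BAD_REQUEST, 'atomical_id required')
        if value.find('i') != 64:
            raise RPCError(BAD_REQUEST, f'{value} should be an atomical_id')
        try:
            return bytes.fromhex(value[:64])[::-1]
        except ValueError:
            raise RPCError(BAD_REQUEST, f'{value} should be an atomical_id')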
- ''' - try: - raw_hash = hex_str_to_hash(value) - if len(raw_hash) == 32: - return raw_hash - except (ValueError, TypeError): - pass - raise RPCError(BAD_REQUEST, f'{value} should be a transaction hash') - - -def non_negative_integer(value): - '''Return param value it is or can be converted to a non-negative - integer, otherwise raise an RPCError.''' - try: - value = int(value) - if value >= 0: - return value - except (ValueError, TypeError): - pass - raise RPCError(BAD_REQUEST, - f'{value} should be a non-negative integer') +from electrumx.lib.util_atomicals import * +from electrumx.server.session import BAD_REQUEST +from electrumx.server.session.session_base import assert_tx_hash, scripthash_to_hashX, non_negative_integer, \ + assert_atomical_id +from electrumx.server.session.shared_session import SharedSession +from electrumx.version import electrumx_version class DecimalEncoder(json.JSONEncoder): @@ -87,8 +27,19 @@ def default(self, o): return super(DecimalEncoder, self).default(o) -class HttpHandler(object): +async def format_params(request: web.Request): + params: list + if request.method == "GET": + params = json.loads(request.query.get("params", "[]")) + elif request.content_length: + json_data = await request.json() + params = json_data.get("params", []) + else: + params = [] + return dict(zip(range(len(params)), params)) + +class HttpHandler(object): PROTOCOL_MIN = (1, 4) PROTOCOL_MAX = (1, 4, 3) @@ -105,34 +56,20 @@ def __init__(self, session_mgr, db, mempool, peer_mgr, kind): self.coin = self.env.coin self.client = 'unknown' self.anon_logs = self.env.anon_logs - self.txs_sent = 0 self.log_me = False self.daemon_request = self.session_mgr.daemon_request self.mempool_statuses = {} self.sv_seen = False self.MAX_CHUNK_SIZE = 2016 self.hashX_subs = {} - - async def format_params(self, request: web.Request): - params: list - if request.method == "GET": - params = json.loads(request.query.get("params", "[]")) - elif request.content_length: - json_data = await request.json() - params = json_data.get("params", []) - else: - params = [] - return dict(zip(range(len(params)), params)) + # Use the sharing session to manage handlers. 
+ self.ss = SharedSession(self.session_mgr, self.logger) async def get_rpc_server(self): for service in self.env.services: if service.protocol == 'tcp': return service - def remote_address(self): - '''Returns a NetAddress or None if not connected.''' - return self.transport.remote_address() - @classmethod def protocol_min_max_strings(cls): return [util.version_string(ver) @@ -140,7 +77,7 @@ def protocol_min_max_strings(cls): @classmethod def server_features(cls, env): - '''Return the server features dictionary.''' + """Return the server features dictionary.""" hosts_dict = {} for service in env.report_services: port_dict = hosts_dict.setdefault(str(service.host), {}) @@ -151,7 +88,7 @@ def server_features(cls, env): return { 'hosts': hosts_dict, 'pruning': None, - 'server_version': electrumx.version, + 'server_version': electrumx_version, 'protocol_min': min_str, 'protocol_max': max_str, 'genesis_hash': env.coin.GENESIS_HASH, @@ -159,17 +96,6 @@ def server_features(cls, env): 'services': [str(service) for service in env.report_services], } - def is_tor(self): - '''Try to detect if the connection is to a tor hidden service we are - running.''' - proxy_address = self.peer_mgr.proxy_address() - if not proxy_address: - return False - remote_addr = self.remote_address() - if not remote_addr: - return False - return remote_addr.host == proxy_address.host - async def _merkle_proof(self, cp_height, height): max_height = self.db.db_height if not height <= cp_height <= max_height: @@ -214,10 +140,10 @@ async def address(self, request): return web.json_response(res) async def address_status(self, hashX): - '''Returns an address status. + """Returns an address status. Status is a hex string, but must be None if there is no history. - ''' + """ # Note history is ordered and mempool unordered in electrum-server # For mempool, height is -1 if it has unconfirmed inputs, otherwise 0 db_history, cost = await self.session_mgr.limited_history(hashX) @@ -259,12 +185,12 @@ def address_to_hashX(self, address): raise RPCError(BAD_REQUEST, f'{address} is not a valid address') async def address_get_balance(self, address): - '''Return the confirmed and unconfirmed balance of an address.''' + """Return the confirmed and unconfirmed balance of an address.""" hashX = self.address_to_hashX(address) return await self.get_balance(hashX) async def address_get_history(self, address): - '''Return the confirmed and unconfirmed history of an address.''' + """Return the confirmed and unconfirmed history of an address.""" hashX = self.address_to_hashX(address) return await self.confirmed_and_unconfirmed_history(hashX) @@ -297,10 +223,10 @@ async def confirmed_and_unconfirmed_history(self, hashX): return conf + await self.unconfirmed_history(hashX) async def mempool_get(self, verbose=False): - '''Returns all transaction ids in memory pool as a json array of string transaction ids + """Returns all transaction ids in memory pool as a json array of string transaction ids verbose: True for a json object, false for array of transaction ids - ''' + """ if verbose not in (True, False): raise RPCError(BAD_REQUEST, f'"verbose" must be a boolean') @@ -318,15 +244,13 @@ async def atomical_id_get(self, compact_atomical_id): raise RPCError(BAD_REQUEST, f'"{compact_atomical_id}" is not found') return atomical_in_mempool - async def atomicals_list_get(self, limit, offset, asc): atomicals = await self.db.get_atomicals_list(limit, offset, asc) atomicals_populated = [] for atomical_id in atomicals: atomical = await 
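`format_params` (now module-level, above) turns the positional `params` array into an index-keyed dict so handlers can read optional arguments with `.get()` and a default, without `IndexError`. A usage sketch with hypothetical parameter values:

    def format_params_list(params: list) -> dict:
        """What format_params() produces once `params` is extracted from
        the request: positional args keyed by their index."""
        return dict(zip(range(len(params)), params))

    params = format_params_list(['some_scripthash_hex', 10])
    scripthash = params.get(0)
    limit = params.get(1, 10)
    offset = params.get(2, 0)  # absent -> default, no IndexError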
self.atomical_id_get(location_id_bytes_to_compact(atomical_id)) atomicals_populated.append(atomical) - return {'global': await self.get_summary_info(), 'result': atomicals_populated } - + return {'global': await self.get_summary_info(), 'result': atomicals_populated} async def atomical_id_get_ft_info(self, compact_atomical_id): atomical_id = compact_to_location_id_bytes(compact_atomical_id) @@ -349,30 +273,35 @@ async def atomical_id_get_ft_info(self, compact_atomical_id): return atomical_in_mempool # Perform a search for tickers, containers, realms, subrealms - def atomicals_search_name_template(self, db_prefix, name_type_str, parent_prefix=None, prefix=None, Reverse=False, Limit=100, Offset=0, is_verified_only=False): + def atomicals_search_name_template(self, db_prefix, name_type_str, parent_prefix=None, prefix=None, reverse=False, + limit=100, offset=0, is_verified_only=False): search_prefix = b'' if prefix: search_prefix = prefix.encode() - db_entries = self.db.get_name_entries_template_limited(db_prefix, parent_prefix, search_prefix, Reverse, Limit, Offset) + db_entries = self.db.get_name_entries_template_limited(db_prefix, parent_prefix, search_prefix, reverse, limit, + offset) formatted_results = [] for item in db_entries: + status = None if name_type_str == "ticker": - status, _, _ = self.session_mgr.bp.get_effective_name_template(b'tick', item['name'], self.session_mgr.bp.height, self.session_mgr.bp.ticker_data_cache) + status, _, _ = self.session_mgr.bp.get_effective_name_template(b'tick', item['name'], + self.session_mgr.bp.height, + self.session_mgr.bp.ticker_data_cache) elif name_type_str == "realm": - status, _, _ = self.session_mgr.bp.get_effective_name_template(b'rlm', item['name'], self.session_mgr.bp.height, self.session_mgr.bp.realm_data_cache) + status, _, _ = self.session_mgr.bp.get_effective_name_template(b'rlm', item['name'], + self.session_mgr.bp.height, + self.session_mgr.bp.realm_data_cache) elif name_type_str == "collection": - status, _, _ = self.session_mgr.bp.get_effective_name_template(b'co', item['name'], self.session_mgr.bp.height, self.session_mgr.bp.container_data_cache) + status, _, _ = self.session_mgr.bp.get_effective_name_template(b'co', item['name'], + self.session_mgr.bp.height, + self.session_mgr.bp.container_data_cache) elif name_type_str == "subrealm": - status, _, _ = self.session_mgr.bp.get_effective_subrealm(parent_prefix, item['name'], self.session_mgr.bp.height) + status, _, _ = self.session_mgr.bp.get_effective_subrealm(parent_prefix, item['name'], + self.session_mgr.bp.height) - obj = { - 'atomical_id': location_id_bytes_to_compact(item['atomical_id']), - 'tx_num': item['tx_num'] - } - obj[name_type_str] = item['name'] - obj[name_type_str + '_hex'] = item.get('name_hex') - obj['status'] = status + obj = {'atomical_id': location_id_bytes_to_compact(item['atomical_id']), 'tx_num': item['tx_num'], + name_type_str: item['name'], name_type_str + '_hex': item.get('name_hex'), 'status': status} if is_verified_only and status == "verified": formatted_results.append(obj) elif not is_verified_only: @@ -398,17 +327,17 @@ async def search_token(self, db_prefix, name_type_str, prefix=None, Reverse=Fals atomical_id = location_id_bytes_to_compact(item['atomical_id']) atomical_data = await self.atomical_id_get_ft_info(atomical_id) obj = { - 'atomical_id': (atomical_id), + 'atomical_id': atomical_id, 'tx_num': item['tx_num'], 'atomical_data': atomical_data, + name_type_str: item['name'] } - obj[name_type_str] = item['name'] formatted_results.append(obj) 
         return {'result': formatted_results}
 
     async def hashX_listunspent(self, hashX):
-        '''Return the list of UTXOs of a script hash, including mempool
-        effects.'''
+        """Return the list of UTXOs of a script hash, including mempool
+        effects."""
         utxos = await self.db.all_utxos(hashX)
         utxos = sorted(utxos)
         utxos.extend(await self.mempool.unordered_UTXOs(hashX))
@@ -445,7 +374,7 @@ async def hashX_ft_balances_atomicals(self, hashX):
         utxos = sorted(utxos)
         # Comment out the utxos for now and add it in later
         # utxos.extend(await self.mempool.unordered_UTXOs(hashX))
-        spends = [] # await self.mempool.potential_spends(hashX)
+        spends = []  # await self.mempool.potential_spends(hashX)
         returned_utxos = []
         atomicals_id_map = {}
         for utxo in utxos:
@@ -457,7 +386,7 @@ async def hashX_ft_balances_atomicals(self, hashX):
             # This call is efficient in that it's cached underneath.
             # Now we only show the atomical id and its corresponding value
             # because it can always be fetched separately which is more efficient.
-            atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) 
+            atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id)
             atomical_id_compact = location_id_bytes_to_compact(atomical_id)
             atomicals_id_map[atomical_id_compact] = atomical_basic_info
             location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos)
@@ -479,7 +408,7 @@ async def hashX_ft_balances_atomicals(self, hashX):
             for atomical_id_entry_compact in returned_utxo['atomicals']:
                 atomical_id_basic_info = atomicals_id_map[atomical_id_entry_compact]
                 atomical_id_compact = atomical_id_basic_info['atomical_id']
-                assert(atomical_id_compact == atomical_id_entry_compact)
+                assert (atomical_id_compact == atomical_id_entry_compact)
                 if atomical_id_basic_info.get('type') == 'FT':
                     if return_struct['balances'].get(atomical_id_compact) is None:
                         return_struct['balances'][atomical_id_compact] = {}
@@ -495,7 +424,7 @@ async def hashX_nft_balances_atomicals(self, hashX):
         utxos = sorted(utxos)
         # Comment out the utxos for now and add it in later
         # utxos.extend(await self.mempool.unordered_UTXOs(hashX))
-        spends = [] # await self.mempool.potential_spends(hashX)
+        spends = []  # await self.mempool.potential_spends(hashX)
         returned_utxos = []
         atomicals_id_map = {}
         for utxo in utxos:
@@ -507,7 +436,7 @@ async def hashX_nft_balances_atomicals(self, hashX):
             # This call is efficient in that it's cached underneath.
             # Now we only show the atomical id and its corresponding value
             # because it can always be fetched separately which is more efficient.
-            atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) 
+            atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id)
             atomical_id_compact = location_id_bytes_to_compact(atomical_id)
             atomicals_id_map[atomical_id_compact] = atomical_basic_info
             location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos)
@@ -529,49 +458,63 @@ async def hashX_nft_balances_atomicals(self, hashX):
             for atomical_id_entry_compact in returned_utxo['atomicals']:
                 atomical_id_basic_info = atomicals_id_map[atomical_id_entry_compact]
                 atomical_id_compact = atomical_id_basic_info['atomical_id']
-                assert(atomical_id_compact == atomical_id_entry_compact)
+                assert (atomical_id_compact == atomical_id_entry_compact)
                 if atomical_id_basic_info.get('type') == 'NFT':
                     if return_struct['balances'].get(atomical_id_compact) is None:
                         return_struct['balances'][atomical_id_compact] = {}
                         return_struct['balances'][atomical_id_compact]['id'] = atomical_id_compact
                         return_struct['balances'][atomical_id_compact]['confirmed'] = 0
                     if atomical_id_basic_info.get('subtype'):
-                        return_struct['balances'][atomical_id_compact]['subtype'] = atomical_id_basic_info.get('subtype')
+                        return_struct['balances'][atomical_id_compact]['subtype'] = atomical_id_basic_info.get(
+                            'subtype')
                     if atomical_id_basic_info.get('$request_container'):
-                        return_struct['balances'][atomical_id_compact]['request_container'] = atomical_id_basic_info.get('$request_container')
+                        return_struct['balances'][atomical_id_compact][
+                            'request_container'] = atomical_id_basic_info.get('$request_container')
                     if atomical_id_basic_info.get('$container'):
-                        return_struct['balances'][atomical_id_compact]['container'] = atomical_id_basic_info.get('$container')
+                        return_struct['balances'][atomical_id_compact]['container'] = atomical_id_basic_info.get(
+                            '$container')
                     if atomical_id_basic_info.get('$dmitem'):
                         return_struct['balances'][atomical_id_compact]['dmitem'] = atomical_id_basic_info.get('$dmitem')
                     if atomical_id_basic_info.get('$request_dmitem'):
-                        return_struct['balances'][atomical_id_compact]['request_dmitem'] = atomical_id_basic_info.get('$request_dmitem')
+                        return_struct['balances'][atomical_id_compact]['request_dmitem'] = atomical_id_basic_info.get(
+                            '$request_dmitem')
                     if atomical_id_basic_info.get('$realm'):
                         return_struct['balances'][atomical_id_compact]['realm'] = atomical_id_basic_info.get('$realm')
                     if atomical_id_basic_info.get('$request_realm'):
-                        return_struct['balances'][atomical_id_compact]['request_realm'] = atomical_id_basic_info.get('$request_realm')
+                        return_struct['balances'][atomical_id_compact]['request_realm'] = atomical_id_basic_info.get(
+                            '$request_realm')
                     if atomical_id_basic_info.get('$subrealm'):
-                        return_struct['balances'][atomical_id_compact]['subrealm'] = atomical_id_basic_info.get('$subrealm')
+                        return_struct['balances'][atomical_id_compact]['subrealm'] = atomical_id_basic_info.get(
+                            '$subrealm')
                     if atomical_id_basic_info.get('$request_subrealm'):
-                        return_struct['balances'][atomical_id_compact]['request_subrealm'] = atomical_id_basic_info.get('$request_subrealm')
+                        return_struct['balances'][atomical_id_compact]['request_subrealm'] = atomical_id_basic_info.get(
+                            '$request_subrealm')
                     if atomical_id_basic_info.get('$full_realm_name'):
-                        return_struct['balances'][atomical_id_compact]['full_realm_name'] = atomical_id_basic_info.get('$full_realm_name')
+                        return_struct['balances'][atomical_id_compact]['full_realm_name'] = atomical_id_basic_info.get(
+                            '$full_realm_name')
                     if atomical_id_basic_info.get('$parent_container'):
-                        return_struct['balances'][atomical_id_compact]['parent_container'] = atomical_id_basic_info.get('$parent_container')
+                        return_struct['balances'][atomical_id_compact]['parent_container'] = atomical_id_basic_info.get(
+                            '$parent_container')
                     if atomical_id_basic_info.get('$parent_realm'):
-                        return_struct['balances'][atomical_id_compact]['parent_realm'] = atomical_id_basic_info.get('$parent_realm')
+                        return_struct['balances'][atomical_id_compact]['parent_realm'] = atomical_id_basic_info.get(
+                            '$parent_realm')
                     if atomical_id_basic_info.get('$parent_container_name'):
-                        return_struct['balances'][atomical_id_compact]['parent_container_name'] = atomical_id_basic_info.get('$parent_container_name')
+                        return_struct['balances'][atomical_id_compact][
+                            'parent_container_name'] = atomical_id_basic_info.get('$parent_container_name')
                     if atomical_id_basic_info.get('$bitwork'):
-                        return_struct['balances'][atomical_id_compact]['bitwork'] = atomical_id_basic_info.get('$bitwork')
+                        return_struct['balances'][atomical_id_compact]['bitwork'] = atomical_id_basic_info.get(
+                            '$bitwork')
                     if atomical_id_basic_info.get('$parents'):
-                        return_struct['balances'][atomical_id_compact]['parents'] = atomical_id_basic_info.get('$parents')
+                        return_struct['balances'][atomical_id_compact]['parents'] = atomical_id_basic_info.get(
+                            '$parents')
             if returned_utxo['height'] > 0:
                 return_struct['balances'][atomical_id_compact]['confirmed'] += returned_utxo['atomicals'][atomical_id_compact]
         return return_struct
 
     def atomical_resolve_id(self, compact_atomical_id_or_atomical_number):
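+        # Accepts either a compact atomical id or a plain atomical number;
+        # numbers are mapped back to their id via the database index.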
         compact_atomical_id = compact_atomical_id_or_atomical_number
-        if not isinstance(compact_atomical_id_or_atomical_number, int) and is_compact_atomical_id(compact_atomical_id_or_atomical_number):
+        if not isinstance(compact_atomical_id_or_atomical_number, int) and is_compact_atomical_id(
+                compact_atomical_id_or_atomical_number):
             assert_atomical_id(compact_atomical_id)
         else:
             found_atomical_id = self.db.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number)
@@ -588,7 +531,7 @@ async def atomical_id_get_location(self, compact_atomical_id):
 
     async def get_summary_info(self, atomical_hash_count=10):
         if atomical_hash_count and atomical_hash_count > 100000:
-            atomical_hash_count = 100000 
+            atomical_hash_count = 100000
 
         db_height = self.db.db_height
         last_block_hash = self.db.get_atomicals_block_hash(db_height)
@@ -700,63 +643,98 @@ async def hashX_listscripthash_atomicals(self, hashX, Verbose=False):
                     }
                     if atomical_id_basic_info.get('$realm'):
                         return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype')
-                        return_struct['atomicals'][atomical_id_ref]['request_realm_status'] = atomical_id_basic_info.get('$request_realm_status')
-                        return_struct['atomicals'][atomical_id_ref]['request_realm'] = atomical_id_basic_info.get('$request_realm')
+                        return_struct['atomicals'][atomical_id_ref][
+                            'request_realm_status'] = atomical_id_basic_info.get('$request_realm_status')
+                        return_struct['atomicals'][atomical_id_ref]['request_realm'] = atomical_id_basic_info.get(
+                            '$request_realm')
                         return_struct['atomicals'][atomical_id_ref]['realm'] = atomical_id_basic_info.get('$realm')
-                        return_struct['atomicals'][atomical_id_ref]['full_realm_name'] = atomical_id_basic_info.get('$full_realm_name')
+                        return_struct['atomicals'][atomical_id_ref]['full_realm_name'] = atomical_id_basic_info.get(
+                            '$full_realm_name')
                     elif atomical_id_basic_info.get('$subrealm'):
                         return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype')
-                        return_struct['atomicals'][atomical_id_ref]['request_subrealm_status'] = atomical_id_basic_info.get('$request_subrealm_status')
-                        return_struct['atomicals'][atomical_id_ref]['request_subrealm'] = atomical_id_basic_info.get('$request_subrealm')
-                        return_struct['atomicals'][atomical_id_ref]['parent_realm'] = atomical_id_basic_info.get('$parent_realm')
-                        return_struct['atomicals'][atomical_id_ref]['subrealm'] = atomical_id_basic_info.get('$subrealm')
-                        return_struct['atomicals'][atomical_id_ref]['full_realm_name'] = atomical_id_basic_info.get('$full_realm_name')
+                        return_struct['atomicals'][atomical_id_ref][
+                            'request_subrealm_status'] = atomical_id_basic_info.get('$request_subrealm_status')
+                        return_struct['atomicals'][atomical_id_ref]['request_subrealm'] = atomical_id_basic_info.get(
+                            '$request_subrealm')
+                        return_struct['atomicals'][atomical_id_ref]['parent_realm'] = atomical_id_basic_info.get(
+                            '$parent_realm')
+                        return_struct['atomicals'][atomical_id_ref]['subrealm'] = atomical_id_basic_info.get(
+                            '$subrealm')
+                        return_struct['atomicals'][atomical_id_ref]['full_realm_name'] = atomical_id_basic_info.get(
+                            '$full_realm_name')
                     elif atomical_id_basic_info.get('$dmitem'):
                         return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype')
-                        return_struct['atomicals'][atomical_id_ref]['request_dmitem_status'] = atomical_id_basic_info.get('$request_dmitem_status')
-                        return_struct['atomicals'][atomical_id_ref]['request_dmitem'] = atomical_id_basic_info.get('$request_dmitem')
-                        return_struct['atomicals'][atomical_id_ref]['parent_container'] = atomical_id_basic_info.get('$parent_container')
+                        return_struct['atomicals'][atomical_id_ref][
+                            'request_dmitem_status'] = atomical_id_basic_info.get('$request_dmitem_status')
+                        return_struct['atomicals'][atomical_id_ref]['request_dmitem'] = atomical_id_basic_info.get(
+                            '$request_dmitem')
+                        return_struct['atomicals'][atomical_id_ref]['parent_container'] = atomical_id_basic_info.get(
+                            '$parent_container')
                         return_struct['atomicals'][atomical_id_ref]['dmitem'] = atomical_id_basic_info.get('$dmitem')
                     elif atomical_id_basic_info.get('$ticker'):
                         return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype')
-                        return_struct['atomicals'][atomical_id_ref]['ticker_candidates'] = atomical_id_basic_info.get('$ticker_candidates')
-                        return_struct['atomicals'][atomical_id_ref]['request_ticker_status'] = atomical_id_basic_info.get('$request_ticker_status')
-                        return_struct['atomicals'][atomical_id_ref]['request_ticker'] = atomical_id_basic_info.get('$request_ticker')
+                        return_struct['atomicals'][atomical_id_ref]['ticker_candidates'] = atomical_id_basic_info.get(
+                            '$ticker_candidates')
+                        return_struct['atomicals'][atomical_id_ref][
+                            'request_ticker_status'] = atomical_id_basic_info.get('$request_ticker_status')
+                        return_struct['atomicals'][atomical_id_ref]['request_ticker'] = atomical_id_basic_info.get(
+                            '$request_ticker')
                         return_struct['atomicals'][atomical_id_ref]['ticker'] = atomical_id_basic_info.get('$ticker')
                     elif atomical_id_basic_info.get('$container'):
                         return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype')
-                        return_struct['atomicals'][atomical_id_ref]['request_container_status'] = atomical_id_basic_info.get('$request_container_status')
-                        return_struct['atomicals'][atomical_id_ref]['container'] = atomical_id_basic_info.get('$container')
-                        return_struct['atomicals'][atomical_id_ref]['request_container'] = atomical_id_basic_info.get('$request_container')
+                        return_struct['atomicals'][atomical_id_ref][
+                            'request_container_status'] = atomical_id_basic_info.get('$request_container_status')
+                        return_struct['atomicals'][atomical_id_ref]['container'] = atomical_id_basic_info.get(
+                            '$container')
+                        return_struct['atomicals'][atomical_id_ref]['request_container'] = atomical_id_basic_info.get(
+                            '$request_container')
                     # Label them as candidates if they were candidates
                     elif atomical_id_basic_info.get('subtype') == 'request_realm':
                         return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype')
-                        return_struct['atomicals'][atomical_id_ref]['request_realm_status'] = atomical_id_basic_info.get('$request_realm_status')
-                        return_struct['atomicals'][atomical_id_ref]['request_realm'] = atomical_id_basic_info.get('$request_realm')
-                        return_struct['atomicals'][atomical_id_ref]['realm_candidates'] = atomical_id_basic_info.get('$realm_candidates')
+                        return_struct['atomicals'][atomical_id_ref][
+                            'request_realm_status'] = atomical_id_basic_info.get('$request_realm_status')
+                        return_struct['atomicals'][atomical_id_ref]['request_realm'] = atomical_id_basic_info.get(
+                            '$request_realm')
+                        return_struct['atomicals'][atomical_id_ref]['realm_candidates'] = atomical_id_basic_info.get(
+                            '$realm_candidates')
                     elif atomical_id_basic_info.get('subtype') == 'request_subrealm':
                         return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype')
-                        return_struct['atomicals'][atomical_id_ref]['subrealm_candidates'] = atomical_id_basic_info.get('$subrealm_candidates')
-                        return_struct['atomicals'][atomical_id_ref]['request_subrealm_status'] = atomical_id_basic_info.get('$request_subrealm_status')
-                        return_struct['atomicals'][atomical_id_ref]['request_full_realm_name'] = atomical_id_basic_info.get('$request_full_realm_name')
-                        return_struct['atomicals'][atomical_id_ref]['request_subrealm'] = atomical_id_basic_info.get('$request_subrealm')
-                        return_struct['atomicals'][atomical_id_ref]['parent_realm'] = atomical_id_basic_info.get('$parent_realm')
+                        return_struct['atomicals'][atomical_id_ref]['subrealm_candidates'] = atomical_id_basic_info.get(
+                            '$subrealm_candidates')
+                        return_struct['atomicals'][atomical_id_ref][
+                            'request_subrealm_status'] = atomical_id_basic_info.get('$request_subrealm_status')
+                        return_struct['atomicals'][atomical_id_ref][
+                            'request_full_realm_name'] = atomical_id_basic_info.get('$request_full_realm_name')
+                        return_struct['atomicals'][atomical_id_ref]['request_subrealm'] = atomical_id_basic_info.get(
+                            '$request_subrealm')
+                        return_struct['atomicals'][atomical_id_ref]['parent_realm'] = atomical_id_basic_info.get(
+                            '$parent_realm')
                     elif atomical_id_basic_info.get('subtype') == 'request_dmitem':
                         return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype')
-                        return_struct['atomicals'][atomical_id_ref]['dmitem_candidates'] = atomical_id_basic_info.get('$dmitem_candidates')
-                        return_struct['atomicals'][atomical_id_ref]['request_dmitem_status'] = atomical_id_basic_info.get('$request_dmitem_status')
-                        return_struct['atomicals'][atomical_id_ref]['request_dmitem'] = atomical_id_basic_info.get('$request_dmitem')
-                        return_struct['atomicals'][atomical_id_ref]['parent_container'] = atomical_id_basic_info.get('$parent_container')
+                        return_struct['atomicals'][atomical_id_ref]['dmitem_candidates'] = atomical_id_basic_info.get(
+                            '$dmitem_candidates')
+                        return_struct['atomicals'][atomical_id_ref][
+                            'request_dmitem_status'] = atomical_id_basic_info.get('$request_dmitem_status')
+                        return_struct['atomicals'][atomical_id_ref]['request_dmitem'] = atomical_id_basic_info.get(
+                            '$request_dmitem')
+                        return_struct['atomicals'][atomical_id_ref]['parent_container'] = atomical_id_basic_info.get(
+                            '$parent_container')
                     elif atomical_id_basic_info.get('subtype') == 'request_container':
                         return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype')
-                        return_struct['atomicals'][atomical_id_ref]['container_candidates'] = atomical_id_basic_info.get('$container_candidates')
-                        return_struct['atomicals'][atomical_id_ref]['request_container_status'] = atomical_id_basic_info.get('$request_container_status')
-                        return_struct['atomicals'][atomical_id_ref]['request_container'] = atomical_id_basic_info.get('$request_container')
+                        return_struct['atomicals'][atomical_id_ref][
+                            'container_candidates'] = atomical_id_basic_info.get('$container_candidates')
+                        return_struct['atomicals'][atomical_id_ref][
+                            'request_container_status'] = atomical_id_basic_info.get('$request_container_status')
+                        return_struct['atomicals'][atomical_id_ref]['request_container'] = atomical_id_basic_info.get(
+                            '$request_container')
                     elif atomical_id_basic_info.get('$request_ticker_status'):
                         return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype')
-                        return_struct['atomicals'][atomical_id_ref]['ticker_candidates'] = atomical_id_basic_info.get('$ticker_candidates')
-                        return_struct['atomicals'][atomical_id_ref]['request_ticker_status'] = atomical_id_basic_info.get('$request_ticker_status')
-                        return_struct['atomicals'][atomical_id_ref]['request_ticker'] = atomical_id_basic_info.get('$request_ticker')
+                        return_struct['atomicals'][atomical_id_ref]['ticker_candidates'] = atomical_id_basic_info.get(
+                            '$ticker_candidates')
+                        return_struct['atomicals'][atomical_id_ref][
+                            'request_ticker_status'] = atomical_id_basic_info.get('$request_ticker_status')
+                        return_struct['atomicals'][atomical_id_ref]['request_ticker'] = atomical_id_basic_info.get(
+                            '$request_ticker')
 
                 if returned_utxo['height'] <= 0:
                     return_struct['atomicals'][atomical_id_ref]['unconfirmed'] += returned_utxo["atomicals"][atomical_id_ref]
@@ -799,28 +777,33 @@ async def handle_post_method(self, request):
 
     # verified
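+    # Illustrative request shape only (the method name below is an example):
+    #   GET /proxy/blockchain.atomicals.get?params=["<compact_atomical_id>"]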
", + "GET": "GET /proxy/:method?params=[\\\"value1\\\"] with string encoded array in the query argument \\\"params\\\" in the URL."}, + "healthCheck": "GET /proxy/health", + "github": "https://github.com/atomicals/electrumx-proxy", "license": "MIT"}} return web.json_response(data=result) # verified async def health(self, request): - result = {"success": True,"health": True} + result = {"success": True, "health": True} return web.json_response(data=result) # verified async def atomicals_list(self, request): - params = await self.format_params(request) + params = await format_params(request) offset = params.get(0, 100) limit = params.get(1, 0) asc = params.get(2, True) - '''Return the list of atomicals order by reverse atomical number''' + """Return the list of atomicals order by reverse atomical number""" formatted_results = await self.atomicals_list_get(offset, limit, asc) return formatted_results # verified async def atomicals_get(self, request): - params = await self.format_params(request) + params = await format_params(request) compact_atomical_id_or_atomical_number = params.get(0, "") compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) @@ -828,8 +811,8 @@ async def atomicals_get(self, request): # verified async def scripthash_listunspent(self, request): - '''Return the list of UTXOs of a scripthash.''' - params = await self.format_params(request) + """Return the list of UTXOs of a scripthash.""" + params = await format_params(request) scripthash = params.get(0, "") hashX = scripthash_to_hashX(scripthash) @@ -837,43 +820,16 @@ async def scripthash_listunspent(self, request): # need verify async def transaction_broadcast(self, request): - '''Broadcast a raw transaction to the network. - raw_tx: the raw transaction as a hexadecimal string''' - params = await self.format_params(request) + """Broadcast a raw transaction to the network. + raw_tx: the raw transaction as a hexadecimal string""" + params = await format_params(request) raw_tx = params.get(0, "") - - # self.bump_cost(0.25 + len(raw_tx) / 5000) - # This returns errors as JSON RPC errors, as is natural - try: - hex_hash = await self.session_mgr.broadcast_transaction_validated(raw_tx, True) - except DaemonError as e: - error, = e.args - message = error['message'] - self.logger.info(f'error sending transaction: {message}') - raise RPCError(BAD_REQUEST, 'the transaction was rejected by ' - f'network rules.\n\n{message}\n[{raw_tx}]') - except AtomicalsValidationError as e: - self.logger.info(f'error validating atomicals transaction: {e}') - raise RPCError(ATOMICALS_INVALID_TX, 'the transaction was rejected by ' - f'atomicals rules.\n\n{e}\n[{raw_tx}]') - - else: - self.txs_sent += 1 - client_ver = util.protocol_tuple(self.client) - if client_ver != (0, ): - msg = self.coin.warn_old_client_on_tx_broadcast(client_ver) - if msg: - self.logger.info(f'sent tx: {hex_hash}. 
-                                     f'client from {self.client}')
-                    return msg
-
-            self.logger.info(f'sent tx: {hex_hash}')
-            return hex_hash
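+        # The full broadcast flow (Atomicals validation, daemon error mapping
+        # and old-client warnings) now lives in SharedSession.transaction_broadcast.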
+        return await self.ss.transaction_broadcast(raw_tx)
 
     # verified
     async def scripthash_get_history(self, request):
-        '''Return the confirmed and unconfirmed history of a scripthash.'''
-        params = await self.format_params(request)
+        """Return the confirmed and unconfirmed history of a scripthash."""
+        params = await format_params(request)
         scripthash = params.get(0)
 
         hashX = scripthash_to_hashX(scripthash)
@@ -881,12 +837,12 @@ async def scripthash_get_history(self, request):
 
     # verified
     async def transaction_get(self, request):
-        '''Return the serialized raw transaction given its hash
+        """Return the serialized raw transaction given its hash
 
         tx_hash: the transaction hash as a hexadecimal string
         verbose: passed on to the daemon
-        '''
-        params = await self.format_params(request)
+        """
+        params = await format_params(request)
         tx_hash = params.get(0, "")
         verbose = params.get(1, False)
 
@@ -899,17 +855,18 @@ async def transaction_get(self, request):
 
     # verified
     async def atomical_get_state(self, request):
     # async def atomical_get_state(self, compact_atomical_id_or_atomical_number, Verbose=False):
-        params = await self.format_params(request)
+        params = await format_params(request)
         compact_atomical_id_or_atomical_number = params.get(0, "")
-        Verbose = params.get(0, False)
+        Verbose = params.get(1, False)
 
         compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number)
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_state(compact_atomical_id, Verbose)}
+        return {'global': await self.get_summary_info(),
+                'result': await self.atomical_id_get_state(compact_atomical_id, Verbose)}
 
     # verified
     async def scripthash_get_balance(self, request):
-        '''Return the confirmed and unconfirmed balance of a scripthash.'''
-        params = await self.format_params(request)
+        """Return the confirmed and unconfirmed balance of a scripthash."""
+        params = await format_params(request)
         scripthash = params.get(0, "")
 
         hashX = scripthash_to_hashX(scripthash)
@@ -917,15 +874,16 @@ async def scripthash_get_balance(self, request):
 
     # verified
     async def atomicals_get_location(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         compact_atomical_id_or_atomical_number = params.get(0, "")
         compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number)
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_location(compact_atomical_id)}
+        return {'global': await self.get_summary_info(),
+                'result': await self.atomical_id_get_location(compact_atomical_id)}
 
     # verified
     async def atomicals_listscripthash(self, request):
-        '''Return the list of Atomical UTXOs for an address'''
-        params = await self.format_params(request)
+        """Return the list of Atomical UTXOs for an address"""
+        params = await format_params(request)
         scripthash = params.get(0, "")
         Verbose = params.get(1, False)
 
@@ -934,14 +892,14 @@ async def atomicals_listscripthash(self, request):
 
     # verified
     async def atomicals_get_global(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         hashes = params.get(0, 10)
         return {'global': await self.get_summary_info(hashes)}
 
     async def block_header(self, request):
-        '''Return a raw block header as a hexadecimal string, or as a
-        dictionary with a merkle proof.'''
-        params = await self.format_params(request)
+        """Return a raw block header as a hexadecimal string, or as a
+        dictionary with a merkle proof."""
+        params = await format_params(request)
         height = params.get(0, 0)
         cp_height = params.get(1, 0)
         height = non_negative_integer(height)
@@ -954,13 +912,13 @@ async def block_header(self, request):
         return result
 
     async def block_headers(self, request):
-        '''Return count concatenated block headers as hex for the main chain;
+        """Return count concatenated block headers as hex for the main chain;
         starting at start_height.
 
         start_height and count must be non-negative integers.  At most
         MAX_CHUNK_SIZE headers will be returned.
-        '''
-        params = await self.format_params(request)
+        """
+        params = await format_params(request)
         start_height = params.get(0, 0)
         count = params.get(1, 0)
         cp_height = params.get(2, 0)
@@ -979,13 +937,13 @@ async def block_headers(self, request):
         return result
 
     async def estimatefee(self, request):
-        '''The estimated transaction fee per kilobyte to be paid for a
+        """The estimated transaction fee per kilobyte to be paid for a
         transaction to be included within a certain number of blocks.
 
         number: the number of blocks
         mode: CONSERVATIVE or ECONOMICAL estimation mode
-        '''
-        params = await self.format_params(request)
+        """
+        params = await format_params(request)
         number = params.get(0, 0)
         mode = params.get(1, None)
 
@@ -1023,40 +981,40 @@ async def estimatefee(self, request):
         return feerate
 
     async def headers_subscribe(self, request):
-        '''Subscribe to get raw headers of new blocks.'''
+        """Subscribe to get raw headers of new blocks."""
         self.subscribe_headers = True
         return self.session_mgr.hsub_results
 
     async def relayfee(self, request):
-        '''The minimum fee a low-priority tx must pay in order to be accepted
-        to the daemon's memory pool.'''
+        """The minimum fee a low-priority tx must pay in order to be accepted
+        to the daemon's memory pool."""
         return await self.daemon_request('relayfee')
 
     async def scripthash_get_mempool(self, request):
-        '''Return the mempool transactions touching a scripthash.'''
-        params = await self.format_params(request)
+        """Return the mempool transactions touching a scripthash."""
+        params = await format_params(request)
         scripthash = params.get(0, "")
 
         hashX = scripthash_to_hashX(scripthash)
         return await self.unconfirmed_history(hashX)
 
     async def scripthash_subscribe(self, request):
-        '''Subscribe to a script hash.
+        """Subscribe to a script hash.
 
-        scripthash: the SHA256 hash of the script to subscribe to'''
-        params = await self.format_params(request)
+        scripthash: the SHA256 hash of the script to subscribe to"""
+        params = await format_params(request)
         scripthash = params.get(0, "")
 
         hashX = scripthash_to_hashX(scripthash)
         return await self.hashX_subscribe(hashX, scripthash)
 
     async def transaction_merkle(self, request):
-        '''Return the merkle branch to a confirmed transaction given its hash
+        """Return the merkle branch to a confirmed transaction given its hash
         and height.
 
         tx_hash: the transaction hash as a hexadecimal string
         height: the height of the block it is in
-        '''
-        params = await self.format_params(request)
+        """
+        params = await format_params(request)
         tx_hash = params.get(0, "")
         height = params.get(1, "")
 
@@ -1070,10 +1028,10 @@ async def transaction_merkle(self, request):
         return res
 
     async def transaction_id_from_pos(self, request):
-        '''Return the txid and optionally a merkle proof, given
+        """Return the txid and optionally a merkle proof, given
         a block height and position in the block.
 
-        '''
-        params = await self.format_params(request)
+        """
+        params = await format_params(request)
         height = params.get(0, 0)
         tx_pos = params.get(1, 0)
         merkle = params.get(2, False)
@@ -1100,72 +1058,58 @@ async def compact_fee_histogram(self, request):
         return await self.mempool.compact_fee_histogram()
 
     async def rpc_add_peer(self, request):
-        '''Add a peer.
+        """Add a peer.
 
         real_name: "bch.electrumx.cash t50001 s50002" for example
-        '''
-        params = await self.format_params(request)
+        """
+        params = await format_params(request)
         real_name = params.get(0, "")
         await self.peer_mgr.add_localRPC_peer(real_name)
         res = f"peer '{real_name}' added"
         return res
 
-    async def add_peer(self, request):
-        '''Add a peer (but only if the peer resolves to the source).'''
-        params = await self.format_params(request)
-        features = params.get(0, None)
-        self.is_peer = True
-        return await self.peer_mgr.on_add_peer(features, self.remote_address())
-
     async def donation_address(self, request):
-        '''Return the donation address as a string, empty if there is none.'''
+        """Return the donation address as a string, empty if there is none."""
        return self.env.donation_address
 
     async def server_features_async(self, request):
         return self.server_features(self.env)
 
     async def peers_subscribe(self, request):
-        '''Return the server peers as a list of (ip, host, details) tuples.'''
-        return self.peer_mgr.on_peers_subscribe(self.is_tor())
+        """Return the server peers as a list of (ip, host, details) tuples."""
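+        # No Tor detection is done for plain HTTP requests, so the non-Tor
+        # peer list is always served here.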
+        return self.peer_mgr.on_peers_subscribe(False)
 
     async def ping(self, request):
-        '''Serves as a connection keep-alive mechanism and for the client to
+        """Serves as a connection keep-alive mechanism and for the client to
         confirm the server is still responding.
-        '''
+        """
         return None
 
     async def transaction_broadcast_validate(self, request):
-        '''Simulate a Broadcast a raw transaction to the network.
-        raw_tx: the raw transaction as a hexadecimal string to validate for Atomicals FT rules'''
-        params = await self.format_params(request)
+        """Simulate broadcasting a raw transaction to the network.
+        raw_tx: the raw transaction as a hexadecimal string to validate for Atomicals FT rules"""
+        params = await format_params(request)
         raw_tx = params.get(0, "")
-        # This returns errors as JSON RPC errors, as is natural
-        try:
-            hex_hash = await self.session_mgr.broadcast_transaction_validated(raw_tx, False)
-            return hex_hash
-        except AtomicalsValidationError as e:
-            self.logger.info(f'error validating atomicals transaction: {e}')
-            raise RPCError(ATOMICALS_INVALID_TX, 'the transaction was rejected by '
-                                                 f'atomicals rules.\n\n{e}\n[{raw_tx}]')
+        return await self.ss.transaction_broadcast_validate(raw_tx)
 
     async def atomicals_get_ft_balances(self, request):
-        '''Return the FT balances for a scripthash address'''
-        params = await self.format_params(request)
+        """Return the FT balances for a scripthash address"""
+        params = await format_params(request)
         scripthash = params.get(0, "")
 
         hashX = scripthash_to_hashX(scripthash)
         return await self.hashX_ft_balances_atomicals(hashX)
 
     async def atomicals_get_nft_balances(self, request):
-        '''Return the NFT balances for a scripthash address'''
-        params = await self.format_params(request)
+        """Return the NFT balances for a scripthash address"""
+        params = await format_params(request)
         scripthash = params.get(0, "")
 
         hashX = scripthash_to_hashX(scripthash)
         return await self.hashX_nft_balances_atomicals(hashX)
 
     async def atomicals_num_to_id(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         limit = params.get(0, 10)
         offset = params.get(1, 0)
         asc = params.get(2, False)
@@ -1174,32 +1118,33 @@ async def atomicals_num_to_id(self, request):
         atomicals_num_to_id_map_reformatted = {}
         for num, id in atomicals_num_to_id_map.items():
             atomicals_num_to_id_map_reformatted[num] = location_id_bytes_to_compact(id)
-        return {'global': await self.get_summary_info(), 'result': atomicals_num_to_id_map_reformatted }
+        return {'global': await self.get_summary_info(), 'result': atomicals_num_to_id_map_reformatted}
 
     async def atomicals_block_hash(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         height = params.get(0, self.session_mgr.bp.height)
         block_hash = self.db.get_atomicals_block_hash(height)
         return {'result': block_hash}
 
     async def atomicals_block_txs(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         height = params.get(0, "")
         tx_list = self.session_mgr.bp.get_atomicals_block_txs(height)
-        return {'global': await self.get_summary_info(), 'result': tx_list }
+        return {'global': await self.get_summary_info(), 'result': tx_list}
 
     async def atomicals_dump(self, request):
         self.db.dump()
         return {'result': True}
 
     async def atomicals_at_location(self, request):
-        '''Return the Atomicals at a specific location id```
-        '''
-        params = await self.format_params(request)
+        """Return the Atomicals at a specific location id.
+        """
+        params = await format_params(request)
         compact_location_id = params.get(0, "")
         atomical_basic_infos = []
-        atomicals_found_at_location = self.db.get_atomicals_by_location_extended_info_long_form(compact_to_location_id_bytes(compact_location_id))
+        atomicals_found_at_location = self.db.get_atomicals_by_location_extended_info_long_form(
+            compact_to_location_id_bytes(compact_location_id))
         # atomicals_found_at_location['atomicals']
         # atomicals_found_at_location['atomicals'].sort(key=lambda x: x['atomical_number'])
         for atomical_id in atomicals_found_at_location['atomicals']:
@@ -1212,37 +1157,42 @@ async def atomicals_at_location(self, request):
         }
 
     async def atomical_get_state_history(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         compact_atomical_id_or_atomical_number = params.get(0, "")
 
         compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number)
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_state_history(compact_atomical_id)}
+        return {'global': await self.get_summary_info(),
+                'result': await self.atomical_id_get_state_history(compact_atomical_id)}
 
     async def atomical_get_events(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         compact_atomical_id_or_atomical_number = params.get(0, "")
 
         compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number)
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_events(compact_atomical_id)}
+        return {'global': await self.get_summary_info(),
+                'result': await self.atomical_id_get_events(compact_atomical_id)}
 
     async def atomicals_get_tx_history(self, request):
-        '''Return the history of an Atomical```
+        """Return the history of an Atomical
 
         atomical_id: the mint transaction hash + 'i' of the atomical id
         verbose: to determine whether to print extended information
-        '''
-        params = await self.format_params(request)
+        """
+        params = await format_params(request)
         compact_atomical_id_or_atomical_number = params.get(0, "")
         compact_atomical_id = compact_atomical_id_or_atomical_number
-        if isinstance(compact_atomical_id_or_atomical_number, int) != True and is_compact_atomical_id(compact_atomical_id_or_atomical_number):
+        if not isinstance(compact_atomical_id_or_atomical_number, int) and is_compact_atomical_id(
+                compact_atomical_id_or_atomical_number):
             assert_atomical_id(compact_atomical_id)
         else:
-            compact_atomical_id = location_id_bytes_to_compact(self.db.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number))
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_tx_history(compact_atomical_id)}
+            compact_atomical_id = location_id_bytes_to_compact(
+                self.db.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number))
+        return {'global': await self.get_summary_info(),
+                'result': await self.atomical_id_get_tx_history(compact_atomical_id)}
 
     # Get a summary view of a realm and if it's allowing mints and what parts already existed of a subrealm
     async def atomicals_get_realm_info(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         full_name = params.get(0, "")
         Verbose = params.get(1, False)
 
@@ -1259,10 +1209,12 @@ async def atomicals_get_realm_info(self, request):
         height = self.session_mgr.bp.height
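+        # Walk the dotted name left to right: the first component must resolve
+        # as a top-level realm, each later one as a subrealm of the previous hit.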
         for name_part in split_names:
             if level == 0:
-                realm_status, last_found_realm, latest_all_entries_candidates = self.session_mgr.bp.get_effective_realm(name_part, height)
+                realm_status, last_found_realm, latest_all_entries_candidates = self.session_mgr.bp.get_effective_realm(
+                    name_part, height)
             else:
                 self.logger.info(f'atomicals_get_realm_info {last_found_realm} {name_part}')
-                realm_status, last_found_realm, latest_all_entries_candidates = self.session_mgr.bp.get_effective_subrealm(last_found_realm, name_part, height)
+                realm_status, last_found_realm, latest_all_entries_candidates = self.session_mgr.bp.get_effective_subrealm(
+                    last_found_realm, name_part, height)
             # stops when it does not found the realm component
             if realm_status != 'verified':
                 break
@@ -1283,23 +1235,26 @@ async def atomicals_get_realm_info(self, request):
                 is_first_name_part = False
             else:
                 joined_name += '.'
-            joined_name += name_element['name_part'] 
+            joined_name += name_element['name_part']
         # Nothing was found
         realms_path_len = len(realms_path)
         if realms_path_len == 0:
             return {'result': {
-                'atomical_id': None,
-                'top_level_realm_atomical_id': None,
-                'top_level_realm_name': None,
-                'nearest_parent_realm_atomical_id': None,
-                'nearest_parent_realm_name': None,
-                'request_full_realm_name': full_name,
-                'found_full_realm_name': None,
-                'missing_name_parts': full_name,
-                'candidates': format_name_type_candidates_to_rpc(latest_all_entries_candidates, self.session_mgr.bp.build_atomical_id_to_candidate_map(latest_all_entries_candidates)) }
-            }
+                'atomical_id': None,
+                'top_level_realm_atomical_id': None,
+                'top_level_realm_name': None,
+                'nearest_parent_realm_atomical_id': None,
+                'nearest_parent_realm_name': None,
+                'request_full_realm_name': full_name,
+                'found_full_realm_name': None,
+                'missing_name_parts': full_name,
+                'candidates': format_name_type_candidates_to_rpc(latest_all_entries_candidates,
+                                                                 self.session_mgr.bp.build_atomical_id_to_candidate_map(
                                                                      latest_all_entries_candidates))}
+            }
 
         # Populate the subrealm minting rules for a parent atomical
         that = self
+
         def populate_rules_response_struct(parent_atomical_id, struct_to_populate, Verbose):
             current_height = that.session_mgr.bp.height
             subrealm_mint_mod_history = that.session_mgr.bp.get_mod_history(parent_atomical_id, current_height)
@@ -1313,7 +1268,9 @@ def populate_rules_response_struct(parent_atomical_id, struct_to_populate, Verbo
             }
             if current_height_rules_list and len(current_height_rules_list) > 0:
                 nearest_parent_realm_subrealm_mint_allowed = True
-            struct_to_populate['nearest_parent_realm_subrealm_mint_allowed'] = nearest_parent_realm_subrealm_mint_allowed
+            struct_to_populate[
+                'nearest_parent_realm_subrealm_mint_allowed'] = nearest_parent_realm_subrealm_mint_allowed
+
         #
         #
         #
@@ -1333,7 +1290,10 @@ def populate_rules_response_struct(parent_atomical_id, struct_to_populate, Verbo
             nearest_parent_realm_atomical_id = top_level_realm
             nearest_parent_realm_name = top_level_realm_name
         final_subrealm_name = split_names[-1]
-        applicable_rule_map = self.session_mgr.bp.build_applicable_rule_map(latest_all_entries_candidates, compact_to_location_id_bytes(nearest_parent_realm_atomical_id), final_subrealm_name)
+        applicable_rule_map = self.session_mgr.bp.build_applicable_rule_map(latest_all_entries_candidates,
+                                                                            compact_to_location_id_bytes(
+                                                                                nearest_parent_realm_atomical_id),
+                                                                            final_subrealm_name)
         return_struct = {
             'atomical_id': realms_path[-1]['atomical_id'],
             'top_level_realm_atomical_id': top_level_realm,
@@ -1343,9 +1303,12 @@ def populate_rules_response_struct(parent_atomical_id, struct_to_populate, Verbo
             'request_full_realm_name': full_name,
             'found_full_realm_name': joined_name,
             'missing_name_parts': None,
-            'candidates': format_name_type_candidates_to_rpc(latest_all_entries_candidates, self.session_mgr.bp.build_atomical_id_to_candidate_map(latest_all_entries_candidates))
+            'candidates': format_name_type_candidates_to_rpc(latest_all_entries_candidates,
+                                                             self.session_mgr.bp.build_atomical_id_to_candidate_map(
+                                                                 latest_all_entries_candidates))
         }
-        populate_rules_response_struct(compact_to_location_id_bytes(nearest_parent_realm_atomical_id), return_struct, Verbose)
+        populate_rules_response_struct(compact_to_location_id_bytes(nearest_parent_realm_atomical_id),
+                                       return_struct, Verbose)
         return {'result': return_struct}
 
     # The number of realms and components do not match, that is because at least the top level realm or intermediate subrealm was found
@@ -1363,9 +1326,12 @@ def populate_rules_response_struct(parent_atomical_id, struct_to_populate, Verbo
             nearest_parent_realm_atomical_id = top_level_realm
             nearest_parent_realm_name = top_level_realm_name
 
-        missing_name_parts = '.'.join(split_names[ len(realms_path):])
+        missing_name_parts = '.'.join(split_names[len(realms_path):])
         final_subrealm_name = split_names[-1]
-        applicable_rule_map = self.session_mgr.bp.build_applicable_rule_map(latest_all_entries_candidates, compact_to_location_id_bytes(nearest_parent_realm_atomical_id), final_subrealm_name)
+        applicable_rule_map = self.session_mgr.bp.build_applicable_rule_map(latest_all_entries_candidates,
+                                                                            compact_to_location_id_bytes(
+                                                                                nearest_parent_realm_atomical_id),
+                                                                            final_subrealm_name)
         return_struct = {
             'atomical_id': None,
             'top_level_realm_atomical_id': top_level_realm,
@@ -1376,19 +1342,24 @@ def populate_rules_response_struct(parent_atomical_id, struct_to_populate, Verbo
             'found_full_realm_name': joined_name,
             'missing_name_parts': missing_name_parts,
             'final_subrealm_name': final_subrealm_name,
-            'candidates': format_name_type_candidates_to_rpc_for_subname(latest_all_entries_candidates, self.session_mgr.bp.build_atomical_id_to_candidate_map(latest_all_entries_candidates))
+            'candidates': format_name_type_candidates_to_rpc_for_subname(latest_all_entries_candidates,
+                                                                         self.session_mgr.bp.build_atomical_id_to_candidate_map(
+                                                                             latest_all_entries_candidates))
         }
         if Verbose:
-            populate_rules_response_struct(compact_to_location_id_bytes(nearest_parent_realm_atomical_id), return_struct, Verbose)
+            populate_rules_response_struct(compact_to_location_id_bytes(nearest_parent_realm_atomical_id),
+                                           return_struct, Verbose)
         return {'result': return_struct}
 
     async def atomicals_get_by_realm(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         name = params.get(0, "")
         height = self.session_mgr.bp.height
         status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_realm(name, height)
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries,
+                                                               self.session_mgr.bp.build_atomical_id_to_candidate_map(
+                                                                   all_entries))
 
         if candidate_atomical_id:
             candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
@@ -1412,15 +1383,18 @@ async def atomicals_get_by_realm(self, request):
         return res
 
     async def atomicals_get_by_subrealm(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         parent_compact_atomical_id_or_atomical_number = params.get(0, "")
         name = params.get(1, "")
         height = self.session_mgr.bp.height
 
         compact_atomical_id_parent = self.atomical_resolve_id(parent_compact_atomical_id_or_atomical_number)
         atomical_id_parent = compact_to_location_id_bytes(compact_atomical_id_parent)
-        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_subrealm(atomical_id_parent, name, height)
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_subrealm(atomical_id_parent,
+                                                                                                name, height)
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries,
+                                                               self.session_mgr.bp.build_atomical_id_to_candidate_map(
+                                                                   all_entries))
 
         if candidate_atomical_id:
             candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
@@ -1444,15 +1418,18 @@ async def atomicals_get_by_subrealm(self, request):
         return res
 
     async def atomicals_get_by_dmitem(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         parent_compact_atomical_id_or_atomical_number = params.get(0, "")
         name = params.get(1, "")
         height = self.session_mgr.bp.height
 
         compact_atomical_id_parent = self.atomical_resolve_id(parent_compact_atomical_id_or_atomical_number)
         atomical_id_parent = compact_to_location_id_bytes(compact_atomical_id_parent)
-        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(atomical_id_parent, name, height)
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(atomical_id_parent, name,
+                                                                                              height)
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries,
+                                                               self.session_mgr.bp.build_atomical_id_to_candidate_map(
+                                                                   all_entries))
 
         if candidate_atomical_id:
             candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
@@ -1477,12 +1454,14 @@ async def atomicals_get_by_dmitem(self, request):
 
     # verified
     async def atomicals_get_by_ticker(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         ticker = params.get(0, "")
         height = self.session_mgr.bp.height
         status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_ticker(ticker, height)
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries,
+                                                               self.session_mgr.bp.build_atomical_id_to_candidate_map(
+                                                                   all_entries))
 
         if candidate_atomical_id:
             candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
@@ -1505,12 +1484,14 @@ async def atomicals_get_by_ticker(self, request):
         }
 
     async def atomicals_get_by_container(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         container = params.get(0, "")
         height = self.session_mgr.bp.height
         status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, height)
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries,
+                                                               self.session_mgr.bp.build_atomical_id_to_candidate_map(
+                                                                   all_entries))
 
         if candidate_atomical_id:
             candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
@@ -1534,22 +1515,27 @@ async def atomicals_get_by_container(self, request):
         return res
 
     async def atomicals_get_by_container_item(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         container = params.get(0, "")
         item_name = params.get(1, "")
         height = self.session_mgr.bp.height
 
         status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, height)
         found_atomical_id = None
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries,
+                                                               self.session_mgr.bp.build_atomical_id_to_candidate_map(
+                                                                   all_entries))
         if status == 'verified':
             found_atomical_id = candidate_atomical_id
         else:
             self.logger.info(f'formatted_entries {formatted_entries}')
             raise RPCError(BAD_REQUEST, f'Container does not exist')
 
-        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(found_atomical_id, item_name, height)
+        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(found_atomical_id,
+                                                                                              item_name, height)
         found_item_atomical_id = None
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries,
+                                                               self.session_mgr.bp.build_atomical_id_to_candidate_map(
+                                                                   all_entries))
 
         if candidate_atomical_id:
             candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
@@ -1570,7 +1556,7 @@ async def atomicals_get_by_container_item(self, request):
         }
 
     async def atomicals_get_by_container_item_validation(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         container = params.get(0, "")
         item_name = params.get(1, "")
         bitworkc = params.get(2, "")
@@ -1583,7 +1569,9 @@ async def atomicals_get_by_container_item_validation(self, request):
         height = self.session_mgr.bp.height
 
         status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, height)
         found_parent_atomical_id = None
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries,
+                                                               self.session_mgr.bp.build_atomical_id_to_candidate_map(
+                                                                   all_entries))
         if status == 'verified':
             found_parent_atomical_id = candidate_atomical_id
         else:
@@ -1601,9 +1589,12 @@ async def atomicals_get_by_container_item_validation(self, request):
             raise RPCError(BAD_REQUEST, f'Container dmint status is invalid')
 
         dmint = container_dmint_status.get('dmint')
-        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(found_parent_atomical_id, item_name, height)
+        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(found_parent_atomical_id,
+                                                                                              item_name, height)
         found_item_atomical_id = None
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries,
+                                                               self.session_mgr.bp.build_atomical_id_to_candidate_map(
+                                                                   all_entries))
         if candidate_atomical_id:
             candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
             if status == 'verified':
@@ -1613,8 +1604,12 @@ async def atomicals_get_by_container_item_validation(self, request):
 
         if not proof or not isinstance(proof, list) or len(proof) == 0:
             raise RPCError(BAD_REQUEST, f'Proof must be provided')
 
-        applicable_rule, state_at_height = self.session_mgr.bp.get_applicable_rule_by_height(found_parent_atomical_id, item_name, height - MINT_SUBNAME_RULES_BECOME_EFFECTIVE_IN_BLOCKS, DMINT_PATH)
-        proof_valid, target_vector, target_hash = validate_merkle_proof_dmint(dmint['merkle'], item_name, bitworkc, bitworkr, main_name, main_hash, proof)
+        applicable_rule, state_at_height = self.session_mgr.bp.get_applicable_rule_by_height(found_parent_atomical_id,
+                                                                                             item_name,
+                                                                                             height - MINT_SUBNAME_RULES_BECOME_EFFECTIVE_IN_BLOCKS,
+                                                                                             DMINT_PATH)
+        proof_valid, target_vector, target_hash = validate_merkle_proof_dmint(dmint['merkle'], item_name, bitworkc,
+                                                                              bitworkr, main_name, main_hash, proof)
         if applicable_rule and applicable_rule.get('matched_rule'):
             applicable_rule = applicable_rule.get('matched_rule')
 
@@ -1646,12 +1641,13 @@ def auto_populate_container_regular_items_fields(self, items):
         return auto_encode_bytes_elements(items)
 
     async def atomicals_get_container_items(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         container = params.get(0, "")
         limit = params.get(1, 10)
         offset = params.get(2, 0)
 
-        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, self.session_mgr.bp.height)
+        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container,
+                                                                                                 self.session_mgr.bp.height)
         found_atomical_id = None
         if status == 'verified':
             found_atomical_id = candidate_atomical_id
@@ -1699,40 +1695,44 @@ async def atomicals_get_container_items(self, request):
         return res
 
     async def atomicals_get_ft_info(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         compact_atomical_id_or_atomical_number = params.get(0, "")
         compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number)
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_ft_info(compact_atomical_id)}
+        return {'global': await self.get_summary_info(),
+                'result': await self.atomical_id_get_ft_info(compact_atomical_id)}
 
     async def atomicals_get_dft_mints(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         compact_atomical_id_or_atomical_number = params.get(0, "")
         atomical_id = compact_to_location_id_bytes(compact_atomical_id_or_atomical_number)
         Limit = params.get(1, 100)
         Offset = params.get(2, 0)
-        return {'global': await self.get_summary_info(), 'result': self.session_mgr.bp.get_distmints_by_atomical_id(atomical_id, Limit, Offset)}
+        return {'global': await self.get_summary_info(),
+                'result': self.session_mgr.bp.get_distmints_by_atomical_id(atomical_id, Limit, Offset)}
 
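+    # The search endpoints below all delegate to atomicals_search_name_template;
+    # the db prefix (b'tick', b'rlm', b'srlm', b'co') selects the index scanned.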
     # verified
     async def atomicals_search_tickers(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         prefix = params.get(0, None)
         Reverse = params.get(1, False)
         Limit = params.get(2, 100)
         Offset = params.get(3, 0)
         is_verified_only = params.get(4, True)
-        return self.atomicals_search_name_template(b'tick', 'ticker', None, prefix, Reverse, Limit, Offset, is_verified_only)
+        return self.atomicals_search_name_template(b'tick', 'ticker', None, prefix, Reverse, Limit, Offset,
+                                                   is_verified_only)
 
     async def atomicals_search_realms(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         prefix = params.get(0, None)
         Reverse = params.get(1, False)
         Limit = params.get(2, 100)
         Offset = params.get(3, 0)
         is_verified_only = params.get(4, True)
-        return self.atomicals_search_name_template(b'rlm', 'realm', None, prefix, Reverse, Limit, Offset, is_verified_only)
+        return self.atomicals_search_name_template(b'rlm', 'realm', None, prefix, Reverse, Limit, Offset,
+                                                   is_verified_only)
 
     async def atomicals_search_subrealms(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         parent_realm_id_compact = params.get(0, "")
         prefix = params.get(1, None)
         Reverse = params.get(2, False)
@@ -1740,21 +1740,23 @@ async def atomicals_search_subrealms(self, request):
         Offset = params.get(4, 0)
         is_verified_only = params.get(5, True)
         parent_realm_id_long_form = compact_to_location_id_bytes(parent_realm_id_compact)
-        return self.atomicals_search_name_template(b'srlm', 'subrealm', parent_realm_id_long_form, prefix, Reverse, Limit, Offset, is_verified_only)
+        return self.atomicals_search_name_template(b'srlm', 'subrealm', parent_realm_id_long_form, prefix, Reverse,
+                                                   Limit, Offset, is_verified_only)
 
     async def atomicals_search_containers(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         prefix = params.get(0, None)
         Reverse = params.get(1, False)
         Limit = params.get(2, 100)
         Offset = params.get(3, 0)
         is_verified_only = params.get(4, True)
-        return self.atomicals_search_name_template(b'co', 'collection', None, prefix, Reverse, Limit, Offset, is_verified_only)
+        return self.atomicals_search_name_template(b'co', 'collection', None, prefix, Reverse, Limit, Offset,
+                                                   is_verified_only)
 
     async def atomicals_get_holders(self, request):
-        '''Return the holder by a specific location id```
-        '''
-        params = await self.format_params(request)
+        """Return the holders of a specific location id.
+        """
+        params = await format_params(request)
         compact_atomical_id = params.get(0, "")
         limit = params.get(1, 50)
         offset = params.get(2, 0)
@@ -1771,7 +1773,7 @@ async def atomicals_get_holders(self, request):
         if max_supply < 0:
             mint_amount = atomical.get("mint_info", {}).get("args", {}).get("mint_amount")
             max_supply = DFT_MINT_MAX_MAX_COUNT_DENSITY * mint_amount
-        for holder in atomical.get("holders", [])[offset:offset+limit]:
+        for holder in atomical.get("holders", [])[offset:offset + limit]:
             percent = holder['holding'] / max_supply
             formatted_results.append({
                 "percent": percent,
@@ -1779,7 +1781,7 @@ async def atomicals_get_holders(self, request):
                 "address": get_address_from_output_script(bytes.fromhex(holder['script'])),
                 "holding": holder["holding"]
             })
         elif atomical["type"] == "NFT":
-            for holder in atomical.get("holders", [])[offset:offset+limit]:
+            for holder in atomical.get("holders", [])[offset:offset + limit]:
                 formatted_results.append({
                     "address": get_address_from_output_script(bytes.fromhex(holder['script'])),
                     "holding": holder["holding"]
                 })
         return formatted_results
 
     async def atomicals_transaction(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         txid = params.get(0, "")
         return await self.session_mgr.get_transaction_detail(txid)
 
-    async def get_transaction_detail_by_height(self, height, limit, offset, op_type, reverse=True):
+    async def get_transaction_detail_by_height(self, height, limit, offset, op_type: Optional[str] = None, reverse=True):
         res = []
         txs_list = []
         txs = self.db.get_atomicals_block_txs(height)
@@ -1803,19 +1805,18 @@ async def get_transaction_detail_by_height(self, height, limit, offset, op_type,
                 "tx_hash": tx,
                 "height": height
             })
-
         txs_list.sort(key=lambda x: x['tx_num'], reverse=reverse)
         for tx in txs_list:
             data = await self.session_mgr.get_transaction_detail(tx["tx_hash"], height, tx["tx_num"])
             if (op_type and op_type == data["op"]) or (not op_type and data["op"]):
                 res.append(data)
         total = len(res)
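+        # op_type filtering runs over every tx in the block before slicing, so
+        # `total` counts all matches rather than just the returned page.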
 
     # get the whole transaction by block height
     # return transaction detail
     async def transaction_by_height(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         height = params.get(0, "")
         limit = params.get(1, 10)
         offset = params.get(2, 0)
@@ -1827,7 +1828,7 @@ async def transaction_by_height(self, request):
 
     # get transaction by atomical id
     async def transaction_by_atomical_id(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         compact_atomical_id_or_atomical_number = params.get(0, "")
         limit = params.get(1, 10)
         offset = params.get(2, 0)
@@ -1836,10 +1837,12 @@ async def transaction_by_atomical_id(self, request):
 
         res = []
         compact_atomical_id = compact_atomical_id_or_atomical_number
-        if isinstance(compact_atomical_id_or_atomical_number, int) != True and is_compact_atomical_id(compact_atomical_id_or_atomical_number):
+        if not isinstance(compact_atomical_id_or_atomical_number, int) and is_compact_atomical_id(
+                compact_atomical_id_or_atomical_number):
             assert_atomical_id(compact_atomical_id)
         else:
-            compact_atomical_id = location_id_bytes_to_compact(self.db.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number))
+            compact_atomical_id = location_id_bytes_to_compact(
+                self.db.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number))
         atomical_id = compact_to_location_id_bytes(compact_atomical_id)
         hashX = double_sha256(atomical_id)
 
@@ -1859,7 +1862,7 @@ async def transaction_by_atomical_id(self, request):
 
     # get transaction by scripthash
     async def transaction_by_scripthash(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         scripthash = params.get(0, "")
         limit = params.get(1, 10)
         offset = params.get(2, 0)
@@ -1882,9 +1885,9 @@ async def transaction_by_scripthash(self, request):
             res.append(data)
         return {"result": res, "total": total, "limit": limit, "offset": offset}
 
-    # searh for global
+    # search for global
     async def transaction_global(self, request):
-        params = await self.format_params(request)
+        params = await format_params(request)
         limit = params.get(0, 10)
         offset = params.get(1, 0)
         op_type = params.get(2, None)
diff --git a/electrumx/server/session/session_base.py b/electrumx/server/session/session_base.py
new file mode 100644
index 00000000..f9c3319e
--- /dev/null
+++ b/electrumx/server/session/session_base.py
@@ -0,0 +1,193 @@
+from typing import Optional, Tuple, Callable, Dict
+
+import electrumx.lib.util as util
+import itertools
+
+from aiorpcx import Request, RPCSession, JSONRPCConnection, JSONRPCAutoDetect, NewlineFramer, ReplyAndDisconnect, \
+    handler_invocation, RPCError
+
+from electrumx.lib.hash import hex_str_to_hash, HASHX_LEN
+from electrumx.server.db import DB
+from electrumx.server.mempool import MemPool
+from electrumx.server.peers import PeerManager
+from electrumx.server.session import BAD_REQUEST
+from electrumx.server.session.shared_session import SharedSession
+
+
+def scripthash_to_hashX(scripthash):
+    try:
+        bin_hash = hex_str_to_hash(scripthash)
+        if len(bin_hash) == 32:
+            return bin_hash[:HASHX_LEN]
+    except (ValueError, TypeError):
+        pass
+    raise RPCError(BAD_REQUEST, f'{scripthash} is not a valid script hash')
+
+
+def non_negative_integer(value):
+    """Return the param value if it is, or can be converted to, a
+    non-negative integer; otherwise raise an RPCError."""
+    try:
+        value = int(value)
+        if value >= 0:
+            return value
+    except (ValueError, TypeError):
+        pass
+    raise RPCError(BAD_REQUEST, f'{value} should be a non-negative integer')
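The validators in this module all follow the same shape: attempt to normalize the value, fall through on any failure, and raise a single RPCError at the end so every bad input maps to one error path. A standalone sketch of the pattern (not part of the patch), using ValueError in place of aiorpcx's RPCError so it runs without the server's dependencies:

```python
# Self-contained illustration of the validate-or-raise pattern used above;
# ValueError stands in for aiorpcx's RPCError(BAD_REQUEST, ...).
def demo_non_negative_integer(value):
    try:
        value = int(value)
        if value >= 0:
            return value
    except (ValueError, TypeError):
        pass  # fall through to the single error path below
    raise ValueError(f'{value} should be a non-negative integer')


assert demo_non_negative_integer("42") == 42
try:
    demo_non_negative_integer(-1)
except ValueError as e:
    print(e)  # -1 should be a non-negative integer
```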
"""Raise an RPCError if the value is not a valid hexadecimal transaction hash. + + If it is valid, return it as 32-byte binary hash.""" + try: + raw_hash = hex_str_to_hash(value) + if len(raw_hash) == 32: + return raw_hash + except (ValueError, TypeError): + pass + raise RPCError(BAD_REQUEST, f'{value} should be a transaction hash') + + +def assert_atomical_id(value): + """Raise an RPCError if the value is not a valid atomical id + If it is valid, return it as 32-byte binary hash.""" + try: + if value is None or value == "": + raise RPCError(BAD_REQUEST, f'atomical_id required') + index_of_i = value.find("i") + if index_of_i != 64: + raise RPCError(BAD_REQUEST, f'{value} should be an atomical_id') + raw_hash = hex_str_to_hash(value[: 64]) + if len(raw_hash) == 32: + return raw_hash + except (ValueError, TypeError): + pass + + raise RPCError(BAD_REQUEST, f'{value} should be an atomical_id') + + +class SessionBase(RPCSession): + """Base class of ElectrumX JSON sessions. + + Each session runs its tasks in asynchronous parallelism with other + sessions. + """ + + MAX_CHUNK_SIZE = 2016 + session_counter = itertools.count() + log_new = False + + def __init__( + self, + session_mgr, + db: 'DB', + mempool: 'MemPool', + peer_mgr: 'PeerManager', + kind: str, + transport, + ): + connection = JSONRPCConnection(JSONRPCAutoDetect) + super().__init__(transport, connection=connection) + self.session_mgr = session_mgr + self.db = db + self.mempool = mempool + self.peer_mgr = peer_mgr + self.kind = kind # 'RPC', 'TCP' etc. + self.env = session_mgr.env + self.coin = self.env.coin + self.client = 'unknown' + self.anon_logs = self.env.anon_logs + self.txs_sent = 0 + self.log_me = SessionBase.log_new + self.session_id = None + self.daemon_request = self.session_mgr.daemon_request + self.session_id = next(self.session_counter) + context = {'conn_id': f'{self.session_id}'} + logger = util.class_logger(__name__, self.__class__.__name__) + self.logger = util.ConnectionLogger(logger, context) + self.logger.info(f'{self.kind} {self.remote_address_string()}, ' + f'{self.session_mgr.session_count():,d} total') + self.session_mgr.add_session(self) + self.recalc_concurrency() # must be called after session_mgr.add_session + self.protocol_tuple: Optional[Tuple[int, ...]] = None + self.request_handlers: Optional[Dict[str, Callable]] = None + # Use the sharing session to manage handlers. + self.ss = SharedSession(self.session_mgr, self.logger) + + async def notify(self, touched, height_changed): + pass + + def default_framer(self): + return NewlineFramer(max_size=self.env.max_recv) + + def remote_address_string(self, *, for_log=True): + """Returns the peer's IP address and port as a human-readable + string, respecting anon logs if the output is for a log.""" + if for_log and self.anon_logs: + return 'xx.xx.xx.xx:xx' + return str(self.remote_address()) + + def flags(self): + """Status flags.""" + status = self.kind[0] + if self.is_closing(): + status += 'C' + if self.log_me: + status += 'L' + status += str(self._incoming_concurrency.max_concurrent) + return status + + async def connection_lost(self): + """Handle client disconnection.""" + await super().connection_lost() + self.session_mgr.remove_session(self) + msg = '' + if self._incoming_concurrency.max_concurrent < self.initial_concurrent * 0.8: + msg += ' whilst throttled' + if self.send_size >= 1_000_000: + msg += f'. 
+        if msg:
+            msg = 'disconnected' + msg
+            self.logger.info(msg)
+
+    def sub_count(self):
+        return 0
+
+    async def handle_request(self, request):
+        """Handle an incoming request. ElectrumX doesn't receive
+        notifications from client sessions.
+        """
+        if isinstance(request, Request):
+            handler = self.request_handlers.get(request.method)
+            method = request.method
+            args = request.args
+        else:
+            handler = None
+            method = 'invalid method'
+            args = None
+        self.logger.debug(f'Session request handling: [method] {method}, [args] {args}')
+
+        # If DROP_CLIENT_UNKNOWN is enabled, check whether the client has
+        # identified itself by calling server.version. If not, disconnect.
+        if self.env.drop_client_unknown and method != 'server.version' and self.client == 'unknown':
+            self.logger.info('disconnecting because client is unknown')
+            raise ReplyAndDisconnect(BAD_REQUEST, 'use server.version to identify client')
+
+        self.session_mgr.method_counts[method] += 1
+        coro = handler_invocation(handler, request)()
+        return await coro
+
+
+class LocalRPC(SessionBase):
+    """A local TCP RPC server session."""
+
+    processing_timeout = 10**9  # disable timeouts
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.client = 'RPC'
+        self.connection.max_response_size = 0
+
+    def protocol_version_string(self):
+        return 'RPC'
diff --git a/electrumx/server/session/session_manager.py b/electrumx/server/session/session_manager.py
new file mode 100644
index 00000000..43b72f80
--- /dev/null
+++ b/electrumx/server/session/session_manager.py
@@ -0,0 +1,1260 @@
+import asyncio
+import attr
+import math
+import os
+import ssl
+import time
+from asyncio import Event, sleep
+from collections import defaultdict
+from functools import partial
+from ipaddress import *
+
+import pylru
+from aiorpcx import serve_ws, serve_rs, RPCError, run_in_thread
+
+from electrumx.lib import util
+from electrumx.lib.atomicals_blueprint_builder import AtomicalsTransferBlueprintBuilder
+from electrumx.lib.hash import Base58Error, hash_to_hex_str, hex_str_to_hash
+from electrumx.lib.merkle import MerkleCache
+from electrumx.lib.script2addr import *
+from electrumx.lib.text import sessions_lines
+from electrumx.lib.util import OldTaskGroup
+from electrumx.lib.util_atomicals import *
+from electrumx.server.block_processor import BlockProcessor
+from electrumx.server.daemon import DaemonError, Daemon
+from electrumx.server.db import DB
+from electrumx.server.env import Env
+from electrumx.server.history import TXNUM_LEN
+from electrumx.server.http_middleware import *
+from electrumx.server.mempool import MemPool
+from electrumx.server.session import BAD_REQUEST, DAEMON_ERROR
+from electrumx.server.session.http_session import HttpHandler
+from electrumx.server.session.session_base import LocalRPC, SessionBase, non_negative_integer
+from electrumx.server.peers import PeerManager
+
+from typing import Optional, TYPE_CHECKING
+
+from electrumx.version import electrumx_version
+
+if TYPE_CHECKING:
+    pass
+
+
+@attr.s(slots=True)
+class SessionGroup:
+    name = attr.ib()
+    weight = attr.ib()
+    sessions = attr.ib()
+    retained_cost = attr.ib()
+
+    def session_cost(self):
+        return sum(session.cost for session in self.sessions)
+
+    def cost(self):
+        return self.retained_cost + self.session_cost()
+
+
+@attr.s(slots=True)
+class SessionReferences:
+    # All attributes are sets but groups is a list
+    sessions = attr.ib()
+    groups = attr.ib()
+    specials = attr.ib()  # Lower-case strings
+    unknown = attr.ib()  # Strings
+
+
+class SessionManager:
+    """Holds 
global state about all sessions.""" + + def __init__( + self, + env: 'Env', + db: 'DB', + bp: 'BlockProcessor', + daemon: 'Daemon', + mempool: 'MemPool', + shutdown_event: asyncio.Event, + ): + env.max_send = max(350000, env.max_send) + self.env = env + self.db = db + self.bp = bp + self.daemon = daemon + self.mempool = mempool + self.peer_mgr = PeerManager(env, db) + self.shutdown_event = shutdown_event + self.logger = util.class_logger(__name__, self.__class__.__name__) + self.servers = {} # service->server + self.sessions = {} # session->iterable of its SessionGroups + self.session_groups = {} # group name->SessionGroup instance + self.txs_sent = 0 + # Would use monotonic time, but aiorpcx sessions use Unix time: + self.start_time = time.time() + self.method_counts = defaultdict(int) + self._reorg_count = 0 + self._history_cache = pylru.lrucache(1000) + self._history_lookups = 0 + self._history_hits = 0 + self._history_op_cache = pylru.lrucache(1000) + self._tx_num_op_cache = pylru.lrucache(10000000) + self._tx_hashes_cache = pylru.lrucache(1000) + self._tx_hashes_lookups = 0 + self._tx_hashes_hits = 0 + # Really a MerkleCache cache + self._merkle_cache = pylru.lrucache(1000) + self._merkle_lookups = 0 + self._merkle_hits = 0 + self.estimatefee_cache = pylru.lrucache(1000) + self._tx_detail_cache = pylru.lrucache(1000000) + self.notified_height = None + self.hsub_results = None + self._task_group = OldTaskGroup() + self._sslc = None + # Event triggered when electrumx is listening for incoming requests. + self.server_listening = Event() + self.session_event = Event() + + # Set up the RPC request handlers + cmds = ('add_peer daemon_url disconnect getinfo groups log peers ' + 'query reorg sessions stop debug_memusage_list_all_objects ' + 'debug_memusage_get_random_backref_chain'.split()) + # LocalRPC.request_handlers = {cmd: getattr(self, 'rpc_' + cmd) for cmd in cmds} + + def _ssl_context(self): + if self._sslc is None: + self._sslc = ssl.SSLContext(ssl.PROTOCOL_TLS) + self._sslc.load_cert_chain(self.env.ssl_certfile, keyfile=self.env.ssl_keyfile) + return self._sslc + + async def _start_servers(self, services): + for service in services: + kind = service.protocol.upper() + if service.protocol == 'http': + host = None if service.host == 'all_interfaces' else str(service.host) + try: + app = web.Application(middlewares=[ + cors_middleware(self), + error_middleware(self), + request_middleware(self), + ]) + handler = HttpHandler(self, self.db, self.mempool, self.peer_mgr, kind) + # GET + app.router.add_get('/proxy', handler.proxy) + app.router.add_get('/proxy/health', handler.health) + app.router.add_get('/proxy/blockchain.block.header', handler.block_header) + app.router.add_get('/proxy/blockchain.block.headers', handler.block_headers) + app.router.add_get('/proxy/blockchain.estimatefee', handler.estimatefee) + # app.router.add_get('/proxy/headers.subscribe', handler.headers_subscribe) + # app.router.add_get('/proxy/relayfee', handler.relayfee) + app.router.add_get('/proxy/blockchain.scripthash.get_balance', handler.scripthash_get_balance) + app.router.add_get('/proxy/blockchain.scripthash.get_history', handler.scripthash_get_history) + app.router.add_get('/proxy/blockchain.scripthash.get_mempool', handler.scripthash_get_mempool) + app.router.add_get('/proxy/blockchain.scripthash.listunspent', handler.scripthash_listunspent) + app.router.add_get('/proxy/blockchain.scripthash.subscribe', handler.scripthash_subscribe) + app.router.add_get('/proxy/blockchain.transaction.broadcast', 
handler.transaction_broadcast) + app.router.add_get('/proxy/blockchain.transaction.get', handler.transaction_get) + app.router.add_get('/proxy/blockchain.transaction.get_merkle', handler.transaction_merkle) + app.router.add_get('/proxy/blockchain.transaction.id_from_pos', handler.transaction_id_from_pos) + # app.router.add_get('/proxy/server.add_peer', handler.add_peer) + # app.router.add_get('/proxy/server.banner', handler.banner) + app.router.add_get('/proxy/server.donation_address', handler.donation_address) + app.router.add_get('/proxy/server.features', handler.server_features_async) + app.router.add_get('/proxy/server.peers.subscribe', handler.peers_subscribe) + app.router.add_get('/proxy/server.ping', handler.ping) + # app.router.add_get('/proxy/server.version', handler.server_version) + app.router.add_get('/proxy/blockchain.atomicals.validate', handler.transaction_broadcast_validate) + app.router.add_get('/proxy/blockchain.atomicals.get_ft_balances_scripthash', + handler.atomicals_get_ft_balances) + app.router.add_get('/proxy/blockchain.atomicals.get_nft_balances_scripthash', + handler.atomicals_get_nft_balances) + app.router.add_get('/proxy/blockchain.atomicals.listscripthash', handler.atomicals_listscripthash) + app.router.add_get('/proxy/blockchain.atomicals.list', handler.atomicals_list) + app.router.add_get('/proxy/blockchain.atomicals.get_numbers', handler.atomicals_num_to_id) + app.router.add_get('/proxy/blockchain.atomicals.get_block_hash', handler.atomicals_block_hash) + app.router.add_get('/proxy/blockchain.atomicals.get_block_txs', handler.atomicals_block_txs) + app.router.add_get('/proxy/blockchain.atomicals.dump', handler.atomicals_dump) + app.router.add_get('/proxy/blockchain.atomicals.at_location', handler.atomicals_at_location) + app.router.add_get('/proxy/blockchain.atomicals.get_location', handler.atomicals_get_location) + app.router.add_get('/proxy/blockchain.atomicals.get', handler.atomicals_get) + app.router.add_get('/proxy/blockchain.atomicals.get_global', handler.atomicals_get_global) + app.router.add_get('/proxy/blockchain.atomicals.get_state', handler.atomical_get_state) + app.router.add_get('/proxy/blockchain.atomicals.get_state_history', + handler.atomical_get_state_history) + app.router.add_get('/proxy/blockchain.atomicals.get_events', handler.atomical_get_events) + app.router.add_get('/proxy/blockchain.atomicals.get_tx_history', handler.atomicals_get_tx_history) + app.router.add_get('/proxy/blockchain.atomicals.get_realm_info', handler.atomicals_get_realm_info) + app.router.add_get('/proxy/blockchain.atomicals.get_by_realm', handler.atomicals_get_by_realm) + app.router.add_get('/proxy/blockchain.atomicals.get_by_subrealm', handler.atomicals_get_by_subrealm) + app.router.add_get('/proxy/blockchain.atomicals.get_by_dmitem', handler.atomicals_get_by_dmitem) + app.router.add_get('/proxy/blockchain.atomicals.get_by_ticker', handler.atomicals_get_by_ticker) + app.router.add_get('/proxy/blockchain.atomicals.get_by_container', + handler.atomicals_get_by_container) + app.router.add_get('/proxy/blockchain.atomicals.get_by_container_item', + handler.atomicals_get_by_container_item) + app.router.add_get('/proxy/blockchain.atomicals.get_by_container_item_validate', + handler.atomicals_get_by_container_item_validation) + app.router.add_get('/proxy/blockchain.atomicals.get_container_items', + handler.atomicals_get_container_items) + app.router.add_get('/proxy/blockchain.atomicals.get_ft_info', handler.atomicals_get_ft_info) + 
app.router.add_get('/proxy/blockchain.atomicals.get_dft_mints', handler.atomicals_get_dft_mints) + app.router.add_get('/proxy/blockchain.atomicals.find_tickers', handler.atomicals_search_tickers) + app.router.add_get('/proxy/blockchain.atomicals.find_realms', handler.atomicals_search_realms) + app.router.add_get('/proxy/blockchain.atomicals.find_subrealms', handler.atomicals_search_subrealms) + app.router.add_get('/proxy/blockchain.atomicals.find_containers', + handler.atomicals_search_containers) + app.router.add_get('/proxy/blockchain.atomicals.get_holders', handler.atomicals_get_holders) + app.router.add_get('/proxy/blockchain.atomicals.transaction', handler.atomicals_transaction) + app.router.add_get('/proxy/blockchain.atomicals.transaction_by_height', + handler.transaction_by_height) + app.router.add_get('/proxy/blockchain.atomicals.transaction_by_atomical_id', + handler.transaction_by_atomical_id) + app.router.add_get('/proxy/blockchain.atomicals.transaction_by_scripthash', + handler.transaction_by_scripthash) + app.router.add_get('/proxy/blockchain.atomicals.transaction_global', handler.transaction_global) + # POST + app.router.add_post('/proxy', handler.proxy) + app.router.add_post('/proxy/blockchain.block.header', handler.block_header) + app.router.add_post('/proxy/blockchain.block.headers', handler.block_headers) + app.router.add_post('/proxy/blockchain.estimatefee', handler.estimatefee) + # app.router.add_post('/proxy/headers.subscribe', handler.headers_subscribe) + # app.router.add_post('/proxy/relayfee', handler.relayfee) + app.router.add_post('/proxy/blockchain.scripthash.get_balance', handler.scripthash_get_balance) + app.router.add_post('/proxy/blockchain.scripthash.get_history', handler.scripthash_get_history) + app.router.add_post('/proxy/blockchain.scripthash.get_mempool', handler.scripthash_get_mempool) + app.router.add_post('/proxy/blockchain.scripthash.listunspent', handler.scripthash_listunspent) + app.router.add_post('/proxy/blockchain.scripthash.subscribe', handler.scripthash_subscribe) + app.router.add_post('/proxy/blockchain.transaction.broadcast', handler.transaction_broadcast) + app.router.add_post('/proxy/blockchain.transaction.get', handler.transaction_get) + app.router.add_post('/proxy/blockchain.transaction.get_merkle', handler.transaction_merkle) + app.router.add_post('/proxy/blockchain.transaction.id_from_pos', handler.transaction_id_from_pos) + # app.router.add_post('/proxy/server.add_peer', handler.add_peer) + # app.router.add_post('/proxy/server.banner', handler.banner) + app.router.add_post('/proxy/server.donation_address', handler.donation_address) + app.router.add_post('/proxy/server.features', handler.server_features_async) + app.router.add_post('/proxy/server.peers.subscribe', handler.peers_subscribe) + app.router.add_post('/proxy/server.ping', handler.ping) + # app.router.add_post('/proxy/server.version', handler.server_version) + app.router.add_post('/proxy/blockchain.atomicals.validate', handler.transaction_broadcast_validate) + app.router.add_post('/proxy/blockchain.atomicals.get_ft_balances_scripthash', + handler.atomicals_get_ft_balances) + app.router.add_post('/proxy/blockchain.atomicals.get_nft_balances_scripthash', + handler.atomicals_get_nft_balances) + app.router.add_post('/proxy/blockchain.atomicals.listscripthash', handler.atomicals_listscripthash) + app.router.add_post('/proxy/blockchain.atomicals.list', handler.atomicals_list) + app.router.add_post('/proxy/blockchain.atomicals.get_numbers', handler.atomicals_num_to_id) + 
app.router.add_post('/proxy/blockchain.atomicals.get_block_hash', handler.atomicals_block_hash) + app.router.add_post('/proxy/blockchain.atomicals.get_block_txs', handler.atomicals_block_txs) + app.router.add_post('/proxy/blockchain.atomicals.dump', handler.atomicals_dump) + app.router.add_post('/proxy/blockchain.atomicals.at_location', handler.atomicals_at_location) + app.router.add_post('/proxy/blockchain.atomicals.get_location', handler.atomicals_get_location) + app.router.add_post('/proxy/blockchain.atomicals.get', handler.atomicals_get) + app.router.add_post('/proxy/blockchain.atomicals.get_global', handler.atomicals_get_global) + app.router.add_post('/proxy/blockchain.atomicals.get_state', handler.atomical_get_state) + app.router.add_post('/proxy/blockchain.atomicals.get_state_history', + handler.atomical_get_state_history) + app.router.add_post('/proxy/blockchain.atomicals.get_events', handler.atomical_get_events) + app.router.add_post('/proxy/blockchain.atomicals.get_tx_history', handler.atomicals_get_tx_history) + app.router.add_post('/proxy/blockchain.atomicals.get_realm_info', handler.atomicals_get_realm_info) + app.router.add_post('/proxy/blockchain.atomicals.get_by_realm', handler.atomicals_get_by_realm) + app.router.add_post('/proxy/blockchain.atomicals.get_by_subrealm', + handler.atomicals_get_by_subrealm) + app.router.add_post('/proxy/blockchain.atomicals.get_by_dmitem', handler.atomicals_get_by_dmitem) + app.router.add_post('/proxy/blockchain.atomicals.get_by_ticker', handler.atomicals_get_by_ticker) + app.router.add_post('/proxy/blockchain.atomicals.get_by_container', + handler.atomicals_get_by_container) + app.router.add_post('/proxy/blockchain.atomicals.get_by_container_item', + handler.atomicals_get_by_container_item) + app.router.add_post('/proxy/blockchain.atomicals.get_by_container_item_validate', + handler.atomicals_get_by_container_item_validation) + app.router.add_post('/proxy/blockchain.atomicals.get_container_items', + handler.atomicals_get_container_items) + app.router.add_post('/proxy/blockchain.atomicals.get_ft_info', handler.atomicals_get_ft_info) + app.router.add_post('/proxy/blockchain.atomicals.get_dft_mints', handler.atomicals_get_dft_mints) + app.router.add_post('/proxy/blockchain.atomicals.find_tickers', handler.atomicals_search_tickers) + app.router.add_post('/proxy/blockchain.atomicals.find_realms', handler.atomicals_search_realms) + app.router.add_post('/proxy/blockchain.atomicals.find_subrealms', + handler.atomicals_search_subrealms) + app.router.add_post('/proxy/blockchain.atomicals.find_containers', + handler.atomicals_search_containers) + app.router.add_post('/proxy/blockchain.atomicals.get_holders', handler.atomicals_get_holders) + app.router.add_post('/proxy/blockchain.atomicals.transaction', handler.atomicals_transaction) + app.router.add_post('/proxy/blockchain.atomicals.transaction_by_height', + handler.transaction_by_height) + app.router.add_post('/proxy/blockchain.atomicals.transaction_by_atomical_id', + handler.transaction_by_atomical_id) + app.router.add_post('/proxy/blockchain.atomicals.transaction_by_scripthash', + handler.transaction_by_scripthash) + app.router.add_post('/proxy/blockchain.atomicals.transaction_global', handler.transaction_global) + # common proxy + app.router.add_get('/proxy/{method}', handler.handle_get_method) + app.router.add_post('/proxy/{method}', handler.handle_post_method) + app['rate_limiter'] = rate_limiter + runner = web.AppRunner(app) + await runner.setup() + site = web.TCPSite(runner, host, service.port) 
+ await site.start() + except Exception as e: + self.logger.error(f'{kind} server failed to listen on {service.address}: {e}') + else: + self.logger.info(f'{kind} server listening on {service.address}') + else: + if service.protocol in self.env.SSL_PROTOCOLS: + sslc = self._ssl_context() + else: + sslc = None + if service.protocol == 'rpc': + session_class = LocalRPC + else: + session_class = self.env.coin.SESSIONCLS + if service.protocol in ('ws', 'wss'): + serve = serve_ws + else: + serve = serve_rs + # FIXME: pass the service not the kind + session_factory = partial(session_class, self, self.db, self.mempool, + self.peer_mgr, kind) + host = None if service.host == 'all_interfaces' else str(service.host) + try: + self.servers[service] = await serve(session_factory, host, + service.port, ssl=sslc) + except OSError as e: # don't suppress CancelledError + self.logger.error(f'{kind} server failed to listen on {service.address}: {e}') + else: + self.logger.info(f'{kind} server listening on {service.address}') + + async def _start_external_servers(self): + """Start listening on TCP and SSL ports, but only if the respective + port was given in the environment. + """ + await self._start_servers(service for service in self.env.services + if service.protocol != 'rpc') + self.server_listening.set() + + async def _stop_servers(self, services): + """Stop the servers of the given protocols.""" + server_map = {service: self.servers.pop(service) + for service in set(services).intersection(self.servers)} + # Close all before waiting + for service, server in server_map.items(): + self.logger.info(f'closing down server for {service}') + server.close() + # No value in doing these concurrently + for server in server_map.values(): + await server.wait_closed() + + async def _manage_servers(self): + paused = False + max_sessions = self.env.max_sessions + low_watermark = max_sessions * 19 // 20 + while True: + await self.session_event.wait() + self.session_event.clear() + if not paused and len(self.sessions) >= max_sessions: + self.logger.info(f'maximum sessions {max_sessions:,d} ' + f'reached, stopping new connections until ' + f'count drops to {low_watermark:,d}') + await self._stop_servers(service for service in self.servers + if service.protocol != 'rpc') + paused = True + # Start listening for incoming connections if paused and + # session count has fallen + if paused and len(self.sessions) <= low_watermark: + self.logger.info('resuming listening for incoming connections') + await self._start_external_servers() + paused = False + + async def _log_sessions(self): + """Periodically log sessions.""" + log_interval = self.env.log_sessions + if log_interval: + while True: + await sleep(log_interval) + data = self._session_data(for_log=True) + for line in sessions_lines(data): + self.logger.info(line) + self.logger.info(util.json_serialize(self._get_info())) + + async def _disconnect_sessions(self, sessions, reason, *, force_after=1.0): + if sessions: + session_ids = ', '.join(str(session.session_id) for session in sessions) + self.logger.info(f'{reason} session ids {session_ids}') + for session in sessions: + await self._task_group.spawn(session.close(force_after=force_after)) + + async def _clear_stale_sessions(self): + """Cut off sessions that haven't done anything for 10 minutes.""" + while True: + await sleep(60) + stale_cutoff = time.time() - self.env.session_timeout + stale_sessions = [session for session in self.sessions + if session.last_recv < stale_cutoff] + await 
self._disconnect_sessions(stale_sessions, 'closing stale') + del stale_sessions + + async def _handle_chain_reorgs(self): + """Clear certain caches on chain reorgs.""" + while True: + await self.bp.backed_up_event.wait() + self.logger.info(f'reorg signalled; clearing tx_hashes and merkle caches') + self._reorg_count += 1 + self._tx_hashes_cache.clear() + self._merkle_cache.clear() + + async def _recalc_concurrency(self): + """Periodically recalculate session concurrency.""" + session_class = self.env.coin.SESSIONCLS + period = 300 + while True: + await sleep(period) + hard_limit = session_class.cost_hard_limit + + # Reduce retained group cost + refund = period * hard_limit / 5000 + dead_groups = [] + for group in self.session_groups.values(): + group.retained_cost = max(0.0, group.retained_cost - refund) + if group.retained_cost == 0 and not group.sessions: + dead_groups.append(group) + # Remove dead groups + for group in dead_groups: + self.session_groups.pop(group.name) + + # Recalc concurrency for sessions where cost is changing gradually, and update + # cost_decay_per_sec. + for session in self.sessions: + # Subs have an on-going cost so decay more slowly with more subs + session.cost_decay_per_sec = hard_limit / (10000 + 5 * session.sub_count()) + session.recalc_concurrency() + + def _get_info(self): + """A summary of server state.""" + cache_fmt = '{:,d} lookups {:,d} hits {:,d} entries' + sessions = self.sessions + return { + 'coin': self.env.coin.__name__, + 'daemon': self.daemon.logged_url(), + 'daemon height': self.daemon.cached_height(), + 'db height': self.db.db_height, + 'db_flush_count': self.db.history.flush_count, + 'groups': len(self.session_groups), + 'history cache': cache_fmt.format( + self._history_lookups, self._history_hits, len(self._history_cache)), + 'merkle cache': cache_fmt.format( + self._merkle_lookups, self._merkle_hits, len(self._merkle_cache)), + 'pid': os.getpid(), + 'peers': self.peer_mgr.info(), + 'request counts': self.method_counts, + 'request total': sum(self.method_counts.values()), + 'sessions': { + 'count': len(sessions), + 'count with subs': sum(len(getattr(s, 'hashX_subs', ())) > 0 for s in sessions), + 'errors': sum(s.errors for s in sessions), + 'logged': len([s for s in sessions if s.log_me]), + 'pending requests': sum(s.unanswered_request_count() for s in sessions), + 'subs': sum(s.sub_count() for s in sessions), + }, + 'tx hashes cache': cache_fmt.format( + self._tx_hashes_lookups, self._tx_hashes_hits, len(self._tx_hashes_cache)), + 'txs sent': self.txs_sent, + 'uptime': util.formatted_time(time.time() - self.start_time), + 'version': electrumx_version, + } + + def _session_data(self, for_log): + """Returned to the RPC 'sessions' call.""" + now = time.time() + sessions = sorted(self.sessions, key=lambda s: s.start_time) + return [(session.session_id, + session.flags(), + session.remote_address_string(for_log=for_log), + session.client, + session.protocol_version_string(), + session.cost, + session.extra_cost(), + session.unanswered_request_count(), + session.txs_sent, + session.sub_count(), + session.recv_count, session.recv_size, + session.send_count, session.send_size, + now - session.start_time) + for session in sessions] + + def _group_data(self): + """Returned to the RPC 'groups' call.""" + result = [] + for name, group in self.session_groups.items(): + sessions = group.sessions + result.append([name, + len(sessions), + group.session_cost(), + group.retained_cost, + sum(s.unanswered_request_count() for s in sessions), + sum(s.txs_sent 
for s in sessions),
+                           sum(s.sub_count() for s in sessions),
+                           sum(s.recv_count for s in sessions),
+                           sum(s.recv_size for s in sessions),
+                           sum(s.send_count for s in sessions),
+                           sum(s.send_size for s in sessions),
+                           ])
+        return result
+
+    async def _refresh_hsub_results(self, height):
+        """Refresh the cached header subscription responses to be for height,
+        and record that as notified_height.
+        """
+        # Paranoia: a reorg could race and leave db_height lower
+        height = min(height, self.db.db_height)
+        raw = await self.raw_header(height)
+        self.hsub_results = {'hex': raw.hex(), 'height': height}
+        self.notified_height = height
+
+    def _session_references(self, items, special_strings):
+        """Return a SessionReferences object."""
+        if not isinstance(items, list) or not all(isinstance(item, str) for item in items):
+            raise RPCError(BAD_REQUEST, 'expected a list of session IDs')
+
+        sessions_by_id = {session.session_id: session for session in self.sessions}
+        groups_by_name = self.session_groups
+
+        sessions = set()
+        groups = set()  # Names as groups are not hashable
+        specials = set()
+        unknown = set()
+
+        for item in items:
+            if item.isdigit():
+                session = sessions_by_id.get(int(item))
+                if session:
+                    sessions.add(session)
+                else:
+                    unknown.add(item)
+            else:
+                lc_item = item.lower()
+                if lc_item in special_strings:
+                    specials.add(lc_item)
+                else:
+                    if lc_item in groups_by_name:
+                        groups.add(lc_item)
+                    else:
+                        unknown.add(item)
+
+        groups = [groups_by_name[group] for group in groups]
+        return SessionReferences(sessions, groups, specials, unknown)
+
+    # --- LocalRPC command handlers
+
+    async def rpc_add_peer(self, real_name):
+        """Add a peer.
+
+        real_name: "bch.electrumx.cash t50001 s50002" for example
+        """
+        await self.peer_mgr.add_localRPC_peer(real_name)
+        return f"peer '{real_name}' added"
+
+    async def rpc_disconnect(self, session_ids):
+        """Disconnect sessions.
+
+        session_ids: array of session IDs
+        """
+        refs = self._session_references(session_ids, {'all'})
+        result = []
+
+        if 'all' in refs.specials:
+            sessions = self.sessions
+            result.append('disconnecting all sessions')
+        else:
+            sessions = refs.sessions
+            result.extend(f'disconnecting session {session.session_id}' for session in sessions)
+            for group in refs.groups:
+                result.append(f'disconnecting group {group.name}')
+                sessions.update(group.sessions)
+        result.extend(f'unknown: {item}' for item in refs.unknown)
+
+        await self._disconnect_sessions(sessions, 'local RPC request to disconnect')
+        return result
+
+    async def rpc_log(self, session_ids):
+        """Toggle logging of sessions. 
+ + session_ids: array of session or group IDs, or 'all', 'none', 'new' + """ + refs = self._session_references(session_ids, {'all', 'none', 'new'}) + result = [] + + def add_result(text, value): + result.append(f'logging {text}' if value else f'not logging {text}') + + if 'all' in refs.specials: + for session in self.sessions: + session.log_me = True + SessionBase.log_new = True + result.append('logging all sessions') + if 'none' in refs.specials: + for session in self.sessions: + session.log_me = False + SessionBase.log_new = False + result.append('logging no sessions') + if 'new' in refs.specials: + SessionBase.log_new = not SessionBase.log_new + add_result('new sessions', SessionBase.log_new) + + sessions = refs.sessions + for session in sessions: + session.log_me = not session.log_me + add_result(f'session {session.session_id}', session.log_me) + for group in refs.groups: + for session in group.sessions.difference(sessions): + sessions.add(session) + session.log_me = not session.log_me + add_result(f'session {session.session_id}', session.log_me) + + result.extend(f'unknown: {item}' for item in refs.unknown) + return result + + async def rpc_daemon_url(self, daemon_url): + """Replace the daemon URL.""" + daemon_url = daemon_url or self.env.daemon_url + try: + self.daemon.set_url(daemon_url) + except Exception as e: + raise RPCError(BAD_REQUEST, f'an error occurred: {e!r}') + return f'now using daemon at {self.daemon.logged_url()}' + + async def rpc_stop(self): + """Shut down the server cleanly.""" + self.shutdown_event.set() + return 'stopping' + + async def rpc_getinfo(self): + """Return summary information about the server process.""" + return self._get_info() + + async def rpc_groups(self): + """Return statistics about the session groups.""" + return self._group_data() + + async def rpc_peers(self): + """Return a list of data about server peers.""" + return self.peer_mgr.rpc_data() + + async def rpc_query(self, items, limit): + """Returns data about a script, address or name.""" + coin = self.env.coin + db = self.db + lines = [] + + def arg_to_hashX(arg): + try: + script = bytes.fromhex(arg) + lines.append(f'Script: {arg}') + return coin.hashX_from_script(script) + except ValueError: + pass + + try: + hashX = coin.address_to_hashX(arg) + lines.append(f'Address: {arg}') + return hashX + except Base58Error: + pass + + try: + script = coin.build_name_index_script(arg.encode("ascii")) + hashX = coin.name_hashX_from_script(script) + lines.append(f'Name: {arg}') + return hashX + except (AttributeError, UnicodeEncodeError): + pass + + return None + + for arg in items: + hashX = arg_to_hashX(arg) + if not hashX: + continue + n = None + history = await db.limited_history(hashX, limit=limit) + for n, (tx_hash, height) in enumerate(history): + lines.append(f'History #{n:,d}: height {height:,d} ' + f'tx_hash {hash_to_hex_str(tx_hash)}') + if n is None: + lines.append('No history found') + n = None + utxos = await db.all_utxos(hashX) + for n, utxo in enumerate(utxos, start=1): + lines.append(f'UTXO #{n:,d}: tx_hash ' + f'{hash_to_hex_str(utxo.tx_hash)} ' + f'tx_pos {utxo.tx_pos:,d} height ' + f'{utxo.height:,d} value {utxo.value:,d}') + if n == limit: + break + if n is None: + lines.append('No UTXOs found') + + balance = sum(utxo.value for utxo in utxos) + lines.append(f'Balance: {coin.decimal_value(balance):,f} ' + f'{coin.SHORTNAME}') + + return lines + + async def rpc_sessions(self): + """Return statistics about connected sessions.""" + return self._session_data(for_log=False) + + async def 
rpc_reorg(self, count): + """Force a reorg of the given number of blocks. + + count: number of blocks to reorg + """ + count = non_negative_integer(count) + if not self.bp.force_chain_reorg(count): + raise RPCError(BAD_REQUEST, 'still catching up with daemon') + return f'scheduled a reorg of {count:,d} blocks' + + async def rpc_debug_memusage_list_all_objects(self, limit: int) -> str: + """Return a string listing the most common types in memory.""" + import objgraph # optional dependency + import io + with io.StringIO() as fd: + objgraph.show_most_common_types( + limit=limit, + shortnames=False, + file=fd) + return fd.getvalue() + + async def rpc_debug_memusage_get_random_backref_chain(self, objtype: str) -> str: + """Return a dotfile as text containing the backref chain + for a randomly selected object of type objtype. + + Warning: very slow! and it blocks the server. + + To convert to image: + $ dot -Tps filename.dot -o outfile.ps + """ + import objgraph # optional dependency + import random + import io + with io.StringIO() as fd: + await run_in_thread( + lambda: + objgraph.show_chain( + objgraph.find_backref_chain( + random.choice(objgraph.by_type(objtype)), + objgraph.is_proper_module + ), + output=fd + ) + ) + return fd.getvalue() + + # --- External Interface + + async def serve(self, notifications, event): + """Start the RPC server if enabled. When the event is triggered, + start TCP and SSL servers.""" + try: + await self._start_servers(service for service in self.env.services + if service.protocol == 'rpc') + await event.wait() + + session_class = self.env.coin.SESSIONCLS + session_class.cost_soft_limit = self.env.cost_soft_limit + session_class.cost_hard_limit = self.env.cost_hard_limit + session_class.cost_decay_per_sec = session_class.cost_hard_limit / 10000 + session_class.bw_cost_per_byte = 1.0 / self.env.bw_unit_cost + session_class.cost_sleep = self.env.request_sleep / 1000 + session_class.initial_concurrent = self.env.initial_concurrent + session_class.processing_timeout = self.env.request_timeout + + self.logger.info(f'max session count: {self.env.max_sessions:,d}') + self.logger.info(f'session timeout: {self.env.session_timeout:,d} seconds') + self.logger.info(f'session cost hard limit {self.env.cost_hard_limit:,d}') + self.logger.info(f'session cost soft limit {self.env.cost_soft_limit:,d}') + self.logger.info(f'bandwidth unit cost {self.env.bw_unit_cost:,d}') + self.logger.info(f'request sleep {self.env.request_sleep:,d}ms') + self.logger.info(f'request timeout {self.env.request_timeout:,d}s') + self.logger.info(f'initial concurrent {self.env.initial_concurrent:,d}') + + self.logger.info(f'max response size {self.env.max_send:,d} bytes') + if self.env.drop_client is not None: + self.logger.info( + f'drop clients matching: {self.env.drop_client.pattern}' + ) + for service in self.env.report_services: + self.logger.info(f'advertising service {service}') + # Start notifications; initialize hsub_results + await notifications.start(self.db.db_height, self._notify_sessions) + await self._start_external_servers() + # Peer discovery should start after the external servers + # because we connect to ourself + async with self._task_group as group: + await group.spawn(self.peer_mgr.discover_peers()) + await group.spawn(self._clear_stale_sessions()) + await group.spawn(self._handle_chain_reorgs()) + await group.spawn(self._recalc_concurrency()) + await group.spawn(self._log_sessions()) + await group.spawn(self._manage_servers()) + finally: + # Close servers then sessions + await 
self._stop_servers(self.servers.keys())
+            async with OldTaskGroup() as group:
+                for session in list(self.sessions):
+                    await group.spawn(session.close(force_after=1))
+
+    def extra_cost(self, session):
+        # Note there is no guarantee that session is still in self.sessions. Example traceback:
+        # notify_sessions->notify->address_status->bump_cost->recalc_concurrency->extra_cost
+        # during which there are many places the session could be removed
+        groups = self.sessions.get(session)
+        if groups is None:
+            return 0
+        return sum((group.cost() - session.cost) * group.weight for group in groups)
+
+    async def _merkle_branch(self, height, tx_hashes, tx_pos):
+        tx_hash_count = len(tx_hashes)
+        cost = tx_hash_count
+
+        if tx_hash_count >= 200:
+            self._merkle_lookups += 1
+            merkle_cache = self._merkle_cache.get(height)
+            if merkle_cache:
+                self._merkle_hits += 1
+                cost = 10 * math.sqrt(tx_hash_count)
+            else:
+                async def tx_hashes_func(start, count):
+                    return tx_hashes[start: start + count]
+
+                merkle_cache = MerkleCache(self.db.merkle, tx_hashes_func)
+                self._merkle_cache[height] = merkle_cache
+                await merkle_cache.initialize(len(tx_hashes))
+            branch, _root = await merkle_cache.branch_and_root(tx_hash_count, tx_pos)
+        else:
+            branch, _root = self.db.merkle.branch_and_root(tx_hashes, tx_pos)
+
+        branch = [hash_to_hex_str(hash) for hash in branch]
+        return branch, cost / 2500
+
+    async def merkle_branch_for_tx_hash(self, height, tx_hash):
+        """Return a triple (branch, tx_pos, cost)."""
+        tx_hashes, tx_hashes_cost = await self.tx_hashes_at_blockheight(height)
+        try:
+            tx_pos = tx_hashes.index(tx_hash)
+        except ValueError:
+            raise RPCError(BAD_REQUEST,
+                           f'tx {hash_to_hex_str(tx_hash)} not in block at height {height:,d}')
+        branch, merkle_cost = await self._merkle_branch(height, tx_hashes, tx_pos)
+        return branch, tx_pos, tx_hashes_cost + merkle_cost
+
+    async def merkle_branch_for_tx_pos(self, height, tx_pos):
+        """Return a triple (branch, tx_hash_hex, cost)."""
+        tx_hashes, tx_hashes_cost = await self.tx_hashes_at_blockheight(height)
+        try:
+            tx_hash = tx_hashes[tx_pos]
+        except IndexError:
+            raise RPCError(BAD_REQUEST,
+                           f'no tx at position {tx_pos:,d} in block at height {height:,d}')
+        branch, merkle_cost = await self._merkle_branch(height, tx_hashes, tx_pos)
+        return branch, hash_to_hex_str(tx_hash), tx_hashes_cost + merkle_cost
+
+    async def tx_hashes_at_blockheight(self, height):
+        """Returns a pair (tx_hashes, cost).
+
+        tx_hashes is an ordered list of binary hashes, cost is an estimated cost of
+        getting the hashes; cheaper if in-cache. Raises RPCError. 
+
+        """
+        self._tx_hashes_lookups += 1
+        tx_hashes = self._tx_hashes_cache.get(height)
+        if tx_hashes:
+            self._tx_hashes_hits += 1
+            return tx_hashes, 0.1
+
+        # Ensure the tx_hashes are fresh before placing in the cache
+        while True:
+            reorg_count = self._reorg_count
+            try:
+                tx_hashes = await self.db.tx_hashes_at_blockheight(height)
+            except self.db.DBError as e:
+                raise RPCError(BAD_REQUEST, f'db error: {e!r}')
+            if reorg_count == self._reorg_count:
+                break
+
+        self._tx_hashes_cache[height] = tx_hashes
+
+        return tx_hashes, 0.25 + len(tx_hashes) * 0.0001
+
+    def session_count(self):
+        """The number of connections that we've sent something to."""
+        return len(self.sessions)
+
+    async def daemon_request(self, method, *args):
+        """Catch a DaemonError and convert it to an RPCError."""
+        try:
+            return await getattr(self.daemon, method)(*args)
+        except DaemonError as e:
+            raise RPCError(DAEMON_ERROR, f'daemon error: {e!r}') from None
+
+    async def raw_header(self, height):
+        """Return the binary header at the given height."""
+        try:
+            return await self.db.raw_header(height)
+        except IndexError:
+            raise RPCError(BAD_REQUEST, f'height {height:,d} '
+                                        'out of range') from None
+
+    async def broadcast_transaction(self, raw_tx):
+        hex_hash = await self.daemon.broadcast_transaction(raw_tx)
+        self.txs_sent += 1
+        return hex_hash
+
+    async def broadcast_transaction_validated(self, raw_tx, live_run):
+        self.bp.validate_ft_rules_raw_tx(raw_tx)
+        if live_run:
+            hex_hash = await self.daemon.broadcast_transaction(raw_tx)
+            self.txs_sent += 1
+            return hex_hash
+        else:
+            tx, tx_hash = self.env.coin.DESERIALIZER(bytes.fromhex(raw_tx), 0).read_tx_and_hash()
+            return hash_to_hex_str(tx_hash)
+
+    async def limited_history(self, hashX):
+        """Returns a pair (history, cost).
+
+        History is a sorted list of (tx_hash, height) tuples, or an RPCError."""
+        # History DoS limit. Each element of history is about 99 bytes when encoded
+        # as JSON.
+        limit = self.env.max_send // 99
+        cost = 0.1
+        self._history_lookups += 1
+        result = self._history_cache.get(hashX)
+        if result:
+            self._history_hits += 1
+        else:
+            result = await self.db.limited_history(hashX, limit=limit)
+            cost += 0.1 + len(result) * 0.001
+            if len(result) >= limit:
+                result = RPCError(BAD_REQUEST, 'history too large', cost=cost)
+            self._history_cache[hashX] = result
+
+        if isinstance(result, Exception):
+            raise result
+        return result, cost
+
+    async def get_history_op(self, hashX, limit=10, offset=0, op=None, reverse=True):
+        history_data = self._history_op_cache.get(hashX, [])
+        if not history_data:
+            history_data = []
+            txnum_padding = bytes(8 - TXNUM_LEN)
+            for _key, hist in self.db.history.db.iterator(prefix=hashX, reverse=reverse):
+                for tx_numb in util.chunks(hist, TXNUM_LEN):
+                    tx_num, = util.unpack_le_uint64(tx_numb + txnum_padding)
+                    op_data = self._tx_num_op_cache.get(tx_num)
+                    if not op_data:
+                        op_prefix_key = b'op' + util.pack_le_uint64(tx_num)
+                        tx_op = self.db.utxo_db.get(op_prefix_key)
+                        if tx_op:
+                            op_data, = util.unpack_le_uint32(tx_op)
+                            self._tx_num_op_cache[tx_num] = op_data
+                    history_data.append({"tx_num": tx_num, "op": op_data})
+            self._history_op_cache[hashX] = history_data
+        if reverse:
+            history_data.sort(key=lambda x: x['tx_num'], reverse=reverse)
+        if op:
+            history_data = list(filter(lambda x: x["op"] == op, history_data))
+        else:
+            history_data = list(filter(lambda x: x["op"], history_data))
+        return history_data[offset:limit + offset], len(history_data)
+
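As an illustrative aside (not part of the patch), the post-processing in `get_history_op` above boils down to sort, filter by op, then page; a simplified, runnable mirror of that flow with plain dicts standing in for the real DB rows:

```python
# Simplified mirror of get_history_op's sort/filter/page flow; row shapes
# are illustrative only.
def page_ops(history, op=None, limit=10, offset=0, reverse=True):
    history = sorted(history, key=lambda x: x["tx_num"], reverse=reverse)
    if op:
        history = [h for h in history if h["op"] == op]
    else:
        history = [h for h in history if h["op"]]  # drop rows with no op
    return history[offset:offset + limit], len(history)


rows = [{"tx_num": 1, "op": 3}, {"tx_num": 2, "op": 0}, {"tx_num": 3, "op": 3}]
print(page_ops(rows, op=3, limit=1))  # ([{'tx_num': 3, 'op': 3}], 2)
```

+    # Analyze the transaction detail by txid.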
+    # See BlockProcessor.op_list for the complete op list.
+    async def get_transaction_detail(self, txid: str, height=None, tx_num=-1):
+        tx_hash = hex_str_to_hash(txid)
+        res = self._tx_detail_cache.get(tx_hash)
+        if res:
+            # The txid may be the same across reorgs; this cache key should
+            # include a height prefix.
+            self.logger.debug(f"read transaction detail from cache {txid}")
+            return res
+        if not height:
+            tx_num, height = self.db.get_tx_num_height_from_tx_hash(tx_hash)
+
+        raw_tx = self.db.get_raw_tx_by_tx_hash(tx_hash)
+        if not raw_tx:
+            raw_tx = await self.daemon_request('getrawtransaction', txid, False)
+            raw_tx = bytes.fromhex(raw_tx)
+        tx, _tx_hash = self.env.coin.DESERIALIZER(raw_tx, 0).read_tx_and_hash()
+        assert tx_hash == _tx_hash
+        ops = self.db.get_op_by_tx_num(tx_num)
+        op_raw = self.bp.op_list_vk[ops[0]] if ops else ""
+
+        operation_found_at_inputs = parse_protocols_operations_from_witness_array(tx, tx_hash, True)
+        atomicals_spent_at_inputs = self.bp.build_atomicals_spent_at_inputs_for_validation_only(tx)
+        atomicals_receive_at_outputs = self.bp.build_atomicals_receive_at_ouutput_for_validation_only(tx, tx_hash)
+        blueprint_builder = AtomicalsTransferBlueprintBuilder(
+            self.logger,
+            atomicals_spent_at_inputs,
+            operation_found_at_inputs,
+            tx_hash,
+            tx,
+            self.bp.get_atomicals_id_mint_info,
+            True
+        )
+        is_burned = blueprint_builder.are_fts_burned
+        is_cleanly_assigned = blueprint_builder.cleanly_assigned
+        # format burned_fts
+        raw_burned_fts = blueprint_builder.get_fts_burned()
+        burned_fts = {}
+        for ft_key, ft_value in raw_burned_fts.items():
+            burned_fts[location_id_bytes_to_compact(ft_key)] = ft_value
+
+        res = {
+            "txid": txid,
+            "height": height,
+            "tx_num": tx_num,
+            "info": {},
+            "transfers": {
+                "inputs": {},
+                "outputs": {},
+                "is_burned": is_burned,
+                "burned_fts": burned_fts,
+                "is_cleanly_assigned": is_cleanly_assigned
+            }
+        }
+        operation_type = operation_found_at_inputs.get("op", "") if operation_found_at_inputs else ""
+        if operation_found_at_inputs:
+            payload = operation_found_at_inputs.get("payload")
+            payload_not_none = payload or {}
+            res["info"]["payload"] = payload_not_none
+            if blueprint_builder.is_mint and operation_type in ["dmt", "ft"]:
+                expected_output_index = 0
+                txout = tx.outputs[expected_output_index]
+                location = tx_hash + util.pack_le_uint32(expected_output_index)
+                # if saved into the db, it means the mint succeeded
+                has_atomicals = self.db.get_atomicals_by_location_long_form(location)
+                if len(has_atomicals):
+                    ticker_name = payload_not_none.get("args", {}).get("mint_ticker", "")
+                    status, candidate_atomical_id, _ = self.bp.get_effective_ticker(ticker_name, self.bp.height)
+                    if status:
+                        atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
+                        res["info"] = {
+                            "atomical_id": atomical_id,
+                            "location_id": location_id_bytes_to_compact(location),
+                            "payload": payload,
+                            "outputs": {
+                                expected_output_index: [{
+                                    "address": get_address_from_output_script(txout.pk_script),
+                                    "atomical_id": atomical_id,
+                                    "type": "FT",
+                                    "index": expected_output_index,
+                                    "value": txout.value
+                                }]
+                            }
+                        }
+            elif operation_type == "nft":
+                if atomicals_receive_at_outputs:
+                    expected_output_index = 0
+                    location = tx_hash + util.pack_le_uint32(expected_output_index)
+                    txout = tx.outputs[expected_output_index]
+                    atomical_id = location_id_bytes_to_compact(
+                        atomicals_receive_at_outputs[expected_output_index][-1]["atomical_id"]
+                    )
+                    res["info"] = {
+                        "atomical_id": atomical_id,
+                        "location_id": location_id_bytes_to_compact(location),
+                        "payload": payload,
+                        "outputs": {
+                            
expected_output_index: [{ + "address": get_address_from_output_script(txout.pk_script), + "atomical_id": atomical_id, + "type": "NFT", + "index": expected_output_index, + "value": txout.value + }] + } + } + # no operation_found_at_inputs, it will be transfer. + if blueprint_builder.ft_atomicals and atomicals_spent_at_inputs: + if not operation_type and not op_raw: + op_raw = "transfer" + for atomical_id, input_ft in blueprint_builder.ft_atomicals.items(): + compact_atomical_id = location_id_bytes_to_compact(atomical_id) + for i in input_ft.input_indexes: + prev_txid = hash_to_hex_str(tx.inputs[i.txin_index].prev_hash) + prev_raw_tx = self.db.get_raw_tx_by_tx_hash(hex_str_to_hash(prev_txid)) + if not prev_raw_tx: + prev_raw_tx = await self.daemon_request('getrawtransaction', prev_txid, False) + prev_raw_tx = bytes.fromhex(prev_raw_tx) + self.bp.general_data_cache[b'rtx' + hex_str_to_hash(prev_txid)] = prev_raw_tx + prev_tx, _ = self.env.coin.DESERIALIZER(prev_raw_tx, 0).read_tx_and_hash() + ft_data = { + "address": get_address_from_output_script( + prev_tx.outputs[tx.inputs[i.txin_index].prev_idx].pk_script), + "atomical_id": compact_atomical_id, + "type": "FT", + "index": i.txin_index, + "value": prev_tx.outputs[tx.inputs[i.txin_index].prev_idx].value + } + if i.txin_index not in res["transfers"]["inputs"]: + res["transfers"]["inputs"][i.txin_index] = [ft_data] + else: + res["transfers"]["inputs"][i.txin_index].append(ft_data) + for k, v in blueprint_builder.ft_output_blueprint.outputs.items(): + for atomical_id, output_ft in v['atomicals'].items(): + compact_atomical_id = location_id_bytes_to_compact(atomical_id) + ft_data = { + "address": get_address_from_output_script(tx.outputs[k].pk_script), + "atomical_id": compact_atomical_id, + "type": "FT", + "index": k, + "value": output_ft.satvalue + } + if k not in res["transfers"]["outputs"]: + res["transfers"]["outputs"][k] = [ft_data] + else: + res["transfers"]["outputs"][k].append(ft_data) + if blueprint_builder.nft_atomicals and atomicals_spent_at_inputs: + if not operation_type and not op_raw: + op_raw = "transfer" + for atomical_id, input_nft in blueprint_builder.nft_atomicals.items(): + compact_atomical_id = location_id_bytes_to_compact(atomical_id) + for i in input_nft.input_indexes: + prev_txid = hash_to_hex_str(tx.inputs[i.txin_index].prev_hash) + prev_raw_tx = self.db.get_raw_tx_by_tx_hash(hex_str_to_hash(prev_txid)) + if not prev_raw_tx: + prev_raw_tx = await self.daemon_request('getrawtransaction', prev_txid, False) + prev_raw_tx = bytes.fromhex(prev_raw_tx) + self.bp.general_data_cache[b'rtx' + hex_str_to_hash(prev_txid)] = prev_raw_tx + prev_tx, _ = self.env.coin.DESERIALIZER(prev_raw_tx, 0).read_tx_and_hash() + nft_data = { + "address": get_address_from_output_script( + prev_tx.outputs[tx.inputs[i.txin_index].prev_idx].pk_script), + "atomical_id": compact_atomical_id, + "type": "NFT", + "index": i.txin_index, + "value": prev_tx.outputs[tx.inputs[i.txin_index].prev_idx].value + } + if i.txin_index not in res["transfers"]["inputs"]: + res["transfers"]["inputs"][i.txin_index] = [nft_data] + else: + res["transfers"]["inputs"][i.txin_index].append(nft_data) + for k, v in blueprint_builder.nft_output_blueprint.outputs.items(): + for atomical_id, output_nft in v['atomicals'].items(): + compact_atomical_id = location_id_bytes_to_compact(atomical_id) + nft_data = { + "address": get_address_from_output_script(tx.outputs[k].pk_script), + "atomical_id": compact_atomical_id, + "type": output_nft.type, + "index": k, + "value": 
output_nft.total_satsvalue
+                    }
+                    if k not in res["transfers"]["outputs"]:
+                        res["transfers"]["outputs"][k] = [nft_data]
+                    else:
+                        res["transfers"]["outputs"][k].append(nft_data)
+
+        atomical_id_for_payment, payment_marker_idx, _ = AtomicalsTransferBlueprintBuilder.get_atomical_id_for_payment_marker_if_found(
+            tx)
+        if atomical_id_for_payment:
+            res["info"]["payment"] = {
+                "atomical_id": location_id_bytes_to_compact(atomical_id_for_payment),
+                "payment_marker_idx": payment_marker_idx
+            }
+
+        if op_raw and height:
+            self._tx_detail_cache[tx_hash] = res
+        res["op"] = op_raw
+
+        # Recursively encode the result.
+        return auto_encode_bytes_elements(res)
+
+    async def transaction_global(
+            self,
+            limit: int = 10,
+            offset: int = 0,
+            op_type: Optional[str] = None,
+            reverse: bool = True
+    ):
+        height = self.bp.height
+        res = []
+        count = 0
+        history_list = []
+        for current_height in range(height, self.env.coin.ATOMICALS_ACTIVATION_HEIGHT, -1):
+            txs = self.db.get_atomicals_block_txs(current_height)
+            for tx in txs:
+                tx_num, _ = self.db.get_tx_num_height_from_tx_hash(hex_str_to_hash(tx))
+                history_list.append({
+                    "tx_num": tx_num,
+                    "tx_hash": tx,
+                    "height": current_height
+                })
+                count += 1
+            if count >= offset + limit:
+                break
+        history_list.sort(key=lambda x: x['tx_num'], reverse=reverse)
+
+        for history in history_list:
+            data = await self.get_transaction_detail(history["tx_hash"], history["height"], history["tx_num"])
+            if (op_type and op_type == data["op"]) or (not op_type and data["op"]):
+                res.append(data)
+        total = len(res)
+        return {"result": res[offset:offset + limit], "total": total, "limit": limit, "offset": offset}
+
+    async def _notify_sessions(self, height, touched):
+        """Notify sessions about height changes and touched addresses."""
+        height_changed = height != self.notified_height
+        if height_changed:
+            await self._refresh_hsub_results(height)
+            # Invalidate all history caches since they rely on block heights
+            self._history_cache.clear()
+            # Invalidate our op cache for touched hashXs
+            op_cache = self._history_op_cache
+            for hashX in set(op_cache).intersection(touched):
+                op_cache.pop(hashX, None)
+                self.logger.info(f"refresh op cache {self.notified_height}")
+                # Use the asyncio sleep (not time.sleep) so the event loop is
+                # not blocked, then re-warm the op cache for this hashX.
+                await sleep(2)
+                await self.get_history_op(hashX, 10, 0, None, True)
+
+        for session in self.sessions:
+            if self._task_group.joined:  # this can happen during shutdown
+                self.logger.warning("task group already terminated. not notifying sessions.")
not notifying sessions.") + return + await self._task_group.spawn(session.notify, touched, height_changed) + + def _ip_addr_group_name(self, session) -> Optional[str]: + host = session.remote_address().host + if isinstance(host, (IPv4Address, IPv6Address)): + if host.is_private: # exempt private addresses + return None + if isinstance(host, IPv4Address): + subnet_size = self.env.session_group_by_subnet_ipv4 + subnet = IPv4Network(host).supernet(prefixlen_diff=32 - subnet_size) + return str(subnet) + elif isinstance(host, IPv6Address): + subnet_size = self.env.session_group_by_subnet_ipv6 + subnet = IPv6Network(host).supernet(prefixlen_diff=128 - subnet_size) + return str(subnet) + return 'unknown_addr' + + def _session_group(self, name: Optional[str], weight: float) -> Optional[SessionGroup]: + if name is None: + return None + group = self.session_groups.get(name) + if not group: + group = SessionGroup(name, weight, set(), 0) + self.session_groups[name] = group + return group + + def add_session(self, session): + self.session_event.set() + # Return the session groups + groups = ( + self._session_group(self._ip_addr_group_name(session), 1.0), + ) + groups = tuple(group for group in groups if group is not None) + self.sessions[session] = groups + for group in groups: + group.sessions.add(session) + + def remove_session(self, session): + """Remove a session from our sessions list if there.""" + self.session_event.set() + groups = self.sessions.pop(session) + for group in groups: + group.retained_cost += session.cost + group.sessions.remove(session) diff --git a/electrumx/server/session/shared_session.py b/electrumx/server/session/shared_session.py new file mode 100644 index 00000000..1c928ff8 --- /dev/null +++ b/electrumx/server/session/shared_session.py @@ -0,0 +1,64 @@ +from aiorpcx import RPCError +from logging import LoggerAdapter + +from electrumx.lib import util +from electrumx.lib.util_atomicals import AtomicalsValidationError +from electrumx.server.daemon import DaemonError +from electrumx.server.session import ATOMICALS_INVALID_TX, BAD_REQUEST + + +class SharedSession: + def __init__(self, session_mgr: 'SessionManager', logger: LoggerAdapter): + self.session_mgr = session_mgr + self.logger = logger + self.txs_sent = 0 + + async def transaction_broadcast_validate(self, raw_tx: str = ""): + """Simulate a Broadcast a raw transaction to the network. + + raw_tx: the raw transaction as a hexadecimal string to validate for Atomicals FT rules""" + # This returns errors as JSON RPC errors, as is natural + try: + hex_hash = await self.session_mgr.broadcast_transaction_validated(raw_tx, False) + return hex_hash + except AtomicalsValidationError as e: + self.logger.info(f'error validating atomicals transaction: {e}') + raise RPCError( + ATOMICALS_INVALID_TX, + f'the transaction was rejected by atomicals rules.\n\n{e}\n[{raw_tx}]' + ) + + async def transaction_broadcast(self, raw_tx): + """Broadcast a raw transaction to the network. + + raw_tx: the raw transaction as a hexadecimal string""" + # This returns errors as JSON RPC errors, as is natural. 
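+        # A DaemonError from the node maps to a BAD_REQUEST RPC error below,
+        # while an AtomicalsValidationError maps to the dedicated
+        # ATOMICALS_INVALID_TX code defined in electrumx.server.session.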
+ try: + hex_hash = await self.session_mgr.broadcast_transaction_validated(raw_tx, True) + hex_hash = await self.session_mgr.broadcast_transaction(raw_tx) + except DaemonError as e: + error, = e.args + message = error['message'] + self.logger.info(f'error sending transaction: {message}') + raise RPCError( + BAD_REQUEST, + f'the transaction was rejected by network rules.\n\n{message}\n[{raw_tx}]' + ) + except AtomicalsValidationError as e: + self.logger.info(f'error validating atomicals transaction: {e}') + raise RPCError( + ATOMICALS_INVALID_TX, + f'the transaction was rejected by atomicals rules.\n\n{e}\n[{raw_tx}]' + ) + else: + self.txs_sent += 1 + client_ver = util.protocol_tuple(self.client) + if client_ver != (0,): + msg = self.coin.warn_old_client_on_tx_broadcast(client_ver) + if msg: + self.logger.info(f'sent tx: {hex_hash}. and warned user to upgrade their ' + f'client from {self.client}') + return msg + + self.logger.info(f'sent tx: {hex_hash}') + return hex_hash diff --git a/electrumx/version.py b/electrumx/version.py new file mode 100644 index 00000000..77cdc88b --- /dev/null +++ b/electrumx/version.py @@ -0,0 +1,3 @@ +__version__ = "1.4.2.0" +electrumx_version = f'ElectrumX {__version__}' +electrumx_version_short = __version__ diff --git a/electrumx_compact_history b/electrumx_compact_history index 1449b53c..c9752c57 100755 --- a/electrumx_compact_history +++ b/electrumx_compact_history @@ -38,8 +38,8 @@ import traceback from os import environ from dotenv import load_dotenv -from electrumx import Env from electrumx.server.db import DB +from electrumx.server.env import Env load_dotenv() diff --git a/electrumx_server b/electrumx_server index 1098cc61..4420ca19 100755 --- a/electrumx_server +++ b/electrumx_server @@ -16,8 +16,9 @@ import sys import logging.handlers from dotenv import load_dotenv -from electrumx import Controller, Env from electrumx.lib.util import CompactFormatter, make_logger +from electrumx.server.controller import Controller +from electrumx.server.env import Env load_dotenv() diff --git a/tests/server/test_api.py b/tests/server/test_api.py index 7cad87e1..c60f336d 100644 --- a/tests/server/test_api.py +++ b/tests/server/test_api.py @@ -1,8 +1,9 @@ import asyncio +from aiorpcx import RPCError from unittest import mock -from aiorpcx import RPCError -from electrumx import Controller, Env +from electrumx.server.controller import Controller +from electrumx.server.env import Env loop = asyncio.get_event_loop() From aba57f6221cbaa48634ed7c203d8ef98670f2ee4 Mon Sep 17 00:00:00 2001 From: Wizz Wallet <153743376+wizz-wallet-dev@users.noreply.github.com> Date: Tue, 21 May 2024 10:30:15 +0800 Subject: [PATCH 02/13] Resolves recursive imports --- electrumx/server/env.py | 4 +- electrumx/server/session/electrumx_session.py | 30 ++------ electrumx/server/session/http_session.py | 54 +++++++++---- electrumx/server/session/session_base.py | 76 ++++--------------- electrumx/server/session/session_manager.py | 58 +++----------- electrumx/server/session/shared_session.py | 41 +++++++++- electrumx/server/session/util.py | 57 ++++++++++++++ 7 files changed, 166 insertions(+), 154 deletions(-) create mode 100644 electrumx/server/session/util.py diff --git a/electrumx/server/env.py b/electrumx/server/env.py index 52d8e887..006bca15 100644 --- a/electrumx/server/env.py +++ b/electrumx/server/env.py @@ -9,10 +9,10 @@ import re +from typing import Type, Union, TYPE_CHECKING +from aiorpcx import Service, ServicePart from ipaddress import IPv4Address, IPv6Address -from typing import Type, 
Union -from aiorpcx import Service, ServicePart from electrumx.lib.coins import Coin, AtomicalsCoinMixin from electrumx.lib.env_base import EnvBase diff --git a/electrumx/server/session/electrumx_session.py b/electrumx/server/session/electrumx_session.py index 2f30c92a..df987147 100644 --- a/electrumx/server/session/electrumx_session.py +++ b/electrumx/server/session/electrumx_session.py @@ -2,13 +2,15 @@ import codecs import datetime -from aiorpcx import timeout_after, TaskTimeout +from typing import Tuple +from aiorpcx import timeout_after, TaskTimeout, ReplyAndDisconnect from electrumx.lib import util from electrumx.lib.script2addr import get_address_from_output_script from electrumx.lib.util_atomicals import * from electrumx.server.daemon import DaemonError -from electrumx.server.session.session_base import * +from electrumx.server.session.session_base import SessionBase +from electrumx.server.session.util import * from electrumx.version import electrumx_version, electrumx_version_short @@ -1660,7 +1662,7 @@ async def crash_old_client(self, ptuple, crash_client_ver): async def transaction_broadcast_validate(self, raw_tx): self.bump_cost(0.25 + len(raw_tx) / 5000) - return await self.ss.transaction_broadcast_validate() + return await self.ss.transaction_broadcast_validate(raw_tx) async def transaction_broadcast(self, raw_tx): """Broadcast a raw transaction to the network. @@ -1673,27 +1675,7 @@ async def transaction_broadcast_force(self, raw_tx): """Broadcast a raw transaction to the network. Force even if invalid FT transfer raw_tx: the raw transaction as a hexadecimal string""" self.bump_cost(0.25 + len(raw_tx) / 5000) - # This returns errors as JSON RPC errors, as is natural - try: - hex_hash = await self.session_mgr.broadcast_transaction(raw_tx) - except DaemonError as e: - error, = e.args - message = error['message'] - self.logger.info(f'error sending transaction: {message}') - raise RPCError(BAD_REQUEST, 'the transaction was rejected by ' - f'network rules.\n\n{message}\n[{raw_tx}]') - else: - self.txs_sent += 1 - client_ver = util.protocol_tuple(self.client) - if client_ver != (0,): - msg = self.coin.warn_old_client_on_tx_broadcast(client_ver) - if msg: - self.logger.info(f'sent tx: {hex_hash}. and warned user to upgrade their ' - f'client from {self.client}') - return msg - - self.logger.info(f'sent tx: {hex_hash}') - return hex_hash + return await self.ss.transaction_broadcast_force(raw_tx) async def transaction_get(self, tx_hash, verbose=False): """Return the serialized raw transaction given its hash diff --git a/electrumx/server/session/http_session.py b/electrumx/server/session/http_session.py index 9e66418e..5e0e8091 100644 --- a/electrumx/server/session/http_session.py +++ b/electrumx/server/session/http_session.py @@ -63,7 +63,12 @@ def __init__(self, session_mgr, db, mempool, peer_mgr, kind): self.MAX_CHUNK_SIZE = 2016 self.hashX_subs = {} # Use the sharing session to manage handlers. - self.ss = SharedSession(self.session_mgr, self.logger) + self.ss = SharedSession( + self.logger, + self.coin, + self.session_mgr, + self.client, + ) async def get_rpc_server(self): for service in self.env.services: @@ -777,12 +782,23 @@ async def handle_post_method(self, request): # verified async def proxy(self, request): - result = {"success": True, "info": {"note": "Atomicals ElectrumX Digital Object Proxy Online", "usageInfo": { - "note": "The service offers both POST and GET requests for proxying requests to ElectrumX. 
To handle larger broadcast transaction payloads use the POST method instead of GET.", - "POST": "POST /proxy/:method with string encoded array in the field \\\"params\\\" in the request body. ", - "GET": "GET /proxy/:method?params=[\\\"value1\\\"] with string encoded array in the query argument \\\"params\\\" in the URL."}, - "healthCheck": "GET /proxy/health", - "github": "https://github.com/atomicals/electrumx-proxy", "license": "MIT"}} + result = { + "success": True, + "info": { + "note": "Atomicals ElectrumX Digital Object Proxy Online", + "usageInfo": { + "note": "The service offers both POST and GET requests for proxying requests to ElectrumX. " + "To handle larger broadcast transaction payloads use the POST method instead of GET.", + "POST": "POST /proxy/:method with string encoded array " + "in the field \\\"params\\\" in the request body. ", + "GET": "GET /proxy/:method?params=[\\\"value1\\\"] with string encoded array " + "in the query argument \\\"params\\\" in the URL." + }, + "healthCheck": "GET /proxy/health", + "github": "https://github.com/atomicals/electrumx-proxy", + "license": "MIT" + } + } return web.json_response(data=result) # verified @@ -818,14 +834,6 @@ async def scripthash_listunspent(self, request): hashX = scripthash_to_hashX(scripthash) return await self.hashX_listunspent(hashX) - # need verify - async def transaction_broadcast(self, request): - """Broadcast a raw transaction to the network. - raw_tx: the raw transaction as a hexadecimal string""" - params = await format_params(request) - raw_tx = params.get(0, "") - return await self.ss.transaction_broadcast(raw_tx) - # verified async def scripthash_get_history(self, request): """Return the confirmed and unconfirmed history of a scripthash.""" @@ -1094,6 +1102,22 @@ async def transaction_broadcast_validate(self, request): raw_tx = params.get(0, "") return await self.ss.transaction_broadcast_validate(raw_tx) + # need verify + async def transaction_broadcast(self, request): + """Broadcast a raw transaction to the network. + raw_tx: the raw transaction as a hexadecimal string""" + params = await format_params(request) + raw_tx = params.get(0, "") + return await self.ss.transaction_broadcast(raw_tx) + + # need verify + async def transaction_broadcast_force(self, request): + """Broadcast a raw transaction to the network. 
+ raw_tx: the raw transaction as a hexadecimal string""" + params = await format_params(request) + raw_tx = params.get(0, "") + return await self.ss.transaction_broadcast_force(raw_tx) + async def atomicals_get_ft_balances(self, request): """Return the FT balances for a scripthash address""" params = await format_params(request) diff --git a/electrumx/server/session/session_base.py b/electrumx/server/session/session_base.py index f9c3319e..7f7ec393 100644 --- a/electrumx/server/session/session_base.py +++ b/electrumx/server/session/session_base.py @@ -1,70 +1,18 @@ -from typing import Optional, Tuple, Callable, Dict +from typing import Optional, Tuple, Callable, Dict, TYPE_CHECKING import electrumx.lib.util as util import itertools from aiorpcx import Request, RPCSession, JSONRPCConnection, JSONRPCAutoDetect, NewlineFramer, ReplyAndDisconnect, \ - handler_invocation, RPCError + handler_invocation -from electrumx.lib.hash import hex_str_to_hash, HASHX_LEN -from electrumx.server.db import DB -from electrumx.server.mempool import MemPool -from electrumx.server.peers import PeerManager from electrumx.server.session import BAD_REQUEST from electrumx.server.session.shared_session import SharedSession - -def scripthash_to_hashX(scripthash): - try: - bin_hash = hex_str_to_hash(scripthash) - if len(bin_hash) == 32: - return bin_hash[:HASHX_LEN] - except (ValueError, TypeError): - pass - raise RPCError(BAD_REQUEST, f'{scripthash} is not a valid script hash') - - -def non_negative_integer(value): - """Return param value it is or can be converted to a non-negative - integer, otherwise raise an RPCError.""" - try: - value = int(value) - if value >= 0: - return value - except (ValueError, TypeError): - pass - raise RPCError(BAD_REQUEST, f'{value} should be a non-negative integer') - - -def assert_tx_hash(value): - """Raise an RPCError if the value is not a valid hexadecimal transaction hash. - - If it is valid, return it as 32-byte binary hash.""" - try: - raw_hash = hex_str_to_hash(value) - if len(raw_hash) == 32: - return raw_hash - except (ValueError, TypeError): - pass - raise RPCError(BAD_REQUEST, f'{value} should be a transaction hash') - - -def assert_atomical_id(value): - """Raise an RPCError if the value is not a valid atomical id - If it is valid, return it as 32-byte binary hash.""" - try: - if value is None or value == "": - raise RPCError(BAD_REQUEST, f'atomical_id required') - index_of_i = value.find("i") - if index_of_i != 64: - raise RPCError(BAD_REQUEST, f'{value} should be an atomical_id') - raw_hash = hex_str_to_hash(value[: 64]) - if len(raw_hash) == 32: - return raw_hash - except (ValueError, TypeError): - pass - - raise RPCError(BAD_REQUEST, f'{value} should be an atomical_id') +if TYPE_CHECKING: + from electrumx.server.db import DB + from electrumx.server.mempool import MemPool + from electrumx.server.peers import PeerManager class SessionBase(RPCSession): @@ -76,7 +24,6 @@ class SessionBase(RPCSession): MAX_CHUNK_SIZE = 2016 session_counter = itertools.count() - log_new = False def __init__( self, @@ -99,7 +46,7 @@ def __init__( self.client = 'unknown' self.anon_logs = self.env.anon_logs self.txs_sent = 0 - self.log_me = SessionBase.log_new + self.log_me = False self.session_id = None self.daemon_request = self.session_mgr.daemon_request self.session_id = next(self.session_counter) @@ -113,7 +60,12 @@ def __init__( self.protocol_tuple: Optional[Tuple[int, ...]] = None self.request_handlers: Optional[Dict[str, Callable]] = None # Use the sharing session to manage handlers. 
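+        # SharedSession now receives its collaborators (logger, coin, session
+        # manager and client string) explicitly rather than reaching back into
+        # the session object, which helps untangle the recursive imports.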
-        self.ss = SharedSession(self.session_mgr, self.logger)
+        self.ss = SharedSession(
+            self.logger,
+            self.coin,
+            self.session_mgr,
+            self.client,
+        )
 
     async def notify(self, touched, height_changed):
         pass
@@ -182,7 +134,7 @@ async def handle_request(self, request):
 class LocalRPC(SessionBase):
     """A local TCP RPC server session."""
 
-    processing_timeout = 10**9  # disable timeouts
+    processing_timeout = 10 ** 9  # disable timeouts
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
diff --git a/electrumx/server/session/session_manager.py b/electrumx/server/session/session_manager.py
index 43b72f80..178ac734 100644
--- a/electrumx/server/session/session_manager.py
+++ b/electrumx/server/session/session_manager.py
@@ -17,24 +17,23 @@
 from electrumx.lib.text import sessions_lines
 from electrumx.lib.util import OldTaskGroup
 from electrumx.lib.util_atomicals import *
-from electrumx.server.block_processor import BlockProcessor
-from electrumx.server.daemon import DaemonError, Daemon
-from electrumx.server.db import DB
-from electrumx.server.env import Env
 from electrumx.server.history import TXNUM_LEN
 from electrumx.server.http_middleware import *
 from electrumx.server.mempool import MemPool
 from electrumx.server.session import BAD_REQUEST, DAEMON_ERROR
-from electrumx.server.session.http_session import HttpHandler
-from electrumx.server.session.session_base import LocalRPC, SessionBase, non_negative_integer
+from electrumx.server.session.util import non_negative_integer
 from electrumx.server.peers import PeerManager
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Type
 from electrumx.version import electrumx_version
 
 if TYPE_CHECKING:
-    pass
+    from electrumx.server.block_processor import BlockProcessor
+    from electrumx.server.daemon import DaemonError, Daemon
+    from electrumx.server.db import DB
+    from electrumx.server.env import Env
+    from electrumx.server.session.http_session import HttpHandler
@@ -149,6 +148,7 @@ async def _start_servers(self, services):
                 app.router.add_get('/proxy/blockchain.scripthash.listunspent', handler.scripthash_listunspent)
                 app.router.add_get('/proxy/blockchain.scripthash.subscribe', handler.scripthash_subscribe)
                 app.router.add_get('/proxy/blockchain.transaction.broadcast', handler.transaction_broadcast)
+                app.router.add_get('/proxy/blockchain.transaction.broadcast_force', handler.transaction_broadcast_force)
                 app.router.add_get('/proxy/blockchain.transaction.get', handler.transaction_get)
                 app.router.add_get('/proxy/blockchain.transaction.get_merkle', handler.transaction_merkle)
                 app.router.add_get('/proxy/blockchain.transaction.id_from_pos', handler.transaction_id_from_pos)
@@ -300,7 +300,10 @@ async def _start_servers(self, services):
         else:
             sslc = None
         if service.protocol == 'rpc':
-            session_class = LocalRPC
+            # Imported locally so this module does not import session_base at
+            # module level, which would recreate the circular import this
+            # patch resolves.
+            from electrumx.server.session.session_base import LocalRPC
+            session_class = LocalRPC
         else:
             session_class = self.env.coin.SESSIONCLS
         if service.protocol in ('ws', 'wss'):
@@ -570,44 +570,6 @@ async def rpc_disconnect(self, session_ids):
         await self._disconnect_sessions(sessions, 'local RPC request to disconnect')
         return result
 
-    async def rpc_log(self, session_ids):
-        """Toggle logging of sesssions.
-
-        session_ids: array of session or group IDs, or 'all', 'none', 'new'
-        """
-        refs = self._session_references(session_ids, {'all', 'none', 'new'})
-        result = []
-
-        def add_result(text, value):
-            result.append(f'logging {text}' if value else f'not logging {text}')
-
-        if 'all' in refs.specials:
-            for session in self.sessions:
-                session.log_me = True
-            SessionBase.log_new = True
-            result.append('logging all sessions')
-        if 'none' in refs.specials:
-            for session in self.sessions:
-                session.log_me = False
-            SessionBase.log_new = False
-            result.append('logging no sessions')
-        if 'new' in refs.specials:
-            SessionBase.log_new = not SessionBase.log_new
-            add_result('new sessions', SessionBase.log_new)
-
-        sessions = refs.sessions
-        for session in sessions:
-            session.log_me = not session.log_me
-            add_result(f'session {session.session_id}', session.log_me)
-        for group in refs.groups:
-            for session in group.sessions.difference(sessions):
-                sessions.add(session)
-                session.log_me = not session.log_me
-                add_result(f'session {session.session_id}', session.log_me)
-
-        result.extend(f'unknown: {item}' for item in refs.unknown)
-        return result
-
     async def rpc_daemon_url(self, daemon_url):
         """Replace the daemon URL."""
@@ -903,7 +865,7 @@ async def broadcast_transaction(self, raw_tx):
         self.txs_sent += 1
         return hex_hash
 
-    async def broadcast_transaction_validated(self, raw_tx, live_run):
+    async def broadcast_transaction_validated(self, raw_tx: str, live_run: bool):
         self.bp.validate_ft_rules_raw_tx(raw_tx)
         if live_run:
             hex_hash = await self.daemon.broadcast_transaction(raw_tx)
diff --git a/electrumx/server/session/shared_session.py b/electrumx/server/session/shared_session.py
index 1c928ff8..2fb91871 100644
--- a/electrumx/server/session/shared_session.py
+++ b/electrumx/server/session/shared_session.py
@@ -1,3 +1,5 @@
+from typing import Type, TYPE_CHECKING, Union
+
 from aiorpcx import RPCError
 from logging import LoggerAdapter
 
@@ -5,13 +7,25 @@
 from electrumx.lib.util_atomicals import AtomicalsValidationError
 from electrumx.server.daemon import DaemonError
 from electrumx.server.session import ATOMICALS_INVALID_TX, BAD_REQUEST
+from electrumx.server.session.session_manager import SessionManager
+
+if TYPE_CHECKING:
+    from electrumx.lib.coins import AtomicalsCoinMixin, Coin
 
 
 class SharedSession:
-    def __init__(self, session_mgr: 'SessionManager', logger: LoggerAdapter):
+    def __init__(
+        self,
+        logger: LoggerAdapter,
+        coin: Type[Union['Coin', 'AtomicalsCoinMixin']],
+        session_mgr: SessionManager,
+        client: str,
+    ):
         self.session_mgr = session_mgr
         self.logger = logger
-        self.txs_sent = 0
+        self.txs_sent: int = 0
+        self.client: str = client
+        self.coin = coin
 
     async def transaction_broadcast_validate(self, raw_tx: str = ""):
         """Simulate broadcasting a raw transaction to the network.
@@ -35,7 +49,6 @@ async def transaction_broadcast(self, raw_tx):
         # This returns errors as JSON RPC errors, as is natural.
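+        # With live_run=True the validated call also relays the transaction,
+        # so a second broadcast_transaction() call is unnecessary.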
try: hex_hash = await self.session_mgr.broadcast_transaction_validated(raw_tx, True) - hex_hash = await self.session_mgr.broadcast_transaction(raw_tx) except DaemonError as e: error, = e.args message = error['message'] @@ -50,6 +63,28 @@ async def transaction_broadcast(self, raw_tx): ATOMICALS_INVALID_TX, f'the transaction was rejected by atomicals rules.\n\n{e}\n[{raw_tx}]' ) + else: + self.txs_sent += 1 + client_ver = util.protocol_tuple(self.client) + if client_ver != (0, ): + msg = self.coin.warn_old_client_on_tx_broadcast(client_ver) + if msg: + self.logger.info(f'sent tx: {hex_hash}. and warned user to upgrade their ' + f'client from {self.client}') + return msg + + self.logger.info(f'sent tx: {hex_hash}') + return hex_hash + + async def transaction_broadcast_force(self, raw_tx: str): + try: + hex_hash = await self.session_mgr.broadcast_transaction(raw_tx) + except DaemonError as e: + error, = e.args + message = error['message'] + self.logger.info(f'error sending transaction: {message}') + raise RPCError(BAD_REQUEST, 'the transaction was rejected by ' + f'network rules.\n\n{message}\n[{raw_tx}]') else: self.txs_sent += 1 client_ver = util.protocol_tuple(self.client) diff --git a/electrumx/server/session/util.py b/electrumx/server/session/util.py new file mode 100644 index 00000000..1140d956 --- /dev/null +++ b/electrumx/server/session/util.py @@ -0,0 +1,57 @@ +from aiorpcx import RPCError + +from electrumx.lib.hash import hex_str_to_hash, HASHX_LEN +from electrumx.server.session import BAD_REQUEST + + +def scripthash_to_hashX(scripthash): + try: + bin_hash = hex_str_to_hash(scripthash) + if len(bin_hash) == 32: + return bin_hash[:HASHX_LEN] + except (ValueError, TypeError): + pass + raise RPCError(BAD_REQUEST, f'{scripthash} is not a valid script hash') + + +def non_negative_integer(value): + """Return param value it is or can be converted to a non-negative + integer, otherwise raise an RPCError.""" + try: + value = int(value) + if value >= 0: + return value + except (ValueError, TypeError): + pass + raise RPCError(BAD_REQUEST, f'{value} should be a non-negative integer') + + +def assert_tx_hash(value): + """Raise an RPCError if the value is not a valid hexadecimal transaction hash. 
+ + If it is valid, return it as 32-byte binary hash.""" + try: + raw_hash = hex_str_to_hash(value) + if len(raw_hash) == 32: + return raw_hash + except (ValueError, TypeError): + pass + raise RPCError(BAD_REQUEST, f'{value} should be a transaction hash') + + +def assert_atomical_id(value): + """Raise an RPCError if the value is not a valid atomical id + If it is valid, return it as 32-byte binary hash.""" + try: + if value is None or value == "": + raise RPCError(BAD_REQUEST, f'atomical_id required') + index_of_i = value.find("i") + if index_of_i != 64: + raise RPCError(BAD_REQUEST, f'{value} should be an atomical_id') + raw_hash = hex_str_to_hash(value[: 64]) + if len(raw_hash) == 32: + return raw_hash + except (ValueError, TypeError): + pass + + raise RPCError(BAD_REQUEST, f'{value} should be an atomical_id') From 4bbee8f559db2ad08a2cd64d1a9dc1c96f4ff977 Mon Sep 17 00:00:00 2001 From: AM shadow <154503348+shadowv0vshadow@users.noreply.github.com> Date: Tue, 21 May 2024 23:49:39 +0800 Subject: [PATCH 03/13] fix transaction bug --- electrumx/server/session.py | 3490 +++++++++++++++++++++++++++++++++++ 1 file changed, 3490 insertions(+) create mode 100644 electrumx/server/session.py diff --git a/electrumx/server/session.py b/electrumx/server/session.py new file mode 100644 index 00000000..612ef530 --- /dev/null +++ b/electrumx/server/session.py @@ -0,0 +1,3490 @@ +# Copyright (c) 2016-2018, Neil Booth +# +# All rights reserved. +# +# See the file "LICENCE" for information about the copyright +# and warranty status of this software. + +'''Classes for local RPC server and remote client TCP/SSL servers.''' + +import asyncio +import codecs +import datetime +import itertools +import math +import os +import ssl +import time +from collections import defaultdict +from functools import partial +from ipaddress import IPv4Address, IPv6Address, IPv4Network, IPv6Network +from typing import Optional, TYPE_CHECKING +import asyncio + +import attr +import pylru +from aiohttp import web +from aiorpcx import (Event, JSONRPCAutoDetect, JSONRPCConnection, + ReplyAndDisconnect, Request, RPCError, RPCSession, + handler_invocation, serve_rs, serve_ws, sleep, + NewlineFramer, TaskTimeout, timeout_after, run_in_thread) + +import electrumx +from electrumx.lib.atomicals_blueprint_builder import AtomicalsTransferBlueprintBuilder +from electrumx.lib.script2addr import get_address_from_output_script +import electrumx.lib.util as util +from electrumx.lib.util import OldTaskGroup, unpack_le_uint64 +from electrumx.lib.util_atomicals import ( + DFT_MINT_MAX_MAX_COUNT_DENSITY, + format_name_type_candidates_to_rpc, + SUBREALM_MINT_PATH, + MINT_SUBNAME_RULES_BECOME_EFFECTIVE_IN_BLOCKS, + DMINT_PATH, + convert_db_mint_info_to_rpc_mint_info_format, + compact_to_location_id_bytes, + location_id_bytes_to_compact, + is_compact_atomical_id, + format_name_type_candidates_to_rpc_for_subname, + calculate_latest_state_from_mod_history, + parse_protocols_operations_from_witness_array, + validate_rules_data, + AtomicalsValidationError, + auto_encode_bytes_elements, + validate_merkle_proof_dmint +) +from electrumx.lib.hash import (HASHX_LEN, Base58Error, hash_to_hex_str, + hex_str_to_hash, sha256, double_sha256) +from electrumx.lib.merkle import MerkleCache +from electrumx.lib.text import sessions_lines +from electrumx.server.daemon import DaemonError +from electrumx.server.history import TXNUM_LEN +from electrumx.server.http_middleware import rate_limiter, cors_middleware, error_middleware, request_middleware +from 
electrumx.server.http_session import HttpHandler +from electrumx.server.peers import PeerManager +from electrumx.lib.script import SCRIPTHASH_LEN + +if TYPE_CHECKING: + from electrumx.server.db import DB + from electrumx.server.env import Env + from electrumx.server.block_processor import BlockProcessor + from electrumx.server.daemon import Daemon + from electrumx.server.mempool import MemPool + + +BAD_REQUEST = 1 +DAEMON_ERROR = 2 +ATOMICALS_INVALID_TX = 800422 + +def scripthash_to_hashX(scripthash): + try: + bin_hash = hex_str_to_hash(scripthash) + if len(bin_hash) == 32: + return bin_hash[:HASHX_LEN] + except (ValueError, TypeError): + pass + raise RPCError(BAD_REQUEST, f'{scripthash} is not a valid script hash') + +def non_negative_integer(value): + '''Return param value it is or can be converted to a non-negative + integer, otherwise raise an RPCError.''' + try: + value = int(value) + if value >= 0: + return value + except (ValueError, TypeError): + pass + raise RPCError(BAD_REQUEST, + f'{value} should be a non-negative integer') + +def assert_boolean(value): + '''Return param value it is boolean otherwise raise an RPCError.''' + if value in (False, True): + return value + raise RPCError(BAD_REQUEST, f'{value} should be a boolean value') + +def assert_tx_hash(value): + '''Raise an RPCError if the value is not a valid hexadecimal transaction hash. + + If it is valid, return it as 32-byte binary hash. + ''' + try: + raw_hash = hex_str_to_hash(value) + if len(raw_hash) == 32: + return raw_hash + except (ValueError, TypeError): + pass + raise RPCError(BAD_REQUEST, f'{value} should be a transaction hash') + +def assert_atomical_id(value): + '''Raise an RPCError if the value is not a valid atomical id + If it is valid, return it as 32-byte binary hash. 
+ ''' + try: + if value == None or value == "": + raise RPCError(BAD_REQUEST, f'atomical_id required') + index_of_i = value.find("i") + if index_of_i != 64: + raise RPCError(BAD_REQUEST, f'{value} should be an atomical_id') + raw_hash = hex_str_to_hash(value[ : 64]) + if len(raw_hash) == 32: + return raw_hash + except (ValueError, TypeError): + pass + + raise RPCError(BAD_REQUEST, f'{value} should be an atomical_id') + +@attr.s(slots=True) +class SessionGroup: + name = attr.ib() + weight = attr.ib() + sessions = attr.ib() + retained_cost = attr.ib() + + def session_cost(self): + return sum(session.cost for session in self.sessions) + + def cost(self): + return self.retained_cost + self.session_cost() + + +@attr.s(slots=True) +class SessionReferences: + # All attributes are sets but groups is a list + sessions = attr.ib() + groups = attr.ib() + specials = attr.ib() # Lower-case strings + unknown = attr.ib() # Strings + + +class SessionManager: + '''Holds global state about all sessions.''' + + def __init__( + self, + env: 'Env', + db: 'DB', + bp: 'BlockProcessor', + daemon: 'Daemon', + mempool: 'MemPool', + shutdown_event: asyncio.Event, + ): + env.max_send = max(350000, env.max_send) + self.env = env + self.db = db + self.bp = bp + self.daemon = daemon + self.mempool = mempool + self.peer_mgr = PeerManager(env, db) + self.shutdown_event = shutdown_event + self.logger = util.class_logger(__name__, self.__class__.__name__) + self.servers = {} # service->server + self.sessions = {} # session->iterable of its SessionGroups + self.session_groups = {} # group name->SessionGroup instance + self.txs_sent = 0 + # Would use monotonic time, but aiorpcx sessions use Unix time: + self.start_time = time.time() + self._method_counts = defaultdict(int) + self._reorg_count = 0 + self._history_cache = pylru.lrucache(1000) + self._history_lookups = 0 + self._history_hits = 0 + self._history_op_cache = pylru.lrucache(1000) + self._tx_num_op_cache = pylru.lrucache(10000000) + self._tx_hashes_cache = pylru.lrucache(1000) + self._tx_hashes_lookups = 0 + self._tx_hashes_hits = 0 + # Really a MerkleCache cache + self._merkle_cache = pylru.lrucache(1000) + self._merkle_lookups = 0 + self._merkle_hits = 0 + self.estimatefee_cache = pylru.lrucache(1000) + self._tx_detail_cache = pylru.lrucache(1000000) + self.notified_height = None + self.hsub_results = None + self._task_group = OldTaskGroup() + self._sslc = None + # Event triggered when electrumx is listening for incoming requests. 
+ self.server_listening = Event() + self.session_event = Event() + + # Set up the RPC request handlers + cmds = ('add_peer daemon_url disconnect getinfo groups log peers ' + 'query reorg sessions stop debug_memusage_list_all_objects ' + 'debug_memusage_get_random_backref_chain'.split()) + LocalRPC.request_handlers = {cmd: getattr(self, 'rpc_' + cmd) + for cmd in cmds} + + def _ssl_context(self): + if self._sslc is None: + self._sslc = ssl.SSLContext(ssl.PROTOCOL_TLS) + self._sslc.load_cert_chain(self.env.ssl_certfile, keyfile=self.env.ssl_keyfile) + return self._sslc + + async def _start_servers(self, services): + for service in services: + kind = service.protocol.upper() + if service.protocol == 'http': + host = None if service.host == 'all_interfaces' else str(service.host) + try: + app = web.Application(middlewares=[ + cors_middleware(self), + error_middleware(self), + request_middleware(self), + ]) + handler = HttpHandler(self, self.db, self.mempool, self.peer_mgr, kind) + # GET + app.router.add_get('/proxy', handler.proxy) + app.router.add_get('/proxy/health', handler.health) + app.router.add_get('/proxy/blockchain.block.header', handler.block_header) + app.router.add_get('/proxy/blockchain.block.headers', handler.block_headers) + app.router.add_get('/proxy/blockchain.estimatefee', handler.estimatefee) + # app.router.add_get('/proxy/headers.subscribe', handler.headers_subscribe) + # app.router.add_get('/proxy/relayfee', handler.relayfee) + app.router.add_get('/proxy/blockchain.scripthash.get_balance', handler.scripthash_get_balance) + app.router.add_get('/proxy/blockchain.scripthash.get_history', handler.scripthash_get_history) + app.router.add_get('/proxy/blockchain.scripthash.get_mempool', handler.scripthash_get_mempool) + app.router.add_get('/proxy/blockchain.scripthash.listunspent', handler.scripthash_listunspent) + app.router.add_get('/proxy/blockchain.scripthash.subscribe', handler.scripthash_subscribe) + app.router.add_get('/proxy/blockchain.transaction.broadcast', handler.transaction_broadcast) + app.router.add_get('/proxy/blockchain.transaction.get', handler.transaction_get) + app.router.add_get('/proxy/blockchain.transaction.get_merkle', handler.transaction_merkle) + app.router.add_get('/proxy/blockchain.transaction.id_from_pos', handler.transaction_id_from_pos) + # app.router.add_get('/proxy/server.add_peer', handler.add_peer) + # app.router.add_get('/proxy/server.banner', handler.banner) + app.router.add_get('/proxy/server.donation_address', handler.donation_address) + app.router.add_get('/proxy/server.features', handler.server_features_async) + app.router.add_get('/proxy/server.peers.subscribe', handler.peers_subscribe) + app.router.add_get('/proxy/server.ping', handler.ping) + # app.router.add_get('/proxy/server.version', handler.server_version) + app.router.add_get('/proxy/blockchain.atomicals.validate', handler.transaction_broadcast_validate) + app.router.add_get('/proxy/blockchain.atomicals.get_ft_balances_scripthash', handler.atomicals_get_ft_balances) + app.router.add_get('/proxy/blockchain.atomicals.get_nft_balances_scripthash', handler.atomicals_get_nft_balances) + app.router.add_get('/proxy/blockchain.atomicals.listscripthash', handler.atomicals_listscripthash) + app.router.add_get('/proxy/blockchain.atomicals.list', handler.atomicals_list) + app.router.add_get('/proxy/blockchain.atomicals.get_numbers', handler.atomicals_num_to_id) + app.router.add_get('/proxy/blockchain.atomicals.get_block_hash', handler.atomicals_block_hash) + 
app.router.add_get('/proxy/blockchain.atomicals.get_block_txs', handler.atomicals_block_txs) + app.router.add_get('/proxy/blockchain.atomicals.dump', handler.atomicals_dump) + app.router.add_get('/proxy/blockchain.atomicals.at_location', handler.atomicals_at_location) + app.router.add_get('/proxy/blockchain.atomicals.get_location', handler.atomicals_get_location) + app.router.add_get('/proxy/blockchain.atomicals.get', handler.atomicals_get) + app.router.add_get('/proxy/blockchain.atomicals.get_global', handler.atomicals_get_global) + app.router.add_get('/proxy/blockchain.atomicals.get_state', handler.atomical_get_state) + app.router.add_get('/proxy/blockchain.atomicals.get_state_history', handler.atomical_get_state_history) + app.router.add_get('/proxy/blockchain.atomicals.get_events', handler.atomical_get_events) + app.router.add_get('/proxy/blockchain.atomicals.get_tx_history', handler.atomicals_get_tx_history) + app.router.add_get('/proxy/blockchain.atomicals.get_realm_info', handler.atomicals_get_realm_info) + app.router.add_get('/proxy/blockchain.atomicals.get_by_realm', handler.atomicals_get_by_realm) + app.router.add_get('/proxy/blockchain.atomicals.get_by_subrealm', handler.atomicals_get_by_subrealm) + app.router.add_get('/proxy/blockchain.atomicals.get_by_dmitem', handler.atomicals_get_by_dmitem) + app.router.add_get('/proxy/blockchain.atomicals.get_by_ticker', handler.atomicals_get_by_ticker) + app.router.add_get('/proxy/blockchain.atomicals.get_by_container', handler.atomicals_get_by_container) + app.router.add_get('/proxy/blockchain.atomicals.get_by_container_item', handler.atomicals_get_by_container_item) + app.router.add_get('/proxy/blockchain.atomicals.get_by_container_item_validate', handler.atomicals_get_by_container_item_validation) + app.router.add_get('/proxy/blockchain.atomicals.get_container_items', handler.atomicals_get_container_items) + app.router.add_get('/proxy/blockchain.atomicals.get_ft_info', handler.atomicals_get_ft_info) + app.router.add_get('/proxy/blockchain.atomicals.get_dft_mints', handler.atomicals_get_dft_mints) + app.router.add_get('/proxy/blockchain.atomicals.find_tickers', handler.atomicals_search_tickers) + app.router.add_get('/proxy/blockchain.atomicals.find_realms', handler.atomicals_search_realms) + app.router.add_get('/proxy/blockchain.atomicals.find_subrealms', handler.atomicals_search_subrealms) + app.router.add_get('/proxy/blockchain.atomicals.find_containers', handler.atomicals_search_containers) + app.router.add_get('/proxy/blockchain.atomicals.get_holders', handler.atomicals_get_holders) + app.router.add_get('/proxy/blockchain.atomicals.transaction', handler.atomicals_transaction) + app.router.add_get('/proxy/blockchain.atomicals.transaction_by_height', handler.transaction_by_height) + app.router.add_get('/proxy/blockchain.atomicals.transaction_by_atomical_id', handler.transaction_by_atomical_id) + app.router.add_get('/proxy/blockchain.atomicals.transaction_by_scripthash', handler.transaction_by_scripthash) + app.router.add_get('/proxy/blockchain.atomicals.transaction_global', handler.transaction_global) + # POST + app.router.add_post('/proxy', handler.proxy) + app.router.add_post('/proxy/blockchain.block.header', handler.block_header) + app.router.add_post('/proxy/blockchain.block.headers', handler.block_headers) + app.router.add_post('/proxy/blockchain.estimatefee', handler.estimatefee) + # app.router.add_post('/proxy/headers.subscribe', handler.headers_subscribe) + # app.router.add_post('/proxy/relayfee', handler.relayfee) + 
app.router.add_post('/proxy/blockchain.scripthash.get_balance', handler.scripthash_get_balance) + app.router.add_post('/proxy/blockchain.scripthash.get_history', handler.scripthash_get_history) + app.router.add_post('/proxy/blockchain.scripthash.get_mempool', handler.scripthash_get_mempool) + app.router.add_post('/proxy/blockchain.scripthash.listunspent', handler.scripthash_listunspent) + app.router.add_post('/proxy/blockchain.scripthash.subscribe', handler.scripthash_subscribe) + app.router.add_post('/proxy/blockchain.transaction.broadcast', handler.transaction_broadcast) + app.router.add_post('/proxy/blockchain.transaction.get', handler.transaction_get) + app.router.add_post('/proxy/blockchain.transaction.get_merkle', handler.transaction_merkle) + app.router.add_post('/proxy/blockchain.transaction.id_from_pos', handler.transaction_id_from_pos) + # app.router.add_post('/proxy/server.add_peer', handler.add_peer) + # app.router.add_post('/proxy/server.banner', handler.banner) + app.router.add_post('/proxy/server.donation_address', handler.donation_address) + app.router.add_post('/proxy/server.features', handler.server_features_async) + app.router.add_post('/proxy/server.peers.subscribe', handler.peers_subscribe) + app.router.add_post('/proxy/server.ping', handler.ping) + # app.router.add_post('/proxy/server.version', handler.server_version) + app.router.add_post('/proxy/blockchain.atomicals.validate', handler.transaction_broadcast_validate) + app.router.add_post('/proxy/blockchain.atomicals.get_ft_balances_scripthash', handler.atomicals_get_ft_balances) + app.router.add_post('/proxy/blockchain.atomicals.get_nft_balances_scripthash', handler.atomicals_get_nft_balances) + app.router.add_post('/proxy/blockchain.atomicals.listscripthash', handler.atomicals_listscripthash) + app.router.add_post('/proxy/blockchain.atomicals.list', handler.atomicals_list) + app.router.add_post('/proxy/blockchain.atomicals.get_numbers', handler.atomicals_num_to_id) + app.router.add_post('/proxy/blockchain.atomicals.get_block_hash', handler.atomicals_block_hash) + app.router.add_post('/proxy/blockchain.atomicals.get_block_txs', handler.atomicals_block_txs) + app.router.add_post('/proxy/blockchain.atomicals.dump', handler.atomicals_dump) + app.router.add_post('/proxy/blockchain.atomicals.at_location', handler.atomicals_at_location) + app.router.add_post('/proxy/blockchain.atomicals.get_location', handler.atomicals_get_location) + app.router.add_post('/proxy/blockchain.atomicals.get', handler.atomicals_get) + app.router.add_post('/proxy/blockchain.atomicals.get_global', handler.atomicals_get_global) + app.router.add_post('/proxy/blockchain.atomicals.get_state', handler.atomical_get_state) + app.router.add_post('/proxy/blockchain.atomicals.get_state_history', handler.atomical_get_state_history) + app.router.add_post('/proxy/blockchain.atomicals.get_events', handler.atomical_get_events) + app.router.add_post('/proxy/blockchain.atomicals.get_tx_history', handler.atomicals_get_tx_history) + app.router.add_post('/proxy/blockchain.atomicals.get_realm_info', handler.atomicals_get_realm_info) + app.router.add_post('/proxy/blockchain.atomicals.get_by_realm', handler.atomicals_get_by_realm) + app.router.add_post('/proxy/blockchain.atomicals.get_by_subrealm', handler.atomicals_get_by_subrealm) + app.router.add_post('/proxy/blockchain.atomicals.get_by_dmitem', handler.atomicals_get_by_dmitem) + app.router.add_post('/proxy/blockchain.atomicals.get_by_ticker', handler.atomicals_get_by_ticker) + 
app.router.add_post('/proxy/blockchain.atomicals.get_by_container', handler.atomicals_get_by_container) + app.router.add_post('/proxy/blockchain.atomicals.get_by_container_item', handler.atomicals_get_by_container_item) + app.router.add_post('/proxy/blockchain.atomicals.get_by_container_item_validate', handler.atomicals_get_by_container_item_validation) + app.router.add_post('/proxy/blockchain.atomicals.get_container_items', handler.atomicals_get_container_items) + app.router.add_post('/proxy/blockchain.atomicals.get_ft_info', handler.atomicals_get_ft_info) + app.router.add_post('/proxy/blockchain.atomicals.get_dft_mints', handler.atomicals_get_dft_mints) + app.router.add_post('/proxy/blockchain.atomicals.find_tickers', handler.atomicals_search_tickers) + app.router.add_post('/proxy/blockchain.atomicals.find_realms', handler.atomicals_search_realms) + app.router.add_post('/proxy/blockchain.atomicals.find_subrealms', handler.atomicals_search_subrealms) + app.router.add_post('/proxy/blockchain.atomicals.find_containers', handler.atomicals_search_containers) + app.router.add_post('/proxy/blockchain.atomicals.get_holders', handler.atomicals_get_holders) + app.router.add_post('/proxy/blockchain.atomicals.transaction', handler.atomicals_transaction) + app.router.add_post('/proxy/blockchain.atomicals.transaction_by_height', handler.transaction_by_height) + app.router.add_post('/proxy/blockchain.atomicals.transaction_by_atomical_id', handler.transaction_by_atomical_id) + app.router.add_post('/proxy/blockchain.atomicals.transaction_by_scripthash', handler.transaction_by_scripthash) + app.router.add_post('/proxy/blockchain.atomicals.transaction_global', handler.transaction_global) + # common proxy + app.router.add_get('/proxy/{method}', handler.handle_get_method) + app.router.add_post('/proxy/{method}', handler.handle_post_method) + app['rate_limiter'] = rate_limiter + runner = web.AppRunner(app) + await runner.setup() + site = web.TCPSite(runner, host, service.port) + await site.start() + except Exception as e: + self.logger.error(f'{kind} server failed to listen on {service.address}: {e}') + else: + self.logger.info(f'{kind} server listening on {service.address}') + else: + if service.protocol in self.env.SSL_PROTOCOLS: + sslc = self._ssl_context() + else: + sslc = None + if service.protocol == 'rpc': + session_class = LocalRPC + else: + session_class = self.env.coin.SESSIONCLS + if service.protocol in ('ws', 'wss'): + serve = serve_ws + else: + serve = serve_rs + # FIXME: pass the service not the kind + session_factory = partial(session_class, self, self.db, self.mempool, + self.peer_mgr, kind) + host = None if service.host == 'all_interfaces' else str(service.host) + try: + self.servers[service] = await serve(session_factory, host, + service.port, ssl=sslc) + except OSError as e: # don't suppress CancelledError + self.logger.error(f'{kind} server failed to listen on {service.address}: {e}') + else: + self.logger.info(f'{kind} server listening on {service.address}') + + + async def _start_external_servers(self): + '''Start listening on TCP and SSL ports, but only if the respective + port was given in the environment. 
+ ''' + await self._start_servers(service for service in self.env.services + if service.protocol != 'rpc') + self.server_listening.set() + + async def _stop_servers(self, services): + '''Stop the servers of the given protocols.''' + server_map = {service: self.servers.pop(service) + for service in set(services).intersection(self.servers)} + # Close all before waiting + for service, server in server_map.items(): + self.logger.info(f'closing down server for {service}') + server.close() + # No value in doing these concurrently + for server in server_map.values(): + await server.wait_closed() + + async def _manage_servers(self): + paused = False + max_sessions = self.env.max_sessions + low_watermark = max_sessions * 19 // 20 + while True: + await self.session_event.wait() + self.session_event.clear() + if not paused and len(self.sessions) >= max_sessions: + self.logger.info(f'maximum sessions {max_sessions:,d} ' + f'reached, stopping new connections until ' + f'count drops to {low_watermark:,d}') + await self._stop_servers(service for service in self.servers + if service.protocol != 'rpc') + paused = True + # Start listening for incoming connections if paused and + # session count has fallen + if paused and len(self.sessions) <= low_watermark: + self.logger.info('resuming listening for incoming connections') + await self._start_external_servers() + paused = False + + async def _log_sessions(self): + '''Periodically log sessions.''' + log_interval = self.env.log_sessions + if log_interval: + while True: + await sleep(log_interval) + data = self._session_data(for_log=True) + for line in sessions_lines(data): + self.logger.info(line) + self.logger.info(util.json_serialize(self._get_info())) + + async def _disconnect_sessions(self, sessions, reason, *, force_after=1.0): + if sessions: + session_ids = ', '.join(str(session.session_id) for session in sessions) + self.logger.info(f'{reason} session ids {session_ids}') + for session in sessions: + await self._task_group.spawn(session.close(force_after=force_after)) + + async def _clear_stale_sessions(self): + '''Cut off sessions that haven't done anything for 10 minutes.''' + while True: + await sleep(60) + stale_cutoff = time.time() - self.env.session_timeout + stale_sessions = [session for session in self.sessions + if session.last_recv < stale_cutoff] + await self._disconnect_sessions(stale_sessions, 'closing stale') + del stale_sessions + + async def _handle_chain_reorgs(self): + '''Clear certain caches on chain reorgs.''' + while True: + await self.bp.backed_up_event.wait() + self.logger.info(f'reorg signalled; clearing tx_hashes and merkle caches') + self._reorg_count += 1 + self._tx_hashes_cache.clear() + self._merkle_cache.clear() + + async def _recalc_concurrency(self): + '''Periodically recalculate session concurrency.''' + session_class = self.env.coin.SESSIONCLS + period = 300 + while True: + await sleep(period) + hard_limit = session_class.cost_hard_limit + + # Reduce retained group cost + refund = period * hard_limit / 5000 + dead_groups = [] + for group in self.session_groups.values(): + group.retained_cost = max(0.0, group.retained_cost - refund) + if group.retained_cost == 0 and not group.sessions: + dead_groups.append(group) + # Remove dead groups + for group in dead_groups: + self.session_groups.pop(group.name) + + # Recalc concurrency for sessions where cost is changing gradually, and update + # cost_decay_per_sec. 
+ for session in self.sessions: + # Subs have an on-going cost so decay more slowly with more subs + session.cost_decay_per_sec = hard_limit / (10000 + 5 * session.sub_count()) + session.recalc_concurrency() + + def _get_info(self): + '''A summary of server state.''' + cache_fmt = '{:,d} lookups {:,d} hits {:,d} entries' + sessions = self.sessions + return { + 'coin': self.env.coin.__name__, + 'daemon': self.daemon.logged_url(), + 'daemon height': self.daemon.cached_height(), + 'db height': self.db.db_height, + 'db_flush_count': self.db.history.flush_count, + 'groups': len(self.session_groups), + 'history cache': cache_fmt.format( + self._history_lookups, self._history_hits, len(self._history_cache)), + 'merkle cache': cache_fmt.format( + self._merkle_lookups, self._merkle_hits, len(self._merkle_cache)), + 'pid': os.getpid(), + 'peers': self.peer_mgr.info(), + 'request counts': self._method_counts, + 'request total': sum(self._method_counts.values()), + 'sessions': { + 'count': len(sessions), + 'count with subs': sum(len(getattr(s, 'hashX_subs', ())) > 0 for s in sessions), + 'errors': sum(s.errors for s in sessions), + 'logged': len([s for s in sessions if s.log_me]), + 'pending requests': sum(s.unanswered_request_count() for s in sessions), + 'subs': sum(s.sub_count() for s in sessions), + }, + 'tx hashes cache': cache_fmt.format( + self._tx_hashes_lookups, self._tx_hashes_hits, len(self._tx_hashes_cache)), + 'txs sent': self.txs_sent, + 'uptime': util.formatted_time(time.time() - self.start_time), + 'version': electrumx.version, + } + + def _session_data(self, for_log): + '''Returned to the RPC 'sessions' call.''' + now = time.time() + sessions = sorted(self.sessions, key=lambda s: s.start_time) + return [(session.session_id, + session.flags(), + session.remote_address_string(for_log=for_log), + session.client, + session.protocol_version_string(), + session.cost, + session.extra_cost(), + session.unanswered_request_count(), + session.txs_sent, + session.sub_count(), + session.recv_count, session.recv_size, + session.send_count, session.send_size, + now - session.start_time) + for session in sessions] + + def _group_data(self): + '''Returned to the RPC 'groups' call.''' + result = [] + for name, group in self.session_groups.items(): + sessions = group.sessions + result.append([name, + len(sessions), + group.session_cost(), + group.retained_cost, + sum(s.unanswered_request_count() for s in sessions), + sum(s.txs_sent for s in sessions), + sum(s.sub_count() for s in sessions), + sum(s.recv_count for s in sessions), + sum(s.recv_size for s in sessions), + sum(s.send_count for s in sessions), + sum(s.send_size for s in sessions), + ]) + return result + + async def _refresh_hsub_results(self, height): + '''Refresh the cached header subscription responses to be for height, + and record that as notified_height. 
+ ''' + # Paranoia: a reorg could race and leave db_height lower + height = min(height, self.db.db_height) + raw = await self.raw_header(height) + self.hsub_results = {'hex': raw.hex(), 'height': height} + self.notified_height = height + + def _session_references(self, items, special_strings): + '''Return a SessionReferences object.''' + if not isinstance(items, list) or not all(isinstance(item, str) for item in items): + raise RPCError(BAD_REQUEST, 'expected a list of session IDs') + + sessions_by_id = {session.session_id: session for session in self.sessions} + groups_by_name = self.session_groups + + sessions = set() + groups = set() # Names as groups are not hashable + specials = set() + unknown = set() + + for item in items: + if item.isdigit(): + session = sessions_by_id.get(int(item)) + if session: + sessions.add(session) + else: + unknown.add(item) + else: + lc_item = item.lower() + if lc_item in special_strings: + specials.add(lc_item) + else: + if lc_item in groups_by_name: + groups.add(lc_item) + else: + unknown.add(item) + + groups = [groups_by_name[group] for group in groups] + return SessionReferences(sessions, groups, specials, unknown) + + # --- LocalRPC command handlers + + async def rpc_add_peer(self, real_name): + '''Add a peer. + + real_name: "bch.electrumx.cash t50001 s50002" for example + ''' + await self.peer_mgr.add_localRPC_peer(real_name) + return f"peer '{real_name}' added" + + async def rpc_disconnect(self, session_ids): + '''Disconnect sesssions. + + session_ids: array of session IDs + ''' + refs = self._session_references(session_ids, {'all'}) + result = [] + + if 'all' in refs.specials: + sessions = self.sessions + result.append('disconnecting all sessions') + else: + sessions = refs.sessions + result.extend(f'disconnecting session {session.session_id}' for session in sessions) + for group in refs.groups: + result.append(f'disconnecting group {group.name}') + sessions.update(group.sessions) + result.extend(f'unknown: {item}' for item in refs.unknown) + + await self._disconnect_sessions(sessions, 'local RPC request to disconnect') + return result + + async def rpc_log(self, session_ids): + '''Toggle logging of sesssions. 
+ + session_ids: array of session or group IDs, or 'all', 'none', 'new' + ''' + refs = self._session_references(session_ids, {'all', 'none', 'new'}) + result = [] + + def add_result(text, value): + result.append(f'logging {text}' if value else f'not logging {text}') + + if 'all' in refs.specials: + for session in self.sessions: + session.log_me = True + SessionBase.log_new = True + result.append('logging all sessions') + if 'none' in refs.specials: + for session in self.sessions: + session.log_me = False + SessionBase.log_new = False + result.append('logging no sessions') + if 'new' in refs.specials: + SessionBase.log_new = not SessionBase.log_new + add_result('new sessions', SessionBase.log_new) + + sessions = refs.sessions + for session in sessions: + session.log_me = not session.log_me + add_result(f'session {session.session_id}', session.log_me) + for group in refs.groups: + for session in group.sessions.difference(sessions): + sessions.add(session) + session.log_me = not session.log_me + add_result(f'session {session.session_id}', session.log_me) + + result.extend(f'unknown: {item}' for item in refs.unknown) + return result + + async def rpc_daemon_url(self, daemon_url): + '''Replace the daemon URL.''' + daemon_url = daemon_url or self.env.daemon_url + try: + self.daemon.set_url(daemon_url) + except Exception as e: + raise RPCError(BAD_REQUEST, f'an error occurred: {e!r}') + return f'now using daemon at {self.daemon.logged_url()}' + + async def rpc_stop(self): + '''Shut down the server cleanly.''' + self.shutdown_event.set() + return 'stopping' + + async def rpc_getinfo(self): + '''Return summary information about the server process.''' + return self._get_info() + + async def rpc_groups(self): + '''Return statistics about the session groups.''' + return self._group_data() + + async def rpc_peers(self): + '''Return a list of data about server peers.''' + return self.peer_mgr.rpc_data() + + async def rpc_query(self, items, limit): + '''Returns data about a script, address or name.''' + coin = self.env.coin + db = self.db + lines = [] + + def arg_to_hashX(arg): + try: + script = bytes.fromhex(arg) + lines.append(f'Script: {arg}') + return coin.hashX_from_script(script) + except ValueError: + pass + + try: + hashX = coin.address_to_hashX(arg) + lines.append(f'Address: {arg}') + return hashX + except Base58Error: + pass + + try: + script = coin.build_name_index_script(arg.encode("ascii")) + hashX = coin.name_hashX_from_script(script) + lines.append(f'Name: {arg}') + return hashX + except (AttributeError, UnicodeEncodeError): + pass + + return None + + for arg in items: + hashX = arg_to_hashX(arg) + if not hashX: + continue + n = None + history = await db.limited_history(hashX, limit=limit) + for n, (tx_hash, height) in enumerate(history): + lines.append(f'History #{n:,d}: height {height:,d} ' + f'tx_hash {hash_to_hex_str(tx_hash)}') + if n is None: + lines.append('No history found') + n = None + utxos = await db.all_utxos(hashX) + for n, utxo in enumerate(utxos, start=1): + lines.append(f'UTXO #{n:,d}: tx_hash ' + f'{hash_to_hex_str(utxo.tx_hash)} ' + f'tx_pos {utxo.tx_pos:,d} height ' + f'{utxo.height:,d} value {utxo.value:,d}') + if n == limit: + break + if n is None: + lines.append('No UTXOs found') + + balance = sum(utxo.value for utxo in utxos) + lines.append(f'Balance: {coin.decimal_value(balance):,f} ' + f'{coin.SHORTNAME}') + + return lines + + async def rpc_sessions(self): + '''Return statistics about connected sessions.''' + return self._session_data(for_log=False) + + async def 
rpc_reorg(self, count): + '''Force a reorg of the given number of blocks. + + count: number of blocks to reorg + ''' + count = non_negative_integer(count) + if not self.bp.force_chain_reorg(count): + raise RPCError(BAD_REQUEST, 'still catching up with daemon') + return f'scheduled a reorg of {count:,d} blocks' + + async def rpc_debug_memusage_list_all_objects(self, limit: int) -> str: + """Return a string listing the most common types in memory.""" + import objgraph # optional dependency + import io + with io.StringIO() as fd: + objgraph.show_most_common_types( + limit=limit, + shortnames=False, + file=fd) + return fd.getvalue() + + async def rpc_debug_memusage_get_random_backref_chain(self, objtype: str) -> str: + """Return a dotfile as text containing the backref chain + for a randomly selected object of type objtype. + + Warning: very slow! and it blocks the server. + + To convert to image: + $ dot -Tps filename.dot -o outfile.ps + """ + import objgraph # optional dependency + import random + import io + with io.StringIO() as fd: + await run_in_thread( + lambda: + objgraph.show_chain( + objgraph.find_backref_chain( + random.choice(objgraph.by_type(objtype)), + objgraph.is_proper_module), + output=fd)) + return fd.getvalue() + + # --- External Interface + + async def serve(self, notifications, event): + '''Start the RPC server if enabled. When the event is triggered, + start TCP and SSL servers.''' + try: + await self._start_servers(service for service in self.env.services + if service.protocol == 'rpc') + await event.wait() + + session_class = self.env.coin.SESSIONCLS + session_class.cost_soft_limit = self.env.cost_soft_limit + session_class.cost_hard_limit = self.env.cost_hard_limit + session_class.cost_decay_per_sec = session_class.cost_hard_limit / 10000 + session_class.bw_cost_per_byte = 1.0 / self.env.bw_unit_cost + session_class.cost_sleep = self.env.request_sleep / 1000 + session_class.initial_concurrent = self.env.initial_concurrent + session_class.processing_timeout = self.env.request_timeout + + self.logger.info(f'max session count: {self.env.max_sessions:,d}') + self.logger.info(f'session timeout: {self.env.session_timeout:,d} seconds') + self.logger.info(f'session cost hard limit {self.env.cost_hard_limit:,d}') + self.logger.info(f'session cost soft limit {self.env.cost_soft_limit:,d}') + self.logger.info(f'bandwidth unit cost {self.env.bw_unit_cost:,d}') + self.logger.info(f'request sleep {self.env.request_sleep:,d}ms') + self.logger.info(f'request timeout {self.env.request_timeout:,d}s') + self.logger.info(f'initial concurrent {self.env.initial_concurrent:,d}') + + self.logger.info(f'max response size {self.env.max_send:,d} bytes') + if self.env.drop_client is not None: + self.logger.info( + f'drop clients matching: {self.env.drop_client.pattern}' + ) + for service in self.env.report_services: + self.logger.info(f'advertising service {service}') + # Start notifications; initialize hsub_results + await notifications.start(self.db.db_height, self._notify_sessions) + await self._start_external_servers() + # Peer discovery should start after the external servers + # because we connect to ourself + async with self._task_group as group: + await group.spawn(self.peer_mgr.discover_peers()) + await group.spawn(self._clear_stale_sessions()) + await group.spawn(self._handle_chain_reorgs()) + await group.spawn(self._recalc_concurrency()) + await group.spawn(self._log_sessions()) + await group.spawn(self._manage_servers()) + finally: + # Close servers then sessions + await 
self._stop_servers(self.servers.keys())
+        async with OldTaskGroup() as group:
+            for session in list(self.sessions):
+                await group.spawn(session.close(force_after=1))
+
+    def extra_cost(self, session):
+        # Note there is no guarantee that session is still in self.sessions. Example traceback:
+        # notify_sessions->notify->address_status->bump_cost->recalc_concurrency->extra_cost
+        # during which there are many places the session could be removed
+        groups = self.sessions.get(session)
+        if groups is None:
+            return 0
+        return sum((group.cost() - session.cost) * group.weight for group in groups)
+
+    async def _merkle_branch(self, height, tx_hashes, tx_pos):
+        tx_hash_count = len(tx_hashes)
+        cost = tx_hash_count
+
+        if tx_hash_count >= 200:
+            self._merkle_lookups += 1
+            merkle_cache = self._merkle_cache.get(height)
+            if merkle_cache:
+                self._merkle_hits += 1
+                cost = 10 * math.sqrt(tx_hash_count)
+            else:
+                async def tx_hashes_func(start, count):
+                    return tx_hashes[start: start + count]
+                merkle_cache = MerkleCache(self.db.merkle, tx_hashes_func)
+                self._merkle_cache[height] = merkle_cache
+                await merkle_cache.initialize(len(tx_hashes))
+            branch, _root = await merkle_cache.branch_and_root(tx_hash_count, tx_pos)
+        else:
+            branch, _root = self.db.merkle.branch_and_root(tx_hashes, tx_pos)
+
+        branch = [hash_to_hex_str(hash) for hash in branch]
+        return branch, cost / 2500
+
+    async def merkle_branch_for_tx_hash(self, height, tx_hash):
+        '''Return a triple (branch, tx_pos, cost).'''
+        tx_hashes, tx_hashes_cost = await self.tx_hashes_at_blockheight(height)
+        try:
+            tx_pos = tx_hashes.index(tx_hash)
+        except ValueError:
+            raise RPCError(BAD_REQUEST,
+                           f'tx {hash_to_hex_str(tx_hash)} not in block at height {height:,d}')
+        branch, merkle_cost = await self._merkle_branch(height, tx_hashes, tx_pos)
+        return branch, tx_pos, tx_hashes_cost + merkle_cost
+
+    async def merkle_branch_for_tx_pos(self, height, tx_pos):
+        '''Return a triple (branch, tx_hash_hex, cost).'''
+        tx_hashes, tx_hashes_cost = await self.tx_hashes_at_blockheight(height)
+        try:
+            tx_hash = tx_hashes[tx_pos]
+        except IndexError:
+            raise RPCError(BAD_REQUEST,
+                           f'no tx at position {tx_pos:,d} in block at height {height:,d}')
+        branch, merkle_cost = await self._merkle_branch(height, tx_hashes, tx_pos)
+        return branch, hash_to_hex_str(tx_hash), tx_hashes_cost + merkle_cost
+
+    async def tx_hashes_at_blockheight(self, height):
+        '''Returns a pair (tx_hashes, cost).
+
+        tx_hashes is an ordered list of binary hashes, cost is an estimated cost of
+        getting the hashes; cheaper if in-cache. Raises RPCError.
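+
+        A worked example of the cost model below (illustrative numbers taken
+        from the code): a cache hit returns a flat cost of 0.1, while a miss
+        costs 0.25 plus 0.0001 per hash, so a 2,000-tx block costs
+        0.25 + 2000 * 0.0001 = 0.45 on first access and 0.1 afterwards.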
+
+        '''
+        self._tx_hashes_lookups += 1
+        tx_hashes = self._tx_hashes_cache.get(height)
+        if tx_hashes:
+            self._tx_hashes_hits += 1
+            return tx_hashes, 0.1
+
+        # Ensure the tx_hashes are fresh before placing in the cache
+        while True:
+            reorg_count = self._reorg_count
+            try:
+                tx_hashes = await self.db.tx_hashes_at_blockheight(height)
+            except self.db.DBError as e:
+                raise RPCError(BAD_REQUEST, f'db error: {e!r}')
+            if reorg_count == self._reorg_count:
+                break
+
+        self._tx_hashes_cache[height] = tx_hashes
+
+        return tx_hashes, 0.25 + len(tx_hashes) * 0.0001
+
+    def session_count(self):
+        '''The number of connections that we've sent something to.'''
+        return len(self.sessions)
+
+    async def daemon_request(self, method, *args):
+        '''Catch a DaemonError and convert it to an RPCError.'''
+        try:
+            return await getattr(self.daemon, method)(*args)
+        except DaemonError as e:
+            raise RPCError(DAEMON_ERROR, f'daemon error: {e!r}') from None
+
+    async def raw_header(self, height):
+        '''Return the binary header at the given height.'''
+        try:
+            return await self.db.raw_header(height)
+        except IndexError:
+            raise RPCError(BAD_REQUEST, f'height {height:,d} '
+                           'out of range') from None
+
+    async def broadcast_transaction(self, raw_tx):
+        hex_hash = await self.daemon.broadcast_transaction(raw_tx)
+        self.txs_sent += 1
+        return hex_hash
+
+    async def broadcast_transaction_validated(self, raw_tx, live_run):
+        self.bp.validate_ft_rules_raw_tx(raw_tx)
+        if live_run:
+            hex_hash = await self.daemon.broadcast_transaction(raw_tx)
+            self.txs_sent += 1
+            return hex_hash
+        else:
+            tx, tx_hash = self.env.coin.DESERIALIZER(bytes.fromhex(raw_tx), 0).read_tx_and_hash()
+            return hash_to_hex_str(tx_hash)
+
+    async def limited_history(self, hashX):
+        '''Returns a pair (history, cost).
+
+        History is a sorted list of (tx_hash, height) tuples, or an RPCError.'''
+        # History DoS limit. Each element of history is about 99 bytes when
+        # encoded as JSON.
+        limit = self.env.max_send // 99
+        cost = 0.1
+        self._history_lookups += 1
+        result = self._history_cache.get(hashX)
+        if result:
+            self._history_hits += 1
+        else:
+            result = await self.db.limited_history(hashX, limit=limit)
+            cost += 0.1 + len(result) * 0.001
+            if len(result) >= limit:
+                result = RPCError(BAD_REQUEST, 'history too large', cost=cost)
+            self._history_cache[hashX] = result
+
+        if isinstance(result, Exception):
+            raise result
+        return result, cost
+
+    async def get_history_op(self, hashX, limit=10, offset=0, op=None, reverse=True):
+        history_data = self._history_op_cache.get(hashX, [])
+        if not history_data:
+            history_data = []
+            txnum_padding = bytes(8 - TXNUM_LEN)
+            for _key, hist in self.db.history.db.iterator(prefix=hashX, reverse=reverse):
+                for tx_numb in util.chunks(hist, TXNUM_LEN):
+                    tx_num, = util.unpack_le_uint64(tx_numb + txnum_padding)
+                    op_data = self._tx_num_op_cache.get(tx_num)
+                    if not op_data:
+                        op_prefix_key = b'op' + util.pack_le_uint64(tx_num)
+                        tx_op = self.db.utxo_db.get(op_prefix_key)
+                        if tx_op:
+                            op_data, = util.unpack_le_uint32(tx_op)
+                            self._tx_num_op_cache[tx_num] = op_data
+                    history_data.append({"tx_num": tx_num, "op": op_data})
+            self._history_op_cache[hashX] = history_data
+        if reverse:
+            history_data.sort(key=lambda x: x['tx_num'], reverse=reverse)
+        if op:
+            history_data = list(filter(lambda x: x["op"] == op, history_data))
+        else:
+            history_data = list(filter(lambda x: x["op"], history_data))
+        return history_data[offset:limit+offset], len(history_data)
+
+    # Analyze the transaction details for a given txid.
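+    # A minimal usage sketch (hypothetical txid and caller; assumes the
+    # session manager is wired to a synced db and daemon):
+    #
+    #     detail = await session_mgr.get_transaction_detail('00' * 32)
+    #     print(detail['op'], detail['transfers']['is_burned'])
+    #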
+
+    # See BlockProcessor.op_list for the complete op list.
+    async def get_transaction_detail(self, txid: str, height=None, tx_num=-1):
+        tx_hash = hex_str_to_hash(txid)
+        res = self._tx_detail_cache.get(tx_hash)
+        if res:
+            # The same txid can recur across reorgs; ideally this cache key
+            # would also be prefixed with the height.
+            self.logger.debug(f"read transaction detail from cache {txid}")
+            return res
+        if not height:
+            tx_num, height = self.db.get_tx_num_height_from_tx_hash(tx_hash)
+
+        raw_tx = self.db.get_raw_tx_by_tx_hash(tx_hash)
+        if not raw_tx:
+            raw_tx = await self.daemon_request('getrawtransaction', txid, False)
+            raw_tx = bytes.fromhex(raw_tx)
+        tx, _tx_hash = self.env.coin.DESERIALIZER(raw_tx, 0).read_tx_and_hash()
+        assert tx_hash == _tx_hash
+        ops = self.db.get_op_by_tx_num(tx_num)
+        op_raw = self.bp.op_list_vk[ops[0]] if ops else ""
+
+        operation_found_at_inputs = parse_protocols_operations_from_witness_array(tx, tx_hash, True)
+        atomicals_spent_at_inputs = self.bp.build_atomicals_spent_at_inputs_for_validation_only(tx)
+        atomicals_receive_at_outputs = self.bp.build_atomicals_receive_at_ouutput_for_validation_only(tx, tx_hash)
+        blueprint_builder = AtomicalsTransferBlueprintBuilder(
+            self.logger,
+            atomicals_spent_at_inputs,
+            operation_found_at_inputs,
+            tx_hash,
+            tx,
+            self.bp.get_atomicals_id_mint_info,
+            self.bp.is_dmint_activated(height),
+            self.bp.is_custom_coloring_activated(height),
+        )
+        is_burned = blueprint_builder.are_fts_burned
+        is_cleanly_assigned = blueprint_builder.cleanly_assigned
+        # Format the burned FTs.
+        raw_burned_fts = blueprint_builder.get_fts_burned()
+        burned_fts = {}
+        for ft_key, ft_value in raw_burned_fts.items():
+            burned_fts[location_id_bytes_to_compact(ft_key)] = ft_value
+
+        res = {
+            "txid": txid,
+            "height": height,
+            "tx_num": tx_num,
+            "info": {},
+            "transfers": {
+                "inputs": {},
+                "outputs": {},
+                "is_burned": is_burned,
+                "burned_fts": burned_fts,
+                "is_cleanly_assigned": is_cleanly_assigned
+            }
+        }
+        operation_type = operation_found_at_inputs.get("op", "") if operation_found_at_inputs else ""
+        if operation_found_at_inputs:
+            payload = operation_found_at_inputs.get("payload")
+            payload_not_none = payload or {}
+            res["info"]["payload"] = payload_not_none
+            if blueprint_builder.is_mint and operation_type in ["dmt", "ft"]:
+                expected_output_index = 0
+                txout = tx.outputs[expected_output_index]
+                location = tx_hash + util.pack_le_uint32(expected_output_index)
+                # If it was saved into the db, the mint succeeded.
+                has_atomicals = self.db.get_atomicals_by_location_long_form(location)
+                if len(has_atomicals):
+                    ticker_name = payload_not_none.get("args", {}).get("mint_ticker", "")
+                    status, candidate_atomical_id, _ = self.bp.get_effective_ticker(ticker_name, self.bp.height)
+                    if status:
+                        atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
+                        res["info"] = {
+                            "atomical_id": atomical_id,
+                            "location_id": location_id_bytes_to_compact(location),
+                            "payload": payload,
+                            "outputs": {
+                                expected_output_index: [{
+                                    "address": get_address_from_output_script(txout.pk_script),
+                                    "atomical_id": atomical_id,
+                                    "type": "FT",
+                                    "index": expected_output_index,
+                                    "value": txout.value
+                                }]
+                            }
+                        }
+            elif operation_type == "nft":
+                if atomicals_receive_at_outputs:
+                    expected_output_index = 0
+                    location = tx_hash + util.pack_le_uint32(expected_output_index)
+                    txout = tx.outputs[expected_output_index]
+                    atomical_id = location_id_bytes_to_compact(
+                        atomicals_receive_at_outputs[expected_output_index][-1]["atomical_id"]
+                    )
+                    res["info"] = {
+                        "atomical_id": atomical_id,
+                        "location_id": 
location_id_bytes_to_compact(location), + "payload": payload, + "outputs": { + expected_output_index: [{ + "address": get_address_from_output_script(txout.pk_script), + "atomical_id": atomical_id, + "type": "NFT", + "index": expected_output_index, + "value": txout.value + }] + } + } + # no operation_found_at_inputs, it will be transfer. + if blueprint_builder.ft_atomicals and atomicals_spent_at_inputs: + if not operation_type and not op_raw: + op_raw = "transfer" + for atomical_id, input_ft in blueprint_builder.ft_atomicals.items(): + compact_atomical_id = location_id_bytes_to_compact(atomical_id) + for i in input_ft.input_indexes: + prev_txid = hash_to_hex_str(tx.inputs[i.txin_index].prev_hash) + prev_raw_tx = self.db.get_raw_tx_by_tx_hash(hex_str_to_hash(prev_txid)) + if not prev_raw_tx: + prev_raw_tx = await self.daemon_request('getrawtransaction', prev_txid, False) + prev_raw_tx = bytes.fromhex(prev_raw_tx) + self.bp.general_data_cache[b'rtx' + hex_str_to_hash(prev_txid)] = prev_raw_tx + prev_tx, _ = self.env.coin.DESERIALIZER(prev_raw_tx, 0).read_tx_and_hash() + ft_data = { + "address": get_address_from_output_script(prev_tx.outputs[tx.inputs[i.txin_index].prev_idx].pk_script), + "atomical_id": compact_atomical_id, + "type": "FT", + "index": i.txin_index, + "value": prev_tx.outputs[tx.inputs[i.txin_index].prev_idx].value + } + if i.txin_index not in res["transfers"]["inputs"]: + res["transfers"]["inputs"][i.txin_index] = [ft_data] + else: + res["transfers"]["inputs"][i.txin_index].append(ft_data) + for k, v in blueprint_builder.ft_output_blueprint.outputs.items(): + for atomical_id, output_ft in v['atomicals'].items(): + compact_atomical_id = location_id_bytes_to_compact(atomical_id) + ft_data = { + "address": get_address_from_output_script(tx.outputs[k].pk_script), + "atomical_id": compact_atomical_id, + "type": "FT", + "index": k, + "value": output_ft.sat_value + } + if k not in res["transfers"]["outputs"]: + res["transfers"]["outputs"][k] = [ft_data] + else: + res["transfers"]["outputs"][k].append(ft_data) + if blueprint_builder.nft_atomicals and atomicals_spent_at_inputs: + if not operation_type and not op_raw: + op_raw = "transfer" + for atomical_id, input_nft in blueprint_builder.nft_atomicals.items(): + compact_atomical_id = location_id_bytes_to_compact(atomical_id) + for i in input_nft.input_indexes: + prev_txid = hash_to_hex_str(tx.inputs[i.txin_index].prev_hash) + prev_raw_tx = self.db.get_raw_tx_by_tx_hash(hex_str_to_hash(prev_txid)) + if not prev_raw_tx: + prev_raw_tx = await self.daemon_request('getrawtransaction', prev_txid, False) + prev_raw_tx = bytes.fromhex(prev_raw_tx) + self.bp.general_data_cache[b'rtx' + hex_str_to_hash(prev_txid)] = prev_raw_tx + prev_tx, _ = self.env.coin.DESERIALIZER(prev_raw_tx, 0).read_tx_and_hash() + nft_data = { + "address": get_address_from_output_script(prev_tx.outputs[tx.inputs[i.txin_index].prev_idx].pk_script), + "atomical_id": compact_atomical_id, + "type": "NFT", + "index": i.txin_index, + "value": prev_tx.outputs[tx.inputs[i.txin_index].prev_idx].value + } + if i.txin_index not in res["transfers"]["inputs"]: + res["transfers"]["inputs"][i.txin_index] = [nft_data] + else: + res["transfers"]["inputs"][i.txin_index].append(nft_data) + for k, v in blueprint_builder.nft_output_blueprint.outputs.items(): + for atomical_id, output_nft in v['atomicals'].items(): + compact_atomical_id = location_id_bytes_to_compact(atomical_id) + nft_data = { + "address": get_address_from_output_script(tx.outputs[k].pk_script), + "atomical_id": 
compact_atomical_id,
+                        "type": output_nft.type,
+                        "index": k,
+                        "value": output_nft.total_satsvalue
+                    }
+                    if k not in res["transfers"]["outputs"]:
+                        res["transfers"]["outputs"][k] = [nft_data]
+                    else:
+                        res["transfers"]["outputs"][k].append(nft_data)
+
+        atomical_id_for_payment, payment_marker_idx, _ = AtomicalsTransferBlueprintBuilder.get_atomical_id_for_payment_marker_if_found(tx)
+        if atomical_id_for_payment:
+            res["info"]["payment"] = {
+                "atomical_id": location_id_bytes_to_compact(atomical_id_for_payment),
+                "payment_marker_idx": payment_marker_idx
+            }
+
+        if op_raw and height:
+            self._tx_detail_cache[tx_hash] = res
+        res["op"] = op_raw
+
+        # Recursively encode the result.
+        return auto_encode_bytes_elements(res)
+
+    async def transaction_global(
+            self,
+            limit: int = 10,
+            offset: int = 0,
+            op_type: Optional[str] = None,
+            reverse: bool = True
+    ):
+        height = self.bp.height
+        res = []
+        count = 0
+        history_list = []
+        for current_height in range(height, self.env.coin.ATOMICALS_ACTIVATION_HEIGHT, -1):
+            txs = self.db.get_atomicals_block_txs(current_height)
+            for tx in txs:
+                tx_num, _ = self.db.get_tx_num_height_from_tx_hash(hex_str_to_hash(tx))
+                history_list.append({
+                    "tx_num": tx_num,
+                    "tx_hash": tx,
+                    "height": current_height
+                })
+                count += 1
+                if count >= offset + limit:
+                    break
+        history_list.sort(key=lambda x: x['tx_num'], reverse=reverse)
+
+        for history in history_list:
+            data = await self.get_transaction_detail(history["tx_hash"], history["height"], history["tx_num"])
+            if (op_type and op_type == data["op"]) or (not op_type and data["op"]):
+                res.append(data)
+        total = len(res)
+        return {"result": res[offset:offset+limit], "total": total, "limit": limit, "offset": offset}
+
+    async def _notify_sessions(self, height, touched):
+        '''Notify sessions about height changes and touched addresses.'''
+        height_changed = height != self.notified_height
+        if height_changed:
+            await self._refresh_hsub_results(height)
+            # Invalidate all history caches since they rely on block heights.
+            self._history_cache.clear()
+            # Invalidate our op cache for touched hashXs.
+            op_cache = self._history_op_cache
+            for hashX in set(op_cache).intersection(touched):
+                op_cache.pop(hashX, None)
+                self.logger.info(f"refresh op cache {self.notified_height}")
+                # Sleep asynchronously; a blocking time.sleep() here would
+                # stall the event loop and every connected session.
+                await asyncio.sleep(2)
+                background_task = asyncio.create_task(self.get_history_op(hashX, 10, 0, None, True))
+                await background_task
+
+        for session in self.sessions:
+            if self._task_group.joined:  # this can happen during shutdown
+                self.logger.warning("task group already terminated. 
not notifying sessions.") + return + await self._task_group.spawn(session.notify, touched, height_changed) + + def _ip_addr_group_name(self, session) -> Optional[str]: + host = session.remote_address().host + if isinstance(host, (IPv4Address, IPv6Address)): + if host.is_private: # exempt private addresses + return None + if isinstance(host, IPv4Address): + subnet_size = self.env.session_group_by_subnet_ipv4 + subnet = IPv4Network(host).supernet(prefixlen_diff=32 - subnet_size) + return str(subnet) + elif isinstance(host, IPv6Address): + subnet_size = self.env.session_group_by_subnet_ipv6 + subnet = IPv6Network(host).supernet(prefixlen_diff=128 - subnet_size) + return str(subnet) + return 'unknown_addr' + + def _session_group(self, name: Optional[str], weight: float) -> Optional[SessionGroup]: + if name is None: + return None + group = self.session_groups.get(name) + if not group: + group = SessionGroup(name, weight, set(), 0) + self.session_groups[name] = group + return group + + def add_session(self, session): + self.session_event.set() + # Return the session groups + groups = ( + self._session_group(self._ip_addr_group_name(session), 1.0), + ) + groups = tuple(group for group in groups if group is not None) + self.sessions[session] = groups + for group in groups: + group.sessions.add(session) + + def remove_session(self, session): + '''Remove a session from our sessions list if there.''' + self.session_event.set() + groups = self.sessions.pop(session) + for group in groups: + group.retained_cost += session.cost + group.sessions.remove(session) + + +class SessionBase(RPCSession): + '''Base class of ElectrumX JSON sessions. + + Each session runs its tasks in asynchronous parallelism with other + sessions. + ''' + + MAX_CHUNK_SIZE = 2016 + session_counter = itertools.count() + log_new = False + + def __init__( + self, + session_mgr: 'SessionManager', + db: 'DB', + mempool: 'MemPool', + peer_mgr: 'PeerManager', + kind: str, + transport, + ): + connection = JSONRPCConnection(JSONRPCAutoDetect) + super().__init__(transport, connection=connection) + self.session_mgr = session_mgr + self.db = db + self.mempool = mempool + self.peer_mgr = peer_mgr + self.kind = kind # 'RPC', 'TCP' etc. 
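+        # Note: session ids are drawn from the shared class-level
+        # itertools.count() above, so they are unique and monotonically
+        # increasing across all transports (0, 1, 2, ...).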
+ self.env = session_mgr.env + self.coin = self.env.coin + self.client = 'unknown' + self.anon_logs = self.env.anon_logs + self.txs_sent = 0 + self.log_me = SessionBase.log_new + self.session_id = None + self.daemon_request = self.session_mgr.daemon_request + self.session_id = next(self.session_counter) + context = {'conn_id': f'{self.session_id}'} + logger = util.class_logger(__name__, self.__class__.__name__) + self.logger = util.ConnectionLogger(logger, context) + self.logger.info(f'{self.kind} {self.remote_address_string()}, ' + f'{self.session_mgr.session_count():,d} total') + self.session_mgr.add_session(self) + self.recalc_concurrency() # must be called after session_mgr.add_session + + async def notify(self, touched, height_changed): + pass + + def default_framer(self): + return NewlineFramer(max_size=self.env.max_recv) + + def remote_address_string(self, *, for_log=True): + '''Returns the peer's IP address and port as a human-readable + string, respecting anon logs if the output is for a log.''' + if for_log and self.anon_logs: + return 'xx.xx.xx.xx:xx' + return str(self.remote_address()) + + def flags(self): + '''Status flags.''' + status = self.kind[0] + if self.is_closing(): + status += 'C' + if self.log_me: + status += 'L' + status += str(self._incoming_concurrency.max_concurrent) + return status + + async def connection_lost(self): + '''Handle client disconnection.''' + await super().connection_lost() + self.session_mgr.remove_session(self) + msg = '' + if self._incoming_concurrency.max_concurrent < self.initial_concurrent * 0.8: + msg += ' whilst throttled' + if self.send_size >= 1_000_000: + msg += f'. Sent {self.send_size:,d} bytes in {self.send_count:,d} messages' + if msg: + msg = 'disconnected' + msg + self.logger.info(msg) + + def sub_count(self): + return 0 + + async def handle_request(self, request): + """Handle an incoming request. ElectrumX doesn't receive + notifications from client sessions. + """ + if isinstance(request, Request): + handler = self.request_handlers.get(request.method) + method = request.method + args = request.args + else: + handler = None + method = 'invalid method' + args = None + self.logger.debug(f'Session request handling: [method] {method}, [args] {args}') + + # If DROP_CLIENT_UNKNOWN is enabled, check if the client identified + # by calling server.version previously. 
If not, disconnect the session + if self.env.drop_client_unknown and method != 'server.version' and self.client == 'unknown': + self.logger.info(f'disconnecting because client is unknown') + raise ReplyAndDisconnect( + BAD_REQUEST, f'use server.version to identify client') + + self.session_mgr._method_counts[method] += 1 + coro = handler_invocation(handler, request)() + return await coro + + +class ElectrumX(SessionBase): + '''A TCP server that handles incoming Electrum connections.''' + + PROTOCOL_MIN = (1, 4) + PROTOCOL_MAX = (1, 4, 3) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.subscribe_headers = False + self.connection.max_response_size = self.env.max_send + self.hashX_subs = {} + self.sv_seen = False + self.mempool_statuses = {} + self.set_request_handlers(self.PROTOCOL_MIN) + self.is_peer = False + self.cost = 5.0 # Connection cost + + @classmethod + def protocol_min_max_strings(cls): + return [util.version_string(ver) + for ver in (cls.PROTOCOL_MIN, cls.PROTOCOL_MAX)] + + @classmethod + def server_features(cls, env): + '''Return the server features dictionary.''' + hosts_dict = {} + for service in env.report_services: + port_dict = hosts_dict.setdefault(str(service.host), {}) + if service.protocol not in port_dict: + port_dict[f'{service.protocol}_port'] = service.port + + min_str, max_str = cls.protocol_min_max_strings() + return { + 'hosts': hosts_dict, + 'pruning': None, + 'server_version': electrumx.version, + 'protocol_min': min_str, + 'protocol_max': max_str, + 'genesis_hash': env.coin.GENESIS_HASH, + 'hash_function': 'sha256', + 'services': [str(service) for service in env.report_services], + } + + async def server_features_async(self): + self.bump_cost(0.2) + return self.server_features(self.env) + + @classmethod + def server_version_args(cls): + '''The arguments to a server.version RPC call to a peer.''' + return [electrumx.version, cls.protocol_min_max_strings()] + + def protocol_version_string(self): + return util.version_string(self.protocol_tuple) + + def extra_cost(self): + return self.session_mgr.extra_cost(self) + + def on_disconnect_due_to_excessive_session_cost(self): + remote_addr = self.remote_address() + ip_addr = remote_addr.host if remote_addr else None + groups = self.session_mgr.sessions[self] + group_names = [group.name for group in groups] + self.logger.info(f"closing session over res usage. ip: {ip_addr}. groups: {group_names}") + + def sub_count(self): + return len(self.hashX_subs) + + def unsubscribe_hashX(self, hashX): + self.mempool_statuses.pop(hashX, None) + return self.hashX_subs.pop(hashX, None) + + async def notify(self, touched, height_changed): + '''Wrap _notify_inner; websockets raises exceptions for unclear reasons.''' + try: + async with timeout_after(30): + await self._notify_inner(touched, height_changed) + except TaskTimeout: + self.logger.warning('timeout notifying client, closing...') + await self.close(force_after=1.0) + except Exception: + self.logger.exception('unexpected exception notifying client') + + async def _notify_inner(self, touched, height_changed): + '''Notify the client about changes to touched addresses (from mempool + updates or new blocks) and height. 
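+
+        A height change pushes a 'blockchain.headers.subscribe' notification;
+        each touched scripthash pushes 'blockchain.scripthash.subscribe' with
+        a freshly recomputed status (see the code below).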
+ ''' + if height_changed and self.subscribe_headers: + args = (await self.subscribe_headers_result(), ) + await self.send_notification('blockchain.headers.subscribe', args) + + touched = touched.intersection(self.hashX_subs) + if touched or (height_changed and self.mempool_statuses): + changed = {} + + for hashX in touched: + alias = self.hashX_subs.get(hashX) + if alias: + status = await self.subscription_address_status(hashX) + changed[alias] = status + + # Check mempool hashXs - the status is a function of the confirmed state of + # other transactions. + mempool_statuses = self.mempool_statuses.copy() + for hashX, old_status in mempool_statuses.items(): + alias = self.hashX_subs.get(hashX) + if alias: + status = await self.subscription_address_status(hashX) + if status != old_status: + changed[alias] = status + + method = 'blockchain.scripthash.subscribe' + for alias, status in changed.items(): + await self.send_notification(method, (alias, status)) + + if changed: + es = '' if len(changed) == 1 else 'es' + self.logger.info(f'notified of {len(changed):,d} address{es}') + + async def subscribe_headers_result(self): + '''The result of a header subscription or notification.''' + return self.session_mgr.hsub_results + + async def headers_subscribe(self): + '''Subscribe to get raw headers of new blocks.''' + if not self.subscribe_headers: + self.subscribe_headers = True + self.bump_cost(0.25) + return await self.subscribe_headers_result() + + async def add_peer(self, features): + '''Add a peer (but only if the peer resolves to the source).''' + self.is_peer = True + self.bump_cost(100.0) + return await self.peer_mgr.on_add_peer(features, self.remote_address()) + + async def peers_subscribe(self): + '''Return the server peers as a list of (ip, host, details) tuples.''' + self.bump_cost(1.0) + return self.peer_mgr.on_peers_subscribe(self.is_tor()) + + async def address_status(self, hashX): + '''Returns an address status. + + Status is a hex string, but must be None if there is no history. 
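+
+        Illustrative example of the status hashing implemented below: with a
+        single confirmed transaction the preimage is '<tx_hash_hex>:<height>:'
+        and the status is sha256 of that string, hex-encoded; mempool entries
+        contribute '<tx_hash_hex>:-1:' (unconfirmed parents) or
+        '<tx_hash_hex>:0:'.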
+ ''' + # Note history is ordered and mempool unordered in electrum-server + # For mempool, height is -1 if it has unconfirmed inputs, otherwise 0 + db_history, cost = await self.session_mgr.limited_history(hashX) + mempool = await self.mempool.transaction_summaries(hashX) + + status = ''.join(f'{hash_to_hex_str(tx_hash)}:' + f'{height:d}:' + for tx_hash, height in db_history) + status += ''.join(f'{hash_to_hex_str(tx.hash)}:' + f'{-tx.has_unconfirmed_inputs:d}:' + for tx in mempool) + + # Add status hashing cost + self.bump_cost(cost + 0.1 + len(status) * 0.00002) + + if status: + status = sha256(status.encode()).hex() + else: + status = None + + if mempool: + self.mempool_statuses[hashX] = status + else: + self.mempool_statuses.pop(hashX, None) + + return status + + async def subscription_address_status(self, hashX): + '''As for address_status, but if it can't be calculated the subscription is + discarded.''' + try: + return await self.address_status(hashX) + except RPCError: + self.unsubscribe_hashX(hashX) + return None + + async def hashX_listunspent(self, hashX): + '''Return the list of UTXOs of a script hash, including mempool + effects.''' + utxos = await self.db.all_utxos(hashX) + utxos = sorted(utxos) + utxos.extend(await self.mempool.unordered_UTXOs(hashX)) + self.bump_cost(1.0 + len(utxos) / 50) + spends = await self.mempool.potential_spends(hashX) + returned_utxos = [] + for utxo in utxos: + if (utxo.tx_hash, utxo.tx_pos) in spends: + continue + atomicals = self.db.get_atomicals_by_utxo(utxo, True) + atomicals_basic_infos = {} + for atomical_id in atomicals: + # This call is efficient in that it's cached underneath. + # Now we only show the atomical id and its corresponding value + # because it can always be fetched separately which is more efficient. 
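+                # Sketch of one returned entry (illustrative values only):
+                #   {'txid': '...', 'tx_hash': '...', 'index': 0, 'tx_pos': 0,
+                #    'vout': 0, 'height': 800000, 'value': 546,
+                #    'atomicals': {'<compact_atomical_id>': 546}}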
+
+                # TODO: combine mempool atomicals as well.
+                atomical_id_compact = location_id_bytes_to_compact(atomical_id)
+                location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos)
+                atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id)
+            returned_utxos.append({
+                'txid': hash_to_hex_str(utxo.tx_hash),
+                'tx_hash': hash_to_hex_str(utxo.tx_hash),
+                'index': utxo.tx_pos,
+                'tx_pos': utxo.tx_pos,
+                'vout': utxo.tx_pos,
+                'height': utxo.height,
+                'value': utxo.value,
+                'atomicals': atomicals_basic_infos
+            })
+        return returned_utxos
+
+    # Get the atomical_id from an atomical inscription number.
+    def get_atomical_id_by_atomical_number(self, atomical_number):
+        return self.db.get_atomical_id_by_atomical_number(atomical_number)
+
+    # Get atomicals base information from the db, or placeholder information
+    # if the mint is still unconfirmed in the mempool.
+    async def atomical_id_get(self, compact_atomical_id):
+        atomical_id = compact_to_location_id_bytes(compact_atomical_id)
+        atomical = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id)
+        if atomical:
+            return atomical
+        # Check the mempool.
+        atomical_in_mempool = await self.mempool.get_atomical_mint(atomical_id)
+        if atomical_in_mempool is None:
+            raise RPCError(BAD_REQUEST, f'"{compact_atomical_id}" is not found')
+        return atomical_in_mempool
+
+    async def atomical_id_get_ft_info(self, compact_atomical_id):
+        atomical_id = compact_to_location_id_bytes(compact_atomical_id)
+        atomical = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id)
+        if atomical['subtype'] == 'decentralized':
+            atomical = await self.session_mgr.bp.get_dft_mint_info_rpc_format_by_atomical_id(atomical_id)
+        elif atomical['subtype'] == 'direct':
+            atomical = await self.session_mgr.bp.get_ft_mint_info_rpc_format_by_atomical_id(atomical_id)
+        else:
+            raise RPCError(BAD_REQUEST, f'"{compact_atomical_id}" is not a fungible token (FT)')
+
+        if atomical:
+            return atomical
+        # Check the mempool.
+        atomical_in_mempool = await self.mempool.get_atomical_mint(atomical_id)
+        if atomical_in_mempool is None:
+            raise RPCError(BAD_REQUEST, f'"{compact_atomical_id}" is not found')
+        return atomical_in_mempool
+
+    async def atomical_id_get_state(self, compact_atomical_id, Verbose=False):
+        atomical_id = compact_to_location_id_bytes(compact_atomical_id)
+        atomical = await self.atomical_id_get(compact_atomical_id)
+        height = self.session_mgr.bp.height
+        self.db.populate_extended_mod_state_latest_atomical_info(atomical_id, atomical, height)
+        await self.db.populate_extended_location_atomical_info(atomical_id, atomical)
+        return atomical
+
+    async def atomical_id_get_state_history(self, compact_atomical_id):
+        atomical_id = compact_to_location_id_bytes(compact_atomical_id)
+        atomical = await self.atomical_id_get(compact_atomical_id)
+        height = self.session_mgr.bp.height
+        self.db.populate_extended_mod_state_history_atomical_info(atomical_id, atomical, height)
+        await self.db.populate_extended_location_atomical_info(atomical_id, atomical)
+        return atomical
+
+    async def atomical_id_get_events(self, compact_atomical_id):
+        atomical_id = compact_to_location_id_bytes(compact_atomical_id)
+        atomical = await self.atomical_id_get(compact_atomical_id)
+        height = self.session_mgr.bp.height
+        self.db.populate_extended_events_atomical_info(atomical_id, atomical, height)
+        await self.db.populate_extended_location_atomical_info(atomical_id, atomical)
+        return atomical
+
+    async def atomical_id_get_tx_history(self, 
compact_atomical_id): + atomical_id = compact_to_location_id_bytes(compact_atomical_id) + atomical = await self.atomical_id_get(compact_atomical_id) + history = await self.scripthash_get_history(hash_to_hex_str(double_sha256(atomical_id))) + history.sort(key=lambda x: x['height'], reverse=True) + + atomical['tx'] = { + 'history': history + } + return atomical + + async def atomical_id_get_location(self, compact_atomical_id): + atomical_id = compact_to_location_id_bytes(compact_atomical_id) + atomical = await self.atomical_id_get(compact_atomical_id) + await self.db.populate_extended_location_atomical_info(atomical_id, atomical) + return atomical + + async def get_summary_info(self, atomical_hash_count=10): + + if atomical_hash_count and atomical_hash_count > 100000: + atomical_hash_count = 100000 + + db_height = self.db.db_height + last_block_hash = self.db.get_atomicals_block_hash(db_height) + ret = { + 'coin': self.env.coin.__name__, + 'network': self.coin.NET, + 'height': db_height, + 'block_tip': hash_to_hex_str(self.db.db_tip), + 'server_time': datetime.datetime.now().isoformat(), + 'atomicals_block_tip': last_block_hash, + 'atomical_count': self.db.db_atomical_count + } + + list_hashes = {} + ret['atomicals_block_hashes'] = {} + # ret['atomicals_block_hashes'][db_height] = last_block_hash + for i in range(atomical_hash_count): + next_db_height = db_height - i + nextblockhash = self.db.get_atomicals_block_hash(next_db_height) + ret['atomicals_block_hashes'][next_db_height] = nextblockhash + return ret + + async def atomicals_list_get(self, limit, offset, asc): + atomicals = await self.db.get_atomicals_list(limit, offset, asc) + atomicals_populated = [] + for atomical_id in atomicals: + atomical = await self.atomical_id_get(location_id_bytes_to_compact(atomical_id)) + atomicals_populated.append(atomical) + return {'global': await self.get_summary_info(), 'result': atomicals_populated } + + async def atomicals_num_to_id(self, limit, offset, asc): + atomicals_num_to_id_map = await self.db.get_num_to_id(limit, offset, asc) + atomicals_num_to_id_map_reformatted = {} + for num, id in atomicals_num_to_id_map.items(): + atomicals_num_to_id_map_reformatted[num] = location_id_bytes_to_compact(id) + return {'global': await self.get_summary_info(), 'result': atomicals_num_to_id_map_reformatted } + + async def atomicals_block_hash(self, height): + if not height: + height = self.session_mgr.bp.height + block_hash = self.db.get_atomicals_block_hash(height) + return {'result': block_hash} + + async def atomicals_block_txs(self, height): + tx_list = self.session_mgr.bp.get_atomicals_block_txs(height) + return {'global': await self.get_summary_info(), 'result': tx_list } + + async def hashX_subscribe(self, hashX, alias): + # Store the subscription only after address_status succeeds + result = await self.address_status(hashX) + self.hashX_subs[hashX] = alias + return result + + async def get_balance(self, hashX): + utxos = await self.db.all_utxos(hashX) + confirmed = sum(utxo.value for utxo in utxos) + unconfirmed = await self.mempool.balance_delta(hashX) + self.bump_cost(1.0 + len(utxos) / 50) + return {'confirmed': confirmed, 'unconfirmed': unconfirmed} + + async def scripthash_get_balance(self, scripthash): + '''Return the confirmed and unconfirmed balance of a scripthash.''' + hashX = scripthash_to_hashX(scripthash) + return await self.get_balance(hashX) + + async def unconfirmed_history(self, hashX): + # Note unconfirmed history is unordered in electrum-server + # height is -1 if it has unconfirmed 
inputs, otherwise 0 + result = [{'tx_hash': hash_to_hex_str(tx.hash), + 'height': -tx.has_unconfirmed_inputs, + 'fee': tx.fee} + for tx in await self.mempool.transaction_summaries(hashX)] + self.bump_cost(0.25 + len(result) / 50) + return result + + async def confirmed_and_unconfirmed_history(self, hashX): + # Note history is ordered but unconfirmed is unordered in e-s + history, cost = await self.session_mgr.limited_history(hashX) + self.bump_cost(cost) + conf = [{'tx_hash': hash_to_hex_str(tx_hash), 'height': height} + for tx_hash, height in history] + return conf + await self.unconfirmed_history(hashX) + + async def atomicals_listscripthash(self, scripthash, Verbose=False): + '''Return the list of Atomical UTXOs for an address''' + hashX = scripthash_to_hashX(scripthash) + return await self.hashX_listscripthash_atomicals(hashX, Verbose) + + async def atomicals_list(self, offset, limit, asc): + '''Return the list of atomicals order by reverse atomical number''' + return await self.atomicals_list_get(offset, limit, asc) + + async def atomicals_get(self, compact_atomical_id_or_atomical_number): + compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) + return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get(compact_atomical_id)} + + async def atomicals_dump(self): + if True: + self.db.dump() + return {'result': True} + else: + return {'result': False} + + async def atomicals_get_dft_mints(self, compact_atomical_id, limit=100, offset=0): + atomical_id = compact_to_location_id_bytes(compact_atomical_id) + entries = self.session_mgr.bp.get_distmints_by_atomical_id(atomical_id, limit, offset) + return {'global': await self.get_summary_info(), 'result': entries} + + async def atomicals_get_ft_info(self, compact_atomical_id_or_atomical_number): + compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) + return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_ft_info(compact_atomical_id)} + + async def atomicals_get_global(self, hashes=10): + return {'global': await self.get_summary_info(hashes)} + + async def atomicals_get_location(self, compact_atomical_id_or_atomical_number): + compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) + return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_location(compact_atomical_id)} + + async def atomical_get_state(self, compact_atomical_id_or_atomical_number, Verbose=False): + compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) + return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_state(compact_atomical_id, Verbose)} + + async def atomical_get_state_history(self, compact_atomical_id_or_atomical_number): + compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) + return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_state_history(compact_atomical_id)} + + async def atomical_get_events(self, compact_atomical_id_or_atomical_number): + compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) + return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_events(compact_atomical_id)} + + def atomical_resolve_id(self, compact_atomical_id_or_atomical_number): + compact_atomical_id = compact_atomical_id_or_atomical_number + if not isinstance(compact_atomical_id_or_atomical_number, int) and 
is_compact_atomical_id(compact_atomical_id_or_atomical_number):
+            assert_atomical_id(compact_atomical_id)
+        else:
+            found_atomical_id = self.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number)
+            if not found_atomical_id:
+                raise RPCError(BAD_REQUEST, f'atomical not found: {compact_atomical_id_or_atomical_number}')
+            compact_atomical_id = location_id_bytes_to_compact(found_atomical_id)
+        return compact_atomical_id
+
+    async def atomicals_get_tx_history(self, compact_atomical_id_or_atomical_number):
+        '''Return the history of an Atomical.
+
+        atomical_id: the mint transaction hash + 'i' of the atomical id
+        verbose: determines whether to print extended information
+        '''
+        compact_atomical_id = compact_atomical_id_or_atomical_number
+        if not isinstance(compact_atomical_id_or_atomical_number, int) and is_compact_atomical_id(compact_atomical_id_or_atomical_number):
+            assert_atomical_id(compact_atomical_id)
+        else:
+            compact_atomical_id = location_id_bytes_to_compact(self.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number))
+        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_tx_history(compact_atomical_id)}
+
+    async def atomicals_get_by_ticker(self, ticker):
+        height = self.session_mgr.bp.height
+        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_ticker(ticker, height)
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+
+        if candidate_atomical_id:
+            candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
+
+        found_atomical_id = None
+        if status == 'verified':
+            found_atomical_id = candidate_atomical_id
+
+        return_result = {
+            'status': status,
+            'candidate_atomical_id': candidate_atomical_id,
+            'atomical_id': found_atomical_id,
+            'candidates': formatted_entries,
+            'type': 'ticker'
+        }
+        return {
+            'result': return_result
+        }
+
+    async def atomicals_get_by_container(self, container):
+        if not isinstance(container, str):
+            raise RPCError(BAD_REQUEST, 'empty container')
+        height = self.session_mgr.bp.height
+        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, height)
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+
+        if candidate_atomical_id:
+            candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
+
+        found_atomical_id = None
+        if status == 'verified':
+            found_atomical_id = candidate_atomical_id
+
+        return_result = {
+            'status': status,
+            'candidate_atomical_id': candidate_atomical_id,
+            'atomical_id': found_atomical_id,
+            'candidates': formatted_entries,
+            'type': 'container'
+        }
+        return {
+            'result': return_result
+        }
+
+    def auto_populate_container_regular_items_fields(self, items):
+        if not items or not isinstance(items, dict):
+            return {}
+        for item, value in items.items():
+            provided_id = value.get('id')
+            value['status'] = 'verified'
+            if provided_id and isinstance(provided_id, bytes) and len(provided_id) == 36:
+                value['$id'] = location_id_bytes_to_compact(provided_id)
+        return auto_encode_bytes_elements(items)
+
+    def auto_populate_container_dmint_items_fields(self, items):
+        if not items or not isinstance(items, dict):
+            return {}
+        for item, value in items.items():
+            provided_id = value.get('id')
+            if provided_id and isinstance(provided_id, bytes) and len(provided_id) == 36:
+                value['$id'] = 
location_id_bytes_to_compact(provided_id) + return auto_encode_bytes_elements(items) + + async def atomicals_get_container_items(self, container, limit, offset): + if not isinstance(container, str): + raise RPCError(BAD_REQUEST, f'empty container') + status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, self.session_mgr.bp.height) + found_atomical_id = None + if status == 'verified': + found_atomical_id = candidate_atomical_id + else: + raise RPCError(BAD_REQUEST, f'Container not found') + + compact_atomical_id = location_id_bytes_to_compact(found_atomical_id) + container_info = await self.atomical_id_get(compact_atomical_id) + # If it is a dmint container then there is no items field, instead construct it from the dmitems + container_dmint_status = container_info.get('$container_dmint_status') + items = [] + if container_dmint_status: + if limit > 100: + limit = 100 + if offset < 0: + offset = 0 + height = self.session_mgr.bp.height + items = await self.session_mgr.bp.get_effective_dmitems_paginated(found_atomical_id, limit, offset, height) + return { + 'result': { + 'container': container_info, + 'item_data': { + 'limit': limit, + 'offset': offset, + 'type': 'dmint', + 'items': self.auto_populate_container_dmint_items_fields(items) + } + } + } + else: + container_mod_history = self.session_mgr.bp.get_mod_history(found_atomical_id, self.session_mgr.bp.height) + current_height_latest_state = calculate_latest_state_from_mod_history(container_mod_history) + items = current_height_latest_state.get('items', []) + return { + 'result': { + 'container': container_info, + 'item_data': { + 'limit': limit, + 'offset': offset, + 'type': 'regular', + 'items': self.auto_populate_container_regular_items_fields(items) + } + } + } + + async def atomicals_get_by_container_item(self, container, item_name): + if not isinstance(container, str): + raise RPCError(BAD_REQUEST, f'empty container') + height = self.session_mgr.bp.height + status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, height) + found_atomical_id = None + formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries)) + if status == 'verified': + found_atomical_id = candidate_atomical_id + else: + self.logger.info(f'formatted_entries {formatted_entries}') + raise RPCError(BAD_REQUEST, f'Container does not exist') + status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(found_atomical_id, item_name, height) + found_item_atomical_id = None + formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries)) + if candidate_atomical_id: + candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) + if status == 'verified': + found_item_atomical_id = candidate_atomical_id + return_result = { + 'status': status, + 'candidate_atomical_id': candidate_atomical_id, + 'atomical_id': found_item_atomical_id, + 'candidates': formatted_entries, + 'type': 'item' + } + return { + 'result': return_result + } + + async def atomicals_get_by_container_item_validation(self, container, item_name, bitworkc, bitworkr, main_name, main_hash, proof, check_without_sealed): + if not isinstance(container, str): + raise RPCError(BAD_REQUEST, f'empty container') + height = self.session_mgr.bp.height + status, candidate_atomical_id, all_entries = 
self.session_mgr.bp.get_effective_container(container, height)
+        found_parent_atomical_id = None
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        if status == 'verified':
+            found_parent_atomical_id = candidate_atomical_id
+        else:
+            raise RPCError(BAD_REQUEST, 'Container does not exist')
+        compact_atomical_id = location_id_bytes_to_compact(found_parent_atomical_id)
+        container_info = await self.atomical_id_get(compact_atomical_id)
+        # If it is a dmint container then there is no items field; instead construct it from the dmitems.
+        container_dmint_status = container_info.get('$container_dmint_status')
+        # Check the status exists before dereferencing it; otherwise a missing
+        # status would raise AttributeError instead of a clean RPC error.
+        if not container_dmint_status:
+            raise RPCError(BAD_REQUEST, 'Container dmint status does not exist')
+        if container_dmint_status.get('status') != 'valid':
+            errors = container_dmint_status.get('errors')
+            if check_without_sealed and errors and len(errors) == 1 and errors[0] == 'container not sealed':
+                pass
+            else:
+                raise RPCError(BAD_REQUEST, 'Container dmint status is invalid')
+
+        dmint = container_dmint_status.get('dmint')
+        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(found_parent_atomical_id, item_name, height)
+        found_item_atomical_id = None
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+        if candidate_atomical_id:
+            candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
+        if status == 'verified':
+            found_item_atomical_id = candidate_atomical_id
+
+        # Validate the proof data nonetheless.
+        if not proof or not isinstance(proof, list) or len(proof) == 0:
+            raise RPCError(BAD_REQUEST, 'Proof must be provided')
+
+        applicable_rule, state_at_height = self.session_mgr.bp.get_applicable_rule_by_height(found_parent_atomical_id, item_name, height - MINT_SUBNAME_RULES_BECOME_EFFECTIVE_IN_BLOCKS, DMINT_PATH)
+        proof_valid, target_vector, target_hash = validate_merkle_proof_dmint(dmint['merkle'], item_name, bitworkc, bitworkr, main_name, main_hash, proof)
+        if applicable_rule and applicable_rule.get('matched_rule'):
+            applicable_rule = applicable_rule.get('matched_rule')
+
+        return_result = {
+            'status': status,
+            'candidate_atomical_id': candidate_atomical_id,
+            'atomical_id': found_item_atomical_id,
+            'candidates': formatted_entries,
+            'type': 'item',
+            'applicable_rule': applicable_rule,
+            'proof_valid': proof_valid,
+            'target_vector': target_vector,
+            'target_hash': target_hash,
+            'dmint': state_at_height.get('dmint')
+        }
+        return {
+            'result': return_result
+        }
+
+    async def atomicals_get_by_realm(self, name):
+        height = self.session_mgr.bp.height
+        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_realm(name, height)
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+
+        if candidate_atomical_id:
+            candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
+
+        found_atomical_id = None
+        if status == 'verified':
+            found_atomical_id = candidate_atomical_id
+
+        return_result = {
+            'status': status,
+            'candidate_atomical_id': candidate_atomical_id,
+            'atomical_id': found_atomical_id,
+            'candidates': formatted_entries,
+            'type': 'realm'
+        }
+        return {
+            'result': return_result
+        }
+
+    async def atomicals_get_by_subrealm(self, parent_compact_atomical_id_or_atomical_number, 
name):
+        height = self.session_mgr.bp.height
+        compact_atomical_id_parent = self.atomical_resolve_id(parent_compact_atomical_id_or_atomical_number)
+        atomical_id_parent = compact_to_location_id_bytes(compact_atomical_id_parent)
+        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_subrealm(atomical_id_parent, name, height)
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+
+        if candidate_atomical_id:
+            candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
+
+        found_atomical_id = None
+        if status == 'verified':
+            found_atomical_id = candidate_atomical_id
+
+        return_result = {
+            'status': status,
+            'candidate_atomical_id': candidate_atomical_id,
+            'atomical_id': found_atomical_id,
+            'candidates': formatted_entries,
+            'type': 'subrealm'
+        }
+        return {
+            'result': return_result
+        }
+
+    async def atomicals_get_by_dmitem(self, parent_compact_atomical_id_or_atomical_number, name):
+        height = self.session_mgr.bp.height
+        compact_atomical_id_parent = self.atomical_resolve_id(parent_compact_atomical_id_or_atomical_number)
+        atomical_id_parent = compact_to_location_id_bytes(compact_atomical_id_parent)
+        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(atomical_id_parent, name, height)
+        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
+
+        if candidate_atomical_id:
+            candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
+
+        found_atomical_id = None
+        if status == 'verified':
+            found_atomical_id = candidate_atomical_id
+
+        return_result = {
+            'status': status,
+            'candidate_atomical_id': candidate_atomical_id,
+            'atomical_id': found_atomical_id,
+            'candidates': formatted_entries,
+            'type': 'dmitem'
+        }
+        return {
+            'result': return_result
+        }
+
+    # Get a summary view of a realm: whether it allows mints, and which parts of a subrealm already exist.
+    async def atomicals_get_realm_info(self, full_name, Verbose=False):
+        if not full_name or not isinstance(full_name, str):
+            raise RPCError(BAD_REQUEST, f'invalid input full_name: {full_name}')
+        full_name = full_name.lower()
+        split_names = full_name.split('.')
+        total_name_parts = len(split_names)
+        level = 0
+        last_found_realm_atomical_id = None
+        last_found_realm = None
+        realms_path = []
+        latest_all_entries_candidates = []
+        height = self.session_mgr.bp.height
+        for name_part in split_names:
+            if level == 0:
+                realm_status, last_found_realm, latest_all_entries_candidates = self.session_mgr.bp.get_effective_realm(name_part, height)
+            else:
+                self.logger.info(f'atomicals_get_realm_info {last_found_realm} {name_part}')
+                realm_status, last_found_realm, latest_all_entries_candidates = self.session_mgr.bp.get_effective_subrealm(last_found_realm, name_part, height)
+            # Stop when a realm component is not found.
+            if realm_status != 'verified':
+                break
+            # Save the latest realm (could be the top-level realm, the parent of a subrealm, or the subrealm itself).
+            last_found_realm_atomical_id = last_found_realm
+            # Add it to the list of paths.
+            realms_path.append({
+                'atomical_id': location_id_bytes_to_compact(last_found_realm),
+                'name_part': name_part,
+                'candidates': latest_all_entries_candidates
+            })
+            level += 1
+
+        joined_name = ''
+        is_first_name_part = True
+        for name_element in realms_path:
+            if is_first_name_part:
+                is_first_name_part = False
+            else:
+                
joined_name += '.' + joined_name += name_element['name_part'] + # Nothing was found + realms_path_len = len(realms_path) + if realms_path_len == 0: + return {'result': { + 'atomical_id': None, + 'top_level_realm_atomical_id': None, + 'top_level_realm_name': None, + 'nearest_parent_realm_atomical_id': None, + 'nearest_parent_realm_name': None, + 'request_full_realm_name': full_name, + 'found_full_realm_name': None, + 'missing_name_parts': full_name, + 'candidates': format_name_type_candidates_to_rpc(latest_all_entries_candidates, self.session_mgr.bp.build_atomical_id_to_candidate_map(latest_all_entries_candidates)) } + } + # Populate the subrealm minting rules for a parent atomical + that = self + def populate_rules_response_struct(parent_atomical_id, struct_to_populate, Verbose): + current_height = that.session_mgr.bp.height + subrealm_mint_mod_history = that.session_mgr.bp.get_mod_history(parent_atomical_id, current_height) + current_height_latest_state = calculate_latest_state_from_mod_history(subrealm_mint_mod_history) + current_height_rules_list = validate_rules_data(current_height_latest_state.get(SUBREALM_MINT_PATH, None)) + nearest_parent_realm_subrealm_mint_allowed = False + struct_to_populate['nearest_parent_realm_subrealm_mint_rules'] = { + 'nearest_parent_realm_atomical_id': location_id_bytes_to_compact(parent_atomical_id), + 'current_height': current_height, + 'current_height_rules': current_height_rules_list + } + if current_height_rules_list and len(current_height_rules_list) > 0: + nearest_parent_realm_subrealm_mint_allowed = True + struct_to_populate['nearest_parent_realm_subrealm_mint_allowed'] = nearest_parent_realm_subrealm_mint_allowed + # + # + # + # At least the top level realm was found if we got this far + # + # + # The number of realms returned and name components is equal, therefore the subrealm was found correctly + if realms_path_len == total_name_parts: + nearest_parent_realm_atomical_id = None + nearest_parent_realm_name = None + top_level_realm = realms_path[0]['atomical_id'] + top_level_realm_name = realms_path[0]['name_part'] + if realms_path_len >= 2: + nearest_parent_realm_atomical_id = realms_path[-2]['atomical_id'] + nearest_parent_realm_name = realms_path[-2]['name_part'] + elif realms_path_len == 1: + nearest_parent_realm_atomical_id = top_level_realm + nearest_parent_realm_name = top_level_realm_name + final_subrealm_name = split_names[-1] + applicable_rule_map = self.session_mgr.bp.build_applicable_rule_map(latest_all_entries_candidates, compact_to_location_id_bytes(nearest_parent_realm_atomical_id), final_subrealm_name) + return_struct = { + 'atomical_id': realms_path[-1]['atomical_id'], + 'top_level_realm_atomical_id': top_level_realm, + 'top_level_realm_name': top_level_realm_name, + 'nearest_parent_realm_atomical_id': nearest_parent_realm_atomical_id, + 'nearest_parent_realm_name': nearest_parent_realm_name, + 'request_full_realm_name': full_name, + 'found_full_realm_name': joined_name, + 'missing_name_parts': None, + 'candidates': format_name_type_candidates_to_rpc(latest_all_entries_candidates, self.session_mgr.bp.build_atomical_id_to_candidate_map(latest_all_entries_candidates)) + } + populate_rules_response_struct(compact_to_location_id_bytes(nearest_parent_realm_atomical_id), return_struct, Verbose) + return {'result': return_struct} + + # The number of realms and components do not match, that is because at least the top level realm or intermediate subrealm was found + # But the final subrealm does not exist yet + # if realms_path_len < 
total_name_parts:
+        # It is known if we got this far that realms_path_len < total_name_parts
+        nearest_parent_realm_atomical_id = None
+        nearest_parent_realm_name = None
+        top_level_realm = realms_path[0]['atomical_id']
+        top_level_realm_name = realms_path[0]['name_part']
+        if realms_path_len >= 2:
+            nearest_parent_realm_atomical_id = realms_path[-1]['atomical_id']
+            nearest_parent_realm_name = realms_path[-1]['name_part']
+        elif realms_path_len == 1:
+            nearest_parent_realm_atomical_id = top_level_realm
+            nearest_parent_realm_name = top_level_realm_name
+
+        missing_name_parts = '.'.join(split_names[len(realms_path):])
+        final_subrealm_name = split_names[-1]
+        applicable_rule_map = self.session_mgr.bp.build_applicable_rule_map(latest_all_entries_candidates, compact_to_location_id_bytes(nearest_parent_realm_atomical_id), final_subrealm_name)
+        return_struct = {
+            'atomical_id': None,
+            'top_level_realm_atomical_id': top_level_realm,
+            'top_level_realm_name': top_level_realm_name,
+            'nearest_parent_realm_atomical_id': nearest_parent_realm_atomical_id,
+            'nearest_parent_realm_name': nearest_parent_realm_name,
+            'request_full_realm_name': full_name,
+            'found_full_realm_name': joined_name,
+            'missing_name_parts': missing_name_parts,
+            'final_subrealm_name': final_subrealm_name,
+            'candidates': format_name_type_candidates_to_rpc_for_subname(latest_all_entries_candidates, self.session_mgr.bp.build_atomical_id_to_candidate_map(latest_all_entries_candidates))
+        }
+        if Verbose:
+            populate_rules_response_struct(compact_to_location_id_bytes(nearest_parent_realm_atomical_id), return_struct, Verbose)
+        return {'result': return_struct}
+
+    # Perform a search for tickers, containers, and realms
+    def atomicals_search_name_template(self, db_prefix, name_type_str, parent_prefix=None, prefix=None, Reverse=False, Limit=1000, Offset=0, is_verified_only=False):
+        db_entries = self.db.get_name_entries_template_limited(db_prefix, parent_prefix, prefix, Reverse, Limit, Offset)
+        formatted_results = []
+        for item in db_entries:
+            if name_type_str == "ticker":
+                status, _, _ = self.session_mgr.bp.get_effective_name_template(b'tick', item['name'], self.session_mgr.bp.height, self.session_mgr.bp.ticker_data_cache)
+            elif name_type_str == "realm":
+                status, _, _ = self.session_mgr.bp.get_effective_name_template(b'rlm', item['name'], self.session_mgr.bp.height, self.session_mgr.bp.realm_data_cache)
+            elif name_type_str == "collection":
+                status, _, _ = self.session_mgr.bp.get_effective_name_template(b'co', item['name'], self.session_mgr.bp.height, self.session_mgr.bp.container_data_cache)
+            elif name_type_str == "subrealm":
+                status, _, _ = self.session_mgr.bp.get_effective_subrealm(parent_prefix, item['name'], self.session_mgr.bp.height)
+            obj = {
+                'atomical_id': location_id_bytes_to_compact(item['atomical_id']),
+                'tx_num': item['tx_num']
+            }
+            obj[name_type_str + '_hex'] = item['name_hex']
+            obj[name_type_str] = item['name']
+            obj['status'] = status
+            if is_verified_only and status == "verified":
+                formatted_results.append(obj)
+            elif not is_verified_only:
+                formatted_results.append(obj)
+        return {'result': formatted_results}
+
+    async def atomicals_search_tickers(self, prefix=None, Reverse=False, Limit=100, Offset=0, is_verified_only=False):
+        if isinstance(prefix, str):
+            prefix = prefix.encode()
+        return self.atomicals_search_name_template(b'tick', 'ticker', None, prefix, Reverse, Limit, Offset, is_verified_only)
+
+    async def atomicals_search_realms(self, prefix=None, Reverse=False, Limit=100, Offset=0, is_verified_only=False):
+        if isinstance(prefix, str):
+            prefix = prefix.encode()
+        return self.atomicals_search_name_template(b'rlm', 'realm', None, prefix, Reverse, Limit, Offset, is_verified_only)
+
+    async def atomicals_search_subrealms(self, parent_realm_id_compact, prefix=None, Reverse=False, Limit=100, Offset=0, is_verified_only=False):
+        parent_realm_id_long_form = compact_to_location_id_bytes(parent_realm_id_compact)
+        if isinstance(prefix, str):
+            prefix = prefix.encode()
+        return self.atomicals_search_name_template(b'srlm', 'subrealm', parent_realm_id_long_form, prefix, Reverse, Limit, Offset, is_verified_only)
+
+    async def atomicals_search_containers(self, prefix=None, Reverse=False, Limit=100, Offset=0, is_verified_only=False):
+        if isinstance(prefix, str):
+            prefix = prefix.encode()
+        return self.atomicals_search_name_template(b'co', 'collection', None, prefix, Reverse, Limit, Offset, is_verified_only)
+
+    async def atomicals_at_location(self, compact_location_id):
+        '''Return the Atomicals at a specific location id.'''
+        atomical_basic_infos = []
+        atomicals_found_at_location = self.db.get_atomicals_by_location_extended_info_long_form(compact_to_location_id_bytes(compact_location_id))
+        for atomical_id in atomicals_found_at_location['atomicals']:
+            atomical_basic_info = self.session_mgr.bp.get_atomicals_id_mint_info_basic_struct(atomical_id)
+            atomical_basic_info['value'] = self.db.get_uxto_atomicals_value(compact_to_location_id_bytes(compact_location_id), atomical_id)
+            atomical_basic_infos.append(atomical_basic_info)
+        return {
+            'location_info': atomicals_found_at_location['location_info'],
+            'atomicals': atomical_basic_infos
+        }
+
+    async def atomicals_get_ft_balances(self, scripthash):
+        '''Return the FT balances for a scripthash address'''
+        hashX = scripthash_to_hashX(scripthash)
+        return await self.hashX_ft_balances_atomicals(hashX)
+
+    async def atomicals_get_nft_balances(self, scripthash):
+        '''Return the NFT balances for a scripthash address'''
+        hashX = scripthash_to_hashX(scripthash)
+        return await self.hashX_nft_balances_atomicals(hashX)
+
+    async def atomicals_get_holders(self, compact_atomical_id, limit=50, offset=0):
+        '''Return the holders of an Atomical by its atomical id.'''
+        formatted_results = []
+        atomical_id = compact_to_location_id_bytes(compact_atomical_id)
+        atomical = await self.atomical_id_get(compact_atomical_id)
+        atomical = await self.db.populate_extended_atomical_holder_info(atomical_id, atomical)
+        if atomical["type"] == "FT":
+            if atomical.get("$mint_mode", "fixed") == "fixed":
+                max_supply = atomical.get('$max_supply', 0)
+            else:
+                max_supply = atomical.get('$max_supply', -1)
+                if max_supply < 0:
+                    mint_amount = atomical.get("mint_info", {}).get("args", {}).get("mint_amount")
+                    max_supply = DFT_MINT_MAX_MAX_COUNT_DENSITY * mint_amount
+            for holder in atomical.get("holders", [])[offset:offset+limit]:
+                percent = holder['holding'] / max_supply
+                formatted_results.append({
+                    "percent": percent,
+                    "address": get_address_from_output_script(bytes.fromhex(holder['script'])),
+                    "holding": holder["holding"]
+                })
+        elif atomical["type"] == "NFT":
+            for holder in atomical.get("holders", [])[offset:offset+limit]:
+                formatted_results.append({
+                    "address": get_address_from_output_script(bytes.fromhex(holder['script'])),
+                    "holding": holder["holding"]
+                })
+        return formatted_results
+
+    async def hashX_ft_balances_atomicals(self, hashX):
+        utxos = await self.db.all_utxos(hashX)
+        utxos = sorted(utxos)
+        # Comment out the utxos for now and add it in
later + # utxos.extend(await self.mempool.unordered_UTXOs(hashX)) + self.bump_cost(1.0 + len(utxos) / 50) + spends = [] # await self.mempool.potential_spends(hashX) + returned_utxos = [] + atomicals_id_map = {} + for utxo in utxos: + if (utxo.tx_hash, utxo.tx_pos) in spends: + continue + atomicals = self.db.get_atomicals_by_utxo(utxo, True) + atomicals_basic_infos = {} + for atomical_id in atomicals: + # This call is efficient in that it's cached underneath. + # Now we only show the atomical id and its corresponding value + # because it can always be fetched separately which is more efficient. + atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) + atomical_id_compact = location_id_bytes_to_compact(atomical_id) + atomicals_id_map[atomical_id_compact] = atomical_basic_info + location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) + atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id) + if len(atomicals) > 0: + returned_utxos.append({ + 'txid': hash_to_hex_str(utxo.tx_hash), + 'index': utxo.tx_pos, + 'vout': utxo.tx_pos, + 'height': utxo.height, + 'value': utxo.value, + 'atomicals': atomicals_basic_infos + }) + # Aggregate balances + return_struct = { + 'balances': {} + } + for returned_utxo in returned_utxos: + for atomical_id_entry_compact in returned_utxo['atomicals']: + atomical_id_basic_info = atomicals_id_map[atomical_id_entry_compact] + atomical_id_compact = atomical_id_basic_info['atomical_id'] + assert (atomical_id_compact == atomical_id_entry_compact) + if atomical_id_basic_info.get('type') == 'FT': + if return_struct['balances'].get(atomical_id_compact) is None: + return_struct['balances'][atomical_id_compact] = {} + return_struct['balances'][atomical_id_compact]['id'] = atomical_id_compact + return_struct['balances'][atomical_id_compact]['ticker'] = atomical_id_basic_info.get('$ticker') + return_struct['balances'][atomical_id_compact]['confirmed'] = 0 + if returned_utxo['height'] > 0: + return_struct['balances'][atomical_id_compact]['confirmed'] += returned_utxo['atomicals'][atomical_id_compact] + return return_struct + + async def hashX_nft_balances_atomicals(self, hashX): + utxos = await self.db.all_utxos(hashX) + utxos = sorted(utxos) + # Comment out the utxos for now and add it in later + # utxos.extend(await self.mempool.unordered_UTXOs(hashX)) + self.bump_cost(1.0 + len(utxos) / 50) + spends = [] # await self.mempool.potential_spends(hashX) + returned_utxos = [] + atomicals_id_map = {} + for utxo in utxos: + if (utxo.tx_hash, utxo.tx_pos) in spends: + continue + atomicals = self.db.get_atomicals_by_utxo(utxo, True) + atomicals_basic_infos = {} + for atomical_id in atomicals: + # This call is efficient in that it's cached underneath. + # Now we only show the atomical id and its corresponding value + # because it can always be fetched separately which is more efficient. 
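+                # For illustration (hypothetical id and value): the per-UTXO map built
+                # below keys the compact atomical id -- 64 hex chars, an 'i' separator,
+                # then the output index, exactly as produced by
+                # location_id_bytes_to_compact() -- to the atomical value held at this
+                # UTXO, e.g.:
+                #   {'f3b2...9ai0': 546}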
+ atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) + atomical_id_compact = location_id_bytes_to_compact(atomical_id) + atomicals_id_map[atomical_id_compact] = atomical_basic_info + location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) + atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id) + if len(atomicals) > 0: + returned_utxos.append({ + 'txid': hash_to_hex_str(utxo.tx_hash), + 'index': utxo.tx_pos, + 'vout': utxo.tx_pos, + 'height': utxo.height, + 'value': utxo.value, + 'atomicals': atomicals_basic_infos + }) + # Aggregate balances + return_struct = { + 'balances': {} + } + for returned_utxo in returned_utxos: + for atomical_id_entry_compact in returned_utxo['atomicals']: + atomical_id_basic_info = atomicals_id_map[atomical_id_entry_compact] + atomical_id_compact = atomical_id_basic_info['atomical_id'] + assert(atomical_id_compact == atomical_id_entry_compact) + if atomical_id_basic_info.get('type') == 'NFT': + if return_struct['balances'].get(atomical_id_compact) is None: + return_struct['balances'][atomical_id_compact] = {} + return_struct['balances'][atomical_id_compact]['id'] = atomical_id_compact + return_struct['balances'][atomical_id_compact]['confirmed'] = 0 + if atomical_id_basic_info.get('subtype'): + return_struct['balances'][atomical_id_compact]['subtype'] = atomical_id_basic_info.get('subtype') + if atomical_id_basic_info.get('$request_container'): + return_struct['balances'][atomical_id_compact]['request_container'] = atomical_id_basic_info.get('$request_container') + if atomical_id_basic_info.get('$container'): + return_struct['balances'][atomical_id_compact]['container'] = atomical_id_basic_info.get('$container') + if atomical_id_basic_info.get('$dmitem'): + return_struct['balances'][atomical_id_compact]['dmitem'] = atomical_id_basic_info.get('$dmitem') + if atomical_id_basic_info.get('$request_dmitem'): + return_struct['balances'][atomical_id_compact]['request_dmitem'] = atomical_id_basic_info.get('$request_dmitem') + if atomical_id_basic_info.get('$realm'): + return_struct['balances'][atomical_id_compact]['realm'] = atomical_id_basic_info.get('$realm') + if atomical_id_basic_info.get('$request_realm'): + return_struct['balances'][atomical_id_compact]['request_realm'] = atomical_id_basic_info.get('$request_realm') + if atomical_id_basic_info.get('$subrealm'): + return_struct['balances'][atomical_id_compact]['subrealm'] = atomical_id_basic_info.get('$subrealm') + if atomical_id_basic_info.get('$request_subrealm'): + return_struct['balances'][atomical_id_compact]['request_subrealm'] = atomical_id_basic_info.get('$request_subrealm') + if atomical_id_basic_info.get('$full_realm_name'): + return_struct['balances'][atomical_id_compact]['full_realm_name'] = atomical_id_basic_info.get('$full_realm_name') + if atomical_id_basic_info.get('$parent_container'): + return_struct['balances'][atomical_id_compact]['parent_container'] = atomical_id_basic_info.get('$parent_container') + if atomical_id_basic_info.get('$parent_realm'): + return_struct['balances'][atomical_id_compact]['parent_realm'] = atomical_id_basic_info.get('$parent_realm') + if atomical_id_basic_info.get('$parent_container_name'): + return_struct['balances'][atomical_id_compact]['parent_container_name'] = atomical_id_basic_info.get('$parent_container_name') + if atomical_id_basic_info.get('$bitwork'): + return_struct['balances'][atomical_id_compact]['bitwork'] = atomical_id_basic_info.get('$bitwork') + if 
atomical_id_basic_info.get('$parents'): + return_struct['balances'][atomical_id_compact]['parents'] = atomical_id_basic_info.get('$parents') + if returned_utxo['height'] > 0: + return_struct['balances'][atomical_id_compact]['confirmed'] += returned_utxo['atomicals'][atomical_id_compact] + return return_struct + + async def hashX_listscripthash_atomicals(self, hashX, Verbose=False): + utxos = await self.db.all_utxos(hashX) + utxos = sorted(utxos) + # Comment out the utxos for now and add it in later + # utxos.extend(await self.mempool.unordered_UTXOs(hashX)) + self.bump_cost(1.0 + len(utxos) / 50) + spends = [] # await self.mempool.potential_spends(hashX) + returned_utxos = [] + atomicals_id_map = {} + for utxo in utxos: + if (utxo.tx_hash, utxo.tx_pos) in spends: + continue + atomicals = self.db.get_atomicals_by_utxo(utxo, True) + atomicals_basic_infos = {} + for atomical_id in atomicals: + # This call is efficient in that it's cached underneath. + # Now we only show the atomical id and its corresponding value + # because it can always be fetched separately which is more efficient. + atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) + atomical_id_compact = location_id_bytes_to_compact(atomical_id) + atomicals_id_map[atomical_id_compact] = atomical_basic_info + location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) + atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id) + if Verbose or len(atomicals) > 0: + returned_utxos.append({ + 'txid': hash_to_hex_str(utxo.tx_hash), + 'index': utxo.tx_pos, + 'vout': utxo.tx_pos, + 'height': utxo.height, + 'value': utxo.value, + 'atomicals': atomicals_basic_infos + }) + # Aggregate balances + return_struct = { + 'global': await self.get_summary_info(), + 'atomicals': {}, + 'utxos': returned_utxos + } + + for returned_utxo in returned_utxos: + for atomical_id_entry_compact in returned_utxo['atomicals']: + atomical_id_basic_info = atomicals_id_map[atomical_id_entry_compact] + atomical_id_ref = atomical_id_basic_info['atomical_id'] + if return_struct['atomicals'].get(atomical_id_ref) is None: + return_struct['atomicals'][atomical_id_ref] = { + 'atomical_id': atomical_id_ref, + 'atomical_number': atomical_id_basic_info['atomical_number'], + 'type': atomical_id_basic_info['type'], + 'confirmed': 0, + # 'subtype': atomical_id_basic_info.get('subtype'), + 'data': atomical_id_basic_info + } + if atomical_id_basic_info.get('$realm'): + return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') + return_struct['atomicals'][atomical_id_ref]['request_realm_status'] = atomical_id_basic_info.get('$request_realm_status') + return_struct['atomicals'][atomical_id_ref]['request_realm'] = atomical_id_basic_info.get('$request_realm') + return_struct['atomicals'][atomical_id_ref]['realm'] = atomical_id_basic_info.get('$realm') + return_struct['atomicals'][atomical_id_ref]['full_realm_name'] = atomical_id_basic_info.get('$full_realm_name') + elif atomical_id_basic_info.get('$subrealm'): + return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') + return_struct['atomicals'][atomical_id_ref]['request_subrealm_status'] = atomical_id_basic_info.get('$request_subrealm_status') + return_struct['atomicals'][atomical_id_ref]['request_subrealm'] = atomical_id_basic_info.get('$request_subrealm') + return_struct['atomicals'][atomical_id_ref]['parent_realm'] = atomical_id_basic_info.get('$parent_realm') + 
return_struct['atomicals'][atomical_id_ref]['subrealm'] = atomical_id_basic_info.get('$subrealm') + return_struct['atomicals'][atomical_id_ref]['full_realm_name'] = atomical_id_basic_info.get('$full_realm_name') + elif atomical_id_basic_info.get('$dmitem'): + return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') + return_struct['atomicals'][atomical_id_ref]['request_dmitem_status'] = atomical_id_basic_info.get('$request_dmitem_status') + return_struct['atomicals'][atomical_id_ref]['request_dmitem'] = atomical_id_basic_info.get('$request_dmitem') + return_struct['atomicals'][atomical_id_ref]['parent_container'] = atomical_id_basic_info.get('$parent_container') + return_struct['atomicals'][atomical_id_ref]['dmitem'] = atomical_id_basic_info.get('$dmitem') + elif atomical_id_basic_info.get('$ticker'): + return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') + return_struct['atomicals'][atomical_id_ref]['ticker_candidates'] = atomical_id_basic_info.get('$ticker_candidates') + return_struct['atomicals'][atomical_id_ref]['request_ticker_status'] = atomical_id_basic_info.get('$request_ticker_status') + return_struct['atomicals'][atomical_id_ref]['request_ticker'] = atomical_id_basic_info.get('$request_ticker') + return_struct['atomicals'][atomical_id_ref]['ticker'] = atomical_id_basic_info.get('$ticker') + elif atomical_id_basic_info.get('$container'): + return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') + return_struct['atomicals'][atomical_id_ref]['request_container_status'] = atomical_id_basic_info.get('$request_container_status') + return_struct['atomicals'][atomical_id_ref]['container'] = atomical_id_basic_info.get('$container') + return_struct['atomicals'][atomical_id_ref]['request_container'] = atomical_id_basic_info.get('$request_container') + # Label them as candidates if they were candidates + elif atomical_id_basic_info.get('subtype') == 'request_realm': + return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') + return_struct['atomicals'][atomical_id_ref]['request_realm_status'] = atomical_id_basic_info.get('$request_realm_status') + return_struct['atomicals'][atomical_id_ref]['request_realm'] = atomical_id_basic_info.get('$request_realm') + return_struct['atomicals'][atomical_id_ref]['realm_candidates'] = atomical_id_basic_info.get('$realm_candidates') + elif atomical_id_basic_info.get('subtype') == 'request_subrealm': + return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') + return_struct['atomicals'][atomical_id_ref]['subrealm_candidates'] = atomical_id_basic_info.get('$subrealm_candidates') + return_struct['atomicals'][atomical_id_ref]['request_subrealm_status'] = atomical_id_basic_info.get('$request_subrealm_status') + return_struct['atomicals'][atomical_id_ref]['request_full_realm_name'] = atomical_id_basic_info.get('$request_full_realm_name') + return_struct['atomicals'][atomical_id_ref]['request_subrealm'] = atomical_id_basic_info.get('$request_subrealm') + return_struct['atomicals'][atomical_id_ref]['parent_realm'] = atomical_id_basic_info.get('$parent_realm') + elif atomical_id_basic_info.get('subtype') == 'request_dmitem': + return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') + return_struct['atomicals'][atomical_id_ref]['dmitem_candidates'] = atomical_id_basic_info.get('$dmitem_candidates') + 
return_struct['atomicals'][atomical_id_ref]['request_dmitem_status'] = atomical_id_basic_info.get('$request_dmitem_status')
+                    return_struct['atomicals'][atomical_id_ref]['request_dmitem'] = atomical_id_basic_info.get('$request_dmitem')
+                    return_struct['atomicals'][atomical_id_ref]['parent_container'] = atomical_id_basic_info.get('$parent_container')
+                elif atomical_id_basic_info.get('subtype') == 'request_container':
+                    return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype')
+                    return_struct['atomicals'][atomical_id_ref]['container_candidates'] = atomical_id_basic_info.get('$container_candidates')
+                    return_struct['atomicals'][atomical_id_ref]['request_container_status'] = atomical_id_basic_info.get('$request_container_status')
+                    return_struct['atomicals'][atomical_id_ref]['request_container'] = atomical_id_basic_info.get('$request_container')
+                elif atomical_id_basic_info.get('$request_ticker_status'):
+                    return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype')
+                    return_struct['atomicals'][atomical_id_ref]['ticker_candidates'] = atomical_id_basic_info.get('$ticker_candidates')
+                    return_struct['atomicals'][atomical_id_ref]['request_ticker_status'] = atomical_id_basic_info.get('$request_ticker_status')
+                    return_struct['atomicals'][atomical_id_ref]['request_ticker'] = atomical_id_basic_info.get('$request_ticker')
+
+                if returned_utxo['height'] <= 0:
+                    # 'unconfirmed' is not pre-initialized above (only 'confirmed' is),
+                    # so default it to 0 on first use to avoid a KeyError
+                    return_struct['atomicals'][atomical_id_ref]['unconfirmed'] = (
+                        return_struct['atomicals'][atomical_id_ref].get('unconfirmed', 0)
+                        + returned_utxo["atomicals"][atomical_id_ref])
+                else:
+                    return_struct['atomicals'][atomical_id_ref]['confirmed'] += returned_utxo["atomicals"][atomical_id_ref]
+
+        return return_struct
+
+    async def atomicals_get_tx(self, txids):
+        return await self.atomical_get_tx(txids)
+
+    async def scripthash_get_history(self, scripthash):
+        '''Return the confirmed and unconfirmed history of a scripthash.'''
+        hashX = scripthash_to_hashX(scripthash)
+        return await self.confirmed_and_unconfirmed_history(hashX)
+
+    async def scripthash_get_mempool(self, scripthash):
+        '''Return the mempool transactions touching a scripthash.'''
+        hashX = scripthash_to_hashX(scripthash)
+        return await self.unconfirmed_history(hashX)
+
+    async def scripthash_listunspent(self, scripthash):
+        '''Return the list of UTXOs of a scripthash.'''
+        hashX = scripthash_to_hashX(scripthash)
+        return await self.hashX_listunspent(hashX)
+
+    async def scripthash_subscribe(self, scripthash):
+        '''Subscribe to a script hash.
+ + scripthash: the SHA256 hash of the script to subscribe to''' + hashX = scripthash_to_hashX(scripthash) + return await self.hashX_subscribe(hashX, scripthash) + + async def scripthash_unsubscribe(self, scripthash): + '''Unsubscribe from a script hash.''' + self.bump_cost(0.1) + hashX = scripthash_to_hashX(scripthash) + return self.unsubscribe_hashX(hashX) is not None + + async def _merkle_proof(self, cp_height, height): + max_height = self.db.db_height + if not height <= cp_height <= max_height: + raise RPCError(BAD_REQUEST, + f'require header height {height:,d} <= ' + f'cp_height {cp_height:,d} <= ' + f'chain height {max_height:,d}') + branch, root = await self.db.header_branch_and_root(cp_height + 1, + height) + return { + 'branch': [hash_to_hex_str(elt) for elt in branch], + 'root': hash_to_hex_str(root), + } + + async def block_header(self, height, cp_height=0): + '''Return a raw block header as a hexadecimal string, or as a + dictionary with a merkle proof.''' + height = non_negative_integer(height) + cp_height = non_negative_integer(cp_height) + raw_header_hex = (await self.session_mgr.raw_header(height)).hex() + self.bump_cost(1.25 - (cp_height == 0)) + if cp_height == 0: + return raw_header_hex + result = {'header': raw_header_hex} + result.update(await self._merkle_proof(cp_height, height)) + return result + + async def block_headers(self, start_height, count, cp_height=0): + '''Return count concatenated block headers as hex for the main chain; + starting at start_height. + + start_height and count must be non-negative integers. At most + MAX_CHUNK_SIZE headers will be returned. + ''' + start_height = non_negative_integer(start_height) + count = non_negative_integer(count) + cp_height = non_negative_integer(cp_height) + cost = count / 50 + + max_size = self.MAX_CHUNK_SIZE + count = min(count, max_size) + headers, count = await self.db.read_headers(start_height, count) + result = {'hex': headers.hex(), 'count': count, 'max': max_size} + if count and cp_height: + cost += 1.0 + last_height = start_height + count - 1 + result.update(await self._merkle_proof(cp_height, last_height)) + self.bump_cost(cost) + return result + + def is_tor(self): + '''Try to detect if the connection is to a tor hidden service we are + running.''' + proxy_address = self.peer_mgr.proxy_address() + if not proxy_address: + return False + remote_addr = self.remote_address() + if not remote_addr: + return False + return remote_addr.host == proxy_address.host + + async def replaced_banner(self, banner): + network_info = await self.daemon_request('getnetworkinfo') + ni_version = network_info['version'] + major, minor = divmod(ni_version, 1000000) + minor, revision = divmod(minor, 10000) + revision //= 100 + daemon_version = f'{major:d}.{minor:d}.{revision:d}' + for pair in [ + ('$SERVER_VERSION', electrumx.version_short), + ('$SERVER_SUBVERSION', electrumx.version), + ('$DAEMON_VERSION', daemon_version), + ('$DAEMON_SUBVERSION', network_info['subversion']), + ('$DONATION_ADDRESS', self.env.donation_address), + ]: + banner = banner.replace(*pair) + return banner + + async def donation_address(self): + '''Return the donation address as a string, empty if there is none.''' + self.bump_cost(0.1) + return self.env.donation_address + + async def banner(self): + '''Return the server banner text.''' + banner = f'You are connected to an {electrumx.version} server.' 
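+        # A banner file, when configured, may use the placeholders expanded by
+        # replaced_banner() above: $SERVER_VERSION, $SERVER_SUBVERSION,
+        # $DAEMON_VERSION, $DAEMON_SUBVERSION and $DONATION_ADDRESS.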
+        self.bump_cost(0.5)
+
+        if self.is_tor():
+            banner_file = self.env.tor_banner_file
+        else:
+            banner_file = self.env.banner_file
+        if banner_file:
+            try:
+                with codecs.open(banner_file, 'r', 'utf-8') as f:
+                    banner = f.read()
+            except (OSError, UnicodeDecodeError) as e:
+                self.logger.error(f'reading banner file {banner_file}: {e!r}')
+            else:
+                banner = await self.replaced_banner(banner)
+
+        return banner
+
+    async def relayfee(self):
+        '''The minimum fee a low-priority tx must pay in order to be accepted
+        to the daemon's memory pool.'''
+        self.bump_cost(1.0)
+        return await self.daemon_request('relayfee')
+
+    async def estimatefee(self, number, mode=None):
+        '''The estimated transaction fee per kilobyte to be paid for a
+        transaction to be included within a certain number of blocks.
+
+        number: the number of blocks
+        mode: CONSERVATIVE or ECONOMICAL estimation mode
+        '''
+        number = non_negative_integer(number)
+        # use whitelist for mode, otherwise it would be easy to force a cache miss:
+        if mode not in self.coin.ESTIMATEFEE_MODES:
+            raise RPCError(BAD_REQUEST, f'unknown estimatefee mode: {mode}')
+        self.bump_cost(0.1)
+
+        number = self.coin.bucket_estimatefee_block_target(number)
+        cache = self.session_mgr.estimatefee_cache
+
+        cache_item = cache.get((number, mode))
+        if cache_item is not None:
+            blockhash, feerate, lock = cache_item
+            if blockhash and blockhash == self.session_mgr.bp.tip:
+                return feerate
+        else:
+            # create lock now, store it, and only then await on it
+            lock = asyncio.Lock()
+            cache[(number, mode)] = (None, None, lock)
+        async with lock:
+            cache_item = cache.get((number, mode))
+            if cache_item is not None:
+                blockhash, feerate, lock = cache_item
+                if blockhash == self.session_mgr.bp.tip:
+                    return feerate
+            self.bump_cost(2.0)  # cache miss incurs extra cost
+            blockhash = self.session_mgr.bp.tip
+            if mode:
+                feerate = await self.daemon_request('estimatefee', number, mode)
+            else:
+                feerate = await self.daemon_request('estimatefee', number)
+            assert feerate is not None
+            assert blockhash is not None
+            cache[(number, mode)] = (blockhash, feerate, lock)
+            return feerate
+
+    async def ping(self):
+        '''Serves as a connection keep-alive mechanism and for the client to
+        confirm the server is still responding.
+        '''
+        self.bump_cost(0.1)
+        return None
+
+    async def server_version(self, client_name='', protocol_version=None):
+        '''Returns the server version as a string.
+
+        client_name: a string identifying the client
+        protocol_version: the protocol version spoken by the client
+        '''
+        self.bump_cost(0.5)
+        if self.sv_seen:
+            raise RPCError(BAD_REQUEST, 'server.version already sent')
+        self.sv_seen = True
+
+        if client_name:
+            client_name = str(client_name)
+            if self.env.drop_client is not None and \
+                    self.env.drop_client.match(client_name):
+                raise ReplyAndDisconnect(RPCError(
+                    BAD_REQUEST, f'unsupported client: {client_name}'))
+            self.client = client_name[:17]
+
+        # Find the highest common protocol version. Disconnect if
+        # that protocol version is unsupported.
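+        # Illustrative sketch (an assumption about util.protocol_version, not
+        # part of this patch): it returns (negotiated_tuple, client_min), with
+        # negotiated_tuple None when the client and server ranges do not
+        # overlap, e.g.:
+        #   util.protocol_version(['1.4', '1.4.3'], (1, 4), (1, 4, 3))
+        #   -> ((1, 4, 3), (1, 4))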
+        ptuple, client_min = util.protocol_version(
+            protocol_version, self.PROTOCOL_MIN, self.PROTOCOL_MAX)
+
+        await self.crash_old_client(ptuple, self.env.coin.CRASH_CLIENT_VER)
+
+        if ptuple is None:
+            if client_min > self.PROTOCOL_MIN:
+                self.logger.info(f'client requested future protocol version '
+                                 f'{util.version_string(client_min)} '
+                                 f'- is your software out of date?')
+            raise ReplyAndDisconnect(RPCError(
+                BAD_REQUEST, f'unsupported protocol version: {protocol_version}'))
+        self.set_request_handlers(ptuple)
+
+        return electrumx.version, self.protocol_version_string()
+
+    async def crash_old_client(self, ptuple, crash_client_ver):
+        if crash_client_ver:
+            client_ver = util.protocol_tuple(self.client)
+            is_old_protocol = ptuple is None or ptuple <= (1, 2)
+            is_old_client = client_ver != (0,) and client_ver <= crash_client_ver
+            if is_old_protocol and is_old_client:
+                self.logger.info(f'attempting to crash old client with version {self.client}')
+                # this can crash electrum client 2.6 <= v < 3.1.2
+                await self.send_notification('blockchain.relayfee', ())
+                # this can crash electrum client (v < 2.8.2) UNION (3.0.0 <= v < 3.3.0)
+                await self.send_notification('blockchain.estimatefee', ())
+
+    async def transaction_broadcast_validate(self, raw_tx):
+        '''Simulate broadcasting a raw transaction to the network.
+
+        raw_tx: the raw transaction as a hexadecimal string to validate for Atomicals FT rules'''
+        self.bump_cost(0.25 + len(raw_tx) / 5000)
+        # This returns errors as JSON RPC errors, as is natural
+        try:
+            hex_hash = await self.session_mgr.broadcast_transaction_validated(raw_tx, False)
+            return hex_hash
+        except AtomicalsValidationError as e:
+            self.logger.info(f'error validating atomicals transaction: {e}')
+            raise RPCError(ATOMICALS_INVALID_TX, 'the transaction was rejected by '
+                           f'atomicals rules.\n\n{e}\n[{raw_tx}]')
+
+    async def transaction_broadcast(self, raw_tx):
+        '''Broadcast a raw transaction to the network.
+
+        raw_tx: the raw transaction as a hexadecimal string'''
+        self.bump_cost(0.25 + len(raw_tx) / 5000)
+        # This returns errors as JSON RPC errors, as is natural
+        try:
+            hex_hash = await self.session_mgr.broadcast_transaction_validated(raw_tx, True)
+        except DaemonError as e:
+            error, = e.args
+            message = error['message']
+            self.logger.info(f'error sending transaction: {message}')
+            raise RPCError(BAD_REQUEST, 'the transaction was rejected by '
+                           f'network rules.\n\n{message}\n[{raw_tx}]')
+        except AtomicalsValidationError as e:
+            self.logger.info(f'error validating atomicals transaction: {e}')
+            raise RPCError(ATOMICALS_INVALID_TX, 'the transaction was rejected by '
+                           f'atomicals rules.\n\n{e}\n[{raw_tx}]')
+        else:
+            self.txs_sent += 1
+            client_ver = util.protocol_tuple(self.client)
+            if client_ver != (0, ):
+                msg = self.coin.warn_old_client_on_tx_broadcast(client_ver)
+                if msg:
+                    self.logger.info(f'sent tx: {hex_hash}. and warned user to upgrade their '
+                                     f'client from {self.client}')
+                    return msg
+
+            self.logger.info(f'sent tx: {hex_hash}')
+            return hex_hash
+
+    async def transaction_broadcast_force(self, raw_tx):
+        '''Broadcast a raw transaction to the network.
Force even if invalid FT transfer + raw_tx: the raw transaction as a hexadecimal string''' + self.bump_cost(0.25 + len(raw_tx) / 5000) + # This returns errors as JSON RPC errors, as is natural + try: + hex_hash = await self.session_mgr.broadcast_transaction(raw_tx) + except DaemonError as e: + error, = e.args + message = error['message'] + self.logger.info(f'error sending transaction: {message}') + raise RPCError(BAD_REQUEST, 'the transaction was rejected by ' + f'network rules.\n\n{message}\n[{raw_tx}]') + else: + self.txs_sent += 1 + client_ver = util.protocol_tuple(self.client) + if client_ver != (0, ): + msg = self.coin.warn_old_client_on_tx_broadcast(client_ver) + if msg: + self.logger.info(f'sent tx: {hex_hash}. and warned user to upgrade their ' + f'client from {self.client}') + return msg + + self.logger.info(f'sent tx: {hex_hash}') + return hex_hash + + + async def transaction_get(self, tx_hash, verbose=False): + '''Return the serialized raw transaction given its hash + + tx_hash: the transaction hash as a hexadecimal string + verbose: passed on to the daemon + ''' + assert_tx_hash(tx_hash) + if verbose not in (True, False): + raise RPCError(BAD_REQUEST, '"verbose" must be a boolean') + + self.bump_cost(1.0) + return await self.daemon_request('getrawtransaction', tx_hash, verbose) + + async def transaction_merkle(self, tx_hash, height): + '''Return the merkle branch to a confirmed transaction given its hash + and height. + + tx_hash: the transaction hash as a hexadecimal string + height: the height of the block it is in + ''' + tx_hash = assert_tx_hash(tx_hash) + height = non_negative_integer(height) + + branch, tx_pos, cost = await self.session_mgr.merkle_branch_for_tx_hash( + height, tx_hash) + self.bump_cost(cost) + + return {"block_height": height, "merkle": branch, "pos": tx_pos} + + async def transaction_id_from_pos(self, height, tx_pos, merkle=False): + '''Return the txid and optionally a merkle proof, given + a block height and position in the block. 
+ ''' + tx_pos = non_negative_integer(tx_pos) + height = non_negative_integer(height) + if merkle not in (True, False): + raise RPCError(BAD_REQUEST, '"merkle" must be a boolean') + + if merkle: + branch, tx_hash, cost = await self.session_mgr.merkle_branch_for_tx_pos( + height, tx_pos) + self.bump_cost(cost) + return {"tx_hash": tx_hash, "merkle": branch} + else: + tx_hashes, cost = await self.session_mgr.tx_hashes_at_blockheight(height) + try: + tx_hash = tx_hashes[tx_pos] + except IndexError: + raise RPCError(BAD_REQUEST, + f'no tx at position {tx_pos:,d} in block at height {height:,d}') + self.bump_cost(cost) + return hash_to_hex_str(tx_hash) + + async def compact_fee_histogram(self): + self.bump_cost(1.0) + return await self.mempool.compact_fee_histogram() + + async def atomicals_transaction(self, txid): + return await self.session_mgr.get_transaction_detail(txid) + + async def get_transaction_detail_by_height(self, height, limit, offset, op_type, reverse=True): + res = [] + txs_list = [] + txs = self.db.get_atomicals_block_txs(height) + for tx in txs: + # get operation by db method + tx_num, _ = self.db.get_tx_num_height_from_tx_hash(hex_str_to_hash(tx)) + txs_list.append({ + "tx_num": tx_num, + "tx_hash": tx, + "height": height + }) + + txs_list.sort(key=lambda x: x['tx_num'], reverse=reverse) + for tx in txs_list: + data = await self.session_mgr.get_transaction_detail(tx["tx_hash"], height, tx["tx_num"]) + if (op_type and op_type == data["op"]) or (not op_type and data["op"]): + res.append(data) + total = len(res) + return res[offset:offset+limit], total + + # get the whole transaction by block height + # return transaction detail + async def transaction_by_height(self, height, limit=10, offset=0, op_type=None, reverse=True): + res, total = await self.get_transaction_detail_by_height(height, limit, offset, op_type, reverse) + return {"result": res, "total": total, "limit": limit, "offset": offset} + + # get transaction by atomical id + async def transaction_by_atomical_id(self, compact_atomical_id_or_atomical_number, limit=10, offset=0, op_type=None, reverse=True): + res = [] + compact_atomical_id = compact_atomical_id_or_atomical_number + if isinstance(compact_atomical_id_or_atomical_number, int) != True and is_compact_atomical_id(compact_atomical_id_or_atomical_number): + assert_atomical_id(compact_atomical_id) + else: + compact_atomical_id = location_id_bytes_to_compact(self.db.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number)) + atomical_id = compact_to_location_id_bytes(compact_atomical_id) + hashX = double_sha256(atomical_id) + + res = [] + if op_type: + op = self.session_mgr.bp.op_list.get(op_type, None) + history_data, total = await self.session_mgr.get_history_op(hashX, limit, offset, op, reverse) + else: + history_data, total = await self.session_mgr.get_history_op(hashX, limit, offset, None, reverse) + for history in history_data: + tx_hash, tx_height = self.db.fs_tx_hash(history["tx_num"]) + data = await self.session_mgr.get_transaction_detail(hash_to_hex_str(tx_hash), tx_height, history["tx_num"]) + if data and data["op"]: + if (op_type and data["op"] == op_type) or not op_type: + res.append(data) + return {"result": res, "total": total, "limit": limit, "offset": offset} + + # get transaction by scripthash + async def transaction_by_scripthash(self, scripthash, limit=10, offset=0, op_type=None, reverse=True): + hashX = scripthash_to_hashX(scripthash) + res = [] + if op_type: + op = self.session_mgr.bp.op_list.get(op_type, None) + history_data, 
total = await self.session_mgr.get_history_op(hashX, limit, offset, op, reverse) + else: + history_data, total = await self.session_mgr.get_history_op(hashX, limit, offset, None, reverse) + + for history in history_data: + tx_hash, tx_height = self.db.fs_tx_hash(history["tx_num"]) + data = await self.session_mgr.get_transaction_detail(hash_to_hex_str(tx_hash), tx_height, history["tx_num"]) + if data and data["op"]: + if data["op"] and (data["op"] == op_type or not op_type): + res.append(data) + return {"result": res, "total": total, "limit": limit, "offset": offset} + + def set_request_handlers(self, ptuple): + self.protocol_tuple = ptuple + handlers = { + 'blockchain.block.header': self.block_header, + 'blockchain.block.headers': self.block_headers, + 'blockchain.estimatefee': self.estimatefee, + 'blockchain.headers.subscribe': self.headers_subscribe, + 'blockchain.relayfee': self.relayfee, + 'blockchain.scripthash.get_balance': self.scripthash_get_balance, + 'blockchain.scripthash.get_history': self.scripthash_get_history, + 'blockchain.scripthash.get_mempool': self.scripthash_get_mempool, + 'blockchain.scripthash.listunspent': self.scripthash_listunspent, + 'blockchain.scripthash.subscribe': self.scripthash_subscribe, + 'blockchain.transaction.broadcast': self.transaction_broadcast, + 'blockchain.transaction.broadcast_force': self.transaction_broadcast_force, + 'blockchain.transaction.get': self.transaction_get, + 'blockchain.transaction.get_merkle': self.transaction_merkle, + 'blockchain.transaction.id_from_pos': self.transaction_id_from_pos, + 'mempool.get_fee_histogram': self.compact_fee_histogram, + 'server.add_peer': self.add_peer, + 'server.banner': self.banner, + 'server.donation_address': self.donation_address, + 'server.features': self.server_features_async, + 'server.peers.subscribe': self.peers_subscribe, + 'server.ping': self.ping, + 'server.version': self.server_version, + # The Atomicals era has begun # + 'blockchain.atomicals.validate': self.transaction_broadcast_validate, + 'blockchain.atomicals.get_ft_balances_scripthash': self.atomicals_get_ft_balances, + 'blockchain.atomicals.get_nft_balances_scripthash': self.atomicals_get_nft_balances, + 'blockchain.atomicals.listscripthash': self.atomicals_listscripthash, + 'blockchain.atomicals.list': self.atomicals_list, + 'blockchain.atomicals.get_numbers': self.atomicals_num_to_id, + 'blockchain.atomicals.get_block_hash': self.atomicals_block_hash, + 'blockchain.atomicals.get_block_txs': self.atomicals_block_txs, + 'blockchain.atomicals.dump': self.atomicals_dump, + 'blockchain.atomicals.at_location': self.atomicals_at_location, + 'blockchain.atomicals.get_location': self.atomicals_get_location, + 'blockchain.atomicals.get': self.atomicals_get, + 'blockchain.atomicals.get_global': self.atomicals_get_global, + 'blockchain.atomicals.get_state': self.atomical_get_state, + 'blockchain.atomicals.get_state_history': self.atomical_get_state_history, + 'blockchain.atomicals.get_events': self.atomical_get_events, + 'blockchain.atomicals.get_tx_history': self.atomicals_get_tx_history, + 'blockchain.atomicals.get_realm_info': self.atomicals_get_realm_info, + 'blockchain.atomicals.get_by_realm': self.atomicals_get_by_realm, + 'blockchain.atomicals.get_by_subrealm': self.atomicals_get_by_subrealm, + 'blockchain.atomicals.get_by_dmitem': self.atomicals_get_by_dmitem, + 'blockchain.atomicals.get_by_ticker': self.atomicals_get_by_ticker, + 'blockchain.atomicals.get_by_container': self.atomicals_get_by_container, + 
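+            # Dispatch sketch: each entry maps an Electrum JSON-RPC method name
+            # to its handler coroutine, so a request such as (illustrative
+            # payload):
+            #   {"id": 1, "method": "blockchain.atomicals.get_by_realm", "params": ["somerealm"]}
+            # invokes self.atomicals_get_by_realm('somerealm').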
'blockchain.atomicals.get_by_container_item': self.atomicals_get_by_container_item, + 'blockchain.atomicals.get_by_container_item_validate': self.atomicals_get_by_container_item_validation, + 'blockchain.atomicals.get_container_items': self.atomicals_get_container_items, + 'blockchain.atomicals.get_ft_info': self.atomicals_get_ft_info, + 'blockchain.atomicals.get_dft_mints': self.atomicals_get_dft_mints, + 'blockchain.atomicals.find_tickers': self.atomicals_search_tickers, + 'blockchain.atomicals.find_realms': self.atomicals_search_realms, + 'blockchain.atomicals.find_subrealms': self.atomicals_search_subrealms, + 'blockchain.atomicals.find_containers': self.atomicals_search_containers, + 'blockchain.atomicals.get_holders': self.atomicals_get_holders, + 'blockchain.atomicals.transaction': self.atomicals_transaction, + 'blockchain.atomicals.transaction_global': self.session_mgr.transaction_global, + 'blockchain.atomicals.transaction_by_height': self.transaction_by_height, + 'blockchain.atomicals.transaction_by_atomical_id': self.transaction_by_atomical_id, + 'blockchain.atomicals.transaction_by_scripthash': self.transaction_by_scripthash, + } + if ptuple >= (1, 4, 2): + handlers['blockchain.scripthash.unsubscribe'] = self.scripthash_unsubscribe + self.request_handlers = handlers + +class LocalRPC(SessionBase): + '''A local TCP RPC server session.''' + + processing_timeout = 10**9 # disable timeouts + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.client = 'RPC' + self.connection.max_response_size = 0 + + def protocol_version_string(self): + return 'RPC' + + +class DashElectrumX(ElectrumX): + '''A TCP server that handles incoming Electrum Dash connections.''' + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.mns = set() + self.mn_cache_height = 0 + self.mn_cache = [] + + def set_request_handlers(self, ptuple): + super().set_request_handlers(ptuple) + self.request_handlers.update({ + 'masternode.announce.broadcast': + self.masternode_announce_broadcast, + 'masternode.subscribe': self.masternode_subscribe, + 'masternode.list': self.masternode_list, + 'protx.diff': self.protx_diff, + 'protx.info': self.protx_info, + }) + + async def _notify_inner(self, touched, height_changed): + '''Notify the client about changes in masternode list.''' + await super()._notify_inner(touched, height_changed) + for mn in self.mns.copy(): + status = await self.daemon_request('masternode_list', + ('status', mn)) + await self.send_notification('masternode.subscribe', + (mn, status.get(mn))) + + # Masternode command handlers + async def masternode_announce_broadcast(self, signmnb): + '''Pass through the masternode announce message to be broadcast + by the daemon. + + signmnb: signed masternode broadcast message.''' + try: + return await self.daemon_request('masternode_broadcast', + ('relay', signmnb)) + except DaemonError as e: + error, = e.args + message = error['message'] + self.logger.info(f'masternode_broadcast: {message}') + raise RPCError(BAD_REQUEST, 'the masternode broadcast was ' + f'rejected.\n\n{message}\n[{signmnb}]') + + async def masternode_subscribe(self, collateral): + '''Returns the status of masternode. + + collateral: masternode collateral. + ''' + result = await self.daemon_request('masternode_list', + ('status', collateral)) + if result is not None: + self.mns.add(collateral) + return result.get(collateral) + return None + + async def masternode_list(self, payees): + ''' + Returns the list of masternodes. 
+
+        payees: a list of masternode payee addresses.
+        '''
+        if not isinstance(payees, list):
+            raise RPCError(BAD_REQUEST, 'expected a list of payees')
+
+        def get_masternode_payment_queue(mns):
+            '''Returns the calculated position in the payment queue for all the
+            valid masternodes in the given mns list.
+
+            mns: a list of masternodes information.
+            '''
+            now = int(datetime.datetime.utcnow().strftime("%s"))
+            mn_queue = []
+
+            # Only ENABLED masternodes are considered for the list.
+            for line in mns:
+                mnstat = mns[line].split()
+                if mnstat[0] == 'ENABLED':
+                    # if last paid time == 0
+                    if int(mnstat[5]) == 0:
+                        # use active seconds
+                        mnstat.append(int(mnstat[4]))
+                    else:
+                        # now minus last paid
+                        delta = now - int(mnstat[5])
+                        # if > active seconds, use active seconds
+                        if delta >= int(mnstat[4]):
+                            mnstat.append(int(mnstat[4]))
+                        # use active seconds
+                        else:
+                            mnstat.append(delta)
+                    mn_queue.append(mnstat)
+            mn_queue = sorted(mn_queue, key=lambda x: x[8], reverse=True)
+            return mn_queue
+
+        def get_payment_position(payment_queue, address):
+            '''
+            Returns the position in the payment queue for the given address.
+
+            payment_queue: the masternode payment queue.
+            address: masternode payee address.
+            '''
+            position = -1
+            for pos, mn in enumerate(payment_queue, start=1):
+                if mn[2] == address:
+                    position = pos
+                    break
+            return position
+
+        # In accordance with the masternode payment queue, a custom list
+        # with the masternode information including the payment
+        # position is returned.
+        cache = self.session_mgr.mn_cache
+        if not cache or self.session_mgr.mn_cache_height != self.db.db_height:
+            full_mn_list = await self.daemon_request('masternode_list',
+                                                     ('full',))
+            mn_payment_queue = get_masternode_payment_queue(full_mn_list)
+            mn_payment_count = len(mn_payment_queue)
+            mn_list = []
+            for key, value in full_mn_list.items():
+                mn_data = value.split()
+                mn_info = {
+                    'vin': key,
+                    'status': mn_data[0],
+                    'protocol': mn_data[1],
+                    'payee': mn_data[2],
+                    'lastseen': mn_data[3],
+                    'activeseconds': mn_data[4],
+                    'lastpaidtime': mn_data[5],
+                    'lastpaidblock': mn_data[6],
+                    'ip': mn_data[7]
+                }
+                mn_info['paymentposition'] = get_payment_position(
+                    mn_payment_queue, mn_info['payee']
+                )
+                mn_info['inselection'] = (
+                    mn_info['paymentposition'] < mn_payment_count // 10
+                )
+                hashX = self.coin.address_to_hashX(mn_info['payee'])
+                balance = await self.get_balance(hashX)
+                mn_info['balance'] = (sum(balance.values())
+                                      / self.coin.VALUE_PER_COIN)
+                mn_list.append(mn_info)
+            cache.clear()
+            cache.extend(mn_list)
+            self.session_mgr.mn_cache_height = self.db.db_height
+
+        # If payees is an empty list the whole masternode list is returned
+        if payees:
+            return [mn for mn in cache if mn['payee'] in payees]
+        else:
+            return cache
+
+    async def protx_diff(self, base_height, height):
+        '''
+        Calculates a diff between two deterministic masternode lists.
+        The result also contains proof data.
+
+        base_height: The starting block height (starting from 1).
+        height: The ending block height.
+        '''
+        if not isinstance(base_height, int) or not isinstance(height, int):
+            raise RPCError(BAD_REQUEST, 'expected int block heights')
+
+        max_height = self.db.db_height
+        if (not 1 <= base_height <= max_height or
+                not base_height <= height <= max_height):
+            raise RPCError(BAD_REQUEST,
+                           f'require 1 <= base_height {base_height:,d} <= '
+                           f'height {height:,d} <= '
+                           f'chain height {max_height:,d}')
+
+        return await self.daemon_request('protx',
+                                         ('diff', base_height, height))
+
+    async def protx_info(self, protx_hash):
+        '''
+        Returns detailed information about a deterministic masternode.
+
+        protx_hash: The hash of the initial ProRegTx
+        '''
+        if not isinstance(protx_hash, str):
+            raise RPCError(BAD_REQUEST, 'expected protx hash string')
+
+        res = await self.daemon_request('protx', ('info', protx_hash))
+        if 'wallet' in res:
+            del res['wallet']
+        return res
+
+
+class SmartCashElectrumX(DashElectrumX):
+    '''A TCP server that handles incoming Electrum-SMART connections.'''
+
+    def set_request_handlers(self, ptuple):
+        super().set_request_handlers(ptuple)
+        self.request_handlers.update({
+            'smartrewards.current': self.smartrewards_current,
+            'smartrewards.check': self.smartrewards_check
+        })
+
+    async def smartrewards_current(self):
+        '''Returns the current smartrewards info.'''
+        result = await self.daemon_request('smartrewards', ('current',))
+        if result is not None:
+            return result
+        return None
+
+    async def smartrewards_check(self, addr):
+        '''
+        Returns the status of an address
+
+        addr: a single smartcash address
+        '''
+        result = await self.daemon_request('smartrewards', ('check', addr))
+        if result is not None:
+            return result
+        return None
+
+
+class AuxPoWElectrumX(ElectrumX):
+    async def block_header(self, height, cp_height=0):
+        result = await super().block_header(height, cp_height)
+
+        # Older protocol versions don't truncate AuxPoW
+        if self.protocol_tuple < (1, 4, 1):
+            return result
+
+        # Not covered by a checkpoint; return full AuxPoW data
+        if cp_height == 0:
+            return result
+
+        # Covered by a checkpoint; truncate AuxPoW data
+        result['header'] = self.truncate_auxpow(result['header'], height)
+        return result
+
+    async def block_headers(self, start_height, count, cp_height=0):
+        result = await super().block_headers(start_height, count, cp_height)
+
+        # Older protocol versions don't truncate AuxPoW
+        if self.protocol_tuple < (1, 4, 1):
+            return result
+
+        # Not covered by a checkpoint; return full AuxPoW data
+        if cp_height == 0:
+            return result
+
+        # Covered by a checkpoint; truncate AuxPoW data
+        result['hex'] = self.truncate_auxpow(result['hex'], start_height)
+        return result
+
+    def truncate_auxpow(self, headers_full_hex, start_height):
+        height = start_height
+        headers_full = util.hex_to_bytes(headers_full_hex)
+        cursor = 0
+        headers = bytearray()
+
+        while cursor < len(headers_full):
+            headers += headers_full[cursor:cursor+self.coin.TRUNCATED_HEADER_SIZE]
+            cursor += self.db.dynamic_header_len(height)
+            height += 1
+
+        return headers.hex()
+
+
+class NameIndexElectrumX(ElectrumX):
+    def set_request_handlers(self, ptuple):
+        super().set_request_handlers(ptuple)
+
+        if ptuple >= (1, 4, 3):
+            self.request_handlers['blockchain.name.get_value_proof'] = self.name_get_value_proof
+
+    async def name_get_value_proof(self, scripthash, cp_height=0):
+        history = await self.scripthash_get_history(scripthash)
+
+        trimmed_history = []
+        prev_height = None
+
+        for update in history[::-1]:
+            txid = update['tx_hash']
+            height = update['height']
+
+            if (self.coin.NAME_EXPIRATION is
not None + and prev_height is not None + and height < prev_height - self.coin.NAME_EXPIRATION): + break + + tx = await(self.transaction_get(txid)) + update['tx'] = tx + del update['tx_hash'] + + tx_merkle = await self.transaction_merkle(txid, height) + del tx_merkle['block_height'] + update['tx_merkle'] = tx_merkle + + if height <= cp_height: + header = await self.block_header(height, cp_height) + update['header'] = header + + trimmed_history.append(update) + + if height <= cp_height: + break + + prev_height = height + + return {scripthash: trimmed_history} + + +class NameIndexAuxPoWElectrumX(NameIndexElectrumX, AuxPoWElectrumX): + pass From e00b06103944dc4a989e098649cdf0b73d5fbb1d Mon Sep 17 00:00:00 2001 From: Wizz Wallet <153743376+wizz-wallet-dev@users.noreply.github.com> Date: Wed, 22 May 2024 09:55:28 +0800 Subject: [PATCH 04/13] Merge from develop --- electrumx/server/session/session_manager.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/electrumx/server/session/session_manager.py b/electrumx/server/session/session_manager.py index 178ac734..863375af 100644 --- a/electrumx/server/session/session_manager.py +++ b/electrumx/server/session/session_manager.py @@ -954,7 +954,8 @@ async def get_transaction_detail(self, txid: str, height=None, tx_num=-1): tx_hash, tx, self.bp.get_atomicals_id_mint_info, - True + self.bp.is_dmint_activated(height), + self.bp.is_custom_coloring_activated(height), ) is_burned = blueprint_builder.are_fts_burned is_cleanly_assigned = blueprint_builder.cleanly_assigned @@ -1063,7 +1064,7 @@ async def get_transaction_detail(self, txid: str, height=None, tx_num=-1): "atomical_id": compact_atomical_id, "type": "FT", "index": k, - "value": output_ft.satvalue + "value": output_ft.sat_value } if k not in res["transfers"]["outputs"]: res["transfers"]["outputs"][k] = [ft_data] @@ -1109,8 +1110,7 @@ async def get_transaction_detail(self, txid: str, height=None, tx_num=-1): else: res["transfers"]["outputs"][k].append(nft_data) - atomical_id_for_payment, payment_marker_idx, _ = AtomicalsTransferBlueprintBuilder.get_atomical_id_for_payment_marker_if_found( - tx) + atomical_id_for_payment, payment_marker_idx, _ = AtomicalsTransferBlueprintBuilder.get_atomical_id_for_payment_marker_if_found(tx) if atomical_id_for_payment: res["info"]["payment"] = { "atomical_id": location_id_bytes_to_compact(atomical_id_for_payment), From 612165d9f8c5b266d6214402f4e1614d056495c7 Mon Sep 17 00:00:00 2001 From: Wizz Wallet <153743376+wizz-wallet-dev@users.noreply.github.com> Date: Wed, 22 May 2024 09:55:39 +0800 Subject: [PATCH 05/13] Fix tuple --- electrumx/server/block_processor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/electrumx/server/block_processor.py b/electrumx/server/block_processor.py index e6ee1eee..01da66d7 100644 --- a/electrumx/server/block_processor.py +++ b/electrumx/server/block_processor.py @@ -2882,7 +2882,7 @@ def advance_txs( is_unspendable: Callable[[bytes], bool], header, height - ) -> tuple[list[bytes], list[bytes]]: + ) -> Tuple[list[bytes], list[bytes]]: self.tx_hashes.append(b''.join(tx_hash for tx, tx_hash in txs)) self.atomicals_rpc_format_cache.clear() self.atomicals_rpc_general_cache.clear() From 53116c44ba54ed43e7f09653828d75f4a025de88 Mon Sep 17 00:00:00 2001 From: Wizz Wallet <153743376+wizz-wallet-dev@users.noreply.github.com> Date: Mon, 27 May 2024 16:02:53 +0800 Subject: [PATCH 06/13] Fix circulating imports --- electrumx/server/session/http_session.py | 5 +---- 
electrumx/server/session/session_manager.py | 4 ++-- electrumx/server/session/shared_session.py | 4 ++-- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/electrumx/server/session/http_session.py b/electrumx/server/session/http_session.py index 5e0e8091..0e82ff9f 100644 --- a/electrumx/server/session/http_session.py +++ b/electrumx/server/session/http_session.py @@ -8,15 +8,12 @@ import aiorpcx from aiohttp import web -from aiorpcx import RPCError import electrumx.lib.util as util from electrumx.lib.script2addr import get_address_from_output_script from electrumx.lib.util_atomicals import * -from electrumx.server.session import BAD_REQUEST -from electrumx.server.session.session_base import assert_tx_hash, scripthash_to_hashX, non_negative_integer, \ - assert_atomical_id from electrumx.server.session.shared_session import SharedSession +from electrumx.server.session.util import * from electrumx.version import electrumx_version diff --git a/electrumx/server/session/session_manager.py b/electrumx/server/session/session_manager.py index 863375af..aaa6254b 100644 --- a/electrumx/server/session/session_manager.py +++ b/electrumx/server/session/session_manager.py @@ -17,10 +17,12 @@ from electrumx.lib.text import sessions_lines from electrumx.lib.util import OldTaskGroup from electrumx.lib.util_atomicals import * +from electrumx.server.daemon import DaemonError, Daemon from electrumx.server.history import TXNUM_LEN from electrumx.server.http_middleware import * from electrumx.server.mempool import MemPool from electrumx.server.session import BAD_REQUEST, DAEMON_ERROR +from electrumx.server.session.http_session import HttpHandler from electrumx.server.session.util import non_negative_integer from electrumx.server.peers import PeerManager @@ -30,10 +32,8 @@ if TYPE_CHECKING: from electrumx.server.block_processor import BlockProcessor - from electrumx.server.daemon import DaemonError, Daemon from electrumx.server.db import DB from electrumx.server.env import Env - from electrumx.server.session.http_session import HttpHandler @attr.s(slots=True) diff --git a/electrumx/server/session/shared_session.py b/electrumx/server/session/shared_session.py index 2fb91871..c837ec96 100644 --- a/electrumx/server/session/shared_session.py +++ b/electrumx/server/session/shared_session.py @@ -7,10 +7,10 @@ from electrumx.lib.util_atomicals import AtomicalsValidationError from electrumx.server.daemon import DaemonError from electrumx.server.session import ATOMICALS_INVALID_TX, BAD_REQUEST -from electrumx.server.session.session_manager import SessionManager if TYPE_CHECKING: from electrumx.lib.coins import AtomicalsCoinMixin, Coin + from electrumx.server.session.session_manager import SessionManager class SharedSession: @@ -18,7 +18,7 @@ def __init__( self, logger: LoggerAdapter, coin: Type[Union['Coin', 'AtomicalsCoinMixin']], - session_mgr: SessionManager, + session_mgr: Type['SessionManager'], client: str, ): self.session_mgr = session_mgr From e8d7159ef388ac9b7aea72b4ce53e038546c5edd Mon Sep 17 00:00:00 2001 From: Wizz Wallet <153743376+wizz-wallet-dev@users.noreply.github.com> Date: Tue, 28 May 2024 09:48:14 +0800 Subject: [PATCH 07/13] Catch `KeyboardInterrupt` --- electrumx_server | 2 ++ 1 file changed, 2 insertions(+) diff --git a/electrumx_server b/electrumx_server index 4420ca19..779a1335 100755 --- a/electrumx_server +++ b/electrumx_server @@ -47,6 +47,8 @@ def main(): logger.setLevel(env.log_level) controller = Controller(env) asyncio.run(controller.run()) + except KeyboardInterrupt: + 
logger.info('ElectrumX server terminated (normally) before initialized') except Exception: logger.exception('ElectrumX server terminated abnormally') else: From 17204840dd86599be7de03e24ad242f9afe7ec61 Mon Sep 17 00:00:00 2001 From: Wizz Wallet <153743376+wizz-wallet-dev@users.noreply.github.com> Date: Tue, 28 May 2024 23:11:29 +0800 Subject: [PATCH 08/13] Move handlers to the shared session --- electrumx/lib/util_atomicals.py | 353 ++-- electrumx/server/session/electrumx_session.py | 1734 +-------------- electrumx/server/session/http_session.py | 1869 +---------------- electrumx/server/session/session_base.py | 3 +- electrumx/server/session/session_manager.py | 160 +- electrumx/server/session/shared_session.py | 1559 +++++++++++++- electrumx/server/session/util.py | 4 + 7 files changed, 1938 insertions(+), 3744 deletions(-) diff --git a/electrumx/lib/util_atomicals.py b/electrumx/lib/util_atomicals.py index faa57427..0573b4a7 100644 --- a/electrumx/lib/util_atomicals.py +++ b/electrumx/lib/util_atomicals.py @@ -46,7 +46,7 @@ class AtomicalsValidationError(Exception): '''Raised when Atomicals Validation Error''' - + # The maximum height difference between the commit and reveal transactions of any Atomical mint # This is used to limit the amount of cache we would need in future optimizations. MINT_GENERAL_COMMIT_REVEAL_DELAY_BLOCKS = 100 @@ -98,12 +98,12 @@ class AtomicalsValidationError(Exception): DFT_MINT_HEIGHT_MIN = 0 # This value would never change, it's added in case someone accidentally tries to use a unixtime DFT_MINT_HEIGHT_MAX = 10000000 # 10 million blocks - + def pad_bytes_n(val, n): padlen = n if len(val) > padlen: raise ValueError('pad_bytes_n input val is out of range') - new_val = val + new_val = val extra_bytes_needed = padlen - len(val) new_val = new_val + bytes(extra_bytes_needed) return new_val @@ -140,7 +140,7 @@ def is_integer_num(n): # Check whether the value is hex string def is_hex_string(value): if not isinstance(value, str): - return False + return False try: int(value, 16) # Throws ValueError if it cannot be validated as hex string return True @@ -151,19 +151,19 @@ def is_hex_string(value): # Check whether the value is hex string def is_hex_string_regex(value): if not isinstance(value, str): - return False + return False m = re.compile(r'^[a-z0-9]+$') if m.match(value): return True - return False + return False # Check whether the value is a 36 byte hex string def is_atomical_id_long_form_string(value): if not value: - return False + return False if not isinstance(value, str): - return False + return False try: int(value, 16) # Throws ValueError if it cannot be validated as hex string @@ -175,7 +175,7 @@ def is_atomical_id_long_form_string(value): # Check whether the value is a 36 byte sequence def is_atomical_id_long_form_bytes(value): if not isinstance(value, bytes): - return False + return False try: if len(value) == 36: return True @@ -183,16 +183,17 @@ def is_atomical_id_long_form_bytes(value): pass return False + # Check whether the value is a compact form location/atomical id def is_compact_atomical_id(value): - '''Whether this is a compact atomical id or not - ''' + """Whether this is a compact atomical id or not + """ if isinstance(value, int): return False - if value == None or value == "": + if value is None or value == "": return False index_of_i = value.find("i") - if index_of_i != 64: + if index_of_i != 64: return False raw_hash = hex_str_to_hash(value[ : 64]) if len(raw_hash) == 32: @@ -207,11 +208,11 @@ def compact_to_location_id_bytes(value): 
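# A quick sketch of the id layout parsed below (illustrative values only):
# the compact form is 64 hex characters of the tx hash followed by "i" and
# the output index; the long form is the 32-byte hash plus a 4-byte
# little-endian output index, 36 bytes in total. So, for example:
#   compact_to_location_id_bytes('00' * 32 + 'i1')  # -> 36 bytes, LE index 1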
raise TypeError(f'value in compact_to_location_id_bytes is not set') index_of_i = value.index("i") - if index_of_i != 64: + if index_of_i != 64: raise TypeError(f'{value} should be 32 bytes hex followed by i') - + raw_hash = hex_str_to_hash(value[ : 64]) - + if len(raw_hash) != 32: raise TypeError(f'{value} should be 32 bytes hex followed by i') @@ -221,16 +222,16 @@ def compact_to_location_id_bytes(value): raise TypeError(f'{value} index output number was parsed to be less than 0 or greater than 100000') return raw_hash + pack_le_uint32(num) - + # Convert 36 byte sequence to compact form string def location_id_bytes_to_compact(location_id): digit, = unpack_le_uint32_from(location_id[32:]) return f'{hash_to_hex_str(location_id[:32])}i{digit}' - + # Get the tx hash from the location/atomical id -def get_tx_hash_index_from_location_id(location_id): +def get_tx_hash_index_from_location_id(location_id): output_index, = unpack_le_uint32_from(location_id[ 32 : 36]) - return location_id[ : 32], output_index + return location_id[ : 32], output_index # Check if the operation is a valid distributed mint (dmint) type def is_valid_dmt_op_format(tx_hash, dmt_op): @@ -260,10 +261,10 @@ def is_valid_dmt_op_format(tx_hash, dmt_op): } return False, {} -# Validate that a string is a valid hex +# Validate that a string is a valid hex def is_validate_pow_prefix_string(pow_prefix, pow_prefix_ext): if not pow_prefix: - return False + return False m = re.compile(r'^[a-f0-9]{1,64}$') if pow_prefix: if pow_prefix_ext: @@ -292,7 +293,7 @@ def is_proof_of_work_prefix_match(tx_hash, powprefix, powprefix_ext): initial_test_matches_main_prefix = txid.startswith(powprefix) if not initial_test_matches_main_prefix: return False - + # Now check that the next digit is within the range of powprefix_ext next_char = txid[len(powprefix)] char_map = { @@ -317,7 +318,7 @@ def is_proof_of_work_prefix_match(tx_hash, powprefix, powprefix_ext): # powprefix_ext == 0 is functionally equivalent to not having a powprefix_ext (because it makes the entire 16 valued range available) # powprefix_ext == 15 is functionally equivalent to extending the powprefix by 1 (because it's the same as just requiring 16x more hashes) if get_numeric_value >= powprefix_ext: - return True + return True return False else: @@ -326,15 +327,15 @@ def is_proof_of_work_prefix_match(tx_hash, powprefix, powprefix_ext): return txid.startswith(powprefix) # Parse a bitwork stirng such as '123af.15' -def is_valid_bitwork_string(bitwork): +def is_valid_bitwork_string(bitwork): if not bitwork: - return None, None + return None, None if not isinstance(bitwork, str): - return None, None - + return None, None + if bitwork.count('.') > 1: - return None, None + return None, None splitted = bitwork.split('.') prefix = splitted[0] @@ -362,7 +363,7 @@ def has_requested_proof_of_work(operations_found_at_inputs): return False, None payload_dict = operations_found_at_inputs['payload'] - args = payload_dict.get('args') + args = payload_dict.get('args') if not isinstance(args, dict): return False, None @@ -371,13 +372,13 @@ def has_requested_proof_of_work(operations_found_at_inputs): request_pow_reveal = args.get('bitworkr') pow_reveal = None - + # Proof of work was requested on the commit if request_pow_commit: valid_str, bitwork_parts = is_valid_bitwork_string(request_pow_commit) if valid_str and is_proof_of_work_prefix_match(operations_found_at_inputs['commit_txid'], bitwork_parts['prefix'], bitwork_parts['ext']): pow_commit = request_pow_commit - else: + else: # The proof of work 
was invalid, therefore the current request is fundamentally invalid too return True, None @@ -386,7 +387,7 @@ def has_requested_proof_of_work(operations_found_at_inputs): valid_str, bitwork_parts = is_valid_bitwork_string(request_pow_reveal) if valid_str and is_proof_of_work_prefix_match(operations_found_at_inputs['reveal_location_txid'], bitwork_parts['prefix'], bitwork_parts['ext']): pow_reveal = request_pow_reveal - else: + else: # The proof of work was invalid, therefore the current request is fundamentally invalid too return True, None @@ -400,7 +401,7 @@ def has_requested_proof_of_work(operations_found_at_inputs): # Return whether the provided parent atomical id was spent in the inputs # Used to enforce the '$parents' check for those Atomicals which requested a parent to be # included in the spent inputs in order to allow the mint to succeed -def get_if_parent_spent_in_same_tx(parent_atomical_id_compact, expected_minimum_total_value, atomicals_spent_at_inputs): +def get_if_parent_spent_in_same_tx(parent_atomical_id_compact, expected_minimum_total_value, atomicals_spent_at_inputs): parent_atomical_id = compact_to_location_id_bytes(parent_atomical_id_compact) id_to_total_value_map = {} for idx, atomical_entry_list in atomicals_spent_at_inputs.items(): @@ -414,10 +415,10 @@ def get_if_parent_spent_in_same_tx(parent_atomical_id_compact, expected_minimum_ id_to_total_value_map[atomical_id] += input_value total_sum = id_to_total_value_map.get(parent_atomical_id) if total_sum == None: - return False - + return False + if total_sum >= expected_minimum_total_value: - return True + return True else: return False @@ -433,7 +434,7 @@ def build_base_mint_info(commit_txid, commit_index, reveal_location_txid, reveal txout = tx.outputs[expected_output_index] scripthash = double_sha256(txout.pk_script) hashX = script_hashX(txout.pk_script) - output_idx_le = pack_le_uint32(expected_output_index) + output_idx_le = pack_le_uint32(expected_output_index) atomical_id = commit_txid + pack_le_uint32(commit_index) location = reveal_location_txid + pack_le_uint32(reveal_location_index) # sat_value = pack_le_uint64(txout.value) @@ -482,12 +483,12 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): init = op_found_payload.get('init', {}) if not isinstance(init, dict): return False - mint_info['args'] = args + mint_info['args'] = args mint_info['ctx'] = ctx - mint_info['meta'] = meta - mint_info['init'] = init + mint_info['meta'] = meta + mint_info['init'] = init return True - + op = op_found_struct['op'] payload = op_found_struct['payload'] payload_bytes = op_found_struct['payload_bytes'] @@ -501,7 +502,7 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): if not populate_args_meta_ctx_init(mint_info, op_found_struct['payload']): logger.warning(f'get_mint_info_op_factory - not populate_args_meta_ctx_init {hash_to_hex_str(tx_hash)}') return None, None - + # The 'args.i' field indicates it is immutable and no mod/evt state allowed is_immutable = mint_info['args'].get('i') if is_immutable and not isinstance(is_immutable, bool): @@ -510,7 +511,7 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): # Check if there was requested proof of work, and if there was then only allow the mint to happen if it was successfully executed the proof of work is_pow_requested, pow_result = has_requested_proof_of_work(op_found_struct) - if is_pow_requested and not pow_result: + if is_pow_requested and not pow_result: logger.warning(f'get_mint_info_op_factory: proof of work was requested, but the 
proof of work was invalid. Ignoring Atomical operation at {hash_to_hex_str(tx_hash)}. Skipping...') return None, None @@ -574,7 +575,7 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): found_parent = get_if_parent_spent_in_same_tx(parent_atomical_id, value, atomicals_spent_at_inputs) if not found_parent: logger.warning(f'Ignoring operation due to invalid parent input not provided') - return None, None + return None, None mint_info['$parents'] = parents_enforced ############################################ @@ -607,7 +608,7 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): logger.debug(f'NFT request_realm {hash_to_hex_str(tx_hash)}, {realm}') if not isinstance(realm, str) or not is_valid_realm_string_name(realm): logger.warning(f'NFT request_realm is invalid {hash_to_hex_str(tx_hash)}, {realm}. Skipping....') - return None, None + return None, None mint_info['$request_realm'] = realm elif subrealm: @@ -627,7 +628,7 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): parent_realm_id_compact = mint_info['args'].get('parent_realm') if not isinstance(parent_realm_id_compact, str) or not is_compact_atomical_id(parent_realm_id_compact): logger.warning(f'NFT request_subrealm parent_realm is invalid {hash_to_hex_str(tx_hash)}, {parent_realm_id_compact}. Skipping...') - return None, None + return None, None mint_info['$request_subrealm'] = subrealm # Save in the compact form to make it easier to understand for developers and users # It requires an extra step to convert, but it makes it easier to understand the format @@ -641,7 +642,7 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): parent_container_id_compact = mint_info['args'].get('parent_container') if not isinstance(parent_container_id_compact, str) or not is_compact_atomical_id(parent_container_id_compact): logger.warning(f'NFT request_dmitem parent_container is invalid {hash_to_hex_str(tx_hash)}, {parent_container_id_compact}. Skipping...') - return None, None + return None, None mint_info['$request_dmitem'] = dmitem # Save in the compact form to make it easier to understand for developers and users # It requires an extra step to convert, but it makes it easier to understand the format @@ -656,7 +657,7 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): if container or realm or subrealm: logger.warning(f'NFT is invalid because container or realm or subrealm cannot be immutable {hash_to_hex_str(tx_hash)}. Skipping...') return None, None - mint_info['$immutable'] = True + mint_info['$immutable'] = True ############################################ # @@ -669,7 +670,7 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): ticker = mint_info['args'].get('request_ticker', None) if not isinstance(ticker, str) or not is_valid_ticker_string(ticker): logger.warning(f'FT mint has invalid ticker {tx_hash}, {ticker}. Skipping...') - return None, None + return None, None mint_info['$request_ticker'] = ticker # FTs are not allowed to be immutable @@ -683,34 +684,34 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): ticker = mint_info['args'].get('request_ticker', None) if not isinstance(ticker, str) or not is_valid_ticker_string(ticker): logger.warning(f'DFT init has invalid ticker {hash_to_hex_str(tx_hash)}, {ticker}. 
Skipping...') - return None, None + return None, None mint_info['$request_ticker'] = ticker mint_height = mint_info['args'].get('mint_height', None) if not isinstance(mint_height, int) or mint_height < DFT_MINT_HEIGHT_MIN or mint_height > DFT_MINT_HEIGHT_MAX: logger.warning(f'DFT init has invalid mint_height {hash_to_hex_str(tx_hash)}, {mint_height}. Skipping...') return None, None - + mint_amount = mint_info['args'].get('mint_amount', None) if not isinstance(mint_amount, int) or mint_amount < DFT_MINT_AMOUNT_MIN or mint_amount > DFT_MINT_AMOUNT_MAX: logger.warning(f'DFT init has invalid mint_amount {hash_to_hex_str(tx_hash)}, {mint_amount}. Skipping...') return None, None - + max_mints = mint_info['args'].get('max_mints', None) if not isinstance(max_mints, int) or max_mints < DFT_MINT_MAX_MIN_COUNT: logger.warning(f'DFT init has invalid max_mints {hash_to_hex_str(tx_hash)}, {max_mints}. Skipping...') return None, None - + if height < coin.ATOMICALS_ACTIVATION_HEIGHT_DENSITY: if max_mints > DFT_MINT_MAX_MAX_COUNT_LEGACY: logger.warning(f'DFT init has invalid max_mints legacy {hash_to_hex_str(tx_hash)}, {max_mints}. Skipping...') return None, None - + elif height >= coin.ATOMICALS_ACTIVATION_HEIGHT_DENSITY: if max_mints > DFT_MINT_MAX_MAX_COUNT_DENSITY: logger.warning(f'DFT init has invalid max_mints {hash_to_hex_str(tx_hash)}, {max_mints}. Skipping...') return None, None - + mint_info['$mint_height'] = mint_height mint_info['$mint_amount'] = mint_amount mint_info['$max_mints'] = max_mints @@ -722,17 +723,17 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): valid_commit_str, bitwork_commit_parts = is_valid_bitwork_string(mint_pow_commit) if valid_commit_str: mint_info['$mint_bitworkc'] = mint_pow_commit - else: + else: logger.warning(f'DFT mint has invalid mint_bitworkc. Skipping...') return None, None - + # If set it requires the mint reveal tx to have POW matching the mint_reveal_powprefix to claim a mint mint_pow_reveal = mint_info['args'].get('mint_bitworkr') if mint_pow_reveal: valid_reveal_str, bitwork_reveal_parts = is_valid_bitwork_string(mint_pow_reveal) if valid_reveal_str: mint_info['$mint_bitworkr'] = mint_pow_reveal - else: + else: # Fail to create on invalid bitwork string logger.warning(f'DFT mint has invalid mint_bitworkr. Skipping...') return None, None @@ -743,10 +744,10 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): return None, None dft_mode = mint_info['args'].get('md') - if dft_mode != 1 and dft_mode != 0 and dft_mode != None: + if dft_mode != 1 and dft_mode != 0 and dft_mode != None: logger.warning(f'DFT init has invalid md {hash_to_hex_str(tx_hash)}, {dft_mode}. Skipping...') - return None, None - + return None, None + # Perpetual mint mode available on activation if height >= coin.ATOMICALS_ACTIVATION_HEIGHT_DENSITY and dft_mode == 1: bv = mint_info['args'].get('bv') @@ -755,44 +756,44 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): bcs = mint_info['args'].get('bcs', 64) brs = mint_info['args'].get('brs', 64) if (not bci and not bri) or not bv: - return None, None - + return None, None + if not is_hex_string_regex(bv) or len(bv) < 4: logger.warning(f'DFT init has invalid bv must be at least length 4 hex {hash_to_hex_str(tx_hash)}, {bv}. Skipping...') - return None, None - + return None, None + if mint_info.get('$mint_bitworkr'): logger.warning(f'DFT init has invalid because mint_bitworkr cannot be set when perpetual mode {hash_to_hex_str(tx_hash)}. 
Skipping...') - return None, None - + return None, None + if mint_info.get('$mint_bitworkc'): logger.warning(f'DFT init has invalid because mint_bitworkc cannot be set when perpetual mode {hash_to_hex_str(tx_hash)}. Skipping...') - return None, None - + return None, None + # Do not require mint bitworkc if there is no mint bitworkc increment - if bci == None: + if bci == None: pass elif not isinstance(bci, int) or bci < 0 or bci > 64: logger.warning(f'DFT init has invalid bci {hash_to_hex_str(tx_hash)}, {bci}. Skipping...') return None, None - + if bci: if not isinstance(bcs, int) or bcs < 64 or bcs > 256: logger.warning(f'DFT init has invalid bcs {hash_to_hex_str(tx_hash)}, {bcs}. Skipping...') return None, None - + # Do not require mint bitworkr if there is no mint bitworkr increment if bri == None: pass elif not isinstance(bri, int) or bri < 0 or bri > 64: logger.warning(f'DFT init has invalid bri {hash_to_hex_str(tx_hash)}, {bri}. Skipping...') return None, None - + if bri: if not isinstance(brs, int) or brs < 64 or brs > 256: logger.warning(f'DFT init has invalid brs {hash_to_hex_str(tx_hash)}, {brs}. Skipping...') return None, None - + mint_info['$mint_mode'] = 'perpetual' mint_info['$mint_bitworkc_inc'] = bci mint_info['$mint_bitworkr_inc'] = bri @@ -805,19 +806,19 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): if max_mints > 100000: logger.warning(f'DFT init has invalid max_mints must be <= 100000 with perpetual mining {hash_to_hex_str(tx_hash)}, {max_mints}. Skipping...') return None, None - + max_mints_global = mint_info['args'].get('maxg') - if max_mints_global != None: + if max_mints_global != None: if not isinstance(max_mints_global, int) or max_mints_global < DFT_MINT_MAX_MIN_COUNT or max_mints_global > DFT_MINT_MAX_MAX_COUNT_DENSITY: logger.warning(f'DFT init has invalid maxg {hash_to_hex_str(tx_hash)}, {max_mints_global}. Skipping...') return None, None mint_info['$max_mints_global'] = max_mints_global - else: + else: mint_info['$mint_mode'] = 'fixed' if not mint_info or not mint_info.get('type'): return None, None - + # Check if there are any POW constraints # Populated for convenience so it is easy to see at a glance that someone intended it to be used # This is the general purpose proof of work request. Typically used for NFTs, but nothing stopping it from being used for @@ -832,7 +833,7 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): logger.warning(f'Name type mint does not have prefix of at least length 4 of bitworkc. Skipping...') return None, None mint_info['$bitworkc'] = request_pow_commit - else: + else: # Fail to create on invalid bitwork string logger.warning(f'Mint has invalid bitworkc. Skipping...') return None, None @@ -847,7 +848,7 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): valid_reveal_str, bitwork_reveal_parts = is_valid_bitwork_string(request_pow_reveal) if valid_reveal_str: mint_info['$bitworkr'] = request_pow_reveal - else: + else: logger.warning(f'Mint has invalid bitworkr. 
Skipping...') # Fail to create on invalid bitwork string return None, None @@ -893,7 +894,7 @@ def format_name_type_candidates_to_rpc_for_subname(raw_entries, atomical_id_to_c applicable_rule = dataset.get('applicable_rule') base_candidate['applicable_rule'] = applicable_rule return reformatted - + # Format the relevant byte fields in the mint raw data into strings to send on rpc calls well formatted def convert_db_mint_info_to_rpc_mint_info_format(header_hash, mint_info): mint_info['atomical_id'] = location_id_bytes_to_compact(mint_info['atomical_id']) @@ -905,38 +906,38 @@ def convert_db_mint_info_to_rpc_mint_info_format(header_hash, mint_info): mint_info['mint_info']['reveal_location_header'] = mint_info['mint_info']['reveal_location_header'].hex() mint_info['mint_info']['reveal_location_scripthash'] = hash_to_hex_str(mint_info['mint_info']['reveal_location_scripthash']) mint_info['mint_info']['reveal_location_script'] = mint_info['mint_info']['reveal_location_script'].hex() - return mint_info + return mint_info # A valid ticker string must be at least 1 characters and max 21 with a-z0-9 def is_valid_ticker_string(ticker): if not ticker: - return None + return None m = re.compile(r'^[a-z0-9]{1,21}$') if m.match(ticker): return True - return False + return False # Check that the base requirement is satisfied def is_valid_namebase_string_name(realm_or_subrealm_name): if not realm_or_subrealm_name: - return False + return False if not isinstance(realm_or_subrealm_name, str): return False - + if len(realm_or_subrealm_name) > 64 or len(realm_or_subrealm_name) <= 0: - return False - + return False + if realm_or_subrealm_name[0] == '-': - return False + return False if realm_or_subrealm_name[-1] == '-': - return False - + return False + return True -# A valid realm string must begin with a-z and have up to 63 characters after it +# A valid realm string must begin with a-z and have up to 63 characters after it # Including a-z0-9 and hyphen's "-" def is_valid_realm_string_name(realm_name): if not is_valid_namebase_string_name(realm_name): @@ -945,7 +946,7 @@ def is_valid_realm_string_name(realm_name): m = re.compile(r'^[a-z][a-z0-9\-]{0,63}$') if m.match(realm_name): return True - return False + return False # A valid subrealm string must begin with a-z0-9 and have up to 63 characters after it # Including a-z0-9 and hyphen's "-" @@ -956,8 +957,8 @@ def is_valid_subrealm_string_name(subrealm_name): m = re.compile(r'^[a-z0-9][a-z0-9\-]{0,63}$') if m.match(subrealm_name): return True - return False - + return False + # A valid container string must begin with a-z0-9 and have up to 63 characters after it # Including a-z0-9 and hyphen's "-" def is_valid_container_string_name(container_name): @@ -967,7 +968,7 @@ def is_valid_container_string_name(container_name): m = re.compile(r'^[a-z0-9][a-z0-9\-]{0,63}$') if m.match(container_name): return True - return False + return False # Is valid container item name # Including a-z0-9 and hyphen's "-" @@ -978,7 +979,7 @@ def is_valid_container_dmitem_string_name(dmitem): m = re.compile(r'^[a-z0-9][a-z0-9\-]{0,63}$') if m.match(dmitem): return True - return False + return False # Parses the push datas from a bitcoin script byte sequence def parse_push_data(op, n, script): @@ -1037,30 +1038,30 @@ def parse_operation_from_script(script, n): atom_op = script[n : n + three_letter_op_len].hex() if atom_op == "036e6674": atom_op_decoded = 'nft' # nft - Mint non-fungible token - elif atom_op == "03646674": + elif atom_op == "03646674": atom_op_decoded = 'dft' # dft - 
Deploy distributed mint fungible token starting point - elif atom_op == "036d6f64": + elif atom_op == "036d6f64": atom_op_decoded = 'mod' # mod - Modify general state - elif atom_op == "03657674": + elif atom_op == "03657674": atom_op_decoded = 'evt' # evt - Message response/reply - elif atom_op == "03646d74": + elif atom_op == "03646d74": atom_op_decoded = 'dmt' # dmt - Mint tokens of distributed mint type (dft) - elif atom_op == "03646174": + elif atom_op == "03646174": atom_op_decoded = 'dat' # dat - Store data on a transaction (dat) if atom_op_decoded: return atom_op_decoded, parse_atomicals_data_definition_operation(script, n + three_letter_op_len) - + # check the 2 letter protocol operations if n + two_letter_op_len < script_len: atom_op = script[n : n + two_letter_op_len].hex() if atom_op == "026674": atom_op_decoded = 'ft' # ft - Mint fungible token with direct fixed supply - elif atom_op == "02736c": + elif atom_op == "02736c": atom_op_decoded = 'sl' # sl - Seal an NFT and lock it from further changes forever - + if atom_op_decoded: return atom_op_decoded, parse_atomicals_data_definition_operation(script, n + two_letter_op_len) - + # check the 1 letter if n + one_letter_op_len < script_len: atom_op = script[n : n + one_letter_op_len].hex() @@ -1073,29 +1074,29 @@ def parse_operation_from_script(script, n): atom_op_decoded = 'z' if atom_op_decoded: return atom_op_decoded, parse_atomicals_data_definition_operation(script, n + one_letter_op_len) - + print(f'Invalid Atomicals Operation Code. Skipping... "{script[n : n + 4].hex()}"') return None, None def is_valid_regex(regex): if not regex: - return False + return False if '(' in regex or ')' in regex: return False try: re.compile(rf"{regex}") return True - except Exception as e: - return False + except Exception as e: + return False # Check for a payment marker and return the potential atomical id being indicate that is paid in current tx def is_op_return_subrealm_payment_marker_atomical_id(script): if not script: - return None - + return None + # The output script is too short if len(script) < (1+5+2+1+36): # 6a04<01>p - return None + return None # Ensure it is an OP_RETURN first_byte = script[:1] @@ -1110,15 +1111,15 @@ def is_op_return_subrealm_payment_marker_atomical_id(script): # Check for the envelope format if script[start_index:start_index+5].hex() != ATOMICALS_ENVELOPE_MARKER_BYTES: - return None + return None # Check the next op code matches b'p' for payment if script[start_index+5:start_index+5+2].hex() != '0170': - return None - + return None + # Check there is a 36 byte push data if script[start_index+5+2:start_index+5+2+1].hex() != '24': - return None + return None # Return the potential atomical id that the payment marker is associated with return script[start_index+5+2+1:start_index+5+2+1+36] @@ -1126,11 +1127,11 @@ def is_op_return_subrealm_payment_marker_atomical_id(script): # Check for a payment marker and return the potential atomical id being indicate that is paid in current tx def is_op_return_dmitem_payment_marker_atomical_id(script): if not script: - return None - + return None + # The output script is too short if len(script) < (1+5+2+1+36): # 6a04<01>d - return None + return None # Ensure it is an OP_RETURN first_byte = script[:1] @@ -1145,19 +1146,19 @@ def is_op_return_dmitem_payment_marker_atomical_id(script): # Check for the envelope format if script[start_index:start_index+5].hex() != ATOMICALS_ENVELOPE_MARKER_BYTES: - return None + return None # Check the next op code matches b'd' for payment if 
script[start_index+5:start_index+5+2].hex() != '0164': - return None - + return None + # Check there is a 36 byte push data if script[start_index+5+2:start_index+5+2+1].hex() != '24': - return None + return None # Return the potential atomical id that the payment marker is associated with return script[start_index+5+2+1:start_index+5+2+1+36] - + # Parses and detects valid Atomicals protocol operations in a witness script # Stops when it finds the first operation in the first input def parse_protocols_operations_from_witness_for_input(txinwitness): @@ -1177,7 +1178,7 @@ def parse_protocols_operations_from_witness_for_input(txinwitness): n = n + 32 while n < script_entry_len - 5: op = script[n] - n += 1 + n += 1 # Get the next if statement if op == OpCodes.OP_IF: if ATOMICALS_ENVELOPE_MARKER_BYTES == script[n : n + 5].hex(): @@ -1203,16 +1204,16 @@ def parse_protocols_operations_from_witness_array(tx, tx_hash, allow_args_bytes) # All inputs are parsed but further upstream most operations will only function if placed in the 0'th input op_name, payload = parse_protocols_operations_from_witness_for_input(txinwitness) if not op_name: - continue + continue decoded_object = {} - if payload: + if payload: # Ensure that the payload is cbor encoded dictionary or empty try: decoded_object = loads(payload) if not isinstance(decoded_object, dict): print(f'parse_protocols_operations_from_witness_array found {op_name} but decoded CBOR payload is not a dict for {tx}. Skipping tx input...') continue - except Exception as e: + except Exception as e: print(f'parse_protocols_operations_from_witness_array found {op_name} but CBOR payload parsing failed for {tx}. Skipping tx input...{e}') continue # Also enforce that if there are meta, args, or ctx fields that they must be dicts @@ -1220,7 +1221,7 @@ def parse_protocols_operations_from_witness_array(tx, tx_hash, allow_args_bytes) # Ensure that they are not allowed to contain bytes like objects if not is_sanitized_dict_whitelist_only(decoded_object.get('meta', {})) or not is_sanitized_dict_whitelist_only(decoded_object.get('args', {}), allow_args_bytes) or not is_sanitized_dict_whitelist_only(decoded_object.get('ctx', {})) or not is_sanitized_dict_whitelist_only(decoded_object.get('init', {}), True): print(f'parse_protocols_operations_from_witness_array found {op_name} but decoded CBOR payload has an args, meta, ctx, or init that has not permitted data type {tx} {decoded_object}. 
Skipping tx input...') - continue + continue # Return immediately at the first successful parse of the payload # It doesn't mean that it will be valid when processed, because most operations require the txin_idx=0 @@ -1248,40 +1249,40 @@ def encode_atomical_ids_hex(state): if isinstance(state, bytes): if is_atomical_id_long_form_bytes(state): return location_id_bytes_to_compact(state) - else: + else: return state.hex() if not isinstance(state, dict) and not isinstance(state, list): - return state - + return state + if isinstance(state, list): reformatted_list = [] for item in state: reformatted_list.append(encode_atomical_ids_hex(item)) - return reformatted_list - + return reformatted_list + cloned_state = {} for key, value in state.items(): cloned_state[encode_atomical_ids_hex(key)] = encode_atomical_ids_hex(value) - return cloned_state + return cloned_state def encode_tx_hash_hex(state): if isinstance(state, bytes): return hash_to_hex_str(state) if not isinstance(state, dict) and not isinstance(state, list): - return state - + return state + if isinstance(state, list): reformatted_list = [] for item in state: reformatted_list.append(encode_tx_hash_hex(item)) - return reformatted_list - + return reformatted_list + cloned_state = {} for key, value in state.items(): cloned_state[encode_tx_hash_hex(key)] = encode_tx_hash_hex(value) - return cloned_state + return cloned_state # Auto encodes data into structured bytes data. @@ -1354,10 +1355,10 @@ def is_within_acceptable_blocks_for_general_reveal(commit_height, reveal_locatio def is_within_acceptable_blocks_for_name_reveal(commit_height, reveal_location_height): return commit_height >= reveal_location_height - MINT_REALM_CONTAINER_TICKER_COMMIT_REVEAL_DELAY_BLOCKS -# A payment for a subrealm is acceptable as long as it is within MINT_SUBNAME_COMMIT_PAYMENT_DELAY_BLOCKS of the commit_height +# A payment for a subrealm is acceptable as long as it is within MINT_SUBNAME_COMMIT_PAYMENT_DELAY_BLOCKS of the commit_height def is_within_acceptable_blocks_for_sub_item_payment(commit_height, current_height): return current_height <= commit_height + MINT_SUBNAME_COMMIT_PAYMENT_DELAY_BLOCKS - + # Log an item with a prefix def print_subrealm_calculate_log(item): print(f'calculate_subrealm_rules_list_as_of_height {item}') @@ -1386,7 +1387,7 @@ def validate_subrealm_rules_outputs_format(outputs): # script must be paid to mint a subrealm if not is_hex_string(expected_output_script): print_subrealm_calculate_log(f'validate_subrealm_rules_outputs_format: expected output script is not a valid hex string') - return False # Reject if one of the payment output script is not a valid hex + return False # Reject if one of the payment output script is not a valid hex return True def apply_set_state_mutation(current_object, state_mutation_map, is_top_level): @@ -1403,11 +1404,11 @@ def apply_set_state_mutation(current_object, state_mutation_map, is_top_level): # Key not found, set it if not current_object.get(prop): current_object[prop] = value - else: + else: # key is found, set it if it's a scalar if not isinstance(current_object[prop], dict): - current_object[prop] = value - else: + current_object[prop] = value + else: # There already exists a dictionary at this level, we recurse to set the properties below apply_set_state_mutation(current_object[prop], value, False) return current_object @@ -1442,20 +1443,20 @@ def calculate_latest_state_from_mod_history(mod_history): # If omitted we just assume if has_action_prop and isinstance(has_action_prop, int) and 
has_action_prop == 1: # delete = 1 apply_delete_state_mutation(current_object_state, element['data'], True) - else: + else: apply_set_state_mutation(current_object_state, element['data'], True) - else: + else: return current_object_state def validate_rules_data(namespace_data): if not namespace_data or not isinstance(namespace_data, dict): - return None + return None return validate_rules(namespace_data) # Validate the rules array data for subrealm mints def validate_rules(namespace_data): rules = namespace_data.get('rules', None) - if not rules or not isinstance(rules, list) or len(rules) <= 0: + if not rules or not isinstance(rules, list) or len(rules) <= 0: print_subrealm_calculate_log(f'rules not found') return None @@ -1475,12 +1476,12 @@ def validate_rules(namespace_data): # Ensure that the price entry is a list (pattern, price, output) if not isinstance(rule_set_entry, dict): print_subrealm_calculate_log(f'rule_set_entry is not a dict') - return None + return None # regex is the first pattern that will be checked to match for minting a subrealm regex_pattern = rule_set_entry.get('p') if not isinstance(regex_pattern, str): print_subrealm_calculate_log(f'regex pattern is not a string') - return None + return None if len(regex_pattern) > MAX_SUBNAME_RULE_SIZE_LEN or len(regex_pattern) < 1: print_subrealm_calculate_log(f'rule empty or too large') return None # Reject if the rule has more than MAX_SUBNAME_RULE_SIZE_LEN chars @@ -1497,7 +1498,7 @@ def validate_rules(namespace_data): # Check that regex is a valid regex pattern try: re.compile(rf"{regex_pattern}") - except Exception as e: + except Exception as e: print_subrealm_calculate_log(f'Regex compile error {e}') return None # Reject if one of the regexe's could not be compiled. # Build the price point (ie: could be paid in sats, ARC20 or bitwork) @@ -1506,7 +1507,7 @@ def validate_rules(namespace_data): } # There must be at least one rule type for minting if not outputs and not bitworkc and not bitworkr: - return None + return None # Sanity check that bitworkc and bitworkr must be at least well formatted if they are set if bitworkc: valid_str, bitwork_parts = is_valid_bitwork_string(bitworkc) @@ -1523,7 +1524,7 @@ def validate_rules(namespace_data): elif is_bitwork_const(bitworkr): price_point['bitworkr'] = bitworkr else: - return None + return None if outputs: # check for a list of outputs if not isinstance(outputs, dict) or len(outputs.keys()) < 1: @@ -1531,18 +1532,18 @@ def validate_rules(namespace_data): return None # Reject if one of the payment outputs is not a valid list if not validate_subrealm_rules_outputs_format(outputs): - return None + return None price_point['o'] = outputs validated_rules_list.append(price_point) elif bitworkc or bitworkr: # Also accepted if there was just bitwork (the bitworkc and bitworkr are added above) validated_rules_list.append(price_point) - else: + else: print_subrealm_calculate_log(f'list element does not p or o fields') - return None + return None # If we got this far, it means there is a valid rule as of the requested height, return the information return validated_rules_list - + def is_splat_operation(operations_found_at_inputs): return operations_found_at_inputs and operations_found_at_inputs.get('op') == 'x' and operations_found_at_inputs.get('input_index') == 0 @@ -1567,7 +1568,7 @@ def is_mint_operation(operations_found_at_inputs): # expired_revealed_late - Atomical was revealed beyond the permissible delay, therefore it is not eligible to claim the name # verified - Atomical has been 
verified to have successfully claimed the name (realm, container, or ticker). # claimed_by_other - Failed to claim for current Atomical because it was claimed first by another Atomical -def get_name_request_candidate_status(atomical_info, status, candidate_id, name_type): +def get_name_request_candidate_status(atomical_info, status, candidate_id, name_type): MAX_BLOCKS_STR = str(MINT_REALM_CONTAINER_TICKER_COMMIT_REVEAL_DELAY_BLOCKS) # Check if the candidates are different or for the current atomical requested mint_info = atomical_info['mint_info'] @@ -1617,7 +1618,7 @@ def get_name_request_candidate_status(atomical_info, status, candidate_id, name_ # The status is different or this is a subrealm or dmitem return { 'status': status, - 'pending_candidate_atomical_id': candidate_id_compact + 'pending_candidate_atomical_id': candidate_id_compact } @@ -1732,7 +1733,7 @@ def get_subname_request_candidate_status(current_height, atomical_info, status, def get_next_bitwork_full_str(bitwork_vec, current_prefix_len): - base_bitwork_padded = bitwork_vec.ljust(32, '0') + base_bitwork_padded = bitwork_vec.ljust(32, '0') if current_prefix_len >= 31: return base_bitwork_padded return base_bitwork_padded[:current_prefix_len + 1] @@ -1749,7 +1750,7 @@ def is_txid_valid_for_perpetual_bitwork(txid, bitwork_vec, actual_mints, max_min next_full_bitwork_prefix = get_next_bitwork_full_str(bitwork_vec, len(prefix)) if is_mint_pow_valid(txid, next_full_bitwork_prefix): return True, next_full_bitwork_prefix - return False, None + return False, None def calculate_expected_bitwork(bitwork_vec, actual_mints, max_mints, target_increment, starting_target): if starting_target < 64 or starting_target > 256: @@ -1789,7 +1790,7 @@ def is_bitwork_subset(first_bitwork, second_bitwork): second_fullstr, second_parts = is_valid_bitwork_string(second_bitwork) if not second_fullstr: raise Exception(f'Invalid bitwork string {second_bitwork}') - + if second_parts['prefix'].startswith(first_parts['prefix']): print(f'second_parts={second_parts} first_parts={first_parts}') if len(second_parts['prefix']) > len(first_parts['prefix']): @@ -1805,7 +1806,7 @@ def is_mint_pow_valid(txid, mint_pow_commit): mint_bitwork_prefix = bitwork_commit_parts['prefix'] mint_bitwork_ext = bitwork_commit_parts['ext'] if is_proof_of_work_prefix_match(txid, mint_bitwork_prefix, mint_bitwork_ext): - return True + return True return False def expand_spend_utxo_data(data): @@ -1822,7 +1823,7 @@ def validate_dmitem_mint_args_with_container_dmint(mint_args, mint_data_payload, if not proof or not isinstance(proof, list) or len(proof) == 0: print(f'validate_dmitem_mint_args_with_container_dmint: proof is not valid list') return False - else: + else: for proof_item in proof: if not isinstance(proof_item, dict) or len(proof_item) == 0: print(f'validate_dmitem_mint_args_with_container_dmint: proof item is not a valid dict') @@ -1866,7 +1867,7 @@ def get_container_dmint_format_status(dmint): } errors = [] rules_list = validate_rules_data(dmint) - + if not rules_list or len(rules_list) == 0: errors.append('rules list is invalid') @@ -1882,7 +1883,7 @@ def get_container_dmint_format_status(dmint): if immutable: if not isinstance(immutable, bool): errors.append('immutable must be a bool') - + merkle = dmint.get('merkle') if not merkle or not isinstance(merkle, str) or len(merkle) != 64: errors.append('merkle str must be 64 hex characters') @@ -1892,22 +1893,22 @@ def get_container_dmint_format_status(dmint): if not isinstance(mint_height, int) or mint_height < 0: 
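# Note: unlike the DFT args check earlier (which enforces DFT_MINT_HEIGHT_MIN
# and DFT_MINT_HEIGHT_MAX), a dmint container's mint_height is only validated
# here for being a non-negative integer.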
errors.append('mint height invalid') - base_status['errors'] = errors + base_status['errors'] = errors if len(errors) == 0: base_status['status'] = 'valid' - else: + else: base_status['status'] = 'invalid' return base_status - + def validate_merkle_proof_dmint(expected_root_hash, item_name, possible_bitworkc, possible_bitworkr, main, main_hash, proof): # print(f'expected_root_hash={expected_root_hash} item_name={item_name} possible_bitworkc={possible_bitworkc} possible_bitworkr={possible_bitworkr} main={main} main_hash={main_hash} proof={proof} ') # There could be 4 ways to have encoded the merkle proof, we will test each way to find it # The reason for this is we do not know if the bitworkc/bitworkr was 'any' or a specific value # Therefore to not put more data into the request, we just loop over all possible combinations (there are 4) # Only one of them can be validate, and then the proof is completed - + # Combinations can be: # any/any # specific_bitworkc/any @@ -1926,7 +1927,7 @@ def check_validate_proof(target_hash, proof): formatted_proof.append({ 'right': leaf_hash }) - else: + else: # Accept hashes as bytes or string leaf_hash = item['d'] if isinstance(leaf_hash, bytes): @@ -1934,7 +1935,7 @@ def check_validate_proof(target_hash, proof): formatted_proof.append({ 'left': leaf_hash }) - return mt.validate_proof(formatted_proof, target_hash, expected_root_hash) + return mt.validate_proof(formatted_proof, target_hash, expected_root_hash) # Case 1: any/any concat_str1 = item_name + ':' + 'any' + ':' + 'any' + ':' + main + ':' + main_hash diff --git a/electrumx/server/session/electrumx_session.py b/electrumx/server/session/electrumx_session.py index df987147..6b44deec 100644 --- a/electrumx/server/session/electrumx_session.py +++ b/electrumx/server/session/electrumx_session.py @@ -1,4 +1,3 @@ -import asyncio import codecs import datetime @@ -6,8 +5,6 @@ from aiorpcx import timeout_after, TaskTimeout, ReplyAndDisconnect from electrumx.lib import util -from electrumx.lib.script2addr import get_address_from_output_script -from electrumx.lib.util_atomicals import * from electrumx.server.daemon import DaemonError from electrumx.server.session.session_base import SessionBase from electrumx.server.session.util import * @@ -17,9 +14,6 @@ class ElectrumX(SessionBase): """A TCP server that handles incoming Electrum connections.""" - PROTOCOL_MIN = (1, 4) - PROTOCOL_MAX = (1, 4, 3) - def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.subscribe_headers = False @@ -27,14 +21,13 @@ def __init__(self, *args, **kwargs): self.hashX_subs = {} self.sv_seen = False self.mempool_statuses = {} - self.set_request_handlers(self.PROTOCOL_MAX) - self.is_peer = False + self.set_request_handlers(SESSION_PROTOCOL_MAX) self.cost = 5.0 # Connection cost @classmethod def protocol_min_max_strings(cls): return [util.version_string(ver) - for ver in (cls.PROTOCOL_MIN, cls.PROTOCOL_MAX)] + for ver in (SESSION_PROTOCOL_MIN, SESSION_PROTOCOL_MAX)] @classmethod def server_features(cls, env): @@ -81,10 +74,6 @@ def on_disconnect_due_to_excessive_session_cost(self): def sub_count(self): return len(self.hashX_subs) - def unsubscribe_hashX(self, hashX): - self.mempool_statuses.pop(hashX, None) - return self.hashX_subs.pop(hashX, None) - async def notify(self, touched, height_changed): """Wrap _notify_inner; websockets raises exceptions for unclear reasons.""" try: @@ -101,7 +90,7 @@ async def _notify_inner(self, touched, height_changed): updates or new blocks) and height. 
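        Header notifications are only sent when this session has subscribed
        via blockchain.headers.subscribe; scripthash updates are restricted
        to the hashXs tracked in self.hashX_subs.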
""" if height_changed and self.subscribe_headers: - args = (await self.subscribe_headers_result(),) + args = (await self.ss.subscribe_headers_result(),) await self.send_notification('blockchain.headers.subscribe', args) touched = touched.intersection(self.hashX_subs) @@ -135,1374 +124,104 @@ async def _notify_inner(self, touched, height_changed): def set_request_handlers(self, protocols): self.protocol_tuple: Tuple[int, ...] = protocols handlers = { - 'blockchain.block.header': self.block_header, - 'blockchain.block.headers': self.block_headers, - 'blockchain.estimatefee': self.estimatefee, - 'blockchain.headers.subscribe': self.headers_subscribe, - 'blockchain.relayfee': self.relayfee, - 'blockchain.scripthash.get_balance': self.scripthash_get_balance, - 'blockchain.scripthash.get_history': self.scripthash_get_history, - 'blockchain.scripthash.get_mempool': self.scripthash_get_mempool, - 'blockchain.scripthash.listunspent': self.scripthash_listunspent, - 'blockchain.scripthash.subscribe': self.scripthash_subscribe, - 'blockchain.transaction.broadcast': self.transaction_broadcast, - 'blockchain.transaction.broadcast_force': self.transaction_broadcast_force, - 'blockchain.transaction.get': self.transaction_get, - 'blockchain.transaction.get_merkle': self.transaction_merkle, - 'blockchain.transaction.id_from_pos': self.transaction_id_from_pos, - 'mempool.get_fee_histogram': self.compact_fee_histogram, - 'server.add_peer': self.add_peer, - 'server.banner': self.banner, - 'server.donation_address': self.donation_address, + 'blockchain.headers.subscribe': self.ss.headers_subscribe, + 'blockchain.block.header': self.ss.block_header, + 'blockchain.block.headers': self.ss.block_headers, + 'blockchain.estimatefee': self.ss.estimate_fee, + 'blockchain.relayfee': self.ss.relay_fee, + 'blockchain.scripthash.get_balance': self.ss.scripthash_get_balance, + 'blockchain.scripthash.get_history': self.ss.scripthash_get_history, + 'blockchain.scripthash.get_mempool': self.ss.scripthash_get_mempool, + 'blockchain.scripthash.listunspent': self.ss.scripthash_list_unspent, + 'blockchain.scripthash.subscribe': self.ss.scripthash_subscribe, + 'blockchain.transaction.broadcast': self.ss.transaction_broadcast, + 'blockchain.transaction.broadcast_force': self.ss.transaction_broadcast_force, + 'blockchain.transaction.get': self.ss.transaction_get, + 'blockchain.transaction.get_merkle': self.ss.transaction_merkle, + 'blockchain.transaction.id_from_pos': self.ss.transaction_id_from_pos, + 'mempool.get_fee_histogram': self.ss.compact_fee_histogram, + # 'server.banner': self.banner, + 'server.donation_address': self.ss.donation_address, 'server.features': self.server_features_async, - 'server.peers.subscribe': self.peers_subscribe, - 'server.ping': self.ping, - 'server.version': self.server_version, + # 'server.peers.subscribe': self.peers_subscribe, + # 'server.ping': self.ss.ping, + # 'server.version': self.server_version, # The Atomicals era has begun # - 'blockchain.atomicals.validate': self.transaction_broadcast_validate, - 'blockchain.atomicals.get_ft_balances_scripthash': self.atomicals_get_ft_balances, - 'blockchain.atomicals.get_nft_balances_scripthash': self.atomicals_get_nft_balances, - 'blockchain.atomicals.listscripthash': self.atomicals_listscripthash, - 'blockchain.atomicals.list': self.atomicals_list, - 'blockchain.atomicals.get_numbers': self.atomicals_num_to_id, - 'blockchain.atomicals.get_block_hash': self.atomicals_block_hash, - 'blockchain.atomicals.get_block_txs': self.atomicals_block_txs, - 
'blockchain.atomicals.dump': self.atomicals_dump, - 'blockchain.atomicals.at_location': self.atomicals_at_location, - 'blockchain.atomicals.get_location': self.atomicals_get_location, - 'blockchain.atomicals.get': self.atomicals_get, - 'blockchain.atomicals.get_global': self.atomicals_get_global, - 'blockchain.atomicals.get_state': self.atomical_get_state, - 'blockchain.atomicals.get_state_history': self.atomical_get_state_history, - 'blockchain.atomicals.get_events': self.atomical_get_events, - 'blockchain.atomicals.get_tx_history': self.atomicals_get_tx_history, - 'blockchain.atomicals.get_realm_info': self.atomicals_get_realm_info, - 'blockchain.atomicals.get_by_realm': self.atomicals_get_by_realm, - 'blockchain.atomicals.get_by_subrealm': self.atomicals_get_by_subrealm, - 'blockchain.atomicals.get_by_dmitem': self.atomicals_get_by_dmitem, - 'blockchain.atomicals.get_by_ticker': self.atomicals_get_by_ticker, - 'blockchain.atomicals.get_by_container': self.atomicals_get_by_container, - 'blockchain.atomicals.get_by_container_item': self.atomicals_get_by_container_item, - 'blockchain.atomicals.get_by_container_item_validate': self.atomicals_get_by_container_item_validation, - 'blockchain.atomicals.get_container_items': self.atomicals_get_container_items, - 'blockchain.atomicals.get_ft_info': self.atomicals_get_ft_info, - 'blockchain.atomicals.get_dft_mints': self.atomicals_get_dft_mints, - 'blockchain.atomicals.find_tickers': self.atomicals_search_tickers, - 'blockchain.atomicals.find_realms': self.atomicals_search_realms, - 'blockchain.atomicals.find_subrealms': self.atomicals_search_subrealms, - 'blockchain.atomicals.find_containers': self.atomicals_search_containers, - 'blockchain.atomicals.get_holders': self.atomicals_get_holders, - 'blockchain.atomicals.transaction': self.atomicals_transaction, - 'blockchain.atomicals.transaction_by_height': self.transaction_by_height, - 'blockchain.atomicals.transaction_by_atomical_id': self.transaction_by_atomical_id, - 'blockchain.atomicals.transaction_by_scripthash': self.transaction_by_scripthash, + 'blockchain.atomicals.validate': self.ss.transaction_broadcast_validate, + 'blockchain.atomicals.get_ft_balances_scripthash': self.ss.atomicals_get_ft_balances, + 'blockchain.atomicals.get_nft_balances_scripthash': self.ss.atomicals_get_nft_balances, + 'blockchain.atomicals.listscripthash': self.ss.atomicals_list_scripthash, + 'blockchain.atomicals.list': self.ss.atomicals_list, + 'blockchain.atomicals.get_numbers': self.ss.atomicals_num_to_id, + 'blockchain.atomicals.get_block_hash': self.ss.atomicals_block_hash, + 'blockchain.atomicals.get_block_txs': self.ss.atomicals_block_txs, + # 'blockchain.atomicals.dump': self.ss.atomicals_dump, + 'blockchain.atomicals.at_location': self.ss.atomicals_at_location, + 'blockchain.atomicals.get_location': self.ss.atomicals_get_location, + 'blockchain.atomicals.get': self.ss.atomicals_get, + 'blockchain.atomicals.get_global': self.ss.atomicals_get_global, + 'blockchain.atomicals.get_state': self.ss.atomical_get_state, + 'blockchain.atomicals.get_state_history': self.ss.atomical_get_state_history, + 'blockchain.atomicals.get_events': self.ss.atomical_get_events, + 'blockchain.atomicals.get_tx_history': self.ss.atomicals_get_tx_history, + 'blockchain.atomicals.get_ft_info': self.ss.atomicals_get_ft_info, + 'blockchain.atomicals.get_dft_mints': self.ss.atomicals_get_dft_mints, + 'blockchain.atomicals.get_realm_info': self.ss.atomicals_get_realm_info, + 'blockchain.atomicals.get_by_realm': 
self.ss.atomicals_get_by_realm, + 'blockchain.atomicals.get_by_subrealm': self.ss.atomicals_get_by_subrealm, + 'blockchain.atomicals.get_by_dmitem': self.ss.atomicals_get_by_dmitem, + 'blockchain.atomicals.get_by_ticker': self.ss.atomicals_get_by_ticker, + 'blockchain.atomicals.get_by_container': self.ss.atomicals_get_by_container, + 'blockchain.atomicals.get_by_container_item': self.ss.atomicals_get_by_container_item, + 'blockchain.atomicals.get_by_container_item_validate': self.ss.atomicals_get_by_container_item_validation, + 'blockchain.atomicals.get_container_items': self.ss.atomicals_get_container_items, + 'blockchain.atomicals.find_tickers': self.ss.atomicals_search_tickers, + 'blockchain.atomicals.find_realms': self.ss.atomicals_search_realms, + 'blockchain.atomicals.find_subrealms': self.ss.atomicals_search_subrealms, + 'blockchain.atomicals.find_containers': self.ss.atomicals_search_containers, + 'blockchain.atomicals.get_holders': self.ss.atomicals_get_holders, + 'blockchain.atomicals.transaction': self.session_mgr.get_transaction_detail, + 'blockchain.atomicals.transaction_by_height': self.ss.transaction_by_height, + 'blockchain.atomicals.transaction_by_atomical_id': self.ss.transaction_by_atomical_id, + 'blockchain.atomicals.transaction_by_scripthash': self.ss.transaction_by_scripthash, + 'blockchain.atomicals.transaction_global': self.session_mgr.transaction_global, } if protocols >= (1, 4, 2): - handlers['blockchain.scripthash.unsubscribe'] = self.scripthash_unsubscribe + handlers['blockchain.scripthash.unsubscribe'] = self.ss.scripthash_unsubscribe self.request_handlers = handlers - async def subscribe_headers_result(self): - """The result of a header subscription or notification.""" - return self.session_mgr.hsub_results - - async def headers_subscribe(self): - """Subscribe to get raw headers of new blocks.""" - if not self.subscribe_headers: - self.subscribe_headers = True - self.bump_cost(0.25) - return await self.subscribe_headers_result() - - async def add_peer(self, features): - """Add a peer (but only if the peer resolves to the source).""" - self.is_peer = True - self.bump_cost(100.0) - return await self.peer_mgr.on_add_peer(features, self.remote_address()) + async def banner(self): + """Return the server banner text.""" + banner = f'You are connected to an {electrumx_version} server.' + self.bump_cost(0.5) + if self.is_tor(): + banner_file = self.env.tor_banner_file + else: + banner_file = self.env.banner_file + if banner_file: + try: + with codecs.open(banner_file, 'r', 'utf-8') as f: + banner = f.read() + except (OSError, UnicodeDecodeError) as e: + self.logger.error(f'reading banner file {banner_file}: {e!r}') + else: + banner = await self.replaced_banner(banner) + return banner async def peers_subscribe(self): """Return the server peers as a list of (ip, host, details) tuples.""" self.bump_cost(1.0) return self.peer_mgr.on_peers_subscribe(self.is_tor()) - async def address_status(self, hashX): - """Returns an address status. - - Status is a hex string, but must be None if there is no history. 
- """ - # Note history is ordered and mempool unordered in electrum-server - # For mempool, height is -1 if it has unconfirmed inputs, otherwise 0 - db_history, cost = await self.session_mgr.limited_history(hashX) - mempool = await self.mempool.transaction_summaries(hashX) - - status = ''.join(f'{hash_to_hex_str(tx_hash)}:' - f'{height:d}:' - for tx_hash, height in db_history) - status += ''.join(f'{hash_to_hex_str(tx.hash)}:' - f'{-tx.has_unconfirmed_inputs:d}:' - for tx in mempool) - - # Add status hashing cost - self.bump_cost(cost + 0.1 + len(status) * 0.00002) - - if status: - status = sha256(status.encode()).hex() - else: - status = None - - if mempool: - self.mempool_statuses[hashX] = status - else: - self.mempool_statuses.pop(hashX, None) - - return status - - async def subscription_address_status(self, hashX): + async def subscription_address_status(self, hash_x): """As for address_status, but if it can't be calculated the subscription is discarded.""" try: - return await self.address_status(hashX) + return await self.ss.address_status(hash_x) except RPCError: - self.unsubscribe_hashX(hashX) + self.ss.unsubscribe_hash_x(hash_x) return None - async def hashX_listunspent(self, hashX): - """Return the list of UTXOs of a script hash, including mempool - effects.""" - utxos = await self.db.all_utxos(hashX) - utxos = sorted(utxos) - utxos.extend(await self.mempool.unordered_UTXOs(hashX)) - self.bump_cost(1.0 + len(utxos) / 50) - spends = await self.mempool.potential_spends(hashX) - returned_utxos = [] - for utxo in utxos: - if (utxo.tx_hash, utxo.tx_pos) in spends: - continue - atomicals = self.db.get_atomicals_by_utxo(utxo, True) - atomicals_basic_infos = {} - for atomical_id in atomicals: - # This call is efficient in that it's cached underneath. - # Now we only show the atomical id and its corresponding value - # because it can always be fetched separately which is more efficient. 
- # Todo need to combine mempool atomicals - atomical_id_compact = location_id_bytes_to_compact(atomical_id) - location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) - atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id) - - returned_utxos.append({ - 'txid': hash_to_hex_str(utxo.tx_hash), - 'tx_hash': hash_to_hex_str(utxo.tx_hash), - 'index': utxo.tx_pos, - 'tx_pos': utxo.tx_pos, - 'vout': utxo.tx_pos, - 'height': utxo.height, - 'value': utxo.value, - 'atomicals': atomicals_basic_infos - }) - return returned_utxos - - # Get atomical_id from an atomical inscription number - def get_atomical_id_by_atomical_number(self, atomical_number): - return self.db.get_atomical_id_by_atomical_number(atomical_number) - - # Get atomicals base information from db or placeholder information if mint is still in the mempool and unconfirmed - async def atomical_id_get(self, compact_atomical_id): - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) - if atomical: - return atomical - # Check mempool - atomical_in_mempool = await self.mempool.get_atomical_mint(atomical_id) - if atomical_in_mempool == None: - raise RPCError(BAD_REQUEST, f'"{compact_atomical_id}" is not found') - return atomical_in_mempool - - async def atomical_id_get_ft_info(self, compact_atomical_id): - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) - if atomical['subtype'] == 'decentralized': - atomical = await self.session_mgr.bp.get_dft_mint_info_rpc_format_by_atomical_id(atomical_id) - elif atomical['subtype'] == 'direct': - atomical = await self.session_mgr.bp.get_ft_mint_info_rpc_format_by_atomical_id(atomical_id) - else: - raise RPCError(BAD_REQUEST, f'"{compact_atomical_id}" is not a fungible token (FT)') - - if atomical: - return atomical - # Check mempool - atomical_in_mempool = await self.mempool.get_atomical_mint(atomical_id) - if atomical_in_mempool == None: - raise RPCError(BAD_REQUEST, f'"{compact_atomical_id}" is not found') - return atomical_in_mempool - - async def atomical_id_get_state(self, compact_atomical_id, Verbose=False): - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.atomical_id_get(compact_atomical_id) - height = self.session_mgr.bp.height - self.db.populate_extended_mod_state_latest_atomical_info(atomical_id, atomical, height) - await self.db.populate_extended_location_atomical_info(atomical_id, atomical) - return atomical - - async def atomical_id_get_state_history(self, compact_atomical_id): - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.atomical_id_get(compact_atomical_id) - height = self.session_mgr.bp.height - self.db.populate_extended_mod_state_history_atomical_info(atomical_id, atomical, height) - await self.db.populate_extended_location_atomical_info(atomical_id, atomical) - return atomical - - async def atomical_id_get_events(self, compact_atomical_id): - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.atomical_id_get(compact_atomical_id) - height = self.session_mgr.bp.height - self.db.populate_extended_events_atomical_info(atomical_id, atomical, height) - await self.db.populate_extended_location_atomical_info(atomical_id, atomical) - return atomical - - async def atomical_id_get_tx_history(self, 
compact_atomical_id): - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.atomical_id_get(compact_atomical_id) - history = await self.scripthash_get_history(hash_to_hex_str(double_sha256(atomical_id))) - history.sort(key=lambda x: x['height'], reverse=True) - - atomical['tx'] = { - 'history': history - } - return atomical - - async def atomical_id_get_location(self, compact_atomical_id): - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.atomical_id_get(compact_atomical_id) - await self.db.populate_extended_location_atomical_info(atomical_id, atomical) - return atomical - - async def get_summary_info(self, atomical_hash_count=10): - - if atomical_hash_count and atomical_hash_count > 100000: - atomical_hash_count = 100000 - - db_height = self.db.db_height - last_block_hash = self.db.get_atomicals_block_hash(db_height) - ret = { - 'coin': self.env.coin.__name__, - 'network': self.coin.NET, - 'height': db_height, - 'block_tip': hash_to_hex_str(self.db.db_tip), - 'server_time': datetime.datetime.now().isoformat(), - 'atomicals_block_tip': last_block_hash, - 'atomical_count': self.db.db_atomical_count - } - - list_hashes = {} - ret['atomicals_block_hashes'] = {} - # ret['atomicals_block_hashes'][db_height] = last_block_hash - for i in range(atomical_hash_count): - next_db_height = db_height - i - nextblockhash = self.db.get_atomicals_block_hash(next_db_height) - ret['atomicals_block_hashes'][next_db_height] = nextblockhash - return ret - - async def atomicals_list_get(self, limit, offset, asc): - atomicals = await self.db.get_atomicals_list(limit, offset, asc) - atomicals_populated = [] - for atomical_id in atomicals: - atomical = await self.atomical_id_get(location_id_bytes_to_compact(atomical_id)) - atomicals_populated.append(atomical) - return {'global': await self.get_summary_info(), 'result': atomicals_populated} - - async def atomicals_num_to_id(self, limit, offset, asc): - atomicals_num_to_id_map = await self.db.get_num_to_id(limit, offset, asc) - atomicals_num_to_id_map_reformatted = {} - for num, id in atomicals_num_to_id_map.items(): - atomicals_num_to_id_map_reformatted[num] = location_id_bytes_to_compact(id) - return {'global': await self.get_summary_info(), 'result': atomicals_num_to_id_map_reformatted} - - async def atomicals_block_hash(self, height): - if not height: - height = self.session_mgr.bp.height - block_hash = self.db.get_atomicals_block_hash(height) - return {'result': block_hash} - - async def atomicals_block_txs(self, height): - tx_list = self.session_mgr.bp.get_atomicals_block_txs(height) - return {'global': await self.get_summary_info(), 'result': tx_list} - - async def hashX_subscribe(self, hashX, alias): - # Store the subscription only after address_status succeeds - result = await self.address_status(hashX) - self.hashX_subs[hashX] = alias - return result - - async def get_balance(self, hashX): - utxos = await self.db.all_utxos(hashX) - confirmed = sum(utxo.value for utxo in utxos) - unconfirmed = await self.mempool.balance_delta(hashX) - self.bump_cost(1.0 + len(utxos) / 50) - return {'confirmed': confirmed, 'unconfirmed': unconfirmed} - - async def scripthash_get_balance(self, scripthash): - """Return the confirmed and unconfirmed balance of a scripthash.""" - hashX = scripthash_to_hashX(scripthash) - return await self.get_balance(hashX) - - async def unconfirmed_history(self, hashX): - # Note unconfirmed history is unordered in electrum-server - # height is -1 if it has unconfirmed 
inputs, otherwise 0 - result = [{'tx_hash': hash_to_hex_str(tx.hash), - 'height': -tx.has_unconfirmed_inputs, - 'fee': tx.fee} - for tx in await self.mempool.transaction_summaries(hashX)] - self.bump_cost(0.25 + len(result) / 50) - return result - - async def confirmed_and_unconfirmed_history(self, hashX): - # Note history is ordered but unconfirmed is unordered in e-s - history, cost = await self.session_mgr.limited_history(hashX) - self.bump_cost(cost) - conf = [{'tx_hash': hash_to_hex_str(tx_hash), 'height': height} - for tx_hash, height in history] - return conf + await self.unconfirmed_history(hashX) - - async def atomicals_listscripthash(self, scripthash, verbose=False): - """Return the list of Atomical UTXOs for an address""" - hashX = scripthash_to_hashX(scripthash) - return await self.hashX_listscripthash_atomicals(hashX, verbose) - - async def atomicals_list(self, offset, limit, asc): - """Return the list of atomicals order by reverse atomical number""" - return await self.atomicals_list_get(offset, limit, asc) - - async def atomicals_get(self, compact_atomical_id_or_atomical_number): - compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) - return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get(compact_atomical_id)} - - async def atomicals_dump(self): - if True: - self.db.dump() - return {'result': True} - # else: - # return {'result': False} - - async def atomicals_get_dft_mints(self, compact_atomical_id, limit=100, offset=0): - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - entries = self.session_mgr.bp.get_distmints_by_atomical_id(atomical_id, limit, offset) - return {'global': await self.get_summary_info(), 'result': entries} - - async def atomicals_get_ft_info(self, compact_atomical_id_or_atomical_number): - compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) - return {'global': await self.get_summary_info(), - 'result': await self.atomical_id_get_ft_info(compact_atomical_id)} - - async def atomicals_get_global(self, hashes=10): - return {'global': await self.get_summary_info(hashes)} - - async def atomicals_get_location(self, compact_atomical_id_or_atomical_number): - compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) - return {'global': await self.get_summary_info(), - 'result': await self.atomical_id_get_location(compact_atomical_id)} - - async def atomical_get_state(self, compact_atomical_id_or_atomical_number, Verbose=False): - compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) - return {'global': await self.get_summary_info(), - 'result': await self.atomical_id_get_state(compact_atomical_id, Verbose)} - - async def atomical_get_state_history(self, compact_atomical_id_or_atomical_number): - compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) - return {'global': await self.get_summary_info(), - 'result': await self.atomical_id_get_state_history(compact_atomical_id)} - - async def atomical_get_events(self, compact_atomical_id_or_atomical_number): - compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) - return {'global': await self.get_summary_info(), - 'result': await self.atomical_id_get_events(compact_atomical_id)} - - def atomical_resolve_id(self, compact_atomical_id_or_atomical_number): - compact_atomical_id = compact_atomical_id_or_atomical_number - if not isinstance(compact_atomical_id_or_atomical_number, int) 
and is_compact_atomical_id( - compact_atomical_id_or_atomical_number): - assert_atomical_id(compact_atomical_id) - else: - found_atomical_id = self.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number) - if not found_atomical_id: - raise RPCError(BAD_REQUEST, f'not found atomical: {compact_atomical_id_or_atomical_number}') - compact_atomical_id = location_id_bytes_to_compact(found_atomical_id) - return compact_atomical_id - - async def atomicals_get_tx_history(self, compact_atomical_id_or_atomical_number): - """Return the history of an Atomical``` - atomical_id: the mint transaction hash + 'i' of the atomical id - verbose: to determine whether to print extended information - """ - compact_atomical_id = compact_atomical_id_or_atomical_number - if isinstance(compact_atomical_id_or_atomical_number, int) != True and is_compact_atomical_id( - compact_atomical_id_or_atomical_number): - assert_atomical_id(compact_atomical_id) - else: - compact_atomical_id = location_id_bytes_to_compact( - self.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number)) - return {'global': await self.get_summary_info(), - 'result': await self.atomical_id_get_tx_history(compact_atomical_id)} - - async def atomicals_get_by_ticker(self, ticker): - height = self.session_mgr.bp.height - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_ticker(ticker, height) - formatted_entries = format_name_type_candidates_to_rpc(all_entries, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - all_entries)) - - if candidate_atomical_id: - candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) - - found_atomical_id = None - if status == 'verified': - found_atomical_id = candidate_atomical_id - - return_result = { - 'status': status, - 'candidate_atomical_id': candidate_atomical_id, - 'atomical_id': found_atomical_id, - 'candidates': formatted_entries, - 'type': 'ticker' - } - return { - 'result': return_result - } - - async def atomicals_get_by_container(self, container): - if not isinstance(container, str): - raise RPCError(BAD_REQUEST, f'empty container') - height = self.session_mgr.bp.height - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, height) - formatted_entries = format_name_type_candidates_to_rpc(all_entries, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - all_entries)) - - if candidate_atomical_id: - candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) - - found_atomical_id = None - if status == 'verified': - found_atomical_id = candidate_atomical_id - - return_result = { - 'status': status, - 'candidate_atomical_id': candidate_atomical_id, - 'atomical_id': found_atomical_id, - 'candidates': formatted_entries, - 'type': 'container' - } - return { - 'result': return_result - } - - def auto_populate_container_regular_items_fields(self, items): - if not items or not isinstance(items, dict): - return {} - for item, value in items.items(): - provided_id = value.get('id') - value['status'] = 'verified' - if provided_id and isinstance(provided_id, bytes) and len(provided_id) == 36: - value['$id'] = location_id_bytes_to_compact(provided_id) - return auto_encode_bytes_elements(items) - - def auto_populate_container_dmint_items_fields(self, items): - if not items or not isinstance(items, dict): - return {} - for item, value in items.items(): - provided_id = value.get('id') - if provided_id and isinstance(provided_id, bytes) and len(provided_id) == 36: - 
value['$id'] = location_id_bytes_to_compact(provided_id) - return auto_encode_bytes_elements(items) - - async def atomicals_get_container_items(self, container, limit, offset): - if not isinstance(container, str): - raise RPCError(BAD_REQUEST, f'empty container') - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, - self.session_mgr.bp.height) - found_atomical_id = None - if status == 'verified': - found_atomical_id = candidate_atomical_id - else: - raise RPCError(BAD_REQUEST, f'Container not found') - - compact_atomical_id = location_id_bytes_to_compact(found_atomical_id) - container_info = await self.atomical_id_get(compact_atomical_id) - # If it is a dmint container then there is no items field, instead construct it from the dmitems - container_dmint_status = container_info.get('$container_dmint_status') - items = [] - if container_dmint_status: - if limit > 100: - limit = 100 - if offset < 0: - offset = 0 - height = self.session_mgr.bp.height - items = await self.session_mgr.bp.get_effective_dmitems_paginated(found_atomical_id, limit, offset, height) - return { - 'result': { - 'container': container_info, - 'item_data': { - 'limit': limit, - 'offset': offset, - 'type': 'dmint', - 'items': self.auto_populate_container_dmint_items_fields(items) - } - } - } - else: - container_mod_history = self.session_mgr.bp.get_mod_history(found_atomical_id, self.session_mgr.bp.height) - current_height_latest_state = calculate_latest_state_from_mod_history(container_mod_history) - items = current_height_latest_state.get('items', []) - return { - 'result': { - 'container': container_info, - 'item_data': { - 'limit': limit, - 'offset': offset, - 'type': 'regular', - 'items': self.auto_populate_container_regular_items_fields(items) - } - } - } - - async def atomicals_get_by_container_item(self, container, item_name): - if not isinstance(container, str): - raise RPCError(BAD_REQUEST, f'empty container') - height = self.session_mgr.bp.height - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, height) - found_atomical_id = None - formatted_entries = format_name_type_candidates_to_rpc(all_entries, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - all_entries)) - if status == 'verified': - found_atomical_id = candidate_atomical_id - else: - self.logger.info(f'formatted_entries {formatted_entries}') - raise RPCError(BAD_REQUEST, f'Container does not exist') - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(found_atomical_id, - item_name, height) - found_item_atomical_id = None - formatted_entries = format_name_type_candidates_to_rpc(all_entries, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - all_entries)) - if candidate_atomical_id: - candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) - if status == 'verified': - found_item_atomical_id = candidate_atomical_id - return_result = { - 'status': status, - 'candidate_atomical_id': candidate_atomical_id, - 'atomical_id': found_item_atomical_id, - 'candidates': formatted_entries, - 'type': 'item' - } - return { - 'result': return_result - } - - async def atomicals_get_by_container_item_validation(self, container, item_name, bitworkc, bitworkr, main_name, - main_hash, proof, check_without_sealed): - if not isinstance(container, str): - raise RPCError(BAD_REQUEST, f'empty container') - height = self.session_mgr.bp.height - status, candidate_atomical_id, all_entries = 
self.session_mgr.bp.get_effective_container(container, height) - found_parent_atomical_id = None - formatted_entries = format_name_type_candidates_to_rpc(all_entries, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - all_entries)) - if status == 'verified': - found_parent_atomical_id = candidate_atomical_id - else: - raise RPCError(BAD_REQUEST, f'Container does not exist') - compact_atomical_id = location_id_bytes_to_compact(found_parent_atomical_id) - container_info = await self.atomical_id_get(compact_atomical_id) - # If it is a dmint container then there is no items field, instead construct it from the dmitems - container_dmint_status = container_info.get('$container_dmint_status') - errors = container_dmint_status.get('errors') - if not container_dmint_status: - raise RPCError(BAD_REQUEST, f'Container dmint status not exist') - if container_dmint_status.get('status') != 'valid': - errors = container_dmint_status.get('errors') - if check_without_sealed and errors and len(errors) == 1 and errors[0] == 'container not sealed': - pass - else: - raise RPCError(BAD_REQUEST, f'Container dmint status is invalid') - - dmint = container_dmint_status.get('dmint') - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(found_parent_atomical_id, - item_name, height) - found_item_atomical_id = None - formatted_entries = format_name_type_candidates_to_rpc(all_entries, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - all_entries)) - if candidate_atomical_id: - candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) - if status == 'verified': - found_item_atomical_id = candidate_atomical_id - - # validate the proof data nonetheless - if not proof or not isinstance(proof, list) or len(proof) == 0: - raise RPCError(BAD_REQUEST, f'Proof must be provided') - - applicable_rule, state_at_height = self.session_mgr.bp.get_applicable_rule_by_height(found_parent_atomical_id, - item_name, - height - MINT_SUBNAME_RULES_BECOME_EFFECTIVE_IN_BLOCKS, - DMINT_PATH) - proof_valid, target_vector, target_hash = validate_merkle_proof_dmint(dmint['merkle'], item_name, bitworkc, - bitworkr, main_name, main_hash, proof) - if applicable_rule and applicable_rule.get('matched_rule'): - applicable_rule = applicable_rule.get('matched_rule') - - return_result = { - 'status': status, - 'candidate_atomical_id': candidate_atomical_id, - 'atomical_id': found_item_atomical_id, - 'candidates': formatted_entries, - 'type': 'item', - 'applicable_rule': applicable_rule, - 'proof_valid': proof_valid, - 'target_vector': target_vector, - 'target_hash': target_hash, - 'dmint': state_at_height.get('dmint') - } - return { - 'result': return_result - } - - async def atomicals_get_by_realm(self, name): - height = self.session_mgr.bp.height - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_realm(name, height) - formatted_entries = format_name_type_candidates_to_rpc(all_entries, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - all_entries)) - - if candidate_atomical_id: - candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) - - found_atomical_id = None - if status == 'verified': - found_atomical_id = candidate_atomical_id - - return_result = { - 'status': status, - 'candidate_atomical_id': candidate_atomical_id, - 'atomical_id': found_atomical_id, - 'candidates': formatted_entries, - 'type': 'realm' - } - return { - 'result': return_result - } - - async def atomicals_get_by_subrealm(self, 
parent_compact_atomical_id_or_atomical_number, name): - height = self.session_mgr.bp.height - compact_atomical_id_parent = self.atomical_resolve_id(parent_compact_atomical_id_or_atomical_number) - atomical_id_parent = compact_to_location_id_bytes(compact_atomical_id_parent) - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_subrealm(atomical_id_parent, - name, height) - formatted_entries = format_name_type_candidates_to_rpc(all_entries, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - all_entries)) - - if candidate_atomical_id: - candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) - - found_atomical_id = None - if status == 'verified': - found_atomical_id = candidate_atomical_id - - return_result = { - 'status': status, - 'candidate_atomical_id': candidate_atomical_id, - 'atomical_id': found_atomical_id, - 'candidates': formatted_entries, - 'type': 'subrealm' - } - return { - 'result': return_result - } - - async def atomicals_get_by_dmitem(self, parent_compact_atomical_id_or_atomical_number, name): - height = self.session_mgr.bp.height - compact_atomical_id_parent = self.atomical_resolve_id(parent_compact_atomical_id_or_atomical_number) - atomical_id_parent = compact_to_location_id_bytes(compact_atomical_id_parent) - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(atomical_id_parent, name, - height) - formatted_entries = format_name_type_candidates_to_rpc(all_entries, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - all_entries)) - - if candidate_atomical_id: - candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) - - found_atomical_id = None - if status == 'verified': - found_atomical_id = candidate_atomical_id - - return_result = { - 'status': status, - 'candidate_atomical_id': candidate_atomical_id, - 'atomical_id': found_atomical_id, - 'candidates': formatted_entries, - 'type': 'dmitem' - } - return { - 'result': return_result - } - - # Get a summary view of a realm and if it's allowing mints and what parts already existed of a subrealm - async def atomicals_get_realm_info(self, full_name, Verbose=False): - if not full_name or not isinstance(full_name, str): - raise RPCError(BAD_REQUEST, f'invalid input full_name: {full_name}') - full_name = full_name.lower() - split_names = full_name.split('.') - total_name_parts = len(split_names) - level = 0 - last_found_realm_atomical_id = None - last_found_realm = None - realms_path = [] - latest_all_entries_candidates = [] - height = self.session_mgr.bp.height - for name_part in split_names: - if level == 0: - realm_status, last_found_realm, latest_all_entries_candidates = self.session_mgr.bp.get_effective_realm( - name_part, height) - else: - self.logger.info(f'atomicals_get_realm_info {last_found_realm} {name_part}') - realm_status, last_found_realm, latest_all_entries_candidates = self.session_mgr.bp.get_effective_subrealm( - last_found_realm, name_part, height) - # stops when it does not found the realm component - if realm_status != 'verified': - break - # Save the latest realm (could be the top level realm, or the parent of a subrealm, or even the subrealm itself) - last_found_realm_atomical_id = last_found_realm - # Add it to the list of paths - realms_path.append({ - 'atomical_id': location_id_bytes_to_compact(last_found_realm), - 'name_part': name_part, - 'candidates': latest_all_entries_candidates - }) - level += 1 - - joined_name = '' - is_first_name_part = True - for name_element in realms_path: - if 
is_first_name_part: - is_first_name_part = False - else: - joined_name += '.' - joined_name += name_element['name_part'] - # Nothing was found - realms_path_len = len(realms_path) - if realms_path_len == 0: - return {'result': { - 'atomical_id': None, - 'top_level_realm_atomical_id': None, - 'top_level_realm_name': None, - 'nearest_parent_realm_atomical_id': None, - 'nearest_parent_realm_name': None, - 'request_full_realm_name': full_name, - 'found_full_realm_name': None, - 'missing_name_parts': full_name, - 'candidates': format_name_type_candidates_to_rpc(latest_all_entries_candidates, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - latest_all_entries_candidates))} - } - # Populate the subrealm minting rules for a parent atomical - that = self - - def populate_rules_response_struct(parent_atomical_id, struct_to_populate, Verbose): - current_height = that.session_mgr.bp.height - subrealm_mint_mod_history = that.session_mgr.bp.get_mod_history(parent_atomical_id, current_height) - current_height_latest_state = calculate_latest_state_from_mod_history(subrealm_mint_mod_history) - current_height_rules_list = validate_rules_data(current_height_latest_state.get(SUBREALM_MINT_PATH, None)) - nearest_parent_realm_subrealm_mint_allowed = False - struct_to_populate['nearest_parent_realm_subrealm_mint_rules'] = { - 'nearest_parent_realm_atomical_id': location_id_bytes_to_compact(parent_atomical_id), - 'current_height': current_height, - 'current_height_rules': current_height_rules_list - } - if current_height_rules_list and len(current_height_rules_list) > 0: - nearest_parent_realm_subrealm_mint_allowed = True - struct_to_populate[ - 'nearest_parent_realm_subrealm_mint_allowed'] = nearest_parent_realm_subrealm_mint_allowed - - # - # - # - # At least the top level realm was found if we got this far - # - # - # The number of realms returned and name components is equal, therefore the subrealm was found correctly - if realms_path_len == total_name_parts: - nearest_parent_realm_atomical_id = None - nearest_parent_realm_name = None - top_level_realm = realms_path[0]['atomical_id'] - top_level_realm_name = realms_path[0]['name_part'] - if realms_path_len >= 2: - nearest_parent_realm_atomical_id = realms_path[-2]['atomical_id'] - nearest_parent_realm_name = realms_path[-2]['name_part'] - elif realms_path_len == 1: - nearest_parent_realm_atomical_id = top_level_realm - nearest_parent_realm_name = top_level_realm_name - final_subrealm_name = split_names[-1] - applicable_rule_map = self.session_mgr.bp.build_applicable_rule_map(latest_all_entries_candidates, - compact_to_location_id_bytes( - nearest_parent_realm_atomical_id), - final_subrealm_name) - return_struct = { - 'atomical_id': realms_path[-1]['atomical_id'], - 'top_level_realm_atomical_id': top_level_realm, - 'top_level_realm_name': top_level_realm_name, - 'nearest_parent_realm_atomical_id': nearest_parent_realm_atomical_id, - 'nearest_parent_realm_name': nearest_parent_realm_name, - 'request_full_realm_name': full_name, - 'found_full_realm_name': joined_name, - 'missing_name_parts': None, - 'candidates': format_name_type_candidates_to_rpc(latest_all_entries_candidates, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - latest_all_entries_candidates)) - } - populate_rules_response_struct(compact_to_location_id_bytes(nearest_parent_realm_atomical_id), - return_struct, Verbose) - return {'result': return_struct} - - # The number of realms and components do not match, that is because at least the top level realm or intermediate subrealm 
was found - # But the final subrealm does not exist yet - # if realms_path_len < total_name_parts: - # It is known if we got this far that realms_path_len < total_name_parts - nearest_parent_realm_atomical_id = None - nearest_parent_realm_name = None - top_level_realm = realms_path[0]['atomical_id'] - top_level_realm_name = realms_path[0]['name_part'] - if realms_path_len >= 2: - nearest_parent_realm_atomical_id = realms_path[-1]['atomical_id'] - nearest_parent_realm_name = realms_path[-1]['name_part'] - elif realms_path_len == 1: - nearest_parent_realm_atomical_id = top_level_realm - nearest_parent_realm_name = top_level_realm_name - - missing_name_parts = '.'.join(split_names[len(realms_path):]) - final_subrealm_name = split_names[-1] - applicable_rule_map = self.session_mgr.bp.build_applicable_rule_map(latest_all_entries_candidates, - compact_to_location_id_bytes( - nearest_parent_realm_atomical_id), - final_subrealm_name) - return_struct = { - 'atomical_id': None, - 'top_level_realm_atomical_id': top_level_realm, - 'top_level_realm_name': top_level_realm_name, - 'nearest_parent_realm_atomical_id': nearest_parent_realm_atomical_id, - 'nearest_parent_realm_name': nearest_parent_realm_name, - 'request_full_realm_name': full_name, - 'found_full_realm_name': joined_name, - 'missing_name_parts': missing_name_parts, - 'final_subrealm_name': final_subrealm_name, - 'candidates': format_name_type_candidates_to_rpc_for_subname(latest_all_entries_candidates, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - latest_all_entries_candidates)) - } - if Verbose: - populate_rules_response_struct(compact_to_location_id_bytes(nearest_parent_realm_atomical_id), - return_struct, Verbose) - return {'result': return_struct} - - # Perform a search for tickers, containers, and realms - def atomicals_search_name_template(self, db_prefix, name_type_str, parent_prefix=None, prefix=None, Reverse=False, - Limit=1000, Offset=0, is_verified_only=False): - db_entries = self.db.get_name_entries_template_limited(db_prefix, parent_prefix, prefix, Reverse, Limit, Offset) - formatted_results = [] - for item in db_entries: - status = None - if name_type_str == "ticker": - status, _, _ = self.session_mgr.bp.get_effective_name_template(b'tick', item['name'], - self.session_mgr.bp.height, - self.session_mgr.bp.ticker_data_cache) - elif name_type_str == "realm": - status, _, _ = self.session_mgr.bp.get_effective_name_template(b'rlm', item['name'], - self.session_mgr.bp.height, - self.session_mgr.bp.realm_data_cache) - elif name_type_str == "collection": - status, _, _ = self.session_mgr.bp.get_effective_name_template(b'co', item['name'], - self.session_mgr.bp.height, - self.session_mgr.bp.container_data_cache) - elif name_type_str == "subrealm": - status, _, _ = self.session_mgr.bp.get_effective_subrealm(parent_prefix, item['name'], - self.session_mgr.bp.height) - obj = { - 'atomical_id': location_id_bytes_to_compact(item['atomical_id']), - 'tx_num': item['tx_num'], - name_type_str + '_hex': item['name_hex'], - name_type_str: item['name'], - 'status': status, - } - if is_verified_only and status == "verified": - formatted_results.append(obj) - elif not is_verified_only: - formatted_results.append(obj) - return {'result': formatted_results} - - async def atomicals_search_tickers(self, prefix=None, Reverse=False, Limit=100, Offset=0, is_verified_only=False): - if isinstance(prefix, str): - prefix = prefix.encode() - return self.atomicals_search_name_template(b'tick', 'ticker', None, prefix, Reverse, Limit, Offset, - 
is_verified_only) - - async def atomicals_search_realms(self, prefix=None, Reverse=False, Limit=100, Offset=0, is_verified_only=False): - if isinstance(prefix, str): - prefix = prefix.encode() - return self.atomicals_search_name_template(b'rlm', 'realm', None, prefix, Reverse, Limit, Offset, - is_verified_only) - - async def atomicals_search_subrealms(self, parent_realm_id_compact, prefix=None, Reverse=False, Limit=100, Offset=0, - is_verified_only=False): - parent_realm_id_long_form = compact_to_location_id_bytes(parent_realm_id_compact) - if isinstance(prefix, str): - prefix = prefix.encode() - return self.atomicals_search_name_template(b'srlm', 'subrealm', parent_realm_id_long_form, prefix, Reverse, - Limit, Offset, is_verified_only) - - async def atomicals_search_containers(self, prefix=None, Reverse=False, Limit=100, Offset=0, - is_verified_only=False): - if isinstance(prefix, str): - prefix = prefix.encode() - return self.atomicals_search_name_template(b'co', 'collection', None, prefix, Reverse, Limit, Offset, - is_verified_only) - - async def atomicals_at_location(self, compact_location_id): - """Return the Atomicals at a specific location id``` - """ - atomical_basic_infos = [] - atomicals_found_at_location = self.db.get_atomicals_by_location_extended_info_long_form( - compact_to_location_id_bytes(compact_location_id)) - for atomical_id in atomicals_found_at_location['atomicals']: - atomical_basic_info = self.session_mgr.bp.get_atomicals_id_mint_info_basic_struct(atomical_id) - atomical_basic_info['value'] = self.db.get_uxto_atomicals_value( - compact_to_location_id_bytes(compact_location_id), - atomical_id - ) - atomical_basic_infos.append(atomical_basic_info) - return { - 'location_info': atomicals_found_at_location['location_info'], - 'atomicals': atomical_basic_infos - } - - async def atomicals_get_ft_balances(self, scripthash): - """Return the FT balances for a scripthash address""" - hashX = scripthash_to_hashX(scripthash) - return await self.hashX_ft_balances_atomicals(hashX) - - async def atomicals_get_nft_balances(self, scripthash): - """Return the NFT balances for a scripthash address""" - hashX = scripthash_to_hashX(scripthash) - return await self.hashX_nft_balances_atomicals(hashX) - - async def atomicals_get_holders(self, compact_atomical_id, limit=50, offset=0): - """Return the holder by a specific location id``` - """ - formatted_results = [] - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.atomical_id_get(compact_atomical_id) - atomical = await self.db.populate_extended_atomical_holder_info(atomical_id, atomical) - if atomical["type"] == "FT": - if atomical.get("$mint_mode", "fixed") == "fixed": - max_supply = atomical.get('$max_supply', 0) - else: - max_supply = atomical.get('$max_supply', -1) - if max_supply < 0: - mint_amount = atomical.get("mint_info", {}).get("args", {}).get("mint_amount") - max_supply = DFT_MINT_MAX_MAX_COUNT_DENSITY * mint_amount - for holder in atomical.get("holders", [])[offset:offset + limit]: - percent = holder['holding'] / max_supply - formatted_results.append({ - "percent": percent, - "address": get_address_from_output_script(bytes.fromhex(holder['script'])), - "holding": holder["holding"] - }) - elif atomical["type"] == "NFT": - for holder in atomical.get("holders", [])[offset:offset + limit]: - formatted_results.append({ - "address": get_address_from_output_script(bytes.fromhex(holder['script'])), - "holding": holder["holding"] - }) - return formatted_results - - async def 
hashX_ft_balances_atomicals(self, hashX): - utxos = await self.db.all_utxos(hashX) - utxos = sorted(utxos) - # Comment out the utxos for now and add it in later - # utxos.extend(await self.mempool.unordered_UTXOs(hashX)) - self.bump_cost(1.0 + len(utxos) / 50) - spends = [] # await self.mempool.potential_spends(hashX) - returned_utxos = [] - atomicals_id_map = {} - for utxo in utxos: - if (utxo.tx_hash, utxo.tx_pos) in spends: - continue - atomicals = self.db.get_atomicals_by_utxo(utxo, True) - atomicals_basic_infos = {} - for atomical_id in atomicals: - # This call is efficient in that it's cached underneath. - # Now we only show the atomical id and its corresponding value - # because it can always be fetched separately which is more efficient. - atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) - atomical_id_compact = location_id_bytes_to_compact(atomical_id) - atomicals_id_map[atomical_id_compact] = atomical_basic_info - location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) - atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id) - if len(atomicals) > 0: - returned_utxos.append({ - 'txid': hash_to_hex_str(utxo.tx_hash), - 'index': utxo.tx_pos, - 'vout': utxo.tx_pos, - 'height': utxo.height, - 'value': utxo.value, - 'atomicals': atomicals_basic_infos - }) - # Aggregate balances - return_struct = { - 'balances': {} - } - for returned_utxo in returned_utxos: - for atomical_id_entry_compact in returned_utxo['atomicals']: - atomical_id_basic_info = atomicals_id_map[atomical_id_entry_compact] - atomical_id_compact = atomical_id_basic_info['atomical_id'] - assert (atomical_id_compact == atomical_id_entry_compact) - if atomical_id_basic_info.get('type') == 'FT': - if return_struct['balances'].get(atomical_id_compact) is None: - return_struct['balances'][atomical_id_compact] = {} - return_struct['balances'][atomical_id_compact]['id'] = atomical_id_compact - return_struct['balances'][atomical_id_compact]['ticker'] = atomical_id_basic_info.get('$ticker') - return_struct['balances'][atomical_id_compact]['confirmed'] = 0 - if returned_utxo['height'] > 0: - return_struct['balances'][atomical_id_compact]['confirmed'] += returned_utxo['atomicals'][ - atomical_id_compact] - return return_struct - - async def hashX_nft_balances_atomicals(self, hashX): - Verbose = False - utxos = await self.db.all_utxos(hashX) - utxos = sorted(utxos) - # Comment out the utxos for now and add it in later - # utxos.extend(await self.mempool.unordered_UTXOs(hashX)) - self.bump_cost(1.0 + len(utxos) / 50) - spends = [] # await self.mempool.potential_spends(hashX) - returned_utxos = [] - atomicals_id_map = {} - for utxo in utxos: - if (utxo.tx_hash, utxo.tx_pos) in spends: - continue - atomicals = self.db.get_atomicals_by_utxo(utxo, True) - atomicals_basic_infos = {} - for atomical_id in atomicals: - # This call is efficient in that it's cached underneath. - # Now we only show the atomical id and its corresponding value - # because it can always be fetched separately which is more efficient. 
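The aggregation step at the end of hashX_ft_balances_atomicals() above (mirrored for NFTs below) folds the per-UTXO atomical values into a single confirmed balance per token. A standalone sketch of that fold, assuming utxo dicts shaped like the ones built in these loops; the helper name and inputs are illustrative, not part of the server API:

def aggregate_ft_balances(returned_utxos, atomicals_id_map):
    balances = {}
    for utxo in returned_utxos:
        for atomical_id, value in utxo['atomicals'].items():
            info = atomicals_id_map[atomical_id]
            if info.get('type') != 'FT':
                continue
            entry = balances.setdefault(
                atomical_id,
                {'id': atomical_id, 'ticker': info.get('$ticker'), 'confirmed': 0},
            )
            # Only outputs with a positive height are confirmed.
            if utxo['height'] > 0:
                entry['confirmed'] += value
    return balances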
- atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) - atomical_id_compact = location_id_bytes_to_compact(atomical_id) - atomicals_id_map[atomical_id_compact] = atomical_basic_info - location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) - atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id) - if len(atomicals) > 0: - returned_utxos.append({ - 'txid': hash_to_hex_str(utxo.tx_hash), - 'index': utxo.tx_pos, - 'vout': utxo.tx_pos, - 'height': utxo.height, - 'value': utxo.value, - 'atomicals': atomicals_basic_infos - }) - # Aggregate balances - return_struct = { - 'balances': {} - } - for returned_utxo in returned_utxos: - for atomical_id_entry_compact in returned_utxo['atomicals']: - atomical_id_basic_info = atomicals_id_map[atomical_id_entry_compact] - atomical_id_compact = atomical_id_basic_info['atomical_id'] - assert (atomical_id_compact == atomical_id_entry_compact) - if atomical_id_basic_info.get('type') == 'NFT': - if return_struct['balances'].get(atomical_id_compact) is None: - return_struct['balances'][atomical_id_compact] = {} - return_struct['balances'][atomical_id_compact]['id'] = atomical_id_compact - return_struct['balances'][atomical_id_compact]['confirmed'] = 0 - if atomical_id_basic_info.get('subtype'): - return_struct['balances'][atomical_id_compact]['subtype'] = atomical_id_basic_info.get( - 'subtype') - if atomical_id_basic_info.get('$request_container'): - return_struct['balances'][atomical_id_compact][ - 'request_container'] = atomical_id_basic_info.get('$request_container') - if atomical_id_basic_info.get('$container'): - return_struct['balances'][atomical_id_compact]['container'] = atomical_id_basic_info.get( - '$container') - if atomical_id_basic_info.get('$dmitem'): - return_struct['balances'][atomical_id_compact]['dmitem'] = atomical_id_basic_info.get('$dmitem') - if atomical_id_basic_info.get('$request_dmitem'): - return_struct['balances'][atomical_id_compact]['request_dmitem'] = atomical_id_basic_info.get( - '$request_dmitem') - if atomical_id_basic_info.get('$realm'): - return_struct['balances'][atomical_id_compact]['realm'] = atomical_id_basic_info.get('$realm') - if atomical_id_basic_info.get('$request_realm'): - return_struct['balances'][atomical_id_compact]['request_realm'] = atomical_id_basic_info.get( - '$request_realm') - if atomical_id_basic_info.get('$subrealm'): - return_struct['balances'][atomical_id_compact]['subrealm'] = atomical_id_basic_info.get( - '$subrealm') - if atomical_id_basic_info.get('$request_subrealm'): - return_struct['balances'][atomical_id_compact]['request_subrealm'] = atomical_id_basic_info.get( - '$request_subrealm') - if atomical_id_basic_info.get('$full_realm_name'): - return_struct['balances'][atomical_id_compact]['full_realm_name'] = atomical_id_basic_info.get( - '$full_realm_name') - if atomical_id_basic_info.get('$parent_container'): - return_struct['balances'][atomical_id_compact]['parent_container'] = atomical_id_basic_info.get( - '$parent_container') - if atomical_id_basic_info.get('$parent_realm'): - return_struct['balances'][atomical_id_compact]['parent_realm'] = atomical_id_basic_info.get( - '$parent_realm') - if atomical_id_basic_info.get('$parent_container_name'): - return_struct['balances'][atomical_id_compact][ - 'parent_container_name'] = atomical_id_basic_info.get('$parent_container_name') - if atomical_id_basic_info.get('$bitwork'): - return_struct['balances'][atomical_id_compact]['bitwork'] = 
atomical_id_basic_info.get( - '$bitwork') - if atomical_id_basic_info.get('$parents'): - return_struct['balances'][atomical_id_compact]['parents'] = atomical_id_basic_info.get( - '$parents') - if returned_utxo['height'] > 0: - return_struct['balances'][atomical_id_compact]['confirmed'] += returned_utxo['atomicals'][atomical_id_compact] - return return_struct - - async def hashX_listscripthash_atomicals(self, hashX, Verbose=False): - utxos = await self.db.all_utxos(hashX) - utxos = sorted(utxos) - # Comment out the utxos for now and add it in later - # utxos.extend(await self.mempool.unordered_UTXOs(hashX)) - self.bump_cost(1.0 + len(utxos) / 50) - spends = [] # await self.mempool.potential_spends(hashX) - returned_utxos = [] - atomicals_id_map = {} - for utxo in utxos: - if (utxo.tx_hash, utxo.tx_pos) in spends: - continue - atomicals = self.db.get_atomicals_by_utxo(utxo, True) - atomicals_basic_infos = {} - for atomical_id in atomicals: - # This call is efficient in that it's cached underneath. - # Now we only show the atomical id and its corresponding value - # because it can always be fetched separately which is more efficient. - atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id( - atomical_id) - atomical_id_compact = location_id_bytes_to_compact(atomical_id) - atomicals_id_map[atomical_id_compact] = atomical_basic_info - location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) - atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id) - if Verbose or len(atomicals) > 0: - returned_utxos.append({ - 'txid': hash_to_hex_str(utxo.tx_hash), - 'index': utxo.tx_pos, - 'vout': utxo.tx_pos, - 'height': utxo.height, - 'value': utxo.value, - 'atomicals': atomicals_basic_infos - }) - # Aggregate balances - return_struct = { - 'global': await self.get_summary_info(), - 'atomicals': {}, - 'utxos': returned_utxos - } - - for returned_utxo in returned_utxos: - for atomical_id_entry_compact in returned_utxo['atomicals']: - atomical_id_basic_info = atomicals_id_map[atomical_id_entry_compact] - atomical_id_ref = atomical_id_basic_info['atomical_id'] - if return_struct['atomicals'].get(atomical_id_ref) is None: - return_struct['atomicals'][atomical_id_ref] = { - 'atomical_id': atomical_id_ref, - 'atomical_number': atomical_id_basic_info['atomical_number'], - 'type': atomical_id_basic_info['type'], - 'confirmed': 0, - # 'subtype': atomical_id_basic_info.get('subtype'), - 'data': atomical_id_basic_info - } - if atomical_id_basic_info.get('$realm'): - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref][ - 'request_realm_status'] = atomical_id_basic_info.get('$request_realm_status') - return_struct['atomicals'][atomical_id_ref]['request_realm'] = atomical_id_basic_info.get( - '$request_realm') - return_struct['atomicals'][atomical_id_ref]['realm'] = atomical_id_basic_info.get('$realm') - return_struct['atomicals'][atomical_id_ref]['full_realm_name'] = atomical_id_basic_info.get( - '$full_realm_name') - elif atomical_id_basic_info.get('$subrealm'): - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref][ - 'request_subrealm_status'] = atomical_id_basic_info.get('$request_subrealm_status') - return_struct['atomicals'][atomical_id_ref]['request_subrealm'] = atomical_id_basic_info.get( - '$request_subrealm') - 
return_struct['atomicals'][atomical_id_ref]['parent_realm'] = atomical_id_basic_info.get( - '$parent_realm') - return_struct['atomicals'][atomical_id_ref]['subrealm'] = atomical_id_basic_info.get( - '$subrealm') - return_struct['atomicals'][atomical_id_ref]['full_realm_name'] = atomical_id_basic_info.get( - '$full_realm_name') - elif atomical_id_basic_info.get('$dmitem'): - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref][ - 'request_dmitem_status'] = atomical_id_basic_info.get('$request_dmitem_status') - return_struct['atomicals'][atomical_id_ref]['request_dmitem'] = atomical_id_basic_info.get( - '$request_dmitem') - return_struct['atomicals'][atomical_id_ref]['parent_container'] = atomical_id_basic_info.get( - '$parent_container') - return_struct['atomicals'][atomical_id_ref]['dmitem'] = atomical_id_basic_info.get('$dmitem') - elif atomical_id_basic_info.get('$ticker'): - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['ticker_candidates'] = atomical_id_basic_info.get( - '$ticker_candidates') - return_struct['atomicals'][atomical_id_ref][ - 'request_ticker_status'] = atomical_id_basic_info.get('$request_ticker_status') - return_struct['atomicals'][atomical_id_ref]['request_ticker'] = atomical_id_basic_info.get( - '$request_ticker') - return_struct['atomicals'][atomical_id_ref]['ticker'] = atomical_id_basic_info.get('$ticker') - elif atomical_id_basic_info.get('$container'): - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref][ - 'request_container_status'] = atomical_id_basic_info.get('$request_container_status') - return_struct['atomicals'][atomical_id_ref]['container'] = atomical_id_basic_info.get( - '$container') - return_struct['atomicals'][atomical_id_ref]['request_container'] = atomical_id_basic_info.get( - '$request_container') - # Label them as candidates if they were candidates - elif atomical_id_basic_info.get('subtype') == 'request_realm': - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref][ - 'request_realm_status'] = atomical_id_basic_info.get('$request_realm_status') - return_struct['atomicals'][atomical_id_ref]['request_realm'] = atomical_id_basic_info.get( - '$request_realm') - return_struct['atomicals'][atomical_id_ref]['realm_candidates'] = atomical_id_basic_info.get( - '$realm_candidates') - elif atomical_id_basic_info.get('subtype') == 'request_subrealm': - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['subrealm_candidates'] = atomical_id_basic_info.get( - '$subrealm_candidates') - return_struct['atomicals'][atomical_id_ref][ - 'request_subrealm_status'] = atomical_id_basic_info.get('$request_subrealm_status') - return_struct['atomicals'][atomical_id_ref][ - 'request_full_realm_name'] = atomical_id_basic_info.get('$request_full_realm_name') - return_struct['atomicals'][atomical_id_ref]['request_subrealm'] = atomical_id_basic_info.get( - '$request_subrealm') - return_struct['atomicals'][atomical_id_ref]['parent_realm'] = atomical_id_basic_info.get( - '$parent_realm') - elif atomical_id_basic_info.get('subtype') == 'request_dmitem': - return_struct['atomicals'][atomical_id_ref]['subtype'] = 
atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['dmitem_candidates'] = atomical_id_basic_info.get( - '$dmitem_candidates') - return_struct['atomicals'][atomical_id_ref][ - 'request_dmitem_status'] = atomical_id_basic_info.get('$request_dmitem_status') - return_struct['atomicals'][atomical_id_ref]['request_dmitem'] = atomical_id_basic_info.get( - '$request_dmitem') - return_struct['atomicals'][atomical_id_ref]['parent_container'] = atomical_id_basic_info.get( - '$parent_container') - elif atomical_id_basic_info.get('subtype') == 'request_container': - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref][ - 'container_candidates'] = atomical_id_basic_info.get('$container_candidates') - return_struct['atomicals'][atomical_id_ref][ - 'request_container_status'] = atomical_id_basic_info.get('$request_container_status') - return_struct['atomicals'][atomical_id_ref]['request_container'] = atomical_id_basic_info.get( - '$request_container') - elif atomical_id_basic_info.get('$request_ticker_status'): - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['ticker_candidates'] = atomical_id_basic_info.get( - '$ticker_candidates') - return_struct['atomicals'][atomical_id_ref][ - 'request_ticker_status'] = atomical_id_basic_info.get('$request_ticker_status') - return_struct['atomicals'][atomical_id_ref]['request_ticker'] = atomical_id_basic_info.get( - '$request_ticker') - - if returned_utxo['height'] <= 0: - return_struct['atomicals'][atomical_id_ref]['unconfirmed'] += returned_utxo["atomicals"][atomical_id_ref] - else: - return_struct['atomicals'][atomical_id_ref]['confirmed'] += returned_utxo["atomicals"][atomical_id_ref] - - return return_struct - - async def scripthash_get_history(self, scripthash): - """Return the confirmed and unconfirmed history of a scripthash.""" - hashX = scripthash_to_hashX(scripthash) - return await self.confirmed_and_unconfirmed_history(hashX) - - async def scripthash_get_mempool(self, scripthash): - """Return the mempool transactions touching a scripthash.""" - hashX = scripthash_to_hashX(scripthash) - return await self.unconfirmed_history(hashX) - - async def scripthash_listunspent(self, scripthash): - """Return the list of UTXOs of a scripthash.""" - hashX = scripthash_to_hashX(scripthash) - return await self.hashX_listunspent(hashX) - - async def scripthash_subscribe(self, scripthash): - """Subscribe to a script hash. 
- - scripthash: the SHA256 hash of the script to subscribe to""" - hashX = scripthash_to_hashX(scripthash) - return await self.hashX_subscribe(hashX, scripthash) - - async def scripthash_unsubscribe(self, scripthash): - """Unsubscribe from a script hash.""" - self.bump_cost(0.1) - hashX = scripthash_to_hashX(scripthash) - return self.unsubscribe_hashX(hashX) is not None - - async def _merkle_proof(self, cp_height, height): - max_height = self.db.db_height - if not height <= cp_height <= max_height: - raise RPCError(BAD_REQUEST, - f'require header height {height:,d} <= ' - f'cp_height {cp_height:,d} <= ' - f'chain height {max_height:,d}') - branch, root = await self.db.header_branch_and_root(cp_height + 1, - height) - return { - 'branch': [hash_to_hex_str(elt) for elt in branch], - 'root': hash_to_hex_str(root), - } - - async def block_header(self, height, cp_height=0): - """Return a raw block header as a hexadecimal string, or as a - dictionary with a merkle proof.""" - height = non_negative_integer(height) - cp_height = non_negative_integer(cp_height) - raw_header_hex = (await self.session_mgr.raw_header(height)).hex() - self.bump_cost(1.25 - (cp_height == 0)) - if cp_height == 0: - return raw_header_hex - result = {'header': raw_header_hex} - result.update(await self._merkle_proof(cp_height, height)) - return result - - async def block_headers(self, start_height, count, cp_height=0): - """Return count concatenated block headers as hex for the main chain; - starting at start_height. - - start_height and count must be non-negative integers. At most - MAX_CHUNK_SIZE headers will be returned. - """ - start_height = non_negative_integer(start_height) - count = non_negative_integer(count) - cp_height = non_negative_integer(cp_height) - cost = count / 50 - - max_size = self.MAX_CHUNK_SIZE - count = min(count, max_size) - headers, count = await self.db.read_headers(start_height, count) - result = {'hex': headers.hex(), 'count': count, 'max': max_size} - if count and cp_height: - cost += 1.0 - last_height = start_height + count - 1 - result.update(await self._merkle_proof(cp_height, last_height)) - self.bump_cost(cost) - return result - def is_tor(self): """Try to detect if the connection is to a tor hidden service we are running.""" @@ -1531,86 +250,6 @@ async def replaced_banner(self, banner): banner = banner.replace(*pair) return banner - async def donation_address(self): - """Return the donation address as a string, empty if there is none.""" - self.bump_cost(0.1) - return self.env.donation_address - - async def banner(self): - """Return the server banner text.""" - banner = f'You are connected to an {electrumx_version} server.' - self.bump_cost(0.5) - - if self.is_tor(): - banner_file = self.env.tor_banner_file - else: - banner_file = self.env.banner_file - if banner_file: - try: - with codecs.open(banner_file, 'r', 'utf-8') as f: - banner = f.read() - except (OSError, UnicodeDecodeError) as e: - self.logger.error(f'reading banner file {banner_file}: {e!r}') - else: - banner = await self.replaced_banner(banner) - - return banner - - async def relayfee(self): - """The minimum fee a low-priority tx must pay in order to be accepted - to the daemon's memory pool.""" - self.bump_cost(1.0) - return await self.daemon_request('relayfee') - - async def estimatefee(self, number, mode=None): - """The estimated transaction fee per kilobyte to be paid for a - transaction to be included within a certain number of blocks. 
-
-        number: the number of blocks
-        mode: CONSERVATIVE or ECONOMICAL estimation mode
-        """
-        number = non_negative_integer(number)
-        # use whitelist for mode, otherwise it would be easy to force a cache miss:
-        if mode not in self.coin.ESTIMATEFEE_MODES:
-            raise RPCError(BAD_REQUEST, f'unknown estimatefee mode: {mode}')
-        self.bump_cost(0.1)
-
-        number = self.coin.bucket_estimatefee_block_target(number)
-        cache = self.session_mgr.estimatefee_cache
-
-        cache_item = cache.get((number, mode))
-        if cache_item is not None:
-            blockhash, feerate, lock = cache_item
-            if blockhash and blockhash == self.session_mgr.bp.tip:
-                return feerate
-        else:
-            # create lock now, store it, and only then await on it
-            lock = asyncio.Lock()
-            cache[(number, mode)] = (None, None, lock)
-        async with lock:
-            cache_item = cache.get((number, mode))
-            if cache_item is not None:
-                blockhash, feerate, lock = cache_item
-                if blockhash == self.session_mgr.bp.tip:
-                    return feerate
-            self.bump_cost(2.0)  # cache miss incurs extra cost
-            blockhash = self.session_mgr.bp.tip
-            if mode:
-                feerate = await self.daemon_request('estimatefee', number, mode)
-            else:
-                feerate = await self.daemon_request('estimatefee', number)
-            assert feerate is not None
-            assert blockhash is not None
-            cache[(number, mode)] = (blockhash, feerate, lock)
-        return feerate
-
-    async def ping(self):
-        """Serves as a connection keep-alive mechanism and for the client to
-        confirm the server is still responding.
-        """
-        self.bump_cost(0.1)
-        return None
-
     async def server_version(self, client_name='', protocol_version=None):
         """Returns the server version as a string.
 
@@ -1624,28 +263,32 @@ async def server_version(self, client_name='', protocol_version=None):
         if client_name:
             client_name = str(client_name)
-            if self.env.drop_client is not None and \
-                    self.env.drop_client.match(client_name):
-                raise ReplyAndDisconnect(RPCError(
-                    BAD_REQUEST, f'unsupported client: {client_name}'))
+            if self.env.drop_client is not None and self.env.drop_client.match(client_name):
+                raise ReplyAndDisconnect(
+                    RPCError(BAD_REQUEST, f'unsupported client: {client_name}')
+                )
             self.client = client_name[:17]
 
         # Find the highest common protocol version.  Disconnect if
         # that protocol version in unsupported.
         ptuple, client_min = util.protocol_version(
-            protocol_version, self.PROTOCOL_MIN, self.PROTOCOL_MAX)
-
+            protocol_version,
+            SESSION_PROTOCOL_MIN,
+            SESSION_PROTOCOL_MAX
+        )
         await self.crash_old_client(ptuple, self.env.coin.CRASH_CLIENT_VER)
-
-        if ptuple is None:
-            if client_min > self.PROTOCOL_MIN:
-                self.logger.info(f'client requested future protocol version '
-                                 f'{util.version_string(client_min)} '
-                                 f'- is your software out of date?')
-            raise ReplyAndDisconnect(RPCError(
-                BAD_REQUEST, f'unsupported protocol version: {protocol_version}'))
-        self.set_request_handlers(ptuple)
+        if ptuple is None:
+            if client_min > SESSION_PROTOCOL_MIN:
+                self.logger.info(
+                    f'client requested future protocol version '
+                    f'{util.version_string(client_min)} '
+                    f'- is your software out of date?'
+ ) + raise ReplyAndDisconnect( + RPCError(BAD_REQUEST, f'unsupported protocol version: {protocol_version}') + ) + self.set_request_handlers(ptuple) return electrumx_version, self.protocol_version_string() async def crash_old_client(self, ptuple, crash_client_ver): @@ -1660,156 +303,6 @@ async def crash_old_client(self, ptuple, crash_client_ver): # this can crash electrum client (v < 2.8.2) UNION (3.0.0 <= v < 3.3.0) await self.send_notification('blockchain.estimatefee', ()) - async def transaction_broadcast_validate(self, raw_tx): - self.bump_cost(0.25 + len(raw_tx) / 5000) - return await self.ss.transaction_broadcast_validate(raw_tx) - - async def transaction_broadcast(self, raw_tx): - """Broadcast a raw transaction to the network. - - raw_tx: the raw transaction as a hexadecimal string""" - self.bump_cost(0.25 + len(raw_tx) / 5000) - return await self.ss.transaction_broadcast(raw_tx) - - async def transaction_broadcast_force(self, raw_tx): - """Broadcast a raw transaction to the network. Force even if invalid FT transfer - raw_tx: the raw transaction as a hexadecimal string""" - self.bump_cost(0.25 + len(raw_tx) / 5000) - return await self.ss.transaction_broadcast_force(raw_tx) - - async def transaction_get(self, tx_hash, verbose=False): - """Return the serialized raw transaction given its hash - - tx_hash: the transaction hash as a hexadecimal string - verbose: passed on to the daemon - """ - assert_tx_hash(tx_hash) - if verbose not in (True, False): - raise RPCError(BAD_REQUEST, '"verbose" must be a boolean') - - self.bump_cost(1.0) - return await self.daemon_request('getrawtransaction', tx_hash, verbose) - - async def transaction_merkle(self, tx_hash, height): - """Return the merkle branch to a confirmed transaction given its hash - and height. - - tx_hash: the transaction hash as a hexadecimal string - height: the height of the block it is in - """ - tx_hash = assert_tx_hash(tx_hash) - height = non_negative_integer(height) - - branch, tx_pos, cost = await self.session_mgr.merkle_branch_for_tx_hash( - height, tx_hash) - self.bump_cost(cost) - - return {"block_height": height, "merkle": branch, "pos": tx_pos} - - async def transaction_id_from_pos(self, height, tx_pos, merkle=False): - """Return the txid and optionally a merkle proof, given - a block height and position in the block. 
- """ - tx_pos = non_negative_integer(tx_pos) - height = non_negative_integer(height) - if merkle not in (True, False): - raise RPCError(BAD_REQUEST, '"merkle" must be a boolean') - - if merkle: - branch, tx_hash, cost = await self.session_mgr.merkle_branch_for_tx_pos( - height, tx_pos) - self.bump_cost(cost) - return {"tx_hash": tx_hash, "merkle": branch} - else: - tx_hashes, cost = await self.session_mgr.tx_hashes_at_blockheight(height) - try: - tx_hash = tx_hashes[tx_pos] - except IndexError: - raise RPCError(BAD_REQUEST, - f'no tx at position {tx_pos:,d} in block at height {height:,d}') - self.bump_cost(cost) - return hash_to_hex_str(tx_hash) - - async def compact_fee_histogram(self): - self.bump_cost(1.0) - return await self.mempool.compact_fee_histogram() - - async def atomicals_transaction(self, txid): - return await self.session_mgr.get_transaction_detail(txid) - - async def get_transaction_detail_by_height(self, height, limit, offset, op_type, reverse=True): - res = [] - txs_list = [] - txs = self.db.get_atomicals_block_txs(height) - for tx in txs: - # get operation by db method - tx_num, _ = self.db.get_tx_num_height_from_tx_hash(hex_str_to_hash(tx)) - txs_list.append({ - "tx_num": tx_num, - "tx_hash": tx, - "height": height - }) - - txs_list.sort(key=lambda x: x['tx_num'], reverse=reverse) - for tx in txs_list: - data = await self.session_mgr.get_transaction_detail(tx["tx_hash"], height, tx["tx_num"]) - if (op_type and op_type == data["op"]) or (not op_type and data["op"]): - res.append(data) - total = len(res) - return res[offset:offset + limit], total - - # get the whole transaction by block height - # return transaction detail - async def transaction_by_height(self, height, limit=10, offset=0, op_type=None, reverse=True): - res, total = await self.get_transaction_detail_by_height(height, limit, offset, op_type, reverse) - return {"result": res, "total": total, "limit": limit, "offset": offset} - - # get transaction by atomical id - async def transaction_by_atomical_id(self, compact_atomical_id_or_atomical_number, limit=10, offset=0, op_type=None, - reverse=True): - res = [] - compact_atomical_id = compact_atomical_id_or_atomical_number - if isinstance(compact_atomical_id_or_atomical_number, int) != True and is_compact_atomical_id( - compact_atomical_id_or_atomical_number): - assert_atomical_id(compact_atomical_id) - else: - compact_atomical_id = location_id_bytes_to_compact( - self.db.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number)) - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - hashX = double_sha256(atomical_id) - - res = [] - if op_type: - op = self.session_mgr.bp.op_list.get(op_type, None) - history_data, total = await self.session_mgr.get_history_op(hashX, limit, offset, op, reverse) - else: - history_data, total = await self.session_mgr.get_history_op(hashX, limit, offset, None, reverse) - for history in history_data: - tx_hash, tx_height = self.db.fs_tx_hash(history["tx_num"]) - data = await self.session_mgr.get_transaction_detail(hash_to_hex_str(tx_hash), tx_height, history["tx_num"]) - if data and data["op"]: - if (op_type and data["op"] == op_type) or not op_type: - res.append(data) - return {"result": res, "total": total, "limit": limit, "offset": offset} - - # get transaction by scripthash - async def transaction_by_scripthash(self, scripthash, limit=10, offset=0, op_type=None, reverse=True): - hashX = scripthash_to_hashX(scripthash) - res = [] - if op_type: - op = self.session_mgr.bp.op_list.get(op_type, None) - 
history_data, total = await self.session_mgr.get_history_op(hashX, limit, offset, op, reverse) - else: - history_data, total = await self.session_mgr.get_history_op(hashX, limit, offset, None, reverse) - - for history in history_data: - tx_hash, tx_height = self.db.fs_tx_hash(history["tx_num"]) - data = await self.session_mgr.get_transaction_detail(hash_to_hex_str(tx_hash), tx_height, history["tx_num"]) - if data and data["op"]: - if data["op"] and (data["op"] == op_type or not op_type): - res.append(data) - return {"result": res, "total": total, "limit": limit, "offset": offset} - class DashElectrumX(ElectrumX): """A TCP server that handles incoming Electrum Dash connections.""" @@ -1946,13 +439,10 @@ def get_payment_position(payment_queue, address): mn_info['paymentposition'] = get_payment_position( mn_payment_queue, mn_info['payee'] ) - mn_info['inselection'] = ( - mn_info['paymentposition'] < mn_payment_count // 10 - ) - hashX = self.coin.address_to_hashX(mn_info['payee']) - balance = await self.get_balance(hashX) - mn_info['balance'] = (sum(balance.values()) - / self.coin.VALUE_PER_COIN) + mn_info['inselection'] = (mn_info['paymentposition'] < mn_payment_count // 10) + hash_x = self.coin.address_to_hashX(mn_info['payee']) + balance = await self.ss.get_balance(hash_x) + mn_info['balance'] = (sum(balance.values()) / self.coin.VALUE_PER_COIN) mn_list.append(mn_info) cache.clear() cache.extend(mn_list) @@ -2032,7 +522,7 @@ async def smartrewards_check(self, addr): class AuxPoWElectrumX(ElectrumX): async def block_header(self, height, cp_height=0): - result = await super().block_header(height, cp_height) + result = await super().ss.block_header(height, cp_height) # Older protocol versions don't truncate AuxPoW if self.protocol_tuple < (1, 4, 1): @@ -2047,7 +537,7 @@ async def block_header(self, height, cp_height=0): return result async def block_headers(self, start_height, count, cp_height=0): - result = await super().block_headers(start_height, count, cp_height) + result = await super().ss.block_headers(start_height, count, cp_height) # Older protocol versions don't truncate AuxPoW if self.protocol_tuple < (1, 4, 1): @@ -2079,11 +569,11 @@ class NameIndexElectrumX(ElectrumX): def set_request_handlers(self, ptuple): super().set_request_handlers(ptuple) - if ptuple >= (1, 4, 3): + if ptuple >= SESSION_PROTOCOL_MAX: self.request_handlers['blockchain.name.get_value_proof'] = self.name_get_value_proof async def name_get_value_proof(self, scripthash, cp_height=0): - history = await self.scripthash_get_history(scripthash) + history = await self.ss.scripthash_get_history(scripthash) trimmed_history = [] prev_height = None @@ -2097,16 +587,16 @@ async def name_get_value_proof(self, scripthash, cp_height=0): and height < prev_height - self.coin.NAME_EXPIRATION): break - tx = await(self.transaction_get(txid)) + tx = await self.ss.transaction_get(txid) update['tx'] = tx del update['tx_hash'] - tx_merkle = await self.transaction_merkle(txid, height) + tx_merkle = await self.ss.transaction_merkle(txid, height) del tx_merkle['block_height'] update['tx_merkle'] = tx_merkle if height <= cp_height: - header = await self.block_header(height, cp_height) + header = await self.ss.block_header(height, cp_height) update['header'] = header trimmed_history.append(update) diff --git a/electrumx/server/session/http_session.py b/electrumx/server/session/http_session.py index 0e82ff9f..b9311ff4 100644 --- a/electrumx/server/session/http_session.py +++ b/electrumx/server/session/http_session.py @@ -1,17 +1,13 @@ # -*- 
coding: utf-8 -*- -import asyncio -import datetime import json from decimal import Decimal -from typing import Optional +from typing import Any, Awaitable, Callable import aiorpcx from aiohttp import web import electrumx.lib.util as util -from electrumx.lib.script2addr import get_address_from_output_script -from electrumx.lib.util_atomicals import * from electrumx.server.session.shared_session import SharedSession from electrumx.server.session.util import * from electrumx.version import electrumx_version @@ -24,7 +20,7 @@ def default(self, o): return super(DecimalEncoder, self).default(o) -async def format_params(request: web.Request): +async def formatted_request(request, call: Callable[[Any], Awaitable["web.StreamResponse"]]): params: list if request.method == "GET": params = json.loads(request.query.get("params", "[]")) @@ -33,13 +29,10 @@ async def format_params(request: web.Request): params = json_data.get("params", []) else: params = [] - return dict(zip(range(len(params)), params)) + return await call(*params) class HttpHandler(object): - PROTOCOL_MIN = (1, 4) - PROTOCOL_MAX = (1, 4, 3) - def __init__(self, session_mgr, db, mempool, peer_mgr, kind): # self.transport = transport self.logger = util.class_logger(__name__, self.__class__.__name__) @@ -64,9 +57,88 @@ def __init__(self, session_mgr, db, mempool, peer_mgr, kind): self.logger, self.coin, self.session_mgr, + self.peer_mgr, self.client, ) + async def add_endpoints(self, router, protocols): + handlers = { + 'health': self.health, + 'blockchain.headers.subscribe': self.ss.headers_subscribe, + 'blockchain.block.header': self.ss.block_header, + 'blockchain.block.headers': self.ss.block_headers, + 'blockchain.estimatefee': self.ss.estimate_fee, + 'blockchain.relayfee': self.ss.relay_fee, + 'blockchain.scripthash.get_balance': self.ss.scripthash_get_balance, + 'blockchain.scripthash.get_history': self.ss.scripthash_get_history, + 'blockchain.scripthash.get_mempool': self.ss.scripthash_get_mempool, + 'blockchain.scripthash.listunspent': self.ss.scripthash_list_unspent, + 'blockchain.scripthash.subscribe': self.ss.scripthash_subscribe, + 'blockchain.transaction.broadcast': self.ss.transaction_broadcast, + 'blockchain.transaction.broadcast_force': self.ss.transaction_broadcast_force, + 'blockchain.transaction.get': self.ss.transaction_get, + 'blockchain.transaction.get_merkle': self.ss.transaction_merkle, + 'blockchain.transaction.id_from_pos': self.ss.transaction_id_from_pos, + 'mempool.get_fee_histogram': self.ss.compact_fee_histogram, + # 'server.banner': self.ss.banner, + 'server.donation_address': self.ss.donation_address, + 'server.features': self.server_features_async, + # 'server.peers.subscribe': self.ss.peers_subscribe, + # 'server.ping': self.ss.ping, + # 'server.version': self.server_version, + # The Atomicals era has begun # + 'blockchain.atomicals.validate': self.ss.transaction_broadcast_validate, + 'blockchain.atomicals.get_ft_balances_scripthash': self.ss.atomicals_get_ft_balances, + 'blockchain.atomicals.get_nft_balances_scripthash': self.ss.atomicals_get_nft_balances, + 'blockchain.atomicals.listscripthash': self.ss.atomicals_list_scripthash, + 'blockchain.atomicals.list': self.ss.atomicals_list, + 'blockchain.atomicals.get_numbers': self.ss.atomicals_num_to_id, + 'blockchain.atomicals.get_block_hash': self.ss.atomicals_block_hash, + 'blockchain.atomicals.get_block_txs': self.ss.atomicals_block_txs, + # 'blockchain.atomicals.dump': self.ss.atomicals_dump, + 'blockchain.atomicals.at_location': 
self.ss.atomicals_at_location, + 'blockchain.atomicals.get_location': self.ss.atomicals_get_location, + 'blockchain.atomicals.get': self.ss.atomicals_get, + 'blockchain.atomicals.get_global': self.ss.atomicals_get_global, + 'blockchain.atomicals.get_state': self.ss.atomical_get_state, + 'blockchain.atomicals.get_state_history': self.ss.atomical_get_state_history, + 'blockchain.atomicals.get_events': self.ss.atomical_get_events, + 'blockchain.atomicals.get_tx_history': self.ss.atomicals_get_tx_history, + 'blockchain.atomicals.get_ft_info': self.ss.atomicals_get_ft_info, + 'blockchain.atomicals.get_dft_mints': self.ss.atomicals_get_dft_mints, + 'blockchain.atomicals.get_realm_info': self.ss.atomicals_get_realm_info, + 'blockchain.atomicals.get_by_realm': self.ss.atomicals_get_by_realm, + 'blockchain.atomicals.get_by_subrealm': self.ss.atomicals_get_by_subrealm, + 'blockchain.atomicals.get_by_dmitem': self.ss.atomicals_get_by_dmitem, + 'blockchain.atomicals.get_by_ticker': self.ss.atomicals_get_by_ticker, + 'blockchain.atomicals.get_by_container': self.ss.atomicals_get_by_container, + 'blockchain.atomicals.get_by_container_item': self.ss.atomicals_get_by_container_item, + 'blockchain.atomicals.get_by_container_item_validate': self.ss.atomicals_get_by_container_item_validation, + 'blockchain.atomicals.get_container_items': self.ss.atomicals_get_container_items, + 'blockchain.atomicals.find_tickers': self.ss.atomicals_search_tickers, + 'blockchain.atomicals.find_realms': self.ss.atomicals_search_realms, + 'blockchain.atomicals.find_subrealms': self.ss.atomicals_search_subrealms, + 'blockchain.atomicals.find_containers': self.ss.atomicals_search_containers, + 'blockchain.atomicals.get_holders': self.ss.atomicals_get_holders, + 'blockchain.atomicals.transaction': self.session_mgr.get_transaction_detail, + 'blockchain.atomicals.transaction_by_height': self.ss.transaction_by_height, + 'blockchain.atomicals.transaction_by_atomical_id': self.ss.transaction_by_atomical_id, + 'blockchain.atomicals.transaction_by_scripthash': self.ss.transaction_by_scripthash, + 'blockchain.atomicals.transaction_global': self.session_mgr.transaction_global, + } + if protocols >= (1, 4, 2): + handlers['blockchain.scripthash.unsubscribe'] = self.ss.scripthash_unsubscribe + for m, h in handlers.items(): + method = f'/proxy/{m}' + router.add_get(method, lambda r, handler=h: formatted_request(r, handler)) + router.add_post(method, lambda r, handler=h: formatted_request(r, handler)) + + # Fallback proxy recognition + router.add_get('/proxy', self.proxy) + router.add_post('/proxy', self.proxy) + router.add_get('/proxy/{method}', self.handle_get_method) + router.add_post('/proxy/{method}', self.handle_post_method) + async def get_rpc_server(self): for service in self.env.services: if service.protocol == 'tcp': @@ -74,8 +146,7 @@ async def get_rpc_server(self): @classmethod def protocol_min_max_strings(cls): - return [util.version_string(ver) - for ver in (cls.PROTOCOL_MIN, cls.PROTOCOL_MAX)] + return [util.version_string(ver) for ver in (SESSION_PROTOCOL_MIN, SESSION_PROTOCOL_MAX)] @classmethod def server_features(cls, env): @@ -98,656 +169,6 @@ def server_features(cls, env): 'services': [str(service) for service in env.report_services], } - async def _merkle_proof(self, cp_height, height): - max_height = self.db.db_height - if not height <= cp_height <= max_height: - raise RPCError(BAD_REQUEST, - f'require header height {height:,d} <= ' - f'cp_height {cp_height:,d} <= ' - f'chain height {max_height:,d}') - branch, root = await 
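# Note the `handler=h` default argument in the route-registration loop
# above: a plain `lambda r: formatted_request(r, h)` would close over the
# loop variable itself, so every /proxy/<method> route would dispatch to
# whichever handler happened to be bound last. A minimal demonstration of
# the pitfall and of the default-argument fix:
callbacks_buggy = [lambda: name for name in ('a', 'b', 'c')]
callbacks_fixed = [lambda n=name: n for name in ('a', 'b', 'c')]

assert [f() for f in callbacks_buggy] == ['c', 'c', 'c']   # late binding
assert [f() for f in callbacks_fixed] == ['a', 'b', 'c']   # bound at definition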
self.db.header_branch_and_root(cp_height + 1, - height) - return { - 'branch': [hash_to_hex_str(elt) for elt in branch], - 'root': hash_to_hex_str(root), - } - - async def address_listunspent(self, request): - '''Return the list of UTXOs of an address.''' - addrs = request.match_info.get('addrs', '') - if not addrs: - return web.Response(status=404) - list_addr = list(dict.fromkeys(addrs.split(','))) - list_tx = list() - for address in list_addr: - hashX = self.address_to_hashX(address) - list_utxo = await self.hashX_listunspent(hashX) - for utxo in list_utxo: - tx_detail = await self.transaction_get(utxo["tx_hash"]) - list_tx.append(await self.wallet_unspent(address, utxo, tx_detail)) - return web.json_response(list_tx) - - async def address(self, request): - addr = request.match_info.get('addr', '') - if not addr: - return web.Response(status=404) - addr_balance = await self.address_get_balance(addr) - confirmed_sat = addr_balance["confirmed"] - unconfirmed_sat = addr_balance["unconfirmed"] - res = {"addrStr": addr, - "balance": float(self.coin.decimal_value(confirmed_sat)), - "balanceSat": confirmed_sat, - "unconfirmedBalance": float(self.coin.decimal_value(unconfirmed_sat)), - "unconfirmedBalanceSat": addr_balance["unconfirmed"]} - return web.json_response(res) - - async def address_status(self, hashX): - """Returns an address status. - - Status is a hex string, but must be None if there is no history. - """ - # Note history is ordered and mempool unordered in electrum-server - # For mempool, height is -1 if it has unconfirmed inputs, otherwise 0 - db_history, cost = await self.session_mgr.limited_history(hashX) - mempool = await self.mempool.transaction_summaries(hashX) - - status = ''.join(f'{hash_to_hex_str(tx_hash)}:' - f'{height:d}:' - for tx_hash, height in db_history) - status += ''.join(f'{hash_to_hex_str(tx.hash)}:' - f'{-tx.has_unconfirmed_inputs:d}:' - for tx in mempool) - - # Add status hashing cost - # self.bump_cost(cost + 0.1 + len(status) * 0.00002) - - if status: - status = sha256(status.encode()).hex() - else: - status = None - - if mempool: - self.mempool_statuses[hashX] = status - else: - self.mempool_statuses.pop(hashX, None) - - return status - - async def hashX_subscribe(self, hashX, alias): - # Store the subscription only after address_status succeeds - result = await self.address_status(hashX) - self.hashX_subs[hashX] = alias - return result - - def address_to_hashX(self, address): - try: - return self.coin.address_to_hashX(address) - except Exception: - pass - raise RPCError(BAD_REQUEST, f'{address} is not a valid address') - - async def address_get_balance(self, address): - """Return the confirmed and unconfirmed balance of an address.""" - hashX = self.address_to_hashX(address) - return await self.get_balance(hashX) - - async def address_get_history(self, address): - """Return the confirmed and unconfirmed history of an address.""" - hashX = self.address_to_hashX(address) - return await self.confirmed_and_unconfirmed_history(hashX) - - async def get_balance(self, hashX): - utxos = await self.db.all_utxos(hashX) - confirmed = sum(utxo.value for utxo in utxos) - unconfirmed = await self.mempool.balance_delta(hashX) - return {'confirmed': confirmed, 'unconfirmed': unconfirmed} - - async def unconfirmed_history(self, hashX): - # Note unconfirmed history is unordered in electrum-server - # height is -1 if it has unconfirmed inputs, otherwise 0 - return [{'tx_hash': hash_to_hex_str(tx.hash), - 'height': -tx.has_unconfirmed_inputs, - 'fee': tx.fee} - for tx in 
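# The address_status helper above derives the Electrum "status" of a
# script: confirmed history entries rendered as "txid:height:", then
# mempool entries as "txid:-1:" or "txid:0:" (negative when the mempool tx
# spends unconfirmed inputs), hashed with a single SHA256; None when the
# history is empty. A self-contained sketch of the same derivation:
from hashlib import sha256


def electrum_status(confirmed, mempool):
    """confirmed: [(txid_hex, height)]; mempool: [(txid_hex, has_unconf_inputs)]."""
    status = ''.join(f'{txid}:{height:d}:' for txid, height in confirmed)
    status += ''.join(f'{txid}:{-int(unconf):d}:' for txid, unconf in mempool)
    return sha256(status.encode()).hexdigest() if status else None


assert electrum_status([], []) is None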
await self.mempool.transaction_summaries(hashX)] - - async def confirmed_history(self, hashX): - # Note history is ordered - history, cost = await self.session_mgr.limited_history(hashX) - conf = [{'tx_hash': hash_to_hex_str(tx_hash), 'height': height} - for tx_hash, height in history] - return conf - - async def confirmed_and_unconfirmed_history(self, hashX): - # Note history is ordered but unconfirmed is unordered in e-s - history, cost = await self.session_mgr.limited_history(hashX) - conf = [{'tx_hash': hash_to_hex_str(tx_hash), 'height': height} - for tx_hash, height in history] - return conf + await self.unconfirmed_history(hashX) - - async def mempool_get(self, verbose=False): - """Returns all transaction ids in memory pool as a json array of string transaction ids - - verbose: True for a json object, false for array of transaction ids - """ - if verbose not in (True, False): - raise RPCError(BAD_REQUEST, f'"verbose" must be a boolean') - - return await self.daemon_request('getrawmempool', verbose) - - # Get atomicals base information from db or placeholder information if mint is still in the mempool and unconfirmed - async def atomical_id_get(self, compact_atomical_id): - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) - if atomical: - return atomical - # Check mempool - atomical_in_mempool = await self.mempool.get_atomical_mint(atomical_id) - if atomical_in_mempool == None: - raise RPCError(BAD_REQUEST, f'"{compact_atomical_id}" is not found') - return atomical_in_mempool - - async def atomicals_list_get(self, limit, offset, asc): - atomicals = await self.db.get_atomicals_list(limit, offset, asc) - atomicals_populated = [] - for atomical_id in atomicals: - atomical = await self.atomical_id_get(location_id_bytes_to_compact(atomical_id)) - atomicals_populated.append(atomical) - return {'global': await self.get_summary_info(), 'result': atomicals_populated} - - async def atomical_id_get_ft_info(self, compact_atomical_id): - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) - - if atomical['subtype'] == 'decentralized': - atomical = await self.session_mgr.bp.get_dft_mint_info_rpc_format_by_atomical_id(atomical_id) - elif atomical['subtype'] == 'direct': - atomical = await self.session_mgr.bp.get_ft_mint_info_rpc_format_by_atomical_id(atomical_id) - else: - raise RPCError(BAD_REQUEST, f'"{compact_atomical_id}" is not a fungible token (FT)') - - if atomical: - return atomical - - # Check mempool - atomical_in_mempool = await self.mempool.get_atomical_mint(atomical_id) - if atomical_in_mempool == None: - raise RPCError(BAD_REQUEST, f'"{compact_atomical_id}" is not found') - return atomical_in_mempool - - # Perform a search for tickers, containers, realms, subrealms - def atomicals_search_name_template(self, db_prefix, name_type_str, parent_prefix=None, prefix=None, reverse=False, - limit=100, offset=0, is_verified_only=False): - search_prefix = b'' - if prefix: - search_prefix = prefix.encode() - - db_entries = self.db.get_name_entries_template_limited(db_prefix, parent_prefix, search_prefix, reverse, limit, - offset) - formatted_results = [] - for item in db_entries: - status = None - if name_type_str == "ticker": - status, _, _ = self.session_mgr.bp.get_effective_name_template(b'tick', item['name'], - self.session_mgr.bp.height, - 
self.session_mgr.bp.ticker_data_cache) - elif name_type_str == "realm": - status, _, _ = self.session_mgr.bp.get_effective_name_template(b'rlm', item['name'], - self.session_mgr.bp.height, - self.session_mgr.bp.realm_data_cache) - elif name_type_str == "collection": - status, _, _ = self.session_mgr.bp.get_effective_name_template(b'co', item['name'], - self.session_mgr.bp.height, - self.session_mgr.bp.container_data_cache) - elif name_type_str == "subrealm": - status, _, _ = self.session_mgr.bp.get_effective_subrealm(parent_prefix, item['name'], - self.session_mgr.bp.height) - - obj = {'atomical_id': location_id_bytes_to_compact(item['atomical_id']), 'tx_num': item['tx_num'], - name_type_str: item['name'], name_type_str + '_hex': item.get('name_hex'), 'status': status} - if is_verified_only and status == "verified": - formatted_results.append(obj) - elif not is_verified_only: - formatted_results.append(obj) - return {'result': formatted_results} - - def auto_populate_container_dmint_items_fields(self, items): - if not items or not isinstance(items, dict): - return {} - for item, value in items.items(): - provided_id = value.get('id') - if provided_id and isinstance(provided_id, bytes) and len(provided_id) == 36: - value['$id'] = location_id_bytes_to_compact(provided_id) - return auto_encode_bytes_elements(items) - - async def search_token(self, db_prefix, name_type_str, prefix=None, Reverse=False, Limit=100, Offset=0): - search_prefix = b'' - if prefix: - search_prefix = prefix.encode() - db_entries = self.db.get_name_entries_template_limited(db_prefix, None, search_prefix, Reverse, Limit, Offset) - formatted_results = [] - for item in db_entries: - atomical_id = location_id_bytes_to_compact(item['atomical_id']) - atomical_data = await self.atomical_id_get_ft_info(atomical_id) - obj = { - 'atomical_id': atomical_id, - 'tx_num': item['tx_num'], - 'atomical_data': atomical_data, - name_type_str: item['name'] - } - formatted_results.append(obj) - return {'result': formatted_results} - - async def hashX_listunspent(self, hashX): - """Return the list of UTXOs of a script hash, including mempool - effects.""" - utxos = await self.db.all_utxos(hashX) - utxos = sorted(utxos) - utxos.extend(await self.mempool.unordered_UTXOs(hashX)) - # self.bump_cost(1.0 + len(utxos) / 50) - spends = await self.mempool.potential_spends(hashX) - returned_utxos = [] - for utxo in utxos: - if (utxo.tx_hash, utxo.tx_pos) in spends: - continue - atomicals = self.db.get_atomicals_by_utxo(utxo, True) - atomicals_basic_infos = {} - for atomical_id in atomicals: - # This call is efficient in that it's cached underneath. - # Now we only show the atomical id and its corresponding value - # because it can always be fetched separately which is more efficient. 
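# The hashX_listunspent logic here merges confirmed UTXOs with unconfirmed
# mempool outputs and then drops any outpoint the mempool is already
# spending, so a coin never appears as unspent while an in-flight tx
# consumes it. The core set-filter, reduced to plain data:
def merge_unspent(confirmed, mempool_utxos, mempool_spends):
    """Each utxo is (tx_hash, tx_pos, value); spends is a set of (tx_hash, tx_pos)."""
    utxos = sorted(confirmed) + list(mempool_utxos)
    return [u for u in utxos if (u[0], u[1]) not in mempool_spends]


assert merge_unspent([(b'a', 0, 50)], [(b'b', 1, 7)], {(b'a', 0)}) == [(b'b', 1, 7)]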
- # Todo need to combine mempool atomicals - atomical_id_compact = location_id_bytes_to_compact(atomical_id) - location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) - atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id) - returned_utxos.append({ - 'txid': hash_to_hex_str(utxo.tx_hash), - 'tx_hash': hash_to_hex_str(utxo.tx_hash), - 'index': utxo.tx_pos, - 'tx_pos': utxo.tx_pos, - 'vout': utxo.tx_pos, - 'height': utxo.height, - 'value': utxo.value, - 'atomicals': atomicals_basic_infos - }) - return returned_utxos - - async def hashX_ft_balances_atomicals(self, hashX): - utxos = await self.db.all_utxos(hashX) - utxos = sorted(utxos) - # Comment out the utxos for now and add it in later - # utxos.extend(await self.mempool.unordered_UTXOs(hashX)) - spends = [] # await self.mempool.potential_spends(hashX) - returned_utxos = [] - atomicals_id_map = {} - for utxo in utxos: - if (utxo.tx_hash, utxo.tx_pos) in spends: - continue - atomicals = self.db.get_atomicals_by_utxo(utxo, True) - atomicals_basic_infos = {} - for atomical_id in atomicals: - # This call is efficient in that it's cached underneath. - # Now we only show the atomical id and its corresponding value - # because it can always be fetched separately which is more efficient. - atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) - atomical_id_compact = location_id_bytes_to_compact(atomical_id) - atomicals_id_map[atomical_id_compact] = atomical_basic_info - location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) - atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id) - if len(atomicals) > 0: - returned_utxos.append({ - 'txid': hash_to_hex_str(utxo.tx_hash), - 'index': utxo.tx_pos, - 'vout': utxo.tx_pos, - 'height': utxo.height, - 'value': utxo.value, - 'atomicals': atomicals_basic_infos - }) - # Aggregate balances - return_struct = { - 'balances': {} - } - for returned_utxo in returned_utxos: - for atomical_id_entry_compact in returned_utxo['atomicals']: - atomical_id_basic_info = atomicals_id_map[atomical_id_entry_compact] - atomical_id_compact = atomical_id_basic_info['atomical_id'] - assert (atomical_id_compact == atomical_id_entry_compact) - if atomical_id_basic_info.get('type') == 'FT': - if return_struct['balances'].get(atomical_id_compact) is None: - return_struct['balances'][atomical_id_compact] = {} - return_struct['balances'][atomical_id_compact]['id'] = atomical_id_compact - return_struct['balances'][atomical_id_compact]['ticker'] = atomical_id_basic_info.get('$ticker') - return_struct['balances'][atomical_id_compact]['confirmed'] = 0 - if returned_utxo['height'] > 0: - return_struct['balances'][atomical_id_compact]['confirmed'] += returned_utxo['atomicals'][atomical_id_compact] - return return_struct - - async def hashX_nft_balances_atomicals(self, hashX): - utxos = await self.db.all_utxos(hashX) - utxos = sorted(utxos) - # Comment out the utxos for now and add it in later - # utxos.extend(await self.mempool.unordered_UTXOs(hashX)) - spends = [] # await self.mempool.potential_spends(hashX) - returned_utxos = [] - atomicals_id_map = {} - for utxo in utxos: - if (utxo.tx_hash, utxo.tx_pos) in spends: - continue - atomicals = self.db.get_atomicals_by_utxo(utxo, True) - atomicals_basic_infos = {} - for atomical_id in atomicals: - # This call is efficient in that it's cached underneath. 
- # Now we only show the atomical id and its corresponding value - # because it can always be fetched separately which is more efficient. - atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) - atomical_id_compact = location_id_bytes_to_compact(atomical_id) - atomicals_id_map[atomical_id_compact] = atomical_basic_info - location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) - atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id) - if len(atomicals) > 0: - returned_utxos.append({ - 'txid': hash_to_hex_str(utxo.tx_hash), - 'index': utxo.tx_pos, - 'vout': utxo.tx_pos, - 'height': utxo.height, - 'value': utxo.value, - 'atomicals': atomicals_basic_infos - }) - # Aggregate balances - return_struct = { - 'balances': {} - } - for returned_utxo in returned_utxos: - for atomical_id_entry_compact in returned_utxo['atomicals']: - atomical_id_basic_info = atomicals_id_map[atomical_id_entry_compact] - atomical_id_compact = atomical_id_basic_info['atomical_id'] - assert (atomical_id_compact == atomical_id_entry_compact) - if atomical_id_basic_info.get('type') == 'NFT': - if return_struct['balances'].get(atomical_id_compact) is None: - return_struct['balances'][atomical_id_compact] = {} - return_struct['balances'][atomical_id_compact]['id'] = atomical_id_compact - return_struct['balances'][atomical_id_compact]['confirmed'] = 0 - if atomical_id_basic_info.get('subtype'): - return_struct['balances'][atomical_id_compact]['subtype'] = atomical_id_basic_info.get( - 'subtype') - if atomical_id_basic_info.get('$request_container'): - return_struct['balances'][atomical_id_compact][ - 'request_container'] = atomical_id_basic_info.get('$request_container') - if atomical_id_basic_info.get('$container'): - return_struct['balances'][atomical_id_compact]['container'] = atomical_id_basic_info.get( - '$container') - if atomical_id_basic_info.get('$dmitem'): - return_struct['balances'][atomical_id_compact]['dmitem'] = atomical_id_basic_info.get('$dmitem') - if atomical_id_basic_info.get('$request_dmitem'): - return_struct['balances'][atomical_id_compact]['request_dmitem'] = atomical_id_basic_info.get( - '$request_dmitem') - if atomical_id_basic_info.get('$realm'): - return_struct['balances'][atomical_id_compact]['realm'] = atomical_id_basic_info.get('$realm') - if atomical_id_basic_info.get('$request_realm'): - return_struct['balances'][atomical_id_compact]['request_realm'] = atomical_id_basic_info.get( - '$request_realm') - if atomical_id_basic_info.get('$subrealm'): - return_struct['balances'][atomical_id_compact]['subrealm'] = atomical_id_basic_info.get( - '$subrealm') - if atomical_id_basic_info.get('$request_subrealm'): - return_struct['balances'][atomical_id_compact]['request_subrealm'] = atomical_id_basic_info.get( - '$request_subrealm') - if atomical_id_basic_info.get('$full_realm_name'): - return_struct['balances'][atomical_id_compact]['full_realm_name'] = atomical_id_basic_info.get( - '$full_realm_name') - if atomical_id_basic_info.get('$parent_container'): - return_struct['balances'][atomical_id_compact]['parent_container'] = atomical_id_basic_info.get( - '$parent_container') - if atomical_id_basic_info.get('$parent_realm'): - return_struct['balances'][atomical_id_compact]['parent_realm'] = atomical_id_basic_info.get( - '$parent_realm') - if atomical_id_basic_info.get('$parent_container_name'): - return_struct['balances'][atomical_id_compact][ - 'parent_container_name'] = 
atomical_id_basic_info.get('$parent_container_name') - if atomical_id_basic_info.get('$bitwork'): - return_struct['balances'][atomical_id_compact]['bitwork'] = atomical_id_basic_info.get( - '$bitwork') - if atomical_id_basic_info.get('$parents'): - return_struct['balances'][atomical_id_compact]['parents'] = atomical_id_basic_info.get( - '$parents') - if returned_utxo['height'] > 0: - return_struct['balances'][atomical_id_compact]['confirmed'] += returned_utxo['atomicals'][atomical_id_compact] - return return_struct - - def atomical_resolve_id(self, compact_atomical_id_or_atomical_number): - compact_atomical_id = compact_atomical_id_or_atomical_number - if not isinstance(compact_atomical_id_or_atomical_number, int) and is_compact_atomical_id( - compact_atomical_id_or_atomical_number): - assert_atomical_id(compact_atomical_id) - else: - found_atomical_id = self.db.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number) - if not found_atomical_id: - raise RPCError(BAD_REQUEST, f'not found atomical: {compact_atomical_id_or_atomical_number}') - compact_atomical_id = location_id_bytes_to_compact(found_atomical_id) - return compact_atomical_id - - async def atomical_id_get_location(self, compact_atomical_id): - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.atomical_id_get(compact_atomical_id) - await self.db.populate_extended_location_atomical_info(atomical_id, atomical) - return atomical - - async def get_summary_info(self, atomical_hash_count=10): - if atomical_hash_count and atomical_hash_count > 100000: - atomical_hash_count = 100000 - - db_height = self.db.db_height - last_block_hash = self.db.get_atomicals_block_hash(db_height) - ret = { - 'coin': self.env.coin.__name__, - 'network': self.coin.NET, - 'height': db_height, - 'block_tip': hash_to_hex_str(self.db.db_tip), - 'server_time': datetime.datetime.now().isoformat(), - 'atomicals_block_tip': last_block_hash, - 'atomical_count': self.db.db_atomical_count - } - - list_hashes = {} - ret['atomicals_block_hashes'] = {} - # ret['atomicals_block_hashes'][db_height] = last_block_hash - for i in range(atomical_hash_count): - next_db_height = db_height - i - nextblockhash = self.db.get_atomicals_block_hash(next_db_height) - ret['atomicals_block_hashes'][next_db_height] = nextblockhash - return ret - - async def atomical_id_get_state(self, compact_atomical_id, Verbose=False): - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.atomical_id_get(compact_atomical_id) - height = self.session_mgr.bp.height - self.db.populate_extended_mod_state_latest_atomical_info(atomical_id, atomical, height) - await self.db.populate_extended_location_atomical_info(atomical_id, atomical) - return atomical - - async def atomical_id_get_state_history(self, compact_atomical_id): - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.atomical_id_get(compact_atomical_id) - height = self.session_mgr.bp.height - self.db.populate_extended_mod_state_history_atomical_info(atomical_id, atomical, height) - await self.db.populate_extended_location_atomical_info(atomical_id, atomical) - return atomical - - async def atomical_id_get_events(self, compact_atomical_id): - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.atomical_id_get(compact_atomical_id) - height = self.session_mgr.bp.height - self.db.populate_extended_events_atomical_info(atomical_id, atomical, height) - await 
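# Both balance endpoints above fold per-UTXO atomical values into a single
# confirmed total per compact atomical id, counting only outputs with
# height > 0 as confirmed. The aggregation, reduced to plain data (the
# real handlers also attach ticker/realm/container metadata per id):
def aggregate_confirmed(utxos):
    """utxos: [{'height': int, 'atomicals': {compact_id: value}}]."""
    balances = {}
    for utxo in utxos:
        for atomical_id, value in utxo['atomicals'].items():
            entry = balances.setdefault(atomical_id,
                                        {'id': atomical_id, 'confirmed': 0})
            if utxo['height'] > 0:       # mempool outputs stay unconfirmed
                entry['confirmed'] += value
    return {'balances': balances}


assert aggregate_confirmed(
    [{'height': 1, 'atomicals': {'x': 3}}, {'height': 0, 'atomicals': {'x': 4}}]
)['balances']['x']['confirmed'] == 3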
self.db.populate_extended_location_atomical_info(atomical_id, atomical) - return atomical - - async def atomical_id_get_tx_history(self, compact_atomical_id): - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.atomical_id_get(compact_atomical_id) - history = await self.scripthash_get_history(hash_to_hex_str(double_sha256(atomical_id))) - history.sort(key=lambda x: x['height'], reverse=True) - - atomical['tx'] = { - 'history': history - } - return atomical - - async def hashX_listscripthash_atomicals(self, hashX, Verbose=False): - utxos = await self.db.all_utxos(hashX) - utxos = sorted(utxos) - # Comment out the utxos for now and add it in later - # utxos.extend(await self.mempool.unordered_UTXOs(hashX)) - spends = [] # await self.mempool.potential_spends(hashX) - returned_utxos = [] - atomicals_id_map = {} - for utxo in utxos: - if (utxo.tx_hash, utxo.tx_pos) in spends: - continue - atomicals = self.db.get_atomicals_by_utxo(utxo, True) - atomicals_basic_infos = {} - for atomical_id in atomicals: - # This call is efficient in that it's cached underneath. - # Now we only show the atomical id and its corresponding value - # because it can always be fetched separately which is more efficient. - atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) - atomical_id_compact = location_id_bytes_to_compact(atomical_id) - atomicals_id_map[atomical_id_compact] = atomical_basic_info - location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) - atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id) - if Verbose or len(atomicals) > 0: - returned_utxos.append({ - 'txid': hash_to_hex_str(utxo.tx_hash), - 'index': utxo.tx_pos, - 'vout': utxo.tx_pos, - 'height': utxo.height, - 'value': utxo.value, - 'atomicals': atomicals_basic_infos - }) - - # Aggregate balances - return_struct = { - 'global': await self.get_summary_info(), - 'atomicals': {}, - 'utxos': returned_utxos - } - - for returned_utxo in returned_utxos: - for atomical_id_entry_compact in returned_utxo['atomicals']: - atomical_id_basic_info = atomicals_id_map[atomical_id_entry_compact] - atomical_id_ref = atomical_id_basic_info['atomical_id'] - if return_struct['atomicals'].get(atomical_id_ref) is None: - return_struct['atomicals'][atomical_id_ref] = { - 'atomical_id': atomical_id_ref, - 'atomical_number': atomical_id_basic_info['atomical_number'], - 'type': atomical_id_basic_info['type'], - 'confirmed': 0, - # 'subtype': atomical_id_basic_info.get('subtype'), - 'data': atomical_id_basic_info - } - if atomical_id_basic_info.get('$realm'): - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref][ - 'request_realm_status'] = atomical_id_basic_info.get('$request_realm_status') - return_struct['atomicals'][atomical_id_ref]['request_realm'] = atomical_id_basic_info.get( - '$request_realm') - return_struct['atomicals'][atomical_id_ref]['realm'] = atomical_id_basic_info.get('$realm') - return_struct['atomicals'][atomical_id_ref]['full_realm_name'] = atomical_id_basic_info.get( - '$full_realm_name') - elif atomical_id_basic_info.get('$subrealm'): - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref][ - 'request_subrealm_status'] = atomical_id_basic_info.get('$request_subrealm_status') - return_struct['atomicals'][atomical_id_ref]['request_subrealm'] = 
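# atomical_id_get_tx_history above reuses the ordinary scripthash-history
# index by treating double-SHA256(atomical_id) as a synthetic script hash,
# so an atomical's transfer history rides on the existing address-history
# machinery (hash_to_hex_str renders the digest as reversed-byte hex). A
# sketch of that synthetic key, assuming the 36-byte txid+index atomical id:
from hashlib import sha256


def double_sha256(data: bytes) -> bytes:
    return sha256(sha256(data).digest()).digest()


def atomical_history_scripthash(atomical_id: bytes) -> str:
    # 36-byte atomical id (txid + output index) -> reversed-hex history key
    return double_sha256(atomical_id)[::-1].hex()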
atomical_id_basic_info.get( - '$request_subrealm') - return_struct['atomicals'][atomical_id_ref]['parent_realm'] = atomical_id_basic_info.get( - '$parent_realm') - return_struct['atomicals'][atomical_id_ref]['subrealm'] = atomical_id_basic_info.get( - '$subrealm') - return_struct['atomicals'][atomical_id_ref]['full_realm_name'] = atomical_id_basic_info.get( - '$full_realm_name') - elif atomical_id_basic_info.get('$dmitem'): - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref][ - 'request_dmitem_status'] = atomical_id_basic_info.get('$request_dmitem_status') - return_struct['atomicals'][atomical_id_ref]['request_dmitem'] = atomical_id_basic_info.get( - '$request_dmitem') - return_struct['atomicals'][atomical_id_ref]['parent_container'] = atomical_id_basic_info.get( - '$parent_container') - return_struct['atomicals'][atomical_id_ref]['dmitem'] = atomical_id_basic_info.get('$dmitem') - elif atomical_id_basic_info.get('$ticker'): - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['ticker_candidates'] = atomical_id_basic_info.get( - '$ticker_candidates') - return_struct['atomicals'][atomical_id_ref][ - 'request_ticker_status'] = atomical_id_basic_info.get('$request_ticker_status') - return_struct['atomicals'][atomical_id_ref]['request_ticker'] = atomical_id_basic_info.get( - '$request_ticker') - return_struct['atomicals'][atomical_id_ref]['ticker'] = atomical_id_basic_info.get('$ticker') - elif atomical_id_basic_info.get('$container'): - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref][ - 'request_container_status'] = atomical_id_basic_info.get('$request_container_status') - return_struct['atomicals'][atomical_id_ref]['container'] = atomical_id_basic_info.get( - '$container') - return_struct['atomicals'][atomical_id_ref]['request_container'] = atomical_id_basic_info.get( - '$request_container') - # Label them as candidates if they were candidates - elif atomical_id_basic_info.get('subtype') == 'request_realm': - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref][ - 'request_realm_status'] = atomical_id_basic_info.get('$request_realm_status') - return_struct['atomicals'][atomical_id_ref]['request_realm'] = atomical_id_basic_info.get( - '$request_realm') - return_struct['atomicals'][atomical_id_ref]['realm_candidates'] = atomical_id_basic_info.get( - '$realm_candidates') - elif atomical_id_basic_info.get('subtype') == 'request_subrealm': - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['subrealm_candidates'] = atomical_id_basic_info.get( - '$subrealm_candidates') - return_struct['atomicals'][atomical_id_ref][ - 'request_subrealm_status'] = atomical_id_basic_info.get('$request_subrealm_status') - return_struct['atomicals'][atomical_id_ref][ - 'request_full_realm_name'] = atomical_id_basic_info.get('$request_full_realm_name') - return_struct['atomicals'][atomical_id_ref]['request_subrealm'] = atomical_id_basic_info.get( - '$request_subrealm') - return_struct['atomicals'][atomical_id_ref]['parent_realm'] = atomical_id_basic_info.get( - '$parent_realm') - elif atomical_id_basic_info.get('subtype') == 'request_dmitem': - 
return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['dmitem_candidates'] = atomical_id_basic_info.get( - '$dmitem_candidates') - return_struct['atomicals'][atomical_id_ref][ - 'request_dmitem_status'] = atomical_id_basic_info.get('$request_dmitem_status') - return_struct['atomicals'][atomical_id_ref]['request_dmitem'] = atomical_id_basic_info.get( - '$request_dmitem') - return_struct['atomicals'][atomical_id_ref]['parent_container'] = atomical_id_basic_info.get( - '$parent_container') - elif atomical_id_basic_info.get('subtype') == 'request_container': - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref][ - 'container_candidates'] = atomical_id_basic_info.get('$container_candidates') - return_struct['atomicals'][atomical_id_ref][ - 'request_container_status'] = atomical_id_basic_info.get('$request_container_status') - return_struct['atomicals'][atomical_id_ref]['request_container'] = atomical_id_basic_info.get( - '$request_container') - elif atomical_id_basic_info.get('$request_ticker_status'): - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['ticker_candidates'] = atomical_id_basic_info.get( - '$ticker_candidates') - return_struct['atomicals'][atomical_id_ref][ - 'request_ticker_status'] = atomical_id_basic_info.get('$request_ticker_status') - return_struct['atomicals'][atomical_id_ref]['request_ticker'] = atomical_id_basic_info.get( - '$request_ticker') - - if returned_utxo['height'] <= 0: - return_struct['atomicals'][atomical_id_ref]['unconfirmed'] += returned_utxo["atomicals"][atomical_id_ref] - else: - return_struct['atomicals'][atomical_id_ref]['confirmed'] += returned_utxo["atomicals"][atomical_id_ref] - - return return_struct - - ############################################ - # get method - ############################################ async def handle_get_method(self, request): method = request.match_info.get('method', None) params = json.loads(request.query.get("params", "[]")) @@ -759,9 +180,6 @@ async def handle_get_method(self, request): return result - ############################################ - # post method - ############################################ async def handle_post_method(self, request): json_data = await request.json() method = request.match_info.get('method', None) @@ -773,12 +191,7 @@ async def handle_post_method(self, request): return result - ############################################ - # http method - ############################################ - - # verified - async def proxy(self, request): + async def proxy(self): result = { "success": True, "info": { @@ -798,1119 +211,23 @@ async def proxy(self, request): } return web.json_response(data=result) - # verified - async def health(self, request): + async def health(self): result = {"success": True, "health": True} return web.json_response(data=result) - # verified - async def atomicals_list(self, request): - params = await format_params(request) - offset = params.get(0, 100) - limit = params.get(1, 0) - asc = params.get(2, True) - - """Return the list of atomicals order by reverse atomical number""" - formatted_results = await self.atomicals_list_get(offset, limit, asc) - return formatted_results - - # verified - async def atomicals_get(self, request): - params = await format_params(request) - compact_atomical_id_or_atomical_number = 
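# The /proxy/{method} routes above accept positional params either as a
# JSON array in the "params" query argument (GET) or in a JSON body
# (POST). A hypothetical client call; the host and port are placeholders
# for wherever this HTTP session server is actually bound:
import aiohttp


async def proxy_call(method: str, params: list):
    url = f'http://127.0.0.1:8080/proxy/{method}'     # placeholder endpoint
    async with aiohttp.ClientSession() as session:
        async with session.post(url, json={'params': params}) as resp:
            return await resp.json()

# e.g. await proxy_call('blockchain.transaction.get', [txid_hex, True])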
params.get(0, "") - - compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) - return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get(compact_atomical_id)} - - # verified - async def scripthash_listunspent(self, request): - """Return the list of UTXOs of a scripthash.""" - params = await format_params(request) - scripthash = params.get(0, "") - - hashX = scripthash_to_hashX(scripthash) - return await self.hashX_listunspent(hashX) - - # verified - async def scripthash_get_history(self, request): - """Return the confirmed and unconfirmed history of a scripthash.""" - params = await format_params(request) - scripthash = params.get(0) - - hashX = scripthash_to_hashX(scripthash) - return await self.confirmed_and_unconfirmed_history(hashX) - - # verified - async def transaction_get(self, request): - """Return the serialized raw transaction given its hash - - tx_hash: the transaction hash as a hexadecimal string - verbose: passed on to the daemon - """ - params = await format_params(request) - tx_hash = params.get(0, "") - verbose = params.get(1, False) - - assert_tx_hash(tx_hash) - if verbose not in (True, False): - raise RPCError(BAD_REQUEST, '"verbose" must be a boolean') - - return await self.daemon_request('getrawtransaction', tx_hash, verbose) - - # verified - async def atomical_get_state(self, request): - # async def atomical_get_state(self, compact_atomical_id_or_atomical_number, Verbose=False): - params = await format_params(request) - compact_atomical_id_or_atomical_number = params.get(0, "") - Verbose = params.get(0, False) - - compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) - return {'global': await self.get_summary_info(), - 'result': await self.atomical_id_get_state(compact_atomical_id, Verbose)} - - # verified - async def scripthash_get_balance(self, request): - """Return the confirmed and unconfirmed balance of a scripthash.""" - params = await format_params(request) - scripthash = params.get(0, "") - - hashX = scripthash_to_hashX(scripthash) - return await self.get_balance(hashX) - - # verified - async def atomicals_get_location(self, request): - params = await format_params(request) - compact_atomical_id_or_atomical_number = params.get(0, "") - compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) - return {'global': await self.get_summary_info(), - 'result': await self.atomical_id_get_location(compact_atomical_id)} - - # verified - async def atomicals_listscripthash(self, request): - """Return the list of Atomical UTXOs for an address""" - params = await format_params(request) - scripthash = params.get(0, "") - Verbose = params.get(1, False) - - hashX = scripthash_to_hashX(scripthash) - return await self.hashX_listscripthash_atomicals(hashX, Verbose) - - # verified - async def atomicals_get_global(self, request): - params = await format_params(request) - hashes = params.get(0, 10) - return {'global': await self.get_summary_info(hashes)} - - async def block_header(self, request): - """Return a raw block header as a hexadecimal string, or as a - dictionary with a merkle proof.""" - params = await format_params(request) - height = params.get(0, 0) - cp_height = params.get(1, 0) - height = non_negative_integer(height) - cp_height = non_negative_integer(cp_height) - raw_header_hex = (await self.session_mgr.raw_header(height)).hex() - if cp_height == 0: - return raw_header_hex - result = {'header': raw_header_hex} - result.update(await 
self._merkle_proof(cp_height, height)) - return result - - async def block_headers(self, request): - """Return count concatenated block headers as hex for the main chain; - starting at start_height. - - start_height and count must be non-negative integers. At most - MAX_CHUNK_SIZE headers will be returned. - """ - params = await format_params(request) - start_height = params.get(0, 0) - count = params.get(1, 0) - cp_height = params.get(2, 0) - - start_height = non_negative_integer(start_height) - count = non_negative_integer(count) - cp_height = non_negative_integer(cp_height) - - max_size = self.MAX_CHUNK_SIZE - count = min(count, max_size) - headers, count = await self.db.read_headers(start_height, count) - result = {'hex': headers.hex(), 'count': count, 'max': max_size} - if count and cp_height: - last_height = start_height + count - 1 - result.update(await self._merkle_proof(cp_height, last_height)) - return result - - async def estimatefee(self, request): - """The estimated transaction fee per kilobyte to be paid for a - transaction to be included within a certain number of blocks. - - number: the number of blocks - mode: CONSERVATIVE or ECONOMICAL estimation mode - """ - params = await format_params(request) - number = params.get(0, 0) - mode = params.get(1, None) - - number = non_negative_integer(number) - # use whitelist for mode, otherwise it would be easy to force a cache miss: - if mode not in self.coin.ESTIMATEFEE_MODES: - raise RPCError(BAD_REQUEST, f'unknown estimatefee mode: {mode}') - - number = self.coin.bucket_estimatefee_block_target(number) - cache = self.session_mgr.estimatefee_cache - - cache_item = cache.get((number, mode)) - if cache_item is not None: - blockhash, feerate, lock = cache_item - if blockhash and blockhash == self.session_mgr.bp.tip: - return feerate - else: - # create lock now, store it, and only then await on it - lock = asyncio.Lock() - cache[(number, mode)] = (None, None, lock) - async with lock: - cache_item = cache.get((number, mode)) - if cache_item is not None: - blockhash, feerate, lock = cache_item - if blockhash == self.session_mgr.bp.tip: - return feerate - blockhash = self.session_mgr.bp.tip - if mode: - feerate = await self.daemon_request('estimatefee', number, mode) - else: - feerate = await self.daemon_request('estimatefee', number) - assert feerate is not None - assert blockhash is not None - cache[(number, mode)] = (blockhash, feerate, lock) - return feerate - - async def headers_subscribe(self, request): - """Subscribe to get raw headers of new blocks.""" - self.subscribe_headers = True - return self.session_mgr.hsub_results - - async def relayfee(self, request): - """The minimum fee a low-priority tx must pay in order to be accepted - to the daemon's memory pool.""" - return await self.daemon_request('relayfee') - - async def scripthash_get_mempool(self, request): - """Return the mempool transactions touching a scripthash.""" - params = await format_params(request) - scripthash = params.get(0, "") - - hashX = scripthash_to_hashX(scripthash) - return await self.unconfirmed_history(hashX) - - async def scripthash_subscribe(self, request): - """Subscribe to a script hash. - - scripthash: the SHA256 hash of the script to subscribe to""" - params = await format_params(request) - scripthash = params.get(0, "") - hashX = scripthash_to_hashX(scripthash) - return await self.hashX_subscribe(hashX, scripthash) - - async def transaction_merkle(self, request): - """Return the merkle branch to a confirmed transaction given its hash - and height. 
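# The block_headers handler above clamps the requested count to the
# server's MAX_CHUNK_SIZE and, when a checkpoint height is supplied,
# proves the *last* returned header against it. The window arithmetic
# (the real db.read_headers performs the tip clamp internally):
def headers_window(start_height: int, count: int, max_chunk: int, db_height: int):
    count = min(count, max_chunk)
    count = max(0, min(count, db_height - start_height + 1))   # never past tip
    last_height = start_height + count - 1 if count else None  # header to prove
    return count, last_height


assert headers_window(100, 5000, 2016, 110) == (11, 110)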
- - tx_hash: the transaction hash as a hexadecimal string - height: the height of the block it is in - """ - params = await format_params(request) - tx_hash = params.get(0, "") - height = params.get(1, "") - - tx_hash = assert_tx_hash(tx_hash) - height = non_negative_integer(height) - - branch, tx_pos, cost = await self.session_mgr.merkle_branch_for_tx_hash( - height, tx_hash) - - res = {"block_height": height, "merkle": branch, "pos": tx_pos} - return res - - async def transaction_id_from_pos(self, request): - """Return the txid and optionally a merkle proof, given - a block height and position in the block. - """ - params = await format_params(request) - height = params.get(0, 0) - tx_pos = params.get(1, 0) - merkle = params.get(2, False) - - tx_pos = non_negative_integer(tx_pos) - height = non_negative_integer(height) - if merkle not in (True, False): - raise RPCError(BAD_REQUEST, '"merkle" must be a boolean') - - if merkle: - branch, tx_hash, cost = await self.session_mgr.merkle_branch_for_tx_pos( - height, tx_pos) - return {"tx_hash": tx_hash, "merkle": branch} - else: - tx_hashes, cost = await self.session_mgr.tx_hashes_at_blockheight(height) - try: - tx_hash = tx_hashes[tx_pos] - except IndexError: - raise RPCError(BAD_REQUEST, - f'no tx at position {tx_pos:,d} in block at height {height:,d}') - return hash_to_hex_str(tx_hash) - - async def compact_fee_histogram(self, request): - return await self.mempool.compact_fee_histogram() - - async def rpc_add_peer(self, request): - """Add a peer. - - real_name: "bch.electrumx.cash t50001 s50002" for example - """ - params = await format_params(request) - real_name = params.get(0, "") - await self.peer_mgr.add_localRPC_peer(real_name) - - res = f"peer '{real_name}' added" - return res - - async def donation_address(self, request): + async def donation_address(self): """Return the donation address as a string, empty if there is none.""" return self.env.donation_address - async def server_features_async(self, request): + async def server_features_async(self): return self.server_features(self.env) - async def peers_subscribe(self, request): + async def peers_subscribe(self): """Return the server peers as a list of (ip, host, details) tuples.""" return self.peer_mgr.on_peers_subscribe(False) - async def ping(self, request): - """Serves as a connection keep-alive mechanism and for the client to - confirm the server is still responding. - """ - return None - - async def transaction_broadcast_validate(self, request): - """Simulate a Broadcast a raw transaction to the network. - - raw_tx: the raw transaction as a hexadecimal string to validate for Atomicals FT rules""" - params = await format_params(request) - raw_tx = params.get(0, "") - return await self.ss.transaction_broadcast_validate(raw_tx) - - # need verify - async def transaction_broadcast(self, request): - """Broadcast a raw transaction to the network. - raw_tx: the raw transaction as a hexadecimal string""" - params = await format_params(request) - raw_tx = params.get(0, "") - return await self.ss.transaction_broadcast(raw_tx) - - # need verify - async def transaction_broadcast_force(self, request): - """Broadcast a raw transaction to the network. 
- raw_tx: the raw transaction as a hexadecimal string""" - params = await format_params(request) - raw_tx = params.get(0, "") - return await self.ss.transaction_broadcast_force(raw_tx) - - async def atomicals_get_ft_balances(self, request): - """Return the FT balances for a scripthash address""" - params = await format_params(request) - scripthash = params.get(0, "") - hashX = scripthash_to_hashX(scripthash) - return await self.hashX_ft_balances_atomicals(hashX) - - async def atomicals_get_nft_balances(self, request): - """Return the NFT balances for a scripthash address""" - params = await format_params(request) - scripthash = params.get(0, "") - hashX = scripthash_to_hashX(scripthash) - return await self.hashX_nft_balances_atomicals(hashX) - - async def atomicals_num_to_id(self, request): - params = await format_params(request) - limit = params.get(0, 10) - offset = params.get(1, 0) - asc = params.get(2, False) - - atomicals_num_to_id_map = await self.db.get_num_to_id(limit, offset, asc) - atomicals_num_to_id_map_reformatted = {} - for num, id in atomicals_num_to_id_map.items(): - atomicals_num_to_id_map_reformatted[num] = location_id_bytes_to_compact(id) - return {'global': await self.get_summary_info(), 'result': atomicals_num_to_id_map_reformatted} - - async def atomicals_block_hash(self, request): - params = await format_params(request) - height = params.get(0, self.session_mgr.bp.height) - block_hash = self.db.get_atomicals_block_hash(height) - return {'result': block_hash} - - async def atomicals_block_txs(self, request): - params = await format_params(request) - height = params.get(0, "") - tx_list = self.session_mgr.bp.get_atomicals_block_txs(height) - return {'global': await self.get_summary_info(), 'result': tx_list} - - async def atomicals_dump(self, request): - self.db.dump() - return {'result': True} - - async def atomicals_at_location(self, request): - """Return the Atomicals at a specific location id``` - """ - params = await format_params(request) - compact_location_id = params.get(0, "") - - atomical_basic_infos = [] - atomicals_found_at_location = self.db.get_atomicals_by_location_extended_info_long_form( - compact_to_location_id_bytes(compact_location_id)) - # atomicals_found_at_location['atomicals'] - # atomicals_found_at_location['atomicals'].sort(key=lambda x: x['atomical_number']) - for atomical_id in atomicals_found_at_location['atomicals']: - atomical_basic_info = self.session_mgr.bp.get_atomicals_id_mint_info_basic_struct(atomical_id) - atomical_basic_info['value'] = self.db.get_uxto_atomicals_value(compact_to_location_id_bytes(compact_location_id), atomical_id) - atomical_basic_infos.append(atomical_basic_info) - return { - 'location_info': atomicals_found_at_location['location_info'], - 'atomicals': atomical_basic_infos - } - - async def atomical_get_state_history(self, request): - params = await format_params(request) - compact_atomical_id_or_atomical_number = params.get(0, "") - - compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) - return {'global': await self.get_summary_info(), - 'result': await self.atomical_id_get_state_history(compact_atomical_id)} - - async def atomical_get_events(self, request): - params = await format_params(request) - compact_atomical_id_or_atomical_number = params.get(0, "") - - compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) - return {'global': await self.get_summary_info(), - 'result': await self.atomical_id_get_events(compact_atomical_id)} - - async def 
atomicals_get_tx_history(self, request): - """Return the history of an Atomical``` - atomical_id: the mint transaction hash + 'i' of the atomical id - verbose: to determine whether to print extended information - """ - params = await format_params(request) - compact_atomical_id_or_atomical_number = params.get(0, "") - - compact_atomical_id = compact_atomical_id_or_atomical_number - if isinstance(compact_atomical_id_or_atomical_number, int) != True and is_compact_atomical_id( - compact_atomical_id_or_atomical_number): - assert_atomical_id(compact_atomical_id) - else: - compact_atomical_id = location_id_bytes_to_compact( - self.db.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number)) - return {'global': await self.get_summary_info(), - 'result': await self.atomical_id_get_tx_history(compact_atomical_id)} - - # Get a summary view of a realm and if it's allowing mints and what parts already existed of a subrealm - async def atomicals_get_realm_info(self, request): - params = await format_params(request) - full_name = params.get(0, "") - Verbose = params.get(1, False) - - if not full_name or not isinstance(full_name, str): - raise RPCError(BAD_REQUEST, f'invalid input full_name: {full_name}') - full_name = full_name.lower() - split_names = full_name.split('.') - total_name_parts = len(split_names) - level = 0 - last_found_realm_atomical_id = None - last_found_realm = None - realms_path = [] - latest_all_entries_candidates = [] - height = self.session_mgr.bp.height - for name_part in split_names: - if level == 0: - realm_status, last_found_realm, latest_all_entries_candidates = self.session_mgr.bp.get_effective_realm( - name_part, height) - else: - self.logger.info(f'atomicals_get_realm_info {last_found_realm} {name_part}') - realm_status, last_found_realm, latest_all_entries_candidates = self.session_mgr.bp.get_effective_subrealm( - last_found_realm, name_part, height) - # stops when it does not found the realm component - if realm_status != 'verified': - break - # Save the latest realm (could be the top level realm, or the parent of a subrealm, or even the subrealm itself) - last_found_realm_atomical_id = last_found_realm - # Add it to the list of paths - realms_path.append({ - 'atomical_id': location_id_bytes_to_compact(last_found_realm), - 'name_part': name_part, - 'candidates': latest_all_entries_candidates - }) - level += 1 - - joined_name = '' - is_first_name_part = True - for name_element in realms_path: - if is_first_name_part: - is_first_name_part = False - else: - joined_name += '.' 
- joined_name += name_element['name_part'] - # Nothing was found - realms_path_len = len(realms_path) - if realms_path_len == 0: - return {'result': { - 'atomical_id': None, - 'top_level_realm_atomical_id': None, - 'top_level_realm_name': None, - 'nearest_parent_realm_atomical_id': None, - 'nearest_parent_realm_name': None, - 'request_full_realm_name': full_name, - 'found_full_realm_name': None, - 'missing_name_parts': full_name, - 'candidates': format_name_type_candidates_to_rpc(latest_all_entries_candidates, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - latest_all_entries_candidates))} - } - # Populate the subrealm minting rules for a parent atomical - that = self - - def populate_rules_response_struct(parent_atomical_id, struct_to_populate, Verbose): - current_height = that.session_mgr.bp.height - subrealm_mint_mod_history = that.session_mgr.bp.get_mod_history(parent_atomical_id, current_height) - current_height_latest_state = calculate_latest_state_from_mod_history(subrealm_mint_mod_history) - current_height_rules_list = validate_rules_data(current_height_latest_state.get(SUBREALM_MINT_PATH, None)) - nearest_parent_realm_subrealm_mint_allowed = False - struct_to_populate['nearest_parent_realm_subrealm_mint_rules'] = { - 'nearest_parent_realm_atomical_id': location_id_bytes_to_compact(parent_atomical_id), - 'current_height': current_height, - 'current_height_rules': current_height_rules_list - } - if current_height_rules_list and len(current_height_rules_list) > 0: - nearest_parent_realm_subrealm_mint_allowed = True - struct_to_populate[ - 'nearest_parent_realm_subrealm_mint_allowed'] = nearest_parent_realm_subrealm_mint_allowed - - # - # - # - # At least the top level realm was found if we got this far - # - # - # The number of realms returned and name components is equal, therefore the subrealm was found correctly - if realms_path_len == total_name_parts: - nearest_parent_realm_atomical_id = None - nearest_parent_realm_name = None - top_level_realm = realms_path[0]['atomical_id'] - top_level_realm_name = realms_path[0]['name_part'] - if realms_path_len >= 2: - nearest_parent_realm_atomical_id = realms_path[-2]['atomical_id'] - nearest_parent_realm_name = realms_path[-2]['name_part'] - elif realms_path_len == 1: - nearest_parent_realm_atomical_id = top_level_realm - nearest_parent_realm_name = top_level_realm_name - final_subrealm_name = split_names[-1] - applicable_rule_map = self.session_mgr.bp.build_applicable_rule_map(latest_all_entries_candidates, - compact_to_location_id_bytes( - nearest_parent_realm_atomical_id), - final_subrealm_name) - return_struct = { - 'atomical_id': realms_path[-1]['atomical_id'], - 'top_level_realm_atomical_id': top_level_realm, - 'top_level_realm_name': top_level_realm_name, - 'nearest_parent_realm_atomical_id': nearest_parent_realm_atomical_id, - 'nearest_parent_realm_name': nearest_parent_realm_name, - 'request_full_realm_name': full_name, - 'found_full_realm_name': joined_name, - 'missing_name_parts': None, - 'candidates': format_name_type_candidates_to_rpc(latest_all_entries_candidates, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - latest_all_entries_candidates)) - } - populate_rules_response_struct(compact_to_location_id_bytes(nearest_parent_realm_atomical_id), - return_struct, Verbose) - return {'result': return_struct} - - # The number of realms and components do not match, that is because at least the top level realm or intermediate subrealm was found - # But the final subrealm does not exist yet - # if realms_path_len 
< total_name_parts: - # It is known if we got this far that realms_path_len < total_name_parts - nearest_parent_realm_atomical_id = None - nearest_parent_realm_name = None - top_level_realm = realms_path[0]['atomical_id'] - top_level_realm_name = realms_path[0]['name_part'] - if realms_path_len >= 2: - nearest_parent_realm_atomical_id = realms_path[-1]['atomical_id'] - nearest_parent_realm_name = realms_path[-1]['name_part'] - elif realms_path_len == 1: - nearest_parent_realm_atomical_id = top_level_realm - nearest_parent_realm_name = top_level_realm_name - - missing_name_parts = '.'.join(split_names[len(realms_path):]) - final_subrealm_name = split_names[-1] - applicable_rule_map = self.session_mgr.bp.build_applicable_rule_map(latest_all_entries_candidates, - compact_to_location_id_bytes( - nearest_parent_realm_atomical_id), - final_subrealm_name) - return_struct = { - 'atomical_id': None, - 'top_level_realm_atomical_id': top_level_realm, - 'top_level_realm_name': top_level_realm_name, - 'nearest_parent_realm_atomical_id': nearest_parent_realm_atomical_id, - 'nearest_parent_realm_name': nearest_parent_realm_name, - 'request_full_realm_name': full_name, - 'found_full_realm_name': joined_name, - 'missing_name_parts': missing_name_parts, - 'final_subrealm_name': final_subrealm_name, - 'candidates': format_name_type_candidates_to_rpc_for_subname(latest_all_entries_candidates, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - latest_all_entries_candidates)) - } - if Verbose: - populate_rules_response_struct(compact_to_location_id_bytes(nearest_parent_realm_atomical_id), - return_struct, Verbose) - return {'result': return_struct} - - async def atomicals_get_by_realm(self, request): - params = await format_params(request) - name = params.get(0, "") - - height = self.session_mgr.bp.height - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_realm(name, height) - formatted_entries = format_name_type_candidates_to_rpc(all_entries, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - all_entries)) - - if candidate_atomical_id: - candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) - - found_atomical_id = None - if status == 'verified': - found_atomical_id = candidate_atomical_id - if status is None: - formatted_entries = [] - - return_result = { - 'status': status, - 'candidate_atomical_id': candidate_atomical_id, - 'atomical_id': found_atomical_id, - 'candidates': formatted_entries, - 'type': 'realm' - } - res = { - 'result': return_result - } - return res - - async def atomicals_get_by_subrealm(self, request): - params = await format_params(request) - parent_compact_atomical_id_or_atomical_number = params.get(0, "") - name = params.get(1, "") - - height = self.session_mgr.bp.height - compact_atomical_id_parent = self.atomical_resolve_id(parent_compact_atomical_id_or_atomical_number) - atomical_id_parent = compact_to_location_id_bytes(compact_atomical_id_parent) - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_subrealm(atomical_id_parent, - name, height) - formatted_entries = format_name_type_candidates_to_rpc(all_entries, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - all_entries)) - - if candidate_atomical_id: - candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) - - found_atomical_id = None - if status == 'verified': - found_atomical_id = candidate_atomical_id - if status is None: - formatted_entries = [] - - return_result = { - 'status': status, - 
'candidate_atomical_id': candidate_atomical_id, - 'atomical_id': found_atomical_id, - 'candidates': formatted_entries, - 'type': 'subrealm' - } - res = { - 'result': return_result - } - return res - - async def atomicals_get_by_dmitem(self, request): - params = await format_params(request) - parent_compact_atomical_id_or_atomical_number = params.get(0, "") - name = params.get(1, "") - - height = self.session_mgr.bp.height - compact_atomical_id_parent = self.atomical_resolve_id(parent_compact_atomical_id_or_atomical_number) - atomical_id_parent = compact_to_location_id_bytes(compact_atomical_id_parent) - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(atomical_id_parent, name, - height) - formatted_entries = format_name_type_candidates_to_rpc(all_entries, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - all_entries)) - - if candidate_atomical_id: - candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) - - found_atomical_id = None - if status == 'verified': - found_atomical_id = candidate_atomical_id - if status is None: - formatted_entries = [] - - return_result = { - 'status': status, - 'candidate_atomical_id': candidate_atomical_id, - 'atomical_id': found_atomical_id, - 'candidates': formatted_entries, - 'type': 'dmitem' - } - res = { - 'result': return_result - } - return res - - # verified - async def atomicals_get_by_ticker(self, request): - params = await format_params(request) - ticker = params.get(0, "") - - height = self.session_mgr.bp.height - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_ticker(ticker, height) - formatted_entries = format_name_type_candidates_to_rpc(all_entries, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - all_entries)) - - if candidate_atomical_id: - candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) - - found_atomical_id = None - if status == 'verified': - found_atomical_id = candidate_atomical_id - if status is None: - formatted_entries = [] - - return_result = { - 'status': status, - 'candidate_atomical_id': candidate_atomical_id, - 'atomical_id': found_atomical_id, - 'candidates': formatted_entries, - 'type': 'ticker' - } - return { - 'result': return_result - } - - async def atomicals_get_by_container(self, request): - params = await format_params(request) - container = params.get(0, "") - - height = self.session_mgr.bp.height - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, height) - formatted_entries = format_name_type_candidates_to_rpc(all_entries, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - all_entries)) - - if candidate_atomical_id: - candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) - - found_atomical_id = None - if status == 'verified': - found_atomical_id = candidate_atomical_id - if status is None: - formatted_entries = [] - - return_result = { - 'status': status, - 'candidate_atomical_id': candidate_atomical_id, - 'atomical_id': found_atomical_id, - 'candidates': formatted_entries, - 'type': 'container' - } - res = { - 'result': return_result - } - return res - - async def atomicals_get_by_container_item(self, request): - params = await format_params(request) - container = params.get(0, "") - item_name = params.get(1, "") - - height = self.session_mgr.bp.height - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, height) - found_atomical_id = None - 
formatted_entries = format_name_type_candidates_to_rpc(all_entries, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - all_entries)) - if status == 'verified': - found_atomical_id = candidate_atomical_id - else: - self.logger.info(f'formatted_entries {formatted_entries}') - raise RPCError(BAD_REQUEST, f'Container does not exist') - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(found_atomical_id, - item_name, height) - found_item_atomical_id = None - formatted_entries = format_name_type_candidates_to_rpc(all_entries, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - all_entries)) - - if candidate_atomical_id: - candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) - if status == 'verified': - found_item_atomical_id = candidate_atomical_id - if status is None: - formatted_entries = [] - - return_result = { - 'status': status, - 'candidate_atomical_id': candidate_atomical_id, - 'atomical_id': found_item_atomical_id, - 'candidates': formatted_entries, - 'type': 'item' - } - return { - 'result': return_result - } - - async def atomicals_get_by_container_item_validation(self, request): - params = await format_params(request) - container = params.get(0, "") - item_name = params.get(1, "") - bitworkc = params.get(2, "") - bitworkr = params.get(3, "") - main_name = params.get(4, "") - main_hash = params.get(5, "") - proof = params.get(6, "") - check_without_sealed = params.get(7, "") - - height = self.session_mgr.bp.height - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, height) - found_parent_atomical_id = None - formatted_entries = format_name_type_candidates_to_rpc(all_entries, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - all_entries)) - if status == 'verified': - found_parent_atomical_id = candidate_atomical_id - else: - raise RPCError(BAD_REQUEST, f'Container does not exist') - compact_atomical_id = location_id_bytes_to_compact(found_parent_atomical_id) - container_info = await self.atomical_id_get(compact_atomical_id) - # If it is a dmint container then there is no items field, instead construct it from the dmitems - container_dmint_status = container_info.get('$container_dmint_status') - errors = container_dmint_status.get('errors') - if not container_dmint_status or container_dmint_status.get('status') != 'valid': - errors = container_dmint_status.get('errors') - if check_without_sealed and errors and len(errors) == 1 and errors[0] == 'container not sealed': - pass - else: - raise RPCError(BAD_REQUEST, f'Container dmint status is invalid') - - dmint = container_dmint_status.get('dmint') - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(found_parent_atomical_id, - item_name, height) - found_item_atomical_id = None - formatted_entries = format_name_type_candidates_to_rpc(all_entries, - self.session_mgr.bp.build_atomical_id_to_candidate_map( - all_entries)) - if candidate_atomical_id: - candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) - if status == 'verified': - found_item_atomical_id = candidate_atomical_id - - # validate the proof data nonetheless - if not proof or not isinstance(proof, list) or len(proof) == 0: - raise RPCError(BAD_REQUEST, f'Proof must be provided') - - applicable_rule, state_at_height = self.session_mgr.bp.get_applicable_rule_by_height(found_parent_atomical_id, - item_name, - height - MINT_SUBNAME_RULES_BECOME_EFFECTIVE_IN_BLOCKS, - DMINT_PATH) - proof_valid, 
target_vector, target_hash = validate_merkle_proof_dmint(dmint['merkle'], item_name, bitworkc, - bitworkr, main_name, main_hash, proof) - if applicable_rule and applicable_rule.get('matched_rule'): - applicable_rule = applicable_rule.get('matched_rule') - - return_result = { - 'status': status, - 'candidate_atomical_id': candidate_atomical_id, - 'atomical_id': found_item_atomical_id, - 'candidates': formatted_entries, - 'type': 'item', - 'applicable_rule': applicable_rule, - 'proof_valid': proof_valid, - 'target_vector': target_vector, - 'target_hash': target_hash, - 'dmint': state_at_height.get('dmint') - } - res = { - 'result': return_result - } - return res - - def auto_populate_container_regular_items_fields(self, items): - if not items or not isinstance(items, dict): - return {} - for item, value in items.items(): - provided_id = value.get('id') - value['status'] = 'verified' - if provided_id and isinstance(provided_id, bytes) and len(provided_id) == 36: - value['$id'] = location_id_bytes_to_compact(provided_id) - return auto_encode_bytes_elements(items) - - async def atomicals_get_container_items(self, request): - params = await format_params(request) - container = params.get(0, "") - limit = params.get(1, 10) - offset = params.get(2, 0) - - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, - self.session_mgr.bp.height) - found_atomical_id = None - if status == 'verified': - found_atomical_id = candidate_atomical_id - else: - raise RPCError(BAD_REQUEST, f'Container not found') - - compact_atomical_id = location_id_bytes_to_compact(found_atomical_id) - container_info = await self.atomical_id_get(compact_atomical_id) - # If it is a dmint container then there is no items field, instead construct it from the dmitems - container_dmint_status = container_info.get('$container_dmint_status') - items = [] - if container_dmint_status: - if limit > 100: - limit = 100 - if offset < 0: - offset = 0 - height = self.session_mgr.bp.height - items = await self.session_mgr.bp.get_effective_dmitems_paginated(found_atomical_id, limit, offset, height) - res = { - 'result': { - 'container': container_info, - 'item_data': { - 'limit': limit, - 'offset': offset, - 'type': 'dmint', - 'items': self.auto_populate_container_dmint_items_fields(items) - } - } - } - else: - container_mod_history = self.session_mgr.bp.get_mod_history(found_atomical_id, self.session_mgr.bp.height) - current_height_latest_state = calculate_latest_state_from_mod_history(container_mod_history) - items = current_height_latest_state.get('items', []) - res = { - 'result': { - 'container': container_info, - 'item_data': { - 'limit': limit, - 'offset': offset, - 'type': 'regular', - 'items': self.auto_populate_container_regular_items_fields(items) - } - } - } - return res - - async def atomicals_get_ft_info(self, request): - params = await format_params(request) - compact_atomical_id_or_atomical_number = params.get(0, "") - compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number) - return {'global': await self.get_summary_info(), - 'result': await self.atomical_id_get_ft_info(compact_atomical_id)} - - async def atomicals_get_dft_mints(self, request): - params = await format_params(request) - compact_atomical_id_or_atomical_number = params.get(0, "") - atomical_id = compact_to_location_id_bytes(compact_atomical_id_or_atomical_number) - Limit = params.get(1, 100) - Offset = params.get(2, 0) - return {'global': await self.get_summary_info(), - 'result': 
self.session_mgr.bp.get_distmints_by_atomical_id(atomical_id, Limit, Offset)} - - # verified - async def atomicals_search_tickers(self, request): - params = await format_params(request) - prefix = params.get(0, None) - Reverse = params.get(1, False) - Limit = params.get(2, 100) - Offset = params.get(3, 0) - is_verified_only = params.get(4, True) - return self.atomicals_search_name_template(b'tick', 'ticker', None, prefix, Reverse, Limit, Offset, - is_verified_only) - - async def atomicals_search_realms(self, request): - params = await format_params(request) - prefix = params.get(0, None) - Reverse = params.get(1, False) - Limit = params.get(2, 100) - Offset = params.get(3, 0) - is_verified_only = params.get(4, True) - return self.atomicals_search_name_template(b'rlm', 'realm', None, prefix, Reverse, Limit, Offset, - is_verified_only) - - async def atomicals_search_subrealms(self, request): - params = await format_params(request) - parent_realm_id_compact = params.get(0, "") - prefix = params.get(1, None) - Reverse = params.get(2, False) - Limit = params.get(3, 100) - Offset = params.get(4, 0) - is_verified_only = params.get(5, True) - parent_realm_id_long_form = compact_to_location_id_bytes(parent_realm_id_compact) - return self.atomicals_search_name_template(b'srlm', 'subrealm', parent_realm_id_long_form, prefix, Reverse, - Limit, Offset, is_verified_only) - - async def atomicals_search_containers(self, request): - params = await format_params(request) - prefix = params.get(0, None) - Reverse = params.get(1, False) - Limit = params.get(2, 100) - Offset = params.get(3, 0) - is_verified_only = params.get(4, True) - return self.atomicals_search_name_template(b'co', 'collection', None, prefix, Reverse, Limit, Offset, - is_verified_only) - - async def atomicals_get_holders(self, request): - """Return the holder by a specific location id``` - """ - params = await format_params(request) - compact_atomical_id = params.get(0, "") - limit = params.get(1, 50) - offset = params.get(2, 0) - - formatted_results = [] - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.atomical_id_get(compact_atomical_id) - atomical = await self.db.populate_extended_atomical_holder_info(atomical_id, atomical) - if atomical["type"] == "FT": - if atomical.get("$mint_mode", "fixed") == "fixed": - max_supply = atomical.get('$max_supply', 0) - else: - max_supply = atomical.get('$max_supply', -1) - if max_supply < 0: - mint_amount = atomical.get("mint_info", {}).get("args", {}).get("mint_amount") - max_supply = DFT_MINT_MAX_MAX_COUNT_DENSITY * mint_amount - for holder in atomical.get("holders", [])[offset:offset + limit]: - percent = holder['holding'] / max_supply - formatted_results.append({ - "percent": percent, - "address": get_address_from_output_script(bytes.fromhex(holder['script'])), - "holding": holder["holding"] - }) - elif atomical["type"] == "NFT": - for holder in atomical.get("holders", [])[offset:offset + limit]: - formatted_results.append({ - "address": get_address_from_output_script(bytes.fromhex(holder['script'])), - "holding": holder["holding"] - }) - return formatted_results - - async def atomicals_transaction(self, request): - params = await format_params(request) - txid = params.get(0, "") - return await self.session_mgr.get_transaction_detail(txid) - - async def get_transaction_detail_by_height(self, height, limit, offset, op_type: Optional[str] = None, reverse=True): - res = [] - txs_list = [] - txs = self.db.get_atomicals_block_txs(height) - for tx in txs: - # get 
operation by db method - tx_num, _ = self.db.get_tx_num_height_from_tx_hash(hex_str_to_hash(tx)) - txs_list.append({ - "tx_num": tx_num, - "tx_hash": tx, - "height": height - }) - txs_list.sort(key=lambda x: x['tx_num'], reverse=reverse) - for tx in txs_list: - data = await self.session_mgr.get_transaction_detail(tx["tx_hash"], height, tx["tx_num"]) - if (op_type and op_type == data["op"]) or (not op_type and data["op"]): - res.append(data) - total = len(res) - return res[offset:offset + limit], total - - # get the whole transaction by block height - # return transaction detail - async def transaction_by_height(self, request): - params = await format_params(request) - height = params.get(0, "") - limit = params.get(1, 10) - offset = params.get(2, 0) - op_type = params.get(3, None) - reverse = params.get(4, True) - - res, total = await self.get_transaction_detail_by_height(height, limit, offset, op_type, reverse) - return {"result": res, "total": total, "limit": limit, "offset": offset} - - # get transaction by atomical id - async def transaction_by_atomical_id(self, request): - params = await format_params(request) - compact_atomical_id_or_atomical_number = params.get(0, "") - limit = params.get(1, 10) - offset = params.get(2, 0) - op_type = params.get(3, None) - reverse = params.get(4, True) - - res = [] - compact_atomical_id = compact_atomical_id_or_atomical_number - if isinstance(compact_atomical_id_or_atomical_number, int) != True and is_compact_atomical_id( - compact_atomical_id_or_atomical_number): - assert_atomical_id(compact_atomical_id) - else: - compact_atomical_id = location_id_bytes_to_compact( - self.db.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number)) - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - hashX = double_sha256(atomical_id) - - res = [] - if op_type: - op = self.session_mgr.bp.op_list.get(op_type, None) - history_data, total = await self.session_mgr.get_history_op(hashX, limit, offset, op, reverse) - else: - history_data, total = await self.session_mgr.get_history_op(hashX, limit, offset, None, reverse) - for history in history_data: - tx_hash, tx_height = self.db.fs_tx_hash(history["tx_num"]) - data = await self.session_mgr.get_transaction_detail(hash_to_hex_str(tx_hash), tx_height, history["tx_num"]) - if data and data["op"]: - if (op_type and data["op"] == op_type) or not op_type: - res.append(data) - return {"result": res, "total": total, "limit": limit, "offset": offset} - - # get transaction by scripthash - async def transaction_by_scripthash(self, request): - params = await format_params(request) - scripthash = params.get(0, "") - limit = params.get(1, 10) - offset = params.get(2, 0) - op_type = params.get(3, None) - reverse = params.get(4, True) - - hashX = scripthash_to_hashX(scripthash) - res = [] - if op_type: - op = self.session_mgr.bp.op_list.get(op_type, None) - history_data, total = await self.session_mgr.get_history_op(hashX, limit, offset, op, reverse) - else: - history_data, total = await self.session_mgr.get_history_op(hashX, limit, offset, None, reverse) - - for history in history_data: - tx_hash, tx_height = self.db.fs_tx_hash(history["tx_num"]) - data = await self.session_mgr.get_transaction_detail(hash_to_hex_str(tx_hash), tx_height, history["tx_num"]) - if data and data["op"]: - if data["op"] and (data["op"] == op_type or not op_type): - res.append(data) - return {"result": res, "total": total, "limit": limit, "offset": offset} - - # searah for global - async def transaction_global(self, request): 
- params = await format_params(request) - limit = params.get(0, 10) - offset = params.get(1, 0) - op_type = params.get(2, None) - reverse = params.get(3, True) - return await self.session_mgr.transaction_global(limit, offset, op_type, reverse) + # async def ping(self, request): + # """Serves as a connection keep-alive mechanism and for the client to + # confirm the server is still responding. + # """ + # return None diff --git a/electrumx/server/session/session_base.py b/electrumx/server/session/session_base.py index 7f7ec393..4041b74b 100644 --- a/electrumx/server/session/session_base.py +++ b/electrumx/server/session/session_base.py @@ -22,7 +22,6 @@ class SessionBase(RPCSession): sessions. """ - MAX_CHUNK_SIZE = 2016 session_counter = itertools.count() def __init__( @@ -64,7 +63,9 @@ def __init__( self.logger, self.coin, self.session_mgr, + self.peer_mgr, self.client, + maybe_bump_cost=self.bump_cost ) async def notify(self, touched, height_changed): diff --git a/electrumx/server/session/session_manager.py b/electrumx/server/session/session_manager.py index aaa6254b..5c2bb49c 100644 --- a/electrumx/server/session/session_manager.py +++ b/electrumx/server/session/session_manager.py @@ -23,7 +23,7 @@ from electrumx.server.mempool import MemPool from electrumx.server.session import BAD_REQUEST, DAEMON_ERROR from electrumx.server.session.http_session import HttpHandler -from electrumx.server.session.util import non_negative_integer +from electrumx.server.session.util import non_negative_integer, SESSION_PROTOCOL_MAX from electrumx.server.peers import PeerManager from typing import TYPE_CHECKING, Type @@ -111,9 +111,9 @@ def __init__( self.session_event = Event() # Set up the RPC request handlers - cmds = ('add_peer daemon_url disconnect getinfo groups log peers ' - 'query reorg sessions stop debug_memusage_list_all_objects ' - 'debug_memusage_get_random_backref_chain'.split()) + # cmds = ('add_peer daemon_url disconnect getinfo groups log peers ' + # 'query reorg sessions stop debug_memusage_list_all_objects ' + # 'debug_memusage_get_random_backref_chain'.split()) # LocalRPC.request_handlers = {cmd: getattr(self, 'rpc_' + cmd) for cmd in cmds} def _ssl_context(self): @@ -134,157 +134,7 @@ async def _start_servers(self, services): request_middleware(self), ]) handler = HttpHandler(self, self.db, self.mempool, self.peer_mgr, kind) - # GET - app.router.add_get('/proxy', handler.proxy) - app.router.add_get('/proxy/health', handler.health) - app.router.add_get('/proxy/blockchain.block.header', handler.block_header) - app.router.add_get('/proxy/blockchain.block.headers', handler.block_headers) - app.router.add_get('/proxy/blockchain.estimatefee', handler.estimatefee) - # app.router.add_get('/proxy/headers.subscribe', handler.headers_subscribe) - # app.router.add_get('/proxy/relayfee', handler.relayfee) - app.router.add_get('/proxy/blockchain.scripthash.get_balance', handler.scripthash_get_balance) - app.router.add_get('/proxy/blockchain.scripthash.get_history', handler.scripthash_get_history) - app.router.add_get('/proxy/blockchain.scripthash.get_mempool', handler.scripthash_get_mempool) - app.router.add_get('/proxy/blockchain.scripthash.listunspent', handler.scripthash_listunspent) - app.router.add_get('/proxy/blockchain.scripthash.subscribe', handler.scripthash_subscribe) - app.router.add_get('/proxy/blockchain.transaction.broadcast', handler.transaction_broadcast) - app.router.add_get('/proxy/blockchain.transaction.broadcast_force', handler.transaction_broadcast_force) - 
app.router.add_get('/proxy/blockchain.transaction.get', handler.transaction_get) - app.router.add_get('/proxy/blockchain.transaction.get_merkle', handler.transaction_merkle) - app.router.add_get('/proxy/blockchain.transaction.id_from_pos', handler.transaction_id_from_pos) - # app.router.add_get('/proxy/server.add_peer', handler.add_peer) - # app.router.add_get('/proxy/server.banner', handler.banner) - app.router.add_get('/proxy/server.donation_address', handler.donation_address) - app.router.add_get('/proxy/server.features', handler.server_features_async) - app.router.add_get('/proxy/server.peers.subscribe', handler.peers_subscribe) - app.router.add_get('/proxy/server.ping', handler.ping) - # app.router.add_get('/proxy/server.version', handler.server_version) - app.router.add_get('/proxy/blockchain.atomicals.validate', handler.transaction_broadcast_validate) - app.router.add_get('/proxy/blockchain.atomicals.get_ft_balances_scripthash', - handler.atomicals_get_ft_balances) - app.router.add_get('/proxy/blockchain.atomicals.get_nft_balances_scripthash', - handler.atomicals_get_nft_balances) - app.router.add_get('/proxy/blockchain.atomicals.listscripthash', handler.atomicals_listscripthash) - app.router.add_get('/proxy/blockchain.atomicals.list', handler.atomicals_list) - app.router.add_get('/proxy/blockchain.atomicals.get_numbers', handler.atomicals_num_to_id) - app.router.add_get('/proxy/blockchain.atomicals.get_block_hash', handler.atomicals_block_hash) - app.router.add_get('/proxy/blockchain.atomicals.get_block_txs', handler.atomicals_block_txs) - app.router.add_get('/proxy/blockchain.atomicals.dump', handler.atomicals_dump) - app.router.add_get('/proxy/blockchain.atomicals.at_location', handler.atomicals_at_location) - app.router.add_get('/proxy/blockchain.atomicals.get_location', handler.atomicals_get_location) - app.router.add_get('/proxy/blockchain.atomicals.get', handler.atomicals_get) - app.router.add_get('/proxy/blockchain.atomicals.get_global', handler.atomicals_get_global) - app.router.add_get('/proxy/blockchain.atomicals.get_state', handler.atomical_get_state) - app.router.add_get('/proxy/blockchain.atomicals.get_state_history', - handler.atomical_get_state_history) - app.router.add_get('/proxy/blockchain.atomicals.get_events', handler.atomical_get_events) - app.router.add_get('/proxy/blockchain.atomicals.get_tx_history', handler.atomicals_get_tx_history) - app.router.add_get('/proxy/blockchain.atomicals.get_realm_info', handler.atomicals_get_realm_info) - app.router.add_get('/proxy/blockchain.atomicals.get_by_realm', handler.atomicals_get_by_realm) - app.router.add_get('/proxy/blockchain.atomicals.get_by_subrealm', handler.atomicals_get_by_subrealm) - app.router.add_get('/proxy/blockchain.atomicals.get_by_dmitem', handler.atomicals_get_by_dmitem) - app.router.add_get('/proxy/blockchain.atomicals.get_by_ticker', handler.atomicals_get_by_ticker) - app.router.add_get('/proxy/blockchain.atomicals.get_by_container', - handler.atomicals_get_by_container) - app.router.add_get('/proxy/blockchain.atomicals.get_by_container_item', - handler.atomicals_get_by_container_item) - app.router.add_get('/proxy/blockchain.atomicals.get_by_container_item_validate', - handler.atomicals_get_by_container_item_validation) - app.router.add_get('/proxy/blockchain.atomicals.get_container_items', - handler.atomicals_get_container_items) - app.router.add_get('/proxy/blockchain.atomicals.get_ft_info', handler.atomicals_get_ft_info) - app.router.add_get('/proxy/blockchain.atomicals.get_dft_mints', 
handler.atomicals_get_dft_mints) - app.router.add_get('/proxy/blockchain.atomicals.find_tickers', handler.atomicals_search_tickers) - app.router.add_get('/proxy/blockchain.atomicals.find_realms', handler.atomicals_search_realms) - app.router.add_get('/proxy/blockchain.atomicals.find_subrealms', handler.atomicals_search_subrealms) - app.router.add_get('/proxy/blockchain.atomicals.find_containers', - handler.atomicals_search_containers) - app.router.add_get('/proxy/blockchain.atomicals.get_holders', handler.atomicals_get_holders) - app.router.add_get('/proxy/blockchain.atomicals.transaction', handler.atomicals_transaction) - app.router.add_get('/proxy/blockchain.atomicals.transaction_by_height', - handler.transaction_by_height) - app.router.add_get('/proxy/blockchain.atomicals.transaction_by_atomical_id', - handler.transaction_by_atomical_id) - app.router.add_get('/proxy/blockchain.atomicals.transaction_by_scripthash', - handler.transaction_by_scripthash) - app.router.add_get('/proxy/blockchain.atomicals.transaction_global', handler.transaction_global) - # POST - app.router.add_post('/proxy', handler.proxy) - app.router.add_post('/proxy/blockchain.block.header', handler.block_header) - app.router.add_post('/proxy/blockchain.block.headers', handler.block_headers) - app.router.add_post('/proxy/blockchain.estimatefee', handler.estimatefee) - # app.router.add_post('/proxy/headers.subscribe', handler.headers_subscribe) - # app.router.add_post('/proxy/relayfee', handler.relayfee) - app.router.add_post('/proxy/blockchain.scripthash.get_balance', handler.scripthash_get_balance) - app.router.add_post('/proxy/blockchain.scripthash.get_history', handler.scripthash_get_history) - app.router.add_post('/proxy/blockchain.scripthash.get_mempool', handler.scripthash_get_mempool) - app.router.add_post('/proxy/blockchain.scripthash.listunspent', handler.scripthash_listunspent) - app.router.add_post('/proxy/blockchain.scripthash.subscribe', handler.scripthash_subscribe) - app.router.add_post('/proxy/blockchain.transaction.broadcast', handler.transaction_broadcast) - app.router.add_post('/proxy/blockchain.transaction.get', handler.transaction_get) - app.router.add_post('/proxy/blockchain.transaction.get_merkle', handler.transaction_merkle) - app.router.add_post('/proxy/blockchain.transaction.id_from_pos', handler.transaction_id_from_pos) - # app.router.add_post('/proxy/server.add_peer', handler.add_peer) - # app.router.add_post('/proxy/server.banner', handler.banner) - app.router.add_post('/proxy/server.donation_address', handler.donation_address) - app.router.add_post('/proxy/server.features', handler.server_features_async) - app.router.add_post('/proxy/server.peers.subscribe', handler.peers_subscribe) - app.router.add_post('/proxy/server.ping', handler.ping) - # app.router.add_post('/proxy/server.version', handler.server_version) - app.router.add_post('/proxy/blockchain.atomicals.validate', handler.transaction_broadcast_validate) - app.router.add_post('/proxy/blockchain.atomicals.get_ft_balances_scripthash', - handler.atomicals_get_ft_balances) - app.router.add_post('/proxy/blockchain.atomicals.get_nft_balances_scripthash', - handler.atomicals_get_nft_balances) - app.router.add_post('/proxy/blockchain.atomicals.listscripthash', handler.atomicals_listscripthash) - app.router.add_post('/proxy/blockchain.atomicals.list', handler.atomicals_list) - app.router.add_post('/proxy/blockchain.atomicals.get_numbers', handler.atomicals_num_to_id) - app.router.add_post('/proxy/blockchain.atomicals.get_block_hash', 
handler.atomicals_block_hash) - app.router.add_post('/proxy/blockchain.atomicals.get_block_txs', handler.atomicals_block_txs) - app.router.add_post('/proxy/blockchain.atomicals.dump', handler.atomicals_dump) - app.router.add_post('/proxy/blockchain.atomicals.at_location', handler.atomicals_at_location) - app.router.add_post('/proxy/blockchain.atomicals.get_location', handler.atomicals_get_location) - app.router.add_post('/proxy/blockchain.atomicals.get', handler.atomicals_get) - app.router.add_post('/proxy/blockchain.atomicals.get_global', handler.atomicals_get_global) - app.router.add_post('/proxy/blockchain.atomicals.get_state', handler.atomical_get_state) - app.router.add_post('/proxy/blockchain.atomicals.get_state_history', - handler.atomical_get_state_history) - app.router.add_post('/proxy/blockchain.atomicals.get_events', handler.atomical_get_events) - app.router.add_post('/proxy/blockchain.atomicals.get_tx_history', handler.atomicals_get_tx_history) - app.router.add_post('/proxy/blockchain.atomicals.get_realm_info', handler.atomicals_get_realm_info) - app.router.add_post('/proxy/blockchain.atomicals.get_by_realm', handler.atomicals_get_by_realm) - app.router.add_post('/proxy/blockchain.atomicals.get_by_subrealm', - handler.atomicals_get_by_subrealm) - app.router.add_post('/proxy/blockchain.atomicals.get_by_dmitem', handler.atomicals_get_by_dmitem) - app.router.add_post('/proxy/blockchain.atomicals.get_by_ticker', handler.atomicals_get_by_ticker) - app.router.add_post('/proxy/blockchain.atomicals.get_by_container', - handler.atomicals_get_by_container) - app.router.add_post('/proxy/blockchain.atomicals.get_by_container_item', - handler.atomicals_get_by_container_item) - app.router.add_post('/proxy/blockchain.atomicals.get_by_container_item_validate', - handler.atomicals_get_by_container_item_validation) - app.router.add_post('/proxy/blockchain.atomicals.get_container_items', - handler.atomicals_get_container_items) - app.router.add_post('/proxy/blockchain.atomicals.get_ft_info', handler.atomicals_get_ft_info) - app.router.add_post('/proxy/blockchain.atomicals.get_dft_mints', handler.atomicals_get_dft_mints) - app.router.add_post('/proxy/blockchain.atomicals.find_tickers', handler.atomicals_search_tickers) - app.router.add_post('/proxy/blockchain.atomicals.find_realms', handler.atomicals_search_realms) - app.router.add_post('/proxy/blockchain.atomicals.find_subrealms', - handler.atomicals_search_subrealms) - app.router.add_post('/proxy/blockchain.atomicals.find_containers', - handler.atomicals_search_containers) - app.router.add_post('/proxy/blockchain.atomicals.get_holders', handler.atomicals_get_holders) - app.router.add_post('/proxy/blockchain.atomicals.transaction', handler.atomicals_transaction) - app.router.add_post('/proxy/blockchain.atomicals.transaction_by_height', - handler.transaction_by_height) - app.router.add_post('/proxy/blockchain.atomicals.transaction_by_atomical_id', - handler.transaction_by_atomical_id) - app.router.add_post('/proxy/blockchain.atomicals.transaction_by_scripthash', - handler.transaction_by_scripthash) - app.router.add_post('/proxy/blockchain.atomicals.transaction_global', handler.transaction_global) - # common proxy - app.router.add_get('/proxy/{method}', handler.handle_get_method) - app.router.add_post('/proxy/{method}', handler.handle_post_method) + await handler.add_endpoints(app.router, SESSION_PROTOCOL_MAX) app['rate_limiter'] = rate_limiter runner = web.AppRunner(app) await runner.setup() diff --git a/electrumx/server/session/shared_session.py 
b/electrumx/server/session/shared_session.py
index c837ec96..6a5db37e 100644
--- a/electrumx/server/session/shared_session.py
+++ b/electrumx/server/session/shared_session.py
@@ -1,15 +1,21 @@
-from typing import Type, TYPE_CHECKING, Union
+import asyncio
+import datetime
 from aiorpcx import RPCError
 from logging import LoggerAdapter
+from typing import TYPE_CHECKING, Callable, Union, Optional
 from electrumx.lib import util
-from electrumx.lib.util_atomicals import AtomicalsValidationError
+from electrumx.lib.script2addr import get_address_from_output_script
+from electrumx.lib.util_atomicals import *
 from electrumx.server.daemon import DaemonError
 from electrumx.server.session import ATOMICALS_INVALID_TX, BAD_REQUEST
+from electrumx.server.session.util import assert_atomical_id, non_negative_integer, SESSION_BASE_MAX_CHUNK_SIZE, \
+    scripthash_to_hashX, assert_tx_hash
 if TYPE_CHECKING:
     from electrumx.lib.coins import AtomicalsCoinMixin, Coin
+    from electrumx.server.peers import PeerManager
     from electrumx.server.session.session_manager import SessionManager
@@ -17,21 +23,909 @@ class SharedSession:
     def __init__(
         self,
         logger: LoggerAdapter,
-        coin: Type[Union['Coin', 'AtomicalsCoinMixin']],
-        session_mgr: Type['SessionManager'],
+        coin: Union['Coin', 'AtomicalsCoinMixin'],
+        session_mgr: 'SessionManager',
+        peer_mgr: 'PeerManager',
         client: str,
+        maybe_bump_cost: Optional[Callable[[float], None]] = None
     ):
-        self.session_mgr = session_mgr
-        self.logger = logger
-        self.txs_sent: int = 0
         self.client: str = client
         self.coin = coin
+        self.logger = logger
+        self.session_mgr = session_mgr
+        self.peer_mgr = peer_mgr
+        self.bump_cost = maybe_bump_cost
+
+        self.bp = session_mgr.bp
+        self.daemon_request = session_mgr.daemon_request
+        self.db = session_mgr.db
+        self.env = session_mgr.env
+        self.mempool = session_mgr.mempool
+        self.subscribe_headers = False
+        self.mempool_status = {}
+        self.hash_x_subs = {}
+        self.txs_sent: int = 0
+        self.is_peer = False
+
+    ################################################################################################################
+
+    async def block_header(self, height, cp_height=0):
+        """Return a raw block header as a hexadecimal string, or as a
+        dictionary with a merkle proof."""
+        height = non_negative_integer(height)
+        cp_height = non_negative_integer(cp_height)
+        raw_header_hex = (await self.session_mgr.raw_header(height)).hex()
+        self.bump_cost(1.25 - (cp_height == 0))
+        if cp_height == 0:
+            return raw_header_hex
+        result = {'header': raw_header_hex}
+        result.update(await self._merkle_proof(cp_height, height))
+        return result
+
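+    # A hedged sketch of the request/response shape for block_header above
+    # (values illustrative only, not from a real chain):
+    #   blockchain.block.header(5000, 5010)
+    #     -> {'header': '00...', 'branch': ['ab...', ...], 'root': 'cd...'}
+    # With cp_height == 0 only the raw hex header string is returned.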
+    async def block_headers(self, start_height, count, cp_height=0):
+        """Return count concatenated block headers as hex for the main chain,
+        starting at start_height.
+
+        start_height and count must be non-negative integers. At most
+        SESSION_BASE_MAX_CHUNK_SIZE headers will be returned.
+        """
+        start_height = non_negative_integer(start_height)
+        count = non_negative_integer(count)
+        cp_height = non_negative_integer(cp_height)
+        cost = count / 50
+
+        max_size = SESSION_BASE_MAX_CHUNK_SIZE
+        count = min(count, max_size)
+        headers, count = await self.db.read_headers(start_height, count)
+        result = {'hex': headers.hex(), 'count': count, 'max': max_size}
+        if count and cp_height:
+            cost += 1.0
+            last_height = start_height + count - 1
+            result.update(await self._merkle_proof(cp_height, last_height))
+        self.bump_cost(cost)
+        return result
+
+    def headers_subscribe(self):
+        """Subscribe to get raw headers of new blocks."""
+        if not self.subscribe_headers:
+            self.subscribe_headers = True
+            self.bump_cost(0.25)
+        return self.subscribe_headers_result()
+
+    def subscribe_headers_result(self):
+        """The result of a header subscription or notification."""
+        return self.session_mgr.hsub_results
+
+    async def estimate_fee(self, number, mode=None):
+        """The estimated transaction fee per kilobyte to be paid for a
+        transaction to be included within a certain number of blocks.
+
+        number: the number of blocks
+        mode: CONSERVATIVE or ECONOMICAL estimation mode
+        """
+        number = non_negative_integer(number)
+        # use whitelist for mode, otherwise it would be easy to force a cache miss:
+        if mode not in self.coin.ESTIMATEFEE_MODES:
+            raise RPCError(BAD_REQUEST, f'unknown estimatefee mode: {mode}')
+        self.bump_cost(0.1)
+
+        number = self.coin.bucket_estimatefee_block_target(number)
+        cache = self.session_mgr.estimatefee_cache
+
+        cache_item = cache.get((number, mode))
+        if cache_item is not None:
+            blockhash, fee_rate, lock = cache_item
+            if blockhash and blockhash == self.bp.tip:
+                return fee_rate
+        else:
+            # create lock now, store it, and only then await on it
+            lock = asyncio.Lock()
+            cache[(number, mode)] = (None, None, lock)
+        async with lock:
+            cache_item = cache.get((number, mode))
+            if cache_item is not None:
+                blockhash, fee_rate, lock = cache_item
+                if blockhash == self.bp.tip:
+                    return fee_rate
+            self.bump_cost(2.0)  # cache miss incurs extra cost
+            blockhash = self.bp.tip
+            if mode:
+                fee_rate = await self.daemon_request('estimatefee', number, mode)
+            else:
+                fee_rate = await self.daemon_request('estimatefee', number)
+            assert fee_rate is not None
+            assert blockhash is not None
+            cache[(number, mode)] = (blockhash, fee_rate, lock)
+        return fee_rate
+
+    async def relay_fee(self):
+        """The minimum fee a low-priority tx must pay in order to be accepted
+        to the daemon's memory pool."""
+        self.bump_cost(1.0)
+        return await self.daemon_request('relayfee')
+
+    async def scripthash_get_balance(self, scripthash):
+        """Return the confirmed and unconfirmed balance of a scripthash."""
+        hash_x = scripthash_to_hashX(scripthash)
+        return await self.get_balance(hash_x)
+
+    async def scripthash_get_history(self, scripthash):
+        """Return the confirmed and unconfirmed history of a scripthash."""
+        hash_x = scripthash_to_hashX(scripthash)
+        return await self._confirmed_and_unconfirmed_history(hash_x)
+
+    async def scripthash_get_mempool(self, scripthash):
+        """Return the mempool transactions touching a scripthash."""
+        hash_x = scripthash_to_hashX(scripthash)
+        return await self._unconfirmed_history(hash_x)
+
+    async def scripthash_list_unspent(self, scripthash):
+        """Return the list of UTXOs of a scripthash."""
+        hash_x = scripthash_to_hashX(scripthash)
+        return await self._hash_x_list_unspent(hash_x)
+
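+    # Note for the handler below: the subscription result (and any later
+    # notifications) is the script hash *status*, a hash over its confirmed
+    # and mempool history, or None if the script hash has never been used
+    # (standard Electrum protocol semantics).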
+    async def scripthash_subscribe(self, scripthash):
+        """Subscribe to a script hash.
+
+        scripthash: the SHA256 hash of the script to subscribe to"""
+        hash_x = scripthash_to_hashX(scripthash)
+        return await self._hash_x_subscribe(hash_x, scripthash)
+
+    async def scripthash_unsubscribe(self, scripthash):
+        """Unsubscribe from a script hash."""
+        self.bump_cost(0.1)
+        hash_x = scripthash_to_hashX(scripthash)
+        return self.unsubscribe_hash_x(hash_x) is not None
+
+    async def compact_fee_histogram(self):
+        self.bump_cost(1.0)
+        return await self.mempool.compact_fee_histogram()
+
+    async def donation_address(self):
+        """Return the donation address as a string, empty if there is none."""
+        self.bump_cost(0.1)
+        return self.env.donation_address
+
+    async def ping(self):
+        """Serves as a connection keep-alive mechanism and for the client to
+        confirm the server is still responding.
+        """
+        self.bump_cost(0.1)
+        return None
+
+    async def atomicals_get_ft_balances(self, scripthash):
+        """Return the FT balances for a scripthash address."""
+        hash_x = scripthash_to_hashX(scripthash)
+        return await self._hash_x_ft_balances_atomicals(hash_x)
+
+    async def atomicals_get_nft_balances(self, scripthash):
+        """Return the NFT balances for a scripthash address."""
+        hash_x = scripthash_to_hashX(scripthash)
+        return await self._hash_x_nft_balances_atomicals(hash_x)
+
+    async def atomicals_list_scripthash(self, scripthash, verbose=False):
+        """Return the list of Atomical UTXOs for an address."""
+        hash_x = scripthash_to_hashX(scripthash)
+        return await self._hash_x_list_scripthash_atomicals(hash_x, verbose)
+
+    async def atomicals_list(self, limit, offset, asc):
+        atomicals = await self.db.get_atomicals_list(limit, offset, asc)
+        atomicals_populated = []
+        for atomical_id in atomicals:
+            atomical = await self._atomical_id_get(location_id_bytes_to_compact(atomical_id))
+            atomicals_populated.append(atomical)
+        return {'global': await self._get_summary_info(), 'result': atomicals_populated}
+
+    async def atomicals_num_to_id(self, limit, offset, asc):
+        atomicals_num_to_id_map = await self.db.get_num_to_id(limit, offset, asc)
+        atomicals_num_to_id_map_reformatted = {}
+        for num, atomical_id in atomicals_num_to_id_map.items():
+            atomicals_num_to_id_map_reformatted[num] = location_id_bytes_to_compact(atomical_id)
+        return {'global': await self._get_summary_info(), 'result': atomicals_num_to_id_map_reformatted}
+
+    async def atomicals_block_hash(self, height):
+        if not height:
+            height = self.bp.height
+        block_hash = self.db.get_atomicals_block_hash(height)
+        return {'result': block_hash}
+
+    async def atomicals_block_txs(self, height):
+        tx_list = self.bp.get_atomicals_block_txs(height)
+        return {'global': await self._get_summary_info(), 'result': tx_list}
+
+    async def atomicals_dump(self):
+        self.db.dump()
+        return {'result': True}
+
+    async def atomicals_at_location(self, compact_location_id):
+        """Return the Atomicals at a specific location id."""
+        atomical_basic_infos = []
+        atomicals_found_at_location = self.db.get_atomicals_by_location_extended_info_long_form(
+            compact_to_location_id_bytes(compact_location_id))
+        for atomical_id in atomicals_found_at_location['atomicals']:
+            basic_info = self.bp.get_atomicals_id_mint_info_basic_struct(atomical_id)
+            basic_info['value'] = self.db.get_uxto_atomicals_value(
+                compact_to_location_id_bytes(compact_location_id),
+                atomical_id
+            )
+            atomical_basic_infos.append(basic_info)
+        return {
+            'location_info': atomicals_found_at_location['location_info'],
+            'atomicals': atomical_basic_infos
+        }
+
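+    # The id-based handlers below accept either a compact atomical id or an
+    # atomical number. A compact id is the mint outpoint written as
+    # '<txid>i<output_index>', e.g. (hypothetical) '99fa...eef1i0'.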
+    async def atomicals_get_location(self, compact_atomical_id_or_atomical_number):
+        compact_atomical_id = self._atomical_resolve_id(compact_atomical_id_or_atomical_number)
+        return {
+            'global': await self._get_summary_info(),
+            'result': await self._atomical_id_get_location(compact_atomical_id)
+        }
+
+    async def atomicals_get(self, compact_atomical_id_or_atomical_number):
+        compact_atomical_id = self._atomical_resolve_id(compact_atomical_id_or_atomical_number)
+        return {
+            'global': await self._get_summary_info(),
+            'result': await self._atomical_id_get(compact_atomical_id)
+        }
+
+    async def atomicals_get_global(self, hashes: int = 10):
+        return {'global': await self._get_summary_info(hashes)}
+
+    async def atomical_get_state(self, compact_atomical_id_or_atomical_number):
+        compact_atomical_id = self._atomical_resolve_id(compact_atomical_id_or_atomical_number)
+        return {
+            'global': await self._get_summary_info(),
+            'result': await self._atomical_id_get_state(compact_atomical_id)
+        }
+
+    async def atomical_get_state_history(self, compact_atomical_id_or_atomical_number):
+        compact_atomical_id = self._atomical_resolve_id(compact_atomical_id_or_atomical_number)
+        return {
+            'global': await self._get_summary_info(),
+            'result': await self._atomical_id_get_state_history(compact_atomical_id)
+        }
+
+    async def atomical_get_events(self, compact_atomical_id_or_atomical_number):
+        compact_atomical_id = self._atomical_resolve_id(compact_atomical_id_or_atomical_number)
+        return {
+            'global': await self._get_summary_info(),
+            'result': await self._atomical_id_get_events(compact_atomical_id)
+        }
+
+    async def atomicals_get_tx_history(self, compact_atomical_id_or_atomical_number):
+        """Return the history of an Atomical.
+
+        atomical_id: the mint transaction hash + 'i' of the atomical id
+        """
+        compact_atomical_id = compact_atomical_id_or_atomical_number
+        if is_compact_atomical_id(compact_atomical_id_or_atomical_number):
+            assert_atomical_id(compact_atomical_id)
+        else:
+            compact_atomical_id = location_id_bytes_to_compact(
+                self.db.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number)
+            )
+        return {
+            'global': await self._get_summary_info(),
+            'result': await self._atomical_id_get_tx_history(compact_atomical_id)
+        }
+
+    async def atomicals_get_ft_info(self, compact_atomical_id_or_atomical_number):
+        compact_atomical_id = self._atomical_resolve_id(compact_atomical_id_or_atomical_number)
+        return {
+            'global': await self._get_summary_info(),
+            'result': await self._atomical_id_get_ft_info(compact_atomical_id)
+        }
+
+    async def atomicals_get_dft_mints(self, compact_atomical_id, limit=100, offset=0):
+        atomical_id = compact_to_location_id_bytes(compact_atomical_id)
+        entries = self.bp.get_distmints_by_atomical_id(atomical_id, limit, offset)
+        return {'global': await self._get_summary_info(), 'result': entries}
+
+    # Get a summary view of a realm: whether it is allowing mints and which
+    # parts of a subrealm already exist.
+    async def atomicals_get_realm_info(self, full_name, verbose=False):
+        if not full_name or not isinstance(full_name, str):
+            raise RPCError(BAD_REQUEST, f'invalid input full_name: {full_name}')
+        full_name = full_name.lower()
+        split_names = full_name.split('.')
+        total_name_parts = len(split_names)
+        level = 0
+        last_found = None
+        realms_path = []
+        candidates = []
+        height = self.bp.height
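+        # Walk the dot-separated components left to right: the first part must
+        # resolve as a top level realm and every later part as a subrealm of
+        # the previously verified parent, e.g. (illustrative) 'alice.bob'
+        # resolves 'alice' first, then 'bob' beneath it.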
+        for name_part in split_names:
+            if level == 0:
+                status, last_found, candidates = self.bp.get_effective_realm(name_part, height)
+            else:
+                self.logger.info(f'atomicals_get_realm_info {last_found} {name_part}')
+                status, last_found, candidates = self.bp.get_effective_subrealm(
+                    last_found,
+                    name_part,
+                    height
+                )
+            # stop walking as soon as a realm component cannot be found
+            if status != 'verified':
+                break
+            # Save the latest realm
+            # (could be the top level realm, or the parent of a subrealm, or even the subrealm itself)
+            last_found_realm_atomical_id = last_found
+            # Add it to the list of paths
+            realms_path.append({
+                'atomical_id': location_id_bytes_to_compact(last_found_realm_atomical_id),
+                'name_part': name_part,
+                'candidates': candidates
+            })
+            level += 1
+
+        joined_name = ''
+        is_first_name_part = True
+        for name_element in realms_path:
+            if is_first_name_part:
+                is_first_name_part = False
+            else:
+                joined_name += '.'
+            joined_name += name_element['name_part']
+        # Nothing was found
+        realms_path_len = len(realms_path)
+        if realms_path_len == 0:
+            return {
+                'result': {
+                    'atomical_id': None,
+                    'top_level_realm_atomical_id': None,
+                    'top_level_realm_name': None,
+                    'nearest_parent_realm_atomical_id': None,
+                    'nearest_parent_realm_name': None,
+                    'request_full_realm_name': full_name,
+                    'found_full_realm_name': None,
+                    'missing_name_parts': full_name,
+                    'candidates': format_name_type_candidates_to_rpc(
+                        candidates,
+                        self.bp.build_atomical_id_to_candidate_map(candidates),
+                    ),
+                }
+            }
+        # Populate the subrealm minting rules for a parent atomical
+        that = self
+
+        def populate_rules_response_struct(parent_atomical_id, struct_to_populate):
+            current_height = that.bp.height
+            subrealm_mint_mod_history = that.bp.get_mod_history(parent_atomical_id, current_height)
+            current_height_latest_state = calculate_latest_state_from_mod_history(subrealm_mint_mod_history)
+            current_height_rules_list = validate_rules_data(current_height_latest_state.get(SUBREALM_MINT_PATH, None))
+            nearest_parent_mint_allowed = False
+            struct_to_populate['nearest_parent_realm_subrealm_mint_rules'] = {
+                'nearest_parent_realm_atomical_id': location_id_bytes_to_compact(parent_atomical_id),
+                'current_height': current_height,
+                'current_height_rules': current_height_rules_list
+            }
+            if current_height_rules_list and len(current_height_rules_list) > 0:
+                nearest_parent_mint_allowed = True
+            struct_to_populate['nearest_parent_realm_subrealm_mint_allowed'] = nearest_parent_mint_allowed
+
+        # At least the top level realm was found if we got this far.
+        # When the number of realms returned equals the number of name components,
+        # the full name (including any subrealm) was resolved correctly.
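+        # Sketch of the two outcomes below (names illustrative only): if both
+        # parts of 'alice.bob' verify, found_full_realm_name is 'alice.bob'
+        # and missing_name_parts is None; if only 'alice' resolves, the later
+        # branch reports missing_name_parts == 'bob' instead.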
+ if realms_path_len == total_name_parts: + nearest_parent_realm_atomical_id = None + nearest_parent_realm_name = None + top_level_realm = realms_path[0]['atomical_id'] + top_level_realm_name = realms_path[0]['name_part'] + if realms_path_len >= 2: + nearest_parent_realm_atomical_id = realms_path[-2]['atomical_id'] + nearest_parent_realm_name = realms_path[-2]['name_part'] + elif realms_path_len == 1: + nearest_parent_realm_atomical_id = top_level_realm + nearest_parent_realm_name = top_level_realm_name + # final_subrealm_name = split_names[-1] + # applicable_rule_map = self.bp.build_applicable_rule_map( + # candidates, + # compact_to_location_id_bytes(nearest_parent_realm_atomical_id), + # final_subrealm_name + # ) + return_struct = { + 'atomical_id': realms_path[-1]['atomical_id'], + 'top_level_realm_atomical_id': top_level_realm, + 'top_level_realm_name': top_level_realm_name, + 'nearest_parent_realm_atomical_id': nearest_parent_realm_atomical_id, + 'nearest_parent_realm_name': nearest_parent_realm_name, + 'request_full_realm_name': full_name, + 'found_full_realm_name': joined_name, + 'missing_name_parts': None, + 'candidates': format_name_type_candidates_to_rpc( + candidates, + self.bp.build_atomical_id_to_candidate_map(candidates) + ), + } + populate_rules_response_struct( + compact_to_location_id_bytes(nearest_parent_realm_atomical_id), + return_struct, + ) + return {'result': return_struct} + + # The number of realms and components do not match, that is because at least the top level realm + # or intermediate subrealm was found. + # But the final subrealm does not exist yet + # if realms_path_len < total_name_parts: + # It is known if we got this far that realms_path_len < total_name_parts + nearest_parent_realm_atomical_id = None + nearest_parent_realm_name = None + top_level_realm = realms_path[0]['atomical_id'] + top_level_realm_name = realms_path[0]['name_part'] + if realms_path_len >= 2: + nearest_parent_realm_atomical_id = realms_path[-1]['atomical_id'] + nearest_parent_realm_name = realms_path[-1]['name_part'] + elif realms_path_len == 1: + nearest_parent_realm_atomical_id = top_level_realm + nearest_parent_realm_name = top_level_realm_name + + missing_name_parts = '.'.join(split_names[len(realms_path):]) + final_subrealm_name = split_names[-1] + # applicable_rule_map = self.bp.build_applicable_rule_map( + # candidates, + # compact_to_location_id_bytes(nearest_parent_realm_atomical_id), + # final_subrealm_name + # ) + return_struct = { + 'atomical_id': None, + 'top_level_realm_atomical_id': top_level_realm, + 'top_level_realm_name': top_level_realm_name, + 'nearest_parent_realm_atomical_id': nearest_parent_realm_atomical_id, + 'nearest_parent_realm_name': nearest_parent_realm_name, + 'request_full_realm_name': full_name, + 'found_full_realm_name': joined_name, + 'missing_name_parts': missing_name_parts, + 'final_subrealm_name': final_subrealm_name, + 'candidates': format_name_type_candidates_to_rpc_for_subname( + candidates, + self.bp.build_atomical_id_to_candidate_map(candidates) + ), + } + if verbose: + populate_rules_response_struct( + compact_to_location_id_bytes(nearest_parent_realm_atomical_id), + return_struct + ) + return {'result': return_struct} + + async def atomicals_get_by_realm(self, name): + height = self.bp.height + status, candidate_atomical_id, all_entries = self.bp.get_effective_realm(name, height) + formatted_entries = format_name_type_candidates_to_rpc( + all_entries, + self.bp.build_atomical_id_to_candidate_map(all_entries), + ) + if 
candidate_atomical_id: + candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) + found_atomical_id = None + if status == 'verified': + found_atomical_id = candidate_atomical_id + return_result = { + 'status': status, + 'candidate_atomical_id': candidate_atomical_id, + 'atomical_id': found_atomical_id, + 'candidates': formatted_entries, + 'type': 'realm' + } + return {'result': return_result} + + async def atomicals_get_by_subrealm(self, parent_compact_atomical_id_or_atomical_number, name): + height = self.bp.height + compact_atomical_id_parent = self._atomical_resolve_id(parent_compact_atomical_id_or_atomical_number) + parent_id = compact_to_location_id_bytes(compact_atomical_id_parent) + status, candidate_atomical_id, all_entries = self.bp.get_effective_subrealm(parent_id, name, height) + formatted_entries = format_name_type_candidates_to_rpc( + all_entries, + self.bp.build_atomical_id_to_candidate_map(all_entries) + ) + if candidate_atomical_id: + candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) + found_atomical_id = None + if status == 'verified': + found_atomical_id = candidate_atomical_id + return_result = { + 'status': status, + 'candidate_atomical_id': candidate_atomical_id, + 'atomical_id': found_atomical_id, + 'candidates': formatted_entries, + 'type': 'subrealm' + } + return {'result': return_result} + + async def atomicals_get_by_dmitem(self, parent_compact_atomical_id_or_atomical_number, name): + height = self.bp.height + compact_atomical_id_parent = self._atomical_resolve_id(parent_compact_atomical_id_or_atomical_number) + parent_id = compact_to_location_id_bytes(compact_atomical_id_parent) + status, candidate_atomical_id, all_entries = self.bp.get_effective_dmitem(parent_id, name, height) + formatted_entries = format_name_type_candidates_to_rpc( + all_entries, + self.bp.build_atomical_id_to_candidate_map(all_entries) + ) + if candidate_atomical_id: + candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) + found_atomical_id = None + if status == 'verified': + found_atomical_id = candidate_atomical_id + return_result = { + 'status': status, + 'candidate_atomical_id': candidate_atomical_id, + 'atomical_id': found_atomical_id, + 'candidates': formatted_entries, + 'type': 'dmitem' + } + return {'result': return_result} + + async def atomicals_get_by_ticker(self, ticker): + height = self.bp.height + status, candidate_atomical_id, all_entries = self.bp.get_effective_ticker(ticker, height) + formatted_entries = format_name_type_candidates_to_rpc( + all_entries, + self.bp.build_atomical_id_to_candidate_map(all_entries) + ) + if candidate_atomical_id: + candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) + found_atomical_id = None + if status == 'verified': + found_atomical_id = candidate_atomical_id + return_result = { + 'status': status, + 'candidate_atomical_id': candidate_atomical_id, + 'atomical_id': found_atomical_id, + 'candidates': formatted_entries, + 'type': 'ticker' + } + return {'result': return_result} + + async def atomicals_get_by_container(self, container): + if not isinstance(container, str): + raise RPCError(BAD_REQUEST, f'empty container') + height = self.bp.height + status, candidate_atomical_id, all_entries = self.bp.get_effective_container(container, height) + formatted_entries = format_name_type_candidates_to_rpc( + all_entries, + self.bp.build_atomical_id_to_candidate_map(all_entries) + ) + if candidate_atomical_id: + candidate_atomical_id = 
location_id_bytes_to_compact(candidate_atomical_id)
+        found_atomical_id = None
+        if status == 'verified':
+            found_atomical_id = candidate_atomical_id
+        return_result = {
+            'status': status,
+            'candidate_atomical_id': candidate_atomical_id,
+            'atomical_id': found_atomical_id,
+            'candidates': formatted_entries,
+            'type': 'container'
+        }
+        return {'result': return_result}
+
+    async def atomicals_get_by_container_item(self, container, item_name):
+        if not isinstance(container, str):
+            raise RPCError(BAD_REQUEST, f'empty container')
+        height = self.bp.height
+        status, candidate_atomical_id, all_entries = self.bp.get_effective_container(container, height)
+        if status != 'verified':
+            formatted_entries = format_name_type_candidates_to_rpc(
+                all_entries,
+                self.bp.build_atomical_id_to_candidate_map(all_entries)
+            )
+            self.logger.info(f'formatted_entries {formatted_entries}')
+            raise RPCError(BAD_REQUEST, f'Container does not exist')
+        found_atomical_id = candidate_atomical_id
+        status, candidate_atomical_id, all_entries = self.bp.get_effective_dmitem(found_atomical_id, item_name, height)
+        found_item_atomical_id = None
+        formatted_entries = format_name_type_candidates_to_rpc(
+            all_entries,
+            self.bp.build_atomical_id_to_candidate_map(all_entries)
+        )
+        if candidate_atomical_id:
+            candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
+        if status == 'verified':
+            found_item_atomical_id = candidate_atomical_id
+        return_result = {
+            'status': status,
+            'candidate_atomical_id': candidate_atomical_id,
+            'atomical_id': found_item_atomical_id,
+            'candidates': formatted_entries,
+            'type': 'item'
+        }
+        return {'result': return_result}
+
+    async def atomicals_get_by_container_item_validation(
+        self,
+        container,
+        item_name,
+        bitworkc,
+        bitworkr,
+        main_name,
+        main_hash,
+        proof,
+        check_without_sealed
+    ):
+        if not isinstance(container, str):
+            raise RPCError(BAD_REQUEST, f'empty container')
+        height = self.bp.height
+        status, candidate_atomical_id, all_entries = self.bp.get_effective_container(container, height)
+        if status != 'verified':
+            formatted_entries = format_name_type_candidates_to_rpc(
+                all_entries,
+                self.bp.build_atomical_id_to_candidate_map(all_entries)
+            )
+            self.logger.info(f'formatted_entries {formatted_entries}')
+            raise RPCError(BAD_REQUEST, f'Container does not exist')
+        found_parent = candidate_atomical_id
+        compact_atomical_id = location_id_bytes_to_compact(found_parent)
+        container_info = await self._atomical_id_get(compact_atomical_id)
+        # If it is a dmint container then there is no items field; instead, construct it from the dmitems
+        container_dmint_status = container_info.get('$container_dmint_status')
+        if not container_dmint_status:
+            raise RPCError(BAD_REQUEST, 'Container dmint status does not exist')
+        if container_dmint_status.get('status') != 'valid':
+            errors = container_dmint_status.get('errors')
+            # Tolerate an unsealed container only when the caller asked to check without sealing
+            if not (check_without_sealed and errors and len(errors) == 1 and errors[0] == 'container not sealed'):
+                raise RPCError(BAD_REQUEST, f'Container dmint status is invalid: {errors}')
+
+        dmint = container_dmint_status.get('dmint')
+        status, candidate_atomical_id, all_entries = self.bp.get_effective_dmitem(found_parent, item_name, height)
+        found_item_atomical_id = None
+        formatted_entries = format_name_type_candidates_to_rpc(
+            all_entries,
+            self.bp.build_atomical_id_to_candidate_map(all_entries)
+        )
+        if candidate_atomical_id:
+            candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
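+        # A status of 'verified' means the dmitem name claim is settled for this
+        # candidate at the current height; any other status only yields candidates.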
if status == 'verified': + found_item_atomical_id = candidate_atomical_id + + # validate the proof data nonetheless + if not proof or not isinstance(proof, list) or len(proof) == 0: + raise RPCError(BAD_REQUEST, f'Proof must be provided') + + applicable_rule, state_at_height = self.bp.get_applicable_rule_by_height( + found_parent, + item_name, + height - MINT_SUBNAME_RULES_BECOME_EFFECTIVE_IN_BLOCKS, + DMINT_PATH + ) + proof_valid, target_vector, target_hash = validate_merkle_proof_dmint( + dmint['merkle'], + item_name, + bitworkc, + bitworkr, + main_name, + main_hash, + proof + ) + if applicable_rule and applicable_rule.get('matched_rule'): + applicable_rule = applicable_rule.get('matched_rule') + return_result = { + 'status': status, + 'candidate_atomical_id': candidate_atomical_id, + 'atomical_id': found_item_atomical_id, + 'candidates': formatted_entries, + 'type': 'item', + 'applicable_rule': applicable_rule, + 'proof_valid': proof_valid, + 'target_vector': target_vector, + 'target_hash': target_hash, + 'dmint': state_at_height.get('dmint') + } + return {'result': return_result} + + async def atomicals_get_container_items(self, container, limit, offset): + if not isinstance(container, str): + raise RPCError(BAD_REQUEST, f'empty container') + status, candidate_atomical_id, all_entries = self.bp.get_effective_container(container, self.bp.height) + if status != 'verified': + formatted_entries = format_name_type_candidates_to_rpc( + all_entries, + self.bp.build_atomical_id_to_candidate_map(all_entries) + ) + self.logger.info(f'formatted_entries {formatted_entries}') + raise RPCError(BAD_REQUEST, f'Container does not exist') + found_atomical_id = candidate_atomical_id + compact_atomical_id = location_id_bytes_to_compact(found_atomical_id) + container_info = await self._atomical_id_get(compact_atomical_id) + # If it is a dmint container then there is no items field, instead construct it from the dmitems + container_dmint_status = container_info.get('$container_dmint_status') + if container_dmint_status: + if limit > 100: + limit = 100 + if offset < 0: + offset = 0 + height = self.bp.height + items = await self.bp.get_effective_dmitems_paginated(found_atomical_id, limit, offset, height) + return { + 'result': { + 'container': container_info, + 'item_data': { + 'limit': limit, + 'offset': offset, + 'type': 'dmint', + 'items': _auto_populate_container_dmint_items_fields(items) + } + } + } + container_mod_history = self.bp.get_mod_history(found_atomical_id, self.bp.height) + current_height_latest_state = calculate_latest_state_from_mod_history(container_mod_history) + items = current_height_latest_state.get('items', []) + return { + 'result': { + 'container': container_info, + 'item_data': { + 'limit': limit, + 'offset': offset, + 'type': 'regular', + 'items': _auto_populate_container_regular_items_fields(items) + } + } + } + + async def atomicals_search_tickers(self, prefix=None, reverse=False, limit=100, offset=0, is_verified_only=False): + if isinstance(prefix, str): + prefix = prefix.encode() + return self._atomicals_search_name_template( + b'tick', + 'ticker', + None, + prefix, + reverse, + limit, + offset, + is_verified_only + ) + + async def atomicals_search_realms( + self, + prefix=None, + reverse=False, + limit=100, + offset=0, + is_verified_only=False + ): + if isinstance(prefix, str): + prefix = prefix.encode() + return self._atomicals_search_name_template( + b'rlm', + 'realm', + None, + prefix, + reverse, + limit, + offset, + is_verified_only + ) + + async def 
atomicals_search_subrealms(
+        self,
+        parent,
+        prefix=None,
+        reverse=False,
+        limit=100,
+        offset=0,
+        is_verified_only=False
+    ):
+        parent_realm_id_long_form = compact_to_location_id_bytes(parent)
+        if isinstance(prefix, str):
+            prefix = prefix.encode()
+        return self._atomicals_search_name_template(
+            b'srlm',
+            'subrealm',
+            parent_realm_id_long_form,
+            prefix,
+            reverse,
+            limit,
+            offset,
+            is_verified_only
+        )
+
+    async def atomicals_search_containers(
+        self,
+        prefix=None,
+        reverse=False,
+        limit=100,
+        offset=0,
+        is_verified_only=False
+    ):
+        if isinstance(prefix, str):
+            prefix = prefix.encode()
+        return self._atomicals_search_name_template(
+            b'co',
+            'collection',
+            None,
+            prefix,
+            reverse,
+            limit,
+            offset,
+            is_verified_only
+        )
+
+    async def atomicals_get_holders(self, compact_atomical_id, limit=50, offset=0):
+        """Return the holders of an Atomical by a specific compact atomical id."""
+        formatted_results = []
+        atomical_id = compact_to_location_id_bytes(compact_atomical_id)
+        atomical = await self._atomical_id_get(compact_atomical_id)
+        atomical = await self.db.populate_extended_atomical_holder_info(atomical_id, atomical)
+        if atomical["type"] == "FT":
+            if atomical.get("$mint_mode", "fixed") == "fixed":
+                max_supply = atomical.get('$max_supply', 0)
+            else:
+                max_supply = atomical.get('$max_supply', -1)
+                if max_supply < 0:
+                    mint_amount = atomical.get("mint_info", {}).get("args", {}).get("mint_amount")
+                    max_supply = DFT_MINT_MAX_MAX_COUNT_DENSITY * mint_amount
+            for holder in atomical.get("holders", [])[offset:offset + limit]:
+                percent = holder['holding'] / max_supply
+                formatted_results.append({
+                    "percent": percent,
+                    "address": get_address_from_output_script(bytes.fromhex(holder['script'])),
+                    "holding": holder["holding"]
+                })
+        elif atomical["type"] == "NFT":
+            for holder in atomical.get("holders", [])[offset:offset + limit]:
+                formatted_results.append({
+                    "address": get_address_from_output_script(bytes.fromhex(holder['script'])),
+                    "holding": holder["holding"]
+                })
+        return formatted_results
+
+    # Get the transactions within a block by height and return the transaction details
+    async def transaction_by_height(self, height, limit=10, offset=0, op_type=None, reverse=True):
+        res, total = await self.get_transaction_detail_by_height(height, limit, offset, op_type, reverse)
+        return {"result": res, "total": total, "limit": limit, "offset": offset}
+
+    # Get transactions by atomical id
+    async def transaction_by_atomical_id(self, compact_id_or_number, limit=10, offset=0, op_type=None, reverse=True):
+        compact_atomical_id = compact_id_or_number
+        if is_compact_atomical_id(compact_id_or_number):
+            assert_atomical_id(compact_atomical_id)
+        else:
+            compact_atomical_id = location_id_bytes_to_compact(
+                self.db.get_atomical_id_by_atomical_number(compact_id_or_number)
+            )
+        atomical_id = compact_to_location_id_bytes(compact_atomical_id)
+        hash_x = double_sha256(atomical_id)
+        if op_type:
+            op = self.session_mgr.bp.op_list.get(op_type, None)
+            history_data, total = await self.session_mgr.get_history_op(hash_x, limit, offset, op, reverse)
+        else:
+            history_data, total = await self.session_mgr.get_history_op(hash_x, limit, offset, None, reverse)
+        res = []
+        for history in history_data:
+            tx_hash, tx_height = self.db.fs_tx_hash(history["tx_num"])
+            data = await self.session_mgr.get_transaction_detail(hash_to_hex_str(tx_hash), tx_height, history["tx_num"])
+            if data and data["op"]:
+                if (op_type and data["op"] == op_type) or not op_type:
+                    res.append(data)
+        return {"result": res, "total": total,
"limit": limit, "offset": offset} + + # get transaction by scripthash + async def transaction_by_scripthash(self, scripthash, limit=10, offset=0, op_type=None, reverse=True): + hash_x = scripthash_to_hashX(scripthash) + res = [] + if op_type: + op = self.session_mgr.bp.op_list.get(op_type, None) + history_data, total = await self.session_mgr.get_history_op(hash_x, limit, offset, op, reverse) + else: + history_data, total = await self.session_mgr.get_history_op(hash_x, limit, offset, None, reverse) + + for history in history_data: + tx_hash, tx_height = self.db.fs_tx_hash(history["tx_num"]) + data = await self.session_mgr.get_transaction_detail(hash_to_hex_str(tx_hash), tx_height, history["tx_num"]) + if data and data["op"]: + if data["op"] and (data["op"] == op_type or not op_type): + res.append(data) + return {"result": res, "total": total, "limit": limit, "offset": offset} async def transaction_broadcast_validate(self, raw_tx: str = ""): """Simulate a Broadcast a raw transaction to the network. raw_tx: the raw transaction as a hexadecimal string to validate for Atomicals FT rules""" # This returns errors as JSON RPC errors, as is natural + self.bump_cost(0.25 + len(raw_tx) / 5000) try: hex_hash = await self.session_mgr.broadcast_transaction_validated(raw_tx, False) return hex_hash @@ -47,6 +941,7 @@ async def transaction_broadcast(self, raw_tx): raw_tx: the raw transaction as a hexadecimal string""" # This returns errors as JSON RPC errors, as is natural. + self.bump_cost(0.25 + len(raw_tx) / 5000) try: hex_hash = await self.session_mgr.broadcast_transaction_validated(raw_tx, True) except DaemonError as e: @@ -66,34 +961,670 @@ async def transaction_broadcast(self, raw_tx): else: self.txs_sent += 1 client_ver = util.protocol_tuple(self.client) - if client_ver != (0, ): + if client_ver != (0,): msg = self.coin.warn_old_client_on_tx_broadcast(client_ver) if msg: - self.logger.info(f'sent tx: {hex_hash}. and warned user to upgrade their ' - f'client from {self.client}') + self.logger.info( + f'sent tx: {hex_hash}, and warned user to upgrade their ' + f'client from {self.client}' + ) return msg self.logger.info(f'sent tx: {hex_hash}') return hex_hash async def transaction_broadcast_force(self, raw_tx: str): + self.bump_cost(0.25 + len(raw_tx) / 5000) try: hex_hash = await self.session_mgr.broadcast_transaction(raw_tx) except DaemonError as e: error, = e.args message = error['message'] self.logger.info(f'error sending transaction: {message}') - raise RPCError(BAD_REQUEST, 'the transaction was rejected by ' - f'network rules.\n\n{message}\n[{raw_tx}]') + raise RPCError( + BAD_REQUEST, + 'the transaction was rejected by ' + f'network rules.\n\n{message}\n[{raw_tx}]' + ) else: self.txs_sent += 1 client_ver = util.protocol_tuple(self.client) if client_ver != (0,): msg = self.coin.warn_old_client_on_tx_broadcast(client_ver) if msg: - self.logger.info(f'sent tx: {hex_hash}. and warned user to upgrade their ' - f'client from {self.client}') + self.logger.info( + f'sent tx: {hex_hash}. 
and warned user to upgrade their ' + f'client from {self.client}' + ) return msg self.logger.info(f'sent tx: {hex_hash}') return hex_hash + + async def transaction_get(self, tx_hash, verbose=False): + """Return the serialized raw transaction given its hash + + tx_hash: the transaction hash as a hexadecimal string + verbose: passed on to the daemon + """ + assert_tx_hash(tx_hash) + if verbose not in (True, False): + raise RPCError(BAD_REQUEST, '"verbose" must be a boolean') + + self.bump_cost(1.0) + return await self.daemon_request('getrawtransaction', tx_hash, verbose) + + async def transaction_merkle(self, tx_hash, height): + """Return the merkle branch to a confirmed transaction given its hash + and height. + + tx_hash: the transaction hash as a hexadecimal string + height: the height of the block it is in + """ + tx_hash = assert_tx_hash(tx_hash) + height = non_negative_integer(height) + + branch, tx_pos, cost = await self.session_mgr.merkle_branch_for_tx_hash(height, tx_hash) + self.bump_cost(cost) + + return {"block_height": height, "merkle": branch, "pos": tx_pos} + + async def transaction_id_from_pos(self, height, tx_pos, merkle=False): + """Return the txid and optionally a merkle proof, given + a block height and position in the block. + """ + tx_pos = non_negative_integer(tx_pos) + height = non_negative_integer(height) + if merkle not in (True, False): + raise RPCError(BAD_REQUEST, '"merkle" must be a boolean') + + if merkle: + branch, tx_hash, cost = await self.session_mgr.merkle_branch_for_tx_pos(height, tx_pos) + self.bump_cost(cost) + return {"tx_hash": tx_hash, "merkle": branch} + else: + tx_hashes, cost = await self.session_mgr.tx_hashes_at_blockheight(height) + try: + tx_hash = tx_hashes[tx_pos] + except IndexError: + raise RPCError( + BAD_REQUEST, + f'no tx at position {tx_pos:,d} in block at height {height:,d}' + ) + self.bump_cost(cost) + return hash_to_hex_str(tx_hash) + + ################################################################################################################ + + async def _merkle_proof(self, cp_height, height): + max_height = self.db.db_height + if not height <= cp_height <= max_height: + raise RPCError( + BAD_REQUEST, + f'require header height {height:,d} <= ' + f'cp_height {cp_height:,d} <= ' + f'chain height {max_height:,d}' + ) + branch, root = await self.db.header_branch_and_root(cp_height + 1, height) + return { + 'branch': [hash_to_hex_str(elt) for elt in branch], + 'root': hash_to_hex_str(root), + } + + async def _get_summary_info(self, atomical_hash_count: int = 10): + if atomical_hash_count and atomical_hash_count > 100: + atomical_hash_count = 100 + db_height = self.db.db_height + last_block_hash = self.db.get_atomicals_block_hash(db_height) + ret = { + 'coin': self.coin.__name__, + 'network': self.coin.NET, + 'height': db_height, + 'block_tip': hash_to_hex_str(self.db.db_tip), + 'server_time': datetime.datetime.now().isoformat(), + 'atomicals_block_tip': last_block_hash, + 'atomical_count': self.db.db_atomical_count, + 'atomicals_block_hashes': {}, + } + # ret['atomicals_block_hashes'][db_height] = last_block_hash + for i in range(atomical_hash_count): + next_db_height = db_height - i + next_block_hash = self.db.get_atomicals_block_hash(next_db_height) + ret['atomicals_block_hashes'][next_db_height] = next_block_hash + return ret + + def _atomical_resolve_id(self, compact_atomical_id_or_atomical_number): + compact_atomical_id = compact_atomical_id_or_atomical_number + if 
is_compact_atomical_id(compact_atomical_id_or_atomical_number): + assert_atomical_id(compact_atomical_id) + else: + found_atomical_id = self.db.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number) + if not found_atomical_id: + raise RPCError(BAD_REQUEST, f'not found atomical: {compact_atomical_id_or_atomical_number}') + compact_atomical_id = location_id_bytes_to_compact(found_atomical_id) + return compact_atomical_id + + # Get atomicals base information from db or placeholder information if mint is still in the mempool and unconfirmed + async def _atomical_id_get(self, compact_atomical_id): + atomical_id = compact_to_location_id_bytes(compact_atomical_id) + atomical = await self.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) + if atomical: + return atomical + # Check mempool + atomical_in_mempool = await self.mempool.get_atomical_mint(atomical_id) + if atomical_in_mempool is None: + raise RPCError(BAD_REQUEST, f'"{compact_atomical_id}" is not found') + return atomical_in_mempool + + async def _atomical_id_get_location(self, compact_atomical_id): + atomical_id = compact_to_location_id_bytes(compact_atomical_id) + atomical = await self._atomical_id_get(compact_atomical_id) + await self.db.populate_extended_location_atomical_info(atomical_id, atomical) + return atomical + + async def _atomical_id_get_state(self, compact_atomical_id): + atomical_id = compact_to_location_id_bytes(compact_atomical_id) + atomical = await self._atomical_id_get(compact_atomical_id) + height = self.bp.height + self.db.populate_extended_mod_state_latest_atomical_info(atomical_id, atomical, height) + await self.db.populate_extended_location_atomical_info(atomical_id, atomical) + return atomical + + async def _atomical_id_get_state_history(self, compact_atomical_id): + atomical_id = compact_to_location_id_bytes(compact_atomical_id) + atomical = await self._atomical_id_get(compact_atomical_id) + height = self.bp.height + self.db.populate_extended_mod_state_history_atomical_info(atomical_id, atomical, height) + await self.db.populate_extended_location_atomical_info(atomical_id, atomical) + return atomical + + async def _atomical_id_get_events(self, compact_atomical_id): + atomical_id = compact_to_location_id_bytes(compact_atomical_id) + atomical = await self._atomical_id_get(compact_atomical_id) + height = self.bp.height + self.db.populate_extended_events_atomical_info(atomical_id, atomical, height) + await self.db.populate_extended_location_atomical_info(atomical_id, atomical) + return atomical + + async def _atomical_id_get_tx_history(self, compact_atomical_id): + atomical_id = compact_to_location_id_bytes(compact_atomical_id) + atomical = await self._atomical_id_get(compact_atomical_id) + history = await self.scripthash_get_history(hash_to_hex_str(double_sha256(atomical_id))) + history.sort(key=lambda x: x['height'], reverse=True) + atomical['tx'] = {'history': history} + return atomical + + async def _atomical_id_get_ft_info(self, compact_atomical_id): + atomical_id = compact_to_location_id_bytes(compact_atomical_id) + atomical = await self.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) + if atomical['subtype'] == 'decentralized': + atomical = await self.bp.get_dft_mint_info_rpc_format_by_atomical_id(atomical_id) + elif atomical['subtype'] == 'direct': + atomical = await self.bp.get_ft_mint_info_rpc_format_by_atomical_id(atomical_id) + else: + raise RPCError(BAD_REQUEST, f'"{compact_atomical_id}" is not a fungible token (FT)') + + if atomical: + return atomical + # 
Check mempool + atomical_in_mempool = await self.mempool.get_atomical_mint(atomical_id) + if atomical_in_mempool is None: + raise RPCError(BAD_REQUEST, f'"{compact_atomical_id}" is not found') + return atomical_in_mempool + + async def address_status(self, hash_x): + """Returns an address status. + + Status is a hex string, but must be None if there is no history. + """ + # Note history is ordered and mempool unordered in electrum-server + # For mempool, height is -1 if it has unconfirmed inputs, otherwise 0 + db_history, cost = await self.session_mgr.limited_history(hash_x) + mempool = await self.mempool.transaction_summaries(hash_x) + status = ''.join( + f'{hash_to_hex_str(tx_hash)}:{height:d}:' + for tx_hash, height in db_history + ) + status += ''.join( + f'{hash_to_hex_str(tx.hash)}:{-tx.has_unconfirmed_inputs:d}:' + for tx in mempool + ) + # Add status hashing cost + self.bump_cost(cost + 0.1 + len(status) * 0.00002) + + if status: + status = sha256(status.encode()).hex() + else: + status = None + if mempool: + self.mempool_status[hash_x] = status + else: + self.mempool_status.pop(hash_x, None) + return status + + async def get_balance(self, hash_x): + utxos = await self.db.all_utxos(hash_x) + confirmed = sum(utxo.value for utxo in utxos) + unconfirmed = await self.mempool.balance_delta(hash_x) + self.bump_cost(1.0 + len(utxos) / 50) + return {'confirmed': confirmed, 'unconfirmed': unconfirmed} + + async def _confirmed_and_unconfirmed_history(self, hash_x): + # Note history is ordered but unconfirmed is unordered in e-s + history, cost = await self.session_mgr.limited_history(hash_x) + self.bump_cost(cost) + conf = [{'tx_hash': hash_to_hex_str(tx_hash), 'height': height} + for tx_hash, height in history] + return conf + await self._unconfirmed_history(hash_x) + + async def _unconfirmed_history(self, hash_x): + # Note unconfirmed history is unordered in electrum-server + # height is -1 if it has unconfirmed inputs, otherwise 0 + result = [ + { + 'tx_hash': hash_to_hex_str(tx.hash), + 'height': -tx.has_unconfirmed_inputs, + 'fee': tx.fee + } + for tx in await self.mempool.transaction_summaries(hash_x) + ] + self.bump_cost(0.25 + len(result) / 50) + return result + + async def _hash_x_list_unspent(self, hash_x): + """Return the list of UTXOs of a script hash, including mempool + effects.""" + utxos = await self.db.all_utxos(hash_x) + utxos = sorted(utxos) + utxos.extend(await self.mempool.unordered_UTXOs(hash_x)) + self.bump_cost(1.0 + len(utxos) / 50) + spends = await self.mempool.potential_spends(hash_x) + returned_utxos = [] + for utxo in utxos: + if (utxo.tx_hash, utxo.tx_pos) in spends: + continue + atomicals = self.db.get_atomicals_by_utxo(utxo, True) + atomicals_basic_infos = {} + for atomical_id in atomicals: + # This call is efficient in that it's cached underneath. + # Now we only show the atomical id and its corresponding value + # because it can always be fetched separately which is more efficient. 
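+                # The map built below is keyed by the compact atomical id, with the
+                # raw atomical value held at this location as the mapped amount,
+                # e.g. (hypothetical) {'99fa...i0': 546}.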
+ # Todo need to combine mempool atomicals + atomical_id_compact = location_id_bytes_to_compact(atomical_id) + location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) + atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id) + returned_utxos.append({ + 'txid': hash_to_hex_str(utxo.tx_hash), + 'tx_hash': hash_to_hex_str(utxo.tx_hash), + 'index': utxo.tx_pos, + 'tx_pos': utxo.tx_pos, + 'vout': utxo.tx_pos, + 'height': utxo.height, + 'value': utxo.value, + 'atomicals': atomicals_basic_infos + }) + return returned_utxos + + async def _hash_x_subscribe(self, hash_x, alias): + # Store the subscription only after address_status succeeds + result = await self.address_status(hash_x) + self.hash_x_subs[hash_x] = alias + return result + + def unsubscribe_hash_x(self, hash_x): + self.mempool_status.pop(hash_x, None) + return self.hash_x_subs.pop(hash_x, None) + + async def _hash_x_ft_balances_atomicals(self, hash_x): + utxos = await self.db.all_utxos(hash_x) + utxos = sorted(utxos) + # Comment out the utxos for now and add it in later + # utxos.extend(await self.mempool.unordered_UTXOs(hashX)) + self.bump_cost(1.0 + len(utxos) / 50) + spends = [] # await self.mempool.potential_spends(hashX) + returned_utxos = [] + atomicals_id_map = {} + for utxo in utxos: + if (utxo.tx_hash, utxo.tx_pos) in spends: + continue + atomicals = self.db.get_atomicals_by_utxo(utxo, True) + atomicals_basic_infos = {} + for atomical_id in atomicals: + # This call is efficient in that it's cached underneath. + # Now we only show the atomical id and its corresponding value + # because it can always be fetched separately which is more efficient. + atomical_basic_info = await self.bp.get_base_mint_info_rpc_format_by_atomical_id( + atomical_id) + compact_id = location_id_bytes_to_compact(atomical_id) + atomicals_id_map[compact_id] = atomical_basic_info + location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) + atomicals_basic_infos[compact_id] = self.db.get_uxto_atomicals_value(location, atomical_id) + if len(atomicals) > 0: + returned_utxos.append({ + 'txid': hash_to_hex_str(utxo.tx_hash), + 'index': utxo.tx_pos, + 'vout': utxo.tx_pos, + 'height': utxo.height, + 'value': utxo.value, + 'atomicals': atomicals_basic_infos + }) + # Aggregate balances + balances = {} + for returned_utxo in returned_utxos: + for atomical_id_entry_compact in returned_utxo['atomicals']: + atomical_id_basic_info = atomicals_id_map[atomical_id_entry_compact] + compact_id = atomical_id_basic_info['atomical_id'] + assert (compact_id == atomical_id_entry_compact) + if atomical_id_basic_info.get('type') != 'FT': + continue + if balances.get(compact_id) is None: + balances[compact_id] = { + 'id': compact_id, + 'ticker': atomical_id_basic_info.get('$ticker'), + 'confirmed': 0, + } + if returned_utxo['height'] > 0: + balances[compact_id]['confirmed'] += returned_utxo['atomicals'][compact_id] + return {'balances': balances} + + async def _hash_x_nft_balances_atomicals(self, hash_x): + utxos = await self.db.all_utxos(hash_x) + utxos = sorted(utxos) + # Comment out the utxos for now and add it in later + # utxos.extend(await self.mempool.unordered_UTXOs(hashX)) + self.bump_cost(1.0 + len(utxos) / 50) + spends = [] # await self.mempool.potential_spends(hashX) + returned_utxos = [] + atomicals_id_map = {} + for utxo in utxos: + if (utxo.tx_hash, utxo.tx_pos) in spends: + continue + atomicals = self.db.get_atomicals_by_utxo(utxo, True) + atomicals_basic_infos = {} + for atomical_id in atomicals: + # This call is 
efficient in that it's cached underneath. + # Now we only show the atomical id and its corresponding value + # because it can always be fetched separately which is more efficient. + atomical_basic_info = await self.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) + compact_id = location_id_bytes_to_compact(atomical_id) + atomicals_id_map[compact_id] = atomical_basic_info + location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) + atomicals_basic_infos[compact_id] = self.db.get_uxto_atomicals_value(location, atomical_id) + if len(atomicals) > 0: + returned_utxos.append({ + 'txid': hash_to_hex_str(utxo.tx_hash), + 'index': utxo.tx_pos, + 'vout': utxo.tx_pos, + 'height': utxo.height, + 'value': utxo.value, + 'atomicals': atomicals_basic_infos + }) + # Aggregate balances + balances = {} + for returned_utxo in returned_utxos: + for atomical_id_entry_compact in returned_utxo['atomicals']: + atomical_id_basic_info = atomicals_id_map[atomical_id_entry_compact] + compact_id = atomical_id_basic_info['atomical_id'] + assert (compact_id == atomical_id_entry_compact) + if atomical_id_basic_info.get('type') != 'NFT': + continue + if balances.get(compact_id) is None: + balances[compact_id] = { + 'id': compact_id, + 'confirmed': 0, + } + if atomical_id_basic_info.get('subtype'): + balances[compact_id]['subtype'] = atomical_id_basic_info.get('subtype') + if atomical_id_basic_info.get('$request_container'): + balances[compact_id]['request_container'] = atomical_id_basic_info.get('$request_container') + if atomical_id_basic_info.get('$container'): + balances[compact_id]['container'] = atomical_id_basic_info.get('$container') + if atomical_id_basic_info.get('$dmitem'): + balances[compact_id]['dmitem'] = atomical_id_basic_info.get('$dmitem') + if atomical_id_basic_info.get('$request_dmitem'): + balances[compact_id]['request_dmitem'] = atomical_id_basic_info.get('$request_dmitem') + if atomical_id_basic_info.get('$realm'): + balances[compact_id]['realm'] = atomical_id_basic_info.get('$realm') + if atomical_id_basic_info.get('$request_realm'): + balances[compact_id]['request_realm'] = atomical_id_basic_info.get('$request_realm') + if atomical_id_basic_info.get('$subrealm'): + balances[compact_id]['subrealm'] = atomical_id_basic_info.get('$subrealm') + if atomical_id_basic_info.get('$request_subrealm'): + balances[compact_id]['request_subrealm'] = atomical_id_basic_info.get('$request_subrealm') + if atomical_id_basic_info.get('$full_realm_name'): + balances[compact_id]['full_realm_name'] = atomical_id_basic_info.get('$full_realm_name') + if atomical_id_basic_info.get('$parent_container'): + balances[compact_id]['parent_container'] = atomical_id_basic_info.get('$parent_container') + if atomical_id_basic_info.get('$parent_realm'): + balances[compact_id]['parent_realm'] = atomical_id_basic_info.get('$parent_realm') + if atomical_id_basic_info.get('$parent_container_name'): + balances[compact_id]['parent_container_name'] = atomical_id_basic_info.get('$parent_container_name') + if atomical_id_basic_info.get('$bitwork'): + balances[compact_id]['bitwork'] = atomical_id_basic_info.get('$bitwork') + if atomical_id_basic_info.get('$parents'): + balances[compact_id]['parents'] = atomical_id_basic_info.get('$parents') + if returned_utxo['height'] > 0: + balances[compact_id]['confirmed'] += returned_utxo['atomicals'][compact_id] + return {'balances': balances} + + async def _hash_x_list_scripthash_atomicals(self, hash_x, verbose=False): + utxos = await self.db.all_utxos(hash_x) + utxos = sorted(utxos) + # 
Comment out the utxos for now and add it in later + # utxos.extend(await self.mempool.unordered_UTXOs(hashX)) + self.bump_cost(1.0 + len(utxos) / 50) + spends = [] # await self.mempool.potential_spends(hashX) + returned_utxos = [] + atomicals_id_map = {} + for utxo in utxos: + if (utxo.tx_hash, utxo.tx_pos) in spends: + continue + atomicals = self.db.get_atomicals_by_utxo(utxo, True) + atomicals_basic_infos = {} + for atomical_id in atomicals: + # This call is efficient in that it's cached underneath. + # Now we only show the atomical id and its corresponding value + # because it can always be fetched separately which is more efficient. + basic_info = await self.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) + atomical_id_compact = location_id_bytes_to_compact(atomical_id) + atomicals_id_map[atomical_id_compact] = basic_info + location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) + atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id) + if verbose or len(atomicals) > 0: + returned_utxos.append({ + 'txid': hash_to_hex_str(utxo.tx_hash), + 'index': utxo.tx_pos, + 'vout': utxo.tx_pos, + 'height': utxo.height, + 'value': utxo.value, + 'atomicals': atomicals_basic_infos + }) + # Aggregate balances + return_struct = { + 'global': await self._get_summary_info(), + 'atomicals': {}, + 'utxos': returned_utxos + } + atomicals = {} + + for returned_utxo in returned_utxos: + for atomical_id_entry_compact in returned_utxo['atomicals']: + basic_info = atomicals_id_map[atomical_id_entry_compact] + id_ref = basic_info['atomical_id'] + if atomicals.get(id_ref) is None: + atomicals[id_ref] = { + 'atomical_id': id_ref, + 'atomical_number': basic_info['atomical_number'], + 'type': basic_info['type'], + 'confirmed': 0, + # 'subtype': atomical_id_basic_info.get('subtype'), + 'data': basic_info + } + if basic_info.get('$realm'): + atomicals[id_ref]['subtype'] = basic_info.get('subtype') + atomicals[id_ref]['request_realm_status'] = basic_info.get('$request_realm_status') + atomicals[id_ref]['request_realm'] = basic_info.get('$request_realm') + atomicals[id_ref]['realm'] = basic_info.get('$realm') + atomicals[id_ref]['full_realm_name'] = basic_info.get('$full_realm_name') + elif basic_info.get('$subrealm'): + atomicals[id_ref]['subtype'] = basic_info.get('subtype') + atomicals[id_ref]['request_subrealm_status'] = basic_info.get('$request_subrealm_status') + atomicals[id_ref]['request_subrealm'] = basic_info.get('$request_subrealm') + atomicals[id_ref]['parent_realm'] = basic_info.get('$parent_realm') + atomicals[id_ref]['subrealm'] = basic_info.get('$subrealm') + atomicals[id_ref]['full_realm_name'] = basic_info.get('$full_realm_name') + elif basic_info.get('$dmitem'): + atomicals[id_ref]['subtype'] = basic_info.get('subtype') + atomicals[id_ref]['request_dmitem_status'] = basic_info.get('$request_dmitem_status') + atomicals[id_ref]['request_dmitem'] = basic_info.get('$request_dmitem') + atomicals[id_ref]['parent_container'] = basic_info.get('$parent_container') + atomicals[id_ref]['dmitem'] = basic_info.get('$dmitem') + elif basic_info.get('$ticker'): + atomicals[id_ref]['subtype'] = basic_info.get('subtype') + atomicals[id_ref]['ticker_candidates'] = basic_info.get('$ticker_candidates') + atomicals[id_ref]['request_ticker_status'] = basic_info.get('$request_ticker_status') + atomicals[id_ref]['request_ticker'] = basic_info.get('$request_ticker') + atomicals[id_ref]['ticker'] = basic_info.get('$ticker') + elif basic_info.get('$container'): + 
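+                    # Verified containers mirror the realm/ticker branches above:
+                    # the '$'-prefixed fields are copied into the aggregated summary entry.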
atomicals[id_ref]['subtype'] = basic_info.get('subtype')
+                    atomicals[id_ref]['request_container_status'] = basic_info.get('$request_container_status')
+                    atomicals[id_ref]['container'] = basic_info.get('$container')
+                    atomicals[id_ref]['request_container'] = basic_info.get('$request_container')
+                # Label them as candidates if they are only candidates
+                elif basic_info.get('subtype') == 'request_realm':
+                    atomicals[id_ref]['subtype'] = basic_info.get('subtype')
+                    atomicals[id_ref]['request_realm_status'] = basic_info.get('$request_realm_status')
+                    atomicals[id_ref]['request_realm'] = basic_info.get('$request_realm')
+                    atomicals[id_ref]['realm_candidates'] = basic_info.get('$realm_candidates')
+                elif basic_info.get('subtype') == 'request_subrealm':
+                    atomicals[id_ref]['subtype'] = basic_info.get('subtype')
+                    atomicals[id_ref]['subrealm_candidates'] = basic_info.get('$subrealm_candidates')
+                    atomicals[id_ref]['request_subrealm_status'] = basic_info.get('$request_subrealm_status')
+                    atomicals[id_ref]['request_full_realm_name'] = basic_info.get('$request_full_realm_name')
+                    atomicals[id_ref]['request_subrealm'] = basic_info.get('$request_subrealm')
+                    atomicals[id_ref]['parent_realm'] = basic_info.get('$parent_realm')
+                elif basic_info.get('subtype') == 'request_dmitem':
+                    atomicals[id_ref]['subtype'] = basic_info.get('subtype')
+                    atomicals[id_ref]['dmitem_candidates'] = basic_info.get('$dmitem_candidates')
+                    atomicals[id_ref]['request_dmitem_status'] = basic_info.get('$request_dmitem_status')
+                    atomicals[id_ref]['request_dmitem'] = basic_info.get('$request_dmitem')
+                    atomicals[id_ref]['parent_container'] = basic_info.get('$parent_container')
+                elif basic_info.get('subtype') == 'request_container':
+                    atomicals[id_ref]['subtype'] = basic_info.get('subtype')
+                    atomicals[id_ref]['container_candidates'] = basic_info.get('$container_candidates')
+                    atomicals[id_ref]['request_container_status'] = basic_info.get('$request_container_status')
+                    atomicals[id_ref]['request_container'] = basic_info.get('$request_container')
+                elif basic_info.get('$request_ticker_status'):
+                    atomicals[id_ref]['subtype'] = basic_info.get('subtype')
+                    atomicals[id_ref]['ticker_candidates'] = basic_info.get('$ticker_candidates')
+                    atomicals[id_ref]['request_ticker_status'] = basic_info.get('$request_ticker_status')
+                    atomicals[id_ref]['request_ticker'] = basic_info.get('$request_ticker')
+
+                if returned_utxo['height'] <= 0:
+                    # 'unconfirmed' is not pre-initialized in the summary entry, so accumulate defensively
+                    atomicals[id_ref]['unconfirmed'] = atomicals[id_ref].get('unconfirmed', 0) + returned_utxo["atomicals"][id_ref]
+                else:
+                    atomicals[id_ref]['confirmed'] += returned_utxo["atomicals"][id_ref]
+
+        return_struct['atomicals'] = atomicals
+        return return_struct
+
+    # Perform a search for tickers, containers, and realms
+    def _atomicals_search_name_template(
+        self,
+        db_prefix,
+        name_type_str,
+        parent_prefix=None,
+        prefix=None,
+        reverse=False,
+        limit=1000,
+        offset=0,
+        is_verified_only=False
+    ):
+        db_entries = self.db.get_name_entries_template_limited(db_prefix, parent_prefix, prefix, reverse, limit, offset)
+        formatted_results = []
+        for item in db_entries:
+            height = self.bp.height
+            status = None
+            if name_type_str == "ticker":
+                status, _, _ = self.bp.get_effective_name_template(
+                    b'tick',
+                    item['name'],
+                    height,
+                    self.bp.ticker_data_cache
+                )
+            elif name_type_str == "realm":
+                status, _, _ = self.bp.get_effective_name_template(
+                    b'rlm',
+                    item['name'],
+                    height,
+                    self.bp.realm_data_cache
+                )
+            elif name_type_str == "collection":
+                status, _, _ = self.bp.get_effective_name_template(
+                    b'co',
+                    item['name'],
+                    height,
self.bp.container_data_cache + ) + elif name_type_str == "subrealm": + status, _, _ = self.bp.get_effective_subrealm( + parent_prefix, + item['name'], + height + ) + obj = { + 'atomical_id': location_id_bytes_to_compact(item['atomical_id']), + 'tx_num': item['tx_num'], + name_type_str + '_hex': item['name_hex'], + name_type_str: item['name'], + 'status': status, + } + if is_verified_only and status == "verified": + formatted_results.append(obj) + elif not is_verified_only: + formatted_results.append(obj) + return {'result': formatted_results} + + async def get_transaction_detail_by_height(self, height, limit, offset, op_type, reverse=True): + res = [] + txs_list = [] + txs = self.db.get_atomicals_block_txs(height) + for tx in txs: + # get operation by db method + tx_num, _ = self.db.get_tx_num_height_from_tx_hash(hex_str_to_hash(tx)) + txs_list.append({ + "tx_num": tx_num, + "tx_hash": tx, + "height": height + }) + + txs_list.sort(key=lambda x: x['tx_num'], reverse=reverse) + for tx in txs_list: + data = await self.session_mgr.get_transaction_detail(tx["tx_hash"], height, tx["tx_num"]) + if (op_type and op_type == data["op"]) or (not op_type and data["op"]): + res.append(data) + total = len(res) + return res[offset:offset + limit], total + + +######################################################################################################################## + +def _auto_populate_container_regular_items_fields(items): + if not items or not isinstance(items, dict): + return {} + for item, value in items.items(): + provided_id = value.get('id') + value['status'] = 'verified' + if provided_id and isinstance(provided_id, bytes) and len(provided_id) == 36: + value['$id'] = location_id_bytes_to_compact(provided_id) + return auto_encode_bytes_elements(items) + + +def _auto_populate_container_dmint_items_fields(items): + if not items or not isinstance(items, dict): + return {} + for item, value in items.items(): + provided_id = value.get('id') + if provided_id and isinstance(provided_id, bytes) and len(provided_id) == 36: + value['$id'] = location_id_bytes_to_compact(provided_id) + return auto_encode_bytes_elements(items) diff --git a/electrumx/server/session/util.py b/electrumx/server/session/util.py index 1140d956..62b1b08c 100644 --- a/electrumx/server/session/util.py +++ b/electrumx/server/session/util.py @@ -3,6 +3,10 @@ from electrumx.lib.hash import hex_str_to_hash, HASHX_LEN from electrumx.server.session import BAD_REQUEST +SESSION_BASE_MAX_CHUNK_SIZE = 2016 +SESSION_PROTOCOL_MIN = (1, 4) +SESSION_PROTOCOL_MAX = (1, 4, 3) + def scripthash_to_hashX(scripthash): try: From 6822ee5eec341a46b2781cdf5391b484bdc9df34 Mon Sep 17 00:00:00 2001 From: Wizz Wallet <153743376+wizz-wallet-dev@users.noreply.github.com> Date: Wed, 29 May 2024 00:38:06 +0800 Subject: [PATCH 09/13] Fix type imply --- electrumx/server/session/http_session.py | 10 +++++++--- electrumx/server/session/session_base.py | 9 ++++++--- electrumx/server/session/shared_session.py | 2 +- 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/electrumx/server/session/http_session.py b/electrumx/server/session/http_session.py index b9311ff4..b4bb4c13 100644 --- a/electrumx/server/session/http_session.py +++ b/electrumx/server/session/http_session.py @@ -2,12 +2,12 @@ import json from decimal import Decimal -from typing import Any, Awaitable, Callable import aiorpcx from aiohttp import web import electrumx.lib.util as util +from electrumx.server.http_middleware import success_resp, error_resp from 
electrumx.server.session.shared_session import SharedSession from electrumx.server.session.util import * from electrumx.version import electrumx_version @@ -20,7 +20,7 @@ def default(self, o): return super(DecimalEncoder, self).default(o) -async def formatted_request(request, call: Callable[[Any], Awaitable["web.StreamResponse"]]): +async def formatted_request(request, call): params: list if request.method == "GET": params = json.loads(request.query.get("params", "[]")) @@ -29,7 +29,11 @@ async def formatted_request(request, call: Callable[[Any], Awaitable["web.Stream params = json_data.get("params", []) else: params = [] - return await call(*params) + try: + result = await call(*params) + return success_resp(result) + except Exception as e: + return error_resp(500, e) class HttpHandler(object): diff --git a/electrumx/server/session/session_base.py b/electrumx/server/session/session_base.py index 4041b74b..b5dde6b0 100644 --- a/electrumx/server/session/session_base.py +++ b/electrumx/server/session/session_base.py @@ -1,4 +1,4 @@ -from typing import Optional, Tuple, Callable, Dict, TYPE_CHECKING +from typing import TYPE_CHECKING, Awaitable, Dict, Optional, Tuple import electrumx.lib.util as util import itertools @@ -57,7 +57,7 @@ def __init__( self.session_mgr.add_session(self) self.recalc_concurrency() # must be called after session_mgr.add_session self.protocol_tuple: Optional[Tuple[int, ...]] = None - self.request_handlers: Optional[Dict[str, Callable]] = None + self.request_handlers: Optional[Dict[str, Awaitable]] = None # Use the sharing session to manage handlers. self.ss = SharedSession( self.logger, @@ -129,7 +129,10 @@ async def handle_request(self, request): self.session_mgr.method_counts[method] += 1 coro = handler_invocation(handler, request)() - return await coro + if isinstance(coro, Awaitable): + return await coro + else: + return coro class LocalRPC(SessionBase): diff --git a/electrumx/server/session/shared_session.py b/electrumx/server/session/shared_session.py index 6a5db37e..9a009d54 100644 --- a/electrumx/server/session/shared_session.py +++ b/electrumx/server/session/shared_session.py @@ -19,7 +19,7 @@ from electrumx.server.session.session_manager import SessionManager -class SharedSession: +class SharedSession(object): def __init__( self, logger: LoggerAdapter, From 3e217e9433bc9fb12fd2436c36bcff9eca4433b7 Mon Sep 17 00:00:00 2001 From: Wizz Wallet <153743376+wizz-wallet-dev@users.noreply.github.com> Date: Wed, 29 May 2024 00:59:09 +0800 Subject: [PATCH 10/13] Add `server.info` --- electrumx/server/session/electrumx_session.py | 15 ++++++------ electrumx/server/session/http_session.py | 20 +++++++++------- electrumx/server/session/shared_session.py | 24 +++++++++---------- electrumx/version.py | 12 +++++++++- 4 files changed, 43 insertions(+), 28 deletions(-) diff --git a/electrumx/server/session/electrumx_session.py b/electrumx/server/session/electrumx_session.py index 6b44deec..9fd9080d 100644 --- a/electrumx/server/session/electrumx_session.py +++ b/electrumx/server/session/electrumx_session.py @@ -8,7 +8,7 @@ from electrumx.server.daemon import DaemonError from electrumx.server.session.session_base import SessionBase from electrumx.server.session.util import * -from electrumx.version import electrumx_version, electrumx_version_short +from electrumx.version import electrumx_version, electrumx_version_short, get_server_info class ElectrumX(SessionBase): @@ -124,6 +124,13 @@ async def _notify_inner(self, touched, height_changed): def set_request_handlers(self, 
protocols): self.protocol_tuple: Tuple[int, ...] = protocols handlers = { + # 'server.banner': self.banner, + 'server.donation_address': self.ss.donation_address, + 'server.features': self.server_features_async, + 'server.info': get_server_info, + # 'server.peers.subscribe': self.peers_subscribe, + # 'server.ping': self.ss.ping, + # 'server.version': self.server_version, 'blockchain.headers.subscribe': self.ss.headers_subscribe, 'blockchain.block.header': self.ss.block_header, 'blockchain.block.headers': self.ss.block_headers, @@ -140,12 +147,6 @@ def set_request_handlers(self, protocols): 'blockchain.transaction.get_merkle': self.ss.transaction_merkle, 'blockchain.transaction.id_from_pos': self.ss.transaction_id_from_pos, 'mempool.get_fee_histogram': self.ss.compact_fee_histogram, - # 'server.banner': self.banner, - 'server.donation_address': self.ss.donation_address, - 'server.features': self.server_features_async, - # 'server.peers.subscribe': self.peers_subscribe, - # 'server.ping': self.ss.ping, - # 'server.version': self.server_version, # The Atomicals era has begun # 'blockchain.atomicals.validate': self.ss.transaction_broadcast_validate, 'blockchain.atomicals.get_ft_balances_scripthash': self.ss.atomicals_get_ft_balances, diff --git a/electrumx/server/session/http_session.py b/electrumx/server/session/http_session.py index b4bb4c13..0ab28c8a 100644 --- a/electrumx/server/session/http_session.py +++ b/electrumx/server/session/http_session.py @@ -2,6 +2,7 @@ import json from decimal import Decimal +from typing import Awaitable import aiorpcx from aiohttp import web @@ -10,7 +11,7 @@ from electrumx.server.http_middleware import success_resp, error_resp from electrumx.server.session.shared_session import SharedSession from electrumx.server.session.util import * -from electrumx.version import electrumx_version +from electrumx.version import electrumx_version, get_server_info class DecimalEncoder(json.JSONEncoder): @@ -30,7 +31,9 @@ async def formatted_request(request, call): else: params = [] try: - result = await call(*params) + result = call(*params) + if isinstance(result, Awaitable): + result = await result return success_resp(result) except Exception as e: return error_resp(500, e) @@ -68,6 +71,13 @@ def __init__(self, session_mgr, db, mempool, peer_mgr, kind): async def add_endpoints(self, router, protocols): handlers = { 'health': self.health, + # 'server.banner': self.ss.banner, + 'server.donation_address': self.ss.donation_address, + 'server.features': self.server_features_async, + 'server.info': get_server_info, + # 'server.peers.subscribe': self.ss.peers_subscribe, + # 'server.ping': self.ss.ping, + # 'server.version': self.server_version, 'blockchain.headers.subscribe': self.ss.headers_subscribe, 'blockchain.block.header': self.ss.block_header, 'blockchain.block.headers': self.ss.block_headers, @@ -84,12 +94,6 @@ async def add_endpoints(self, router, protocols): 'blockchain.transaction.get_merkle': self.ss.transaction_merkle, 'blockchain.transaction.id_from_pos': self.ss.transaction_id_from_pos, 'mempool.get_fee_histogram': self.ss.compact_fee_histogram, - # 'server.banner': self.ss.banner, - 'server.donation_address': self.ss.donation_address, - 'server.features': self.server_features_async, - # 'server.peers.subscribe': self.ss.peers_subscribe, - # 'server.ping': self.ss.ping, - # 'server.version': self.server_version, # The Atomicals era has begun # 'blockchain.atomicals.validate': self.ss.transaction_broadcast_validate, 'blockchain.atomicals.get_ft_balances_scripthash': 
self.ss.atomicals_get_ft_balances, diff --git a/electrumx/server/session/shared_session.py b/electrumx/server/session/shared_session.py index 9a009d54..50301c5c 100644 --- a/electrumx/server/session/shared_session.py +++ b/electrumx/server/session/shared_session.py @@ -49,6 +49,18 @@ def __init__( ################################################################################################################ + async def donation_address(self): + """Return the donation address as a string, empty if there is none.""" + self.bump_cost(0.1) + return self.env.donation_address + + async def ping(self): + """Serves as a connection keep-alive mechanism and for the client to + confirm the server is still responding. + """ + self.bump_cost(0.1) + return None + async def block_header(self, height, cp_height=0): """Return a raw block header as a hexadecimal string, or as a dictionary with a merkle proof.""" @@ -181,18 +193,6 @@ async def compact_fee_histogram(self): self.bump_cost(1.0) return await self.mempool.compact_fee_histogram() - async def donation_address(self): - """Return the donation address as a string, empty if there is none.""" - self.bump_cost(0.1) - return self.env.donation_address - - async def ping(self): - """Serves as a connection keep-alive mechanism and for the client to - confirm the server is still responding. - """ - self.bump_cost(0.1) - return None - async def atomicals_get_ft_balances(self, scripthash): """Return the FT balances for a scripthash address""" hash_x = scripthash_to_hashX(scripthash) diff --git a/electrumx/version.py b/electrumx/version.py index 77cdc88b..32c13602 100644 --- a/electrumx/version.py +++ b/electrumx/version.py @@ -1,3 +1,13 @@ -__version__ = "1.4.2.0" +__version__ = "1.5.0.0" electrumx_version = f'ElectrumX {__version__}' electrumx_version_short = __version__ + +__aip__ = [1, 3] +aip_implemented = __aip__ + + +def get_server_info(): + return { + 'aip_implemented': aip_implemented, + 'version': electrumx_version_short, + } From 1456a983f05c4be6b9984dbb45067e3df9e6537e Mon Sep 17 00:00:00 2001 From: Wizz Wallet <153743376+wizz-wallet-dev@users.noreply.github.com> Date: Wed, 29 May 2024 10:58:12 +0800 Subject: [PATCH 11/13] Format server files --- electrumx/lib/atomicals_blueprint_builder.py | 1465 ++++--- electrumx/lib/util_atomicals.py | 12 +- electrumx/server/block_processor.py | 85 +- electrumx/server/controller.py | 17 +- electrumx/server/daemon.py | 136 +- electrumx/server/env.py | 14 +- electrumx/server/history.py | 22 +- electrumx/server/mempool.py | 128 +- electrumx/server/peers.py | 50 +- electrumx/server/session.py | 3490 ----------------- electrumx/server/session/electrumx_session.py | 4 +- electrumx/server/session/http_session.py | 2 +- electrumx/server/session/session_manager.py | 4 +- electrumx/server/session/shared_session.py | 22 +- electrumx/server/session/util.py | 2 +- electrumx/server/storage.py | 36 +- tests/lib/test_atomicals_blueprint_builder.py | 2 +- 17 files changed, 1129 insertions(+), 4362 deletions(-) delete mode 100644 electrumx/server/session.py diff --git a/electrumx/lib/atomicals_blueprint_builder.py b/electrumx/lib/atomicals_blueprint_builder.py index a65fcfb5..41eb0505 100644 --- a/electrumx/lib/atomicals_blueprint_builder.py +++ b/electrumx/lib/atomicals_blueprint_builder.py @@ -1,3 +1,5 @@ +from electrumx.lib.hash import hash_to_hex_str +from electrumx.lib.script import is_unspendable_legacy, is_unspendable_genesis from electrumx.lib.util_atomicals import ( is_custom_colored_operation, is_splat_operation, @@ 
-11,30 +13,38 @@ is_integer_num, SUBNAME_MIN_PAYMENT_DUST_LIMIT ) -from electrumx.lib.hash import hash_to_hex_str, HASHX_LEN -from electrumx.lib.util import ( - unpack_le_uint64, unpack_le_uint16_from -) -from electrumx.lib.script import SCRIPTHASH_LEN, is_unspendable_legacy, is_unspendable_genesis + class FtColoringSummary: - def __init__(self, atomical_id_to_expected_outs_map, fts_burned, cleanly_assigned, atomicals_list): - self.atomical_id_to_expected_outs_map = atomical_id_to_expected_outs_map - self.cleanly_assigned = cleanly_assigned - self.fts_burned = fts_burned - self.atomicals_list = atomicals_list + def __init__(self, atomical_id_to_expected_outs_map, fts_burned, cleanly_assigned, atomicals_list): + self.atomical_id_to_expected_outs_map = atomical_id_to_expected_outs_map + self.cleanly_assigned = cleanly_assigned + self.fts_burned = fts_burned + self.atomicals_list = atomicals_list + + def __repr__(self): + return ( + f'FtColoringSummary ' + f'cleanly_assigned: {self.cleanly_assigned}, ' + f'fts_burned: {self.fts_burned}, ' + f'atomicals_list: {self.atomicals_list}' + ) - def __repr__(self): - return f'FtColoringSummary cleanly_assigned: {self.cleanly_assigned}, fts_burned: {self.fts_burned}, atomicals_list: {self.atomicals_list}' class ExpectedOutputSet: - '''Store the expected output indexes to be colored and the exponent for the outputs to apply''' - def __init__(self, expected_outputs, expected_values): - self.expected_outputs = expected_outputs - self.expected_values = expected_values + """Store the expected output indexes to be colored and the exponent for the outputs to apply""" + + def __init__(self, expected_outputs, expected_values): + self.expected_outputs = expected_outputs + self.expected_values = expected_values + + def __repr__(self): + return ( + f'ExpectedOutputSet ' + f'expected_outputs: {self.expected_outputs}, ' + f'expected_values: {self.expected_values}' + ) - def __repr__(self): - return f'ExpectedOutputSet expected_outputs: {self.expected_outputs}, expected_values: {self.expected_values}' def build_reverse_output_to_atomical_id_exponent_map(atomical_id_to_output_index_map): if not atomical_id_to_output_index_map: @@ -46,14 +56,25 @@ def build_reverse_output_to_atomical_id_exponent_map(atomical_id_to_output_index reverse_mapped[out_idx][atomical_id] = output_info.expected_values return reverse_mapped + def get_nominal_atomical_value(value): return value -def calculate_outputs_to_color_for_ft_atomical_ids(tx, ft_atomicals, sort_by_fifo, is_custom_coloring_activated) -> FtColoringSummary: + +def calculate_outputs_to_color_for_ft_atomical_ids( + tx, + ft_atomicals, + sort_by_fifo, + is_custom_coloring_activated +) -> FtColoringSummary | None: num_fts = len(ft_atomicals.keys()) if num_fts == 0: return None - # return FtColoringSummary(potential_atomical_ids_to_output_idxs_map, fts_burned, not non_clean_output_slots, atomical_list) + # return FtColoringSummary( + # potential_atomical_ids_to_output_idxs_map, + # fts_burned, + # not non_clean_output_slots, atomical_list + # ) atomical_list = order_ft_inputs(ft_atomicals, sort_by_fifo) next_start_out_idx = 0 potential_atomical_ids_to_output_idxs_map = {} @@ -61,49 +82,79 @@ def calculate_outputs_to_color_for_ft_atomical_ids(tx, ft_atomicals, sort_by_fif utxo_cleanly_assigned = True fts_burned = {} for item in atomical_list: - atomical_id = item.atomical_id - # If a target exponent was provided, then use that instead - cleanly_assigned, expected_outputs, remaining_value_from_assign = 
AtomicalsTransferBlueprintBuilder.assign_expected_outputs_basic(item.total_atomical_value, tx, next_start_out_idx, is_custom_coloring_activated)
-        if not cleanly_assigned:
-            utxo_cleanly_assigned = False
-        if not is_custom_coloring_activated:
-            if cleanly_assigned and len(expected_outputs) > 0:
-                next_start_out_idx = expected_outputs[-1] + 1
-                potential_atomical_ids_to_output_idxs_map[atomical_id] = ExpectedOutputSet(expected_outputs, item.total_atomical_value)
-            else:
-                # Erase the potential for safety
-                potential_atomical_ids_to_output_idxs_map = {}
-                non_clean_output_slots = True
-                break
-        else:
-            if remaining_value_from_assign > 0:
-                fts_burned[atomical_id] = remaining_value_from_assign
-            # no need cleanly_assigned
-            if len(expected_outputs) > 0:
-                next_start_out_idx = expected_outputs[-1] + 1
-                potential_atomical_ids_to_output_idxs_map[atomical_id] = ExpectedOutputSet(expected_outputs, item.total_atomical_value)
+        atomical_id = item.atomical_id
+        # If a target exponent was provided, then use that instead
+        cleanly_assigned, expected_outputs, remaining_value_from_assign = AtomicalsTransferBlueprintBuilder.assign_expected_outputs_basic(
+            item.total_atomical_value,
+            tx,
+            next_start_out_idx,
+            is_custom_coloring_activated
+        )
+        if not cleanly_assigned:
+            utxo_cleanly_assigned = False
+        if not is_custom_coloring_activated:
+            if cleanly_assigned and len(expected_outputs) > 0:
+                next_start_out_idx = expected_outputs[-1] + 1
+                potential_atomical_ids_to_output_idxs_map[atomical_id] = ExpectedOutputSet(
+                    expected_outputs,
+                    item.total_atomical_value
+                )
+            else:
+                # Erase the potential for safety
+                potential_atomical_ids_to_output_idxs_map = {}
+                non_clean_output_slots = True
+                break
         else:
+            if remaining_value_from_assign > 0:
+                fts_burned[atomical_id] = remaining_value_from_assign
+            # no need cleanly_assigned
+            if len(expected_outputs) > 0:
+                next_start_out_idx = expected_outputs[-1] + 1
+                potential_atomical_ids_to_output_idxs_map[atomical_id] = ExpectedOutputSet(
+                    expected_outputs,
+                    item.total_atomical_value
+                )
+            else:
+                # if no usable utxo
+                potential_atomical_ids_to_output_idxs_map = {}
+                non_clean_output_slots = True
+                break
    # If the output slots did not fit cleanly, then default to just assigning everything from the 0'th output index
    if non_clean_output_slots:
-        potential_atomical_ids_to_output_idxs_map = {}
-        for item in atomical_list:
-            atomical_id = item.atomical_id
-            cleanly_assigned, expected_outputs, remaining_value_from_assign = AtomicalsTransferBlueprintBuilder.assign_expected_outputs_basic(item.total_atomical_value, tx, 0, is_custom_coloring_activated)
-            potential_atomical_ids_to_output_idxs_map[atomical_id] = ExpectedOutputSet(expected_outputs, item.total_atomical_value)
-            if remaining_value_from_assign > 0:
-                fts_burned[atomical_id] = remaining_value_from_assign
-            if not cleanly_assigned:
-                utxo_cleanly_assigned = False
-        return FtColoringSummary(potential_atomical_ids_to_output_idxs_map, fts_burned, utxo_cleanly_assigned, atomical_list)
-    return FtColoringSummary(potential_atomical_ids_to_output_idxs_map, fts_burned, utxo_cleanly_assigned, atomical_list)
-
+        potential_atomical_ids_to_output_idxs_map = {}
+        for item in atomical_list:
+            atomical_id = item.atomical_id
+            cleanly_assigned, expected_outputs, remaining_value_from_assign = AtomicalsTransferBlueprintBuilder.assign_expected_outputs_basic(
+                item.total_atomical_value,
+                tx,
+                0,
+                
is_custom_coloring_activated + ) + potential_atomical_ids_to_output_idxs_map[atomical_id] = ExpectedOutputSet( + expected_outputs, + item.total_atomical_value + ) + if remaining_value_from_assign > 0: + fts_burned[atomical_id] = remaining_value_from_assign + if not cleanly_assigned: + utxo_cleanly_assigned = False + return FtColoringSummary( + potential_atomical_ids_to_output_idxs_map, + fts_burned, + utxo_cleanly_assigned, + atomical_list + ) + return FtColoringSummary( + potential_atomical_ids_to_output_idxs_map, + fts_burned, + utxo_cleanly_assigned, + atomical_list + ) + + class AtomicalsTransferBlueprintBuilderError(Exception): - '''Raised when Atomicals Blueprint builder has an error''' + """Raised when Atomicals Blueprint builder has an error""" class AtomicalInputItem: @@ -146,574 +197,764 @@ def __repr__(self): class AtomicalColoredOutputNft: - def __init__(self, input_summary_info: AtomicalInputSummary): - self.input_summary_info = input_summary_info - + def __init__(self, input_summary_info: AtomicalInputSummary): + self.input_summary_info = input_summary_info + + class AtomicalFtOutputBlueprintAssignmentSummary: - def __init__(self, outputs, fts_burned, cleanly_assigned, first_atomical_id): - self.outputs = outputs - self.fts_burned = fts_burned - self.cleanly_assigned = cleanly_assigned - self.first_atomical_id = first_atomical_id - + def __init__(self, outputs, fts_burned, cleanly_assigned, first_atomical_id): + self.outputs = outputs + self.fts_burned = fts_burned + self.cleanly_assigned = cleanly_assigned + self.first_atomical_id = first_atomical_id + + class AtomicalNftOutputBlueprintAssignmentSummary: - def __init__(self, outputs): - self.outputs = outputs - -def order_ft_inputs(ft_atomicals: AtomicalInputSummary, sort_by_fifo): - atomical_list = [] - # If sorting is by FIFO, then get the mappng of which FTs are at which inputs - if sort_by_fifo: - input_idx_map = {} - for atomical_id, ft_info in ft_atomicals.items(): - for input_index_for_atomical in ft_info.input_indexes: - txin_index = input_index_for_atomical.txin_index - input_idx_map[txin_index] = input_idx_map.get(txin_index) or [] - input_idx_map[txin_index].append(atomical_id) - # Now for each input, we assign the atomicals, making sure to ignore the ones we've seen already - seen_atomical_id_map = {} - for input_idx, atomicals_array in sorted(input_idx_map.items()): - for atomical_id in sorted(atomicals_array): - if seen_atomical_id_map.get(atomical_id): - continue - seen_atomical_id_map[atomical_id] = True - atomical_list.append(ft_atomicals[atomical_id]) - else: - for atomical_id, ft_info in sorted(ft_atomicals.items()): - atomical_list.append(ft_info) - return atomical_list - -class AtomicalsTransferBlueprintBuilder: - '''Atomicals transfer blueprint builder for calculating outputs to color''' - def __init__( - self, - logger, - atomicals_spent_at_inputs, - operations_found_at_inputs, - tx_hash, - tx, - get_atomicals_id_mint_info, - sort_fifo, - is_custom_coloring_activated - ): - self.logger = logger - self.atomicals_spent_at_inputs = atomicals_spent_at_inputs - self.operations_found_at_inputs = operations_found_at_inputs - self.tx_hash = tx_hash - self.tx = tx - self.get_atomicals_id_mint_info = get_atomicals_id_mint_info - self.sort_fifo = sort_fifo - self.is_custom_coloring_activated = is_custom_coloring_activated - nft_atomicals, ft_atomicals, atomical_ids_spent = AtomicalsTransferBlueprintBuilder.build_atomical_input_summaries_by_type(self.get_atomicals_id_mint_info, atomicals_spent_at_inputs) - 
self.nft_atomicals = nft_atomicals - self.ft_atomicals = ft_atomicals - nft_output_blueprint, ft_output_blueprint = AtomicalsTransferBlueprintBuilder.calculate_output_blueprint(self.get_atomicals_id_mint_info, self.tx, self.nft_atomicals, self.ft_atomicals, self.atomicals_spent_at_inputs, self.operations_found_at_inputs, self.sort_fifo, self.is_custom_coloring_activated) - self.nft_output_blueprint = nft_output_blueprint - self.ft_output_blueprint = ft_output_blueprint - # if len(ft_atomicals) > 0 or len(nft_atomicals) > 0: - # self.logger.info(f'tx_hash={hash_to_hex_str(tx_hash)} atomicals_spent_at_inputs={encode_atomical_ids_hex(atomicals_spent_at_inputs)} operations_found_at_inputs={operations_found_at_inputs}') - self.fts_burned = ft_output_blueprint.fts_burned - self.cleanly_assigned = ft_output_blueprint.cleanly_assigned - self.are_fts_burned = len(ft_output_blueprint.fts_burned) > 0 - self.atomical_ids_spent = atomical_ids_spent - self.is_mint = is_mint_operation(self.operations_found_at_inputs) - - @classmethod - def order_ft_inputs(cls, ft_atomicals, sort_by_fifo): - ''' Order the inputs by FIFO or by legacy ''' + def __init__(self, outputs): + self.outputs = outputs + + +def order_ft_inputs(ft_atomicals, sort_by_fifo): atomical_list = [] # If sorting is by FIFO, then get the mappng of which FTs are at which inputs if sort_by_fifo: - input_idx_map = {} - for atomical_id, ft_info in ft_atomicals.items(): - for input_index_for_atomical in ft_info.input_indexes: - txin_index = input_index_for_atomical.txin_index - input_idx_map[txin_index] = input_idx_map.get(txin_index) or [] - input_idx_map[txin_index].append({ - 'atomical_id': atomical_id, - }) - # Now for each input, we assign the atomicals, making sure to ignore the ones we've seen already - seen_atomical_id_map = {} - for input_idx, atomicals_array in sorted(input_idx_map.items()): - for atomical_id_info in sorted(atomicals_array): - if seen_atomical_id_map.get(atomical_id_info['atomical_id']): - continue - seen_atomical_id_map[atomical_id_info['atomical_id']] = True - atomical_list.append(ft_atomicals[atomical_id_info['atomical_id']]) + input_idx_map = {} + for atomical_id, ft_info in ft_atomicals.items(): + for input_index_for_atomical in ft_info.input_indexes: + txin_index = input_index_for_atomical.txin_index + input_idx_map[txin_index] = input_idx_map.get(txin_index) or [] + input_idx_map[txin_index].append(atomical_id) + # Now for each input, we assign the atomicals, making sure to ignore the ones we've seen already + seen_atomical_id_map = {} + for input_idx, atomicals_array in sorted(input_idx_map.items()): + for atomical_id in sorted(atomicals_array): + if seen_atomical_id_map.get(atomical_id): + continue + seen_atomical_id_map[atomical_id] = True + atomical_list.append(ft_atomicals[atomical_id]) else: - for atomical_id, ft_info in sorted(ft_atomicals.items()): - atomical_list.append(ft_info) + for atomical_id, ft_info in sorted(ft_atomicals.items()): + atomical_list.append(ft_info) return atomical_list - # Maps all the inputs that contain NFTs - @classmethod - def build_nft_input_idx_to_atomical_map(cls, get_atomicals_id_mint_info, atomicals_spent_at_inputs): - input_idx_to_atomical_ids_map = {} - for txin_index, atomicals_entry_list in atomicals_spent_at_inputs.items(): - for atomicals_entry in atomicals_entry_list: - atomical_id = atomicals_entry['atomical_id'] - atomical_mint_info = get_atomicals_id_mint_info(atomical_id, True) - if not atomical_mint_info: - raise 
AtomicalsTransferBlueprintBuilderError(f'build_nft_input_idx_to_atomical_map {atomical_id.hex()} not found in mint info. IndexError.')
-                if atomical_mint_info['type'] != 'NFT':
-                    continue
-                input_idx_to_atomical_ids_map[txin_index] = input_idx_to_atomical_ids_map.get(txin_index) or {}
-                input_idx_to_atomical_ids_map[txin_index][atomical_id] = AtomicalInputSummary(atomical_id, atomical_mint_info['type'], atomical_mint_info)
-                # Populate the summary information
-                value = atomicals_entry['data_value']['sat_value']
-                # Exponent is always 0 for NFTs
-                input_idx_to_atomical_ids_map[txin_index][atomical_id].apply_input(txin_index, value, value)
-        return input_idx_to_atomical_ids_map
-
-    @classmethod
-    def calculate_nft_atomicals_regular(cls, nft_map, nft_atomicals, tx, operations_found_at_inputs, sort_fifo):
-        # Use a simplified mapping of NFTs using FIFO to the outputs
-        if sort_fifo:
-            next_output_idx = 0
-            map_output_idxs_for_atomicals = {}
-            # Build a map of input ids to NFTs
-            for input_idx, atomicals_ids_map in nft_map.items():
-                found_atomical_at_input = False
-                for atomical_id, atomical_summary_info in atomicals_ids_map.items():
-                    found_atomical_at_input = True
-                    expected_output_index = next_output_idx
-                    if expected_output_index >= len(tx.outputs) or is_unspendable_genesis(tx.outputs[expected_output_index].pk_script) or is_unspendable_legacy(tx.outputs[expected_output_index].pk_script):
+
+class AtomicalsTransferBlueprintBuilder:
+    """Atomicals transfer blueprint builder for calculating outputs to color"""
+
+    def __init__(
+        self,
+        logger,
+        atomicals_spent_at_inputs,
+        operations_found_at_inputs,
+        tx_hash,
+        tx,
+        get_atomicals_id_mint_info,
+        sort_fifo,
+        is_custom_coloring_activated
+    ):
+        self.logger = logger
+        self.atomicals_spent_at_inputs = atomicals_spent_at_inputs
+        self.operations_found_at_inputs = operations_found_at_inputs
+        self.tx_hash = tx_hash
+        self.tx = tx
+        self.get_atomicals_id_mint_info = get_atomicals_id_mint_info
+        self.sort_fifo = sort_fifo
+        self.is_custom_coloring_activated = is_custom_coloring_activated
+        nft_atomicals, ft_atomicals, atomical_ids_spent = AtomicalsTransferBlueprintBuilder.build_atomical_input_summaries_by_type(
+            self.get_atomicals_id_mint_info,
+            atomicals_spent_at_inputs
+        )
+        self.nft_atomicals = nft_atomicals
+        self.ft_atomicals = ft_atomicals
+        nft_output_blueprint, ft_output_blueprint = AtomicalsTransferBlueprintBuilder.calculate_output_blueprint(
+            self.get_atomicals_id_mint_info, self.tx, self.nft_atomicals, self.ft_atomicals,
+            self.atomicals_spent_at_inputs, self.operations_found_at_inputs, self.sort_fifo,
+            self.is_custom_coloring_activated)
+        self.nft_output_blueprint = nft_output_blueprint
+        self.ft_output_blueprint = ft_output_blueprint
+        # if len(ft_atomicals) > 0 or len(nft_atomicals) > 0:
+        #     self.logger.info(
+        #         f'tx_hash={hash_to_hex_str(tx_hash)} '
+        #         f'atomicals_spent_at_inputs={encode_atomical_ids_hex(atomicals_spent_at_inputs)} '
+        #         f'operations_found_at_inputs={operations_found_at_inputs}'
+        #     )
+        self.fts_burned = ft_output_blueprint.fts_burned
+        self.cleanly_assigned = ft_output_blueprint.cleanly_assigned
+        self.are_fts_burned = len(ft_output_blueprint.fts_burned) > 0
+        self.atomical_ids_spent = atomical_ids_spent
+        self.is_mint = is_mint_operation(self.operations_found_at_inputs)
+
+    @classmethod
+    def order_ft_inputs(cls, ft_atomicals, sort_by_fifo):
+        """ Order the inputs by FIFO or by legacy """
+        atomical_list = []
+        # If sorting is by FIFO, then get the mapping of which FTs are at which inputs
+        if 
sort_by_fifo: + input_idx_map = {} + for atomical_id, ft_info in ft_atomicals.items(): + for input_index_for_atomical in ft_info.input_indexes: + txin_index = input_index_for_atomical.txin_index + input_idx_map[txin_index] = input_idx_map.get(txin_index) or [] + input_idx_map[txin_index].append({ + 'atomical_id': atomical_id, + }) + # Now for each input, we assign the atomicals, making sure to ignore the ones we've seen already + seen_atomical_id_map = {} + for input_idx, atomicals_array in sorted(input_idx_map.items()): + for atomical_id_info in sorted(atomicals_array): + if seen_atomical_id_map.get(atomical_id_info['atomical_id']): + continue + seen_atomical_id_map[atomical_id_info['atomical_id']] = True + atomical_list.append(ft_atomicals[atomical_id_info['atomical_id']]) + else: + for atomical_id, ft_info in sorted(ft_atomicals.items()): + atomical_list.append(ft_info) + return atomical_list + + # Maps all the inputs that contain NFTs + @classmethod + def build_nft_input_idx_to_atomical_map(cls, get_atomicals_id_mint_info, atomicals_spent_at_inputs): + input_idx_to_atomical_ids_map = {} + for txin_index, atomicals_entry_list in atomicals_spent_at_inputs.items(): + for atomicals_entry in atomicals_entry_list: + atomical_id = atomicals_entry['atomical_id'] + atomical_mint_info = get_atomicals_id_mint_info(atomical_id, True) + if not atomical_mint_info: + raise AtomicalsTransferBlueprintBuilderError( + f'build_nft_input_idx_to_atomical_map {atomical_id.hex()} not found in mint info. ' + f'IndexError.' + ) + if atomical_mint_info['type'] != 'NFT': + continue + input_idx_to_atomical_ids_map[txin_index] = input_idx_to_atomical_ids_map.get(txin_index) or {} + input_idx_to_atomical_ids_map[txin_index][atomical_id] = AtomicalInputSummary( + atomical_id, + atomical_mint_info['type'], + atomical_mint_info + ) + # Populate the summary information + value = atomicals_entry['data_value']['sat_value'] + # Exponent is always 0 for NFTs + input_idx_to_atomical_ids_map[txin_index][atomical_id].apply_input(txin_index, value, value) + return input_idx_to_atomical_ids_map + + @classmethod + def calculate_nft_atomicals_regular(cls, nft_map, nft_atomicals, tx, operations_found_at_inputs, sort_fifo): + # Use a simplified mapping of NFTs using FIFO to the outputs + if sort_fifo: + next_output_idx = 0 + map_output_idxs_for_atomicals = {} + # Build a map of input ids to NFTs + for input_idx, atomicals_ids_map in nft_map.items(): + found_atomical_at_input = False + for atomical_id, atomical_summary_info in atomicals_ids_map.items(): + found_atomical_at_input = True + expected_output_index = next_output_idx + if expected_output_index >= len(tx.outputs) or is_unspendable_genesis( + tx.outputs[expected_output_index].pk_script) or is_unspendable_legacy( + tx.outputs[expected_output_index].pk_script): + expected_output_index = 0 + # Also keep them at the 0'th index if the split command was used + if is_split_operation(operations_found_at_inputs): + expected_output_index = 0 + map_output_idxs_for_atomicals[expected_output_index] = map_output_idxs_for_atomicals.get( + expected_output_index) or {'atomicals': {}} + map_output_idxs_for_atomicals[expected_output_index]['atomicals'][ + atomical_id] = atomical_summary_info + if found_atomical_at_input: + next_output_idx += 1 + return AtomicalNftOutputBlueprintAssignmentSummary(map_output_idxs_for_atomicals) + else: + map_output_idxs_for_atomicals = {} + # Assign NFTs the legacy way with 1:1 inputs to outputs + for atomical_id, atomical_summary_info in nft_atomicals.items(): + 
expected_output_index = AtomicalsTransferBlueprintBuilder.calculate_nft_output_index_legacy(
+                    atomical_summary_info.input_indexes[0].txin_index,
+                    tx,
+                    operations_found_at_inputs
+                )
+                map_output_idxs_for_atomicals[expected_output_index] = map_output_idxs_for_atomicals.get(
+                    expected_output_index) or {'atomicals': {}}
+                map_output_idxs_for_atomicals[expected_output_index]['atomicals'][atomical_id] = atomical_summary_info
+            return AtomicalNftOutputBlueprintAssignmentSummary(map_output_idxs_for_atomicals)
+
+    @classmethod
+    def calculate_nft_atomicals_splat(cls, nft_atomicals, tx):
+        # Splat takes all the NFT atomicals across all inputs (including multiple atomicals at the same utxo) and then
+        # separates them into their own distinctive output such that no two atomicals
+        # will share a resulting output. This operation requires that there are at least as many outputs
+        # as there are NFT atomicals. If there are not enough, then this is considered a noop and those extra NFTs
+        # are assigned to output 0. If there are enough outputs, then the earliest atomical
+        # (sorted lexicographically in ascending order) goes to the 0'th output, then the second atomical goes to the
+        # 1'st output, etc., until all atomicals are assigned to their own output.
+        expected_output_index_incrementing = 0  # Begin assigning splatted atomicals at the 0'th index
+        output_colored_map = {}
+        for atomical_id, atomical_summary_info in sorted(nft_atomicals.items()):
+            expected_output_index = expected_output_index_incrementing
+            if expected_output_index_incrementing >= len(tx.outputs) or is_unspendable_genesis(
+                    tx.outputs[expected_output_index_incrementing].pk_script) or is_unspendable_legacy(
+                    tx.outputs[expected_output_index_incrementing].pk_script):
+                expected_output_index = 0
+            output_colored_map[expected_output_index] = output_colored_map.get(expected_output_index) or {
+                'atomicals': {}}
+            output_colored_map[expected_output_index]['atomicals'][atomical_id] = atomical_summary_info
+            expected_output_index_incrementing += 1
+        return AtomicalNftOutputBlueprintAssignmentSummary(output_colored_map)
+
+    @classmethod
+    def calculate_output_blueprint_nfts(
+        cls,
+        get_atomicals_id_mint_info,
+        tx,
+        nft_atomicals,
+        atomicals_spent_at_inputs,
+        operations_found_at_inputs,
+        sort_fifo
+    ):
+        if not nft_atomicals or len(nft_atomicals) == 0:
+            return AtomicalNftOutputBlueprintAssignmentSummary({})
+        should_splat_nft_atomicals = is_splat_operation(operations_found_at_inputs)
+        if should_splat_nft_atomicals and len(nft_atomicals.keys()) > 0:
+            return AtomicalsTransferBlueprintBuilder.calculate_nft_atomicals_splat(nft_atomicals, tx)
+        else:
+            # To sort by fifo for NFTs, we also need to calculate a mapping of the nfts to inputs first
+            nft_map = AtomicalsTransferBlueprintBuilder.build_nft_input_idx_to_atomical_map(
+                get_atomicals_id_mint_info,
+                atomicals_spent_at_inputs
+            )
+            return AtomicalsTransferBlueprintBuilder.calculate_nft_atomicals_regular(
+                nft_map,
+                nft_atomicals,
+                tx,
+                operations_found_at_inputs,
+                sort_fifo
+            )
+
+    @classmethod
+    def calculate_output_blueprint_fts(
+        cls,
+        tx,
+        ft_atomicals,
+        operations_found_at_inputs,
+        sort_fifo,
+        is_custom_coloring_activated
+    ):
+        if not ft_atomicals or len(ft_atomicals) == 0:
+            return AtomicalFtOutputBlueprintAssignmentSummary({}, {}, True, None)
+        # Split apart multiple NFT/FT from a UTXO
+        should_split_ft_atomicals = is_split_operation(operations_found_at_inputs)
+        if should_split_ft_atomicals:
+            return 
AtomicalsTransferBlueprintBuilder.color_ft_atomicals_split(
+                ft_atomicals,
+                operations_found_at_inputs,
+                tx,
+                is_custom_coloring_activated
+            )
+        should_custom_colored_ft_atomicals = (is_custom_colored_operation(operations_found_at_inputs) and
+                                              is_custom_coloring_activated)
+        if should_custom_colored_ft_atomicals:
+            return AtomicalsTransferBlueprintBuilder.custom_color_ft_atomicals(
+                ft_atomicals,
+                operations_found_at_inputs,
+                tx
+            )
+        # Normal assignment in all cases including fall through of failure to provide a target exponent
+        # in the above resubstantiation
+        return AtomicalsTransferBlueprintBuilder.color_ft_atomicals_regular(
+            ft_atomicals,
+            tx,
+            sort_fifo,
+            is_custom_coloring_activated
+        )
+
+    @classmethod
+    def custom_color_ft_atomicals(cls, ft_atomicals, operations_found_at_inputs, tx):
+        output_colored_map = {}
+        fts_burned = {}
+        cleanly_assigned = True
+        for atomical_id, atomical_info in sorted(ft_atomicals.items()):
+            remaining_value = atomical_info.total_atomical_value
+            for out_idx, txout in enumerate(tx.outputs):
+                expected_output_index = out_idx
+                compact_atomical_id = location_id_bytes_to_compact(atomical_id)
+                expected_value = operations_found_at_inputs["payload"].get(compact_atomical_id, {}).get(
+                    str(expected_output_index),
+                    0
+                )
+                if expected_value <= 0 or remaining_value <= 0:
+                    continue
+                # if expected_value > txout.value,
+                # only txout's value can be assigned;
+                # expected_value will equal txout.value
+                if expected_value > txout.value:
+                    expected_value = txout.value
+                # set cleanly_assigned
+                if expected_value < txout.value:
+                    cleanly_assigned = False
+                output_colored_map[expected_output_index] = output_colored_map.get(expected_output_index) or {
+                    'atomicals': {}}
+                output_colored_map[expected_output_index]['atomicals'][atomical_id] = AtomicalColoredOutputFt(
+                    txout.value,
+                    expected_value,
+                    atomical_info
+                )
+                remaining_value -= expected_value
+            if remaining_value > 0:
+                cleanly_assigned = False
+                fts_burned[atomical_id] = remaining_value
+        return AtomicalFtOutputBlueprintAssignmentSummary(
+            output_colored_map,
+            fts_burned,
+            cleanly_assigned,
+            None)
+
+    @classmethod
+    def color_ft_atomicals_split(cls, ft_atomicals, operations_found_at_inputs, tx, is_custom_coloring_activated):
+        output_colored_map = {}
+        fts_burned = {}
+        cleanly_assigned = True
+        for atomical_id, atomical_info in sorted(ft_atomicals.items()):
+            expected_output_indexes = []
+            remaining_value = atomical_info.total_atomical_value
+            # The FT type has the 'split' (y) method which allows us to selectively split (skip)
+            # a certain total number of token units (satoshis) before beginning to color the outputs. 
+            # Essentially this makes it possible to "split" out multiple FT's located at the same input
+            # If the input at index 0 has the split operation, then it will apply for the atomical token
+            # generally across all inputs and the first output will be skipped
+            total_amount_to_skip = 0
+            # Uses the compact form of atomical id as the keys for developer convenience
+            total_amount_to_skip_potential = operations_found_at_inputs and operations_found_at_inputs.get(
+                'payload').get(location_id_bytes_to_compact(atomical_id))
+            # Sanity check to ensure it is a non-negative integer
+            if isinstance(total_amount_to_skip_potential, int) and total_amount_to_skip_potential >= 0:
+                total_amount_to_skip = total_amount_to_skip_potential
+            total_skipped_so_far = 0
+            # is_custom_coloring logic:
+            # use if/else to keep it simple
+            if is_custom_coloring_activated:
+                for out_idx, txout in enumerate(tx.outputs):
+                    # If the first output should be skipped and we have not yet done so, then skip/ignore it
+                    if total_amount_to_skip > 0 and total_skipped_so_far < total_amount_to_skip:
+                        total_skipped_so_far += txout.value
+                        continue
+                    expected_output_indexes.append(out_idx)
+                    if txout.value <= remaining_value:
+                        expected_value = txout.value
+                    else:
+                        expected_value = remaining_value
+                    remaining_value -= txout.value
+                    output_colored_map[out_idx] = output_colored_map.get(out_idx) or {'atomicals': {}}
+                    output_colored_map[out_idx]['atomicals'][atomical_id] = AtomicalColoredOutputFt(
+                        txout.value,
+                        expected_value,
+                        atomical_info)
+                    if remaining_value == 0:
+                        break
+                    if remaining_value < 0:
+                        remaining_value = 0
+                        cleanly_assigned = False  # Indicates that not everything was cleanly assigned
+                        break
+                if remaining_value != 0:
+                    cleanly_assigned = False
+                    fts_burned[atomical_id] = remaining_value
+            else:
+                for out_idx, txout in enumerate(tx.outputs):
+                    if total_amount_to_skip > 0 and total_skipped_so_far < total_amount_to_skip:
+                        total_skipped_so_far += txout.value
+                        continue
+                    # For all remaining outputs attach colors as long as there is adequate remaining_value left
+                    # to cover the entire output value
+                    if txout.value <= remaining_value:
+                        expected_output_indexes.append(out_idx)
+                        remaining_value -= txout.value
+                        output_colored_map[out_idx] = output_colored_map.get(out_idx) or {'atomicals': {}}
+                        output_colored_map[out_idx]['atomicals'][atomical_id] = AtomicalColoredOutputFt(
+                            txout.value,
+                            txout.value,
+                            atomical_info
+                        )
+                        # We are done assigning all remaining values
+                        if remaining_value == 0:
+                            break
+                    # Exit case when we have no more remaining_value to assign or the next output
+                    # is greater than what we have in remaining_value
+                    if txout.value > remaining_value or remaining_value < 0:
+                        cleanly_assigned = False  # Indicates that not everything was cleanly assigned
+                        fts_burned[atomical_id] = remaining_value
+                        break
+                if remaining_value != 0:
+                    cleanly_assigned = False
+                    fts_burned[atomical_id] = remaining_value
+        return AtomicalFtOutputBlueprintAssignmentSummary(output_colored_map, fts_burned, cleanly_assigned, None)
+
+    @classmethod
+    def color_ft_atomicals_regular(cls, ft_atomicals, tx, sort_fifo, is_custom_coloring_activated):
+        output_colored_map = {}
+        ft_coloring_summary = calculate_outputs_to_color_for_ft_atomical_ids(
+            tx,
+            ft_atomicals,
+            sort_fifo,
+            is_custom_coloring_activated
+        )
+        if not ft_coloring_summary:
+            return AtomicalFtOutputBlueprintAssignmentSummary({}, {}, True, None)
+
+        first_atomical_id = None
+        if ft_coloring_summary.atomicals_list and len(ft_coloring_summary.atomicals_list):
+            first_atomical_id 
= ft_coloring_summary.atomicals_list[0].atomical_id + + if not is_custom_coloring_activated: + for atomical_id, atomical_info in ft_coloring_summary.atomical_id_to_expected_outs_map.items(): + for expected_output_index in atomical_info.expected_outputs: + txout = tx.outputs[expected_output_index] + output_colored_map[expected_output_index] = output_colored_map.get(expected_output_index) or { + 'atomicals': {}} + output_colored_map[expected_output_index]['atomicals'][atomical_id] = AtomicalColoredOutputFt( + txout.value, + txout.value, + atomical_info + ) + return AtomicalFtOutputBlueprintAssignmentSummary( + output_colored_map, + ft_coloring_summary.fts_burned, + ft_coloring_summary.cleanly_assigned, + first_atomical_id + ) + else: + # for multiple expected_outputs case + cleanly_assigned = True + for atomical_id, atomical_info in ft_coloring_summary.atomical_id_to_expected_outs_map.items(): + total_value = atomical_info.expected_values + if not ft_coloring_summary.cleanly_assigned: + cleanly_assigned = False + for expected_output_index in atomical_info.expected_outputs: + txout = tx.outputs[expected_output_index] + output_colored_map[expected_output_index] = output_colored_map.get(expected_output_index) or { + 'atomicals': {}} + if total_value >= txout.value: + expected_value = txout.value + total_value -= expected_value + else: + expected_value = total_value + output_colored_map[expected_output_index]['atomicals'][atomical_id] = AtomicalColoredOutputFt( + txout.value, + expected_value, + atomical_info + ) + return AtomicalFtOutputBlueprintAssignmentSummary( + output_colored_map, + ft_coloring_summary.fts_burned, + cleanly_assigned, + first_atomical_id + ) + + @classmethod + def calculate_output_blueprint( + cls, + get_atomicals_id_mint_info, + tx, + nft_atomicals, + ft_atomicals, + atomicals_spent_at_inputs, + operations_found_at_inputs, + sort_fifo, + is_custom_coloring_activated + ): + nft_blueprint = AtomicalsTransferBlueprintBuilder.calculate_output_blueprint_nfts( + get_atomicals_id_mint_info, + tx, + nft_atomicals, + atomicals_spent_at_inputs, + operations_found_at_inputs, + sort_fifo + ) + ft_blueprint = AtomicalsTransferBlueprintBuilder.calculate_output_blueprint_fts( + tx, + ft_atomicals, + operations_found_at_inputs, + sort_fifo, + is_custom_coloring_activated + ) + return nft_blueprint, ft_blueprint + + # Builds a map and image of all the inputs and their sat_value and atomical_value (adjusted by exponent) + + # This is the base datastructure used to color FT outputs and determine + # what exact sat_value will be needed to maintain input token value to outputs + @classmethod + def build_atomical_input_summaries( + cls, + get_atomicals_id_mint_info, + map_atomical_ids_to_summaries, + atomicals_entry_list, + txin_index + ): + atomicals_id_mint_info_map = {} + # For each input atomical spent at the current input... 
+            for atomicals_entry in atomicals_entry_list:
+                atomical_id = atomicals_entry['atomical_id']
+                # value, = unpack_le_uint64(
+                #     atomicals_entry['data'][HASHX_LEN + SCRIPTHASH_LEN : HASHX_LEN + SCRIPTHASH_LEN + 8]
+                # )
+                # exponent, = unpack_le_uint16_from(
+                #     atomicals_entry['data'][HASHX_LEN + SCRIPTHASH_LEN + 8: HASHX_LEN + SCRIPTHASH_LEN + 8 + 2]
+                # )
+                sat_value = atomicals_entry['data_value']['sat_value']
+                atomical_value = atomicals_entry['data_value']['atomical_value']
+                # Perform a cache lookup for the mint information since we do not want to query multiple times
+                # for same input atomical_id
+                if not atomicals_id_mint_info_map.get(atomical_id):
+                    atomical_mint_info = get_atomicals_id_mint_info(atomical_id, True)
+                    if not atomical_mint_info:
+                        raise AtomicalsTransferBlueprintBuilderError(
+                            f'build_atomical_input_summaries {atomical_id.hex()} not found in mint info. '
+                            f'IndexError.'
+                        )
+                    atomicals_id_mint_info_map[atomical_id] = atomical_mint_info
+                # The first time we encounter the atomical we build the initialization struct
+                # it doesn't matter if it's an NFT or FT
+                # However note that only FTs will have an exponent >= 0 as NFTs will always have exponent = 0
+                if not map_atomical_ids_to_summaries.get(atomical_id):
+                    map_atomical_ids_to_summaries[atomical_id] = AtomicalInputSummary(
+                        atomical_id,
+                        atomicals_id_mint_info_map[atomical_id]['type'],
+                        atomicals_id_mint_info_map[atomical_id]
+                    )
+                # use atomical_value, not value,
+                # for the partially-colored case
+                map_atomical_ids_to_summaries[atomical_id].apply_input(txin_index, sat_value, atomical_value)
+        return map_atomical_ids_to_summaries
+
+    @classmethod
+    def build_atomical_input_summaries_by_type(cls, get_atomicals_id_mint_info, atomicals_spent_at_inputs):
+        map_atomical_ids_to_summaries = {}
+        for txin_index, atomicals_entry_list in atomicals_spent_at_inputs.items():
+            # Accumulate the total input value by atomical_id
+            # The value will be used below to determine the amount of input we can allocate for FT's
+            AtomicalsTransferBlueprintBuilder.build_atomical_input_summaries(
+                get_atomicals_id_mint_info,
+                map_atomical_ids_to_summaries,
+                atomicals_entry_list, txin_index
+            )
+        # Group the atomicals by NFT and FT for easier handling
+        nft_atomicals = {}
+        ft_atomicals = {}
+        for atomical_id, mint_info in map_atomical_ids_to_summaries.items():
+            if mint_info.type == 'NFT':
+                nft_atomicals[atomical_id] = mint_info
+            elif mint_info.type == 'FT':
+                ft_atomicals[atomical_id] = mint_info
+            else:
+                raise AtomicalsTransferBlueprintBuilderError('color_atomicals_outputs: Invalid type. 
IndexError') + atomicals_ids_spent = [] + for atomical_id, unused in nft_atomicals.items(): + atomicals_ids_spent.append(atomical_id) + for atomical_id, unused in ft_atomicals.items(): + atomicals_ids_spent.append(atomical_id) + return nft_atomicals, ft_atomicals, atomicals_ids_spent + + @classmethod + def calculate_nft_output_index_legacy(cls, input_idx, tx, operations_found_at_inputs): + expected_output_index = input_idx + # If it was unspendable output, then just set it to the 0th location + # ...and never allow an NFT atomical to be burned accidentally by having insufficient number of outputs either + # The expected output index will become the 0'th index if the 'x' extract operation was specified + # or there are insufficient outputs + if expected_output_index >= len(tx.outputs) or is_unspendable_genesis(tx.outputs[expected_output_index].pk_script) or is_unspendable_legacy(tx.outputs[expected_output_index].pk_script): expected_output_index = 0 - # Also keep them at the 0'th index if the split command was used - if is_split_operation(operations_found_at_inputs): - expected_output_index = 0 - map_output_idxs_for_atomicals[expected_output_index] = map_output_idxs_for_atomicals.get(expected_output_index) or {'atomicals': {}} - map_output_idxs_for_atomicals[expected_output_index]['atomicals'][atomical_id] = atomical_summary_info - if found_atomical_at_input: - next_output_idx += 1 - return AtomicalNftOutputBlueprintAssignmentSummary(map_output_idxs_for_atomicals) - else: - map_output_idxs_for_atomicals = {} - # Assign NFTs the legacy way with 1:1 inputs to outputs - for atomical_id, atomical_summary_info in nft_atomicals.items(): - expected_output_index = AtomicalsTransferBlueprintBuilder.calculate_nft_output_index_legacy(atomical_summary_info.input_indexes[0].txin_index, tx, operations_found_at_inputs) - map_output_idxs_for_atomicals[expected_output_index] = map_output_idxs_for_atomicals.get(expected_output_index) or {'atomicals': {}} - map_output_idxs_for_atomicals[expected_output_index]['atomicals'][atomical_id] = atomical_summary_info - return AtomicalNftOutputBlueprintAssignmentSummary(map_output_idxs_for_atomicals) - - @classmethod - def calculate_nft_atomicals_splat(cls, nft_atomicals, tx): - # Splat takes all of the NFT atomicals across all inputs (including multiple atomicals at the same utxo) - # and then separates them into their own distinctive output such that the result of the operation is no two atomicals - # will share a resulting output. This operation requires that there are at least as many outputs as there are NFT atomicals - # If there are not enough, then this is considered a noop and those extra NFTs are assigned to output 0 - # If there are enough outputs, then the earliest atomical (sorted lexicographically in ascending order) goes to the 0'th output, - # then the second atomical goes to the 1'st output, etc until all atomicals are assigned to their own output. 
- expected_output_index_incrementing = 0 # Begin assigning splatted atomicals at the 0'th index - output_colored_map = {} - for atomical_id, atomical_summary_info in sorted(nft_atomicals.items()): - expected_output_index = expected_output_index_incrementing - if expected_output_index_incrementing >= len(tx.outputs) or is_unspendable_genesis(tx.outputs[expected_output_index_incrementing].pk_script) or is_unspendable_legacy(tx.outputs[expected_output_index_incrementing].pk_script): - expected_output_index = 0 - output_colored_map[expected_output_index] = output_colored_map.get(expected_output_index) or {'atomicals': {}} - output_colored_map[expected_output_index]['atomicals'][atomical_id] = atomical_summary_info - expected_output_index_incrementing += 1 - return AtomicalNftOutputBlueprintAssignmentSummary(output_colored_map) - - @classmethod - def calculate_output_blueprint_nfts(cls, get_atomicals_id_mint_info, tx, nft_atomicals, atomicals_spent_at_inputs, operations_found_at_inputs, sort_fifo): - if not nft_atomicals or len(nft_atomicals) == 0: - return AtomicalNftOutputBlueprintAssignmentSummary({}) - should_splat_nft_atomicals = is_splat_operation(operations_found_at_inputs) - if should_splat_nft_atomicals and len(nft_atomicals.keys()) > 0: - return AtomicalsTransferBlueprintBuilder.calculate_nft_atomicals_splat(nft_atomicals, tx) - else: - # To sort by fifo for NFTs, we also need to calculate a mapping of the nfts to inputs first - nft_map = AtomicalsTransferBlueprintBuilder.build_nft_input_idx_to_atomical_map(get_atomicals_id_mint_info, atomicals_spent_at_inputs) - return AtomicalsTransferBlueprintBuilder.calculate_nft_atomicals_regular(nft_map, nft_atomicals, tx, operations_found_at_inputs, sort_fifo) - - @classmethod - def calculate_output_blueprint_fts(cls, tx, ft_atomicals, operations_found_at_inputs, sort_fifo, is_custom_coloring_activated): - if not ft_atomicals or len(ft_atomicals) == 0: - return AtomicalFtOutputBlueprintAssignmentSummary({}, {}, True, None) - - # Split apart multiple NFT/FT from a UTXO - should_split_ft_atomicals = is_split_operation(operations_found_at_inputs) - if should_split_ft_atomicals: - return AtomicalsTransferBlueprintBuilder.color_ft_atomicals_split(ft_atomicals, operations_found_at_inputs, tx, is_custom_coloring_activated) - - should_custom_colored_ft_atomicals = is_custom_colored_operation(operations_found_at_inputs) and is_custom_coloring_activated - if should_custom_colored_ft_atomicals: - return AtomicalsTransferBlueprintBuilder.custom_color_ft_atomicals(ft_atomicals, operations_found_at_inputs, tx) - # Normal assignment in all cases including fall through of failure to provide a target exponent in the above resubstantiation - return AtomicalsTransferBlueprintBuilder.color_ft_atomicals_regular(ft_atomicals, tx, sort_fifo, is_custom_coloring_activated) - - @classmethod - def custom_color_ft_atomicals(cls, ft_atomicals, operations_found_at_inputs, tx): - output_colored_map = {} - fts_burned = {} - cleanly_assigned = True - for atomical_id, atomical_info in sorted(ft_atomicals.items()): - remaining_value = atomical_info.total_atomical_value - for out_idx, txout in enumerate(tx.outputs): - expected_output_index = out_idx - compact_atomical_id = location_id_bytes_to_compact(atomical_id) - expected_value = operations_found_at_inputs["payload"].get(compact_atomical_id, {}).get(str(expected_output_index), 0) - if expected_value <= 0 or remaining_value <= 0: - continue - # if expected_value > txout.value - # only can assigned txout's value - # 
expected_value will equal to txout.value - if expected_value > txout.value: - expected_value = txout.value - # set cleanly_assigned - if expected_value < txout.value: - cleanly_assigned = False - output_colored_map[expected_output_index] = output_colored_map.get(expected_output_index) or {'atomicals': {}} - output_colored_map[expected_output_index]['atomicals'][atomical_id] = AtomicalColoredOutputFt(txout.value, expected_value, atomical_info) - remaining_value -= expected_value - if remaining_value > 0: - cleanly_assigned = False - fts_burned[atomical_id] = remaining_value - return AtomicalFtOutputBlueprintAssignmentSummary(output_colored_map, fts_burned, cleanly_assigned, None) - - @classmethod - def color_ft_atomicals_split(cls, ft_atomicals, operations_found_at_inputs, tx, is_custom_coloring_activated): - output_colored_map = {} - fts_burned = {} - cleanly_assigned = True - for atomical_id, atomical_info in sorted(ft_atomicals.items()): - expected_output_indexes = [] - remaining_value = atomical_info.total_atomical_value - # The FT type has the 'split' (y) method which allows us to selectively split (skip) a certain total number of token units (satoshis) - # before beginning to color the outputs. - # Essentially this makes it possible to "split" out multiple FT's located at the same input - # If the input at index 0 has the split operation, then it will apply for the atomical token generally across all inputs and the first output will be skipped - total_amount_to_skip = 0 - # Uses the compact form of atomical id as the keys for developer convenience - total_amount_to_skip_potential = operations_found_at_inputs and operations_found_at_inputs.get('payload').get(location_id_bytes_to_compact(atomical_id)) - # Sanity check to ensure it is a non-negative integer - if isinstance(total_amount_to_skip_potential, int) and total_amount_to_skip_potential >= 0: - total_amount_to_skip = total_amount_to_skip_potential - total_skipped_so_far = 0 - # is_custom_coloring logic - # use if else keep it simple - if is_custom_coloring_activated: - for out_idx, txout in enumerate(tx.outputs): - # If the first output should be skipped and we have not yet done so, then skip/ignore it - if total_amount_to_skip > 0 and total_skipped_so_far < total_amount_to_skip: - total_skipped_so_far += txout.value - continue - expected_output_indexes.append(out_idx) - if txout.value <= remaining_value: - expected_value = txout.value - else: - expected_value = remaining_value - remaining_value -= txout.value - output_colored_map[out_idx] = output_colored_map.get(out_idx) or {'atomicals': {}} - output_colored_map[out_idx]['atomicals'][atomical_id] = AtomicalColoredOutputFt(txout.value, expected_value, atomical_info) - if remaining_value == 0: - break - if remaining_value < 0: - remaining_value = 0 - cleanly_assigned = False # Used to indicate that all was cleanly assigned - break - if remaining_value != 0: - cleanly_assigned = False - fts_burned[atomical_id] = remaining_value - else: + # If this was the 'split' (y) command, then also move them to the 0th output + if is_split_operation(operations_found_at_inputs): + expected_output_index = 0 + return expected_output_index + + # Assign the ft quantity basic from the start_out_idx to the end until exhausted + # Returns the sequence of output indexes that matches until the final one that matched + # Also returns whether it fit cleanly in (ie: exact with no left overs or under) + @classmethod + def assign_expected_outputs_basic(cls, total_value_to_assign, tx, start_out_idx, 
is_custom_coloring_activated): + expected_output_indexes = [] + remaining_value = total_value_to_assign + idx_count = 0 + if start_out_idx >= len(tx.outputs): + return False, expected_output_indexes, 0 for out_idx, txout in enumerate(tx.outputs): - if total_amount_to_skip > 0 and total_skipped_so_far < total_amount_to_skip: - total_skipped_so_far += txout.value - continue - # For all remaining outputs attach colors as long as there is adequate remaining_value left to cover the entire output value - if txout.value <= remaining_value: - expected_output_indexes.append(out_idx) - remaining_value -= txout.value - output_colored_map[out_idx] = output_colored_map.get(out_idx) or {'atomicals': {}} - output_colored_map[out_idx]['atomicals'][atomical_id] = AtomicalColoredOutputFt(txout.value, txout.value, atomical_info) - # We are done assigning all remaining values - if remaining_value == 0: - break - # Exit case when we have no more remaining_value to assign or the next output is greater than what we have in remaining_value - if txout.value > remaining_value or remaining_value < 0: - cleanly_assigned = False # Used to indicate that all was cleanly assigned - fts_burned[atomical_id] = remaining_value - break - if remaining_value != 0: - cleanly_assigned = False - fts_burned[atomical_id] = remaining_value - return AtomicalFtOutputBlueprintAssignmentSummary(output_colored_map, fts_burned, cleanly_assigned, None) - - @classmethod - def color_ft_atomicals_regular(cls, ft_atomicals, tx, sort_fifo, is_custom_coloring_activated): - output_colored_map = {} - ft_coloring_summary = calculate_outputs_to_color_for_ft_atomical_ids(tx, ft_atomicals, sort_fifo, is_custom_coloring_activated) - if not ft_coloring_summary: - return AtomicalFtOutputBlueprintAssignmentSummary({}, {}, True, None) - - first_atomical_id = None - if ft_coloring_summary.atomicals_list and len(ft_coloring_summary.atomicals_list): - first_atomical_id = ft_coloring_summary.atomicals_list[0].atomical_id - - if not is_custom_coloring_activated: - for atomical_id, atomical_info in ft_coloring_summary.atomical_id_to_expected_outs_map.items(): - for expected_output_index in atomical_info.expected_outputs: - txout = tx.outputs[expected_output_index] - output_colored_map[expected_output_index] = output_colored_map.get(expected_output_index) or {'atomicals': {}} - output_colored_map[expected_output_index]['atomicals'][atomical_id] = AtomicalColoredOutputFt(txout.value, txout.value, atomical_info) - return AtomicalFtOutputBlueprintAssignmentSummary(output_colored_map, ft_coloring_summary.fts_burned, ft_coloring_summary.cleanly_assigned, first_atomical_id) - else: - # for multiple expected_outputs case - cleanly_assigned = True - for atomical_id, atomical_info in ft_coloring_summary.atomical_id_to_expected_outs_map.items(): - total_value = atomical_info.expected_values - if not ft_coloring_summary.cleanly_assigned: - cleanly_assigned = False - for expected_output_index in atomical_info.expected_outputs: - txout = tx.outputs[expected_output_index] - output_colored_map[expected_output_index] = output_colored_map.get(expected_output_index) or {'atomicals': {}} - if total_value >= txout.value: - expected_value = txout.value - total_value -= expected_value - else: - expected_value = total_value - output_colored_map[expected_output_index]['atomicals'][atomical_id] = AtomicalColoredOutputFt(txout.value, expected_value, atomical_info) - return AtomicalFtOutputBlueprintAssignmentSummary(output_colored_map, ft_coloring_summary.fts_burned, cleanly_assigned, 
first_atomical_id) - - @classmethod - def calculate_output_blueprint(cls, get_atomicals_id_mint_info, tx, nft_atomicals, ft_atomicals, atomicals_spent_at_inputs, operations_found_at_inputs, sort_fifo, is_custom_coloring_activated): - nft_blueprint = AtomicalsTransferBlueprintBuilder.calculate_output_blueprint_nfts(get_atomicals_id_mint_info, tx, nft_atomicals, atomicals_spent_at_inputs, operations_found_at_inputs, sort_fifo) - ft_blueprint = AtomicalsTransferBlueprintBuilder.calculate_output_blueprint_fts(tx, ft_atomicals, operations_found_at_inputs, sort_fifo, is_custom_coloring_activated) - return nft_blueprint, ft_blueprint - - # Builds a map and image of all the inputs and their sat_value and atomical_value (adjusted by exponent) - # This is the base datastructure used to color FT outputs and determine what exact sat_value will be needed to maintain input token value to outputs - @classmethod - def build_atomical_input_summaries(cls, get_atomicals_id_mint_info, map_atomical_ids_to_summaries, atomicals_entry_list, txin_index): - atomicals_id_mint_info_map = {} - # For each input atomical spent at the current input... - for atomicals_entry in atomicals_entry_list: - atomical_id = atomicals_entry['atomical_id'] - # value, = unpack_le_uint64(atomicals_entry['data'][HASHX_LEN + SCRIPTHASH_LEN : HASHX_LEN + SCRIPTHASH_LEN + 8]) - # exponent, = unpack_le_uint16_from(atomicals_entry['data'][HASHX_LEN + SCRIPTHASH_LEN + 8: HASHX_LEN + SCRIPTHASH_LEN + 8 + 2]) - sat_value = atomicals_entry['data_value']['sat_value'] - atomical_value = atomicals_entry['data_value']['atomical_value'] - # Perform a cache lookup for the mint information since we do not want to query multiple times for same input atomical_id - if not atomicals_id_mint_info_map.get(atomical_id): - atomical_mint_info = get_atomicals_id_mint_info(atomical_id, True) - if not atomical_mint_info: - raise AtomicalsTransferBlueprintBuilderError(f'build_atomical_input_summaries {atomical_id.hex()} not found in mint info. 
IndexError.') - atomicals_id_mint_info_map[atomical_id] = atomical_mint_info - # The first time we encounter the atomical we build the initialization struct - # it doesn't matter if it's an NFT or FT - # However note that only FTs will have an exponent >= 0 as NFT will always be exponent = 0 - if not map_atomical_ids_to_summaries.get(atomical_id): - map_atomical_ids_to_summaries[atomical_id] = AtomicalInputSummary(atomical_id, atomicals_id_mint_info_map[atomical_id]['type'], atomicals_id_mint_info_map[atomical_id]) - # use atomical_value, not value - # for Partially case - map_atomical_ids_to_summaries[atomical_id].apply_input(txin_index, sat_value, atomical_value) - return map_atomical_ids_to_summaries - - @classmethod - def build_atomical_input_summaries_by_type(cls, get_atomicals_id_mint_info, atomicals_spent_at_inputs): - map_atomical_ids_to_summaries = {} - for txin_index, atomicals_entry_list in atomicals_spent_at_inputs.items(): - # Accumulate the total input value by atomical_id - # The value will be used below to determine the amount of input we can allocate for FT's - AtomicalsTransferBlueprintBuilder.build_atomical_input_summaries(get_atomicals_id_mint_info, map_atomical_ids_to_summaries, atomicals_entry_list, txin_index) - # Group the atomicals by NFT and FT for easier handling - nft_atomicals = {} - ft_atomicals = {} - for atomical_id, mint_info in map_atomical_ids_to_summaries.items(): - if mint_info.type == 'NFT': - nft_atomicals[atomical_id] = mint_info - elif mint_info.type == 'FT': - ft_atomicals[atomical_id] = mint_info - else: - raise AtomicalsTransferBlueprintBuilderError(f'color_atomicals_outputs: Invalid type. IndexError') - atomicals_ids_spent = [] - for atomical_id, unused in nft_atomicals.items(): - atomicals_ids_spent.append(atomical_id) - for atomical_id, unused in ft_atomicals.items(): - atomicals_ids_spent.append(atomical_id) - return nft_atomicals, ft_atomicals, atomicals_ids_spent - - @classmethod - def calculate_nft_output_index_legacy(cls, input_idx, tx, operations_found_at_inputs): - expected_output_index = input_idx - # If it was unspendable output, then just set it to the 0th location - # ...and never allow an NFT atomical to be burned accidentally by having insufficient number of outputs either - # The expected output index will become the 0'th index if the 'x' extract operation was specified or there are insufficient outputs - if expected_output_index >= len(tx.outputs) or is_unspendable_genesis(tx.outputs[expected_output_index].pk_script) or is_unspendable_legacy(tx.outputs[expected_output_index].pk_script): - expected_output_index = 0 - # If this was the 'split' (y) command, then also move them to the 0th output - if is_split_operation(operations_found_at_inputs): - expected_output_index = 0 - return expected_output_index - - # Assign the ft quantity basic from the start_out_idx to the end until exhausted - # Returns the sequence of output indexes that matches until the final one that matched - # Also returns whether it fit cleanly in (ie: exact with no left overs or under) - @classmethod - def assign_expected_outputs_basic(cls, total_value_to_assign, tx, start_out_idx, is_custom_coloring_activated): - expected_output_indexes = [] - remaining_value = total_value_to_assign - idx_count = 0 - if start_out_idx >= len(tx.outputs): - return False, expected_output_indexes, 0 - for out_idx, txout in enumerate(tx.outputs): - # Only consider outputs from the starting index - if idx_count < start_out_idx: - idx_count += 1 - continue - # For all remaining 
outputs attach colors as long as there is adequate remaining_value left to cover the entire output value - if is_unspendable_genesis(txout.pk_script) or is_unspendable_legacy(txout.pk_script): - idx_count += 1 - continue - if is_custom_coloring_activated: - # Add out_idx - expected_output_indexes.append(out_idx) - remaining_value -= txout.value - if remaining_value > 0: + # Only consider outputs from the starting index + if idx_count < start_out_idx: + idx_count += 1 continue - if remaining_value == 0: - return True, expected_output_indexes, remaining_value - return False, expected_output_indexes, remaining_value - else: - if txout.value <= remaining_value: + # For all remaining outputs attach colors as long as there is adequate remaining_value left + # to cover the entire output value + if is_unspendable_genesis(txout.pk_script) or is_unspendable_legacy(txout.pk_script): + idx_count += 1 + continue + if is_custom_coloring_activated: + # Add out_idx expected_output_indexes.append(out_idx) remaining_value -= txout.value + if remaining_value > 0: + continue if remaining_value == 0: - # The token input was fully exhausted cleanly into the outputs return True, expected_output_indexes, remaining_value - # Exit case output is greater than what we have in remaining_value - else: - # There was still some token units left, but the next output was greater than the amount. Therefore we burned the remainder tokens. return False, expected_output_indexes, remaining_value - idx_count += 1 - # There was still some token units left, but there were no more outputs to take the quantity. Tokens were burned. - return False, expected_output_indexes, remaining_value - - @classmethod - def get_atomical_id_for_payment_marker_if_found(cls, tx): - ''' Get the atomical id if found for a payment marker op_return''' - found_atomical_id = None - for idx, txout in enumerate(tx.outputs): - # Note that we accept 'p' and 'd' as payment marker types for either dmitem or subrealm payments now - found_atomical_id = is_op_return_subrealm_payment_marker_atomical_id(txout.pk_script) - if found_atomical_id: - return found_atomical_id, idx, 'subrealm' - found_atomical_id = is_op_return_dmitem_payment_marker_atomical_id(txout.pk_script) - if found_atomical_id: - return found_atomical_id, idx, 'dmitem' - - return found_atomical_id, None, None - - def are_payments_satisfied(self, expected_payment_outputs): - if not isinstance(expected_payment_outputs, dict) or len(expected_payment_outputs.keys()) < 1: - return False - - # Just in case do not allow payments to be satisfied for split operation as it allows reassigning ARC20 - if self.is_split_operation(): - return False - - # Just in case also ensure there was a payment marker for the current tx - atomical_id_to_pay, marker_idx, entity_type = AtomicalsTransferBlueprintBuilder.get_atomical_id_for_payment_marker_if_found(self.tx) - if not atomical_id_to_pay: - return False - - expected_output_keys_satisfied = {} - # Set up the expected output scripts to be satisfied for the payments - for output_script_key, output_script_details in expected_payment_outputs.items(): - ft_atomical_id = output_script_details.get('id') - if ft_atomical_id: - if not is_compact_atomical_id(ft_atomical_id): - return False - # Map the output script hex with the atomical id that it must be colored with - atomical_id_expected_color_long_from = compact_to_location_id_bytes(ft_atomical_id) - expected_output_keys_satisfied[output_script_key + atomical_id_expected_color_long_from.hex()] = False - else: - # Map the 
output script hex only - expected_output_keys_satisfied[output_script_key] = False - - # Prepare the mapping of which ARC20 is paid at which output - ft_coloring_summary = calculate_outputs_to_color_for_ft_atomical_ids(self.tx, self.ft_atomicals, self.sort_fifo, self.is_custom_coloring_activated) - output_idx_to_atomical_id_map = {} - if ft_coloring_summary: - output_idx_to_atomical_id_map = build_reverse_output_to_atomical_id_exponent_map(ft_coloring_summary.atomical_id_to_expected_outs_map) - - # For each of the outputs, assess whether it matches any of the required payment output expectations - for idx, txout in enumerate(self.tx.outputs): - output_script_hex = txout.pk_script.hex() - # Ensure there is a payment rule for the current output of the tx, or skip it - expected_output_payment_value_dict = expected_payment_outputs.get(output_script_hex, None) - if not expected_output_payment_value_dict or not isinstance(expected_output_payment_value_dict, dict): - continue - - # There is no value defined or the expected payment is below the dust limit, or skip it - expected_output_payment_value = expected_output_payment_value_dict.get('v', None) - if not is_integer_num(expected_output_payment_value) or expected_output_payment_value < SUBNAME_MIN_PAYMENT_DUST_LIMIT: - continue - - expected_output_payment_id_type = expected_output_payment_value_dict.get('id', None) - # If it's a regular satoshi payment, then just check it is at least the amount of the expected payment value - if not expected_output_payment_id_type: - # Normal satoshi payment just check the amount of the sats is the expected amount - if txout.value >= expected_output_payment_value: - expected_output_keys_satisfied[output_script_hex] = True # Mark that the output was matched at least once - else: - # Otherwise it is a payment in a specific ARC20 fungible token - expected_output_payment_id_type_long_form = compact_to_location_id_bytes(expected_output_payment_id_type) - # Check in the reverse map if the current output idx is colored with the expected color - output_summary = output_idx_to_atomical_id_map.get(idx) - if output_summary and output_summary.get(expected_output_payment_id_type_long_form, None) != None: - # Ensure the normalized atomical_value is greater than or equal to the expected payment amount in that token type - # exponent_for_for_atomical_id = output_summary.get(expected_output_payment_id_type_long_form) - atomical_value = get_nominal_atomical_value(txout.value) - if atomical_value >= expected_output_payment_value: - expected_output_keys_satisfied[output_script_hex + expected_output_payment_id_type_long_form.hex()] = True # Mark that the output was matched at least once - # Check if there are any unsatisfied requirements - for output_script_not_used, satisfied in expected_output_keys_satisfied.items(): - if not satisfied: - self.logger.warning(f'are_payments_satisfied is_all_outputs_matched_not_satisfied={expected_output_keys_satisfied} tx_hash={hash_to_hex_str(self.tx_hash)}') - return False - # We got this far that means all requirements were satisfied, do one final check to ensure there was at least one payment output required - return len(expected_output_keys_satisfied) > 0 - - def validate_ft_transfer_has_no_inflation(self, atomical_id_to_expected_outs_map, tx, ft_atomicals): - sanity_check_sums = {} - - for atomical_id, outputs_to_color in atomical_id_to_expected_outs_map.items(): - sanity_check_sums[atomical_id] = 0 - for expected_output_index in outputs_to_color: - sanity_check_sums[atomical_id] += 
tx.outputs[expected_output_index].value - - # Sanity check that there can be no inflation - for atomical_id, ft_info in sorted(ft_atomicals.items()): - sum_out_value = sanity_check_sums.get(atomical_id) - input_value = ft_info['atomical_value'] - if sum_out_value and sum_out_value > input_value: - atomical_id_compact = location_id_bytes_to_compact(atomical_id) - raise AtomicalsTransferBlueprintBuilderError( - 'validate_ft_transfer_has_no_inflation: ' - 'Fatal error the output sum of outputs is greater than input sum for Atomical: ' - f'atomical_id={atomical_id_compact} ' - f'input_value={input_value} ' - f'sum_out_value={sum_out_value} ' - f'{hash_to_hex_str(self.tx_hash)} ' - f'ft_atomicals={ft_atomicals}' + else: + if txout.value <= remaining_value: + expected_output_indexes.append(out_idx) + remaining_value -= txout.value + if remaining_value == 0: + # The token input was fully exhausted cleanly into the outputs + return True, expected_output_indexes, remaining_value + # Exit case output is greater than what we have in remaining_value + else: + # There was still some token units left, but the next output was greater than the amount. + # Therefore, we burned the remainder tokens. + return False, expected_output_indexes, remaining_value + idx_count += 1 + # There was still some token units left, but there were no more outputs to take the quantity. + # Tokens were burned. + return False, expected_output_indexes, remaining_value + + @classmethod + def get_atomical_id_for_payment_marker_if_found(cls, tx): + """ Get the atomical id if found for a payment marker op_return""" + found_atomical_id = None + for idx, txout in enumerate(tx.outputs): + # Note that we accept 'p' and 'd' as payment marker types for either dmitem or subrealm payments now + found_atomical_id = is_op_return_subrealm_payment_marker_atomical_id(txout.pk_script) + if found_atomical_id: + return found_atomical_id, idx, 'subrealm' + found_atomical_id = is_op_return_dmitem_payment_marker_atomical_id(txout.pk_script) + if found_atomical_id: + return found_atomical_id, idx, 'dmitem' + + return found_atomical_id, None, None + + def are_payments_satisfied(self, expected_payment_outputs): + if not isinstance(expected_payment_outputs, dict) or len(expected_payment_outputs.keys()) < 1: + return False + + # Just in case do not allow payments to be satisfied for split operation as it allows reassigning ARC20 + if self.is_split_operation(): + return False + + # Just in case also ensure there was a payment marker for the current tx + id_to_pay, _, _ = AtomicalsTransferBlueprintBuilder.get_atomical_id_for_payment_marker_if_found(self.tx) + if not id_to_pay: + return False + + expected_output_keys_satisfied = {} + # Set up the expected output scripts to be satisfied for the payments + for output_script_key, output_script_details in expected_payment_outputs.items(): + ft_atomical_id = output_script_details.get('id') + if ft_atomical_id: + if not is_compact_atomical_id(ft_atomical_id): + return False + # Map the output script hex with the atomical id that it must be colored with + atomical_id_expected_color_long_from = compact_to_location_id_bytes(ft_atomical_id) + expected_output_keys_satisfied[output_script_key + atomical_id_expected_color_long_from.hex()] = False + else: + # Map the output script hex only + expected_output_keys_satisfied[output_script_key] = False + + # Prepare the mapping of which ARC20 is paid at which output + ft_coloring_summary = calculate_outputs_to_color_for_ft_atomical_ids( + self.tx, + self.ft_atomicals, + 
self.sort_fifo, + self.is_custom_coloring_activated + ) + output_idx_to_atomical_id_map = {} + if ft_coloring_summary: + output_idx_to_atomical_id_map = build_reverse_output_to_atomical_id_exponent_map( + ft_coloring_summary.atomical_id_to_expected_outs_map ) - - def is_split_operation(self): - return is_split_operation(self.operations_found_at_inputs) - - def get_nft_output_blueprint(self): - return self.nft_output_blueprint - - def get_ft_output_blueprint(self): - return self.ft_output_blueprint - - def get_are_fts_burned(self): - return self.are_fts_burned - - def get_fts_burned(self): - return self.fts_burned - - def get_atomical_ids_spent(self): - return self.atomical_ids_spent \ No newline at end of file + + # For each of the outputs, assess whether it matches any of the required payment output expectations + for idx, txout in enumerate(self.tx.outputs): + output_script_hex = txout.pk_script.hex() + # Ensure there is a payment rule for the current output of the tx, or skip it + expected_output_payment_value_dict = expected_payment_outputs.get(output_script_hex, None) + if not expected_output_payment_value_dict or not isinstance(expected_output_payment_value_dict, dict): + continue + + # There is no value defined or the expected payment is below the dust limit, or skip it + expected_output_payment_value = expected_output_payment_value_dict.get('v', None) + if not is_integer_num( + expected_output_payment_value) or expected_output_payment_value < SUBNAME_MIN_PAYMENT_DUST_LIMIT: + continue + + expected_output_payment_id_type = expected_output_payment_value_dict.get('id', None) + # If it's a regular satoshi payment, then just check it is at least the amount of the expected payment value + if not expected_output_payment_id_type: + # Normal satoshi payment just check the amount of the sats is the expected amount + if txout.value >= expected_output_payment_value: + # Mark that the output was matched at least once + expected_output_keys_satisfied[output_script_hex] = True + else: + # Otherwise it is a payment in a specific ARC20 fungible token + expected_output_payment_id_type_long_form = compact_to_location_id_bytes( + expected_output_payment_id_type) + # Check in the reverse map if the current output idx is colored with the expected color + output_summary = output_idx_to_atomical_id_map.get(idx) + if output_summary and output_summary.get(expected_output_payment_id_type_long_form, None) is not None: + # Ensure the normalized atomical_value is greater than + # or equal to the expected payment amount in that token type. + # exponent_for_for_atomical_id = output_summary.get(expected_output_payment_id_type_long_form) + atomical_value = get_nominal_atomical_value(txout.value) + if atomical_value >= expected_output_payment_value: + # Mark that the output was matched at least once + key = output_script_hex + expected_output_payment_id_type_long_form.hex() + expected_output_keys_satisfied[key] = True + # Check if there are any unsatisfied requirements + for output_script_not_used, satisfied in expected_output_keys_satisfied.items(): + if not satisfied: + self.logger.warning( + f'are_payments_satisfied ' + f'is_all_outputs_matched_not_satisfied={expected_output_keys_satisfied} ' + f'tx_hash={hash_to_hex_str(self.tx_hash)}' + ) + return False + # We got this far that means all requirements were satisfied, + # do one final check to ensure there was at least one payment output required. 
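+        # Illustrative shape of the satisfied-keys map at this point (hypothetical
+        # values): {'76a914...88ac': True, '0014...<atomical id hex>': True}.
+        # An empty map would mean no payment script was required at all, which the
+        # final length check below treats as unpaid.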
+ return len(expected_output_keys_satisfied) > 0 + + def validate_ft_transfer_has_no_inflation(self, atomical_id_to_expected_outs_map, tx, ft_atomicals): + sanity_check_sums = {} + + for atomical_id, outputs_to_color in atomical_id_to_expected_outs_map.items(): + sanity_check_sums[atomical_id] = 0 + for expected_output_index in outputs_to_color: + sanity_check_sums[atomical_id] += tx.outputs[expected_output_index].value + + # Sanity check that there can be no inflation + for atomical_id, ft_info in sorted(ft_atomicals.items()): + sum_out_value = sanity_check_sums.get(atomical_id) + input_value = ft_info['atomical_value'] + if sum_out_value and sum_out_value > input_value: + atomical_id_compact = location_id_bytes_to_compact(atomical_id) + raise AtomicalsTransferBlueprintBuilderError( + 'validate_ft_transfer_has_no_inflation: ' + 'Fatal error the output sum of outputs is greater than input sum for Atomical: ' + f'atomical_id={atomical_id_compact} ' + f'input_value={input_value} ' + f'sum_out_value={sum_out_value} ' + f'{hash_to_hex_str(self.tx_hash)} ' + f'ft_atomicals={ft_atomicals}' + ) + + def is_split_operation(self): + return is_split_operation(self.operations_found_at_inputs) + + def get_nft_output_blueprint(self): + return self.nft_output_blueprint + + def get_ft_output_blueprint(self): + return self.ft_output_blueprint + + def get_are_fts_burned(self): + return self.are_fts_burned + + def get_fts_burned(self): + return self.fts_burned + + def get_atomical_ids_spent(self): + return self.atomical_ids_spent diff --git a/electrumx/lib/util_atomicals.py b/electrumx/lib/util_atomicals.py index 0573b4a7..6bd3297e 100644 --- a/electrumx/lib/util_atomicals.py +++ b/electrumx/lib/util_atomicals.py @@ -414,7 +414,7 @@ def get_if_parent_spent_in_same_tx(parent_atomical_id_compact, expected_minimum_ input_value = unpack_le_uint64(atomical_entry['value'][ HASHX_LEN + SCRIPTHASH_LEN : HASHX_LEN + SCRIPTHASH_LEN + 8]) id_to_total_value_map[atomical_id] += input_value total_sum = id_to_total_value_map.get(parent_atomical_id) - if total_sum == None: + if total_sum is None: return False if total_sum >= expected_minimum_total_value: @@ -744,7 +744,7 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): return None, None dft_mode = mint_info['args'].get('md') - if dft_mode != 1 and dft_mode != 0 and dft_mode != None: + if dft_mode != 1 and dft_mode != 0 and dft_mode is not None: logger.warning(f'DFT init has invalid md {hash_to_hex_str(tx_hash)}, {dft_mode}. Skipping...') return None, None @@ -771,7 +771,7 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): return None, None # Do not require mint bitworkc if there is no mint bitworkc increment - if bci == None: + if bci is None: pass elif not isinstance(bci, int) or bci < 0 or bci > 64: logger.warning(f'DFT init has invalid bci {hash_to_hex_str(tx_hash)}, {bci}. Skipping...') @@ -783,7 +783,7 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): return None, None # Do not require mint bitworkr if there is no mint bitworkr increment - if bri == None: + if bri is None: pass elif not isinstance(bri, int) or bri < 0 or bri > 64: logger.warning(f'DFT init has invalid bri {hash_to_hex_str(tx_hash)}, {bri}. 
Skipping...') @@ -808,7 +808,7 @@ def populate_args_meta_ctx_init(mint_info, op_found_payload): return None, None max_mints_global = mint_info['args'].get('maxg') - if max_mints_global != None: + if max_mints_global is not None: if not isinstance(max_mints_global, int) or max_mints_global < DFT_MINT_MAX_MIN_COUNT or max_mints_global > DFT_MINT_MAX_MAX_COUNT_DENSITY: logger.warning(f'DFT init has invalid maxg {hash_to_hex_str(tx_hash)}, {max_mints_global}. Skipping...') return None, None @@ -1185,7 +1185,7 @@ def parse_protocols_operations_from_witness_for_input(txinwitness): found_operation_definition = True # Parse to ensure it is in the right format operation_type, payload = parse_operation_from_script(script, n + 5) - if operation_type != None: + if operation_type is not None: return operation_type, payload break if found_operation_definition: diff --git a/electrumx/server/block_processor.py b/electrumx/server/block_processor.py index 01da66d7..8ef0c53b 100644 --- a/electrumx/server/block_processor.py +++ b/electrumx/server/block_processor.py @@ -6,7 +6,7 @@ # See the file "LICENCE" for information about the copyright # and warranty status of this software. -'''Block prefetcher and chain processor.''' +"""Block prefetcher and chain processor.""" import asyncio import time @@ -88,7 +88,7 @@ class Prefetcher: - '''Prefetches blocks (in the forward direction only).''' + """Prefetches blocks (in the forward direction only).""" def __init__( self, @@ -117,7 +117,7 @@ def __init__( self.polling_delay = polling_delay_secs async def main_loop(self, bp_height): - '''Loop forever polling for more blocks.''' + """Loop forever polling for more blocks.""" await self.reset_height(bp_height) while True: try: @@ -134,7 +134,7 @@ async def main_loop(self, bp_height): self.logger.exception(f'ignoring unexpected exception') def get_prefetched_blocks(self): - '''Called by block processor when it is processing queued blocks.''' + """Called by block processor when it is processing queued blocks.""" blocks = self.blocks self.blocks = [] self.cache_size = 0 @@ -142,12 +142,12 @@ def get_prefetched_blocks(self): return blocks async def reset_height(self, height): - '''Reset to prefetch blocks from the block processor's height. + """Reset to prefetch blocks from the block processor's height. Used in blockchain reorganisations. This coroutine can be called asynchronously to the _prefetch_blocks coroutine so we must synchronize with a semaphore. - ''' + """ async with self.semaphore: self.blocks.clear() self.cache_size = 0 @@ -165,10 +165,10 @@ async def reset_height(self, height): self.logger.info(f'caught up to daemon height {daemon_height:,d}') async def _prefetch_blocks(self): - '''Prefetch some blocks and put them on the queue. + """Prefetch some blocks and put them on the queue. Repeats until the queue is full or caught up. - ''' + """ daemon = self.daemon daemon_height = await daemon.height() async with self.semaphore: @@ -211,16 +211,17 @@ async def _prefetch_blocks(self): self.refill_event.clear() return True + class ChainError(Exception): - '''Raised on error processing blocks.''' + """Raised on error processing blocks.""" class BlockProcessor: - '''Process blocks and update the DB state to match. + """Process blocks and update the DB state to match. Employ a prefetcher to prefetch blocks in batches for processing. Coordinate backing up in case of chain reorganisations. 
- ''' + """ def __init__(self, env: 'Env', db: DB, daemon: Daemon, notifications: 'Notifications'): self.env = env @@ -286,7 +287,7 @@ def __init__(self, env: 'Env', db: DB, daemon: Daemon, notifications: 'Notificat "mint-dft": 1, "mint-ft": 2, "mint-nft": 3, "mint-nft-realm": 4, "mint-nft-subrealm": 5, "mint-nft-container": 6, "mint-nft-dmitem": 7, "dft": 20, "dat": 21, "split": 22, "splat": 23, - "seal": 24, "evt": 25, "mod": 26, + "seal": 24, "evt": 25, "mod": 26, "custom-color": 27, "transfer": 30, "payment-subrealm": 40, "payment-dmitem": 41, "payment-subrealm-failed": 42, "payment-dmitem-failed": 43, "mint-dft-failed": 51, "mint-ft-failed": 52, "mint-nft-failed": 53, "mint-nft-realm-failed": 54, @@ -308,9 +309,9 @@ async def run_in_thread_locked(): return await asyncio.shield(run_in_thread_locked()) async def check_and_advance_blocks(self, raw_blocks): - '''Process the list of raw blocks passed. Detects and handles + """Process the list of raw blocks passed. Detects and handles reorgs. - ''' + """ if not raw_blocks: return first = self.height + 1 @@ -348,10 +349,10 @@ async def check_and_advance_blocks(self, raw_blocks): async def reorg_chain(self, count=None): # Use Semaphore to ensure only one reorg signal was held. async with self.semaphore: - '''Handle a chain reorganisation. + """Handle a chain reorganisation. Count is the number of blocks to simulate a reorg, or None for - a real reorg.''' + a real reorg.""" if count is None: self.logger.info('chain reorg detected') else: @@ -386,12 +387,12 @@ def flush_backup(): self.backed_up_event.clear() async def reorg_hashes(self, count): - '''Return a pair (start, last, hashes) of blocks to back up during a + """Return a pair (start, last, hashes) of blocks to back up during a reorg. The hashes are returned in order of increasing height. Start is the height of the first hash, last of the last. - ''' + """ start, count = await self.calc_reorg_range(count) last = start + count - 1 s = '' if count == 1 else 's' @@ -401,11 +402,11 @@ async def reorg_hashes(self, count): return start, last, await self.db.fs_block_hashes(start, count) async def calc_reorg_range(self, count): - '''Calculate the reorg range''' + """Calculate the reorg range""" def diff_pos(hashes1, hashes2): - '''Returns the index of the first difference in the hash lists. - If both lists match returns their length.''' + """Returns the index of the first difference in the hash lists. + If both lists match returns their length.""" for n, (hash1, hash2) in enumerate(zip(hashes1, hashes2)): if hash1 != hash2: return n @@ -444,7 +445,7 @@ def estimate_txs_remaining(self): # - Flushing def flush_data(self): - '''The data for a flush. The lock must be taken.''' + """The data for a flush. The lock must be taken.""" assert self.state_lock.locked() return FlushData(self.height, self.tx_count, self.headers, self.tx_hashes, self.undo_infos, self.utxo_cache, @@ -473,7 +474,7 @@ async def _maybe_flush(self): self.next_cache_check = time.monotonic() + 30 def check_cache_size(self): - '''Flush a cache if it gets too big.''' + """Flush a cache if it gets too big.""" # Good average estimates based on traversal of subobjects and # requesting size from Python (see deep_getsizeof). one_MB = 1000*1000 @@ -498,10 +499,10 @@ def check_cache_size(self): return None def advance_blocks(self, blocks): - '''Synchronously advance the blocks. + """Synchronously advance the blocks. It is already verified they correctly connect onto our tip. 
- ''' + """ min_height = self.db.min_undo_height(self.daemon.cached_height()) height = self.height genesis_activation = self.coin.GENESIS_ACTIVATION @@ -814,7 +815,7 @@ def get_earliest_subrealm_payment(self, atomical_id): # Mints are only stored if they are less than the max_mints amount def put_decentralized_mint_data(self, atomical_id, location_id, value): self.logger.debug(f'put_decentralized_mint_data: atomical_id={atomical_id.hex()}, location_id={location_id.hex()}, value={value.hex()}') - if self.distmint_data_cache.get(atomical_id) == None: + if self.distmint_data_cache.get(atomical_id) is None: self.distmint_data_cache[atomical_id] = {} self.distmint_data_cache[atomical_id][location_id] = value @@ -855,7 +856,7 @@ def get_distmints_count_by_atomical_id(self, height, atomical_id, use_block_db_c # Count the number of mints in the cache and add it to the number of mints in the db below cache_count = 0 location_map_for_atomical = self.distmint_data_cache.get(atomical_id, None) - if location_map_for_atomical != None: + if location_map_for_atomical is not None: cache_count = len(location_map_for_atomical) def lookup_db_count(atomical_id): @@ -890,7 +891,7 @@ def lookup_db_count(atomical_id): # Spend all of the atomicals at a location def spend_atomicals_utxo(self, tx_hash: bytes, tx_idx: int, live_run) -> bytes: - '''Spend the atomicals entry for UTXO and return atomicals[].''' + """Spend the atomicals entry for UTXO and return atomicals[].""" idx_packed = pack_le_uint32(tx_idx) location_id = tx_hash + idx_packed cache_map = self.atomicals_utxo_cache.get(location_id) @@ -2078,7 +2079,7 @@ def populate_extended_field_summary_atomical_info(self, atomical_id, atomical): decoded_object = loads(db_mint_value) unpacked_data_summary = auto_encode_bytes_elements(decoded_object) atomical['mint_data'] = {} - if unpacked_data_summary != None: + if unpacked_data_summary is not None: atomical['mint_data']['fields'] = unpacked_data_summary else: atomical['mint_data']['fields'] = {} @@ -3209,11 +3210,11 @@ def create_or_delete_subname_payment_output_if_valid(self, tx_hash, tx, tx_num, return tx_hash, True def backup_blocks(self, raw_blocks: Sequence[bytes]): - '''Backup the raw blocks and flush. + """Backup the raw blocks and flush. The blocks should be in order of decreasing height, starting at. self.height. A flush is performed once the blocks are backed up. - ''' + """ self.db.assert_flushed(self.flush_data()) assert self.height >= len(raw_blocks) genesis_activation = self.coin.GENESIS_ACTIVATION @@ -3413,7 +3414,7 @@ def backup_txs( atomicals_value = atomicals_undo_item[ATOMICAL_ID_LEN + ATOMICAL_ID_LEN :] # There can be many atomicals at the same location # Group them by the location - if atomicals_undo_info_map.get(atomicals_location, None) == None: + if atomicals_undo_info_map.get(atomicals_location, None) is None: atomicals_undo_info_map[atomicals_location] = [] atomicals_undo_info_map[atomicals_location].append({ 'location_id': atomicals_location, @@ -3504,7 +3505,7 @@ def backup_txs( touched.add(hashX) # Restore the atomicals utxos in the undo information potential_atomicals_list_to_restore = atomicals_undo_info_map.get(txin.prev_hash + pack_le_uint32(txin.prev_idx)) - if potential_atomicals_list_to_restore != None: + if potential_atomicals_list_to_restore is not None: for atomical_to_restore in potential_atomicals_list_to_restore: atomical_id = atomical_to_restore['atomical_id'] location_id = atomical_to_restore['location_id'] @@ -3528,7 +3529,7 @@ def backup_txs( # Sanity checks... 
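+        # After undoing the blocks above, the running atomical counter should be
+        # back in step with the flushed atomical_count; the assert below enforces it.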
 assert(atomical_num == self.atomical_count)
 
-    '''An in-memory UTXO cache, representing all changes to UTXO state
+    """An in-memory UTXO cache, representing all changes to UTXO state
     since the last DB flush.
 
     We want to store millions of these in memory for optimal
@@ -3580,15 +3581,15 @@ def backup_txs(
     looking up a UTXO the prefix space of the compressed hash needs to
     be searched and resolved if necessary with the tx_num.  The
     collision rate is low (<0.1%).
-    '''
+    """
 
     def spend_utxo(self, tx_hash: bytes, tx_idx: int) -> bytes:
-        '''Spend a UTXO and return (hashX + tx_num + sat_value).
+        """Spend a UTXO and return (hashX + tx_num + sat_value).
 
         If the UTXO is not in the cache it must be on disk.  We store
         all UTXOs so not finding one indicates a logic error or DB
         corruption.
-        '''
+        """
         # Fast track is it being in the cache
         idx_packed = pack_le_uint32(tx_idx)
         cache_value: bytes | None = self.utxo_cache.pop(tx_hash + idx_packed, None)
@@ -3628,7 +3629,7 @@ def spend_utxo(self, tx_hash: bytes, tx_idx: int) -> bytes:
                          f'found in "h" table')
 
     async def _process_prefetched_blocks(self):
-        '''Loop forever processing blocks as they arrive.'''
+        """Loop forever processing blocks as they arrive."""
        while True:
             if self.height == self.daemon.cached_height():
                 if not self._caught_up_event.is_set():
@@ -3665,7 +3666,7 @@ async def _first_open_dbs(self):
     # --- External API
 
     async def fetch_and_process_blocks(self, caught_up_event):
-        '''Fetch, process and index blocks from the daemon.
+        """Fetch, process and index blocks from the daemon.
 
         Sets caught_up_event when first caught up.  Flushes to disk
         and shuts down cleanly if cancelled.
@@ -3675,7 +3676,7 @@ async def fetch_and_process_blocks(self, caught_up_event):
         processed but not written to disk, it should write those to
         disk before exiting, as otherwise a significant amount of
         work could be lost.
-        '''
+        """
         self._caught_up_event = caught_up_event
         await self._first_open_dbs()
         try:
@@ -3689,10 +3690,10 @@ async def fetch_and_process_blocks(self, caught_up_event):
             await self.flush(True)
 
     def force_chain_reorg(self, count):
-        '''Force a reorg of the given number of blocks.
+        """Force a reorg of the given number of blocks.
 
         Returns True if a reorg is queued, false if not caught up.
-        '''
+        """
         if self._caught_up_event.is_set():
             self.reorg_count = count
             self.blocks_event.set()
diff --git a/electrumx/server/controller.py b/electrumx/server/controller.py
index ac1c8aef..d0d402da 100644
--- a/electrumx/server/controller.py
+++ b/electrumx/server/controller.py
@@ -73,15 +73,16 @@ async def on_block(self, touched, height):
 
 
 class Controller(ServerBase):
-    '''Manages server initialisation and stutdown.
+    """Manages server initialisation and shutdown.
 
     Servers are started once the mempool is synced after the block
     processor first catches up with the daemon.
-    '''
+    """
+
     async def serve(self, shutdown_event):
-        '''Start the RPC server and wait for the mempool to synchronize. Then
-        start serving external clients.
-        '''
+        """Start the RPC server and wait for the mempool to synchronize.
+
+        Then start serving external clients."""
         if not (0, 22, 0) <= aiorpcx_version < (0, 23):
             raise RuntimeError('aiorpcX version 0.22.x is required')
 
@@ -104,6 +105,7 @@ async def serve(self, shutdown_event):
         # Set notifications up to implement the MemPoolAPI
         def get_db_height():
             return db.db_height
+
         notifications.height = daemon.height
         notifications.db_height = get_db_height
         notifications.cached_height = daemon.cached_height
@@ -112,8 +114,9 @@ def get_db_height():
         notifications.lookup_utxos = db.lookup_utxos
         MemPoolAPI.register(Notifications)
         mempool = MemPool(
-            env.coin, notifications,
-            refresh_secs=env.daemon_poll_interval_mempool_msec/1000,
+            env.coin,
+            notifications,
+            refresh_secs=env.daemon_poll_interval_mempool_msec / 1000,
         )
 
         session_mgr = SessionManager(env, db, bp, daemon, mempool, shutdown_event)
diff --git a/electrumx/server/daemon.py b/electrumx/server/daemon.py
index fc392a02..1fa777f4 100644
--- a/electrumx/server/daemon.py
+++ b/electrumx/server/daemon.py
@@ -5,8 +5,8 @@
 # See the file "LICENCE" for information about the copyright
 # and warranty status of this software.
 
-'''Class for handling asynchronous connections to a blockchain
-daemon.'''
+"""Class for handling asynchronous connections to a blockchain
+daemon."""
 
 import asyncio
 import itertools
@@ -29,19 +29,20 @@
 
 
 class DaemonError(Exception):
-    '''Raised when the daemon returns an error in its results.'''
+    """Raised when the daemon returns an error in its results."""
+
 
 class WarmingUpError(Exception):
-    '''Internal - when the daemon is warming up.'''
+    """Internal - when the daemon is warming up."""
 
 
 class ServiceRefusedError(Exception):
-    '''Internal - when the daemon doesn't provide a JSON response, only an HTTP error, for
-    some reason.'''
+    """Internal - when the daemon doesn't provide a JSON response, only an HTTP error, for
+    some reason."""
 
 
 class Daemon:
-    '''Handles connections to a daemon at the given URL.'''
+    """Handles connections to a daemon at the given URL."""
 
     WARMING_UP = -28
     id_counter = itertools.count()
@@ -88,7 +89,7 @@ def connector(self):
             return None
 
     def set_url(self, url):
-        '''Set the URLS to the given list, and switch to the first one.'''
+        """Set the URLS to the given list, and switch to the first one."""
         urls = url.split(',')
         urls = [self.coin.sanitize_url(url) for url in urls]
         for n, url in enumerate(urls):
@@ -99,19 +100,19 @@ def set_url(self, url):
         self.urls = urls
 
     def current_url(self):
-        '''Returns the current daemon URL.'''
+        """Returns the current daemon URL."""
         return self.urls[self.url_index]
 
     def logged_url(self, url=None):
-        '''The host and port part, for logging.'''
+        """The host and port part, for logging."""
         url = url or self.current_url()
         return url[url.rindex('@') + 1:]
 
     def failover(self):
-        '''Call to fail-over to the next daemon URL.
+        """Call to fail-over to the next daemon URL.
 
         Returns False if there is only one, otherwise True.
-        '''
+        """
         if len(self.urls) > 1:
             self.url_index = (self.url_index + 1) % len(self.urls)
             self.logger.info(f'failing over to {self.logged_url()}')
@@ -132,11 +133,12 @@ async def _send_data(self, data):
                 raise aiohttp.ClientConnectionError
 
     async def _send(self, payload, processor):
-        '''Send a payload to be converted to JSON.
+        """Send a payload to be converted to JSON.
 
         Handles temporary connection issues.  Daemon reponse errors
         are raise through DaemonError.
- ''' + """ + def log_error(error): nonlocal last_error_log, retry now = time.monotonic() @@ -182,7 +184,8 @@ def log_error(error): retry = max(min(self.max_retry, retry * 2), self.init_retry) async def _send_single(self, method, params=None): - '''Send a single request to the daemon.''' + """Send a single request to the daemon.""" + def processor(result): err = result['error'] if not err: @@ -197,11 +200,12 @@ def processor(result): return await self._send(payload, processor) async def _send_vector(self, method, params_iterable, replace_errs=False): - '''Send several requests of the same method. + """Send several requests of the same method. The result will be an array of the same length as params_iterable. If replace_errs is true, any item with an error is returned as None, - otherwise an exception is raised.''' + otherwise an exception is raised.""" + def processor(result): errs = [item['error'] for item in result if item['error']] if any(err.get('code') == self.WARMING_UP for err in errs): @@ -217,10 +221,10 @@ def processor(result): return [] async def _is_rpc_available(self, method): - '''Return whether given RPC method is available in the daemon. + """Return whether given RPC method is available in the daemon. Results are cached and the daemon will generally not be queried with - the same method more than once.''' + the same method more than once.""" available = self.available_rpcs.get(method) if available is None: available = True @@ -234,41 +238,41 @@ async def _is_rpc_available(self, method): return available async def block_hex_hashes(self, first, count): - '''Return the hex hashes of count block starting at height first.''' - params_iterable = ((h, ) for h in range(first, first + count)) + """Return the hex hashes of count block starting at height first.""" + params_iterable = ((h,) for h in range(first, first + count)) return await self._send_vector('getblockhash', params_iterable) async def deserialised_block(self, hex_hash): - '''Return the deserialised block with the given hex hash.''' + """Return the deserialised block with the given hex hash.""" return await self._send_single('getblock', (hex_hash, True)) async def raw_blocks(self, hex_hashes): - '''Return the raw binary blocks with the given hex hashes.''' + """Return the raw binary blocks with the given hex hashes.""" params_iterable = ((h, False) for h in hex_hashes) blocks = await self._send_vector('getblock', params_iterable) # Convert hex string to bytes return [hex_to_bytes(block) for block in blocks] async def mempool_hashes(self): - '''Update our record of the daemon's mempool hashes.''' + """Update our record of the daemon's mempool hashes.""" return await self._send_single('getrawmempool') async def estimatefee(self, block_count, estimate_mode=None): - '''Return the fee estimate for the block count. Units are whole + """Return the fee estimate for the block count. Units are whole currency units per KB, e.g. 0.00000995, or -1 if no estimate is available. 
- ''' + """ if estimate_mode: args = (block_count, estimate_mode) else: - args = (block_count, ) + args = (block_count,) if await self._is_rpc_available('estimatesmartfee'): estimate = await self._send_single('estimatesmartfee', args) return estimate.get('feerate', -1) return await self._send_single('estimatefee', args) async def getnetworkinfo(self): - '''Return the result of the 'getnetworkinfo' RPC call.''' + """Return the result of the 'getnetworkinfo' RPC call.""" async with self._networkinfo_lock: cache_val, cache_time = self._networkinfo_cache if time.time() - cache_time < 60: # seconds @@ -278,21 +282,21 @@ async def getnetworkinfo(self): return val async def relayfee(self): - '''The minimum fee a low-priority tx must pay in order to be accepted - to the daemon's memory pool.''' + """The minimum fee a low-priority tx must pay in order to be accepted + to the daemon's memory pool.""" network_info = await self.getnetworkinfo() return network_info['relayfee'] async def getrawtransaction(self, hex_hash, verbose=False): - '''Return the serialized raw transaction with the given hash.''' + """Return the serialized raw transaction with the given hash.""" # Cast to int because some coin daemons are old and require it return await self._send_single('getrawtransaction', (hex_hash, int(verbose))) async def getrawtransactions(self, hex_hashes, replace_errs=True): - '''Return the serialized raw transactions with the given hashes. + """Return the serialized raw transactions with the given hashes. - Replaces errors with None by default.''' + Replaces errors with None by default.""" params_iterable = ((hex_hash, 0) for hex_hash in hex_hashes) txs = await self._send_vector('getrawtransaction', params_iterable, replace_errs=replace_errs) @@ -300,19 +304,19 @@ async def getrawtransactions(self, hex_hashes, replace_errs=True): return [hex_to_bytes(tx) if tx else None for tx in txs] async def broadcast_transaction(self, raw_tx): - '''Broadcast a transaction to the network.''' - return await self._send_single('sendrawtransaction', (raw_tx, )) + """Broadcast a transaction to the network.""" + return await self._send_single('sendrawtransaction', (raw_tx,)) async def height(self): - '''Query the daemon for its current height.''' + """Query the daemon for its current height.""" self._height = await self._send_single('getblockcount') return self._height # return self.coin.ATOMICALS_ACTIVATION_HEIGHT - 1 def cached_height(self): - '''Return the cached daemon height. + """Return the cached daemon height. - If the daemon has not been queried yet this returns None.''' + If the daemon has not been queried yet this returns None.""" return self._height # return self.coin.ATOMICALS_ACTIVATION_HEIGHT - 1 @@ -320,44 +324,44 @@ def cached_height(self): class DashDaemon(Daemon): async def masternode_broadcast(self, params): - '''Broadcast a transaction to the network.''' + """Broadcast a transaction to the network.""" return await self._send_single('masternodebroadcast', params) async def masternode_list(self, params): - '''Return the masternode status.''' + """Return the masternode status.""" return await self._send_single('masternodelist', params) async def protx(self, params): - '''Set of commands to execute ProTx related actions.''' + """Set of commands to execute ProTx related actions.""" return await self._send_single('protx', params) class FakeEstimateFeeDaemon(Daemon): - '''Daemon that simulates estimatefee and relayfee RPC calls. 
Coin that - wants to use this daemon must define ESTIMATE_FEE & RELAY_FEE''' + """Daemon that simulates estimatefee and relayfee RPC calls. Coin that + wants to use this daemon must define ESTIMATE_FEE & RELAY_FEE""" async def estimatefee(self, block_count): - '''Return the fee estimate for the given parameters.''' + """Return the fee estimate for the given parameters.""" return self.coin.ESTIMATE_FEE async def relayfee(self): - '''The minimum fee a low-priority tx must pay in order to be accepted - to the daemon's memory pool.''' + """The minimum fee a low-priority tx must pay in order to be accepted + to the daemon's memory pool.""" return self.coin.RELAY_FEE class LegacyRPCDaemon(Daemon): - '''Handles connections to a daemon at the given URL. + """Handles connections to a daemon at the given URL. This class is useful for daemons that don't have the new 'getblock' RPC call that returns the block in hex, the workaround is to manually recreate the block bytes. The recreated block bytes may not be the exact as in the underlying blockchain but it is good enough for our indexing - purposes.''' + purposes.""" async def raw_blocks(self, hex_hashes): - '''Return the raw binary blocks with the given hex hashes.''' - params_iterable = ((h, ) for h in hex_hashes) + """Return the raw binary blocks with the given hex hashes.""" + params_iterable = ((h,) for h in hex_hashes) block_info = await self._send_vector('getblock', params_iterable) blocks = [] @@ -382,7 +386,7 @@ async def make_raw_header(self, b): ]) async def make_raw_block(self, b): - '''Construct a raw block''' + """Construct a raw block""" header = await self.make_raw_header(b) @@ -412,7 +416,7 @@ class FakeEstimateLegacyRPCDaemon(LegacyRPCDaemon, FakeEstimateFeeDaemon): class DecredDaemon(Daemon): async def raw_blocks(self, hex_hashes): - '''Return the raw binary blocks with the given hex hashes.''' + """Return the raw binary blocks with the given hex hashes.""" params_iterable = ((h, False) for h in hex_hashes) blocks = await self._send_vector('getblock', params_iterable) @@ -434,12 +438,12 @@ async def raw_blocks(self, hex_hashes): is_valid = valid_tx_tree[hash] else: # Do something complicated to figure out if this block is valid - header = await self._send_single('getblockheader', (hash, )) + header = await self._send_single('getblockheader', (hash,)) if 'nextblockhash' not in header: raise DaemonError(f'Could not find next block for {hash}') next_hash = header['nextblockhash'] next_header = await self._send_single('getblockheader', - (next_hash, )) + (next_hash,)) is_valid = self.is_valid_tx_tree(next_header['votebits']) if is_valid: @@ -494,29 +498,29 @@ def connector(self): class PreLegacyRPCDaemon(LegacyRPCDaemon): - '''Handles connections to a daemon at the given URL. + """Handles connections to a daemon at the given URL. 
This class is useful for daemons that don't have the new 'getblock' RPC call that returns the block in hex, and need the False parameter - for the getblock''' + for the getblock""" async def deserialised_block(self, hex_hash): - '''Return the deserialised block with the given hex hash.''' + """Return the deserialised block with the given hex hash.""" return await self._send_single('getblock', (hex_hash, False)) class SmartCashDaemon(Daemon): async def masternode_broadcast(self, params): - '''Broadcast a smartnode to the network.''' + """Broadcast a smartnode to the network.""" return await self._send_single('smartnodebroadcast', params) async def masternode_list(self, params): - '''Return the smartnode status.''' + """Return the smartnode status.""" return await self._send_single('smartnodelist', params) async def smartrewards(self, params): - '''Return smartrewards data.''' + """Return smartrewards data.""" return await self._send_single('smartrewards', params) @@ -525,25 +529,25 @@ class ZcoinMtpDaemon(Daemon): def strip_mtp_data(self, raw_block): if self.coin.is_mtp(raw_block): return \ - raw_block[:self.coin.MTP_HEADER_DATA_START*2] + \ - raw_block[self.coin.MTP_HEADER_DATA_END*2:] + raw_block[:self.coin.MTP_HEADER_DATA_START * 2] + \ + raw_block[self.coin.MTP_HEADER_DATA_END * 2:] return raw_block async def raw_blocks(self, hex_hashes): - '''Return the raw binary blocks with the given hex hashes.''' + """Return the raw binary blocks with the given hex hashes.""" params_iterable = ((h, False) for h in hex_hashes) blocks = await self._send_vector('getblock', params_iterable) # Convert hex string to bytes return [hex_to_bytes(self.strip_mtp_data(block)) for block in blocks] async def masternode_broadcast(self, params): - '''Broadcast a transaction to the network.''' + """Broadcast a transaction to the network.""" return await self._send_single('znodebroadcast', params) async def masternode_list(self, params): - '''Return the masternode status.''' + """Return the masternode status.""" return await self._send_single('znodelist', params) async def protx(self, params): - '''Set of commands to execute ProTx related actions.''' + """Set of commands to execute ProTx related actions.""" return await self._send_single('protx', params) diff --git a/electrumx/server/env.py b/electrumx/server/env.py index 006bca15..430aec4f 100644 --- a/electrumx/server/env.py +++ b/electrumx/server/env.py @@ -5,11 +5,11 @@ # See the file "LICENCE" for information about the copyright # and warranty status of this software. -'''Class for handling environment configuration and defaults.''' +"""Class for handling environment configuration and defaults.""" import re -from typing import Type, Union, TYPE_CHECKING +from typing import Type, Union from aiorpcx import Service, ServicePart from ipaddress import IPv4Address, IPv6Address @@ -22,10 +22,10 @@ class ServiceError(Exception): class Env(EnvBase): - '''Wraps environment configuration. Optionally, accepts a Coin class + """Wraps environment configuration. Optionally, accepts a Coin class as first argument to have ElectrumX serve custom coins not part of the standard distribution. - ''' + """ # Peer discovery PD_OFF, PD_SELF, PD_ON = ('OFF', 'SELF', 'ON') @@ -106,9 +106,9 @@ def __init__(self, coin=None): self.report_services = self.services_to_report() def sane_max_sessions(self): - '''Return the maximum number of sessions to permit. Normally this - is MAX_SESSIONS. 
However, to prevent open file exhaustion, ajdust
-    downwards if running with a small open file rlimit.'''
+    """Return the maximum number of sessions to permit. Normally this
+    is MAX_SESSIONS. However, to prevent open file exhaustion, adjust
+    downwards if running with a small open file rlimit."""
        env_value = self.integer('MAX_SESSIONS', 1000)
        # No resource module on Windows
        try:
diff --git a/electrumx/server/history.py b/electrumx/server/history.py
index 759d2bf0..e855eb86 100644
--- a/electrumx/server/history.py
+++ b/electrumx/server/history.py
@@ -6,7 +6,7 @@
 # See the file "LICENCE" for information about the copyright
 # and warranty status of this software.
 
-'''History by script hash (address).'''
+"""History by script hash (address)."""
 
 import ast
 import bisect
@@ -125,7 +125,7 @@ def clear_excess(self, utxo_flush_count):
         self.logger.info('deleted excess history entries')
 
     def write_state(self, batch):
-        '''Write state to the history DB.'''
+        """Write state to the history DB."""
         state = {
             'flush_count': self.flush_count,
             'comp_flush_count': self.comp_flush_count,
@@ -209,10 +209,10 @@ def backup(self, hashXs, tx_count):
         self.logger.info(f'backing up removed {nremoves:,d} history entries')
 
     def get_txnums(self, hashX, limit=1000):
-        '''Generator that returns an unpruned, sorted list of tx_nums in the
+        """Generator that returns an unpruned, sorted list of tx_nums in the
         history of a hashX.  Includes both spending and receiving
         transactions.  By default yields at most 1000 entries.  Set
-        limit to None to get them all.  '''
+        limit to None to get them all.  """
         limit = util.resolve_limit(limit)
         chunks = util.chunks
         txnum_padding = bytes(8-TXNUM_LEN)
@@ -244,7 +244,7 @@ def get_txnums(self, hashX, limit=1000):
     # flush_count is reset to comp_flush_count, and comp_flush_count to -1
 
     def _flush_compaction(self, cursor, write_items, keys_to_delete):
-        '''Flush a single compaction pass as a batch.'''
+        """Flush a single compaction pass as a batch."""
         # Update compaction state
         if cursor == 65536:
             self.flush_count = self.comp_flush_count
@@ -264,8 +264,8 @@ def _flush_compaction(self, cursor, write_items, keys_to_delete):
 
     def _compact_hashX(self, hashX, hist_map, hist_list,
                        write_items, keys_to_delete):
-        '''Compres history for a hashX.  hist_list is an ordered list of
-        the histories to be compressed.'''
+        """Compress history for a hashX.  hist_list is an ordered list of
+        the histories to be compressed."""
         # History entries (tx numbers) are TXNUM_LEN bytes each.  Distribute
         # over rows of up to 50KB in size.  A fixed row size means
         # future compactions will not need to update the first N - 1
@@ -299,8 +299,8 @@ def _compact_hashX(self, hashX, hist_map, hist_list,
         return write_size
 
     def _compact_prefix(self, prefix, write_items, keys_to_delete):
-        '''Compact all history entries for hashXs beginning with the
-        given prefix.  Update keys_to_delete and write.'''
+        """Compact all history entries for hashXs beginning with the
+        given prefix.  Update keys_to_delete and write."""
         prior_hashX = None
         hist_map = {}
         hist_list = []
@@ -328,9 +328,9 @@ def _compact_prefix(self, prefix, write_items, keys_to_delete):
         return write_size
 
     def _compact_history(self, limit):
-        '''Inner loop of history compaction.  Loops until limit bytes have
+        """Inner loop of history compaction.  Loops until limit bytes have
         been processed.
- ''' + """ keys_to_delete = set() write_items = [] # A list of (key, value) pairs write_size = 0 diff --git a/electrumx/server/mempool.py b/electrumx/server/mempool.py index e2aed33a..5e265e3d 100644 --- a/electrumx/server/mempool.py +++ b/electrumx/server/mempool.py @@ -5,7 +5,7 @@ # See the file "LICENCE" for information about the copyright # and warranty status of this software. -'''Mempool handling.''' +"""Mempool handling.""" import itertools import time @@ -22,7 +22,8 @@ from electrumx.lib.tx import SkipTxDeserialize from electrumx.lib.util import class_logger, chunks, OldTaskGroup, pack_le_uint32, unpack_le_uint32 from electrumx.server.db import UTXO -from electrumx.lib.util_atomicals import get_mint_info_op_factory, parse_protocols_operations_from_witness_array, location_id_bytes_to_compact +from electrumx.lib.util_atomicals import get_mint_info_op_factory, parse_protocols_operations_from_witness_array, \ + location_id_bytes_to_compact from electrumx.lib.hash import hash_to_hex_str, HASHX_LEN, double_sha256 @@ -52,52 +53,52 @@ class DBSyncError(Exception): class MemPoolAPI(ABC): - '''A concrete instance of this class is passed to the MemPool object - and used by it to query DB and blockchain state.''' + """A concrete instance of this class is passed to the MemPool object + and used by it to query DB and blockchain state.""" @abstractmethod async def height(self): - '''Query bitcoind for its height.''' + """Query bitcoind for its height.""" @abstractmethod def cached_height(self): - '''Return the height of bitcoind the last time it was queried, + """Return the height of bitcoind the last time it was queried, for any reason, without actually querying it. - ''' + """ @abstractmethod def db_height(self): - '''Return the height flushed to the on-disk DB.''' + """Return the height flushed to the on-disk DB.""" @abstractmethod async def mempool_hashes(self): - '''Query bitcoind for the hashes of all transactions in its - mempool, returned as a list.''' + """Query bitcoind for the hashes of all transactions in its + mempool, returned as a list.""" @abstractmethod async def raw_transactions(self, hex_hashes): - '''Query bitcoind for the serialized raw transactions with the given + """Query bitcoind for the serialized raw transactions with the given hashes. Missing transactions are returned as None. - hex_hashes is an iterable of hexadecimal hash strings.''' + hex_hashes is an iterable of hexadecimal hash strings.""" @abstractmethod async def lookup_utxos(self, prevouts): - '''Return a list of (hashX, value) pairs each prevout if unspent, + """Return a list of (hashX, value) pairs each prevout if unspent, otherwise return None if spent or not found. prevouts - an iterable of (hash, index) pairs - ''' + """ @abstractmethod async def on_mempool(self, touched, height): - '''Called each time the mempool is synchronized. touched is a set of + """Called each time the mempool is synchronized. touched is a set of hashXs touched since the previous call. height is the - daemon's height at the time the mempool was obtained.''' + daemon's height at the time the mempool was obtained.""" class MemPool: - '''Representation of the daemon's mempool. + """Representation of the daemon's mempool. 
coin - a coin class from coins.py api - an object implementing MemPoolAPI @@ -108,7 +109,7 @@ class MemPool: tx: tx_hash -> MemPoolTx hashXs: hashX -> set of all hashes of txs touching the hashX - ''' + """ def __init__( self, @@ -132,7 +133,7 @@ def __init__( self.lock = Lock() async def _logging(self, synchronized_event): - '''Print regular logs of mempool stats.''' + """Print regular logs of mempool stats.""" self.logger.info('beginning processing of daemon mempool. ' 'This can take some time...') start = time.monotonic() @@ -175,12 +176,12 @@ def _update_histogram(self, bin_size): def _compress_histogram( cls, histogram: Dict[float, int], *, bin_size: int ) -> Sequence[Tuple[float, int]]: - '''Calculate and return a compact fee histogram as needed for + """Calculate and return a compact fee histogram as needed for "mempool.get_fee_histogram" protocol request. histogram: feerate (sat/vbyte) -> total size in bytes of txs that pay approx feerate bin_size: ~minimum vsize of a bucket of txs in the result (e.g. 100 kb) - ''' + """ # Now compact it. For efficiency, get_fees returns a # compact histogram with variable bin size. The compact # histogram is an array of (fee_rate, vsize) values. @@ -210,12 +211,12 @@ def _compress_histogram( return compact def _accept_transactions(self, tx_map, utxo_map, touched): - '''Accept transactions in tx_map to the mempool if all their inputs + """Accept transactions in tx_map to the mempool if all their inputs can be found in the existing mempool or a utxo_map from the DB. Returns an (unprocessed tx_map, unspent utxo_map) pair. - ''' + """ hashXs = self.hashXs txs = self.txs @@ -254,16 +255,16 @@ def _accept_transactions(self, tx_map, utxo_map, touched): return deferred, {prevout: utxo_map[prevout] for prevout in unspent} def _accept_atomicals_updates(self, atomicals_map): - '''Process any atomicals updates in the mempool - ''' + """Process any atomicals updates in the mempool + """ for atomical_id, datafields in atomicals_map.items(): - tx_hash = atomical_id[ : 32 ] - if self.atomicals_mints.get(tx_hash) == None: - self.atomicals_mints[tx_hash] = {} - self.atomicals_mints[tx_hash][atomical_id] = datafields + tx_hash = atomical_id[: 32] + if self.atomicals_mints.get(tx_hash) is None: + self.atomicals_mints[tx_hash] = {} + self.atomicals_mints[tx_hash][atomical_id] = datafields async def _refresh_hashes(self, synchronized_event): - '''Refresh our view of the daemon's mempool.''' + """Refresh our view of the daemon's mempool.""" # Touched accumulates between calls to on_mempool and each # call transfers ownership touched = set() @@ -298,7 +299,7 @@ async def _process_mempool(self, all_hashes, touched, mempool_height): # First handle txs that have disappeared for tx_hash in (set(txs) - all_hashes): tx = txs.pop(tx_hash) - if self.atomicals_mints.get(tx_hash) != None: + if self.atomicals_mints.get(tx_hash) is not None: self.atomicals_mints.pop(tx_hash) tx_hashXs = {hashX for hashX, value in tx.in_pairs} tx_hashXs.update(hashX for hashX, value in tx.out_pairs) @@ -337,31 +338,37 @@ async def _process_mempool(self, all_hashes, touched, mempool_height): return touched async def _fetch_and_accept(self, hashes, all_hashes, touched): - '''Fetch a list of mempool transactions.''' + """Fetch a list of mempool transactions.""" hex_hashes_iter = (hash_to_hex_str(hash) for hash in hashes) raw_txs = await self.api.raw_transactions(hex_hashes_iter) script_hashX = self.coin.hashX_from_script - def deserialize_txs(): # This function is pure + def deserialize_txs(): # This 
function is pure to_hashX = self.coin.hashX_from_script deserializer = self.coin.DESERIALIZER txs = {} atomicals_updates_map = {} - def create_or_delete_atomical_from_definition(operation_found_at_inputs, tx, tx_hash, atomicals_updates_map): + + def create_or_delete_atomical_from_definition(operation_found_at_inputs, tx, tx_hash, + atomicals_updates_map): if not operation_found_at_inputs: - return + return op = operation_found_at_inputs['op'] - self.logger.info(f'atomicals_op={op} txid={hash_to_hex_str(tx_hash)}') - valid_create_op_type, mint_info = get_mint_info_op_factory(self.coin, tx, tx_hash, operation_found_at_inputs, None, 0, self.logger) + self.logger.info(f'atomicals_op={op} txid={hash_to_hex_str(tx_hash)}') + valid_create_op_type, mint_info = get_mint_info_op_factory(self.coin, tx, tx_hash, + operation_found_at_inputs, None, 0, + self.logger) if valid_create_op_type: atomical_id = mint_info['id'] - self.logger.info(f'atomicals_mint_type={valid_create_op_type}, txid={hash_to_hex_str(tx_hash)}, atomical_id={location_id_bytes_to_compact(atomical_id)}') + self.logger.info( + f'atomicals_mint_type={valid_create_op_type}, txid={hash_to_hex_str(tx_hash)}, atomical_id={location_id_bytes_to_compact(atomical_id)}') atomicals_updates_map[atomical_id] = { - 'atomical_id': location_id_bytes_to_compact(atomical_id), + 'atomical_id': location_id_bytes_to_compact(atomical_id), 'atomical_number': -1, 'type': mint_info['type'], 'confirmed': False } + for hash, raw_tx in zip(hashes, raw_txs): # The daemon may have evicted the tx from its # mempool or it may have gotten in a block @@ -371,10 +378,12 @@ def create_or_delete_atomical_from_definition(operation_found_at_inputs, tx, tx_ tx, tx_size = deserializer(raw_tx).read_tx_and_vsize() try: operations_found_at_inputs = parse_protocols_operations_from_witness_array(tx, hash, True) - create_or_delete_atomical_from_definition(operations_found_at_inputs, tx, hash, atomicals_updates_map) + create_or_delete_atomical_from_definition(operations_found_at_inputs, tx, hash, + atomicals_updates_map) except Exception as ex: - self.logger.error(f'skipping atomicals parsing due to error in mempool {hash_to_hex_str(hash)}: {ex}') - + self.logger.error( + f'skipping atomicals parsing due to error in mempool {hash_to_hex_str(hash)}: {ex}') + except SkipTxDeserialize as ex: self.logger.debug(f'skipping tx {hash_to_hex_str(hash)}: {ex}') continue @@ -411,17 +420,17 @@ def create_or_delete_atomical_from_definition(operation_found_at_inputs, tx, tx_ # async def keep_synchronized(self, synchronized_event): - '''Keep the mempool synchronized with the daemon.''' + """Keep the mempool synchronized with the daemon.""" async with OldTaskGroup() as group: await group.spawn(self._refresh_hashes(synchronized_event)) await group.spawn(self._refresh_histogram(synchronized_event)) await group.spawn(self._logging(synchronized_event)) async def balance_delta(self, hashX): - '''Return the unconfirmed amount in the mempool for hashX. + """Return the unconfirmed amount in the mempool for hashX. Can be positive or negative. 
- ''' + """ value = 0 if hashX in self.hashXs: for hash in self.hashXs[hashX]: @@ -431,16 +440,16 @@ async def balance_delta(self, hashX): return value async def compact_fee_histogram(self): - '''Return a compact fee histogram of the current mempool.''' + """Return a compact fee histogram of the current mempool.""" return self.cached_compact_histogram async def potential_spends(self, hashX): - '''Return a set of (prev_hash, prev_idx) pairs from mempool + """Return a set of (prev_hash, prev_idx) pairs from mempool transactions that touch hashX. None, some or all of these may be spends of the hashX, but all actual spends of it (in the DB or mempool) will be included. - ''' + """ result = set() for tx_hash in self.hashXs.get(hashX, ()): tx = self.txs[tx_hash] @@ -448,20 +457,20 @@ async def potential_spends(self, hashX): return result async def potential_atomicals_spends(self, hashX): - '''stub out and return empty - ''' + """stub out and return empty + """ return [] async def get_atomical_mint(self, atomical_id): - '''Check if there was an atomical minted in the mempool - ''' - tx_hash = atomical_id[ : 32 ] - if self.atomicals_mints.get(tx_hash) != None: + """Check if there was an atomical minted in the mempool + """ + tx_hash = atomical_id[: 32] + if self.atomicals_mints.get(tx_hash) is not None: return self.atomicals_mints[tx_hash][atomical_id] return None async def transaction_summaries(self, hashX): - '''Return a list of MemPoolTxSummary objects for the hashX.''' + """Return a list of MemPoolTxSummary objects for the hashX.""" result = [] for tx_hash in self.hashXs.get(hashX, ()): tx = self.txs[tx_hash] @@ -470,12 +479,12 @@ async def transaction_summaries(self, hashX): return result async def unordered_UTXOs(self, hashX): - '''Return an unordered list of UTXO named tuples from mempool + """Return an unordered list of UTXO named tuples from mempool transactions that pay to hashX. This does not consider if any other mempool transactions spend the outputs. - ''' + """ utxos = [] for tx_hash in self.hashXs.get(hashX, ()): tx = self.txs.get(tx_hash) @@ -486,13 +495,12 @@ async def unordered_UTXOs(self, hashX): # Todo, stubbed out for now async def unordered_atomicals_UTXOs(self, hashX): - '''Return an unordered list of Atomicals UTXO named tuples from mempool + """Return an unordered list of Atomicals UTXO named tuples from mempool transactions that pay to hashX. This does not consider if any other mempool transactions spend the outputs. - ''' + """ atomicals_utxos = [] # todo return atomicals_utxos - diff --git a/electrumx/server/peers.py b/electrumx/server/peers.py index 5a7f6e1c..8627f7a0 100644 --- a/electrumx/server/peers.py +++ b/electrumx/server/peers.py @@ -5,7 +5,7 @@ # See the file "LICENCE" for information about the copyright # and warranty status of this software. -'''Peer management.''' +"""Peer management.""" import asyncio import random @@ -14,7 +14,7 @@ import time from collections import Counter, defaultdict from ipaddress import IPv4Address, IPv6Address -from typing import TYPE_CHECKING, Type +from typing import TYPE_CHECKING, Optional import aiohttp from aiorpcx import (Event, Notification, RPCSession, SOCKSError, @@ -48,7 +48,7 @@ def assert_good(message, result, instance): class PeerSession(RPCSession): - '''An outgoing session to a peer.''' + """An outgoing session to a peer.""" async def handle_request(self, request): # We subscribe so might be unlucky enough to get a notification... 
@@ -60,11 +60,11 @@ async def handle_request(self, request): class PeerManager: - '''Looks after the DB of peer network servers. + """Looks after the DB of peer network servers. Attempts to maintain a connection with up to 8 peers. Issues a 'peers.subscribe' RPC to them and tells them our data. - ''' + """ def __init__(self, env: 'Env', db: 'DB'): self.logger = class_logger(__name__, self.__class__.__name__) # Initialise the Peer class @@ -90,12 +90,12 @@ def __init__(self, env: 'Env', db: 'DB'): self.blacklist = set() def _my_clearnet_peer(self): - '''Returns the clearnet peer representing this server, if any.''' + """Returns the clearnet peer representing this server, if any.""" clearnet = [peer for peer in self.myselves if not peer.is_tor] return clearnet[0] if clearnet else None def _set_peer_statuses(self): - '''Set peer statuses.''' + """Set peer statuses.""" cutoff = time.time() - STALE_SECS for peer in self.peers: if peer.bad: @@ -108,10 +108,10 @@ def _set_peer_statuses(self): peer.status = PEER_NEVER def _features_to_register(self, peer, remote_peers): - '''If we should register ourselves to the remote peer, which has + """If we should register ourselves to the remote peer, which has reported the given list of known peers, return the clearnet identity features to register, otherwise None. - ''' + """ # Announce ourself if not present. Don't if disabled, we # are a non-public IP address, or to ourselves. if not self.env.peer_announce or peer in self.myselves: @@ -126,14 +126,14 @@ def _features_to_register(self, peer, remote_peers): return my.features def _permit_new_onion_peer(self, now): - '''Accept a new onion peer only once per random time interval.''' + """Accept a new onion peer only once per random time interval.""" if now < self.permit_onion_peer_time: return False self.permit_onion_peer_time = now + random.randrange(0, 1200) return True async def _import_peers(self): - '''Import hard-coded peers from a file or the coin defaults.''' + """Import hard-coded peers from a file or the coin defaults.""" imported_peers = self.myselves.copy() # Add the hard-coded ones unless only reporting ourself if self.env.peer_discovery != self.env.PD_SELF: @@ -180,11 +180,11 @@ def _get_recent_good_peers(self): return recent async def _detect_proxy(self): - '''Detect a proxy if we don't have one and some time has passed since + """Detect a proxy if we don't have one and some time has passed since the last attempt. If found self.proxy is set to a SOCKSProxy instance, otherwise None. - ''' + """ host = self.env.tor_proxy_host if self.env.tor_proxy_port is None: ports = [9050, 9150, 1080] @@ -201,8 +201,8 @@ async def _detect_proxy(self): self.logger.info('no proxy detected, will try later') await sleep(900) - async def _note_peers(self, peers, limit=2, check_ports=False, source=None): - '''Add a limited number of peers that are not already present.''' + async def _note_peers(self, peers, limit: Optional[int] = 2, check_ports=False, source=None): + """Add a limited number of peers that are not already present.""" new_peers = [] match_set = self.peers.copy() for peer in peers: @@ -459,12 +459,12 @@ async def _send_peers_subscribe(self, session, peer): # External interface # async def discover_peers(self): - '''Perform peer maintenance. This includes + """Perform peer maintenance. This includes 1) Forgetting unreachable peers. 2) Verifying connectivity of new peers. 3) Retrying old peers at regular intervals. 
- ''' + """ self.logger.info(f'peer discovery: {self.env.peer_discovery}') if self.env.peer_discovery != self.env.PD_ON: self.logger.info('peer discovery is disabled') @@ -480,7 +480,7 @@ async def discover_peers(self): await group.spawn(self._import_peers()) def info(self): - '''The number of peers.''' + """The number of peers.""" self._set_peer_statuses() counter = Counter(peer.status for peer in self.peers) return { @@ -492,11 +492,11 @@ def info(self): } async def add_localRPC_peer(self, real_name): - '''Add a peer passed by the admin over LocalRPC.''' + """Add a peer passed by the admin over LocalRPC.""" await self._note_peers([Peer.from_real_name(real_name, 'RPC')], check_ports=True) async def on_add_peer(self, features, source_addr): - '''Add a peer (but only if the peer resolves to the source).''' + """Add a peer (but only if the peer resolves to the source).""" if self.env.peer_discovery != self.env.PD_ON: return False if not source_addr: @@ -547,12 +547,12 @@ async def on_add_peer(self, features, source_addr): return permit def on_peers_subscribe(self, is_tor): - '''Returns the server peers as a list of (ip, host, details) tuples. + """Returns the server peers as a list of (ip, host, details) tuples. We return all peers we've connected to in the last day. Additionally, if we don't have onion routing, we return a few hard-coded onion servers. - ''' + """ recent = self._get_recent_good_peers() # Always report ourselves if valid (even if not public) @@ -584,12 +584,12 @@ def on_peers_subscribe(self, is_tor): return [peer.to_tuple() for peer in peers] def proxy_address(self): - '''Return the NetAddress of the proxy, if there is a proxy, otherwise - None.''' + """Return the NetAddress of the proxy, if there is a proxy, otherwise + None.""" return self.proxy.address if self.proxy else None def rpc_data(self): - '''Peer data for the peers RPC method.''' + """Peer data for the peers RPC method.""" self._set_peer_statuses() def peer_data(peer): diff --git a/electrumx/server/session.py b/electrumx/server/session.py deleted file mode 100644 index 612ef530..00000000 --- a/electrumx/server/session.py +++ /dev/null @@ -1,3490 +0,0 @@ -# Copyright (c) 2016-2018, Neil Booth -# -# All rights reserved. -# -# See the file "LICENCE" for information about the copyright -# and warranty status of this software. 
- -'''Classes for local RPC server and remote client TCP/SSL servers.''' - -import asyncio -import codecs -import datetime -import itertools -import math -import os -import ssl -import time -from collections import defaultdict -from functools import partial -from ipaddress import IPv4Address, IPv6Address, IPv4Network, IPv6Network -from typing import Optional, TYPE_CHECKING -import asyncio - -import attr -import pylru -from aiohttp import web -from aiorpcx import (Event, JSONRPCAutoDetect, JSONRPCConnection, - ReplyAndDisconnect, Request, RPCError, RPCSession, - handler_invocation, serve_rs, serve_ws, sleep, - NewlineFramer, TaskTimeout, timeout_after, run_in_thread) - -import electrumx -from electrumx.lib.atomicals_blueprint_builder import AtomicalsTransferBlueprintBuilder -from electrumx.lib.script2addr import get_address_from_output_script -import electrumx.lib.util as util -from electrumx.lib.util import OldTaskGroup, unpack_le_uint64 -from electrumx.lib.util_atomicals import ( - DFT_MINT_MAX_MAX_COUNT_DENSITY, - format_name_type_candidates_to_rpc, - SUBREALM_MINT_PATH, - MINT_SUBNAME_RULES_BECOME_EFFECTIVE_IN_BLOCKS, - DMINT_PATH, - convert_db_mint_info_to_rpc_mint_info_format, - compact_to_location_id_bytes, - location_id_bytes_to_compact, - is_compact_atomical_id, - format_name_type_candidates_to_rpc_for_subname, - calculate_latest_state_from_mod_history, - parse_protocols_operations_from_witness_array, - validate_rules_data, - AtomicalsValidationError, - auto_encode_bytes_elements, - validate_merkle_proof_dmint -) -from electrumx.lib.hash import (HASHX_LEN, Base58Error, hash_to_hex_str, - hex_str_to_hash, sha256, double_sha256) -from electrumx.lib.merkle import MerkleCache -from electrumx.lib.text import sessions_lines -from electrumx.server.daemon import DaemonError -from electrumx.server.history import TXNUM_LEN -from electrumx.server.http_middleware import rate_limiter, cors_middleware, error_middleware, request_middleware -from electrumx.server.http_session import HttpHandler -from electrumx.server.peers import PeerManager -from electrumx.lib.script import SCRIPTHASH_LEN - -if TYPE_CHECKING: - from electrumx.server.db import DB - from electrumx.server.env import Env - from electrumx.server.block_processor import BlockProcessor - from electrumx.server.daemon import Daemon - from electrumx.server.mempool import MemPool - - -BAD_REQUEST = 1 -DAEMON_ERROR = 2 -ATOMICALS_INVALID_TX = 800422 - -def scripthash_to_hashX(scripthash): - try: - bin_hash = hex_str_to_hash(scripthash) - if len(bin_hash) == 32: - return bin_hash[:HASHX_LEN] - except (ValueError, TypeError): - pass - raise RPCError(BAD_REQUEST, f'{scripthash} is not a valid script hash') - -def non_negative_integer(value): - '''Return param value it is or can be converted to a non-negative - integer, otherwise raise an RPCError.''' - try: - value = int(value) - if value >= 0: - return value - except (ValueError, TypeError): - pass - raise RPCError(BAD_REQUEST, - f'{value} should be a non-negative integer') - -def assert_boolean(value): - '''Return param value it is boolean otherwise raise an RPCError.''' - if value in (False, True): - return value - raise RPCError(BAD_REQUEST, f'{value} should be a boolean value') - -def assert_tx_hash(value): - '''Raise an RPCError if the value is not a valid hexadecimal transaction hash. - - If it is valid, return it as 32-byte binary hash. 
- ''' - try: - raw_hash = hex_str_to_hash(value) - if len(raw_hash) == 32: - return raw_hash - except (ValueError, TypeError): - pass - raise RPCError(BAD_REQUEST, f'{value} should be a transaction hash') - -def assert_atomical_id(value): - '''Raise an RPCError if the value is not a valid atomical id - If it is valid, return it as 32-byte binary hash. - ''' - try: - if value == None or value == "": - raise RPCError(BAD_REQUEST, f'atomical_id required') - index_of_i = value.find("i") - if index_of_i != 64: - raise RPCError(BAD_REQUEST, f'{value} should be an atomical_id') - raw_hash = hex_str_to_hash(value[ : 64]) - if len(raw_hash) == 32: - return raw_hash - except (ValueError, TypeError): - pass - - raise RPCError(BAD_REQUEST, f'{value} should be an atomical_id') - -@attr.s(slots=True) -class SessionGroup: - name = attr.ib() - weight = attr.ib() - sessions = attr.ib() - retained_cost = attr.ib() - - def session_cost(self): - return sum(session.cost for session in self.sessions) - - def cost(self): - return self.retained_cost + self.session_cost() - - -@attr.s(slots=True) -class SessionReferences: - # All attributes are sets but groups is a list - sessions = attr.ib() - groups = attr.ib() - specials = attr.ib() # Lower-case strings - unknown = attr.ib() # Strings - - -class SessionManager: - '''Holds global state about all sessions.''' - - def __init__( - self, - env: 'Env', - db: 'DB', - bp: 'BlockProcessor', - daemon: 'Daemon', - mempool: 'MemPool', - shutdown_event: asyncio.Event, - ): - env.max_send = max(350000, env.max_send) - self.env = env - self.db = db - self.bp = bp - self.daemon = daemon - self.mempool = mempool - self.peer_mgr = PeerManager(env, db) - self.shutdown_event = shutdown_event - self.logger = util.class_logger(__name__, self.__class__.__name__) - self.servers = {} # service->server - self.sessions = {} # session->iterable of its SessionGroups - self.session_groups = {} # group name->SessionGroup instance - self.txs_sent = 0 - # Would use monotonic time, but aiorpcx sessions use Unix time: - self.start_time = time.time() - self._method_counts = defaultdict(int) - self._reorg_count = 0 - self._history_cache = pylru.lrucache(1000) - self._history_lookups = 0 - self._history_hits = 0 - self._history_op_cache = pylru.lrucache(1000) - self._tx_num_op_cache = pylru.lrucache(10000000) - self._tx_hashes_cache = pylru.lrucache(1000) - self._tx_hashes_lookups = 0 - self._tx_hashes_hits = 0 - # Really a MerkleCache cache - self._merkle_cache = pylru.lrucache(1000) - self._merkle_lookups = 0 - self._merkle_hits = 0 - self.estimatefee_cache = pylru.lrucache(1000) - self._tx_detail_cache = pylru.lrucache(1000000) - self.notified_height = None - self.hsub_results = None - self._task_group = OldTaskGroup() - self._sslc = None - # Event triggered when electrumx is listening for incoming requests. 
- self.server_listening = Event() - self.session_event = Event() - - # Set up the RPC request handlers - cmds = ('add_peer daemon_url disconnect getinfo groups log peers ' - 'query reorg sessions stop debug_memusage_list_all_objects ' - 'debug_memusage_get_random_backref_chain'.split()) - LocalRPC.request_handlers = {cmd: getattr(self, 'rpc_' + cmd) - for cmd in cmds} - - def _ssl_context(self): - if self._sslc is None: - self._sslc = ssl.SSLContext(ssl.PROTOCOL_TLS) - self._sslc.load_cert_chain(self.env.ssl_certfile, keyfile=self.env.ssl_keyfile) - return self._sslc - - async def _start_servers(self, services): - for service in services: - kind = service.protocol.upper() - if service.protocol == 'http': - host = None if service.host == 'all_interfaces' else str(service.host) - try: - app = web.Application(middlewares=[ - cors_middleware(self), - error_middleware(self), - request_middleware(self), - ]) - handler = HttpHandler(self, self.db, self.mempool, self.peer_mgr, kind) - # GET - app.router.add_get('/proxy', handler.proxy) - app.router.add_get('/proxy/health', handler.health) - app.router.add_get('/proxy/blockchain.block.header', handler.block_header) - app.router.add_get('/proxy/blockchain.block.headers', handler.block_headers) - app.router.add_get('/proxy/blockchain.estimatefee', handler.estimatefee) - # app.router.add_get('/proxy/headers.subscribe', handler.headers_subscribe) - # app.router.add_get('/proxy/relayfee', handler.relayfee) - app.router.add_get('/proxy/blockchain.scripthash.get_balance', handler.scripthash_get_balance) - app.router.add_get('/proxy/blockchain.scripthash.get_history', handler.scripthash_get_history) - app.router.add_get('/proxy/blockchain.scripthash.get_mempool', handler.scripthash_get_mempool) - app.router.add_get('/proxy/blockchain.scripthash.listunspent', handler.scripthash_listunspent) - app.router.add_get('/proxy/blockchain.scripthash.subscribe', handler.scripthash_subscribe) - app.router.add_get('/proxy/blockchain.transaction.broadcast', handler.transaction_broadcast) - app.router.add_get('/proxy/blockchain.transaction.get', handler.transaction_get) - app.router.add_get('/proxy/blockchain.transaction.get_merkle', handler.transaction_merkle) - app.router.add_get('/proxy/blockchain.transaction.id_from_pos', handler.transaction_id_from_pos) - # app.router.add_get('/proxy/server.add_peer', handler.add_peer) - # app.router.add_get('/proxy/server.banner', handler.banner) - app.router.add_get('/proxy/server.donation_address', handler.donation_address) - app.router.add_get('/proxy/server.features', handler.server_features_async) - app.router.add_get('/proxy/server.peers.subscribe', handler.peers_subscribe) - app.router.add_get('/proxy/server.ping', handler.ping) - # app.router.add_get('/proxy/server.version', handler.server_version) - app.router.add_get('/proxy/blockchain.atomicals.validate', handler.transaction_broadcast_validate) - app.router.add_get('/proxy/blockchain.atomicals.get_ft_balances_scripthash', handler.atomicals_get_ft_balances) - app.router.add_get('/proxy/blockchain.atomicals.get_nft_balances_scripthash', handler.atomicals_get_nft_balances) - app.router.add_get('/proxy/blockchain.atomicals.listscripthash', handler.atomicals_listscripthash) - app.router.add_get('/proxy/blockchain.atomicals.list', handler.atomicals_list) - app.router.add_get('/proxy/blockchain.atomicals.get_numbers', handler.atomicals_num_to_id) - app.router.add_get('/proxy/blockchain.atomicals.get_block_hash', handler.atomicals_block_hash) - 
app.router.add_get('/proxy/blockchain.atomicals.get_block_txs', handler.atomicals_block_txs) - app.router.add_get('/proxy/blockchain.atomicals.dump', handler.atomicals_dump) - app.router.add_get('/proxy/blockchain.atomicals.at_location', handler.atomicals_at_location) - app.router.add_get('/proxy/blockchain.atomicals.get_location', handler.atomicals_get_location) - app.router.add_get('/proxy/blockchain.atomicals.get', handler.atomicals_get) - app.router.add_get('/proxy/blockchain.atomicals.get_global', handler.atomicals_get_global) - app.router.add_get('/proxy/blockchain.atomicals.get_state', handler.atomical_get_state) - app.router.add_get('/proxy/blockchain.atomicals.get_state_history', handler.atomical_get_state_history) - app.router.add_get('/proxy/blockchain.atomicals.get_events', handler.atomical_get_events) - app.router.add_get('/proxy/blockchain.atomicals.get_tx_history', handler.atomicals_get_tx_history) - app.router.add_get('/proxy/blockchain.atomicals.get_realm_info', handler.atomicals_get_realm_info) - app.router.add_get('/proxy/blockchain.atomicals.get_by_realm', handler.atomicals_get_by_realm) - app.router.add_get('/proxy/blockchain.atomicals.get_by_subrealm', handler.atomicals_get_by_subrealm) - app.router.add_get('/proxy/blockchain.atomicals.get_by_dmitem', handler.atomicals_get_by_dmitem) - app.router.add_get('/proxy/blockchain.atomicals.get_by_ticker', handler.atomicals_get_by_ticker) - app.router.add_get('/proxy/blockchain.atomicals.get_by_container', handler.atomicals_get_by_container) - app.router.add_get('/proxy/blockchain.atomicals.get_by_container_item', handler.atomicals_get_by_container_item) - app.router.add_get('/proxy/blockchain.atomicals.get_by_container_item_validate', handler.atomicals_get_by_container_item_validation) - app.router.add_get('/proxy/blockchain.atomicals.get_container_items', handler.atomicals_get_container_items) - app.router.add_get('/proxy/blockchain.atomicals.get_ft_info', handler.atomicals_get_ft_info) - app.router.add_get('/proxy/blockchain.atomicals.get_dft_mints', handler.atomicals_get_dft_mints) - app.router.add_get('/proxy/blockchain.atomicals.find_tickers', handler.atomicals_search_tickers) - app.router.add_get('/proxy/blockchain.atomicals.find_realms', handler.atomicals_search_realms) - app.router.add_get('/proxy/blockchain.atomicals.find_subrealms', handler.atomicals_search_subrealms) - app.router.add_get('/proxy/blockchain.atomicals.find_containers', handler.atomicals_search_containers) - app.router.add_get('/proxy/blockchain.atomicals.get_holders', handler.atomicals_get_holders) - app.router.add_get('/proxy/blockchain.atomicals.transaction', handler.atomicals_transaction) - app.router.add_get('/proxy/blockchain.atomicals.transaction_by_height', handler.transaction_by_height) - app.router.add_get('/proxy/blockchain.atomicals.transaction_by_atomical_id', handler.transaction_by_atomical_id) - app.router.add_get('/proxy/blockchain.atomicals.transaction_by_scripthash', handler.transaction_by_scripthash) - app.router.add_get('/proxy/blockchain.atomicals.transaction_global', handler.transaction_global) - # POST - app.router.add_post('/proxy', handler.proxy) - app.router.add_post('/proxy/blockchain.block.header', handler.block_header) - app.router.add_post('/proxy/blockchain.block.headers', handler.block_headers) - app.router.add_post('/proxy/blockchain.estimatefee', handler.estimatefee) - # app.router.add_post('/proxy/headers.subscribe', handler.headers_subscribe) - # app.router.add_post('/proxy/relayfee', handler.relayfee) - 
app.router.add_post('/proxy/blockchain.scripthash.get_balance', handler.scripthash_get_balance) - app.router.add_post('/proxy/blockchain.scripthash.get_history', handler.scripthash_get_history) - app.router.add_post('/proxy/blockchain.scripthash.get_mempool', handler.scripthash_get_mempool) - app.router.add_post('/proxy/blockchain.scripthash.listunspent', handler.scripthash_listunspent) - app.router.add_post('/proxy/blockchain.scripthash.subscribe', handler.scripthash_subscribe) - app.router.add_post('/proxy/blockchain.transaction.broadcast', handler.transaction_broadcast) - app.router.add_post('/proxy/blockchain.transaction.get', handler.transaction_get) - app.router.add_post('/proxy/blockchain.transaction.get_merkle', handler.transaction_merkle) - app.router.add_post('/proxy/blockchain.transaction.id_from_pos', handler.transaction_id_from_pos) - # app.router.add_post('/proxy/server.add_peer', handler.add_peer) - # app.router.add_post('/proxy/server.banner', handler.banner) - app.router.add_post('/proxy/server.donation_address', handler.donation_address) - app.router.add_post('/proxy/server.features', handler.server_features_async) - app.router.add_post('/proxy/server.peers.subscribe', handler.peers_subscribe) - app.router.add_post('/proxy/server.ping', handler.ping) - # app.router.add_post('/proxy/server.version', handler.server_version) - app.router.add_post('/proxy/blockchain.atomicals.validate', handler.transaction_broadcast_validate) - app.router.add_post('/proxy/blockchain.atomicals.get_ft_balances_scripthash', handler.atomicals_get_ft_balances) - app.router.add_post('/proxy/blockchain.atomicals.get_nft_balances_scripthash', handler.atomicals_get_nft_balances) - app.router.add_post('/proxy/blockchain.atomicals.listscripthash', handler.atomicals_listscripthash) - app.router.add_post('/proxy/blockchain.atomicals.list', handler.atomicals_list) - app.router.add_post('/proxy/blockchain.atomicals.get_numbers', handler.atomicals_num_to_id) - app.router.add_post('/proxy/blockchain.atomicals.get_block_hash', handler.atomicals_block_hash) - app.router.add_post('/proxy/blockchain.atomicals.get_block_txs', handler.atomicals_block_txs) - app.router.add_post('/proxy/blockchain.atomicals.dump', handler.atomicals_dump) - app.router.add_post('/proxy/blockchain.atomicals.at_location', handler.atomicals_at_location) - app.router.add_post('/proxy/blockchain.atomicals.get_location', handler.atomicals_get_location) - app.router.add_post('/proxy/blockchain.atomicals.get', handler.atomicals_get) - app.router.add_post('/proxy/blockchain.atomicals.get_global', handler.atomicals_get_global) - app.router.add_post('/proxy/blockchain.atomicals.get_state', handler.atomical_get_state) - app.router.add_post('/proxy/blockchain.atomicals.get_state_history', handler.atomical_get_state_history) - app.router.add_post('/proxy/blockchain.atomicals.get_events', handler.atomical_get_events) - app.router.add_post('/proxy/blockchain.atomicals.get_tx_history', handler.atomicals_get_tx_history) - app.router.add_post('/proxy/blockchain.atomicals.get_realm_info', handler.atomicals_get_realm_info) - app.router.add_post('/proxy/blockchain.atomicals.get_by_realm', handler.atomicals_get_by_realm) - app.router.add_post('/proxy/blockchain.atomicals.get_by_subrealm', handler.atomicals_get_by_subrealm) - app.router.add_post('/proxy/blockchain.atomicals.get_by_dmitem', handler.atomicals_get_by_dmitem) - app.router.add_post('/proxy/blockchain.atomicals.get_by_ticker', handler.atomicals_get_by_ticker) - 
app.router.add_post('/proxy/blockchain.atomicals.get_by_container', handler.atomicals_get_by_container) - app.router.add_post('/proxy/blockchain.atomicals.get_by_container_item', handler.atomicals_get_by_container_item) - app.router.add_post('/proxy/blockchain.atomicals.get_by_container_item_validate', handler.atomicals_get_by_container_item_validation) - app.router.add_post('/proxy/blockchain.atomicals.get_container_items', handler.atomicals_get_container_items) - app.router.add_post('/proxy/blockchain.atomicals.get_ft_info', handler.atomicals_get_ft_info) - app.router.add_post('/proxy/blockchain.atomicals.get_dft_mints', handler.atomicals_get_dft_mints) - app.router.add_post('/proxy/blockchain.atomicals.find_tickers', handler.atomicals_search_tickers) - app.router.add_post('/proxy/blockchain.atomicals.find_realms', handler.atomicals_search_realms) - app.router.add_post('/proxy/blockchain.atomicals.find_subrealms', handler.atomicals_search_subrealms) - app.router.add_post('/proxy/blockchain.atomicals.find_containers', handler.atomicals_search_containers) - app.router.add_post('/proxy/blockchain.atomicals.get_holders', handler.atomicals_get_holders) - app.router.add_post('/proxy/blockchain.atomicals.transaction', handler.atomicals_transaction) - app.router.add_post('/proxy/blockchain.atomicals.transaction_by_height', handler.transaction_by_height) - app.router.add_post('/proxy/blockchain.atomicals.transaction_by_atomical_id', handler.transaction_by_atomical_id) - app.router.add_post('/proxy/blockchain.atomicals.transaction_by_scripthash', handler.transaction_by_scripthash) - app.router.add_post('/proxy/blockchain.atomicals.transaction_global', handler.transaction_global) - # common proxy - app.router.add_get('/proxy/{method}', handler.handle_get_method) - app.router.add_post('/proxy/{method}', handler.handle_post_method) - app['rate_limiter'] = rate_limiter - runner = web.AppRunner(app) - await runner.setup() - site = web.TCPSite(runner, host, service.port) - await site.start() - except Exception as e: - self.logger.error(f'{kind} server failed to listen on {service.address}: {e}') - else: - self.logger.info(f'{kind} server listening on {service.address}') - else: - if service.protocol in self.env.SSL_PROTOCOLS: - sslc = self._ssl_context() - else: - sslc = None - if service.protocol == 'rpc': - session_class = LocalRPC - else: - session_class = self.env.coin.SESSIONCLS - if service.protocol in ('ws', 'wss'): - serve = serve_ws - else: - serve = serve_rs - # FIXME: pass the service not the kind - session_factory = partial(session_class, self, self.db, self.mempool, - self.peer_mgr, kind) - host = None if service.host == 'all_interfaces' else str(service.host) - try: - self.servers[service] = await serve(session_factory, host, - service.port, ssl=sslc) - except OSError as e: # don't suppress CancelledError - self.logger.error(f'{kind} server failed to listen on {service.address}: {e}') - else: - self.logger.info(f'{kind} server listening on {service.address}') - - - async def _start_external_servers(self): - '''Start listening on TCP and SSL ports, but only if the respective - port was given in the environment. 
- ''' - await self._start_servers(service for service in self.env.services - if service.protocol != 'rpc') - self.server_listening.set() - - async def _stop_servers(self, services): - '''Stop the servers of the given protocols.''' - server_map = {service: self.servers.pop(service) - for service in set(services).intersection(self.servers)} - # Close all before waiting - for service, server in server_map.items(): - self.logger.info(f'closing down server for {service}') - server.close() - # No value in doing these concurrently - for server in server_map.values(): - await server.wait_closed() - - async def _manage_servers(self): - paused = False - max_sessions = self.env.max_sessions - low_watermark = max_sessions * 19 // 20 - while True: - await self.session_event.wait() - self.session_event.clear() - if not paused and len(self.sessions) >= max_sessions: - self.logger.info(f'maximum sessions {max_sessions:,d} ' - f'reached, stopping new connections until ' - f'count drops to {low_watermark:,d}') - await self._stop_servers(service for service in self.servers - if service.protocol != 'rpc') - paused = True - # Start listening for incoming connections if paused and - # session count has fallen - if paused and len(self.sessions) <= low_watermark: - self.logger.info('resuming listening for incoming connections') - await self._start_external_servers() - paused = False - - async def _log_sessions(self): - '''Periodically log sessions.''' - log_interval = self.env.log_sessions - if log_interval: - while True: - await sleep(log_interval) - data = self._session_data(for_log=True) - for line in sessions_lines(data): - self.logger.info(line) - self.logger.info(util.json_serialize(self._get_info())) - - async def _disconnect_sessions(self, sessions, reason, *, force_after=1.0): - if sessions: - session_ids = ', '.join(str(session.session_id) for session in sessions) - self.logger.info(f'{reason} session ids {session_ids}') - for session in sessions: - await self._task_group.spawn(session.close(force_after=force_after)) - - async def _clear_stale_sessions(self): - '''Cut off sessions that haven't done anything for 10 minutes.''' - while True: - await sleep(60) - stale_cutoff = time.time() - self.env.session_timeout - stale_sessions = [session for session in self.sessions - if session.last_recv < stale_cutoff] - await self._disconnect_sessions(stale_sessions, 'closing stale') - del stale_sessions - - async def _handle_chain_reorgs(self): - '''Clear certain caches on chain reorgs.''' - while True: - await self.bp.backed_up_event.wait() - self.logger.info(f'reorg signalled; clearing tx_hashes and merkle caches') - self._reorg_count += 1 - self._tx_hashes_cache.clear() - self._merkle_cache.clear() - - async def _recalc_concurrency(self): - '''Periodically recalculate session concurrency.''' - session_class = self.env.coin.SESSIONCLS - period = 300 - while True: - await sleep(period) - hard_limit = session_class.cost_hard_limit - - # Reduce retained group cost - refund = period * hard_limit / 5000 - dead_groups = [] - for group in self.session_groups.values(): - group.retained_cost = max(0.0, group.retained_cost - refund) - if group.retained_cost == 0 and not group.sessions: - dead_groups.append(group) - # Remove dead groups - for group in dead_groups: - self.session_groups.pop(group.name) - - # Recalc concurrency for sessions where cost is changing gradually, and update - # cost_decay_per_sec. 
- for session in self.sessions: - # Subs have an on-going cost so decay more slowly with more subs - session.cost_decay_per_sec = hard_limit / (10000 + 5 * session.sub_count()) - session.recalc_concurrency() - - def _get_info(self): - '''A summary of server state.''' - cache_fmt = '{:,d} lookups {:,d} hits {:,d} entries' - sessions = self.sessions - return { - 'coin': self.env.coin.__name__, - 'daemon': self.daemon.logged_url(), - 'daemon height': self.daemon.cached_height(), - 'db height': self.db.db_height, - 'db_flush_count': self.db.history.flush_count, - 'groups': len(self.session_groups), - 'history cache': cache_fmt.format( - self._history_lookups, self._history_hits, len(self._history_cache)), - 'merkle cache': cache_fmt.format( - self._merkle_lookups, self._merkle_hits, len(self._merkle_cache)), - 'pid': os.getpid(), - 'peers': self.peer_mgr.info(), - 'request counts': self._method_counts, - 'request total': sum(self._method_counts.values()), - 'sessions': { - 'count': len(sessions), - 'count with subs': sum(len(getattr(s, 'hashX_subs', ())) > 0 for s in sessions), - 'errors': sum(s.errors for s in sessions), - 'logged': len([s for s in sessions if s.log_me]), - 'pending requests': sum(s.unanswered_request_count() for s in sessions), - 'subs': sum(s.sub_count() for s in sessions), - }, - 'tx hashes cache': cache_fmt.format( - self._tx_hashes_lookups, self._tx_hashes_hits, len(self._tx_hashes_cache)), - 'txs sent': self.txs_sent, - 'uptime': util.formatted_time(time.time() - self.start_time), - 'version': electrumx.version, - } - - def _session_data(self, for_log): - '''Returned to the RPC 'sessions' call.''' - now = time.time() - sessions = sorted(self.sessions, key=lambda s: s.start_time) - return [(session.session_id, - session.flags(), - session.remote_address_string(for_log=for_log), - session.client, - session.protocol_version_string(), - session.cost, - session.extra_cost(), - session.unanswered_request_count(), - session.txs_sent, - session.sub_count(), - session.recv_count, session.recv_size, - session.send_count, session.send_size, - now - session.start_time) - for session in sessions] - - def _group_data(self): - '''Returned to the RPC 'groups' call.''' - result = [] - for name, group in self.session_groups.items(): - sessions = group.sessions - result.append([name, - len(sessions), - group.session_cost(), - group.retained_cost, - sum(s.unanswered_request_count() for s in sessions), - sum(s.txs_sent for s in sessions), - sum(s.sub_count() for s in sessions), - sum(s.recv_count for s in sessions), - sum(s.recv_size for s in sessions), - sum(s.send_count for s in sessions), - sum(s.send_size for s in sessions), - ]) - return result - - async def _refresh_hsub_results(self, height): - '''Refresh the cached header subscription responses to be for height, - and record that as notified_height. 
- ''' - # Paranoia: a reorg could race and leave db_height lower - height = min(height, self.db.db_height) - raw = await self.raw_header(height) - self.hsub_results = {'hex': raw.hex(), 'height': height} - self.notified_height = height - - def _session_references(self, items, special_strings): - '''Return a SessionReferences object.''' - if not isinstance(items, list) or not all(isinstance(item, str) for item in items): - raise RPCError(BAD_REQUEST, 'expected a list of session IDs') - - sessions_by_id = {session.session_id: session for session in self.sessions} - groups_by_name = self.session_groups - - sessions = set() - groups = set() # Names as groups are not hashable - specials = set() - unknown = set() - - for item in items: - if item.isdigit(): - session = sessions_by_id.get(int(item)) - if session: - sessions.add(session) - else: - unknown.add(item) - else: - lc_item = item.lower() - if lc_item in special_strings: - specials.add(lc_item) - else: - if lc_item in groups_by_name: - groups.add(lc_item) - else: - unknown.add(item) - - groups = [groups_by_name[group] for group in groups] - return SessionReferences(sessions, groups, specials, unknown) - - # --- LocalRPC command handlers - - async def rpc_add_peer(self, real_name): - '''Add a peer. - - real_name: "bch.electrumx.cash t50001 s50002" for example - ''' - await self.peer_mgr.add_localRPC_peer(real_name) - return f"peer '{real_name}' added" - - async def rpc_disconnect(self, session_ids): - '''Disconnect sesssions. - - session_ids: array of session IDs - ''' - refs = self._session_references(session_ids, {'all'}) - result = [] - - if 'all' in refs.specials: - sessions = self.sessions - result.append('disconnecting all sessions') - else: - sessions = refs.sessions - result.extend(f'disconnecting session {session.session_id}' for session in sessions) - for group in refs.groups: - result.append(f'disconnecting group {group.name}') - sessions.update(group.sessions) - result.extend(f'unknown: {item}' for item in refs.unknown) - - await self._disconnect_sessions(sessions, 'local RPC request to disconnect') - return result - - async def rpc_log(self, session_ids): - '''Toggle logging of sesssions. 
- - session_ids: array of session or group IDs, or 'all', 'none', 'new' - ''' - refs = self._session_references(session_ids, {'all', 'none', 'new'}) - result = [] - - def add_result(text, value): - result.append(f'logging {text}' if value else f'not logging {text}') - - if 'all' in refs.specials: - for session in self.sessions: - session.log_me = True - SessionBase.log_new = True - result.append('logging all sessions') - if 'none' in refs.specials: - for session in self.sessions: - session.log_me = False - SessionBase.log_new = False - result.append('logging no sessions') - if 'new' in refs.specials: - SessionBase.log_new = not SessionBase.log_new - add_result('new sessions', SessionBase.log_new) - - sessions = refs.sessions - for session in sessions: - session.log_me = not session.log_me - add_result(f'session {session.session_id}', session.log_me) - for group in refs.groups: - for session in group.sessions.difference(sessions): - sessions.add(session) - session.log_me = not session.log_me - add_result(f'session {session.session_id}', session.log_me) - - result.extend(f'unknown: {item}' for item in refs.unknown) - return result - - async def rpc_daemon_url(self, daemon_url): - '''Replace the daemon URL.''' - daemon_url = daemon_url or self.env.daemon_url - try: - self.daemon.set_url(daemon_url) - except Exception as e: - raise RPCError(BAD_REQUEST, f'an error occurred: {e!r}') - return f'now using daemon at {self.daemon.logged_url()}' - - async def rpc_stop(self): - '''Shut down the server cleanly.''' - self.shutdown_event.set() - return 'stopping' - - async def rpc_getinfo(self): - '''Return summary information about the server process.''' - return self._get_info() - - async def rpc_groups(self): - '''Return statistics about the session groups.''' - return self._group_data() - - async def rpc_peers(self): - '''Return a list of data about server peers.''' - return self.peer_mgr.rpc_data() - - async def rpc_query(self, items, limit): - '''Returns data about a script, address or name.''' - coin = self.env.coin - db = self.db - lines = [] - - def arg_to_hashX(arg): - try: - script = bytes.fromhex(arg) - lines.append(f'Script: {arg}') - return coin.hashX_from_script(script) - except ValueError: - pass - - try: - hashX = coin.address_to_hashX(arg) - lines.append(f'Address: {arg}') - return hashX - except Base58Error: - pass - - try: - script = coin.build_name_index_script(arg.encode("ascii")) - hashX = coin.name_hashX_from_script(script) - lines.append(f'Name: {arg}') - return hashX - except (AttributeError, UnicodeEncodeError): - pass - - return None - - for arg in items: - hashX = arg_to_hashX(arg) - if not hashX: - continue - n = None - history = await db.limited_history(hashX, limit=limit) - for n, (tx_hash, height) in enumerate(history): - lines.append(f'History #{n:,d}: height {height:,d} ' - f'tx_hash {hash_to_hex_str(tx_hash)}') - if n is None: - lines.append('No history found') - n = None - utxos = await db.all_utxos(hashX) - for n, utxo in enumerate(utxos, start=1): - lines.append(f'UTXO #{n:,d}: tx_hash ' - f'{hash_to_hex_str(utxo.tx_hash)} ' - f'tx_pos {utxo.tx_pos:,d} height ' - f'{utxo.height:,d} value {utxo.value:,d}') - if n == limit: - break - if n is None: - lines.append('No UTXOs found') - - balance = sum(utxo.value for utxo in utxos) - lines.append(f'Balance: {coin.decimal_value(balance):,f} ' - f'{coin.SHORTNAME}') - - return lines - - async def rpc_sessions(self): - '''Return statistics about connected sessions.''' - return self._session_data(for_log=False) - - async def 
rpc_reorg(self, count): - '''Force a reorg of the given number of blocks. - - count: number of blocks to reorg - ''' - count = non_negative_integer(count) - if not self.bp.force_chain_reorg(count): - raise RPCError(BAD_REQUEST, 'still catching up with daemon') - return f'scheduled a reorg of {count:,d} blocks' - - async def rpc_debug_memusage_list_all_objects(self, limit: int) -> str: - """Return a string listing the most common types in memory.""" - import objgraph # optional dependency - import io - with io.StringIO() as fd: - objgraph.show_most_common_types( - limit=limit, - shortnames=False, - file=fd) - return fd.getvalue() - - async def rpc_debug_memusage_get_random_backref_chain(self, objtype: str) -> str: - """Return a dotfile as text containing the backref chain - for a randomly selected object of type objtype. - - Warning: very slow! and it blocks the server. - - To convert to image: - $ dot -Tps filename.dot -o outfile.ps - """ - import objgraph # optional dependency - import random - import io - with io.StringIO() as fd: - await run_in_thread( - lambda: - objgraph.show_chain( - objgraph.find_backref_chain( - random.choice(objgraph.by_type(objtype)), - objgraph.is_proper_module), - output=fd)) - return fd.getvalue() - - # --- External Interface - - async def serve(self, notifications, event): - '''Start the RPC server if enabled. When the event is triggered, - start TCP and SSL servers.''' - try: - await self._start_servers(service for service in self.env.services - if service.protocol == 'rpc') - await event.wait() - - session_class = self.env.coin.SESSIONCLS - session_class.cost_soft_limit = self.env.cost_soft_limit - session_class.cost_hard_limit = self.env.cost_hard_limit - session_class.cost_decay_per_sec = session_class.cost_hard_limit / 10000 - session_class.bw_cost_per_byte = 1.0 / self.env.bw_unit_cost - session_class.cost_sleep = self.env.request_sleep / 1000 - session_class.initial_concurrent = self.env.initial_concurrent - session_class.processing_timeout = self.env.request_timeout - - self.logger.info(f'max session count: {self.env.max_sessions:,d}') - self.logger.info(f'session timeout: {self.env.session_timeout:,d} seconds') - self.logger.info(f'session cost hard limit {self.env.cost_hard_limit:,d}') - self.logger.info(f'session cost soft limit {self.env.cost_soft_limit:,d}') - self.logger.info(f'bandwidth unit cost {self.env.bw_unit_cost:,d}') - self.logger.info(f'request sleep {self.env.request_sleep:,d}ms') - self.logger.info(f'request timeout {self.env.request_timeout:,d}s') - self.logger.info(f'initial concurrent {self.env.initial_concurrent:,d}') - - self.logger.info(f'max response size {self.env.max_send:,d} bytes') - if self.env.drop_client is not None: - self.logger.info( - f'drop clients matching: {self.env.drop_client.pattern}' - ) - for service in self.env.report_services: - self.logger.info(f'advertising service {service}') - # Start notifications; initialize hsub_results - await notifications.start(self.db.db_height, self._notify_sessions) - await self._start_external_servers() - # Peer discovery should start after the external servers - # because we connect to ourself - async with self._task_group as group: - await group.spawn(self.peer_mgr.discover_peers()) - await group.spawn(self._clear_stale_sessions()) - await group.spawn(self._handle_chain_reorgs()) - await group.spawn(self._recalc_concurrency()) - await group.spawn(self._log_sessions()) - await group.spawn(self._manage_servers()) - finally: - # Close servers then sessions - await 
self._stop_servers(self.servers.keys()) - async with OldTaskGroup() as group: - for session in list(self.sessions): - await group.spawn(session.close(force_after=1)) - - def extra_cost(self, session): - # Note there is no guarantee that session is still in self.sessions. Example traceback: - # notify_sessions->notify->address_status->bump_cost->recalc_concurrency->extra_cost - # during which there are many places the sesssion could be removed - groups = self.sessions.get(session) - if groups is None: - return 0 - return sum((group.cost() - session.cost) * group.weight for group in groups) - - async def _merkle_branch(self, height, tx_hashes, tx_pos): - tx_hash_count = len(tx_hashes) - cost = tx_hash_count - - if tx_hash_count >= 200: - self._merkle_lookups += 1 - merkle_cache = self._merkle_cache.get(height) - if merkle_cache: - self._merkle_hits += 1 - cost = 10 * math.sqrt(tx_hash_count) - else: - async def tx_hashes_func(start, count): - return tx_hashes[start: start + count] - merkle_cache = MerkleCache(self.db.merkle, tx_hashes_func) - self._merkle_cache[height] = merkle_cache - await merkle_cache.initialize(len(tx_hashes)) - branch, _root = await merkle_cache.branch_and_root(tx_hash_count, tx_pos) - else: - branch, _root = self.db.merkle.branch_and_root(tx_hashes, tx_pos) - - branch = [hash_to_hex_str(hash) for hash in branch] - return branch, cost / 2500 - - async def merkle_branch_for_tx_hash(self, height, tx_hash): - '''Return a triple (branch, tx_pos, cost).''' - tx_hashes, tx_hashes_cost = await self.tx_hashes_at_blockheight(height) - try: - tx_pos = tx_hashes.index(tx_hash) - except ValueError: - raise RPCError(BAD_REQUEST, - f'tx {hash_to_hex_str(tx_hash)} not in block at height {height:,d}') - branch, merkle_cost = await self._merkle_branch(height, tx_hashes, tx_pos) - return branch, tx_pos, tx_hashes_cost + merkle_cost - - async def merkle_branch_for_tx_pos(self, height, tx_pos): - '''Return a triple (branch, tx_hash_hex, cost).''' - tx_hashes, tx_hashes_cost = await self.tx_hashes_at_blockheight(height) - try: - tx_hash = tx_hashes[tx_pos] - except IndexError: - raise RPCError(BAD_REQUEST, - f'no tx at position {tx_pos:,d} in block at height {height:,d}') - branch, merkle_cost = await self._merkle_branch(height, tx_hashes, tx_pos) - return branch, hash_to_hex_str(tx_hash), tx_hashes_cost + merkle_cost - - async def tx_hashes_at_blockheight(self, height): - '''Returns a pair (tx_hashes, cost). - - tx_hashes is an ordered list of binary hashes, cost is an estimated cost of - getting the hashes; cheaper if in-cache. Raises RPCError. 
- ''' - self._tx_hashes_lookups += 1 - tx_hashes = self._tx_hashes_cache.get(height) - if tx_hashes: - self._tx_hashes_hits += 1 - return tx_hashes, 0.1 - - # Ensure the tx_hashes are fresh before placing in the cache - while True: - reorg_count = self._reorg_count - try: - tx_hashes = await self.db.tx_hashes_at_blockheight(height) - except self.db.DBError as e: - raise RPCError(BAD_REQUEST, f'db error: {e!r}') - if reorg_count == self._reorg_count: - break - - self._tx_hashes_cache[height] = tx_hashes - - return tx_hashes, 0.25 + len(tx_hashes) * 0.0001 - - def session_count(self): - '''The number of connections that we've sent something to.''' - return len(self.sessions) - - async def daemon_request(self, method, *args): - '''Catch a DaemonError and convert it to an RPCError.''' - try: - return await getattr(self.daemon, method)(*args) - except DaemonError as e: - raise RPCError(DAEMON_ERROR, f'daemon error: {e!r}') from None - - async def raw_header(self, height): - '''Return the binary header at the given height.''' - try: - return await self.db.raw_header(height) - except IndexError: - raise RPCError(BAD_REQUEST, f'height {height:,d} ' - 'out of range') from None - - async def broadcast_transaction(self, raw_tx): - hex_hash = await self.daemon.broadcast_transaction(raw_tx) - self.txs_sent += 1 - return hex_hash - - async def broadcast_transaction_validated(self, raw_tx, live_run): - self.bp.validate_ft_rules_raw_tx(raw_tx) - if live_run: - hex_hash = await self.daemon.broadcast_transaction(raw_tx) - self.txs_sent += 1 - return hex_hash - else: - tx, tx_hash = self.env.coin.DESERIALIZER(bytes.fromhex(raw_tx), 0).read_tx_and_hash() - return hash_to_hex_str(tx_hash) - - async def limited_history(self, hashX): - '''Returns a pair (history, cost). - - History is a sorted list of (tx_hash, height) tuples, or an RPCError.''' - # History DoS limit. Each element of history is about 99 bytes when encoded - # as JSON. - limit = self.env.max_send // 99 - cost = 0.1 - self._history_lookups += 1 - result = self._history_cache.get(hashX) - if result: - self._history_hits += 1 - else: - result = await self.db.limited_history(hashX, limit=limit) - cost += 0.1 + len(result) * 0.001 - if len(result) >= limit: - result = RPCError(BAD_REQUEST, f'history too large', cost=cost) - self._history_cache[hashX] = result - - if isinstance(result, Exception): - raise result - return result, cost - - async def get_history_op(self, hashX, limit=10, offset=0, op=None, reverse=True): - history_data = self._history_op_cache.get(hashX, []) - if not history_data: - history_data = [] - txnum_padding = bytes(8-TXNUM_LEN) - for _key, hist in self.db.history.db.iterator(prefix=hashX, reverse=reverse): - for tx_numb in util.chunks(hist, TXNUM_LEN): - tx_num, = util.unpack_le_uint64(tx_numb + txnum_padding) - op_data = self._tx_num_op_cache.get(tx_num) - if not op_data: - op_prefix_key = b'op' + util.pack_le_uint64(tx_num) - tx_op = self.db.utxo_db.get(op_prefix_key) - if tx_op: - op_data, = util.unpack_le_uint32(tx_op) - self._tx_num_op_cache[tx_num] = op_data - history_data.append({"tx_num": tx_num, "op": op_data}) - self._history_op_cache[hashX] = history_data - if reverse: - history_data.sort(key=lambda x: x['tx_num'], reverse=reverse) - if op: - history_data = list(filter(lambda x: x["op"] == op, history_data)) - else: - history_data = list(filter(lambda x: x["op"], history_data)) - return history_data[offset:limit+offset], len(history_data) - - # Analysis the transaction detail by txid. 
- # See BlockProcessor.op_list for the complete op list. - async def get_transaction_detail(self, txid: str, height=None, tx_num=-1): - tx_hash = hex_str_to_hash(txid) - res = self._tx_detail_cache.get(tx_hash) - if res: - # txid maybe the same, this key should add height add key prefix - self.logger.debug(f"read transation detail from cache {txid}") - return res - if not height: - tx_num, height = self.db.get_tx_num_height_from_tx_hash(tx_hash) - - raw_tx = self.db.get_raw_tx_by_tx_hash(tx_hash) - if not raw_tx: - raw_tx = await self.daemon_request('getrawtransaction', txid, False) - raw_tx = bytes.fromhex(raw_tx) - tx, _tx_hash = self.env.coin.DESERIALIZER(raw_tx, 0).read_tx_and_hash() - assert tx_hash == _tx_hash - ops = self.db.get_op_by_tx_num(tx_num) - op_raw = self.bp.op_list_vk[ops[0]] if ops else "" - - operation_found_at_inputs = parse_protocols_operations_from_witness_array(tx, tx_hash, True) - atomicals_spent_at_inputs = self.bp.build_atomicals_spent_at_inputs_for_validation_only(tx) - atomicals_receive_at_outputs = self.bp.build_atomicals_receive_at_ouutput_for_validation_only(tx, tx_hash) - blueprint_builder = AtomicalsTransferBlueprintBuilder( - self.logger, - atomicals_spent_at_inputs, - operation_found_at_inputs, - tx_hash, - tx, - self.bp.get_atomicals_id_mint_info, - self.bp.is_dmint_activated(height), - self.bp.is_custom_coloring_activated(height), - ) - is_burned = blueprint_builder.are_fts_burned - is_cleanly_assigned = blueprint_builder.cleanly_assigned - # format burned_fts - raw_burned_fts = blueprint_builder.get_fts_burned() - burned_fts = {} - for ft_key, ft_value in raw_burned_fts.items(): - burned_fts[location_id_bytes_to_compact(ft_key)] = ft_value - - res = { - "txid": txid, - "height": height, - "tx_num": tx_num, - "info": {}, - "transfers": { - "inputs": {}, - "outputs": {}, - "is_burned": is_burned, - "burned_fts": burned_fts, - "is_cleanly_assigned": is_cleanly_assigned - } - } - operation_type = operation_found_at_inputs.get("op", "") if operation_found_at_inputs else "" - if operation_found_at_inputs: - payload = operation_found_at_inputs.get("payload") - payload_not_none = payload or {} - res["info"]["payload"] = payload_not_none - if blueprint_builder.is_mint and operation_type in ["dmt", "ft"]: - expected_output_index = 0 - txout = tx.outputs[expected_output_index] - location = tx_hash + util.pack_le_uint32(expected_output_index) - # if save into the db, it means mint success - has_atomicals = self.db.get_atomicals_by_location_long_form(location) - if len(has_atomicals): - ticker_name = payload_not_none.get("args", {}).get("mint_ticker", "") - status, candidate_atomical_id, _ = self.bp.get_effective_ticker(ticker_name, self.bp.height) - if status: - atomical_id = location_id_bytes_to_compact(candidate_atomical_id) - res["info"] = { - "atomical_id": atomical_id, - "location_id": location_id_bytes_to_compact(location), - "payload": payload, - "outputs": { - expected_output_index: [{ - "address": get_address_from_output_script(txout.pk_script), - "atomical_id": atomical_id, - "type": "FT", - "index": expected_output_index, - "value": txout.value - }] - } - } - elif operation_type == "nft": - if atomicals_receive_at_outputs: - expected_output_index = 0 - location = tx_hash + util.pack_le_uint32(expected_output_index) - txout = tx.outputs[expected_output_index] - atomical_id = location_id_bytes_to_compact( - atomicals_receive_at_outputs[expected_output_index][-1]["atomical_id"] - ) - res["info"] = { - "atomical_id": atomical_id, - "location_id": 
location_id_bytes_to_compact(location), - "payload": payload, - "outputs": { - expected_output_index: [{ - "address": get_address_from_output_script(txout.pk_script), - "atomical_id": atomical_id, - "type": "NFT", - "index": expected_output_index, - "value": txout.value - }] - } - } - # no operation_found_at_inputs, it will be transfer. - if blueprint_builder.ft_atomicals and atomicals_spent_at_inputs: - if not operation_type and not op_raw: - op_raw = "transfer" - for atomical_id, input_ft in blueprint_builder.ft_atomicals.items(): - compact_atomical_id = location_id_bytes_to_compact(atomical_id) - for i in input_ft.input_indexes: - prev_txid = hash_to_hex_str(tx.inputs[i.txin_index].prev_hash) - prev_raw_tx = self.db.get_raw_tx_by_tx_hash(hex_str_to_hash(prev_txid)) - if not prev_raw_tx: - prev_raw_tx = await self.daemon_request('getrawtransaction', prev_txid, False) - prev_raw_tx = bytes.fromhex(prev_raw_tx) - self.bp.general_data_cache[b'rtx' + hex_str_to_hash(prev_txid)] = prev_raw_tx - prev_tx, _ = self.env.coin.DESERIALIZER(prev_raw_tx, 0).read_tx_and_hash() - ft_data = { - "address": get_address_from_output_script(prev_tx.outputs[tx.inputs[i.txin_index].prev_idx].pk_script), - "atomical_id": compact_atomical_id, - "type": "FT", - "index": i.txin_index, - "value": prev_tx.outputs[tx.inputs[i.txin_index].prev_idx].value - } - if i.txin_index not in res["transfers"]["inputs"]: - res["transfers"]["inputs"][i.txin_index] = [ft_data] - else: - res["transfers"]["inputs"][i.txin_index].append(ft_data) - for k, v in blueprint_builder.ft_output_blueprint.outputs.items(): - for atomical_id, output_ft in v['atomicals'].items(): - compact_atomical_id = location_id_bytes_to_compact(atomical_id) - ft_data = { - "address": get_address_from_output_script(tx.outputs[k].pk_script), - "atomical_id": compact_atomical_id, - "type": "FT", - "index": k, - "value": output_ft.sat_value - } - if k not in res["transfers"]["outputs"]: - res["transfers"]["outputs"][k] = [ft_data] - else: - res["transfers"]["outputs"][k].append(ft_data) - if blueprint_builder.nft_atomicals and atomicals_spent_at_inputs: - if not operation_type and not op_raw: - op_raw = "transfer" - for atomical_id, input_nft in blueprint_builder.nft_atomicals.items(): - compact_atomical_id = location_id_bytes_to_compact(atomical_id) - for i in input_nft.input_indexes: - prev_txid = hash_to_hex_str(tx.inputs[i.txin_index].prev_hash) - prev_raw_tx = self.db.get_raw_tx_by_tx_hash(hex_str_to_hash(prev_txid)) - if not prev_raw_tx: - prev_raw_tx = await self.daemon_request('getrawtransaction', prev_txid, False) - prev_raw_tx = bytes.fromhex(prev_raw_tx) - self.bp.general_data_cache[b'rtx' + hex_str_to_hash(prev_txid)] = prev_raw_tx - prev_tx, _ = self.env.coin.DESERIALIZER(prev_raw_tx, 0).read_tx_and_hash() - nft_data = { - "address": get_address_from_output_script(prev_tx.outputs[tx.inputs[i.txin_index].prev_idx].pk_script), - "atomical_id": compact_atomical_id, - "type": "NFT", - "index": i.txin_index, - "value": prev_tx.outputs[tx.inputs[i.txin_index].prev_idx].value - } - if i.txin_index not in res["transfers"]["inputs"]: - res["transfers"]["inputs"][i.txin_index] = [nft_data] - else: - res["transfers"]["inputs"][i.txin_index].append(nft_data) - for k, v in blueprint_builder.nft_output_blueprint.outputs.items(): - for atomical_id, output_nft in v['atomicals'].items(): - compact_atomical_id = location_id_bytes_to_compact(atomical_id) - nft_data = { - "address": get_address_from_output_script(tx.outputs[k].pk_script), - "atomical_id": 
compact_atomical_id, - "type": output_nft.type, - "index": k, - "value": output_nft.total_satsvalue - } - if k not in res["transfers"]["outputs"]: - res["transfers"]["outputs"][k] = [nft_data] - else: - res["transfers"]["outputs"][k].append(nft_data) - - atomical_id_for_payment, payment_marker_idx, _ = AtomicalsTransferBlueprintBuilder.get_atomical_id_for_payment_marker_if_found(tx) - if atomical_id_for_payment: - res["info"]["payment"] = { - "atomical_id": location_id_bytes_to_compact(atomical_id_for_payment), - "payment_marker_idx": payment_marker_idx - } - - if op_raw and height: - self._tx_detail_cache[tx_hash] = res - res["op"] = op_raw - - # Recursively encode the result. - return auto_encode_bytes_elements(res) - - async def transaction_global( - self, - limit: int = 10, - offset: int = 0, - op_type: Optional[str] = None, - reverse: bool = True - ): - height = self.bp.height - res = [] - count = 0 - history_list = [] - for current_height in range(height, self.env.coin.ATOMICALS_ACTIVATION_HEIGHT, -1): - txs = self.db.get_atomicals_block_txs(current_height) - for tx in txs: - tx_num, _ = self.db.get_tx_num_height_from_tx_hash(hex_str_to_hash(tx)) - history_list.append({ - "tx_num": tx_num, - "tx_hash": tx, - "height": current_height - }) - count += 1 - if count >= offset + limit: - break - history_list.sort(key=lambda x: x['tx_num'], reverse=reverse) - - for history in history_list: - data = await self.get_transaction_detail(history["tx_hash"], history["height"], history["tx_num"]) - if (op_type and op_type == data["op"]) or (not op_type and data["op"]): - res.append(data) - total = len(res) - return {"result": res[offset:offset+limit], "total": total, "limit": limit, "offset": offset} - - async def _notify_sessions(self, height, touched): - '''Notify sessions about height changes and touched addresses.''' - height_changed = height != self.notified_height - if height_changed: - await self._refresh_hsub_results(height) - # Invalidate all history caches since they rely on block heights - self._history_cache.clear() - # Invalidate our op cache for touched hashXs - op_cache = self._history_op_cache - for hashX in set(op_cache).intersection(touched): - op_cache.pop(hashX, None) - self.logger.info(f"refresh op cache {self.notified_height}") - time.sleep(2) - background_task = asyncio.create_task(self.get_history_op(hashX, 10, 0, None, True)) - await background_task - - for session in self.sessions: - if self._task_group.joined: # this can happen during shutdown - self.logger.warning(f"task group already terminated. 
not notifying sessions.") - return - await self._task_group.spawn(session.notify, touched, height_changed) - - def _ip_addr_group_name(self, session) -> Optional[str]: - host = session.remote_address().host - if isinstance(host, (IPv4Address, IPv6Address)): - if host.is_private: # exempt private addresses - return None - if isinstance(host, IPv4Address): - subnet_size = self.env.session_group_by_subnet_ipv4 - subnet = IPv4Network(host).supernet(prefixlen_diff=32 - subnet_size) - return str(subnet) - elif isinstance(host, IPv6Address): - subnet_size = self.env.session_group_by_subnet_ipv6 - subnet = IPv6Network(host).supernet(prefixlen_diff=128 - subnet_size) - return str(subnet) - return 'unknown_addr' - - def _session_group(self, name: Optional[str], weight: float) -> Optional[SessionGroup]: - if name is None: - return None - group = self.session_groups.get(name) - if not group: - group = SessionGroup(name, weight, set(), 0) - self.session_groups[name] = group - return group - - def add_session(self, session): - self.session_event.set() - # Return the session groups - groups = ( - self._session_group(self._ip_addr_group_name(session), 1.0), - ) - groups = tuple(group for group in groups if group is not None) - self.sessions[session] = groups - for group in groups: - group.sessions.add(session) - - def remove_session(self, session): - '''Remove a session from our sessions list if there.''' - self.session_event.set() - groups = self.sessions.pop(session) - for group in groups: - group.retained_cost += session.cost - group.sessions.remove(session) - - -class SessionBase(RPCSession): - '''Base class of ElectrumX JSON sessions. - - Each session runs its tasks in asynchronous parallelism with other - sessions. - ''' - - MAX_CHUNK_SIZE = 2016 - session_counter = itertools.count() - log_new = False - - def __init__( - self, - session_mgr: 'SessionManager', - db: 'DB', - mempool: 'MemPool', - peer_mgr: 'PeerManager', - kind: str, - transport, - ): - connection = JSONRPCConnection(JSONRPCAutoDetect) - super().__init__(transport, connection=connection) - self.session_mgr = session_mgr - self.db = db - self.mempool = mempool - self.peer_mgr = peer_mgr - self.kind = kind # 'RPC', 'TCP' etc. 
- self.env = session_mgr.env - self.coin = self.env.coin - self.client = 'unknown' - self.anon_logs = self.env.anon_logs - self.txs_sent = 0 - self.log_me = SessionBase.log_new - self.session_id = None - self.daemon_request = self.session_mgr.daemon_request - self.session_id = next(self.session_counter) - context = {'conn_id': f'{self.session_id}'} - logger = util.class_logger(__name__, self.__class__.__name__) - self.logger = util.ConnectionLogger(logger, context) - self.logger.info(f'{self.kind} {self.remote_address_string()}, ' - f'{self.session_mgr.session_count():,d} total') - self.session_mgr.add_session(self) - self.recalc_concurrency() # must be called after session_mgr.add_session - - async def notify(self, touched, height_changed): - pass - - def default_framer(self): - return NewlineFramer(max_size=self.env.max_recv) - - def remote_address_string(self, *, for_log=True): - '''Returns the peer's IP address and port as a human-readable - string, respecting anon logs if the output is for a log.''' - if for_log and self.anon_logs: - return 'xx.xx.xx.xx:xx' - return str(self.remote_address()) - - def flags(self): - '''Status flags.''' - status = self.kind[0] - if self.is_closing(): - status += 'C' - if self.log_me: - status += 'L' - status += str(self._incoming_concurrency.max_concurrent) - return status - - async def connection_lost(self): - '''Handle client disconnection.''' - await super().connection_lost() - self.session_mgr.remove_session(self) - msg = '' - if self._incoming_concurrency.max_concurrent < self.initial_concurrent * 0.8: - msg += ' whilst throttled' - if self.send_size >= 1_000_000: - msg += f'. Sent {self.send_size:,d} bytes in {self.send_count:,d} messages' - if msg: - msg = 'disconnected' + msg - self.logger.info(msg) - - def sub_count(self): - return 0 - - async def handle_request(self, request): - """Handle an incoming request. ElectrumX doesn't receive - notifications from client sessions. - """ - if isinstance(request, Request): - handler = self.request_handlers.get(request.method) - method = request.method - args = request.args - else: - handler = None - method = 'invalid method' - args = None - self.logger.debug(f'Session request handling: [method] {method}, [args] {args}') - - # If DROP_CLIENT_UNKNOWN is enabled, check if the client identified - # by calling server.version previously. 
If not, disconnect the session - if self.env.drop_client_unknown and method != 'server.version' and self.client == 'unknown': - self.logger.info(f'disconnecting because client is unknown') - raise ReplyAndDisconnect( - BAD_REQUEST, f'use server.version to identify client') - - self.session_mgr._method_counts[method] += 1 - coro = handler_invocation(handler, request)() - return await coro - - -class ElectrumX(SessionBase): - '''A TCP server that handles incoming Electrum connections.''' - - PROTOCOL_MIN = (1, 4) - PROTOCOL_MAX = (1, 4, 3) - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.subscribe_headers = False - self.connection.max_response_size = self.env.max_send - self.hashX_subs = {} - self.sv_seen = False - self.mempool_statuses = {} - self.set_request_handlers(self.PROTOCOL_MIN) - self.is_peer = False - self.cost = 5.0 # Connection cost - - @classmethod - def protocol_min_max_strings(cls): - return [util.version_string(ver) - for ver in (cls.PROTOCOL_MIN, cls.PROTOCOL_MAX)] - - @classmethod - def server_features(cls, env): - '''Return the server features dictionary.''' - hosts_dict = {} - for service in env.report_services: - port_dict = hosts_dict.setdefault(str(service.host), {}) - if service.protocol not in port_dict: - port_dict[f'{service.protocol}_port'] = service.port - - min_str, max_str = cls.protocol_min_max_strings() - return { - 'hosts': hosts_dict, - 'pruning': None, - 'server_version': electrumx.version, - 'protocol_min': min_str, - 'protocol_max': max_str, - 'genesis_hash': env.coin.GENESIS_HASH, - 'hash_function': 'sha256', - 'services': [str(service) for service in env.report_services], - } - - async def server_features_async(self): - self.bump_cost(0.2) - return self.server_features(self.env) - - @classmethod - def server_version_args(cls): - '''The arguments to a server.version RPC call to a peer.''' - return [electrumx.version, cls.protocol_min_max_strings()] - - def protocol_version_string(self): - return util.version_string(self.protocol_tuple) - - def extra_cost(self): - return self.session_mgr.extra_cost(self) - - def on_disconnect_due_to_excessive_session_cost(self): - remote_addr = self.remote_address() - ip_addr = remote_addr.host if remote_addr else None - groups = self.session_mgr.sessions[self] - group_names = [group.name for group in groups] - self.logger.info(f"closing session over res usage. ip: {ip_addr}. groups: {group_names}") - - def sub_count(self): - return len(self.hashX_subs) - - def unsubscribe_hashX(self, hashX): - self.mempool_statuses.pop(hashX, None) - return self.hashX_subs.pop(hashX, None) - - async def notify(self, touched, height_changed): - '''Wrap _notify_inner; websockets raises exceptions for unclear reasons.''' - try: - async with timeout_after(30): - await self._notify_inner(touched, height_changed) - except TaskTimeout: - self.logger.warning('timeout notifying client, closing...') - await self.close(force_after=1.0) - except Exception: - self.logger.exception('unexpected exception notifying client') - - async def _notify_inner(self, touched, height_changed): - '''Notify the client about changes to touched addresses (from mempool - updates or new blocks) and height. 
- ''' - if height_changed and self.subscribe_headers: - args = (await self.subscribe_headers_result(), ) - await self.send_notification('blockchain.headers.subscribe', args) - - touched = touched.intersection(self.hashX_subs) - if touched or (height_changed and self.mempool_statuses): - changed = {} - - for hashX in touched: - alias = self.hashX_subs.get(hashX) - if alias: - status = await self.subscription_address_status(hashX) - changed[alias] = status - - # Check mempool hashXs - the status is a function of the confirmed state of - # other transactions. - mempool_statuses = self.mempool_statuses.copy() - for hashX, old_status in mempool_statuses.items(): - alias = self.hashX_subs.get(hashX) - if alias: - status = await self.subscription_address_status(hashX) - if status != old_status: - changed[alias] = status - - method = 'blockchain.scripthash.subscribe' - for alias, status in changed.items(): - await self.send_notification(method, (alias, status)) - - if changed: - es = '' if len(changed) == 1 else 'es' - self.logger.info(f'notified of {len(changed):,d} address{es}') - - async def subscribe_headers_result(self): - '''The result of a header subscription or notification.''' - return self.session_mgr.hsub_results - - async def headers_subscribe(self): - '''Subscribe to get raw headers of new blocks.''' - if not self.subscribe_headers: - self.subscribe_headers = True - self.bump_cost(0.25) - return await self.subscribe_headers_result() - - async def add_peer(self, features): - '''Add a peer (but only if the peer resolves to the source).''' - self.is_peer = True - self.bump_cost(100.0) - return await self.peer_mgr.on_add_peer(features, self.remote_address()) - - async def peers_subscribe(self): - '''Return the server peers as a list of (ip, host, details) tuples.''' - self.bump_cost(1.0) - return self.peer_mgr.on_peers_subscribe(self.is_tor()) - - async def address_status(self, hashX): - '''Returns an address status. - - Status is a hex string, but must be None if there is no history. 
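
# A minimal sketch (illustrative, not part of the patch) of how the address
# status below is derived: confirmed history contributes 'txid:height:'
# pairs, mempool entries contribute 'txid:-1:' or 'txid:0:' depending on
# unconfirmed inputs, and the concatenation is hashed. The inputs here are
# assumed example values.
#
#     from hashlib import sha256
#
#     confirmed = [('ab' * 32, 100000)]  # (tx_hash_hex, height)
#     mempool = [('cd' * 32, True)]      # (tx_hash_hex, has_unconfirmed_inputs)
#
#     status = ''.join(f'{h}:{height:d}:' for h, height in confirmed)
#     status += ''.join(f'{h}:{-unconf:d}:' for h, unconf in mempool)
#     status = sha256(status.encode()).hex() if status else None
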
- ''' - # Note history is ordered and mempool unordered in electrum-server - # For mempool, height is -1 if it has unconfirmed inputs, otherwise 0 - db_history, cost = await self.session_mgr.limited_history(hashX) - mempool = await self.mempool.transaction_summaries(hashX) - - status = ''.join(f'{hash_to_hex_str(tx_hash)}:' - f'{height:d}:' - for tx_hash, height in db_history) - status += ''.join(f'{hash_to_hex_str(tx.hash)}:' - f'{-tx.has_unconfirmed_inputs:d}:' - for tx in mempool) - - # Add status hashing cost - self.bump_cost(cost + 0.1 + len(status) * 0.00002) - - if status: - status = sha256(status.encode()).hex() - else: - status = None - - if mempool: - self.mempool_statuses[hashX] = status - else: - self.mempool_statuses.pop(hashX, None) - - return status - - async def subscription_address_status(self, hashX): - '''As for address_status, but if it can't be calculated the subscription is - discarded.''' - try: - return await self.address_status(hashX) - except RPCError: - self.unsubscribe_hashX(hashX) - return None - - async def hashX_listunspent(self, hashX): - '''Return the list of UTXOs of a script hash, including mempool - effects.''' - utxos = await self.db.all_utxos(hashX) - utxos = sorted(utxos) - utxos.extend(await self.mempool.unordered_UTXOs(hashX)) - self.bump_cost(1.0 + len(utxos) / 50) - spends = await self.mempool.potential_spends(hashX) - returned_utxos = [] - for utxo in utxos: - if (utxo.tx_hash, utxo.tx_pos) in spends: - continue - atomicals = self.db.get_atomicals_by_utxo(utxo, True) - atomicals_basic_infos = {} - for atomical_id in atomicals: - # This call is efficient in that it's cached underneath. - # Now we only show the atomical id and its corresponding value - # because it can always be fetched separately which is more efficient. 
-                # TODO: need to combine mempool atomicals.
-                atomical_id_compact = location_id_bytes_to_compact(atomical_id)
-                location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos)
-                atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id)
-            returned_utxos.append({
-                'txid': hash_to_hex_str(utxo.tx_hash),
-                'tx_hash': hash_to_hex_str(utxo.tx_hash),
-                'index': utxo.tx_pos,
-                'tx_pos': utxo.tx_pos,
-                'vout': utxo.tx_pos,
-                'height': utxo.height,
-                'value': utxo.value,
-                'atomicals': atomicals_basic_infos
-            })
-        return returned_utxos
-
-    # Get an atomical_id from an atomical inscription number.
-    def get_atomical_id_by_atomical_number(self, atomical_number):
-        return self.db.get_atomical_id_by_atomical_number(atomical_number)
-
-    # Get atomicals base information from the db, or placeholder information
-    # if the mint is still unconfirmed in the mempool.
-    async def atomical_id_get(self, compact_atomical_id):
-        atomical_id = compact_to_location_id_bytes(compact_atomical_id)
-        atomical = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id)
-        if atomical:
-            return atomical
-        # Check the mempool.
-        atomical_in_mempool = await self.mempool.get_atomical_mint(atomical_id)
-        if atomical_in_mempool is None:
-            raise RPCError(BAD_REQUEST, f'"{compact_atomical_id}" is not found')
-        return atomical_in_mempool
-
-    async def atomical_id_get_ft_info(self, compact_atomical_id):
-        atomical_id = compact_to_location_id_bytes(compact_atomical_id)
-        atomical = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id)
-        if atomical['subtype'] == 'decentralized':
-            atomical = await self.session_mgr.bp.get_dft_mint_info_rpc_format_by_atomical_id(atomical_id)
-        elif atomical['subtype'] == 'direct':
-            atomical = await self.session_mgr.bp.get_ft_mint_info_rpc_format_by_atomical_id(atomical_id)
-        else:
-            raise RPCError(BAD_REQUEST, f'"{compact_atomical_id}" is not a fungible token (FT)')
-
-        if atomical:
-            return atomical
-        # Check the mempool.
-        atomical_in_mempool = await self.mempool.get_atomical_mint(atomical_id)
-        if atomical_in_mempool is None:
-            raise RPCError(BAD_REQUEST, f'"{compact_atomical_id}" is not found')
-        return atomical_in_mempool
-
-    async def atomical_id_get_state(self, compact_atomical_id, Verbose=False):
-        atomical_id = compact_to_location_id_bytes(compact_atomical_id)
-        atomical = await self.atomical_id_get(compact_atomical_id)
-        height = self.session_mgr.bp.height
-        self.db.populate_extended_mod_state_latest_atomical_info(atomical_id, atomical, height)
-        await self.db.populate_extended_location_atomical_info(atomical_id, atomical)
-        return atomical
-
-    async def atomical_id_get_state_history(self, compact_atomical_id):
-        atomical_id = compact_to_location_id_bytes(compact_atomical_id)
-        atomical = await self.atomical_id_get(compact_atomical_id)
-        height = self.session_mgr.bp.height
-        self.db.populate_extended_mod_state_history_atomical_info(atomical_id, atomical, height)
-        await self.db.populate_extended_location_atomical_info(atomical_id, atomical)
-        return atomical
-
-    async def atomical_id_get_events(self, compact_atomical_id):
-        atomical_id = compact_to_location_id_bytes(compact_atomical_id)
-        atomical = await self.atomical_id_get(compact_atomical_id)
-        height = self.session_mgr.bp.height
-        self.db.populate_extended_events_atomical_info(atomical_id, atomical, height)
-        await self.db.populate_extended_location_atomical_info(atomical_id, atomical)
-        return atomical
-
-    async def atomical_id_get_tx_history(self,
compact_atomical_id): - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.atomical_id_get(compact_atomical_id) - history = await self.scripthash_get_history(hash_to_hex_str(double_sha256(atomical_id))) - history.sort(key=lambda x: x['height'], reverse=True) - - atomical['tx'] = { - 'history': history - } - return atomical - - async def atomical_id_get_location(self, compact_atomical_id): - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.atomical_id_get(compact_atomical_id) - await self.db.populate_extended_location_atomical_info(atomical_id, atomical) - return atomical - - async def get_summary_info(self, atomical_hash_count=10): - - if atomical_hash_count and atomical_hash_count > 100000: - atomical_hash_count = 100000 - - db_height = self.db.db_height - last_block_hash = self.db.get_atomicals_block_hash(db_height) - ret = { - 'coin': self.env.coin.__name__, - 'network': self.coin.NET, - 'height': db_height, - 'block_tip': hash_to_hex_str(self.db.db_tip), - 'server_time': datetime.datetime.now().isoformat(), - 'atomicals_block_tip': last_block_hash, - 'atomical_count': self.db.db_atomical_count - } - - list_hashes = {} - ret['atomicals_block_hashes'] = {} - # ret['atomicals_block_hashes'][db_height] = last_block_hash - for i in range(atomical_hash_count): - next_db_height = db_height - i - nextblockhash = self.db.get_atomicals_block_hash(next_db_height) - ret['atomicals_block_hashes'][next_db_height] = nextblockhash - return ret - - async def atomicals_list_get(self, limit, offset, asc): - atomicals = await self.db.get_atomicals_list(limit, offset, asc) - atomicals_populated = [] - for atomical_id in atomicals: - atomical = await self.atomical_id_get(location_id_bytes_to_compact(atomical_id)) - atomicals_populated.append(atomical) - return {'global': await self.get_summary_info(), 'result': atomicals_populated } - - async def atomicals_num_to_id(self, limit, offset, asc): - atomicals_num_to_id_map = await self.db.get_num_to_id(limit, offset, asc) - atomicals_num_to_id_map_reformatted = {} - for num, id in atomicals_num_to_id_map.items(): - atomicals_num_to_id_map_reformatted[num] = location_id_bytes_to_compact(id) - return {'global': await self.get_summary_info(), 'result': atomicals_num_to_id_map_reformatted } - - async def atomicals_block_hash(self, height): - if not height: - height = self.session_mgr.bp.height - block_hash = self.db.get_atomicals_block_hash(height) - return {'result': block_hash} - - async def atomicals_block_txs(self, height): - tx_list = self.session_mgr.bp.get_atomicals_block_txs(height) - return {'global': await self.get_summary_info(), 'result': tx_list } - - async def hashX_subscribe(self, hashX, alias): - # Store the subscription only after address_status succeeds - result = await self.address_status(hashX) - self.hashX_subs[hashX] = alias - return result - - async def get_balance(self, hashX): - utxos = await self.db.all_utxos(hashX) - confirmed = sum(utxo.value for utxo in utxos) - unconfirmed = await self.mempool.balance_delta(hashX) - self.bump_cost(1.0 + len(utxos) / 50) - return {'confirmed': confirmed, 'unconfirmed': unconfirmed} - - async def scripthash_get_balance(self, scripthash): - '''Return the confirmed and unconfirmed balance of a scripthash.''' - hashX = scripthash_to_hashX(scripthash) - return await self.get_balance(hashX) - - async def unconfirmed_history(self, hashX): - # Note unconfirmed history is unordered in electrum-server - # height is -1 if it has unconfirmed 
inputs, otherwise 0
-        result = [{'tx_hash': hash_to_hex_str(tx.hash),
-                   'height': -tx.has_unconfirmed_inputs,
-                   'fee': tx.fee}
-                  for tx in await self.mempool.transaction_summaries(hashX)]
-        self.bump_cost(0.25 + len(result) / 50)
-        return result
-
-    async def confirmed_and_unconfirmed_history(self, hashX):
-        # Note history is ordered but unconfirmed is unordered in electrum-server.
-        history, cost = await self.session_mgr.limited_history(hashX)
-        self.bump_cost(cost)
-        conf = [{'tx_hash': hash_to_hex_str(tx_hash), 'height': height}
-                for tx_hash, height in history]
-        return conf + await self.unconfirmed_history(hashX)
-
-    async def atomicals_listscripthash(self, scripthash, Verbose=False):
-        '''Return the list of Atomical UTXOs for an address.'''
-        hashX = scripthash_to_hashX(scripthash)
-        return await self.hashX_listscripthash_atomicals(hashX, Verbose)
-
-    async def atomicals_list(self, offset, limit, asc):
-        '''Return the list of atomicals ordered by reverse atomical number.'''
-        # Forward the arguments in the order atomicals_list_get expects:
-        # (limit, offset, asc).
-        return await self.atomicals_list_get(limit, offset, asc)
-
-    async def atomicals_get(self, compact_atomical_id_or_atomical_number):
-        compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number)
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get(compact_atomical_id)}
-
-    async def atomicals_dump(self):
-        # The previous `if True:` guard was dead code; dump unconditionally.
-        self.db.dump()
-        return {'result': True}
-
-    async def atomicals_get_dft_mints(self, compact_atomical_id, limit=100, offset=0):
-        atomical_id = compact_to_location_id_bytes(compact_atomical_id)
-        entries = self.session_mgr.bp.get_distmints_by_atomical_id(atomical_id, limit, offset)
-        return {'global': await self.get_summary_info(), 'result': entries}
-
-    async def atomicals_get_ft_info(self, compact_atomical_id_or_atomical_number):
-        compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number)
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_ft_info(compact_atomical_id)}
-
-    async def atomicals_get_global(self, hashes=10):
-        return {'global': await self.get_summary_info(hashes)}
-
-    async def atomicals_get_location(self, compact_atomical_id_or_atomical_number):
-        compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number)
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_location(compact_atomical_id)}
-
-    async def atomical_get_state(self, compact_atomical_id_or_atomical_number, Verbose=False):
-        compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number)
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_state(compact_atomical_id, Verbose)}
-
-    async def atomical_get_state_history(self, compact_atomical_id_or_atomical_number):
-        compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number)
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_state_history(compact_atomical_id)}
-
-    async def atomical_get_events(self, compact_atomical_id_or_atomical_number):
-        compact_atomical_id = self.atomical_resolve_id(compact_atomical_id_or_atomical_number)
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_events(compact_atomical_id)}
-
-    def atomical_resolve_id(self, compact_atomical_id_or_atomical_number):
-        compact_atomical_id = compact_atomical_id_or_atomical_number
-        if not isinstance(compact_atomical_id_or_atomical_number, int) and is_compact_atomical_id(compact_atomical_id_or_atomical_number):
-            assert_atomical_id(compact_atomical_id)
-        else:
-            found_atomical_id = self.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number)
-            if not found_atomical_id:
-                raise RPCError(BAD_REQUEST, f'atomical not found: {compact_atomical_id_or_atomical_number}')
-            compact_atomical_id = location_id_bytes_to_compact(found_atomical_id)
-        return compact_atomical_id
-
-    async def atomicals_get_tx_history(self, compact_atomical_id_or_atomical_number):
-        '''Return the history of an Atomical.
-
-        atomical_id: the mint transaction hash + 'i' of the atomical id
-        verbose: whether to print extended information
-        '''
-        compact_atomical_id = compact_atomical_id_or_atomical_number
-        if not isinstance(compact_atomical_id_or_atomical_number, int) and is_compact_atomical_id(compact_atomical_id_or_atomical_number):
-            assert_atomical_id(compact_atomical_id)
-        else:
-            compact_atomical_id = location_id_bytes_to_compact(self.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number))
-        return {'global': await self.get_summary_info(), 'result': await self.atomical_id_get_tx_history(compact_atomical_id)}
-
-    async def atomicals_get_by_ticker(self, ticker):
-        height = self.session_mgr.bp.height
-        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_ticker(ticker, height)
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
-
-        if candidate_atomical_id:
-            candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
-
-        found_atomical_id = None
-        if status == 'verified':
-            found_atomical_id = candidate_atomical_id
-
-        return_result = {
-            'status': status,
-            'candidate_atomical_id': candidate_atomical_id,
-            'atomical_id': found_atomical_id,
-            'candidates': formatted_entries,
-            'type': 'ticker'
-        }
-        return {
-            'result': return_result
-        }
-
-    async def atomicals_get_by_container(self, container):
-        if not isinstance(container, str):
-            raise RPCError(BAD_REQUEST, 'empty container')
-        height = self.session_mgr.bp.height
-        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, height)
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
-
-        if candidate_atomical_id:
-            candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
-
-        found_atomical_id = None
-        if status == 'verified':
-            found_atomical_id = candidate_atomical_id
-
-        return_result = {
-            'status': status,
-            'candidate_atomical_id': candidate_atomical_id,
-            'atomical_id': found_atomical_id,
-            'candidates': formatted_entries,
-            'type': 'container'
-        }
-        return {
-            'result': return_result
-        }
-
-    def auto_populate_container_regular_items_fields(self, items):
-        if not items or not isinstance(items, dict):
-            return {}
-        for item, value in items.items():
-            provided_id = value.get('id')
-            value['status'] = 'verified'
-            if provided_id and isinstance(provided_id, bytes) and len(provided_id) == 36:
-                value['$id'] = location_id_bytes_to_compact(provided_id)
-        return auto_encode_bytes_elements(items)
-
-    def auto_populate_container_dmint_items_fields(self, items):
-        if not items or not isinstance(items, dict):
-            return {}
-        for item, value in items.items():
-            provided_id = value.get('id')
-            if provided_id and isinstance(provided_id, bytes) and len(provided_id) == 36:
-                value['$id'] =
location_id_bytes_to_compact(provided_id) - return auto_encode_bytes_elements(items) - - async def atomicals_get_container_items(self, container, limit, offset): - if not isinstance(container, str): - raise RPCError(BAD_REQUEST, f'empty container') - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, self.session_mgr.bp.height) - found_atomical_id = None - if status == 'verified': - found_atomical_id = candidate_atomical_id - else: - raise RPCError(BAD_REQUEST, f'Container not found') - - compact_atomical_id = location_id_bytes_to_compact(found_atomical_id) - container_info = await self.atomical_id_get(compact_atomical_id) - # If it is a dmint container then there is no items field, instead construct it from the dmitems - container_dmint_status = container_info.get('$container_dmint_status') - items = [] - if container_dmint_status: - if limit > 100: - limit = 100 - if offset < 0: - offset = 0 - height = self.session_mgr.bp.height - items = await self.session_mgr.bp.get_effective_dmitems_paginated(found_atomical_id, limit, offset, height) - return { - 'result': { - 'container': container_info, - 'item_data': { - 'limit': limit, - 'offset': offset, - 'type': 'dmint', - 'items': self.auto_populate_container_dmint_items_fields(items) - } - } - } - else: - container_mod_history = self.session_mgr.bp.get_mod_history(found_atomical_id, self.session_mgr.bp.height) - current_height_latest_state = calculate_latest_state_from_mod_history(container_mod_history) - items = current_height_latest_state.get('items', []) - return { - 'result': { - 'container': container_info, - 'item_data': { - 'limit': limit, - 'offset': offset, - 'type': 'regular', - 'items': self.auto_populate_container_regular_items_fields(items) - } - } - } - - async def atomicals_get_by_container_item(self, container, item_name): - if not isinstance(container, str): - raise RPCError(BAD_REQUEST, f'empty container') - height = self.session_mgr.bp.height - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_container(container, height) - found_atomical_id = None - formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries)) - if status == 'verified': - found_atomical_id = candidate_atomical_id - else: - self.logger.info(f'formatted_entries {formatted_entries}') - raise RPCError(BAD_REQUEST, f'Container does not exist') - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(found_atomical_id, item_name, height) - found_item_atomical_id = None - formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries)) - if candidate_atomical_id: - candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) - if status == 'verified': - found_item_atomical_id = candidate_atomical_id - return_result = { - 'status': status, - 'candidate_atomical_id': candidate_atomical_id, - 'atomical_id': found_item_atomical_id, - 'candidates': formatted_entries, - 'type': 'item' - } - return { - 'result': return_result - } - - async def atomicals_get_by_container_item_validation(self, container, item_name, bitworkc, bitworkr, main_name, main_hash, proof, check_without_sealed): - if not isinstance(container, str): - raise RPCError(BAD_REQUEST, f'empty container') - height = self.session_mgr.bp.height - status, candidate_atomical_id, all_entries = 
self.session_mgr.bp.get_effective_container(container, height)
-        found_parent_atomical_id = None
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
-        if status == 'verified':
-            found_parent_atomical_id = candidate_atomical_id
-        else:
-            raise RPCError(BAD_REQUEST, 'Container does not exist')
-        compact_atomical_id = location_id_bytes_to_compact(found_parent_atomical_id)
-        container_info = await self.atomical_id_get(compact_atomical_id)
-        # If it is a dmint container then there is no items field; instead, construct it from the dmitems.
-        container_dmint_status = container_info.get('$container_dmint_status')
-        # Check for a dmint status before dereferencing it; reading 'errors'
-        # first would raise AttributeError when the status is missing.
-        if not container_dmint_status:
-            raise RPCError(BAD_REQUEST, 'Container dmint status does not exist')
-        if container_dmint_status.get('status') != 'valid':
-            errors = container_dmint_status.get('errors')
-            if check_without_sealed and errors and len(errors) == 1 and errors[0] == 'container not sealed':
-                pass
-            else:
-                raise RPCError(BAD_REQUEST, 'Container dmint status is invalid')
-
-        dmint = container_dmint_status.get('dmint')
-        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(found_parent_atomical_id, item_name, height)
-        found_item_atomical_id = None
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
-        if candidate_atomical_id:
-            candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
-        if status == 'verified':
-            found_item_atomical_id = candidate_atomical_id
-
-        # Validate the proof data nonetheless.
-        if not proof or not isinstance(proof, list) or len(proof) == 0:
-            raise RPCError(BAD_REQUEST, 'Proof must be provided')
-
-        applicable_rule, state_at_height = self.session_mgr.bp.get_applicable_rule_by_height(found_parent_atomical_id, item_name, height - MINT_SUBNAME_RULES_BECOME_EFFECTIVE_IN_BLOCKS, DMINT_PATH)
-        proof_valid, target_vector, target_hash = validate_merkle_proof_dmint(dmint['merkle'], item_name, bitworkc, bitworkr, main_name, main_hash, proof)
-        if applicable_rule and applicable_rule.get('matched_rule'):
-            applicable_rule = applicable_rule.get('matched_rule')
-
-        return_result = {
-            'status': status,
-            'candidate_atomical_id': candidate_atomical_id,
-            'atomical_id': found_item_atomical_id,
-            'candidates': formatted_entries,
-            'type': 'item',
-            'applicable_rule': applicable_rule,
-            'proof_valid': proof_valid,
-            'target_vector': target_vector,
-            'target_hash': target_hash,
-            'dmint': state_at_height.get('dmint')
-        }
-        return {
-            'result': return_result
-        }
-
-    async def atomicals_get_by_realm(self, name):
-        height = self.session_mgr.bp.height
-        status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_realm(name, height)
-        formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries))
-
-        if candidate_atomical_id:
-            candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id)
-
-        found_atomical_id = None
-        if status == 'verified':
-            found_atomical_id = candidate_atomical_id
-
-        return_result = {
-            'status': status,
-            'candidate_atomical_id': candidate_atomical_id,
-            'atomical_id': found_atomical_id,
-            'candidates': formatted_entries,
-            'type': 'realm'
-        }
-        return {
-            'result': return_result
-        }
-
-    async def atomicals_get_by_subrealm(self, parent_compact_atomical_id_or_atomical_number,
name): - height = self.session_mgr.bp.height - compact_atomical_id_parent = self.atomical_resolve_id(parent_compact_atomical_id_or_atomical_number) - atomical_id_parent = compact_to_location_id_bytes(compact_atomical_id_parent) - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_subrealm(atomical_id_parent, name, height) - formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries)) - - if candidate_atomical_id: - candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) - - found_atomical_id = None - if status == 'verified': - found_atomical_id = candidate_atomical_id - - return_result = { - 'status': status, - 'candidate_atomical_id': candidate_atomical_id, - 'atomical_id': found_atomical_id, - 'candidates': formatted_entries, - 'type': 'subrealm' - } - return { - 'result': return_result - } - - async def atomicals_get_by_dmitem(self, parent_compact_atomical_id_or_atomical_number, name): - height = self.session_mgr.bp.height - compact_atomical_id_parent = self.atomical_resolve_id(parent_compact_atomical_id_or_atomical_number) - atomical_id_parent = compact_to_location_id_bytes(compact_atomical_id_parent) - status, candidate_atomical_id, all_entries = self.session_mgr.bp.get_effective_dmitem(atomical_id_parent, name, height) - formatted_entries = format_name_type_candidates_to_rpc(all_entries, self.session_mgr.bp.build_atomical_id_to_candidate_map(all_entries)) - - if candidate_atomical_id: - candidate_atomical_id = location_id_bytes_to_compact(candidate_atomical_id) - - found_atomical_id = None - if status == 'verified': - found_atomical_id = candidate_atomical_id - - return_result = { - 'status': status, - 'candidate_atomical_id': candidate_atomical_id, - 'atomical_id': found_atomical_id, - 'candidates': formatted_entries, - 'type': 'dmitem' - } - return { - 'result': return_result - } - - # Get a summary view of a realm and if it's allowing mints and what parts already existed of a subrealm - async def atomicals_get_realm_info(self, full_name, Verbose=False): - if not full_name or not isinstance(full_name, str): - raise RPCError(BAD_REQUEST, f'invalid input full_name: {full_name}') - full_name = full_name.lower() - split_names = full_name.split('.') - total_name_parts = len(split_names) - level = 0 - last_found_realm_atomical_id = None - last_found_realm = None - realms_path = [] - latest_all_entries_candidates = [] - height = self.session_mgr.bp.height - for name_part in split_names: - if level == 0: - realm_status, last_found_realm, latest_all_entries_candidates = self.session_mgr.bp.get_effective_realm(name_part, height) - else: - self.logger.info(f'atomicals_get_realm_info {last_found_realm} {name_part}') - realm_status, last_found_realm, latest_all_entries_candidates = self.session_mgr.bp.get_effective_subrealm(last_found_realm, name_part, height) - # stops when it does not found the realm component - if realm_status != 'verified': - break - # Save the latest realm (could be the top level realm, or the parent of a subrealm, or even the subrealm itself) - last_found_realm_atomical_id = last_found_realm - # Add it to the list of paths - realms_path.append({ - 'atomical_id': location_id_bytes_to_compact(last_found_realm), - 'name_part': name_part, - 'candidates': latest_all_entries_candidates - }) - level += 1 - - joined_name = '' - is_first_name_part = True - for name_element in realms_path: - if is_first_name_part: - is_first_name_part = False - else: - 
joined_name += '.' - joined_name += name_element['name_part'] - # Nothing was found - realms_path_len = len(realms_path) - if realms_path_len == 0: - return {'result': { - 'atomical_id': None, - 'top_level_realm_atomical_id': None, - 'top_level_realm_name': None, - 'nearest_parent_realm_atomical_id': None, - 'nearest_parent_realm_name': None, - 'request_full_realm_name': full_name, - 'found_full_realm_name': None, - 'missing_name_parts': full_name, - 'candidates': format_name_type_candidates_to_rpc(latest_all_entries_candidates, self.session_mgr.bp.build_atomical_id_to_candidate_map(latest_all_entries_candidates)) } - } - # Populate the subrealm minting rules for a parent atomical - that = self - def populate_rules_response_struct(parent_atomical_id, struct_to_populate, Verbose): - current_height = that.session_mgr.bp.height - subrealm_mint_mod_history = that.session_mgr.bp.get_mod_history(parent_atomical_id, current_height) - current_height_latest_state = calculate_latest_state_from_mod_history(subrealm_mint_mod_history) - current_height_rules_list = validate_rules_data(current_height_latest_state.get(SUBREALM_MINT_PATH, None)) - nearest_parent_realm_subrealm_mint_allowed = False - struct_to_populate['nearest_parent_realm_subrealm_mint_rules'] = { - 'nearest_parent_realm_atomical_id': location_id_bytes_to_compact(parent_atomical_id), - 'current_height': current_height, - 'current_height_rules': current_height_rules_list - } - if current_height_rules_list and len(current_height_rules_list) > 0: - nearest_parent_realm_subrealm_mint_allowed = True - struct_to_populate['nearest_parent_realm_subrealm_mint_allowed'] = nearest_parent_realm_subrealm_mint_allowed - # - # - # - # At least the top level realm was found if we got this far - # - # - # The number of realms returned and name components is equal, therefore the subrealm was found correctly - if realms_path_len == total_name_parts: - nearest_parent_realm_atomical_id = None - nearest_parent_realm_name = None - top_level_realm = realms_path[0]['atomical_id'] - top_level_realm_name = realms_path[0]['name_part'] - if realms_path_len >= 2: - nearest_parent_realm_atomical_id = realms_path[-2]['atomical_id'] - nearest_parent_realm_name = realms_path[-2]['name_part'] - elif realms_path_len == 1: - nearest_parent_realm_atomical_id = top_level_realm - nearest_parent_realm_name = top_level_realm_name - final_subrealm_name = split_names[-1] - applicable_rule_map = self.session_mgr.bp.build_applicable_rule_map(latest_all_entries_candidates, compact_to_location_id_bytes(nearest_parent_realm_atomical_id), final_subrealm_name) - return_struct = { - 'atomical_id': realms_path[-1]['atomical_id'], - 'top_level_realm_atomical_id': top_level_realm, - 'top_level_realm_name': top_level_realm_name, - 'nearest_parent_realm_atomical_id': nearest_parent_realm_atomical_id, - 'nearest_parent_realm_name': nearest_parent_realm_name, - 'request_full_realm_name': full_name, - 'found_full_realm_name': joined_name, - 'missing_name_parts': None, - 'candidates': format_name_type_candidates_to_rpc(latest_all_entries_candidates, self.session_mgr.bp.build_atomical_id_to_candidate_map(latest_all_entries_candidates)) - } - populate_rules_response_struct(compact_to_location_id_bytes(nearest_parent_realm_atomical_id), return_struct, Verbose) - return {'result': return_struct} - - # The number of realms and components do not match, that is because at least the top level realm or intermediate subrealm was found - # But the final subrealm does not exist yet - # if realms_path_len < 
total_name_parts: - # It is known if we got this far that realms_path_len < total_name_parts - nearest_parent_realm_atomical_id = None - nearest_parent_realm_name = None - top_level_realm = realms_path[0]['atomical_id'] - top_level_realm_name = realms_path[0]['name_part'] - if realms_path_len >= 2: - nearest_parent_realm_atomical_id = realms_path[-1]['atomical_id'] - nearest_parent_realm_name = realms_path[-1]['name_part'] - elif realms_path_len == 1: - nearest_parent_realm_atomical_id = top_level_realm - nearest_parent_realm_name = top_level_realm_name - - missing_name_parts = '.'.join(split_names[ len(realms_path):]) - final_subrealm_name = split_names[-1] - applicable_rule_map = self.session_mgr.bp.build_applicable_rule_map(latest_all_entries_candidates, compact_to_location_id_bytes(nearest_parent_realm_atomical_id), final_subrealm_name) - return_struct = { - 'atomical_id': None, - 'top_level_realm_atomical_id': top_level_realm, - 'top_level_realm_name': top_level_realm_name, - 'nearest_parent_realm_atomical_id': nearest_parent_realm_atomical_id, - 'nearest_parent_realm_name': nearest_parent_realm_name, - 'request_full_realm_name': full_name, - 'found_full_realm_name': joined_name, - 'missing_name_parts': missing_name_parts, - 'final_subrealm_name': final_subrealm_name, - 'candidates': format_name_type_candidates_to_rpc_for_subname(latest_all_entries_candidates, self.session_mgr.bp.build_atomical_id_to_candidate_map(latest_all_entries_candidates)) - } - if Verbose: - populate_rules_response_struct(compact_to_location_id_bytes(nearest_parent_realm_atomical_id), return_struct, Verbose) - return {'result': return_struct} - - # Perform a search for tickers, containers, and realms - def atomicals_search_name_template(self, db_prefix, name_type_str, parent_prefix=None, prefix=None, Reverse=False, Limit=1000, Offset=0, is_verified_only=False): - db_entries = self.db.get_name_entries_template_limited(db_prefix, parent_prefix, prefix, Reverse, Limit, Offset) - formatted_results = [] - for item in db_entries: - if name_type_str == "ticker": - status, _, _ = self.session_mgr.bp.get_effective_name_template(b'tick', item['name'], self.session_mgr.bp.height, self.session_mgr.bp.ticker_data_cache) - elif name_type_str == "realm": - status, _, _ = self.session_mgr.bp.get_effective_name_template(b'rlm', item['name'], self.session_mgr.bp.height, self.session_mgr.bp.realm_data_cache) - elif name_type_str == "collection": - status, _, _ = self.session_mgr.bp.get_effective_name_template(b'co', item['name'], self.session_mgr.bp.height, self.session_mgr.bp.container_data_cache) - elif name_type_str == "subrealm": - status, _, _ = self.session_mgr.bp.get_effective_subrealm(parent_prefix, item['name'], self.session_mgr.bp.height) - obj = { - 'atomical_id': location_id_bytes_to_compact(item['atomical_id']), - 'tx_num': item['tx_num'] - } - obj[name_type_str + '_hex'] = item['name_hex'] - obj[name_type_str] = item['name'] - obj['status'] = status - if is_verified_only and status == "verified": - formatted_results.append(obj) - elif not is_verified_only: - formatted_results.append(obj) - return {'result': formatted_results} - - async def atomicals_search_tickers(self, prefix=None, Reverse=False, Limit=100, Offset=0, is_verified_only=False): - if isinstance(prefix, str): - prefix = prefix.encode() - return self.atomicals_search_name_template(b'tick', 'ticker', None, prefix, Reverse, Limit, Offset, is_verified_only) - - async def atomicals_search_realms(self, prefix=None, Reverse=False, Limit=100, Offset=0, 
is_verified_only=False): - if isinstance(prefix, str): - prefix = prefix.encode() - return self.atomicals_search_name_template(b'rlm', 'realm', None, prefix, Reverse, Limit, Offset, is_verified_only) - - async def atomicals_search_subrealms(self, parent_realm_id_compact, prefix=None, Reverse=False, Limit=100, Offset=0, is_verified_only=False): - parent_realm_id_long_form = compact_to_location_id_bytes(parent_realm_id_compact) - if isinstance(prefix, str): - prefix = prefix.encode() - return self.atomicals_search_name_template(b'srlm', 'subrealm', parent_realm_id_long_form, prefix, Reverse, Limit, Offset, is_verified_only) - - async def atomicals_search_containers(self, prefix=None, Reverse=False, Limit=100, Offset=0, is_verified_only=False): - if isinstance(prefix, str): - prefix = prefix.encode() - return self.atomicals_search_name_template(b'co', 'collection', None, prefix, Reverse, Limit, Offset, is_verified_only) - - async def atomicals_at_location(self, compact_location_id): - '''Return the Atomicals at a specific location id``` - ''' - atomical_basic_infos = [] - atomicals_found_at_location = self.db.get_atomicals_by_location_extended_info_long_form(compact_to_location_id_bytes(compact_location_id)) - for atomical_id in atomicals_found_at_location['atomicals']: - atomical_basic_info = self.session_mgr.bp.get_atomicals_id_mint_info_basic_struct(atomical_id) - atomical_basic_info['value'] = self.db.get_uxto_atomicals_value(compact_to_location_id_bytes(compact_location_id), atomical_id) - atomical_basic_infos.append(atomical_basic_info) - return { - 'location_info': atomicals_found_at_location['location_info'], - 'atomicals': atomical_basic_infos - } - - async def atomicals_get_ft_balances(self, scripthash): - '''Return the FT balances for a scripthash address''' - hashX = scripthash_to_hashX(scripthash) - return await self.hashX_ft_balances_atomicals(hashX) - - async def atomicals_get_nft_balances(self, scripthash): - '''Return the NFT balances for a scripthash address''' - hashX = scripthash_to_hashX(scripthash) - return await self.hashX_nft_balances_atomicals(hashX) - - async def atomicals_get_holders(self, compact_atomical_id, limit=50, offset=0): - '''Return the holder by a specific location id``` - ''' - formatted_results = [] - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - atomical = await self.atomical_id_get(compact_atomical_id) - atomical = await self.db.populate_extended_atomical_holder_info(atomical_id, atomical) - if atomical["type"] == "FT": - if atomical.get("$mint_mode", "fixed") == "fixed": - max_supply = atomical.get('$max_supply', 0) - else: - max_supply = atomical.get('$max_supply', -1) - if max_supply < 0: - mint_amount = atomical.get("mint_info", {}).get("args", {}).get("mint_amount") - max_supply = DFT_MINT_MAX_MAX_COUNT_DENSITY * mint_amount - for holder in atomical.get("holders", [])[offset:offset+limit]: - percent = holder['holding'] / max_supply - formatted_results.append({ - "percent": percent, - "address": get_address_from_output_script(bytes.fromhex(holder['script'])), - "holding": holder["holding"] - }) - elif atomical["type"] == "NFT": - for holder in atomical.get("holders", [])[offset:offset+limit]: - formatted_results.append({ - "address": get_address_from_output_script(bytes.fromhex(holder['script'])), - "holding": holder["holding"] - }) - return formatted_results - - async def hashX_ft_balances_atomicals(self, hashX): - utxos = await self.db.all_utxos(hashX) - utxos = sorted(utxos) - # Comment out the utxos for now and add it in 
later - # utxos.extend(await self.mempool.unordered_UTXOs(hashX)) - self.bump_cost(1.0 + len(utxos) / 50) - spends = [] # await self.mempool.potential_spends(hashX) - returned_utxos = [] - atomicals_id_map = {} - for utxo in utxos: - if (utxo.tx_hash, utxo.tx_pos) in spends: - continue - atomicals = self.db.get_atomicals_by_utxo(utxo, True) - atomicals_basic_infos = {} - for atomical_id in atomicals: - # This call is efficient in that it's cached underneath. - # Now we only show the atomical id and its corresponding value - # because it can always be fetched separately which is more efficient. - atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) - atomical_id_compact = location_id_bytes_to_compact(atomical_id) - atomicals_id_map[atomical_id_compact] = atomical_basic_info - location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) - atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id) - if len(atomicals) > 0: - returned_utxos.append({ - 'txid': hash_to_hex_str(utxo.tx_hash), - 'index': utxo.tx_pos, - 'vout': utxo.tx_pos, - 'height': utxo.height, - 'value': utxo.value, - 'atomicals': atomicals_basic_infos - }) - # Aggregate balances - return_struct = { - 'balances': {} - } - for returned_utxo in returned_utxos: - for atomical_id_entry_compact in returned_utxo['atomicals']: - atomical_id_basic_info = atomicals_id_map[atomical_id_entry_compact] - atomical_id_compact = atomical_id_basic_info['atomical_id'] - assert (atomical_id_compact == atomical_id_entry_compact) - if atomical_id_basic_info.get('type') == 'FT': - if return_struct['balances'].get(atomical_id_compact) is None: - return_struct['balances'][atomical_id_compact] = {} - return_struct['balances'][atomical_id_compact]['id'] = atomical_id_compact - return_struct['balances'][atomical_id_compact]['ticker'] = atomical_id_basic_info.get('$ticker') - return_struct['balances'][atomical_id_compact]['confirmed'] = 0 - if returned_utxo['height'] > 0: - return_struct['balances'][atomical_id_compact]['confirmed'] += returned_utxo['atomicals'][atomical_id_compact] - return return_struct - - async def hashX_nft_balances_atomicals(self, hashX): - utxos = await self.db.all_utxos(hashX) - utxos = sorted(utxos) - # Comment out the utxos for now and add it in later - # utxos.extend(await self.mempool.unordered_UTXOs(hashX)) - self.bump_cost(1.0 + len(utxos) / 50) - spends = [] # await self.mempool.potential_spends(hashX) - returned_utxos = [] - atomicals_id_map = {} - for utxo in utxos: - if (utxo.tx_hash, utxo.tx_pos) in spends: - continue - atomicals = self.db.get_atomicals_by_utxo(utxo, True) - atomicals_basic_infos = {} - for atomical_id in atomicals: - # This call is efficient in that it's cached underneath. - # Now we only show the atomical id and its corresponding value - # because it can always be fetched separately which is more efficient. 
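
# A minimal sketch (illustrative, not part of the patch) of the
# confirmed-balance aggregation these hashX_*_balances methods perform,
# assuming `returned_utxos` entries shaped like the dicts built above.
#
#     balances = {}
#     for utxo in returned_utxos:
#         for atomical_id, value in utxo['atomicals'].items():
#             entry = balances.setdefault(atomical_id, {'id': atomical_id, 'confirmed': 0})
#             if utxo['height'] > 0:  # height <= 0 means unconfirmed
#                 entry['confirmed'] += value
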
- atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) - atomical_id_compact = location_id_bytes_to_compact(atomical_id) - atomicals_id_map[atomical_id_compact] = atomical_basic_info - location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) - atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id) - if len(atomicals) > 0: - returned_utxos.append({ - 'txid': hash_to_hex_str(utxo.tx_hash), - 'index': utxo.tx_pos, - 'vout': utxo.tx_pos, - 'height': utxo.height, - 'value': utxo.value, - 'atomicals': atomicals_basic_infos - }) - # Aggregate balances - return_struct = { - 'balances': {} - } - for returned_utxo in returned_utxos: - for atomical_id_entry_compact in returned_utxo['atomicals']: - atomical_id_basic_info = atomicals_id_map[atomical_id_entry_compact] - atomical_id_compact = atomical_id_basic_info['atomical_id'] - assert(atomical_id_compact == atomical_id_entry_compact) - if atomical_id_basic_info.get('type') == 'NFT': - if return_struct['balances'].get(atomical_id_compact) is None: - return_struct['balances'][atomical_id_compact] = {} - return_struct['balances'][atomical_id_compact]['id'] = atomical_id_compact - return_struct['balances'][atomical_id_compact]['confirmed'] = 0 - if atomical_id_basic_info.get('subtype'): - return_struct['balances'][atomical_id_compact]['subtype'] = atomical_id_basic_info.get('subtype') - if atomical_id_basic_info.get('$request_container'): - return_struct['balances'][atomical_id_compact]['request_container'] = atomical_id_basic_info.get('$request_container') - if atomical_id_basic_info.get('$container'): - return_struct['balances'][atomical_id_compact]['container'] = atomical_id_basic_info.get('$container') - if atomical_id_basic_info.get('$dmitem'): - return_struct['balances'][atomical_id_compact]['dmitem'] = atomical_id_basic_info.get('$dmitem') - if atomical_id_basic_info.get('$request_dmitem'): - return_struct['balances'][atomical_id_compact]['request_dmitem'] = atomical_id_basic_info.get('$request_dmitem') - if atomical_id_basic_info.get('$realm'): - return_struct['balances'][atomical_id_compact]['realm'] = atomical_id_basic_info.get('$realm') - if atomical_id_basic_info.get('$request_realm'): - return_struct['balances'][atomical_id_compact]['request_realm'] = atomical_id_basic_info.get('$request_realm') - if atomical_id_basic_info.get('$subrealm'): - return_struct['balances'][atomical_id_compact]['subrealm'] = atomical_id_basic_info.get('$subrealm') - if atomical_id_basic_info.get('$request_subrealm'): - return_struct['balances'][atomical_id_compact]['request_subrealm'] = atomical_id_basic_info.get('$request_subrealm') - if atomical_id_basic_info.get('$full_realm_name'): - return_struct['balances'][atomical_id_compact]['full_realm_name'] = atomical_id_basic_info.get('$full_realm_name') - if atomical_id_basic_info.get('$parent_container'): - return_struct['balances'][atomical_id_compact]['parent_container'] = atomical_id_basic_info.get('$parent_container') - if atomical_id_basic_info.get('$parent_realm'): - return_struct['balances'][atomical_id_compact]['parent_realm'] = atomical_id_basic_info.get('$parent_realm') - if atomical_id_basic_info.get('$parent_container_name'): - return_struct['balances'][atomical_id_compact]['parent_container_name'] = atomical_id_basic_info.get('$parent_container_name') - if atomical_id_basic_info.get('$bitwork'): - return_struct['balances'][atomical_id_compact]['bitwork'] = atomical_id_basic_info.get('$bitwork') - if 
atomical_id_basic_info.get('$parents'): - return_struct['balances'][atomical_id_compact]['parents'] = atomical_id_basic_info.get('$parents') - if returned_utxo['height'] > 0: - return_struct['balances'][atomical_id_compact]['confirmed'] += returned_utxo['atomicals'][atomical_id_compact] - return return_struct - - async def hashX_listscripthash_atomicals(self, hashX, Verbose=False): - utxos = await self.db.all_utxos(hashX) - utxos = sorted(utxos) - # Comment out the utxos for now and add it in later - # utxos.extend(await self.mempool.unordered_UTXOs(hashX)) - self.bump_cost(1.0 + len(utxos) / 50) - spends = [] # await self.mempool.potential_spends(hashX) - returned_utxos = [] - atomicals_id_map = {} - for utxo in utxos: - if (utxo.tx_hash, utxo.tx_pos) in spends: - continue - atomicals = self.db.get_atomicals_by_utxo(utxo, True) - atomicals_basic_infos = {} - for atomical_id in atomicals: - # This call is efficient in that it's cached underneath. - # Now we only show the atomical id and its corresponding value - # because it can always be fetched separately which is more efficient. - atomical_basic_info = await self.session_mgr.bp.get_base_mint_info_rpc_format_by_atomical_id(atomical_id) - atomical_id_compact = location_id_bytes_to_compact(atomical_id) - atomicals_id_map[atomical_id_compact] = atomical_basic_info - location = utxo.tx_hash + util.pack_le_uint32(utxo.tx_pos) - atomicals_basic_infos[atomical_id_compact] = self.db.get_uxto_atomicals_value(location, atomical_id) - if Verbose or len(atomicals) > 0: - returned_utxos.append({ - 'txid': hash_to_hex_str(utxo.tx_hash), - 'index': utxo.tx_pos, - 'vout': utxo.tx_pos, - 'height': utxo.height, - 'value': utxo.value, - 'atomicals': atomicals_basic_infos - }) - # Aggregate balances - return_struct = { - 'global': await self.get_summary_info(), - 'atomicals': {}, - 'utxos': returned_utxos - } - - for returned_utxo in returned_utxos: - for atomical_id_entry_compact in returned_utxo['atomicals']: - atomical_id_basic_info = atomicals_id_map[atomical_id_entry_compact] - atomical_id_ref = atomical_id_basic_info['atomical_id'] - if return_struct['atomicals'].get(atomical_id_ref) is None: - return_struct['atomicals'][atomical_id_ref] = { - 'atomical_id': atomical_id_ref, - 'atomical_number': atomical_id_basic_info['atomical_number'], - 'type': atomical_id_basic_info['type'], - 'confirmed': 0, - # 'subtype': atomical_id_basic_info.get('subtype'), - 'data': atomical_id_basic_info - } - if atomical_id_basic_info.get('$realm'): - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['request_realm_status'] = atomical_id_basic_info.get('$request_realm_status') - return_struct['atomicals'][atomical_id_ref]['request_realm'] = atomical_id_basic_info.get('$request_realm') - return_struct['atomicals'][atomical_id_ref]['realm'] = atomical_id_basic_info.get('$realm') - return_struct['atomicals'][atomical_id_ref]['full_realm_name'] = atomical_id_basic_info.get('$full_realm_name') - elif atomical_id_basic_info.get('$subrealm'): - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['request_subrealm_status'] = atomical_id_basic_info.get('$request_subrealm_status') - return_struct['atomicals'][atomical_id_ref]['request_subrealm'] = atomical_id_basic_info.get('$request_subrealm') - return_struct['atomicals'][atomical_id_ref]['parent_realm'] = atomical_id_basic_info.get('$parent_realm') - 
return_struct['atomicals'][atomical_id_ref]['subrealm'] = atomical_id_basic_info.get('$subrealm') - return_struct['atomicals'][atomical_id_ref]['full_realm_name'] = atomical_id_basic_info.get('$full_realm_name') - elif atomical_id_basic_info.get('$dmitem'): - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['request_dmitem_status'] = atomical_id_basic_info.get('$request_dmitem_status') - return_struct['atomicals'][atomical_id_ref]['request_dmitem'] = atomical_id_basic_info.get('$request_dmitem') - return_struct['atomicals'][atomical_id_ref]['parent_container'] = atomical_id_basic_info.get('$parent_container') - return_struct['atomicals'][atomical_id_ref]['dmitem'] = atomical_id_basic_info.get('$dmitem') - elif atomical_id_basic_info.get('$ticker'): - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['ticker_candidates'] = atomical_id_basic_info.get('$ticker_candidates') - return_struct['atomicals'][atomical_id_ref]['request_ticker_status'] = atomical_id_basic_info.get('$request_ticker_status') - return_struct['atomicals'][atomical_id_ref]['request_ticker'] = atomical_id_basic_info.get('$request_ticker') - return_struct['atomicals'][atomical_id_ref]['ticker'] = atomical_id_basic_info.get('$ticker') - elif atomical_id_basic_info.get('$container'): - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['request_container_status'] = atomical_id_basic_info.get('$request_container_status') - return_struct['atomicals'][atomical_id_ref]['container'] = atomical_id_basic_info.get('$container') - return_struct['atomicals'][atomical_id_ref]['request_container'] = atomical_id_basic_info.get('$request_container') - # Label them as candidates if they were candidates - elif atomical_id_basic_info.get('subtype') == 'request_realm': - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['request_realm_status'] = atomical_id_basic_info.get('$request_realm_status') - return_struct['atomicals'][atomical_id_ref]['request_realm'] = atomical_id_basic_info.get('$request_realm') - return_struct['atomicals'][atomical_id_ref]['realm_candidates'] = atomical_id_basic_info.get('$realm_candidates') - elif atomical_id_basic_info.get('subtype') == 'request_subrealm': - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['subrealm_candidates'] = atomical_id_basic_info.get('$subrealm_candidates') - return_struct['atomicals'][atomical_id_ref]['request_subrealm_status'] = atomical_id_basic_info.get('$request_subrealm_status') - return_struct['atomicals'][atomical_id_ref]['request_full_realm_name'] = atomical_id_basic_info.get('$request_full_realm_name') - return_struct['atomicals'][atomical_id_ref]['request_subrealm'] = atomical_id_basic_info.get('$request_subrealm') - return_struct['atomicals'][atomical_id_ref]['parent_realm'] = atomical_id_basic_info.get('$parent_realm') - elif atomical_id_basic_info.get('subtype') == 'request_dmitem': - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['dmitem_candidates'] = atomical_id_basic_info.get('$dmitem_candidates') - 
return_struct['atomicals'][atomical_id_ref]['request_dmitem_status'] = atomical_id_basic_info.get('$request_dmitem_status') - return_struct['atomicals'][atomical_id_ref]['request_dmitem'] = atomical_id_basic_info.get('$request_dmitem') - return_struct['atomicals'][atomical_id_ref]['parent_container'] = atomical_id_basic_info.get('$parent_container') - elif atomical_id_basic_info.get('subtype') == 'request_container': - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['container_candidates'] = atomical_id_basic_info.get('$container_candidates') - return_struct['atomicals'][atomical_id_ref]['request_container_status'] = atomical_id_basic_info.get('$request_container_status') - return_struct['atomicals'][atomical_id_ref]['request_container'] = atomical_id_basic_info.get('$request_container') - elif atomical_id_basic_info.get('$request_ticker_status'): - return_struct['atomicals'][atomical_id_ref]['subtype'] = atomical_id_basic_info.get('subtype') - return_struct['atomicals'][atomical_id_ref]['ticker_candidates'] = atomical_id_basic_info.get('$ticker_candidates') - return_struct['atomicals'][atomical_id_ref]['request_ticker_status'] = atomical_id_basic_info.get('$request_ticker_status') - return_struct['atomicals'][atomical_id_ref]['request_ticker'] = atomical_id_basic_info.get('$request_ticker') - - if returned_utxo['height'] <= 0: - return_struct['atomicals'][atomical_id_ref]['unconfirmed'] += returned_utxo["atomicals"][atomical_id_ref] - else: - return_struct['atomicals'][atomical_id_ref]['confirmed'] += returned_utxo["atomicals"][atomical_id_ref] - - return return_struct - - async def atomicals_get_tx(self, txids): - return await self.atomical_get_tx(txids) - - async def scripthash_get_history(self, scripthash): - '''Return the confirmed and unconfirmed history of a scripthash.''' - hashX = scripthash_to_hashX(scripthash) - return await self.confirmed_and_unconfirmed_history(hashX) - - async def scripthash_get_mempool(self, scripthash): - '''Return the mempool transactions touching a scripthash.''' - hashX = scripthash_to_hashX(scripthash) - return await self.unconfirmed_history(hashX) - - async def scripthash_listunspent(self, scripthash): - '''Return the list of UTXOs of a scripthash.''' - hashX = scripthash_to_hashX(scripthash) - return await self.hashX_listunspent(hashX) - - async def scripthash_subscribe(self, scripthash): - '''Subscribe to a script hash. 
- - scripthash: the SHA256 hash of the script to subscribe to''' - hashX = scripthash_to_hashX(scripthash) - return await self.hashX_subscribe(hashX, scripthash) - - async def scripthash_unsubscribe(self, scripthash): - '''Unsubscribe from a script hash.''' - self.bump_cost(0.1) - hashX = scripthash_to_hashX(scripthash) - return self.unsubscribe_hashX(hashX) is not None - - async def _merkle_proof(self, cp_height, height): - max_height = self.db.db_height - if not height <= cp_height <= max_height: - raise RPCError(BAD_REQUEST, - f'require header height {height:,d} <= ' - f'cp_height {cp_height:,d} <= ' - f'chain height {max_height:,d}') - branch, root = await self.db.header_branch_and_root(cp_height + 1, - height) - return { - 'branch': [hash_to_hex_str(elt) for elt in branch], - 'root': hash_to_hex_str(root), - } - - async def block_header(self, height, cp_height=0): - '''Return a raw block header as a hexadecimal string, or as a - dictionary with a merkle proof.''' - height = non_negative_integer(height) - cp_height = non_negative_integer(cp_height) - raw_header_hex = (await self.session_mgr.raw_header(height)).hex() - self.bump_cost(1.25 - (cp_height == 0)) - if cp_height == 0: - return raw_header_hex - result = {'header': raw_header_hex} - result.update(await self._merkle_proof(cp_height, height)) - return result - - async def block_headers(self, start_height, count, cp_height=0): - '''Return count concatenated block headers as hex for the main chain; - starting at start_height. - - start_height and count must be non-negative integers. At most - MAX_CHUNK_SIZE headers will be returned. - ''' - start_height = non_negative_integer(start_height) - count = non_negative_integer(count) - cp_height = non_negative_integer(cp_height) - cost = count / 50 - - max_size = self.MAX_CHUNK_SIZE - count = min(count, max_size) - headers, count = await self.db.read_headers(start_height, count) - result = {'hex': headers.hex(), 'count': count, 'max': max_size} - if count and cp_height: - cost += 1.0 - last_height = start_height + count - 1 - result.update(await self._merkle_proof(cp_height, last_height)) - self.bump_cost(cost) - return result - - def is_tor(self): - '''Try to detect if the connection is to a tor hidden service we are - running.''' - proxy_address = self.peer_mgr.proxy_address() - if not proxy_address: - return False - remote_addr = self.remote_address() - if not remote_addr: - return False - return remote_addr.host == proxy_address.host - - async def replaced_banner(self, banner): - network_info = await self.daemon_request('getnetworkinfo') - ni_version = network_info['version'] - major, minor = divmod(ni_version, 1000000) - minor, revision = divmod(minor, 10000) - revision //= 100 - daemon_version = f'{major:d}.{minor:d}.{revision:d}' - for pair in [ - ('$SERVER_VERSION', electrumx.version_short), - ('$SERVER_SUBVERSION', electrumx.version), - ('$DAEMON_VERSION', daemon_version), - ('$DAEMON_SUBVERSION', network_info['subversion']), - ('$DONATION_ADDRESS', self.env.donation_address), - ]: - banner = banner.replace(*pair) - return banner - - async def donation_address(self): - '''Return the donation address as a string, empty if there is none.''' - self.bump_cost(0.1) - return self.env.donation_address - - async def banner(self): - '''Return the server banner text.''' - banner = f'You are connected to an {electrumx.version} server.' 
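The `_merkle_proof` helper above hands a client the branch and root needed to check a header (or, later, a transaction) against a checkpoint. A client-side sketch of the verification fold — illustrative names, not part of this patch; ElectrumX serializes these hashes as reversed hex (`hash_to_hex_str`), so a real caller first converts each with `bytes.fromhex(h)[::-1]`:

    import hashlib

    def sha256d(data: bytes) -> bytes:
        return hashlib.sha256(hashlib.sha256(data).digest()).digest()

    def root_from_branch(leaf: bytes, branch: list, index: int) -> bytes:
        # At each level the low bit of index says whether our node is the
        # right (1) or left (0) child of its parent; hash accordingly and
        # shift to move one level up the tree.
        node = leaf
        for sibling in branch:
            node = sha256d(sibling + node) if index & 1 else sha256d(node + sibling)
            index >>= 1
        return node

For a header proof the leaf is the double-SHA256 of the raw header and the index is its height; the fold must reproduce the 'root' field returned alongside the branch.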
-        self.bump_cost(0.5)
-
-        if self.is_tor():
-            banner_file = self.env.tor_banner_file
-        else:
-            banner_file = self.env.banner_file
-        if banner_file:
-            try:
-                with codecs.open(banner_file, 'r', 'utf-8') as f:
-                    banner = f.read()
-            except (OSError, UnicodeDecodeError) as e:
-                self.logger.error(f'reading banner file {banner_file}: {e!r}')
-            else:
-                banner = await self.replaced_banner(banner)
-
-        return banner
-
-    async def relayfee(self):
-        '''The minimum fee a low-priority tx must pay in order to be accepted
-        to the daemon's memory pool.'''
-        self.bump_cost(1.0)
-        return await self.daemon_request('relayfee')
-
-    async def estimatefee(self, number, mode=None):
-        '''The estimated transaction fee per kilobyte to be paid for a
-        transaction to be included within a certain number of blocks.
-
-        number: the number of blocks
-        mode: CONSERVATIVE or ECONOMICAL estimation mode
-        '''
-        number = non_negative_integer(number)
-        # use whitelist for mode, otherwise it would be easy to force a cache miss:
-        if mode not in self.coin.ESTIMATEFEE_MODES:
-            raise RPCError(BAD_REQUEST, f'unknown estimatefee mode: {mode}')
-        self.bump_cost(0.1)
-
-        number = self.coin.bucket_estimatefee_block_target(number)
-        cache = self.session_mgr.estimatefee_cache
-
-        cache_item = cache.get((number, mode))
-        if cache_item is not None:
-            blockhash, feerate, lock = cache_item
-            if blockhash and blockhash == self.session_mgr.bp.tip:
-                return feerate
-        else:
-            # create lock now, store it, and only then await on it
-            lock = asyncio.Lock()
-            cache[(number, mode)] = (None, None, lock)
-        async with lock:
-            cache_item = cache.get((number, mode))
-            if cache_item is not None:
-                blockhash, feerate, lock = cache_item
-                if blockhash == self.session_mgr.bp.tip:
-                    return feerate
-            self.bump_cost(2.0)  # cache miss incurs extra cost
-            blockhash = self.session_mgr.bp.tip
-            if mode:
-                feerate = await self.daemon_request('estimatefee', number, mode)
-            else:
-                feerate = await self.daemon_request('estimatefee', number)
-            assert feerate is not None
-            assert blockhash is not None
-            cache[(number, mode)] = (blockhash, feerate, lock)
-            return feerate
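The caching in the estimatefee handler above is worth isolating: one asyncio.Lock per (target, mode) key, created and stored before the first await so concurrent misses serialize on it, and entries invalidated whenever the daemon tip moves. A condensed, self-contained sketch of the same pattern — TipCache, get_tip and fetch are illustrative names, not this codebase's API:

    import asyncio

    class TipCache:
        # Entries are (tip, value, lock); a value is valid only while the
        # chain tip it was computed under is still current.
        def __init__(self, get_tip, fetch):
            self._get_tip = get_tip   # () -> bytes: the current chain tip
            self._fetch = fetch       # async (key) -> value: the expensive call
            self._cache = {}

        async def get(self, key):
            entry = self._cache.get(key)
            if entry and entry[0] == self._get_tip():
                return entry[1]
            if entry is None:
                # Create and store the lock before the first await so that
                # concurrent misses for the same key serialize on it.
                self._cache[key] = (None, None, asyncio.Lock())
            lock = self._cache[key][2]
            async with lock:
                tip, value, _ = self._cache[key]
                if tip == self._get_tip():
                    return value      # another task refreshed it first
                tip = self._get_tip()
                value = await self._fetch(key)
                self._cache[key] = (tip, value, lock)
                return value

Wiring it up as cache.get((number, mode)) with fetch calling the daemon's estimatefee would reproduce the handler's behavior: at most one extra daemon call per key each time the tip changes.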
-    async def ping(self):
-        '''Serves as a connection keep-alive mechanism and for the client to
-        confirm the server is still responding.
-        '''
-        self.bump_cost(0.1)
-        return None
-
-    async def server_version(self, client_name='', protocol_version=None):
-        '''Returns the server version as a string.
-
-        client_name: a string identifying the client
-        protocol_version: the protocol version spoken by the client
-        '''
-        self.bump_cost(0.5)
-        if self.sv_seen:
-            raise RPCError(BAD_REQUEST, 'server.version already sent')
-        self.sv_seen = True
-
-        if client_name:
-            client_name = str(client_name)
-            if self.env.drop_client is not None and \
-                    self.env.drop_client.match(client_name):
-                raise ReplyAndDisconnect(RPCError(
-                    BAD_REQUEST, f'unsupported client: {client_name}'))
-            self.client = client_name[:17]
-
-        # Find the highest common protocol version. Disconnect if
-        # that protocol version is unsupported.
-        ptuple, client_min = util.protocol_version(
-            protocol_version, self.PROTOCOL_MIN, self.PROTOCOL_MAX)
-
-        await self.crash_old_client(ptuple, self.env.coin.CRASH_CLIENT_VER)
-
-        if ptuple is None:
-            if client_min > self.PROTOCOL_MIN:
-                self.logger.info(f'client requested future protocol version '
-                                 f'{util.version_string(client_min)} '
-                                 f'- is your software out of date?')
-            raise ReplyAndDisconnect(RPCError(
-                BAD_REQUEST, f'unsupported protocol version: {protocol_version}'))
-        self.set_request_handlers(ptuple)
-
-        return electrumx.version, self.protocol_version_string()
-
-    async def crash_old_client(self, ptuple, crash_client_ver):
-        if crash_client_ver:
-            client_ver = util.protocol_tuple(self.client)
-            is_old_protocol = ptuple is None or ptuple <= (1, 2)
-            is_old_client = client_ver != (0,) and client_ver <= crash_client_ver
-            if is_old_protocol and is_old_client:
-                self.logger.info(f'attempting to crash old client with version {self.client}')
-                # this can crash electrum client 2.6 <= v < 3.1.2
-                await self.send_notification('blockchain.relayfee', ())
-                # this can crash electrum client (v < 2.8.2) UNION (3.0.0 <= v < 3.3.0)
-                await self.send_notification('blockchain.estimatefee', ())
-
-    async def transaction_broadcast_validate(self, raw_tx):
-        '''Simulate broadcasting a raw transaction to the network.
-
-        raw_tx: the raw transaction as a hexadecimal string to validate for Atomicals FT rules'''
-        self.bump_cost(0.25 + len(raw_tx) / 5000)
-        # This returns errors as JSON RPC errors, as is natural
-        try:
-            hex_hash = await self.session_mgr.broadcast_transaction_validated(raw_tx, False)
-            return hex_hash
-        except AtomicalsValidationError as e:
-            self.logger.info(f'error validating atomicals transaction: {e}')
-            raise RPCError(ATOMICALS_INVALID_TX, 'the transaction was rejected by '
-                           f'atomicals rules.\n\n{e}\n[{raw_tx}]')
-
-    async def transaction_broadcast(self, raw_tx):
-        '''Broadcast a raw transaction to the network.
-
-        raw_tx: the raw transaction as a hexadecimal string'''
-        self.bump_cost(0.25 + len(raw_tx) / 5000)
-        # This returns errors as JSON RPC errors, as is natural
-        try:
-            hex_hash = await self.session_mgr.broadcast_transaction_validated(raw_tx, True)
-        except DaemonError as e:
-            error, = e.args
-            message = error['message']
-            self.logger.info(f'error sending transaction: {message}')
-            raise RPCError(BAD_REQUEST, 'the transaction was rejected by '
-                           f'network rules.\n\n{message}\n[{raw_tx}]')
-        except AtomicalsValidationError as e:
-            self.logger.info(f'error validating atomicals transaction: {e}')
-            raise RPCError(ATOMICALS_INVALID_TX, 'the transaction was rejected by '
-                           f'atomicals rules.\n\n{e}\n[{raw_tx}]')
-
-        else:
-            self.txs_sent += 1
-            client_ver = util.protocol_tuple(self.client)
-            if client_ver != (0, ):
-                msg = self.coin.warn_old_client_on_tx_broadcast(client_ver)
-                if msg:
-                    self.logger.info(f'sent tx: {hex_hash}, and warned user to upgrade their '
-                                     f'client from {self.client}')
-                    return msg
-
-            self.logger.info(f'sent tx: {hex_hash}')
-            return hex_hash
-
-    async def transaction_broadcast_force(self, raw_tx):
-        '''Broadcast a raw transaction to the network.
Force even if invalid FT transfer - raw_tx: the raw transaction as a hexadecimal string''' - self.bump_cost(0.25 + len(raw_tx) / 5000) - # This returns errors as JSON RPC errors, as is natural - try: - hex_hash = await self.session_mgr.broadcast_transaction(raw_tx) - except DaemonError as e: - error, = e.args - message = error['message'] - self.logger.info(f'error sending transaction: {message}') - raise RPCError(BAD_REQUEST, 'the transaction was rejected by ' - f'network rules.\n\n{message}\n[{raw_tx}]') - else: - self.txs_sent += 1 - client_ver = util.protocol_tuple(self.client) - if client_ver != (0, ): - msg = self.coin.warn_old_client_on_tx_broadcast(client_ver) - if msg: - self.logger.info(f'sent tx: {hex_hash}. and warned user to upgrade their ' - f'client from {self.client}') - return msg - - self.logger.info(f'sent tx: {hex_hash}') - return hex_hash - - - async def transaction_get(self, tx_hash, verbose=False): - '''Return the serialized raw transaction given its hash - - tx_hash: the transaction hash as a hexadecimal string - verbose: passed on to the daemon - ''' - assert_tx_hash(tx_hash) - if verbose not in (True, False): - raise RPCError(BAD_REQUEST, '"verbose" must be a boolean') - - self.bump_cost(1.0) - return await self.daemon_request('getrawtransaction', tx_hash, verbose) - - async def transaction_merkle(self, tx_hash, height): - '''Return the merkle branch to a confirmed transaction given its hash - and height. - - tx_hash: the transaction hash as a hexadecimal string - height: the height of the block it is in - ''' - tx_hash = assert_tx_hash(tx_hash) - height = non_negative_integer(height) - - branch, tx_pos, cost = await self.session_mgr.merkle_branch_for_tx_hash( - height, tx_hash) - self.bump_cost(cost) - - return {"block_height": height, "merkle": branch, "pos": tx_pos} - - async def transaction_id_from_pos(self, height, tx_pos, merkle=False): - '''Return the txid and optionally a merkle proof, given - a block height and position in the block. 
- ''' - tx_pos = non_negative_integer(tx_pos) - height = non_negative_integer(height) - if merkle not in (True, False): - raise RPCError(BAD_REQUEST, '"merkle" must be a boolean') - - if merkle: - branch, tx_hash, cost = await self.session_mgr.merkle_branch_for_tx_pos( - height, tx_pos) - self.bump_cost(cost) - return {"tx_hash": tx_hash, "merkle": branch} - else: - tx_hashes, cost = await self.session_mgr.tx_hashes_at_blockheight(height) - try: - tx_hash = tx_hashes[tx_pos] - except IndexError: - raise RPCError(BAD_REQUEST, - f'no tx at position {tx_pos:,d} in block at height {height:,d}') - self.bump_cost(cost) - return hash_to_hex_str(tx_hash) - - async def compact_fee_histogram(self): - self.bump_cost(1.0) - return await self.mempool.compact_fee_histogram() - - async def atomicals_transaction(self, txid): - return await self.session_mgr.get_transaction_detail(txid) - - async def get_transaction_detail_by_height(self, height, limit, offset, op_type, reverse=True): - res = [] - txs_list = [] - txs = self.db.get_atomicals_block_txs(height) - for tx in txs: - # get operation by db method - tx_num, _ = self.db.get_tx_num_height_from_tx_hash(hex_str_to_hash(tx)) - txs_list.append({ - "tx_num": tx_num, - "tx_hash": tx, - "height": height - }) - - txs_list.sort(key=lambda x: x['tx_num'], reverse=reverse) - for tx in txs_list: - data = await self.session_mgr.get_transaction_detail(tx["tx_hash"], height, tx["tx_num"]) - if (op_type and op_type == data["op"]) or (not op_type and data["op"]): - res.append(data) - total = len(res) - return res[offset:offset+limit], total - - # get the whole transaction by block height - # return transaction detail - async def transaction_by_height(self, height, limit=10, offset=0, op_type=None, reverse=True): - res, total = await self.get_transaction_detail_by_height(height, limit, offset, op_type, reverse) - return {"result": res, "total": total, "limit": limit, "offset": offset} - - # get transaction by atomical id - async def transaction_by_atomical_id(self, compact_atomical_id_or_atomical_number, limit=10, offset=0, op_type=None, reverse=True): - res = [] - compact_atomical_id = compact_atomical_id_or_atomical_number - if isinstance(compact_atomical_id_or_atomical_number, int) != True and is_compact_atomical_id(compact_atomical_id_or_atomical_number): - assert_atomical_id(compact_atomical_id) - else: - compact_atomical_id = location_id_bytes_to_compact(self.db.get_atomical_id_by_atomical_number(compact_atomical_id_or_atomical_number)) - atomical_id = compact_to_location_id_bytes(compact_atomical_id) - hashX = double_sha256(atomical_id) - - res = [] - if op_type: - op = self.session_mgr.bp.op_list.get(op_type, None) - history_data, total = await self.session_mgr.get_history_op(hashX, limit, offset, op, reverse) - else: - history_data, total = await self.session_mgr.get_history_op(hashX, limit, offset, None, reverse) - for history in history_data: - tx_hash, tx_height = self.db.fs_tx_hash(history["tx_num"]) - data = await self.session_mgr.get_transaction_detail(hash_to_hex_str(tx_hash), tx_height, history["tx_num"]) - if data and data["op"]: - if (op_type and data["op"] == op_type) or not op_type: - res.append(data) - return {"result": res, "total": total, "limit": limit, "offset": offset} - - # get transaction by scripthash - async def transaction_by_scripthash(self, scripthash, limit=10, offset=0, op_type=None, reverse=True): - hashX = scripthash_to_hashX(scripthash) - res = [] - if op_type: - op = self.session_mgr.bp.op_list.get(op_type, None) - history_data, 
total = await self.session_mgr.get_history_op(hashX, limit, offset, op, reverse) - else: - history_data, total = await self.session_mgr.get_history_op(hashX, limit, offset, None, reverse) - - for history in history_data: - tx_hash, tx_height = self.db.fs_tx_hash(history["tx_num"]) - data = await self.session_mgr.get_transaction_detail(hash_to_hex_str(tx_hash), tx_height, history["tx_num"]) - if data and data["op"]: - if data["op"] and (data["op"] == op_type or not op_type): - res.append(data) - return {"result": res, "total": total, "limit": limit, "offset": offset} - - def set_request_handlers(self, ptuple): - self.protocol_tuple = ptuple - handlers = { - 'blockchain.block.header': self.block_header, - 'blockchain.block.headers': self.block_headers, - 'blockchain.estimatefee': self.estimatefee, - 'blockchain.headers.subscribe': self.headers_subscribe, - 'blockchain.relayfee': self.relayfee, - 'blockchain.scripthash.get_balance': self.scripthash_get_balance, - 'blockchain.scripthash.get_history': self.scripthash_get_history, - 'blockchain.scripthash.get_mempool': self.scripthash_get_mempool, - 'blockchain.scripthash.listunspent': self.scripthash_listunspent, - 'blockchain.scripthash.subscribe': self.scripthash_subscribe, - 'blockchain.transaction.broadcast': self.transaction_broadcast, - 'blockchain.transaction.broadcast_force': self.transaction_broadcast_force, - 'blockchain.transaction.get': self.transaction_get, - 'blockchain.transaction.get_merkle': self.transaction_merkle, - 'blockchain.transaction.id_from_pos': self.transaction_id_from_pos, - 'mempool.get_fee_histogram': self.compact_fee_histogram, - 'server.add_peer': self.add_peer, - 'server.banner': self.banner, - 'server.donation_address': self.donation_address, - 'server.features': self.server_features_async, - 'server.peers.subscribe': self.peers_subscribe, - 'server.ping': self.ping, - 'server.version': self.server_version, - # The Atomicals era has begun # - 'blockchain.atomicals.validate': self.transaction_broadcast_validate, - 'blockchain.atomicals.get_ft_balances_scripthash': self.atomicals_get_ft_balances, - 'blockchain.atomicals.get_nft_balances_scripthash': self.atomicals_get_nft_balances, - 'blockchain.atomicals.listscripthash': self.atomicals_listscripthash, - 'blockchain.atomicals.list': self.atomicals_list, - 'blockchain.atomicals.get_numbers': self.atomicals_num_to_id, - 'blockchain.atomicals.get_block_hash': self.atomicals_block_hash, - 'blockchain.atomicals.get_block_txs': self.atomicals_block_txs, - 'blockchain.atomicals.dump': self.atomicals_dump, - 'blockchain.atomicals.at_location': self.atomicals_at_location, - 'blockchain.atomicals.get_location': self.atomicals_get_location, - 'blockchain.atomicals.get': self.atomicals_get, - 'blockchain.atomicals.get_global': self.atomicals_get_global, - 'blockchain.atomicals.get_state': self.atomical_get_state, - 'blockchain.atomicals.get_state_history': self.atomical_get_state_history, - 'blockchain.atomicals.get_events': self.atomical_get_events, - 'blockchain.atomicals.get_tx_history': self.atomicals_get_tx_history, - 'blockchain.atomicals.get_realm_info': self.atomicals_get_realm_info, - 'blockchain.atomicals.get_by_realm': self.atomicals_get_by_realm, - 'blockchain.atomicals.get_by_subrealm': self.atomicals_get_by_subrealm, - 'blockchain.atomicals.get_by_dmitem': self.atomicals_get_by_dmitem, - 'blockchain.atomicals.get_by_ticker': self.atomicals_get_by_ticker, - 'blockchain.atomicals.get_by_container': self.atomicals_get_by_container, - 
'blockchain.atomicals.get_by_container_item': self.atomicals_get_by_container_item, - 'blockchain.atomicals.get_by_container_item_validate': self.atomicals_get_by_container_item_validation, - 'blockchain.atomicals.get_container_items': self.atomicals_get_container_items, - 'blockchain.atomicals.get_ft_info': self.atomicals_get_ft_info, - 'blockchain.atomicals.get_dft_mints': self.atomicals_get_dft_mints, - 'blockchain.atomicals.find_tickers': self.atomicals_search_tickers, - 'blockchain.atomicals.find_realms': self.atomicals_search_realms, - 'blockchain.atomicals.find_subrealms': self.atomicals_search_subrealms, - 'blockchain.atomicals.find_containers': self.atomicals_search_containers, - 'blockchain.atomicals.get_holders': self.atomicals_get_holders, - 'blockchain.atomicals.transaction': self.atomicals_transaction, - 'blockchain.atomicals.transaction_global': self.session_mgr.transaction_global, - 'blockchain.atomicals.transaction_by_height': self.transaction_by_height, - 'blockchain.atomicals.transaction_by_atomical_id': self.transaction_by_atomical_id, - 'blockchain.atomicals.transaction_by_scripthash': self.transaction_by_scripthash, - } - if ptuple >= (1, 4, 2): - handlers['blockchain.scripthash.unsubscribe'] = self.scripthash_unsubscribe - self.request_handlers = handlers - -class LocalRPC(SessionBase): - '''A local TCP RPC server session.''' - - processing_timeout = 10**9 # disable timeouts - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.client = 'RPC' - self.connection.max_response_size = 0 - - def protocol_version_string(self): - return 'RPC' - - -class DashElectrumX(ElectrumX): - '''A TCP server that handles incoming Electrum Dash connections.''' - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.mns = set() - self.mn_cache_height = 0 - self.mn_cache = [] - - def set_request_handlers(self, ptuple): - super().set_request_handlers(ptuple) - self.request_handlers.update({ - 'masternode.announce.broadcast': - self.masternode_announce_broadcast, - 'masternode.subscribe': self.masternode_subscribe, - 'masternode.list': self.masternode_list, - 'protx.diff': self.protx_diff, - 'protx.info': self.protx_info, - }) - - async def _notify_inner(self, touched, height_changed): - '''Notify the client about changes in masternode list.''' - await super()._notify_inner(touched, height_changed) - for mn in self.mns.copy(): - status = await self.daemon_request('masternode_list', - ('status', mn)) - await self.send_notification('masternode.subscribe', - (mn, status.get(mn))) - - # Masternode command handlers - async def masternode_announce_broadcast(self, signmnb): - '''Pass through the masternode announce message to be broadcast - by the daemon. - - signmnb: signed masternode broadcast message.''' - try: - return await self.daemon_request('masternode_broadcast', - ('relay', signmnb)) - except DaemonError as e: - error, = e.args - message = error['message'] - self.logger.info(f'masternode_broadcast: {message}') - raise RPCError(BAD_REQUEST, 'the masternode broadcast was ' - f'rejected.\n\n{message}\n[{signmnb}]') - - async def masternode_subscribe(self, collateral): - '''Returns the status of masternode. - - collateral: masternode collateral. - ''' - result = await self.daemon_request('masternode_list', - ('status', collateral)) - if result is not None: - self.mns.add(collateral) - return result.get(collateral) - return None - - async def masternode_list(self, payees): - ''' - Returns the list of masternodes. 
-
-        payees: a list of masternode payee addresses.
-        '''
-        if not isinstance(payees, list):
-            raise RPCError(BAD_REQUEST, 'expected a list of payees')
-
-        def get_masternode_payment_queue(mns):
-            '''Returns the calculated position in the payment queue for all the
-            valid masternodes in the given mns list.
-
-            mns: a list of masternodes information.
-            '''
-            now = int(datetime.datetime.utcnow().strftime("%s"))
-            mn_queue = []
-
-            # Only ENABLED masternodes are considered for the list.
-            for line in mns:
-                mnstat = mns[line].split()
-                if mnstat[0] == 'ENABLED':
-                    # if last paid time == 0
-                    if int(mnstat[5]) == 0:
-                        # use active seconds
-                        mnstat.append(int(mnstat[4]))
-                    else:
-                        # now minus last paid
-                        delta = now - int(mnstat[5])
-                        # if > active seconds, use active seconds
-                        if delta >= int(mnstat[4]):
-                            mnstat.append(int(mnstat[4]))
-                        # use active seconds
-                        else:
-                            mnstat.append(delta)
-                    mn_queue.append(mnstat)
-            mn_queue = sorted(mn_queue, key=lambda x: x[8], reverse=True)
-            return mn_queue
-
-        def get_payment_position(payment_queue, address):
-            '''
-            Returns the position in the payment queue for the given address.
-
-            payment_queue: the masternode payment queue.
-            address: masternode payee address.
-            '''
-            position = -1
-            for pos, mn in enumerate(payment_queue, start=1):
-                if mn[2] == address:
-                    position = pos
-                    break
-            return position
-
-        # In accordance with the masternode payment queue, a custom list
-        # with the masternode information including the payment
-        # position is returned.
-        cache = self.session_mgr.mn_cache
-        if not cache or self.session_mgr.mn_cache_height != self.db.db_height:
-            full_mn_list = await self.daemon_request('masternode_list',
-                                                     ('full',))
-            mn_payment_queue = get_masternode_payment_queue(full_mn_list)
-            mn_payment_count = len(mn_payment_queue)
-            mn_list = []
-            for key, value in full_mn_list.items():
-                mn_data = value.split()
-                mn_info = {
-                    'vin': key,
-                    'status': mn_data[0],
-                    'protocol': mn_data[1],
-                    'payee': mn_data[2],
-                    'lastseen': mn_data[3],
-                    'activeseconds': mn_data[4],
-                    'lastpaidtime': mn_data[5],
-                    'lastpaidblock': mn_data[6],
-                    'ip': mn_data[7]
-                }
-                mn_info['paymentposition'] = get_payment_position(
-                    mn_payment_queue, mn_info['payee']
-                )
-                mn_info['inselection'] = (
-                    mn_info['paymentposition'] < mn_payment_count // 10
-                )
-                hashX = self.coin.address_to_hashX(mn_info['payee'])
-                balance = await self.get_balance(hashX)
-                mn_info['balance'] = (sum(balance.values())
-                                      / self.coin.VALUE_PER_COIN)
-                mn_list.append(mn_info)
-            cache.clear()
-            cache.extend(mn_list)
-            self.session_mgr.mn_cache_height = self.db.db_height
-
-        # If payees is an empty list the whole masternode list is returned
-        if payees:
-            return [mn for mn in cache if mn['payee'] in payees]
-        else:
-            return cache
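The queue logic above reduces to ranking ENABLED masternodes by an effective wait time: never-paid nodes wait their full activeseconds, others wait min(now - lastpaidtime, activeseconds). A standalone restatement of just that ranking, assuming the same space-separated masternode_list 'full' value format (status at index 0, activeseconds at 4, lastpaidtime at 5):

    import time

    def payment_wait(activeseconds: int, lastpaidtime: int, now: int) -> int:
        # Never-paid nodes wait their full active time; otherwise the
        # wait is time since last payment, capped at activeseconds.
        if lastpaidtime == 0:
            return activeseconds
        return min(now - lastpaidtime, activeseconds)

    def payment_queue(full_mn_list: dict) -> list:
        now = int(time.time())
        queue = []
        for stat in (value.split() for value in full_mn_list.values()):
            if stat[0] == 'ENABLED':
                queue.append(stat + [payment_wait(int(stat[4]), int(stat[5]), now)])
        # Longest effective wait first, matching the sort on the appended key.
        return sorted(queue, key=lambda s: s[-1], reverse=True)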
-
-    async def protx_diff(self, base_height, height):
-        '''
-        Calculates a diff between two deterministic masternode lists.
-        The result also contains proof data.
-
-        base_height: The starting block height (starting from 1).
-        height: The ending block height.
-        '''
-        if not isinstance(base_height, int) or not isinstance(height, int):
-            raise RPCError(BAD_REQUEST, 'expected integer block heights')
-
-        max_height = self.db.db_height
-        if (not 1 <= base_height <= max_height or
-                not base_height <= height <= max_height):
-            raise RPCError(BAD_REQUEST,
-                           f'require 1 <= base_height {base_height:,d} <= '
-                           f'height {height:,d} <= '
-                           f'chain height {max_height:,d}')
-
-        return await self.daemon_request('protx',
-                                         ('diff', base_height, height))
-
-    async def protx_info(self, protx_hash):
-        '''
-        Returns detailed information about a deterministic masternode.
-
-        protx_hash: The hash of the initial ProRegTx
-        '''
-        if not isinstance(protx_hash, str):
-            raise RPCError(BAD_REQUEST, 'expected protx hash string')
-
-        res = await self.daemon_request('protx', ('info', protx_hash))
-        if 'wallet' in res:
-            del res['wallet']
-        return res
-
-
-class SmartCashElectrumX(DashElectrumX):
-    '''A TCP server that handles incoming Electrum-SMART connections.'''
-
-    def set_request_handlers(self, ptuple):
-        super().set_request_handlers(ptuple)
-        self.request_handlers.update({
-            'smartrewards.current': self.smartrewards_current,
-            'smartrewards.check': self.smartrewards_check
-        })
-
-    async def smartrewards_current(self):
-        '''Returns the current smartrewards info.'''
-        result = await self.daemon_request('smartrewards', ('current',))
-        if result is not None:
-            return result
-        return None
-
-    async def smartrewards_check(self, addr):
-        '''
-        Returns the status of an address.
-
-        addr: a single smartcash address
-        '''
-        result = await self.daemon_request('smartrewards', ('check', addr))
-        if result is not None:
-            return result
-        return None
-
-
-class AuxPoWElectrumX(ElectrumX):
-    async def block_header(self, height, cp_height=0):
-        result = await super().block_header(height, cp_height)
-
-        # Older protocol versions don't truncate AuxPoW
-        if self.protocol_tuple < (1, 4, 1):
-            return result
-
-        # Not covered by a checkpoint; return full AuxPoW data
-        if cp_height == 0:
-            return result
-
-        # Covered by a checkpoint; truncate AuxPoW data
-        result['header'] = self.truncate_auxpow(result['header'], height)
-        return result
-
-    async def block_headers(self, start_height, count, cp_height=0):
-        result = await super().block_headers(start_height, count, cp_height)
-
-        # Older protocol versions don't truncate AuxPoW
-        if self.protocol_tuple < (1, 4, 1):
-            return result
-
-        # Not covered by a checkpoint; return full AuxPoW data
-        if cp_height == 0:
-            return result
-
-        # Covered by a checkpoint; truncate AuxPoW data
-        result['hex'] = self.truncate_auxpow(result['hex'], start_height)
-        return result
-
-    def truncate_auxpow(self, headers_full_hex, start_height):
-        height = start_height
-        headers_full = util.hex_to_bytes(headers_full_hex)
-        cursor = 0
-        headers = bytearray()
-
-        while cursor < len(headers_full):
-            headers += headers_full[cursor:cursor+self.coin.TRUNCATED_HEADER_SIZE]
-            cursor += self.db.dynamic_header_len(height)
-            height += 1
-
-        return headers.hex()
-
-
-class NameIndexElectrumX(ElectrumX):
-    def set_request_handlers(self, ptuple):
-        super().set_request_handlers(ptuple)
-
-        if ptuple >= (1, 4, 3):
-            self.request_handlers['blockchain.name.get_value_proof'] = self.name_get_value_proof
-
-    async def name_get_value_proof(self, scripthash, cp_height=0):
-        history = await self.scripthash_get_history(scripthash)
-
-        trimmed_history = []
-        prev_height = None
-
-        for update in history[::-1]:
-            txid = update['tx_hash']
-            height = update['height']
-
-            if (self.coin.NAME_EXPIRATION is
not None - and prev_height is not None - and height < prev_height - self.coin.NAME_EXPIRATION): - break - - tx = await(self.transaction_get(txid)) - update['tx'] = tx - del update['tx_hash'] - - tx_merkle = await self.transaction_merkle(txid, height) - del tx_merkle['block_height'] - update['tx_merkle'] = tx_merkle - - if height <= cp_height: - header = await self.block_header(height, cp_height) - update['header'] = header - - trimmed_history.append(update) - - if height <= cp_height: - break - - prev_height = height - - return {scripthash: trimmed_history} - - -class NameIndexAuxPoWElectrumX(NameIndexElectrumX, AuxPoWElectrumX): - pass diff --git a/electrumx/server/session/electrumx_session.py b/electrumx/server/session/electrumx_session.py index 9fd9080d..9a0217f2 100644 --- a/electrumx/server/session/electrumx_session.py +++ b/electrumx/server/session/electrumx_session.py @@ -82,8 +82,8 @@ async def notify(self, touched, height_changed): except TaskTimeout: self.logger.warning('timeout notifying client, closing...') await self.close(force_after=1.0) - except Exception: - self.logger.exception('unexpected exception notifying client') + except Exception as e: + self.logger.exception(f'Unexpected exception notifying client: {e}') async def _notify_inner(self, touched, height_changed): """Notify the client about changes to touched addresses (from mempool diff --git a/electrumx/server/session/http_session.py b/electrumx/server/session/http_session.py index 0ab28c8a..2d6c18c9 100644 --- a/electrumx/server/session/http_session.py +++ b/electrumx/server/session/http_session.py @@ -39,7 +39,7 @@ async def formatted_request(request, call): return error_resp(500, e) -class HttpHandler(object): +class HttpSession(object): def __init__(self, session_mgr, db, mempool, peer_mgr, kind): # self.transport = transport self.logger = util.class_logger(__name__, self.__class__.__name__) diff --git a/electrumx/server/session/session_manager.py b/electrumx/server/session/session_manager.py index 5c2bb49c..9e1c6fd1 100644 --- a/electrumx/server/session/session_manager.py +++ b/electrumx/server/session/session_manager.py @@ -22,7 +22,7 @@ from electrumx.server.http_middleware import * from electrumx.server.mempool import MemPool from electrumx.server.session import BAD_REQUEST, DAEMON_ERROR -from electrumx.server.session.http_session import HttpHandler +from electrumx.server.session.http_session import HttpSession from electrumx.server.session.util import non_negative_integer, SESSION_PROTOCOL_MAX from electrumx.server.peers import PeerManager @@ -133,7 +133,7 @@ async def _start_servers(self, services): error_middleware(self), request_middleware(self), ]) - handler = HttpHandler(self, self.db, self.mempool, self.peer_mgr, kind) + handler = HttpSession(self, self.db, self.mempool, self.peer_mgr, kind) await handler.add_endpoints(app.router, SESSION_PROTOCOL_MAX) app['rate_limiter'] = rate_limiter runner = web.AppRunner(app) diff --git a/electrumx/server/session/shared_session.py b/electrumx/server/session/shared_session.py index 50301c5c..1b31adf6 100644 --- a/electrumx/server/session/shared_session.py +++ b/electrumx/server/session/shared_session.py @@ -11,7 +11,7 @@ from electrumx.server.daemon import DaemonError from electrumx.server.session import ATOMICALS_INVALID_TX, BAD_REQUEST from electrumx.server.session.util import assert_atomical_id, non_negative_integer, SESSION_BASE_MAX_CHUNK_SIZE, \ - scripthash_to_hashX, assert_tx_hash + scripthash_to_hash_x, assert_tx_hash if TYPE_CHECKING: from 
electrumx.lib.coins import AtomicalsCoinMixin, Coin @@ -158,35 +158,35 @@ async def relay_fee(self): async def scripthash_get_balance(self, scripthash): """Return the confirmed and unconfirmed balance of a scripthash.""" - hash_x = scripthash_to_hashX(scripthash) + hash_x = scripthash_to_hash_x(scripthash) return await self.get_balance(hash_x) async def scripthash_get_history(self, scripthash): """Return the confirmed and unconfirmed history of a scripthash.""" - hash_x = scripthash_to_hashX(scripthash) + hash_x = scripthash_to_hash_x(scripthash) return await self._confirmed_and_unconfirmed_history(hash_x) async def scripthash_get_mempool(self, scripthash): """Return the mempool transactions touching a scripthash.""" - hash_x = scripthash_to_hashX(scripthash) + hash_x = scripthash_to_hash_x(scripthash) return await self._unconfirmed_history(hash_x) async def scripthash_list_unspent(self, scripthash): """Return the list of UTXOs of a scripthash.""" - hash_x = scripthash_to_hashX(scripthash) + hash_x = scripthash_to_hash_x(scripthash) return await self._hash_x_list_unspent(hash_x) async def scripthash_subscribe(self, scripthash): """Subscribe to a script hash. scripthash: the SHA256 hash of the script to subscribe to""" - hash_x = scripthash_to_hashX(scripthash) + hash_x = scripthash_to_hash_x(scripthash) return await self._hash_x_subscribe(hash_x, scripthash) async def scripthash_unsubscribe(self, scripthash): """Unsubscribe from a script hash.""" self.bump_cost(0.1) - hash_x = scripthash_to_hashX(scripthash) + hash_x = scripthash_to_hash_x(scripthash) return self.unsubscribe_hash_x(hash_x) is not None async def compact_fee_histogram(self): @@ -195,17 +195,17 @@ async def compact_fee_histogram(self): async def atomicals_get_ft_balances(self, scripthash): """Return the FT balances for a scripthash address""" - hash_x = scripthash_to_hashX(scripthash) + hash_x = scripthash_to_hash_x(scripthash) return await self._hash_x_ft_balances_atomicals(hash_x) async def atomicals_get_nft_balances(self, scripthash): """Return the NFT balances for a scripthash address""" - hash_x = scripthash_to_hashX(scripthash) + hash_x = scripthash_to_hash_x(scripthash) return await self._hash_x_nft_balances_atomicals(hash_x) async def atomicals_list_scripthash(self, scripthash, verbose=False): """Return the list of Atomical UTXOs for an address""" - hash_x = scripthash_to_hashX(scripthash) + hash_x = scripthash_to_hash_x(scripthash) return await self._hash_x_list_scripthash_atomicals(hash_x, verbose) async def atomicals_list(self, limit, offset, asc): @@ -904,7 +904,7 @@ async def transaction_by_atomical_id(self, compact_id_or_number, limit=10, offse # get transaction by scripthash async def transaction_by_scripthash(self, scripthash, limit=10, offset=0, op_type=None, reverse=True): - hash_x = scripthash_to_hashX(scripthash) + hash_x = scripthash_to_hash_x(scripthash) res = [] if op_type: op = self.session_mgr.bp.op_list.get(op_type, None) diff --git a/electrumx/server/session/util.py b/electrumx/server/session/util.py index 62b1b08c..3bf4c132 100644 --- a/electrumx/server/session/util.py +++ b/electrumx/server/session/util.py @@ -8,7 +8,7 @@ SESSION_PROTOCOL_MAX = (1, 4, 3) -def scripthash_to_hashX(scripthash): +def scripthash_to_hash_x(scripthash): try: bin_hash = hex_str_to_hash(scripthash) if len(bin_hash) == 32: diff --git a/electrumx/server/storage.py b/electrumx/server/storage.py index a45c2700..87bc929e 100644 --- a/electrumx/server/storage.py +++ b/electrumx/server/storage.py @@ -5,7 +5,7 @@ # See the file 
"LICENCE" for information about the copyright # and warranty status of this software. -'''Backend database abstraction.''' +"""Backend database abstraction.""" import os from functools import partial @@ -15,16 +15,16 @@ def db_class(name) -> Type['Storage']: - '''Returns a DB engine class.''' - for db_class in util.subclasses(Storage): - if db_class.__name__.lower() == name.lower(): - db_class.import_module() - return db_class + """Returns a DB engine class.""" + for db_cls in util.subclasses(Storage): + if db_cls.__name__.lower() == name.lower(): + db_cls.import_module() + return db_cls raise RuntimeError(f'unrecognised DB engine "{name}"') class Storage: - '''Abstract base class of the DB backend abstraction.''' + """Abstract base class of the DB backend abstraction.""" def __init__(self, name, for_sync): self.is_new = not os.path.exists(name) @@ -33,15 +33,15 @@ def __init__(self, name, for_sync): @classmethod def import_module(cls): - '''Import the DB engine module.''' + """Import the DB engine module.""" raise NotImplementedError def open(self, name, create): - '''Open an existing database or create a new one.''' + """Open an existing database or create a new one.""" raise NotImplementedError def close(self): - '''Close an existing database.''' + """Close an existing database.""" raise NotImplementedError def get(self, key): @@ -51,26 +51,26 @@ def put(self, key, value): raise NotImplementedError def write_batch(self): - '''Return a context manager that provides `put` and `delete`. + """Return a context manager that provides `put` and `delete`. Changes should only be committed when the context manager closes without an exception. - ''' + """ raise NotImplementedError def iterator(self, prefix=b'', reverse=False): - '''Return an iterator that yields (key, value) pairs from the + """Return an iterator that yields (key, value) pairs from the database sorted by key. If `prefix` is set, only keys starting with `prefix` will be included. If `reverse` is True the items are returned in reverse order. 
- ''' + """ raise NotImplementedError class LevelDB(Storage): - '''LevelDB database engine.''' + """LevelDB database engine.""" @classmethod def import_module(cls): @@ -91,7 +91,7 @@ def open(self, name, create): class RocksDB(Storage): - '''RocksDB database engine.''' + """RocksDB database engine.""" @classmethod def import_module(cls): @@ -123,7 +123,7 @@ def iterator(self, prefix=b'', reverse=False): class RocksDBWriteBatch: - '''A write batch for RocksDB.''' + """A write batch for RocksDB.""" def __init__(self, db): self.batch = RocksDB.module.WriteBatch() @@ -138,7 +138,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): class RocksDBIterator: - '''An iterator for RocksDB.''' + """An iterator for RocksDB.""" def __init__(self, db, prefix, reverse): self.prefix = prefix diff --git a/tests/lib/test_atomicals_blueprint_builder.py b/tests/lib/test_atomicals_blueprint_builder.py index 9196ba94..13bff2a1 100644 --- a/tests/lib/test_atomicals_blueprint_builder.py +++ b/tests/lib/test_atomicals_blueprint_builder.py @@ -37,7 +37,7 @@ def mock_mint_fetcher(self, atomical_id): ft_output_blueprint = blueprint_builder.get_ft_output_blueprint() assert(len(ft_output_blueprint.outputs) == 0) - assert(ft_output_blueprint.first_atomical_id == None) + assert(ft_output_blueprint.first_atomical_id is None) assert(blueprint_builder.are_fts_burned == False) # Log that there were tokens burned due to not being cleanly assigned From 3cce6b767ec79490d81fc5ab2f4910d12e079a14 Mon Sep 17 00:00:00 2001 From: Wizz Wallet <153743376+wizz-wallet-dev@users.noreply.github.com> Date: Wed, 29 May 2024 11:28:58 +0800 Subject: [PATCH 12/13] Fix requirements and setup script --- requirements-test.txt | 4 ++-- requirements.txt | 23 +++++++++++------------ setup.py | 22 ++++++++++------------ 3 files changed, 23 insertions(+), 26 deletions(-) diff --git a/requirements-test.txt b/requirements-test.txt index 96362ad5..01a123ff 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -1,4 +1,4 @@ -requests==2.31.0 +requests>=2.32.0,<2.33 pycodestyle pytest-asyncio pytest-cov @@ -19,4 +19,4 @@ x16rv2_hash git+https://github.com/VerusCoin/verushashpy bell-yespower cpupower -bitweb_yespower==1.0.5 \ No newline at end of file +bitweb_yespower==1.0.5 diff --git a/requirements.txt b/requirements.txt index e34c676d..f155c5d6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,15 +1,14 @@ -aiorpcX[ws]>=0.22.0,<0.23 -attrs +aiorpcX[ws]>=0.23.0,<0.24 +attrs>=23.2.0,<23.3.0 pylru @ git+https://github.com/atomicals-community/pylru@c9b47f0 -aiohttp>=3.3,<4 -cbor2 -websockets -regex -krock32 -merkletools @ git+https://github.com/tierion/pymerkletools.git@f10d71e2cd529a833728e836dc301f9af502d0b0 -requests==2.31.0 -python-dotenv +aiohttp>=3.9.0,<3.10 +cbor2>=5.6.0,<5.7 +websockets>=12.0.0,<13.0 +regex==2024.5.15 +krock32>=0.1.1,<0.2 +merkletools @ git+https://github.com/tierion/pymerkletools@f10d71e +requests>=2.32.0,<2.33 +python-dotenv>=1.0.0,<1.1 # For LevelDB -plyvel - +plyvel>=1.5.0,<1.6 diff --git a/setup.py b/setup.py index d0cce275..580743be 100644 --- a/setup.py +++ b/setup.py @@ -1,14 +1,13 @@ import os import re - import setuptools def find_version(): tld = os.path.abspath(os.path.dirname(__file__)) - filename = os.path.join(tld, 'electrumx', '__init__.py') - with open(filename) as f: - text = f.read() + filename = os.path.join(tld, 'electrumx', 'version.py') + with open(filename) as file: + text = file.read() match = re.search(r"^__version__ = \"(.*)\"$", text, re.MULTILINE) if not match: 
raise RuntimeError('cannot find version') @@ -17,19 +16,19 @@ def find_version(): version = find_version() -with open('requirements.txt') as f: - requirements = f.read().splitlines() +with open('requirements.txt') as _file: + requirements = _file.read().splitlines() setuptools.setup( name='e-x', version=version, scripts=['electrumx_server', 'electrumx_rpc', 'electrumx_compact_history'], - python_requires='>=3.8', + python_requires='>=3.10', install_requires=requirements, extras_require={ 'dev': ['objgraph'], 'rapidjson': ['python-rapidjson>=0.4.1,<2.0'], - 'rocksdb': ['Cython', 'rocksdb @ git+https://github.com/jansegre/python-rocksdb@6177a68'], + 'rocksdb': ['Cython>=3.0,<3.1', 'rocksdb @ git+https://github.com/jansegre/python-rocksdb@6177a68'], 'ujson': ['ujson>=2.0.0,<4.0.0'], 'uvloop': ['uvloop>=0.14'], # For various coins @@ -46,18 +45,17 @@ def find_version(): packages=setuptools.find_packages(include=('electrumx*',)), description='ElectrumX Server', author='Electrum developers', - author_email='electrumdev@gmail.com', license='MIT Licence', - url='https://github.com/spesmilo/electrumx', + url='https://github.com/atomicals/atomicals-electrumx', long_description='Server implementation for the Electrum protocol', - download_url=('https://github.com/spesmilo/electrumX/archive/' + download_url=('https://github.com/atomicals/atomicals-electrumx/archive/' f'{version}.tar.gz'), classifiers=[ 'Development Status :: 5 - Production/Stable', 'Framework :: AsyncIO', 'License :: OSI Approved :: MIT License', 'Operating System :: Unix', - "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.10", "Topic :: Database", 'Topic :: Internet', ], From ed03d817175fdad9bf31b1beb4348c7a21f5b918 Mon Sep 17 00:00:00 2001 From: Wizz Wallet <153743376+wizz-wallet-dev@users.noreply.github.com> Date: Wed, 29 May 2024 11:38:00 +0800 Subject: [PATCH 13/13] Fix controller `aiorpcx_version` requirement --- electrumx/server/controller.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/electrumx/server/controller.py b/electrumx/server/controller.py index d0d402da..028c5ae3 100644 --- a/electrumx/server/controller.py +++ b/electrumx/server/controller.py @@ -80,11 +80,9 @@ class Controller(ServerBase): """ async def serve(self, shutdown_event): - """Start the RPC server and wait for the mempool to synchronize. - - Thenstart serving external clients.""" - if not (0, 22, 0) <= aiorpcx_version < (0, 23): - raise RuntimeError('aiorpcX version 0.22.x is required') + """Start the RPC server and wait for the mempool to synchronize, then start serving external clients.""" + if not (0, 23, 0) <= aiorpcx_version < (0, 24): + raise RuntimeError('aiorpcX version 0.23.x is required') env = self.env min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
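The version gate in this last hunk relies on Python tuple ordering to accept exactly the aiorpcX 0.23.x series. A standalone sketch of the same half-open-range check (the function name is illustrative):

    def check_aiorpcx(version: tuple) -> None:
        # Half-open range: any 0.23.x release passes; 0.24+ or older fails.
        # Note (0, 24, 0) < (0, 24) is False, so 0.24.0 is rejected.
        if not ((0, 23, 0) <= version < (0, 24)):
            raise RuntimeError('aiorpcX version 0.23.x is required')

    check_aiorpcx((0, 23, 1))      # passes silently
    # check_aiorpcx((0, 22, 9))    # would raise RuntimeError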