Skip to content

Commit

Permalink
Merge branch 'Synapse' into feature/cpu_register_faster
Browse files Browse the repository at this point in the history
  • Loading branch information
joeylegere authored Jul 27, 2022
2 parents 5ac648b + c2f0fc5 commit 11d35d7
Show file tree
Hide file tree
Showing 82 changed files with 9,599 additions and 6,097 deletions.
38 changes: 19 additions & 19 deletions .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ version: 2.1

orbs:
python: circleci/python@2.0.3
coveralls: coveralls/coveralls@1.0.6
# coveralls: coveralls/coveralls@1.0.6

jobs:
build-and-test:
Expand Down Expand Up @@ -76,21 +76,21 @@ jobs:
- store_artifacts:
path: test-results

- when:
condition:
equal: ["3.10.5", << parameters.python-version >> ]
steps:
- run:
name: Upload Coverage
command: |
. env/bin/activate && coveralls
env:
CI_NAME: circleci
CI_BUILD_NUMBER: $CIRCLE_BUILD_NUM
CI_BUILD_URL: $CIRCLE_BUILD_URL
CI_BRANCH: $CIRCLE_BRANCH
CI_JOB_ID: $CIRCLE_NODE_INDEX
COVERALLS_PARALLEL: true
#- when:
#condition:
#equal: ["3.10.5", << parameters.python-version >> ]
#steps:
#- run:
#name: Upload Coverage
#command: |
#. env/bin/activate && coveralls
#env:
#CI_NAME: circleci
#CI_BUILD_NUMBER: $CIRCLE_BUILD_NUM
#CI_BUILD_URL: $CIRCLE_BUILD_URL
#CI_BRANCH: $CIRCLE_BRANCH
#CI_JOB_ID: $CIRCLE_NODE_INDEX
#COVERALLS_PARALLEL: true

unit-tests-all-python-versions:
docker:
Expand Down Expand Up @@ -120,6 +120,6 @@ workflows:
- unit-tests-all-python-versions:
requires:
- build-and-test
- coveralls:
requires:
- build-and-test
#- coveralls:
#requires:
#- build-and-test
8 changes: 4 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ At Bittensor, we are creating an open, decentralized, peer-to-peer network that
https://opentensor.gitbook.io/bittensor/

## 2. Install
Three ways to install Bittensor.
Three ways to install Bittensor

1. Through the installer:
```
Expand Down Expand Up @@ -230,19 +230,19 @@ The template server follows a similar structure as the template miner.

```bash
$ cd bittensor
$ python3 ./bittensor/_neuron/text/template_server/main.py --wallet.name <WALLET NAME> --wallet.hotkey <HOTKEY NAME>
$ python3 ./bittensor/_neuron/text/core_server/main.py --wallet.name <WALLET NAME> --wallet.hotkey <HOTKEY NAME>
```
or
```python3
>> import bittensor
>> bittensor.neurons.text.template_server.neuron().run()
>> bittensor.neurons.text.core_server.neuron().run()
```

For the full list of settings, please run

```bash
$ cd bittensor
$ python3 ./bittensor/_neuron/text/template_server/main.py --help
$ python3 ./bittensor/_neuron/text/core_server/main.py --help
```


Expand Down
65 changes: 0 additions & 65 deletions benchmarks/advanced_server.py

This file was deleted.

8 changes: 4 additions & 4 deletions benchmarks/template_server.py → benchmarks/core_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
""" Benchmarking pytest fixture.
Example:
$ python3 benchmarks/template_server.py --neuron.model_name albert-base-v1
$ python3 benchmarks/core_server.py --neuron.model_name albert-base-v1
"""
from benchmarks import QueryBenchmark
Expand All @@ -33,7 +33,7 @@ class Benchmark ( QueryBenchmark ):
def miner_name() -> str:
r""" Return miner name
"""
return 'template_server'
return 'core_server'

@staticmethod
def run_neuron( config , subtensor, metagraph, wallet ):
Expand All @@ -42,7 +42,7 @@ def run_neuron( config , subtensor, metagraph, wallet ):
config (bittensor.Config)
Run config
"""
bittensor.neurons.text.template_server.neuron( config,subtensor=subtensor, metagraph=metagraph,wallet=wallet).run()
bittensor.neurons.text.core_server.neuron( config,subtensor=subtensor, metagraph=metagraph,wallet=wallet).run()

@staticmethod
def config() -> 'bittensor.Config':
Expand All @@ -51,7 +51,7 @@ def config() -> 'bittensor.Config':
config (bittensor.Config)
Run config.
"""
config = bittensor.neurons.text.template_server.neuron.config()
config = bittensor.neurons.text.core_server.neuron.config()
return config


Expand Down
13 changes: 11 additions & 2 deletions bittensor/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def turn_console_off():

# Vocabulary dimension.
#__vocab_size__ = len( tokenizer ) + len( tokenizer.additional_special_tokens) + 100 # Plus 100 for eventual token size increase.
__vocab_size__ = 50378
__vocab_size__ = 50258

# Tensor dimension.
# NOTE (const): if/when this increases peers must be responsible for trimming or expanding output to this size.
Expand All @@ -49,6 +49,9 @@ def turn_console_off():
# Substrate ss58_format
__ss58_format__ = 42

# Wallet ss58 address length
__ss58_address_length__ = 48

__networks__ = [ 'local', 'nobunaga', 'nakamoto']

__datasets__ = ['ArXiv', 'BookCorpus2', 'Books3', 'DMMathematics', 'EnronEmails', 'EuroParl', 'Gutenberg_PG', 'HackerNews', 'NIHExPorter', 'OpenSubtitles', 'PhilPapers', 'UbuntuIRC', 'YoutubeSubtitles']
Expand Down Expand Up @@ -102,6 +105,7 @@ def turn_console_off():
from bittensor._subtensor import subtensor as subtensor
from bittensor._tokenizer import tokenizer as tokenizer
from bittensor._serializer import serializer as serializer
from bittensor._synapse import synapse as synapse
from bittensor._dataset import dataset as dataset
from bittensor._receptor import receptor_pool as receptor_pool
from bittensor._wandb import wandb as wandb
Expand All @@ -122,7 +126,12 @@ def turn_console_off():
from bittensor._dataset.dataset_impl import Dataset as Dataset
from bittensor._receptor.receptor_pool_impl import ReceptorPool as ReceptorPool
from bittensor._threadpool.priority_thread_pool_impl import PriorityThreadPoolExecutor as PriorityThreadPoolExecutor
from bittensor._ipfs.ipfs_impl import Ipfs
from bittensor._ipfs.ipfs_impl import Ipfs as Ipfs
from bittensor._synapse.synapse_impl import Synapse as Synapse
from bittensor._synapse.text_causallm_impl import TextCausalLM as TextCausalLM
from bittensor._synapse.text_causallmnext_impl import TextCausalLMNext as TextCausalLMNext
from bittensor._synapse.text_lasthiddenstate_impl import TextLastHiddenState as TextLastHiddenState
from bittensor._synapse.text_seq2seq_impl import TextSeq2Seq as TextSeq2Seq

# DEFAULTS
defaults = Config()
Expand Down
96 changes: 46 additions & 50 deletions bittensor/_axon/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,10 +51,11 @@ def __new__(
wallet: 'bittensor.Wallet' = None,
forward_text: 'Callable' = None,
backward_text: 'Callable' = None,
forward_image: 'Callable' = None,
backward_image: 'Callable' = None,
forward_tensor: 'Callable' = None,
backward_tensor: 'Callable' = None,
synapse_last_hidden: 'Callable' = None,
synapse_causal_lm: 'Callable' = None,
synapse_causal_lm_next: 'Callable' = None,
synapse_seq_2_seq: 'Callable' = None,
synapse_checks: 'Callable' = None,
thread_pool: 'futures.ThreadPoolExecutor' = None,
server: 'grpc._Server' = None,
port: int = None,
Expand All @@ -77,14 +78,16 @@ def __new__(
function which is called on forward text requests.
backward_text (:obj:`callable`, `optional`):
function which is called on backward text requests.
forward_image (:obj:`callable`, `optional`):
function which is called on forward image requests.
backward_image (:obj:`callable`, `optional`):
function which is called on backward image requests.
forward_tensor (:obj:`callable`, `optional`):
function which is called on forward tensor requests.
backward_tensor (:obj:`callable`, `optional`):
function which is called on backward tensor requests.
synapse_last_hidden (:obj:`callable`, `optional`):
function which is called by the last hidden synapse
synapse_causal_lm (:obj:`callable`, `optional`):
function which is called by the causal lm synapse
synapse_causal_lm_next (:obj:`callable`, `optional`):
function which is called by the TextCausalLMNext synapse
synapse_seq_2_seq (:obj:`callable`, `optional`):
function which is called by the seq2seq synapse
synapse_checks (:obj:`callable`, 'optional'):
function which is called before each synapse to check for stake
thread_pool (:obj:`ThreadPoolExecutor`, `optional`):
Threadpool used for processing server queries.
server (:obj:`grpc._Server`, `required`):
Expand Down Expand Up @@ -139,8 +142,13 @@ def __new__(
('grpc.keepalive_timeout_ms', 500000)]
)

forwards = [forward_text, forward_image, forward_tensor]
backwards = [backward_text, backward_image, backward_tensor]
synapses = {}
synapses[bittensor.proto.Synapse.SynapseType.TEXT_LAST_HIDDEN_STATE] = synapse_last_hidden
synapses[bittensor.proto.Synapse.SynapseType.TEXT_CAUSAL_LM] = synapse_causal_lm
synapses[bittensor.proto.Synapse.SynapseType.TEXT_CAUSAL_LM_NEXT] = synapse_causal_lm_next
synapses[bittensor.proto.Synapse.SynapseType.TEXT_SEQ_2_SEQ] = synapse_seq_2_seq

synapse_check_function = synapse_checks if synapse_checks != None else axon.default_synapse_check

if priority != None:
priority_threadpool = bittensor.prioritythreadpool(config=config)
Expand All @@ -152,8 +160,10 @@ def __new__(
server = server,
ip = config.axon.ip,
port = config.axon.port,
forwards = forwards,
backwards = backwards,
forward = forward_text,
backward = backward_text,
synapses = synapses,
synapse_checks = synapse_check_function,
priority = priority,
priority_threadpool = priority_threadpool,
forward_timeout = config.axon.forward_timeout,
Expand Down Expand Up @@ -200,7 +210,7 @@ def add_args( cls, parser: argparse.ArgumentParser, prefix: str = None ):
parser.add_argument('--' + prefix_str + 'axon.backward_timeout', type=int,
help='Number of seconds to wait for backward axon request', default=2*bittensor.__blocktime__)
parser.add_argument('--' + prefix_str + 'axon.forward_timeout', type=int,
help='Number of seconds to wait for forward axon request', default=bittensor.__blocktime__)
help='Number of seconds to wait for forward axon request', default=5*bittensor.__blocktime__)
parser.add_argument('--' + prefix_str + 'axon.priority.max_workers', type = int,
help='''maximum number of threads in thread pool''', default = bittensor.defaults.axon.priority.max_workers)
parser.add_argument('--' + prefix_str + 'axon.priority.maxsize', type=int,
Expand All @@ -217,13 +227,13 @@ def add_args( cls, parser: argparse.ArgumentParser, prefix: str = None ):
def add_defaults(cls, defaults):
""" Adds parser defaults to object from environment variables.
"""
defaults.axon = bittensor.Config()
defaults.axon = bittensor.config()
defaults.axon.port = os.getenv('BT_AXON_PORT') if os.getenv('BT_AXON_PORT') != None else 8091
defaults.axon.ip = os.getenv('BT_AXON_IP') if os.getenv('BT_AXON_IP') != None else '[::]'
defaults.axon.max_workers = os.getenv('BT_AXON_MAX_WORERS') if os.getenv('BT_AXON_MAX_WORERS') != None else 10
defaults.axon.maximum_concurrent_rpcs = os.getenv('BT_AXON_MAXIMUM_CONCURRENT_RPCS') if os.getenv('BT_AXON_MAXIMUM_CONCURRENT_RPCS') != None else 400

defaults.axon.priority = bittensor.Config()
defaults.axon.priority = bittensor.config()
defaults.axon.priority.max_workers = os.getenv('BT_AXON_PRIORITY_MAX_WORKERS') if os.getenv('BT_AXON_PRIORITY_MAX_WORKERS') != None else 10
defaults.axon.priority.maxsize = os.getenv('BT_AXON_PRIORITY_MAXSIZE') if os.getenv('BT_AXON_PRIORITY_MAXSIZE') != None else -1

Expand All @@ -236,56 +246,42 @@ def check_config(cls, config: 'bittensor.Config' ):
assert config.axon.port > 1024 and config.axon.port < 65535, 'port must be in range [1024, 65535]'
bittensor.wallet.check_config( config )

@classmethod
def default_synapse_check(cls, synapse, hotkey ):
""" default synapse check function
"""
if len(hotkey) == bittensor.__ss58_address_length__:
return True

return False

@staticmethod
def check_backward_callback( backward_callback:Callable, modality:int, pubkey:str = '_' ):
def check_backward_callback( backward_callback:Callable, pubkey:str = '_' ):
""" Check and test axon backward callback function
"""
if not inspect.ismethod(backward_callback) and not inspect.isfunction(backward_callback):
raise ValueError('The axon backward callback must be a function with signature Callable[inputs_x:torch.FloatTensor, grads_dy:torch.FloatTensor ) -> torch.FloatTensor:, got {}'.format(backward_callback))
if len( inspect.signature(backward_callback).parameters) != 2:
raise ValueError('The axon backward callback must have signature Callable[ inputs_x:torch.FloatTensor, grads_dy:torch.FloatTensor ) -> torch.FloatTensor:, got {}'.format(inspect.signature(backward_callback)))
if len( inspect.signature(backward_callback).parameters) != 3:
raise ValueError('The axon backward callback must have signature Callable[ inputs_x:torch.FloatTensor, grads_dy:torch.FloatTensor, synapses ) -> torch.FloatTensor:, got {}'.format(inspect.signature(backward_callback)))
if 'inputs_x' not in inspect.signature(backward_callback).parameters:
raise ValueError('The axon backward callback must have signature Callable[inputs_x:torch.FloatTensor, grads_dy:torch.FloatTensor ) -> torch.FloatTensor:, got {}'.format(inspect.signature(backward_callback)))
if 'grads_dy' not in inspect.signature(backward_callback).parameters:
raise ValueError('The axon backward callback must have signature Callable[inputs_x:torch.FloatTensor, grads_dy:torch.FloatTensor ) -> torch.FloatTensor:, got {}'.format(inspect.signature(backward_callback)))

if modality == bittensor.proto.Modality.TEXT:
sample_input = torch.randint(0,1,(3, 3))
grads_raw = torch.rand(3, 3, bittensor.__network_dim__)
backward_callback(sample_input,grads_raw)

if modality == bittensor.proto.Modality.IMAGE:
sample_input = torch.rand(1,1,3,512,512)
grads_raw = torch.rand(512, 512, bittensor.__network_dim__)
backward_callback(sample_input,grads_raw)

if modality == bittensor.proto.Modality.TENSOR:
sample_input = torch.rand(1,1,1)
grads_raw = torch.rand(1, 1, bittensor.__network_dim__)
backward_callback(sample_input,grads_raw)

@staticmethod
def check_forward_callback( forward_callback:Callable, modality:int, pubkey:str = '_'):
def check_forward_callback( forward_callback:Callable, synapses:list = []):
""" Check and test axon forward callback function
"""
if not inspect.ismethod(forward_callback) and not inspect.isfunction(forward_callback):
raise ValueError('The axon forward callback must be a function with signature Callable[inputs_x: torch.Tensor] -> torch.FloatTensor:, got {}'.format(forward_callback))
if len( inspect.signature(forward_callback).parameters) != 1:
raise ValueError('The axon forward callback must have signature Callable[ inputs_x: torch.Tensor] -> torch.FloatTensor:, got {}'.format(inspect.signature(forward_callback)))
if len( inspect.signature(forward_callback).parameters) != 3:
raise ValueError('The axon forward callback must have signature Callable[ inputs_x: torch.Tensor, synapses, hotkey] -> torch.FloatTensor:, got {}'.format(inspect.signature(forward_callback)))
if 'inputs_x' not in inspect.signature(forward_callback).parameters:
raise ValueError('The axon forward callback must have signature Callable[ inputs_x: torch.Tensor] -> torch.FloatTensor:, got {}'.format(inspect.signature(forward_callback)))

if modality == bittensor.proto.Modality.TEXT:
sample_input = torch.randint(0,1,(3, 3))
forward_callback(sample_input)

if modality == bittensor.proto.Modality.IMAGE:
sample_input = torch.rand(1,1,3,512,512)
forward_callback(sample_input)

if modality == bittensor.proto.Modality.TENSOR:
sample_input = torch.rand(1,1,1)
forward_callback(sample_input)
sample_input = torch.randint(0,1,(3, 3))
forward_callback([sample_input], synapses, hotkey='')

class AuthInterceptor(grpc.ServerInterceptor):
""" Creates a new server interceptor that authenticates incoming messages from passed arguments.
Expand Down
Loading

0 comments on commit 11d35d7

Please sign in to comment.