
[Feature] ask cuda during btcli run #890

Merged 2 commits on Aug 30, 2022
53 changes: 32 additions & 21 deletions bittensor/_cli/__init__.py
@@ -817,6 +817,31 @@ def check_overview_config( config: 'bittensor.Config' ):
            wallet_name = Prompt.ask("Enter wallet name", default = bittensor.defaults.wallet.name)
            config.wallet.name = str(wallet_name)

    def _check_for_cuda_reg_config( config: 'bittensor.Config' ) -> None:
        """Checks, when CUDA is available, if the user would like to register with their CUDA device."""
        if torch.cuda.is_available():
            if config.subtensor.register.cuda.get('use_cuda') is None:
                # Ask about cuda registration only if a CUDA device is available.
                cuda = Confirm.ask("Detected CUDA device, use CUDA for registration?\n")
                config.subtensor.register.cuda.use_cuda = cuda

            # Only ask about which CUDA device if the user has more than one CUDA device.
            if config.subtensor.register.cuda.use_cuda and config.subtensor.register.cuda.get('dev_id') is None and torch.cuda.device_count() > 0:
                devices: List[str] = [str(x) for x in range(torch.cuda.device_count())]
                device_names: List[str] = [torch.cuda.get_device_name(x) for x in range(torch.cuda.device_count())]
                console.print("Available CUDA devices:")
                choices_str: str = ""
                for i, device in enumerate(devices):
                    choices_str += (" {}: {}\n".format(device, device_names[i]))
                console.print(choices_str)
                dev_id = Prompt.ask("Which GPU would you like to use?", choices=devices, default=str(bittensor.defaults.subtensor.register.cuda.dev_id))
                try:
                    dev_id = int(dev_id)
                except ValueError:
                    console.error(":cross_mark:[red]Invalid GPU device[/red] [bold white]{}[/bold white]\nAvailable CUDA devices:{}".format(dev_id, choices_str))
                    sys.exit(1)
                config.subtensor.register.cuda.dev_id = dev_id

    def check_register_config( config: 'bittensor.Config' ):
        if config.subtensor.get('network') == bittensor.defaults.subtensor.network and not config.no_prompt:
            config.subtensor.network = Prompt.ask("Enter subtensor network", choices=bittensor.__networks__, default = bittensor.defaults.subtensor.network)
@@ -829,27 +854,9 @@ def check_register_config( config: 'bittensor.Config' ):
            hotkey = Prompt.ask("Enter hotkey name", default = bittensor.defaults.wallet.hotkey)
            config.wallet.hotkey = str(hotkey)

        if not config.no_prompt and config.subtensor.register.cuda.use_cuda == bittensor.defaults.subtensor.register.cuda.use_cuda:
            # Ask about cuda registration only if a CUDA device is available.
            if torch.cuda.is_available():
                cuda = Confirm.ask("Detected CUDA device, use CUDA for registration?\n")
                config.subtensor.register.cuda.use_cuda = cuda
                # Only ask about which CUDA device if the user has more than one CUDA device.
                if cuda and config.subtensor.register.cuda.get('dev_id') is None and torch.cuda.device_count() > 0:
                    devices: List[str] = [str(x) for x in range(torch.cuda.device_count())]
                    device_names: List[str] = [torch.cuda.get_device_name(x) for x in range(torch.cuda.device_count())]
                    console.print("Available CUDA devices:")
                    choices_str: str = ""
                    for i, device in enumerate(devices):
                        choices_str += (" {}: {}\n".format(device, device_names[i]))
                    console.print(choices_str)
                    dev_id = Prompt.ask("Which GPU would you like to use?", choices=devices, default=str(bittensor.defaults.subtensor.register.cuda.dev_id))
                    try:
                        dev_id = int(dev_id)
                    except ValueError:
                        console.error(":cross_mark:[red]Invalid GPU device[/red] [bold white]{}[/bold white]\nAvailable CUDA devices:{}".format(dev_id, choices_str))
                        sys.exit(1)
                    config.subtensor.register.cuda.dev_id = dev_id
        if not config.no_prompt:
            cli._check_for_cuda_reg_config(config)


    def check_new_coldkey_config( config: 'bittensor.Config' ):
        if config.wallet.get('name') == bittensor.defaults.wallet.name and not config.no_prompt:
@@ -925,6 +932,10 @@ def check_run_config( config: 'bittensor.Config' ):
        if 'server' in config.model and not config.no_prompt:
            synapse = Prompt.ask('Enter synapse', choices = list(bittensor.synapse.__synapses_types__), default = 'All')
            config.synapse = synapse

        # Don't need to ask about registration if they don't want to reregister the wallet.
        if config.wallet.get('reregister', bittensor.defaults.wallet.reregister) and not config.no_prompt:
            cli._check_for_cuda_reg_config(config)

    def check_help_config( config: 'bittensor.Config'):
        if config.model == 'None':
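Taken together, the hunks above factor the CUDA questions out of check_register_config into a single helper, cli._check_for_cuda_reg_config, which check_register_config and check_run_config now call whenever prompting is allowed. The following is a minimal, self-contained sketch of that prompt flow for readers who want to try it outside bittensor; the dict-based config, the ask_cuda name, and DEFAULT_DEV_ID are illustrative stand-ins rather than bittensor API, and it assumes torch and rich are installed.

# Sketch only: mirrors the merged prompt logic with a plain dict instead of bittensor.Config.
import torch
from rich.console import Console
from rich.prompt import Confirm, Prompt

console = Console()
DEFAULT_DEV_ID = 0  # stand-in for bittensor.defaults.subtensor.register.cuda.dev_id

def ask_cuda(config: dict) -> None:
    """Ask about CUDA registration only when a CUDA device is actually present."""
    if not torch.cuda.is_available():
        return
    if config.get('use_cuda') is None:
        # Nothing was passed on the command line, so ask interactively.
        config['use_cuda'] = Confirm.ask("Detected CUDA device, use CUDA for registration?\n")
    if config['use_cuda'] and config.get('dev_id') is None and torch.cuda.device_count() > 0:
        devices = [str(i) for i in range(torch.cuda.device_count())]
        names = [torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())]
        console.print("Available CUDA devices:")
        console.print("".join(" {}: {}\n".format(d, n) for d, n in zip(devices, names)))
        dev_id = Prompt.ask("Which GPU would you like to use?", choices=devices, default=str(DEFAULT_DEV_ID))
        config['dev_id'] = int(dev_id)  # choices=devices guarantees an integer string

if __name__ == "__main__":
    cfg = {}       # simulate a run where neither --cuda nor --cuda.dev_id was given
    ask_cuda(cfg)
    print(cfg)

Because Prompt.ask restricts the answer to choices, the int() conversion cannot raise here; the merged code keeps a defensive ValueError branch anyway.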
2 changes: 1 addition & 1 deletion bittensor/_subtensor/__init__.py
@@ -188,7 +188,7 @@ def add_args(cls, parser: argparse.ArgumentParser, prefix: str = None ):
        parser.add_argument('--' + prefix_str + 'subtensor.register.num_processes', '-n', dest='subtensor.register.num_processes', help="Number of processors to use for registration", type=int, default=bittensor.defaults.subtensor.register.num_processes)
        parser.add_argument('--' + prefix_str + 'subtensor.register.update_interval', '--' + prefix_str + 'subtensor.register.cuda.update_interval', '--' + prefix_str + 'cuda.update_interval', '-u', help="The number of nonces to process before checking for next block during registration", type=int, default=bittensor.defaults.subtensor.register.update_interval)
        # registration args. Used for register and re-register and anything that calls register.
        parser.add_argument( '--' + prefix_str + 'subtensor.register.cuda.use_cuda', '--' + prefix_str + 'cuda', '--' + prefix_str + 'cuda.use_cuda', default=bittensor.defaults.subtensor.register.cuda.use_cuda, help='''Set true to use CUDA.''', action='store_true', required=False )
        parser.add_argument( '--' + prefix_str + 'subtensor.register.cuda.use_cuda', '--' + prefix_str + 'cuda', '--' + prefix_str + 'cuda.use_cuda', default=argparse.SUPPRESS, help='''Set true to use CUDA.''', action='store_true', required=False )
        parser.add_argument( '--' + prefix_str + 'subtensor.register.cuda.dev_id', '--' + prefix_str + 'cuda.dev_id', type=int, default=argparse.SUPPRESS, help='''Set the CUDA device id. Goes by the order of speed. (i.e. 0 is the fastest).''', required=False )
        parser.add_argument( '--' + prefix_str + 'subtensor.register.cuda.TPB', '--' + prefix_str + 'cuda.TPB', type=int, default=bittensor.defaults.subtensor.register.cuda.TPB, help='''Set the number of Threads Per Block for CUDA.''', required=False )

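The one-line change above in add_args is what lets the new prompt detect whether the user actually passed --cuda: with a concrete default, the parsed config always carried a value for use_cuda, and the old code had to compare it against bittensor's default to guess whether the user had said anything; with argparse.SUPPRESS the attribute is simply absent when the flag is omitted, so config.subtensor.register.cuda.get('use_cuda') is None becomes a reliable "not specified yet" test in _check_for_cuda_reg_config. A small, self-contained demonstration of that argparse behavior (the flag name is shortened here for readability; the real argument also carries the subtensor.register.cuda prefix):

# Demonstrates how default=argparse.SUPPRESS distinguishes "flag omitted" from "flag given".
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--cuda.use_cuda', dest='use_cuda', action='store_true',
                    default=argparse.SUPPRESS, required=False)

print('use_cuda' in vars(parser.parse_args([])))                 # False: attribute absent, so the CLI can prompt
print(vars(parser.parse_args(['--cuda.use_cuda']))['use_cuda'])  # True: explicit opt-in, no prompt needed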