Merge pull request #416 from backend-developers-ltd/burn
implement burn
mpnowacki-reef authored Feb 14, 2025
2 parents 65f11fd + 44a96ab commit 2a23301
Showing 5 changed files with 486 additions and 68 deletions.
17 changes: 17 additions & 0 deletions validator/app/src/compute_horde_validator/settings.py
@@ -335,6 +335,23 @@ def wrapped(*args, **kwargs):
"Timeout for waiting for a streaming job to be ready to accept connections from the user",
int,
),
"DYNAMIC_BURN_TARGET_SS58ADDRESSES": (
"Comma-separated list of ss58 addresses that will receive 'DYNAMIC_BURN_RATE' fraction of all incentives",
"",
str,
),
"DYNAMIC_BURN_RATE": (
0.0,
"(0.0 - 1.0) fraction of miner incentives that will be directed to 'DYNAMIC_BURN_TARGET_SS58ADDRESSES' ",
float,
),
"DYNAMIC_BURN_PARTITION": (
0.0,
"(0.0 - 1.0) each time miner incentive is burned, if there is more than one hotkey registered from among "
"'DYNAMIC_BURN_TARGET_SS58ADDRESSES', one will be chosen as the primary at random (but random seed is the same "
"across all validators) and will receive 'DYNAMIC_BURN_PARTITION' fraction of all the burn.",
float,
),
}
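
To make the three settings concrete, here is a minimal sketch (not part of the diff; all values are hypothetical) of how incentive is split when three burner hotkeys are registered:

# Sketch with made-up values, mirroring the burn logic added to tasks.py below.
burn_rate = 0.3          # DYNAMIC_BURN_RATE: 30% of total incentive is redirected
burn_partition = 0.8     # DYNAMIC_BURN_PARTITION: the main burner's share of the burn
registered = ["5Fa...", "5Fb...", "5Fc..."]  # hypothetical registered burner hotkeys

main = registered[0]     # the real code picks this with a seeded random.Random
split = {main: burn_rate * burn_partition}  # 0.24
for h in registered[1:]:
    # the remaining burn is shared equally among the other burner hotkeys
    split[h] = burn_rate * (1 - burn_partition) / (len(registered) - 1)  # 0.03 each
# miner weights are then scaled by (1 - burn_rate), i.e. 0.7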

# Content Security Policy
3 changes: 3 additions & 0 deletions validator/app/src/compute_horde_validator/validator/models.py
@@ -41,6 +41,7 @@ class EventType(models.TextChoices):
LLM_PROMPT_GENERATION = "LLM_PROMPT_GENERATION"
LLM_PROMPT_ANSWERING = "LLM_PROMPT_ANSWERING"
LLM_PROMPT_SAMPLING = "LLM_PROMPT_SAMPLING"
BURNING_INCENTIVE = "BURNING_INCENTIVE"

class EventSubType(models.TextChoices):
SUCCESS = "SUCCESS"
@@ -88,6 +89,8 @@ class EventSubType(models.TextChoices):
ERROR_UPLOADING_TO_S3 = "ERROR_UPLOADING_TO_S3"
ERROR_DOWNLOADING_FROM_S3 = "ERROR_DOWNLOADING_FROM_S3"
LLM_PROMPT_ANSWERS_DOWNLOAD_WORKER_FAILED = "LLM_PROMPT_ANSWERS_DOWNLOAD_WORKER_FAILED"
APPLIED_BURNING = "APPLIED_BURNING"
NO_BURNING = "NO_BURNING"

type = models.CharField(max_length=255, choices=EventType.choices)
subtype = models.CharField(max_length=255, choices=EventSubType.choices)
157 changes: 129 additions & 28 deletions validator/app/src/compute_horde_validator/validator/tasks.py
@@ -7,9 +7,11 @@
from datetime import timedelta
from functools import cached_property
from math import ceil, floor
from typing import Union

import billiard.exceptions
import bittensor
import bittensor.core.metagraph
import celery.exceptions
import numpy as np
import requests
@@ -26,6 +28,7 @@
from django.conf import settings
from django.db import transaction
from django.utils.timezone import now
from numpy.typing import NDArray
from pydantic import JsonValue

from compute_horde_validator.celery import app
@@ -64,6 +67,9 @@
from .models import AdminJobRequest
from .scoring import score_batches

if False:  # never true at runtime: lets annotations reference torch without importing it
    import torch

logger = get_task_logger(__name__)

JOB_WINDOW = 2 * 60 * 60
@@ -685,6 +691,126 @@ def get_metagraph(subtensor, netuid):
return subtensor.metagraph(netuid=netuid)


def normalize_batch_scores(
hotkey_scores: dict[str, float],
subtensor,
metagraph,
) -> tuple["torch.Tensor", "torch.FloatTensor"] | tuple[NDArray[np.int64], NDArray[np.float32]]:
neurons = metagraph.neurons
hotkey_to_uid = {n.hotkey: n.uid for n in neurons}
score_per_uid = {}

for hotkey, score in hotkey_scores.items():
uid = hotkey_to_uid.get(hotkey)
if uid is None:
continue
score_per_uid[uid] = score

uids = np.zeros(len(neurons), dtype=np.int64)
weights = np.zeros(len(neurons), dtype=np.float32)

if not score_per_uid:
logger.warning("Batch produced no scores")
return uids, weights

for ind, n in enumerate(neurons):
uids[ind] = n.uid
weights[ind] = score_per_uid.get(n.uid, 0)

uids, weights = process_weights_for_netuid(
uids,
weights,
settings.BITTENSOR_NETUID,
subtensor,
metagraph,
)

return uids, weights
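
For illustration (not part of the diff), a minimal sketch with made-up data of the alignment this helper performs before process_weights_for_netuid normalizes the result:

from types import SimpleNamespace

neurons = [SimpleNamespace(hotkey=f"hk{i}", uid=i) for i in range(4)]  # stand-in for metagraph.neurons
hotkey_scores = {"hk1": 10.0, "hk3": 30.0}  # keyed by hotkey, as score_batches returns
# the helper aligns scores into parallel per-uid arrays:
#   uids    -> [0, 1, 2, 3]
#   weights -> [0.0, 10.0, 0.0, 30.0]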


def apply_dancing_burners(
uids: Union["torch.Tensor", NDArray[np.int64]],
weights: Union["torch.FloatTensor", NDArray[np.float32]],
subtensor,
metagraph: bittensor.core.metagraph.NonTorchMetagraph,
cycle_block_start: int,
) -> tuple["torch.Tensor", "torch.FloatTensor"] | tuple[NDArray[np.int64], NDArray[np.float32]]:
burner_hotkeys = config.DYNAMIC_BURN_TARGET_SS58ADDRESSES.split(",")
burn_rate = config.DYNAMIC_BURN_RATE
burn_partition = config.DYNAMIC_BURN_PARTITION

registered_burner_hotkeys = sorted([h for h in burner_hotkeys if h in metagraph.hotkeys])
se_data = {
"registered_burner_hotkeys": registered_burner_hotkeys,
"burner_hotkeys": burner_hotkeys,
"burn_rate": burn_rate,
"burn_partition": burn_partition,
"cycle_block_start": cycle_block_start,
}

if not registered_burner_hotkeys or not burn_rate:
logger.info(
"None of the burner hotkeys registered or burn_rate=0, not applying burn incentive"
)
SystemEvent.objects.using(settings.DEFAULT_DB_ALIAS).create(
type=SystemEvent.EventType.BURNING_INCENTIVE,
subtype=SystemEvent.EventSubType.NO_BURNING,
long_description="",
data=se_data,
)
return uids, weights

if len(registered_burner_hotkeys) == 1:
logger.info("Single burner hotkey registered, applying all burn incentive to it")
weight_adjustment = {registered_burner_hotkeys[0]: burn_rate}
main_burner = registered_burner_hotkeys[0]

else:
main_burner = random.Random(cycle_block_start).choice(registered_burner_hotkeys)
logger.info(
"Main burner: %s, other burners: %s",
main_burner,
[h for h in registered_burner_hotkeys if h != main_burner],
)
weight_adjustment = {
main_burner: burn_rate * burn_partition,
**{
h: burn_rate * (1 - burn_partition) / (len(registered_burner_hotkeys) - 1)
for h in registered_burner_hotkeys
if h != main_burner
},
}

SystemEvent.objects.using(settings.DEFAULT_DB_ALIAS).create(
type=SystemEvent.EventType.BURNING_INCENTIVE,
subtype=SystemEvent.EventSubType.APPLIED_BURNING,
long_description="",
data={**se_data, "main_burner": main_burner, "weight_adjustment": weight_adjustment},
)

weights = weights * (1 - burn_rate)

hotkey_to_uid = {n.hotkey: n.uid for n in metagraph.neurons}
for hotkey, weight in weight_adjustment.items():
uid = hotkey_to_uid[hotkey]
if uid not in uids:
uids = np.append(uids, uid)
weights = np.append(weights, weight)
else:
index = np.where(uids == uid)[0]
weights[index] = weights[index] + weight

uids, weights = process_weights_for_netuid(
uids,
weights,
settings.BITTENSOR_NETUID,
subtensor,
metagraph,
)

return uids, weights
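
One detail worth a sketch (not part of the diff): because random.Random is seeded with cycle_block_start, every validator independently picks the same main burner for a given cycle, and the pick moves between hotkeys as cycles advance, hence "dancing" burners:

import random

registered = sorted(["hkA", "hkB", "hkC"])  # hypothetical registered burner hotkeys
cycle_block_start = 10_000
# two validators, same seed -> the same main burner, no coordination needed
assert (
    random.Random(cycle_block_start).choice(registered)
    == random.Random(cycle_block_start).choice(registered)
)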


@app.task
def set_scores():
if not config.SERVING:
@@ -716,9 +842,6 @@ def set_scores():
return

metagraph = get_metagraph(subtensor, netuid=settings.BITTENSOR_NETUID)
neurons = metagraph.neurons
hotkey_to_uid = {n.hotkey: n.uid for n in neurons}
score_per_uid = {}
batches = list(
SyntheticJobBatch.objects.select_related("cycle")
.filter(
@@ -745,33 +868,11 @@
)

hotkey_scores = score_batches(batches)
for hotkey, score in hotkey_scores.items():
uid = hotkey_to_uid.get(hotkey)
if uid is None:
continue
score_per_uid[uid] = score

if not score_per_uid:
logger.warning(
"Batches produced no scores. Marking them as scored and skipping setting weights."
)
for batch in batches:
batch.scored = True
batch.save()
return
uids, weights = normalize_batch_scores(hotkey_scores, subtensor, metagraph)

uids = np.zeros(len(neurons), dtype=np.int64)
weights = np.zeros(len(neurons), dtype=np.float32)
for ind, n in enumerate(neurons):
uids[ind] = n.uid
weights[ind] = score_per_uid.get(n.uid, 0)

uids, weights = process_weights_for_netuid(
uids,
weights,
settings.BITTENSOR_NETUID,
subtensor,
metagraph,
uids, weights = apply_dancing_burners(
uids, weights, subtensor, metagraph, batches[-1].cycle.start
)

for batch in batches:
@@ -293,8 +293,8 @@ def __init__(
commit_reveal_weights_interval=1000,
max_weight_limit=65535,
)
self.weights_set: list[list[numbers.Number]] = []
self.weights_committed: list[list[numbers.Number]] = []
self.weights_set: list[dict[int, numbers.Number]] = []
self.weights_committed: list[dict[int, numbers.Number]] = []
self.weights_revealed: list[list[numbers.Number]] = []
self.init_time = monotonic()
self.block_duration = block_duration
@@ -338,11 +338,11 @@ def set_weights(
) -> tuple[bool, str]:
if not isinstance(weights, list):
weights = weights.tolist()
self.weights_set.append(weights)
self.weights_set.append({uid: weight for uid, weight in zip(uids, weights)})
return self.mocked_set_weights()

def commit_weights(self, weights, **kwargs) -> tuple[bool, str]:
self.weights_committed.append(weights)
def commit_weights(self, weights, uids, **kwargs) -> tuple[bool, str]:
self.weights_committed.append({uid: weight for uid, weight in zip(uids, weights)})
if self.hyperparameters.commit_reveal_weights_enabled:
return self.mocked_commit_weights()
return False, "MockSubtensor doesn't support commit_weights"
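
A quick sketch of what the reshaped test bookkeeping records (hypothetical uids and weights, assuming a MockSubtensor instance named mock that accepts these keyword arguments):

ok, msg = mock.set_weights(uids=[3, 7], weights=[0.25, 0.75])
assert mock.weights_set[-1] == {3: 0.25, 7: 0.75}  # keyed by uid, not positional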