adding liquid alpha
YourUsername committed Sep 3, 2024
1 parent 5b4bdf7 commit 4e9276b
Showing 8 changed files with 1,298,819 additions and 241 deletions.
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

59 changes: 41 additions & 18 deletions pallets/subnet_emission/src/subnet_consensus/yuma.rs
@@ -85,9 +85,6 @@ impl<T: Config> YumaEpoch<T> {
let active_stake = self.compute_active_stake(&inactive, &stake);
log::trace!("final active stake: {active_stake:?}");

- // dbg!(weights.as_ref());
- // dbg!(active_stake.as_ref());

let ConsensusAndTrust {
consensus,
validator_trust,
@@ -104,7 +101,7 @@ impl<T: Config> YumaEpoch<T> {
ema_bonds,
dividends,
} = self
- .compute_bonds_and_dividends(&weights, &active_stake, &incentives)
+ .compute_bonds_and_dividends(&consensus, &weights, &active_stake, &incentives)
.ok_or(EmissionError::Other("bonds storage is broken"))?;

let Emissions {
@@ -334,8 +331,6 @@ impl<T: Config> YumaEpoch<T> {
self.params.kappa,
);

- dbg!(consensus.clone());

log::trace!("final consensus: {consensus:?}");

// Compute preranks: r_j = SUM(i) w_ij * s_i
Expand Down Expand Up @@ -383,8 +378,45 @@ impl<T: Config> YumaEpoch<T> {
}
}

fn calculate_ema_bonds(
&self,
bonds_delta: &[Vec<(u16, I32F32)>],
bonds: &[Vec<(u16, I32F32)>],
consensus: &[I32F32],
) -> Vec<Vec<(u16, I32F32)>> {
let bonds_moving_average = I64F64::from_num(self.params.bonds_moving_average)
.checked_div(I64F64::from_num(1_000_000))
.unwrap_or_default();
let default_alpha =
I32F32::from_num(1).saturating_sub(I32F32::from_num(bonds_moving_average));

if !self.params.use_weights_encryption {
return mat_ema_sparse(bonds_delta, bonds, default_alpha);
}

let consensus_high = quantile(consensus, 0.75);
let consensus_low = quantile(consensus, 0.25);

if consensus_high <= consensus_low && consensus_high == 0 && consensus_low >= 0 {
return mat_ema_sparse(bonds_delta, bonds, default_alpha);
}

log::trace!("Using Liquid Alpha");
let (alpha_low, alpha_high) = get_alpha_values_32(self.subnet_id);
log::trace!("alpha_low: {:?} alpha_high: {:?}", alpha_low, alpha_high);

let (a, b) =
calculate_logistic_params(alpha_high, alpha_low, consensus_high, consensus_low);
let alpha = compute_alpha_values(consensus, a, b);
let clamped_alpha: Vec<I32F32> =
alpha.into_iter().map(|a| a.clamp(alpha_low, alpha_high)).collect();

mat_ema_alpha_vec_sparse(bonds_delta, bonds, &clamped_alpha)
}

fn compute_bonds_and_dividends(
&self,
+ consensus: &ConsensusVal,
weights: &WeightsVal,
active_stake: &ActiveStake,
incentives: &IncentivesVal,
@@ -393,9 +425,6 @@ impl<T: Config> YumaEpoch<T> {
let mut bonds = self.modules.bonds.clone();
log::trace!(" original bonds: {bonds:?}");

- // dbg!(&active_stake);
- // dbg!(&incentives);

// Remove bonds referring to deregistered modules.
bonds = vec_mask_sparse_matrix(
&bonds,
@@ -419,16 +448,10 @@
log::trace!(" normalized bonds delta: {bonds_delta:?}");

// Compute bonds moving average.
- let bonds_moving_average = I64F64::from_num(self.params.bonds_moving_average)
-     .checked_div(I64F64::from_num(1_000_000))
-     .unwrap_or_default();
- log::trace!(" bonds moving average: {bonds_moving_average}");
- let alpha = I32F32::from_num(1).saturating_sub(I32F32::from_num(bonds_moving_average));
- let mut ema_bonds = mat_ema_sparse(&bonds_delta, &bonds, alpha);
- log::trace!(" original ema bonds: {ema_bonds:?}");
+ let mut ema_bonds =
+     self.calculate_ema_bonds(&bonds_delta, &bonds, &consensus.clone().into_inner());

+ log::trace!(" original ema bonds: {ema_bonds:?}");

// Normalize EMA bonds.
inplace_col_normalize_sparse(&mut ema_bonds, self.module_count()); // sum_i b_ij = 1
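For orientation, here is a plain-f64 miniature of the branch that calculate_ema_bonds takes. The moving-average and quantile numbers are hypothetical inputs, not values from this commit, and the spread check is a simplified stand-in for the guard above:

    // Flat default alpha vs. liquid alpha, mirroring calculate_ema_bonds.
    fn main() {
        let bonds_moving_average: u64 = 900_000; // hypothetical subnet setting
        let default_alpha = 1.0 - bonds_moving_average as f64 / 1_000_000.0; // ~0.1

        let use_weights_encryption = true;
        // Stand-ins for quantile(consensus, 0.25) and quantile(consensus, 0.75).
        let (consensus_low, consensus_high) = (0.25_f64, 0.75);

        if use_weights_encryption && consensus_high > consensus_low && consensus_high > 0.0 {
            // Liquid path: fit a logistic from consensus to alpha, compute one
            // alpha per module, clamp into [alpha_low, alpha_high] ~ [0.70, 0.90].
            println!("liquid alpha EMA");
        } else {
            // Flat path: every bond moves at the same rate,
            // bond' = default_alpha * delta + (1 - default_alpha) * bond.
            println!("flat EMA, alpha = {default_alpha}");
        }
    }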
5 changes: 4 additions & 1 deletion pallets/subnet_emission/src/subnet_consensus/yuma/params.rs
@@ -5,7 +5,8 @@ use sp_std::collections::btree_map::BTreeMap;
use frame_support::DebugNoBound;
use pallet_subspace::{
math::*, BalanceOf, Bonds, BondsMovingAverage, Config, Founder, Kappa, Keys, LastUpdate,
- MaxAllowedValidators, MaxWeightAge, Pallet as PalletSubspace, ValidatorPermits, Vec, Weights,
+ MaxAllowedValidators, MaxWeightAge, Pallet as PalletSubspace, UseWeightsEncrytyption,
+ ValidatorPermits, Vec, Weights,
};
use parity_scale_codec::{Decode, Encode};
use scale_info::TypeInfo;
@@ -31,6 +32,7 @@ pub struct YumaParams<T: Config> {

pub current_block: u64,
pub activity_cutoff: u64,
+ pub use_weights_encryption: bool,
pub max_allowed_validators: Option<u16>,
pub bonds_moving_average: u64,
}
@@ -172,6 +174,7 @@ impl<T: Config> YumaParams<T> {
founder_key,
founder_emission,

+ use_weights_encryption: UseWeightsEncrytyption::<T>::get(subnet_id),
current_block: PalletSubspace::<T>::get_current_block_number(),
activity_cutoff: MaxWeightAge::<T>::get(subnet_id),
max_allowed_validators: MaxAllowedValidators::<T>::get(subnet_id),
4 changes: 4 additions & 0 deletions pallets/subspace/Cargo.toml
@@ -24,6 +24,7 @@ std = [
"frame-system/std",
"scale-info/std",
"sp-std/std",
"num-traits/std",
]
runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"]
try-runtime = ["frame-support/try-runtime"]
@@ -47,6 +48,9 @@ pallet-transaction-payment.workspace = true
pallet-subspace-genesis-config.path = "./genesis-config"
pallet-governance-api = { path = "../governance/api", default-features = false }
pallet-subnet-emission-api = { path = "../subnet_emission/api", default-features = false }
+ num-traits = { version = "0.2.19", default-features = false, features = [
+     "libm",
+ ] }

[dev-dependencies]
pallet-balances = { workspace = true, features = ["std"] }
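Note on the new dependency: the libm feature supplies num-traits' Float implementations (floor, ceil, fract) in no_std builds, which the new quantile function in pallets/subspace/src/math.rs relies on.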
5 changes: 4 additions & 1 deletion pallets/subspace/src/lib.rs
@@ -328,13 +328,16 @@ pub mod pallet {

#[pallet::type_value]
pub fn DefaultMinUnderperformanceThreshold() -> I64F64 {
- I64F64::from_num(0.1)
+ I64F64::from_num(0)
}

#[pallet::storage]
pub type MinUnderperformanceThreshold<T: Config> =
StorageMap<_, Identity, u16, I64F64, ValueQuery, DefaultMinUnderperformanceThreshold>;

+ #[pallet::storage]
+ pub type UseWeightsEncrytyption<T: Config> = StorageMap<_, Identity, u16, bool, ValueQuery>;

// ---------------------------------
// Subnet PARAMS
// ---------------------------------
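Note: UseWeightsEncrytyption is a ValueQuery map over bool, so it defaults to false for every subnet; the liquid-alpha EMA path is opt-in per subnet.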
127 changes: 115 additions & 12 deletions pallets/subspace/src/math.rs
@@ -1,5 +1,9 @@
- use sp_std::{vec, vec::Vec};
- use substrate_fixed::types::{I32F32, I64F64};
+ use num_traits::float::Float;
+ use sp_std::{cmp::Ordering, collections::btree_map::BTreeMap, vec, vec::Vec};
+ use substrate_fixed::{
+     transcendental::{exp, ln},
+     types::{I32F32, I64F64},
+ };

// Return true when vector sum is zero.
pub fn is_zero(vector: &[I32F32]) -> bool {
@@ -90,6 +94,114 @@ pub fn mask_diag_sparse(sparse_matrix: &[Vec<(u16, I32F32)>]) -> Vec<Vec<(u16, I
result
}

pub fn mat_ema_alpha_vec_sparse(
new: &[Vec<(u16, I32F32)>],
old: &[Vec<(u16, I32F32)>],
alpha: &[I32F32],
) -> Vec<Vec<(u16, I32F32)>> {
assert_eq!(
new.len(),
old.len(),
"New and old matrices must have the same number of rows"
);
let zero = I32F32::from_num(0.0);
let one = I32F32::from_num(1.0);

new.iter()
.zip(old)
.map(|(new_row, old_row)| {
let mut row_map: BTreeMap<u16, I32F32> = new_row
.iter()
.map(|&(j, value)| {
let alpha_val = alpha.get(j as usize).copied().unwrap_or(zero);
(j, alpha_val.saturating_mul(value))
})
.collect();

old_row.iter().for_each(|&(j, value)| {
let alpha_val = alpha.get(j as usize).copied().unwrap_or(zero);
let old_component = one.saturating_sub(alpha_val).saturating_mul(value);
row_map
.entry(j)
.and_modify(|e| *e = e.saturating_add(old_component))
.or_insert(old_component);
});

row_map.into_iter().filter(|&(_, v)| v > zero).collect()
})
.collect()
}
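The merge semantics are easiest to see on one row: an entry only in the old matrix decays by (1 - alpha_j), an entry only in the new matrix contributes alpha_j * value, and non-positive results are dropped. A plain-f64 miniature of a single row (illustrative values; the pallet works in I32F32 fixed-point and treats a missing alpha as zero rather than panicking):

    use std::collections::BTreeMap;

    // f64 miniature of mat_ema_alpha_vec_sparse restricted to one row.
    // Assumes `alpha` has an entry for every column index that appears.
    fn ema_row(new: &[(u16, f64)], old: &[(u16, f64)], alpha: &[f64]) -> Vec<(u16, f64)> {
        let mut row: BTreeMap<u16, f64> =
            new.iter().map(|&(j, v)| (j, alpha[j as usize] * v)).collect();
        for &(j, v) in old {
            *row.entry(j).or_insert(0.0) += (1.0 - alpha[j as usize]) * v;
        }
        row.into_iter().filter(|&(_, v)| v > 0.0).collect()
    }

    fn main() {
        let alpha = [0.9, 0.7, 0.8];
        let new = [(0u16, 0.5), (2, 0.4)]; // column 1 is absent from the delta
        let old = [(1u16, 0.6), (2, 0.2)]; // column 0 is absent from the old bonds
        // col 0: 0.9 * 0.5             ~ 0.45
        // col 1: (1 - 0.7) * 0.6       ~ 0.18  (pure decay of the old bond)
        // col 2: 0.8 * 0.4 + 0.2 * 0.2 ~ 0.36
        println!("{:?}", ema_row(&new, &old, &alpha));
    }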

pub fn calculate_logistic_params(
alpha_high: I32F32,
alpha_low: I32F32,
consensus_high: I32F32,
consensus_low: I32F32,
) -> (I32F32, I32F32) {
if consensus_high <= consensus_low
|| alpha_low == I32F32::from_num(0)
|| alpha_high == I32F32::from_num(0)
{
return (I32F32::from_num(0), I32F32::from_num(0));
}

let calc_term = |alpha: I32F32| {
ln((I32F32::from_num(1) / alpha) - I32F32::from_num(1)).unwrap_or(I32F32::from_num(0.0))
};

let a = (calc_term(alpha_high) - calc_term(alpha_low)) / (consensus_low - consensus_high);
let b = calc_term(alpha_low) + a * consensus_low;

(a, b)
}
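These formulas pin the logistic curve alpha(c) = 1 / (1 + e^(b - a*c)) to its anchors: b - a*consensus_low = ln(1/alpha_low - 1) and b - a*consensus_high = ln(1/alpha_high - 1), so the curve passes through (consensus_low, alpha_low) and (consensus_high, alpha_high) and rises in between. A quick f64 sanity check with made-up quantiles:

    // Verify in f64 that the (a, b) formulas reproduce the anchor points.
    fn main() {
        let (alpha_low, alpha_high) = (0.7_f64, 0.9);
        let (c_low, c_high) = (0.2_f64, 0.8); // hypothetical consensus quantiles

        let t = |alpha: f64| (1.0 / alpha - 1.0).ln();
        let a = (t(alpha_high) - t(alpha_low)) / (c_low - c_high);
        let b = t(alpha_low) + a * c_low;

        let sigma = |c: f64| 1.0 / (1.0 + (b - a * c).exp());
        assert!((sigma(c_low) - alpha_low).abs() < 1e-12);
        assert!((sigma(c_high) - alpha_high).abs() < 1e-12);
        println!("a = {a:.4}, b = {b:.4}"); // a > 0, so alpha rises with consensus
    }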

pub fn compute_alpha_values(consensus: &[I32F32], a: I32F32, b: I32F32) -> Vec<I32F32> {
let alpha: Vec<I32F32> = consensus
.iter()
.map(|&c| {
let exp_val =
exp(b.saturating_sub(a.saturating_mul(c))).unwrap_or(I32F32::from_num(0.0));
I32F32::from_num(1.0).saturating_div(I32F32::from_num(1.0).saturating_add(exp_val))
})
.collect();

alpha
}

pub fn get_alpha_values_32(_netuid: u16) -> (I32F32, I32F32) {
const ALPHA_LOW: u16 = 45875; // 45875 / 65535 ~ 0.70 once scaled by u16::MAX
const ALPHA_HIGH: u16 = 58982; // 58982 / 65535 ~ 0.90 once scaled by u16::MAX

let result =
[ALPHA_LOW, ALPHA_HIGH].map(|alpha| I32F32::from_num(alpha) / I32F32::from_num(u16::MAX));
(result[0], result[1])
}

trait Lerp {
fn lerp(self, other: Self, t: Self) -> Self;
}
impl Lerp for I32F32 {
fn lerp(self, other: Self, t: Self) -> Self {
self + (other - self) * t
}
}

pub fn quantile(data: &[I32F32], quantile: f64) -> I32F32 {
let mut sorted = data.to_vec();
sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(Ordering::Equal));
let len = sorted.len();
if len == 0 {
return I32F32::from_num(0);
}
let pos = quantile * (len - 1) as f64;
let (low, high) = (pos.floor() as usize, pos.ceil() as usize);
if low == high {
sorted[low]
} else {
sorted[low].lerp(sorted[high], I32F32::from_num(pos.fract()))
}
}
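A worked example of the interpolation: with four sorted values and q = 0.75, the position is 0.75 * (4 - 1) = 2.25, so the result lies a quarter of the way from the third value to the fourth. An f64 miniature (assumes a non-empty, already-sorted slice):

    // f64 miniature of the quantile function above.
    fn quantile(sorted: &[f64], q: f64) -> f64 {
        let pos = q * (sorted.len() - 1) as f64;
        let (lo, hi) = (pos.floor() as usize, pos.ceil() as usize);
        sorted[lo] + (sorted[hi] - sorted[lo]) * pos.fract()
    }

    fn main() {
        let data = [1.0, 2.0, 3.0, 4.0];
        assert_eq!(quantile(&data, 0.75), 3.25); // pos = 2.25 -> lerp(3.0, 4.0, 0.25)
        assert_eq!(quantile(&data, 0.25), 1.75); // pos = 0.75 -> lerp(1.0, 2.0, 0.75)
        println!("ok");
    }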

/// Normalizes (sum to 1 except 0) each row (dim=0) of a sparse matrix in-place.
pub fn inplace_row_normalize_sparse(sparse_matrix: &mut [Vec<(u16, I32F32)>]) {
for sparse_row in sparse_matrix.iter_mut() {
@@ -135,41 +247,32 @@ pub fn weighted_median_col_sparse(
let zero: I32F32 = I32F32::from_num(0);
let mut use_stake: Vec<I32F32> = stake.iter().copied().filter(|&s| s > zero).collect();
inplace_normalize(&mut use_stake);
- dbg!(&use_stake); // Log normalized stakes

let stake_sum: I32F32 = use_stake.iter().sum();
let stake_idx: Vec<usize> = (0..use_stake.len()).collect();
let minority: I32F32 = stake_sum.saturating_sub(majority);
let mut use_score: Vec<Vec<I32F32>> = vec![vec![zero; use_stake.len()]; columns as usize];

let mut median: Vec<I32F32> = vec![zero; columns as usize];
let mut k: usize = 0;

- dbg!(&score);

for r in 0..rows {
let Some(stake_r) = stake.get(r) else {
dbg!("Skipping row due to missing stake", r);
continue;
};
let Some(score_r) = score.get(r) else {
dbg!("Skipping row due to missing score", r);
continue;
};
if *stake_r <= zero {
dbg!("Skipping row due to zero or negative stake", r, stake_r);
continue;
}
dbg!("Processing row", r, score_r);
for (c, val) in score_r.iter() {
let Some(use_score_c) = use_score.get_mut(*c as usize) else {
dbg!("Column index out of bounds", c);
continue;
};
let Some(use_score_c_k) = use_score_c.get_mut(k) else {
dbg!("Row index out of bounds", k);
continue;
};
dbg!("Setting score", r, c, val);
*use_score_c_k = *val;
}
k = k.saturating_add(1);
