Commit 0601333

Some update to the metrics.
MathieuDutSik committed Nov 15, 2024
1 parent 0b96d06 commit 0601333
Showing 1 changed file with 96 additions and 94 deletions.
190 changes: 96 additions & 94 deletions linera-views/src/backends/lru_caching.rs
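For orientation: this diff gathers the previously free-standing, cfg(with_metrics)-gated metric statics into a single metrics submodule, so call sites now qualify them as metrics::NAME. Below is a minimal sketch of the resulting pattern, reusing one declaration from the diff; on_value_cache_miss is a hypothetical caller added only for illustration, and the #[cfg(with_metrics)] gating that the real file keeps at each call site is omitted so the sketch stands alone.

mod metrics {
    use std::sync::LazyLock;

    use linera_base::prometheus_util;
    use prometheus::IntCounterVec;

    /// The total number of value cache faults.
    pub static NUM_CACHE_VALUE_FAULT: LazyLock<IntCounterVec> = LazyLock::new(|| {
        prometheus_util::register_int_counter_vec(
            "num_cache_value_fault",
            "Number of value cache faults",
            &[],
        )
        .expect("Counter creation should not fail")
    });
}

fn on_value_cache_miss() {
    // Hypothetical call site: the static is referenced through the module path
    // rather than imported at the top of the file, matching the diff's call sites.
    metrics::NUM_CACHE_VALUE_FAULT.with_label_values(&[]).inc();
}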
@@ -40,19 +40,87 @@ pub fn read_storage_cache_policy(storage_cache_policy: Option<String>) -> Storag
}
}

#[cfg(with_metrics)]
use std::sync::LazyLock;
use std::{
collections::{btree_map, hash_map::RandomState, BTreeMap, BTreeSet},
sync::{Arc, Mutex},
};

use linked_hash_map::LinkedHashMap;
#[cfg(with_metrics)]
use {
linera_base::prometheus_util,
prometheus::{HistogramVec, IntCounterVec},
};
mod metrics {
use std::sync::LazyLock;

use linera_base::prometheus_util;
use prometheus::{HistogramVec, IntCounterVec};

/// The total number of value cache faults
pub static NUM_CACHE_VALUE_FAULT: LazyLock<IntCounterVec> = LazyLock::new(|| {
prometheus_util::register_int_counter_vec(
"num_cache_value_fault",
"Number of value cache faults",
&[],
)
.expect("Counter creation should not fail")
});

/// The total number of cache successes
pub static NUM_CACHE_VALUE_SUCCESS: LazyLock<IntCounterVec> = LazyLock::new(|| {
prometheus_util::register_int_counter_vec(
"num_cache_value_success",
"Number of value cache success",
&[],
)
.expect("Counter creation should not fail")
});

/// The total number of find cache faults
pub static NUM_CACHE_FIND_FAULT: LazyLock<IntCounterVec> = LazyLock::new(|| {
prometheus_util::register_int_counter_vec(
"num_cache_find_fault",
"Number of find cache faults",
&[],
)
.expect("Counter creation should not fail")
});

/// The total number of find cache successes
pub static NUM_CACHE_FIND_SUCCESS: LazyLock<IntCounterVec> = LazyLock::new(|| {
prometheus_util::register_int_counter_vec(
"num_cache_find_success",
"Number of find cache success",
&[],
)
.expect("Counter creation should not fail")
});

/// Size of the inserted value entry
pub static VALUE_CACHE_ENTRY_SIZE: LazyLock<HistogramVec> = LazyLock::new(|| {
prometheus_util::register_histogram_vec(
"value_cache_entry_size",
"Value cache entry size",
&[],
Some(vec![
10.0, 30.0, 100.0, 300.0, 1000.0, 3000.0, 10000.0, 30000.0, 100000.0, 300000.0,
1000000.0,
]),
)
.expect("Histogram can be created")
});

/// Size of the inserted find entry
pub static FIND_CACHE_ENTRY_SIZE: LazyLock<HistogramVec> = LazyLock::new(|| {
prometheus_util::register_histogram_vec(
"find_cache_entry_size",
"Find cache entry size",
&[],
Some(vec![
10.0, 30.0, 100.0, 300.0, 1000.0, 3000.0, 10000.0, 30000.0, 100000.0, 300000.0,
1000000.0,
]),
)
.expect("Histogram can be created")
});
}

#[cfg(with_testing)]
use crate::memory::MemoryStore;
@@ -65,80 +65,6 @@ use crate::{
},
};

#[cfg(with_metrics)]
/// The total number of value cache faults
static NUM_CACHE_VALUE_FAULT: LazyLock<IntCounterVec> = LazyLock::new(|| {
prometheus_util::register_int_counter_vec(
"num_cache_value_fault",
"Number of value cache faults",
&[],
)
.expect("Counter creation should not fail")
});

#[cfg(with_metrics)]
/// The total number of cache successes
static NUM_CACHE_VALUE_SUCCESS: LazyLock<IntCounterVec> = LazyLock::new(|| {
prometheus_util::register_int_counter_vec(
"num_cache_value_success",
"Number of value cache success",
&[],
)
.expect("Counter creation should not fail")
});

#[cfg(with_metrics)]
/// The total number of find cache faults
static NUM_CACHE_FIND_FAULT: LazyLock<IntCounterVec> = LazyLock::new(|| {
prometheus_util::register_int_counter_vec(
"num_cache_find_fault",
"Number of find cache faults",
&[],
)
.expect("Counter creation should not fail")
});

#[cfg(with_metrics)]
/// The total number of find cache successes
static NUM_CACHE_FIND_SUCCESS: LazyLock<IntCounterVec> = LazyLock::new(|| {
prometheus_util::register_int_counter_vec(
"num_cache_find_success",
"Number of find cache success",
&[],
)
.expect("Counter creation should not fail")
});

#[cfg(with_metrics)]
/// Size of the inserted value entry
static VALUE_CACHE_ENTRY_SIZE: LazyLock<HistogramVec> = LazyLock::new(|| {
prometheus_util::register_histogram_vec(
"value_cache_entry_size",
"Value cache entry size",
&[],
Some(vec![
10.0, 30.0, 100.0, 300.0, 1000.0, 3000.0, 10000.0, 30000.0, 100000.0, 300000.0,
1000000.0,
]),
)
.expect("Histogram can be created")
});

#[cfg(with_metrics)]
/// Size of the inserted find entry
static FIND_CACHE_ENTRY_SIZE: LazyLock<HistogramVec> = LazyLock::new(|| {
prometheus_util::register_histogram_vec(
"find_cache_entry_size",
"Find cache entry size",
&[],
Some(vec![
10.0, 30.0, 100.0, 300.0, 1000.0, 3000.0, 10000.0, 30000.0, 100000.0, 300000.0,
1000000.0,
]),
)
.expect("Histogram can be created")
});

#[derive(Clone, Hash, Eq, PartialEq, Ord, PartialOrd)]
enum CacheEntry {
Find,
@@ -396,7 +390,7 @@ impl StoragePrefixCache {
pub fn insert_value(&mut self, key: Vec<u8>, cache_entry: ValueCacheEntry) {
let cache_size = cache_entry.size() + key.len();
#[cfg(with_metrics)]
VALUE_CACHE_ENTRY_SIZE
metrics::VALUE_CACHE_ENTRY_SIZE
.with_label_values(&[])
.observe(cache_size as f64);
if cache_size > self.storage_cache_policy.max_entry_size {
Expand Down Expand Up @@ -440,7 +434,7 @@ impl StoragePrefixCache {
pub fn insert_find(&mut self, key_prefix: Vec<u8>, cache_entry: FindCacheEntry) {
let cache_size = cache_entry.size() + key_prefix.len();
#[cfg(with_metrics)]
FIND_CACHE_ENTRY_SIZE
metrics::FIND_CACHE_ENTRY_SIZE
.with_label_values(&[])
.observe(cache_size as f64);
if cache_size > self.storage_cache_policy.max_cache_size {
Expand Down Expand Up @@ -670,12 +664,14 @@ where
let mut cache = cache.lock().unwrap();
if let Some(value) = cache.query_read_value(key) {
#[cfg(with_metrics)]
NUM_CACHE_VALUE_SUCCESS.with_label_values(&[]).inc();
metrics::NUM_CACHE_VALUE_SUCCESS
.with_label_values(&[])
.inc();
return Ok(value);
}
}
#[cfg(with_metrics)]
NUM_CACHE_VALUE_FAULT.with_label_values(&[]).inc();
metrics::NUM_CACHE_VALUE_FAULT.with_label_values(&[]).inc();
let value = self.store.read_value_bytes(key).await?;
let mut cache = cache.lock().unwrap();
cache.insert_read_value(key.to_vec(), &value);
@@ -690,12 +686,14 @@ where
let mut cache = cache.lock().unwrap();
if let Some(result) = cache.query_contains_key(key) {
#[cfg(with_metrics)]
NUM_CACHE_VALUE_SUCCESS.with_label_values(&[]).inc();
metrics::NUM_CACHE_VALUE_SUCCESS
.with_label_values(&[])
.inc();
return Ok(result);
}
}
#[cfg(with_metrics)]
NUM_CACHE_VALUE_FAULT.with_label_values(&[]).inc();
metrics::NUM_CACHE_VALUE_FAULT.with_label_values(&[]).inc();
let result = self.store.contains_key(key).await?;
let mut cache = cache.lock().unwrap();
cache.insert_contains_key(key.to_vec(), result);
@@ -715,11 +713,13 @@ where
for i in 0..size {
if let Some(result) = cache.query_contains_key(&keys[i]) {
#[cfg(with_metrics)]
NUM_CACHE_VALUE_SUCCESS.with_label_values(&[]).inc();
metrics::NUM_CACHE_VALUE_SUCCESS
.with_label_values(&[])
.inc();
results[i] = result;
} else {
#[cfg(with_metrics)]
NUM_CACHE_VALUE_FAULT.with_label_values(&[]).inc();
metrics::NUM_CACHE_VALUE_FAULT.with_label_values(&[]).inc();
indices.push(i);
key_requests.push(keys[i].clone());
}
@@ -750,11 +750,13 @@ where
for (i, key) in keys.into_iter().enumerate() {
if let Some(value) = cache.query_read_value(&key) {
#[cfg(with_metrics)]
NUM_CACHE_VALUE_SUCCESS.with_label_values(&[]).inc();
metrics::NUM_CACHE_VALUE_SUCCESS
.with_label_values(&[])
.inc();
result.push(value);
} else {
#[cfg(with_metrics)]
NUM_CACHE_VALUE_FAULT.with_label_values(&[]).inc();
metrics::NUM_CACHE_VALUE_FAULT.with_label_values(&[]).inc();
result.push(None);
cache_miss_indices.push(i);
miss_keys.push(key);
@@ -784,12 +786,12 @@ where
let mut cache = cache.lock().unwrap();
if let Some(value) = cache.query_find_keys(key_prefix) {
#[cfg(with_metrics)]
NUM_CACHE_FIND_SUCCESS.with_label_values(&[]).inc();
metrics::NUM_CACHE_FIND_SUCCESS.with_label_values(&[]).inc();
return Ok(value);
}
}
#[cfg(with_metrics)]
NUM_CACHE_FIND_FAULT.with_label_values(&[]).inc();
metrics::NUM_CACHE_FIND_FAULT.with_label_values(&[]).inc();
let keys = self.uncached_find_keys_by_prefix(key_prefix).await?;
let mut cache = cache.lock().unwrap();
cache.insert_find_keys(key_prefix.to_vec(), &keys);
@@ -807,12 +809,12 @@ where
let mut cache = cache.lock().unwrap();
if let Some(value) = cache.query_find_key_values(key_prefix) {
#[cfg(with_metrics)]
NUM_CACHE_FIND_SUCCESS.with_label_values(&[]).inc();
metrics::NUM_CACHE_FIND_SUCCESS.with_label_values(&[]).inc();
return Ok(value);
}
}
#[cfg(with_metrics)]
NUM_CACHE_FIND_FAULT.with_label_values(&[]).inc();
metrics::NUM_CACHE_FIND_FAULT.with_label_values(&[]).inc();
let key_values = self.uncached_find_key_values_by_prefix(key_prefix).await?;
let mut cache = cache.lock().unwrap();
cache.insert_find_key_values(key_prefix.to_vec(), &key_values);
