[peer-metrics] Add metrics for cluster health, like GetClusterHealth admin API

Alex Auvolat 2024-02-20 12:37:55 +01:00
parent 00d479358d
commit 3cdf69f079
2 changed files with 172 additions and 28 deletions
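
The change wires cluster-wide health reporting into the existing SystemMetrics machinery: the status-exchange loop periodically stores a ClusterHealth snapshot behind a shared Arc<RwLock<Option<ClusterHealth>>>, and a set of OpenTelemetry value observers read that snapshot lazily whenever metrics are scraped. The following is a rough sketch of that pattern, not code from this commit: the ClusterHealth/ClusterHealthStatus types below are reduced stand-ins for the real ones defined in the rpc system module, and the opentelemetry 0.17-style observer API (the one this file already uses) is assumed.

// Sketch only: reduced stand-ins for the real types in the rpc system module.
use std::sync::{Arc, RwLock};

use opentelemetry::{global, metrics::*};

#[derive(Debug, Clone, Copy, Eq, PartialEq)]
enum ClusterHealthStatus {
	Healthy,
	Degraded,
	Unavailable,
}

#[derive(Debug, Clone, Copy)]
struct ClusterHealth {
	status: ClusterHealthStatus,
	connected_nodes: usize,
}

// Register observers that read the shared snapshot. The returned handles must be
// kept alive (Garage stores them in SystemMetrics fields) for the gauges to keep
// being exported.
fn register_health_metrics(
	health: Arc<RwLock<Option<ClusterHealth>>>,
) -> (ValueObserver<u64>, ValueObserver<u64>) {
	let meter = global::meter("garage_system");

	let healthy = {
		let health = health.clone();
		meter
			.u64_value_observer("cluster_healthy", move |observer| {
				// Report nothing until the first snapshot has been written.
				if let Some(h) = health.read().unwrap().as_ref() {
					observer.observe((h.status == ClusterHealthStatus::Healthy) as u64, &[]);
				}
			})
			.with_description("Whether all storage nodes are connected")
			.init()
	};

	let connected = {
		let health = health.clone();
		meter
			.u64_value_observer("cluster_connected_nodes", move |observer| {
				if let Some(h) = health.read().unwrap().as_ref() {
					observer.observe(h.connected_nodes as u64, &[]);
				}
			})
			.with_description("Number of nodes currently connected")
			.init()
	};

	(healthy, connected)
}

// Writer side, run periodically (in Garage, from the status-exchange loop):
// *health.write().unwrap() = Some(ClusterHealth { status: ClusterHealthStatus::Healthy, connected_nodes: 3 });

Caching the snapshot this way means the observer callbacks never recompute cluster health themselves; the writer only refreshes the RwLock once per status exchange, which is what the loop in the first changed file does with *self.metrics.health.write().unwrap() = Some(self.health()).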


@@ -104,7 +104,7 @@ pub struct System {
 #[cfg(feature = "kubernetes-discovery")]
 kubernetes_discovery: Option<KubernetesDiscoveryConfig>,
-_metrics: SystemMetrics,
+metrics: SystemMetrics,
 replication_mode: ReplicationMode,
 replication_factor: usize,
@@ -168,7 +168,7 @@ pub struct ClusterHealth {
 pub partitions_all_ok: usize,
 }
-#[derive(Debug, Clone, Copy)]
+#[derive(Debug, Clone, Copy, Eq, PartialEq)]
 pub enum ClusterHealthStatus {
 /// All nodes are available
 Healthy,
@@ -376,7 +376,7 @@ impl System {
 consul_discovery,
 #[cfg(feature = "kubernetes-discovery")]
 kubernetes_discovery: config.kubernetes_discovery.clone(),
-_metrics: metrics,
+metrics,
 ring,
 update_ring: Mutex::new(update_ring),
@@ -698,7 +698,13 @@ impl System {
 while !*stop_signal.borrow() {
 let restart_at = Instant::now() + STATUS_EXCHANGE_INTERVAL;
+// Update local node status that is exchanged.
+// Status variables are exported into Prometheus in SystemMetrics,
+// so we take the opportunity to also update here the health status
+// that is reported in those metrics.
 self.update_local_status();
+*self.metrics.health.write().unwrap() = Some(self.health());
 let local_status: NodeStatus = self.local_status.read().unwrap().clone();
 let _ = self
 .rpc


@@ -2,22 +2,40 @@ use std::sync::{Arc, RwLock};
 use opentelemetry::{global, metrics::*, KeyValue};
-use crate::system::NodeStatus;
+use crate::system::{ClusterHealth, ClusterHealthStatus, NodeStatus};
 /// TableMetrics reference all counter used for metrics
 pub struct SystemMetrics {
+pub(crate) health: Arc<RwLock<Option<ClusterHealth>>>,
+// Static values
 pub(crate) _garage_build_info: ValueObserver<u64>,
 pub(crate) _replication_factor: ValueObserver<u64>,
+// Disk space values from System::local_status
 pub(crate) _disk_avail: ValueObserver<u64>,
 pub(crate) _disk_total: ValueObserver<u64>,
+// Health report from System::health()
+pub(crate) _cluster_healthy: ValueObserver<u64>,
+pub(crate) _cluster_available: ValueObserver<u64>,
+pub(crate) _known_nodes: ValueObserver<u64>,
+pub(crate) _connected_nodes: ValueObserver<u64>,
+pub(crate) _storage_nodes: ValueObserver<u64>,
+pub(crate) _storage_nodes_ok: ValueObserver<u64>,
+pub(crate) _partitions: ValueObserver<u64>,
+pub(crate) _partitions_quorum: ValueObserver<u64>,
+pub(crate) _partitions_all_ok: ValueObserver<u64>,
 }
 impl SystemMetrics {
 pub fn new(replication_factor: usize, local_status: Arc<RwLock<NodeStatus>>) -> Self {
 let meter = global::meter("garage_system");
-let st1 = local_status.clone();
-let st2 = local_status.clone();
+let health = Arc::new(RwLock::new(None));
 Self {
+health: health.clone(),
+// Static values
 _garage_build_info: meter
 .u64_value_observer("garage_build_info", move |observer| {
 observer.observe(
@@ -36,9 +54,13 @@ impl SystemMetrics {
 })
 .with_description("Garage replication factor setting")
 .init(),
-_disk_avail: meter
+// Disk space values from System::local_status
+_disk_avail: {
+let status = local_status.clone();
+meter
 .u64_value_observer("garage_local_disk_avail", move |observer| {
-let st = st1.read().unwrap();
+let st = status.read().unwrap();
 if let Some((avail, _total)) = st.data_disk_avail {
 observer.observe(avail, &[KeyValue::new("volume", "data")]);
 }
@@ -47,10 +69,13 @@ impl SystemMetrics {
 }
 })
 .with_description("Garage available disk space on each node")
-.init(),
-_disk_total: meter
+.init()
+},
+_disk_total: {
+let status = local_status.clone();
+meter
 .u64_value_observer("garage_local_disk_total", move |observer| {
-let st = st2.read().unwrap();
+let st = status.read().unwrap();
 if let Some((_avail, total)) = st.data_disk_avail {
 observer.observe(total, &[KeyValue::new("volume", "data")]);
 }
@@ -59,7 +84,120 @@ impl SystemMetrics {
 }
 })
 .with_description("Garage total disk space on each node")
-.init(),
+.init()
+},
+// Health report from System::health()
+_cluster_healthy: {
+let health = health.clone();
+meter
+.u64_value_observer("cluster_healthy", move |observer| {
+if let Some(h) = health.read().unwrap().as_ref() {
+if h.status == ClusterHealthStatus::Healthy {
+observer.observe(1, &[]);
+} else {
+observer.observe(0, &[]);
+}
+}
+})
+.with_description("Whether all storage nodes are connected")
+.init()
+},
+_cluster_available: {
+let health = health.clone();
+meter.u64_value_observer("cluster_available", move |observer| {
+if let Some(h) = health.read().unwrap().as_ref() {
+if h.status != ClusterHealthStatus::Unavailable {
+observer.observe(1, &[]);
+} else {
+observer.observe(0, &[]);
+}
+}
+})
+.with_description("Whether all requests can be served, even if some storage nodes are disconnected")
+.init()
+},
+_known_nodes: {
+let health = health.clone();
+meter
+.u64_value_observer("cluster_known_nodes", move |observer| {
+if let Some(h) = health.read().unwrap().as_ref() {
+observer.observe(h.known_nodes as u64, &[]);
+}
+})
+.with_description("Number of nodes already seen once in the cluster")
+.init()
+},
+_connected_nodes: {
+let health = health.clone();
+meter
+.u64_value_observer("cluster_connected_nodes", move |observer| {
+if let Some(h) = health.read().unwrap().as_ref() {
+observer.observe(h.connected_nodes as u64, &[]);
+}
+})
+.with_description("Number of nodes currently connected")
+.init()
+},
+_storage_nodes: {
+let health = health.clone();
+meter
+.u64_value_observer("cluster_storage_nodes", move |observer| {
+if let Some(h) = health.read().unwrap().as_ref() {
+observer.observe(h.storage_nodes as u64, &[]);
+}
+})
+.with_description("Number of storage nodes declared in the current layout")
+.init()
+},
+_storage_nodes_ok: {
+let health = health.clone();
+meter
+.u64_value_observer("cluster_storage_nodes_ok", move |observer| {
+if let Some(h) = health.read().unwrap().as_ref() {
+observer.observe(h.storage_nodes_ok as u64, &[]);
+}
+})
+.with_description("Number of storage nodes currently connected")
+.init()
+},
+_partitions: {
+let health = health.clone();
+meter
+.u64_value_observer("cluster_partitions", move |observer| {
+if let Some(h) = health.read().unwrap().as_ref() {
+observer.observe(h.partitions as u64, &[]);
+}
+})
+.with_description("Number of partitions in the layout")
+.init()
+},
+_partitions_quorum: {
+let health = health.clone();
+meter
+.u64_value_observer("cluster_partitions_quorum", move |observer| {
+if let Some(h) = health.read().unwrap().as_ref() {
+observer.observe(h.partitions_quorum as u64, &[]);
+}
+})
+.with_description(
+"Number of partitions for which we have a quorum of connected nodes",
+)
+.init()
+},
+_partitions_all_ok: {
+let health = health.clone();
+meter
+.u64_value_observer("cluster_partitions_all_ok", move |observer| {
+if let Some(h) = health.read().unwrap().as_ref() {
+observer.observe(h.partitions_all_ok as u64, &[]);
+}
+})
+.with_description(
+"Number of partitions for which all storage nodes are connected",
+)
+.init()
+},
 }
 }
 }