Mirror of https://git.deuxfleurs.fr/Deuxfleurs/garage.git (synced 2024-11-24 17:11:01 +00:00)
Merge pull request 'refactor: remove max_write_errors and max_faults' (#760) from yuka/garage:remove-max-write-errors into next-0.10
Reviewed-on: https://git.deuxfleurs.fr/Deuxfleurs/garage/pulls/760
Commit 603604cdfc
5 changed files with 5 additions and 19 deletions
@@ -247,7 +247,6 @@ impl Garage {
 
 		let control_rep_param = TableFullReplication {
 			system: system.clone(),
-			max_faults: replication_mode.control_write_max_faults(),
 		};
 
 		info!("Initialize block manager...");
@@ -21,13 +21,6 @@ impl ReplicationMode {
 		}
 	}
 
-	pub fn control_write_max_faults(&self) -> usize {
-		match self {
-			Self::None => 0,
-			_ => 1,
-		}
-	}
-
 	pub fn replication_factor(&self) -> usize {
 		match self {
 			Self::None => 1,
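Note: the helper removed above encoded the number of tolerated write faults per replication mode (0 for Self::None, 1 for every other mode). After this commit, TableFullReplication::write_quorum derives a comparable value from the cluster size instead, as shown in the fullcopy hunk further down.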
@@ -21,8 +21,6 @@ use crate::replication::*;
 pub struct TableFullReplication {
 	/// The membership manager of this node
 	pub system: Arc<System>,
-	/// Max number of faults allowed while replicating a record
-	pub max_faults: usize,
 }
 
 impl TableReplication for TableFullReplication {
@@ -45,15 +43,15 @@ impl TableReplication for TableFullReplication {
 	}
 	fn write_quorum(&self) -> usize {
 		let nmembers = self.system.cluster_layout().current().all_nodes().len();
-		if nmembers > self.max_faults {
-			nmembers - self.max_faults
+
+		let max_faults = if nmembers > 1 { 1 } else { 0 };
+
+		if nmembers > max_faults {
+			nmembers - max_faults
 		} else {
 			1
 		}
 	}
-	fn max_write_errors(&self) -> usize {
-		self.max_faults
-	}
 
 	fn partition_of(&self, _hash: &Hash) -> Partition {
 		0u16
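The behavioral core of the refactor is in this hunk: instead of reading a configurable max_faults field, full-copy replication now tolerates exactly one faulty node whenever the cluster has more than one member. A minimal standalone sketch of the resulting quorum rule follows; the free function full_copy_write_quorum and the example cluster sizes are illustrative additions, not part of the commit:

	// Sketch of the quorum rule introduced above (hypothetical free-standing
	// helper; in the commit this logic lives in TableFullReplication::write_quorum).
	fn full_copy_write_quorum(nmembers: usize) -> usize {
		// Tolerate one fault when there is more than one node, none otherwise.
		let max_faults = if nmembers > 1 { 1 } else { 0 };
		if nmembers > max_faults {
			nmembers - max_faults
		} else {
			1
		}
	}

	fn main() {
		// 1 node -> quorum 1, 2 nodes -> 1, 3 nodes -> 2, 5 nodes -> 4
		for n in [1usize, 2, 3, 5] {
			println!("{} node(s) -> write quorum {}", n, full_copy_write_quorum(n));
		}
	}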
@@ -20,7 +20,6 @@ pub trait TableReplication: Send + Sync + 'static {
 	fn write_sets(&self, hash: &Hash) -> Self::WriteSets;
 	/// Responses needed to consider a write succesfull in each set
 	fn write_quorum(&self) -> usize;
-	fn max_write_errors(&self) -> usize;
 
 	// Accessing partitions, for Merkle tree & sync
 	/// Get partition for data with given hash
@@ -44,9 +44,6 @@ impl TableReplication for TableShardedReplication {
 	fn write_quorum(&self) -> usize {
 		self.write_quorum
 	}
-	fn max_write_errors(&self) -> usize {
-		self.replication_factor - self.write_quorum
-	}
 
 	fn partition_of(&self, hash: &Hash) -> Partition {
 		self.system.cluster_layout().current().partition_of(hash)
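For sharded tables, the removed method was simple arithmetic over existing configuration: with replication_factor = 3 and write_quorum = 2, for example, max_write_errors evaluated to 1. Since no caller needed that derived count as a trait method, it could be dropped alongside max_faults.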