Merge pull request 'cli: uniformize output and add some infos' (#984) from uniformize-cli into next-v2

Reviewed-on: https://git.deuxfleurs.fr/Deuxfleurs/garage/pulls/984
Alex 2025-03-12 15:51:11 +00:00
commit 48e0436f29
26 changed files with 480 additions and 353 deletions

View file

@ -35,7 +35,15 @@ steps:
- matrix:
ARCH: i386
- name: upgrade tests
- name: upgrade tests from v1.0.0
image: nixpkgs/nix:nixos-22.05
commands:
- nix-shell --attr ci --run "./script/test-upgrade.sh v1.0.0 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)
when:
- matrix:
ARCH: amd64
- name: upgrade tests from v0.8.4
image: nixpkgs/nix:nixos-22.05
commands:
- nix-shell --attr ci --run "./script/test-upgrade.sh v0.8.4 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)

View file

@ -1752,7 +1752,8 @@
"type": "object",
"required": [
"versionId",
"deleted",
"refDeleted",
"versionDeleted",
"garbageCollected"
],
"properties": {
@ -1766,10 +1767,13 @@
}
]
},
"deleted": {
"garbageCollected": {
"type": "boolean"
},
"garbageCollected": {
"refDeleted": {
"type": "boolean"
},
"versionDeleted": {
"type": "boolean"
},
"versionId": {
@ -3516,6 +3520,13 @@
"type": "boolean",
"description": "Whether this node is part of an older layout version and is draining data."
},
"garageVersion": {
"type": [
"string",
"null"
],
"description": "Garage version"
},
"hostname": {
"type": [
"string",

View file

@ -17,13 +17,19 @@ else
fi
$GARAGE_BIN -c /tmp/config.1.toml bucket create eprouvette
if [ "$GARAGE_08" = "1" ]; then
if [ "$GARAGE_OLDVER" = "v08" ]; then
KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml key new --name opérateur)
else
ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'`
SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'`
elif [ "$GARAGE_OLDVER" = "v1" ]; then
KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml key create opérateur)
ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'`
SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'`
else
KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml json-api CreateKey '{"name":"opérateur"}')
ACCESS_KEY=`echo $KEY_INFO|jq -r .accessKeyId`
SECRET_KEY=`echo $KEY_INFO|jq -r .secretAccessKey`
fi
ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'`
SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'`
$GARAGE_BIN -c /tmp/config.1.toml bucket allow eprouvette --read --write --owner --key $ACCESS_KEY
echo "$ACCESS_KEY $SECRET_KEY" > /tmp/garage.s3

View file

@ -29,7 +29,7 @@ until $GARAGE_BIN -c /tmp/config.1.toml status 2>&1|grep -q HEALTHY ; do
sleep 1
done
if [ "$GARAGE_08" = "1" ]; then
if [ "$GARAGE_OLDVER" = "v08" ]; then
$GARAGE_BIN -c /tmp/config.1.toml status \
| grep 'NO ROLE' \
| grep -Po '^[0-9a-f]+' \

View file

@ -24,7 +24,10 @@ echo "============= insert data into old version cluster ================="
export GARAGE_BIN=/tmp/old_garage
if echo $OLD_VERSION | grep 'v0\.8\.'; then
echo "Detected Garage v0.8.x"
export GARAGE_08=1
export GARAGE_OLDVER=v08
elif (echo $OLD_VERSION | grep 'v0\.9\.') || (echo $OLD_VERSION | grep 'v1\.'); then
echo "Detected Garage v0.9.x / v1.x"
export GARAGE_OLDVER=v1
fi
echo "⏳ Setup cluster using old version"
@ -47,7 +50,7 @@ killall -9 old_garage || true
echo "🏁 Removing old garage version"
rm -rv $GARAGE_BIN
export -n GARAGE_BIN
export -n GARAGE_08
export -n GARAGE_OLDVER
echo "================ read data from new cluster ==================="

View file

@ -47,3 +47,4 @@ prometheus = { workspace = true, optional = true }
[features]
metrics = [ "opentelemetry-prometheus", "prometheus" ]
k2v = [ "garage_model/k2v" ]

View file

@ -188,8 +188,8 @@ pub struct GetClusterStatusResponse {
pub struct NodeResp {
/// Full-length node identifier
pub id: String,
/// Role assigned to this node in the current cluster layout
pub role: Option<NodeAssignedRole>,
/// Garage version
pub garage_version: Option<String>,
/// Socket address used by other nodes to connect to this node for RPC
#[schema(value_type = Option<String>)]
pub addr: Option<SocketAddr>,
@ -200,6 +200,8 @@ pub struct NodeResp {
/// For disconnected nodes, the number of seconds since last contact,
/// or `null` if no contact was established since Garage restarted.
pub last_seen_secs_ago: Option<u64>,
/// Role assigned to this node in the current cluster layout
pub role: Option<NodeAssignedRole>,
/// Whether this node is part of an older layout version and is draining data.
pub draining: bool,
/// Total and available space on the disk partition(s) containing the data
@ -1174,7 +1176,8 @@ pub struct LocalGetBlockInfoResponse {
#[serde(rename_all = "camelCase")]
pub struct BlockVersion {
pub version_id: String,
pub deleted: bool,
pub ref_deleted: bool,
pub version_deleted: bool,
pub garbage_collected: bool,
pub backlink: Option<BlockVersionBacklink>,
}
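
For reference, a standalone sketch (not part of this diff) of how the renamed fields serialize for the admin API, assuming the serde and serde_json crates are available; the version identifier is made up. The camelCase names line up with the `refDeleted`, `versionDeleted` and `garbageCollected` properties added to the OpenAPI schema earlier in this commit.

use serde::Serialize;

// Minimal mirror of the struct above (backlink omitted for brevity), only to
// illustrate the JSON field names produced by rename_all = "camelCase".
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct BlockVersionSketch {
    version_id: String,
    ref_deleted: bool,
    version_deleted: bool,
    garbage_collected: bool,
}

fn main() {
    let v = BlockVersionSketch {
        version_id: "0123abcd".into(), // hypothetical value
        ref_deleted: false,
        version_deleted: false,
        garbage_collected: false,
    };
    // Prints: {"versionId":"0123abcd","refDeleted":false,"versionDeleted":false,"garbageCollected":false}
    println!("{}", serde_json::to_string(&v).unwrap());
}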

View file

@ -84,14 +84,16 @@ impl RequestHandler for LocalGetBlockInfoRequest {
};
versions.push(BlockVersion {
version_id: hex::encode(&br.version),
deleted: v.deleted.get(),
ref_deleted: br.deleted.get(),
version_deleted: v.deleted.get(),
garbage_collected: false,
backlink: Some(bl),
});
} else {
versions.push(BlockVersion {
version_id: hex::encode(&br.version),
deleted: true,
ref_deleted: br.deleted.get(),
version_deleted: true,
garbage_collected: true,
backlink: None,
});

View file

@ -33,6 +33,7 @@ impl RequestHandler for GetClusterStatusRequest {
i.id,
NodeResp {
id: hex::encode(i.id),
garage_version: i.status.garage_version,
addr: i.addr,
hostname: i.status.hostname,
is_up: i.is_up,
@ -231,12 +232,16 @@ impl RequestHandler for GetClusterStatisticsRequest {
if meta_part_avail.len() < node_partition_count.len()
|| data_part_avail.len() < node_partition_count.len()
{
writeln!(&mut ret, " data: < {}", data_avail).unwrap();
writeln!(&mut ret, " metadata: < {}", meta_avail).unwrap();
ret += &format_table_to_string(vec![
format!(" data: < {}", data_avail),
format!(" metadata: < {}", meta_avail),
]);
writeln!(&mut ret, "A precise estimate could not be given as information is missing for some storage nodes.").unwrap();
} else {
writeln!(&mut ret, " data: {}", data_avail).unwrap();
writeln!(&mut ret, " metadata: {}", meta_avail).unwrap();
ret += &format_table_to_string(vec![
format!(" data: {}", data_avail),
format!(" metadata: {}", meta_avail),
]);
}
}
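
For intuition only, a minimal sketch of the tab-column alignment that `format_table_to_string` is used for here; this is a hypothetical stand-in written for illustration, not Garage's actual helper, and the sample rows are made up.

// Pads each '\t'-separated column to the width of its longest cell so that
// rows line up, which is the "uniformized" table output this PR moves to.
fn align_columns(rows: &[String]) -> String {
    let split: Vec<Vec<&str>> = rows.iter().map(|r| r.split('\t').collect()).collect();
    let ncols = split.iter().map(|r| r.len()).max().unwrap_or(0);
    let mut widths = vec![0usize; ncols];
    for row in &split {
        for (i, cell) in row.iter().enumerate() {
            widths[i] = widths[i].max(cell.len());
        }
    }
    let mut out = String::new();
    for row in &split {
        for (i, cell) in row.iter().enumerate() {
            if i + 1 == row.len() {
                out.push_str(cell);
            } else {
                out.push_str(&format!("{:<w$}  ", cell, w = widths[i]));
            }
        }
        out.push('\n');
    }
    out
}

fn main() {
    // Hypothetical rows mirroring the "data:" / "metadata:" lines above.
    let rows = vec![
        "  data:\t< 1.5 TB".to_string(),
        "  metadata:\t< 10 GB".to_string(),
    ];
    print!("{}", align_columns(&rows));
}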

View file

@ -55,27 +55,48 @@ impl RequestHandler for LocalGetNodeStatisticsRequest {
garage: &Arc<Garage>,
_admin: &Admin,
) -> Result<LocalGetNodeStatisticsResponse, Error> {
let mut ret = String::new();
writeln!(
&mut ret,
"Garage version: {} [features: {}]\nRust compiler version: {}",
garage_util::version::garage_version(),
garage_util::version::garage_features()
.map(|list| list.join(", "))
.unwrap_or_else(|| "(unknown)".into()),
garage_util::version::rust_version(),
)
.unwrap();
let sys_status = garage.system.local_status();
writeln!(&mut ret, "\nDatabase engine: {}", garage.db.engine()).unwrap();
let mut ret = format_table_to_string(vec![
format!("Node ID:\t{:?}", garage.system.id),
format!("Hostname:\t{}", sys_status.hostname.unwrap_or_default(),),
format!(
"Garage version:\t{}",
garage_util::version::garage_version(),
),
format!(
"Garage features:\t{}",
garage_util::version::garage_features()
.map(|list| list.join(", "))
.unwrap_or_else(|| "(unknown)".into()),
),
format!(
"Rust compiler version:\t{}",
garage_util::version::rust_version(),
),
format!("Database engine:\t{}", garage.db.engine()),
]);
// Gather table statistics
let mut table = vec![" Table\tItems\tMklItems\tMklTodo\tGcTodo".into()];
let mut table = vec![" Table\tItems\tMklItems\tMklTodo\tInsQueue\tGcTodo".into()];
table.push(gather_table_stats(&garage.admin_token_table)?);
table.push(gather_table_stats(&garage.bucket_table)?);
table.push(gather_table_stats(&garage.bucket_alias_table)?);
table.push(gather_table_stats(&garage.key_table)?);
table.push(gather_table_stats(&garage.object_table)?);
table.push(gather_table_stats(&garage.object_counter_table.table)?);
table.push(gather_table_stats(&garage.mpu_table)?);
table.push(gather_table_stats(&garage.mpu_counter_table.table)?);
table.push(gather_table_stats(&garage.version_table)?);
table.push(gather_table_stats(&garage.block_ref_table)?);
#[cfg(feature = "k2v")]
{
table.push(gather_table_stats(&garage.k2v.item_table)?);
table.push(gather_table_stats(&garage.k2v.counter_table.table)?);
}
write!(
&mut ret,
"\nTable stats:\n{}",
@ -87,24 +108,17 @@ impl RequestHandler for LocalGetNodeStatisticsRequest {
writeln!(&mut ret, "\nBlock manager stats:").unwrap();
let rc_len = garage.block_manager.rc_len()?.to_string();
writeln!(
&mut ret,
" number of RC entries (~= number of blocks): {}",
rc_len
)
.unwrap();
writeln!(
&mut ret,
" resync queue length: {}",
garage.block_manager.resync.queue_len()?
)
.unwrap();
writeln!(
&mut ret,
" blocks with resync errors: {}",
garage.block_manager.resync.errors_len()?
)
.unwrap();
ret += &format_table_to_string(vec![
format!(" number of RC entries:\t{} (~= number of blocks)", rc_len),
format!(
" resync queue length:\t{}",
garage.block_manager.resync.queue_len()?
),
format!(
" blocks with resync errors:\t{}",
garage.block_manager.resync.errors_len()?
),
]);
Ok(LocalGetNodeStatisticsResponse { freeform: ret })
}
@ -119,11 +133,12 @@ where
let mkl_len = t.merkle_updater.merkle_tree_len()?.to_string();
Ok(format!(
" {}\t{}\t{}\t{}\t{}",
" {}\t{}\t{}\t{}\t{}\t{}",
F::TABLE_NAME,
data_len,
mkl_len,
t.merkle_updater.todo_len()?,
t.data.insert_queue_len()?,
t.data.gc_todo_len()?
))
}

View file

@ -50,6 +50,7 @@ sodiumoxide.workspace = true
structopt.workspace = true
git-version.workspace = true
utoipa.workspace = true
serde_json.workspace = true
futures.workspace = true
tokio.workspace = true
@ -85,7 +86,7 @@ k2v-client.workspace = true
[features]
default = [ "bundled-libs", "metrics", "lmdb", "sqlite", "k2v" ]
k2v = [ "garage_util/k2v", "garage_api_k2v" ]
k2v = [ "garage_util/k2v", "garage_api_k2v", "garage_api_admin/k2v" ]
# Database engines
lmdb = [ "garage_model/lmdb" ]

View file

@ -36,16 +36,6 @@ pub fn node_id_command(config_file: PathBuf, quiet: bool) -> Result<(), Error> {
);
eprintln!(" garage [-c <config file path>] node connect {}", idstr);
eprintln!();
eprintln!("Or instruct them to connect from here by running:");
eprintln!(
" garage -c {} -h <remote node> node connect {}",
config_file.to_string_lossy(),
idstr
);
eprintln!(
"where <remote_node> is their own node identifier in the format: <full-node-id>@<ip>:<port>"
);
eprintln!();
eprintln!("This node identifier can also be added as a bootstrap node in other node's garage.toml files:");
eprintln!(" bootstrap_peers = [");
eprintln!(" \"{}\",", idstr);

View file

@ -34,14 +34,12 @@ impl Cli {
list.0.sort_by_key(|x| x.created);
let mut table = vec!["ID\tCREATED\tNAME\tEXPIRATION\tSCOPE".to_string()];
let mut table = vec!["ID\tCreated\tName\tExpiration\tScope".to_string()];
for tok in list.0.iter() {
let scope = if tok.expired {
String::new()
} else if tok.scope.len() > 1 {
format!("[{}]", tok.scope.len())
} else {
tok.scope.get(0).cloned().unwrap_or_default()
table_list_abbr(&tok.scope)
};
let exp = if tok.expired {
"expired".to_string()
@ -233,7 +231,7 @@ impl Cli {
}
fn print_token_info(token: &GetAdminTokenInfoResponse) {
format_table(vec![
let mut table = vec![
format!("ID:\t{}", token.id.as_ref().unwrap()),
format!("Name:\t{}", token.name),
format!("Created:\t{}", token.created.unwrap().with_timezone(&Local)),
@ -248,6 +246,16 @@ fn print_token_info(token: &GetAdminTokenInfoResponse) {
.map(|x| x.with_timezone(&Local).to_string())
.unwrap_or("never".into())
),
format!("Scope:\t{}", token.scope.to_vec().join(", ")),
]);
String::new(),
];
for (i, scope) in token.scope.iter().enumerate() {
if i == 0 {
table.push(format!("Scope:\t{}", scope));
} else {
table.push(format!("\t{}", scope));
}
}
format_table(table);
}

View file

@ -51,46 +51,70 @@ impl Cli {
.local_api_request(LocalGetBlockInfoRequest { block_hash: hash })
.await?;
println!("Block hash: {}", info.block_hash);
println!("Refcount: {}", info.refcount);
println!("==== BLOCK INFORMATION ====");
format_table(vec![
format!("Block hash:\t{}", info.block_hash),
format!("Refcount:\t{}", info.refcount),
]);
println!();
let mut table = vec!["Version\tBucket\tKey\tMPU\tDeleted".into()];
println!("==== REFERENCES TO THIS BLOCK ====");
let mut table = vec!["Status\tVersion\tBucket\tKey\tMPU".into()];
let mut nondeleted_count = 0;
let mut inconsistent_refs = false;
for ver in info.versions.iter() {
match &ver.backlink {
Some(BlockVersionBacklink::Object { bucket_id, key }) => {
table.push(format!(
"{:.16}\t{:.16}\t{}\t\t{:?}",
ver.version_id, bucket_id, key, ver.deleted
"{}\t{:.16}{}\t{:.16}\t{}",
ver.ref_deleted.then_some("deleted").unwrap_or("active"),
ver.version_id,
ver.version_deleted
.then_some(" (deleted)")
.unwrap_or_default(),
bucket_id,
key
));
}
Some(BlockVersionBacklink::Upload {
upload_id,
upload_deleted: _,
upload_deleted,
upload_garbage_collected: _,
bucket_id,
key,
}) => {
table.push(format!(
"{:.16}\t{:.16}\t{}\t{:.16}\t{:.16}",
"{}\t{:.16}{}\t{:.16}\t{}\t{:.16}{}",
ver.ref_deleted.then_some("deleted").unwrap_or("active"),
ver.version_id,
ver.version_deleted
.then_some(" (deleted)")
.unwrap_or_default(),
bucket_id.as_deref().unwrap_or(""),
key.as_deref().unwrap_or(""),
upload_id,
ver.deleted
upload_deleted.then_some(" (deleted)").unwrap_or_default(),
));
}
None => {
table.push(format!("{:.16}\t\t\tyes", ver.version_id));
}
}
if !ver.deleted {
if ver.ref_deleted != ver.version_deleted {
inconsistent_refs = true;
}
if !ver.ref_deleted {
nondeleted_count += 1;
}
}
format_table(table);
if inconsistent_refs {
println!();
println!("There are inconsistencies between the block_ref and the version tables.");
println!("Fix them by running `garage repair block-refs`");
}
if info.refcount != nondeleted_count {
println!();
println!(

View file

@ -30,21 +30,18 @@ impl Cli {
pub async fn cmd_list_buckets(&self) -> Result<(), Error> {
let buckets = self.api_request(ListBucketsRequest).await?;
println!("List of buckets:");
let mut table = vec![];
let mut table = vec!["ID\tGlobal aliases\tLocal aliases".to_string()];
for bucket in buckets.0.iter() {
let local_aliases_n = match &bucket.local_aliases[..] {
[] => "".into(),
[alias] => format!("{}:{}", alias.access_key_id, alias.alias),
s => format!("[{} local aliases]", s.len()),
};
table.push(format!(
"\t{}\t{}\t{}",
bucket.global_aliases.join(","),
local_aliases_n,
"{:.16}\t{}\t{}",
bucket.id,
table_list_abbr(&bucket.global_aliases),
table_list_abbr(
bucket
.local_aliases
.iter()
.map(|x| format!("{}:{}", x.access_key_id, x.alias))
),
));
}
format_table(table);
@ -61,88 +58,20 @@ impl Cli {
})
.await?;
println!("Bucket: {}", bucket.id);
let size = bytesize::ByteSize::b(bucket.bytes as u64);
println!(
"\nSize: {} ({})",
size.to_string_as(true),
size.to_string_as(false)
);
println!("Objects: {}", bucket.objects);
println!(
"Unfinished uploads (multipart and non-multipart): {}",
bucket.unfinished_uploads,
);
println!(
"Unfinished multipart uploads: {}",
bucket.unfinished_multipart_uploads
);
let mpu_size = bytesize::ByteSize::b(bucket.unfinished_multipart_uploads as u64);
println!(
"Size of unfinished multipart uploads: {} ({})",
mpu_size.to_string_as(true),
mpu_size.to_string_as(false),
);
println!("\nWebsite access: {}", bucket.website_access);
if bucket.quotas.max_size.is_some() || bucket.quotas.max_objects.is_some() {
println!("\nQuotas:");
if let Some(ms) = bucket.quotas.max_size {
let ms = bytesize::ByteSize::b(ms);
println!(
" maximum size: {} ({})",
ms.to_string_as(true),
ms.to_string_as(false)
);
}
if let Some(mo) = bucket.quotas.max_objects {
println!(" maximum number of objects: {}", mo);
}
}
println!("\nGlobal aliases:");
for alias in bucket.global_aliases {
println!(" {}", alias);
}
println!("\nKey-specific aliases:");
let mut table = vec![];
for key in bucket.keys.iter() {
for alias in key.bucket_local_aliases.iter() {
table.push(format!("\t{} ({})\t{}", key.access_key_id, key.name, alias));
}
}
format_table(table);
println!("\nAuthorized keys:");
let mut table = vec![];
for key in bucket.keys.iter() {
if !(key.permissions.read || key.permissions.write || key.permissions.owner) {
continue;
}
let rflag = if key.permissions.read { "R" } else { " " };
let wflag = if key.permissions.write { "W" } else { " " };
let oflag = if key.permissions.owner { "O" } else { " " };
table.push(format!(
"\t{}{}{}\t{}\t{}",
rflag, wflag, oflag, key.access_key_id, key.name
));
}
format_table(table);
print_bucket_info(&bucket);
Ok(())
}
pub async fn cmd_create_bucket(&self, opt: BucketOpt) -> Result<(), Error> {
self.api_request(CreateBucketRequest {
global_alias: Some(opt.name.clone()),
local_alias: None,
})
.await?;
let bucket = self
.api_request(CreateBucketRequest {
global_alias: Some(opt.name.clone()),
local_alias: None,
})
.await?;
println!("Bucket {} was created.", opt.name);
print_bucket_info(&bucket.0);
Ok(())
}
@ -200,7 +129,7 @@ impl Cli {
})
.await?;
if let Some(key_pat) = &opt.local {
let res = if let Some(key_pat) = &opt.local {
let key = self
.api_request(GetKeyInfoRequest {
search: Some(key_pat.clone()),
@ -216,12 +145,7 @@ impl Cli {
access_key_id: key.access_key_id.clone(),
},
})
.await?;
println!(
"Alias {} now points to bucket {:.16} in namespace of key {}",
opt.new_name, bucket.id, key.access_key_id
)
.await?
} else {
self.api_request(AddBucketAliasRequest {
bucket_id: bucket.id.clone(),
@ -229,19 +153,16 @@ impl Cli {
global_alias: opt.new_name.clone(),
},
})
.await?;
.await?
};
println!(
"Alias {} now points to bucket {:.16}",
opt.new_name, bucket.id
)
}
print_bucket_info(&res.0);
Ok(())
}
pub async fn cmd_unalias_bucket(&self, opt: UnaliasBucketOpt) -> Result<(), Error> {
if let Some(key_pat) = &opt.local {
let res = if let Some(key_pat) = &opt.local {
let key = self
.api_request(GetKeyInfoRequest {
search: Some(key_pat.clone()),
@ -266,12 +187,7 @@ impl Cli {
local_alias: opt.name.clone(),
},
})
.await?;
println!(
"Alias {} no longer points to bucket {:.16} in namespace of key {}",
&opt.name, bucket.id, key.access_key_id
)
.await?
} else {
let bucket = self
.api_request(GetBucketInfoRequest {
@ -287,13 +203,10 @@ impl Cli {
global_alias: opt.name.clone(),
},
})
.await?;
.await?
};
println!(
"Alias {} no longer points to bucket {:.16}",
opt.name, bucket.id
)
}
print_bucket_info(&res.0);
Ok(())
}
@ -315,44 +228,19 @@ impl Cli {
})
.await?;
self.api_request(AllowBucketKeyRequest(BucketKeyPermChangeRequest {
bucket_id: bucket.id.clone(),
access_key_id: key.access_key_id.clone(),
permissions: ApiBucketKeyPerm {
read: opt.read,
write: opt.write,
owner: opt.owner,
},
}))
.await?;
let new_bucket = self
.api_request(GetBucketInfoRequest {
id: Some(bucket.id),
global_alias: None,
search: None,
})
let res = self
.api_request(AllowBucketKeyRequest(BucketKeyPermChangeRequest {
bucket_id: bucket.id.clone(),
access_key_id: key.access_key_id.clone(),
permissions: ApiBucketKeyPerm {
read: opt.read,
write: opt.write,
owner: opt.owner,
},
}))
.await?;
if let Some(new_key) = new_bucket
.keys
.iter()
.find(|k| k.access_key_id == key.access_key_id)
{
println!(
"New permissions for key {} on bucket {:.16}:\n read {}\n write {}\n owner {}",
key.access_key_id,
new_bucket.id,
new_key.permissions.read,
new_key.permissions.write,
new_key.permissions.owner
);
} else {
println!(
"Access key {} has no permissions on bucket {:.16}",
key.access_key_id, new_bucket.id
);
}
print_bucket_info(&res.0);
Ok(())
}
@ -374,44 +262,19 @@ impl Cli {
})
.await?;
self.api_request(DenyBucketKeyRequest(BucketKeyPermChangeRequest {
bucket_id: bucket.id.clone(),
access_key_id: key.access_key_id.clone(),
permissions: ApiBucketKeyPerm {
read: opt.read,
write: opt.write,
owner: opt.owner,
},
}))
.await?;
let new_bucket = self
.api_request(GetBucketInfoRequest {
id: Some(bucket.id),
global_alias: None,
search: None,
})
let res = self
.api_request(DenyBucketKeyRequest(BucketKeyPermChangeRequest {
bucket_id: bucket.id.clone(),
access_key_id: key.access_key_id.clone(),
permissions: ApiBucketKeyPerm {
read: opt.read,
write: opt.write,
owner: opt.owner,
},
}))
.await?;
if let Some(new_key) = new_bucket
.keys
.iter()
.find(|k| k.access_key_id == key.access_key_id)
{
println!(
"New permissions for key {} on bucket {:.16}:\n read {}\n write {}\n owner {}",
key.access_key_id,
new_bucket.id,
new_key.permissions.read,
new_key.permissions.write,
new_key.permissions.owner
);
} else {
println!(
"Access key {} no longer has permissions on bucket {:.16}",
key.access_key_id, new_bucket.id
);
}
print_bucket_info(&res.0);
Ok(())
}
@ -447,20 +310,17 @@ impl Cli {
}
};
self.api_request(UpdateBucketRequest {
id: bucket.id,
body: UpdateBucketRequestBody {
website_access: Some(wa),
quotas: None,
},
})
.await?;
let res = self
.api_request(UpdateBucketRequest {
id: bucket.id,
body: UpdateBucketRequestBody {
website_access: Some(wa),
quotas: None,
},
})
.await?;
if opt.allow {
println!("Website access allowed for {}", &opt.bucket);
} else {
println!("Website access denied for {}", &opt.bucket);
}
print_bucket_info(&res.0);
Ok(())
}
@ -500,16 +360,17 @@ impl Cli {
},
};
self.api_request(UpdateBucketRequest {
id: bucket.id.clone(),
body: UpdateBucketRequestBody {
website_access: None,
quotas: Some(new_quotas),
},
})
.await?;
let res = self
.api_request(UpdateBucketRequest {
id: bucket.id.clone(),
body: UpdateBucketRequestBody {
website_access: None,
quotas: Some(new_quotas),
},
})
.await?;
println!("Quotas updated for bucket {:.16}", bucket.id);
print_bucket_info(&res.0);
Ok(())
}
@ -547,3 +408,105 @@ impl Cli {
Ok(())
}
}
fn print_bucket_info(bucket: &GetBucketInfoResponse) {
println!("==== BUCKET INFORMATION ====");
let mut info = vec![
format!("Bucket:\t{}", bucket.id),
String::new(),
{
let size = bytesize::ByteSize::b(bucket.bytes as u64);
format!(
"Size:\t{} ({})",
size.to_string_as(true),
size.to_string_as(false)
)
},
format!("Objects:\t{}", bucket.objects),
];
if bucket.unfinished_uploads > 0 {
info.extend([
format!(
"Unfinished uploads:\t{} multipart uploads",
bucket.unfinished_multipart_uploads
),
format!("\t{} including regular uploads", bucket.unfinished_uploads),
{
let mpu_size =
bytesize::ByteSize::b(bucket.unfinished_multipart_upload_bytes as u64);
format!(
"Size of unfinished multipart uploads:\t{} ({})",
mpu_size.to_string_as(true),
mpu_size.to_string_as(false),
)
},
]);
}
info.extend([
String::new(),
format!("Website access:\t{}", bucket.website_access),
]);
if let Some(wc) = &bucket.website_config {
info.extend([
format!(" index document:\t{}", wc.index_document),
format!(
" error document:\t{}",
wc.error_document.as_deref().unwrap_or("(not defined)")
),
]);
}
if bucket.quotas.max_size.is_some() || bucket.quotas.max_objects.is_some() {
info.push(String::new());
info.push("Quotas:\tenabled".into());
if let Some(ms) = bucket.quotas.max_size {
let ms = bytesize::ByteSize::b(ms);
info.push(format!(
" maximum size:\t{} ({})",
ms.to_string_as(true),
ms.to_string_as(false)
));
}
if let Some(mo) = bucket.quotas.max_objects {
info.push(format!(" maximum number of objects:\t{}", mo));
}
}
if !bucket.global_aliases.is_empty() {
info.push(String::new());
for (i, alias) in bucket.global_aliases.iter().enumerate() {
if i == 0 && bucket.global_aliases.len() > 1 {
info.push(format!("Global aliases:\t{}", alias));
} else if i == 0 {
info.push(format!("Global alias:\t{}", alias));
} else {
info.push(format!("\t{}", alias));
}
}
}
format_table(info);
println!("");
println!("==== KEYS FOR THIS BUCKET ====");
let mut key_info = vec!["Permissions\tAccess key\t\tLocal aliases".to_string()];
key_info.extend(bucket.keys.iter().map(|key| {
let rflag = if key.permissions.read { "R" } else { " " };
let wflag = if key.permissions.write { "W" } else { " " };
let oflag = if key.permissions.owner { "O" } else { " " };
format!(
"{}{}{}\t{}\t{}\t{}",
rflag,
wflag,
oflag,
key.access_key_id,
key.name,
key.bucket_local_aliases.to_vec().join(","),
)
}));
format_table(key_info);
}

View file

@ -16,7 +16,7 @@ impl Cli {
println!("==== HEALTHY NODES ====");
let mut healthy_nodes =
vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail".to_string()];
vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail\tVersion".to_string()];
for adv in status.nodes.iter().filter(|adv| adv.is_up) {
let host = adv.hostname.as_deref().unwrap_or("?");
@ -35,7 +35,7 @@ impl Cli {
None => "?".into(),
};
healthy_nodes.push(format!(
"{id:.16}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{data_avail}",
"{id:.16}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{data_avail}\t{version}",
id = adv.id,
host = host,
addr = addr,
@ -43,6 +43,7 @@ impl Cli {
zone = cfg.zone,
capacity = capacity_string(cfg.capacity),
data_avail = data_avail,
version = adv.garage_version.as_deref().unwrap_or_default(),
));
} else {
let status = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) {
@ -54,11 +55,12 @@ impl Cli {
_ => "NO ROLE ASSIGNED",
};
healthy_nodes.push(format!(
"{id:.16}\t{h}\t{addr}\t\t\t{status}",
"{id:.16}\t{h}\t{addr}\t\t\t{status}\t\t{version}",
id = adv.id,
h = host,
addr = addr,
status = status,
version = adv.garage_version.as_deref().unwrap_or_default(),
));
}
}

View file

@ -24,10 +24,9 @@ impl Cli {
pub async fn cmd_list_keys(&self) -> Result<(), Error> {
let keys = self.api_request(ListKeysRequest).await?;
println!("List of keys:");
let mut table = vec![];
let mut table = vec!["ID\tName".to_string()];
for key in keys.0.iter() {
table.push(format!("\t{}\t{}", key.id, key.name));
table.push(format!("{}\t{}", key.id, key.name));
}
format_table(table);
@ -185,43 +184,35 @@ impl Cli {
}
fn print_key_info(key: &GetKeyInfoResponse) {
println!("Key name: {}", key.name);
println!("Key ID: {}", key.access_key_id);
println!(
"Secret key: {}",
key.secret_access_key.as_deref().unwrap_or("(redacted)")
);
println!("Can create buckets: {}", key.permissions.create_bucket);
println!("==== ACCESS KEY INFORMATION ====");
println!("\nKey-specific bucket aliases:");
let mut table = vec![];
for bucket in key.buckets.iter() {
for la in bucket.local_aliases.iter() {
table.push(format!(
"\t{}\t{}\t{}",
la,
bucket.global_aliases.join(","),
bucket.id
));
}
}
format_table(table);
format_table(vec![
format!("Key name:\t{}", key.name),
format!("Key ID:\t{}", key.access_key_id),
format!(
"Secret key:\t{}",
key.secret_access_key.as_deref().unwrap_or("(redacted)")
),
format!("Can create buckets:\t{}", key.permissions.create_bucket),
]);
println!("\nAuthorized buckets:");
let mut table = vec![];
for bucket in key.buckets.iter() {
println!("");
println!("==== BUCKETS FOR THIS KEY ====");
let mut bucket_info = vec!["Permissions\tID\tGlobal aliases\tLocal aliases".to_string()];
bucket_info.extend(key.buckets.iter().map(|bucket| {
let rflag = if bucket.permissions.read { "R" } else { " " };
let wflag = if bucket.permissions.write { "W" } else { " " };
let oflag = if bucket.permissions.owner { "O" } else { " " };
table.push(format!(
"\t{}{}{}\t{}\t{}\t{:.16}",
format!(
"{}{}{}\t{:.16}\t{}\t{}",
rflag,
wflag,
oflag,
bucket.global_aliases.join(","),
bucket.id,
table_list_abbr(&bucket.global_aliases),
bucket.local_aliases.join(","),
bucket.id
));
}
format_table(table);
)
}));
format_table(bucket_info);
}

View file

@ -378,7 +378,7 @@ pub fn print_cluster_layout(layout: &GetClusterLayoutResponse, empty_msg: &str)
let tags = role.tags.join(",");
if let (Some(capacity), Some(usable_capacity)) = (role.capacity, role.usable_capacity) {
table.push(format!(
"{:.16}\t{}\t{}\t{}\t{} ({:.1}%)",
"{:.16}\t[{}]\t{}\t{}\t{} ({:.1}%)",
role.id,
tags,
role.zone,
@ -388,7 +388,7 @@ pub fn print_cluster_layout(layout: &GetClusterLayoutResponse, empty_msg: &str)
));
} else {
table.push(format!(
"{:.16}\t{}\t{}\t{}",
"{:.16}\t[{}]\t{}\t{}",
role.id,
tags,
role.zone,
@ -427,7 +427,7 @@ pub fn print_staging_role_changes(layout: &GetClusterLayoutResponse) -> bool {
}) => {
let tags = tags.join(",");
table.push(format!(
"{:.16}\t{}\t{}\t{}",
"{:.16}\t[{}]\t{}\t{}",
change.id,
tags,
zone,

View file

@ -43,6 +43,7 @@ impl Cli {
Command::Meta(mo) => self.cmd_meta(mo).await,
Command::Stats(so) => self.cmd_stats(so).await,
Command::Repair(ro) => self.cmd_repair(ro).await,
Command::JsonApi { endpoint, payload } => self.cmd_json_api(endpoint, payload).await,
_ => unreachable!(),
}
@ -105,4 +106,59 @@ impl Cli {
}
Ok(resp.success.into_iter().next().unwrap().1)
}
pub async fn cmd_json_api(&self, endpoint: String, payload: String) -> Result<(), Error> {
let payload: serde_json::Value = if payload == "-" {
serde_json::from_reader(&std::io::stdin())?
} else {
serde_json::from_str(&payload)?
};
let request: AdminApiRequest = serde_json::from_value(serde_json::json!({
endpoint.clone(): payload,
}))?;
let resp = match self
.proxy_rpc_endpoint
.call(&self.rpc_host, ProxyRpc::Proxy(request), PRIO_NORMAL)
.await??
{
ProxyRpcResponse::ProxyApiOkResponse(resp) => resp,
ProxyRpcResponse::ApiErrorResponse {
http_code,
error_code,
message,
} => {
return Err(Error::Message(format!(
"{} ({}): {}",
error_code, http_code, message
)))
}
m => return Err(Error::unexpected_rpc_message(m)),
};
if let serde_json::Value::Object(map) = serde_json::to_value(&resp)? {
if let Some(inner) = map.get(&endpoint) {
serde_json::to_writer_pretty(std::io::stdout(), &inner)?;
return Ok(());
}
}
Err(Error::Message(format!(
"Invalid response: {}",
serde_json::to_string(&resp)?
)))
}
}
pub fn table_list_abbr<T: IntoIterator<Item = S>, S: AsRef<str>>(values: T) -> String {
let mut iter = values.into_iter();
match iter.next() {
Some(first) => match iter.count() {
0 => first.as_ref().to_string(),
n => format!("{}, ... ({})", first.as_ref(), n + 1),
},
None => String::new(),
}
}
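
A few illustrative calls (not part of the diff) showing the abbreviation rule implemented by `table_list_abbr`; the alias names are hypothetical.

#[test]
fn table_list_abbr_examples() {
    // One entry is printed as-is, several entries collapse to the first plus a
    // count, and an empty list prints nothing.
    assert_eq!(table_list_abbr(["my-alias"]), "my-alias");
    assert_eq!(table_list_abbr(["alias-a", "alias-b", "alias-c"]), "alias-a, ... (3)");
    assert_eq!(table_list_abbr(Vec::<String>::new()), "");
}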

View file

@ -22,15 +22,22 @@ impl Cli {
})
.await?;
let mut table = vec![];
for (node, err) in res.error.iter() {
table.push(format!("{:.16}\tError: {}", node, err));
}
let mut table = vec!["Node\tResult".to_string()];
for (node, _) in res.success.iter() {
table.push(format!("{:.16}\tSnapshot created", node));
}
for (node, err) in res.error.iter() {
table.push(format!("{:.16}\tError: {}", node, err));
}
format_table(table);
if !res.error.is_empty() {
return Err(Error::Message(format!(
"{} nodes returned an error",
res.error.len()
)));
}
Ok(())
}
@ -47,19 +54,17 @@ impl Cli {
.await?;
for (node, res) in res.success.iter() {
println!("======================");
println!("Stats for node {:.16}:\n", node);
println!("==== NODE [{:.16}] ====", node);
println!("{}\n", res.freeform);
}
for (node, err) in res.error.iter() {
println!("======================");
println!("Node {:.16}: error: {}\n", node, err);
println!("==== NODE [{:.16}] ====", node);
println!("Error: {}\n", err);
}
let res = self.api_request(GetClusterStatisticsRequest).await?;
println!("======================");
println!("Cluster statistics:\n");
println!("==== CLUSTER STATISTICS ====");
println!("{}\n", res.freeform);
Ok(())

View file

@ -66,6 +66,17 @@ pub enum Command {
/// Output openapi JSON schema for admin api
#[structopt(name = "admin-api-schema", version = garage_version(), setting(structopt::clap::AppSettings::Hidden))]
AdminApiSchema,
/// Directly invoke the admin API using a JSON payload.
/// The result is printed to `stdout` in JSON format.
#[structopt(name = "json-api", version = garage_version())]
JsonApi {
/// The admin API endpoint to invoke, e.g. GetClusterStatus
endpoint: String,
/// The JSON payload, or `-` to read from `stdin`
#[structopt(default_value = "null")]
payload: String,
},
}
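
A rough sketch (not part of the diff) of what the new `json-api` subcommand does with its arguments: the payload is nested under the endpoint name before being deserialized into an `AdminApiRequest`, as in `cmd_json_api` above. It assumes serde_json is available; the endpoint shown is the one used as an example in the option help.

use serde_json::json;

fn main() {
    // Roughly equivalent to running:  garage json-api GetClusterStatus
    // (the payload defaults to `null` when not given, per the option above)
    let endpoint = "GetClusterStatus";
    let payload = json!(null);

    // Same nesting step as in cmd_json_api: { "<endpoint>": <payload> }
    let request_body = json!({ endpoint: payload });
    println!("{}", serde_json::to_string_pretty(&request_body).unwrap());
}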
// -------------------------

View file

@ -3,6 +3,8 @@ use std::path::{Path, PathBuf};
use std::process;
use std::sync::Once;
use serde_json::json;
use super::ext::*;
// https://xkcd.com/221/
@ -193,27 +195,17 @@ api_bind_addr = "127.0.0.1:{admin_port}"
let mut key = Key::default();
let mut cmd = self.command();
let base = cmd.args(["key", "create"]);
let base = cmd.args(["json-api", "CreateKey"]);
let with_name = match maybe_name {
Some(name) => base.args([name]),
None => base,
Some(name) => base.args([serde_json::to_string(&json!({"name": name})).unwrap()]),
None => base.args(["{}"]),
};
let output = with_name.expect_success_output("Could not create key");
let stdout = String::from_utf8(output.stdout).unwrap();
let stdout: serde_json::Value = serde_json::from_slice(&output.stdout).unwrap();
for line in stdout.lines() {
if let Some(key_id) = line.strip_prefix("Key ID: ") {
key.id = key_id.to_owned();
continue;
}
if let Some(key_secret) = line.strip_prefix("Secret key: ") {
key.secret = key_secret.to_owned();
continue;
}
}
assert!(!key.id.is_empty(), "Invalid key: Key ID is empty");
assert!(!key.secret.is_empty(), "Invalid key: Key secret is empty");
key.id = stdout["accessKeyId"].as_str().unwrap().to_string();
key.secret = stdout["secretAccessKey"].as_str().unwrap().to_string();
key
}

View file

@ -823,7 +823,7 @@ impl LayoutVersion {
let total_cap_n = self.expect_get_node_capacity(&self.node_id_vec[*n]);
let tags_n = (self.node_role(&self.node_id_vec[*n]).ok_or("<??>"))?.tags_string();
table.push(format!(
" {:?}\t{}\t{} ({} new)\t{}\t{} ({:.1}%)",
" {:?}\t[{}]\t{} ({} new)\t{}\t{} ({:.1}%)",
self.node_id_vec[*n],
tags_n,
stored_partitions[*n],

View file

@ -124,6 +124,9 @@ pub struct NodeStatus {
/// Hostname of the node
pub hostname: Option<String>,
/// Garage version of the node
pub garage_version: Option<String>,
/// Replication factor configured on the node
pub replication_factor: usize,
@ -369,6 +372,10 @@ impl System {
&self.layout_manager.rpc_helper
}
pub fn local_status(&self) -> NodeStatus {
self.local_status.read().unwrap().clone()
}
// ---- Administrative operations (directly available and
// also available through RPC) ----
@ -786,6 +793,7 @@ impl NodeStatus {
.into_string()
.unwrap_or_else(|_| "<invalid utf-8>".to_string()),
),
garage_version: Some(garage_util::version::garage_version().to_string()),
replication_factor: replication_factor.into(),
layout_digest: layout_manager.layout().digest(),
meta_disk_avail: None,
@ -796,6 +804,7 @@ impl NodeStatus {
fn unknown() -> Self {
NodeStatus {
hostname: None,
garage_version: None,
replication_factor: 0,
layout_digest: Default::default(),
meta_disk_avail: None,

View file

@ -66,6 +66,7 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {
store.clone(),
merkle_tree.clone(),
merkle_todo.clone(),
insert_queue.clone(),
gc_todo.clone(),
);
@ -367,6 +368,10 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {
}
}
pub fn insert_queue_len(&self) -> Result<usize, Error> {
Ok(self.insert_queue.len()?)
}
pub fn gc_todo_len(&self) -> Result<usize, Error> {
Ok(self.gc_todo.len()?)
}

View file

@ -7,6 +7,7 @@ pub struct TableMetrics {
pub(crate) _table_size: ValueObserver<u64>,
pub(crate) _merkle_tree_size: ValueObserver<u64>,
pub(crate) _merkle_todo_len: ValueObserver<u64>,
pub(crate) _insert_queue_len: ValueObserver<u64>,
pub(crate) _gc_todo_len: ValueObserver<u64>,
pub(crate) get_request_counter: BoundCounter<u64>,
@ -26,6 +27,7 @@ impl TableMetrics {
store: db::Tree,
merkle_tree: db::Tree,
merkle_todo: db::Tree,
insert_queue: db::Tree,
gc_todo: db::Tree,
) -> Self {
let meter = global::meter(table_name);
@ -72,6 +74,20 @@ impl TableMetrics {
)
.with_description("Merkle tree updater TODO queue length")
.init(),
_insert_queue_len: meter
.u64_value_observer(
"table.insert_queue_length",
move |observer| {
if let Ok(v) = insert_queue.len() {
observer.observe(
v as u64,
&[KeyValue::new("table_name", table_name)],
);
}
},
)
.with_description("Table insert queue length")
.init(),
_gc_todo_len: meter
.u64_value_observer(
"table.gc_todo_queue_length",