mirror of https://git.deuxfleurs.fr/Deuxfleurs/garage.git, synced 2024-11-22 08:01:02 +00:00

commit ba6b56ae68 (parent 0af314b295)
Fix some new clippy lints

14 changed files with 22 additions and 37 deletions
@@ -200,12 +200,7 @@ pub fn find_matching_cors_rule<'a>(
                 None => vec![],
             };
             return Ok(cors_config.iter().find(|rule| {
-                cors_rule_matches(
-                    rule,
-                    origin,
-                    &req.method().to_string(),
-                    request_headers.iter(),
-                )
+                cors_rule_matches(rule, origin, req.method().as_ref(), request_headers.iter())
             }));
         }
     }
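Note on the change above: `http::Method` implements `AsRef<str>`, so `req.method().as_ref()` hands `cors_rule_matches` a borrowed `&str` directly instead of allocating a temporary `String` just to borrow it. A minimal standalone sketch of the same pattern (the `Method` type and `matches_get` function here are illustrative stand-ins, not Garage code):

    struct Method(&'static str);

    impl AsRef<str> for Method {
        fn as_ref(&self) -> &str {
            self.0
        }
    }

    fn matches_get(method: &str) -> bool {
        method == "GET"
    }

    fn main() {
        let m = Method("GET");
        // Old style: allocate a String, then immediately borrow it.
        assert!(matches_get(&m.as_ref().to_string()));
        // New style: borrow the &str directly, no allocation.
        assert!(matches_get(m.as_ref()));
    }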
@@ -1042,12 +1042,12 @@ mod tests {

         query.common.prefix = "a/".to_string();
         assert_eq!(
-            common_prefix(&objs.get(0).unwrap(), &query.common),
+            common_prefix(objs.get(0).unwrap(), &query.common),
             Some("a/b/")
         );

         query.common.prefix = "a/b/".to_string();
-        assert_eq!(common_prefix(&objs.get(0).unwrap(), &query.common), None);
+        assert_eq!(common_prefix(objs.get(0).unwrap(), &query.common), None);
     }

     #[test]

@@ -1272,7 +1272,7 @@ mod tests {
         Version {
             bucket_id: uuid,
             key: "a".to_string(),
-            uuid: uuid,
+            uuid,
             deleted: false.into(),
             blocks: crdt::Map::<VersionBlockKey, VersionBlock>::from_iter(blocks),
             parts_etags: crdt::Map::<u64, String>::from_iter(etags),
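Note on the two test hunks above: `objs.get(0).unwrap()` already yields a reference, so the extra `&` that clippy's needless_borrow lint flags can simply be dropped, and `uuid: uuid` collapses to the field-init shorthand `uuid` (redundant_field_names). A small self-contained sketch of both patterns, using made-up types rather than the actual Garage ones:

    #[derive(Debug)]
    struct Version {
        uuid: u64,
        deleted: bool,
    }

    fn describe(v: &Version) -> String {
        format!("{:?}", v)
    }

    fn main() {
        let uuid = 42;
        // Field-init shorthand instead of `uuid: uuid`.
        let versions = vec![Version { uuid, deleted: false }];

        // `get(0)` already returns Option<&Version>, so no extra `&` is needed
        // when passing the unwrapped reference along.
        let first = versions.get(0).unwrap();
        println!("{}", describe(first));
    }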
@@ -259,8 +259,7 @@ impl RoutingRuleInner {
         let has_prefix = self
             .condition
             .as_ref()
-            .map(|c| c.prefix.as_ref())
-            .flatten()
+            .and_then(|c| c.prefix.as_ref())
             .is_some();
         self.redirect.validate(has_prefix)
     }
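Note: this hunk, like most of the ones below, fixes clippy's map_flatten lint. When the closure passed to `map` itself returns an `Option`, `.map(f).flatten()` is exactly `.and_then(f)`. A minimal sketch with invented data (not the `RoutingRuleInner` type itself):

    struct Condition {
        prefix: Option<String>,
    }

    fn main() {
        let condition = Some(Condition {
            prefix: Some("docs/".to_string()),
        });

        // Old: map produces Option<Option<&String>>, flatten removes one level.
        let old = condition.as_ref().map(|c| c.prefix.as_ref()).flatten();
        // New: and_then maps and flattens in one step.
        let new = condition.as_ref().and_then(|c| c.prefix.as_ref());

        assert_eq!(old, new);
    }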
@@ -51,7 +51,7 @@ pub async fn check_payload_signature(

     let canonical_request = canonical_request(
         request.method(),
-        &request.uri().path().to_string(),
+        request.uri().path(),
         &canonical_query_string(request.uri()),
         &headers,
         &authorization.signed_headers,
@@ -115,7 +115,7 @@ async fn cli_command(opt: Opt) -> Result<(), Error> {
     } else {
         let node_id = garage_rpc::system::read_node_id(&config.as_ref().unwrap().metadata_dir)
             .err_context(READ_KEY_ERROR)?;
-        if let Some(a) = config.as_ref().map(|c| c.rpc_public_addr).flatten() {
+        if let Some(a) = config.as_ref().and_then(|c| c.rpc_public_addr) {
             (node_id, a)
         } else {
             let default_addr = SocketAddr::new(
@@ -27,7 +27,7 @@ async fn test_bucket_all() {
             .buckets
             .as_ref()
             .unwrap()
-            .into_iter()
+            .iter()
             .filter(|x| x.name.as_ref().is_some())
             .find(|x| x.name.as_ref().unwrap() == "hello")
             .is_some());

@@ -79,7 +79,7 @@ async fn test_bucket_all() {
             .buckets
             .as_ref()
             .unwrap()
-            .into_iter()
+            .iter()
             .filter(|x| x.name.as_ref().is_some())
             .find(|x| x.name.as_ref().unwrap() == "hello")
             .is_none());
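Note: in the two hunks above, `.as_ref().unwrap()` yields a `&Vec<_>`, and calling `.into_iter()` on a reference iterates by reference anyway, which is what clippy's into_iter_on_ref lint points out; `.iter()` says the same thing more plainly. A standalone sketch of the pattern (the bucket list here is invented for illustration):

    fn main() {
        let buckets: Option<Vec<String>> = Some(vec!["hello".to_string(), "other".to_string()]);

        // On a &Vec<String>, .into_iter() and .iter() are the same iterator over &String.
        let found = buckets
            .as_ref()
            .unwrap()
            .iter()
            .any(|name| name.as_str() == "hello");

        assert!(found);
    }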
@@ -527,8 +527,8 @@ async fn test_listmultipart() {
         upnext = r.next_upload_id_marker;

         loopcnt += 1;
-        upcnt += r.uploads.unwrap_or(vec![]).len();
-        pfxcnt += r.common_prefixes.unwrap_or(vec![]).len();
+        upcnt += r.uploads.unwrap_or_default().len();
+        pfxcnt += r.common_prefixes.unwrap_or_default().len();

         if next.is_none() {
             break;
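Note: the argument of `unwrap_or` is evaluated eagerly, so `unwrap_or(vec![])` spells out a fallback that `unwrap_or_default()` expresses more idiomatically (likely clippy's or_fun_call family of lints, though the diff does not name it). A short sketch with invented counters:

    fn main() {
        let uploads: Option<Vec<String>> = None;
        let common_prefixes: Option<Vec<String>> = Some(vec!["a/".to_string()]);

        // unwrap_or_default() falls back to Vec::default() (an empty Vec) when the
        // Option is None, without writing the fallback value out by hand.
        let upcnt = uploads.unwrap_or_default().len();
        let pfxcnt = common_prefixes.unwrap_or_default().len();

        assert_eq!(upcnt, 0);
        assert_eq!(pfxcnt, 1);
    }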
@@ -124,7 +124,7 @@ async fn test_uploadlistpart() {

     assert!(r.part_number_marker.is_none());
     assert!(r.next_part_number_marker.is_some());
-    assert_eq!(r.max_parts, 1 as i32);
+    assert_eq!(r.max_parts, 1_i32);
     assert!(r.is_truncated);
     assert_eq!(r.key.unwrap(), "a");
     assert_eq!(r.upload_id.unwrap().as_str(), uid.as_str());

@@ -146,7 +146,7 @@ async fn test_uploadlistpart() {
         r2.part_number_marker.as_ref().unwrap(),
         r.next_part_number_marker.as_ref().unwrap()
     );
-    assert_eq!(r2.max_parts, 1 as i32);
+    assert_eq!(r2.max_parts, 1_i32);
     assert!(r2.is_truncated);
     assert_eq!(r2.key.unwrap(), "a");
     assert_eq!(r2.upload_id.unwrap().as_str(), uid.as_str());
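Note: `1 as i32` casts an integer literal to a type a suffixed literal can state directly, and clippy flags the cast as unnecessary (unnecessary_cast); `1_i32` says the same thing without the cast. A trivial sketch:

    fn main() {
        let max_parts = 1_i32; // same value as `1 as i32`, no cast needed
        assert_eq!(max_parts, 1);
    }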
@@ -30,8 +30,7 @@ impl<'a> BucketHelper<'a> {
         // the AWS spec, and hex-encoded UUIDs are 64 chars long.
         let hexbucket = hex::decode(bucket_name.as_str())
             .ok()
-            .map(|by| Uuid::try_from(&by))
-            .flatten();
+            .and_then(|by| Uuid::try_from(&by));
         if let Some(bucket_id) = hexbucket {
             Ok(self
                 .0

@@ -46,8 +45,7 @@ impl<'a> BucketHelper<'a> {
             .bucket_alias_table
             .get(&EmptyKey, bucket_name)
             .await?
-            .map(|x| *x.state.get())
-            .flatten())
+            .and_then(|x| *x.state.get()))
     }
 }

@@ -106,8 +106,7 @@ impl Key {
     /// Get permissions for a bucket
     pub fn bucket_permissions(&self, bucket: &Uuid) -> BucketKeyPerm {
         self.params()
-            .map(|params| params.authorized_buckets.get(bucket))
-            .flatten()
+            .and_then(|params| params.authorized_buckets.get(bucket))
             .cloned()
             .unwrap_or(BucketKeyPerm::NO_PERMISSIONS)
     }
@@ -51,10 +51,8 @@ pub async fn get_consul_nodes(
         let pubkey = ent
             .node_meta
             .get("pubkey")
-            .map(|k| hex::decode(&k).ok())
-            .flatten()
-            .map(|k| NodeID::from_slice(&k[..]))
-            .flatten();
+            .and_then(|k| hex::decode(&k).ok())
+            .and_then(|k| NodeID::from_slice(&k[..]));
         if let (Some(ip), Some(pubkey)) = (ip, pubkey) {
             ret.push((pubkey, SocketAddr::new(ip, ent.service_port)));
         } else {
@@ -63,10 +63,8 @@ pub async fn get_kubernetes_nodes(
         let pubkey = &node
             .metadata
             .name
-            .map(|k| hex::decode(&k).ok())
-            .flatten()
-            .map(|k| NodeID::from_slice(&k[..]))
-            .flatten();
+            .and_then(|k| hex::decode(&k).ok())
+            .and_then(|k| NodeID::from_slice(&k[..]));

         if let Some(pubkey) = pubkey {
             ret.push((*pubkey, SocketAddr::new(node.spec.address, node.spec.port)))
@@ -322,8 +322,7 @@ impl RpcHelper {
         let peer_avg_ping = peer_list
             .iter()
             .find(|x| x.id.as_ref() == to.as_slice())
-            .map(|pi| pi.avg_ping)
-            .flatten()
+            .and_then(|pi| pi.avg_ping)
             .unwrap_or_else(|| Duration::from_secs(1));
         (
             to != self.0.our_node_id,
@@ -175,8 +175,7 @@ async fn serve_file(garage: Arc<Garage>, req: &Request<Body>) -> Result<Response
         .bucket_alias_table
         .get(&EmptyKey, &bucket_name.to_string())
         .await?
-        .map(|x| x.state.take())
-        .flatten()
+        .and_then(|x| x.state.take())
         .ok_or(Error::NotFound)?;

     // Check bucket isn't deleted and has website access enabled