Repository: https://git.deuxfleurs.fr/Deuxfleurs/garage.git

commit de4276202a (parent 1b450c4b49)

    Improve CLI, adapt tests, update documentation

31 changed files with 1235 additions and 1022 deletions
Cargo.lock (generated, 2 lines changed)

@@ -874,7 +874,7 @@ dependencies = [
 [[package]]
 name = "netapp"
 version = "0.3.0"
-source = "git+https://git.deuxfleurs.fr/lx/netapp#9b64c27da68f7ac9049e02e26da918e871a63f07"
+source = "git+https://git.deuxfleurs.fr/lx/netapp#c20d36892bcccae580603249706ba60d54a46d7f"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -65,7 +65,7 @@ in let
 */
 ''^(src|tests)'' # fixed default
 ''.*\.(rs|toml)$'' # fixed default
-''^(crdt|replication)'' # our crate submodules
+''^(crdt|replication|cli)'' # our crate submodules
 ];
 };
 
@@ -30,3 +30,4 @@
 
 - [Working Documents](./working_documents/index.md)
 - [Load Balancing Data](./working_documents/load_balancing.md)
+- [Migrating from 0.3 to 0.4](./working_documents/migration_04.md)
@@ -11,15 +11,12 @@ to get familiar with Garage's command line and usage patterns.
 
 ## Prerequisites
 
-To run a real-world deployment, make sure you the following conditions are met:
+To run a real-world deployment, make sure the following conditions are met:
 
 - You have at least three machines with sufficient storage space available.
 
 - Each machine has a public IP address which is reachable by other machines.
-  Running behind a NAT is possible, but having several Garage nodes behind a single NAT
-  is slightly more involved as each will have to have a different RPC port number
-  (the local port number of a node must be the same as the port number exposed publicly
-  by the NAT).
+  Running behind a NAT is likely to be possible but hasn't been tested for the latest version (TODO).
 
 - Ideally, each machine should have a SSD available in addition to the HDD you are dedicating
   to Garage. This will allow for faster access to metadata and has the potential
@@ -45,44 +42,22 @@ For our example, we will suppose the following infrastructure with IPv6 connectivity:
 ## Get a Docker image
 
 Our docker image is currently named `lxpz/garage_amd64` and is stored on the [Docker Hub](https://hub.docker.com/r/lxpz/garage_amd64/tags?page=1&ordering=last_updated).
-We encourage you to use a fixed tag (eg. `v0.3.0`) and not the `latest` tag.
-For this example, we will use the latest published version at the time of the writing which is `v0.3.0` but it's up to you
+We encourage you to use a fixed tag (eg. `v0.4.0`) and not the `latest` tag.
+For this example, we will use the latest published version at the time of the writing which is `v0.4.0` but it's up to you
 to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/lxpz/garage_amd64/tags?page=1&ordering=last_updated).
 
 For example:
 
 ```
-sudo docker pull lxpz/garage_amd64:v0.3.0
+sudo docker pull lxpz/garage_amd64:v0.4.0
 ```
 
-
-## Generating TLS certificates
-
-You first need to generate TLS certificates to encrypt traffic between Garage nodes
-(reffered to as RPC traffic).
-
-To generate your TLS certificates, run on your machine:
-
-```
-wget https://git.deuxfleurs.fr/Deuxfleurs/garage/raw/branch/main/genkeys.sh
-chmod +x genkeys.sh
-./genkeys.sh
-```
-
-It will creates a folder named `pki/` containing the keys that you will used for the cluster.
-These files will have to be copied to all of your cluster nodes, as explained below.
-
 
 ## Deploying and configuring Garage
 
 On each machine, we will have a similar setup,
 especially you must consider the following folders/files:
 
-- `/etc/garage/garage.toml`: Garage daemon's configuration (see below)
+- `/etc/garage.toml`: Garage daemon's configuration (see below)
 
-- `/etc/garage/pki/`: Folder containing Garage certificates,
-  must be generated on your computer and copied on the servers.
-  Only the files `garage-ca.crt`, `garage.crt` and `garage.key` are necessary.
-
 - `/var/lib/garage/meta/`: Folder containing Garage's metadata,
   put this folder on a SSD if possible
@@ -91,7 +66,7 @@ especially you must consider the following folders/files:
   this folder will be your main data storage and must be on a large storage (e.g. large HDD)
 
 
-A valid `/etc/garage/garage.toml` for our cluster would be:
+A valid `/etc/garage.toml` for our cluster would look as follows:
 
 ```toml
 metadata_dir = "/var/lib/garage/meta"
@@ -100,18 +75,8 @@ data_dir = "/var/lib/garage/data"
 replication_mode = "3"
 
 rpc_bind_addr = "[::]:3901"
-
-bootstrap_peers = [
-  "[fc00:1::1]:3901",
-  "[fc00:1::2]:3901",
-  "[fc00:B::1]:3901",
-  "[fc00:F::1]:3901",
-]
-
-[rpc_tls]
-ca_cert = "/etc/garage/pki/garage-ca.crt"
-node_cert = "/etc/garage/pki/garage.crt"
-node_key = "/etc/garage/pki/garage.key"
+rpc_public_addr = "<this node's public IP>:3901"
+rpc_secret = "<RPC secret>"
 
 [s3_api]
 s3_region = "garage"
@@ -123,11 +88,14 @@ root_domain = ".web.garage"
 index = "index.html"
 ```
 
-Please make sure to change `bootstrap_peers` to **your** IP addresses!
+Check the following for your configuration files:
 
-Check the [configuration file reference documentation](../reference_manual/configuration.md)
-to learn more about all available configuration options.
+- Make sure `rpc_public_addr` contains the public IP address of the node you are configuring.
+  This parameter is optional but recommended: if your nodes have trouble communicating with
+  one another, consider adding it.
+
+- Make sure `rpc_secret` is the same value on all nodes. It should be a 32-bytes hex-encoded secret key.
+  You can generate such a key with `openssl rand -hex 32`.
 
 ## Starting Garage using Docker
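
The `rpc_secret` and `rpc_public_addr` checklist items in the hunk above lend themselves to a short example. A minimal sketch of generating one shared secret and appending it to `/etc/garage.toml` on every machine; the host names (`mercury`, `venus`, `earth`, `mars`) are placeholders for this illustration, not values from the commit:

```bash
# Generate a 32-bytes hex-encoded secret once, on your workstation.
SECRET=$(openssl rand -hex 32)

# Push the same rpc_secret to every node; rpc_public_addr stays per-node.
for host in mercury venus earth mars; do
    ssh "$host" "echo 'rpc_secret = \"$SECRET\"' | sudo tee -a /etc/garage.toml"
done
```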
@@ -139,11 +107,10 @@ docker run \
   --name garaged \
   --restart always \
   --network host \
-  -v /etc/garage/pki:/etc/garage/pki \
-  -v /etc/garage/garage.toml:/garage/garage.toml \
+  -v /etc/garage.toml:/etc/garage.toml \
   -v /var/lib/garage/meta:/var/lib/garage/meta \
   -v /var/lib/garage/data:/var/lib/garage/data \
-  lxpz/garage_amd64:v0.3.0
+  lxpz/garage_amd64:v0.4.0
 ```
 
 It should be restarted automatically at each reboot.
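
With the configuration now mounted straight from `/etc/garage.toml`, a quick way to verify that the container came up is plain Docker tooling (nothing Garage-specific here):

```bash
# Confirm the container is running, then inspect the daemon's startup logs.
sudo docker ps --filter name=garaged
sudo docker logs --tail 20 garaged
```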
@@ -155,101 +122,102 @@ but please check the relase notes before doing so!
 To upgrade, simply stop and remove this container and
 start again the command with a new version of Garage.
 
 
 ## Controling the daemon
 
 The `garage` binary has two purposes:
-- it acts as a daemon when launched with `garage server ...`
+- it acts as a daemon when launched with `garage server`
 - it acts as a control tool for the daemon when launched with any other command
 
-In this section, we will see how to use the `garage` binary as a control tool for the daemon we just started.
-You first need to get a shell having access to this binary. For instance, enter the Docker container with:
+Ensure an appropriate `garage` binary (the same version as your Docker image) is available in your path.
+If your configuration file is at `/etc/garage.toml`, the `garage` binary should work with no further change.
+
+You can test your `garage` CLI utility by running a simple command such as:
 
 ```bash
-sudo docker exec -ti garaged bash
+garage status
 ```
 
-You will now have a shell where the Garage binary is available as `/garage/garage`
+At this point, nodes are not yet talking to one another.
+Your output should therefore look as follows:
 
-*You can also install the binary on your machine to remotely control the cluster.*
-
-## Talk to the daemon and create an alias
-
-`garage` requires 4 options to talk with the daemon:
-
 ```
---ca-cert <ca-cert>
---client-cert <client-cert>
---client-key <client-key>
--h, --rpc-host <rpc-host>
+Mercury$ garage status
+==== HEALTHY NODES ====
+ID                Hostname  Address           Tag  Zone  Capacity
+563e1ac825ee3323… Mercury   [fc00:1::1]:3901  NO ROLE ASSIGNED
 ```
 
-The 3 first ones are certificates and keys needed by TLS, the last one is simply the address of Garage's RPC endpoint.
 
-If you are invoking `garage` from a server node directly, you do not need to set `--rpc-host`
-as the default value `127.0.0.1:3901` will allow it to contact Garage correctly.
+## Connecting nodes together
 
-To avoid typing the 3 first options each time we want to run a command,
-you can use the following alias:
+When your Garage nodes first start, they will generate a local node identifier
+(based on a public/private key pair).
+
+To obtain the node identifier of a node, once it is generated,
+run `garage node-id`.
+This will print keys as follows:
 
 ```bash
-alias garagectl='/garage/garage \
-    --ca-cert /etc/garage/pki/garage-ca.crt \
-    --client-cert /etc/garage/pki/garage.crt \
-    --client-key /etc/garage/pki/garage.key'
+Mercury$ garage node-id
+563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901
+
+Venus$ garage node-id
+86f0f26ae4afbd59aaf9cfb059eefac844951efd5b8caeec0d53f4ed6c85f332@[fc00:1::2]:3901
+
+etc.
 ```
 
-You can now use all of the commands presented in the [quick start guide](../quick_start/index.md),
-simply replace occurences of `garage` by `garagectl`.
+You can then instruct nodes to connect to one another as follows:
 
-#### Test the alias
+```bash
+# Instruct Venus to connect to Mercury (this will establish communication both ways)
+Venus$ garage node connect 563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901
 
-You can test your alias by running a simple command such as:
-
-```
-garagectl status
 ```
 
-You should get something like that as result:
+You don't need to instruct all nodes to connect to all other nodes:
+nodes will discover one another transitively.
+
+Now if you run `garage status` on any node, you should have an output that looks as follows:
 
 ```
-Healthy nodes:
-8781c50c410a41b3… Mercury [fc00:1::1]:3901 UNCONFIGURED/REMOVED
-2a638ed6c775b69a… Venus [fc00:1::2]:3901 UNCONFIGURED/REMOVED
-68143d720f20c89d… Earth [fc00:B::1]:3901 UNCONFIGURED/REMOVED
-212f7572f0c89da9… Mars [fc00:F::1]:3901 UNCONFIGURED/REMOVED
+==== HEALTHY NODES ====
+ID                Hostname  Address           Tag  Zone  Capacity
+563e1ac825ee3323… Mercury   [fc00:1::1]:3901  NO ROLE ASSIGNED
+86f0f26ae4afbd59… Venus     [fc00:1::2]:3901  NO ROLE ASSIGNED
+68143d720f20c89d… Earth     [fc00:B::1]:3901  NO ROLE ASSIGNED
+212f7572f0c89da9… Mars      [fc00:F::1]:3901  NO ROLE ASSIGNED
 ```
 
-## Configuring a cluster
+## Giving roles to nodes
 
 We will now inform Garage of the disk space available on each node of the cluster
 as well as the zone (e.g. datacenter) in which each machine is located.
 
-For our example, we will suppose we have the following infrastructure (Capacity, Identifier and Datacenter are specific values to Garage described in the following):
+For our example, we will suppose we have the following infrastructure
+(Capacity, Identifier and Zone are specific values to Garage described in the following):
 
 | Location | Name    | Disk Space | `Capacity` | `Identifier` | `Zone` |
 |----------|---------|------------|------------|--------------|--------|
-| Paris    | Mercury | 1 To       | `2`        | `8781c5`     | `par1` |
-| Paris    | Venus   | 2 To       | `4`        | `2a638e`     | `par1` |
-| London   | Earth   | 2 To       | `4`        | `68143d`     | `lon1` |
-| Brussels | Mars    | 1.5 To     | `3`        | `212f75`     | `bru1` |
+| Paris    | Mercury | 1 To       | `2`        | `563e`       | `par1` |
+| Paris    | Venus   | 2 To       | `4`        | `86f0`       | `par1` |
+| London   | Earth   | 2 To       | `4`        | `6814`       | `lon1` |
+| Brussels | Mars    | 1.5 To     | `3`        | `212f`       | `bru1` |
 
 #### Node identifiers
 
 After its first launch, Garage generates a random and unique identifier for each nodes, such as:
 
 ```
-8781c50c410a41b363167e9d49cc468b6b9e4449b6577b64f15a249a149bdcbc
+563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d
 ```
 
-Often a shorter form can be used, containing only the beginning of the identifier, like `8781c5`,
+Often a shorter form can be used, containing only the beginning of the identifier, like `563e`,
 which identifies the server "Mercury" located in "Paris" according to our previous table.
 
 The most simple way to match an identifier to a node is to run:
 
 ```
-garagectl status
+garage status
 ```
 
 It will display the IP address associated with each node;
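
The `garage node-id` / `garage node connect` workflow introduced above can be scripted. A sketch that connects the local node to every other machine in one go; it assumes SSH access to the hosts, whose names are placeholders here:

```bash
# Fetch each peer's full identifier remotely and connect to it locally.
# Discovery is transitive, so running this from a single node is enough.
for host in venus earth mars; do
    garage node connect "$(ssh "$host" garage node-id)"
done
```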
@@ -287,16 +255,16 @@ have 66% chance of being stored by Venus and 33% chance of being stored by Mercury.
 Given the information above, we will configure our cluster as follow:
 
 ```
-garagectl node configure -z par1 -c 2 -t mercury 8781c5
-garagectl node configure -z par1 -c 4 -t venus 2a638e
-garagectl node configure -z lon1 -c 4 -t earth 68143d
-garagectl node configure -z bru1 -c 3 -t mars 212f75
+garage node configure -z par1 -c 2 -t mercury 563e
+garage node configure -z par1 -c 4 -t venus 86f0
+garage node configure -z lon1 -c 4 -t earth 6814
+garage node configure -z bru1 -c 3 -t mars 212f
 ```
 
 
 ## Using your Garage cluster
 
-Creating buckets and managing keys is done using the `garagectl` CLI,
+Creating buckets and managing keys is done using the `garage` CLI,
 and is covered in the [quick start guide](../quick_start/index.md).
 Remember also that the CLI is self-documented thanks to the `--help` flag and
 the `help` subcommand (e.g. `garage help`, `garage key --help`).
@@ -10,8 +10,6 @@ Following this guide is recommended before moving on to
 
 Note that this kind of deployment should not be used in production, as it provides
 no redundancy for your data!
-We will also skip intra-cluster TLS configuration, meaning that if you add nodes
-to your cluster, communication between them will not be secure.
 
 ## Get a binary
@@ -30,7 +28,10 @@ you can [build Garage from source](../cookbook/from_source.md).
 ## Writing a first configuration file
 
 This first configuration file should allow you to get started easily with the simplest
-possible Garage deployment:
+possible Garage deployment.
+**Save it as `/etc/garage.toml`.**
+You can also store it somewhere else, but you will have to specify `-c path/to/garage.toml`
+at each invocation of the `garage` binary (for example: `garage -c ./garage.toml server`, `garage -c ./garage.toml status`).
 
 ```toml
 metadata_dir = "/tmp/meta"
@@ -39,10 +40,10 @@ data_dir = "/tmp/data"
 replication_mode = "none"
 
 rpc_bind_addr = "[::]:3901"
+rpc_public_addr = "127.0.0.1:3901"
+rpc_secret = "1799bccfd7411eddcf9ebd316bc1f5287ad12a68094e1c6ac6abde7e6feae1ec"
 
-bootstrap_peers = [
-  "127.0.0.1:3901",
-]
+bootstrap_peers = []
 
 [s3_api]
 s3_region = "garage"
@@ -54,7 +55,10 @@ root_domain = ".web.garage"
 index = "index.html"
 ```
 
-Save your configuration file as `garage.toml`.
+The `rpc_secret` value provided above is just an example. It will work, but in
+order to secure your cluster you will need to use another one. You can generate
+such a value with `openssl rand -hex 32`.
+
 
 As you can see in the `metadata_dir` and `data_dir` parameters, we are saving Garage's data
 in `/tmp` which gets erased when your system reboots. This means that data stored on this
@@ -67,15 +71,15 @@ your data to be persisted properly.
 Use the following command to launch the Garage server with our configuration file:
 
 ```
-RUST_LOG=garage=info garage server -c garage.toml
+RUST_LOG=garage=info garage server
 ```
 
 You can tune Garage's verbosity as follows (from less verbose to more verbose):
 
 ```
-RUST_LOG=garage=info garage server -c garage.toml
-RUST_LOG=garage=debug garage server -c garage.toml
-RUST_LOG=garage=trace garage server -c garage.toml
+RUST_LOG=garage=info garage server
+RUST_LOG=garage=debug garage server
+RUST_LOG=garage=trace garage server
 ```
 
 Log level `info` is recommended for most use cases.
@@ -85,11 +89,12 @@ Log level `debug` can help you check why your S3 API calls are not working.
 ## Checking that Garage runs correctly
 
 The `garage` utility is also used as a CLI tool to configure your Garage deployment.
-It tries to connect to a Garage server through the RPC protocol, by default looking
-for a Garage server at `localhost:3901`.
+It uses values from the TOML configuration file to find the Garage daemon running on the
+local node, therefore if your configuration file is not at `/etc/garage.toml` you will
+again have to specify `-c path/to/garage.toml`.
 
-Since our deployment already binds to port 3901, the following command should be sufficient
-to show Garage's status:
+If the `garage` CLI is able to correctly detect the parameters of your local Garage node,
+the following command should be enough to show the status of your cluster:
 
 ```
 garage status
@@ -98,8 +103,9 @@ garage status
 This should show something like this:
 
 ```
-Healthy nodes:
-2a638ed6c775b69a… linuxbox 127.0.0.1:3901 UNCONFIGURED/REMOVED
+==== HEALTHY NODES ====
+ID                Hostname  Address         Tag  Zone  Capacity
+563e1ac825ee3323… linuxbox  127.0.0.1:3901  NO ROLE ASSIGNED
 ```
 
 ## Configuring your Garage node
@@ -117,7 +123,7 @@ garage node configure -z dc1 -c 1 <node_id>
 
 where `<node_id>` corresponds to the identifier of the node shown by `garage status` (first column).
 You can enter simply a prefix of that identifier.
-For instance here you could write just `garage node configure -z dc1 -c 1 2a63`.
+For instance here you could write just `garage node configure -z dc1 -c 1 563e`.
 
 
 
@@ -10,31 +10,26 @@ block_size = 1048576
 
 replication_mode = "3"
 
+rpc_secret = "4425f5c26c5e11581d3223904324dcb5b5d5dfb14e5e7f35e38c595424f5f1e6"
 rpc_bind_addr = "[::]:3901"
+rpc_public_addr = "[fc00:1::1]:3901"
 
 bootstrap_peers = [
-  "[fc00:1::1]:3901",
-  "[fc00:1::2]:3901",
-  "[fc00:B::1]:3901",
-  "[fc00:F::1]:3901",
+  "563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901",
+  "86f0f26ae4afbd59aaf9cfb059eefac844951efd5b8caeec0d53f4ed6c85f332@[fc00:1::2]:3901",
+  "681456ab91350f92242e80a531a3ec9392cb7c974f72640112f90a600d7921a4@[fc00:B::1]:3901",
+  "212fd62eeaca72c122b45a7f4fa0f55e012aa5e24ac384a72a3016413fa724ff@[fc00:F::1]:3901",
 ]
 
 consul_host = "consul.service"
 consul_service_name = "garage-daemon"
 
-max_concurrent_rpc_requests = 12
-
 sled_cache_capacity = 134217728
 sled_flush_every_ms = 2000
 
-[rpc_tls]
-ca_cert = "/etc/garage/pki/garage-ca.crt"
-node_cert = "/etc/garage/pki/garage.crt"
-node_key = "/etc/garage/pki/garage.key"
-
 [s3_api]
-s3_region = "garage"
 api_bind_addr = "[::]:3900"
+s3_region = "garage"
 
 [s3_web]
 bind_addr = "[::]:3902"
@@ -63,10 +58,15 @@ when [configuring it](../getting_started/05_cluster.md).
 
 #### `block_size`
 
-Garage splits stored objects in consecutive chunks of size `block_size` (except the last
-one which might be standard). The default size is 1MB and should work in most cases.
-If you are interested in tuning this, feel free to do so (and remember to report your
-findings to us!)
+Garage splits stored objects in consecutive chunks of size `block_size`
+(except the last one which might be smaller). The default size is 1MB and
+should work in most cases. If you are interested in tuning this, feel free
+to do so (and remember to report your findings to us!). If this value is
+changed for a running Garage installation, only files newly uploaded will be
+affected. Previously uploaded files will remain available. This however
+means that chunks from existing files will not be deduplicated with chunks
+from newly uploaded files, meaning you might use more storage space than is
+optimally possible.
 
 #### `replication_mode`
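
A worked example of what the 1MB default in the paragraph above means in practice; just shell arithmetic, independent of any Garage command:

```bash
# A 5.5 MiB object stored with block_size = 1048576 (1 MiB) is split into
# ceil(5.5) = 6 chunks; the sixth chunk holds only the trailing 0.5 MiB.
BLOCK_SIZE=1048576
OBJECT_SIZE=$((5 * 1048576 + 524288))
echo $(( (OBJECT_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE ))   # prints 6
```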
@@ -97,6 +97,14 @@ Never run a Garage cluster where that is not the case.**
 Changing the `replication_mode` of a cluster might work (make sure to shut down all nodes
 and changing it everywhere at the time), but is not officially supported.
 
+#### `rpc_secret`
+
+Garage uses a secret key that is shared between all nodes of the cluster
+in order to identify these nodes and allow them to communicate together.
+This key should be specified here in the form of a 32-byte hex-encoded
+random string. Such a string can be generated with a command
+such as `openssl rand -hex 32`.
+
 #### `rpc_bind_addr`
 
 The address and port on which to bind for inter-cluster communcations
@@ -106,10 +114,28 @@ the node, even in the case of a NAT: the NAT should be configured to forward the
 port number to the same internal port nubmer. This means that if you have several nodes running
 behind a NAT, they should each use a different RPC port number.
 
+#### `rpc_public_addr`
+
+The address and port that other nodes need to use to contact this node for
+RPC calls. **This parameter is optional but recommended.** In case you have
+a NAT that binds the RPC port to a port that is different on your public IP,
+this field might help making it work.
+
 #### `bootstrap_peers`
 
-A list of IPs and ports on which to contact other Garage peers of this cluster.
-This should correspond to the RPC ports set up with `rpc_bind_addr`.
+A list of peer identifiers on which to contact other Garage peers of this cluster.
+These peer identifiers have the following syntax:
+
+```
+<node public key>@<node public IP or hostname>:<port>
+```
+
+In the case where `rpc_public_addr` is correctly specified in the
+configuration file, the full identifier of a node including IP and port can
+be obtained by running `garage node-id` and then included directly in the
+`bootstrap_peers` list of other nodes. Otherwise, only the node's public
+key will be returned by `garage node-id` and you will have to add the IP
+yourself.
+
 #### `consul_host` and `consul_service_name`
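
Since `garage node-id` prints the exact string this parameter expects (when `rpc_public_addr` is set), a `bootstrap_peers` block can be assembled mechanically. A sketch assuming SSH access to placeholder hosts:

```bash
# Emit a ready-to-paste bootstrap_peers array from each node's identifier.
echo 'bootstrap_peers = ['
for host in mercury venus earth mars; do
    echo "    \"$(ssh "$host" garage node-id)\","
done
echo ']'
```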
@@ -121,12 +147,6 @@ The `consul_host` parameter should be set to the hostname of the Consul server,
 and `consul_service_name` should be set to the service name under which Garage's
 RPC ports are announced.
 
-#### `max_concurrent_rpc_requests`
-
-Garage implements rate limiting for RPC requests: no more than
-`max_concurrent_rpc_requests` concurrent outbound RPC requests will be made
-by a Garage node (additionnal requests will be put in a waiting queue).
-
 #### `sled_cache_capacity`
 
 This parameter can be used to tune the capacity of the cache used by
@@ -143,21 +163,6 @@ of a power outage (though this should not matter much as data is replicated on other
 nodes). The default value, 2000ms, should be appropriate for most use cases.
 
 
-## The `[rpc_tls]` section
-
-This section should be used to configure the TLS certificates used to encrypt
-intra-cluster traffic (RPC traffic). The following parameters should be set:
-
-- `ca_cert`: the certificate of the CA that is allowed to sign individual node certificates
-- `node_cert`: the node certificate for the current node
-- `node_key`: the key associated with the node certificate
-
-Note tha several nodes may use the same node certificate, as long as it is signed
-by the CA.
-
-If this section is absent, TLS is not used to encrypt intra-cluster traffic.
-
-
 ## The `[s3_api]` section
 
 #### `api_bind_addr`
|
@ -23,76 +23,35 @@ Not implemented:
|
||||||
|
|
||||||
All APIs that are not mentionned are not implemented and will return a 400 bad request.
|
All APIs that are not mentionned are not implemented and will return a 400 bad request.
|
||||||
|
|
||||||
#### AbortMultipartUpload
|
| Endpoint | Status |
|
||||||
|
|------------------------------|----------------------------------|
|
||||||
|
| AbortMultipartUpload | Implemented |
|
||||||
|
| CompleteMultipartUpload | Implemented |
|
||||||
|
| CopyObject | Implemented |
|
||||||
|
| CreateBucket | Unsupported, stub (see below) |
|
||||||
|
| CreateMultipartUpload | Implemented |
|
||||||
|
| DeleteBucket | Unsupported (see below) |
|
||||||
|
| DeleteObject | Implemented |
|
||||||
|
| DeleteObjects | Implemented |
|
||||||
|
| GetBucketLocation | Implemented |
|
||||||
|
| GetBucketVersioning | Stub (see below) |
|
||||||
|
| GetObject | Implemented |
|
||||||
|
| HeadBucket | Implemented |
|
||||||
|
| HeadObject | Implemented |
|
||||||
|
| ListBuckets | Implemented |
|
||||||
|
| ListObjects | Implemented, bugs? (see below) |
|
||||||
|
| ListObjectsV2 | Implemented |
|
||||||
|
| PutObject | Implemented |
|
||||||
|
| UploadPart | Implemented |
|
||||||
|
|
||||||
Implemented.
|
|
||||||
|
|
||||||
#### CompleteMultipartUpload
|
|
||||||
|
|
||||||
Implemented badly. Garage will not check that all the parts stored correspond to the list given by the client in the request body. This means that the multipart upload might be completed with an invalid size. This is a bug and will be fixed.
|
- **CreateBucket:** Garage does not yet accept creating buckets or giving access using API calls, it has to be done using the CLI tools. CreateBucket will return a 200 if the bucket exists and user has write access, and a 403 Forbidden in all other cases.
|
||||||
|
|
||||||
#### CopyObject
|
- **DeleteBucket:** Garage does not yet accept deleting buckets using API calls, it has to be done using the CLI tools. This request will return a 403 Forbidden.
|
||||||
|
|
||||||
Implemented.
|
- **GetBucketVersioning:** Stub implementation (Garage does not yet support versionning so this always returns
|
||||||
|
|
||||||
#### CreateBucket
|
|
||||||
|
|
||||||
Garage does not accept creating buckets or giving access using API calls, it has to be done using the CLI tools. CreateBucket will return a 200 if the bucket exists and user has write access, and a 403 Forbidden in all other cases.
|
|
||||||
|
|
||||||
#### CreateMultipartUpload
|
|
||||||
|
|
||||||
Implemented.
|
|
||||||
|
|
||||||
#### DeleteBucket
|
|
||||||
|
|
||||||
Garage does not accept deleting buckets using API calls, it has to be done using the CLI tools. This request will return a 403 Forbidden.
|
|
||||||
|
|
||||||
#### DeleteObject
|
|
||||||
|
|
||||||
Implemented.
|
|
||||||
|
|
||||||
#### DeleteObjects
|
|
||||||
|
|
||||||
Implemented.
|
|
||||||
|
|
||||||
#### GetBucketLocation
|
|
||||||
|
|
||||||
Implemented.
|
|
||||||
|
|
||||||
#### GetBucketVersioning
|
|
||||||
|
|
||||||
Stub implementation (Garage does not yet support versionning so this always returns
|
|
||||||
"versionning not enabled").
|
"versionning not enabled").
|
||||||
|
|
||||||
#### GetObject
|
- **ListObjects:** Implemented, but there isn't a very good specification of what `encoding-type=url` covers so there might be some encoding bugs. In our implementation the url-encoded fields are in the same in ListObjects as they are in ListObjectsV2.
|
||||||
|
|
||||||
Implemented.
|
|
||||||
|
|
||||||
#### HeadBucket
|
|
||||||
|
|
||||||
Implemented.
|
|
||||||
|
|
||||||
#### HeadObject
|
|
||||||
|
|
||||||
Implemented.
|
|
||||||
|
|
||||||
#### ListBuckets
|
|
||||||
|
|
||||||
Implemented.
|
|
||||||
|
|
||||||
#### ListObjects
|
|
||||||
|
|
||||||
Implemented, but there isn't a very good specification of what `encoding-type=url` covers so there might be some encoding bugs. In our implementation the url-encoded fields are in the same in ListObjects as they are in ListObjectsV2.
|
|
||||||
|
|
||||||
#### ListObjectsV2
|
|
||||||
|
|
||||||
Implemented.
|
|
||||||
|
|
||||||
#### PutObject
|
|
||||||
|
|
||||||
Implemented.
|
|
||||||
|
|
||||||
#### UploadPart
|
|
||||||
|
|
||||||
Implemented.
|
|
||||||
|
|
||||||
|
|
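
The CreateBucket and DeleteBucket rows can be exercised against a running cluster with the stock AWS CLI. A sketch, assuming a bucket and key already created through the Garage CLI and the S3 API listening on port 3900 as in the sample configurations:

```bash
export AWS_ACCESS_KEY_ID=GK...          # key created with `garage key new`
export AWS_SECRET_ACCESS_KEY=...        # matching secret key

# Returns 200 because the bucket exists and the key has write access.
aws --endpoint-url http://127.0.0.1:3900 s3api create-bucket --bucket eprouvette

# Bucket deletion is CLI-only, so this should fail with 403 AccessDenied.
aws --endpoint-url http://127.0.0.1:3900 s3api delete-bucket --bucket eprouvette
```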
doc/book/src/working_documents/migration_04.md (new file, 61 lines)

@@ -0,0 +1,61 @@
+# Migrating from 0.3 to 0.4
+
+**Migrating from 0.3 to 0.4 is unsupported. This document is only intended to document the process internally for the Deuxfleurs cluster where we have to do it. Do not try it yourself, you will lose your data and we will not help you.**
+
+**Migrating from 0.2 to 0.4 will break everything for sure. Never try it.**
+
+The internal data format of Garage hasn't changed much between 0.3 and 0.4.
+The Sled database is still the same, and the data directory as well.
+
+The following has changed, all in the meta directory:
+
+- `node_id` in 0.3 contains the identifier of the current node. In 0.4, this file does nothing and should be deleted. It is replaced by `node_key` (the secret key) and `node_key.pub` (the associated public key). A node's identifier on the ring is its public key.
+
+- `peer_info` in 0.3 contains the list of peers saved automatically by Garage. The format has changed and it is now stored in `peer_list` (`peer_info` should be deleted).
+
+When migrating, all node identifiers will change. This also means that the assignment of data partitions on the ring will change, and lots of data will have to be rebalanced.
+
+- If your cluster has only 3 nodes, all nodes store everything, therefore nothing has to be rebalanced.
+
+- If your cluster has only 4 nodes, for any partition there will always be at least 2 nodes that stored data before that still store it after. Therefore the migration should in theory be transparent and Garage should continue to work during the rebalance.
+
+- If your cluster has 5 or more nodes, data will disappear during the migration. Do not migrate (fortunately we don't have this scenario at Deuxfleurs), or if you do, make Garage unavailable until things stabilize (disable web and api access).
+
+
+The migration steps are as follows:
+
+1. Prepare a new configuration file for 0.4. For each node, point to the same meta and data directories as Garage 0.3. Basically, the things that change are the following:
+
+   - No more `rpc_tls` section
+   - You have to generate a shared `rpc_secret` and put it in all config files
+   - `bootstrap_peers` has a different syntax as it has to contain node keys. Leave it empty and use `garage node-id` and `garage node connect` instead (new features of 0.4)
+   - put the publicly accessible RPC address of your node in `rpc_public_addr` if possible (it's optional but recommended)
+   - If you are using Consul, change the `consul_service_name` to NOT be the name advertised by Nomad. Now Garage is responsible for advertising its own service itself.
+
+2. Disable api and web access for some time, do `garage repair --all --yes tables` and `garage repair --all --yes blocks`, check the logs and check that all data seems to be synced correctly between nodes.
+
+3. Save somewhere the output of `garage status`. We will need this to remember how to reconfigure nodes in 0.4.
+
+4. Turn off Garage 0.3
+
+5. Back up metadata folders if you can (i.e. if you have space to do it somewhere). Backing up data folders could also be useful but that's much harder to do. If your filesystem supports snapshots, this could be a good time to use them.
+
+6. Turn on Garage 0.4
+
+7. At this point, running `garage status` should indicate that all nodes of the previous cluster are "unavailable". The nodes have new identifiers that should appear in healthy nodes once they can talk to one another (use `garage node connect` if necessary). They should have NO ROLE ASSIGNED at the moment.
+
+8. Prepare a script with several `garage node configure` commands that replace each of the v0.3 node ID with the corresponding v0.4 node ID, with the same zone/tag/capacity. For example if your node `drosera` had identifier `c24e` before and now has identifier `789a`, and it was configured with capacity `2` in zone `dc1`, put the following command in your script:
+
+```bash
+garage node configure 789a -z dc1 -c 2 -t drosera --replace c24e
+```
+
+9. Run your reconfiguration script. Check that the new output of `garage status` contains the correct node IDs with the correct values for capacity and zone. Old nodes should no longer be mentioned.
+
+10. If your cluster has 4 nodes or less, and you are feeling adventurous, you can reenable Web and API access now. Things will probably work.
+
+11. Garage might already be resyncing stuff. Issue a `garage repair --all --yes tables` and `garage repair --all --yes blocks` to force it to do so.
+
+12. Wait for resyncing activity to stop in the logs. Do steps 11 and 12 two or three times, until you see that when you issue the repair commands, nothing gets resynced any longer.
+
+13. Your upgraded cluster should be in a working state. Re-enable API and Web access and check that everything went well.
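
Steps 11 and 12 amount to repeating the repair pass until the logs go quiet. A sketch of that loop, using only the commands named in the document (the round count of three is the document's own suggestion, not a hard rule):

```bash
# Run the repair commands a few times, checking logs for resync activity
# between rounds; stop once a pass no longer resyncs anything.
for round in 1 2 3; do
    garage repair --all --yes tables
    garage repair --all --yes blocks
done
```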
@@ -9,11 +9,11 @@ GARAGE_RELEASE="${REPO_FOLDER}/target/release/"
 NIX_RELEASE="${REPO_FOLDER}/result/bin/"
 PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH"
 
-garage bucket create eprouvette
-KEY_INFO=`garage key new --name opérateur`
+garage -c /tmp/config.1.toml bucket create eprouvette
+KEY_INFO=$(garage -c /tmp/config.1.toml key new --name opérateur)
 ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'`
 SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'`
-garage bucket allow eprouvette --read --write --key $ACCESS_KEY
+garage -c /tmp/config.1.toml bucket allow eprouvette --read --write --key $ACCESS_KEY
 echo "$ACCESS_KEY $SECRET_KEY" > /tmp/garage.s3
 
 echo "Bucket s3://eprouvette created. Credentials stored in /tmp/garage.s3."
@@ -17,6 +17,10 @@ MAIN_LABEL="\e[${FANCYCOLORS[0]}[main]\e[49m"
 WHICH_GARAGE=$(which garage || exit 1)
 echo -en "${MAIN_LABEL} Found garage at: ${WHICH_GARAGE}\n"
 
+NETWORK_SECRET="$(openssl rand -hex 32)"
+
+
+# <<<<<<<<< BEGIN FOR LOOP ON NODES
 for count in $(seq 1 3); do
 CONF_PATH="/tmp/config.$count.toml"
 LABEL="\e[${FANCYCOLORS[$count]}[$count]\e[49m"
@@ -26,13 +30,10 @@ block_size = 1048576 # objects are split in blocks of maximum this number of bytes
 metadata_dir = "/tmp/garage-meta-$count"
 data_dir = "/tmp/garage-data-$count"
 rpc_bind_addr = "0.0.0.0:$((3900+$count))" # the port other Garage nodes will use to talk to this node
-bootstrap_peers = [
-  "127.0.0.1:3901",
-  "127.0.0.1:3902",
-  "127.0.0.1:3903"
-]
-max_concurrent_rpc_requests = 12
+rpc_public_addr = "127.0.0.1:$((3900+$count))"
+bootstrap_peers = []
 replication_mode = "3"
+rpc_secret = "$NETWORK_SECRET"
 
 [s3_api]
 api_bind_addr = "0.0.0.0:$((3910+$count))" # the S3 API port, HTTP without TLS. Add a reverse proxy for the TLS part.
@@ -61,11 +62,21 @@ if [ -z "$SKIP_HTTPS" ]; then
 socat openssl-listen:4443,reuseaddr,fork,cert=/tmp/garagessl/test.pem,verify=0 tcp4-connect:localhost:3911 &
 fi
 
-(garage server -c /tmp/config.$count.toml 2>&1|while read r; do echo -en "$LABEL $r\n"; done) &
+(garage -c /tmp/config.$count.toml server 2>&1|while read r; do echo -en "$LABEL $r\n"; done) &
+done
+# >>>>>>>>>>>>>>>> END FOR LOOP ON NODES
+
+sleep 3
+# Establish connections between nodes
+for count in $(seq 1 3); do
+    NODE=$(garage -c /tmp/config.$count.toml node-id -q)
+    for count2 in $(seq 1 3); do
+        garage -c /tmp/config.$count2.toml node connect $NODE
+    done
 done
 
 RETRY=120
-until garage status 2>&1|grep -q Healthy ; do
+until garage -c /tmp/config.1.toml status 2>&1|grep -q HEALTHY ; do
 (( RETRY-- ))
 if (( RETRY <= 0 )); then
 echo -en "${MAIN_LABEL} Garage did not start"
@@ -74,6 +85,7 @@ until garage status 2>&1|grep -q Healthy ; do
 echo -en "${MAIN_LABEL} cluster starting...\n"
 sleep 1
 done
 
 echo -en "${MAIN_LABEL} cluster started\n"
+
 wait
@@ -11,7 +11,7 @@ PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH"
 
 sleep 5
 RETRY=120
-until garage status 2>&1|grep -q Healthy ; do
+until garage -c /tmp/config.1.toml status 2>&1|grep -q HEALTHY ; do
 (( RETRY-- ))
 if (( RETRY <= 0 )); then
 echo "garage did not start in time, failing."
@@ -21,10 +21,10 @@ until garage status 2>&1|grep -q Healthy ; do
 sleep 1
 done
 
-garage status \
-  | grep UNCONFIGURED \
+garage -c /tmp/config.1.toml status \
+  | grep 'NO ROLE' \
   | grep -Po '^[0-9a-f]+' \
   | while read id; do
-  garage node configure -z dc1 -c 1 $id
+  garage -c /tmp/config.1.toml node configure -z dc1 -c 1 $id
 done
@@ -17,13 +17,14 @@ SKIP_DUCK=1
 echo "⏳ Setup"
 ${SCRIPT_FOLDER}/dev-clean.sh
 ${SCRIPT_FOLDER}/dev-cluster.sh > /tmp/garage.log 2>&1 &
+sleep 6
 ${SCRIPT_FOLDER}/dev-configure.sh
 ${SCRIPT_FOLDER}/dev-bucket.sh
 
 which garage
-garage status
-garage key list
-garage bucket list
+garage -c /tmp/config.1.toml status
+garage -c /tmp/config.1.toml key list
+garage -c /tmp/config.1.toml bucket list
 
 dd if=/dev/urandom of=/tmp/garage.1.rnd bs=1k count=2 # No multipart, inline storage (< INLINE_THRESHOLD = 3072 bytes)
 dd if=/dev/urandom of=/tmp/garage.2.rnd bs=1M count=5 # No multipart but file will be chunked
@@ -116,9 +117,9 @@ if [ -z "$SKIP_AWS" ]; then
 echo "<h1>hello world</h1>" > /tmp/garage-index.html
 aws s3 cp /tmp/garage-index.html s3://eprouvette/index.html
 [ `curl -s -o /dev/null -w "%{http_code}" --header "Host: eprouvette.garage.tld" http://127.0.0.1:3923/ ` == 404 ]
-garage bucket website --allow eprouvette
+garage -c /tmp/config.1.toml bucket website --allow eprouvette
 [ `curl -s -o /dev/null -w "%{http_code}" --header "Host: eprouvette.garage.tld" http://127.0.0.1:3923/ ` == 200 ]
-garage bucket website --deny eprouvette
+garage -c /tmp/config.1.toml bucket website --deny eprouvette
 [ `curl -s -o /dev/null -w "%{http_code}" --header "Host: eprouvette.garage.tld" http://127.0.0.1:3923/ ` == 404 ]
 aws s3 rm s3://eprouvette/index.html
 rm /tmp/garage-index.html
@@ -127,8 +128,8 @@ fi
 echo "🏁 Teardown"
 AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1`
 AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2`
-garage bucket deny --read --write eprouvette --key $AWS_ACCESS_KEY_ID
-garage bucket delete --yes eprouvette
-garage key delete --yes $AWS_ACCESS_KEY_ID
+garage -c /tmp/config.1.toml bucket deny --read --write eprouvette --key $AWS_ACCESS_KEY_ID
+garage -c /tmp/config.1.toml bucket delete --yes eprouvette
+garage -c /tmp/config.1.toml key delete --yes $AWS_ACCESS_KEY_ID
 
 echo "✅ Success"
@@ -83,7 +83,9 @@ impl Error {
 			Error::NotFound => StatusCode::NOT_FOUND,
 			Error::Forbidden(_) => StatusCode::FORBIDDEN,
 			Error::InternalError(
-				GarageError::Timeout | GarageError::RemoteError(_) | GarageError::TooManyErrors(_),
+				GarageError::Timeout
+				| GarageError::RemoteError(_)
+				| GarageError::Quorum(_, _, _, _),
 			) => StatusCode::SERVICE_UNAVAILABLE,
 			Error::InternalError(_) | Error::Hyper(_) | Error::Http(_) => {
 				StatusCode::INTERNAL_SERVER_ERROR
@@ -98,7 +100,9 @@ impl Error {
 			Error::Forbidden(_) => "AccessDenied",
 			Error::AuthorizationHeaderMalformed(_) => "AuthorizationHeaderMalformed",
 			Error::InternalError(
-				GarageError::Timeout | GarageError::RemoteError(_) | GarageError::TooManyErrors(_),
+				GarageError::Timeout
+				| GarageError::RemoteError(_)
+				| GarageError::Quorum(_, _, _, _),
 			) => "ServiceUnavailable",
 			Error::InternalError(_) | Error::Hyper(_) | Error::Http(_) => "InternalError",
 			_ => "InvalidRequest",
@@ -349,11 +349,7 @@ impl AdminRpcHandler {
 				PRIO_NORMAL,
 			)
 			.await;
-			let is_err = match resp {
-				Ok(Ok(_)) => false,
-				_ => true,
-			};
-			if is_err {
+			if !matches!(resp, Ok(Ok(_))) {
 				failures.push(node);
 			}
 		}
@@ -1,657 +0,0 @@
-use std::collections::HashSet;
-use std::path::PathBuf;
-
-use serde::{Deserialize, Serialize};
-use structopt::StructOpt;
-
-use garage_util::data::Uuid;
-use garage_util::error::Error;
-use garage_util::time::*;
-
-use garage_rpc::ring::*;
-use garage_rpc::system::*;
-use garage_rpc::*;
-
-use garage_model::bucket_table::*;
-use garage_model::key_table::*;
-
-use crate::admin_rpc::*;
-
-#[derive(StructOpt, Debug)]
-pub enum Command {
-    /// Run Garage server
-    #[structopt(name = "server")]
-    Server(ServerOpt),
-
-    /// Get network status
-    #[structopt(name = "status")]
-    Status,
-
-    /// Garage node operations
-    #[structopt(name = "node")]
-    Node(NodeOperation),
-
-    /// Bucket operations
-    #[structopt(name = "bucket")]
-    Bucket(BucketOperation),
-
-    /// Key operations
-    #[structopt(name = "key")]
-    Key(KeyOperation),
-
-    /// Start repair of node data
-    #[structopt(name = "repair")]
-    Repair(RepairOpt),
-
-    /// Gather node statistics
-    #[structopt(name = "stats")]
-    Stats(StatsOpt),
-}
-
-#[derive(StructOpt, Debug)]
-pub struct ServerOpt {
-    /// Configuration file
-    #[structopt(short = "c", long = "config", default_value = "./config.toml")]
-    pub config_file: PathBuf,
-}
-
-#[derive(StructOpt, Debug)]
-pub enum NodeOperation {
-    /// Connect to Garage node that is currently isolated from the system
-    #[structopt(name = "connect")]
-    Connect(ConnectNodeOpt),
-
-    /// Configure Garage node
-    #[structopt(name = "configure")]
-    Configure(ConfigureNodeOpt),
-
-    /// Remove Garage node from cluster
-    #[structopt(name = "remove")]
-    Remove(RemoveNodeOpt),
-}
-
-#[derive(StructOpt, Debug)]
-pub struct ConnectNodeOpt {
-    /// Node public key and address, in the format:
-    /// `<public key hexadecimal>@<ip or hostname>:<port>`
-    node: String,
-}
-
-#[derive(StructOpt, Debug)]
-pub struct ConfigureNodeOpt {
-    /// Node to configure (prefix of hexadecimal node id)
-    node_id: String,
-
-    /// Location (zone or datacenter) of the node
-    #[structopt(short = "z", long = "zone")]
-    zone: Option<String>,
-
-    /// Capacity (in relative terms, use 1 to represent your smallest server)
-    #[structopt(short = "c", long = "capacity")]
-    capacity: Option<u32>,
-
-    /// Gateway-only node
-    #[structopt(short = "g", long = "gateway")]
-    gateway: bool,
-
-    /// Optional node tag
-    #[structopt(short = "t", long = "tag")]
-    tag: Option<String>,
-
-    /// Replaced node(s): list of node IDs that will be removed from the current cluster
-    #[structopt(long = "replace")]
-    replace: Vec<String>,
-}
-
-#[derive(StructOpt, Debug)]
-pub struct RemoveNodeOpt {
-    /// Node to configure (prefix of hexadecimal node id)
-    node_id: String,
-
-    /// If this flag is not given, the node won't be removed
-    #[structopt(long = "yes")]
-    yes: bool,
-}
-
-#[derive(Serialize, Deserialize, StructOpt, Debug)]
-pub enum BucketOperation {
-    /// List buckets
-    #[structopt(name = "list")]
-    List,
-
-    /// Get bucket info
-    #[structopt(name = "info")]
-    Info(BucketOpt),
-
-    /// Create bucket
-    #[structopt(name = "create")]
-    Create(BucketOpt),
-
-    /// Delete bucket
-    #[structopt(name = "delete")]
-    Delete(DeleteBucketOpt),
-
-    /// Allow key to read or write to bucket
-    #[structopt(name = "allow")]
-    Allow(PermBucketOpt),
-
-    /// Deny key from reading or writing to bucket
-    #[structopt(name = "deny")]
-    Deny(PermBucketOpt),
-
-    /// Expose as website or not
-    #[structopt(name = "website")]
-    Website(WebsiteOpt),
-}
-
-#[derive(Serialize, Deserialize, StructOpt, Debug)]
-pub struct WebsiteOpt {
-    /// Create
-    #[structopt(long = "allow")]
-    pub allow: bool,
-
-    /// Delete
-    #[structopt(long = "deny")]
-    pub deny: bool,
-
-    /// Bucket name
-    pub bucket: String,
-}
-
-#[derive(Serialize, Deserialize, StructOpt, Debug)]
-pub struct BucketOpt {
-    /// Bucket name
-    pub name: String,
-}
-
-#[derive(Serialize, Deserialize, StructOpt, Debug)]
-pub struct DeleteBucketOpt {
-    /// Bucket name
-    pub name: String,
-
-    /// If this flag is not given, the bucket won't be deleted
-    #[structopt(long = "yes")]
-    pub yes: bool,
-}
-
-#[derive(Serialize, Deserialize, StructOpt, Debug)]
-pub struct PermBucketOpt {
-    /// Access key name or ID
-    #[structopt(long = "key")]
-    pub key_pattern: String,
-
-    /// Allow/deny read operations
-    #[structopt(long = "read")]
-    pub read: bool,
-
-    /// Allow/deny write operations
-    #[structopt(long = "write")]
-    pub write: bool,
-
-    /// Bucket name
-    pub bucket: String,
-}
-
-#[derive(Serialize, Deserialize, StructOpt, Debug)]
-pub enum KeyOperation {
-    /// List keys
-    #[structopt(name = "list")]
-    List,
-
-    /// Get key info
-    #[structopt(name = "info")]
-    Info(KeyOpt),
-
-    /// Create new key
-    #[structopt(name = "new")]
-    New(KeyNewOpt),
-
-    /// Rename key
-    #[structopt(name = "rename")]
-    Rename(KeyRenameOpt),
-
-    /// Delete key
-    #[structopt(name = "delete")]
-    Delete(KeyDeleteOpt),
-
-    /// Import key
-    #[structopt(name = "import")]
-    Import(KeyImportOpt),
-}
|
|
||||||
#[derive(Serialize, Deserialize, StructOpt, Debug)]
|
|
||||||
pub struct KeyOpt {
|
|
||||||
/// ID or name of the key
|
|
||||||
pub key_pattern: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, StructOpt, Debug)]
|
|
||||||
pub struct KeyNewOpt {
|
|
||||||
/// Name of the key
|
|
||||||
#[structopt(long = "name", default_value = "Unnamed key")]
|
|
||||||
pub name: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, StructOpt, Debug)]
|
|
||||||
pub struct KeyRenameOpt {
|
|
||||||
/// ID or name of the key
|
|
||||||
pub key_pattern: String,
|
|
||||||
|
|
||||||
/// New name of the key
|
|
||||||
pub new_name: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, StructOpt, Debug)]
|
|
||||||
pub struct KeyDeleteOpt {
|
|
||||||
/// ID or name of the key
|
|
||||||
pub key_pattern: String,
|
|
||||||
|
|
||||||
/// Confirm deletion
|
|
||||||
#[structopt(long = "yes")]
|
|
||||||
pub yes: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, StructOpt, Debug)]
|
|
||||||
pub struct KeyImportOpt {
|
|
||||||
/// Access key ID
|
|
||||||
pub key_id: String,
|
|
||||||
|
|
||||||
/// Secret access key
|
|
||||||
pub secret_key: String,
|
|
||||||
|
|
||||||
/// Key name
|
|
||||||
#[structopt(short = "n", default_value = "Imported key")]
|
|
||||||
pub name: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
|
|
||||||
pub struct RepairOpt {
|
|
||||||
/// Launch repair operation on all nodes
|
|
||||||
#[structopt(short = "a", long = "all-nodes")]
|
|
||||||
pub all_nodes: bool,
|
|
||||||
|
|
||||||
/// Confirm the launch of the repair operation
|
|
||||||
#[structopt(long = "yes")]
|
|
||||||
pub yes: bool,
|
|
||||||
|
|
||||||
#[structopt(subcommand)]
|
|
||||||
pub what: Option<RepairWhat>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
|
|
||||||
pub enum RepairWhat {
|
|
||||||
/// Only do a full sync of metadata tables
|
|
||||||
#[structopt(name = "tables")]
|
|
||||||
Tables,
|
|
||||||
/// Only repair (resync/rebalance) the set of stored blocks
|
|
||||||
#[structopt(name = "blocks")]
|
|
||||||
Blocks,
|
|
||||||
/// Only redo the propagation of object deletions to the version table (slow)
|
|
||||||
#[structopt(name = "versions")]
|
|
||||||
Versions,
|
|
||||||
/// Only redo the propagation of version deletions to the block ref table (extremely slow)
|
|
||||||
#[structopt(name = "block_refs")]
|
|
||||||
BlockRefs,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
|
|
||||||
pub struct StatsOpt {
|
|
||||||
/// Gather statistics from all nodes
|
|
||||||
#[structopt(short = "a", long = "all-nodes")]
|
|
||||||
pub all_nodes: bool,
|
|
||||||
|
|
||||||
/// Gather detailed statistics (this can be long)
|
|
||||||
#[structopt(short = "d", long = "detailed")]
|
|
||||||
pub detailed: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn cli_cmd(
|
|
||||||
cmd: Command,
|
|
||||||
system_rpc_endpoint: &Endpoint<SystemRpc, ()>,
|
|
||||||
admin_rpc_endpoint: &Endpoint<AdminRpc, ()>,
|
|
||||||
rpc_host: NodeID,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
match cmd {
|
|
||||||
Command::Status => cmd_status(system_rpc_endpoint, rpc_host).await,
|
|
||||||
Command::Node(NodeOperation::Connect(connect_opt)) => {
|
|
||||||
cmd_connect(system_rpc_endpoint, rpc_host, connect_opt).await
|
|
||||||
}
|
|
||||||
Command::Node(NodeOperation::Configure(configure_opt)) => {
|
|
||||||
cmd_configure(system_rpc_endpoint, rpc_host, configure_opt).await
|
|
||||||
}
|
|
||||||
Command::Node(NodeOperation::Remove(remove_opt)) => {
|
|
||||||
cmd_remove(system_rpc_endpoint, rpc_host, remove_opt).await
|
|
||||||
}
|
|
||||||
Command::Bucket(bo) => {
|
|
||||||
cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BucketOperation(bo)).await
|
|
||||||
}
|
|
||||||
Command::Key(ko) => {
|
|
||||||
cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::KeyOperation(ko)).await
|
|
||||||
}
|
|
||||||
Command::Repair(ro) => {
|
|
||||||
cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::LaunchRepair(ro)).await
|
|
||||||
}
|
|
||||||
Command::Stats(so) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Stats(so)).await,
|
|
||||||
_ => unreachable!(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) -> Result<(), Error> {
|
|
||||||
let status = match rpc_cli
|
|
||||||
.call(&rpc_host, &SystemRpc::GetKnownNodes, PRIO_NORMAL)
|
|
||||||
.await??
|
|
||||||
{
|
|
||||||
SystemRpc::ReturnKnownNodes(nodes) => nodes,
|
|
||||||
resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
|
|
||||||
};
|
|
||||||
let config = match rpc_cli
|
|
||||||
.call(&rpc_host, &SystemRpc::PullConfig, PRIO_NORMAL)
|
|
||||||
.await??
|
|
||||||
{
|
|
||||||
SystemRpc::AdvertiseConfig(cfg) => cfg,
|
|
||||||
resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
|
|
||||||
};
|
|
||||||
|
|
||||||
println!("Healthy nodes:");
|
|
||||||
let mut healthy_nodes = vec!["ID\tHostname\tAddress\tTag\tZone\tCapacity".to_string()];
|
|
||||||
for adv in status.iter().filter(|adv| adv.is_up) {
|
|
||||||
if let Some(cfg) = config.members.get(&adv.id) {
|
|
||||||
healthy_nodes.push(format!(
|
|
||||||
"{id:?}\t{host}\t{addr}\t[{tag}]\t{zone}\t{capacity}",
|
|
||||||
id = adv.id,
|
|
||||||
host = adv.status.hostname,
|
|
||||||
addr = adv.addr,
|
|
||||||
tag = cfg.tag,
|
|
||||||
zone = cfg.zone,
|
|
||||||
capacity = cfg.capacity_string(),
|
|
||||||
));
|
|
||||||
} else {
|
|
||||||
healthy_nodes.push(format!(
|
|
||||||
"{id:?}\t{h}\t{addr}\tUNCONFIGURED/REMOVED",
|
|
||||||
id = adv.id,
|
|
||||||
h = adv.status.hostname,
|
|
||||||
addr = adv.addr,
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
format_table(healthy_nodes);
|
|
||||||
|
|
||||||
let status_keys = status.iter().map(|adv| adv.id).collect::<HashSet<_>>();
|
|
||||||
let failure_case_1 = status.iter().any(|adv| !adv.is_up);
|
|
||||||
let failure_case_2 = config
|
|
||||||
.members
|
|
||||||
.iter()
|
|
||||||
.any(|(id, _)| !status_keys.contains(id));
|
|
||||||
if failure_case_1 || failure_case_2 {
|
|
||||||
println!("\nFailed nodes:");
|
|
||||||
let mut failed_nodes = vec!["ID\tHostname\tAddress\tTag\tZone\tCapacity\tLast seen".to_string()];
|
|
||||||
for adv in status.iter().filter(|adv| !adv.is_up) {
|
|
||||||
if let Some(cfg) = config.members.get(&adv.id) {
|
|
||||||
failed_nodes.push(format!(
|
|
||||||
"{id:?}\t{host}\t{addr}\t[{tag}]\t{zone}\t{capacity}\t{last_seen}s ago",
|
|
||||||
id = adv.id,
|
|
||||||
host = adv.status.hostname,
|
|
||||||
addr = adv.addr,
|
|
||||||
tag = cfg.tag,
|
|
||||||
zone = cfg.zone,
|
|
||||||
capacity = cfg.capacity_string(),
|
|
||||||
last_seen = (now_msec() - 0) / 1000,
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for (id, cfg) in config.members.iter() {
|
|
||||||
if !status.iter().any(|adv| adv.id == *id) {
|
|
||||||
failed_nodes.push(format!(
|
|
||||||
"{id:?}\t??\t??\t[{tag}]\t{zone}\t{capacity}\tnever seen",
|
|
||||||
id = id,
|
|
||||||
tag = cfg.tag,
|
|
||||||
zone = cfg.zone,
|
|
||||||
capacity = cfg.capacity_string(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
format_table(failed_nodes);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn cmd_connect(
|
|
||||||
rpc_cli: &Endpoint<SystemRpc, ()>,
|
|
||||||
rpc_host: NodeID,
|
|
||||||
args: ConnectNodeOpt,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
match rpc_cli.call(&rpc_host, &SystemRpc::Connect(args.node), PRIO_NORMAL).await?? {
|
|
||||||
SystemRpc::Ok => {
|
|
||||||
println!("Success.");
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
r => {
|
|
||||||
Err(Error::BadRpc(format!("Unexpected response: {:?}", r)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn cmd_configure(
|
|
||||||
rpc_cli: &Endpoint<SystemRpc, ()>,
|
|
||||||
rpc_host: NodeID,
|
|
||||||
args: ConfigureNodeOpt,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let status = match rpc_cli
|
|
||||||
.call(&rpc_host, &SystemRpc::GetKnownNodes, PRIO_NORMAL)
|
|
||||||
.await??
|
|
||||||
{
|
|
||||||
SystemRpc::ReturnKnownNodes(nodes) => nodes,
|
|
||||||
resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
|
|
||||||
};
|
|
||||||
|
|
||||||
let added_node = find_matching_node(status.iter().map(|adv| adv.id), &args.node_id)?;
|
|
||||||
|
|
||||||
let mut config = match rpc_cli
|
|
||||||
.call(&rpc_host, &SystemRpc::PullConfig, PRIO_NORMAL)
|
|
||||||
.await??
|
|
||||||
{
|
|
||||||
SystemRpc::AdvertiseConfig(cfg) => cfg,
|
|
||||||
resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
|
|
||||||
};
|
|
||||||
|
|
||||||
for replaced in args.replace.iter() {
|
|
||||||
let replaced_node = find_matching_node(config.members.keys().cloned(), replaced)?;
|
|
||||||
if config.members.remove(&replaced_node).is_none() {
|
|
||||||
return Err(Error::Message(format!(
|
|
||||||
"Cannot replace node {:?} as it is not in current configuration",
|
|
||||||
replaced_node
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if args.capacity.is_some() && args.gateway {
|
|
||||||
return Err(Error::Message(
|
|
||||||
"-c and -g are mutually exclusive, please configure node either with c>0 to act as a storage node or with -g to act as a gateway node".into()));
|
|
||||||
}
|
|
||||||
if args.capacity == Some(0) {
|
|
||||||
return Err(Error::Message("Invalid capacity value: 0".into()));
|
|
||||||
}
|
|
||||||
|
|
||||||
let new_entry = match config.members.get(&added_node) {
|
|
||||||
None => {
|
|
||||||
let capacity = match args.capacity {
|
|
||||||
Some(c) => Some(c),
|
|
||||||
None if args.gateway => None,
|
|
||||||
_ => return Err(Error::Message(
|
|
||||||
"Please specify a capacity with the -c flag, or set node explicitly as gateway with -g".into())),
|
|
||||||
};
|
|
||||||
NetworkConfigEntry {
|
|
||||||
zone: args.zone.expect("Please specifiy a zone with the -z flag"),
|
|
||||||
capacity,
|
|
||||||
tag: args.tag.unwrap_or_default(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Some(old) => {
|
|
||||||
let capacity = match args.capacity {
|
|
||||||
Some(c) => Some(c),
|
|
||||||
None if args.gateway => None,
|
|
||||||
_ => old.capacity,
|
|
||||||
};
|
|
||||||
NetworkConfigEntry {
|
|
||||||
zone: args.zone.unwrap_or_else(|| old.zone.to_string()),
|
|
||||||
capacity,
|
|
||||||
tag: args.tag.unwrap_or_else(|| old.tag.to_string()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
config.members.insert(added_node, new_entry);
|
|
||||||
config.version += 1;
|
|
||||||
|
|
||||||
rpc_cli
|
|
||||||
.call(&rpc_host, &SystemRpc::AdvertiseConfig(config), PRIO_NORMAL)
|
|
||||||
.await??;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn cmd_remove(
|
|
||||||
rpc_cli: &Endpoint<SystemRpc, ()>,
|
|
||||||
rpc_host: NodeID,
|
|
||||||
args: RemoveNodeOpt,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let mut config = match rpc_cli
|
|
||||||
.call(&rpc_host, &SystemRpc::PullConfig, PRIO_NORMAL)
|
|
||||||
.await??
|
|
||||||
{
|
|
||||||
SystemRpc::AdvertiseConfig(cfg) => cfg,
|
|
||||||
resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
|
|
||||||
};
|
|
||||||
|
|
||||||
let deleted_node = find_matching_node(config.members.keys().cloned(), &args.node_id)?;
|
|
||||||
|
|
||||||
if !args.yes {
|
|
||||||
return Err(Error::Message(format!(
|
|
||||||
"Add the flag --yes to really remove {:?} from the cluster",
|
|
||||||
deleted_node
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
config.members.remove(&deleted_node);
|
|
||||||
config.version += 1;
|
|
||||||
|
|
||||||
rpc_cli
|
|
||||||
.call(&rpc_host, &SystemRpc::AdvertiseConfig(config), PRIO_NORMAL)
|
|
||||||
.await??;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn cmd_admin(
|
|
||||||
rpc_cli: &Endpoint<AdminRpc, ()>,
|
|
||||||
rpc_host: NodeID,
|
|
||||||
args: AdminRpc,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
match rpc_cli.call(&rpc_host, &args, PRIO_NORMAL).await?? {
|
|
||||||
AdminRpc::Ok(msg) => {
|
|
||||||
println!("{}", msg);
|
|
||||||
}
|
|
||||||
AdminRpc::BucketList(bl) => {
|
|
||||||
println!("List of buckets:");
|
|
||||||
for bucket in bl {
|
|
||||||
println!("{}", bucket);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
AdminRpc::BucketInfo(bucket) => {
|
|
||||||
print_bucket_info(&bucket);
|
|
||||||
}
|
|
||||||
AdminRpc::KeyList(kl) => {
|
|
||||||
println!("List of keys:");
|
|
||||||
for key in kl {
|
|
||||||
println!("{}\t{}", key.0, key.1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
AdminRpc::KeyInfo(key) => {
|
|
||||||
print_key_info(&key);
|
|
||||||
}
|
|
||||||
r => {
|
|
||||||
error!("Unexpected response: {:?}", r);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Utility functions ----
|
|
||||||
|
|
||||||
fn print_key_info(key: &Key) {
|
|
||||||
println!("Key name: {}", key.name.get());
|
|
||||||
println!("Key ID: {}", key.key_id);
|
|
||||||
println!("Secret key: {}", key.secret_key);
|
|
||||||
if key.deleted.get() {
|
|
||||||
println!("Key is deleted.");
|
|
||||||
} else {
|
|
||||||
println!("Authorized buckets:");
|
|
||||||
for (b, _, perm) in key.authorized_buckets.items().iter() {
|
|
||||||
println!("- {} R:{} W:{}", b, perm.allow_read, perm.allow_write);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn print_bucket_info(bucket: &Bucket) {
|
|
||||||
println!("Bucket name: {}", bucket.name);
|
|
||||||
match bucket.state.get() {
|
|
||||||
BucketState::Deleted => println!("Bucket is deleted."),
|
|
||||||
BucketState::Present(p) => {
|
|
||||||
println!("Authorized keys:");
|
|
||||||
for (k, _, perm) in p.authorized_keys.items().iter() {
|
|
||||||
println!("- {} R:{} W:{}", k, perm.allow_read, perm.allow_write);
|
|
||||||
}
|
|
||||||
println!("Website access: {}", p.website.get());
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
fn format_table(data: Vec<String>) {
|
|
||||||
let data = data
|
|
||||||
.iter()
|
|
||||||
.map(|s| s.split('\t').collect::<Vec<_>>())
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
|
|
||||||
let columns = data.iter().map(|row| row.len()).fold(0, std::cmp::max);
|
|
||||||
let mut column_size = vec![0; columns];
|
|
||||||
|
|
||||||
let mut out = String::new();
|
|
||||||
|
|
||||||
for row in data.iter() {
|
|
||||||
for (i, col) in row.iter().enumerate() {
|
|
||||||
column_size[i] = std::cmp::max(column_size[i], col.chars().count());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for row in data.iter() {
|
|
||||||
for (col, col_len) in row[..row.len() - 1].iter().zip(column_size.iter()) {
|
|
||||||
out.push_str(col);
|
|
||||||
(0..col_len - col.chars().count() + 2).for_each(|_| out.push(' '));
|
|
||||||
}
|
|
||||||
out.push_str(&row[row.len() - 1]);
|
|
||||||
out.push('\n');
|
|
||||||
}
|
|
||||||
|
|
||||||
print!("{}", out);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn find_matching_node(
|
|
||||||
cand: impl std::iter::Iterator<Item = Uuid>,
|
|
||||||
pattern: &str,
|
|
||||||
) -> Result<Uuid, Error> {
|
|
||||||
let mut candidates = vec![];
|
|
||||||
for c in cand {
|
|
||||||
if hex::encode(&c).starts_with(&pattern) {
|
|
||||||
candidates.push(c);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if candidates.len() != 1 {
|
|
||||||
Err(Error::Message(format!(
|
|
||||||
"{} nodes match '{}'",
|
|
||||||
candidates.len(),
|
|
||||||
pattern,
|
|
||||||
)))
|
|
||||||
} else {
|
|
||||||
Ok(candidates[0])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
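A behavioral note on the rewrite of this deleted file: where the old cmd_configure panicked on a missing -z argument via `.expect(...)`, its replacement in src/garage/cli/cmd.rs (below) surfaces the same message as an ordinary error. A minimal sketch of the two styles, with String standing in for garage_util's error type:

// Old style: aborts the process with the message if no zone was given.
fn zone_old(zone: Option<String>) -> String {
	zone.expect("Please specifiy a zone with the -z flag")
}

// New style: the message becomes a normal Err the CLI can print before exiting.
fn zone_new(zone: Option<String>) -> Result<String, String> {
	zone.ok_or_else(|| "Please specifiy a zone with the -z flag".to_string())
}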
287	src/garage/cli/cmd.rs	Normal file
@@ -0,0 +1,287 @@
use std::collections::HashSet;

use garage_util::error::*;

use garage_rpc::ring::*;
use garage_rpc::system::*;
use garage_rpc::*;

use crate::admin::*;
use crate::cli::*;

pub async fn cli_command_dispatch(
	cmd: Command,
	system_rpc_endpoint: &Endpoint<SystemRpc, ()>,
	admin_rpc_endpoint: &Endpoint<AdminRpc, ()>,
	rpc_host: NodeID,
) -> Result<(), Error> {
	match cmd {
		Command::Status => cmd_status(system_rpc_endpoint, rpc_host).await,
		Command::Node(NodeOperation::Connect(connect_opt)) => {
			cmd_connect(system_rpc_endpoint, rpc_host, connect_opt).await
		}
		Command::Node(NodeOperation::Configure(configure_opt)) => {
			cmd_configure(system_rpc_endpoint, rpc_host, configure_opt).await
		}
		Command::Node(NodeOperation::Remove(remove_opt)) => {
			cmd_remove(system_rpc_endpoint, rpc_host, remove_opt).await
		}
		Command::Bucket(bo) => {
			cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BucketOperation(bo)).await
		}
		Command::Key(ko) => {
			cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::KeyOperation(ko)).await
		}
		Command::Repair(ro) => {
			cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::LaunchRepair(ro)).await
		}
		Command::Stats(so) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Stats(so)).await,
		_ => unreachable!(),
	}
}

pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) -> Result<(), Error> {
	let status = match rpc_cli
		.call(&rpc_host, &SystemRpc::GetKnownNodes, PRIO_NORMAL)
		.await??
	{
		SystemRpc::ReturnKnownNodes(nodes) => nodes,
		resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
	};
	let config = match rpc_cli
		.call(&rpc_host, &SystemRpc::PullConfig, PRIO_NORMAL)
		.await??
	{
		SystemRpc::AdvertiseConfig(cfg) => cfg,
		resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
	};

	println!("==== HEALTHY NODES ====");
	let mut healthy_nodes = vec!["ID\tHostname\tAddress\tTag\tZone\tCapacity".to_string()];
	for adv in status.iter().filter(|adv| adv.is_up) {
		if let Some(cfg) = config.members.get(&adv.id) {
			healthy_nodes.push(format!(
				"{id:?}\t{host}\t{addr}\t[{tag}]\t{zone}\t{capacity}",
				id = adv.id,
				host = adv.status.hostname,
				addr = adv.addr,
				tag = cfg.tag,
				zone = cfg.zone,
				capacity = cfg.capacity_string(),
			));
		} else {
			healthy_nodes.push(format!(
				"{id:?}\t{h}\t{addr}\tNO ROLE ASSIGNED",
				id = adv.id,
				h = adv.status.hostname,
				addr = adv.addr,
			));
		}
	}
	format_table(healthy_nodes);

	let status_keys = status.iter().map(|adv| adv.id).collect::<HashSet<_>>();
	let failure_case_1 = status.iter().any(|adv| !adv.is_up);
	let failure_case_2 = config
		.members
		.iter()
		.any(|(id, _)| !status_keys.contains(id));
	if failure_case_1 || failure_case_2 {
		println!("\n==== FAILED NODES ====");
		let mut failed_nodes =
			vec!["ID\tHostname\tAddress\tTag\tZone\tCapacity\tLast seen".to_string()];
		for adv in status.iter().filter(|adv| !adv.is_up) {
			if let Some(cfg) = config.members.get(&adv.id) {
				failed_nodes.push(format!(
					"{id:?}\t{host}\t{addr}\t[{tag}]\t{zone}\t{capacity}\t{last_seen}",
					id = adv.id,
					host = adv.status.hostname,
					addr = adv.addr,
					tag = cfg.tag,
					zone = cfg.zone,
					capacity = cfg.capacity_string(),
					last_seen = adv
						.last_seen_secs_ago
						.map(|s| format!("{}s ago", s))
						.unwrap_or_else(|| "never seen".into()),
				));
			}
		}
		for (id, cfg) in config.members.iter() {
			if !status_keys.contains(id) {
				failed_nodes.push(format!(
					"{id:?}\t??\t??\t[{tag}]\t{zone}\t{capacity}\tnever seen",
					id = id,
					tag = cfg.tag,
					zone = cfg.zone,
					capacity = cfg.capacity_string(),
				));
			}
		}
		format_table(failed_nodes);
	}

	Ok(())
}

pub async fn cmd_connect(
	rpc_cli: &Endpoint<SystemRpc, ()>,
	rpc_host: NodeID,
	args: ConnectNodeOpt,
) -> Result<(), Error> {
	match rpc_cli
		.call(&rpc_host, &SystemRpc::Connect(args.node), PRIO_NORMAL)
		.await??
	{
		SystemRpc::Ok => {
			println!("Success.");
			Ok(())
		}
		r => Err(Error::BadRpc(format!("Unexpected response: {:?}", r))),
	}
}

pub async fn cmd_configure(
	rpc_cli: &Endpoint<SystemRpc, ()>,
	rpc_host: NodeID,
	args: ConfigureNodeOpt,
) -> Result<(), Error> {
	let status = match rpc_cli
		.call(&rpc_host, &SystemRpc::GetKnownNodes, PRIO_NORMAL)
		.await??
	{
		SystemRpc::ReturnKnownNodes(nodes) => nodes,
		resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
	};

	let added_node = find_matching_node(status.iter().map(|adv| adv.id), &args.node_id)?;

	let mut config = match rpc_cli
		.call(&rpc_host, &SystemRpc::PullConfig, PRIO_NORMAL)
		.await??
	{
		SystemRpc::AdvertiseConfig(cfg) => cfg,
		resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
	};

	for replaced in args.replace.iter() {
		let replaced_node = find_matching_node(config.members.keys().cloned(), replaced)?;
		if config.members.remove(&replaced_node).is_none() {
			return Err(Error::Message(format!(
				"Cannot replace node {:?} as it is not in current configuration",
				replaced_node
			)));
		}
	}

	if args.capacity.is_some() && args.gateway {
		return Err(Error::Message(
			"-c and -g are mutually exclusive, please configure node either with c>0 to act as a storage node or with -g to act as a gateway node".into()));
	}
	if args.capacity == Some(0) {
		return Err(Error::Message("Invalid capacity value: 0".into()));
	}

	let new_entry = match config.members.get(&added_node) {
		None => {
			let capacity = match args.capacity {
				Some(c) => Some(c),
				None if args.gateway => None,
				_ => return Err(Error::Message(
					"Please specify a capacity with the -c flag, or set node explicitly as gateway with -g".into())),
			};
			NetworkConfigEntry {
				zone: args.zone.ok_or("Please specifiy a zone with the -z flag")?,
				capacity,
				tag: args.tag.unwrap_or_default(),
			}
		}
		Some(old) => {
			let capacity = match args.capacity {
				Some(c) => Some(c),
				None if args.gateway => None,
				_ => old.capacity,
			};
			NetworkConfigEntry {
				zone: args.zone.unwrap_or_else(|| old.zone.to_string()),
				capacity,
				tag: args.tag.unwrap_or_else(|| old.tag.to_string()),
			}
		}
	};

	config.members.insert(added_node, new_entry);
	config.version += 1;

	rpc_cli
		.call(&rpc_host, &SystemRpc::AdvertiseConfig(config), PRIO_NORMAL)
		.await??;
	Ok(())
}

pub async fn cmd_remove(
	rpc_cli: &Endpoint<SystemRpc, ()>,
	rpc_host: NodeID,
	args: RemoveNodeOpt,
) -> Result<(), Error> {
	let mut config = match rpc_cli
		.call(&rpc_host, &SystemRpc::PullConfig, PRIO_NORMAL)
		.await??
	{
		SystemRpc::AdvertiseConfig(cfg) => cfg,
		resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
	};

	let deleted_node = find_matching_node(config.members.keys().cloned(), &args.node_id)?;

	if !args.yes {
		return Err(Error::Message(format!(
			"Add the flag --yes to really remove {:?} from the cluster",
			deleted_node
		)));
	}

	config.members.remove(&deleted_node);
	config.version += 1;

	rpc_cli
		.call(&rpc_host, &SystemRpc::AdvertiseConfig(config), PRIO_NORMAL)
		.await??;
	Ok(())
}

pub async fn cmd_admin(
	rpc_cli: &Endpoint<AdminRpc, ()>,
	rpc_host: NodeID,
	args: AdminRpc,
) -> Result<(), Error> {
	match rpc_cli.call(&rpc_host, &args, PRIO_NORMAL).await?? {
		AdminRpc::Ok(msg) => {
			println!("{}", msg);
		}
		AdminRpc::BucketList(bl) => {
			println!("List of buckets:");
			for bucket in bl {
				println!("{}", bucket);
			}
		}
		AdminRpc::BucketInfo(bucket) => {
			print_bucket_info(&bucket);
		}
		AdminRpc::KeyList(kl) => {
			println!("List of keys:");
			for key in kl {
				println!("{}\t{}", key.0, key.1);
			}
		}
		AdminRpc::KeyInfo(key) => {
			print_key_info(&key);
		}
		r => {
			error!("Unexpected response: {:?}", r);
		}
	}
	Ok(())
}

// --- Utility functions ----
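The `.await??` pattern used throughout the functions above unwraps two nested Results: the outer one comes from the RPC transport, the inner one is the answer reported by the remote handler. A minimal self-contained sketch, with String standing in for the crate's error types:

async fn rpc_call() -> Result<Result<u32, String>, String> {
	// Outer Err: the request never reached the node.
	// Inner Err: the node answered with an error of its own.
	Ok(Ok(42))
}

async fn caller() -> Result<u32, String> {
	// First ? strips the transport layer, second ? strips the remote error.
	let v = rpc_call().await??;
	Ok(v)
}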
65	src/garage/cli/init.rs	Normal file
@@ -0,0 +1,65 @@
use std::path::PathBuf;

use garage_util::error::*;

pub const READ_KEY_ERROR: &str = "Unable to read node key. It will be generated by your garage node the first time is it launched. Ensure that your garage node is currently running. (The node key is supposed to be stored in your metadata directory.)";

pub fn node_id_command(config_file: PathBuf, quiet: bool) -> Result<(), Error> {
	let config = garage_util::config::read_config(config_file.clone()).err_context(format!(
		"Unable to read configuration file {}",
		config_file.to_string_lossy(),
	))?;

	let node_id =
		garage_rpc::system::read_node_id(&config.metadata_dir).err_context(READ_KEY_ERROR)?;

	let idstr = if let Some(addr) = config.rpc_public_addr {
		let idstr = format!("{}@{}", hex::encode(&node_id), addr);
		println!("{}", idstr);
		idstr
	} else {
		let idstr = hex::encode(&node_id);
		println!("{}", idstr);

		if !quiet {
			eprintln!("WARNING: I don't know the public address to reach this node.");
			eprintln!("In all of the instructions below, replace 127.0.0.1:3901 by the appropriate address and port.");
		}

		format!("{}@127.0.0.1:3901", idstr)
	};

	if !quiet {
		eprintln!();
		eprintln!(
			"To instruct a node to connect to this node, run the following command on that node:"
		);
		eprintln!(" garage [-c <config file path>] node connect {}", idstr);
		eprintln!();
		eprintln!("Or instruct them to connect from here by running:");
		eprintln!(
			" garage -c {} -h <remote node> node connect {}",
			config_file.to_string_lossy(),
			idstr
		);
		eprintln!(
			"where <remote_node> is their own node identifier in the format: <pubkey>@<ip>:<port>"
		);
		eprintln!();
		eprintln!("This node identifier can also be added as a bootstrap node in other node's garage.toml files:");
		eprintln!(" bootstrap_peers = [");
		eprintln!(" \"{}\",", idstr);
		eprintln!(" ...");
		eprintln!(" ]");
		eprintln!();
		eprintln!(
			r#"Security notice: Garage's intra-cluster communications are secured primarily by the shared
secret value rpc_secret. However, an attacker that knows rpc_secret (for example if it
leaks) cannot connect if they do not know any of the identifiers of the nodes in the
cluster. It is thus a good security measure to try to keep them secret if possible.
"#
		);
	}

	Ok(())
}
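The identifier printed by node_id_command is simply the hex-encoded 32-byte ed25519 public key, optionally suffixed with the node's public address. A hedged sketch of the format, using the hex crate and a made-up address:

fn main() {
	let node_id = [0u8; 32]; // placeholder for the key read from node_key.pub
	let idstr = format!("{}@{}", hex::encode(&node_id), "10.0.0.1:3901");
	// 64 hex characters, '@', then address:port
	assert_eq!(idstr.len(), 64 + 1 + "10.0.0.1:3901".len());
	println!("{}", idstr);
}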
9	src/garage/cli/mod.rs	Normal file
@@ -0,0 +1,9 @@
pub(crate) mod cmd;
pub(crate) mod init;
pub(crate) mod structs;
pub(crate) mod util;

pub(crate) use cmd::*;
pub(crate) use init::*;
pub(crate) use structs::*;
pub(crate) use util::*;
296	src/garage/cli/structs.rs	Normal file
@@ -0,0 +1,296 @@
use serde::{Deserialize, Serialize};

use structopt::StructOpt;

#[derive(StructOpt, Debug)]
pub enum Command {
	/// Run Garage server
	#[structopt(name = "server")]
	Server,

	/// Print identifier (public key) of this garage node.
	/// Generates a new keypair if necessary.
	#[structopt(name = "node-id")]
	NodeId(NodeIdOpt),

	/// Get network status
	#[structopt(name = "status")]
	Status,

	/// Garage node operations
	#[structopt(name = "node")]
	Node(NodeOperation),

	/// Bucket operations
	#[structopt(name = "bucket")]
	Bucket(BucketOperation),

	/// Key operations
	#[structopt(name = "key")]
	Key(KeyOperation),

	/// Start repair of node data
	#[structopt(name = "repair")]
	Repair(RepairOpt),

	/// Gather node statistics
	#[structopt(name = "stats")]
	Stats(StatsOpt),
}

#[derive(StructOpt, Debug)]
pub enum NodeOperation {
	/// Connect to Garage node that is currently isolated from the system
	#[structopt(name = "connect")]
	Connect(ConnectNodeOpt),

	/// Configure Garage node
	#[structopt(name = "configure")]
	Configure(ConfigureNodeOpt),

	/// Remove Garage node from cluster
	#[structopt(name = "remove")]
	Remove(RemoveNodeOpt),
}

#[derive(StructOpt, Debug)]
pub struct NodeIdOpt {
	/// Do not print usage instructions to stderr
	#[structopt(short = "q", long = "quiet")]
	pub(crate) quiet: bool,
}

#[derive(StructOpt, Debug)]
pub struct ConnectNodeOpt {
	/// Node public key and address, in the format:
	/// `<public key hexadecimal>@<ip or hostname>:<port>`
	pub(crate) node: String,
}

#[derive(StructOpt, Debug)]
pub struct ConfigureNodeOpt {
	/// Node to configure (prefix of hexadecimal node id)
	pub(crate) node_id: String,

	/// Location (zone or datacenter) of the node
	#[structopt(short = "z", long = "zone")]
	pub(crate) zone: Option<String>,

	/// Capacity (in relative terms, use 1 to represent your smallest server)
	#[structopt(short = "c", long = "capacity")]
	pub(crate) capacity: Option<u32>,

	/// Gateway-only node
	#[structopt(short = "g", long = "gateway")]
	pub(crate) gateway: bool,

	/// Optional node tag
	#[structopt(short = "t", long = "tag")]
	pub(crate) tag: Option<String>,

	/// Replaced node(s): list of node IDs that will be removed from the current cluster
	#[structopt(long = "replace")]
	pub(crate) replace: Vec<String>,
}

#[derive(StructOpt, Debug)]
pub struct RemoveNodeOpt {
	/// Node to configure (prefix of hexadecimal node id)
	pub(crate) node_id: String,

	/// If this flag is not given, the node won't be removed
	#[structopt(long = "yes")]
	pub(crate) yes: bool,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub enum BucketOperation {
	/// List buckets
	#[structopt(name = "list")]
	List,

	/// Get bucket info
	#[structopt(name = "info")]
	Info(BucketOpt),

	/// Create bucket
	#[structopt(name = "create")]
	Create(BucketOpt),

	/// Delete bucket
	#[structopt(name = "delete")]
	Delete(DeleteBucketOpt),

	/// Allow key to read or write to bucket
	#[structopt(name = "allow")]
	Allow(PermBucketOpt),

	/// Deny key from reading or writing to bucket
	#[structopt(name = "deny")]
	Deny(PermBucketOpt),

	/// Expose as website or not
	#[structopt(name = "website")]
	Website(WebsiteOpt),
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct WebsiteOpt {
	/// Create
	#[structopt(long = "allow")]
	pub allow: bool,

	/// Delete
	#[structopt(long = "deny")]
	pub deny: bool,

	/// Bucket name
	pub bucket: String,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct BucketOpt {
	/// Bucket name
	pub name: String,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct DeleteBucketOpt {
	/// Bucket name
	pub name: String,

	/// If this flag is not given, the bucket won't be deleted
	#[structopt(long = "yes")]
	pub yes: bool,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct PermBucketOpt {
	/// Access key name or ID
	#[structopt(long = "key")]
	pub key_pattern: String,

	/// Allow/deny read operations
	#[structopt(long = "read")]
	pub read: bool,

	/// Allow/deny write operations
	#[structopt(long = "write")]
	pub write: bool,

	/// Bucket name
	pub bucket: String,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub enum KeyOperation {
	/// List keys
	#[structopt(name = "list")]
	List,

	/// Get key info
	#[structopt(name = "info")]
	Info(KeyOpt),

	/// Create new key
	#[structopt(name = "new")]
	New(KeyNewOpt),

	/// Rename key
	#[structopt(name = "rename")]
	Rename(KeyRenameOpt),

	/// Delete key
	#[structopt(name = "delete")]
	Delete(KeyDeleteOpt),

	/// Import key
	#[structopt(name = "import")]
	Import(KeyImportOpt),
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyOpt {
	/// ID or name of the key
	pub key_pattern: String,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyNewOpt {
	/// Name of the key
	#[structopt(long = "name", default_value = "Unnamed key")]
	pub name: String,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyRenameOpt {
	/// ID or name of the key
	pub key_pattern: String,

	/// New name of the key
	pub new_name: String,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyDeleteOpt {
	/// ID or name of the key
	pub key_pattern: String,

	/// Confirm deletion
	#[structopt(long = "yes")]
	pub yes: bool,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyImportOpt {
	/// Access key ID
	pub key_id: String,

	/// Secret access key
	pub secret_key: String,

	/// Key name
	#[structopt(short = "n", default_value = "Imported key")]
	pub name: String,
}

#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
pub struct RepairOpt {
	/// Launch repair operation on all nodes
	#[structopt(short = "a", long = "all-nodes")]
	pub all_nodes: bool,

	/// Confirm the launch of the repair operation
	#[structopt(long = "yes")]
	pub yes: bool,

	#[structopt(subcommand)]
	pub what: Option<RepairWhat>,
}

#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
pub enum RepairWhat {
	/// Only do a full sync of metadata tables
	#[structopt(name = "tables")]
	Tables,
	/// Only repair (resync/rebalance) the set of stored blocks
	#[structopt(name = "blocks")]
	Blocks,
	/// Only redo the propagation of object deletions to the version table (slow)
	#[structopt(name = "versions")]
	Versions,
	/// Only redo the propagation of version deletions to the block ref table (extremely slow)
	#[structopt(name = "block_refs")]
	BlockRefs,
}

#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
pub struct StatsOpt {
	/// Gather statistics from all nodes
	#[structopt(short = "a", long = "all-nodes")]
	pub all_nodes: bool,

	/// Gather detailed statistics (this can be long)
	#[structopt(short = "d", long = "detailed")]
	pub detailed: bool,
}
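These derive-based definitions are what turn a command line into a typed value. A short sketch of how a parse could be exercised with structopt's from_iter on an explicit argv (the argv itself is made up, and the sketch assumes Command is in scope):

use structopt::StructOpt;

fn main() {
	// Equivalent to running: garage bucket allow --key mykey --read mybucket
	let cmd = Command::from_iter(vec![
		"garage", "bucket", "allow", "--key", "mykey", "--read", "mybucket",
	]);
	// Debug output shows the parsed tree:
	// Bucket(Allow(PermBucketOpt { key_pattern: "mykey", read: true, write: false, bucket: "mybucket" }))
	println!("{:?}", cmd);
}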
83	src/garage/cli/util.rs	Normal file
@@ -0,0 +1,83 @@
use garage_util::data::Uuid;
use garage_util::error::*;

use garage_model::bucket_table::*;
use garage_model::key_table::*;

pub fn print_key_info(key: &Key) {
	println!("Key name: {}", key.name.get());
	println!("Key ID: {}", key.key_id);
	println!("Secret key: {}", key.secret_key);
	if key.deleted.get() {
		println!("Key is deleted.");
	} else {
		println!("Authorized buckets:");
		for (b, _, perm) in key.authorized_buckets.items().iter() {
			println!("- {} R:{} W:{}", b, perm.allow_read, perm.allow_write);
		}
	}
}

pub fn print_bucket_info(bucket: &Bucket) {
	println!("Bucket name: {}", bucket.name);
	match bucket.state.get() {
		BucketState::Deleted => println!("Bucket is deleted."),
		BucketState::Present(p) => {
			println!("Authorized keys:");
			for (k, _, perm) in p.authorized_keys.items().iter() {
				println!("- {} R:{} W:{}", k, perm.allow_read, perm.allow_write);
			}
			println!("Website access: {}", p.website.get());
		}
	};
}

pub fn format_table(data: Vec<String>) {
	let data = data
		.iter()
		.map(|s| s.split('\t').collect::<Vec<_>>())
		.collect::<Vec<_>>();

	let columns = data.iter().map(|row| row.len()).fold(0, std::cmp::max);
	let mut column_size = vec![0; columns];

	let mut out = String::new();

	for row in data.iter() {
		for (i, col) in row.iter().enumerate() {
			column_size[i] = std::cmp::max(column_size[i], col.chars().count());
		}
	}

	for row in data.iter() {
		for (col, col_len) in row[..row.len() - 1].iter().zip(column_size.iter()) {
			out.push_str(col);
			(0..col_len - col.chars().count() + 2).for_each(|_| out.push(' '));
		}
		out.push_str(&row[row.len() - 1]);
		out.push('\n');
	}

	print!("{}", out);
}

pub fn find_matching_node(
	cand: impl std::iter::Iterator<Item = Uuid>,
	pattern: &str,
) -> Result<Uuid, Error> {
	let mut candidates = vec![];
	for c in cand {
		if hex::encode(&c).starts_with(&pattern) {
			candidates.push(c);
		}
	}
	if candidates.len() != 1 {
		Err(Error::Message(format!(
			"{} nodes match '{}'",
			candidates.len(),
			pattern,
		)))
	} else {
		Ok(candidates[0])
	}
}
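format_table expects one tab-separated string per row, the first usually being a header, and pads every column to its widest cell plus two spaces. A quick sketch of input and resulting output, assuming the function above is in scope:

format_table(vec![
	"ID\tHostname\tZone".to_string(),
	"1fe3\tmars\tpar1".to_string(),
]);
// prints:
// ID    Hostname  Zone
// 1fe3  mars      par1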
src/garage/main.rs
@@ -4,22 +4,24 @@
 #[macro_use]
 extern crate log;

-mod admin_rpc;
+mod admin;
 mod cli;
 mod repair;
 mod server;

+use std::path::PathBuf;
+
 use structopt::StructOpt;

-use netapp::util::parse_peer_addr;
+use netapp::util::parse_and_resolve_peer_addr;
 use netapp::NetworkKey;

-use garage_util::error::Error;
+use garage_util::error::*;

 use garage_rpc::system::*;
 use garage_rpc::*;

-use admin_rpc::*;
+use admin::*;
 use cli::*;

 #[derive(StructOpt, Debug)]
@@ -34,6 +36,10 @@ struct Opt {
 	#[structopt(short = "s", long = "rpc-secret")]
 	pub rpc_secret: Option<String>,

+	/// Configuration file (garage.toml)
+	#[structopt(short = "c", long = "config", default_value = "/etc/garage.toml")]
+	pub config_file: PathBuf,
+
 	#[structopt(subcommand)]
 	cmd: Command,
 }
@@ -45,38 +51,68 @@ async fn main() {

 	let opt = Opt::from_args();

-	let res = if let Command::Server(server_opt) = opt.cmd {
+	let res = match opt.cmd {
+		Command::Server => {
 			// Abort on panic (same behavior as in Go)
 			std::panic::set_hook(Box::new(|panic_info| {
 				error!("{}", panic_info.to_string());
 				std::process::abort();
 			}));

-		server::run_server(server_opt.config_file).await
-	} else {
-		cli_command(opt).await
+			server::run_server(opt.config_file).await
+		}
+		Command::NodeId(node_id_opt) => node_id_command(opt.config_file, node_id_opt.quiet),
+		_ => cli_command(opt).await,
 	};

 	if let Err(e) = res {
-		error!("{}", e);
+		eprintln!("Error: {}", e);
+		std::process::exit(1);
 	}
 }

 async fn cli_command(opt: Opt) -> Result<(), Error> {
-	let net_key_hex_str = &opt.rpc_secret.expect("No RPC secret provided");
+	let config = if opt.rpc_secret.is_none() || opt.rpc_host.is_none() {
+		Some(garage_util::config::read_config(opt.config_file.clone())
+			.err_context(format!("Unable to read configuration file {}. Configuration file is needed because -h or -s is not provided on the command line.", opt.config_file.to_string_lossy()))?)
+	} else {
+		None
+	};
+
+	// Find and parse network RPC secret
+	let net_key_hex_str = opt
+		.rpc_secret
+		.as_ref()
+		.or_else(|| config.as_ref().map(|c| &c.rpc_secret))
+		.ok_or("No RPC secret provided")?;
 	let network_key = NetworkKey::from_slice(
-		&hex::decode(net_key_hex_str).expect("Invalid RPC secret key (bad hex)")[..],
+		&hex::decode(net_key_hex_str).err_context("Invalid RPC secret key (bad hex)")?[..],
 	)
-	.expect("Invalid RPC secret provided (wrong length)");
+	.ok_or("Invalid RPC secret provided (wrong length)")?;

+	// Generate a temporary keypair for our RPC client
 	let (_pk, sk) = sodiumoxide::crypto::sign::ed25519::gen_keypair();

 	let netapp = NetApp::new(network_key, sk);
-	let (id, addr) =
-		parse_peer_addr(&opt.rpc_host.expect("No RPC host provided")).expect("Invalid RPC host");
-	netapp.clone().try_connect(addr, id).await?;
+
+	// Find and parse the address of the target host
+	let (id, addr) = if let Some(h) = opt.rpc_host {
+		let (id, addrs) = parse_and_resolve_peer_addr(&h).ok_or_else(|| format!("Invalid RPC remote node identifier: {}. Expected format is <pubkey>@<IP or hostname>:<port>.", h))?;
+		(id, addrs[0])
+	} else if let Some(a) = config.as_ref().map(|c| c.rpc_public_addr).flatten() {
+		let node_id = garage_rpc::system::read_node_id(&config.unwrap().metadata_dir)
+			.err_context(READ_KEY_ERROR)?;
+		(node_id, a)
+	} else {
+		return Err(Error::Message("No RPC host provided".into()));
+	};
+
+	// Connect to target host
+	netapp.clone().try_connect(addr, id).await
+		.err_context("Unable to connect to destination RPC host. Check that you are using the same value of rpc_secret as them, and that you have their correct public key.")?;

 	let system_rpc_endpoint = netapp.endpoint::<SystemRpc, ()>(SYSTEM_RPC_PATH.into());
 	let admin_rpc_endpoint = netapp.endpoint::<AdminRpc, ()>(ADMIN_RPC_PATH.into());

-	cli_cmd(opt.cmd, &system_rpc_endpoint, &admin_rpc_endpoint, id).await
+	cli_command_dispatch(opt.cmd, &system_rpc_endpoint, &admin_rpc_endpoint, id).await
 }
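The configuration fallback introduced above follows one rule: an explicit command-line value wins, otherwise the config file is consulted, otherwise the command fails. A reduced sketch of that precedence, with String in place of the real config types:

fn resolve_rpc_secret(
	cli_flag: Option<String>,     // -s on the command line
	config_value: Option<String>, // rpc_secret from garage.toml
) -> Result<String, &'static str> {
	cli_flag.or(config_value).ok_or("No RPC secret provided")
}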
src/garage/server.rs
@@ -10,7 +10,7 @@ use garage_api::run_api_server;
 use garage_model::garage::Garage;
 use garage_web::run_web_server;

-use crate::admin_rpc::*;
+use crate::admin::*;

 async fn wait_from(mut chan: watch::Receiver<bool>) {
 	while !*chan.borrow() {
src/model/block.rs
@@ -98,7 +98,9 @@ impl BlockManager {
 			.open_tree("block_local_resync_queue")
 			.expect("Unable to open block_local_resync_queue tree");

-		let endpoint = system.netapp.endpoint(format!("garage_model/block.rs/Rpc"));
+		let endpoint = system
+			.netapp
+			.endpoint("garage_model/block.rs/Rpc".to_string());

 		let block_manager = Arc::new(Self {
 			replication,
src/model/garage.rs
@@ -57,14 +57,9 @@ impl Garage {
 		info!("Initialize membership management system...");
 		let system = System::new(
 			network_key,
-			config.metadata_dir.clone(),
 			background.clone(),
 			replication_mode.replication_factor(),
-			config.rpc_bind_addr,
-			config.rpc_public_addr,
-			config.bootstrap_peers.clone(),
-			config.consul_host.clone(),
-			config.consul_service_name.clone(),
+			&config,
 		);

 		let data_rep_param = TableShardedReplication {
src/rpc/rpc_helper.rs
@@ -14,8 +14,8 @@ pub use netapp::proto::*;
 pub use netapp::{NetApp, NodeID};

 use garage_util::background::BackgroundRunner;
-use garage_util::error::Error;
 use garage_util::data::Uuid;
+use garage_util::error::Error;

 const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);

@@ -203,7 +203,7 @@ impl RpcHelper {
 			Ok(results)
 		} else {
 			let errors = errors.iter().map(|e| format!("{}", e)).collect::<Vec<_>>();
-			Err(Error::TooManyErrors(errors))
+			Err(Error::Quorum(quorum, results.len(), to.len(), errors))
 		}
 	}
 }
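The branch above is the tail of a quorum check: the call succeeds as soon as enough nodes have answered, and otherwise returns every collected error. A schematic version of that logic (names and types here are illustrative, not garage_rpc's actual API):

fn quorum_result(
	results: Vec<u32>,   // successful responses gathered so far
	errors: Vec<String>, // failures reported by the other nodes
	quorum: usize,       // how many successes are required
	targets: usize,      // how many nodes were queried in total
) -> Result<Vec<u32>, String> {
	if results.len() >= quorum {
		Ok(results)
	} else {
		Err(format!(
			"Quorum failed: {} of {} nodes answered ({} required), errors: {:?}",
			results.len(), targets, quorum, errors
		))
	}
}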
src/rpc/system.rs
@@ -2,9 +2,9 @@
 use std::collections::HashMap;
 use std::io::{Read, Write};
 use std::net::SocketAddr;
-use std::path::{Path, PathBuf};
+use std::path::Path;
 use std::sync::{Arc, RwLock};
-use std::time::Duration;
+use std::time::{Duration, Instant};

 use arc_swap::ArcSwap;
 use async_trait::async_trait;
@@ -18,12 +18,13 @@ use tokio::sync::Mutex;
 use netapp::endpoint::{Endpoint, EndpointHandler};
 use netapp::peering::fullmesh::FullMeshPeeringStrategy;
 use netapp::proto::*;
-use netapp::{NetApp, NetworkKey, NodeID, NodeKey};
 use netapp::util::parse_and_resolve_peer_addr;
+use netapp::{NetApp, NetworkKey, NodeID, NodeKey};

 use garage_util::background::BackgroundRunner;
+use garage_util::config::Config;
 use garage_util::data::Uuid;
-use garage_util::error::Error;
+use garage_util::error::*;
 use garage_util::persister::Persister;
 use garage_util::time::*;

@@ -38,6 +39,8 @@ const PING_TIMEOUT: Duration = Duration::from_secs(2);
 /// RPC endpoint used for calls related to membership
 pub const SYSTEM_RPC_PATH: &str = "garage_rpc/membership.rs/SystemRpc";

+pub const CONNECT_ERROR_MESSAGE: &str = "Error establishing RPC connection to remote node. This can happen if the remote node is not reachable on the network, but also if the two nodes are not configured with the same rpc_secret";
+
 /// RPC messages related to membership
 #[derive(Debug, Serialize, Deserialize, Clone)]
 pub enum SystemRpc {
@@ -109,10 +112,27 @@ pub struct KnownNodeInfo {
 	pub id: Uuid,
 	pub addr: SocketAddr,
 	pub is_up: bool,
+	pub last_seen_secs_ago: Option<u64>,
 	pub status: NodeStatus,
 }

-fn gen_node_key(metadata_dir: &Path) -> Result<NodeKey, Error> {
+pub fn read_node_id(metadata_dir: &Path) -> Result<NodeID, Error> {
+	let mut pubkey_file = metadata_dir.to_path_buf();
+	pubkey_file.push("node_key.pub");
+
+	let mut f = std::fs::File::open(pubkey_file.as_path())?;
+	let mut d = vec![];
+	f.read_to_end(&mut d)?;
+	if d.len() != 32 {
+		return Err(Error::Message("Corrupt node_key.pub file".to_string()));
+	}
+
+	let mut key = [0u8; 32];
+	key.copy_from_slice(&d[..]);
+	Ok(NodeID::from_slice(&key[..]).unwrap())
+}
+
+pub fn gen_node_key(metadata_dir: &Path) -> Result<NodeKey, Error> {
 	let mut key_file = metadata_dir.to_path_buf();
 	key_file.push("node_key");
 	if key_file.as_path().exists() {

@@ -127,10 +147,30 @@ fn gen_node_key(metadata_dir: &Path) -> Result<NodeKey, Error> {
 		key.copy_from_slice(&d[..]);
 		Ok(NodeKey::from_slice(&key[..]).unwrap())
 	} else {
-		let (_, key) = ed25519::gen_keypair();
+		if !metadata_dir.exists() {
+			info!("Metadata directory does not exist, creating it.");
+			std::fs::create_dir(&metadata_dir)?;
+		}
+
+		info!("Generating new node key pair.");
+		let (pubkey, key) = ed25519::gen_keypair();
+
+		{
+			use std::os::unix::fs::PermissionsExt;
 			let mut f = std::fs::File::create(key_file.as_path())?;
+			let mut perm = f.metadata()?.permissions();
+			perm.set_mode(0o600);
+			std::fs::set_permissions(key_file.as_path(), perm)?;
 			f.write_all(&key[..])?;
+		}
+
+		{
+			let mut pubkey_file = metadata_dir.to_path_buf();
+			pubkey_file.push("node_key.pub");
+			let mut f2 = std::fs::File::create(pubkey_file.as_path())?;
+			f2.write_all(&pubkey[..])?;
+		}
+
 		Ok(key)
 	}
 }

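Taken together, the two helpers above split key handling cleanly: gen_node_key writes the secret key with restrictive permissions and now also publishes the public half, while read_node_id recovers the node's public identity without touching the secret key. A minimal usage sketch (inside this module; the directory path is hypothetical):

    fn show_node_id() -> Result<(), Error> {
        // Hypothetical metadata directory:
        let metadata_dir = std::path::PathBuf::from("/var/lib/garage/meta");
        let _secret = gen_node_key(&metadata_dir)?; // creates node_key (mode 0600) and node_key.pub
        let node_id = read_node_id(&metadata_dir)?; // the 32-byte ed25519 public key
        println!("{}", hex::encode(&node_id));
        Ok(())
    }
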
@@ -139,20 +179,16 @@ impl System {
 	/// Create this node's membership manager
 	pub fn new(
 		network_key: NetworkKey,
-		metadata_dir: PathBuf,
 		background: Arc<BackgroundRunner>,
 		replication_factor: usize,
-		rpc_listen_addr: SocketAddr,
-		rpc_public_address: Option<SocketAddr>,
-		bootstrap_peers: Vec<(NodeID, SocketAddr)>,
-		consul_host: Option<String>,
-		consul_service_name: Option<String>,
+		config: &Config,
 	) -> Arc<Self> {
-		let node_key = gen_node_key(&metadata_dir).expect("Unable to read or generate node ID");
+		let node_key =
+			gen_node_key(&config.metadata_dir).expect("Unable to read or generate node ID");
 		info!("Node public key: {}", hex::encode(&node_key.public_key()));

-		let persist_config = Persister::new(&metadata_dir, "network_config");
-		let persist_peer_list = Persister::new(&metadata_dir, "peer_list");
+		let persist_config = Persister::new(&config.metadata_dir, "network_config");
+		let persist_peer_list = Persister::new(&config.metadata_dir, "peer_list");

 		let net_config = match persist_config.load() {
 			Ok(x) => x,

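For comparison, the first hunk of this commit (top of this page) shows the call site shrinking to match; roughly:

    let system = System::new(
        network_key,
        background.clone(),
        replication_mode.replication_factor(),
        &config, // the parsed garage_util::config::Config
    );
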
@@ -169,14 +205,14 @@ impl System {
 			hostname: gethostname::gethostname()
 				.into_string()
 				.unwrap_or_else(|_| "<invalid utf-8>".to_string()),
-			replication_factor: replication_factor,
+			replication_factor,
 			config_version: net_config.version,
 		};

 		let ring = Ring::new(net_config, replication_factor);
 		let (update_ring, ring) = watch::channel(Arc::new(ring));

-		if let Some(addr) = rpc_public_address {
+		if let Some(addr) = config.rpc_public_addr {
 			println!("{}@{}", hex::encode(&node_key.public_key()), addr);
 		} else {
 			println!("{}", hex::encode(&node_key.public_key()));

@@ -185,8 +221,8 @@ impl System {
 		let netapp = NetApp::new(network_key, node_key);
 		let fullmesh = FullMeshPeeringStrategy::new(
 			netapp.clone(),
-			bootstrap_peers.clone(),
-			rpc_public_address,
+			config.bootstrap_peers.clone(),
+			config.rpc_public_addr,
 		);

 		let system_endpoint = netapp.endpoint(SYSTEM_RPC_PATH.into());

@@ -200,19 +236,19 @@ impl System {
 			netapp: netapp.clone(),
 			fullmesh: fullmesh.clone(),
 			rpc: RpcHelper {
-				fullmesh: fullmesh.clone(),
+				fullmesh,
 				background: background.clone(),
 			},
 			system_endpoint,
 			replication_factor,
-			rpc_listen_addr,
-			rpc_public_addr: rpc_public_address,
-			bootstrap_peers,
-			consul_host,
-			consul_service_name,
+			rpc_listen_addr: config.rpc_bind_addr,
+			rpc_public_addr: config.rpc_public_addr,
+			bootstrap_peers: config.bootstrap_peers.clone(),
+			consul_host: config.consul_host.clone(),
+			consul_service_name: config.consul_service_name.clone(),
 			ring,
 			update_ring: Mutex::new(update_ring),
-			background: background.clone(),
+			background,
 		});
 		sys.system_endpoint.set_handler(sys.clone());
 		sys

@@ -255,7 +291,7 @@ impl System {
 			rpc_public_addr,
 		)
 		.await
-		.map_err(|e| Error::Message(format!("Error while publishing Consul service: {}", e)))
+		.err_context("Error while publishing Consul service")
 	}

 	/// Save network configuration to disc

@@ -277,18 +313,31 @@ impl System {
 	}

 	async fn handle_connect(&self, node: &str) -> Result<SystemRpc, Error> {
-		let (pubkey, addrs) = parse_and_resolve_peer_addr(node)
-			.ok_or_else(|| Error::Message(format!("Unable to parse or resolve node specification: {}", node)))?;
+		let (pubkey, addrs) = parse_and_resolve_peer_addr(node).ok_or_else(|| {
+			Error::Message(format!(
+				"Unable to parse or resolve node specification: {}",
+				node
+			))
+		})?;
 		let mut errors = vec![];
 		for ip in addrs.iter() {
-			match self.netapp.clone().try_connect(*ip, pubkey).await {
+			match self
+				.netapp
+				.clone()
+				.try_connect(*ip, pubkey)
+				.await
+				.err_context(CONNECT_ERROR_MESSAGE)
+			{
 				Ok(()) => return Ok(SystemRpc::Ok),
 				Err(e) => {
 					errors.push((*ip, e));
 				}
 			}
 		}
-		return Err(Error::Message(format!("Could not connect to specified peers. Errors: {:?}", errors)));
+		return Err(Error::Message(format!(
+			"Could not connect to specified peers. Errors: {:?}",
+			errors
+		)));
 	}

 	fn handle_pull_config(&self) -> SystemRpc {

@@ -298,21 +347,24 @@ impl System {

 	fn handle_get_known_nodes(&self) -> SystemRpc {
 		let node_status = self.node_status.read().unwrap();
-		let known_nodes =
-			self.fullmesh
+		let known_nodes = self
+			.fullmesh
 			.get_peer_list()
 			.iter()
 			.map(|n| KnownNodeInfo {
 				id: n.id.into(),
 				addr: n.addr,
 				is_up: n.is_up(),
-				status: node_status.get(&n.id.into()).cloned().map(|(_, st)| st).unwrap_or(
-					NodeStatus {
+				last_seen_secs_ago: n.last_seen.map(|t| (Instant::now() - t).as_secs()),
+				status: node_status
+					.get(&n.id.into())
+					.cloned()
+					.map(|(_, st)| st)
+					.unwrap_or(NodeStatus {
 						hostname: "?".to_string(),
 						replication_factor: 0,
 						config_version: 0,
-					},
-				),
+					}),
 			})
 			.collect::<Vec<_>>();
 		SystemRpc::ReturnKnownNodes(known_nodes)

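This last_seen_secs_ago computation is what the new std::time::Instant import earlier in this diff is for; the field stays None for peers never seen since this node started. The same computation in isolation, as a sketch:

    use std::time::Instant;

    // last_seen is the Option<Instant> tracked by netapp's full-mesh peering.
    fn secs_ago(last_seen: Option<Instant>) -> Option<u64> {
        last_seen.map(|t| (Instant::now() - t).as_secs())
    }
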
@@ -361,14 +413,14 @@ impl System {
 		drop(update_ring);

 		let self2 = self.clone();
-		let adv2 = adv.clone();
+		let adv = adv.clone();
 		self.background.spawn_cancellable(async move {
 			self2
 				.rpc
 				.broadcast(
 					&self2.system_endpoint,
-					SystemRpc::AdvertiseConfig(adv2),
-					RequestStrategy::with_priority(PRIO_NORMAL),
+					SystemRpc::AdvertiseConfig(adv),
+					RequestStrategy::with_priority(PRIO_HIGH),
 				)
 				.await;
 			Ok(())

@@ -439,7 +491,12 @@ impl System {
 		}

 		for (node_id, node_addr) in ping_list {
-			tokio::spawn(self.netapp.clone().try_connect(node_addr, node_id));
+			tokio::spawn(
+				self.netapp
+					.clone()
+					.try_connect(node_addr, node_id)
+					.map(|r| r.err_context(CONNECT_ERROR_MESSAGE)),
+			);
 		}
 	}

@@ -28,11 +28,7 @@ impl TableReplication for TableFullReplication {

 	fn write_nodes(&self, _hash: &Hash) -> Vec<Uuid> {
 		let ring = self.system.ring.borrow();
-		ring.config
-			.members
-			.keys()
-			.cloned()
-			.collect::<Vec<_>>()
+		ring.config.members.keys().cloned().collect::<Vec<_>>()
 	}
 	fn write_quorum(&self) -> usize {
 		let nmembers = self.system.ring.borrow().config.members.len();

@@ -6,8 +6,8 @@ use std::path::PathBuf;
 use serde::de::Error as SerdeError;
 use serde::{de, Deserialize};

-use netapp::NodeID;
 use netapp::util::parse_and_resolve_peer_addr;
+use netapp::NodeID;

 use crate::error::Error;

@@ -46,10 +46,6 @@ pub struct Config {
 	/// Consul service name to use
 	pub consul_service_name: Option<String>,

-	/// Max number of concurrent RPC request
-	#[serde(default = "default_max_concurrent_rpc_requests")]
-	pub max_concurrent_rpc_requests: usize,
-
 	/// Sled cache size, in bytes
 	#[serde(default = "default_sled_cache_capacity")]
 	pub sled_cache_capacity: u64,

@@ -91,9 +87,6 @@ fn default_sled_cache_capacity() -> u64 {
 fn default_sled_flush_every_ms() -> u64 {
 	2000
 }
-fn default_max_concurrent_rpc_requests() -> usize {
-	12
-}
 fn default_block_size() -> usize {
 	1048576
 }

@@ -117,10 +110,11 @@
 	let mut ret = vec![];

 	for peer in <Vec<&str>>::deserialize(deserializer)? {
-		let (pubkey, addrs) = parse_and_resolve_peer_addr(peer)
-			.ok_or_else(|| D::Error::custom(format!("Unable to parse or resolve peer: {}", peer)))?;
+		let (pubkey, addrs) = parse_and_resolve_peer_addr(peer).ok_or_else(|| {
+			D::Error::custom(format!("Unable to parse or resolve peer: {}", peer))
+		})?;
 		for ip in addrs {
-			ret.push((pubkey.clone(), ip));
+			ret.push((pubkey, ip));
 		}
 	}

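The peer strings accepted here are netapp node specifications, conventionally of the form <hex-encoded public key>@<host or IP>:<port>. A sketch, with a made-up (and shortened) key and host:

    // parse_and_resolve_peer_addr returns the node's public key plus every
    // socket address the host part resolves to, hence the inner `for ip` loop.
    let spec = "563e...95d@garage.example.com:3901"; // hypothetical
    if let Some((pubkey, addrs)) = netapp::util::parse_and_resolve_peer_addr(spec) {
        for addr in addrs {
            println!("{} -> {}", hex::encode(&pubkey), addr);
        }
    }
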
@@ -93,9 +93,9 @@ impl From<netapp::NodeID> for FixedBytes32 {
 	}
 }

-impl Into<netapp::NodeID> for FixedBytes32 {
-	fn into(self) -> netapp::NodeID {
-		netapp::NodeID::from_slice(self.as_slice()).unwrap()
+impl From<FixedBytes32> for netapp::NodeID {
+	fn from(bytes: FixedBytes32) -> netapp::NodeID {
+		netapp::NodeID::from_slice(bytes.as_slice()).unwrap()
 	}
 }

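Replacing the hand-written Into impl with a From impl is the direction clippy's from_over_into lint recommends: the standard library's blanket impl of Into for anything with a matching From keeps both call styles working. Illustration, assuming bytes is a FixedBytes32 value (which is Copy):

    let id = netapp::NodeID::from(bytes); // explicit
    let id2: netapp::NodeID = bytes.into(); // derived by the blanket impl
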
@@ -47,8 +47,14 @@ pub enum Error {
 	#[error(display = "Timeout")]
 	Timeout,

-	#[error(display = "Too many errors: {:?}", _0)]
-	TooManyErrors(Vec<String>),
+	#[error(
+		display = "Could not reach quorum of {}. {} of {} request succeeded, others returned errors: {:?}",
+		_0,
+		_1,
+		_2,
+		_3
+	)]
+	Quorum(usize, usize, usize, Vec<String>),

 	#[error(display = "Bad RPC: {}", _0)]
 	BadRpc(String),

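Given the arguments passed in rpc_helper.rs above (quorum, results.len(), to.len(), errors), a failed quorum write now renders along these lines (illustrative values):

    let err = Error::Quorum(2, 1, 3, vec!["Timeout".to_string(), "Timeout".to_string()]);
    println!("{}", err);
    // Could not reach quorum of 2. 1 of 3 request succeeded, others returned errors: ["Timeout", "Timeout"]
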
@@ -81,6 +87,35 @@ impl<T> From<tokio::sync::mpsc::error::SendError<T>> for Error {
 	}
 }

+impl<'a> From<&'a str> for Error {
+	fn from(v: &'a str) -> Error {
+		Error::Message(v.to_string())
+	}
+}
+
+impl From<String> for Error {
+	fn from(v: String) -> Error {
+		Error::Message(v)
+	}
+}
+
+pub trait ErrorContext<T, E> {
+	fn err_context<C: std::borrow::Borrow<str>>(self, ctx: C) -> Result<T, Error>;
+}
+
+impl<T, E> ErrorContext<T, E> for Result<T, E>
+where
+	E: std::fmt::Display,
+{
+	#[inline]
+	fn err_context<C: std::borrow::Borrow<str>>(self, ctx: C) -> Result<T, Error> {
+		match self {
+			Ok(x) => Ok(x),
+			Err(e) => Err(Error::Message(format!("{}\n{}", ctx.borrow(), e))),
+		}
+	}
+}
+
 // Custom serialization for our error type, for use in RPC.
 // Errors are serialized as a string of their Display representation.
 // Upon deserialization, they all become a RemoteError with the

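A minimal usage sketch for the new ErrorContext helper (the function and path are hypothetical): any Result whose error type implements Display can be annotated with a context line and converted into this crate's Error, which is how the Consul-publishing and try_connect call sites above use it.

    use garage_util::error::{Error, ErrorContext};

    fn read_config_bytes(path: &std::path::Path) -> Result<Vec<u8>, Error> {
        // std::io::Error implements Display; the context string is prepended
        // to the underlying error message, separated by a newline.
        std::fs::read(path).err_context("Could not read configuration file")
    }
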
@@ -39,7 +39,9 @@ impl Error {
 			Error::NotFound => StatusCode::NOT_FOUND,
 			Error::ApiError(e) => e.http_status_code(),
 			Error::InternalError(
-				GarageError::Timeout | GarageError::RemoteError(_) | GarageError::TooManyErrors(_),
+				GarageError::Timeout
+				| GarageError::RemoteError(_)
+				| GarageError::Quorum(_, _, _, _),
 			) => StatusCode::SERVICE_UNAVAILABLE,
 			Error::InternalError(_) => StatusCode::INTERNAL_SERVER_ERROR,
 			_ => StatusCode::BAD_REQUEST,