Chapter 5 (#13)

* Add chapter 5

* Basic deployment stuff

* Fix Dockerfile.

* Add an explicit connection timeout.

* Align port with configuration.

* Use debug for faster feedback loops.

* Add address configuration.

* Provision database.

* Use structured options.

* Add configuration

* Fix typo.

* Customise deserialization logic.

* Change to Require.

* Fix spec.

* Add a few more things to the dockerignore file.

* Update to match chapter.

* Add base configuration.

* Amend configuration.

Co-authored-by: LukeMathWalker <rust@lpalmieri.com>
Luca Palmieri 2020-11-01 21:25:11 +00:00, committed by GitHub
parent 39b7682fea
commit 0a4addc6bb
22 changed files with 648 additions and 1 deletion

42
Cargo.lock generated

@@ -520,6 +520,29 @@ dependencies = [
 "uuid",
]

[[package]]
name = "chapter05"
version = "0.1.0"
dependencies = [
 "actix-rt",
 "actix-web",
 "chrono",
 "config",
 "lazy_static",
 "reqwest",
 "serde",
 "serde-aux",
 "sqlx",
 "tokio",
 "tracing",
 "tracing-actix-web",
 "tracing-bunyan-formatter",
 "tracing-futures",
 "tracing-log",
 "tracing-subscriber",
 "uuid",
]

[[package]]
name = "chrono"
version = "0.4.19"

@@ -702,6 +725,9 @@ name = "either"
version = "1.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
dependencies = [
 "serde",
]

[[package]]
name = "encoding_rs"

@@ -1774,6 +1800,17 @@ dependencies = [
 "serde_derive",
]

[[package]]
name = "serde-aux"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7fb1c0e382599b35cf66986b74182d3787605bd4c3087b4091ee305a692f071"
dependencies = [
 "chrono",
 "serde",
 "serde_json",
]

[[package]]
name = "serde_derive"
version = "1.0.117"

@@ -1791,6 +1828,7 @@ version = "1.0.59"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcac07dbffa1c65e7f816ab9eba78eb142c6d44410f4eeba1e26e4f5dfa56b95"
dependencies = [
 "indexmap",
 "itoa",
 "ryu",
 "serde",

@@ -1937,6 +1975,7 @@ dependencies = [
 "parking_lot",
 "percent-encoding",
 "rand",
 "serde",
 "sha-1",
 "sha2",
 "smallvec",

@@ -1959,8 +1998,11 @@ dependencies = [
 "either",
 "futures",
 "heck",
 "hex",
 "proc-macro2",
 "quote",
 "serde",
 "serde_json",
 "sha2",
 "sqlx-core",
 "sqlx-rt",

2
Cargo.toml

@@ -1,2 +1,2 @@
[workspace]
members = ["chapter03-0", "chapter03-1", "chapter04", "chapter05"]

9
chapter05/.dockerignore Normal file
@@ -0,0 +1,9 @@
.env
.dockerignore
spec.yaml
target/
deploy/
tests/
Dockerfile
scripts/
migrations/

1
chapter05/.env Normal file
@@ -0,0 +1 @@
DATABASE_URL="postgres://postgres:password@localhost:5432/newsletter"

34
chapter05/Cargo.toml Normal file
@@ -0,0 +1,34 @@
[package]
name = "chapter05"
version = "0.1.0"
authors = ["LukeMathWalker <rust@lpalmieri.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
path = "src/lib.rs"
[[bin]]
path = "src/main.rs"
name = "chapter05"
[dependencies]
actix-web = "3.0.0"
actix-rt = "1.1.1"
tokio = "0.2.22"
serde = "1.0.115"
config = { version = "0.10.1", default-features = false, features = ["yaml"] }
sqlx = { version = "0.4.0-beta.1", default-features = false, features = [ "runtime-tokio", "macros", "postgres", "uuid", "chrono", "migrate", "offline"] }
uuid = { version = "0.8.1", features = ["v4"] }
chrono = "0.4.15"
tracing = "0.1.19"
tracing-futures = "0.2.4"
tracing-subscriber = { version = "0.2.12", features = ["registry", "env-filter"] }
tracing-bunyan-formatter = "0.1.6"
tracing-log = "0.1.1"
tracing-actix-web = "0.2.0"
serde-aux = "1.0.1"
[dev-dependencies]
reqwest = { version = "0.10.7", features = ["json"] }
lazy_static = "1.4.0"

33
chapter05/Dockerfile Normal file
@@ -0,0 +1,33 @@
FROM rust:1.47 as planner
WORKDIR app
# We only pay the installation cost once,
# it will be cached from the second build onwards
# To ensure a reproducible build consider pinning
# the cargo-chef version with `--version X.X.X`
RUN cargo install cargo-chef
COPY . .
# Compute a lock-like file for our project
RUN cargo chef prepare --recipe-path recipe.json
FROM rust:1.47 as cacher
WORKDIR app
RUN cargo install cargo-chef
COPY --from=planner /app/recipe.json recipe.json
# Build our project dependencies, not our application!
RUN cargo chef cook --release --recipe-path recipe.json
FROM rust:1.47 as builder
WORKDIR app
COPY . .
# Copy over the cached dependencies
COPY --from=cacher /app/target target
COPY --from=cacher /usr/local/cargo /usr/local/cargo
# Build our application, leveraging the cached deps!
RUN cargo build --release --bin chapter05
FROM rust:1.47-slim as runtime
WORKDIR app
COPY --from=builder /app/target/release/chapter05 /usr/local/bin
COPY configuration configuration
ENV APP_ENVIRONMENT production
# The binary was copied into /usr/local/bin above, so that's the path to run
ENTRYPOINT ["/usr/local/bin/chapter05"]

10
chapter05/configuration/base.yaml Normal file

@@ -0,0 +1,10 @@
application:
port: 8000
host: 0.0.0.0
database:
host: "localhost"
port: 5432
username: "postgres"
password: "password"
database_name: "newsletter"
require_ssl: false

4
chapter05/configuration/local.yaml Normal file

@@ -0,0 +1,4 @@
application:
host: 127.0.0.1
database:
require_ssl: false

4
chapter05/configuration/production.yaml Normal file

@@ -0,0 +1,4 @@
application:
host: 0.0.0.0
database:
require_ssl: true

8
chapter05/migrations/<timestamp>_create_subscriptions_table.sql Normal file

@@ -0,0 +1,8 @@
-- Create Subscriptions Table
CREATE TABLE subscriptions(
id uuid NOT NULL,
PRIMARY KEY (id),
email TEXT NOT NULL UNIQUE,
name TEXT NOT NULL,
subscribed_at timestamptz NOT NULL
);

40
chapter05/scripts/init_db.sh Executable file
@@ -0,0 +1,40 @@
#!/usr/bin/env bash
set -x
set -eo pipefail
# Check if a custom user has been set, otherwise default to 'postgres'
DB_USER=${POSTGRES_USER:=postgres}
# Check if a custom password has been set, otherwise default to 'password'
DB_PASSWORD="${POSTGRES_PASSWORD:=password}"
# Check if a custom database name has been set, otherwise default to 'newsletter'
DB_NAME="${POSTGRES_DB:=newsletter}"
# Check if a custom port has been set, otherwise default to '5432'
DB_PORT="${POSTGRES_PORT:=5432}"
# Allow skipping Docker if a dockerized Postgres database is already running
if [[ -z "${SKIP_DOCKER}" ]]
then
# Launch postgres using Docker
docker run \
-e POSTGRES_USER=${DB_USER} \
-e POSTGRES_PASSWORD=${DB_PASSWORD} \
-e POSTGRES_DB=${DB_NAME} \
-p "${DB_PORT}":5432 \
-d postgres \
postgres -N 1000
# ^ Increased maximum number of connections for testing purposes
fi
# Keep pinging Postgres until it's ready to accept commands
until PGPASSWORD="${DB_PASSWORD}" psql -h "localhost" -U "${DB_USER}" -p "${DB_PORT}" -d "postgres" -c '\q'; do
>&2 echo "Postgres is still unavailable - sleeping"
sleep 1
done
>&2 echo "Postgres is up and running on port ${DB_PORT} - running migrations now!"
export DATABASE_URL=postgres://${DB_USER}:${DB_PASSWORD}@localhost:${DB_PORT}/${DB_NAME}
sqlx database create
sqlx migrate run
>&2 echo "Postgres has been migrated, ready to go!"

54
chapter05/spec.yaml Normal file
@@ -0,0 +1,54 @@
name: zero2prod
# See https://www.digitalocean.com/docs/app-platform/#regional-availability for the available options
# You can get region slugs from https://www.digitalocean.com/docs/platform/availability-matrix/
# `fra` stands for Frankfurt (Germany - EU)
region: fra
services:
- name: zero2prod
# Relative to the repository root
dockerfile_path: chapter05/Dockerfile
source_dir: chapter05
github:
branch: ch-05
deploy_on_push: true
repo: LukeMathWalker/zero-to-production
# Active probe used by DigitalOcean to ensure our application is healthy
health_check:
# The path to our health check endpoint! It turned out to be useful in the end!
http_path: /health_check
# The port the application will be listening on for incoming requests
# It should match what we specify in our configuration files!
http_port: 8000
# For production workloads we'd go for at least two!
instance_count: 1
# Let's keep the bill lean for now...
instance_size_slug: basic-xxs
# All incoming requests should be routed to our app
routes:
- path: /
envs:
- key: APP_DATABASE__USERNAME
scope: RUN_TIME
value: ${newsletter.USERNAME}
- key: APP_DATABASE__PASSWORD
scope: RUN_TIME
value: ${newsletter.PASSWORD}
- key: APP_DATABASE__HOST
scope: RUN_TIME
value: ${newsletter.HOSTNAME}
- key: APP_DATABASE__PORT
scope: RUN_TIME
value: ${newsletter.PORT}
- key: APP_DATABASE__DATABASE_NAME
scope: RUN_TIME
value: ${newsletter.DATABASE}
databases:
# PG = Postgres
- engine: PG
# Database name
name: newsletter
# Again, let's keep the bill lean
num_nodes: 1
size: db-s-dev-database
# Postgres version - using the latest here
version: "12"

18
chapter05/sqlx-data.json Normal file
@@ -0,0 +1,18 @@
{
"db": "PostgreSQL",
"793f0df728d217c204123f12e4eafd6439db2d49d0cb506618ae9e780c7e0558": {
"query": "\n INSERT INTO subscriptions (id, email, name, subscribed_at)\n VALUES ($1, $2, $3, $4)\n ",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Uuid",
"Text",
"Text",
"Timestamptz"
]
},
"nullable": []
}
}
}

104
chapter05/src/configuration.rs Normal file

@@ -0,0 +1,104 @@
use serde_aux::field_attributes::deserialize_number_from_string;
use sqlx::postgres::{PgConnectOptions, PgSslMode};
use std::convert::{TryFrom, TryInto};
#[derive(serde::Deserialize)]
pub struct Settings {
pub database: DatabaseSettings,
pub application: ApplicationSettings,
}
#[derive(serde::Deserialize)]
pub struct ApplicationSettings {
#[serde(deserialize_with = "deserialize_number_from_string")]
pub port: u16,
pub host: String,
}
#[derive(serde::Deserialize)]
pub struct DatabaseSettings {
pub username: String,
pub password: String,
#[serde(deserialize_with = "deserialize_number_from_string")]
pub port: u16,
pub host: String,
pub database_name: String,
pub require_ssl: bool,
}
impl DatabaseSettings {
pub fn without_db(&self) -> PgConnectOptions {
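// `Require` refuses the connection unless it can be encrypted;
// `Prefer` tries an encrypted connection first, falling back to unencrypted.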
let ssl_mode = if self.require_ssl {
PgSslMode::Require
} else {
PgSslMode::Prefer
};
PgConnectOptions::new()
.host(&self.host)
.username(&self.username)
.password(&self.password)
.port(self.port)
.ssl_mode(ssl_mode)
}
pub fn with_db(&self) -> PgConnectOptions {
self.without_db().database(&self.database_name)
}
}
pub fn get_configuration() -> Result<Settings, config::ConfigError> {
let mut settings = config::Config::default();
let base_path = std::env::current_dir().expect("Failed to determine the current directory");
let configuration_directory = base_path.join("configuration");
// Read the "default" configuration file
settings.merge(config::File::from(configuration_directory.join("base")).required(true))?;
// Detect the running environment.
// Default to `local` if unspecified.
let environment: Environment = std::env::var("APP_ENVIRONMENT")
.unwrap_or_else(|_| "local".into())
.try_into()
.expect("Failed to parse APP_ENVIRONMENT.");
// Layer on the environment-specific values.
settings.merge(
config::File::from(configuration_directory.join(environment.as_str())).required(true),
)?;
// Add in settings from environment variables (with a prefix of APP and '__' as separator)
// E.g. `APP_APPLICATION__PORT=5001` would set `Settings.application.port`
settings.merge(config::Environment::with_prefix("app").separator("__"))?;
settings.try_into()
}
/// The possible runtime environments for our application.
pub enum Environment {
Local,
Production,
}
impl Environment {
pub fn as_str(&self) -> &'static str {
match self {
Environment::Local => "local",
Environment::Production => "production",
}
}
}
impl TryFrom<String> for Environment {
type Error = String;
fn try_from(s: String) -> Result<Self, Self::Error> {
match s.to_lowercase().as_str() {
"local" => Ok(Self::Local),
"production" => Ok(Self::Production),
other => Err(format!(
"{} is not a supported environment. Use either `local` or `production`.",
other
)),
}
}
}

5
chapter05/src/lib.rs Normal file
@@ -0,0 +1,5 @@
#![allow(clippy::toplevel_ref_arg)]
pub mod configuration;
pub mod routes;
pub mod startup;
pub mod telemetry;

26
chapter05/src/main.rs Normal file
@@ -0,0 +1,26 @@
use chapter05::configuration::get_configuration;
use chapter05::startup::run;
use chapter05::telemetry::{get_subscriber, init_subscriber};
use sqlx::postgres::PgPoolOptions;
use std::net::TcpListener;
#[actix_rt::main]
async fn main() -> std::io::Result<()> {
let subscriber = get_subscriber("zero2prod".into(), "info".into());
init_subscriber(subscriber);
let configuration = get_configuration().expect("Failed to read configuration.");
let connection_pool = PgPoolOptions::new()
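// An explicit, short connection timeout: fail fast if Postgres is unreachable.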
.connect_timeout(std::time::Duration::from_secs(2))
.connect_with(configuration.database.with_db())
.await
.expect("Failed to connect to Postgres.");
let address = format!(
"{}:{}",
configuration.application.host, configuration.application.port
);
let listener = TcpListener::bind(address)?;
run(listener, connection_pool)?.await?;
Ok(())
}

5
chapter05/src/routes/health_check.rs Normal file

@@ -0,0 +1,5 @@
use actix_web::HttpResponse;
pub async fn health_check() -> HttpResponse {
HttpResponse::Ok().finish()
}

5
chapter05/src/routes/mod.rs Normal file

@@ -0,0 +1,5 @@
mod health_check;
mod subscriptions;
pub use health_check::*;
pub use subscriptions::*;

52
chapter05/src/routes/subscriptions.rs Normal file

@@ -0,0 +1,52 @@
use actix_web::{web, HttpResponse};
use chrono::Utc;
use sqlx::PgPool;
use uuid::Uuid;
#[derive(serde::Deserialize)]
pub struct FormData {
email: String,
name: String,
}
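// `tracing::instrument` wraps the handler in a span on every invocation:
// `skip` keeps `form` and `pool` out of the recorded context, while `fields`
// explicitly attaches the subscriber's email and name to it.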
#[tracing::instrument(
name = "Adding a new subscriber",
skip(form, pool),
fields(
email = %form.email,
name = %form.name
)
)]
pub async fn subscribe(
form: web::Form<FormData>,
pool: web::Data<PgPool>,
) -> Result<HttpResponse, HttpResponse> {
insert_subscriber(&pool, &form)
.await
.map_err(|_| HttpResponse::InternalServerError().finish())?;
Ok(HttpResponse::Ok().finish())
}
#[tracing::instrument(
name = "Saving new subscriber details in the database",
skip(form, pool)
)]
pub async fn insert_subscriber(pool: &PgPool, form: &FormData) -> Result<(), sqlx::Error> {
sqlx::query!(
r#"
INSERT INTO subscriptions (id, email, name, subscribed_at)
VALUES ($1, $2, $3, $4)
"#,
Uuid::new_v4(),
form.email,
form.name,
Utc::now()
)
.execute(pool)
.await
.map_err(|e| {
tracing::error!("Failed to execute query: {:?}", e);
e
})?;
Ok(())
}

21
chapter05/src/startup.rs Normal file
@@ -0,0 +1,21 @@
use crate::routes::{health_check, subscribe};
use actix_web::dev::Server;
use actix_web::web::Data;
use actix_web::{web, App, HttpServer};
use sqlx::PgPool;
use std::net::TcpListener;
use tracing_actix_web::TracingLogger;
pub fn run(listener: TcpListener, db_pool: PgPool) -> Result<Server, std::io::Error> {
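// Wrap the pool in `web::Data` (an `Arc`) once, outside the factory closure:
// every worker's app instance then receives a clone of the same shared pool.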
let db_pool = Data::new(db_pool);
let server = HttpServer::new(move || {
App::new()
.wrap(TracingLogger)
.route("/health_check", web::get().to(health_check))
.route("/subscriptions", web::post().to(subscribe))
.app_data(db_pool.clone())
})
.listen(listener)?
.run();
Ok(server)
}

29
chapter05/src/telemetry.rs Normal file

@@ -0,0 +1,29 @@
use tracing::subscriber::set_global_default;
use tracing::Subscriber;
use tracing_bunyan_formatter::{BunyanFormattingLayer, JsonStorageLayer};
use tracing_log::LogTracer;
use tracing_subscriber::{layer::SubscriberExt, EnvFilter, Registry};
/// Compose multiple layers into a `tracing` subscriber.
///
/// # Implementation Notes
///
/// We are using `impl Subscriber` as the return type to avoid having to spell out the actual
/// type of the returned subscriber, which is indeed quite complex.
pub fn get_subscriber(name: String, env_filter: String) -> impl Subscriber + Sync + Send {
let env_filter =
EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(env_filter));
let formatting_layer = BunyanFormattingLayer::new(name, std::io::stdout);
Registry::default()
.with(env_filter)
.with(JsonStorageLayer)
.with(formatting_layer)
}
/// Register a subscriber as global default to process span data.
///
/// It should only be called once!
pub fn init_subscriber(subscriber: impl Subscriber + Sync + Send) {
LogTracer::init().expect("Failed to set logger");
set_global_default(subscriber).expect("Failed to set subscriber");
}

143
chapter05/tests/health_check.rs Normal file

@@ -0,0 +1,143 @@
use chapter05::configuration::{get_configuration, DatabaseSettings};
use chapter05::startup::run;
use chapter05::telemetry::{get_subscriber, init_subscriber};
use sqlx::{Connection, Executor, PgConnection, PgPool};
use std::net::TcpListener;
use uuid::Uuid;
// Ensure that the `tracing` stack is only initialised once using `lazy_static`
lazy_static::lazy_static! {
static ref TRACING: () = {
let filter = if std::env::var("TEST_LOG").is_ok() { "debug" } else { "" };
let subscriber = get_subscriber("test".into(), filter.into());
init_subscriber(subscriber);
};
}
pub struct TestApp {
pub address: String,
pub db_pool: PgPool,
}
async fn spawn_app() -> TestApp {
// The first time `initialize` is invoked the code in `TRACING` is executed.
// All other invocations will instead skip execution.
lazy_static::initialize(&TRACING);
let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind random port");
// We retrieve the port assigned to us by the OS
let port = listener.local_addr().unwrap().port();
let address = format!("http://127.0.0.1:{}", port);
let mut configuration = get_configuration().expect("Failed to read configuration.");
configuration.database.database_name = Uuid::new_v4().to_string();
let connection_pool = configure_database(&configuration.database).await;
let server = run(listener, connection_pool.clone()).expect("Failed to bind address");
let _ = tokio::spawn(server);
TestApp {
address,
db_pool: connection_pool,
}
}
pub async fn configure_database(config: &DatabaseSettings) -> PgPool {
// Create database
let mut connection = PgConnection::connect_with(&config.without_db())
.await
.expect("Failed to connect to Postgres");
connection
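// `format!` yields a `String`; `&*` reborrows it as a `&str`, which is what `execute` accepts.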
.execute(&*format!(r#"CREATE DATABASE "{}";"#, config.database_name))
.await
.expect("Failed to create database.");
// Migrate database
let connection_pool = PgPool::connect_with(config.with_db())
.await
.expect("Failed to connect to Postgres.");
sqlx::migrate!("./migrations")
.run(&connection_pool)
.await
.expect("Failed to migrate the database");
connection_pool
}
#[actix_rt::test]
async fn health_check_works() {
// Arrange
let app = spawn_app().await;
let client = reqwest::Client::new();
// Act
let response = client
// Use the returned application address
.get(&format!("{}/health_check", &app.address))
.send()
.await
.expect("Failed to execute request.");
// Assert
assert!(response.status().is_success());
assert_eq!(Some(0), response.content_length());
}
#[actix_rt::test]
async fn subscribe_returns_a_200_for_valid_form_data() {
// Arrange
let app = spawn_app().await;
let client = reqwest::Client::new();
let body = "name=le%20guin&email=ursula_le_guin%40gmail.com";
// Act
let response = client
.post(&format!("{}/subscriptions", &app.address))
.header("Content-Type", "application/x-www-form-urlencoded")
.body(body)
.send()
.await
.expect("Failed to execute request.");
// Assert
assert_eq!(200, response.status().as_u16());
let saved = sqlx::query!("SELECT email, name FROM subscriptions",)
.fetch_one(&app.db_pool)
.await
.expect("Failed to fetch saved subscription.");
assert_eq!(saved.email, "ursula_le_guin@gmail.com");
assert_eq!(saved.name, "le guin");
}
#[actix_rt::test]
async fn subscribe_returns_a_400_when_data_is_missing() {
// Arrange
let app = spawn_app().await;
let client = reqwest::Client::new();
let test_cases = vec![
("name=le%20guin", "missing the email"),
("email=ursula_le_guin%40gmail.com", "missing the name"),
("", "missing both name and email"),
];
for (invalid_body, error_message) in test_cases {
// Act
let response = client
.post(&format!("{}/subscriptions", &app.address))
.header("Content-Type", "application/x-www-form-urlencoded")
.body(invalid_body)
.send()
.await
.expect("Failed to execute request.");
// Assert
assert_eq!(
400,
response.status().as_u16(),
// Additional customised error message on test failure
"The API did not fail with 400 Bad Request when the payload was {}.",
error_message
);
}
}