
Merge branch 'master' into origin/feature/awc-retry-middleware

Authored by Rob Ede on 2024-06-11 01:13:42 +01:00, committed by GitHub
commit df3bf79873
GPG key ID: B5690EEEBB952194
431 changed files with 48909 additions and 23942 deletions


@@ -1,7 +1,10 @@
[alias]
chk = "hack check --workspace --all-features --tests --examples"
lint = "hack --clean-per-run clippy --workspace --tests --examples"
ci-min = "hack check --workspace --no-default-features"
ci-min-test = "hack check --workspace --no-default-features --tests --examples"
ci-default = "hack check --workspace"
ci-full = "check --workspace --bins --examples --tests"
lint = "clippy --workspace --all-targets -- -Dclippy::todo"
lint-all = "clippy --workspace --all-features --all-targets -- -Dclippy::todo"
# lib checking
ci-check-min = "hack --workspace check --no-default-features"
ci-check-default = "hack --workspace check"
ci-check-default-tests = "check --workspace --tests"
ci-check-all-feature-powerset="hack --workspace --feature-powerset --depth=4 --skip=__compress,experimental-io-uring check"
ci-check-all-feature-powerset-linux="hack --workspace --feature-powerset --depth=4 --skip=__compress check"

.github/FUNDING.yml

@@ -0,0 +1,3 @@
# These are supported funding model platforms
github: [robjtede]


@@ -3,35 +3,41 @@ name: Bug Report
about: Create a bug report.
---
Your issue may already be reported!
Please search on the [Actix Web issue tracker](https://github.com/actix/actix-web/issues) before creating one.
Your issue may already be reported! Please search on the [Actix Web issue tracker](https://github.com/actix/actix-web/issues) before creating one.
## Expected Behavior
<!--- If you're describing a bug, tell us what should happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->
## Current Behavior
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
## Possible Solution
<!--- Not obligatory, but suggest a fix/reason for the bug, -->
<!--- or ideas how to implement the addition or change -->
## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
1.
2.
3.
4.
## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->
## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Rust Version (I.e, output of `rustc -V`):
* Actix Web Version:
- Rust Version (I.e, output of `rustc -V`):
- Actix Web Version:


@@ -1,15 +1,8 @@
blank_issues_enabled: true
contact_links:
- name: GitHub Discussions
url: https://github.com/actix/actix-web/discussions
about: Actix Web Q&A
- name: Gitter chat (actix-web)
url: https://gitter.im/actix/actix-web
about: Actix Web Q&A
- name: Gitter chat (actix)
url: https://gitter.im/actix/actix
about: Actix (actor framework) Q&A
- name: Actix Discord
url: https://discord.gg/NWpN5mmg3x
about: Actix developer discussion and community chat
- name: GitHub Discussions
url: https://github.com/actix/actix-web/discussions
about: Actix Web Q&A


@@ -2,13 +2,15 @@
<!-- Please fill out the following to get your PR reviewed quicker. -->
## PR Type
<!-- What kind of change does this PR make? -->
<!-- Bug Fix / Feature / Refactor / Code Style / Other -->
PR_TYPE
## PR Checklist
<!-- Check your PR fulfills the following items. ->>
<!-- Check your PR fulfills the following items. -->
<!-- For draft PRs check the boxes as you complete them. -->
- [ ] Tests for the changes have been added / updated.
@@ -17,11 +19,10 @@ PR_TYPE
- [ ] Format code with the latest stable rustfmt.
- [ ] (Team) Label with affected crates and semver status.
## Overview
<!-- Describe the current and new behavior. -->
<!-- Emphasize any breaking changes. -->
<!-- If this PR fixes or closes an issue, reference it here. -->
<!-- Closes #000 -->

.github/dependabot.yml

@@ -0,0 +1,10 @@
version: 2
updates:
- package-ecosystem: cargo
directory: /
schedule:
interval: weekly
- package-ecosystem: github-actions
directory: /
schedule:
interval: weekly


@@ -1,28 +1,28 @@
name: Benchmark
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
branches: [master]
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
check_benchmark:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
profile: minimal
override: true
run: |
rustup set profile minimal
rustup install nightly
rustup override set nightly
- name: Check benchmark
uses: actions-rs/cargo@v1
with:
command: bench
args: --bench=server -- --sample-size=15
run: cargo bench --bench=server -- --sample-size=15

.github/workflows/ci-post-merge.yml

@@ -0,0 +1,91 @@
name: CI (post-merge)
on:
push:
branches: [master]
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build_and_test_nightly:
strategy:
fail-fast: false
matrix:
# prettier-ignore
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
version:
- { name: nightly, version: nightly }
name: ${{ matrix.target.name }} / ${{ matrix.version.name }}
runs-on: ${{ matrix.target.os }}
steps:
- uses: actions/checkout@v4
- name: Install nasm
if: matrix.target.os == 'windows-latest'
uses: ilammy/setup-nasm@v1.5.1
- name: Install OpenSSL
if: matrix.target.os == 'windows-latest'
shell: bash
run: |
set -e
choco install openssl --version=1.1.1.2100 -y --no-progress
echo 'OPENSSL_DIR=C:\Program Files\OpenSSL' >> $GITHUB_ENV
echo "RUSTFLAGS=-C target-feature=+crt-static" >> $GITHUB_ENV
- name: Install Rust (${{ matrix.version.name }})
uses: actions-rust-lang/setup-rust-toolchain@v1.9.0
with:
toolchain: ${{ matrix.version.version }}
- name: Install just, cargo-hack, cargo-nextest, cargo-ci-cache-clean
uses: taiki-e/install-action@v2.38.0
with:
tool: just,cargo-hack,cargo-nextest,cargo-ci-cache-clean
- name: check minimal
run: cargo ci-check-min
- name: check default
run: cargo ci-check-default
- name: tests
timeout-minutes: 60
run: just test
- name: CI cache clean
run: cargo-ci-cache-clean
ci_feature_powerset_check:
name: Verify Feature Combinations
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Free Disk Space
run: ./scripts/free-disk-space.sh
- name: Install Rust
uses: actions-rust-lang/setup-rust-toolchain@v1.9.0
- name: Install cargo-hack
uses: taiki-e/install-action@v2.38.0
with:
tool: cargo-hack
- name: check feature combinations
run: cargo ci-check-all-feature-powerset
- name: check feature combinations
run: cargo ci-check-all-feature-powerset-linux


@@ -3,125 +3,119 @@ name: CI
on:
pull_request:
types: [opened, synchronize, reopened]
merge_group:
types: [checks_requested]
push:
branches: [master]
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
read_msrv:
name: Read MSRV
uses: actions-rust-lang/msrv/.github/workflows/msrv.yml@v0.1.0
build_and_test:
needs: read_msrv
strategy:
fail-fast: false
matrix:
# prettier-ignore
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
version:
- 1.46.0 # MSRV
- stable
- nightly
- { name: msrv, version: "${{ needs.read_msrv.outputs.msrv }}" }
- { name: stable, version: stable }
name: ${{ matrix.target.name }} / ${{ matrix.version }}
name: ${{ matrix.target.name }} / ${{ matrix.version.name }}
runs-on: ${{ matrix.target.os }}
env:
VCPKGRS_DYNAMIC: 1
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Install nasm
if: matrix.target.os == 'windows-latest'
uses: ilammy/setup-nasm@v1.5.1
# install OpenSSL on Windows
- name: Set vcpkg root
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install OpenSSL
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: vcpkg install openssl:x64-windows
if: matrix.target.os == 'windows-latest'
shell: bash
run: |
set -e
choco install openssl --version=1.1.1.2100 -y --no-progress
echo 'OPENSSL_DIR=C:\Program Files\OpenSSL' >> $GITHUB_ENV
echo "RUSTFLAGS=-C target-feature=+crt-static" >> $GITHUB_ENV
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
profile: minimal
override: true
- name: Setup mold linker
if: matrix.target.os == 'ubuntu-latest'
uses: rui314/setup-mold@v1
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
- name: Install Rust (${{ matrix.version.name }})
uses: actions-rust-lang/setup-rust-toolchain@v1.9.0
with:
toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
profile: minimal
override: true
toolchain: ${{ matrix.version.version }}
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
- name: Install just, cargo-hack, cargo-nextest, cargo-ci-cache-clean
uses: taiki-e/install-action@v2.38.0
with:
command: generate-lockfile
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
tool: just,cargo-hack,cargo-nextest,cargo-ci-cache-clean
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: workaround MSRV issues
if: matrix.version.name == 'msrv'
run: just downgrade-for-msrv
- name: check minimal
uses: actions-rs/cargo@v1
with:
command: hack
args: check --workspace --no-default-features
run: cargo ci-check-min
- name: check minimal + tests
uses: actions-rs/cargo@v1
with:
command: hack
args: check --workspace --no-default-features --tests --examples
- name: check full
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --bins --examples --tests
- name: check default
run: cargo ci-check-default
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --workspace --all-features --no-fail-fast -- --nocapture
--skip=test_h2_content_length
--skip=test_reading_deflate_encoding_large_random_rustls
timeout-minutes: 60
run: just test
- name: tests (actix-http)
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --package=actix-http --no-default-features --features=rustls -- --nocapture
- name: CI cache clean
run: cargo-ci-cache-clean
- name: tests (awc)
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --package=awc --no-default-features --features=rustls -- --nocapture
io-uring:
name: io-uring tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Generate coverage file
if: >
matrix.target.os == 'ubuntu-latest'
&& matrix.version == 'stable'
&& github.ref == 'refs/heads/master'
run: |
cargo install cargo-tarpaulin --vers "^0.13"
cargo tarpaulin --out Xml --verbose
- name: Upload to Codecov
if: >
matrix.target.os == 'ubuntu-latest'
&& matrix.version == 'stable'
&& github.ref == 'refs/heads/master'
uses: codecov/codecov-action@v1
- name: Install Rust
uses: actions-rust-lang/setup-rust-toolchain@v1.9.0
with:
file: cobertura.xml
toolchain: nightly
- name: Clear the cargo caches
run: |
cargo install cargo-cache --no-default-features --features ci-autoclean
cargo-cache
- name: tests (io-uring)
timeout-minutes: 60
run: >
sudo bash -c "ulimit -Sl 512 && ulimit -Hl 512 && PATH=$PATH:/usr/share/rust/.cargo/bin && RUSTUP_TOOLCHAIN=stable cargo test --lib --tests -p=actix-files --all-features"
rustdoc:
name: doc tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust (nightly)
uses: actions-rust-lang/setup-rust-toolchain@v1.9.0
with:
toolchain: nightly
- name: Install just
uses: taiki-e/install-action@v2.38.0
with:
tool: just
- name: doc tests
run: just test-docs


@@ -1,39 +0,0 @@
name: Lint
on:
pull_request:
types: [opened, synchronize, reopened]
jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
components: rustfmt
- name: Check with rustfmt
uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
clippy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
components: clippy
override: true
- name: Check with Clippy
uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --workspace --tests --all-features

.github/workflows/coverage.yml

@@ -0,0 +1,40 @@
name: Coverage
on:
push:
branches: [master]
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
coverage:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust (nightly)
uses: actions-rust-lang/setup-rust-toolchain@v1.9.0
with:
toolchain: nightly
components: llvm-tools
- name: Install just, cargo-llvm-cov, cargo-nextest
uses: taiki-e/install-action@v2.38.0
with:
tool: just,cargo-llvm-cov,cargo-nextest
- name: Generate code coverage
run: just test-coverage-codecov
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v4.4.1
with:
files: codecov.json
fail_ci_if_error: true
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

.github/workflows/lint.yml

@@ -0,0 +1,116 @@
name: Lint
on:
pull_request:
types: [opened, synchronize, reopened]
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust (nightly)
uses: actions-rust-lang/setup-rust-toolchain@v1.9.0
with:
toolchain: nightly
components: rustfmt
- name: Check with Rustfmt
run: cargo fmt --all -- --check
clippy:
permissions:
contents: read
checks: write # to add clippy checks to PR diffs
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: actions-rust-lang/setup-rust-toolchain@v1.9.0
with:
components: clippy
- name: Check with Clippy
uses: giraffate/clippy-action@v1.0.1
with:
reporter: github-pr-check
github_token: ${{ secrets.GITHUB_TOKEN }}
clippy_flags: >-
--workspace --all-features --tests --examples --bins --
-A unknown_lints -D clippy::todo -D clippy::dbg_macro
lint-docs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust (nightly)
uses: actions-rust-lang/setup-rust-toolchain@v1.9.0
with:
toolchain: nightly
components: rust-docs
- name: Check for broken intra-doc links
env:
RUSTDOCFLAGS: -D warnings
run: cargo +nightly doc --no-deps --workspace --all-features
check-external-types:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust (nightly-2024-05-01)
uses: actions-rust-lang/setup-rust-toolchain@v1.9.0
with:
toolchain: nightly-2024-05-01
- name: Install just
uses: taiki-e/install-action@v2.38.0
with:
tool: just
- name: Install cargo-check-external-types
uses: taiki-e/cache-cargo-install-action@v1.2.2
with:
tool: cargo-check-external-types
- name: check external types
run: just check-external-types-all +nightly-2024-05-01
public-api-diff:
runs-on: ubuntu-latest
steps:
- name: Checkout main branch
uses: actions/checkout@v4
with:
ref: ${{ github.base_ref }}
- name: Checkout PR branch
uses: actions/checkout@v4
- name: Install Rust (nightly-2024-06-07)
uses: actions-rust-lang/setup-rust-toolchain@v1.9.0
with:
toolchain: nightly-2024-06-07
- name: Install cargo-public-api
uses: taiki-e/install-action@v2.38.0
with:
tool: cargo-public-api
- name: Generate API diff
run: |
for f in $(find -mindepth 2 -maxdepth 2 -name Cargo.toml); do
cargo public-api --manifest-path "$f" --simplified diff ${{ github.event.pull_request.base.sha }}..${{ github.sha }}
done


@@ -1,35 +0,0 @@
name: Upload Documentation
on:
push:
branches: [master]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Build Docs
uses: actions-rs/cargo@v1
with:
command: doc
args: --workspace --all-features --no-deps
- name: Tweak HTML
run: echo '<meta http-equiv="refresh" content="0;url=actix_web/index.html">' > target/doc/index.html
- name: Deploy to GitHub Pages
uses: JamesIves/github-pages-deploy-action@3.7.1
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages
FOLDER: target/doc

.gitignore

@@ -16,3 +16,10 @@ guide/build/
# Configuration directory generated by CLion
.idea
# Configuration directory generated by VSCode
.vscode
# code coverage
/lcov.info
/codecov.json

.prettierrc.yml

@@ -0,0 +1,5 @@
overrides:
- files: "*.md"
options:
printWidth: 9999
proseWrap: never

.rustfmt.toml

@@ -0,0 +1,3 @@
group_imports = "StdExternalCrate"
imports_granularity = "Crate"
use_field_init_shorthand = true
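
For orientation, here is a minimal sketch (not part of the diff) of the import layout these rustfmt settings produce: `group_imports = "StdExternalCrate"` orders import groups as std, then external crates, then local paths, and `imports_granularity = "Crate"` merges imports from the same crate into one `use`. The function names are illustrative only.

```rust
// Group 1: std imports, merged into a single `use` per crate.
use std::{collections::HashMap, time::Duration};

// Group 2: external crates (here, the workspace's own actix-web).
use actix_web::{http::StatusCode, HttpResponse};

// Group 3: `crate::`/`self::`/`super::` imports would follow in a real module.

// Illustrative helper using the merged imports above.
fn status_summary() -> HashMap<&'static str, StatusCode> {
    let mut map = HashMap::new();
    map.insert("ok", StatusCode::OK);
    map
}

fn main() {
    let _timeout = Duration::from_secs(1);
    let _response = HttpResponse::Ok().finish();
    println!("{:?}", status_summary());
}
```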


@@ -1,737 +1,5 @@
# Changes
# Changelog
## Unreleased - 2021-xx-xx
### Added
* `HttpServer::worker_max_blocking_threads` for setting block thread pool. [#2200]
Changelogs are kept separately for each crate in this repo.
### Changed
* `ServiceResponse::error_response` now uses body type of `Body`. [#2201]
* `ServiceResponse::checked_expr` now returns a `Result`. [#2201]
* Update `language-tags` to `0.3`.
* `ServiceResponse::take_body`. [#2201]
* `ServiceResponse::map_body` closure receives and returns `B` instead of `ResponseBody<B>` types. [#2201]
### Removed
* `HttpResponse::take_body` and old `HttpResponse::into_body` method that casted body type. [#2201]
[#2200]: https://github.com/actix/actix-web/pull/2200
[#2201]: https://github.com/actix/actix-web/pull/2201
## 4.0.0-beta.6 - 2021-04-17
### Added
* `HttpResponse` and `HttpResponseBuilder` structs. [#2065]
### Changed
* Most error types are now marked `#[non_exhaustive]`. [#2148]
* Methods on `ContentDisposition` that took `T: AsRef<str>` now take `impl AsRef<str>`.
[#2065]: https://github.com/actix/actix-web/pull/2065
[#2148]: https://github.com/actix/actix-web/pull/2148
## 4.0.0-beta.5 - 2021-04-02
### Added
* `Header` extractor for extracting common HTTP headers in handlers. [#2094]
* Added `TestServer::client_headers` method. [#2097]
### Fixed
* Double ampersand in Logger format is escaped correctly. [#2067]
### Changed
* `CustomResponder` would return error as `HttpResponse` when `CustomResponder::with_header` failed
instead of skipping. (Only the first error is kept when multiple error occur) [#2093]
### Removed
* The `client` mod was removed. Clients should now use `awc` directly.
[871ca5e4](https://github.com/actix/actix-web/commit/871ca5e4ae2bdc22d1ea02701c2992fa8d04aed7)
* Integration testing was moved to new `actix-test` crate. Namely these items from the `test`
module: `TestServer`, `TestServerConfig`, `start`, `start_with`, and `unused_addr`. [#2112]
[#2067]: https://github.com/actix/actix-web/pull/2067
[#2093]: https://github.com/actix/actix-web/pull/2093
[#2094]: https://github.com/actix/actix-web/pull/2094
[#2097]: https://github.com/actix/actix-web/pull/2097
[#2112]: https://github.com/actix/actix-web/pull/2112
## 4.0.0-beta.4 - 2021-03-09
### Changed
* Feature `cookies` is now optional and enabled by default. [#1981]
* `JsonBody::new` returns a default limit of 32kB to be consistent with `JsonConfig` and the default
behaviour of the `web::Json<T>` extractor. [#2010]
[#1981]: https://github.com/actix/actix-web/pull/1981
[#2010]: https://github.com/actix/actix-web/pull/2010
## 4.0.0-beta.3 - 2021-02-10
* Update `actix-web-codegen` to `0.5.0-beta.1`.
## 4.0.0-beta.2 - 2021-02-10
### Added
* The method `Either<web::Json<T>, web::Form<T>>::into_inner()` which returns the inner type for
whichever variant was created. Also works for `Either<web::Form<T>, web::Json<T>>`. [#1894]
* Add `services!` macro for helping register multiple services to `App`. [#1933]
* Enable registering a vec of services of the same type to `App` [#1933]
### Changed
* Rework `Responder` trait to be sync and returns `Response`/`HttpResponse` directly.
Making it simpler and more performant. [#1891]
* `ServiceRequest::into_parts` and `ServiceRequest::from_parts` can no longer fail. [#1893]
* `ServiceRequest::from_request` can no longer fail. [#1893]
* Our `Either` type now uses `Left`/`Right` variants (instead of `A`/`B`) [#1894]
* `test::{call_service, read_response, read_response_json, send_request}` take `&Service`
in argument [#1905]
* `App::wrap_fn`, `Resource::wrap_fn` and `Scope::wrap_fn` provide `&Service` in closure
argument. [#1905]
* `web::block` no longer requires the output is a Result. [#1957]
### Fixed
* Multiple calls to `App::data` with the same type now keeps the latest call's data. [#1906]
### Removed
* Public field of `web::Path` has been made private. [#1894]
* Public field of `web::Query` has been made private. [#1894]
* `TestRequest::with_header`; use `TestRequest::default().insert_header()`. [#1869]
* `AppService::set_service_data`; for custom HTTP service factories adding application data, use the
layered data model by calling `ServiceRequest::add_data_container` when handling
requests instead. [#1906]
[#1891]: https://github.com/actix/actix-web/pull/1891
[#1893]: https://github.com/actix/actix-web/pull/1893
[#1894]: https://github.com/actix/actix-web/pull/1894
[#1869]: https://github.com/actix/actix-web/pull/1869
[#1905]: https://github.com/actix/actix-web/pull/1905
[#1906]: https://github.com/actix/actix-web/pull/1906
[#1933]: https://github.com/actix/actix-web/pull/1933
[#1957]: https://github.com/actix/actix-web/pull/1957
## 4.0.0-beta.1 - 2021-01-07
### Added
* `Compat` middleware enabling generic response body/error type of middlewares like `Logger` and
`Compress` to be used in `middleware::Condition` and `Resource`, `Scope` services. [#1865]
### Changed
* Update `actix-*` dependencies to tokio `1.0` based versions. [#1813]
* Bumped `rand` to `0.8`.
* Update `rust-tls` to `0.19`. [#1813]
* Rename `Handler` to `HandlerService` and rename `Factory` to `Handler`. [#1852]
* The default `TrailingSlash` is now `Trim`, in line with existing documentation. See migration
guide for implications. [#1875]
* Rename `DefaultHeaders::{content_type => add_content_type}`. [#1875]
* MSRV is now 1.46.0.
### Fixed
* Added the underlying parse error to `test::read_body_json`'s panic message. [#1812]
### Removed
* Public modules `middleware::{normalize, err_handlers}`. All necessary middleware structs are now
exposed directly by the `middleware` module.
* Remove `actix-threadpool` as dependency. `actix_threadpool::BlockingError` error type can be imported
from `actix_web::error` module. [#1878]
[#1812]: https://github.com/actix/actix-web/pull/1812
[#1813]: https://github.com/actix/actix-web/pull/1813
[#1852]: https://github.com/actix/actix-web/pull/1852
[#1865]: https://github.com/actix/actix-web/pull/1865
[#1875]: https://github.com/actix/actix-web/pull/1875
[#1878]: https://github.com/actix/actix-web/pull/1878
## 3.3.2 - 2020-12-01
### Fixed
* Removed an occasional `unwrap` on `None` panic in `NormalizePathNormalization`. [#1762]
* Fix `match_pattern()` returning `None` for scope with empty path resource. [#1798]
* Increase minimum `socket2` version. [#1803]
[#1762]: https://github.com/actix/actix-web/pull/1762
[#1798]: https://github.com/actix/actix-web/pull/1798
[#1803]: https://github.com/actix/actix-web/pull/1803
## 3.3.1 - 2020-11-29
* Ensure `actix-http` dependency uses same `serde_urlencoded`.
## 3.3.0 - 2020-11-25
### Added
* Add `Either<A, B>` extractor helper. [#1788]
### Changed
* Upgrade `serde_urlencoded` to `0.7`. [#1773]
[#1773]: https://github.com/actix/actix-web/pull/1773
[#1788]: https://github.com/actix/actix-web/pull/1788
## 3.2.0 - 2020-10-30
### Added
* Implement `exclude_regex` for Logger middleware. [#1723]
* Add request-local data extractor `web::ReqData`. [#1748]
* Add ability to register closure for request middleware logging. [#1749]
* Add `app_data` to `ServiceConfig`. [#1757]
* Expose `on_connect` for access to the connection stream before request is handled. [#1754]
### Changed
* Updated actix-web-codegen dependency for access to new `#[route(...)]` multi-method macro.
* Print non-configured `Data<T>` type when attempting extraction. [#1743]
* Re-export bytes::Buf{Mut} in web module. [#1750]
* Upgrade `pin-project` to `1.0`.
[#1723]: https://github.com/actix/actix-web/pull/1723
[#1743]: https://github.com/actix/actix-web/pull/1743
[#1748]: https://github.com/actix/actix-web/pull/1748
[#1750]: https://github.com/actix/actix-web/pull/1750
[#1754]: https://github.com/actix/actix-web/pull/1754
[#1749]: https://github.com/actix/actix-web/pull/1749
## 3.1.0 - 2020-09-29
### Changed
* Add `TrailingSlash::MergeOnly` behaviour to `NormalizePath`, which allows `NormalizePath`
to retain any trailing slashes. [#1695]
* Remove bound `std::marker::Sized` from `web::Data` to support storing `Arc<dyn Trait>`
via `web::Data::from` [#1710]
### Fixed
* `ResourceMap` debug printing is no longer infinitely recursive. [#1708]
[#1695]: https://github.com/actix/actix-web/pull/1695
[#1708]: https://github.com/actix/actix-web/pull/1708
[#1710]: https://github.com/actix/actix-web/pull/1710
## 3.0.2 - 2020-09-15
### Fixed
* `NormalizePath` when used with `TrailingSlash::Trim` no longer trims the root path "/". [#1678]
[#1678]: https://github.com/actix/actix-web/pull/1678
## 3.0.1 - 2020-09-13
### Changed
* `middleware::normalize::TrailingSlash` enum is now accessible. [#1673]
[#1673]: https://github.com/actix/actix-web/pull/1673
## 3.0.0 - 2020-09-11
* No significant changes from `3.0.0-beta.4`.
## 3.0.0-beta.4 - 2020-09-09
### Added
* `middleware::NormalizePath` now has configurable behavior for either always having a trailing
slash, or as the new addition, always trimming trailing slashes. [#1639]
### Changed
* Update actix-codec and actix-utils dependencies. [#1634]
* `FormConfig` and `JsonConfig` configurations are now also considered when set
using `App::data`. [#1641]
* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`. [#1655]
* `HttpServer::maxconnrate` is renamed to the more expressive
`HttpServer::max_connection_rate`. [#1655]
[#1639]: https://github.com/actix/actix-web/pull/1639
[#1641]: https://github.com/actix/actix-web/pull/1641
[#1634]: https://github.com/actix/actix-web/pull/1634
[#1655]: https://github.com/actix/actix-web/pull/1655
## 3.0.0-beta.3 - 2020-08-17
### Changed
* Update `rustls` to 0.18
## 3.0.0-beta.2 - 2020-08-17
### Changed
* `PayloadConfig` is now also considered in `Bytes` and `String` extractors when set
using `App::data`. [#1610]
* `web::Path` now has a public representation: `web::Path(pub T)` that enables
destructuring. [#1594]
* `ServiceRequest::app_data` allows retrieval of non-Data data without splitting into parts to
access `HttpRequest` which already allows this. [#1618]
* Re-export all error types from `awc`. [#1621]
* MSRV is now 1.42.0.
### Fixed
* Memory leak of app data in pooled requests. [#1609]
[#1594]: https://github.com/actix/actix-web/pull/1594
[#1609]: https://github.com/actix/actix-web/pull/1609
[#1610]: https://github.com/actix/actix-web/pull/1610
[#1618]: https://github.com/actix/actix-web/pull/1618
[#1621]: https://github.com/actix/actix-web/pull/1621
## 3.0.0-beta.1 - 2020-07-13
### Added
* Re-export `actix_rt::main` as `actix_web::main`.
* `HttpRequest::match_pattern` and `ServiceRequest::match_pattern` for extracting the matched
resource pattern.
* `HttpRequest::match_name` and `ServiceRequest::match_name` for extracting matched resource name.
### Changed
* Fix actix_http::h1::dispatcher so it returns when HW_BUFFER_SIZE is reached. Should reduce peak memory consumption during large uploads. [#1550]
* Migrate cookie handling to `cookie` crate. Actix-web no longer requires `ring` dependency.
* MSRV is now 1.41.1
### Fixed
* `NormalizePath` improved consistency when path needs slashes added _and_ removed.
## 3.0.0-alpha.3 - 2020-05-21
### Added
* Add option to create `Data<T>` from `Arc<T>` [#1509]
### Changed
* Resources and Scopes can now access non-overridden data types set on App (or containing scopes) when setting their own data. [#1486]
* Fix audit issue logging by default peer address [#1485]
* Bump minimum supported Rust version to 1.40
* Replace deprecated `net2` crate with `socket2`
[#1485]: https://github.com/actix/actix-web/pull/1485
[#1509]: https://github.com/actix/actix-web/pull/1509
## [3.0.0-alpha.2] - 2020-05-08
### Changed
* `{Resource,Scope}::default_service(f)` handlers now support app data extraction. [#1452]
* Implement `std::error::Error` for our custom errors [#1422]
* NormalizePath middleware now appends trailing / so that routes of form /example/ respond to /example requests. [#1433]
* Remove the `failure` feature and support.
[#1422]: https://github.com/actix/actix-web/pull/1422
[#1433]: https://github.com/actix/actix-web/pull/1433
[#1452]: https://github.com/actix/actix-web/pull/1452
[#1486]: https://github.com/actix/actix-web/pull/1486
## [3.0.0-alpha.1] - 2020-03-11
### Added
* Add helper function for creating routes with `TRACE` method guard `web::trace()`
* Add convenience functions `test::read_body_json()` and `test::TestRequest::send_request()` for testing.
### Changed
* Use `sha-1` crate instead of unmaintained `sha1` crate
* Skip empty chunks when returning response from a `Stream` [#1308]
* Update the `time` dependency to 0.2.7
* Update `actix-tls` dependency to 2.0.0-alpha.1
* Update `rustls` dependency to 0.17
[#1308]: https://github.com/actix/actix-web/pull/1308
## [2.0.0] - 2019-12-25
### Changed
* Rename `HttpServer::start()` to `HttpServer::run()`
* Allow to gracefully stop test server via `TestServer::stop()`
* Allow to specify multi-patterns for resources
## [2.0.0-rc] - 2019-12-20
### Changed
* Move `BodyEncoding` to `dev` module #1220
* Allow to set `peer_addr` for TestRequest #1074
* Make web::Data deref to Arc<T> #1214
* Rename `App::register_data()` to `App::app_data()`
* `HttpRequest::app_data<T>()` returns `Option<&T>` instead of `Option<&Data<T>>`
### Fixed
* Fix `AppConfig::secure()` is always false. #1202
## [2.0.0-alpha.6] - 2019-12-15
### Fixed
* Fixed compilation with default features off
## [2.0.0-alpha.5] - 2019-12-13
### Added
* Add test server, `test::start()` and `test::start_with()`
## [2.0.0-alpha.4] - 2019-12-08
### Deleted
* Delete HttpServer::run(), it is not useful with async/await
## [2.0.0-alpha.3] - 2019-12-07
### Changed
* Migrate to tokio 0.2
## [2.0.0-alpha.1] - 2019-11-22
### Changed
* Migrated to `std::future`
* Remove implementation of `Responder` for `()`. (#1167)
## [1.0.9] - 2019-11-14
### Added
* Add `Payload::into_inner` method and make stored `def::Payload` public. (#1110)
### Changed
* Support `Host` guards when the `Host` header is unset (e.g. HTTP/2 requests) (#1129)
## [1.0.8] - 2019-09-25
### Added
* Add `Scope::register_data` and `Resource::register_data` methods, parallel to
`App::register_data`.
* Add `middleware::Condition` that conditionally enables another middleware
* Allow to re-construct `ServiceRequest` from `HttpRequest` and `Payload`
* Add `HttpServer::listen_uds` for ability to listen on UDS FD rather than path,
which is useful for example with systemd.
### Changed
* Make UrlEncodedError::Overflow more informative
* Use actix-testing for testing utils
## [1.0.7] - 2019-08-29
### Fixed
* Request Extensions leak #1062
## [1.0.6] - 2019-08-28
### Added
* Re-implement Host predicate (#989)
* Form implements Responder, returning a `application/x-www-form-urlencoded` response
* Add `into_inner` to `Data`
* Add `test::TestRequest::set_form()` convenience method to automatically serialize data and set
the header in test requests.
### Changed
* `Query` payload made `pub`. Allows user to pattern-match the payload.
* Enable `rust-tls` feature for client #1045
* Update serde_urlencoded to 0.6.1
* Update url to 2.1
## [1.0.5] - 2019-07-18
### Added
* Unix domain sockets (HttpServer::bind_uds) #92
* Actix now logs errors resulting in "internal server error" responses always, with the `error`
logging level
### Fixed
* Restored logging of errors through the `Logger` middleware
## [1.0.4] - 2019-07-17
### Added
* Add `Responder` impl for `(T, StatusCode) where T: Responder`
* Allow to access app's resource map via
`ServiceRequest::resource_map()` and `HttpRequest::resource_map()` methods.
### Changed
* Upgrade `rand` dependency version to 0.7
## [1.0.3] - 2019-06-28
### Added
* Support asynchronous data factories #850
### Changed
* Use `encoding_rs` crate instead of unmaintained `encoding` crate
## [1.0.2] - 2019-06-17
### Changed
* Move cors middleware to `actix-cors` crate.
* Move identity middleware to `actix-identity` crate.
## [1.0.1] - 2019-06-17
### Added
* Add support for PathConfig #903
* Add `middleware::identity::RequestIdentity` trait to `get_identity` from `HttpMessage`.
### Changed
* Move cors middleware to `actix-cors` crate.
* Move identity middleware to `actix-identity` crate.
* Disable default feature `secure-cookies`.
* Allow to test an app that uses async actors #897
* Re-apply patch from #637 #894
### Fixed
* HttpRequest::url_for is broken with nested scopes #915
## [1.0.0] - 2019-06-05
### Added
* Add `Scope::configure()` method.
* Add `ServiceRequest::set_payload()` method.
* Add `test::TestRequest::set_json()` convenience method to automatically
serialize data and set header in test requests.
* Add macros for head, options, trace, connect and patch http methods
### Changed
* Drop an unnecessary `Option<_>` indirection around `ServerBuilder` from `HttpServer`. #863
### Fixed
* Fix Logger request time format, and use rfc3339. #867
* Clear http requests pool on app service drop #860
## [1.0.0-rc] - 2019-05-18
### Added
* Add `Query<T>::from_query()` to extract parameters from a query string. #846
* `QueryConfig`, similar to `JsonConfig` for customizing error handling of query extractors.
### Changed
* `JsonConfig` is now `Send + Sync`, this implies that `error_handler` must be `Send + Sync` too.
### Fixed
* Codegen with parameters in the path only resolves the first registered endpoint #841
## [1.0.0-beta.4] - 2019-05-12
### Added
* Allow to set/override app data on scope level
### Changed
* `App::configure` take an `FnOnce` instead of `Fn`
* Upgrade actix-net crates
## [1.0.0-beta.3] - 2019-05-04
### Added
* Add helper function for executing futures `test::block_fn()`
### Changed
* Extractor configuration could be registered with `App::data()`
or with `Resource::data()` #775
* Route data is unified with app data, `Route::data()` moved to resource
level to `Resource::data()`
* CORS handling without headers #702
* Allow constructing `Data` instances to avoid double `Arc` for `Send + Sync` types.
### Fixed
* Fix `NormalizePath` middleware impl #806
### Deleted
* `App::data_factory()` is deleted.
## [1.0.0-beta.2] - 2019-04-24
### Added
* Add raw services support via `web::service()`
* Add helper functions for reading response body `test::read_body()`
* Add support for `remainder match` (i.e "/path/{tail}*")
* Extend `Responder` trait, allow to override status code and headers.
* Store visit and login timestamp in the identity cookie #502
### Changed
* `.to_async()` handler can return `Responder` type #792
### Fixed
* Fix async web::Data factory handling
## [1.0.0-beta.1] - 2019-04-20
### Added
* Add helper functions for reading test response body,
`test::read_response()` and test::read_response_json()`
* Add `.peer_addr()` #744
* Add `NormalizePath` middleware
### Changed
* Rename `RouterConfig` to `ServiceConfig`
* Rename `test::call_success` to `test::call_service`
* Removed `ServiceRequest::from_parts()` as it is unsafe to create from parts.
* `CookieIdentityPolicy::max_age()` accepts value in seconds
### Fixed
* Fixed `TestRequest::app_data()`
## [1.0.0-alpha.6] - 2019-04-14
### Changed
* Allow using any service as default service.
* Remove generic type for request payload, always use default.
* Removed `Decompress` middleware. Bytes, String, Json, Form extractors
automatically decompress payload.
* Make extractor config type explicit. Add `FromRequest::Config` associated type.
## [1.0.0-alpha.5] - 2019-04-12
### Added
* Added async io `TestBuffer` for testing.
### Deleted
* Removed native-tls support
## [1.0.0-alpha.4] - 2019-04-08
### Added
* `App::configure()` allow to offload app configuration to different methods
* Added `URLPath` option for logger
* Added `ServiceRequest::app_data()`, returns `Data<T>`
* Added `ServiceFromRequest::app_data()`, returns `Data<T>`
### Changed
* `FromRequest` trait refactoring
* Move multipart support to actix-multipart crate
### Fixed
* Fix body propagation in Response::from_error. #760
## [1.0.0-alpha.3] - 2019-04-02
### Changed
* Renamed `TestRequest::to_service()` to `TestRequest::to_srv_request()`
* Renamed `TestRequest::to_response()` to `TestRequest::to_srv_response()`
* Removed `Deref` impls
### Removed
* Removed unused `actix_web::web::md()`
## [1.0.0-alpha.2] - 2019-03-29
### Added
* Rustls support
### Changed
* Use forked cookie
* Multipart::Field renamed to MultipartField
## [1.0.0-alpha.1] - 2019-03-28
### Changed
* Complete architecture re-design.
* Return 405 response if no matching route found within resource #538
Actix Web changelog [is now here &rarr;](./actix-web/CHANGES.md).
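
As a quick reference for readers skimming the removed changelog above, the following hedged sketch ties together two of its entries: the `actix_web::main` re-export noted under 3.0.0-beta.1 and the per-method routing macros noted under 1.0.0. It is written against actix-web 4.x, so exact paths may differ for the older releases those entries describe; the handler and bind address are illustrative.

```rust
use actix_web::{get, App, HttpResponse, HttpServer, Responder};

// One of the method-guard macros mentioned in the 1.0.0 "Added" entries.
#[get("/")]
async fn index() -> impl Responder {
    HttpResponse::Ok().body("hello")
}

// `actix_web::main` is the re-exported `actix_rt::main` noted under 3.0.0-beta.1.
#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| App::new().service(index))
        .bind(("127.0.0.1", 8080))?
        .run()
        .await
}
```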


@@ -8,19 +8,19 @@ In the interest of fostering an open and welcoming environment, we as contributo
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
- The use of sexualized language or imagery and unwelcome sexual attention or advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities


@@ -1,111 +1,29 @@
[package]
name = "actix-web"
version = "4.0.0-beta.6"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust"
keywords = ["actix", "http", "web", "framework", "async"]
categories = [
"network-programming",
"asynchronous",
"web-programming::http-server",
"web-programming::websocket"
[workspace]
resolver = "2"
members = [
"actix-files",
"actix-http-test",
"actix-http",
"actix-multipart",
"actix-multipart-derive",
"actix-router",
"actix-test",
"actix-web-actors",
"actix-web-codegen",
"actix-web",
"awc",
]
[workspace.package]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web"
license = "MIT OR Apache-2.0"
edition = "2018"
edition = "2021"
rust-version = "1.72"
[package.metadata.docs.rs]
# features that docs.rs will build with
features = ["openssl", "rustls", "compress", "cookies", "secure-cookies"]
[lib]
name = "actix_web"
path = "src/lib.rs"
[workspace]
members = [
".",
"awc",
"actix-http",
"actix-files",
"actix-multipart",
"actix-web-actors",
"actix-web-codegen",
"actix-http-test",
"actix-test",
]
# enable when MSRV is 1.51+
# resolver = "2"
[features]
default = ["compress", "cookies"]
# content-encoding support
compress = ["actix-http/compress"]
# support for cookies
cookies = ["cookie"]
# secure cookies feature
secure-cookies = ["cookie/secure"]
# openssl
openssl = ["actix-http/openssl", "actix-tls/accept", "actix-tls/openssl"]
# rustls
rustls = ["actix-http/rustls", "actix-tls/accept", "actix-tls/rustls"]
[dependencies]
actix-codec = "0.4.0"
actix-macros = "0.2.0"
actix-router = "0.2.7"
actix-rt = "2.2"
actix-server = "2.0.0-beta.3"
actix-service = "2.0.0"
actix-utils = "3.0.0"
actix-tls = { version = "3.0.0-beta.5", default-features = false, optional = true }
actix-web-codegen = "0.5.0-beta.2"
actix-http = "3.0.0-beta.6"
ahash = "0.7"
bytes = "1"
cookie = { version = "0.15", features = ["percent-encode"], optional = true }
derive_more = "0.99.5"
either = "1.5.3"
encoding_rs = "0.8"
futures-core = { version = "0.3.7", default-features = false }
futures-util = { version = "0.3.7", default-features = false }
itoa = "0.4"
language-tags = "0.3"
once_cell = "1.5"
log = "0.4"
mime = "0.3"
paste = "1"
pin-project = "1.0.0"
regex = "1.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_urlencoded = "0.7"
smallvec = "1.6"
socket2 = "0.4.0"
time = { version = "0.2.23", default-features = false, features = ["std"] }
url = "2.1"
[dev-dependencies]
actix-test = { version = "0.1.0-beta.2", features = ["openssl", "rustls"] }
awc = { version = "3.0.0-beta.5", features = ["openssl"] }
brotli2 = "0.3.2"
criterion = "0.3"
env_logger = "0.8"
flate2 = "1.0.13"
rand = "0.8"
rcgen = "0.8"
serde_derive = "1.0"
tls-openssl = { package = "openssl", version = "0.10.9" }
tls-rustls = { package = "rustls", version = "0.19.0" }
[profile.dev]
# Disabling debug info speeds up builds a bunch and we don't rely on it for debugging that much.
debug = 0
[profile.release]
lto = true
@@ -117,36 +35,19 @@ actix-files = { path = "actix-files" }
actix-http = { path = "actix-http" }
actix-http-test = { path = "actix-http-test" }
actix-multipart = { path = "actix-multipart" }
actix-multipart-derive = { path = "actix-multipart-derive" }
actix-router = { path = "actix-router" }
actix-test = { path = "actix-test" }
actix-web = { path = "." }
actix-web = { path = "actix-web" }
actix-web-actors = { path = "actix-web-actors" }
actix-web-codegen = { path = "actix-web-codegen" }
awc = { path = "awc" }
[[test]]
name = "test_server"
required-features = ["compress", "cookies"]
[[example]]
name = "basic"
required-features = ["compress"]
[[example]]
name = "uds"
required-features = ["compress"]
[[example]]
name = "on_connect"
required-features = []
[[bench]]
name = "server"
harness = false
[[bench]]
name = "service"
harness = false
[[bench]]
name = "responder"
harness = false
# uncomment for quick testing against local actix-net repo
# actix-service = { path = "../actix-net/actix-service" }
# actix-macros = { path = "../actix-net/actix-macros" }
# actix-rt = { path = "../actix-net/actix-rt" }
# actix-codec = { path = "../actix-net/actix-codec" }
# actix-utils = { path = "../actix-net/actix-utils" }
# actix-tls = { path = "../actix-net/actix-tls" }
# actix-server = { path = "../actix-net/actix-server" }


@@ -1,662 +0,0 @@
## Unreleased
* The default `NormalizePath` behavior now strips trailing slashes by default. This was
previously documented to be the case in v3 but the behavior now matches. The effect is that
routes defined with trailing slashes will become inaccessible when
using `NormalizePath::default()`.
Before: `#[get("/test/")`
After: `#[get("/test")`
Alternatively, explicitly require trailing slashes: `NormalizePath::new(TrailingSlash::Always)`.
## 3.0.0
* The return type for `ServiceRequest::app_data::<T>()` was changed from returning a `Data<T>` to
simply a `T`. To access a `Data<T>` use `ServiceRequest::app_data::<Data<T>>()`.
* Cookie handling has been offloaded to the `cookie` crate:
* `USERINFO_ENCODE_SET` is no longer exposed. Percent-encoding is still supported; check docs.
* Some types now require lifetime parameters.
* The time crate was updated to `v0.2`, a major breaking change to the time crate, which affects
any `actix-web` method previously expecting a time v0.1 input.
* Setting a cookie's SameSite property, explicitly, to `SameSite::None` will now
result in `SameSite=None` being sent with the response Set-Cookie header.
To create a cookie without a SameSite attribute, remove any calls setting same_site.
* actix-http support for Actors messages was moved to actix-http crate and is enabled
with feature `actors`
* content_length function is removed from actix-http.
You can set Content-Length by normally setting the response body or calling no_chunking function.
* `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
`u64` instead of a `usize`.
* Code that was using `path.<index>` to access a `web::Path<(A, B, C)>`s elements now needs to use
destructuring or `.into_inner()`. For example:
```rust
// Previously:
async fn some_route(path: web::Path<(String, String)>) -> String {
format!("Hello, {} {}", path.0, path.1)
}
// Now (this also worked before):
async fn some_route(path: web::Path<(String, String)>) -> String {
let (first_name, last_name) = path.into_inner();
format!("Hello, {} {}", first_name, last_name)
}
// Or (this wasn't previously supported):
async fn some_route(web::Path((first_name, last_name)): web::Path<(String, String)>) -> String {
format!("Hello, {} {}", first_name, last_name)
}
```
* `middleware::NormalizePath` can now also be configured to trim trailing slashes instead of always keeping one.
It will need `middleware::normalize::TrailingSlash` when being constructed with `NormalizePath::new(...)`,
or for an easier migration you can replace `wrap(middleware::NormalizePath)` with `wrap(middleware::NormalizePath::new(TrailingSlash::MergeOnly))`.
* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`.
* `HttpServer::maxconnrate` is renamed to the more expressive `HttpServer::max_connection_rate`.
## 2.0.0
* `HttpServer::start()` renamed to `HttpServer::run()`. It also possible to
`.await` on `run` method result, in that case it awaits server exit.
* `App::register_data()` renamed to `App::app_data()` and accepts any type `T: 'static`.
Stored data is available via `HttpRequest::app_data()` method at runtime.
* Extractor configuration must be registered with `App::app_data()` instead of `App::data()`
* Sync handlers has been removed. `.to_async()` method has been renamed to `.to()`
replace `fn` with `async fn` to convert sync handler to async
* `actix_http_test::TestServer` moved to `actix_web::test` module. To start
test server use `test::start()` or `test_start_with_config()` methods
* `ResponseError` trait has been reafctored. `ResponseError::error_response()` renders
http response.
* Feature `rust-tls` renamed to `rustls`
instead of
```rust
actix-web = { version = "2.0.0", features = ["rust-tls"] }
```
use
```rust
actix-web = { version = "2.0.0", features = ["rustls"] }
```
* Feature `ssl` renamed to `openssl`
instead of
```rust
actix-web = { version = "2.0.0", features = ["ssl"] }
```
use
```rust
actix-web = { version = "2.0.0", features = ["openssl"] }
```
* `Cors` builder now requires that you call `.finish()` to construct the middleware
## 1.0.1
* Cors middleware has been moved to `actix-cors` crate
instead of
```rust
use actix_web::middleware::cors::Cors;
```
use
```rust
use actix_cors::Cors;
```
* Identity middleware has been moved to `actix-identity` crate
instead of
```rust
use actix_web::middleware::identity::{Identity, CookieIdentityPolicy, IdentityService};
```
use
```rust
use actix_identity::{Identity, CookieIdentityPolicy, IdentityService};
```
## 1.0.0
* Extractor configuration. In version 1.0 this is handled with the new `Data` mechanism for both setting and retrieving the configuration
instead of
```rust
#[derive(Default)]
struct ExtractorConfig {
config: String,
}
impl FromRequest for YourExtractor {
type Config = ExtractorConfig;
type Result = Result<YourExtractor, Error>;
fn from_request(req: &HttpRequest, cfg: &Self::Config) -> Self::Result {
println!("use the config: {:?}", cfg.config);
...
}
}
App::new().resource("/route_with_config", |r| {
r.post().with_config(handler_fn, |cfg| {
cfg.0.config = "test".to_string();
})
})
```
use the HttpRequest to get the configuration like any other `Data` with `req.app_data::<C>()` and set it with the `data()` method on the `resource`
```rust
#[derive(Default)]
struct ExtractorConfig {
config: String,
}
impl FromRequest for YourExtractor {
type Error = Error;
type Future = Result<Self, Self::Error>;
type Config = ExtractorConfig;
fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future {
let cfg = req.app_data::<ExtractorConfig>();
println!("config data?: {:?}", cfg.unwrap().role);
...
}
}
App::new().service(
resource("/route_with_config")
.data(ExtractorConfig {
config: "test".to_string(),
})
.route(post().to(handler_fn)),
)
```
* Resource registration. 1.0 version uses generalized resource
registration via `.service()` method.
instead of
```rust
App.new().resource("/welcome", |r| r.f(welcome))
```
use App's or Scope's `.service()` method. `.service()` method accepts
object that implements `HttpServiceFactory` trait. By default
actix-web provides `Resource` and `Scope` services.
```rust
App.new().service(
web::resource("/welcome")
.route(web::get().to(welcome))
.route(web::post().to(post_handler))
```
* Scope registration.
instead of
```rust
let app = App::new().scope("/{project_id}", |scope| {
scope
.resource("/path1", |r| r.f(|_| HttpResponse::Ok()))
.resource("/path2", |r| r.f(|_| HttpResponse::Ok()))
.resource("/path3", |r| r.f(|_| HttpResponse::MethodNotAllowed()))
});
```
use `.service()` for registration and `web::scope()` as scope object factory.
```rust
let app = App::new().service(
web::scope("/{project_id}")
.service(web::resource("/path1").to(|| HttpResponse::Ok()))
.service(web::resource("/path2").to(|| HttpResponse::Ok()))
.service(web::resource("/path3").to(|| HttpResponse::MethodNotAllowed()))
);
```
* `.with()`, `.with_async()` registration methods have been renamed to `.to()` and `.to_async()`.
instead of
```rust
App.new().resource("/welcome", |r| r.with(welcome))
```
use `.to()` or `.to_async()` methods
```rust
App.new().service(web::resource("/welcome").to(welcome))
```
* Passing arguments to handler with extractors, multiple arguments are allowed
instead of
```rust
fn welcome((body, req): (Bytes, HttpRequest)) -> ... {
...
}
```
use multiple arguments
```rust
fn welcome(body: Bytes, req: HttpRequest) -> ... {
...
}
```
* `.f()`, `.a()` and `.h()` handler registration methods have been removed.
Use `.to()` for handlers and `.to_async()` for async handlers. Handler function
must use extractors.
instead of
```rust
App.new().resource("/welcome", |r| r.f(welcome))
```
use App's `to()` or `to_async()` methods
```rust
App.new().service(web::resource("/welcome").to(welcome))
```
* `HttpRequest` does not provide access to request's payload stream.
instead of
```rust
fn index(req: &HttpRequest) -> Box<Future<Item=HttpResponse, Error=Error>> {
req
.payload()
.from_err()
.fold((), |_, chunk| {
...
})
.map(|_| HttpResponse::Ok().finish())
.responder()
}
```
use `Payload` extractor
```rust
fn index(stream: web::Payload) -> impl Future<Item=HttpResponse, Error=Error> {
stream
.from_err()
.fold((), |_, chunk| {
...
})
.map(|_| HttpResponse::Ok().finish())
}
```
* `State` is now `Data`. You register Data during the App initialization process
and then access it from handlers either using a Data extractor or using
HttpRequest's api.
instead of
```rust
App.with_state(T)
```
use App's `data` method
```rust
App.new()
.data(T)
```
and either use the Data extractor within your handler
```rust
use actix_web::web::Data;
fn endpoint_handler(Data<T>)){
...
}
```
.. or access your Data element from the HttpRequest
```rust
fn endpoint_handler(req: HttpRequest) {
let data: Option<Data<T>> = req.app_data::<T>();
}
```
* AsyncResponder is removed, use `.to_async()` registration method and `impl Future<>` as result type.
instead of
```rust
use actix_web::AsyncResponder;
fn endpoint_handler(...) -> impl Future<Item=HttpResponse, Error=Error>{
...
.responder()
}
```
.. simply omit AsyncResponder and the corresponding responder() finish method
* Middleware
instead of
```rust
let app = App::new()
.middleware(middleware::Logger::default())
```
use `.wrap()` method
```rust
let app = App::new()
.wrap(middleware::Logger::default())
.route("/index.html", web::get().to(index));
```
* `HttpRequest::body()`, `HttpRequest::urlencoded()`, `HttpRequest::json()`, `HttpRequest::multipart()`
method have been removed. Use `Bytes`, `String`, `Form`, `Json`, `Multipart` extractors instead.
instead of
```rust
fn index(req: &HttpRequest) -> Responder {
req.body()
.and_then(|body| {
...
})
}
```
use
```rust
fn index(body: Bytes) -> Responder {
...
}
```
* `actix_web::server` module has been removed. To start http server use `actix_web::HttpServer` type
* StaticFiles and NamedFile have been moved to a separate crate.
instead of `use actix_web::fs::StaticFile`
use `use actix_files::Files`
instead of `use actix_web::fs::Namedfile`
use `use actix_files::NamedFile`
* Multipart has been moved to a separate crate.
instead of `use actix_web::multipart::Multipart`
use `use actix_multipart::Multipart`
* Response compression is not enabled by default.
To enable, use `Compress` middleware, `App::new().wrap(Compress::default())`.
* Session middleware moved to actix-session crate
* Actors support have been moved to `actix-web-actors` crate
* Custom Error
Instead of error_response method alone, ResponseError now provides two methods: error_response and render_response respectively. Where, error_response creates the error response and render_response returns the error response to the caller.
Simplest migration from 0.7 to 1.0 shall include below method to the custom implementation of ResponseError:
```rust
fn render_response(&self) -> HttpResponse {
self.error_response()
}
```
## 0.7.15
* The `' '` character is not percent decoded anymore before matching routes. If you need to use it in
your routes, you should use `%20`.
instead of
```rust
fn main() {
let app = App::new().resource("/my index", |r| {
r.method(http::Method::GET)
.with(index);
});
}
```
use
```rust
fn main() {
let app = App::new().resource("/my%20index", |r| {
r.method(http::Method::GET)
.with(index);
});
}
```
* If you used `AsyncResult::async` you need to replace it with `AsyncResult::future`
## 0.7.4
* `Route::with_config()`/`Route::with_async_config()` always passes configuration objects as tuple
even for handler with one parameter.
## 0.7
* `HttpRequest` does not implement `Stream` anymore. If you need to read request payload
use `HttpMessage::payload()` method.
instead of
```rust
fn index(req: HttpRequest) -> impl Responder {
req
.from_err()
.fold(...)
....
}
```
use `.payload()`
```rust
fn index(req: HttpRequest) -> impl Responder {
req
.payload() // <- get request payload stream
.from_err()
.fold(...)
....
}
```
* [Middleware](https://actix.rs/actix-web/actix_web/middleware/trait.Middleware.html)
trait uses `&HttpRequest` instead of `&mut HttpRequest`.
* Removed `Route::with2()` and `Route::with3()` use tuple of extractors instead.
instead of
```rust
fn index(query: Query<..>, info: Json<MyStruct) -> impl Responder {}
```
use tuple of extractors and use `.with()` for registration:
```rust
fn index((query, json): (Query<..>, Json<MyStruct)) -> impl Responder {}
```
* `Handler::handle()` uses `&self` instead of `&mut self`
* `Handler::handle()` accepts reference to `HttpRequest<_>` instead of value
* Removed deprecated `HttpServer::threads()`, use
[HttpServer::workers()](https://actix.rs/actix-web/actix_web/server/struct.HttpServer.html#method.workers) instead.
* Renamed `client::ClientConnectorError::Connector` to
`client::ClientConnectorError::Resolver`
* `Route::with()` does not return `ExtractorConfig`; to configure the
  extractor, use `Route::with_config()`
instead of
```rust
fn main() {
let app = App::new().resource("/index.html", |r| {
r.method(http::Method::GET)
.with(index)
.limit(4096); // <- limit size of the payload
});
}
```
use
```rust
fn main() {
let app = App::new().resource("/index.html", |r| {
r.method(http::Method::GET)
.with_config(index, |cfg| { // <- register handler
cfg.limit(4096); // <- limit size of the payload
})
});
}
```
* `Route::with_async()` does not return `ExtractorConfig`; to configure the
  extractor, use `Route::with_async_config()`
## 0.6
* The `Path<T>` extractor returns `ErrorNotFound` on failure instead of `ErrorBadRequest`
* `ws::Message::Close` now includes optional close reason.
`ws::CloseCode::Status` and `ws::CloseCode::Empty` have been removed.
* `HttpServer::threads()` renamed to `HttpServer::workers()`.
* `HttpServer::start_ssl()` and `HttpServer::start_tls()` deprecated.
Use `HttpServer::bind_ssl()` and `HttpServer::bind_tls()` instead.
* `HttpRequest::extensions()` returns a read-only reference to the request's `Extensions`;
  `HttpRequest::extensions_mut()` returns a mutable reference.
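  For example (a sketch using a hypothetical `Counter` type stored in the request extensions):
  ```rust
  use actix_web::{HttpRequest, HttpResponse};

  struct Counter(usize);

  fn index(mut req: HttpRequest) -> HttpResponse {
      // mutable access: insert or replace a value
      req.extensions_mut().insert(Counter(1));

      // read-only access
      let count = req.extensions().get::<Counter>().map(|c| c.0).unwrap_or(0);

      HttpResponse::Ok().body(count.to_string())
  }
  ```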
* Instead of
  `use actix_web::middleware::{
  CookieSessionBackend, CookieSessionError, RequestSession,
  Session, SessionBackend, SessionImpl, SessionStorage};`
  use the `actix_web::middleware::session` module:
  `use actix_web::middleware::session::{CookieSessionBackend, CookieSessionError,
  RequestSession, Session, SessionBackend, SessionImpl, SessionStorage};`
* `FromRequest::from_request()` accepts a mutable reference to the request
* `FromRequest::Result` has to implement `Into<Reply<Self>>`
* [`Responder::respond_to()`](
https://actix.rs/actix-web/actix_web/trait.Responder.html#tymethod.respond_to)
is generic over `S`
* Use the `Query` extractor instead of `HttpRequest::query()`.
```rust
fn index(q: Query<HashMap<String, String>>) -> Result<..> {
...
}
```
or
```rust
let q = Query::<HashMap<String, String>>::extract(req);
```
* WebSocket operations are implemented as the `WsWriter` trait;
  you need to import it with `use actix_web::ws::WsWriter`.
## 0.5
* `HttpResponseBuilder::body()`, `.finish()`, `.json()`
methods return `HttpResponse` instead of `Result<HttpResponse>`
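  For example, a handler can now return the built response directly, with no `Result` to unwrap (a sketch):
  ```rust
  use actix_web::{HttpRequest, HttpResponse};

  fn index(_req: HttpRequest) -> HttpResponse {
      // `.body()` now yields an `HttpResponse` directly
      HttpResponse::Ok().body("hello")
  }
  ```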
* `actix_web::Method`, `actix_web::StatusCode`, `actix_web::Version`
moved to `actix_web::http` module
* `actix_web::header` moved to `actix_web::http::header`
* `NormalizePath` moved to `actix_web::http` module
* `HttpServer` moved to `actix_web::server`; the new `actix_web::server::new()` function was added as a
  shortcut for `actix_web::server::HttpServer::new()`
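  A sketch of the shortened startup using that function:
  ```rust
  use actix_web::{server, App, HttpRequest};

  fn index(_req: HttpRequest) -> &'static str {
      "Hello world!"
  }

  fn main() {
      server::new(|| App::new().resource("/", |r| r.f(index)))
          .bind("127.0.0.1:8080")
          .unwrap()
          .run();
  }
  ```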
* `DefaultHeaders` middleware does not use a separate builder; all builder methods moved to the type itself
* The `show_index` parameter of `StaticFiles::new()` was removed; use the `show_files_listing()` method instead.
* `CookieSessionBackendBuilder` removed, all methods moved to `CookieSessionBackend` type
* `actix_web::httpcodes` module is deprecated, `HttpResponse::Ok()`, `HttpResponse::Found()` and other `HttpResponse::XXX()`
functions should be used instead
* `ClientRequestBuilder::body()` returns `Result<_, actix_web::Error>`
instead of `Result<_, http::Error>`
* `Application` renamed to `App`
* `actix_web::Reply`, `actix_web::Resource` moved to `actix_web::dev`

109
README.md
View file

@ -1,109 +0,0 @@
<div align="center">
<h1>Actix Web</h1>
<p>
<strong>Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust</strong>
</p>
<p>
[![crates.io](https://img.shields.io/crates/v/actix-web?label=latest)](https://crates.io/crates/actix-web)
[![Documentation](https://docs.rs/actix-web/badge.svg?version=4.0.0-beta.5)](https://docs.rs/actix-web/4.0.0-beta.5)
[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-web.svg)
[![Dependency Status](https://deps.rs/crate/actix-web/4.0.0-beta.5/status.svg)](https://deps.rs/crate/actix-web/4.0.0-beta.5)
<br />
[![build status](https://github.com/actix/actix-web/workflows/CI%20%28Linux%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-web/actions)
[![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web)
![downloads](https://img.shields.io/crates/d/actix-web.svg)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
</p>
</div>
## Features
* Supports *HTTP/1.x* and *HTTP/2*
* Streaming and pipelining
* Keep-alive and slow requests handling
* Client/server [WebSockets](https://actix.rs/docs/websockets/) support
* Transparent content compression/decompression (br, gzip, deflate)
* Powerful [request routing](https://actix.rs/docs/url-dispatch/)
* Multipart streams
* Static assets
* SSL support using OpenSSL or Rustls
* Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/))
* Includes an async [HTTP client](https://docs.rs/awc/)
* Runs on stable Rust 1.46+
## Documentation
* [Website & User Guide](https://actix.rs)
* [Examples Repository](https://github.com/actix/examples)
* [API Documentation](https://docs.rs/actix-web)
* [API Documentation (master branch)](https://actix.rs/actix-web/actix_web)
## Example
Dependencies:
```toml
[dependencies]
actix-web = "3"
```
Code:
```rust
use actix_web::{get, web, App, HttpServer, Responder};
#[get("/{id}/{name}/index.html")]
async fn index(web::Path((id, name)): web::Path<(u32, String)>) -> impl Responder {
format!("Hello {}! id:{}", name, id)
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
HttpServer::new(|| App::new().service(index))
.bind("127.0.0.1:8080")?
.run()
.await
}
```
### More examples
* [Basic Setup](https://github.com/actix/examples/tree/master/basics/basics/)
* [Application State](https://github.com/actix/examples/tree/master/basics/state/)
* [JSON Handling](https://github.com/actix/examples/tree/master/json/json/)
* [Multipart Streams](https://github.com/actix/examples/tree/master/forms/multipart/)
* [Diesel Integration](https://github.com/actix/examples/tree/master/database_interactions/diesel/)
* [r2d2 Integration](https://github.com/actix/examples/tree/master/database_interactions/r2d2/)
* [Simple WebSocket](https://github.com/actix/examples/tree/master/websockets/websocket/)
* [Tera Templates](https://github.com/actix/examples/tree/master/template_engines/tera/)
* [Askama Templates](https://github.com/actix/examples/tree/master/template_engines/askama/)
* [HTTPS using Rustls](https://github.com/actix/examples/tree/master/security/rustls/)
* [HTTPS using OpenSSL](https://github.com/actix/examples/tree/master/security/openssl/)
* [WebSocket Chat](https://github.com/actix/examples/tree/master/websockets/chat/)
You may consider checking out
[this directory](https://github.com/actix/examples/tree/master/) for more examples.
## Benchmarks
One of the fastest web frameworks available according to the
[TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r20&test=composite).
## License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
[http://www.apache.org/licenses/LICENSE-2.0])
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
[http://opensource.org/licenses/MIT])
at your option.
## Code of Conduct
Contribution to the actix-web repo is organized under the terms of the Contributor Covenant.
The Actix team promises to intervene to uphold that code of conduct.

1
README.md Symbolic link
View file

@ -0,0 +1 @@
actix-web/README.md

View file

@ -1,141 +1,250 @@
# Changes
## Unreleased - 2021-xx-xx
* `NamedFile` now implements `ServiceFactory` and `HttpServiceFactory` making it much more useful in routing. For example, it can be used directly as a default service. [#2135]
* For symbolic links, `Content-Disposition` header no longer shows the filename of the original file. [#2156]
* `Files::redirect_to_slash_directory()` now works as expected when used with `Files::show_files_listing()`. [#2225]
## Unreleased
## 0.6.6
- Update `tokio-uring` dependency to `0.4`.
- Minimum supported Rust version (MSRV) is now 1.72.
## 0.6.5
- Fix handling of special characters in filenames.
## 0.6.4
- Fix handling of newlines in filenames.
- Minimum supported Rust version (MSRV) is now 1.68 due to transitive `time` dependency.
## 0.6.3
- XHTML files now use `Content-Disposition: inline` instead of `attachment`. [#2903]
- Minimum supported Rust version (MSRV) is now 1.59 due to transitive `time` dependency.
- Update `tokio-uring` dependency to `0.4`.
[#2903]: https://github.com/actix/actix-web/pull/2903
## 0.6.2
- Allow partial range responses for video content to start streaming sooner. [#2817]
- Minimum supported Rust version (MSRV) is now 1.57 due to transitive `time` dependency.
[#2817]: https://github.com/actix/actix-web/pull/2817
## 0.6.1
- Add `NamedFile::{modified, metadata, content_type, content_disposition, encoding}()` getters. [#2021]
- Update `tokio-uring` dependency to `0.3`.
- Audio files now use `Content-Disposition: inline` instead of `attachment`. [#2645]
- Minimum supported Rust version (MSRV) is now 1.56 due to transitive `hashbrown` dependency.
[#2021]: https://github.com/actix/actix-web/pull/2021
[#2645]: https://github.com/actix/actix-web/pull/2645
## 0.6.0
- No significant changes since `0.6.0-beta.16`.
## 0.6.0-beta.16
- No significant changes since `0.6.0-beta.15`.
## 0.6.0-beta.15
- No significant changes since `0.6.0-beta.14`.
## 0.6.0-beta.14
- The `prefer_utf8` option introduced in `0.4.0` is now true by default. [#2583]
[#2583]: https://github.com/actix/actix-web/pull/2583
## 0.6.0-beta.13
- The `Files` service now rejects requests with URL paths that include `%2F` (decoded: `/`). [#2398]
- The `Files` service now correctly decodes `%25` in the URL path to `%` for the file path. [#2398]
- Minimum supported Rust version (MSRV) is now 1.54.
[#2398]: https://github.com/actix/actix-web/pull/2398
## 0.6.0-beta.12
- No significant changes since `0.6.0-beta.11`.
## 0.6.0-beta.11
- No significant changes since `0.6.0-beta.10`.
## 0.6.0-beta.10
- No significant changes since `0.6.0-beta.9`.
## 0.6.0-beta.9
- Add crate feature `experimental-io-uring`, enabling async file I/O to be utilized. This feature is only available on Linux OSes with recent kernel versions. This feature is semver-exempt. [#2408]
- Add `NamedFile::open_async`. [#2408]
- Fix 304 Not Modified responses to omit the Content-Length header, as per the spec. [#2453]
- The `Responder` impl for `NamedFile` now has a boxed future associated type. [#2408]
- The `Service` impl for `NamedFileService` now has a boxed future associated type. [#2408]
- Add `impl Clone` for `FilesService`. [#2408]
[#2408]: https://github.com/actix/actix-web/pull/2408
[#2453]: https://github.com/actix/actix-web/pull/2453
## 0.6.0-beta.8
- Minimum supported Rust version (MSRV) is now 1.52.
## 0.6.0-beta.7
- Minimum supported Rust version (MSRV) is now 1.51.
## 0.6.0-beta.6
- Added `Files::path_filter()`. [#2274]
- `Files::show_files_listing()` can now be used with `Files::index_file()` to show files listing as a fallback when the index file is not found. [#2228]
[#2274]: https://github.com/actix/actix-web/pull/2274
[#2228]: https://github.com/actix/actix-web/pull/2228
## 0.6.0-beta.5
- `NamedFile` now implements `ServiceFactory` and `HttpServiceFactory` making it much more useful in routing. For example, it can be used directly as a default service. [#2135]
- For symbolic links, `Content-Disposition` header no longer shows the filename of the original file. [#2156]
- `Files::redirect_to_slash_directory()` now works as expected when used with `Files::show_files_listing()`. [#2225]
- `application/{javascript, json, wasm}` mime type now have `inline` disposition by default. [#2257]
[#2135]: https://github.com/actix/actix-web/pull/2135
[#2156]: https://github.com/actix/actix-web/pull/2156
[#2225]: https://github.com/actix/actix-web/pull/2225
[#2257]: https://github.com/actix/actix-web/pull/2257
## 0.6.0-beta.4
## 0.6.0-beta.4 - 2021-04-02
* No notable changes.
* Add support for `.guard` in `Files` to selectively filter `Files` services. [#2046]
- Add support for `.guard` in `Files` to selectively filter `Files` services. [#2046]
[#2046]: https://github.com/actix/actix-web/pull/2046
## 0.6.0-beta.3 - 2021-03-09
* No notable changes.
## 0.6.0-beta.3
- No notable changes.
## 0.6.0-beta.2 - 2021-02-10
* Fix If-Modified-Since and If-Unmodified-Since to not compare using sub-second timestamps. [#1887]
* Replace `v_htmlescape` with `askama_escape`. [#1953]
## 0.6.0-beta.2
- Fix If-Modified-Since and If-Unmodified-Since to not compare using sub-second timestamps. [#1887]
- Replace `v_htmlescape` with `askama_escape`. [#1953]
[#1887]: https://github.com/actix/actix-web/pull/1887
[#1953]: https://github.com/actix/actix-web/pull/1953
## 0.6.0-beta.1
## 0.6.0-beta.1 - 2021-01-07
* `HttpRange::parse` now has its own error type.
* Update `bytes` to `1.0`. [#1813]
- `HttpRange::parse` now has its own error type.
- Update `bytes` to `1.0`. [#1813]
[#1813]: https://github.com/actix/actix-web/pull/1813
## 0.5.0
## 0.5.0 - 2020-12-26
* Optionally support hidden files/directories. [#1811]
- Optionally support hidden files/directories. [#1811]
[#1811]: https://github.com/actix/actix-web/pull/1811
## 0.4.1
## 0.4.1 - 2020-11-24
* Clarify order of parameters in `Files::new` and improve docs.
- Clarify order of parameters in `Files::new` and improve docs.
## 0.4.0
## 0.4.0 - 2020-10-06
* Add `Files::prefer_utf8` option that adds UTF-8 charset on certain response types. [#1714]
- Add `Files::prefer_utf8` option that adds UTF-8 charset on certain response types. [#1714]
[#1714]: https://github.com/actix/actix-web/pull/1714
## 0.3.0
## 0.3.0 - 2020-09-11
* No significant changes from 0.3.0-beta.1.
- No significant changes from 0.3.0-beta.1.
## 0.3.0-beta.1
## 0.3.0-beta.1 - 2020-07-15
* Update `v_htmlescape` to 0.10
* Update `actix-web` and `actix-http` dependencies to beta.1
- Update `v_htmlescape` to 0.10
- Update `actix-web` and `actix-http` dependencies to beta.1
## 0.3.0-alpha.1
## 0.3.0-alpha.1 - 2020-05-23
* Update `actix-web` and `actix-http` dependencies to alpha
* Fix some typos in the docs
* Bump minimum supported Rust version to 1.40
* Support sending Content-Length when Content-Range is specified [#1384]
- Update `actix-web` and `actix-http` dependencies to alpha
- Fix some typos in the docs
- Bump minimum supported Rust version to 1.40
- Support sending Content-Length when Content-Range is specified [#1384]
[#1384]: https://github.com/actix/actix-web/pull/1384
## 0.2.1
## 0.2.1 - 2019-12-22
* Use the same format for file URLs regardless of platforms
- Use the same format for file URLs regardless of platforms
## 0.2.0
## 0.2.0 - 2019-12-20
* Fix BodyEncoding trait import #1220
- Fix BodyEncoding trait import #1220
## 0.2.0-alpha.1
## 0.2.0-alpha.1 - 2019-12-07
* Migrate to `std::future`
- Migrate to `std::future`
## 0.1.7
## 0.1.7 - 2019-11-06
* Add an additional `filename*` param in the `Content-Disposition` header of
`actix_files::NamedFile` to be more compatible. (#1151)
- Add an additional `filename*` param in the `Content-Disposition` header of `actix_files::NamedFile` to be more compatible. (#1151)
## 0.1.6 - 2019-10-14
* Add option to redirect to a slash-ended path `Files` #1132
## 0.1.6
- Add option to redirect to a slash-ended path `Files` #1132
## 0.1.5 - 2019-10-08
* Bump up `mime_guess` crate version to 2.0.1
* Bump up `percent-encoding` crate version to 2.1
* Allow user defined request guards for `Files` #1113
## 0.1.5
- Bump up `mime_guess` crate version to 2.0.1
- Bump up `percent-encoding` crate version to 2.1
- Allow user defined request guards for `Files` #1113
## 0.1.4 - 2019-07-20
* Allow to disable `Content-Disposition` header #686
## 0.1.4
- Allow to disable `Content-Disposition` header #686
## 0.1.3 - 2019-06-28
* Do not set `Content-Length` header, let actix-http set it #930
## 0.1.3
- Do not set `Content-Length` header, let actix-http set it #930
## 0.1.2 - 2019-06-13
* Content-Length is 0 for NamedFile HEAD request #914
* Fix ring dependency from actix-web default features for #741
## 0.1.2
- Content-Length is 0 for NamedFile HEAD request #914
- Fix ring dependency from actix-web default features for #741
## 0.1.1 - 2019-06-01
* Static files are incorrectly served as both chunked and with length #812
## 0.1.1
- Static files are incorrectly served as both chunked and with length #812
## 0.1.0 - 2019-05-25
* NamedFile last-modified check always fails due to nano-seconds in file modified date #820
## 0.1.0
- NamedFile last-modified check always fails due to nano-seconds in file modified date #820
## 0.1.0-beta.4 - 2019-05-12
* Update actix-web to beta.4
## 0.1.0-beta.4
- Update actix-web to beta.4
## 0.1.0-beta.1 - 2019-04-20
* Update actix-web to beta.1
## 0.1.0-beta.1
- Update actix-web to beta.1
## 0.1.0-alpha.6 - 2019-04-14
* Update actix-web to alpha6
## 0.1.0-alpha.6
- Update actix-web to alpha6
## 0.1.0-alpha.4 - 2019-04-08
* Update actix-web to alpha4
## 0.1.0-alpha.4
- Update actix-web to alpha4
## 0.1.0-alpha.2 - 2019-04-02
* Add default handler support
## 0.1.0-alpha.2
- Add default handler support
## 0.1.0-alpha.1 - 2019-03-28
* Initial impl
## 0.1.0-alpha.1
- Initial impl

View file

@ -1,38 +1,56 @@
[package]
name = "actix-files"
version = "0.6.0-beta.4"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
version = "0.6.6"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "Static file serving for Actix Web"
readme = "README.md"
keywords = ["actix", "http", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-files/"
repository = "https://github.com/actix/actix-web"
categories = ["asynchronous", "web-programming::http-server"]
license = "MIT OR Apache-2.0"
edition = "2018"
edition = "2021"
[lib]
name = "actix_files"
path = "src/lib.rs"
[package.metadata.cargo_check_external_types]
allowed_external_types = [
"actix_http::*",
"actix_service::*",
"actix_web::*",
"http::*",
"mime::*",
]
[features]
experimental-io-uring = ["actix-web/experimental-io-uring", "tokio-uring"]
[dependencies]
actix-web = { version = "4.0.0-beta.6", default-features = false }
actix-service = "2.0.0"
actix-utils = "3.0.0"
actix-http = "3"
actix-service = "2"
actix-utils = "3"
actix-web = { version = "4", default-features = false }
askama_escape = "0.10"
bitflags = "1"
bitflags = "2"
bytes = "1"
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
http-range = "0.1.4"
derive_more = "0.99.5"
futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] }
http-range = "0.1.4"
log = "0.4"
mime = "0.3"
mime = "0.3.9"
mime_guess = "2.0.1"
percent-encoding = "2.1"
pin-project-lite = "0.2.7"
v_htmlescape = "0.15.5"
# experimental-io-uring
[target.'cfg(target_os = "linux")'.dependencies]
tokio-uring = { version = "0.5", optional = true, features = ["bytes"] }
actix-server = { version = "2.4", optional = true } # ensure matching tokio-uring versions
[dev-dependencies]
actix-rt = "2.2"
actix-web = "4.0.0-beta.6"
actix-test = "0.1.0-beta.2"
actix-rt = "2.7"
actix-test = "0.1"
actix-web = "4"
env_logger = "0.11"
tempfile = "3.2"

View file

@ -1,19 +1,32 @@
# actix-files
# `actix-files`
> Static file serving for Actix Web
<!-- prettier-ignore-start -->
[![crates.io](https://img.shields.io/crates/v/actix-files?label=latest)](https://crates.io/crates/actix-files)
[![Documentation](https://docs.rs/actix-files/badge.svg?version=0.6.0-beta.4)](https://docs.rs/actix-files/0.6.0-beta.4)
[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
[![Documentation](https://docs.rs/actix-files/badge.svg?version=0.6.6)](https://docs.rs/actix-files/0.6.6)
![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg)
![License](https://img.shields.io/crates/l/actix-files.svg)
<br />
[![dependency status](https://deps.rs/crate/actix-files/0.6.0-beta.4/status.svg)](https://deps.rs/crate/actix-files/0.6.0-beta.4)
[![dependency status](https://deps.rs/crate/actix-files/0.6.6/status.svg)](https://deps.rs/crate/actix-files/0.6.6)
[![Download](https://img.shields.io/crates/d/actix-files.svg)](https://crates.io/crates/actix-files)
[![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
## Documentation & Resources
<!-- prettier-ignore-end -->
- [API Documentation](https://docs.rs/actix-files/)
- [Example Project](https://github.com/actix/examples/tree/master/basics/static_index)
- [Chat on Gitter](https://gitter.im/actix/actix-web)
- Minimum supported Rust version: 1.46 or later
<!-- cargo-rdme start -->
Static file serving for Actix Web.
Provides a non-blocking service for serving static files from disk.
## Examples
```rust
use actix_web::App;
use actix_files::Files;
let app = App::new()
.service(Files::new("/static", ".").prefer_utf8(true));
```
<!-- cargo-rdme end -->

View file

@ -0,0 +1,33 @@
use actix_files::Files;
use actix_web::{get, guard, middleware, App, HttpServer, Responder};
const EXAMPLES_DIR: &str = concat![env!("CARGO_MANIFEST_DIR"), "/examples"];
#[get("/")]
async fn index() -> impl Responder {
"Hello world!"
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
log::info!("starting HTTP server at http://localhost:8080");
HttpServer::new(|| {
App::new()
.service(index)
.service(
Files::new("/assets", EXAMPLES_DIR)
.show_files_listing()
.guard(guard::Header("show-listing", "?1")),
)
.service(Files::new("/assets", EXAMPLES_DIR))
.wrap(middleware::Compress::default())
.wrap(middleware::Logger::default())
})
.bind(("127.0.0.1", 8080))?
.workers(2)
.run()
.await
}

View file

@ -1,95 +1,214 @@
use std::{
cmp, fmt,
fs::File,
future::Future,
io::{self, Read, Seek},
io,
pin::Pin,
task::{Context, Poll},
};
use actix_web::{
error::{BlockingError, Error},
rt::task::{spawn_blocking, JoinHandle},
};
use bytes::Bytes;
use actix_web::{error::Error, web::Bytes};
#[cfg(feature = "experimental-io-uring")]
use bytes::BytesMut;
use futures_core::{ready, Stream};
use pin_project_lite::pin_project;
#[doc(hidden)]
/// A helper created from a `std::fs::File` which reads the file
/// chunk-by-chunk on a `ThreadPool`.
pub struct ChunkedReadFile {
size: u64,
offset: u64,
state: ChunkedReadFileState,
counter: u64,
}
use super::named::File;
enum ChunkedReadFileState {
File(Option<File>),
Future(JoinHandle<Result<(File, Bytes), io::Error>>),
}
impl ChunkedReadFile {
pub(crate) fn new(size: u64, offset: u64, file: File) -> Self {
Self {
size,
offset,
state: ChunkedReadFileState::File(Some(file)),
counter: 0,
}
pin_project! {
/// Adapter to read a `std::file::File` in chunks.
#[doc(hidden)]
pub struct ChunkedReadFile<F, Fut> {
size: u64,
offset: u64,
#[pin]
state: ChunkedReadFileState<Fut>,
counter: u64,
callback: F,
}
}
impl fmt::Debug for ChunkedReadFile {
#[cfg(not(feature = "experimental-io-uring"))]
pin_project! {
#[project = ChunkedReadFileStateProj]
#[project_replace = ChunkedReadFileStateProjReplace]
enum ChunkedReadFileState<Fut> {
File { file: Option<File>, },
Future { #[pin] fut: Fut },
}
}
#[cfg(feature = "experimental-io-uring")]
pin_project! {
#[project = ChunkedReadFileStateProj]
#[project_replace = ChunkedReadFileStateProjReplace]
enum ChunkedReadFileState<Fut> {
File { file: Option<(File, BytesMut)> },
Future { #[pin] fut: Fut },
}
}
impl<F, Fut> fmt::Debug for ChunkedReadFile<F, Fut> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("ChunkedReadFile")
}
}
impl Stream for ChunkedReadFile {
pub(crate) fn new_chunked_read(
size: u64,
offset: u64,
file: File,
) -> impl Stream<Item = Result<Bytes, Error>> {
ChunkedReadFile {
size,
offset,
#[cfg(not(feature = "experimental-io-uring"))]
state: ChunkedReadFileState::File { file: Some(file) },
#[cfg(feature = "experimental-io-uring")]
state: ChunkedReadFileState::File {
file: Some((file, BytesMut::new())),
},
counter: 0,
callback: chunked_read_file_callback,
}
}
#[cfg(not(feature = "experimental-io-uring"))]
async fn chunked_read_file_callback(
mut file: File,
offset: u64,
max_bytes: usize,
) -> Result<(File, Bytes), Error> {
use io::{Read as _, Seek as _};
let res = actix_web::web::block(move || {
let mut buf = Vec::with_capacity(max_bytes);
file.seek(io::SeekFrom::Start(offset))?;
let n_bytes = file.by_ref().take(max_bytes as u64).read_to_end(&mut buf)?;
if n_bytes == 0 {
Err(io::Error::from(io::ErrorKind::UnexpectedEof))
} else {
Ok((file, Bytes::from(buf)))
}
})
.await??;
Ok(res)
}
#[cfg(feature = "experimental-io-uring")]
async fn chunked_read_file_callback(
file: File,
offset: u64,
max_bytes: usize,
mut bytes_mut: BytesMut,
) -> io::Result<(File, Bytes, BytesMut)> {
bytes_mut.reserve(max_bytes);
let (res, mut bytes_mut) = file.read_at(bytes_mut, offset).await;
let n_bytes = res?;
if n_bytes == 0 {
return Err(io::ErrorKind::UnexpectedEof.into());
}
let bytes = bytes_mut.split_to(n_bytes).freeze();
Ok((file, bytes, bytes_mut))
}
#[cfg(feature = "experimental-io-uring")]
impl<F, Fut> Stream for ChunkedReadFile<F, Fut>
where
F: Fn(File, u64, usize, BytesMut) -> Fut,
Fut: Future<Output = io::Result<(File, Bytes, BytesMut)>>,
{
type Item = Result<Bytes, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.as_mut().get_mut();
match this.state {
ChunkedReadFileState::File(ref mut file) => {
let size = this.size;
let offset = this.offset;
let counter = this.counter;
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
ChunkedReadFileStateProj::File { file } => {
let size = *this.size;
let offset = *this.offset;
let counter = *this.counter;
if size == counter {
Poll::Ready(None)
} else {
let mut file = file
let max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize;
let (file, bytes_mut) = file
.take()
.expect("ChunkedReadFile polled after completion");
let fut = spawn_blocking(move || {
let max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize;
let fut = (this.callback)(file, offset, max_bytes, bytes_mut);
let mut buf = Vec::with_capacity(max_bytes);
file.seek(io::SeekFrom::Start(offset))?;
this.state
.project_replace(ChunkedReadFileState::Future { fut });
let n_bytes =
file.by_ref().take(max_bytes as u64).read_to_end(&mut buf)?;
if n_bytes == 0 {
return Err(io::ErrorKind::UnexpectedEof.into());
}
Ok((file, Bytes::from(buf)))
});
this.state = ChunkedReadFileState::Future(fut);
self.poll_next(cx)
}
}
ChunkedReadFileState::Future(ref mut fut) => {
let (file, bytes) =
ready!(Pin::new(fut).poll(cx)).map_err(|_| BlockingError)??;
this.state = ChunkedReadFileState::File(Some(file));
ChunkedReadFileStateProj::Future { fut } => {
let (file, bytes, bytes_mut) = ready!(fut.poll(cx))?;
this.offset += bytes.len() as u64;
this.counter += bytes.len() as u64;
this.state.project_replace(ChunkedReadFileState::File {
file: Some((file, bytes_mut)),
});
*this.offset += bytes.len() as u64;
*this.counter += bytes.len() as u64;
Poll::Ready(Some(Ok(bytes)))
}
}
}
}
#[cfg(not(feature = "experimental-io-uring"))]
impl<F, Fut> Stream for ChunkedReadFile<F, Fut>
where
F: Fn(File, u64, usize) -> Fut,
Fut: Future<Output = Result<(File, Bytes), Error>>,
{
type Item = Result<Bytes, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
ChunkedReadFileStateProj::File { file } => {
let size = *this.size;
let offset = *this.offset;
let counter = *this.counter;
if size == counter {
Poll::Ready(None)
} else {
let max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize;
let file = file
.take()
.expect("ChunkedReadFile polled after completion");
let fut = (this.callback)(file, offset, max_bytes);
this.state
.project_replace(ChunkedReadFileState::Future { fut });
self.poll_next(cx)
}
}
ChunkedReadFileStateProj::Future { fut } => {
let (file, bytes) = ready!(fut.poll(cx))?;
this.state
.project_replace(ChunkedReadFileState::File { file: Some(file) });
*this.offset += bytes.len() as u64;
*this.counter += bytes.len() as u64;
Poll::Ready(Some(Ok(bytes)))
}

View file

@ -1,8 +1,13 @@
use std::{fmt::Write, fs::DirEntry, io, path::Path, path::PathBuf};
use std::{
fmt::Write,
fs::DirEntry,
io,
path::{Path, PathBuf},
};
use actix_web::{dev::ServiceResponse, HttpRequest, HttpResponse};
use askama_escape::{escape as escape_html_entity, Html};
use percent_encoding::{utf8_percent_encode, CONTROLS};
use v_htmlescape::escape as escape_html_entity;
/// A directory; responds with the generated directory listing.
#[derive(Debug)]
@ -40,17 +45,26 @@ impl Directory {
pub(crate) type DirectoryRenderer =
dyn Fn(&Directory, &HttpRequest) -> Result<ServiceResponse, io::Error>;
// show file url as relative to static path
/// Returns percent encoded file URL path.
macro_rules! encode_file_url {
($path:ident) => {
utf8_percent_encode(&$path, CONTROLS)
};
}
// " -- &quot; & -- &amp; ' -- &#x27; < -- &lt; > -- &gt; / -- &#x2f;
/// Returns HTML entity encoded formatter.
///
/// ```plain
/// " => &quot;
/// & => &amp;
/// ' => &#x27;
/// < => &lt;
/// > => &gt;
/// / => &#x2f;
/// ```
macro_rules! encode_file_name {
($entry:ident) => {
escape_html_entity(&$entry.file_name().to_string_lossy(), Html)
escape_html_entity(&$entry.file_name().to_string_lossy())
};
}
@ -66,7 +80,7 @@ pub(crate) fn directory_listing(
if dir.is_visible(&entry) {
let entry = entry.unwrap();
let p = match entry.path().strip_prefix(&dir.path) {
Ok(p) if cfg!(windows) => base.join(p).to_string_lossy().replace("\\", "/"),
Ok(p) if cfg!(windows) => base.join(p).to_string_lossy().replace('\\', "/"),
Ok(p) => base.join(p).to_string_lossy().into_owned(),
Err(_) => continue,
};

View file

@ -2,40 +2,47 @@ use actix_web::{http::StatusCode, ResponseError};
use derive_more::Display;
/// Errors which can occur when serving static files.
#[derive(Display, Debug, PartialEq)]
#[derive(Debug, PartialEq, Eq, Display)]
pub enum FilesError {
/// Path is not a directory
/// Path is not a directory.
#[allow(dead_code)]
#[display(fmt = "Path is not a directory. Unable to serve static files")]
#[display(fmt = "path is not a directory. Unable to serve static files")]
IsNotDirectory,
/// Cannot render directory
#[display(fmt = "Unable to render directory without index file")]
/// Cannot render directory.
#[display(fmt = "unable to render directory without index file")]
IsDirectory,
}
/// Return `NotFound` for `FilesError`
impl ResponseError for FilesError {
/// Returns `404 Not Found`.
fn status_code(&self) -> StatusCode {
StatusCode::NOT_FOUND
}
}
#[derive(Display, Debug, PartialEq)]
#[derive(Debug, PartialEq, Eq, Display)]
#[non_exhaustive]
pub enum UriSegmentError {
/// The segment started with the wrapped invalid character.
#[display(fmt = "The segment started with the wrapped invalid character")]
/// Segment started with the wrapped invalid character.
#[display(fmt = "segment started with invalid character: ('{_0}')")]
BadStart(char),
/// The segment contained the wrapped invalid character.
#[display(fmt = "The segment contained the wrapped invalid character")]
/// Segment contained the wrapped invalid character.
#[display(fmt = "segment contained invalid character ('{_0}')")]
BadChar(char),
/// The segment ended with the wrapped invalid character.
#[display(fmt = "The segment ended with the wrapped invalid character")]
/// Segment ended with the wrapped invalid character.
#[display(fmt = "segment ended with invalid character: ('{_0}')")]
BadEnd(char),
/// Path is not a valid UTF-8 string after percent-decoding.
#[display(fmt = "path is not a valid UTF-8 string after percent-decoding")]
NotValidUtf8,
}
/// Return `BadRequest` for `UriSegmentError`
impl ResponseError for UriSegmentError {
/// Returns `400 Bad Request`.
fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST
}

View file

@ -1,9 +1,15 @@
use std::{cell::RefCell, fmt, io, path::PathBuf, rc::Rc};
use std::{
cell::RefCell,
fmt, io,
path::{Path, PathBuf},
rc::Rc,
};
use actix_service::{boxed, IntoServiceFactory, ServiceFactory, ServiceFactoryExt};
use actix_utils::future::ok;
use actix_web::{
dev::{AppService, HttpServiceFactory, ResourceDef, ServiceRequest, ServiceResponse},
dev::{
AppService, HttpServiceFactory, RequestHead, ResourceDef, ServiceRequest, ServiceResponse,
},
error::Error,
guard::Guard,
http::header::DispositionType,
@ -12,14 +18,16 @@ use actix_web::{
use futures_core::future::LocalBoxFuture;
use crate::{
directory_listing, named, Directory, DirectoryRenderer, FilesService, HttpNewService,
MimeOverride,
directory_listing, named,
service::{FilesService, FilesServiceInner},
Directory, DirectoryRenderer, HttpNewService, MimeOverride, PathFilter,
};
/// Static files handling service.
///
/// `Files` service must be registered with `App::service()` method.
///
/// # Examples
/// ```
/// use actix_web::App;
/// use actix_files::Files;
@ -28,7 +36,7 @@ use crate::{
/// .service(Files::new("/static", "."));
/// ```
pub struct Files {
path: String,
mount_path: String,
directory: PathBuf,
index: Option<String>,
show_index: bool,
@ -36,6 +44,7 @@ pub struct Files {
default: Rc<RefCell<Option<Rc<HttpNewService>>>>,
renderer: Rc<DirectoryRenderer>,
mime_override: Option<Rc<MimeOverride>>,
path_filter: Option<Rc<PathFilter>>,
file_flags: named::Flags,
use_guards: Option<Rc<dyn Guard>>,
guards: Vec<Rc<dyn Guard>>,
@ -58,8 +67,9 @@ impl Clone for Files {
default: self.default.clone(),
renderer: self.renderer.clone(),
file_flags: self.file_flags,
path: self.path.clone(),
mount_path: self.mount_path.clone(),
mime_override: self.mime_override.clone(),
path_filter: self.path_filter.clone(),
use_guards: self.use_guards.clone(),
guards: self.guards.clone(),
hidden_files: self.hidden_files,
@ -96,7 +106,7 @@ impl Files {
};
Files {
path: mount_path.to_owned(),
mount_path: mount_path.trim_end_matches('/').to_owned(),
directory: dir,
index: None,
show_index: false,
@ -104,6 +114,7 @@ impl Files {
default: Rc::new(RefCell::new(None)),
renderer: Rc::new(directory_listing),
mime_override: None,
path_filter: None,
file_flags: named::Flags::default(),
use_guards: None,
guards: Vec::new(),
@ -114,6 +125,9 @@ impl Files {
/// Show files listing for directories.
///
/// By default show files listing is disabled.
///
/// When used with [`Files::index_file()`], files listing is shown as a fallback
/// when the index file is not found.
pub fn show_files_listing(mut self) -> Self {
self.show_index = true;
self
@ -127,7 +141,7 @@ impl Files {
self
}
/// Set custom directory renderer
/// Set custom directory renderer.
pub fn files_listing_renderer<F>(mut self, f: F) -> Self
where
for<'r, 's> F:
@ -137,7 +151,7 @@ impl Files {
self
}
/// Specifies mime override callback
/// Specifies MIME override callback.
pub fn mime_override<F>(mut self, f: F) -> Self
where
F: Fn(&mime::Name<'_>) -> DispositionType + 'static,
@ -146,10 +160,45 @@ impl Files {
self
}
/// Sets path filtering closure.
///
/// The path provided to the closure is relative to `serve_from` path.
/// You can safely join this path with the `serve_from` path to get the real path.
/// However, the real path may not exist since the filter is called before checking path existence.
///
/// When a path doesn't pass the filter, [`Files::default_handler`] is called if set, otherwise,
/// `404 Not Found` is returned.
///
/// # Examples
/// ```
/// use std::path::Path;
/// use actix_files::Files;
///
/// // prevent searching subdirectories and following symlinks
/// let files_service = Files::new("/", "./static").path_filter(|path, _| {
/// path.components().count() == 1
/// && Path::new("./static")
/// .join(path)
/// .symlink_metadata()
/// .map(|m| !m.file_type().is_symlink())
/// .unwrap_or(false)
/// });
/// ```
pub fn path_filter<F>(mut self, f: F) -> Self
where
F: Fn(&Path, &RequestHead) -> bool + 'static,
{
self.path_filter = Some(Rc::new(f));
self
}
/// Set index file
///
/// Shows specific index file for directory "/" instead of
/// Shows specific index file for directories instead of
/// showing files listing.
///
/// If the index file is not found, files listing is shown as a fallback if
/// [`Files::show_files_listing()`] is set.
pub fn index_file<T: Into<String>>(mut self, index: T) -> Self {
self.index = Some(index.into());
self
@ -186,7 +235,7 @@ impl Files {
/// request starts being handled by the file service, it will not be able to back-out and try
/// the next service, you will simply get a 404 (or 405) error response.
///
/// To allow `POST` requests to retrieve files, see [`Files::use_guards`].
/// To allow `POST` requests to retrieve files, see [`Files::method_guard()`].
///
/// # Examples
/// ```
@ -213,9 +262,9 @@ impl Files {
self
}
/// See [`Files::method_guard`].
#[doc(hidden)]
#[deprecated(since = "0.6.0", note = "Renamed to `method_guard`.")]
/// See [`Files::method_guard`].
pub fn use_guards<G: Guard + 'static>(self, guard: G) -> Self {
self.method_guard(guard)
}
@ -234,23 +283,25 @@ impl Files {
/// Setting a fallback static file handler:
/// ```
/// use actix_files::{Files, NamedFile};
/// use actix_web::dev::{ServiceRequest, ServiceResponse, fn_service};
///
/// # fn run() -> Result<(), actix_web::Error> {
/// let files = Files::new("/", "./static")
/// .index_file("index.html")
/// .default_handler(NamedFile::open("./static/404.html")?);
/// .default_handler(fn_service(|req: ServiceRequest| async {
/// let (req, _) = req.into_parts();
/// let file = NamedFile::open_async("./static/404.html").await?;
/// let res = file.into_response(&req);
/// Ok(ServiceResponse::new(req, res))
/// }));
/// # Ok(())
/// # }
/// ```
pub fn default_handler<F, U>(mut self, f: F) -> Self
where
F: IntoServiceFactory<U, ServiceRequest>,
U: ServiceFactory<
ServiceRequest,
Config = (),
Response = ServiceResponse,
Error = Error,
> + 'static,
U: ServiceFactory<ServiceRequest, Config = (), Response = ServiceResponse, Error = Error>
+ 'static,
{
// create and configure default resource
self.default = Rc::new(RefCell::new(Some(Rc::new(boxed::factory(
@ -286,9 +337,9 @@ impl HttpServiceFactory for Files {
}
let rdef = if config.is_root() {
ResourceDef::root_prefix(&self.path)
ResourceDef::root_prefix(&self.mount_path)
} else {
ResourceDef::prefix(&self.path)
ResourceDef::prefix(&self.mount_path)
};
config.register_service(rdef, guards, self, None)
@ -304,7 +355,7 @@ impl ServiceFactory<ServiceRequest> for Files {
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
let mut srv = FilesService {
let mut inner = FilesServiceInner {
directory: self.directory.clone(),
index: self.index.clone(),
show_index: self.show_index,
@ -312,6 +363,7 @@ impl ServiceFactory<ServiceRequest> for Files {
default: None,
renderer: self.renderer.clone(),
mime_override: self.mime_override.clone(),
path_filter: self.path_filter.clone(),
file_flags: self.file_flags,
guards: self.use_guards.clone(),
hidden_files: self.hidden_files,
@ -322,14 +374,57 @@ impl ServiceFactory<ServiceRequest> for Files {
Box::pin(async {
match fut.await {
Ok(default) => {
srv.default = Some(default);
Ok(srv)
inner.default = Some(default);
Ok(FilesService(Rc::new(inner)))
}
Err(_) => Err(()),
}
})
} else {
Box::pin(ok(srv))
Box::pin(async move { Ok(FilesService(Rc::new(inner))) })
}
}
}
#[cfg(test)]
mod tests {
use actix_web::{
http::StatusCode,
test::{self, TestRequest},
App, HttpResponse,
};
use super::*;
#[actix_web::test]
async fn custom_files_listing_renderer() {
let srv = test::init_service(
App::new().service(
Files::new("/", "./tests")
.show_files_listing()
.files_listing_renderer(|dir, req| {
Ok(ServiceResponse::new(
req.clone(),
HttpResponse::Ok().body(dir.path.to_str().unwrap().to_owned()),
))
}),
),
)
.await;
let req = TestRequest::with_uri("/").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
let body = test::read_body(res).await;
let body_str = std::str::from_utf8(&body).unwrap();
let actual_path = Path::new(&body_str);
let expected_path = Path::new("actix-files/tests");
assert!(
actual_path.ends_with(expected_path),
"body {:?} does not end with {:?}",
actual_path,
expected_path
);
}
}

View file

@ -2,7 +2,7 @@
//!
//! Provides a non-blocking service for serving static files from disk.
//!
//! # Example
//! # Examples
//! ```
//! use actix_web::App;
//! use actix_files::Files;
@ -11,12 +11,17 @@
//! .service(Files::new("/static", ".").prefer_utf8(true));
//! ```
#![deny(rust_2018_idioms)]
#![warn(missing_docs, missing_debug_implementations)]
#![deny(rust_2018_idioms, nonstandard_style)]
#![warn(future_incompatible, missing_docs, missing_debug_implementations)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
use std::path::Path;
use actix_service::boxed::{BoxService, BoxServiceFactory};
use actix_web::{
dev::{ServiceRequest, ServiceResponse},
dev::{RequestHead, ServiceRequest, ServiceResponse},
error::Error,
http::header::DispositionType,
};
@ -32,16 +37,15 @@ mod path_buf;
mod range;
mod service;
pub use crate::chunked::ChunkedReadFile;
pub use crate::directory::Directory;
pub use crate::files::Files;
pub use crate::named::NamedFile;
pub use crate::range::HttpRange;
pub use crate::service::FilesService;
use self::directory::{directory_listing, DirectoryRenderer};
use self::error::FilesError;
use self::path_buf::PathBufWrap;
pub use self::{
chunked::ChunkedReadFile, directory::Directory, files::Files, named::NamedFile,
range::HttpRange, service::FilesService,
};
use self::{
directory::{directory_listing, DirectoryRenderer},
error::FilesError,
path_buf::PathBufWrap,
};
type HttpService = BoxService<ServiceRequest, ServiceResponse, Error>;
type HttpNewService = BoxServiceFactory<(), ServiceRequest, ServiceResponse, Error, ()>;
@ -56,20 +60,22 @@ pub fn file_extension_to_mime(ext: &str) -> mime::Mime {
type MimeOverride = dyn Fn(&mime::Name<'_>) -> DispositionType;
type PathFilter = dyn Fn(&Path, &RequestHead) -> bool;
#[cfg(test)]
mod tests {
use std::{
fs::{self, File},
fmt::Write as _,
fs::{self},
ops::Add,
time::{Duration, SystemTime},
};
use actix_service::ServiceFactory;
use actix_utils::future::ok;
use actix_web::{
dev::ServiceFactory,
guard,
http::{
header::{self, ContentDisposition, DispositionParam, DispositionType},
header::{self, ContentDisposition, DispositionParam},
Method, StatusCode,
},
middleware::Compress,
@ -79,8 +85,9 @@ mod tests {
};
use super::*;
use crate::named::File;
#[actix_rt::test]
#[actix_web::test]
async fn test_file_extension_to_mime() {
let m = file_extension_to_mime("");
assert_eq!(m, mime::APPLICATION_OCTET_STREAM);
@ -97,69 +104,69 @@ mod tests {
#[actix_rt::test]
async fn test_if_modified_since_without_if_none_match() {
let file = NamedFile::open("Cargo.toml").unwrap();
let file = NamedFile::open_async("Cargo.toml").await.unwrap();
let since = header::HttpDate::from(SystemTime::now().add(Duration::from_secs(60)));
let req = TestRequest::default()
.insert_header((header::IF_MODIFIED_SINCE, since))
.to_http_request();
let resp = file.respond_to(&req).await.unwrap();
let resp = file.respond_to(&req);
assert_eq!(resp.status(), StatusCode::NOT_MODIFIED);
}
#[actix_rt::test]
async fn test_if_modified_since_without_if_none_match_same() {
let file = NamedFile::open("Cargo.toml").unwrap();
let file = NamedFile::open_async("Cargo.toml").await.unwrap();
let since = file.last_modified().unwrap();
let req = TestRequest::default()
.insert_header((header::IF_MODIFIED_SINCE, since))
.to_http_request();
let resp = file.respond_to(&req).await.unwrap();
let resp = file.respond_to(&req);
assert_eq!(resp.status(), StatusCode::NOT_MODIFIED);
}
#[actix_rt::test]
async fn test_if_modified_since_with_if_none_match() {
let file = NamedFile::open("Cargo.toml").unwrap();
let file = NamedFile::open_async("Cargo.toml").await.unwrap();
let since = header::HttpDate::from(SystemTime::now().add(Duration::from_secs(60)));
let req = TestRequest::default()
.insert_header((header::IF_NONE_MATCH, "miss_etag"))
.insert_header((header::IF_MODIFIED_SINCE, since))
.to_http_request();
let resp = file.respond_to(&req).await.unwrap();
let resp = file.respond_to(&req);
assert_ne!(resp.status(), StatusCode::NOT_MODIFIED);
}
#[actix_rt::test]
async fn test_if_unmodified_since() {
let file = NamedFile::open("Cargo.toml").unwrap();
let file = NamedFile::open_async("Cargo.toml").await.unwrap();
let since = file.last_modified().unwrap();
let req = TestRequest::default()
.insert_header((header::IF_UNMODIFIED_SINCE, since))
.to_http_request();
let resp = file.respond_to(&req).await.unwrap();
let resp = file.respond_to(&req);
assert_eq!(resp.status(), StatusCode::OK);
}
#[actix_rt::test]
async fn test_if_unmodified_since_failed() {
let file = NamedFile::open("Cargo.toml").unwrap();
let file = NamedFile::open_async("Cargo.toml").await.unwrap();
let since = header::HttpDate::from(SystemTime::UNIX_EPOCH);
let req = TestRequest::default()
.insert_header((header::IF_UNMODIFIED_SINCE, since))
.to_http_request();
let resp = file.respond_to(&req).await.unwrap();
let resp = file.respond_to(&req);
assert_eq!(resp.status(), StatusCode::PRECONDITION_FAILED);
}
#[actix_rt::test]
async fn test_named_file_text() {
assert!(NamedFile::open("test--").is_err());
let mut file = NamedFile::open("Cargo.toml").unwrap();
assert!(NamedFile::open_async("test--").await.is_err());
let mut file = NamedFile::open_async("Cargo.toml").await.unwrap();
{
file.file();
let _f: &File = &file;
@ -169,7 +176,7 @@ mod tests {
}
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req).await.unwrap();
let resp = file.respond_to(&req);
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/x-toml"
@ -182,8 +189,8 @@ mod tests {
#[actix_rt::test]
async fn test_named_file_content_disposition() {
assert!(NamedFile::open("test--").is_err());
let mut file = NamedFile::open("Cargo.toml").unwrap();
assert!(NamedFile::open_async("test--").await.is_err());
let mut file = NamedFile::open_async("Cargo.toml").await.unwrap();
{
file.file();
let _f: &File = &file;
@ -193,24 +200,36 @@ mod tests {
}
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req).await.unwrap();
let resp = file.respond_to(&req);
assert_eq!(
resp.headers().get(header::CONTENT_DISPOSITION).unwrap(),
"inline; filename=\"Cargo.toml\""
);
let file = NamedFile::open("Cargo.toml")
let file = NamedFile::open_async("Cargo.toml")
.await
.unwrap()
.disable_content_disposition();
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req).await.unwrap();
let resp = file.respond_to(&req);
assert!(resp.headers().get(header::CONTENT_DISPOSITION).is_none());
}
#[actix_rt::test]
async fn test_named_file_non_ascii_file_name() {
let mut file =
NamedFile::from_file(File::open("Cargo.toml").unwrap(), "貨物.toml").unwrap();
let file = {
#[cfg(feature = "experimental-io-uring")]
{
crate::named::File::open("Cargo.toml").await.unwrap()
}
#[cfg(not(feature = "experimental-io-uring"))]
{
crate::named::File::open("Cargo.toml").unwrap()
}
};
let mut file = NamedFile::from_file(file, "貨物.toml").unwrap();
{
file.file();
let _f: &File = &file;
@ -220,7 +239,7 @@ mod tests {
}
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req).await.unwrap();
let resp = file.respond_to(&req);
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/x-toml"
@ -233,7 +252,8 @@ mod tests {
#[actix_rt::test]
async fn test_named_file_set_content_type() {
let mut file = NamedFile::open("Cargo.toml")
let mut file = NamedFile::open_async("Cargo.toml")
.await
.unwrap()
.set_content_type(mime::TEXT_XML);
{
@ -245,7 +265,7 @@ mod tests {
}
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req).await.unwrap();
let resp = file.respond_to(&req);
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/xml"
@ -258,7 +278,7 @@ mod tests {
#[actix_rt::test]
async fn test_named_file_image() {
let mut file = NamedFile::open("tests/test.png").unwrap();
let mut file = NamedFile::open_async("tests/test.png").await.unwrap();
{
file.file();
let _f: &File = &file;
@ -268,7 +288,7 @@ mod tests {
}
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req).await.unwrap();
let resp = file.respond_to(&req);
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"image/png"
@ -279,13 +299,30 @@ mod tests {
);
}
#[actix_rt::test]
async fn test_named_file_javascript() {
let file = NamedFile::open_async("tests/test.js").await.unwrap();
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req);
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"application/javascript; charset=utf-8"
);
assert_eq!(
resp.headers().get(header::CONTENT_DISPOSITION).unwrap(),
"inline; filename=\"test.js\""
);
}
#[actix_rt::test]
async fn test_named_file_image_attachment() {
let cd = ContentDisposition {
disposition: DispositionType::Attachment,
parameters: vec![DispositionParam::Filename(String::from("test.png"))],
};
let mut file = NamedFile::open("tests/test.png")
let mut file = NamedFile::open_async("tests/test.png")
.await
.unwrap()
.set_content_disposition(cd);
{
@ -297,7 +334,7 @@ mod tests {
}
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req).await.unwrap();
let resp = file.respond_to(&req);
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"image/png"
@ -310,7 +347,7 @@ mod tests {
#[actix_rt::test]
async fn test_named_file_binary() {
let mut file = NamedFile::open("tests/test.binary").unwrap();
let mut file = NamedFile::open_async("tests/test.binary").await.unwrap();
{
file.file();
let _f: &File = &file;
@ -320,7 +357,7 @@ mod tests {
}
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req).await.unwrap();
let resp = file.respond_to(&req);
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"application/octet-stream"
@ -331,21 +368,45 @@ mod tests {
);
}
#[allow(deprecated)]
#[actix_rt::test]
async fn test_named_file_status_code_text() {
let mut file = NamedFile::open("Cargo.toml")
async fn status_code_customize_same_output() {
let file1 = NamedFile::open_async("Cargo.toml")
.await
.unwrap()
.set_status_code(StatusCode::NOT_FOUND);
let file2 = NamedFile::open_async("Cargo.toml")
.await
.unwrap()
.customize()
.with_status(StatusCode::NOT_FOUND);
let req = TestRequest::default().to_http_request();
let res1 = file1.respond_to(&req);
let res2 = file2.respond_to(&req);
assert_eq!(res1.status(), StatusCode::NOT_FOUND);
assert_eq!(res2.status(), StatusCode::NOT_FOUND);
}
#[actix_rt::test]
async fn test_named_file_status_code_text() {
let mut file = NamedFile::open_async("Cargo.toml").await.unwrap();
{
file.file();
let _f: &File = &file;
}
{
let _f: &mut File = &mut file;
}
let file = file.customize().with_status(StatusCode::NOT_FOUND);
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req).await.unwrap();
let resp = file.respond_to(&req);
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/x-toml"
@ -493,10 +554,9 @@ mod tests {
#[actix_rt::test]
async fn test_static_files_with_spaces() {
let srv = test::init_service(
App::new().service(Files::new("/", ".").index_file("Cargo.toml")),
)
.await;
let srv =
test::init_service(App::new().service(Files::new("/", ".").index_file("Cargo.toml")))
.await;
let request = TestRequest::get()
.uri("/tests/test%20space.binary")
.to_request();
@ -508,6 +568,30 @@ mod tests {
assert_eq!(bytes, data);
}
#[cfg(not(target_os = "windows"))]
#[actix_rt::test]
async fn test_static_files_with_special_characters() {
// Create the file we want to test against ad-hoc. We can't check it in as otherwise
// Windows can't even checkout this repository.
let temp_dir = tempfile::tempdir().unwrap();
let file_with_newlines = temp_dir.path().join("test\n\x0B\x0C\rnewline.text");
fs::write(&file_with_newlines, "Look at my newlines").unwrap();
let srv = test::init_service(
App::new().service(Files::new("/", temp_dir.path()).index_file("Cargo.toml")),
)
.await;
let request = TestRequest::get()
.uri("/test%0A%0B%0C%0Dnewline.text")
.to_request();
let response = test::call_service(&srv, request).await;
assert_eq!(response.status(), StatusCode::OK);
let bytes = test::read_body(response).await;
let data = web::Bytes::from(fs::read(file_with_newlines).unwrap());
assert_eq!(bytes, data);
}
#[actix_rt::test]
async fn test_files_not_allowed() {
let srv = test::init_service(App::new().service(Files::new("/", "."))).await;
@ -549,7 +633,8 @@ mod tests {
async fn test_named_file_content_encoding() {
let srv = test::init_service(App::new().wrap(Compress::default()).service(
web::resource("/").to(|| async {
NamedFile::open("Cargo.toml")
NamedFile::open_async("Cargo.toml")
.await
.unwrap()
.set_content_encoding(header::ContentEncoding::Identity)
}),
@ -562,14 +647,16 @@ mod tests {
.to_request();
let res = test::call_service(&srv, request).await;
assert_eq!(res.status(), StatusCode::OK);
assert!(!res.headers().contains_key(header::CONTENT_ENCODING));
assert!(res.headers().contains_key(header::CONTENT_ENCODING));
assert!(!test::read_body(res).await.is_empty());
}
#[actix_rt::test]
async fn test_named_file_content_encoding_gzip() {
let srv = test::init_service(App::new().wrap(Compress::default()).service(
web::resource("/").to(|| async {
NamedFile::open("Cargo.toml")
NamedFile::open_async("Cargo.toml")
.await
.unwrap()
.set_content_encoding(header::ContentEncoding::Gzip)
}),
@ -595,16 +682,15 @@ mod tests {
#[actix_rt::test]
async fn test_named_file_allowed_method() {
let req = TestRequest::default().method(Method::GET).to_http_request();
let file = NamedFile::open("Cargo.toml").unwrap();
let resp = file.respond_to(&req).await.unwrap();
let file = NamedFile::open_async("Cargo.toml").await.unwrap();
let resp = file.respond_to(&req);
assert_eq!(resp.status(), StatusCode::OK);
}
#[actix_rt::test]
async fn test_static_files() {
let srv =
test::init_service(App::new().service(Files::new("/", ".").show_files_listing()))
.await;
test::init_service(App::new().service(Files::new("/", ".").show_files_listing())).await;
let req = TestRequest::with_uri("/missing").to_request();
let resp = test::call_service(&srv, req).await;
@ -617,8 +703,7 @@ mod tests {
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
let srv =
test::init_service(App::new().service(Files::new("/", ".").show_files_listing()))
.await;
test::init_service(App::new().service(Files::new("/", ".").show_files_listing())).await;
let req = TestRequest::with_uri("/tests").to_request();
let resp = test::call_service(&srv, req).await;
assert_eq!(
@ -686,8 +771,8 @@ mod tests {
#[actix_rt::test]
async fn test_default_handler_file_missing() {
let st = Files::new("/", ".")
.default_handler(|req: ServiceRequest| {
ok(req.into_response(HttpResponse::Ok().body("default content")))
.default_handler(|req: ServiceRequest| async {
Ok(req.into_response(HttpResponse::Ok().body("default content")))
})
.new_service(())
.await
@ -766,13 +851,46 @@ mod tests {
let req = TestRequest::get().uri("/test/%43argo.toml").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
// `%2F` == `/`
let req = TestRequest::get().uri("/test%2Ftest.binary").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
let req = TestRequest::get().uri("/test/Cargo.toml%00").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
}
#[actix_rt::test]
async fn test_percent_encoding_2() {
let temp_dir = tempfile::tempdir().unwrap();
let filename = match cfg!(unix) {
true => "ض:?#[]{}<>()@!$&'`|*+,;= %20\n.test",
false => "ض#[]{}()@!$&'`+,;= %20.test",
};
let filename_encoded = filename
.as_bytes()
.iter()
.fold(String::new(), |mut buf, c| {
write!(&mut buf, "%{:02X}", c).unwrap();
buf
});
std::fs::File::create(temp_dir.path().join(filename)).unwrap();
let srv = test::init_service(App::new().service(Files::new("/", temp_dir.path()))).await;
let req = TestRequest::get()
.uri(&format!("/{}", filename_encoded))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
}
#[actix_rt::test]
async fn test_serve_named_file() {
let srv =
test::init_service(App::new().service(NamedFile::open("Cargo.toml").unwrap()))
.await;
let factory = NamedFile::open_async("Cargo.toml").await.unwrap();
let srv = test::init_service(App::new().service(factory)).await;
let req = TestRequest::get().uri("/Cargo.toml").to_request();
let res = test::call_service(&srv, req).await;
@ -789,11 +907,9 @@ mod tests {
#[actix_rt::test]
async fn test_serve_named_file_prefix() {
let srv = test::init_service(
App::new()
.service(web::scope("/test").service(NamedFile::open("Cargo.toml").unwrap())),
)
.await;
let factory = NamedFile::open_async("Cargo.toml").await.unwrap();
let srv =
test::init_service(App::new().service(web::scope("/test").service(factory))).await;
let req = TestRequest::get().uri("/test/Cargo.toml").to_request();
let res = test::call_service(&srv, req).await;
@ -810,10 +926,8 @@ mod tests {
#[actix_rt::test]
async fn test_named_file_default_service() {
let srv = test::init_service(
App::new().default_service(NamedFile::open("Cargo.toml").unwrap()),
)
.await;
let factory = NamedFile::open_async("Cargo.toml").await.unwrap();
let srv = test::init_service(App::new().default_service(factory)).await;
for route in ["/foobar", "/baz", "/"].iter() {
let req = TestRequest::get().uri(route).to_request();
@ -828,8 +942,9 @@ mod tests {
#[actix_rt::test]
async fn test_default_handler_named_file() {
let factory = NamedFile::open_async("Cargo.toml").await.unwrap();
let st = Files::new("/", ".")
.default_handler(NamedFile::open("Cargo.toml").unwrap())
.default_handler(factory)
.new_service(())
.await
.unwrap();
@ -856,4 +971,69 @@ mod tests {
"inline; filename=\"symlink-test.png\""
);
}
#[actix_rt::test]
async fn test_index_with_show_files_listing() {
let service = Files::new(".", ".")
.index_file("lib.rs")
.show_files_listing()
.new_service(())
.await
.unwrap();
// Serve the index if it exists
let req = TestRequest::default().uri("/src").to_srv_request();
let resp = test::call_service(&service, req).await;
assert_eq!(resp.status(), StatusCode::OK);
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/x-rust"
);
// Otherwise, show the files listing.
let req = TestRequest::default().uri("/tests").to_srv_request();
let resp = test::call_service(&service, req).await;
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/html; charset=utf-8"
);
let bytes = test::read_body(resp).await;
assert!(format!("{:?}", bytes).contains("/tests/test.png"));
}
#[actix_rt::test]
async fn test_path_filter() {
// prevent searching subdirectories
let st = Files::new("/", ".")
.path_filter(|path, _| path.components().count() == 1)
.new_service(())
.await
.unwrap();
let req = TestRequest::with_uri("/Cargo.toml").to_srv_request();
let resp = test::call_service(&st, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let req = TestRequest::with_uri("/src/lib.rs").to_srv_request();
let resp = test::call_service(&st, req).await;
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
}
#[actix_rt::test]
async fn test_default_handler_filter() {
let st = Files::new("/", ".")
.default_handler(|req: ServiceRequest| async {
Ok(req.into_response(HttpResponse::Ok().body("default content")))
})
.path_filter(|path, _| path.extension() == Some("png".as_ref()))
.new_service(())
.await
.unwrap();
let req = TestRequest::with_uri("/Cargo.toml").to_srv_request();
let resp = test::call_service(&st, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let bytes = test::read_body(resp).await;
assert_eq!(bytes, web::Bytes::from_static(b"default content"));
}
}

View file

@ -1,32 +1,34 @@
use actix_service::{Service, ServiceFactory};
use actix_utils::future::{ok, ready, Ready};
use actix_web::dev::{AppService, HttpServiceFactory, ResourceDef};
use std::fs::{File, Metadata};
use std::io;
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};
#[cfg(unix)]
use std::os::unix::fs::MetadataExt;
use std::{
fs::Metadata,
io,
path::{Path, PathBuf},
time::{SystemTime, UNIX_EPOCH},
};
use actix_web::{
dev::{BodyEncoding, ServiceRequest, ServiceResponse, SizedStream},
body::{self, BoxBody, SizedStream},
dev::{
self, AppService, HttpServiceFactory, ResourceDef, Service, ServiceFactory, ServiceRequest,
ServiceResponse,
},
http::{
header::{
self, Charset, ContentDisposition, DispositionParam, DispositionType, ExtendedValue,
self, Charset, ContentDisposition, ContentEncoding, DispositionParam, DispositionType,
ExtendedValue, HeaderValue,
},
ContentEncoding, StatusCode,
StatusCode,
},
Error, HttpMessage, HttpRequest, HttpResponse, Responder,
};
use bitflags::bitflags;
use mime_guess::from_path;
use derive_more::{Deref, DerefMut};
use futures_core::future::LocalBoxFuture;
use mime::Mime;
use crate::ChunkedReadFile;
use crate::{encoding::equiv_utf8_text, range::HttpRange};
bitflags! {
#[derive(Debug, Clone, Copy)]
pub(crate) struct Flags: u8 {
const ETAG = 0b0000_0001;
const LAST_MD = 0b0000_0010;
@ -37,7 +39,7 @@ bitflags! {
impl Default for Flags {
fn default() -> Self {
Flags::from_bits_truncate(0b0000_0111)
Flags::from_bits_truncate(0b0000_1111)
}
}
@ -48,9 +50,9 @@ impl Default for Flags {
/// use actix_web::App;
/// use actix_files::NamedFile;
///
/// # fn run() -> Result<(), Box<dyn std::error::Error>> {
/// let app = App::new()
/// .service(NamedFile::open("./static/index.html")?);
/// # async fn run() -> Result<(), Box<dyn std::error::Error>> {
/// let file = NamedFile::open_async("./static/index.html").await?;
/// let app = App::new().service(file);
/// # Ok(())
/// # }
/// ```
@ -62,22 +64,32 @@ impl Default for Flags {
///
/// #[get("/")]
/// async fn index() -> impl Responder {
/// NamedFile::open("./static/index.html")
/// NamedFile::open_async("./static/index.html").await
/// }
/// ```
#[derive(Debug)]
#[derive(Debug, Deref, DerefMut)]
pub struct NamedFile {
path: PathBuf,
#[deref]
#[deref_mut]
file: File,
path: PathBuf,
modified: Option<SystemTime>,
pub(crate) md: Metadata,
pub(crate) flags: Flags,
pub(crate) status_code: StatusCode,
pub(crate) content_type: mime::Mime,
pub(crate) content_disposition: header::ContentDisposition,
pub(crate) content_type: Mime,
pub(crate) content_disposition: ContentDisposition,
pub(crate) encoding: Option<ContentEncoding>,
}
#[cfg(not(feature = "experimental-io-uring"))]
pub(crate) use std::fs::File;
#[cfg(feature = "experimental-io-uring")]
pub(crate) use tokio_uring::fs::File;
use super::chunked;
impl NamedFile {
/// Creates an instance from a previously opened file.
///
@ -85,20 +97,19 @@ impl NamedFile {
/// `ContentDisposition` headers.
///
/// # Examples
///
/// ```
/// ```ignore
/// use std::{
/// io::{self, Write as _},
/// env,
/// fs::File
/// };
/// use actix_files::NamedFile;
/// use std::io::{self, Write};
/// use std::env;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut file = File::create("foo.txt")?;
/// file.write_all(b"Hello, world!")?;
/// let named_file = NamedFile::from_file(file, "bar.txt")?;
/// # std::fs::remove_file("foo.txt");
/// Ok(())
/// }
/// let mut file = File::create("foo.txt")?;
/// file.write_all(b"Hello, world!")?;
/// let named_file = NamedFile::from_file(file, "bar.txt")?;
/// # std::fs::remove_file("foo.txt");
/// Ok(())
/// ```
pub fn from_file<P: AsRef<Path>>(file: File, path: P) -> io::Result<NamedFile> {
let path = path.as_ref().to_path_buf();
@ -116,15 +127,25 @@ impl NamedFile {
}
};
let ct = from_path(&path).first_or_octet_stream();
let ct = mime_guess::from_path(&path).first_or_octet_stream();
let disposition = match ct.type_() {
mime::IMAGE | mime::TEXT | mime::VIDEO => DispositionType::Inline,
mime::IMAGE | mime::TEXT | mime::AUDIO | mime::VIDEO => DispositionType::Inline,
mime::APPLICATION => match ct.subtype() {
mime::JAVASCRIPT | mime::JSON => DispositionType::Inline,
name if name == "wasm" || name == "xhtml" => DispositionType::Inline,
_ => DispositionType::Attachment,
},
_ => DispositionType::Attachment,
};
let mut parameters =
vec![DispositionParam::Filename(String::from(filename.as_ref()))];
// replace special characters in filenames which could occur on some filesystems
let filename_s = filename
.replace('\n', "%0A") // \n line break
.replace('\x0B', "%0B") // \v vertical tab
.replace('\x0C', "%0C") // \f form feed
.replace('\r', "%0D"); // \r carriage return
let mut parameters = vec![DispositionParam::Filename(filename_s)];
if !filename.is_ascii() {
parameters.push(DispositionParam::FilenameExt(ExtendedValue {
@ -142,7 +163,30 @@ impl NamedFile {
(ct, cd)
};
let md = file.metadata()?;
let md = {
#[cfg(not(feature = "experimental-io-uring"))]
{
file.metadata()?
}
#[cfg(feature = "experimental-io-uring")]
{
use std::os::unix::prelude::{AsRawFd, FromRawFd};
let fd = file.as_raw_fd();
// SAFETY: fd is borrowed and lives longer than the unsafe block
unsafe {
let file = std::fs::File::from_raw_fd(fd);
let md = file.metadata();
// SAFETY: forget the fd before exiting block in success or error case but don't
// run destructor (that would close file handle)
std::mem::forget(file);
md?
}
}
};
let modified = md.modified().ok();
let encoding = None;
@ -162,32 +206,59 @@ impl NamedFile {
/// Attempts to open a file in read-only mode.
///
/// # Examples
///
/// ```
/// use actix_files::NamedFile;
///
/// let file = NamedFile::open("foo.txt");
/// ```
#[cfg(not(feature = "experimental-io-uring"))]
pub fn open<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> {
Self::from_file(File::open(&path)?, path)
let file = File::open(&path)?;
Self::from_file(file, path)
}
/// Returns reference to the underlying `File` object.
/// Attempts to open a file asynchronously in read-only mode.
///
/// When the `experimental-io-uring` crate feature is enabled, this will be async. Otherwise, it
/// will behave just like `open`.
///
/// # Examples
/// ```
/// use actix_files::NamedFile;
/// # async fn open() {
/// let file = NamedFile::open_async("foo.txt").await.unwrap();
/// # }
/// ```
pub async fn open_async<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> {
let file = {
#[cfg(not(feature = "experimental-io-uring"))]
{
File::open(&path)?
}
#[cfg(feature = "experimental-io-uring")]
{
File::open(&path).await?
}
};
Self::from_file(file, path)
}
/// Returns reference to the underlying file object.
#[inline]
pub fn file(&self) -> &File {
&self.file
}
/// Retrieve the path of this file.
/// Returns the filesystem path to this file.
///
/// # Examples
///
/// ```
/// # use std::io;
/// use actix_files::NamedFile;
///
/// # fn path() -> io::Result<()> {
/// let file = NamedFile::open("test.txt")?;
/// # async fn path() -> io::Result<()> {
/// let file = NamedFile::open_async("test.txt").await?;
/// assert_eq!(file.path().as_os_str(), "test.txt");
/// # Ok(())
/// # }
@ -197,51 +268,92 @@ impl NamedFile {
self.path.as_path()
}
/// Set response **Status Code**
/// Returns the time the file was last modified.
///
/// Returns `None` only on unsupported platforms; see [`std::fs::Metadata::modified()`].
/// Therefore, it is usually safe to unwrap this.
#[inline]
pub fn modified(&self) -> Option<SystemTime> {
self.modified
}
/// Returns the filesystem metadata associated with this file.
#[inline]
pub fn metadata(&self) -> &Metadata {
&self.md
}
/// Returns the `Content-Type` header that will be used when serving this file.
#[inline]
pub fn content_type(&self) -> &Mime {
&self.content_type
}
/// Returns the `Content-Disposition` that will be used when serving this file.
#[inline]
pub fn content_disposition(&self) -> &ContentDisposition {
&self.content_disposition
}
/// Returns the `Content-Encoding` that will be used when serving this file.
///
/// A return value of `None` indicates that the content is not already using a compressed
/// representation and may be subject to compression downstream.
#[inline]
pub fn content_encoding(&self) -> Option<ContentEncoding> {
self.encoding
}
/// Set response status code.
#[deprecated(since = "0.7.0", note = "Prefer `Responder::customize()`.")]
pub fn set_status_code(mut self, status: StatusCode) -> Self {
self.status_code = status;
self
}
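For comparison, a minimal sketch of the `Responder::customize()` replacement suggested by the deprecation note above (the route path and the `Cargo.toml` file are placeholders for illustration, not part of this change):

```rust
use actix_files::NamedFile;
use actix_web::{get, http::StatusCode, Responder};

// Hypothetical handler: serve a file but override the response status via
// `customize()` instead of the deprecated `set_status_code`.
#[get("/legacy")]
async fn legacy() -> impl Responder {
    NamedFile::open_async("Cargo.toml")
        .await
        .unwrap()
        .customize()
        .with_status(StatusCode::NOT_FOUND)
}
```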
/// Set the MIME Content-Type for serving this file. By default
/// the Content-Type is inferred from the filename extension.
/// Sets the `Content-Type` header that will be used when serving this file. By default the
/// `Content-Type` is inferred from the filename extension.
#[inline]
pub fn set_content_type(mut self, mime_type: mime::Mime) -> Self {
pub fn set_content_type(mut self, mime_type: Mime) -> Self {
self.content_type = mime_type;
self
}
/// Set the Content-Disposition for serving this file. This allows
/// changing the inline/attachment disposition as well as the filename
/// sent to the peer. By default the disposition is `inline` for text,
/// image, and video content types, and `attachment` otherwise, and
/// the filename is taken from the path provided in the `open` method
/// after converting it to UTF-8 using.
/// [`std::ffi::OsStr::to_string_lossy`]
/// Set the Content-Disposition for serving this file. This allows changing the
/// `inline/attachment` disposition as well as the filename sent to the peer.
///
/// By default the disposition is `inline` for `text/*`, `image/*`, `video/*` and
/// `application/{javascript, json, wasm}` mime types, and `attachment` otherwise, and the
/// filename is taken from the path provided in the `open` method after converting it to UTF-8
/// (using `to_string_lossy`).
#[inline]
pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self {
pub fn set_content_disposition(mut self, cd: ContentDisposition) -> Self {
self.content_disposition = cd;
self.flags.insert(Flags::CONTENT_DISPOSITION);
self
}
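To illustrate the setter above, a small sketch that forces a download prompt with an explicit filename (the `report.pdf` path is assumed for illustration):

```rust
use actix_files::NamedFile;
use actix_web::http::header::{ContentDisposition, DispositionParam, DispositionType};

// Open a file and replace the inferred disposition with an explicit
// `attachment` one so browsers download it instead of rendering it inline.
async fn download() -> std::io::Result<NamedFile> {
    let file = NamedFile::open_async("report.pdf").await?;

    Ok(file.set_content_disposition(ContentDisposition {
        disposition: DispositionType::Attachment,
        parameters: vec![DispositionParam::Filename("report.pdf".to_owned())],
    }))
}
```

Since `NamedFile` implements `Responder`, the returned `io::Result<NamedFile>` can be used directly as a handler's return value.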
/// Disable `Content-Disposition` header.
/// Disables `Content-Disposition` header.
///
/// By default Content-Disposition` header is enabled.
/// By default, the `Content-Disposition` header is sent.
#[inline]
pub fn disable_content_disposition(mut self) -> Self {
self.flags.remove(Flags::CONTENT_DISPOSITION);
self
}
/// Set content encoding for serving this file
/// Sets content encoding for this file.
///
/// This prevents the `Compress` middleware from modifying the file contents and signals to
/// browsers/clients how to decode it. For example, if serving a compressed HTML file (e.g.,
/// `index.html.gz`) then use `.set_content_encoding(ContentEncoding::Gzip)`.
#[inline]
pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self {
self.encoding = Some(enc);
self
}
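A sketch of the pre-compressed asset case mentioned in the doc comment above (the `./static/index.html.gz` path and the direct `mime` crate dependency are assumptions for illustration):

```rust
use actix_files::NamedFile;
use actix_web::{get, http::header::ContentEncoding, Responder};

// Serve a gzip-compressed HTML file as-is: the encoding header tells clients
// how to decode it and keeps the `Compress` middleware from re-encoding it.
#[get("/")]
async fn index() -> impl Responder {
    NamedFile::open_async("./static/index.html.gz")
        .await
        .unwrap()
        .set_content_type(mime::TEXT_HTML_UTF_8)
        .set_content_encoding(ContentEncoding::Gzip)
}
```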
/// Specifies whether to use ETag or not.
/// Specifies whether to return `ETag` header in response.
///
/// Default is true.
#[inline]
@ -250,7 +362,7 @@ impl NamedFile {
self
}
/// Specifies whether to use Last-Modified or not.
/// Specifies whether to return `Last-Modified` header in response.
///
/// Default is true.
#[inline]
@ -268,14 +380,18 @@ impl NamedFile {
self
}
/// Creates an `ETag` in a format similar to Apache's.
pub(crate) fn etag(&self) -> Option<header::EntityTag> {
// This etag format is similar to Apache's.
self.modified.as_ref().map(|mtime| {
let ino = {
#[cfg(unix)]
{
#[cfg(unix)]
use std::os::unix::fs::MetadataExt as _;
self.md.ino()
}
#[cfg(not(unix))]
{
0
@ -286,7 +402,7 @@ impl NamedFile {
.duration_since(UNIX_EPOCH)
.expect("modification time must be after epoch");
header::EntityTag::strong(format!(
header::EntityTag::new_strong(format!(
"{:x}:{:x}:{:x}:{:x}",
ino,
self.md.len(),
@ -301,16 +417,17 @@ impl NamedFile {
}
/// Creates an `HttpResponse` with file as a streaming body.
pub fn into_response(self, req: &HttpRequest) -> HttpResponse {
pub fn into_response(self, req: &HttpRequest) -> HttpResponse<BoxBody> {
if self.status_code != StatusCode::OK {
let mut res = HttpResponse::build(self.status_code);
if self.flags.contains(Flags::PREFER_UTF8) {
let ct = equiv_utf8_text(self.content_type.clone());
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
let ct = if self.flags.contains(Flags::PREFER_UTF8) {
equiv_utf8_text(self.content_type.clone())
} else {
res.insert_header((header::CONTENT_TYPE, self.content_type.to_string()));
}
self.content_type
};
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
if self.flags.contains(Flags::CONTENT_DISPOSITION) {
res.insert_header((
@ -320,10 +437,10 @@ impl NamedFile {
}
if let Some(current_encoding) = self.encoding {
res.encoding(current_encoding);
res.insert_header((header::CONTENT_ENCODING, current_encoding.as_str()));
}
let reader = ChunkedReadFile::new(self.md.len(), 0, self.file);
let reader = chunked::new_chunked_read(self.md.len(), 0, self.file);
return res.streaming(reader);
}
@ -346,8 +463,8 @@ impl NamedFile {
} else if let (Some(ref m), Some(header::IfUnmodifiedSince(ref since))) =
(last_modified, req.get_header())
{
let t1: SystemTime = m.clone().into();
let t2: SystemTime = since.clone().into();
let t1: SystemTime = (*m).into();
let t2: SystemTime = (*since).into();
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
(Ok(t1), Ok(t2)) => t1.as_secs() > t2.as_secs(),
@ -365,8 +482,8 @@ impl NamedFile {
} else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) =
(last_modified, req.get_header())
{
let t1: SystemTime = m.clone().into();
let t2: SystemTime = since.clone().into();
let t1: SystemTime = (*m).into();
let t2: SystemTime = (*since).into();
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
(Ok(t1), Ok(t2)) => t1.as_secs() <= t2.as_secs(),
@ -376,36 +493,36 @@ impl NamedFile {
false
};
let mut resp = HttpResponse::build(self.status_code);
let mut res = HttpResponse::build(self.status_code);
if self.flags.contains(Flags::PREFER_UTF8) {
let ct = equiv_utf8_text(self.content_type.clone());
resp.insert_header((header::CONTENT_TYPE, ct.to_string()));
let ct = if self.flags.contains(Flags::PREFER_UTF8) {
equiv_utf8_text(self.content_type.clone())
} else {
resp.insert_header((header::CONTENT_TYPE, self.content_type.to_string()));
}
self.content_type
};
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
if self.flags.contains(Flags::CONTENT_DISPOSITION) {
resp.insert_header((
res.insert_header((
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
));
}
// default compressing
if let Some(current_encoding) = self.encoding {
resp.encoding(current_encoding);
res.insert_header((header::CONTENT_ENCODING, current_encoding.as_str()));
}
if let Some(lm) = last_modified {
resp.insert_header((header::LAST_MODIFIED, lm.to_string()));
res.insert_header((header::LAST_MODIFIED, lm.to_string()));
}
if let Some(etag) = etag {
resp.insert_header((header::ETAG, etag.to_string()));
res.insert_header((header::ETAG, etag.to_string()));
}
resp.insert_header((header::ACCEPT_RANGES, "bytes"));
res.insert_header((header::ACCEPT_RANGES, "bytes"));
let mut length = self.md.len();
let mut offset = 0;
@ -417,47 +534,56 @@ impl NamedFile {
length = ranges[0].length;
offset = ranges[0].start;
resp.encoding(ContentEncoding::Identity);
resp.insert_header((
// When a Content-Encoding header is present in a 206 partial content response
// for video content, it prevents browser video players from starting playback
// before loading the whole video and also prevents seeking.
//
// See: https://github.com/actix/actix-web/issues/2815
//
// The assumption of this fix is that the video player knows to not send an
// Accept-Encoding header for this request and that downstream middleware will
// not attempt compression for requests without it.
//
// TODO: Solve question around what to do if self.encoding is set and partial
// range is requested. Reject request? Ignoring self.encoding seems wrong, too.
// In practice, it should not come up.
if req.headers().contains_key(&header::ACCEPT_ENCODING) {
// don't allow compression middleware to modify partial content
res.insert_header((
header::CONTENT_ENCODING,
HeaderValue::from_static("identity"),
));
}
res.insert_header((
header::CONTENT_RANGE,
format!("bytes {}-{}/{}", offset, offset + length - 1, self.md.len()),
));
} else {
resp.insert_header((header::CONTENT_RANGE, format!("bytes */{}", length)));
return resp.status(StatusCode::RANGE_NOT_SATISFIABLE).finish();
res.insert_header((header::CONTENT_RANGE, format!("bytes */{}", length)));
return res.status(StatusCode::RANGE_NOT_SATISFIABLE).finish();
};
} else {
return resp.status(StatusCode::BAD_REQUEST).finish();
return res.status(StatusCode::BAD_REQUEST).finish();
};
};
if precondition_failed {
return resp.status(StatusCode::PRECONDITION_FAILED).finish();
return res.status(StatusCode::PRECONDITION_FAILED).finish();
} else if not_modified {
return resp.status(StatusCode::NOT_MODIFIED).finish();
return res
.status(StatusCode::NOT_MODIFIED)
.body(body::None::new())
.map_into_boxed_body();
}
let reader = ChunkedReadFile::new(length, offset, self.file);
let reader = chunked::new_chunked_read(length, offset, self.file);
if offset != 0 || length != self.md.len() {
resp.status(StatusCode::PARTIAL_CONTENT);
res.status(StatusCode::PARTIAL_CONTENT);
}
resp.body(SizedStream::new(length, reader))
}
}
impl Deref for NamedFile {
type Target = File;
fn deref(&self) -> &File {
&self.file
}
}
impl DerefMut for NamedFile {
fn deref_mut(&mut self) -> &mut File {
&mut self.file
res.body(SizedStream::new(length, reader))
}
}
@ -502,7 +628,9 @@ fn none_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
}
impl Responder for NamedFile {
fn respond_to(self, req: &HttpRequest) -> HttpResponse {
type Body = BoxBody;
fn respond_to(self, req: &HttpRequest) -> HttpResponse<Self::Body> {
self.into_response(req)
}
}
@ -511,14 +639,16 @@ impl ServiceFactory<ServiceRequest> for NamedFile {
type Response = ServiceResponse;
type Error = Error;
type Config = ();
type InitError = ();
type Service = NamedFileService;
type Future = Ready<Result<Self::Service, ()>>;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(NamedFileService {
let service = NamedFileService {
path: self.path.clone(),
})
};
Box::pin(async move { Ok(service) })
}
}
@ -531,18 +661,19 @@ pub struct NamedFileService {
impl Service<ServiceRequest> for NamedFileService {
type Response = ServiceResponse;
type Error = Error;
type Future = Ready<Result<Self::Response, Self::Error>>;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
actix_service::always_ready!();
dev::always_ready!();
fn call(&self, req: ServiceRequest) -> Self::Future {
let (req, _) = req.into_parts();
ready(
NamedFile::open(&self.path)
.map_err(|e| e.into())
.map(|f| f.into_response(&req))
.map(|res| ServiceResponse::new(req, res)),
)
let path = self.path.clone();
Box::pin(async move {
let file = NamedFile::open_async(path).await?;
let res = file.into_response(&req);
Ok(ServiceResponse::new(req, res))
})
}
}

View file

@ -1,5 +1,5 @@
use std::{
path::{Path, PathBuf},
path::{Component, Path, PathBuf},
str::FromStr,
};
@ -8,7 +8,7 @@ use actix_web::{dev::Payload, FromRequest, HttpRequest};
use crate::error::UriSegmentError;
#[derive(Debug)]
#[derive(Debug, PartialEq, Eq)]
pub(crate) struct PathBufWrap(PathBuf);
impl FromStr for PathBufWrap {
@ -21,11 +21,28 @@ impl FromStr for PathBufWrap {
impl PathBufWrap {
/// Parse a path, giving the choice of allowing hidden files to be considered valid segments.
///
/// Path traversal is guarded by this method.
pub fn parse_path(path: &str, hidden_files: bool) -> Result<Self, UriSegmentError> {
let mut buf = PathBuf::new();
// equivalent to `path.split('/').count()`
let mut segment_count = path.matches('/').count() + 1;
// we can decode the whole path here (instead of per-segment decoding)
// because we will reject `%2F` in paths using `segment_count`.
let path = percent_encoding::percent_decode_str(path)
.decode_utf8()
.map_err(|_| UriSegmentError::NotValidUtf8)?;
// disallow decoding `%2F` into `/`
if segment_count != path.matches('/').count() + 1 {
return Err(UriSegmentError::BadChar('/'));
}
for segment in path.split('/') {
if segment == ".." {
segment_count -= 1;
buf.pop();
} else if !hidden_files && segment.starts_with('.') {
return Err(UriSegmentError::BadStart('.'));
@ -38,14 +55,27 @@ impl PathBufWrap {
} else if segment.ends_with('<') {
return Err(UriSegmentError::BadEnd('<'));
} else if segment.is_empty() {
segment_count -= 1;
continue;
} else if cfg!(windows) && segment.contains('\\') {
return Err(UriSegmentError::BadChar('\\'));
} else if cfg!(windows) && segment.contains(':') {
return Err(UriSegmentError::BadChar(':'));
} else {
buf.push(segment)
}
}
// make sure we agree with stdlib parser
for (i, component) in buf.components().enumerate() {
assert!(
matches!(component, Component::Normal(_)),
"component `{:?}` is not normal",
component
);
assert!(i < segment_count);
}
Ok(PathBufWrap(buf))
}
}
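A short sketch of how the decoding rules above behave, written as if it were an extra unit test placed in this module (the paths are made up for illustration):

```rust
#[cfg(test)]
mod decoding_tests {
    use std::path::PathBuf;

    use super::*;

    #[test]
    fn percent_decoding_and_slash_rejection() {
        // `%20` decodes to a space within a single segment
        assert_eq!(
            PathBufWrap::parse_path("/dir/hello%20world.txt", false)
                .unwrap()
                .0,
            PathBuf::from_iter(vec!["dir", "hello world.txt"]),
        );

        // but `%2F` may not decode into an additional `/` separator
        assert!(PathBufWrap::parse_path("/dir%2F..%2Fsecret", false).is_err());
    }
}
```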
@ -59,17 +89,14 @@ impl AsRef<Path> for PathBufWrap {
impl FromRequest for PathBufWrap {
type Error = UriSegmentError;
type Future = Ready<Result<Self, Self::Error>>;
type Config = ();
fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future {
ready(req.match_info().path().parse())
ready(req.match_info().unprocessed().parse())
}
}
#[cfg(test)]
mod tests {
use std::iter::FromIterator;
use super::*;
#[test]
@ -116,4 +143,46 @@ mod tests {
PathBuf::from_iter(vec!["test", ".tt"])
);
}
#[test]
fn path_traversal() {
assert_eq!(
PathBufWrap::parse_path("/../README.md", false).unwrap().0,
PathBuf::from_iter(vec!["README.md"])
);
assert_eq!(
PathBufWrap::parse_path("/../README.md", true).unwrap().0,
PathBuf::from_iter(vec!["README.md"])
);
assert_eq!(
PathBufWrap::parse_path("/../../../../../../../../../../etc/passwd", false)
.unwrap()
.0,
PathBuf::from_iter(vec!["etc/passwd"])
);
}
#[test]
#[cfg_attr(windows, should_panic)]
fn windows_drive_traversal() {
// detect issues on Windows that could lead to path traversal
// see <https://github.com/SergioBenitez/Rocket/issues/1949>
assert_eq!(
PathBufWrap::parse_path("C:test.txt", false).unwrap().0,
PathBuf::from_iter(vec!["C:test.txt"])
);
assert_eq!(
PathBufWrap::parse_path("C:../whatever", false).unwrap().0,
PathBuf::from_iter(vec!["C:../whatever"])
);
assert_eq!(
PathBufWrap::parse_path(":test.txt", false).unwrap().0,
PathBuf::from_iter(vec![":test.txt"])
);
}
}

View file

@ -1,4 +1,36 @@
use derive_more::{Display, Error};
use std::fmt;
use derive_more::Error;
/// Copy of `http_range::HttpRangeParseError`.
#[derive(Debug, Clone)]
enum HttpRangeParseError {
InvalidRange,
NoOverlap,
}
impl From<http_range::HttpRangeParseError> for HttpRangeParseError {
fn from(err: http_range::HttpRangeParseError) -> Self {
match err {
http_range::HttpRangeParseError::InvalidRange => Self::InvalidRange,
http_range::HttpRangeParseError::NoOverlap => Self::NoOverlap,
}
}
}
#[derive(Debug, Clone, Error)]
#[non_exhaustive]
pub struct ParseRangeErr(#[error(not(source))] HttpRangeParseError);
impl fmt::Display for ParseRangeErr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("invalid Range header: ")?;
f.write_str(match self.0 {
HttpRangeParseError::InvalidRange => "invalid syntax",
HttpRangeParseError::NoOverlap => "range starts after end of content",
})
}
}
/// HTTP Range header representation.
#[derive(Debug, Clone, Copy)]
@ -10,26 +42,22 @@ pub struct HttpRange {
pub length: u64,
}
#[derive(Debug, Clone, Display, Error)]
#[display(fmt = "Parse HTTP Range failed")]
pub struct ParseRangeErr(#[error(not(source))] ());
impl HttpRange {
/// Parses a `Range` HTTP header string as per RFC 2616.
///
/// `header` is the value of the HTTP `Range` header (e.g. `bytes=0-9`).
/// `size` is the full size of the response (file).
pub fn parse(header: &str, size: u64) -> Result<Vec<HttpRange>, ParseRangeErr> {
match http_range::HttpRange::parse(header, size) {
Ok(ranges) => Ok(ranges
.iter()
.map(|range| HttpRange {
start: range.start,
length: range.length,
})
.collect()),
Err(_) => Err(ParseRangeErr(())),
}
let ranges =
http_range::HttpRange::parse(header, size).map_err(|err| ParseRangeErr(err.into()))?;
Ok(ranges
.iter()
.map(|range| HttpRange {
start: range.start,
length: range.length,
})
.collect())
}
}
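For reference, a small usage sketch of the parser above (the header value and total size are illustrative):

```rust
use actix_files::HttpRange;

fn main() {
    // two byte ranges over a 10 000-byte resource
    let ranges = HttpRange::parse("bytes=0-499,9500-", 10_000).unwrap();

    assert_eq!((ranges[0].start, ranges[0].length), (0, 500));
    assert_eq!((ranges[1].start, ranges[1].length), (9_500, 500));

    // syntactically invalid headers are rejected with `ParseRangeErr`
    assert!(HttpRange::parse("bytes=oops", 10_000).is_err());
}
```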

View file

@ -1,9 +1,8 @@
use std::{fmt, io, path::PathBuf, rc::Rc};
use std::{fmt, io, ops::Deref, path::PathBuf, rc::Rc};
use actix_service::Service;
use actix_utils::future::ok;
use actix_web::{
dev::{ServiceRequest, ServiceResponse},
body::BoxBody,
dev::{self, Service, ServiceRequest, ServiceResponse},
error::Error,
guard::Guard,
http::{header, Method},
@ -13,11 +12,22 @@ use futures_core::future::LocalBoxFuture;
use crate::{
named, Directory, DirectoryRenderer, FilesError, HttpService, MimeOverride, NamedFile,
PathBufWrap,
PathBufWrap, PathFilter,
};
/// Assembled file serving service.
pub struct FilesService {
#[derive(Clone)]
pub struct FilesService(pub(crate) Rc<FilesServiceInner>);
impl Deref for FilesService {
type Target = FilesServiceInner;
fn deref(&self) -> &Self::Target {
&self.0
}
}
pub struct FilesServiceInner {
pub(crate) directory: PathBuf,
pub(crate) index: Option<String>,
pub(crate) show_index: bool,
@ -25,25 +35,52 @@ pub struct FilesService {
pub(crate) default: Option<HttpService>,
pub(crate) renderer: Rc<DirectoryRenderer>,
pub(crate) mime_override: Option<Rc<MimeOverride>>,
pub(crate) path_filter: Option<Rc<PathFilter>>,
pub(crate) file_flags: named::Flags,
pub(crate) guards: Option<Rc<dyn Guard>>,
pub(crate) hidden_files: bool,
}
impl fmt::Debug for FilesServiceInner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("FilesServiceInner")
}
}
impl FilesService {
fn handle_err(
async fn handle_err(
&self,
err: io::Error,
req: ServiceRequest,
) -> LocalBoxFuture<'static, Result<ServiceResponse, Error>> {
) -> Result<ServiceResponse, Error> {
log::debug!("error handling {}: {}", req.path(), err);
if let Some(ref default) = self.default {
Box::pin(default.call(req))
default.call(req).await
} else {
Box::pin(ok(req.error_response(err)))
Ok(req.error_response(err))
}
}
fn serve_named_file(&self, req: ServiceRequest, mut named_file: NamedFile) -> ServiceResponse {
if let Some(ref mime_override) = self.mime_override {
let new_disposition = mime_override(&named_file.content_type.type_());
named_file.content_disposition.disposition = new_disposition;
}
named_file.flags = self.file_flags;
let (req, _) = req.into_parts();
let res = named_file.into_response(&req);
ServiceResponse::new(req, res)
}
fn show_index(&self, req: ServiceRequest, path: PathBuf) -> ServiceResponse {
let dir = Directory::new(self.directory.clone(), path);
let (req, _) = req.into_parts();
(self.renderer)(&dir, &req).unwrap_or_else(|e| ServiceResponse::from_err(e, req))
}
}
impl fmt::Debug for FilesService {
@ -53,104 +90,99 @@ impl fmt::Debug for FilesService {
}
impl Service<ServiceRequest> for FilesService {
type Response = ServiceResponse;
type Response = ServiceResponse<BoxBody>;
type Error = Error;
type Future = LocalBoxFuture<'static, Result<ServiceResponse, Error>>;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
actix_service::always_ready!();
dev::always_ready!();
fn call(&self, req: ServiceRequest) -> Self::Future {
let is_method_valid = if let Some(guard) = &self.guards {
// execute user defined guards
(**guard).check(req.head())
(**guard).check(&req.guard_ctx())
} else {
// default behavior
matches!(*req.method(), Method::HEAD | Method::GET)
};
if !is_method_valid {
return Box::pin(ok(req.into_response(
actix_web::HttpResponse::MethodNotAllowed()
.insert_header(header::ContentType(mime::TEXT_PLAIN_UTF_8))
.body("Request did not meet this resource's requirements."),
)));
}
let this = self.clone();
let real_path =
match PathBufWrap::parse_path(req.match_info().path(), self.hidden_files) {
Ok(item) => item,
Err(e) => return Box::pin(ok(req.error_response(e))),
};
// full file path
let path = self.directory.join(&real_path);
if let Err(err) = path.canonicalize() {
return Box::pin(self.handle_err(err, req));
}
if path.is_dir() {
if self.redirect_to_slash
&& !req.path().ends_with('/')
&& (self.index.is_some() || self.show_index)
{
let redirect_to = format!("{}/", req.path());
return Box::pin(ok(req.into_response(
HttpResponse::Found()
.insert_header((header::LOCATION, redirect_to))
.finish(),
)));
Box::pin(async move {
if !is_method_valid {
return Ok(req.into_response(
HttpResponse::MethodNotAllowed()
.insert_header(header::ContentType(mime::TEXT_PLAIN_UTF_8))
.body("Request did not meet this resource's requirements."),
));
}
if let Some(ref redir_index) = self.index {
let path = path.join(redir_index);
let path_on_disk =
match PathBufWrap::parse_path(req.match_info().unprocessed(), this.hidden_files) {
Ok(item) => item,
Err(err) => return Ok(req.error_response(err)),
};
match NamedFile::open(path) {
if let Some(filter) = &this.path_filter {
if !filter(path_on_disk.as_ref(), req.head()) {
if let Some(ref default) = this.default {
return default.call(req).await;
} else {
return Ok(req.into_response(HttpResponse::NotFound().finish()));
}
}
}
// full file path
let path = this.directory.join(&path_on_disk);
if let Err(err) = path.canonicalize() {
return this.handle_err(err, req).await;
}
if path.is_dir() {
if this.redirect_to_slash
&& !req.path().ends_with('/')
&& (this.index.is_some() || this.show_index)
{
let redirect_to = format!("{}/", req.path());
return Ok(req.into_response(
HttpResponse::Found()
.insert_header((header::LOCATION, redirect_to))
.finish(),
));
}
match this.index {
Some(ref index) => {
let named_path = path.join(index);
match NamedFile::open_async(named_path).await {
Ok(named_file) => Ok(this.serve_named_file(req, named_file)),
Err(_) if this.show_index => Ok(this.show_index(req, path)),
Err(err) => this.handle_err(err, req).await,
}
}
None if this.show_index => Ok(this.show_index(req, path)),
None => Ok(ServiceResponse::from_err(
FilesError::IsDirectory,
req.into_parts().0,
)),
}
} else {
match NamedFile::open_async(&path).await {
Ok(mut named_file) => {
if let Some(ref mime_override) = self.mime_override {
let new_disposition =
mime_override(&named_file.content_type.type_());
if let Some(ref mime_override) = this.mime_override {
let new_disposition = mime_override(&named_file.content_type.type_());
named_file.content_disposition.disposition = new_disposition;
}
named_file.flags = self.file_flags;
named_file.flags = this.file_flags;
let (req, _) = req.into_parts();
let res = named_file.into_response(&req);
Box::pin(ok(ServiceResponse::new(req, res)))
Ok(ServiceResponse::new(req, res))
}
Err(err) => self.handle_err(err, req),
Err(err) => this.handle_err(err, req).await,
}
} else if self.show_index {
let dir = Directory::new(self.directory.clone(), path);
let (req, _) = req.into_parts();
let x = (self.renderer)(&dir, &req);
Box::pin(match x {
Ok(resp) => ok(resp),
Err(err) => ok(ServiceResponse::from_err(err, req)),
})
} else {
Box::pin(ok(ServiceResponse::from_err(
FilesError::IsDirectory,
req.into_parts().0,
)))
}
} else {
match NamedFile::open(path) {
Ok(mut named_file) => {
if let Some(ref mime_override) = self.mime_override {
let new_disposition = mime_override(&named_file.content_type.type_());
named_file.content_disposition.disposition = new_disposition;
}
named_file.flags = self.file_flags;
let (req, _) = req.into_parts();
let res = named_file.into_response(&req);
Box::pin(ok(ServiceResponse::new(req, res)))
}
Err(err) => self.handle_err(err, req),
}
}
})
}
}

View file

@ -1,14 +1,14 @@
use actix_files::Files;
use actix_files::{Files, NamedFile};
use actix_web::{
http::{
header::{self, HeaderValue},
StatusCode,
},
test::{self, TestRequest},
App,
web, App,
};
#[actix_rt::test]
#[actix_web::test]
async fn test_utf8_file_contents() {
// use default ISO-8859-1 encoding
let srv = test::init_service(App::new().service(Files::new("/", "./tests"))).await;
@ -19,13 +19,12 @@ async fn test_utf8_file_contents() {
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(
res.headers().get(header::CONTENT_TYPE),
Some(&HeaderValue::from_static("text/plain")),
Some(&HeaderValue::from_static("text/plain; charset=utf-8")),
);
// prefer UTF-8 encoding
// disable UTF-8 attribute
let srv =
test::init_service(App::new().service(Files::new("/", "./tests").prefer_utf8(true)))
.await;
test::init_service(App::new().service(Files::new("/", "./tests").prefer_utf8(false))).await;
let req = TestRequest::with_uri("/utf8.txt").to_request();
let res = test::call_service(&srv, req).await;
@ -33,6 +32,34 @@ async fn test_utf8_file_contents() {
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(
res.headers().get(header::CONTENT_TYPE),
Some(&HeaderValue::from_static("text/plain; charset=utf-8")),
Some(&HeaderValue::from_static("text/plain")),
);
}
#[actix_web::test]
async fn partial_range_response_encoding() {
let srv = test::init_service(App::new().default_service(web::to(|| async {
NamedFile::open_async("./tests/test.binary").await.unwrap()
})))
.await;
// range request without accept-encoding returns no content-encoding header
let req = TestRequest::with_uri("/")
.append_header((header::RANGE, "bytes=10-20"))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::PARTIAL_CONTENT);
assert!(!res.headers().contains_key(header::CONTENT_ENCODING));
// range request with accept-encoding returns a content-encoding header
let req = TestRequest::with_uri("/")
.append_header((header::RANGE, "bytes=10-20"))
.append_header((header::ACCEPT_ENCODING, "identity"))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::PARTIAL_CONTENT);
assert_eq!(
res.headers().get(header::CONTENT_ENCODING).unwrap(),
"identity"
);
}

View file

@ -7,14 +7,12 @@ use actix_web::{
};
use bytes::Bytes;
#[actix_rt::test]
#[actix_web::test]
async fn test_guard_filter() {
let srv = test::init_service(
App::new()
.service(Files::new("/", "./tests/fixtures/guards/first").guard(Host("first.com")))
.service(
Files::new("/", "./tests/fixtures/guards/second").guard(Host("second.com")),
),
.service(Files::new("/", "./tests/fixtures/guards/second").guard(Host("second.com"))),
)
.await;

View file

@ -0,0 +1 @@
// this file is empty.

View file

@ -0,0 +1,26 @@
use actix_files::Files;
use actix_web::{
http::StatusCode,
test::{self, TestRequest},
App,
};
#[actix_rt::test]
async fn test_directory_traversal_prevention() {
let srv = test::init_service(App::new().service(Files::new("/", "./tests"))).await;
let req = TestRequest::with_uri("/../../../../../../../../../../../etc/passwd").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
let req = TestRequest::with_uri(
"/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/etc/passwd",
)
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
let req = TestRequest::with_uri("/%00/etc/passwd%00").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
}

View file

@ -1,100 +1,175 @@
# Changes
## Unreleased - 2021-xx-xx
## Unreleased
- Minimum supported Rust version (MSRV) is now 1.72.
## 3.0.0-beta.4 - 2021-04-02
* Added `TestServer::client_headers` method. [#2097]
## 3.2.0
- Minimum supported Rust version (MSRV) is now 1.68 due to transitive `time` dependency.
## 3.1.0
- Minimum supported Rust version (MSRV) is now 1.59.
## 3.0.0
- `TestServer::stop` is now async and will wait for the server and system to shutdown. [#2442]
- Added `TestServer::client_headers` method. [#2097]
- Update `actix-server` dependency to `2`.
- Update `actix-tls` dependency to `3`.
- Update `bytes` to `1.0`. [#1813]
- Minimum supported Rust version (MSRV) is now 1.57.
[#2442]: https://github.com/actix/actix-web/pull/2442
[#2097]: https://github.com/actix/actix-web/pull/2097
[#1813]: https://github.com/actix/actix-web/pull/1813
<details>
<summary>3.0.0 Pre-Releases</summary>
## 3.0.0-beta.13
- No significant changes since `3.0.0-beta.12`.
## 3.0.0-beta.12
- No significant changes since `3.0.0-beta.11`.
## 3.0.0-beta.11
- Minimum supported Rust version (MSRV) is now 1.54.
## 3.0.0-beta.10
- Update `actix-server` to `2.0.0-rc.2`. [#2550]
[#2550]: https://github.com/actix/actix-web/pull/2550
## 3.0.0-beta.9
- No significant changes since `3.0.0-beta.8`.
## 3.0.0-beta.8
- Update `actix-tls` to `3.0.0-rc.1`. [#2474]
[#2474]: https://github.com/actix/actix-web/pull/2474
## 3.0.0-beta.7
- Fix compatibility with experimental `io-uring` feature of `actix-rt`. [#2408]
[#2408]: https://github.com/actix/actix-web/pull/2408
## 3.0.0-beta.6
- `TestServer::stop` is now async and will wait for the server and system to shutdown. [#2442]
- Update `actix-server` to `2.0.0-beta.9`. [#2442]
- Minimum supported Rust version (MSRV) is now 1.52.
[#2442]: https://github.com/actix/actix-web/pull/2442
## 3.0.0-beta.5
- Minimum supported Rust version (MSRV) is now 1.51.
## 3.0.0-beta.4
- Added `TestServer::client_headers` method. [#2097]
[#2097]: https://github.com/actix/actix-web/pull/2097
## 3.0.0-beta.3
## 3.0.0-beta.3 - 2021-03-09
* No notable changes.
- No notable changes.
## 3.0.0-beta.2
## 3.0.0-beta.2 - 2021-02-10
* No notable changes.
- No notable changes.
## 3.0.0-beta.1
## 3.0.0-beta.1 - 2021-01-07
* Update `bytes` to `1.0`. [#1813]
- Update `bytes` to `1.0`. [#1813]
[#1813]: https://github.com/actix/actix-web/pull/1813
</details>
## 2.1.0 - 2020-11-25
* Add ability to set address for `TestServer`. [#1645]
* Upgrade `base64` to `0.13`.
* Upgrade `serde_urlencoded` to `0.7`. [#1773]
## 2.1.0
- Add ability to set address for `TestServer`. [#1645]
- Upgrade `base64` to `0.13`.
- Upgrade `serde_urlencoded` to `0.7`. [#1773]
[#1773]: https://github.com/actix/actix-web/pull/1773
[#1645]: https://github.com/actix/actix-web/pull/1645
## 2.0.0
## 2.0.0 - 2020-09-11
* Update actix-codec and actix-utils dependencies.
- Update actix-codec and actix-utils dependencies.
## 2.0.0-alpha.1
## 2.0.0-alpha.1 - 2020-05-23
* Update the `time` dependency to 0.2.7
* Update `actix-connect` dependency to 2.0.0-alpha.2
* Make `test_server` `async` fn.
* Bump minimum supported Rust version to 1.40
* Replace deprecated `net2` crate with `socket2`
* Update `base64` dependency to 0.12
* Update `env_logger` dependency to 0.7
- Update the `time` dependency to 0.2.7
- Update `actix-connect` dependency to 2.0.0-alpha.2
- Make `test_server` `async` fn.
- Bump minimum supported Rust version to 1.40
- Replace deprecated `net2` crate with `socket2`
- Update `base64` dependency to 0.12
- Update `env_logger` dependency to 0.7
## 1.0.0 - 2019-12-13
* Replaced `TestServer::start()` with `test_server()`
## 1.0.0
- Replaced `TestServer::start()` with `test_server()`
## 1.0.0-alpha.3 - 2019-12-07
* Migrate to `std::future`
## 1.0.0-alpha.3
- Migrate to `std::future`
## 0.2.5 - 2019-09-17
* Update serde_urlencoded to "0.6.1"
* Increase TestServerRuntime timeouts from 500ms to 3000ms
* Do not override current `System`
## 0.2.5
- Update serde_urlencoded to "0.6.1"
- Increase TestServerRuntime timeouts from 500ms to 3000ms
- Do not override current `System`
## 0.2.4 - 2019-07-18
* Update actix-server to 0.6
## 0.2.4
- Update actix-server to 0.6
## 0.2.3 - 2019-07-16
* Add `delete`, `options`, `patch` methods to `TestServerRunner`
## 0.2.3
- Add `delete`, `options`, `patch` methods to `TestServerRunner`
## 0.2.2 - 2019-06-16
* Add .put() and .sput() methods
## 0.2.2
- Add .put() and .sput() methods
## 0.2.1 - 2019-06-05
* Add license files
## 0.2.1
- Add license files
## 0.2.0 - 2019-05-12
* Update awc and actix-http deps
## 0.2.0
- Update awc and actix-http deps
## 0.1.1 - 2019-04-24
* Always make new connection for http client
## 0.1.1
- Always make new connection for http client
## 0.1.0 - 2019-04-16
* No changes
## 0.1.0
- No changes
## 0.1.0-alpha.3 - 2019-04-02
* Request functions accept path #743
## 0.1.0-alpha.3
- Request functions accept path #743
## 0.1.0-alpha.2 - 2019-03-29
* Added TestServerRuntime::load_body() method
* Update actix-http and awc libraries
## 0.1.0-alpha.2
- Added TestServerRuntime::load_body() method
- Update actix-http and awc libraries
## 0.1.0-alpha.1 - 2019-03-28
* Initial impl
## 0.1.0-alpha.1
- Initial impl

View file

@ -1,26 +1,34 @@
[package]
name = "actix-http-test"
version = "3.0.0-beta.4"
version = "3.2.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Various helpers for Actix applications to use during testing"
readme = "README.md"
keywords = ["http", "web", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-http-test/"
categories = ["network-programming", "asynchronous",
"web-programming::http-server",
"web-programming::websocket"]
repository = "https://github.com/actix/actix-web"
categories = [
"network-programming",
"asynchronous",
"web-programming::http-server",
"web-programming::websocket",
]
license = "MIT OR Apache-2.0"
exclude = [".gitignore", ".cargo/config"]
edition = "2018"
edition = "2021"
[package.metadata.docs.rs]
features = []
[lib]
name = "actix_http_test"
path = "src/lib.rs"
[package.metadata.cargo_check_external_types]
allowed_external_types = [
"actix_codec::*",
"actix_http::*",
"actix_server::*",
"awc::*",
"bytes::*",
"futures_core::*",
"http::*",
"tokio::*",
]
[features]
default = []
@ -29,27 +37,25 @@ default = []
openssl = ["tls-openssl", "awc/openssl"]
[dependencies]
actix-service = "2.0.0"
actix-codec = "0.4.0"
actix-tls = "3.0.0-beta.5"
actix-utils = "3.0.0"
actix-service = "2"
actix-codec = "0.5"
actix-tls = "3"
actix-utils = "3"
actix-rt = "2.2"
actix-server = "2.0.0-beta.3"
awc = { version = "3.0.0-beta.5", default-features = false }
actix-server = "2"
awc = { version = "3", default-features = false }
base64 = "0.13"
bytes = "1"
futures-core = { version = "0.3.7", default-features = false }
http = "0.2.2"
futures-core = { version = "0.3.17", default-features = false }
http = "0.2.7"
log = "0.4"
socket2 = "0.4"
serde = "1.0"
serde_json = "1.0"
socket2 = "0.5"
serde = "1"
serde_json = "1"
slab = "0.4"
serde_urlencoded = "0.7"
time = { version = "0.2.23", default-features = false, features = ["std"] }
tls-openssl = { version = "0.10.9", package = "openssl", optional = true }
tls-openssl = { version = "0.10.55", package = "openssl", optional = true }
tokio = { version = "1.24.2", features = ["sync"] }
[dev-dependencies]
actix-web = { version = "4.0.0-beta.6", default-features = false, features = ["cookies"] }
actix-http = "3.0.0-beta.6"
actix-http = "3"

View file

@ -1,15 +1,20 @@
# actix-http-test
# `actix-http-test`
> Various helpers for Actix applications to use during testing.
<!-- prettier-ignore-start -->
[![crates.io](https://img.shields.io/crates/v/actix-http-test?label=latest)](https://crates.io/crates/actix-http-test)
[![Documentation](https://docs.rs/actix-http-test/badge.svg?version=3.0.0-beta.4)](https://docs.rs/actix-http-test/3.0.0-beta.4)
[![Documentation](https://docs.rs/actix-http-test/badge.svg?version=3.2.0)](https://docs.rs/actix-http-test/3.2.0)
![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http-test)
[![Dependency Status](https://deps.rs/crate/actix-http-test/3.0.0-beta.4/status.svg)](https://deps.rs/crate/actix-http-test/3.0.0-beta.4)
[![Join the chat at https://gitter.im/actix/actix-web](https://badges.gitter.im/actix/actix-web.svg)](https://gitter.im/actix/actix-web?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
<br>
[![Dependency Status](https://deps.rs/crate/actix-http-test/3.2.0/status.svg)](https://deps.rs/crate/actix-http-test/3.2.0)
[![Download](https://img.shields.io/crates/d/actix-http-test.svg)](https://crates.io/crates/actix-http-test)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
## Documentation & Resources
<!-- prettier-ignore-end -->
- [API Documentation](https://docs.rs/actix-http-test)
- [Chat on Gitter](https://gitter.im/actix/actix-web)
- Minimum Supported Rust Version (MSRV): 1.46.0
<!-- cargo-rdme start -->
Various helpers for Actix applications to use during testing.
<!-- cargo-rdme end -->

View file

@ -1,127 +1,147 @@
//! Various helpers for Actix applications to use during testing.
#![deny(rust_2018_idioms)]
#![deny(rust_2018_idioms, nonstandard_style)]
#![warn(future_incompatible)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#[cfg(feature = "openssl")]
extern crate tls_openssl as openssl;
use std::sync::mpsc;
use std::{net, thread, time};
use std::{net, thread, time::Duration};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_rt::{net::TcpStream, System};
use actix_server::{Server, ServiceFactory};
use actix_server::{Server, ServerServiceFactory};
use awc::{
error::PayloadError, http::HeaderMap, ws, Client, ClientRequest, ClientResponse, Connector,
error::PayloadError, http::header::HeaderMap, ws, Client, ClientRequest, ClientResponse,
Connector,
};
use bytes::Bytes;
use futures_core::stream::Stream;
use http::Method;
use socket2::{Domain, Protocol, Socket, Type};
use tokio::sync::mpsc;
/// Start test server
/// Start test server.
///
/// `TestServer` is very simple test server that simplify process of writing
/// integration tests cases for actix web applications.
/// `TestServer` is a very simple test server that simplifies the process of writing integration test
/// cases for HTTP applications.
///
/// # Examples
///
/// ```
/// use actix_http::HttpService;
/// use actix_http_test::TestServer;
/// use actix_web::{web, App, HttpResponse, Error};
///
/// async fn my_handler() -> Result<HttpResponse, Error> {
/// Ok(HttpResponse::Ok().into())
/// }
/// use actix_http::{HttpService, Response, Error, StatusCode};
/// use actix_http_test::test_server;
/// use actix_service::{fn_service, map_config, ServiceFactoryExt as _};
///
/// #[actix_rt::test]
/// # async fn hidden_test() {}
/// async fn test_example() {
/// let mut srv = TestServer::start(
/// || HttpService::new(
/// App::new().service(
/// web::resource("/").to(my_handler))
/// )
/// );
/// let srv = test_server(|| {
/// HttpService::build()
/// .h1(fn_service(|req| async move {
/// Ok::<_, Error>(Response::ok())
/// }))
/// .tcp()
/// .map_err(|_| ())
/// })
/// .await;
///
/// let req = srv.get("/");
/// let response = req.send().await.unwrap();
/// assert!(response.status().is_success());
///
/// assert_eq!(response.status(), StatusCode::OK);
/// }
/// # actix_rt::System::new().block_on(test_example());
/// ```
pub async fn test_server<F: ServiceFactory<TcpStream>>(factory: F) -> TestServer {
pub async fn test_server<F: ServerServiceFactory<TcpStream>>(factory: F) -> TestServer {
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
test_server_with_addr(tcp, factory).await
}
/// Start [`test server`](test_server()) on a concrete Address
pub async fn test_server_with_addr<F: ServiceFactory<TcpStream>>(
/// Start [`test server`](test_server()) on an existing address binding.
pub async fn test_server_with_addr<F: ServerServiceFactory<TcpStream>>(
tcp: net::TcpListener,
factory: F,
) -> TestServer {
let (tx, rx) = mpsc::channel();
let (started_tx, started_rx) = std::sync::mpsc::channel();
let (thread_stop_tx, thread_stop_rx) = mpsc::channel(1);
// run server in separate thread
thread::spawn(move || {
let sys = System::new();
let local_addr = tcp.local_addr().unwrap();
System::new().block_on(async move {
let local_addr = tcp.local_addr().unwrap();
let srv = Server::build()
.listen("test", tcp, factory)?
.workers(1)
.disable_signals();
let srv = Server::build()
.workers(1)
.disable_signals()
.system_exit()
.listen("test", tcp, factory)
.expect("test server could not be created");
sys.block_on(async {
srv.run();
tx.send((System::current(), local_addr)).unwrap();
let srv = srv.run();
started_tx
.send((System::current(), srv.handle(), local_addr))
.unwrap();
// drive server loop
srv.await.unwrap();
});
sys.run()
// notify TestServer that server and system have shut down
// all thread managed resources should be dropped at this point
#[allow(clippy::let_underscore_future)]
let _ = thread_stop_tx.send(());
});
let (system, addr) = rx.recv().unwrap();
let (system, server, addr) = started_rx.recv().unwrap();
let client = {
#[cfg(feature = "openssl")]
let connector = {
#[cfg(feature = "openssl")]
{
use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
builder.set_verify(SslVerifyMode::NONE);
let _ = builder
.set_alpn_protos(b"\x02h2\x08http/1.1")
.map_err(|e| log::error!("Can not set alpn protocol: {:?}", e));
Connector::new()
.conn_lifetime(time::Duration::from_secs(0))
.timeout(time::Duration::from_millis(30000))
.ssl(builder.build())
}
#[cfg(not(feature = "openssl"))]
{
Connector::new()
.conn_lifetime(time::Duration::from_secs(0))
.timeout(time::Duration::from_millis(30000))
}
let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
builder.set_verify(SslVerifyMode::NONE);
let _ = builder
.set_alpn_protos(b"\x02h2\x08http/1.1")
.map_err(|e| log::error!("Can not set alpn protocol: {:?}", e));
Connector::new()
.conn_lifetime(Duration::from_secs(0))
.timeout(Duration::from_millis(30000))
.openssl(builder.build())
};
#[cfg(not(feature = "openssl"))]
let connector = {
Connector::new()
.conn_lifetime(Duration::from_secs(0))
.timeout(Duration::from_millis(30000))
};
Client::builder().connector(connector).finish()
};
TestServer {
addr,
server,
client,
system,
addr,
thread_stop_rx,
}
}
/// Test server controller
pub struct TestServer {
server: actix_server::ServerHandle,
client: awc::Client,
system: actix_rt::System,
addr: net::SocketAddr,
client: Client,
system: System,
thread_stop_rx: mpsc::Receiver<()>,
}
impl TestServer {
@ -258,15 +278,33 @@ impl TestServer {
self.client.headers()
}
/// Stop HTTP server
fn stop(&mut self) {
/// Stop HTTP server.
///
/// Waits for spawned `Server` and `System` to (force) shutdown.
pub async fn stop(&mut self) {
// signal server to stop
self.server.stop(false).await;
// also signal system to stop
// though this is handled by `ServerBuilder::exit_system` too
self.system.stop();
// wait for thread to be stopped but don't care about result
let _ = self.thread_stop_rx.recv().await;
}
}
impl Drop for TestServer {
fn drop(&mut self) {
self.stop()
// calls in this Drop impl should be enough to shut down the server, system, and thread
// without needing to await anything
// signal server to stop
#[allow(clippy::let_underscore_future)]
let _ = self.server.stop(true);
// signal system to stop
self.system.stop();
}
}
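A sketch of driving the explicit shutdown path described above, mirroring the `test_server` doc example (the handler and route are illustrative):

```rust
use actix_http::{Error, HttpService, Response};
use actix_http_test::test_server;
use actix_service::{fn_service, ServiceFactoryExt as _};

#[actix_rt::test]
async fn explicit_shutdown() {
    let mut srv = test_server(|| {
        HttpService::build()
            .h1(fn_service(|_req| async { Ok::<_, Error>(Response::ok()) }))
            .tcp()
            .map_err(|_| ())
    })
    .await;

    let res = srv.get("/").send().await.unwrap();
    assert!(res.status().is_success());

    // wait for the server thread and `System` to fully shut down instead of
    // relying on the best-effort `Drop` impl
    srv.stop().await;
}
```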

File diff suppressed because it is too large Load diff

View file

@ -1,108 +1,181 @@
[package]
name = "actix-http"
version = "3.0.0-beta.6"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "HTTP primitives for the Actix ecosystem"
readme = "README.md"
version = "3.7.0"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "HTTP types and services for the Actix ecosystem"
keywords = ["actix", "http", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-http/"
categories = ["network-programming", "asynchronous",
"web-programming::http-server",
"web-programming::websocket"]
license = "MIT OR Apache-2.0"
edition = "2018"
repository = "https://github.com/actix/actix-web"
categories = [
"network-programming",
"asynchronous",
"web-programming::http-server",
"web-programming::websocket",
]
license.workspace = true
edition.workspace = true
rust-version.workspace = true
[package.metadata.docs.rs]
# features that docs.rs will build with
features = ["openssl", "rustls", "compress"]
rustdoc-args = ["--cfg", "docsrs"]
features = [
"http2",
"ws",
"openssl",
"rustls-0_20",
"rustls-0_21",
"rustls-0_22",
"rustls-0_23",
"compress-brotli",
"compress-gzip",
"compress-zstd",
]
[lib]
name = "actix_http"
path = "src/lib.rs"
[package.metadata.cargo_check_external_types]
allowed_external_types = [
"actix_codec::*",
"actix_service::*",
"actix_tls::*",
"actix_utils::*",
"bytes::*",
"bytestring::*",
"encoding_rs::*",
"futures_core::*",
"h2::*",
"http::*",
"httparse::*",
"language_tags::*",
"mime::*",
"openssl::*",
"rustls::*",
"tokio_util::*",
"tokio::*",
]
[features]
default = []
# openssl
openssl = ["actix-tls/openssl"]
# HTTP/2 protocol support
http2 = ["h2"]
# rustls support
rustls = ["actix-tls/rustls"]
# WebSocket protocol implementation
ws = [
"local-channel",
"base64",
"rand",
"sha1",
]
# enable compression support
compress = ["flate2", "brotli2"]
# TLS via OpenSSL
openssl = ["actix-tls/accept", "actix-tls/openssl"]
# trust-dns as client dns resolver
trust-dns = ["trust-dns-resolver"]
# TLS via Rustls v0.20
rustls = ["rustls-0_20"]
# TLS via Rustls v0.20
rustls-0_20 = ["actix-tls/accept", "actix-tls/rustls-0_20"]
# TLS via Rustls v0.21
rustls-0_21 = ["actix-tls/accept", "actix-tls/rustls-0_21"]
# TLS via Rustls v0.22
rustls-0_22 = ["actix-tls/accept", "actix-tls/rustls-0_22"]
# TLS via Rustls v0.23
rustls-0_23 = ["actix-tls/accept", "actix-tls/rustls-0_23"]
# Compression codecs
compress-brotli = ["__compress", "brotli"]
compress-gzip = ["__compress", "flate2"]
compress-zstd = ["__compress", "zstd"]
# Internal (PRIVATE!) features used to aid testing and checking feature status.
# Don't rely on these whatsoever. They are semver-exempt and may disappear at anytime.
__compress = []
[dependencies]
actix-service = "2.0.0"
actix-codec = "0.4.0"
actix-utils = "3.0.0"
actix-rt = "2.2"
actix-tls = { version = "3.0.0-beta.5", features = ["accept", "connect"] }
actix-service = "2"
actix-codec = "0.5"
actix-utils = "3"
actix-rt = { version = "2.2", default-features = false }
ahash = "0.7"
base64 = "0.13"
bitflags = "1.2"
ahash = "0.8"
bitflags = "2"
bytes = "1"
bytestring = "1"
derive_more = "0.99.5"
encoding_rs = "0.8"
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
futures-util = { version = "0.3.7", default-features = false, features = ["alloc", "sink"] }
h2 = "0.3.1"
http = "0.2.2"
httparse = "1.3"
itoa = "0.4"
futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] }
http = "0.2.7"
httparse = "1.5.1"
httpdate = "1.0.1"
itoa = "1"
language-tags = "0.3"
local-channel = "0.1"
once_cell = "1.5"
log = "0.4"
mime = "0.3"
mime = "0.3.4"
percent-encoding = "2.1"
pin-project = "1.0.0"
pin-project-lite = "0.2"
rand = "0.8"
regex = "1.3"
serde = "1.0"
sha-1 = "0.9"
smallvec = "1.6"
time = { version = "0.2.23", default-features = false, features = ["std"] }
tokio = { version = "1.2", features = ["sync"] }
smallvec = "1.6.1"
tokio = { version = "1.24.2", features = [] }
tokio-util = { version = "0.7", features = ["io", "codec"] }
tracing = { version = "0.1.30", default-features = false, features = ["log"] }
# compression
brotli2 = { version="0.3.2", optional = true }
# http2
h2 = { version = "0.3.26", optional = true }
# websockets
local-channel = { version = "0.1", optional = true }
base64 = { version = "0.22", optional = true }
rand = { version = "0.8", optional = true }
sha1 = { version = "0.10", optional = true }
# openssl/rustls
actix-tls = { version = "3.4", default-features = false, optional = true }
# compress-*
brotli = { version = "6", optional = true }
flate2 = { version = "1.0.13", optional = true }
trust-dns-resolver = { version = "0.20.0", optional = true }
zstd = { version = "0.13", optional = true }
[dev-dependencies]
actix-server = "2.0.0-beta.3"
actix-http-test = { version = "3.0.0-beta.4", features = ["openssl"] }
actix-tls = { version = "3.0.0-beta.5", features = ["openssl"] }
criterion = "0.3"
env_logger = "0.8"
rcgen = "0.8"
actix-http-test = { version = "3", features = ["openssl"] }
actix-server = "2"
actix-tls = { version = "3.4", features = ["openssl", "rustls-0_23-webpki-roots"] }
actix-web = "4"
async-stream = "0.3"
criterion = { version = "0.5", features = ["html_reports"] }
divan = "0.1.8"
env_logger = "0.11"
futures-util = { version = "0.3.17", default-features = false, features = ["alloc"] }
memchr = "2.4"
once_cell = "1.9"
rcgen = "0.13"
regex = "1.3"
rustversion = "1"
rustls-pemfile = "2"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tls-openssl = { version = "0.10", package = "openssl" }
tls-rustls = { version = "0.19", package = "rustls" }
static_assertions = "1"
tls-openssl = { package = "openssl", version = "0.10.55" }
tls-rustls_023 = { package = "rustls", version = "0.23" }
tokio = { version = "1.24.2", features = ["net", "rt", "macros"] }
[[example]]
name = "ws"
required-features = ["rustls"]
required-features = ["ws", "rustls-0_23"]
[[example]]
name = "tls_rustls"
required-features = ["http2", "rustls-0_23"]
[[bench]]
name = "write-camel-case"
name = "response-body-compression"
harness = false
required-features = ["compress-brotli", "compress-gzip", "compress-zstd"]
[[bench]]
name = "status-line"
harness = false
[[bench]]
name = "uninit-headers"
name = "date-formatting"
harness = false

View file

@ -1,23 +1,21 @@
# actix-http
# `actix-http`
> HTTP primitives for the Actix ecosystem.
> HTTP types and services for the Actix ecosystem.
<!-- prettier-ignore-start -->
[![crates.io](https://img.shields.io/crates/v/actix-http?label=latest)](https://crates.io/crates/actix-http)
[![Documentation](https://docs.rs/actix-http/badge.svg?version=3.0.0-beta.6)](https://docs.rs/actix-http/3.0.0-beta.6)
[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
[![Documentation](https://docs.rs/actix-http/badge.svg?version=3.7.0)](https://docs.rs/actix-http/3.7.0)
![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http.svg)
<br />
[![dependency status](https://deps.rs/crate/actix-http/3.0.0-beta.6/status.svg)](https://deps.rs/crate/actix-http/3.0.0-beta.6)
[![dependency status](https://deps.rs/crate/actix-http/3.7.0/status.svg)](https://deps.rs/crate/actix-http/3.7.0)
[![Download](https://img.shields.io/crates/d/actix-http.svg)](https://crates.io/crates/actix-http)
[![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
## Documentation & Resources
<!-- prettier-ignore-end -->
- [API Documentation](https://docs.rs/actix-http)
- [Chat on Gitter](https://gitter.im/actix/actix-web)
- Minimum Supported Rust Version (MSRV): 1.46.0
## Example
## Examples
```rust
use std::{env, io};
@ -26,7 +24,7 @@ use actix_http::{HttpService, Response};
use actix_server::Server;
use futures_util::future;
use http::header::HeaderValue;
use log::info;
use tracing::info;
#[actix_rt::main]
async fn main() -> io::Result<()> {
@ -50,18 +48,3 @@ async fn main() -> io::Result<()> {
.await
}
```
## License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
* MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
at your option.
## Code of Conduct
Contribution to the actix-http crate is organized under the terms of the Contributor Covenant;
the maintainer of actix-http, @fafhrd91, promises to intervene to uphold that code of conduct.

View file

@ -0,0 +1,20 @@
use std::time::SystemTime;
use actix_http::header::HttpDate;
use divan::{black_box, AllocProfiler, Bencher};
#[global_allocator]
static ALLOC: AllocProfiler = AllocProfiler::system();
#[divan::bench]
fn date_formatting(b: Bencher<'_, '_>) {
let now = SystemTime::now();
b.bench(|| {
black_box(HttpDate::from(black_box(now)).to_string());
})
}
fn main() {
divan::main();
}

View file

@ -0,0 +1,88 @@
use std::convert::Infallible;
use actix_http::{encoding::Encoder, ContentEncoding, Request, Response, StatusCode};
use actix_service::{fn_service, Service as _};
use criterion::{black_box, criterion_group, criterion_main, Criterion};
static BODY: &[u8] = include_bytes!("../Cargo.toml");
fn compression_responses(c: &mut Criterion) {
let mut group = c.benchmark_group("compression responses");
group.bench_function("identity", |b| {
let rt = actix_rt::Runtime::new().unwrap();
let identity_svc = fn_service(|_: Request| async move {
let mut res = Response::with_body(StatusCode::OK, ());
let body = black_box(Encoder::response(
ContentEncoding::Identity,
res.head_mut(),
BODY,
));
Ok::<_, Infallible>(black_box(res.set_body(black_box(body))))
});
b.iter(|| {
rt.block_on(identity_svc.call(Request::new())).unwrap();
});
});
group.bench_function("gzip", |b| {
let rt = actix_rt::Runtime::new().unwrap();
let identity_svc = fn_service(|_: Request| async move {
let mut res = Response::with_body(StatusCode::OK, ());
let body = black_box(Encoder::response(
ContentEncoding::Gzip,
res.head_mut(),
BODY,
));
Ok::<_, Infallible>(black_box(res.set_body(black_box(body))))
});
b.iter(|| {
rt.block_on(identity_svc.call(Request::new())).unwrap();
});
});
group.bench_function("br", |b| {
let rt = actix_rt::Runtime::new().unwrap();
let identity_svc = fn_service(|_: Request| async move {
let mut res = Response::with_body(StatusCode::OK, ());
let body = black_box(Encoder::response(
ContentEncoding::Brotli,
res.head_mut(),
BODY,
));
Ok::<_, Infallible>(black_box(res.set_body(black_box(body))))
});
b.iter(|| {
rt.block_on(identity_svc.call(Request::new())).unwrap();
});
});
group.bench_function("zstd", |b| {
let rt = actix_rt::Runtime::new().unwrap();
let identity_svc = fn_service(|_: Request| async move {
let mut res = Response::with_body(StatusCode::OK, ());
let body = black_box(Encoder::response(
ContentEncoding::Zstd,
res.head_mut(),
BODY,
));
Ok::<_, Infallible>(black_box(res.set_body(black_box(body))))
});
b.iter(|| {
rt.block_on(identity_svc.call(Request::new())).unwrap();
});
});
group.finish();
}
criterion_group!(benches, compression_responses);
criterion_main!(benches);

View file

@ -1,222 +0,0 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use bytes::BytesMut;
use http::Version;
const CODES: &[u16] = &[201, 303, 404, 515];
fn bench_write_status_line_11(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v1.1");
let version = Version::HTTP_11;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
fn bench_write_status_line_10(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v1.0");
let version = Version::HTTP_10;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
fn bench_write_status_line_09(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v0.9");
let version = Version::HTTP_09;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
criterion_group!(
benches,
bench_write_status_line_11,
bench_write_status_line_10,
bench_write_status_line_09
);
criterion_main!(benches);
mod _naive {
use bytes::{BufMut, BytesMut};
use http::Version;
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
match version {
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
_ => {
// other HTTP version handlers do not use this method
}
}
bytes.put_slice(n.to_string().as_bytes());
}
}
mod _new {
use bytes::{BufMut, BytesMut};
use http::Version;
const DIGITS_START: u8 = b'0';
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
match version {
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
_ => {
// other HTTP version handlers do not use this method
}
}
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
bytes.put_u8(b' ');
}
}
mod _original {
use std::ptr;
use bytes::{BufMut, BytesMut};
use http::Version;
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
pub(crate) const STATUS_LINE_BUF_SIZE: usize = 13;
pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesMut) {
let mut buf: [u8; STATUS_LINE_BUF_SIZE] = *b"HTTP/1.1 ";
match version {
Version::HTTP_2 => buf[5] = b'2',
Version::HTTP_10 => buf[7] = b'0',
Version::HTTP_09 => {
buf[5] = b'0';
buf[7] = b'9';
}
_ => {}
}
let mut curr: isize = 12;
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
let four = n > 999;
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
}
}
bytes.put_slice(&buf);
if four {
bytes.put_u8(b' ');
}
}
}

View file

@ -1,137 +0,0 @@
use criterion::{criterion_group, criterion_main, Criterion};
use bytes::BytesMut;
// A Miri run detects UB, seen on this playground:
// https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=f5d9aa166aa48df8dca05fce2b6c3915
fn bench_header_parsing(c: &mut Criterion) {
c.bench_function("Original (Unsound) [short]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ_SHORT);
_original::parse_headers(&mut buf);
})
});
c.bench_function("New (safe) [short]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ_SHORT);
_new::parse_headers(&mut buf);
})
});
c.bench_function("Original (Unsound) [realistic]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ);
_original::parse_headers(&mut buf);
})
});
c.bench_function("New (safe) [realistic]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ);
_new::parse_headers(&mut buf);
})
});
}
criterion_group!(benches, bench_header_parsing);
criterion_main!(benches);
const MAX_HEADERS: usize = 96;
const EMPTY_HEADER_ARRAY: [httparse::Header<'static>; MAX_HEADERS] =
[httparse::EMPTY_HEADER; MAX_HEADERS];
#[derive(Clone, Copy)]
struct HeaderIndex {
name: (usize, usize),
value: (usize, usize),
}
const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex {
name: (0, 0),
value: (0, 0),
};
const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] =
[EMPTY_HEADER_INDEX; MAX_HEADERS];
impl HeaderIndex {
fn record(
bytes: &[u8],
headers: &[httparse::Header<'_>],
indices: &mut [HeaderIndex],
) {
let bytes_ptr = bytes.as_ptr() as usize;
for (header, indices) in headers.iter().zip(indices.iter_mut()) {
let name_start = header.name.as_ptr() as usize - bytes_ptr;
let name_end = name_start + header.name.len();
indices.name = (name_start, name_end);
let value_start = header.value.as_ptr() as usize - bytes_ptr;
let value_end = value_start + header.value.len();
indices.value = (value_start, value_end);
}
}
}
// test cases taken from:
// https://github.com/seanmonstar/httparse/blob/master/benches/parse.rs
const REQ_SHORT: &'static [u8] = b"\
GET / HTTP/1.0\r\n\
Host: example.com\r\n\
Cookie: session=60; user_id=1\r\n\r\n";
const REQ: &'static [u8] = b"\
GET /wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg HTTP/1.1\r\n\
Host: www.kittyhell.com\r\n\
User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 Pathtraq/0.9\r\n\
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n\
Accept-Language: ja,en-us;q=0.7,en;q=0.3\r\n\
Accept-Encoding: gzip,deflate\r\n\
Accept-Charset: Shift_JIS,utf-8;q=0.7,*;q=0.7\r\n\
Keep-Alive: 115\r\n\
Connection: keep-alive\r\n\
Cookie: wp_ozh_wsa_visits=2; wp_ozh_wsa_visit_lasttime=xxxxxxxxxx; __utma=xxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.x; __utmz=xxxxxxxxx.xxxxxxxxxx.x.x.utmccn=(referral)|utmcsr=reader.livedoor.com|utmcct=/reader/|utmcmd=referral|padding=under256\r\n\r\n";
mod _new {
use super::*;
pub fn parse_headers(src: &mut BytesMut) -> usize {
let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
let mut req = httparse::Request::new(&mut parsed);
match req.parse(src).unwrap() {
httparse::Status::Complete(_len) => {
HeaderIndex::record(src, req.headers, &mut headers);
req.headers.len()
}
_ => unreachable!(),
}
}
}
mod _original {
use super::*;
use std::mem::MaybeUninit;
pub fn parse_headers(src: &mut BytesMut) -> usize {
let mut headers: [HeaderIndex; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() };
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() };
let mut req = httparse::Request::new(&mut parsed);
match req.parse(src).unwrap() {
httparse::Status::Complete(_len) => {
HeaderIndex::record(src, req.headers, &mut headers);
req.headers.len()
}
_ => unreachable!(),
}
}
}

View file

@ -1,89 +0,0 @@
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
fn bench_write_camel_case(c: &mut Criterion) {
let mut group = c.benchmark_group("write_camel_case");
let names = ["connection", "Transfer-Encoding", "transfer-encoding"];
for &i in &names {
let bts = i.as_bytes();
group.bench_with_input(BenchmarkId::new("Original", i), bts, |b, bts| {
b.iter(|| {
let mut buf = black_box([0; 24]);
_original::write_camel_case(black_box(bts), &mut buf)
});
});
group.bench_with_input(BenchmarkId::new("New", i), bts, |b, bts| {
b.iter(|| {
let mut buf = black_box([0; 24]);
_new::write_camel_case(black_box(bts), &mut buf)
});
});
}
group.finish();
}
criterion_group!(benches, bench_write_camel_case);
criterion_main!(benches);
mod _new {
pub fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
// first copy entire (potentially wrong) slice to output
buffer[..value.len()].copy_from_slice(value);
let mut iter = value.iter();
// first character should be uppercase
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[0] = c & 0b1101_1111;
}
// track 1 ahead of the current position since that's the location being assigned to
let mut index = 2;
// remaining characters after hyphens should also be uppercase
while let Some(&c) = iter.next() {
if c == b'-' {
// advance iter by one and uppercase if needed
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[index] = c & 0b1101_1111;
}
}
index += 1;
}
}
}
mod _original {
pub fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
let mut index = 0;
let key = value;
let mut key_iter = key.iter();
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
} else {
return;
}
while let Some(c) = key_iter.next() {
buffer[index] = *c;
index += 1;
if *c == b'-' {
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
}
}
}
}
}

View file

@ -0,0 +1,27 @@
use actix_http::HttpService;
use actix_server::Server;
use actix_service::map_config;
use actix_web::{dev::AppConfig, get, App};
#[get("/")]
async fn index() -> &'static str {
"Hello, world. From Actix Web!"
}
#[tokio::main(flavor = "current_thread")]
async fn main() -> std::io::Result<()> {
Server::build()
.bind("hello-world", "127.0.0.1:8080", || {
// construct actix-web app
let app = App::new().service(index);
HttpService::build()
// pass the app to service builder
// map_config is used to map App's configuration to ServiceBuilder
// h1 will configure server to only use HTTP/1.1
.h1(map_config(app, |_| AppConfig::default()))
.tcp()
})?
.run()
.await
}

View file

@ -0,0 +1,27 @@
use std::{convert::Infallible, io, time::Duration};
use actix_http::{HttpService, Request, Response, StatusCode};
use actix_server::Server;
use once_cell::sync::Lazy;
static STR: Lazy<String> = Lazy::new(|| "HELLO WORLD ".repeat(20));
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("dispatcher-benchmark", ("127.0.0.1", 8080), || {
HttpService::build()
.client_request_timeout(Duration::from_secs(1))
.finish(|_: Request| async move {
let mut res = Response::build(StatusCode::OK);
Ok::<_, Infallible>(res.body(&**STR))
})
.tcp()
})?
// limiting number of workers so that bench client is not sharing as many resources
.workers(4)
.run()
.await
}

View file

@ -1,21 +1,22 @@
use std::io;
use std::{io, time::Duration};
use actix_http::{http::StatusCode, Error, HttpService, Request, Response};
use actix_http::{Error, HttpService, Request, Response, StatusCode};
use actix_server::Server;
use bytes::BytesMut;
use futures_util::StreamExt as _;
use http::header::HeaderValue;
use log::info;
use tracing::info;
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("echo", "127.0.0.1:8080", || {
.bind("echo", ("127.0.0.1", 8080), || {
HttpService::build()
.client_timeout(1000)
.client_disconnect(1000)
.client_request_timeout(Duration::from_secs(1))
.client_disconnect_timeout(Duration::from_secs(1))
// handles HTTP/1.1 and HTTP/2
.finish(|mut req: Request| async move {
let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await {
@ -23,15 +24,14 @@ async fn main() -> io::Result<()> {
}
info!("request body: {:?}", body);
Ok::<_, Error>(
Response::build(StatusCode::OK)
.insert_header((
"x-head",
HeaderValue::from_static("dummy value!"),
))
.body(body),
)
let res = Response::build(StatusCode::OK)
.insert_header(("x-head", HeaderValue::from_static("dummy value!")))
.body(body);
Ok::<_, Error>(res)
})
// No TLS
.tcp()
})?
.run()

View file

@ -1,31 +1,34 @@
use std::io;
use actix_http::{body::Body, http::HeaderValue, http::StatusCode};
use actix_http::{Error, HttpService, Request, Response};
use actix_server::Server;
use bytes::BytesMut;
use futures_util::StreamExt as _;
use log::info;
use actix_http::{
body::{BodyStream, MessageBody},
header, Error, HttpMessage, HttpService, Request, Response, StatusCode,
};
async fn handle_request(mut req: Request) -> Result<Response<Body>, Error> {
let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await {
body.extend_from_slice(&item?)
async fn handle_request(mut req: Request) -> Result<Response<impl MessageBody>, Error> {
let mut res = Response::build(StatusCode::OK);
if let Some(ct) = req.headers().get(header::CONTENT_TYPE) {
res.insert_header((header::CONTENT_TYPE, ct));
}
info!("request body: {:?}", body);
Ok(Response::build(StatusCode::OK)
.insert_header(("x-head", HeaderValue::from_static("dummy value!")))
.body(body))
// echo request payload stream as (chunked) response body
let res = res.message_body(BodyStream::new(req.payload().take()))?;
Ok(res)
}
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("echo", "127.0.0.1:8080", || {
HttpService::build().finish(handle_request).tcp()
actix_server::Server::build()
.bind("echo", ("127.0.0.1", 8080), || {
HttpService::build()
// handles HTTP/1.1 only
.h1(handle_request)
// No TLS
.tcp()
})?
.run()
.await

View file

@ -0,0 +1,34 @@
//! An example that supports automatic selection of plaintext h1/h2c connections.
//!
//! Notably, both of the following commands will work.
//! ```console
//! $ curl --http1.1 'http://localhost:8080/'
//! $ curl --http2-prior-knowledge 'http://localhost:8080/'
//! ```
use std::{convert::Infallible, io};
use actix_http::{body::BodyStream, HttpService, Request, Response, StatusCode};
use actix_server::Server;
#[tokio::main(flavor = "current_thread")]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("h2c-detect", ("127.0.0.1", 8080), || {
HttpService::build()
.finish(|_req: Request| async move {
Ok::<_, Infallible>(Response::build(StatusCode::OK).body(BodyStream::new(
futures_util::stream::iter([
Ok::<_, String>("123".into()),
Err("wertyuikmnbvcxdfty6t".to_owned()),
]),
)))
})
.tcp_auto_h2c()
})?
.workers(2)
.run()
.await
}

View file

@ -0,0 +1,25 @@
use std::{convert::Infallible, io};
use actix_http::{HttpService, Request, Response, StatusCode};
use actix_server::Server;
use once_cell::sync::Lazy;
static STR: Lazy<String> = Lazy::new(|| "HELLO WORLD ".repeat(100));
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("h2spec", ("127.0.0.1", 8080), || {
HttpService::build()
.h2(|_: Request| async move {
let mut res = Response::build(StatusCode::OK);
Ok::<_, Infallible>(res.body(&**STR))
})
.tcp()
})?
.workers(4)
.run()
.await
}

View file

@ -1,28 +1,31 @@
use std::io;
use std::{convert::Infallible, io, time::Duration};
use actix_http::{http::StatusCode, HttpService, Response};
use actix_http::{header::HeaderValue, HttpService, Request, Response, StatusCode};
use actix_server::Server;
use actix_utils::future;
use http::header::HeaderValue;
use log::info;
use tracing::info;
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("hello-world", "127.0.0.1:8080", || {
.bind("hello-world", ("127.0.0.1", 8080), || {
HttpService::build()
.client_timeout(1000)
.client_disconnect(1000)
.finish(|_req| {
info!("{:?}", _req);
.client_request_timeout(Duration::from_secs(1))
.client_disconnect_timeout(Duration::from_secs(1))
.on_connect_ext(|_, ext| {
ext.insert(42u32);
})
.finish(|req: Request| async move {
info!("{:?}", req);
let mut res = Response::build(StatusCode::OK);
res.insert_header((
"x-head",
HeaderValue::from_static("dummy value!"),
));
future::ok::<_, ()>(res.body("Hello world!"))
res.insert_header(("x-head", HeaderValue::from_static("dummy value!")));
let forty_two = req.conn_data::<u32>().unwrap().to_string();
res.insert_header(("x-forty-two", HeaderValue::from_str(&forty_two).unwrap()));
Ok::<_, Infallible>(res.body("Hello world!"))
})
.tcp()
})?

View file

@ -0,0 +1,41 @@
//! Example showing response body (chunked) stream erroring.
//!
//! Test using `nc` or `curl`.
//! ```sh
//! $ curl -vN 127.0.0.1:8080
//! $ echo 'GET / HTTP/1.1\n\n' | nc 127.0.0.1 8080
//! ```
use std::{convert::Infallible, io, time::Duration};
use actix_http::{body::BodyStream, HttpService, Response};
use actix_server::Server;
use async_stream::stream;
use bytes::Bytes;
use tracing::info;
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("streaming-error", ("127.0.0.1", 8080), || {
HttpService::build()
.finish(|req| async move {
info!("{:?}", req);
let res = Response::ok();
Ok::<_, Infallible>(res.set_body(BodyStream::new(stream! {
yield Ok(Bytes::from("123"));
yield Ok(Bytes::from("456"));
actix_rt::time::sleep(Duration::from_millis(1000)).await;
yield Err(io::Error::new(io::ErrorKind::Other, ""));
})))
})
.tcp()
})?
.run()
.await
}

View file

@ -0,0 +1,76 @@
//! Demonstrates TLS configuration (via Rustls) for HTTP/1.1 and HTTP/2 connections.
//!
//! Test using cURL:
//!
//! ```console
//! $ curl --insecure https://127.0.0.1:8443
//! Hello World!
//! Protocol: HTTP/2.0
//!
//! $ curl --insecure --http1.1 https://127.0.0.1:8443
//! Hello World!
//! Protocol: HTTP/1.1
//! ```
extern crate tls_rustls_023 as rustls;
use std::io;
use actix_http::{Error, HttpService, Request, Response};
use actix_utils::future::ok;
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
tracing::info!("starting HTTP server at https://127.0.0.1:8443");
actix_server::Server::build()
.bind("echo", ("127.0.0.1", 8443), || {
HttpService::build()
.finish(|req: Request| {
let body = format!(
"Hello World!\n\
Protocol: {:?}",
req.head().version
);
ok::<_, Error>(Response::ok().set_body(body))
})
.rustls_0_23(rustls_config())
})?
.run()
.await
}
fn rustls_config() -> rustls::ServerConfig {
let rcgen::CertifiedKey { cert, key_pair } =
rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap();
let cert_file = cert.pem();
let key_file = key_pair.serialize_pem();
let cert_file = &mut io::BufReader::new(cert_file.as_bytes());
let key_file = &mut io::BufReader::new(key_file.as_bytes());
let cert_chain = rustls_pemfile::certs(cert_file)
.collect::<Result<Vec<_>, _>>()
.unwrap();
let mut keys = rustls_pemfile::pkcs8_private_keys(key_file)
.collect::<Result<Vec<_>, _>>()
.unwrap();
let mut config = rustls::ServerConfig::builder()
.with_no_client_auth()
.with_single_cert(
cert_chain,
rustls::pki_types::PrivateKeyDer::Pkcs8(keys.remove(0)),
)
.unwrap();
const H1_ALPN: &[u8] = b"http/1.1";
const H2_ALPN: &[u8] = b"h2";
config.alpn_protocols.push(H2_ALPN.to_vec());
config.alpn_protocols.push(H1_ALPN.to_vec());
config
}

View file

@ -1,7 +1,7 @@
//! Sets up a WebSocket server over TCP and TLS.
//! Sends a heartbeat message every 4 seconds but does not respond to any incoming frames.
extern crate tls_rustls as rustls;
extern crate tls_rustls_023 as rustls;
use std::{
io,
@ -10,13 +10,14 @@ use std::{
time::Duration,
};
use actix_codec::Encoder;
use actix_http::{body::BodyStream, error::Error, ws, HttpService, Request, Response};
use actix_rt::time::{interval, Interval};
use actix_server::Server;
use bytes::{Bytes, BytesMut};
use bytestring::ByteString;
use futures_core::{ready, Stream};
use tokio_util::codec::Encoder;
use tracing::{info, trace};
#[actix_rt::main]
async fn main() -> io::Result<()> {
@ -27,20 +28,22 @@ async fn main() -> io::Result<()> {
HttpService::build().h1(handler).tcp()
})?
.bind("tls", ("127.0.0.1", 8443), || {
HttpService::build().finish(handler).rustls(tls_config())
HttpService::build()
.finish(handler)
.rustls_0_23(tls_config())
})?
.run()
.await
}
async fn handler(req: Request) -> Result<Response<BodyStream<Heartbeat>>, Error> {
log::info!("handshaking");
info!("handshaking");
let mut res = ws::handshake(req.head())?;
// handshake will always fail under HTTP/2
log::info!("responding");
Ok(res.message_body(BodyStream::new(Heartbeat::new(ws::Codec::new())))?)
info!("responding");
res.message_body(BodyStream::new(Heartbeat::new(ws::Codec::new())))
}
struct Heartbeat {
@ -60,11 +63,8 @@ impl Heartbeat {
impl Stream for Heartbeat {
type Item = Result<Bytes, Error>;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
log::trace!("poll");
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
trace!("poll");
ready!(self.as_mut().interval.poll_tick(cx));
@ -85,22 +85,31 @@ impl Stream for Heartbeat {
fn tls_config() -> rustls::ServerConfig {
use std::io::BufReader;
use rustls::{
internal::pemfile::{certs, pkcs8_private_keys},
NoClientAuth, ServerConfig,
};
use rustls_pemfile::{certs, pkcs8_private_keys};
let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_owned()]).unwrap();
let cert_file = cert.serialize_pem().unwrap();
let key_file = cert.serialize_private_key_pem();
let rcgen::CertifiedKey { cert, key_pair } =
rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap();
let cert_file = cert.pem();
let key_file = key_pair.serialize_pem();
let mut config = ServerConfig::new(NoClientAuth::new());
let cert_file = &mut BufReader::new(cert_file.as_bytes());
let key_file = &mut BufReader::new(key_file.as_bytes());
let cert_chain = certs(cert_file).unwrap();
let mut keys = pkcs8_private_keys(key_file).unwrap();
config.set_single_cert(cert_chain, keys.remove(0)).unwrap();
let cert_chain = certs(cert_file).collect::<Result<Vec<_>, _>>().unwrap();
let mut keys = pkcs8_private_keys(key_file)
.collect::<Result<Vec<_>, _>>()
.unwrap();
let mut config = rustls::ServerConfig::builder()
.with_no_client_auth()
.with_single_cert(
cert_chain,
rustls::pki_types::PrivateKeyDer::Pkcs8(keys.remove(0)),
)
.unwrap();
config.alpn_protocols.push(b"http/1.1".to_vec());
config.alpn_protocols.push(b"h2".to_vec());
config
}

View file

@ -1,5 +0,0 @@
max_width = 89
reorder_imports = true
#wrap_comments = true
#fn_args_density = "Compressed"
#use_small_heuristics = false

View file

@ -1,230 +0,0 @@
use std::{
borrow::Cow,
error::Error as StdError,
fmt, mem,
pin::Pin,
task::{Context, Poll},
};
use bytes::{Bytes, BytesMut};
use futures_core::{ready, Stream};
use crate::error::Error;
use super::{BodySize, BodyStream, MessageBody, MessageBodyMapErr, SizedStream};
pub type Body = AnyBody;
/// Represents various types of HTTP message body.
pub enum AnyBody {
/// Empty response. `Content-Length` header is not set.
None,
/// Zero sized response body. `Content-Length` header is set to `0`.
Empty,
/// Specific response body.
Bytes(Bytes),
/// Generic message body.
Message(BoxAnyBody),
}
impl AnyBody {
/// Create body from slice (copy)
pub fn from_slice(s: &[u8]) -> Self {
Self::Bytes(Bytes::copy_from_slice(s))
}
/// Create body from generic message body.
pub fn from_message<B>(body: B) -> Self
where
B: MessageBody + 'static,
B::Error: Into<Box<dyn StdError + 'static>>,
{
Self::Message(BoxAnyBody::from_body(body))
}
}
impl MessageBody for AnyBody {
type Error = Error;
fn size(&self) -> BodySize {
match self {
AnyBody::None => BodySize::None,
AnyBody::Empty => BodySize::Empty,
AnyBody::Bytes(ref bin) => BodySize::Sized(bin.len() as u64),
AnyBody::Message(ref body) => body.size(),
}
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match self.get_mut() {
AnyBody::None => Poll::Ready(None),
AnyBody::Empty => Poll::Ready(None),
AnyBody::Bytes(ref mut bin) => {
let len = bin.len();
if len == 0 {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(bin))))
}
}
// TODO: MSRV 1.51: poll_map_err
AnyBody::Message(body) => match ready!(body.as_pin_mut().poll_next(cx)) {
Some(Err(err)) => Poll::Ready(Some(Err(err.into()))),
Some(Ok(val)) => Poll::Ready(Some(Ok(val))),
None => Poll::Ready(None),
},
}
}
}
impl PartialEq for AnyBody {
fn eq(&self, other: &Body) -> bool {
match *self {
AnyBody::None => matches!(*other, AnyBody::None),
AnyBody::Empty => matches!(*other, AnyBody::Empty),
AnyBody::Bytes(ref b) => match *other {
AnyBody::Bytes(ref b2) => b == b2,
_ => false,
},
AnyBody::Message(_) => false,
}
}
}
impl fmt::Debug for AnyBody {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
AnyBody::None => write!(f, "AnyBody::None"),
AnyBody::Empty => write!(f, "AnyBody::Empty"),
AnyBody::Bytes(ref b) => write!(f, "AnyBody::Bytes({:?})", b),
AnyBody::Message(_) => write!(f, "AnyBody::Message(_)"),
}
}
}
impl From<&'static str> for AnyBody {
fn from(s: &'static str) -> Body {
AnyBody::Bytes(Bytes::from_static(s.as_ref()))
}
}
impl From<&'static [u8]> for AnyBody {
fn from(s: &'static [u8]) -> Body {
AnyBody::Bytes(Bytes::from_static(s))
}
}
impl From<Vec<u8>> for AnyBody {
fn from(vec: Vec<u8>) -> Body {
AnyBody::Bytes(Bytes::from(vec))
}
}
impl From<String> for AnyBody {
fn from(s: String) -> Body {
s.into_bytes().into()
}
}
impl From<&'_ String> for AnyBody {
fn from(s: &String) -> Body {
AnyBody::Bytes(Bytes::copy_from_slice(AsRef::<[u8]>::as_ref(&s)))
}
}
impl From<Cow<'_, str>> for AnyBody {
fn from(s: Cow<'_, str>) -> Body {
match s {
Cow::Owned(s) => AnyBody::from(s),
Cow::Borrowed(s) => {
AnyBody::Bytes(Bytes::copy_from_slice(AsRef::<[u8]>::as_ref(s)))
}
}
}
}
impl From<Bytes> for AnyBody {
fn from(s: Bytes) -> Body {
AnyBody::Bytes(s)
}
}
impl From<BytesMut> for AnyBody {
fn from(s: BytesMut) -> Body {
AnyBody::Bytes(s.freeze())
}
}
impl<S> From<SizedStream<S>> for AnyBody
where
S: Stream<Item = Result<Bytes, Error>> + 'static,
{
fn from(s: SizedStream<S>) -> Body {
AnyBody::from_message(s)
}
}
impl<S, E> From<BodyStream<S>> for AnyBody
where
S: Stream<Item = Result<Bytes, E>> + 'static,
E: Into<Error> + 'static,
{
fn from(s: BodyStream<S>) -> Body {
AnyBody::from_message(s)
}
}
/// A boxed message body with boxed errors.
pub struct BoxAnyBody(Pin<Box<dyn MessageBody<Error = Box<dyn StdError + 'static>>>>);
impl BoxAnyBody {
/// Boxes a `MessageBody` and any errors it generates.
pub fn from_body<B>(body: B) -> Self
where
B: MessageBody + 'static,
B::Error: Into<Box<dyn StdError + 'static>>,
{
let body = MessageBodyMapErr::new(body, Into::into);
Self(Box::pin(body))
}
/// Returns a mutable pinned reference to the inner message body type.
pub fn as_pin_mut(
&mut self,
) -> Pin<&mut (dyn MessageBody<Error = Box<dyn StdError + 'static>>)> {
self.0.as_mut()
}
}
impl fmt::Debug for BoxAnyBody {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("BoxAnyBody(dyn MessageBody)")
}
}
impl MessageBody for BoxAnyBody {
type Error = Error;
fn size(&self) -> BodySize {
self.0.size()
}
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
// TODO: MSRV 1.51: poll_map_err
match ready!(self.0.as_mut().poll_next(cx)) {
Some(Err(err)) => Poll::Ready(Some(Err(err.into()))),
Some(Ok(val)) => Poll::Ready(Some(Ok(val))),
None => Poll::Ready(None),
}
}
}

View file

@ -1,4 +1,5 @@
use std::{
error::Error as StdError,
pin::Pin,
task::{Context, Poll},
};
@ -7,8 +8,6 @@ use bytes::Bytes;
use futures_core::{ready, Stream};
use pin_project_lite::pin_project;
use crate::error::Error;
use super::{BodySize, MessageBody};
pin_project! {
@ -21,11 +20,14 @@ pin_project! {
}
}
// TODO: from_infallible method
impl<S, E> BodyStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Error>,
E: Into<Box<dyn StdError>> + 'static,
{
#[inline]
pub fn new(stream: S) -> Self {
BodyStream { stream }
}
@ -34,19 +36,19 @@ where
impl<S, E> MessageBody for BodyStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Error>,
E: Into<Box<dyn StdError>> + 'static,
{
type Error = Error;
type Error = E;
#[inline]
fn size(&self) -> BodySize {
BodySize::Stream
}
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped to prevent [`BodyStream`]'s transmission being
/// ended on a zero-length chunk, but rather proceed until the underlying
/// [`Stream`] ends.
/// Empty values are skipped to prevent [`BodyStream`]'s transmission being ended on a
/// zero-length chunk, but rather proceed until the underlying [`Stream`] ends.
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
@ -56,7 +58,7 @@ where
let chunk = match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
opt => opt.map(|res| res.map_err(Into::into)),
opt => opt,
};
return Poll::Ready(chunk);
@ -66,19 +68,39 @@ where
#[cfg(test)]
mod tests {
use actix_rt::pin;
use std::{convert::Infallible, time::Duration};
use actix_rt::{
pin,
time::{sleep, Sleep},
};
use actix_utils::future::poll_fn;
use futures_util::stream;
use derive_more::{Display, Error};
use futures_core::ready;
use futures_util::{stream, FutureExt as _};
use pin_project_lite::pin_project;
use static_assertions::{assert_impl_all, assert_not_impl_any};
use super::*;
use crate::body::to_bytes;
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, crate::Error>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Repeat<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);
assert_not_impl_any!(BodyStream<stream::Empty<Bytes>>: MessageBody);
assert_not_impl_any!(BodyStream<stream::Repeat<Bytes>>: MessageBody);
// crate::Error is not Clone
assert_not_impl_any!(BodyStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = BodyStream::new(stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok(Bytes::from(v)) as Result<Bytes, ()>),
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
));
pin!(body);
@ -103,9 +125,90 @@ mod tests {
let body = BodyStream::new(stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok(Bytes::from(v)) as Result<Bytes, ()>),
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
));
assert_eq!(to_bytes(body).await.ok(), Some(Bytes::from("12")));
}
#[derive(Debug, Display, Error)]
#[display(fmt = "stream error")]
struct StreamErr;
#[actix_rt::test]
async fn stream_immediate_error() {
let body = BodyStream::new(stream::once(async { Err(StreamErr) }));
assert!(matches!(to_bytes(body).await, Err(StreamErr)));
}
#[actix_rt::test]
async fn stream_string_error() {
// `&'static str` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = BodyStream::new(stream::once(async { Err("stringy error") }));
assert!(matches!(to_bytes(body).await, Err("stringy error")));
}
#[actix_rt::test]
async fn stream_boxed_error() {
// `Box<dyn Error>` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = BodyStream::new(stream::once(async {
Err(Box::<dyn StdError>::from("stringy error"))
}));
assert_eq!(
to_bytes(body).await.unwrap_err().to_string(),
"stringy error"
);
}
#[actix_rt::test]
async fn stream_delayed_error() {
let body = BodyStream::new(stream::iter(vec![Ok(Bytes::from("1")), Err(StreamErr)]));
assert!(matches!(to_bytes(body).await, Err(StreamErr)));
pin_project! {
#[derive(Debug)]
#[project = TimeDelayStreamProj]
enum TimeDelayStream {
Start,
Sleep { delay: Pin<Box<Sleep>> },
Done,
}
}
impl Stream for TimeDelayStream {
type Item = Result<Bytes, StreamErr>;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
match self.as_mut().get_mut() {
TimeDelayStream::Start => {
let sleep = sleep(Duration::from_millis(1));
self.as_mut().set(TimeDelayStream::Sleep {
delay: Box::pin(sleep),
});
cx.waker().wake_by_ref();
Poll::Pending
}
TimeDelayStream::Sleep { ref mut delay } => {
ready!(delay.poll_unpin(cx));
self.set(TimeDelayStream::Done);
cx.waker().wake_by_ref();
Poll::Pending
}
TimeDelayStream::Done => Poll::Ready(Some(Err(StreamErr))),
}
}
}
let body = BodyStream::new(TimeDelayStream::Start);
assert!(matches!(to_bytes(body).await, Err(StreamErr)));
}
}

View file

@ -0,0 +1,121 @@
use std::{
error::Error as StdError,
fmt,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use super::{BodySize, MessageBody, MessageBodyMapErr};
use crate::body;
/// A boxed message body with boxed errors.
#[derive(Debug)]
pub struct BoxBody(BoxBodyInner);
enum BoxBodyInner {
None(body::None),
Bytes(Bytes),
Stream(Pin<Box<dyn MessageBody<Error = Box<dyn StdError>>>>),
}
impl fmt::Debug for BoxBodyInner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::None(arg0) => f.debug_tuple("None").field(arg0).finish(),
Self::Bytes(arg0) => f.debug_tuple("Bytes").field(arg0).finish(),
Self::Stream(_) => f.debug_tuple("Stream").field(&"dyn MessageBody").finish(),
}
}
}
impl BoxBody {
/// Boxes body type, erasing type information.
///
/// If the body type to wrap is unknown or generic, it is better to use [`MessageBody::boxed`] to
/// avoid double boxing.
#[inline]
pub fn new<B>(body: B) -> Self
where
B: MessageBody + 'static,
{
match body.size() {
BodySize::None => Self(BoxBodyInner::None(body::None)),
_ => match body.try_into_bytes() {
Ok(bytes) => Self(BoxBodyInner::Bytes(bytes)),
Err(body) => {
let body = MessageBodyMapErr::new(body, Into::into);
Self(BoxBodyInner::Stream(Box::pin(body)))
}
},
}
}
/// Returns a mutable pinned reference to the inner message body type.
#[inline]
pub fn as_pin_mut(&mut self) -> Pin<&mut Self> {
Pin::new(self)
}
}
impl MessageBody for BoxBody {
type Error = Box<dyn StdError>;
#[inline]
fn size(&self) -> BodySize {
match &self.0 {
BoxBodyInner::None(none) => none.size(),
BoxBodyInner::Bytes(bytes) => bytes.size(),
BoxBodyInner::Stream(stream) => stream.size(),
}
}
#[inline]
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match &mut self.0 {
BoxBodyInner::None(body) => Pin::new(body).poll_next(cx).map_err(|err| match err {}),
BoxBodyInner::Bytes(body) => Pin::new(body).poll_next(cx).map_err(|err| match err {}),
BoxBodyInner::Stream(body) => Pin::new(body).poll_next(cx),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
match self.0 {
BoxBodyInner::None(body) => Ok(body.try_into_bytes().unwrap()),
BoxBodyInner::Bytes(body) => Ok(body.try_into_bytes().unwrap()),
_ => Err(self),
}
}
#[inline]
fn boxed(self) -> BoxBody {
self
}
}
#[cfg(test)]
mod tests {
use static_assertions::{assert_impl_all, assert_not_impl_any};
use super::*;
use crate::body::to_bytes;
assert_impl_all!(BoxBody: fmt::Debug, MessageBody, Unpin);
assert_not_impl_any!(BoxBody: Send, Sync);
#[actix_rt::test]
async fn nested_boxed_body() {
let body = Bytes::from_static(&[1, 2, 3]);
let boxed_body = BoxBody::new(BoxBody::new(body));
assert_eq!(
to_bytes(boxed_body).await.unwrap(),
Bytes::from(vec![1, 2, 3]),
);
}
}
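As a rough illustration of the double-boxing note in `BoxBody::new`'s docs: prefer `MessageBody::boxed` when the body type is generic, since it is a no-op for a body that is already a `BoxBody`, while `BoxBody::new` would wrap the existing box again. The helper below is hypothetical, not crate API:

```rust
use actix_http::body::{BodySize, BoxBody, MessageBody};

// Hypothetical helper: erase a generic body type without risking double boxing.
fn erase_body<B>(body: B) -> BoxBody
where
    B: MessageBody + 'static,
{
    // `boxed()` returns `self` unchanged if `B` is already `BoxBody`.
    body.boxed()
}

fn main() {
    let boxed = erase_body(String::from("hello"));
    assert_eq!(boxed.size(), BodySize::Sized(5));

    // feeding a `BoxBody` back through keeps it as a single box
    let still_boxed = erase_body(boxed);
    assert_eq!(still_boxed.size(), BodySize::Sized(5));
}
```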

View file

@ -0,0 +1,122 @@
use std::{
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use pin_project_lite::pin_project;
use super::{BodySize, BoxBody, MessageBody};
use crate::Error;
pin_project! {
/// An "either" type specialized for body types.
///
/// It is common, in middleware especially, to conditionally return an inner service's unknown/
/// generic body `B` type or return early with a new response. This type's "right" variant
/// defaults to `BoxBody` since error responses are the common case.
///
/// For example, middleware will often have `type Response = ServiceResponse<EitherBody<B>>`.
/// This means that the inner service's response body type maps to the `Left` variant and the
/// middleware's own error responses use the default `Right` variant of `BoxBody`. Of course,
/// there's no reason it couldn't use `EitherBody<B, String>` instead if its alternative
/// responses have a known type.
#[project = EitherBodyProj]
#[derive(Debug, Clone)]
pub enum EitherBody<L, R = BoxBody> {
/// A body of type `L`.
Left { #[pin] body: L },
/// A body of type `R`.
Right { #[pin] body: R },
}
}
impl<L> EitherBody<L, BoxBody> {
/// Creates new `EitherBody` left variant with a boxed right variant.
///
/// If the expected `R` type will be inferred and is not `BoxBody` then use the
/// [`left`](Self::left) constructor instead.
#[inline]
pub fn new(body: L) -> Self {
Self::Left { body }
}
}
impl<L, R> EitherBody<L, R> {
/// Creates new `EitherBody` using left variant.
#[inline]
pub fn left(body: L) -> Self {
Self::Left { body }
}
/// Creates new `EitherBody` using right variant.
#[inline]
pub fn right(body: R) -> Self {
Self::Right { body }
}
}
impl<L, R> MessageBody for EitherBody<L, R>
where
L: MessageBody + 'static,
R: MessageBody + 'static,
{
type Error = Error;
#[inline]
fn size(&self) -> BodySize {
match self {
EitherBody::Left { body } => body.size(),
EitherBody::Right { body } => body.size(),
}
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match self.project() {
EitherBodyProj::Left { body } => body
.poll_next(cx)
.map_err(|err| Error::new_body().with_cause(err)),
EitherBodyProj::Right { body } => body
.poll_next(cx)
.map_err(|err| Error::new_body().with_cause(err)),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
match self {
EitherBody::Left { body } => body
.try_into_bytes()
.map_err(|body| EitherBody::Left { body }),
EitherBody::Right { body } => body
.try_into_bytes()
.map_err(|body| EitherBody::Right { body }),
}
}
#[inline]
fn boxed(self) -> BoxBody {
match self {
EitherBody::Left { body } => body.boxed(),
EitherBody::Right { body } => body.boxed(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn type_parameter_inference() {
let _body: EitherBody<(), _> = EitherBody::new(());
let _body: EitherBody<_, ()> = EitherBody::left(());
let _body: EitherBody<(), _> = EitherBody::right(());
}
}
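A rough sketch of the pattern described in the `EitherBody` docs above; `wrap_or_reject` is a hypothetical helper rather than middleware plumbing from this crate. It either passes an inner body through as the `Left` variant or substitutes a boxed early response as the default `Right` variant:

```rust
use actix_http::{
    body::{BoxBody, EitherBody, MessageBody},
    Response, StatusCode,
};

// Hypothetical helper mirroring the middleware pattern: keep the inner
// service's body type, or fall back to a boxed early response.
fn wrap_or_reject<B>(inner: Option<Response<B>>) -> Response<EitherBody<B>>
where
    B: MessageBody + 'static,
{
    match inner {
        // pass the inner body through as the `Left` variant
        Some(res) => res.map_body(|_head, body| EitherBody::left(body)),
        // the early response uses the default `Right = BoxBody` variant
        None => Response::with_body(
            StatusCode::UNAUTHORIZED,
            EitherBody::right(BoxBody::new("denied")),
        ),
    }
}

fn main() {
    let passed = wrap_or_reject(Some(Response::with_body(StatusCode::OK, "hello")));
    assert_eq!(passed.status(), StatusCode::OK);

    let rejected = wrap_or_reject::<&'static str>(None);
    assert_eq!(rejected.status(), StatusCode::UNAUTHORIZED);
}
```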

View file

@ -2,6 +2,7 @@
use std::{
convert::Infallible,
error::Error as StdError,
mem,
pin::Pin,
task::{Context, Poll},
@ -11,172 +12,459 @@ use bytes::{Bytes, BytesMut};
use futures_core::ready;
use pin_project_lite::pin_project;
use crate::error::Error;
use super::{BodySize, BoxBody};
use super::BodySize;
/// An interface for response bodies.
/// An interface for types that can be used as a response body.
///
/// It is not usually necessary to create custom body types; this trait is already [implemented for
/// a large number of sensible body types](#foreign-impls), including:
///
/// - Empty body: `()`
/// - Text-based: `String`, `&'static str`, [`ByteString`](https://docs.rs/bytestring/1)
/// - Byte-based: `Bytes`, `BytesMut`, `Vec<u8>`, `&'static [u8]`
/// - Streams: [`BodyStream`](super::BodyStream), [`SizedStream`](super::SizedStream)
///
/// # Examples
/// ```
/// # use std::convert::Infallible;
/// # use std::task::{Poll, Context};
/// # use std::pin::Pin;
/// # use bytes::Bytes;
/// # use actix_http::body::{BodySize, MessageBody};
/// struct Repeat {
/// chunk: String,
/// n_times: usize,
/// }
///
/// impl MessageBody for Repeat {
/// type Error = Infallible;
///
/// fn size(&self) -> BodySize {
/// BodySize::Sized((self.chunk.len() * self.n_times) as u64)
/// }
///
/// fn poll_next(
/// self: Pin<&mut Self>,
/// _cx: &mut Context<'_>,
/// ) -> Poll<Option<Result<Bytes, Self::Error>>> {
/// let payload_string = self.chunk.repeat(self.n_times);
/// let payload_bytes = Bytes::from(payload_string);
/// Poll::Ready(Some(Ok(payload_bytes)))
/// }
/// }
/// ```
pub trait MessageBody {
type Error;
/// The type of error that will be returned if streaming the body fails.
///
/// Since it is not appropriate to generate a response mid-stream, it only requires `Error` for
/// internal use and logging.
type Error: Into<Box<dyn StdError>>;
/// Body size hint.
///
/// If [`BodySize::None`] is returned, optimizations that skip reading the body are allowed.
fn size(&self) -> BodySize;
/// Attempt to pull out the next chunk of body bytes.
///
/// # Return Value
/// Similar to the `Stream` interface, there are several possible return values, each indicating
/// a distinct state:
/// - `Poll::Pending` means that this body's next chunk is not ready yet. Implementations must
/// ensure that the current task will be notified when the next chunk may be ready.
/// - `Poll::Ready(Some(val))` means that the body has successfully produced a chunk, `val`,
/// and may produce further values on subsequent `poll_next` calls.
/// - `Poll::Ready(None)` means that the body is complete, and `poll_next` should not be
/// invoked again.
///
/// # Panics
/// Once a body is complete (i.e., `poll_next` returned `Ready(None)`), calling its `poll_next`
/// method again may panic, block forever, or cause other kinds of problems; this trait places
/// no requirements on the effects of such a call. However, as the `poll_next` method is not
/// marked `unsafe`, Rust's usual rules apply: calls must never cause UB, regardless of the body's state.
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>>;
}
impl MessageBody for () {
type Error = Infallible;
fn size(&self) -> BodySize {
BodySize::Empty
/// Try to convert into the complete chunk of body bytes.
///
/// Override this method if the complete body can be trivially extracted. This is useful for
/// optimizations where `poll_next` calls can be avoided.
///
/// Body types with [`BodySize::None`] are allowed to return empty `Bytes`. However, if calling
/// this method, it is recommended to check `size` first and return early.
///
/// # Errors
/// The default implementation will error and return the original type back to the caller for
/// further use.
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self>
where
Self: Sized,
{
Err(self)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Poll::Ready(None)
/// Wraps this body into a `BoxBody`.
///
/// No-op when called on a `BoxBody`, meaning there is no risk of double boxing when calling
/// this on a generic `MessageBody`. Prefer this over [`BoxBody::new`] when a boxed body
/// is required.
#[inline]
fn boxed(self) -> BoxBody
where
Self: Sized + 'static,
{
BoxBody::new(self)
}
}
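To make the new `try_into_bytes` and `boxed` hooks concrete, here is a minimal sketch of a hand-rolled body that buffers its whole payload up front; `PreloadedBody` is a hypothetical type and not part of this diff:

```rust
use std::{
    convert::Infallible,
    mem,
    pin::Pin,
    task::{Context, Poll},
};

use actix_http::body::{BodySize, MessageBody};
use bytes::Bytes;

// Hypothetical body type whose complete payload is already in memory.
struct PreloadedBody {
    payload: Bytes,
}

impl MessageBody for PreloadedBody {
    type Error = Infallible;

    fn size(&self) -> BodySize {
        BodySize::Sized(self.payload.len() as u64)
    }

    fn poll_next(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Bytes, Self::Error>>> {
        // hand out the buffered payload once, then signal completion
        let payload = mem::take(&mut self.get_mut().payload);
        if payload.is_empty() {
            Poll::Ready(None)
        } else {
            Poll::Ready(Some(Ok(payload)))
        }
    }

    // The complete payload is available, so the polling machinery can be skipped.
    fn try_into_bytes(self) -> Result<Bytes, Self> {
        Ok(self.payload)
    }
}
```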
impl<B> MessageBody for Box<B>
where
B: MessageBody + Unpin,
B::Error: Into<Error>,
{
type Error = B::Error;
fn size(&self) -> BodySize {
self.as_ref().size()
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Pin::new(self.get_mut().as_mut()).poll_next(cx)
}
}
impl<B> MessageBody for Pin<Box<B>>
where
B: MessageBody,
B::Error: Into<Error>,
{
type Error = B::Error;
fn size(&self) -> BodySize {
self.as_ref().size()
}
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
self.as_mut().poll_next(cx)
}
}
impl MessageBody for Bytes {
type Error = Infallible;
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()))))
}
}
}
impl MessageBody for BytesMut {
type Error = Infallible;
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()).freeze())))
}
}
}
impl MessageBody for &'static str {
type Error = Infallible;
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from_static(
mem::take(self.get_mut()).as_ref(),
))))
}
}
}
impl MessageBody for Vec<u8> {
type Error = Infallible;
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from(mem::take(self.get_mut())))))
}
}
}
impl MessageBody for String {
type Error = Infallible;
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from(
mem::take(self.get_mut()).into_bytes(),
))))
}
}
}
mod foreign_impls {
use std::{borrow::Cow, ops::DerefMut};
use super::*;
impl<B> MessageBody for &mut B
where
B: MessageBody + Unpin + ?Sized,
{
type Error = B::Error;
fn size(&self) -> BodySize {
(**self).size()
}
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Pin::new(&mut **self).poll_next(cx)
}
}
impl MessageBody for Infallible {
type Error = Infallible;
fn size(&self) -> BodySize {
match *self {}
}
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match *self {}
}
}
impl MessageBody for () {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(0)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Poll::Ready(None)
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::new())
}
}
impl<B> MessageBody for Box<B>
where
B: MessageBody + Unpin + ?Sized,
{
type Error = B::Error;
#[inline]
fn size(&self) -> BodySize {
self.as_ref().size()
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Pin::new(self.get_mut().as_mut()).poll_next(cx)
}
}
impl<T, B> MessageBody for Pin<T>
where
T: DerefMut<Target = B> + Unpin,
B: MessageBody + ?Sized,
{
type Error = B::Error;
#[inline]
fn size(&self) -> BodySize {
self.as_ref().size()
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
self.get_mut().as_mut().poll_next(cx)
}
}
impl MessageBody for &'static [u8] {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from_static(mem::take(self.get_mut())))))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from_static(self))
}
}
impl MessageBody for Bytes {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()))))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(self)
}
}
impl MessageBody for BytesMut {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()).freeze())))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(self.freeze())
}
}
impl MessageBody for Vec<u8> {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()).into())))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from(self))
}
}
impl MessageBody for Cow<'static, [u8]> {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
let bytes = match mem::take(self.get_mut()) {
Cow::Borrowed(b) => Bytes::from_static(b),
Cow::Owned(b) => Bytes::from(b),
};
Poll::Ready(Some(Ok(bytes)))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
match self {
Cow::Borrowed(b) => Ok(Bytes::from_static(b)),
Cow::Owned(b) => Ok(Bytes::from(b)),
}
}
}
impl MessageBody for &'static str {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
let string = mem::take(self.get_mut());
let bytes = Bytes::from_static(string.as_bytes());
Poll::Ready(Some(Ok(bytes)))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from_static(self.as_bytes()))
}
}
impl MessageBody for String {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
let string = mem::take(self.get_mut());
Poll::Ready(Some(Ok(Bytes::from(string))))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from(self))
}
}
impl MessageBody for Cow<'static, str> {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
let bytes = match mem::take(self.get_mut()) {
Cow::Borrowed(s) => Bytes::from_static(s.as_bytes()),
Cow::Owned(s) => Bytes::from(s.into_bytes()),
};
Poll::Ready(Some(Ok(bytes)))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
match self {
Cow::Borrowed(s) => Ok(Bytes::from_static(s.as_bytes())),
Cow::Owned(s) => Ok(Bytes::from(s.into_bytes())),
}
}
}
impl MessageBody for bytestring::ByteString {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
let string = mem::take(self.get_mut());
Poll::Ready(Some(Ok(string.into_bytes())))
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(self.into_bytes())
}
}
}
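As a quick illustration of the impls above (a minimal sketch, not part of the diff, assuming the `actix_http::body` re-exports shown later in this change): any of these foreign types can be used directly as a response body and type-erased through the trait's `boxed()` method.

    use actix_http::body::{BodySize, BoxBody, MessageBody as _};

    // `&'static str`, `Bytes`, `Vec<u8>`, etc. all implement `MessageBody`...
    let body = "hello";
    assert_eq!(body.size(), BodySize::Sized(5));

    // ...and can be type-erased into a `BoxBody` via `MessageBody::boxed()`.
    let boxed: BoxBody = body.boxed();
    assert_eq!(boxed.size(), BodySize::Sized(5));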
@ -206,9 +494,11 @@ impl<B, F, E> MessageBody for MessageBodyMapErr<B, F>
where
B: MessageBody,
F: FnOnce(B::Error) -> E,
E: Into<Box<dyn StdError>>,
{
type Error = E;
#[inline]
fn size(&self) -> BodySize {
self.body.size()
}
@ -229,4 +519,222 @@ where
None => Poll::Ready(None),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
let Self { body, mapper } = self;
body.try_into_bytes().map_err(|body| Self { body, mapper })
}
}
#[cfg(test)]
mod tests {
use actix_rt::pin;
use actix_utils::future::poll_fn;
use futures_util::stream;
use super::*;
use crate::body::{self, EitherBody};
macro_rules! assert_poll_next {
($pin:expr, $exp:expr) => {
assert_eq!(
poll_fn(|cx| $pin.as_mut().poll_next(cx))
.await
.unwrap() // unwrap option
.unwrap(), // unwrap result
$exp
);
};
}
macro_rules! assert_poll_next_none {
($pin:expr) => {
assert!(poll_fn(|cx| $pin.as_mut().poll_next(cx)).await.is_none());
};
}
#[allow(unused_allocation)] // triggered by `Box::new(()).size()`
#[actix_rt::test]
async fn boxing_equivalence() {
assert_eq!(().size(), BodySize::Sized(0));
assert_eq!(().size(), Box::new(()).size());
assert_eq!(().size(), Box::pin(()).size());
let pl = Box::new(());
pin!(pl);
assert_poll_next_none!(pl);
let mut pl = Box::pin(());
assert_poll_next_none!(pl);
}
#[actix_rt::test]
async fn mut_equivalence() {
assert_eq!(().size(), BodySize::Sized(0));
assert_eq!(().size(), (&(&mut ())).size());
let pl = &mut ();
pin!(pl);
assert_poll_next_none!(pl);
let pl = &mut Box::new(());
pin!(pl);
assert_poll_next_none!(pl);
let mut body = body::SizedStream::new(
8,
stream::iter([
Ok::<_, std::io::Error>(Bytes::from("1234")),
Ok(Bytes::from("5678")),
]),
);
let body = &mut body;
assert_eq!(body.size(), BodySize::Sized(8));
pin!(body);
assert_poll_next!(body, Bytes::from_static(b"1234"));
assert_poll_next!(body, Bytes::from_static(b"5678"));
assert_poll_next_none!(body);
}
#[allow(clippy::let_unit_value)]
#[actix_rt::test]
async fn test_unit() {
let pl = ();
assert_eq!(pl.size(), BodySize::Sized(0));
pin!(pl);
assert_poll_next_none!(pl);
}
#[actix_rt::test]
async fn test_static_str() {
assert_eq!("".size(), BodySize::Sized(0));
assert_eq!("test".size(), BodySize::Sized(4));
let pl = "test";
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_static_bytes() {
assert_eq!(b"".as_ref().size(), BodySize::Sized(0));
assert_eq!(b"test".as_ref().size(), BodySize::Sized(4));
let pl = b"test".as_ref();
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_vec() {
assert_eq!(vec![0; 0].size(), BodySize::Sized(0));
assert_eq!(Vec::from("test").size(), BodySize::Sized(4));
let pl = Vec::from("test");
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_bytes() {
assert_eq!(Bytes::new().size(), BodySize::Sized(0));
assert_eq!(Bytes::from_static(b"test").size(), BodySize::Sized(4));
let pl = Bytes::from_static(b"test");
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_bytes_mut() {
assert_eq!(BytesMut::new().size(), BodySize::Sized(0));
assert_eq!(BytesMut::from(b"test".as_ref()).size(), BodySize::Sized(4));
let pl = BytesMut::from("test");
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_string() {
assert_eq!(String::new().size(), BodySize::Sized(0));
assert_eq!("test".to_owned().size(), BodySize::Sized(4));
let pl = "test".to_owned();
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn complete_body_combinators() {
let body = Bytes::from_static(b"test");
let body = BoxBody::new(body);
let body = EitherBody::<_, ()>::left(body);
let body = EitherBody::<(), _>::right(body);
// Do not support try_into_bytes:
// let body = Box::new(body);
// let body = Box::pin(body);
assert_eq!(body.try_into_bytes().unwrap(), Bytes::from("test"));
}
#[actix_rt::test]
async fn complete_body_combinators_poll() {
let body = Bytes::from_static(b"test");
let body = BoxBody::new(body);
let body = EitherBody::<_, ()>::left(body);
let body = EitherBody::<(), _>::right(body);
let mut body = body;
assert_eq!(body.size(), BodySize::Sized(4));
assert_poll_next!(Pin::new(&mut body), Bytes::from("test"));
assert_poll_next_none!(Pin::new(&mut body));
}
#[actix_rt::test]
async fn none_body_combinators() {
fn none_body() -> BoxBody {
let body = body::None;
let body = BoxBody::new(body);
let body = EitherBody::<_, ()>::left(body);
let body = EitherBody::<(), _>::right(body);
body.boxed()
}
assert_eq!(none_body().size(), BodySize::None);
assert_eq!(none_body().try_into_bytes().unwrap(), Bytes::new());
assert_poll_next_none!(Pin::new(&mut none_body()));
}
// down-casting used to be done with a method on MessageBody trait
// test is kept to demonstrate equivalence of Any trait
#[actix_rt::test]
async fn test_body_casting() {
let mut body = String::from("hello cast");
// let mut resp_body: &mut dyn MessageBody<Error = Error> = &mut body;
let resp_body: &mut dyn std::any::Any = &mut body;
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast");
let body = &mut resp_body.downcast_mut::<String>().unwrap();
body.push('!');
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast!");
let not_body = resp_body.downcast_ref::<()>();
assert!(not_body.is_none());
}
#[actix_rt::test]
async fn non_owning_to_bytes() {
let mut body = BoxBody::new(());
let bytes = body::to_bytes(&mut body).await.unwrap();
assert_eq!(bytes, Bytes::new());
let mut body = body::BodyStream::new(stream::iter([
Ok::<_, std::io::Error>(Bytes::from("1234")),
Ok(Bytes::from("5678")),
]));
let bytes = body::to_bytes(&mut body).await.unwrap();
assert_eq!(bytes, Bytes::from_static(b"12345678"));
}
}
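For readers following this file: the sketch below (illustrative only, not part of the diff) shows what a custom implementation of the trait defined above can look like. The `OneShot` type is hypothetical; it yields a single chunk and then signals end-of-stream.

    use std::{convert::Infallible, pin::Pin, task::{Context, Poll}};

    use actix_http::body::{BodySize, MessageBody};
    use bytes::Bytes;

    // Hypothetical body type that produces its payload in one chunk.
    struct OneShot(Option<Bytes>);

    impl MessageBody for OneShot {
        type Error = Infallible;

        fn size(&self) -> BodySize {
            // Report exactly how many bytes are still to be produced.
            BodySize::Sized(self.0.as_ref().map_or(0, |b| b.len() as u64))
        }

        fn poll_next(
            self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
        ) -> Poll<Option<Result<Bytes, Self::Error>>> {
            // Yield the chunk once, then return `None` to end the stream.
            Poll::Ready(self.get_mut().0.take().map(Ok))
        }
    }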

View file

@ -1,259 +1,27 @@
//! Traits and structures to aid consuming and writing HTTP payloads.
//!
//! "Body" and "payload" are used somewhat interchangeably in this documentation.
// Though the spec kinda reads like "payload" is the possibly-transfer-encoded part of the message
// and the "body" is the intended possibly-decoded version of that.
use std::task::Poll;
use actix_rt::pin;
use actix_utils::future::poll_fn;
use bytes::{Bytes, BytesMut};
use futures_core::ready;
#[allow(clippy::module_inception)]
mod body;
mod body_stream;
mod boxed;
mod either;
mod message_body;
mod response_body;
mod none;
mod size;
mod sized_stream;
mod utils;
pub use self::body::{AnyBody, Body, BoxAnyBody};
pub use self::body_stream::BodyStream;
pub use self::message_body::MessageBody;
pub(crate) use self::message_body::MessageBodyMapErr;
pub use self::response_body::ResponseBody;
pub use self::size::BodySize;
pub use self::sized_stream::SizedStream;
/// Collects the body produced by a `MessageBody` implementation into `Bytes`.
///
/// Any errors produced by the body stream are returned immediately.
///
/// # Examples
/// ```
/// use actix_http::body::{Body, to_bytes};
/// use bytes::Bytes;
///
/// # async fn test_to_bytes() {
/// let body = Body::Empty;
/// let bytes = to_bytes(body).await.unwrap();
/// assert!(bytes.is_empty());
///
/// let body = Body::Bytes(Bytes::from_static(b"123"));
/// let bytes = to_bytes(body).await.unwrap();
/// assert_eq!(bytes, b"123"[..]);
/// # }
/// ```
pub async fn to_bytes<B: MessageBody>(body: B) -> Result<Bytes, B::Error> {
let cap = match body.size() {
BodySize::None | BodySize::Empty | BodySize::Sized(0) => return Ok(Bytes::new()),
BodySize::Sized(size) => size as usize,
BodySize::Stream => 32_768,
};
let mut buf = BytesMut::with_capacity(cap);
pin!(body);
poll_fn(|cx| loop {
let body = body.as_mut();
match ready!(body.poll_next(cx)) {
Some(Ok(bytes)) => buf.extend_from_slice(&*bytes),
None => return Poll::Ready(Ok(())),
Some(Err(err)) => return Poll::Ready(Err(err)),
}
})
.await?;
Ok(buf.freeze())
}
#[cfg(test)]
mod tests {
use std::pin::Pin;
use actix_rt::pin;
use actix_utils::future::poll_fn;
use bytes::{Bytes, BytesMut};
use super::*;
impl Body {
pub(crate) fn get_ref(&self) -> &[u8] {
match *self {
Body::Bytes(ref bin) => &bin,
_ => panic!(),
}
}
}
#[actix_rt::test]
async fn test_static_str() {
assert_eq!(Body::from("").size(), BodySize::Sized(0));
assert_eq!(Body::from("test").size(), BodySize::Sized(4));
assert_eq!(Body::from("test").get_ref(), b"test");
assert_eq!("test".size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| Pin::new(&mut "test").poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_static_bytes() {
assert_eq!(Body::from(b"test".as_ref()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b"test".as_ref()).get_ref(), b"test");
assert_eq!(
Body::from_slice(b"test".as_ref()).size(),
BodySize::Sized(4)
);
assert_eq!(Body::from_slice(b"test".as_ref()).get_ref(), b"test");
let sb = Bytes::from(&b"test"[..]);
pin!(sb);
assert_eq!(sb.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| sb.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_vec() {
assert_eq!(Body::from(Vec::from("test")).size(), BodySize::Sized(4));
assert_eq!(Body::from(Vec::from("test")).get_ref(), b"test");
let test_vec = Vec::from("test");
pin!(test_vec);
assert_eq!(test_vec.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| test_vec.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_bytes() {
let b = Bytes::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
pin!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_bytes_mut() {
let b = BytesMut::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
pin!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_string() {
let b = "test".to_owned();
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
assert_eq!(Body::from(&b).size(), BodySize::Sized(4));
assert_eq!(Body::from(&b).get_ref(), b"test");
pin!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_unit() {
assert_eq!(().size(), BodySize::Empty);
assert!(poll_fn(|cx| Pin::new(&mut ()).poll_next(cx))
.await
.is_none());
}
#[actix_rt::test]
async fn test_box() {
let val = Box::new(());
pin!(val);
assert_eq!(val.size(), BodySize::Empty);
assert!(poll_fn(|cx| val.as_mut().poll_next(cx)).await.is_none());
}
#[actix_rt::test]
async fn test_body_eq() {
assert!(
Body::Bytes(Bytes::from_static(b"1"))
== Body::Bytes(Bytes::from_static(b"1"))
);
assert!(Body::Bytes(Bytes::from_static(b"1")) != Body::None);
}
#[actix_rt::test]
async fn test_body_debug() {
assert!(format!("{:?}", Body::None).contains("Body::None"));
assert!(format!("{:?}", Body::Empty).contains("Body::Empty"));
assert!(format!("{:?}", Body::Bytes(Bytes::from_static(b"1"))).contains('1'));
}
#[actix_rt::test]
async fn test_serde_json() {
use serde_json::{json, Value};
assert_eq!(
Body::from(serde_json::to_vec(&Value::String("test".to_owned())).unwrap())
.size(),
BodySize::Sized(6)
);
assert_eq!(
Body::from(serde_json::to_vec(&json!({"test-key":"test-value"})).unwrap())
.size(),
BodySize::Sized(25)
);
}
// down-casting used to be done with a method on MessageBody trait
// test is kept to demonstrate equivalence of Any trait
#[actix_rt::test]
async fn test_body_casting() {
let mut body = String::from("hello cast");
// let mut resp_body: &mut dyn MessageBody<Error = Error> = &mut body;
let resp_body: &mut dyn std::any::Any = &mut body;
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast");
let body = &mut resp_body.downcast_mut::<String>().unwrap();
body.push('!');
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast!");
let not_body = resp_body.downcast_ref::<()>();
assert!(not_body.is_none());
}
#[actix_rt::test]
async fn test_to_bytes() {
let body = Body::Empty;
let bytes = to_bytes(body).await.unwrap();
assert!(bytes.is_empty());
let body = Body::Bytes(Bytes::from_static(b"123"));
let bytes = to_bytes(body).await.unwrap();
assert_eq!(bytes, b"123"[..]);
}
}
pub use self::{
body_stream::BodyStream,
boxed::BoxBody,
either::EitherBody,
message_body::MessageBody,
none::None,
size::BodySize,
sized_stream::SizedStream,
utils::{to_bytes, to_bytes_limited, BodyLimitExceeded},
};


View file

@ -0,0 +1,51 @@
use std::{
convert::Infallible,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use super::{BodySize, MessageBody};
/// Body type for responses that forbid payloads.
///
/// This is distinct from an "empty" response which _would_ contain a `Content-Length` header.
/// For an "empty" body, use `()` or `Bytes::new()`.
///
/// For example, the HTTP spec forbids a payload to be sent with a `204 No Content` response.
/// In this case, the payload (or lack thereof) is implicit from the status code, so a
/// `Content-Length` header is not required.
#[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive]
pub struct None;
impl None {
/// Constructs new "none" body.
#[inline]
pub fn new() -> Self {
None
}
}
impl MessageBody for None {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::None
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Poll::Ready(Option::None)
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::new())
}
}
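A minimal sketch (not part of the diff) contrasting this type with an "empty" body, using only the APIs shown in this change:

    use actix_http::body::{self, BodySize, MessageBody as _};
    use bytes::Bytes;

    // `body::None` means "no payload at all": no Content-Length header is written.
    assert_eq!(body::None::new().size(), BodySize::None);

    // An "empty" body still advertises `Content-Length: 0`.
    assert_eq!(().size(), BodySize::Sized(0));
    assert_eq!(Bytes::new().size(), BodySize::Sized(0));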

View file

@ -1,89 +0,0 @@
use std::{
mem,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use futures_core::{ready, Stream};
use pin_project::pin_project;
use crate::error::Error;
use super::{Body, BodySize, MessageBody};
#[pin_project(project = ResponseBodyProj)]
pub enum ResponseBody<B> {
Body(#[pin] B),
Other(Body),
}
impl ResponseBody<Body> {
pub fn into_body<B>(self) -> ResponseBody<B> {
match self {
ResponseBody::Body(b) => ResponseBody::Other(b),
ResponseBody::Other(b) => ResponseBody::Other(b),
}
}
}
impl<B> ResponseBody<B> {
pub fn take_body(&mut self) -> ResponseBody<B> {
mem::replace(self, ResponseBody::Other(Body::None))
}
}
impl<B: MessageBody> ResponseBody<B> {
pub fn as_ref(&self) -> Option<&B> {
if let ResponseBody::Body(ref b) = self {
Some(b)
} else {
None
}
}
}
impl<B> MessageBody for ResponseBody<B>
where
B: MessageBody,
B::Error: Into<Error>,
{
type Error = Error;
fn size(&self) -> BodySize {
match self {
ResponseBody::Body(ref body) => body.size(),
ResponseBody::Other(ref body) => body.size(),
}
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Stream::poll_next(self, cx)
}
}
impl<B> Stream for ResponseBody<B>
where
B: MessageBody,
B::Error: Into<Error>,
{
type Item = Result<Bytes, Error>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
match self.project() {
// TODO: MSRV 1.51: poll_map_err
ResponseBodyProj::Body(body) => match ready!(body.poll_next(cx)) {
Some(Err(err)) => Poll::Ready(Some(Err(err.into()))),
Some(Ok(val)) => Poll::Ready(Some(Ok(val))),
None => Poll::Ready(None),
},
ResponseBodyProj::Other(body) => Pin::new(body).poll_next(cx),
}
}
}

View file

@ -1,19 +1,16 @@
/// Body size hint.
#[derive(Debug, Clone, Copy, PartialEq)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BodySize {
/// Absence of body can be assumed from method or status code.
/// Implicitly empty body.
///
/// Will skip writing Content-Length header.
/// Will omit the Content-Length header. Used for responses to certain methods (e.g., `HEAD`) or
/// with particular status codes (e.g., 204 No Content). Consumers that read this as a body size
/// hint are allowed to make optimizations that skip reading or writing the payload.
None,
/// Zero size body.
///
/// Will write `Content-Length: 0` header.
Empty,
/// Known size body.
///
/// Will write `Content-Length: N` header. `Sized(0)` is treated the same as `Empty`.
/// Will write `Content-Length: N` header.
Sized(u64),
/// Unknown size body.
@ -23,18 +20,22 @@ pub enum BodySize {
}
impl BodySize {
/// Returns true if size hint indicates no or empty body.
/// Equivalent to `BodySize::Sized(0)`.
pub const ZERO: Self = Self::Sized(0);
/// Returns true if size hint indicates omitted or empty body.
///
/// Streams will return false because it cannot be known without reading the stream.
///
/// ```
/// # use actix_http::body::BodySize;
/// assert!(BodySize::None.is_eof());
/// assert!(BodySize::Empty.is_eof());
/// assert!(BodySize::Sized(0).is_eof());
///
/// assert!(!BodySize::Sized(64).is_eof());
/// assert!(!BodySize::Stream.is_eof());
/// ```
pub fn is_eof(&self) -> bool {
matches!(self, BodySize::None | BodySize::Empty | BodySize::Sized(0))
matches!(self, BodySize::None | BodySize::Sized(0))
}
}
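A small usage note (illustrative only): the new `ZERO` constant is just a named `Sized(0)`, and `is_eof` treats omitted and zero-length bodies alike.

    use actix_http::body::BodySize;

    assert_eq!(BodySize::ZERO, BodySize::Sized(0));
    assert!(BodySize::ZERO.is_eof());
    assert!(BodySize::None.is_eof());
    assert!(!BodySize::Stream.is_eof());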

View file

@ -1,4 +1,5 @@
use std::{
error::Error as StdError,
pin::Pin,
task::{Context, Poll},
};
@ -7,15 +8,13 @@ use bytes::Bytes;
use futures_core::{ready, Stream};
use pin_project_lite::pin_project;
use crate::error::Error;
use super::{BodySize, MessageBody};
pin_project! {
/// Known sized streaming response wrapper.
///
/// This body implementation should be used if total size of stream is known. Data get sent as is
/// without using transfer encoding.
/// This body implementation should be used if the total size of the stream is known. Data is
/// sent as-is, without using chunked transfer encoding.
pub struct SizedStream<S> {
size: u64,
#[pin]
@ -23,23 +22,29 @@ pin_project! {
}
}
impl<S> SizedStream<S>
impl<S, E> SizedStream<S>
where
S: Stream<Item = Result<Bytes, Error>>,
S: Stream<Item = Result<Bytes, E>>,
E: Into<Box<dyn StdError>> + 'static,
{
#[inline]
pub fn new(size: u64, stream: S) -> Self {
SizedStream { size, stream }
}
}
impl<S> MessageBody for SizedStream<S>
where
S: Stream<Item = Result<Bytes, Error>>,
{
type Error = Error;
// TODO: from_infallible method
impl<S, E> MessageBody for SizedStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Box<dyn StdError>> + 'static,
{
type Error = E;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.size as u64)
BodySize::Sized(self.size)
}
/// Attempts to pull out the next value of the underlying [`Stream`].
@ -66,18 +71,36 @@ where
#[cfg(test)]
mod tests {
use std::convert::Infallible;
use actix_rt::pin;
use actix_utils::future::poll_fn;
use futures_util::stream;
use static_assertions::{assert_impl_all, assert_not_impl_any};
use super::*;
use crate::body::to_bytes;
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, crate::Error>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Repeat<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);
assert_not_impl_any!(SizedStream<stream::Empty<Bytes>>: MessageBody);
assert_not_impl_any!(SizedStream<stream::Repeat<Bytes>>: MessageBody);
// crate::Error is not Clone
assert_not_impl_any!(SizedStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = SizedStream::new(
2,
stream::iter(["1", "", "2"].iter().map(|&v| Ok(Bytes::from(v)))),
stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
),
);
pin!(body);
@ -103,9 +126,46 @@ mod tests {
async fn read_to_bytes() {
let body = SizedStream::new(
2,
stream::iter(["1", "", "2"].iter().map(|&v| Ok(Bytes::from(v)))),
stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
),
);
assert_eq!(to_bytes(body).await.ok(), Some(Bytes::from("12")));
}
#[actix_rt::test]
async fn stream_string_error() {
// `&'static str` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = SizedStream::new(0, stream::once(async { Err("stringy error") }));
assert_eq!(to_bytes(body).await, Ok(Bytes::new()));
let body = SizedStream::new(1, stream::once(async { Err("stringy error") }));
assert!(matches!(to_bytes(body).await, Err("stringy error")));
}
#[actix_rt::test]
async fn stream_boxed_error() {
// `Box<dyn Error>` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = SizedStream::new(
0,
stream::once(async { Err(Box::<dyn StdError>::from("stringy error")) }),
);
assert_eq!(to_bytes(body).await.unwrap(), Bytes::new());
let body = SizedStream::new(
1,
stream::once(async { Err(Box::<dyn StdError>::from("stringy error")) }),
);
assert_eq!(
to_bytes(body).await.unwrap_err().to_string(),
"stringy error"
);
}
}
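A minimal construction sketch (not part of the diff): with the relaxed bound above, the stream's error type only needs `Into<Box<dyn Error>>`, so `Infallible` works directly, as the `assert_impl_all!` checks confirm.

    use std::convert::Infallible;

    use actix_http::body::{BodySize, MessageBody as _, SizedStream};
    use bytes::Bytes;
    use futures_util::stream;

    // The declared size becomes the Content-Length hint; the stream itself is sent as-is.
    let body = SizedStream::new(
        5,
        stream::iter([Ok::<_, Infallible>(Bytes::from_static(b"hello"))]),
    );
    assert_eq!(body.size(), BodySize::Sized(5));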

View file

@ -0,0 +1,198 @@
use std::task::Poll;
use actix_rt::pin;
use actix_utils::future::poll_fn;
use bytes::{Bytes, BytesMut};
use derive_more::{Display, Error};
use futures_core::ready;
use super::{BodySize, MessageBody};
/// Collects all the bytes produced by `body`.
///
/// Any errors produced by the body stream are returned immediately.
///
/// Consider using [`to_bytes_limited`] instead to protect against memory exhaustion.
///
/// # Examples
///
/// ```
/// use actix_http::body::{self, to_bytes};
/// use bytes::Bytes;
///
/// # actix_rt::System::new().block_on(async {
/// let body = body::None::new();
/// let bytes = to_bytes(body).await.unwrap();
/// assert!(bytes.is_empty());
///
/// let body = Bytes::from_static(b"123");
/// let bytes = to_bytes(body).await.unwrap();
/// assert_eq!(bytes, "123");
/// # });
/// ```
pub async fn to_bytes<B: MessageBody>(body: B) -> Result<Bytes, B::Error> {
to_bytes_limited(body, usize::MAX)
.await
.expect("body should never yield more than usize::MAX bytes")
}
/// Error type returned from [`to_bytes_limited`] when body produced exceeds limit.
#[derive(Debug, Display, Error)]
#[display(fmt = "limit exceeded while collecting body bytes")]
#[non_exhaustive]
pub struct BodyLimitExceeded;
/// Collects the bytes produced by `body`, up to `limit` bytes.
///
/// If a chunk read from `poll_next` causes the total number of bytes read to exceed `limit`, an
/// `Err(BodyLimitExceeded)` is returned.
///
/// Any errors produced by the body stream are returned immediately as `Ok(Err(B::Error))`.
///
/// # Examples
///
/// ```
/// use actix_http::body::{self, to_bytes_limited};
/// use bytes::Bytes;
///
/// # actix_rt::System::new().block_on(async {
/// let body = body::None::new();
/// let bytes = to_bytes_limited(body, 10).await.unwrap().unwrap();
/// assert!(bytes.is_empty());
///
/// let body = Bytes::from_static(b"123");
/// let bytes = to_bytes_limited(body, 10).await.unwrap().unwrap();
/// assert_eq!(bytes, "123");
///
/// let body = Bytes::from_static(b"123");
/// assert!(to_bytes_limited(body, 2).await.is_err());
/// # });
/// ```
pub async fn to_bytes_limited<B: MessageBody>(
body: B,
limit: usize,
) -> Result<Result<Bytes, B::Error>, BodyLimitExceeded> {
/// Sensible default (32kB) for initial, bounded allocation when collecting body bytes.
const INITIAL_ALLOC_BYTES: usize = 32 * 1024;
let cap = match body.size() {
BodySize::None | BodySize::Sized(0) => return Ok(Ok(Bytes::new())),
BodySize::Sized(size) if size as usize > limit => return Err(BodyLimitExceeded),
BodySize::Sized(size) => (size as usize).min(INITIAL_ALLOC_BYTES),
BodySize::Stream => INITIAL_ALLOC_BYTES,
};
let mut exceeded_limit = false;
let mut buf = BytesMut::with_capacity(cap);
pin!(body);
match poll_fn(|cx| loop {
let body = body.as_mut();
match ready!(body.poll_next(cx)) {
Some(Ok(bytes)) => {
// if limit is exceeded...
if buf.len() + bytes.len() > limit {
// ...set flag to true and break out of poll_fn
exceeded_limit = true;
return Poll::Ready(Ok(()));
}
buf.extend_from_slice(&bytes)
}
None => return Poll::Ready(Ok(())),
Some(Err(err)) => return Poll::Ready(Err(err)),
}
})
.await
{
// propagate error returned from body poll
Err(err) => Ok(Err(err)),
// limit was exceeded while reading body
Ok(()) if exceeded_limit => Err(BodyLimitExceeded),
// otherwise return body buffer
Ok(()) => Ok(Ok(buf.freeze())),
}
}
#[cfg(test)]
mod tests {
use std::io;
use futures_util::{stream, StreamExt as _};
use super::*;
use crate::{
body::{BodyStream, SizedStream},
Error,
};
#[actix_rt::test]
async fn to_bytes_complete() {
let bytes = to_bytes(()).await.unwrap();
assert!(bytes.is_empty());
let body = Bytes::from_static(b"123");
let bytes = to_bytes(body).await.unwrap();
assert_eq!(bytes, b"123"[..]);
}
#[actix_rt::test]
async fn to_bytes_streams() {
let stream = stream::iter(vec![Bytes::from_static(b"123"), Bytes::from_static(b"abc")])
.map(Ok::<_, Error>);
let body = BodyStream::new(stream);
let bytes = to_bytes(body).await.unwrap();
assert_eq!(bytes, b"123abc"[..]);
}
#[actix_rt::test]
async fn to_bytes_limited_complete() {
let bytes = to_bytes_limited((), 0).await.unwrap().unwrap();
assert!(bytes.is_empty());
let bytes = to_bytes_limited((), 1).await.unwrap().unwrap();
assert!(bytes.is_empty());
assert!(to_bytes_limited(Bytes::from_static(b"12"), 0)
.await
.is_err());
assert!(to_bytes_limited(Bytes::from_static(b"12"), 1)
.await
.is_err());
assert!(to_bytes_limited(Bytes::from_static(b"12"), 2).await.is_ok());
assert!(to_bytes_limited(Bytes::from_static(b"12"), 3).await.is_ok());
}
#[actix_rt::test]
async fn to_bytes_limited_streams() {
// hinting a larger body fails
let body = SizedStream::new(8, stream::empty().map(Ok::<_, Error>));
assert!(to_bytes_limited(body, 3).await.is_err());
// hinting a smaller body is okay
let body = SizedStream::new(3, stream::empty().map(Ok::<_, Error>));
assert!(to_bytes_limited(body, 3).await.unwrap().unwrap().is_empty());
// hinting a smaller body then returning a larger one fails
let stream = stream::iter(vec![Bytes::from_static(b"1234")]).map(Ok::<_, Error>);
let body = SizedStream::new(3, stream);
assert!(to_bytes_limited(body, 3).await.is_err());
let stream = stream::iter(vec![Bytes::from_static(b"123"), Bytes::from_static(b"abc")])
.map(Ok::<_, Error>);
let body = BodyStream::new(stream);
assert!(to_bytes_limited(body, 3).await.is_err());
}
#[actix_rt::test]
async fn to_body_limit_error() {
let err_stream = stream::once(async { Err(io::Error::new(io::ErrorKind::Other, "")) });
let body = SizedStream::new(8, err_stream);
// not too big, but propagates error from body stream
assert!(to_bytes_limited(body, 10).await.unwrap().is_err());
}
}
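A hedged usage sketch (not part of the diff, run inside an async context): the nested result of `to_bytes_limited` separates "limit exceeded" from "the body itself failed".

    use actix_http::body::to_bytes_limited;
    use bytes::Bytes;

    // Outer Result: was the limit respected? Inner Result: did the body stream error?
    match to_bytes_limited(Bytes::from_static(b"123"), 2).await {
        Err(_limit_exceeded) => { /* more than 2 bytes would have been collected */ }
        Ok(Err(_body_err)) => { /* the body stream itself failed */ }
        Ok(Ok(bytes)) => { /* collected successfully within the limit */ let _ = bytes; }
    }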

View file

@ -1,28 +1,22 @@
use std::marker::PhantomData;
use std::rc::Rc;
use std::{fmt, net};
use std::{fmt, marker::PhantomData, net, rc::Rc, time::Duration};
use actix_codec::Framed;
use actix_service::{IntoServiceFactory, Service, ServiceFactory};
use crate::body::MessageBody;
use crate::config::{KeepAlive, ServiceConfig};
use crate::error::Error;
use crate::h1::{Codec, ExpectHandler, H1Service, UpgradeHandler};
use crate::h2::H2Service;
use crate::request::Request;
use crate::response::Response;
use crate::service::HttpService;
use crate::{ConnectCallback, Extensions};
use crate::{
body::{BoxBody, MessageBody},
h1::{self, ExpectHandler, H1Service, UpgradeHandler},
service::HttpService,
ConnectCallback, Extensions, KeepAlive, Request, Response, ServiceConfig,
};
/// A HTTP service builder
/// An HTTP service builder.
///
/// This type can be used to construct an instance of [`HttpService`] through a
/// builder-like pattern.
/// This type can construct an instance of [`HttpService`] through a builder-like pattern.
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> {
keep_alive: KeepAlive,
client_timeout: u64,
client_disconnect: u64,
client_request_timeout: Duration,
client_disconnect_timeout: Duration,
secure: bool,
local_addr: Option<net::SocketAddr>,
expect: X,
@ -31,21 +25,23 @@ pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> {
_phantom: PhantomData<S>,
}
impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler>
impl<T, S> Default for HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler>
where
S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Error> + 'static,
S::Error: Into<Response<BoxBody>> + 'static,
S::InitError: fmt::Debug,
<S::Service as Service<Request>>::Future: 'static,
{
/// Create instance of `ServiceConfigBuilder`
pub fn new() -> Self {
fn default() -> Self {
HttpServiceBuilder {
keep_alive: KeepAlive::Timeout(5),
client_timeout: 5000,
client_disconnect: 0,
// ServiceConfig parts (make sure defaults match)
keep_alive: KeepAlive::default(),
client_request_timeout: Duration::from_secs(5),
client_disconnect_timeout: Duration::ZERO,
secure: false,
local_addr: None,
// dispatcher parts
expect: ExpectHandler,
upgrade: None,
on_connect_ext: None,
@ -57,19 +53,21 @@ where
impl<T, S, X, U> HttpServiceBuilder<T, S, X, U>
where
S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Error> + 'static,
S::Error: Into<Response<BoxBody>> + 'static,
S::InitError: fmt::Debug,
<S::Service as Service<Request>>::Future: 'static,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Error: Into<Error>,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<(Request, Framed<T, Codec>), Config = (), Response = ()>,
U: ServiceFactory<(Request, Framed<T, h1::Codec>), Config = (), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
{
/// Set server keep-alive setting.
/// Set connection keep-alive setting.
///
/// By default keep alive is set to a 5 seconds.
/// Applies to HTTP/1.1 keep-alive and HTTP/2 ping-pong.
///
/// By default keep-alive is 5 seconds.
pub fn keep_alive<W: Into<KeepAlive>>(mut self, val: W) -> Self {
self.keep_alive = val.into();
self
@ -87,33 +85,45 @@ where
self
}
/// Set server client timeout in milliseconds for first request.
/// Set client request timeout (for first request).
///
/// Defines a timeout for reading client request header. If a client does not transmit
/// the entire set headers within this time, the request is terminated with
/// the 408 (Request Time-out) error.
/// Defines a timeout for reading client request header. If the client does not transmit the
/// request head within this duration, the connection is terminated with a `408 Request Timeout`
/// response error.
///
/// To disable timeout set value to 0.
/// A duration of zero disables the timeout.
///
/// By default client timeout is set to 5000 milliseconds.
pub fn client_timeout(mut self, val: u64) -> Self {
self.client_timeout = val;
/// By default, the client timeout is 5 seconds.
pub fn client_request_timeout(mut self, dur: Duration) -> Self {
self.client_request_timeout = dur;
self
}
/// Set server connection disconnect timeout in milliseconds.
#[doc(hidden)]
#[deprecated(since = "3.0.0", note = "Renamed to `client_request_timeout`.")]
pub fn client_timeout(self, dur: Duration) -> Self {
self.client_request_timeout(dur)
}
/// Set client connection disconnect timeout.
///
/// Defines a timeout for the connection disconnect procedure. If the disconnect procedure does
/// not complete within this time, the request is dropped. This timeout affects secure
/// connections.
///
/// To disable timeout set value to 0.
/// A duration of zero disables the timeout.
///
/// By default disconnect timeout is set to 0.
pub fn client_disconnect(mut self, val: u64) -> Self {
self.client_disconnect = val;
/// By default, the disconnect timeout is disabled.
pub fn client_disconnect_timeout(mut self, dur: Duration) -> Self {
self.client_disconnect_timeout = dur;
self
}
#[doc(hidden)]
#[deprecated(since = "3.0.0", note = "Renamed to `client_disconnect_timeout`.")]
pub fn client_disconnect(self, dur: Duration) -> Self {
self.client_disconnect_timeout(dur)
}
/// Provide service for `EXPECT: 100-Continue` support.
///
/// The service is called with requests that contain an `EXPECT` header.
@ -123,13 +133,13 @@ where
where
F: IntoServiceFactory<X1, Request>,
X1: ServiceFactory<Request, Config = (), Response = Request>,
X1::Error: Into<Error>,
X1::Error: Into<Response<BoxBody>>,
X1::InitError: fmt::Debug,
{
HttpServiceBuilder {
keep_alive: self.keep_alive,
client_timeout: self.client_timeout,
client_disconnect: self.client_disconnect,
client_request_timeout: self.client_request_timeout,
client_disconnect_timeout: self.client_disconnect_timeout,
secure: self.secure,
local_addr: self.local_addr,
expect: expect.into_factory(),
@ -145,15 +155,15 @@ where
/// and this service gets called with the original request and framed object.
pub fn upgrade<F, U1>(self, upgrade: F) -> HttpServiceBuilder<T, S, X, U1>
where
F: IntoServiceFactory<U1, (Request, Framed<T, Codec>)>,
U1: ServiceFactory<(Request, Framed<T, Codec>), Config = (), Response = ()>,
F: IntoServiceFactory<U1, (Request, Framed<T, h1::Codec>)>,
U1: ServiceFactory<(Request, Framed<T, h1::Codec>), Config = (), Response = ()>,
U1::Error: fmt::Display,
U1::InitError: fmt::Debug,
{
HttpServiceBuilder {
keep_alive: self.keep_alive,
client_timeout: self.client_timeout,
client_disconnect: self.client_disconnect,
client_request_timeout: self.client_request_timeout,
client_disconnect_timeout: self.client_disconnect_timeout,
secure: self.secure,
local_addr: self.local_addr,
expect: self.expect,
@ -176,19 +186,19 @@ where
self
}
/// Finish service configuration and create a HTTP Service for HTTP/1 protocol.
/// Finish service configuration and create a service for the HTTP/1 protocol.
pub fn h1<F, B>(self, service: F) -> H1Service<T, S, B, X, U>
where
B: MessageBody,
F: IntoServiceFactory<S, Request>,
S::Error: Into<Error>,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
{
let cfg = ServiceConfig::new(
self.keep_alive,
self.client_timeout,
self.client_disconnect,
self.client_request_timeout,
self.client_disconnect_timeout,
self.secure,
self.local_addr,
);
@ -199,26 +209,26 @@ where
.on_connect_ext(self.on_connect_ext)
}
/// Finish service configuration and create a HTTP service for HTTP/2 protocol.
pub fn h2<F, B>(self, service: F) -> H2Service<T, S, B>
/// Finish service configuration and create a service for the HTTP/2 protocol.
#[cfg(feature = "http2")]
pub fn h2<F, B>(self, service: F) -> crate::h2::H2Service<T, S, B>
where
F: IntoServiceFactory<S, Request>,
S::Error: Into<Error> + 'static,
S::Error: Into<Response<BoxBody>> + 'static,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static,
B::Error: Into<Error>,
{
let cfg = ServiceConfig::new(
self.keep_alive,
self.client_timeout,
self.client_disconnect,
self.client_request_timeout,
self.client_disconnect_timeout,
self.secure,
self.local_addr,
);
H2Service::with_config(cfg, service.into_factory())
crate::h2::H2Service::with_config(cfg, service.into_factory())
.on_connect_ext(self.on_connect_ext)
}
@ -226,17 +236,16 @@ where
pub fn finish<F, B>(self, service: F) -> HttpService<T, S, B, X, U>
where
F: IntoServiceFactory<S, Request>,
S::Error: Into<Error> + 'static,
S::Error: Into<Response<BoxBody>> + 'static,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static,
B::Error: Into<Error>,
{
let cfg = ServiceConfig::new(
self.keep_alive,
self.client_timeout,
self.client_disconnect,
self.client_request_timeout,
self.client_disconnect_timeout,
self.secure,
self.local_addr,
);
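A minimal sketch of the renamed builder methods in use (illustrative only; it assumes the `HttpService::build()` entry point and the closure-to-service conversion provided by `actix-service`, neither of which is shown in this hunk):

    use std::time::Duration;

    use actix_http::{HttpService, Request, Response, StatusCode};

    // The timeout setters now take `Duration` instead of integer milliseconds.
    let _svc = HttpService::build()
        .keep_alive(Duration::from_secs(5))
        .client_request_timeout(Duration::from_secs(5))
        .client_disconnect_timeout(Duration::ZERO)
        .finish(|_req: Request| async {
            Ok::<_, actix_http::Error>(Response::new(StatusCode::OK))
        })
        .tcp();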

View file

@ -1,766 +0,0 @@
use std::{
fmt,
future::Future,
net::IpAddr,
pin::Pin,
rc::Rc,
task::{Context, Poll},
time::Duration,
};
use actix_rt::{
net::{ActixStream, TcpStream},
time::{sleep, Sleep},
};
use actix_service::Service;
use actix_tls::connect::{
new_connector, Connect as TcpConnect, ConnectError as TcpConnectError,
Connection as TcpConnection, Resolver,
};
use futures_core::{future::LocalBoxFuture, ready};
use http::Uri;
use pin_project::pin_project;
use super::config::ConnectorConfig;
use super::connection::{Connection, ConnectionIo};
use super::error::ConnectError;
use super::pool::ConnectionPool;
use super::Connect;
use super::Protocol;
#[cfg(feature = "openssl")]
use actix_tls::connect::ssl::openssl::SslConnector as OpensslConnector;
#[cfg(feature = "rustls")]
use actix_tls::connect::ssl::rustls::ClientConfig;
enum SslConnector {
#[allow(dead_code)]
None,
#[cfg(feature = "openssl")]
Openssl(OpensslConnector),
#[cfg(feature = "rustls")]
Rustls(std::sync::Arc<ClientConfig>),
}
/// Manages HTTP client network connectivity.
///
/// The `Connector` type uses a builder-like combinator pattern for service
/// construction that finishes by calling the `.finish()` method.
///
/// ```ignore
/// use std::time::Duration;
/// use actix_http::client::Connector;
///
/// let connector = Connector::new()
/// .timeout(Duration::from_secs(5))
/// .finish();
/// ```
pub struct Connector<T> {
connector: T,
config: ConnectorConfig,
#[allow(dead_code)]
ssl: SslConnector,
}
impl Connector<()> {
#[allow(clippy::new_ret_no_self, clippy::let_unit_value)]
pub fn new() -> Connector<
impl Service<
TcpConnect<Uri>,
Response = TcpConnection<Uri, TcpStream>,
Error = actix_tls::connect::ConnectError,
> + Clone,
> {
Connector {
ssl: Self::build_ssl(vec![b"h2".to_vec(), b"http/1.1".to_vec()]),
connector: new_connector(resolver::resolver()),
config: ConnectorConfig::default(),
}
}
// Build Ssl connector with openssl, based on supplied alpn protocols
#[cfg(feature = "openssl")]
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
use actix_tls::connect::ssl::openssl::SslMethod;
use bytes::{BufMut, BytesMut};
let mut alpn = BytesMut::with_capacity(20);
for proto in protocols.iter() {
alpn.put_u8(proto.len() as u8);
alpn.put(proto.as_slice());
}
let mut ssl = OpensslConnector::builder(SslMethod::tls()).unwrap();
let _ = ssl
.set_alpn_protos(&alpn)
.map_err(|e| error!("Can not set alpn protocol: {:?}", e));
SslConnector::Openssl(ssl.build())
}
// Build Ssl connector with rustls, based on supplied alpn protocols
#[cfg(all(not(feature = "openssl"), feature = "rustls"))]
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
let mut config = ClientConfig::new();
config.set_protocols(&protocols);
config.root_store.add_server_trust_anchors(
&actix_tls::connect::ssl::rustls::TLS_SERVER_ROOTS,
);
SslConnector::Rustls(std::sync::Arc::new(config))
}
// ssl turned off, provides empty ssl connector
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
fn build_ssl(_: Vec<Vec<u8>>) -> SslConnector {
SslConnector::None
}
}
impl<S> Connector<S> {
/// Use custom connector.
pub fn connector<S1, Io1>(self, connector: S1) -> Connector<S1>
where
Io1: ActixStream + fmt::Debug + 'static,
S1: Service<
TcpConnect<Uri>,
Response = TcpConnection<Uri, Io1>,
Error = TcpConnectError,
> + Clone,
{
Connector {
connector,
config: self.config,
ssl: self.ssl,
}
}
}
impl<S, Io> Connector<S>
where
// Note:
// Input Io type is bound to ActixStream trait but internally in client module they
// are bound to ConnectionIo trait alias. And latter is the trait exposed to public
// in the form of Box<dyn ConnectionIo> type.
//
// This remap is to hide ActixStream's trait methods. They are not meant to be called
// from user code.
Io: ActixStream + fmt::Debug + 'static,
S: Service<
TcpConnect<Uri>,
Response = TcpConnection<Uri, Io>,
Error = TcpConnectError,
> + Clone
+ 'static,
{
/// Tcp connection timeout, i.e. max time to connect to remote host including dns name
/// resolution. Set to 5 second by default.
pub fn timeout(mut self, timeout: Duration) -> Self {
self.config.timeout = timeout;
self
}
/// Tls handshake timeout, i.e. max time to do tls handshake with remote host after tcp
/// connection established. Set to 5 second by default.
pub fn handshake_timeout(mut self, timeout: Duration) -> Self {
self.config.handshake_timeout = timeout;
self
}
#[cfg(feature = "openssl")]
/// Use custom `SslConnector` instance.
pub fn ssl(mut self, connector: OpensslConnector) -> Self {
self.ssl = SslConnector::Openssl(connector);
self
}
#[cfg(feature = "rustls")]
/// Use custom `SslConnector` instance.
pub fn rustls(mut self, connector: std::sync::Arc<ClientConfig>) -> Self {
self.ssl = SslConnector::Rustls(connector);
self
}
/// Maximum supported HTTP major version.
///
/// Supported versions are HTTP/1.1 and HTTP/2.
pub fn max_http_version(mut self, val: http::Version) -> Self {
let versions = match val {
http::Version::HTTP_11 => vec![b"http/1.1".to_vec()],
http::Version::HTTP_2 => vec![b"h2".to_vec(), b"http/1.1".to_vec()],
_ => {
unimplemented!("actix-http:client: supported versions http/1.1, http/2")
}
};
self.ssl = Connector::build_ssl(versions);
self
}
/// Indicates the initial window size (in octets) for
/// HTTP2 stream-level flow control for received data.
///
/// The default value is 65,535 and is good for APIs, but not for big objects.
pub fn initial_window_size(mut self, size: u32) -> Self {
self.config.stream_window_size = size;
self
}
/// Indicates the initial window size (in octets) for
/// HTTP2 connection-level flow control for received data.
///
/// The default value is 65,535 and is good for APIs, but not for big objects.
pub fn initial_connection_window_size(mut self, size: u32) -> Self {
self.config.conn_window_size = size;
self
}
/// Set total number of simultaneous connections per type of scheme.
///
/// If limit is 0, the connector has no limit.
/// The default limit size is 100.
pub fn limit(mut self, limit: usize) -> Self {
self.config.limit = limit;
self
}
/// Set keep-alive period for opened connection.
///
/// Keep-alive period is the period between connection usage. If
/// the delay between repeated usages of the same connection
/// exceeds this period, the connection is closed.
/// Default keep-alive period is 15 seconds.
pub fn conn_keep_alive(mut self, dur: Duration) -> Self {
self.config.conn_keep_alive = dur;
self
}
/// Set max lifetime period for connection.
///
/// Connection lifetime is max lifetime of any opened connection
/// until it is closed regardless of keep-alive period.
/// Default lifetime period is 75 seconds.
pub fn conn_lifetime(mut self, dur: Duration) -> Self {
self.config.conn_lifetime = dur;
self
}
/// Set server connection disconnect timeout in milliseconds.
///
/// Defines a timeout for disconnect connection. If a disconnect procedure does not complete
/// within this time, the socket get dropped. This timeout affects only secure connections.
///
/// To disable timeout set value to 0.
///
/// By default disconnect timeout is set to 3000 milliseconds.
pub fn disconnect_timeout(mut self, dur: Duration) -> Self {
self.config.disconnect_timeout = Some(dur);
self
}
/// Set local IP Address the connector would use for establishing connection.
pub fn local_address(mut self, addr: IpAddr) -> Self {
self.config.local_address = Some(addr);
self
}
/// Finish configuration process and create connector service.
/// The Connector builder always concludes by calling `finish()` last in
/// its combinator chain.
pub fn finish(self) -> ConnectorService<S, Io> {
let local_address = self.config.local_address;
let timeout = self.config.timeout;
let tcp_service_inner =
TcpConnectorInnerService::new(self.connector, timeout, local_address);
#[allow(clippy::redundant_clone)]
let tcp_service = TcpConnectorService {
service: tcp_service_inner.clone(),
};
let tls_service = match self.ssl {
SslConnector::None => None,
#[cfg(feature = "openssl")]
SslConnector::Openssl(tls) => {
const H2: &[u8] = b"h2";
use actix_tls::connect::ssl::openssl::{OpensslConnector, SslStream};
impl<Io: ConnectionIo> IntoConnectionIo for TcpConnection<Uri, SslStream<Io>> {
fn into_connection_io(self) -> (Box<dyn ConnectionIo>, Protocol) {
let sock = self.into_parts().0;
let h2 = sock
.ssl()
.selected_alpn_protocol()
.map(|protos| protos.windows(2).any(|w| w == H2))
.unwrap_or(false);
if h2 {
(Box::new(sock), Protocol::Http2)
} else {
(Box::new(sock), Protocol::Http1)
}
}
}
let handshake_timeout = self.config.handshake_timeout;
let tls_service = TlsConnectorService {
tcp_service: tcp_service_inner,
tls_service: OpensslConnector::service(tls),
timeout: handshake_timeout,
};
Some(actix_service::boxed::rc_service(tls_service))
}
#[cfg(feature = "rustls")]
SslConnector::Rustls(tls) => {
const H2: &[u8] = b"h2";
use actix_tls::connect::ssl::rustls::{
RustlsConnector, Session, TlsStream,
};
impl<Io: ConnectionIo> IntoConnectionIo for TcpConnection<Uri, TlsStream<Io>> {
fn into_connection_io(self) -> (Box<dyn ConnectionIo>, Protocol) {
let sock = self.into_parts().0;
let h2 = sock
.get_ref()
.1
.get_alpn_protocol()
.map(|protos| protos.windows(2).any(|w| w == H2))
.unwrap_or(false);
if h2 {
(Box::new(sock), Protocol::Http2)
} else {
(Box::new(sock), Protocol::Http1)
}
}
}
let handshake_timeout = self.config.handshake_timeout;
let tls_service = TlsConnectorService {
tcp_service: tcp_service_inner,
tls_service: RustlsConnector::service(tls),
timeout: handshake_timeout,
};
Some(actix_service::boxed::rc_service(tls_service))
}
};
let tcp_config = self.config.no_disconnect_timeout();
let tcp_pool = ConnectionPool::new(tcp_service, tcp_config);
let tls_config = self.config;
let tls_pool = tls_service
.map(move |tls_service| ConnectionPool::new(tls_service, tls_config));
ConnectorServicePriv { tcp_pool, tls_pool }
}
}
/// tcp service for map `TcpConnection<Uri, Io>` type to `(Io, Protocol)`
#[derive(Clone)]
pub struct TcpConnectorService<S: Clone> {
service: S,
}
impl<S, Io> Service<Connect> for TcpConnectorService<S>
where
S: Service<Connect, Response = TcpConnection<Uri, Io>, Error = ConnectError>
+ Clone
+ 'static,
{
type Response = (Io, Protocol);
type Error = ConnectError;
type Future = TcpConnectorFuture<S::Future>;
actix_service::forward_ready!(service);
fn call(&self, req: Connect) -> Self::Future {
TcpConnectorFuture {
fut: self.service.call(req),
}
}
}
#[pin_project]
pub struct TcpConnectorFuture<Fut> {
#[pin]
fut: Fut,
}
impl<Fut, Io> Future for TcpConnectorFuture<Fut>
where
Fut: Future<Output = Result<TcpConnection<Uri, Io>, ConnectError>>,
{
type Output = Result<(Io, Protocol), ConnectError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.project()
.fut
.poll(cx)
.map_ok(|res| (res.into_parts().0, Protocol::Http1))
}
}
/// service for establish tcp connection and do client tls handshake.
/// operation is canceled when timeout limit reached.
struct TlsConnectorService<S, St> {
/// tcp connection is canceled on `TcpConnectorInnerService`'s timeout setting.
tcp_service: S,
/// tls connection is canceled on `TlsConnectorService`'s timeout setting.
tls_service: St,
timeout: Duration,
}
impl<S, St, Io> Service<Connect> for TlsConnectorService<S, St>
where
S: Service<Connect, Response = TcpConnection<Uri, Io>, Error = ConnectError>
+ Clone
+ 'static,
St: Service<TcpConnection<Uri, Io>, Error = std::io::Error> + Clone + 'static,
Io: ConnectionIo,
St::Response: IntoConnectionIo,
{
type Response = (Box<dyn ConnectionIo>, Protocol);
type Error = ConnectError;
type Future = TlsConnectorFuture<St, S::Future, St::Future>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
ready!(self.tcp_service.poll_ready(cx))?;
ready!(self.tls_service.poll_ready(cx))?;
Poll::Ready(Ok(()))
}
fn call(&self, req: Connect) -> Self::Future {
let fut = self.tcp_service.call(req);
let tls_service = self.tls_service.clone();
let timeout = self.timeout;
TlsConnectorFuture::TcpConnect {
fut,
tls_service: Some(tls_service),
timeout,
}
}
}
#[pin_project(project = TlsConnectorProj)]
#[allow(clippy::large_enum_variant)]
enum TlsConnectorFuture<S, Fut1, Fut2> {
TcpConnect {
#[pin]
fut: Fut1,
tls_service: Option<S>,
timeout: Duration,
},
TlsConnect {
#[pin]
fut: Fut2,
#[pin]
timeout: Sleep,
},
}
/// helper trait for generic over different TlsStream types between tls crates.
trait IntoConnectionIo {
fn into_connection_io(self) -> (Box<dyn ConnectionIo>, Protocol);
}
impl<S, Io, Fut1, Fut2, Res> Future for TlsConnectorFuture<S, Fut1, Fut2>
where
S: Service<
TcpConnection<Uri, Io>,
Response = Res,
Error = std::io::Error,
Future = Fut2,
>,
S::Response: IntoConnectionIo,
Fut1: Future<Output = Result<TcpConnection<Uri, Io>, ConnectError>>,
Fut2: Future<Output = Result<S::Response, S::Error>>,
Io: ConnectionIo,
{
type Output = Result<(Box<dyn ConnectionIo>, Protocol), ConnectError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.as_mut().project() {
TlsConnectorProj::TcpConnect {
fut,
tls_service,
timeout,
} => {
let res = ready!(fut.poll(cx))?;
let fut = tls_service
.take()
.expect("TlsConnectorFuture polled after complete")
.call(res);
let timeout = sleep(*timeout);
self.set(TlsConnectorFuture::TlsConnect { fut, timeout });
self.poll(cx)
}
TlsConnectorProj::TlsConnect { fut, timeout } => match fut.poll(cx)? {
Poll::Ready(res) => Poll::Ready(Ok(res.into_connection_io())),
Poll::Pending => timeout.poll(cx).map(|_| Err(ConnectError::Timeout)),
},
}
}
}
/// service for establish tcp connection.
/// operation is canceled when timeout limit reached.
#[derive(Clone)]
pub struct TcpConnectorInnerService<S: Clone> {
service: S,
timeout: Duration,
local_address: Option<std::net::IpAddr>,
}
impl<S: Clone> TcpConnectorInnerService<S> {
fn new(
service: S,
timeout: Duration,
local_address: Option<std::net::IpAddr>,
) -> Self {
Self {
service,
timeout,
local_address,
}
}
}
impl<S, Io> Service<Connect> for TcpConnectorInnerService<S>
where
S: Service<
TcpConnect<Uri>,
Response = TcpConnection<Uri, Io>,
Error = TcpConnectError,
> + Clone
+ 'static,
{
type Response = S::Response;
type Error = ConnectError;
type Future = TcpConnectorInnerFuture<S::Future>;
actix_service::forward_ready!(service);
fn call(&self, req: Connect) -> Self::Future {
let mut req = TcpConnect::new(req.uri).set_addr(req.addr);
if let Some(local_addr) = self.local_address {
req = req.set_local_addr(local_addr);
}
TcpConnectorInnerFuture {
fut: self.service.call(req),
timeout: sleep(self.timeout),
}
}
}
#[pin_project]
pub struct TcpConnectorInnerFuture<Fut> {
#[pin]
fut: Fut,
#[pin]
timeout: Sleep,
}
impl<Fut, Io> Future for TcpConnectorInnerFuture<Fut>
where
Fut: Future<Output = Result<TcpConnection<Uri, Io>, TcpConnectError>>,
{
type Output = Result<TcpConnection<Uri, Io>, ConnectError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
match this.fut.poll(cx) {
Poll::Ready(res) => Poll::Ready(res.map_err(ConnectError::from)),
Poll::Pending => this.timeout.poll(cx).map(|_| Err(ConnectError::Timeout)),
}
}
}
/// Connector service for pooled Plain/Tls Tcp connections.
pub type ConnectorService<S, Io> = ConnectorServicePriv<
TcpConnectorService<TcpConnectorInnerService<S>>,
Rc<
dyn Service<
Connect,
Response = (Box<dyn ConnectionIo>, Protocol),
Error = ConnectError,
Future = LocalBoxFuture<
'static,
Result<(Box<dyn ConnectionIo>, Protocol), ConnectError>,
>,
>,
>,
Io,
Box<dyn ConnectionIo>,
>;
pub struct ConnectorServicePriv<S1, S2, Io1, Io2>
where
S1: Service<Connect, Response = (Io1, Protocol), Error = ConnectError>,
S2: Service<Connect, Response = (Io2, Protocol), Error = ConnectError>,
Io1: ConnectionIo,
Io2: ConnectionIo,
{
tcp_pool: ConnectionPool<S1, Io1>,
tls_pool: Option<ConnectionPool<S2, Io2>>,
}
impl<S1, S2, Io1, Io2> Service<Connect> for ConnectorServicePriv<S1, S2, Io1, Io2>
where
S1: Service<Connect, Response = (Io1, Protocol), Error = ConnectError>
+ Clone
+ 'static,
S2: Service<Connect, Response = (Io2, Protocol), Error = ConnectError>
+ Clone
+ 'static,
Io1: ConnectionIo,
Io2: ConnectionIo,
{
type Response = Connection<Io1, Io2>;
type Error = ConnectError;
type Future = ConnectorServiceFuture<S1, S2, Io1, Io2>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
ready!(self.tcp_pool.poll_ready(cx))?;
if let Some(ref tls_pool) = self.tls_pool {
ready!(tls_pool.poll_ready(cx))?;
}
Poll::Ready(Ok(()))
}
fn call(&self, req: Connect) -> Self::Future {
match req.uri.scheme_str() {
Some("https") | Some("wss") => match self.tls_pool {
None => ConnectorServiceFuture::SslIsNotSupported,
Some(ref pool) => ConnectorServiceFuture::Tls(pool.call(req)),
},
_ => ConnectorServiceFuture::Tcp(self.tcp_pool.call(req)),
}
}
}
#[pin_project(project = ConnectorServiceProj)]
pub enum ConnectorServiceFuture<S1, S2, Io1, Io2>
where
S1: Service<Connect, Response = (Io1, Protocol), Error = ConnectError>
+ Clone
+ 'static,
S2: Service<Connect, Response = (Io2, Protocol), Error = ConnectError>
+ Clone
+ 'static,
Io1: ConnectionIo,
Io2: ConnectionIo,
{
Tcp(#[pin] <ConnectionPool<S1, Io1> as Service<Connect>>::Future),
Tls(#[pin] <ConnectionPool<S2, Io2> as Service<Connect>>::Future),
SslIsNotSupported,
}
impl<S1, S2, Io1, Io2> Future for ConnectorServiceFuture<S1, S2, Io1, Io2>
where
S1: Service<Connect, Response = (Io1, Protocol), Error = ConnectError>
+ Clone
+ 'static,
S2: Service<Connect, Response = (Io2, Protocol), Error = ConnectError>
+ Clone
+ 'static,
Io1: ConnectionIo,
Io2: ConnectionIo,
{
type Output = Result<Connection<Io1, Io2>, ConnectError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.project() {
ConnectorServiceProj::Tcp(fut) => fut.poll(cx).map_ok(Connection::Tcp),
ConnectorServiceProj::Tls(fut) => fut.poll(cx).map_ok(Connection::Tls),
ConnectorServiceProj::SslIsNotSupported => {
Poll::Ready(Err(ConnectError::SslIsNotSupported))
}
}
}
}
#[cfg(not(feature = "trust-dns"))]
mod resolver {
use super::*;
pub(super) fn resolver() -> Resolver {
Resolver::Default
}
}
#[cfg(feature = "trust-dns")]
mod resolver {
use std::{cell::RefCell, net::SocketAddr};
use actix_tls::connect::Resolve;
use futures_core::future::LocalBoxFuture;
use trust_dns_resolver::{
config::{ResolverConfig, ResolverOpts},
system_conf::read_system_conf,
TokioAsyncResolver,
};
use super::*;
pub(super) fn resolver() -> Resolver {
// newtype for implementing the `Resolve` trait for `TokioAsyncResolver`.
struct TrustDnsResolver(TokioAsyncResolver);
impl Resolve for TrustDnsResolver {
fn lookup<'a>(
&'a self,
host: &'a str,
port: u16,
) -> LocalBoxFuture<'a, Result<Vec<SocketAddr>, Box<dyn std::error::Error>>>
{
Box::pin(async move {
let res = self
.0
.lookup_ip(host)
.await?
.iter()
.map(|ip| SocketAddr::new(ip, port))
.collect();
Ok(res)
})
}
}
// The resolver struct is cached in a thread local,
// so new client constructors can reuse the existing DNS resolver.
thread_local! {
static TRUST_DNS_RESOLVER: RefCell<Option<Resolver>> = RefCell::new(None);
}
// get from thread local or construct a new trust-dns resolver.
TRUST_DNS_RESOLVER.with(|local| {
let resolver = local.borrow().as_ref().map(Clone::clone);
match resolver {
Some(resolver) => resolver,
None => {
let (cfg, opts) = match read_system_conf() {
Ok((cfg, opts)) => (cfg, opts),
Err(e) => {
log::error!("TRust-DNS can not load system config: {}", e);
(ResolverConfig::default(), ResolverOpts::default())
}
};
let resolver = TokioAsyncResolver::tokio(cfg, opts).unwrap();
// box trust dns resolver and put it in thread local.
let resolver = Resolver::new_custom(TrustDnsResolver(resolver));
*local.borrow_mut() = Some(resolver.clone());
resolver
}
}
})
}
}
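// The `Resolve` trait implemented above can back any custom resolver. The module
// below is a hedged sketch: only `Resolve`, `Resolver::new_custom`, and the
// `lookup` signature come from this code; `StaticResolver` and the fixed
// localhost address are illustrative assumptions.
#[allow(dead_code)]
mod custom_resolver_sketch {
    use std::net::SocketAddr;

    use actix_tls::connect::{Resolve, Resolver};
    use futures_core::future::LocalBoxFuture;

    struct StaticResolver;

    impl Resolve for StaticResolver {
        fn lookup<'a>(
            &'a self,
            _host: &'a str,
            port: u16,
        ) -> LocalBoxFuture<'a, Result<Vec<SocketAddr>, Box<dyn std::error::Error>>> {
            // always resolve to localhost; illustration only
            Box::pin(async move { Ok(vec![SocketAddr::from(([127, 0, 0, 1], port))]) })
        }
    }

    /// Wraps the sketch resolver the same way the thread-local branch above does.
    pub(super) fn static_resolver() -> Resolver {
        Resolver::new_custom(StaticResolver)
    }
}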

View file

@ -1,26 +0,0 @@
//! HTTP client.
use http::Uri;
mod config;
mod connection;
mod connector;
mod error;
mod h1proto;
mod h2proto;
mod pool;
pub use actix_tls::connect::{
Connect as TcpConnect, ConnectError as TcpConnectError, Connection as TcpConnection,
};
pub use self::connection::{Connection, ConnectionIo};
pub use self::connector::{Connector, ConnectorService};
pub use self::error::{ConnectError, FreezeRequestError, InvalidUrl, SendRequestError};
pub use crate::Protocol;
#[derive(Clone)]
pub struct Connect {
pub uri: Uri,
pub addr: Option<std::net::SocketAddr>,
}

View file

@ -1,356 +1,177 @@
use std::cell::Cell;
use std::fmt::Write;
use std::rc::Rc;
use std::time::Duration;
use std::{fmt, net};
use actix_rt::{
task::JoinHandle,
time::{interval, sleep_until, Instant, Sleep},
use std::{
net,
rc::Rc,
time::{Duration, Instant},
};
use bytes::BytesMut;
use time::OffsetDateTime;
/// "Sun, 06 Nov 1994 08:49:37 GMT".len()
const DATE_VALUE_LENGTH: usize = 29;
use crate::{date::DateService, KeepAlive};
#[derive(Debug, PartialEq, Clone, Copy)]
/// Server keep-alive setting
pub enum KeepAlive {
/// Keep alive in seconds
Timeout(usize),
/// Rely on OS to shutdown tcp connection
Os,
/// Disabled
Disabled,
}
impl From<usize> for KeepAlive {
fn from(keepalive: usize) -> Self {
KeepAlive::Timeout(keepalive)
}
}
impl From<Option<usize>> for KeepAlive {
fn from(keepalive: Option<usize>) -> Self {
if let Some(keepalive) = keepalive {
KeepAlive::Timeout(keepalive)
} else {
KeepAlive::Disabled
}
}
}
/// Http service configuration
/// HTTP service configuration.
#[derive(Debug, Clone)]
pub struct ServiceConfig(Rc<Inner>);
#[derive(Debug)]
struct Inner {
keep_alive: Option<Duration>,
client_timeout: u64,
client_disconnect: u64,
ka_enabled: bool,
keep_alive: KeepAlive,
client_request_timeout: Duration,
client_disconnect_timeout: Duration,
secure: bool,
local_addr: Option<std::net::SocketAddr>,
date_service: DateService,
}
impl Clone for ServiceConfig {
fn clone(&self) -> Self {
ServiceConfig(self.0.clone())
}
}
impl Default for ServiceConfig {
fn default() -> Self {
Self::new(KeepAlive::Timeout(5), 0, 0, false, None)
Self::new(
KeepAlive::default(),
Duration::from_secs(5),
Duration::ZERO,
false,
None,
)
}
}
impl ServiceConfig {
/// Create instance of `ServiceConfig`
/// Create instance of `ServiceConfig`.
pub fn new(
keep_alive: KeepAlive,
client_timeout: u64,
client_disconnect: u64,
client_request_timeout: Duration,
client_disconnect_timeout: Duration,
secure: bool,
local_addr: Option<net::SocketAddr>,
) -> ServiceConfig {
let (keep_alive, ka_enabled) = match keep_alive {
KeepAlive::Timeout(val) => (val as u64, true),
KeepAlive::Os => (0, true),
KeepAlive::Disabled => (0, false),
};
let keep_alive = if ka_enabled && keep_alive > 0 {
Some(Duration::from_secs(keep_alive))
} else {
None
};
ServiceConfig(Rc::new(Inner {
keep_alive,
ka_enabled,
client_timeout,
client_disconnect,
keep_alive: keep_alive.normalize(),
client_request_timeout,
client_disconnect_timeout,
secure,
local_addr,
date_service: DateService::new(),
}))
}
/// Returns true if connection is secure (HTTPS)
/// Returns `true` if connection is secure (i.e., using TLS / HTTPS).
#[inline]
pub fn secure(&self) -> bool {
self.0.secure
}
/// Returns the local address that this server is bound to.
///
/// Returns `None` for connections via UDS (Unix Domain Socket).
#[inline]
pub fn local_addr(&self) -> Option<net::SocketAddr> {
self.0.local_addr
}
/// Keep alive duration if configured.
/// Connection keep-alive setting.
#[inline]
pub fn keep_alive(&self) -> Option<Duration> {
pub fn keep_alive(&self) -> KeepAlive {
self.0.keep_alive
}
/// Return state of connection keep-alive functionality
#[inline]
pub fn keep_alive_enabled(&self) -> bool {
self.0.ka_enabled
}
/// Client timeout for first request.
#[inline]
pub fn client_timer(&self) -> Option<Sleep> {
let delay_time = self.0.client_timeout;
if delay_time != 0 {
Some(sleep_until(self.now() + Duration::from_millis(delay_time)))
} else {
None
/// Creates a time object representing the deadline for this connection's keep-alive period, if
/// enabled.
///
/// When [`KeepAlive::Os`] or [`KeepAlive::Disabled`] is set, this will return `None`.
pub fn keep_alive_deadline(&self) -> Option<Instant> {
match self.keep_alive() {
KeepAlive::Timeout(dur) => Some(self.now() + dur),
KeepAlive::Os => None,
KeepAlive::Disabled => None,
}
}
/// Client timeout for first request.
pub fn client_timer_expire(&self) -> Option<Instant> {
let delay = self.0.client_timeout;
if delay != 0 {
Some(self.now() + Duration::from_millis(delay))
} else {
None
}
/// Creates a time object representing the deadline for the client to finish sending the head of
/// its first request.
///
/// Returns `None` if this `ServiceConfig` was constructed with `client_request_timeout: 0`.
pub fn client_request_deadline(&self) -> Option<Instant> {
let timeout = self.0.client_request_timeout;
(timeout != Duration::ZERO).then(|| self.now() + timeout)
}
/// Client disconnect timer
pub fn client_disconnect_timer(&self) -> Option<Instant> {
let delay = self.0.client_disconnect;
if delay != 0 {
Some(self.now() + Duration::from_millis(delay))
} else {
None
}
/// Creates a time object representing the deadline for the client to disconnect.
pub fn client_disconnect_deadline(&self) -> Option<Instant> {
let timeout = self.0.client_disconnect_timeout;
(timeout != Duration::ZERO).then(|| self.now() + timeout)
}
#[inline]
/// Returns the keep-alive timer if a delay is configured.
pub fn keep_alive_timer(&self) -> Option<Sleep> {
self.keep_alive().map(|ka| sleep_until(self.now() + ka))
}
/// Keep-alive expire time
pub fn keep_alive_expire(&self) -> Option<Instant> {
self.keep_alive().map(|ka| self.now() + ka)
}
#[inline]
pub(crate) fn now(&self) -> Instant {
self.0.date_service.now()
}
/// Writes date header to `dst` buffer.
///
/// Low-level method that utilizes the built-in efficient date service, requiring fewer syscalls
/// than normal. Note that a CRLF (`\r\n`) is included in what is written.
#[doc(hidden)]
pub fn set_date(&self, dst: &mut BytesMut) {
let mut buf: [u8; 39] = [0; 39];
buf[..6].copy_from_slice(b"date: ");
pub fn write_date_header(&self, dst: &mut BytesMut, camel_case: bool) {
let mut buf: [u8; 37] = [0; 37];
buf[..6].copy_from_slice(if camel_case { b"Date: " } else { b"date: " });
self.0
.date_service
.set_date(|date| buf[6..35].copy_from_slice(&date.bytes));
buf[35..].copy_from_slice(b"\r\n\r\n");
.with_date(|date| buf[6..35].copy_from_slice(&date.bytes));
buf[35..].copy_from_slice(b"\r\n");
dst.extend_from_slice(&buf);
}
pub(crate) fn set_date_header(&self, dst: &mut BytesMut) {
#[allow(unused)] // used with `http2` feature flag
pub(crate) fn write_date_header_value(&self, dst: &mut BytesMut) {
self.0
.date_service
.set_date(|date| dst.extend_from_slice(&date.bytes));
}
}
#[derive(Copy, Clone)]
struct Date {
bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
}
impl Date {
fn new() -> Date {
let mut date = Date {
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
};
date.update();
date
}
fn update(&mut self) {
self.pos = 0;
write!(
self,
"{}",
OffsetDateTime::now_utc().format("%a, %d %b %Y %H:%M:%S GMT")
)
.unwrap();
}
}
impl fmt::Write for Date {
fn write_str(&mut self, s: &str) -> fmt::Result {
let len = s.len();
self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
self.pos += len;
Ok(())
}
}
/// Service that updates the Date and Instant pair periodically, at a 500 millisecond interval.
struct DateService {
current: Rc<Cell<(Date, Instant)>>,
handle: JoinHandle<()>,
}
impl Drop for DateService {
fn drop(&mut self) {
// stop the timer update async task on drop.
self.handle.abort();
}
}
impl DateService {
fn new() -> Self {
// shared date and timer for DateService and update async task.
let current = Rc::new(Cell::new((Date::new(), Instant::now())));
let current_clone = Rc::clone(&current);
// spawn an async task that sleeps for 500 milliseconds and updates the current date/timer in a loop.
// handle is used to stop the task on DateService drop.
let handle = actix_rt::spawn(async move {
#[cfg(test)]
let _notify = notify_on_drop::NotifyOnDrop::new();
let mut interval = interval(Duration::from_millis(500));
loop {
let now = interval.tick().await;
let date = Date::new();
current_clone.set((date, now));
}
});
DateService { current, handle }
}
fn now(&self) -> Instant {
self.current.get().1
}
fn set_date<F: FnMut(&Date)>(&self, mut f: F) {
f(&self.current.get().0);
}
}
// TODO: move to a util module for testing all spawn handle drop style tasks.
#[cfg(test)]
/// Test module for checking the drop state of certain async tasks that are spawned
/// with `actix_rt::spawn`.
///
/// The target task must explicitly create a `NotifyOnDrop` instance when it is spawned.
mod notify_on_drop {
use std::cell::RefCell;
thread_local! {
static NOTIFY_DROPPED: RefCell<Option<bool>> = RefCell::new(None);
}
/// Check if the spawned task is dropped.
///
/// # Panics
///
/// Panics when there is no `NotifyOnDrop` instance on the current thread.
pub(crate) fn is_dropped() -> bool {
NOTIFY_DROPPED.with(|bool| {
bool.borrow()
.expect("No NotifyOnDrop existed on current thread")
})
}
pub(crate) struct NotifyOnDrop;
impl NotifyOnDrop {
/// # Panics
///
/// Panics when multiple instances are constructed on any given thread.
pub(crate) fn new() -> Self {
NOTIFY_DROPPED.with(|bool| {
let mut bool = bool.borrow_mut();
if bool.is_some() {
panic!("NotifyOnDrop existed on current thread");
} else {
*bool = Some(false);
}
});
NotifyOnDrop
}
}
impl Drop for NotifyOnDrop {
fn drop(&mut self) {
NOTIFY_DROPPED.with(|bool| {
if let Some(b) = bool.borrow_mut().as_mut() {
*b = true;
}
});
}
.with_date(|date| dst.extend_from_slice(&date.bytes));
}
}
#[cfg(test)]
mod tests {
use super::*;
use actix_rt::{
task::yield_now,
time::{sleep, sleep_until},
};
use memchr::memmem;
use actix_rt::task::yield_now;
use super::*;
use crate::{date::DATE_VALUE_LENGTH, notify_on_drop};
#[actix_rt::test]
async fn test_date_service_update() {
let settings = ServiceConfig::new(KeepAlive::Os, 0, 0, false, None);
let settings =
ServiceConfig::new(KeepAlive::Os, Duration::ZERO, Duration::ZERO, false, None);
yield_now().await;
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf1);
settings.write_date_header(&mut buf1, false);
let now1 = settings.now();
sleep_until(Instant::now() + Duration::from_secs(2)).await;
sleep_until((Instant::now() + Duration::from_secs(2)).into()).await;
yield_now().await;
let now2 = settings.now();
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf2);
settings.write_date_header(&mut buf2, false);
assert_ne!(now1, now2);
assert_ne!(buf1, buf2);
drop(settings);
assert!(notify_on_drop::is_dropped());
// Ensure the task will drop eventually
let mut times = 0;
while !notify_on_drop::is_dropped() {
sleep(Duration::from_millis(100)).await;
times += 1;
assert!(times < 10, "Timeout waiting for task drop");
}
}
#[actix_rt::test]
@ -365,14 +186,21 @@ mod tests {
let clone3 = service.clone();
drop(clone1);
assert_eq!(false, notify_on_drop::is_dropped());
assert!(!notify_on_drop::is_dropped());
drop(clone2);
assert_eq!(false, notify_on_drop::is_dropped());
assert!(!notify_on_drop::is_dropped());
drop(clone3);
assert_eq!(false, notify_on_drop::is_dropped());
assert!(!notify_on_drop::is_dropped());
drop(service);
assert!(notify_on_drop::is_dropped());
// Ensure the task will drop eventually
let mut times = 0;
while !notify_on_drop::is_dropped() {
sleep(Duration::from_millis(100)).await;
times += 1;
assert!(times < 10, "Timeout waiting for task drop");
}
}
#[test]
@ -382,11 +210,27 @@ mod tests {
#[actix_rt::test]
async fn test_date() {
let settings = ServiceConfig::new(KeepAlive::Os, 0, 0, false, None);
let settings = ServiceConfig::default();
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf1);
settings.write_date_header(&mut buf1, false);
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf2);
settings.write_date_header(&mut buf2, false);
assert_eq!(buf1, buf2);
}
#[actix_rt::test]
async fn test_date_camel_case() {
let settings = ServiceConfig::default();
let mut buf = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf, false);
assert!(memmem::find(&buf, b"date:").is_some());
let mut buf = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf, true);
assert!(memmem::find(&buf, b"Date:").is_some());
}
}
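// Hedged usage sketch of the new `Duration`-based constructor; the five-second
// values are arbitrary assumptions, and only the signature and the deadline
// helpers come from this file. Relies on the imports already used above.
#[allow(dead_code)]
fn example_config() -> ServiceConfig {
    let cfg = ServiceConfig::new(
        KeepAlive::Timeout(Duration::from_secs(5)), // connection keep-alive
        Duration::from_secs(5),                     // client request (head) timeout
        Duration::ZERO,                             // client disconnect timeout (disabled)
        false,                                      // secure (TLS) connection?
        None,                                       // local socket address
    );

    // non-zero timeouts yield `Some` deadline; the disabled one yields `None`
    debug_assert!(cfg.client_request_deadline().is_some());
    debug_assert!(cfg.client_disconnect_deadline().is_none());

    cfg
}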

92
actix-http/src/date.rs Normal file
View file

@ -0,0 +1,92 @@
use std::{
cell::Cell,
fmt::{self, Write},
rc::Rc,
time::{Duration, Instant, SystemTime},
};
use actix_rt::{task::JoinHandle, time::interval};
/// "Thu, 01 Jan 1970 00:00:00 GMT".len()
pub(crate) const DATE_VALUE_LENGTH: usize = 29;
#[derive(Clone, Copy)]
pub(crate) struct Date {
pub(crate) bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
}
impl Date {
fn new() -> Date {
let mut date = Date {
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
};
date.update();
date
}
fn update(&mut self) {
self.pos = 0;
write!(self, "{}", httpdate::HttpDate::from(SystemTime::now())).unwrap();
}
}
impl fmt::Write for Date {
fn write_str(&mut self, s: &str) -> fmt::Result {
let len = s.len();
self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
self.pos += len;
Ok(())
}
}
/// Service that updates the Date and Instant pair periodically, at a 500 millisecond interval.
pub(crate) struct DateService {
current: Rc<Cell<(Date, Instant)>>,
handle: JoinHandle<()>,
}
impl DateService {
pub(crate) fn new() -> Self {
// shared date and timer for DateService and update async task.
let current = Rc::new(Cell::new((Date::new(), Instant::now())));
let current_clone = Rc::clone(&current);
// spawn an async task that sleeps for 500 milliseconds and updates the current date/timer in a loop.
// handle is used to stop the task on DateService drop.
let handle = actix_rt::spawn(async move {
#[cfg(test)]
let _notify = crate::notify_on_drop::NotifyOnDrop::new();
let mut interval = interval(Duration::from_millis(500));
loop {
let now = interval.tick().await;
let date = Date::new();
current_clone.set((date, now.into_std()));
}
});
DateService { current, handle }
}
pub(crate) fn now(&self) -> Instant {
self.current.get().1
}
pub(crate) fn with_date<F: FnMut(&Date)>(&self, mut f: F) {
f(&self.current.get().0);
}
}
impl fmt::Debug for DateService {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DateService").finish_non_exhaustive()
}
}
impl Drop for DateService {
fn drop(&mut self) {
// stop the timer update async task on drop.
self.handle.abort();
}
}
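// Hedged sketch of the pattern `DateService` uses above, stripped of the
// date-specific parts: shared state behind an `Rc<Cell<_>>`, refreshed by a
// background task that is aborted when its owner drops. The `Ticker` name and the
// counter payload are illustrative assumptions; relies on this file's imports.
#[allow(dead_code)]
struct Ticker {
    current: Rc<Cell<u64>>,
    handle: JoinHandle<()>,
}

#[allow(dead_code)]
impl Ticker {
    fn new() -> Self {
        let current = Rc::new(Cell::new(0));
        let current_clone = Rc::clone(&current);

        // refresh the shared value every 500 milliseconds until aborted
        let handle = actix_rt::spawn(async move {
            let mut interval = interval(Duration::from_millis(500));
            loop {
                interval.tick().await;
                current_clone.set(current_clone.get() + 1);
            }
        });

        Ticker { current, handle }
    }

    fn get(&self) -> u64 {
        self.current.get()
    }
}

impl Drop for Ticker {
    fn drop(&mut self) {
        // stop the background task so it does not outlive its owner
        self.handle.abort();
    }
}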

View file

@ -8,24 +8,29 @@ use std::{
};
use actix_rt::task::{spawn_blocking, JoinHandle};
use brotli2::write::BrotliDecoder;
use bytes::Bytes;
#[cfg(feature = "compress-gzip")]
use flate2::write::{GzDecoder, ZlibDecoder};
use futures_core::{ready, Stream};
#[cfg(feature = "compress-zstd")]
use zstd::stream::write::Decoder as ZstdDecoder;
use crate::{
encoding::Writer,
error::{BlockingError, PayloadError},
http::header::{ContentEncoding, HeaderMap, CONTENT_ENCODING},
error::PayloadError,
header::{ContentEncoding, HeaderMap, CONTENT_ENCODING},
};
const MAX_CHUNK_SIZE_DECODE_IN_PLACE: usize = 2049;
pub struct Decoder<S> {
decoder: Option<ContentDecoder>,
stream: S,
eof: bool,
fut: Option<JoinHandle<Result<(Option<Bytes>, ContentDecoder), io::Error>>>,
pin_project_lite::pin_project! {
pub struct Decoder<S> {
decoder: Option<ContentDecoder>,
#[pin]
stream: S,
eof: bool,
fut: Option<JoinHandle<Result<(Option<Bytes>, ContentDecoder), io::Error>>>,
}
}
impl<S> Decoder<S>
@ -36,14 +41,27 @@ where
#[inline]
pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> {
let decoder = match encoding {
ContentEncoding::Br => Some(ContentDecoder::Br(Box::new(
BrotliDecoder::new(Writer::new()),
#[cfg(feature = "compress-brotli")]
ContentEncoding::Brotli => Some(ContentDecoder::Brotli(Box::new(
brotli::DecompressorWriter::new(Writer::new(), 8_096),
))),
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(
ZlibDecoder::new(Writer::new()),
))),
ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(
GzDecoder::new(Writer::new()),
#[cfg(feature = "compress-gzip")]
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(ZlibDecoder::new(
Writer::new(),
)))),
#[cfg(feature = "compress-gzip")]
ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(GzDecoder::new(
Writer::new(),
)))),
#[cfg(feature = "compress-zstd")]
ContentEncoding::Zstd => Some(ContentDecoder::Zstd(Box::new(
ZstdDecoder::new(Writer::new()).expect(
"Failed to create zstd decoder. This is a bug. \
Please report it to the actix-web repository.",
),
))),
_ => None,
};
@ -63,7 +81,7 @@ where
let encoding = headers
.get(&CONTENT_ENCODING)
.and_then(|val| val.to_str().ok())
.map(ContentEncoding::from)
.and_then(|x| x.parse().ok())
.unwrap_or(ContentEncoding::Identity);
Self::new(stream, encoding)
@ -72,45 +90,48 @@ where
impl<S> Stream for Decoder<S>
where
S: Stream<Item = Result<Bytes, PayloadError>> + Unpin,
S: Stream<Item = Result<Bytes, PayloadError>>,
{
type Item = Result<Bytes, PayloadError>;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
loop {
if let Some(ref mut fut) = self.fut {
let (chunk, decoder) =
ready!(Pin::new(fut).poll(cx)).map_err(|_| BlockingError)??;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.project();
self.decoder = Some(decoder);
self.fut.take();
loop {
if let Some(ref mut fut) = this.fut {
let (chunk, decoder) = ready!(Pin::new(fut).poll(cx)).map_err(|_| {
PayloadError::Io(io::Error::new(
io::ErrorKind::Other,
"Blocking task was cancelled unexpectedly",
))
})??;
*this.decoder = Some(decoder);
this.fut.take();
if let Some(chunk) = chunk {
return Poll::Ready(Some(Ok(chunk)));
}
}
if self.eof {
if *this.eof {
return Poll::Ready(None);
}
match ready!(Pin::new(&mut self.stream).poll_next(cx)) {
match ready!(this.stream.as_mut().poll_next(cx)) {
Some(Err(err)) => return Poll::Ready(Some(Err(err))),
Some(Ok(chunk)) => {
if let Some(mut decoder) = self.decoder.take() {
if let Some(mut decoder) = this.decoder.take() {
if chunk.len() < MAX_CHUNK_SIZE_DECODE_IN_PLACE {
let chunk = decoder.feed_data(chunk)?;
self.decoder = Some(decoder);
*this.decoder = Some(decoder);
if let Some(chunk) = chunk {
return Poll::Ready(Some(Ok(chunk)));
}
} else {
self.fut = Some(spawn_blocking(move || {
*this.fut = Some(spawn_blocking(move || {
let chunk = decoder.feed_data(chunk)?;
Ok((chunk, decoder))
}));
@ -123,9 +144,9 @@ where
}
None => {
self.eof = true;
*this.eof = true;
return if let Some(mut decoder) = self.decoder.take() {
return if let Some(mut decoder) = this.decoder.take() {
match decoder.feed_eof() {
Ok(Some(res)) => Poll::Ready(Some(Ok(res))),
Ok(None) => Poll::Ready(None),
@ -141,15 +162,26 @@ where
}
enum ContentDecoder {
#[cfg(feature = "compress-gzip")]
Deflate(Box<ZlibDecoder<Writer>>),
#[cfg(feature = "compress-gzip")]
Gzip(Box<GzDecoder<Writer>>),
Br(Box<BrotliDecoder<Writer>>),
#[cfg(feature = "compress-brotli")]
Brotli(Box<brotli::DecompressorWriter<Writer>>),
// We need an explicit 'static lifetime here because ZstdDecoder needs a lifetime
// argument, and we use `spawn_blocking` in `Decoder::poll_next`, which requires `FnOnce() -> R + Send + 'static`.
#[cfg(feature = "compress-zstd")]
Zstd(Box<ZstdDecoder<'static, Writer>>),
}
impl ContentDecoder {
fn feed_eof(&mut self) -> io::Result<Option<Bytes>> {
match self {
ContentDecoder::Br(ref mut decoder) => match decoder.flush() {
#[cfg(feature = "compress-brotli")]
ContentDecoder::Brotli(ref mut decoder) => match decoder.flush() {
Ok(()) => {
let b = decoder.get_mut().take();
@ -159,9 +191,10 @@ impl ContentDecoder {
Ok(None)
}
}
Err(e) => Err(e),
Err(err) => Err(err),
},
#[cfg(feature = "compress-gzip")]
ContentDecoder::Gzip(ref mut decoder) => match decoder.try_finish() {
Ok(_) => {
let b = decoder.get_mut().take();
@ -172,9 +205,10 @@ impl ContentDecoder {
Ok(None)
}
}
Err(e) => Err(e),
Err(err) => Err(err),
},
#[cfg(feature = "compress-gzip")]
ContentDecoder::Deflate(ref mut decoder) => match decoder.try_finish() {
Ok(_) => {
let b = decoder.get_mut().take();
@ -184,14 +218,28 @@ impl ContentDecoder {
Ok(None)
}
}
Err(e) => Err(e),
Err(err) => Err(err),
},
#[cfg(feature = "compress-zstd")]
ContentDecoder::Zstd(ref mut decoder) => match decoder.flush() {
Ok(_) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(err) => Err(err),
},
}
}
fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> {
match self {
ContentDecoder::Br(ref mut decoder) => match decoder.write_all(&data) {
#[cfg(feature = "compress-brotli")]
ContentDecoder::Brotli(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
@ -202,9 +250,10 @@ impl ContentDecoder {
Ok(None)
}
}
Err(e) => Err(e),
Err(err) => Err(err),
},
#[cfg(feature = "compress-gzip")]
ContentDecoder::Gzip(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
@ -216,9 +265,10 @@ impl ContentDecoder {
Ok(None)
}
}
Err(e) => Err(e),
Err(err) => Err(err),
},
#[cfg(feature = "compress-gzip")]
ContentDecoder::Deflate(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
@ -230,7 +280,22 @@ impl ContentDecoder {
Ok(None)
}
}
Err(e) => Err(e),
Err(err) => Err(err),
},
#[cfg(feature = "compress-zstd")]
ContentDecoder::Zstd(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(err) => Err(err),
},
}
}
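// Hedged standalone sketch of the content-encoding selection performed in
// `from_headers` above: parse the `Content-Encoding` header and fall back to
// `Identity` when it is missing or unrecognized. The function name is an
// assumption; relies on the imports already used in this file.
#[allow(dead_code)]
fn encoding_from_headers(headers: &HeaderMap) -> ContentEncoding {
    headers
        .get(&CONTENT_ENCODING)
        .and_then(|val| val.to_str().ok())
        .and_then(|val| val.parse().ok())
        .unwrap_or(ContentEncoding::Identity)
}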

View file

@ -9,106 +9,118 @@ use std::{
};
use actix_rt::task::{spawn_blocking, JoinHandle};
use brotli2::write::BrotliEncoder;
use bytes::Bytes;
use derive_more::Display;
#[cfg(feature = "compress-gzip")]
use flate2::write::{GzEncoder, ZlibEncoder};
use futures_core::ready;
use pin_project::pin_project;
use crate::{
body::{Body, BodySize, BoxAnyBody, MessageBody, ResponseBody},
http::{
header::{ContentEncoding, CONTENT_ENCODING},
HeaderValue, StatusCode,
},
Error, ResponseHead,
};
use pin_project_lite::pin_project;
use tracing::trace;
#[cfg(feature = "compress-zstd")]
use zstd::stream::write::Encoder as ZstdEncoder;
use super::Writer;
use crate::error::BlockingError;
use crate::{
body::{self, BodySize, MessageBody},
header::{self, ContentEncoding, HeaderValue, CONTENT_ENCODING},
ResponseHead, StatusCode,
};
const MAX_CHUNK_SIZE_ENCODE_IN_PLACE: usize = 1024;
#[pin_project]
pub struct Encoder<B> {
eof: bool,
#[pin]
body: EncoderBody<B>,
encoder: Option<ContentEncoder>,
fut: Option<JoinHandle<Result<ContentEncoder, io::Error>>>,
}
impl<B: MessageBody> Encoder<B> {
pub fn response(
encoding: ContentEncoding,
head: &mut ResponseHead,
body: ResponseBody<B>,
) -> ResponseBody<Encoder<B>> {
let can_encode = !(head.headers().contains_key(&CONTENT_ENCODING)
|| head.status == StatusCode::SWITCHING_PROTOCOLS
|| head.status == StatusCode::NO_CONTENT
|| encoding == ContentEncoding::Identity
|| encoding == ContentEncoding::Auto);
let body = match body {
ResponseBody::Other(b) => match b {
Body::None => return ResponseBody::Other(Body::None),
Body::Empty => return ResponseBody::Other(Body::Empty),
Body::Bytes(buf) => {
if can_encode {
EncoderBody::Bytes(buf)
} else {
return ResponseBody::Other(Body::Bytes(buf));
}
}
Body::Message(stream) => EncoderBody::BoxedStream(stream),
},
ResponseBody::Body(stream) => EncoderBody::Stream(stream),
};
if can_encode {
// Modify response body only if encoder is not None
if let Some(enc) = ContentEncoder::encoder(encoding) {
update_head(encoding, head);
head.no_chunking(false);
return ResponseBody::Body(Encoder {
body,
eof: false,
fut: None,
encoder: Some(enc),
});
}
}
ResponseBody::Body(Encoder {
body,
eof: false,
fut: None,
encoder: None,
})
pin_project! {
pub struct Encoder<B> {
#[pin]
body: EncoderBody<B>,
encoder: Option<ContentEncoder>,
fut: Option<JoinHandle<Result<ContentEncoder, io::Error>>>,
eof: bool,
}
}
#[pin_project(project = EncoderBodyProj)]
enum EncoderBody<B> {
Bytes(Bytes),
Stream(#[pin] B),
BoxedStream(BoxAnyBody),
impl<B: MessageBody> Encoder<B> {
fn none() -> Self {
Encoder {
body: EncoderBody::None {
body: body::None::new(),
},
encoder: None,
fut: None,
eof: true,
}
}
fn empty() -> Self {
Encoder {
body: EncoderBody::Full { body: Bytes::new() },
encoder: None,
fut: None,
eof: true,
}
}
pub fn response(encoding: ContentEncoding, head: &mut ResponseHead, body: B) -> Self {
// no need to compress empty bodies
match body.size() {
BodySize::None => return Self::none(),
BodySize::Sized(0) => return Self::empty(),
_ => {}
}
let should_encode = !(head.headers().contains_key(&CONTENT_ENCODING)
|| head.status == StatusCode::SWITCHING_PROTOCOLS
|| head.status == StatusCode::NO_CONTENT
|| encoding == ContentEncoding::Identity);
let body = match body.try_into_bytes() {
Ok(body) => EncoderBody::Full { body },
Err(body) => EncoderBody::Stream { body },
};
if should_encode {
// wrap body only if encoder is feature-enabled
if let Some(enc) = ContentEncoder::select(encoding) {
update_head(encoding, head);
return Encoder {
body,
encoder: Some(enc),
fut: None,
eof: false,
};
}
}
Encoder {
body,
encoder: None,
fut: None,
eof: false,
}
}
}
pin_project! {
#[project = EncoderBodyProj]
enum EncoderBody<B> {
None { body: body::None },
Full { body: Bytes },
Stream { #[pin] body: B },
}
}
impl<B> MessageBody for EncoderBody<B>
where
B: MessageBody,
B::Error: Into<Error>,
{
type Error = EncoderError<B::Error>;
type Error = EncoderError;
#[inline]
fn size(&self) -> BodySize {
match self {
EncoderBody::Bytes(ref b) => b.size(),
EncoderBody::Stream(ref b) => b.size(),
EncoderBody::BoxedStream(ref b) => b.size(),
EncoderBody::None { body } => body.size(),
EncoderBody::Full { body } => body.size(),
EncoderBody::Stream { body } => body.size(),
}
}
@ -117,28 +129,27 @@ where
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match self.project() {
EncoderBodyProj::Bytes(b) => {
if b.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(std::mem::take(b))))
}
EncoderBodyProj::None { body } => {
Pin::new(body).poll_next(cx).map_err(|err| match err {})
}
// TODO: MSRV 1.51: poll_map_err
EncoderBodyProj::Stream(b) => match ready!(b.poll_next(cx)) {
Some(Err(err)) => Poll::Ready(Some(Err(EncoderError::Body(err)))),
Some(Ok(val)) => Poll::Ready(Some(Ok(val))),
None => Poll::Ready(None),
},
EncoderBodyProj::BoxedStream(ref mut b) => {
match ready!(b.as_pin_mut().poll_next(cx)) {
Some(Err(err)) => {
Poll::Ready(Some(Err(EncoderError::Boxed(err.into()))))
}
Some(Ok(val)) => Poll::Ready(Some(Ok(val))),
None => Poll::Ready(None),
}
EncoderBodyProj::Full { body } => {
Pin::new(body).poll_next(cx).map_err(|err| match err {})
}
EncoderBodyProj::Stream { body } => body
.poll_next(cx)
.map_err(|err| EncoderError::Body(err.into())),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self>
where
Self: Sized,
{
match self {
EncoderBody::None { body } => Ok(body.try_into_bytes().unwrap()),
EncoderBody::Full { body } => Ok(body.try_into_bytes().unwrap()),
_ => Err(self),
}
}
}
@ -146,15 +157,15 @@ where
impl<B> MessageBody for Encoder<B>
where
B: MessageBody,
B::Error: Into<Error>,
{
type Error = EncoderError<B::Error>;
type Error = EncoderError;
#[inline]
fn size(&self) -> BodySize {
if self.encoder.is_none() {
self.body.size()
} else {
if self.encoder.is_some() {
BodySize::Stream
} else {
self.body.size()
}
}
@ -163,6 +174,7 @@ where
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
let mut this = self.project();
loop {
if *this.eof {
return Poll::Ready(None);
@ -170,7 +182,12 @@ where
if let Some(ref mut fut) = this.fut {
let mut encoder = ready!(Pin::new(fut).poll(cx))
.map_err(|_| EncoderError::Blocking(BlockingError))?
.map_err(|_| {
EncoderError::Io(io::Error::new(
io::ErrorKind::Other,
"Blocking task was cancelled unexpectedly",
))
})?
.map_err(EncoderError::Io)?;
let chunk = encoder.take();
@ -211,6 +228,7 @@ where
None => {
if let Some(encoder) = this.encoder.take() {
let chunk = encoder.finish().map_err(EncoderError::Io)?;
if chunk.is_empty() {
return Poll::Ready(None);
} else {
@ -224,35 +242,75 @@ where
}
}
}
#[inline]
fn try_into_bytes(mut self) -> Result<Bytes, Self>
where
Self: Sized,
{
if self.encoder.is_some() {
Err(self)
} else {
match self.body.try_into_bytes() {
Ok(body) => Ok(body),
Err(body) => {
self.body = body;
Err(self)
}
}
}
}
}
fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) {
head.headers_mut().insert(
CONTENT_ENCODING,
HeaderValue::from_static(encoding.as_str()),
);
head.headers_mut()
.insert(header::CONTENT_ENCODING, encoding.to_header_value());
head.headers_mut()
.append(header::VARY, HeaderValue::from_static("accept-encoding"));
head.no_chunking(false);
}
enum ContentEncoder {
#[cfg(feature = "compress-gzip")]
Deflate(ZlibEncoder<Writer>),
#[cfg(feature = "compress-gzip")]
Gzip(GzEncoder<Writer>),
Br(BrotliEncoder<Writer>),
#[cfg(feature = "compress-brotli")]
Brotli(Box<brotli::CompressorWriter<Writer>>),
// We need an explicit 'static lifetime here because ZstdEncoder needs a lifetime argument and we
// use `spawn_blocking` in `Encoder::poll_next` that requires `FnOnce() -> R + Send + 'static`.
#[cfg(feature = "compress-zstd")]
Zstd(ZstdEncoder<'static, Writer>),
}
impl ContentEncoder {
fn encoder(encoding: ContentEncoding) -> Option<Self> {
fn select(encoding: ContentEncoding) -> Option<Self> {
match encoding {
#[cfg(feature = "compress-gzip")]
ContentEncoding::Deflate => Some(ContentEncoder::Deflate(ZlibEncoder::new(
Writer::new(),
flate2::Compression::fast(),
))),
#[cfg(feature = "compress-gzip")]
ContentEncoding::Gzip => Some(ContentEncoder::Gzip(GzEncoder::new(
Writer::new(),
flate2::Compression::fast(),
))),
ContentEncoding::Br => {
Some(ContentEncoder::Br(BrotliEncoder::new(Writer::new(), 3)))
#[cfg(feature = "compress-brotli")]
ContentEncoding::Brotli => Some(ContentEncoder::Brotli(new_brotli_compressor())),
#[cfg(feature = "compress-zstd")]
ContentEncoding::Zstd => {
let encoder = ZstdEncoder::new(Writer::new(), 3).ok()?;
Some(ContentEncoder::Zstd(encoder))
}
_ => None,
}
}
@ -260,38 +318,60 @@ impl ContentEncoder {
#[inline]
pub(crate) fn take(&mut self) -> Bytes {
match *self {
ContentEncoder::Br(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-brotli")]
ContentEncoder::Brotli(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-gzip")]
ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-gzip")]
ContentEncoder::Gzip(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-zstd")]
ContentEncoder::Zstd(ref mut encoder) => encoder.get_mut().take(),
}
}
fn finish(self) -> Result<Bytes, io::Error> {
match self {
ContentEncoder::Br(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
#[cfg(feature = "compress-brotli")]
ContentEncoder::Brotli(mut encoder) => match encoder.flush() {
Ok(()) => Ok(encoder.into_inner().buf.freeze()),
Err(err) => Err(err),
},
#[cfg(feature = "compress-gzip")]
ContentEncoder::Gzip(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
#[cfg(feature = "compress-gzip")]
ContentEncoder::Deflate(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
#[cfg(feature = "compress-zstd")]
ContentEncoder::Zstd(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
}
}
fn write(&mut self, data: &[u8]) -> Result<(), io::Error> {
match *self {
ContentEncoder::Br(ref mut encoder) => match encoder.write_all(data) {
#[cfg(feature = "compress-brotli")]
ContentEncoder::Brotli(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
trace!("Error decoding br encoding: {}", err);
Err(err)
}
},
#[cfg(feature = "compress-gzip")]
ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
@ -299,6 +379,8 @@ impl ContentEncoder {
Err(err)
}
},
#[cfg(feature = "compress-gzip")]
ContentEncoder::Deflate(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
@ -306,39 +388,52 @@ impl ContentEncoder {
Err(err)
}
},
#[cfg(feature = "compress-zstd")]
ContentEncoder::Zstd(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
trace!("Error decoding ztsd encoding: {}", err);
Err(err)
}
},
}
}
}
#[cfg(feature = "compress-brotli")]
fn new_brotli_compressor() -> Box<brotli::CompressorWriter<Writer>> {
Box::new(brotli::CompressorWriter::new(
Writer::new(),
32 * 1024, // 32 KiB buffer
3, // BROTLI_PARAM_QUALITY
22, // BROTLI_PARAM_LGWIN
))
}
#[derive(Debug, Display)]
#[non_exhaustive]
pub enum EncoderError<E> {
pub enum EncoderError {
/// Wrapped body stream error.
#[display(fmt = "body")]
Body(E),
#[display(fmt = "boxed")]
Boxed(Error),
#[display(fmt = "blocking")]
Blocking(BlockingError),
Body(Box<dyn StdError>),
/// Generic I/O error.
#[display(fmt = "io")]
Io(io::Error),
}
impl<E: StdError> StdError for EncoderError<E> {
impl StdError for EncoderError {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
None
}
}
impl<E: Into<Error>> From<EncoderError<E>> for Error {
fn from(err: EncoderError<E>) -> Self {
match err {
EncoderError::Body(err) => err.into(),
EncoderError::Boxed(err) => err,
EncoderError::Blocking(err) => err.into(),
EncoderError::Io(err) => err.into(),
match self {
EncoderError::Body(err) => Some(&**err),
EncoderError::Io(err) => Some(err),
}
}
}
impl From<EncoderError> for crate::Error {
fn from(err: EncoderError) -> Self {
crate::Error::new_encoder().with_cause(err)
}
}
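// Hedged sketch of the header rewrite applied by `update_head` above, shown on a
// bare header map: advertise the chosen coding and tell caches that the response
// varies by `Accept-Encoding`. The function name and the hard-coded "gzip" value
// are illustrative assumptions; relies on the imports already used in this file.
#[allow(dead_code)]
fn mark_compressed(headers: &mut header::HeaderMap) {
    headers.insert(header::CONTENT_ENCODING, HeaderValue::from_static("gzip"));
    headers.append(header::VARY, HeaderValue::from_static("accept-encoding"));
}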

View file

@ -7,10 +7,12 @@ use bytes::{Bytes, BytesMut};
mod decoder;
mod encoder;
pub use self::decoder::Decoder;
pub use self::encoder::Encoder;
pub use self::{decoder::Decoder, encoder::Encoder};
pub(self) struct Writer {
/// Special-purpose writer for streaming (de-)compression.
///
/// Pre-allocates 8KiB of capacity.
struct Writer {
buf: BytesMut,
}

View file

@ -1,174 +1,157 @@
//! Error and Result module
use std::{
error::Error as StdError,
fmt,
io::{self, Write as _},
str::Utf8Error,
string::FromUtf8Error,
};
use std::{error::Error as StdError, fmt, io, str::Utf8Error, string::FromUtf8Error};
use bytes::BytesMut;
use derive_more::{Display, Error, From};
use http::{header, uri::InvalidUri, StatusCode};
use serde::de::value::Error as DeError;
pub use http::{status::InvalidStatusCode, Error as HttpError};
use http::{uri::InvalidUri, StatusCode};
use crate::{body::Body, helpers::Writer, Response};
use crate::{body::BoxBody, Response};
pub use http::Error as HttpError;
/// General purpose actix web error.
///
/// An actix web error is used to carry errors from `std::error`
/// through actix in a convenient way. It can be created through
/// converting errors with `into()`.
///
/// Whenever it is created from an external object, a response error is created
/// for it that can be used to create an HTTP response from it. This means that
/// if you have access to an actix `Error` you can always get a
/// `ResponseError` reference from it.
pub struct Error {
cause: Box<dyn ResponseError>,
inner: Box<ErrorInner>,
}
pub(crate) struct ErrorInner {
#[allow(dead_code)]
kind: Kind,
cause: Option<Box<dyn StdError>>,
}
impl Error {
/// Returns the reference to the underlying `ResponseError`.
pub fn as_response_error(&self) -> &dyn ResponseError {
self.cause.as_ref()
fn new(kind: Kind) -> Self {
Self {
inner: Box::new(ErrorInner { kind, cause: None }),
}
}
/// Similar to `as_response_error` but downcasts.
pub fn as_error<T: ResponseError + 'static>(&self) -> Option<&T> {
<dyn ResponseError>::downcast_ref(self.cause.as_ref())
pub(crate) fn with_cause(mut self, cause: impl Into<Box<dyn StdError>>) -> Self {
self.inner.cause = Some(cause.into());
self
}
pub(crate) fn new_http() -> Self {
Self::new(Kind::Http)
}
pub(crate) fn new_parse() -> Self {
Self::new(Kind::Parse)
}
pub(crate) fn new_payload() -> Self {
Self::new(Kind::Payload)
}
pub(crate) fn new_body() -> Self {
Self::new(Kind::Body)
}
pub(crate) fn new_send_response() -> Self {
Self::new(Kind::SendResponse)
}
#[allow(unused)] // available for future use
pub(crate) fn new_io() -> Self {
Self::new(Kind::Io)
}
#[allow(unused)] // used in encoder behind feature flag so ignore unused warning
pub(crate) fn new_encoder() -> Self {
Self::new(Kind::Encoder)
}
#[allow(unused)] // used with `ws` feature flag
pub(crate) fn new_ws() -> Self {
Self::new(Kind::Ws)
}
}
/// Errors that can generate responses.
pub trait ResponseError: fmt::Debug + fmt::Display {
/// Returns appropriate status code for error.
///
/// A 500 Internal Server Error is used by default. If [error_response](Self::error_response) is
/// also implemented and does not call `self.status_code()`, then this will not be used.
fn status_code(&self) -> StatusCode {
StatusCode::INTERNAL_SERVER_ERROR
}
impl From<Error> for Response<BoxBody> {
fn from(err: Error) -> Self {
// TODO: more appropriate error status codes, usage assessment needed
let status_code = match err.inner.kind {
Kind::Parse => StatusCode::BAD_REQUEST,
_ => StatusCode::INTERNAL_SERVER_ERROR,
};
/// Creates full response for error.
///
/// By default, the generated response uses a 500 Internal Server Error status code, a
/// `Content-Type` of `text/plain`, and the body is set to `Self`'s `Display` impl.
fn error_response(&self) -> Response<Body> {
let mut resp = Response::new(self.status_code());
let mut buf = BytesMut::new();
let _ = write!(Writer(&mut buf), "{}", self);
resp.headers_mut().insert(
header::CONTENT_TYPE,
header::HeaderValue::from_static("text/plain; charset=utf-8"),
);
resp.set_body(Body::from(buf))
Response::new(status_code).set_body(BoxBody::new(err.to_string()))
}
downcast_get_type_id!();
}
downcast!(ResponseError);
#[derive(Debug, Clone, Copy, PartialEq, Eq, Display)]
pub(crate) enum Kind {
#[display(fmt = "error processing HTTP")]
Http,
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.cause, f)
}
#[display(fmt = "error parsing HTTP message")]
Parse,
#[display(fmt = "request payload read error")]
Payload,
#[display(fmt = "response body write error")]
Body,
#[display(fmt = "send response error")]
SendResponse,
#[display(fmt = "error in WebSocket process")]
Ws,
#[display(fmt = "connection error")]
Io,
#[display(fmt = "encoder error")]
Encoder,
}
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", &self.cause)
f.debug_struct("actix_http::Error")
.field("kind", &self.inner.kind)
.field("cause", &self.inner.cause)
.finish()
}
}
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
None
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.inner.cause.as_ref() {
Some(err) => write!(f, "{}: {}", &self.inner.kind, err),
None => write!(f, "{}", &self.inner.kind),
}
}
}
impl From<()> for Error {
fn from(_: ()) -> Self {
Error::from(UnitError)
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
self.inner.cause.as_ref().map(Box::as_ref)
}
}
impl From<std::convert::Infallible> for Error {
fn from(_: std::convert::Infallible) -> Self {
// hint that this is an error that will never happen
unreachable!()
fn from(err: std::convert::Infallible) -> Self {
match err {}
}
}
/// Convert `Error` to a `Response` instance
impl From<Error> for Response<Body> {
fn from(err: Error) -> Self {
Response::from_error(err)
impl From<HttpError> for Error {
fn from(err: HttpError) -> Self {
Self::new_http().with_cause(err)
}
}
/// `Error` for any error that implements `ResponseError`
impl<T: ResponseError + 'static> From<T> for Error {
fn from(err: T) -> Error {
Error {
cause: Box::new(err),
}
#[cfg(feature = "ws")]
impl From<crate::ws::HandshakeError> for Error {
fn from(err: crate::ws::HandshakeError) -> Self {
Self::new_ws().with_cause(err)
}
}
#[derive(Debug, Display, Error)]
#[display(fmt = "Unknown Error")]
struct UnitError;
impl ResponseError for Box<dyn StdError + 'static> {}
/// Returns [`StatusCode::INTERNAL_SERVER_ERROR`] for [`UnitError`].
impl ResponseError for UnitError {}
/// Returns [`StatusCode::INTERNAL_SERVER_ERROR`] for [`actix_tls::accept::openssl::SslError`].
#[cfg(feature = "openssl")]
impl ResponseError for actix_tls::accept::openssl::SslError {}
/// Returns [`StatusCode::BAD_REQUEST`] for [`DeError`].
impl ResponseError for DeError {
fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST
}
}
/// Returns [`StatusCode::BAD_REQUEST`] for [`Utf8Error`].
impl ResponseError for Utf8Error {
fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST
}
}
/// Returns [`StatusCode::INTERNAL_SERVER_ERROR`] for [`HttpError`].
impl ResponseError for HttpError {}
/// Inspects the underlying [`io::ErrorKind`] and returns an appropriate status code.
///
/// If the error is [`io::ErrorKind::NotFound`], [`StatusCode::NOT_FOUND`] is returned. If the
/// error is [`io::ErrorKind::PermissionDenied`], [`StatusCode::FORBIDDEN`] is returned. Otherwise,
/// [`StatusCode::INTERNAL_SERVER_ERROR`] is returned.
impl ResponseError for io::Error {
fn status_code(&self) -> StatusCode {
match self.kind() {
io::ErrorKind::NotFound => StatusCode::NOT_FOUND,
io::ErrorKind::PermissionDenied => StatusCode::FORBIDDEN,
_ => StatusCode::INTERNAL_SERVER_ERROR,
}
}
}
/// Returns [`StatusCode::BAD_REQUEST`] for [`header::InvalidHeaderValue`].
impl ResponseError for header::InvalidHeaderValue {
fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST
#[cfg(feature = "ws")]
impl From<crate::ws::ProtocolError> for Error {
fn from(err: crate::ws::ProtocolError) -> Self {
Self::new_ws().with_cause(err)
}
}
@ -177,54 +160,47 @@ impl ResponseError for header::InvalidHeaderValue {
#[non_exhaustive]
pub enum ParseError {
/// An invalid `Method`, such as `GE.T`.
#[display(fmt = "Invalid Method specified")]
#[display(fmt = "invalid method specified")]
Method,
/// An invalid `Uri`, such as `exam ple.domain`.
#[display(fmt = "Uri error: {}", _0)]
#[display(fmt = "URI error: {}", _0)]
Uri(InvalidUri),
/// An invalid `HttpVersion`, such as `HTP/1.1`
#[display(fmt = "Invalid HTTP version specified")]
#[display(fmt = "invalid HTTP version specified")]
Version,
/// An invalid `Header`.
#[display(fmt = "Invalid Header provided")]
#[display(fmt = "invalid Header provided")]
Header,
/// A message head is too large to be reasonable.
#[display(fmt = "Message head is too large")]
#[display(fmt = "message head is too large")]
TooLarge,
/// A message reached EOF, but is not complete.
#[display(fmt = "Message is incomplete")]
#[display(fmt = "message is incomplete")]
Incomplete,
/// An invalid `Status`, such as `1337 ELITE`.
#[display(fmt = "Invalid Status provided")]
#[display(fmt = "invalid status provided")]
Status,
/// A timeout occurred waiting for an IO event.
#[allow(dead_code)]
#[display(fmt = "Timeout")]
#[display(fmt = "timeout")]
Timeout,
/// An `io::Error` that occurred while trying to read or write to a network stream.
#[display(fmt = "IO error: {}", _0)]
/// An I/O error that occurred while trying to read or write to a network stream.
#[display(fmt = "I/O error: {}", _0)]
Io(io::Error),
/// Parsing a field as string failed
#[display(fmt = "UTF8 error: {}", _0)]
/// Parsing a field as string failed.
#[display(fmt = "UTF-8 error: {}", _0)]
Utf8(Utf8Error),
}
/// Return `BadRequest` for `ParseError`
impl ResponseError for ParseError {
fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST
}
}
impl From<io::Error> for ParseError {
fn from(err: io::Error) -> ParseError {
ParseError::Io(err)
@ -263,40 +239,42 @@ impl From<httparse::Error> for ParseError {
}
}
/// A set of errors that can occur while running blocking tasks in a thread pool.
#[derive(Debug, Display, Error)]
#[display(fmt = "Blocking thread pool is gone")]
pub struct BlockingError;
impl From<ParseError> for Error {
fn from(err: ParseError) -> Self {
Self::new_parse().with_cause(err)
}
}
/// `InternalServerError` for `BlockingError`
impl ResponseError for BlockingError {}
impl From<ParseError> for Response<BoxBody> {
fn from(err: ParseError) -> Self {
Error::from(err).into()
}
}
/// A set of errors that can occur during payload parsing.
#[derive(Debug, Display)]
#[non_exhaustive]
pub enum PayloadError {
/// A payload reached EOF, but is not complete.
#[display(
fmt = "A payload reached EOF, but is not complete. Inner error: {:?}",
_0
)]
#[display(fmt = "payload reached EOF before completing: {:?}", _0)]
Incomplete(Option<io::Error>),
/// Content encoding stream corruption.
#[display(fmt = "Can not decode content-encoding.")]
#[display(fmt = "can not decode content-encoding")]
EncodingCorrupted,
/// Payload reached size limit.
#[display(fmt = "Payload reached size limit.")]
#[display(fmt = "payload reached size limit")]
Overflow,
/// Payload length is unknown.
#[display(fmt = "Payload length is unknown.")]
#[display(fmt = "payload length is unknown")]
UnknownLength,
/// HTTP/2 payload error.
#[cfg(feature = "http2")]
#[display(fmt = "{}", _0)]
Http2Payload(h2::Error),
Http2Payload(::h2::Error),
/// Generic I/O error.
#[display(fmt = "{}", _0)]
@ -307,18 +285,20 @@ impl std::error::Error for PayloadError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
PayloadError::Incomplete(None) => None,
PayloadError::Incomplete(Some(err)) => Some(err as &dyn std::error::Error),
PayloadError::Incomplete(Some(err)) => Some(err),
PayloadError::EncodingCorrupted => None,
PayloadError::Overflow => None,
PayloadError::UnknownLength => None,
PayloadError::Http2Payload(err) => Some(err as &dyn std::error::Error),
PayloadError::Io(err) => Some(err as &dyn std::error::Error),
#[cfg(feature = "http2")]
PayloadError::Http2Payload(err) => Some(err),
PayloadError::Io(err) => Some(err),
}
}
}
impl From<h2::Error> for PayloadError {
fn from(err: h2::Error) -> Self {
#[cfg(feature = "http2")]
impl From<::h2::Error> for PayloadError {
fn from(err: ::h2::Error) -> Self {
PayloadError::Http2Payload(err)
}
}
@ -335,168 +315,138 @@ impl From<io::Error> for PayloadError {
}
}
impl From<BlockingError> for PayloadError {
fn from(_: BlockingError) -> Self {
PayloadError::Io(io::Error::new(
io::ErrorKind::Other,
"Operation is canceled",
))
}
}
/// `PayloadError` returns two possible results:
///
/// - `Overflow` returns `PayloadTooLarge`
/// - Other errors returns `BadRequest`
impl ResponseError for PayloadError {
fn status_code(&self) -> StatusCode {
match *self {
PayloadError::Overflow => StatusCode::PAYLOAD_TOO_LARGE,
_ => StatusCode::BAD_REQUEST,
}
impl From<PayloadError> for Error {
fn from(err: PayloadError) -> Self {
Self::new_payload().with_cause(err)
}
}
/// A set of errors that can occur during dispatching HTTP requests.
#[derive(Debug, Display, Error, From)]
#[derive(Debug, Display, From)]
#[non_exhaustive]
pub enum DispatchError {
/// Service error
Service(Error),
/// Service error.
#[display(fmt = "service error")]
Service(Response<BoxBody>),
/// Upgrade service error
/// Body streaming error.
#[display(fmt = "body error: {}", _0)]
Body(Box<dyn StdError>),
/// Upgrade service error.
#[display(fmt = "upgrade error")]
Upgrade,
/// An `io::Error` that occurred while trying to read or write to a network
/// stream.
#[display(fmt = "IO error: {}", _0)]
/// An `io::Error` that occurred while trying to read or write to a network stream.
#[display(fmt = "I/O error: {}", _0)]
Io(io::Error),
/// Http request parse error.
#[display(fmt = "Parse error: {}", _0)]
/// Request parse error.
#[display(fmt = "request parse error: {}", _0)]
Parse(ParseError),
/// Http/2 error
/// HTTP/2 error.
#[display(fmt = "{}", _0)]
#[cfg(feature = "http2")]
H2(h2::Error),
/// The first request did not complete within the specified timeout.
#[display(fmt = "The first request did not complete within the specified timeout")]
#[display(fmt = "request did not complete within the specified timeout")]
SlowRequestTimeout,
/// Disconnect timeout. Makes sense for ssl streams.
#[display(fmt = "Connection shutdown timeout")]
/// Disconnect timeout. Makes sense for TLS streams.
#[display(fmt = "connection shutdown timeout")]
DisconnectTimeout,
/// Payload is not consumed
#[display(fmt = "Task is completed but request's payload is not consumed")]
PayloadIsNotConsumed,
/// Handler dropped payload before reading EOF.
#[display(fmt = "handler dropped payload before reading EOF")]
HandlerDroppedPayload,
/// Malformed request
#[display(fmt = "Malformed request")]
MalformedRequest,
/// Internal error
#[display(fmt = "Internal error")]
/// Internal error.
#[display(fmt = "internal error")]
InternalError,
/// Unknown error
#[display(fmt = "Unknown error")]
Unknown,
}
/// A set of errors that can occur while parsing the content type.
#[derive(Debug, Display, Error)]
#[non_exhaustive]
pub enum ContentTypeError {
/// Can not parse content type
#[display(fmt = "Can not parse content type")]
ParseError,
impl StdError for DispatchError {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
DispatchError::Service(_res) => None,
DispatchError::Body(err) => Some(&**err),
DispatchError::Io(err) => Some(err),
DispatchError::Parse(err) => Some(err),
/// Unknown content encoding
#[display(fmt = "Unknown content encoding")]
UnknownEncoding,
}
#[cfg(feature = "http2")]
DispatchError::H2(err) => Some(err),
#[cfg(test)]
mod content_type_test_impls {
use super::*;
impl std::cmp::PartialEq for ContentTypeError {
fn eq(&self, other: &Self) -> bool {
match self {
Self::ParseError => matches!(other, ContentTypeError::ParseError),
Self::UnknownEncoding => {
matches!(other, ContentTypeError::UnknownEncoding)
}
}
_ => None,
}
}
}
impl ResponseError for ContentTypeError {
fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST
}
/// A set of errors that can occur while parsing the content type.
#[derive(Debug, Display, Error)]
#[cfg_attr(test, derive(PartialEq, Eq))]
#[non_exhaustive]
pub enum ContentTypeError {
/// Can not parse content type.
#[display(fmt = "could not parse content type")]
ParseError,
/// Unknown content encoding.
#[display(fmt = "unknown content encoding")]
UnknownEncoding,
}
#[cfg(test)]
mod tests {
use http::Error as HttpError;
use super::*;
use http::{Error as HttpError, StatusCode};
use std::io;
#[test]
fn test_into_response() {
let resp: Response<Body> = ParseError::Incomplete.error_response();
let resp: Response<BoxBody> = ParseError::Incomplete.into();
assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
let err: HttpError = StatusCode::from_u16(10000).err().unwrap().into();
let resp: Response<Body> = err.error_response();
let resp: Response<BoxBody> = Error::new_http().with_cause(err).into();
assert_eq!(resp.status(), StatusCode::INTERNAL_SERVER_ERROR);
}
#[test]
fn test_as_response() {
let orig = io::Error::new(io::ErrorKind::Other, "other");
let e: Error = ParseError::Io(orig).into();
assert_eq!(format!("{}", e.as_response_error()), "IO error: other");
}
#[test]
fn test_error_cause() {
let orig = io::Error::new(io::ErrorKind::Other, "other");
let desc = orig.to_string();
let e = Error::from(orig);
assert_eq!(format!("{}", e.as_response_error()), desc);
let err: Error = ParseError::Io(orig).into();
assert_eq!(
format!("{}", err),
"error parsing HTTP message: I/O error: other"
);
}
#[test]
fn test_error_display() {
let orig = io::Error::new(io::ErrorKind::Other, "other");
let desc = orig.to_string();
let e = Error::from(orig);
assert_eq!(format!("{}", e), desc);
let err = Error::new_io().with_cause(orig);
assert_eq!("connection error: other", err.to_string());
}
#[test]
fn test_error_http_response() {
let orig = io::Error::new(io::ErrorKind::Other, "other");
let e = Error::from(orig);
let resp: Response<Body> = e.into();
let err = Error::new_io().with_cause(orig);
let resp: Response<BoxBody> = err.into();
assert_eq!(resp.status(), StatusCode::INTERNAL_SERVER_ERROR);
}
#[test]
fn test_payload_error() {
let err: PayloadError =
io::Error::new(io::ErrorKind::Other, "ParseError").into();
let err: PayloadError = io::Error::new(io::ErrorKind::Other, "ParseError").into();
assert!(err.to_string().contains("ParseError"));
let err = PayloadError::Incomplete(None);
assert_eq!(
err.to_string(),
"A payload reached EOF, but is not complete. Inner error: None"
"payload reached EOF before completing: None"
);
}
@ -516,7 +466,7 @@ mod tests {
match ParseError::from($from) {
e @ $error => {
let desc = format!("{}", e);
assert_eq!(desc, format!("IO error: {}", $from));
assert_eq!(desc, format!("I/O error: {}", $from));
}
_ => unreachable!("{:?}", $from),
}
@ -535,14 +485,4 @@ mod tests {
from!(httparse::Error::TooManyHeaders => ParseError::TooLarge);
from!(httparse::Error::Version => ParseError::Version);
}
#[test]
fn test_error_casting() {
let err = PayloadError::Overflow;
let resp_err: &dyn ResponseError = &err;
let err = resp_err.downcast_ref::<PayloadError>().unwrap();
assert_eq!(err.to_string(), "Payload reached size limit.");
let not_err = resp_err.downcast_ref::<ContentTypeError>();
assert!(not_err.is_none());
}
}
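// Hedged sketch of the external view of the conversions above: a parse failure can
// be turned straight into a response via `From<ParseError> for Response<BoxBody>`,
// which routes through `Kind::Parse` and therefore yields a 400 Bad Request.
// The function name is an illustrative assumption.
#[allow(dead_code)]
fn reject(err: ParseError) -> Response<BoxBody> {
    err.into()
}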

View file

@ -1,18 +1,38 @@
use std::{
any::{Any, TypeId},
fmt, mem,
collections::HashMap,
fmt,
hash::{BuildHasherDefault, Hasher},
};
use ahash::AHashMap;
/// A hasher for `TypeId`s that takes advantage of its known characteristics.
///
/// The author of the `anymap` crate has done research on the topic:
/// https://github.com/chris-morgan/anymap/blob/2e9a5704/src/lib.rs#L599
#[derive(Debug, Default)]
struct NoOpHasher(u64);
impl Hasher for NoOpHasher {
fn write(&mut self, _bytes: &[u8]) {
unimplemented!("This NoOpHasher can only handle u64s")
}
fn write_u64(&mut self, i: u64) {
self.0 = i;
}
fn finish(&self) -> u64 {
self.0
}
}
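As a quick illustration of the comment above, here is a minimal sketch (not part of the commit) of this hasher plugged into a std `HashMap`; it assumes, as the hasher itself does, that `TypeId`'s `Hash` impl feeds a single `u64` via `write_u64`:
#[test]
fn no_op_hasher_sketch() {
    // Build a `TypeId`-keyed map that uses `NoOpHasher` through `BuildHasherDefault`.
    let mut map: HashMap<TypeId, Box<dyn Any>, BuildHasherDefault<NoOpHasher>> =
        HashMap::default();

    // Hashing the key calls `write_u64` once; the "hash" is just that value.
    map.insert(TypeId::of::<u32>(), Box::new(42u32));
    assert!(map.contains_key(&TypeId::of::<u32>()));
}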
/// A type map for request extensions.
///
/// All entries into this map must be owned types (or static references).
#[derive(Default)]
pub struct Extensions {
/// Use FxHasher with a std HashMap with for faster
/// lookups on the small `TypeId` (u64 equivalent) keys.
map: AHashMap<TypeId, Box<dyn Any>>,
/// Uses the no-op `TypeId` hasher above with a std `HashMap` for faster lookups on the small `TypeId` keys.
map: HashMap<TypeId, Box<dyn Any>, BuildHasherDefault<NoOpHasher>>,
}
impl Extensions {
@ -20,7 +40,7 @@ impl Extensions {
#[inline]
pub fn new() -> Extensions {
Extensions {
map: AHashMap::default(),
map: HashMap::default(),
}
}
@ -123,11 +143,6 @@ impl Extensions {
pub fn extend(&mut self, other: Extensions) {
self.map.extend(other.map);
}
/// Sets (or overrides) items from `other` into this map.
pub(crate) fn drain_from(&mut self, other: &mut Self) {
self.map.extend(mem::take(&mut other.map));
}
}
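A minimal usage sketch of this type map, using only the `insert` and `get` methods exercised in the tests below:
#[test]
fn extensions_usage_sketch() {
    let mut ext = Extensions::new();
    ext.insert(8u32);                   // entries are keyed by the value's `TypeId`
    ext.insert(String::from("hello"));

    assert_eq!(ext.get::<u32>(), Some(&8u32));
    assert_eq!(ext.get::<String>().map(String::as_str), Some("hello"));
    assert!(ext.get::<u64>().is_none()); // no `u64` was inserted
}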
impl fmt::Debug for Extensions {
@ -179,6 +194,8 @@ mod tests {
#[test]
fn test_integers() {
static A: u32 = 8;
let mut map = Extensions::new();
map.insert::<i8>(8);
@ -191,6 +208,7 @@ mod tests {
map.insert::<u32>(32);
map.insert::<u64>(64);
map.insert::<u128>(128);
map.insert::<&'static u32>(&A);
assert!(map.get::<i8>().is_some());
assert!(map.get::<i16>().is_some());
assert!(map.get::<i32>().is_some());
@ -201,6 +219,7 @@ mod tests {
assert!(map.get::<u32>().is_some());
assert!(map.get::<u64>().is_some());
assert!(map.get::<u128>().is_some());
assert!(map.get::<&'static u32>().is_some());
}
#[test]
@ -279,27 +298,4 @@ mod tests {
assert_eq!(extensions.get(), Some(&20u8));
assert_eq!(extensions.get_mut(), Some(&mut 20u8));
}
#[test]
fn test_drain_from() {
let mut ext = Extensions::new();
ext.insert(2isize);
let mut more_ext = Extensions::new();
more_ext.insert(5isize);
more_ext.insert(5usize);
assert_eq!(ext.get::<isize>(), Some(&2isize));
assert_eq!(ext.get::<usize>(), None);
assert_eq!(more_ext.get::<isize>(), Some(&5isize));
assert_eq!(more_ext.get::<usize>(), Some(&5usize));
ext.drain_from(&mut more_ext);
assert_eq!(ext.get::<isize>(), Some(&5isize));
assert_eq!(ext.get::<usize>(), Some(&5usize));
assert_eq!(more_ext.get::<isize>(), None);
assert_eq!(more_ext.get::<usize>(), None);
}
}

View file

@ -0,0 +1,427 @@
use std::{io, task::Poll};
use bytes::{Buf as _, Bytes, BytesMut};
use tracing::{debug, trace};
macro_rules! byte (
($rdr:ident) => ({
if $rdr.len() > 0 {
let b = $rdr[0];
$rdr.advance(1);
b
} else {
return Poll::Pending
}
})
);
#[derive(Debug, Clone, PartialEq, Eq)]
pub(super) enum ChunkedState {
Size,
SizeLws,
Extension,
SizeLf,
Body,
BodyCr,
BodyLf,
EndCr,
EndLf,
End,
}
impl ChunkedState {
pub(super) fn step(
&self,
body: &mut BytesMut,
size: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<Result<ChunkedState, io::Error>> {
use self::ChunkedState::*;
match *self {
Size => ChunkedState::read_size(body, size),
SizeLws => ChunkedState::read_size_lws(body),
Extension => ChunkedState::read_extension(body),
SizeLf => ChunkedState::read_size_lf(body, *size),
Body => ChunkedState::read_body(body, size, buf),
BodyCr => ChunkedState::read_body_cr(body),
BodyLf => ChunkedState::read_body_lf(body),
EndCr => ChunkedState::read_end_cr(body),
EndLf => ChunkedState::read_end_lf(body),
End => Poll::Ready(Ok(ChunkedState::End)),
}
}
fn read_size(rdr: &mut BytesMut, size: &mut u64) -> Poll<Result<ChunkedState, io::Error>> {
let radix = 16;
let rem = match byte!(rdr) {
b @ b'0'..=b'9' => b - b'0',
b @ b'a'..=b'f' => b + 10 - b'a',
b @ b'A'..=b'F' => b + 10 - b'A',
b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => return Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => {
return Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size line: Invalid Size",
)));
}
};
match size.checked_mul(radix) {
Some(n) => {
*size = n;
*size += rem as u64;
Poll::Ready(Ok(ChunkedState::Size))
}
None => {
debug!("chunk size would overflow u64");
Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size line: Size is too big",
)))
}
}
}
fn read_size_lws(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
// LWS can follow the chunk size, but no more digits can come
b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size linear white space",
))),
}
}
fn read_extension(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
// strictly 0x20 (space) should be disallowed but we don't parse quoted strings here
0x00..=0x08 | 0x0a..=0x1f | 0x7f => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid character in chunk extension",
))),
_ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions
}
}
fn read_size_lf(rdr: &mut BytesMut, size: u64) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' if size > 0 => Poll::Ready(Ok(ChunkedState::Body)),
b'\n' if size == 0 => Poll::Ready(Ok(ChunkedState::EndCr)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size LF",
))),
}
}
fn read_body(
rdr: &mut BytesMut,
rem: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<Result<ChunkedState, io::Error>> {
trace!("Chunked read, remaining={:?}", rem);
let len = rdr.len() as u64;
if len == 0 {
Poll::Ready(Ok(ChunkedState::Body))
} else {
let slice;
if *rem > len {
slice = rdr.split().freeze();
*rem -= len;
} else {
slice = rdr.split_to(*rem as usize).freeze();
*rem = 0;
}
*buf = Some(slice);
if *rem > 0 {
Poll::Ready(Ok(ChunkedState::Body))
} else {
Poll::Ready(Ok(ChunkedState::BodyCr))
}
}
}
fn read_body_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk body CR",
))),
}
}
fn read_body_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' => Poll::Ready(Ok(ChunkedState::Size)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk body LF",
))),
}
}
fn read_end_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end CR",
))),
}
}
fn read_end_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' => Poll::Ready(Ok(ChunkedState::End)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end LF",
))),
}
}
}
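To make the state machine above concrete, here is a rough, self-contained sketch (an assumed caller, not code from this commit) of how `step` could be driven over a buffer until it yields a chunk or reaches the end of the stream:
fn drive_chunked(
    state: &mut ChunkedState,
    src: &mut BytesMut,
    size: &mut u64,
) -> Poll<Result<Option<Bytes>, io::Error>> {
    loop {
        let mut chunk = None;

        // `step` consumes bytes from `src`, returns the next state, and goes
        // `Poll::Pending` (via the `byte!` macro) once `src` runs dry mid-line.
        match state.step(src, size, &mut chunk) {
            Poll::Pending => return Poll::Pending,
            Poll::Ready(Err(err)) => return Poll::Ready(Err(err)),
            Poll::Ready(Ok(next)) => {
                *state = next;

                if let Some(bytes) = chunk {
                    return Poll::Ready(Ok(Some(bytes)));
                }

                if *state == ChunkedState::End {
                    return Poll::Ready(Ok(None));
                }

                if src.is_empty() {
                    // no chunk produced and nothing left to consume
                    return Poll::Pending;
                }
            }
        }
    }
}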
#[cfg(test)]
mod tests {
use actix_codec::Decoder as _;
use bytes::{Bytes, BytesMut};
use http::Method;
use crate::{
error::ParseError,
h1::decoder::{MessageDecoder, PayloadItem},
HttpMessage as _, Request,
};
macro_rules! parse_ready {
($e:expr) => {{
match MessageDecoder::<Request>::default().decode($e) {
Ok(Some((msg, _))) => msg,
Ok(_) => unreachable!("Eof during parsing http request"),
Err(err) => unreachable!("Error during parsing http request: {:?}", err),
}
}};
}
macro_rules! expect_parse_err {
($e:expr) => {{
match MessageDecoder::<Request>::default().decode($e) {
Err(err) => match err {
ParseError::Io(_) => unreachable!("Parse error expected"),
_ => {}
},
_ => unreachable!("Error expected"),
}
}};
}
#[test]
fn test_parse_chunked_payload_chunk_extension() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\
\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(msg.chunked().unwrap());
buf.extend(b"4;test\r\ndata\r\n4\r\nline\r\n0\r\n\r\n"); // test: test\r\n\r\n")
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
assert_eq!(chunk, Bytes::from_static(b"data"));
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
assert_eq!(chunk, Bytes::from_static(b"line"));
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
}
#[test]
fn test_request_chunked() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let req = parse_ready!(&mut buf);
if let Ok(val) = req.chunked() {
assert!(val);
} else {
unreachable!("Error");
}
// intentional typo in "chunked"
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chnked\r\n\r\n",
);
expect_parse_err!(&mut buf);
}
#[test]
fn test_http_request_chunked_payload() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n");
assert_eq!(
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
b"data"
);
assert_eq!(
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
b"line"
);
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
}
#[test]
fn test_http_request_chunked_payload_and_next_message() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(
b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n\
POST /test2 HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n"
.iter(),
);
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"data");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"line");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
assert!(req.chunked().unwrap());
assert_eq!(*req.method(), Method::POST);
assert!(req.chunked().unwrap());
}
#[test]
fn test_http_request_chunked_payload_chunks() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(b"4\r\n1111\r\n");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"1111");
buf.extend(b"4\r\ndata\r");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"data");
buf.extend(b"\n4");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\r");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\n");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"li");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"li");
//trailers
//buf.feed_data("test: test\r\n");
//not_ready!(reader.parse(&mut buf, &mut readbuf));
buf.extend(b"ne\r\n0\r\n");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"ne");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\r\n");
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
}
#[test]
fn chunk_extension_quoted() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
Host: localhost:8080\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
2;hello=b;one=\"1 2 3\"\r\n\
xx",
);
let mut reader = MessageDecoder::<Request>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let chunk = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"xx")));
}
#[test]
fn hrs_chunk_extension_invalid() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: localhost:8080\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
2;x\nx\r\n\
4c\r\n\
0\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let err = pl.decode(&mut buf).unwrap_err();
assert!(err
.to_string()
.contains("Invalid character in chunk extension"));
}
#[test]
fn hrs_chunk_size_overflow() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
f0000000000000003\r\n\
abc\r\n\
0\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let err = pl.decode(&mut buf).unwrap_err();
assert!(err
.to_string()
.contains("Invalid chunk size line: Size is too big"));
}
}

View file

@ -1,23 +1,26 @@
use std::io;
use std::{fmt, io};
use actix_codec::{Decoder, Encoder};
use bitflags::bitflags;
use bytes::{Bytes, BytesMut};
use http::{Method, Version};
use tokio_util::codec::{Decoder, Encoder};
use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
use super::{decoder, encoder, reserve_readbuf};
use super::{Message, MessageType};
use crate::body::BodySize;
use crate::config::ServiceConfig;
use crate::error::{ParseError, PayloadError};
use crate::message::{ConnectionType, RequestHeadType, ResponseHead};
use super::{
decoder::{self, PayloadDecoder, PayloadItem, PayloadType},
encoder, reserve_readbuf, Message, MessageType,
};
use crate::{
body::BodySize,
error::{ParseError, PayloadError},
ConnectionType, RequestHeadType, ResponseHead, ServiceConfig,
};
bitflags! {
#[derive(Debug, Clone, Copy)]
struct Flags: u8 {
const HEAD = 0b0000_0001;
const KEEPALIVE_ENABLED = 0b0000_1000;
const STREAM = 0b0001_0000;
const HEAD = 0b0000_0001;
const KEEP_ALIVE_ENABLED = 0b0000_1000;
const STREAM = 0b0001_0000;
}
}
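A small sketch of how these flags are set and queried further down in this file (for example when encoding a request head):
#[test]
fn client_codec_flags_sketch() {
    let mut flags = Flags::KEEP_ALIVE_ENABLED;
    flags.set(Flags::HEAD, true); // e.g. when the request method is `HEAD`

    assert!(flags.contains(Flags::HEAD | Flags::KEEP_ALIVE_ENABLED));
    assert!(!flags.contains(Flags::STREAM));
}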
@ -36,7 +39,7 @@ struct ClientCodecInner {
decoder: decoder::MessageDecoder<ResponseHead>,
payload: Option<PayloadDecoder>,
version: Version,
ctype: ConnectionType,
conn_type: ConnectionType,
// encoder part
flags: Flags,
@ -49,23 +52,32 @@ impl Default for ClientCodec {
}
}
impl fmt::Debug for ClientCodec {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("h1::ClientCodec")
.field("flags", &self.inner.flags)
.finish_non_exhaustive()
}
}
impl ClientCodec {
/// Create HTTP/1 codec.
///
/// The keep-alive setting in `config` determines how the response `Connection` header is generated.
pub fn new(config: ServiceConfig) -> Self {
let flags = if config.keep_alive_enabled() {
Flags::KEEPALIVE_ENABLED
let flags = if config.keep_alive().enabled() {
Flags::KEEP_ALIVE_ENABLED
} else {
Flags::empty()
};
ClientCodec {
inner: ClientCodecInner {
config,
decoder: decoder::MessageDecoder::default(),
payload: None,
version: Version::HTTP_11,
ctype: ConnectionType::Close,
conn_type: ConnectionType::Close,
flags,
encoder: encoder::MessageEncoder::default(),
@ -75,12 +87,12 @@ impl ClientCodec {
/// Check if request is upgrade
pub fn upgrade(&self) -> bool {
self.inner.ctype == ConnectionType::Upgrade
self.inner.conn_type == ConnectionType::Upgrade
}
/// Check if last response is keep-alive
pub fn keepalive(&self) -> bool {
self.inner.ctype == ConnectionType::KeepAlive
pub fn keep_alive(&self) -> bool {
self.inner.conn_type == ConnectionType::KeepAlive
}
/// Check last request's message type
@ -102,8 +114,8 @@ impl ClientCodec {
impl ClientPayloadCodec {
/// Check if last response is keep-alive
pub fn keepalive(&self) -> bool {
self.inner.ctype == ConnectionType::KeepAlive
pub fn keep_alive(&self) -> bool {
self.inner.conn_type == ConnectionType::KeepAlive
}
/// Transform payload codec to a message codec
@ -117,15 +129,18 @@ impl Decoder for ClientCodec {
type Error = ParseError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
debug_assert!(!self.inner.payload.is_some(), "Payload decoder is set");
debug_assert!(
self.inner.payload.is_none(),
"Payload decoder should not be set"
);
if let Some((req, payload)) = self.inner.decoder.decode(src)? {
if let Some(ctype) = req.ctype() {
if let Some(conn_type) = req.conn_type() {
// do not use peer's keep-alive
self.inner.ctype = if ctype == ConnectionType::KeepAlive {
self.inner.ctype
self.inner.conn_type = if conn_type == ConnectionType::KeepAlive {
self.inner.conn_type
} else {
ctype
conn_type
};
}
@ -190,9 +205,9 @@ impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
.set(Flags::HEAD, head.as_ref().method == Method::HEAD);
// connection status
inner.ctype = match head.as_ref().connection_type() {
inner.conn_type = match head.as_ref().connection_type() {
ConnectionType::KeepAlive => {
if inner.flags.contains(Flags::KEEPALIVE_ENABLED) {
if inner.flags.contains(Flags::KEEP_ALIVE_ENABLED) {
ConnectionType::KeepAlive
} else {
ConnectionType::Close
@ -209,7 +224,7 @@ impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
false,
inner.version,
length,
inner.ctype,
inner.conn_type,
&inner.config,
)?;
}

View file

@ -1,25 +1,22 @@
use std::{fmt, io};
use actix_codec::{Decoder, Encoder};
use bitflags::bitflags;
use bytes::BytesMut;
use http::{Method, Version};
use tokio_util::codec::{Decoder, Encoder};
use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
use super::{decoder, encoder};
use super::{Message, MessageType};
use crate::body::BodySize;
use crate::config::ServiceConfig;
use crate::error::ParseError;
use crate::message::ConnectionType;
use crate::request::Request;
use crate::response::Response;
use super::{
decoder::{self, PayloadDecoder, PayloadItem, PayloadType},
encoder, Message, MessageType,
};
use crate::{body::BodySize, error::ParseError, ConnectionType, Request, Response, ServiceConfig};
bitflags! {
#[derive(Debug, Clone, Copy)]
struct Flags: u8 {
const HEAD = 0b0000_0001;
const KEEPALIVE_ENABLED = 0b0000_0010;
const STREAM = 0b0000_0100;
const HEAD = 0b0000_0001;
const KEEP_ALIVE_ENABLED = 0b0000_0010;
const STREAM = 0b0000_0100;
}
}
@ -29,7 +26,7 @@ pub struct Codec {
decoder: decoder::MessageDecoder<Request>,
payload: Option<PayloadDecoder>,
version: Version,
ctype: ConnectionType,
conn_type: ConnectionType,
// encoder part
flags: Flags,
@ -44,7 +41,9 @@ impl Default for Codec {
impl fmt::Debug for Codec {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "h1::Codec({:?})", self.flags)
f.debug_struct("h1::Codec")
.field("flags", &self.flags)
.finish_non_exhaustive()
}
}
@ -53,8 +52,8 @@ impl Codec {
///
/// The keep-alive setting in `config` determines how the response `Connection` header is generated.
pub fn new(config: ServiceConfig) -> Self {
let flags = if config.keep_alive_enabled() {
Flags::KEEPALIVE_ENABLED
let flags = if config.keep_alive().enabled() {
Flags::KEEP_ALIVE_ENABLED
} else {
Flags::empty()
};
@ -65,7 +64,7 @@ impl Codec {
decoder: decoder::MessageDecoder::default(),
payload: None,
version: Version::HTTP_11,
ctype: ConnectionType::Close,
conn_type: ConnectionType::Close,
encoder: encoder::MessageEncoder::default(),
}
}
@ -73,19 +72,19 @@ impl Codec {
/// Check if request is upgrade.
#[inline]
pub fn upgrade(&self) -> bool {
self.ctype == ConnectionType::Upgrade
self.conn_type == ConnectionType::Upgrade
}
/// Check if last response is keep-alive.
#[inline]
pub fn keepalive(&self) -> bool {
self.ctype == ConnectionType::KeepAlive
pub fn keep_alive(&self) -> bool {
self.conn_type == ConnectionType::KeepAlive
}
/// Check if keep-alive enabled on server level.
#[inline]
pub fn keepalive_enabled(&self) -> bool {
self.flags.contains(Flags::KEEPALIVE_ENABLED)
pub fn keep_alive_enabled(&self) -> bool {
self.flags.contains(Flags::KEEP_ALIVE_ENABLED)
}
/// Check last request's message type.
@ -124,12 +123,14 @@ impl Decoder for Codec {
let head = req.head();
self.flags.set(Flags::HEAD, head.method == Method::HEAD);
self.version = head.version;
self.ctype = head.connection_type();
if self.ctype == ConnectionType::KeepAlive
&& !self.flags.contains(Flags::KEEPALIVE_ENABLED)
self.conn_type = head.connection_type();
if self.conn_type == ConnectionType::KeepAlive
&& !self.flags.contains(Flags::KEEP_ALIVE_ENABLED)
{
self.ctype = ConnectionType::Close
self.conn_type = ConnectionType::Close
}
match payload {
PayloadType::None => self.payload = None,
PayloadType::Payload(pl) => self.payload = Some(pl),
@ -159,14 +160,14 @@ impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
res.head_mut().version = self.version;
// connection status
self.ctype = if let Some(ct) = res.head().ctype() {
self.conn_type = if let Some(ct) = res.head().conn_type() {
if ct == ConnectionType::KeepAlive {
self.ctype
self.conn_type
} else {
ct
}
} else {
self.ctype
self.conn_type
};
// encode message
@ -177,29 +178,28 @@ impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
self.flags.contains(Flags::STREAM),
self.version,
length,
self.ctype,
self.conn_type,
&self.config,
)?;
// self.headers_size = (dst.len() - len) as u32;
}
Message::Chunk(Some(bytes)) => {
self.encoder.encode_chunk(bytes.as_ref(), dst)?;
}
Message::Chunk(None) => {
self.encoder.encode_eof(dst)?;
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use bytes::BytesMut;
use http::Method;
use super::*;
use crate::HttpMessage;
use crate::HttpMessage as _;
#[actix_rt::test]
async fn test_http_request_chunked_payload_and_next_message() {

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,972 @@
use std::{future::Future, str, task::Poll, time::Duration};
use actix_codec::Framed;
use actix_rt::{pin, time::sleep};
use actix_service::{fn_service, Service};
use actix_utils::future::{ready, Ready};
use bytes::{Buf, Bytes, BytesMut};
use futures_util::future::lazy;
use super::dispatcher::{Dispatcher, DispatcherState, DispatcherStateProj, Flags};
use crate::{
body::MessageBody,
config::ServiceConfig,
h1::{Codec, ExpectHandler, UpgradeHandler},
service::HttpFlow,
test::{TestBuffer, TestSeqBuffer},
Error, HttpMessage, KeepAlive, Method, OnConnectData, Request, Response, StatusCode,
};
fn find_slice(haystack: &[u8], needle: &[u8], from: usize) -> Option<usize> {
memchr::memmem::find(&haystack[from..], needle)
}
fn stabilize_date_header(payload: &mut [u8]) {
let mut from = 0;
while let Some(pos) = find_slice(payload, b"date", from) {
payload[(from + pos)..(from + pos + 35)]
.copy_from_slice(b"date: Thu, 01 Jan 1970 12:34:56 UTC");
from += 35;
}
}
fn ok_service() -> impl Service<Request, Response = Response<impl MessageBody>, Error = Error> {
status_service(StatusCode::OK)
}
fn status_service(
status: StatusCode,
) -> impl Service<Request, Response = Response<impl MessageBody>, Error = Error> {
fn_service(move |_req: Request| ready(Ok::<_, Error>(Response::new(status))))
}
fn echo_path_service() -> impl Service<Request, Response = Response<impl MessageBody>, Error = Error>
{
fn_service(|req: Request| {
let path = req.path().as_bytes();
ready(Ok::<_, Error>(
Response::ok().set_body(Bytes::copy_from_slice(path)),
))
})
}
fn drop_payload_service() -> impl Service<Request, Response = Response<&'static str>, Error = Error>
{
fn_service(|mut req: Request| async move {
let _ = req.take_payload();
Ok::<_, Error>(Response::with_body(StatusCode::OK, "payload dropped"))
})
}
fn echo_payload_service() -> impl Service<Request, Response = Response<Bytes>, Error = Error> {
fn_service(|mut req: Request| {
Box::pin(async move {
use futures_util::StreamExt as _;
let mut pl = req.take_payload();
let mut body = BytesMut::new();
while let Some(chunk) = pl.next().await {
body.extend_from_slice(chunk.unwrap().chunk())
}
Ok::<_, Error>(Response::ok().set_body(body.freeze()))
})
})
}
#[actix_rt::test]
async fn late_request() {
let mut buf = TestBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(ok_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Ready(_) => panic!("first poll should not be ready"),
Poll::Pending => {}
}
// polls: initial
assert_eq!(h1.poll_count, 1);
buf.extend_read_buf("GET /abcd HTTP/1.1\r\nConnection: close\r\n\r\n");
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("second poll should not be pending"),
Poll::Ready(res) => assert!(res.is_ok()),
}
// polls: initial pending => handle req => shutdown
assert_eq!(h1.poll_count, 3);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 0\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn oneshot_connection() {
let buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n");
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_ok()),
}
// polls: initial => shutdown
assert_eq!(h1.poll_count, 2);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = http_msg(
r"
HTTP/1.1 200 OK
content-length: 5
connection: close
date: Thu, 01 Jan 1970 12:34:56 UTC
/abcd
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
})
.await;
}
#[actix_rt::test]
async fn keep_alive_timeout() {
let buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n");
let cfg = ServiceConfig::new(
KeepAlive::Timeout(Duration::from_millis(200)),
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
assert!(
h1.as_mut().poll(cx).is_pending(),
"keep-alive should prevent poll from resolving"
);
// polls: initial
assert_eq!(h1.poll_count, 1);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
// sleep slightly longer than keep-alive timeout
sleep(Duration::from_millis(250)).await;
lazy(|cx| {
assert!(
h1.as_mut().poll(cx).is_ready(),
"keep-alive should have resolved",
);
// polls: initial => keep-alive wake-up shutdown
assert_eq!(h1.poll_count, 2);
if let DispatcherStateProj::Normal { inner } = h1.project().inner.project() {
// connection closed
assert!(inner.flags.contains(Flags::SHUTDOWN));
assert!(inner.flags.contains(Flags::WRITE_DISCONNECT));
// and nothing added to write buffer
assert!(buf.write_buf_slice().is_empty());
}
})
.await;
}
#[actix_rt::test]
async fn keep_alive_follow_up_req() {
let mut buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n");
let cfg = ServiceConfig::new(
KeepAlive::Timeout(Duration::from_millis(500)),
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
assert!(
h1.as_mut().poll(cx).is_pending(),
"keep-alive should prevent poll from resolving"
);
// polls: initial
assert_eq!(h1.poll_count, 1);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
// sleep for less than KA timeout
sleep(Duration::from_millis(100)).await;
lazy(|cx| {
assert!(
h1.as_mut().poll(cx).is_pending(),
"keep-alive should not have resolved dispatcher yet",
);
// polls: initial => manual
assert_eq!(h1.poll_count, 2);
if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() {
// connection not closed
assert!(!inner.flags.contains(Flags::SHUTDOWN));
assert!(!inner.flags.contains(Flags::WRITE_DISCONNECT));
// and nothing added to write buffer
assert!(buf.write_buf_slice().is_empty());
}
})
.await;
lazy(|cx| {
buf.extend_read_buf(
"\
GET /efg HTTP/1.1\r\n\
Connection: close\r\n\
\r\n\r\n",
);
assert!(
h1.as_mut().poll(cx).is_ready(),
"connection close header should override keep-alive setting",
);
// polls: initial => manual => follow-up req => shutdown
assert_eq!(h1.poll_count, 4);
if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() {
// connection closed
assert!(inner.flags.contains(Flags::SHUTDOWN));
assert!(!inner.flags.contains(Flags::WRITE_DISCONNECT));
}
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 4\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/efg\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn req_parse_err() {
lazy(|cx| {
let buf = TestBuffer::new("GET /test HTTP/1\r\n\r\n");
let services = HttpFlow::new(ok_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
ServiceConfig::default(),
None,
OnConnectData::default(),
);
pin!(h1);
match h1.as_mut().poll(cx) {
Poll::Pending => panic!(),
Poll::Ready(res) => assert!(res.is_err()),
}
if let DispatcherStateProj::Normal { inner } = h1.project().inner.project() {
assert!(inner.flags.contains(Flags::READ_DISCONNECT));
assert_eq!(
&buf.write_buf_slice()[..26],
b"HTTP/1.1 400 Bad Request\r\n"
);
}
})
.await;
}
#[actix_rt::test]
async fn pipelining_ok_then_ok() {
lazy(|cx| {
let buf = TestBuffer::new(
"\
GET /abcd HTTP/1.1\r\n\r\n\
GET /def HTTP/1.1\r\n\r\n\
",
);
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(1),
Duration::from_millis(1),
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_ok()),
}
// polls: initial => shutdown
assert_eq!(h1.poll_count, 2);
let mut res = buf.write_buf_slice_mut();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
HTTP/1.1 200 OK\r\n\
content-length: 4\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/def\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn pipelining_ok_then_bad() {
lazy(|cx| {
let buf = TestBuffer::new(
"\
GET /abcd HTTP/1.1\r\n\r\n\
GET /def HTTP/1\r\n\r\n\
",
);
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(1),
Duration::from_millis(1),
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_err()),
}
// polls: initial => shutdown
assert_eq!(h1.poll_count, 1);
let mut res = buf.write_buf_slice_mut();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
HTTP/1.1 400 Bad Request\r\n\
content-length: 0\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn expect_handling() {
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::ZERO,
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_payload_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
buf.extend_read_buf(
"\
POST /upload HTTP/1.1\r\n\
Content-Length: 5\r\n\
Expect: 100-continue\r\n\
\r\n\
",
);
pin!(h1);
assert!(h1.as_mut().poll(cx).is_pending());
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
// polls: manual
assert_eq!(h1.poll_count, 1);
if let DispatcherState::Normal { ref inner } = h1.inner {
let io = inner.io.as_ref().unwrap();
let res = &io.write_buf()[..];
assert_eq!(
str::from_utf8(res).unwrap(),
"HTTP/1.1 100 Continue\r\n\r\n"
);
}
buf.extend_read_buf("12345");
assert!(h1.as_mut().poll(cx).is_ready());
// polls: manual => manual => shutdown
assert_eq!(h1.poll_count, 3);
if let DispatcherState::Normal { ref inner } = h1.inner {
let io = inner.io.as_ref().unwrap();
let mut res = io.write_buf()[..].to_owned();
stabilize_date_header(&mut res);
assert_eq!(
str::from_utf8(&res).unwrap(),
"\
HTTP/1.1 100 Continue\r\n\
\r\n\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\
\r\n\
12345\
"
);
}
})
.await;
}
#[actix_rt::test]
async fn expect_eager() {
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::ZERO,
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
buf.extend_read_buf(
"\
POST /upload HTTP/1.1\r\n\
Content-Length: 5\r\n\
Expect: 100-continue\r\n\
\r\n\
",
);
pin!(h1);
assert!(h1.as_mut().poll(cx).is_ready());
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
// polls: manual => shutdown
assert_eq!(h1.poll_count, 2);
if let DispatcherState::Normal { ref inner } = h1.inner {
let io = inner.io.as_ref().unwrap();
let mut res = io.write_buf()[..].to_owned();
stabilize_date_header(&mut res);
// Despite the content-length header and even though the request payload has not
// been sent, this test expects a complete service response since the payload
// is not used at all. The service passed to dispatcher is path echo and doesn't
// consume payload bytes.
assert_eq!(
str::from_utf8(&res).unwrap(),
"\
HTTP/1.1 100 Continue\r\n\
\r\n\
HTTP/1.1 200 OK\r\n\
content-length: 7\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\
\r\n\
/upload\
"
);
}
})
.await;
}
#[actix_rt::test]
async fn upgrade_handling() {
struct TestUpgrade;
impl<T> Service<(Request, Framed<T, Codec>)> for TestUpgrade {
type Response = ();
type Error = Error;
type Future = Ready<Result<Self::Response, Self::Error>>;
actix_service::always_ready!();
fn call(&self, (req, _framed): (Request, Framed<T, Codec>)) -> Self::Future {
assert_eq!(req.method(), Method::GET);
assert!(req.upgrade());
assert_eq!(req.headers().get("upgrade").unwrap(), "websocket");
ready(Ok(()))
}
}
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::ZERO,
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(ok_service(), ExpectHandler, Some(TestUpgrade));
let h1 = Dispatcher::<_, _, _, _, TestUpgrade>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
buf.extend_read_buf(
"\
GET /ws HTTP/1.1\r\n\
Connection: Upgrade\r\n\
Upgrade: websocket\r\n\
\r\n\
",
);
pin!(h1);
assert!(h1.as_mut().poll(cx).is_ready());
assert!(matches!(&h1.inner, DispatcherState::Upgrade { .. }));
// polls: manual => shutdown
assert_eq!(h1.poll_count, 2);
})
.await;
}
// fix in #2624 reverted temporarily
// complete fix tracked in #2745
#[ignore]
#[actix_rt::test]
async fn handler_drop_payload() {
let _ = env_logger::try_init();
let mut buf = TestBuffer::new(http_msg(
r"
POST /drop-payload HTTP/1.1
Content-Length: 3
abc
",
));
let services = HttpFlow::new(
drop_payload_service(),
ExpectHandler,
None::<UpgradeHandler>,
);
let h1 = Dispatcher::new(
buf.clone(),
services,
ServiceConfig::default(),
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(h1.as_mut().poll(cx).is_pending());
// polls: manual
assert_eq!(h1.poll_count, 1);
let mut res = BytesMut::from(buf.take_write_buf().as_ref());
stabilize_date_header(&mut res);
let res = &res[..];
let exp = http_msg(
r"
HTTP/1.1 200 OK
content-length: 15
date: Thu, 01 Jan 1970 12:34:56 UTC
payload dropped
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() {
assert!(inner.state.is_none());
}
})
.await;
lazy(|cx| {
// add message that claims to have payload longer than provided
buf.extend_read_buf(http_msg(
r"
POST /drop-payload HTTP/1.1
Content-Length: 200
abc
",
));
assert!(h1.as_mut().poll(cx).is_pending());
// polls: manual => manual
assert_eq!(h1.poll_count, 2);
let mut res = BytesMut::from(buf.take_write_buf().as_ref());
stabilize_date_header(&mut res);
let res = &res[..];
// expect response immediately even though request side has not finished reading payload
let exp = http_msg(
r"
HTTP/1.1 200 OK
content-length: 15
date: Thu, 01 Jan 1970 12:34:56 UTC
payload dropped
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
})
.await;
lazy(|cx| {
assert!(h1.as_mut().poll(cx).is_ready());
// polls: manual => manual => manual
assert_eq!(h1.poll_count, 3);
let mut res = BytesMut::from(buf.take_write_buf().as_ref());
stabilize_date_header(&mut res);
let res = &res[..];
// expect that unrequested error response is sent back since connection could not be cleaned
let exp = http_msg(
r"
HTTP/1.1 500 Internal Server Error
content-length: 0
connection: close
date: Thu, 01 Jan 1970 12:34:56 UTC
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
})
.await;
}
fn http_msg(msg: impl AsRef<str>) -> BytesMut {
let mut msg = msg
.as_ref()
.trim()
.split('\n')
.map(|line| [line.trim_start(), "\r"].concat())
.collect::<Vec<_>>()
.join("\n");
// remove trailing \r
msg.pop();
if !msg.is_empty() && !msg.contains("\r\n\r\n") {
msg.push_str("\r\n\r\n");
}
BytesMut::from(msg.as_bytes())
}
#[test]
fn http_msg_creates_msg() {
assert_eq!(http_msg(r""), "");
assert_eq!(
http_msg(
r"
POST / HTTP/1.1
Content-Length: 3
abc
"
),
"POST / HTTP/1.1\r\nContent-Length: 3\r\n\r\nabc"
);
assert_eq!(
http_msg(
r"
GET / HTTP/1.1
Content-Length: 3
"
),
"GET / HTTP/1.1\r\nContent-Length: 3\r\n\r\n"
);
}

View file

@ -1,24 +1,26 @@
use std::io::Write;
use std::marker::PhantomData;
use std::ptr::copy_nonoverlapping;
use std::slice::from_raw_parts_mut;
use std::{cmp, io};
use std::{
cmp,
io::{self, Write as _},
marker::PhantomData,
ptr::copy_nonoverlapping,
slice::from_raw_parts_mut,
};
use bytes::{BufMut, BytesMut};
use crate::body::BodySize;
use crate::config::ServiceConfig;
use crate::header::{map::Value, HeaderName};
use crate::helpers;
use crate::http::header::{CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
use crate::http::{HeaderMap, StatusCode, Version};
use crate::message::{ConnectionType, RequestHeadType};
use crate::response::Response;
use crate::{
body::BodySize,
header::{
map::Value, HeaderMap, HeaderName, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING,
},
helpers, ConnectionType, RequestHeadType, Response, ServiceConfig, StatusCode, Version,
};
const AVERAGE_HEADER_SIZE: usize = 30;
#[derive(Debug)]
pub(crate) struct MessageEncoder<T: MessageType> {
#[allow(dead_code)]
pub length: BodySize,
pub te: TransferEncoding,
_phantom: PhantomData<T>,
@ -54,7 +56,7 @@ pub(crate) trait MessageType: Sized {
dst: &mut BytesMut,
version: Version,
mut length: BodySize,
ctype: ConnectionType,
conn_type: ConnectionType,
config: &ServiceConfig,
) -> io::Result<()> {
let chunked = self.chunked();
@ -69,17 +71,28 @@ pub(crate) trait MessageType: Sized {
| StatusCode::PROCESSING
| StatusCode::NO_CONTENT => {
// skip content-length and transfer-encoding headers
// See https://tools.ietf.org/html/rfc7230#section-3.3.1
// and https://tools.ietf.org/html/rfc7230#section-3.3.2
// see https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.1
// and https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.2
skip_len = true;
length = BodySize::None
}
StatusCode::NOT_MODIFIED => {
// 304 responses should never have a body but should retain a manually set
// content-length header
// see https://datatracker.ietf.org/doc/html/rfc7232#section-4.1
skip_len = false;
length = BodySize::None;
}
_ => {}
}
}
match length {
BodySize::Stream => {
if chunked {
skip_len = true;
if camel_case {
dst.put_slice(b"\r\nTransfer-Encoding: chunked\r\n")
} else {
@ -90,19 +103,14 @@ pub(crate) trait MessageType: Sized {
dst.put_slice(b"\r\n");
}
}
BodySize::Empty => {
if camel_case {
dst.put_slice(b"\r\nContent-Length: 0\r\n");
} else {
dst.put_slice(b"\r\ncontent-length: 0\r\n");
}
}
BodySize::Sized(len) => helpers::write_content_length(len, dst),
BodySize::Sized(0) if camel_case => dst.put_slice(b"\r\nContent-Length: 0\r\n"),
BodySize::Sized(0) => dst.put_slice(b"\r\ncontent-length: 0\r\n"),
BodySize::Sized(len) => helpers::write_content_length(len, dst, camel_case),
BodySize::None => dst.put_slice(b"\r\n"),
}
// Connection
match ctype {
match conn_type {
ConnectionType::Upgrade => dst.put_slice(b"connection: upgrade\r\n"),
ConnectionType::KeepAlive if version < Version::HTTP_11 => {
if camel_case {
@ -144,7 +152,6 @@ pub(crate) trait MessageType: Sized {
let k = key.as_str().as_bytes();
let k_len = k.len();
// TODO: drain?
for val in value.iter() {
let v = val.as_ref();
let v_len = v.len();
@ -173,7 +180,7 @@ pub(crate) trait MessageType: Sized {
unsafe {
if camel_case {
// use Camel-Case headers
write_camel_case(k, from_raw_parts_mut(buf, k_len));
write_camel_case(k, buf, k_len);
} else {
write_data(k, buf, k_len);
}
@ -203,14 +210,14 @@ pub(crate) trait MessageType: Sized {
dst.advance_mut(pos);
}
// optimized date header, set_date writes \r\n
if !has_date {
config.set_date(dst);
} else {
// msg eof
dst.extend_from_slice(b"\r\n");
// optimized date header, write_date_header writes its own \r\n
config.write_date_header(dst, camel_case);
}
// end-of-headers marker
dst.extend_from_slice(b"\r\n");
Ok(())
}
@ -250,6 +257,12 @@ impl MessageType for Response<()> {
None
}
fn camel_case(&self) -> bool {
self.head()
.flags
.contains(crate::message::Flags::CAMEL_CASE)
}
fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()> {
let head = self.head();
let reason = head.reason().as_bytes();
@ -287,7 +300,7 @@ impl MessageType for RequestHeadType {
let head = self.as_ref();
dst.reserve(256 + head.headers.len() * AVERAGE_HEADER_SIZE);
write!(
helpers::Writer(dst),
helpers::MutWriter(dst),
"{} {} {}",
head.method,
head.uri.path_and_query().map(|u| u.as_str()).unwrap_or("/"),
@ -297,11 +310,7 @@ impl MessageType for RequestHeadType {
Version::HTTP_11 => "HTTP/1.1",
Version::HTTP_2 => "HTTP/2.0",
Version::HTTP_3 => "HTTP/3.0",
_ =>
return Err(io::Error::new(
io::ErrorKind::Other,
"unsupported version"
)),
_ => return Err(io::Error::new(io::ErrorKind::Other, "unsupported version")),
}
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
@ -309,16 +318,17 @@ impl MessageType for RequestHeadType {
}
impl<T: MessageType> MessageEncoder<T> {
/// Encode message
/// Encode chunk.
pub fn encode_chunk(&mut self, msg: &[u8], buf: &mut BytesMut) -> io::Result<bool> {
self.te.encode(msg, buf)
}
/// Encode eof
/// Encode EOF.
pub fn encode_eof(&mut self, buf: &mut BytesMut) -> io::Result<()> {
self.te.encode_eof(buf)
}
/// Encode message.
pub fn encode(
&mut self,
dst: &mut BytesMut,
@ -327,13 +337,13 @@ impl<T: MessageType> MessageEncoder<T> {
stream: bool,
version: Version,
length: BodySize,
ctype: ConnectionType,
conn_type: ConnectionType,
config: &ServiceConfig,
) -> io::Result<()> {
// transfer encoding
if !head {
self.te = match length {
BodySize::Empty => TransferEncoding::empty(),
BodySize::Sized(0) => TransferEncoding::empty(),
BodySize::Sized(len) => TransferEncoding::length(len),
BodySize::Stream => {
if message.chunked() && !stream {
@ -349,7 +359,7 @@ impl<T: MessageType> MessageEncoder<T> {
}
message.encode_status(dst)?;
message.encode_headers(dst, version, length, ctype, config)
message.encode_headers(dst, version, length, conn_type, config)
}
}
@ -363,10 +373,12 @@ pub(crate) struct TransferEncoding {
enum TransferEncodingKind {
/// An Encoder for when Transfer-Encoding includes `chunked`.
Chunked(bool),
/// An Encoder for when Content-Length is set.
///
/// Enforces that the body is not longer than the Content-Length header.
Length(u64),
/// An Encoder for when Content-Length is not known.
///
/// Application decides when to stop writing.
@ -420,7 +432,7 @@ impl TransferEncoding {
*eof = true;
buf.extend_from_slice(b"0\r\n\r\n");
} else {
writeln!(helpers::Writer(buf), "{:X}\r", msg.len())
writeln!(helpers::MutWriter(buf), "{:X}\r", msg.len())
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
buf.reserve(msg.len() + 2);
@ -438,7 +450,7 @@ impl TransferEncoding {
buf.extend_from_slice(&msg[..len as usize]);
*remaining -= len as u64;
*remaining -= len;
Ok(*remaining == 0)
} else {
Ok(true)
@ -471,15 +483,22 @@ impl TransferEncoding {
}
/// # Safety
/// Callers must ensure that the given length matches given value length.
/// Callers must ensure that the given `len` matches the given `value` length and that `buf` is
/// valid for writes of at least `len` bytes.
unsafe fn write_data(value: &[u8], buf: *mut u8, len: usize) {
debug_assert_eq!(value.len(), len);
copy_nonoverlapping(value.as_ptr(), buf, len);
}
fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
/// # Safety
/// Callers must ensure that the given `len` matches the given `value` length and that `buf` is
/// valid for writes of at least `len` bytes.
unsafe fn write_camel_case(value: &[u8], buf: *mut u8, len: usize) {
// first copy entire (potentially wrong) slice to output
buffer[..value.len()].copy_from_slice(value);
write_data(value, buf, len);
// SAFETY: We just initialized the buffer with `value`
let buffer = from_raw_parts_mut(buf, len);
let mut iter = value.iter();
@ -498,6 +517,7 @@ fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[index] = c & 0b1101_1111;
}
index += 1;
}
index += 1;
@ -509,11 +529,13 @@ mod tests {
use std::rc::Rc;
use bytes::Bytes;
use http::header::AUTHORIZATION;
use http::header::{AUTHORIZATION, UPGRADE_INSECURE_REQUESTS};
use super::*;
use crate::http::header::{HeaderValue, CONTENT_TYPE};
use crate::RequestHead;
use crate::{
header::{HeaderValue, CONTENT_TYPE},
RequestHead,
};
#[test]
fn test_chunked_te() {
@ -538,22 +560,25 @@ mod tests {
head.headers
.insert(CONTENT_TYPE, HeaderValue::from_static("plain/text"));
head.headers
.insert(UPGRADE_INSECURE_REQUESTS, HeaderValue::from_static("1"));
let mut head = RequestHeadType::Owned(head);
let _ = head.encode_headers(
&mut bytes,
Version::HTTP_11,
BodySize::Empty,
BodySize::Sized(0),
ConnectionType::Close,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("Content-Length: 0\r\n"));
assert!(data.contains("Connection: close\r\n"));
assert!(data.contains("Content-Type: plain/text\r\n"));
assert!(data.contains("Date: date\r\n"));
assert!(data.contains("Upgrade-Insecure-Requests: 1\r\n"));
let _ = head.encode_headers(
&mut bytes,
@ -562,8 +587,7 @@ mod tests {
ConnectionType::KeepAlive,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("Transfer-Encoding: chunked\r\n"));
assert!(data.contains("Content-Type: plain/text\r\n"));
assert!(data.contains("Date: date\r\n"));
@ -584,8 +608,7 @@ mod tests {
ConnectionType::KeepAlive,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("transfer-encoding: chunked\r\n"));
assert!(data.contains("content-type: xml\r\n"));
assert!(data.contains("content-type: plain/text\r\n"));
@ -614,12 +637,11 @@ mod tests {
let _ = head.encode_headers(
&mut bytes,
Version::HTTP_11,
BodySize::Empty,
BodySize::Sized(0),
ConnectionType::Close,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("content-length: 0\r\n"));
assert!(data.contains("connection: close\r\n"));
assert!(data.contains("authorization: another authorization\r\n"));
@ -642,8 +664,7 @@ mod tests {
ConnectionType::Upgrade,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(!data.contains("content-length: 0\r\n"));
assert!(!data.contains("transfer-encoding: chunked\r\n"));
}
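For reference, a safe, simplified sketch of the Camel-Case transform that `write_camel_case` performs in place (the real function works on raw pointers and only flips ASCII case):
fn camel_case_sketch(name: &str) -> String {
    let mut out = String::with_capacity(name.len());
    let mut upper_next = true;

    for ch in name.chars() {
        if upper_next {
            out.push(ch.to_ascii_uppercase());
            upper_next = false;
        } else {
            out.push(ch);
        }

        if ch == '-' {
            upper_next = true;
        }
    }

    out
}

#[test]
fn camel_case_sketch_examples() {
    assert_eq!(camel_case_sketch("content-length"), "Content-Length");
    assert_eq!(
        camel_case_sketch("upgrade-insecure-requests"),
        "Upgrade-Insecure-Requests"
    );
}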

View file

@ -1,8 +1,7 @@
use actix_service::{Service, ServiceFactory};
use actix_utils::future::{ready, Ready};
use crate::error::Error;
use crate::request::Request;
use crate::{Error, Request};
pub struct ExpectHandler;

View file

@ -1,32 +1,40 @@
//! HTTP/1 protocol implementation.
use bytes::{Bytes, BytesMut};
mod chunked;
mod client;
mod codec;
mod decoder;
mod dispatcher;
#[cfg(test)]
mod dispatcher_tests;
mod encoder;
mod expect;
mod payload;
mod service;
mod timer;
mod upgrade;
mod utils;
pub use self::client::{ClientCodec, ClientPayloadCodec};
pub use self::codec::Codec;
pub use self::dispatcher::Dispatcher;
pub use self::expect::ExpectHandler;
pub use self::payload::Payload;
pub use self::service::{H1Service, H1ServiceHandler};
pub use self::upgrade::UpgradeHandler;
pub use self::utils::SendResponse;
pub use self::{
client::{ClientCodec, ClientPayloadCodec},
codec::Codec,
dispatcher::Dispatcher,
expect::ExpectHandler,
payload::Payload,
service::{H1Service, H1ServiceHandler},
upgrade::UpgradeHandler,
utils::SendResponse,
};
#[derive(Debug)]
/// Codec message
pub enum Message<T> {
/// Http message
/// HTTP message.
Item(T),
/// Payload chunk
/// Payload chunk.
Chunk(Option<Bytes>),
}
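A short sketch of how a consumer of the codec typically distinguishes the two variants:
fn describe<T>(msg: &Message<T>) -> &'static str {
    match msg {
        Message::Item(_) => "message head",
        Message::Chunk(Some(_)) => "payload chunk",
        Message::Chunk(None) => "end of payload",
    }
}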
@ -57,7 +65,7 @@ pub(crate) fn reserve_readbuf(src: &mut BytesMut) {
#[cfg(test)]
mod tests {
use super::*;
use crate::request::Request;
use crate::Request;
impl Message<Request> {
pub fn message(self) -> Request {

View file

@ -1,9 +1,12 @@
//! Payload stream
use std::cell::RefCell;
use std::collections::VecDeque;
use std::pin::Pin;
use std::rc::{Rc, Weak};
use std::task::{Context, Poll, Waker};
use std::{
cell::RefCell,
collections::VecDeque,
pin::Pin,
rc::{Rc, Weak},
task::{Context, Poll, Waker},
};
use bytes::Bytes;
use futures_core::Stream;
@ -13,7 +16,7 @@ use crate::error::PayloadError;
/// max buffer size 32k
pub(crate) const MAX_BUFFER_SIZE: usize = 32_768;
#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Eq)]
pub enum PayloadStatus {
Read,
Pause,
@ -22,39 +25,32 @@ pub enum PayloadStatus {
/// Buffered stream of bytes chunks
///
/// Payload stores chunks in a vector. First chunk can be received with
/// `.readany()` method. Payload stream is not thread safe. Payload does not
/// notify current task when new data is available.
/// Payload stores chunks in a vector. First chunk can be received with `poll_next`. Payload does
/// not notify current task when new data is available.
///
/// Payload stream can be used as `Response` body stream.
/// Payload can be used as `Response` body stream.
#[derive(Debug)]
pub struct Payload {
inner: Rc<RefCell<Inner>>,
}
impl Payload {
/// Create payload stream.
/// Creates a payload stream.
///
/// This method construct two objects responsible for bytes stream
/// generation.
///
/// * `PayloadSender` - *Sender* side of the stream
///
/// * `Payload` - *Receiver* side of the stream
/// This method constructs two objects responsible for bytes stream generation:
/// - `PayloadSender` - *Sender* side of the stream
/// - `Payload` - *Receiver* side of the stream
pub fn create(eof: bool) -> (PayloadSender, Payload) {
let shared = Rc::new(RefCell::new(Inner::new(eof)));
(
PayloadSender {
inner: Rc::downgrade(&shared),
},
PayloadSender::new(Rc::downgrade(&shared)),
Payload { inner: shared },
)
}
/// Create empty payload
#[doc(hidden)]
pub fn empty() -> Payload {
/// Creates an empty payload.
pub(crate) fn empty() -> Payload {
Payload {
inner: Rc::new(RefCell::new(Inner::new(true))),
}
@ -77,14 +73,6 @@ impl Payload {
pub fn unread_data(&mut self, data: Bytes) {
self.inner.borrow_mut().unread_data(data);
}
#[inline]
pub fn readany(
&mut self,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> {
self.inner.borrow_mut().readany(cx)
}
}
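A minimal end-to-end sketch of the sender/receiver split that `create` sets up. It assumes the `PayloadSender::feed_data`/`feed_eof` methods used by the dispatcher (not shown in this hunk) and pulls `StreamExt` from `futures-util`, which the test suite already uses:
#[actix_rt::test]
async fn payload_round_trip_sketch() {
    use futures_util::StreamExt as _;

    let (mut sender, mut payload) = Payload::create(false);

    sender.feed_data(Bytes::from_static(b"hello")); // assumed sender API
    sender.feed_eof();                              // assumed sender API

    assert_eq!(
        payload.next().await.unwrap().unwrap(),
        Bytes::from_static(b"hello")
    );
    assert!(payload.next().await.is_none()); // stream ends after EOF
}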
impl Stream for Payload {
@ -94,7 +82,7 @@ impl Stream for Payload {
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> {
self.inner.borrow_mut().readany(cx)
Pin::new(&mut *self.inner.borrow_mut()).poll_next(cx)
}
}
@ -104,6 +92,10 @@ pub struct PayloadSender {
}
impl PayloadSender {
fn new(inner: Weak<RefCell<Inner>>) -> Self {
Self { inner }
}
#[inline]
pub fn set_error(&mut self, err: PayloadError) {
if let Some(shared) = self.inner.upgrade() {
@ -125,6 +117,7 @@ impl PayloadSender {
}
}
#[allow(clippy::needless_pass_by_ref_mut)]
#[inline]
pub fn need_read(&self, cx: &mut Context<'_>) -> PayloadStatus {
// we check need_read only if Payload (other side) is alive,
@ -182,12 +175,11 @@ impl Inner {
/// Registers a future waiting for data from the payload.
/// The waker is used in `Inner::wake`.
fn register(&mut self, cx: &mut Context<'_>) {
fn register(&mut self, cx: &Context<'_>) {
if self
.task
.as_ref()
.map(|w| !cx.waker().will_wake(w))
.unwrap_or(true)
.map_or(true, |w| !cx.waker().will_wake(w))
{
self.task = Some(cx.waker().clone());
}
@ -195,12 +187,11 @@ impl Inner {
/// Registers the future that feeds data into the payload.
/// The waker is used in `Inner::wake_io`.
fn register_io(&mut self, cx: &mut Context<'_>) {
fn register_io(&mut self, cx: &Context<'_>) {
if self
.io_task
.as_ref()
.map(|w| !cx.waker().will_wake(w))
.unwrap_or(true)
.map_or(true, |w| !cx.waker().will_wake(w))
{
self.io_task = Some(cx.waker().clone());
}
@ -229,9 +220,9 @@ impl Inner {
self.len
}
fn readany(
&mut self,
cx: &mut Context<'_>,
fn poll_next(
mut self: Pin<&mut Self>,
cx: &Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> {
if let Some(data) = self.items.pop_front() {
self.len -= data.len();
@ -262,8 +253,15 @@ impl Inner {
#[cfg(test)]
mod tests {
use super::*;
use actix_utils::future::poll_fn;
use static_assertions::{assert_impl_all, assert_not_impl_any};
use super::*;
assert_impl_all!(Payload: Unpin);
assert_not_impl_any!(Payload: Send, Sync);
assert_impl_all!(Inner: Unpin, Send, Sync);
#[actix_rt::test]
async fn test_unread_data() {
@ -275,7 +273,10 @@ mod tests {
assert_eq!(
Bytes::from("data"),
poll_fn(|cx| payload.readany(cx)).await.unwrap().unwrap()
poll_fn(|cx| Pin::new(&mut payload).poll_next(cx))
.await
.unwrap()
.unwrap()
);
}
}
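A minimal sketch of driving the sender/receiver pair returned by `Payload::create`, using only calls that appear in this file (`create`, `unread_data`, and the `Stream` impl) plus the `poll_fn` helper from `actix_utils` used in the tests above; the `demo` function itself is illustrative:
use std::pin::Pin;
use actix_utils::future::poll_fn;
use bytes::Bytes;
use futures_core::Stream;
async fn demo() {
    // sender half is kept alive but unused in this sketch
    let (_sender, mut payload) = Payload::create(false);
    // push bytes onto the front of the stream, then poll them back out
    payload.unread_data(Bytes::from("data"));
    let chunk = poll_fn(|cx| Pin::new(&mut payload).poll_next(cx))
        .await
        .unwrap()
        .unwrap();
    assert_eq!(chunk, Bytes::from("data"));
}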

View file

@ -1,7 +1,10 @@
use std::marker::PhantomData;
use std::rc::Rc;
use std::task::{Context, Poll};
use std::{fmt, net};
use std::{
fmt,
marker::PhantomData,
net,
rc::Rc,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_rt::net::TcpStream;
@ -10,18 +13,16 @@ use actix_service::{
};
use actix_utils::future::ready;
use futures_core::future::LocalBoxFuture;
use tracing::error;
use crate::body::MessageBody;
use crate::config::ServiceConfig;
use crate::error::{DispatchError, Error};
use crate::request::Request;
use crate::response::Response;
use crate::service::HttpServiceHandler;
use crate::{ConnectCallback, OnConnectData};
use super::codec::Codec;
use super::dispatcher::Dispatcher;
use super::{ExpectHandler, UpgradeHandler};
use super::{codec::Codec, dispatcher::Dispatcher, ExpectHandler, UpgradeHandler};
use crate::{
body::{BoxBody, MessageBody},
config::ServiceConfig,
error::DispatchError,
service::HttpServiceHandler,
ConnectCallback, OnConnectData, Request, Response,
};
/// `ServiceFactory` implementation for HTTP1 transport
pub struct H1Service<T, S, B, X = ExpectHandler, U = UpgradeHandler> {
@ -36,7 +37,7 @@ pub struct H1Service<T, S, B, X = ExpectHandler, U = UpgradeHandler> {
impl<T, S, B> H1Service<T, S, B>
where
S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Error>,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
@ -61,33 +62,27 @@ impl<S, B, X, U> H1Service<TcpStream, S, B, X, U>
where
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Error>,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
B::Error: Into<Error>,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Error>,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<(Request, Framed<TcpStream, Codec>), Config = (), Response = ()>,
U::Future: 'static,
U::Error: fmt::Display + Into<Error>,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
/// Create simple tcp stream service
pub fn tcp(
self,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Response = (),
Error = DispatchError,
InitError = (),
> {
) -> impl ServiceFactory<TcpStream, Config = (), Response = (), Error = DispatchError, InitError = ()>
{
fn_service(|io: TcpStream| {
let peer_addr = io.peer_addr().ok();
ready(Ok((io, peer_addr)))
@ -98,28 +93,29 @@ where
#[cfg(feature = "openssl")]
mod openssl {
use super::*;
use actix_service::ServiceFactoryExt;
use actix_tls::accept::{
openssl::{Acceptor, SslAcceptor, SslError, TlsStream},
openssl::{
reexports::{Error as SslError, SslAcceptor},
Acceptor, TlsStream,
},
TlsError,
};
use super::*;
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
where
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Error>,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
B::Error: Into<Error>,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Error>,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<
@ -128,10 +124,10 @@ mod openssl {
Response = (),
>,
U::Future: 'static,
U::Error: fmt::Display + Into<Error>,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
/// Create openssl based service
/// Create OpenSSL based service.
pub fn openssl(
self,
acceptor: SslAcceptor,
@ -143,43 +139,44 @@ mod openssl {
InitError = (),
> {
Acceptor::new(acceptor)
.map_err(TlsError::Tls)
.map_init_err(|_| panic!())
.and_then(|io: TlsStream<TcpStream>| {
.map_init_err(|_| {
unreachable!("TLS acceptor service factory does not error on init")
})
.map_err(TlsError::into_service_error)
.map(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().peer_addr().ok();
ready(Ok((io, peer_addr)))
(io, peer_addr)
})
.and_then(self.map_err(TlsError::Service))
}
}
}
#[cfg(feature = "rustls")]
mod rustls {
use super::*;
#[cfg(feature = "rustls-0_20")]
mod rustls_0_20 {
use std::io;
use actix_service::ServiceFactoryExt;
use actix_service::ServiceFactoryExt as _;
use actix_tls::accept::{
rustls::{Acceptor, ServerConfig, TlsStream},
rustls_0_20::{reexports::ServerConfig, Acceptor, TlsStream},
TlsError,
};
use super::*;
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
where
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Error>,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
B::Error: Into<Error>,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Error>,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<
@ -188,10 +185,10 @@ mod rustls {
Response = (),
>,
U::Future: 'static,
U::Error: fmt::Display + Into<Error>,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
/// Create rustls based service
/// Create Rustls v0.20 based service.
pub fn rustls(
self,
config: ServerConfig,
@ -203,11 +200,196 @@ mod rustls {
InitError = (),
> {
Acceptor::new(config)
.map_err(TlsError::Tls)
.map_init_err(|_| panic!())
.and_then(|io: TlsStream<TcpStream>| {
.map_init_err(|_| {
unreachable!("TLS acceptor service factory does not error on init")
})
.map_err(TlsError::into_service_error)
.map(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok();
ready(Ok((io, peer_addr)))
(io, peer_addr)
})
.and_then(self.map_err(TlsError::Service))
}
}
}
#[cfg(feature = "rustls-0_21")]
mod rustls_0_21 {
use std::io;
use actix_service::ServiceFactoryExt as _;
use actix_tls::accept::{
rustls_0_21::{reexports::ServerConfig, Acceptor, TlsStream},
TlsError,
};
use super::*;
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
where
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<
(Request, Framed<TlsStream<TcpStream>, Codec>),
Config = (),
Response = (),
>,
U::Future: 'static,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
/// Create Rustls v0.21 based service.
pub fn rustls_021(
self,
config: ServerConfig,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Response = (),
Error = TlsError<io::Error, DispatchError>,
InitError = (),
> {
Acceptor::new(config)
.map_init_err(|_| {
unreachable!("TLS acceptor service factory does not error on init")
})
.map_err(TlsError::into_service_error)
.map(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok();
(io, peer_addr)
})
.and_then(self.map_err(TlsError::Service))
}
}
}
#[cfg(feature = "rustls-0_22")]
mod rustls_0_22 {
use std::io;
use actix_service::ServiceFactoryExt as _;
use actix_tls::accept::{
rustls_0_22::{reexports::ServerConfig, Acceptor, TlsStream},
TlsError,
};
use super::*;
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
where
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<
(Request, Framed<TlsStream<TcpStream>, Codec>),
Config = (),
Response = (),
>,
U::Future: 'static,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
/// Create Rustls v0.22 based service.
pub fn rustls_0_22(
self,
config: ServerConfig,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Response = (),
Error = TlsError<io::Error, DispatchError>,
InitError = (),
> {
Acceptor::new(config)
.map_init_err(|_| {
unreachable!("TLS acceptor service factory does not error on init")
})
.map_err(TlsError::into_service_error)
.map(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok();
(io, peer_addr)
})
.and_then(self.map_err(TlsError::Service))
}
}
}
#[cfg(feature = "rustls-0_23")]
mod rustls_0_23 {
use std::io;
use actix_service::ServiceFactoryExt as _;
use actix_tls::accept::{
rustls_0_23::{reexports::ServerConfig, Acceptor, TlsStream},
TlsError,
};
use super::*;
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
where
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<
(Request, Framed<TlsStream<TcpStream>, Codec>),
Config = (),
Response = (),
>,
U::Future: 'static,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
/// Create Rustls v0.23 based service.
pub fn rustls_0_23(
self,
config: ServerConfig,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Response = (),
Error = TlsError<io::Error, DispatchError>,
InitError = (),
> {
Acceptor::new(config)
.map_init_err(|_| {
unreachable!("TLS acceptor service factory does not error on init")
})
.map_err(TlsError::into_service_error)
.map(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok();
(io, peer_addr)
})
.and_then(self.map_err(TlsError::Service))
}
@ -217,7 +399,7 @@ mod rustls {
impl<T, S, B, X, U> H1Service<T, S, B, X, U>
where
S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Error>,
S::Error: Into<Response<BoxBody>>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
@ -225,7 +407,7 @@ where
pub fn expect<X1>(self, expect: X1) -> H1Service<T, S, B, X1, U>
where
X1: ServiceFactory<Request, Response = Request>,
X1::Error: Into<Error>,
X1::Error: Into<Response<BoxBody>>,
X1::InitError: fmt::Debug,
{
H1Service {
@ -261,28 +443,26 @@ where
}
}
impl<T, S, B, X, U> ServiceFactory<(T, Option<net::SocketAddr>)>
for H1Service<T, S, B, X, U>
impl<T, S, B, X, U> ServiceFactory<(T, Option<net::SocketAddr>)> for H1Service<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Error>,
S::Error: Into<Response<BoxBody>>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
B::Error: Into<Error>,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Error>,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<(Request, Framed<T, Codec>), Config = (), Response = ()>,
U::Future: 'static,
U::Error: fmt::Display + Into<Error>,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
type Response = ();
@ -302,13 +482,13 @@ where
Box::pin(async move {
let expect = expect
.await
.map_err(|e| log::error!("Init http expect service error: {:?}", e))?;
.map_err(|e| error!("Init http expect service error: {:?}", e))?;
let upgrade = match upgrade {
Some(upgrade) => {
let upgrade = upgrade.await.map_err(|e| {
log::error!("Init http upgrade service error: {:?}", e)
})?;
let upgrade = upgrade
.await
.map_err(|e| error!("Init http upgrade service error: {:?}", e))?;
Some(upgrade)
}
None => None,
@ -316,7 +496,7 @@ where
let service = service
.await
.map_err(|e| log::error!("Init http service error: {:?}", e))?;
.map_err(|e| error!("Init http service error: {:?}", e))?;
Ok(H1ServiceHandler::new(
cfg,
@ -332,45 +512,35 @@ where
/// `Service` implementation for HTTP/1 transport
pub type H1ServiceHandler<T, S, B, X, U> = HttpServiceHandler<T, S, B, X, U>;
impl<T, S, B, X, U> Service<(T, Option<net::SocketAddr>)>
for HttpServiceHandler<T, S, B, X, U>
impl<T, S, B, X, U> Service<(T, Option<net::SocketAddr>)> for HttpServiceHandler<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request>,
S::Error: Into<Error>,
S::Error: Into<Response<BoxBody>>,
S::Response: Into<Response<B>>,
B: MessageBody,
B::Error: Into<Error>,
X: Service<Request, Response = Request>,
X::Error: Into<Error>,
X::Error: Into<Response<BoxBody>>,
U: Service<(Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display + Into<Error>,
U::Error: fmt::Display + Into<Response<BoxBody>>,
{
type Response = ();
type Error = DispatchError;
type Future = Dispatcher<T, S, B, X, U>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self._poll_ready(cx).map_err(|e| {
log::error!("HTTP/1 service readiness error: {:?}", e);
DispatchError::Service(e)
self._poll_ready(cx).map_err(|err| {
error!("HTTP/1 service readiness error: {:?}", err);
DispatchError::Service(err)
})
}
fn call(&self, (io, addr): (T, Option<net::SocketAddr>)) -> Self::Future {
let on_connect_data =
OnConnectData::from_io(&io, self.on_connect_ext.as_deref());
Dispatcher::new(
io,
self.cfg.clone(),
self.flow.clone(),
on_connect_data,
addr,
)
let conn_data = OnConnectData::from_io(&io, self.on_connect_ext.as_deref());
Dispatcher::new(io, self.flow.clone(), self.cfg.clone(), addr, conn_data)
}
}

View file

@ -0,0 +1,81 @@
use std::{fmt, future::Future, pin::Pin, task::Context};
use actix_rt::time::{Instant, Sleep};
use tracing::trace;
#[derive(Debug)]
pub(super) enum TimerState {
Disabled,
Inactive,
Active { timer: Pin<Box<Sleep>> },
}
impl TimerState {
pub(super) fn new(enabled: bool) -> Self {
if enabled {
Self::Inactive
} else {
Self::Disabled
}
}
pub(super) fn is_enabled(&self) -> bool {
matches!(self, Self::Active { .. } | Self::Inactive)
}
pub(super) fn set(&mut self, timer: Sleep, line: u32) {
if matches!(self, Self::Disabled) {
trace!("setting disabled timer from line {}", line);
}
*self = Self::Active {
timer: Box::pin(timer),
};
}
pub(super) fn set_and_init(&mut self, cx: &mut Context<'_>, timer: Sleep, line: u32) {
self.set(timer, line);
self.init(cx);
}
pub(super) fn clear(&mut self, line: u32) {
if matches!(self, Self::Disabled) {
trace!("trying to clear a disabled timer from line {}", line);
}
if matches!(self, Self::Inactive) {
trace!("trying to clear an inactive timer from line {}", line);
}
*self = Self::Inactive;
}
pub(super) fn init(&mut self, cx: &mut Context<'_>) {
if let TimerState::Active { timer } = self {
let _ = timer.as_mut().poll(cx);
}
}
}
impl fmt::Display for TimerState {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
TimerState::Disabled => f.write_str("timer is disabled"),
TimerState::Inactive => f.write_str("timer is inactive"),
TimerState::Active { timer } => {
let deadline = timer.deadline();
let now = Instant::now();
if deadline < now {
f.write_str("timer is active and has reached deadline")
} else {
write!(
f,
"timer is active and due to expire in {} milliseconds",
((deadline - now).as_secs_f32() * 1000.0)
)
}
}
}
}
}
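A rough sketch of how this timer state machine might be driven from a dispatcher's poll loop; the five-second duration and both helper functions are illustrative assumptions rather than code from this crate, and since `TimerState` is `pub(super)` such helpers would have to live inside the `h1` module tree:
use std::{task::Context, time::Duration};
use actix_rt::time::sleep;
fn arm_keep_alive(state: &mut TimerState, cx: &mut Context<'_>) {
    // only arm the timer when keep-alive timing is enabled for this connection
    if state.is_enabled() {
        // line!() records the call site, matching the trace logs emitted above
        state.set_and_init(cx, sleep(Duration::from_secs(5)), line!());
    }
}
fn disarm_keep_alive(state: &mut TimerState) {
    // back to idle; clearing a disabled or inactive timer is only trace-logged
    state.clear(line!());
}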

View file

@ -2,9 +2,7 @@ use actix_codec::Framed;
use actix_service::{Service, ServiceFactory};
use futures_core::future::LocalBoxFuture;
use crate::error::Error;
use crate::h1::Codec;
use crate::request::Request;
use crate::{h1::Codec, Error, Request};
pub struct UpgradeHandler;

View file

@ -1,22 +1,29 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use pin_project_lite::pin_project;
use crate::body::{BodySize, MessageBody};
use crate::error::Error;
use crate::h1::{Codec, Message};
use crate::response::Response;
use crate::{
body::{BodySize, MessageBody},
h1::{Codec, Message},
Error, Response,
};
/// Send HTTP/1 response
#[pin_project::pin_project]
pub struct SendResponse<T, B> {
res: Option<Message<(Response<()>, BodySize)>>,
#[pin]
body: Option<B>,
#[pin]
framed: Option<Framed<T, Codec>>,
pin_project! {
/// Send HTTP/1 response
pub struct SendResponse<T, B> {
res: Option<Message<(Response<()>, BodySize)>>,
#[pin]
body: Option<B>,
#[pin]
framed: Option<Framed<T, Codec>>,
}
}
impl<T, B> SendResponse<T, B>
@ -38,7 +45,7 @@ where
impl<T, B> Future for SendResponse<T, B>
where
T: AsyncRead + AsyncWrite + Unpin,
B: MessageBody + Unpin,
B: MessageBody,
B::Error: Into<Error>,
{
type Output = Result<Framed<T, Codec>, Error>;
@ -62,26 +69,24 @@ where
.unwrap()
.is_write_buf_full()
{
let next =
// TODO: MSRV 1.51: poll_map_err
match this.body.as_mut().as_pin_mut().unwrap().poll_next(cx) {
Poll::Ready(Some(Ok(item))) => Poll::Ready(Some(item)),
Poll::Ready(Some(Err(err))) => {
return Poll::Ready(Err(err.into()))
}
Poll::Ready(None) => Poll::Ready(None),
Poll::Pending => Poll::Pending,
};
let next = match this.body.as_mut().as_pin_mut().unwrap().poll_next(cx) {
Poll::Ready(Some(Ok(item))) => Poll::Ready(Some(item)),
Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err.into())),
Poll::Ready(None) => Poll::Ready(None),
Poll::Pending => Poll::Pending,
};
match next {
Poll::Ready(item) => {
// body is done when item is None
body_done = item.is_none();
if body_done {
let _ = this.body.take();
this.body.set(None);
}
let framed = this.framed.as_mut().as_pin_mut().unwrap();
framed.write(Message::Chunk(item))?;
framed
.write(Message::Chunk(item))
.map_err(|err| Error::new_send_response().with_cause(err))?;
}
Poll::Pending => body_ready = false,
}
@ -92,7 +97,10 @@ where
// flush write buffer
if !framed.is_write_buf_empty() {
match framed.flush(cx)? {
match framed
.flush(cx)
.map_err(|err| Error::new_send_response().with_cause(err))?
{
Poll::Ready(_) => {
if body_ready {
continue;
@ -106,7 +114,9 @@ where
// send response
if let Some(res) = this.res.take() {
framed.write(res)?;
framed
.write(res)
.map_err(|err| Error::new_send_response().with_cause(err))?;
continue;
}

View file

@ -1,32 +1,35 @@
use std::{
cmp,
error::Error as StdError,
future::Future,
marker::PhantomData,
net,
pin::Pin,
pin::{pin, Pin},
rc::Rc,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::time::{sleep, Sleep};
use actix_service::Service;
use actix_utils::future::poll_fn;
use bytes::{Bytes, BytesMut};
use futures_core::ready;
use h2::server::{Connection, SendResponse};
use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
use log::{error, trace};
use h2::{
server::{Connection, SendResponse},
Ping, PingPong,
};
use pin_project_lite::pin_project;
use crate::body::{BodySize, MessageBody};
use crate::config::ServiceConfig;
use crate::error::Error;
use crate::message::ResponseHead;
use crate::payload::Payload;
use crate::request::Request;
use crate::response::Response;
use crate::service::HttpFlow;
use crate::OnConnectData;
use crate::{
body::{BodySize, BoxBody, MessageBody},
config::ServiceConfig,
header::{
HeaderName, HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING, UPGRADE,
},
service::HttpFlow,
Extensions, Method, OnConnectData, Payload, Request, Response, ResponseHead,
};
const CHUNK_SIZE: usize = 16_384;
@ -35,43 +38,71 @@ pin_project! {
pub struct Dispatcher<T, S, B, X, U> {
flow: Rc<HttpFlow<S, X, U>>,
connection: Connection<T, Bytes>,
on_connect_data: OnConnectData,
conn_data: Option<Rc<Extensions>>,
config: ServiceConfig,
peer_addr: Option<net::SocketAddr>,
_phantom: PhantomData<B>,
ping_pong: Option<H2PingPong>,
_phantom: PhantomData<B>
}
}
impl<T, S, B, X, U> Dispatcher<T, S, B, X, U> {
impl<T, S, B, X, U> Dispatcher<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
{
pub(crate) fn new(
mut conn: Connection<T, Bytes>,
flow: Rc<HttpFlow<S, X, U>>,
connection: Connection<T, Bytes>,
on_connect_data: OnConnectData,
config: ServiceConfig,
peer_addr: Option<net::SocketAddr>,
conn_data: OnConnectData,
timer: Option<Pin<Box<Sleep>>>,
) -> Self {
let ping_pong = config.keep_alive().duration().map(|dur| H2PingPong {
timer: timer
.map(|mut timer| {
// reuse timer slot if it was initialized for handshake
timer.as_mut().reset((config.now() + dur).into());
timer
})
.unwrap_or_else(|| Box::pin(sleep(dur))),
in_flight: false,
ping_pong: conn.ping_pong().unwrap(),
});
Self {
flow,
config,
peer_addr,
connection,
on_connect_data,
connection: conn,
conn_data: conn_data.0.map(Rc::new),
ping_pong,
_phantom: PhantomData,
}
}
}
struct H2PingPong {
/// Handle to send ping frames to the peer and receive pongs.
ping_pong: PingPong,
/// True when a ping has been sent and is waiting for a reply.
in_flight: bool,
/// Timeout for pong response.
timer: Pin<Box<Sleep>>,
}
impl<T, S, B, X, U> Future for Dispatcher<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request>,
S::Error: Into<Error>,
S::Error: Into<Response<BoxBody>>,
S::Future: 'static,
S::Response: Into<Response<B>>,
B: MessageBody,
B::Error: Into<Error>,
{
type Output = Result<(), crate::error::DispatchError>;
@ -79,107 +110,147 @@ where
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
while let Some((req, tx)) =
ready!(Pin::new(&mut this.connection).poll_accept(cx)?)
{
let (parts, body) = req.into_parts();
let pl = crate::h2::Payload::new(body);
let pl = Payload::<crate::payload::PayloadStream>::H2(pl);
let mut req = Request::with_payload(pl);
loop {
match Pin::new(&mut this.connection).poll_accept(cx)? {
Poll::Ready(Some((req, tx))) => {
let (parts, body) = req.into_parts();
let payload = crate::h2::Payload::new(body);
let pl = Payload::H2 { payload };
let mut req = Request::with_payload(pl);
let head_req = parts.method == Method::HEAD;
let head = req.head_mut();
head.uri = parts.uri;
head.method = parts.method;
head.version = parts.version;
head.headers = parts.headers.into();
head.peer_addr = this.peer_addr;
let head = req.head_mut();
head.uri = parts.uri;
head.method = parts.method;
head.version = parts.version;
head.headers = parts.headers.into();
head.peer_addr = this.peer_addr;
// merge on_connect_ext data into request extensions
this.on_connect_data.merge_into(&mut req);
req.conn_data.clone_from(&this.conn_data);
let fut = this.flow.service.call(req);
let config = this.config.clone();
let fut = this.flow.service.call(req);
let config = this.config.clone();
// multiplex request handling with spawn task
actix_rt::spawn(async move {
// resolve service call and send response.
let res = match fut.await {
Ok(res) => handle_response(res.into(), tx, config).await,
Err(err) => {
let res = Response::from_error(err.into());
handle_response(res, tx, config).await
}
};
// multiplex request handling with spawn task
actix_rt::spawn(async move {
// resolve service call and send response.
let res = match fut.await {
Ok(res) => handle_response(res.into(), tx, config, head_req).await,
Err(err) => {
let res: Response<BoxBody> = err.into();
handle_response(res, tx, config, head_req).await
}
};
// log error.
if let Err(err) = res {
match err {
DispatchError::SendResponse(err) => {
trace!("Error sending HTTP/2 response: {:?}", err)
// log error.
if let Err(err) = res {
match err {
DispatchError::SendResponse(err) => {
tracing::trace!("Error sending response: {err:?}");
}
DispatchError::SendData(err) => {
tracing::warn!("Send data error: {err:?}");
}
DispatchError::ResponseBody(err) => {
tracing::error!("Response payload stream error: {err:?}");
}
}
}
DispatchError::SendData(err) => warn!("{:?}", err),
DispatchError::ResponseBody(err) => {
error!("Response payload stream error: {:?}", err)
}
}
});
}
});
}
Poll::Ready(None) => return Poll::Ready(Ok(())),
Poll::Ready(Ok(()))
Poll::Pending => match this.ping_pong.as_mut() {
Some(ping_pong) => loop {
if ping_pong.in_flight {
// When there is an in-flight ping-pong, poll the pong and the keep-alive
// timer. When a pong is received, update the keep-alive timer to
// determine the next ping-pong timing.
match ping_pong.ping_pong.poll_pong(cx)? {
Poll::Ready(_) => {
ping_pong.in_flight = false;
let dead_line = this.config.keep_alive_deadline().unwrap();
ping_pong.timer.as_mut().reset(dead_line.into());
}
Poll::Pending => {
return ping_pong.timer.as_mut().poll(cx).map(|_| Ok(()));
}
}
} else {
// When there is no in-flight ping-pong, the keep-alive timer is used to
// wait for the next ping-pong timing, so at this point it serves as an
// interval timer instead.
ready!(ping_pong.timer.as_mut().poll(cx));
ping_pong.ping_pong.send_ping(Ping::opaque())?;
let dead_line = this.config.keep_alive_deadline().unwrap();
ping_pong.timer.as_mut().reset(dead_line.into());
ping_pong.in_flight = true;
}
},
None => return Poll::Pending,
},
}
}
}
}
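The keep-alive branch above alternates between an in-flight phase and an idle phase. Below is a condensed, stand-alone sketch of just that state machine with the connection stripped away; `poll_keep_alive` and its fixed `deadline` parameter are illustrative simplifications (the real code recomputes the deadline from `config.keep_alive_deadline()` on every reset):
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};
use actix_rt::time::{Instant, Sleep};
use futures_core::ready;
use h2::{Ping, PingPong};
fn poll_keep_alive(
    pp: &mut PingPong,
    in_flight: &mut bool,
    timer: &mut Pin<Box<Sleep>>,
    deadline: Instant,
    cx: &mut Context<'_>,
) -> Poll<Result<(), h2::Error>> {
    loop {
        if *in_flight {
            // waiting for the peer's pong; if the timer fires first, keep-alive has expired
            match pp.poll_pong(cx)? {
                Poll::Ready(_) => {
                    *in_flight = false;
                    timer.as_mut().reset(deadline);
                }
                Poll::Pending => return timer.as_mut().poll(cx).map(Ok),
            }
        } else {
            // idle phase: the same timer acts as the interval until the next ping is sent
            ready!(timer.as_mut().poll(cx));
            pp.send_ping(Ping::opaque())?;
            timer.as_mut().reset(deadline);
            *in_flight = true;
        }
    }
}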
enum DispatchError {
SendResponse(h2::Error),
SendData(h2::Error),
ResponseBody(Error),
ResponseBody(Box<dyn StdError>),
}
async fn handle_response<B>(
res: Response<B>,
mut tx: SendResponse<Bytes>,
config: ServiceConfig,
head_req: bool,
) -> Result<(), DispatchError>
where
B: MessageBody,
B::Error: Into<Error>,
{
let (res, body) = res.replace_body(());
// prepare response.
let mut size = body.size();
let res = prepare_response(config, res.head(), &mut size);
let eof = size.is_eof();
let eof_or_head = size.is_eof() || head_req;
// send response head and return on eof.
let mut stream = tx
.send_response(res, eof)
.send_response(res, eof_or_head)
.map_err(DispatchError::SendResponse)?;
if eof {
if eof_or_head {
return Ok(());
}
// poll response body and send chunks to client.
actix_rt::pin!(body);
let mut body = pin!(body);
// poll response body and send chunks to client
while let Some(res) = poll_fn(|cx| body.as_mut().poll_next(cx)).await {
let mut chunk = res.map_err(|err| DispatchError::ResponseBody(err.into()))?;
'send: loop {
let chunk_size = cmp::min(chunk.len(), CHUNK_SIZE);
// reserve enough space and wait for stream ready.
stream.reserve_capacity(cmp::min(chunk.len(), CHUNK_SIZE));
stream.reserve_capacity(chunk_size);
match poll_fn(|cx| stream.poll_capacity(cx)).await {
// No capacity left; drop body and return.
None => return Ok(()),
Some(res) => {
// Split chuck to writeable size and send to client.
let cap = res.map_err(DispatchError::SendData)?;
Some(Err(err)) => return Err(DispatchError::SendData(err)),
Some(Ok(cap)) => {
// split chunk to writeable size and send to client
let len = chunk.len();
let bytes = chunk.split_to(cmp::min(cap, len));
let bytes = chunk.split_to(cmp::min(len, cap));
stream
.send_data(bytes, false)
@ -226,30 +297,43 @@ fn prepare_response(
_ => {}
}
let _ = match size {
BodySize::None | BodySize::Stream => None,
BodySize::Empty => res
.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static("0")),
match size {
BodySize::None | BodySize::Stream => {}
BodySize::Sized(0) => {
#[allow(clippy::declare_interior_mutable_const)]
const HV_ZERO: HeaderValue = HeaderValue::from_static("0");
res.headers_mut().insert(CONTENT_LENGTH, HV_ZERO);
}
BodySize::Sized(len) => {
let mut buf = itoa::Buffer::new();
res.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::from_str(buf.format(*len)).unwrap(),
)
);
}
};
// copy headers
for (key, value) in head.headers.iter() {
match *key {
// TODO: consider skipping other headers according to:
// https://tools.ietf.org/html/rfc7540#section-8.1.2.2
// omit HTTP/1.x only headers
CONNECTION | TRANSFER_ENCODING => continue,
CONTENT_LENGTH if skip_len => continue,
DATE => has_date = true,
match key {
// omit HTTP/1.x only headers according to:
// https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2
&CONNECTION | &TRANSFER_ENCODING | &UPGRADE => continue,
&CONTENT_LENGTH if skip_len => continue,
&DATE => has_date = true,
// omit HTTP/1.x only headers according to:
// https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2
hdr if hdr == HeaderName::from_static("keep-alive")
|| hdr == HeaderName::from_static("proxy-connection") =>
{
continue
}
_ => {}
}
@ -259,7 +343,7 @@ fn prepare_response(
// set date header
if !has_date {
let mut bytes = BytesMut::with_capacity(29);
config.set_date_header(&mut bytes);
config.write_date_header_value(&mut bytes);
res.headers_mut().insert(
DATE,
// SAFETY: serialized date-times are known ASCII strings

View file

@ -1,20 +1,29 @@
//! HTTP/2 protocol.
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::time::{sleep_until, Sleep};
use bytes::Bytes;
use futures_core::{ready, Stream};
use h2::RecvStream;
use h2::{
server::{handshake, Connection, Handshake},
RecvStream,
};
use crate::{
config::ServiceConfig,
error::{DispatchError, PayloadError},
};
mod dispatcher;
mod service;
pub use self::dispatcher::Dispatcher;
pub use self::service::H2Service;
use crate::error::PayloadError;
pub use self::{dispatcher::Dispatcher, service::H2Service};
/// HTTP/2 peer stream.
pub struct Payload {
@ -30,10 +39,7 @@ impl Payload {
impl Stream for Payload {
type Item = Result<Bytes, PayloadError>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
match ready!(Pin::new(&mut this.stream).poll_data(cx)) {
@ -50,3 +56,52 @@ impl Stream for Payload {
}
}
}
pub(crate) fn handshake_with_timeout<T>(io: T, config: &ServiceConfig) -> HandshakeWithTimeout<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
HandshakeWithTimeout {
handshake: handshake(io),
timer: config
.client_request_deadline()
.map(|deadline| Box::pin(sleep_until(deadline.into()))),
}
}
pub(crate) struct HandshakeWithTimeout<T: AsyncRead + AsyncWrite + Unpin> {
handshake: Handshake<T>,
timer: Option<Pin<Box<Sleep>>>,
}
impl<T> Future for HandshakeWithTimeout<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Output = Result<(Connection<T, Bytes>, Option<Pin<Box<Sleep>>>), DispatchError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
match Pin::new(&mut this.handshake).poll(cx)? {
// return the timer on successful handshake; its slot can be reused for h2 ping-pong
Poll::Ready(conn) => Poll::Ready(Ok((conn, this.timer.take()))),
Poll::Pending => match this.timer.as_mut() {
Some(timer) => {
ready!(timer.as_mut().poll(cx));
Poll::Ready(Err(DispatchError::SlowRequestTimeout))
}
None => Poll::Pending,
},
}
}
}
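As a sketch of how the pieces in this module are meant to fit together, a hypothetical crate-internal helper could chain the handshake with the dispatcher above. The function name, its argument list, and the assumption that it lives in its own in-crate module are all illustrative; the calls it makes (`handshake_with_timeout` and `Dispatcher::new`, both `pub(crate)`) are taken from this diff:
use std::{net, rc::Rc};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_service::Service;
use crate::{
    body::{BoxBody, MessageBody},
    config::ServiceConfig,
    error::DispatchError,
    h2::{handshake_with_timeout, Dispatcher},
    service::HttpFlow,
    OnConnectData, Request, Response,
};
async fn serve_h2<T, S, B, X, U>(
    io: T,
    config: ServiceConfig,
    flow: Rc<HttpFlow<S, X, U>>,
    peer_addr: Option<net::SocketAddr>,
    conn_data: OnConnectData,
) -> Result<(), DispatchError>
where
    T: AsyncRead + AsyncWrite + Unpin,
    S: Service<Request>,
    S::Error: Into<Response<BoxBody>>,
    S::Future: 'static,
    S::Response: Into<Response<B>>,
    B: MessageBody,
{
    // perform the handshake first; on success the unused timeout timer is handed back
    let (conn, timer) = handshake_with_timeout(io, &config).await?;
    // the leftover timer slot is reused by the dispatcher for the ping-pong keep-alive
    Dispatcher::<T, S, B, X, U>::new(conn, flow, config, peer_addr, conn_data, timer).await
}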
#[cfg(test)]
mod tests {
use static_assertions::assert_impl_all;
use super::*;
assert_impl_all!(Payload: Unpin, Send, Sync);
}

Some files were not shown because too many files have changed in this diff.