
Compare commits


1 Commits

1fe04cf51e allow camel case response headers (closes #1979)
2022-01-16 02:35:10 +00:00
290 changed files with 6646 additions and 12749 deletions

View File

@ -6,12 +6,9 @@ lint-all = "clippy --workspace --all-features --tests --examples --bins -- -Dcli
ci-check-min = "hack --workspace check --no-default-features"
ci-check-default = "hack --workspace check"
ci-check-default-tests = "check --workspace --tests"
ci-check-all-feature-powerset="hack --workspace --feature-powerset --skip=__compress,experimental-io-uring check"
ci-check-all-feature-powerset="hack --workspace --feature-powerset --skip=__compress,io-uring check"
ci-check-all-feature-powerset-linux="hack --workspace --feature-powerset --skip=__compress check"
# testing
ci-doctest-default = "test --workspace --doc --no-fail-fast -- --nocapture"
ci-doctest = "test --workspace --all-features --doc --no-fail-fast -- --nocapture"
# compile docs as docs.rs would
# RUSTDOCFLAGS="--cfg=docsrs" cargo +nightly doc --no-deps --workspace

View File

@ -5,9 +5,6 @@ on:
branches:
- master
permissions:
contents: read # to fetch code (actions/checkout)
jobs:
check_benchmark:
runs-on: ubuntu-latest

View File

@ -1,12 +1,9 @@
name: CI (post-merge)
name: CI (master only)
on:
push:
branches: [master]
permissions:
contents: read # to fetch code (actions/checkout)
jobs:
build_and_test_nightly:
strategy:
@ -26,7 +23,6 @@ jobs:
CI: 1
CARGO_INCREMENTAL: 0
VCPKGRS_DYNAMIC: 1
CARGO_UNSTABLE_SPARSE_REGISTRY: true
steps:
- uses: actions/checkout@v2
@ -48,15 +44,18 @@ jobs:
profile: minimal
override: true
- name: Install cargo-hack
uses: taiki-e/install-action@cargo-hack
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check minimal
uses: actions-rs/cargo@v1
with: { command: ci-check-min }
@ -79,58 +78,76 @@ jobs:
cargo test --lib --tests -p=actix-multipart --all-features
cargo test --lib --tests -p=actix-web-actors --all-features
- name: tests (io-uring)
if: matrix.target.os == 'ubuntu-latest'
timeout-minutes: 60
run: >
sudo bash -c "ulimit -Sl 512
&& ulimit -Hl 512
&& PATH=$PATH:/usr/share/rust/.cargo/bin
&& RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo test --lib --tests -p=actix-files --all-features"
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.8.2 --no-default-features --features ci-autoclean
cargo install cargo-cache --version 0.6.3 --no-default-features --features ci-autoclean
cargo-cache
ci_feature_powerset_check:
name: Verify Feature Combinations
runs-on: ubuntu-latest
env:
CI: 1
CARGO_INCREMENTAL: 0
steps:
- uses: actions/checkout@v2
- uses: dtolnay/rust-toolchain@stable
- name: Install cargo-hack
uses: taiki-e/install-action@cargo-hack
- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
run: cargo generate-lockfile
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: check feature combinations
run: cargo ci-check-all-feature-powerset
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check feature combinations
run: cargo ci-check-all-feature-powerset-linux
uses: actions-rs/cargo@v1
with: { command: ci-check-all-feature-powerset }
- name: check feature combinations
uses: actions-rs/cargo@v1
with: { command: ci-check-all-feature-powerset-linux }
nextest:
name: nextest
coverage:
name: coverage
runs-on: ubuntu-latest
env:
CI: 1
CARGO_INCREMENTAL: 0
steps:
- uses: actions/checkout@v2
- uses: dtolnay/rust-toolchain@stable
- name: Install nextest
uses: taiki-e/install-action@nextest
- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
run: cargo generate-lockfile
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.3.0
uses: Swatinem/rust-cache@v1.2.0
- name: Test with cargo-nextest
run: cargo nextest run
- name: Generate coverage file
run: |
cargo install cargo-tarpaulin --vers "^0.13"
cargo tarpaulin --workspace --features=rustls,openssl --out Xml --verbose
- name: Upload to Codecov
uses: codecov/codecov-action@v1
with: { file: cobertura.xml }

View File

@ -6,9 +6,6 @@ on:
push:
branches: [master]
permissions:
contents: read # to fetch code (actions/checkout)
jobs:
build_and_test:
strategy:
@ -19,7 +16,7 @@ jobs:
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-2022, triple: x86_64-pc-windows-msvc }
version:
- 1.59.0 # MSRV
- 1.54.0 # MSRV
- stable
name: ${{ matrix.target.name }} / ${{ matrix.version }}
@ -50,26 +47,17 @@ jobs:
profile: minimal
override: true
- name: Install cargo-hack
uses: taiki-e/install-action@cargo-hack
- name: workaround MSRV issues
if: matrix.version != 'stable'
run: |
cargo install cargo-edit --version=0.8.0
cargo add const-str@0.3 --dev -p=actix-web
cargo add const-str@0.3 --dev -p=awc
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: workaround MSRV issues
if: matrix.version != 'stable'
run: |
cargo update -p=zstd-sys --precise=2.0.1+zstd.1.5.2
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check minimal
uses: actions-rs/cargo@v1
@ -93,31 +81,19 @@ jobs:
cargo test --lib --tests -p=actix-multipart --all-features
cargo test --lib --tests -p=actix-web-actors --all-features
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.8.2 --no-default-features --features ci-autoclean
cargo-cache
io-uring:
name: io-uring tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: dtolnay/rust-toolchain@stable
- name: Generate Cargo.lock
run: cargo generate-lockfile
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.3.0
- name: tests (io-uring)
if: matrix.target.os == 'ubuntu-latest'
timeout-minutes: 60
run: >
sudo bash -c "ulimit -Sl 512
&& ulimit -Hl 512
&& PATH=$PATH:/usr/share/rust/.cargo/bin
&& RUSTUP_TOOLCHAIN=stable cargo test --lib --tests -p=actix-files --all-features"
&& RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo test --lib --tests -p=actix-files --all-features"
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.6.3 --no-default-features --features ci-autoclean
cargo-cache
rustdoc:
name: doc tests
@ -125,13 +101,20 @@ jobs:
steps:
- uses: actions/checkout@v2
- uses: dtolnay/rust-toolchain@nightly
- name: Install Rust (nightly)
uses: actions-rs/toolchain@v1
with:
toolchain: nightly-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
run: cargo generate-lockfile
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.3.0
- name: doc tests
run: cargo ci-doctest
uses: actions-rs/cargo@v1
timeout-minutes: 60
with: { command: ci-doctest }

View File

@ -9,41 +9,40 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: dtolnay/rust-toolchain@nightly
with: { components: rustfmt }
- run: cargo fmt --all -- --check
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
components: rustfmt
- name: Check with rustfmt
uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
clippy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: dtolnay/rust-toolchain@stable
with: { components: clippy }
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
components: clippy
override: true
- name: Generate Cargo.lock
run: cargo generate-lockfile
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Check with Clippy
uses: actions-rs/clippy-check@v1
with:
args: --workspace --tests --examples --all-features
token: ${{ secrets.GITHUB_TOKEN }}
lint-docs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: dtolnay/rust-toolchain@stable
with: { components: rust-docs }
- name: Check for broken intra-doc links
uses: actions-rs/cargo@v1
env:
RUSTDOCFLAGS: "-D warnings"
with:
command: doc
args: --no-deps --all-features --workspace
args: --workspace --tests --examples --all-features

View File

@ -1,36 +0,0 @@
# disabled because `cargo tarpaulin` currently segfaults
name: Coverage
on:
push:
branches: [master]
jobs:
# job currently (1st Feb 2022) segfaults
coverage:
name: coverage
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Generate coverage file
run: |
cargo install cargo-tarpaulin --vers "^0.13"
cargo tarpaulin --workspace --features=rustls,openssl --out Xml --verbose
- name: Upload to Codecov
uses: codecov/codecov-action@v1
with: { file: cobertura.xml }

View File

@ -4,29 +4,32 @@ on:
push:
branches: [master]
permissions: {}
jobs:
build:
permissions:
contents: write # to push changes in repo (jamesives/github-pages-deploy-action)
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: dtolnay/rust-toolchain@nightly
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Build Docs
run: cargo +nightly doc --no-deps --workspace --all-features
env:
RUSTDOCFLAGS: --cfg=docsrs
uses: actions-rs/cargo@v1
with:
command: doc
args: --workspace --all-features --no-deps
- name: Tweak HTML
run: echo '<meta http-equiv="refresh" content="0;url=actix_web/index.html">' > target/doc/index.html
- name: Deploy to GitHub Pages
uses: JamesIves/github-pages-deploy-action@v4.4.1
uses: JamesIves/github-pages-deploy-action@3.7.1
with:
folder: target/doc
single-commit: true
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages
FOLDER: target/doc

View File

@ -1,3 +0,0 @@
{
"proseWrap": "never"
}

1006
CHANGES.md

File diff suppressed because it is too large

View File

@ -1,6 +1,33 @@
[package]
name = "actix-web"
version = "4.0.0-beta.20"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust"
keywords = ["actix", "http", "web", "framework", "async"]
categories = [
"network-programming",
"asynchronous",
"web-programming::http-server",
"web-programming::websocket"
]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
license = "MIT OR Apache-2.0"
edition = "2018"
[package.metadata.docs.rs]
# features that docs.rs will build with
features = ["openssl", "rustls", "compress-brotli", "compress-gzip", "compress-zstd", "cookies", "secure-cookies"]
rustdoc-args = ["--cfg", "docsrs"]
[lib]
name = "actix_web"
path = "src/lib.rs"
[workspace]
resolver = "2"
members = [
".",
"actix-files",
"actix-http-test",
"actix-http",
@ -9,10 +36,93 @@ members = [
"actix-test",
"actix-web-actors",
"actix-web-codegen",
"actix-web",
"awc",
]
[features]
default = ["compress-brotli", "compress-gzip", "compress-zstd", "cookies"]
# Brotli algorithm content-encoding support
compress-brotli = ["actix-http/compress-brotli", "__compress"]
# Gzip and deflate algorithms content-encoding support
compress-gzip = ["actix-http/compress-gzip", "__compress"]
# Zstd algorithm content-encoding support
compress-zstd = ["actix-http/compress-zstd", "__compress"]
# support for cookies
cookies = ["cookie"]
# secure cookies feature
secure-cookies = ["cookie/secure"]
# openssl
openssl = ["actix-http/openssl", "actix-tls/accept", "actix-tls/openssl"]
# rustls
rustls = ["actix-http/rustls", "actix-tls/accept", "actix-tls/rustls"]
# Internal (PRIVATE!) features used to aid testing and checking feature status.
# Don't rely on these whatsoever. They may disappear at anytime.
__compress = []
# io-uring feature only available for Linux OSes.
experimental-io-uring = ["actix-server/io-uring"]
[dependencies]
actix-codec = "0.4.1"
actix-macros = "0.2.3"
actix-rt = "2.6"
actix-server = "2.0.0-rc.4"
actix-service = "2.0.0"
actix-utils = "3.0.0"
actix-tls = { version = "3.0.0", default-features = false, optional = true }
actix-http = "3.0.0-beta.18"
actix-router = "0.5.0-rc.1"
actix-web-codegen = "0.5.0-rc.1"
ahash = "0.7"
bytes = "1"
cfg-if = "1"
cookie = { version = "0.16", features = ["percent-encode"], optional = true }
derive_more = "0.99.5"
encoding_rs = "0.8"
futures-core = { version = "0.3.7", default-features = false }
futures-util = { version = "0.3.7", default-features = false }
itoa = "1"
language-tags = "0.3"
once_cell = "1.5"
log = "0.4"
mime = "0.3"
pin-project-lite = "0.2.7"
regex = "1.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_urlencoded = "0.7"
smallvec = "1.6.1"
socket2 = "0.4.0"
time = { version = "0.3", default-features = false, features = ["formatting"] }
url = "2.1"
[dev-dependencies]
actix-files = "0.6.0-beta.14"
actix-test = { version = "0.1.0-beta.11", features = ["openssl", "rustls"] }
awc = { version = "3.0.0-beta.18", features = ["openssl"] }
brotli = "3.3.3"
const-str = "0.3"
criterion = { version = "0.3", features = ["html_reports"] }
env_logger = "0.9"
flate2 = "1.0.13"
futures-util = { version = "0.3.7", default-features = false, features = ["std"] }
rand = "0.8"
rcgen = "0.8"
rustls-pemfile = "0.2"
static_assertions = "1"
tls-openssl = { package = "openssl", version = "0.10.9" }
tls-rustls = { package = "rustls", version = "0.20.0" }
zstd = "0.9"
[profile.dev]
# Disabling debug info speeds up builds a bunch and we don't rely on it for debugging that much.
debug = 0
@ -29,7 +139,7 @@ actix-http-test = { path = "actix-http-test" }
actix-multipart = { path = "actix-multipart" }
actix-router = { path = "actix-router" }
actix-test = { path = "actix-test" }
actix-web = { path = "actix-web" }
actix-web = { path = "." }
actix-web-actors = { path = "actix-web-actors" }
actix-web-codegen = { path = "actix-web-codegen" }
awc = { path = "awc" }
@ -42,3 +152,31 @@ awc = { path = "awc" }
# actix-utils = { path = "../actix-net/actix-utils" }
# actix-tls = { path = "../actix-net/actix-tls" }
# actix-server = { path = "../actix-net/actix-server" }
[[test]]
name = "test_server"
required-features = ["compress-brotli", "compress-gzip", "compress-zstd", "cookies"]
[[example]]
name = "basic"
required-features = ["compress-gzip"]
[[example]]
name = "uds"
required-features = ["compress-gzip"]
[[example]]
name = "on-connect"
required-features = []
[[bench]]
name = "server"
harness = false
[[bench]]
name = "service"
harness = false
[[bench]]
name = "responder"
harness = false

677
MIGRATION.md Normal file
View File

@ -0,0 +1,677 @@
## Unreleased
- The default `NormalizePath` behavior now strips trailing slashes by default. This was
previously documented to be the case in v3 but the behavior now matches. The effect is that
routes defined with trailing slashes will become inaccessible when
using `NormalizePath::default()`. As such, calling `NormalizePath::default()` will log a warning.
It is advised that the `new` method be used instead.
Before: `#[get("/test/")]`
After: `#[get("/test")]`
Alternatively, explicitly require trailing slashes: `NormalizePath::new(TrailingSlash::Always)`.
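A minimal sketch of wiring this up explicitly (assuming actix-web 4 APIs; the route, handler, and bind address are illustrative):
```rust
use actix_web::{
    middleware::{NormalizePath, TrailingSlash},
    web, App, HttpResponse, HttpServer,
};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new()
            // Explicit trailing-slash policy instead of NormalizePath::default(),
            // which now trims trailing slashes and logs a warning.
            .wrap(NormalizePath::new(TrailingSlash::Always))
            .route("/test/", web::get().to(|| async { HttpResponse::Ok().finish() }))
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}
```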
- The `type Config` of `FromRequest` was removed.
- Feature flag `compress` has been split by supported algorithm (brotli, gzip, zstd).
By default all compression algorithms are enabled.
To select the algorithms you want to include with `middleware::Compress`, use the following flags:
- `compress-brotli`
- `compress-gzip`
- `compress-zstd`
If you have set dedicated `actix-web` features in your `Cargo.toml` and still want
compression enabled, change the feature selection as below:
Before: `"compress"`
After: `"compress-brotli", "compress-gzip", "compress-zstd"`
## 3.0.0
- The return type for `ServiceRequest::app_data::<T>()` was changed from returning a `Data<T>` to
simply a `T`. To access a `Data<T>` use `ServiceRequest::app_data::<Data<T>>()`.
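For instance, a sketch (the `AppState` type and free function are illustrative) of how the two lookups differ for code that receives a `ServiceRequest`:
```rust
use actix_web::{dev::ServiceRequest, web::Data};

struct AppState {
    name: &'static str,
}

fn inspect(req: &ServiceRequest) {
    // Returns the bare T when a plain value was stored with App::app_data(T)...
    let plain: Option<&AppState> = req.app_data::<AppState>();
    // ...while Data<T> must now be requested explicitly.
    let shared: Option<&Data<AppState>> = req.app_data::<Data<AppState>>();
    println!("{:?} / {:?}", plain.map(|s| s.name), shared.map(|s| s.name));
}
```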
- Cookie handling has been offloaded to the `cookie` crate:
* `USERINFO_ENCODE_SET` is no longer exposed. Percent-encoding is still supported; check docs.
* Some types now require lifetime parameters.
- The time crate was updated to `v0.2`, a major breaking change to the time crate, which affects
any `actix-web` method previously expecting a time v0.1 input.
- Setting a cookie's SameSite property, explicitly, to `SameSite::None` will now
result in `SameSite=None` being sent with the response Set-Cookie header.
To create a cookie without a SameSite attribute, remove any calls setting same_site.
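A sketch of the difference (cookie name and value are illustrative; requires the default `cookies` feature):
```rust
use actix_web::cookie::{Cookie, SameSite};

fn cookies() -> (Cookie<'static>, Cookie<'static>) {
    // Explicit SameSite::None is now serialized as `SameSite=None`.
    let explicit = Cookie::build("session", "abc123")
        .same_site(SameSite::None)
        .finish();

    // Omitting same_site() produces a cookie with no SameSite attribute at all.
    let unset = Cookie::build("session", "abc123").finish();

    (explicit, unset)
}
```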
- Support for Actors messages was moved to the `actix-http` crate and is enabled
with the `actors` feature.
- The `content_length` function was removed from actix-http.
You can set `Content-Length` by setting the response body as usual or by calling the `no_chunking` function.
- `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
`u64` instead of a `usize`.
- Code that was using `path.<index>` to access a `web::Path<(A, B, C)>`s elements now needs to use
destructuring or `.into_inner()`. For example:
```rust
// Previously:
async fn some_route(path: web::Path<(String, String)>) -> String {
format!("Hello, {} {}", path.0, path.1)
}
// Now (this also worked before):
async fn some_route(path: web::Path<(String, String)>) -> String {
let (first_name, last_name) = path.into_inner();
format!("Hello, {} {}", first_name, last_name)
}
// Or (this wasn't previously supported):
async fn some_route(web::Path((first_name, last_name)): web::Path<(String, String)>) -> String {
format!("Hello, {} {}", first_name, last_name)
}
```
- `middleware::NormalizePath` can now also be configured to trim trailing slashes instead of always keeping one.
It will need `middleware::normalize::TrailingSlash` when being constructed with `NormalizePath::new(...)`,
or for an easier migration you can replace `wrap(middleware::NormalizePath)` with `wrap(middleware::NormalizePath::new(TrailingSlash::MergeOnly))`.
- `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`.
- `HttpServer::maxconnrate` is renamed to the more expressive `HttpServer::max_connection_rate`.
## 2.0.0
- `HttpServer::start()` renamed to `HttpServer::run()`. It is also possible to
`.await` the result of `run()`; in that case it awaits server exit.
- `App::register_data()` renamed to `App::app_data()` and accepts any type `T: 'static`.
Stored data is available via `HttpRequest::app_data()` method at runtime.
- Extractor configuration must be registered with `App::app_data()` instead of `App::data()`
- Sync handlers have been removed. The `.to_async()` method has been renamed to `.to()`;
replace `fn` with `async fn` to convert a sync handler to async, as in the sketch below.
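A sketch covering this and the `HttpServer::run()` point above (handler, route, and bind address are illustrative; actix-web 2.0-era APIs):
```rust
use actix_web::{web, App, HttpResponse, HttpServer};

// Previously a sync `fn index() -> HttpResponse` registered with `.to()`;
// now the handler is an `async fn`, still registered with `.to()`.
async fn index() -> HttpResponse {
    HttpResponse::Ok().body("hello")
}

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| App::new().route("/", web::get().to(index)))
        .bind("127.0.0.1:8080")?
        .run()
        // Awaiting the result of run() waits for server exit.
        .await
}
```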
- `actix_http_test::TestServer` moved to the `actix_web::test` module. To start a
test server, use the `test::start()` or `test::start_with_config()` methods.
- The `ResponseError` trait has been refactored. `ResponseError::error_response()` renders
the HTTP response.
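For example, a minimal sketch (the error type and message are illustrative) of a custom error under the refactored trait:
```rust
use std::fmt;

use actix_web::{HttpResponse, ResponseError};

#[derive(Debug)]
struct MyError;

impl fmt::Display for MyError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("something went wrong")
    }
}

impl ResponseError for MyError {
    // error_response() now renders the full HTTP response for this error.
    fn error_response(&self) -> HttpResponse {
        HttpResponse::InternalServerError().body("something went wrong")
    }
}
```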
- Feature `rust-tls` renamed to `rustls`
instead of
```rust
actix-web = { version = "2.0.0", features = ["rust-tls"] }
```
use
```rust
actix-web = { version = "2.0.0", features = ["rustls"] }
```
- Feature `ssl` renamed to `openssl`
instead of
```rust
actix-web = { version = "2.0.0", features = ["ssl"] }
```
use
```rust
actix-web = { version = "2.0.0", features = ["openssl"] }
```
- `Cors` builder now requires that you call `.finish()` to construct the middleware
## 1.0.1
- Cors middleware has been moved to `actix-cors` crate
instead of
```rust
use actix_web::middleware::cors::Cors;
```
use
```rust
use actix_cors::Cors;
```
- Identity middleware has been moved to `actix-identity` crate
instead of
```rust
use actix_web::middleware::identity::{Identity, CookieIdentityPolicy, IdentityService};
```
use
```rust
use actix_identity::{Identity, CookieIdentityPolicy, IdentityService};
```
## 1.0.0
- Extractor configuration. In version 1.0 this is handled with the new `Data` mechanism for both setting and retrieving the configuration
instead of
```rust
#[derive(Default)]
struct ExtractorConfig {
config: String,
}
impl FromRequest for YourExtractor {
type Config = ExtractorConfig;
type Result = Result<YourExtractor, Error>;
fn from_request(req: &HttpRequest, cfg: &Self::Config) -> Self::Result {
println!("use the config: {:?}", cfg.config);
...
}
}
App::new().resource("/route_with_config", |r| {
r.post().with_config(handler_fn, |cfg| {
cfg.0.config = "test".to_string();
})
})
```
use the HttpRequest to get the configuration like any other `Data` with `req.app_data::<C>()` and set it with the `data()` method on the `resource`
```rust
#[derive(Default)]
struct ExtractorConfig {
config: String,
}
impl FromRequest for YourExtractor {
type Error = Error;
type Future = Result<Self, Self::Error>;
type Config = ExtractorConfig;
fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future {
let cfg = req.app_data::<ExtractorConfig>();
println!("config data?: {:?}", cfg.unwrap().role);
...
}
}
App::new().service(
resource("/route_with_config")
.data(ExtractorConfig {
config: "test".to_string(),
})
.route(post().to(handler_fn)),
)
```
- Resource registration. Version 1.0 uses generalized resource
registration via the `.service()` method.
instead of
```rust
App::new().resource("/welcome", |r| r.f(welcome))
```
use App's or Scope's `.service()` method. The `.service()` method accepts an
object that implements the `HttpServiceFactory` trait. By default,
actix-web provides `Resource` and `Scope` services.
```rust
App::new().service(
    web::resource("/welcome")
        .route(web::get().to(welcome))
        .route(web::post().to(post_handler)),
);
```
- Scope registration.
instead of
```rust
let app = App::new().scope("/{project_id}", |scope| {
scope
.resource("/path1", |r| r.f(|_| HttpResponse::Ok()))
.resource("/path2", |r| r.f(|_| HttpResponse::Ok()))
.resource("/path3", |r| r.f(|_| HttpResponse::MethodNotAllowed()))
});
```
use `.service()` for registration and `web::scope()` as scope object factory.
```rust
let app = App::new().service(
web::scope("/{project_id}")
.service(web::resource("/path1").to(|| HttpResponse::Ok()))
.service(web::resource("/path2").to(|| HttpResponse::Ok()))
.service(web::resource("/path3").to(|| HttpResponse::MethodNotAllowed()))
);
```
- `.with()`, `.with_async()` registration methods have been renamed to `.to()` and `.to_async()`.
instead of
```rust
App::new().resource("/welcome", |r| r.with(welcome))
```
use `.to()` or `.to_async()` methods
```rust
App::new().service(web::resource("/welcome").to(welcome))
```
- Passing arguments to handlers with extractors; multiple arguments are allowed.
instead of
```rust
fn welcome((body, req): (Bytes, HttpRequest)) -> ... {
...
}
```
use multiple arguments
```rust
fn welcome(body: Bytes, req: HttpRequest) -> ... {
...
}
```
- `.f()`, `.a()` and `.h()` handler registration methods have been removed.
Use `.to()` for handlers and `.to_async()` for async handlers. Handler functions
must use extractors.
instead of
```rust
App::new().resource("/welcome", |r| r.f(welcome))
```
use App's `to()` or `to_async()` methods
```rust
App::new().service(web::resource("/welcome").to(welcome))
```
- `HttpRequest` does not provide access to request's payload stream.
instead of
```rust
fn index(req: &HttpRequest) -> Box<Future<Item=HttpResponse, Error=Error>> {
req
.payload()
.from_err()
.fold((), |_, chunk| {
...
})
.map(|_| HttpResponse::Ok().finish())
.responder()
}
```
use `Payload` extractor
```rust
fn index(stream: web::Payload) -> impl Future<Item=HttpResponse, Error=Error> {
stream
.from_err()
.fold((), |_, chunk| {
...
})
.map(|_| HttpResponse::Ok().finish())
}
```
- `State` is now `Data`. You register `Data` during the App initialization process
and then access it from handlers either using a `Data` extractor or using
`HttpRequest`'s API.
instead of
```rust
App::with_state(T)
```
use App's `data` method
```rust
App::new()
.data(T)
```
and either use the Data extractor within your handler
```rust
use actix_web::web::Data;
fn endpoint_handler(data: Data<T>) {
...
}
```
.. or access your Data element from the HttpRequest
```rust
fn endpoint_handler(req: HttpRequest) {
let data: Option<Data<T>> = req.app_data::<T>();
}
```
- `AsyncResponder` is removed; use the `.to_async()` registration method and `impl Future<...>` as the result type.
instead of
```rust
use actix_web::AsyncResponder;
fn endpoint_handler(...) -> impl Future<Item=HttpResponse, Error=Error>{
...
.responder()
}
```
.. simply omit AsyncResponder and the corresponding responder() finish method
- Middleware
instead of
```rust
let app = App::new()
.middleware(middleware::Logger::default())
```
use `.wrap()` method
```rust
let app = App::new()
.wrap(middleware::Logger::default())
.route("/index.html", web::get().to(index));
```
- `HttpRequest::body()`, `HttpRequest::urlencoded()`, `HttpRequest::json()`, `HttpRequest::multipart()`
methods have been removed. Use the `Bytes`, `String`, `Form`, `Json`, and `Multipart` extractors instead.
instead of
```rust
fn index(req: &HttpRequest) -> Responder {
req.body()
.and_then(|body| {
...
})
}
```
use
```rust
fn index(body: Bytes) -> Responder {
...
}
```
- The `actix_web::server` module has been removed. To start an HTTP server, use the `actix_web::HttpServer` type.
- `StaticFiles` and `NamedFile` have been moved to a separate crate.
Instead of `use actix_web::fs::StaticFiles`,
use `use actix_files::Files`.
Instead of `use actix_web::fs::NamedFile`,
use `use actix_files::NamedFile`.
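A sketch of the replacement crate in use (mount path, directory, and file names are illustrative):
```rust
use actix_files::{Files, NamedFile};
use actix_web::{web, Error};

async fn favicon() -> Result<NamedFile, Error> {
    // Serve a single file with NamedFile...
    Ok(NamedFile::open("static/favicon.ico")?)
}

fn register(cfg: &mut web::ServiceConfig) {
    // ...and a whole directory with Files.
    cfg.route("/favicon.ico", web::get().to(favicon))
        .service(Files::new("/static", "static").show_files_listing());
}
```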
- Multipart has been moved to a separate crate.
Instead of `use actix_web::multipart::Multipart`,
use `use actix_multipart::Multipart`.
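And a sketch for the multipart extractor (field handling is illustrative; assumes `futures-util` for `TryStreamExt`):
```rust
use actix_multipart::Multipart;
use actix_web::{Error, HttpResponse};
use futures_util::TryStreamExt as _;

async fn upload(mut payload: Multipart) -> Result<HttpResponse, Error> {
    let mut fields = 0;

    // Multipart is a stream of fields; each field is itself a stream of byte chunks.
    while let Some(mut field) = payload.try_next().await? {
        while let Some(_chunk) = field.try_next().await? {
            // Process or discard each chunk here.
        }
        fields += 1;
    }

    Ok(HttpResponse::Ok().body(format!("received {} fields", fields)))
}
```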
- Response compression is not enabled by default.
To enable, use `Compress` middleware, `App::new().wrap(Compress::default())`.
- Session middleware moved to actix-session crate
- Actors support has been moved to the `actix-web-actors` crate.
- Custom Error
Instead of the `error_response` method alone, `ResponseError` now provides two methods: `error_response` and `render_response`. `error_response` creates the error response and `render_response` returns the error response to the caller.
The simplest migration from 0.7 to 1.0 is to add the method below to your custom implementation of `ResponseError`:
```rust
fn render_response(&self) -> HttpResponse {
self.error_response()
}
```
## 0.7.15
- The `' '` character is not percent decoded anymore before matching routes. If you need to use it in
your routes, you should use `%20`.
instead of
```rust
fn main() {
let app = App::new().resource("/my index", |r| {
r.method(http::Method::GET)
.with(index);
});
}
```
use
```rust
fn main() {
let app = App::new().resource("/my%20index", |r| {
r.method(http::Method::GET)
.with(index);
});
}
```
- If you used `AsyncResult::async` you need to replace it with `AsyncResult::future`
## 0.7.4
- `Route::with_config()`/`Route::with_async_config()` always pass configuration objects as a tuple,
even for handlers with one parameter.
## 0.7
- `HttpRequest` does not implement `Stream` anymore. If you need to read request payload
use `HttpMessage::payload()` method.
instead of
```rust
fn index(req: HttpRequest) -> impl Responder {
req
.from_err()
.fold(...)
....
}
```
use `.payload()`
```rust
fn index(req: HttpRequest) -> impl Responder {
req
.payload() // <- get request payload stream
.from_err()
.fold(...)
....
}
```
- [Middleware](https://actix.rs/actix-web/actix_web/middleware/trait.Middleware.html)
trait uses `&HttpRequest` instead of `&mut HttpRequest`.
- Removed `Route::with2()` and `Route::with3()`; use a tuple of extractors instead.
instead of
```rust
fn index(query: Query<..>, info: Json<MyStruct>) -> impl Responder {}
```
use tuple of extractors and use `.with()` for registration:
```rust
fn index((query, json): (Query<..>, Json<MyStruct>)) -> impl Responder {}
```
- `Handler::handle()` uses `&self` instead of `&mut self`
- `Handler::handle()` accepts reference to `HttpRequest<_>` instead of value
- Removed deprecated `HttpServer::threads()`, use
[HttpServer::workers()](https://actix.rs/actix-web/actix_web/server/struct.HttpServer.html#method.workers) instead.
- Renamed `client::ClientConnectorError::Connector` to
`client::ClientConnectorError::Resolver`
- `Route::with()` does not return `ExtractorConfig`; to configure the
extractor, use `Route::with_config()`.
instead of
```rust
fn main() {
let app = App::new().resource("/index.html", |r| {
r.method(http::Method::GET)
.with(index)
.limit(4096); // <- limit size of the payload
});
}
```
use
```rust
fn main() {
let app = App::new().resource("/index.html", |r| {
r.method(http::Method::GET)
.with_config(index, |cfg| { // <- register handler
cfg.limit(4096); // <- limit size of the payload
})
});
}
```
- `Route::with_async()` does not return `ExtractorConfig`; to configure the
extractor, use `Route::with_async_config()`.
## 0.6
- `Path<T>` extractor returns `ErrorNotFound` on failure instead of `ErrorBadRequest`
- `ws::Message::Close` now includes optional close reason.
`ws::CloseCode::Status` and `ws::CloseCode::Empty` have been removed.
- `HttpServer::threads()` renamed to `HttpServer::workers()`.
- `HttpServer::start_ssl()` and `HttpServer::start_tls()` deprecated.
Use `HttpServer::bind_ssl()` and `HttpServer::bind_tls()` instead.
- `HttpRequest::extensions()` returns a read-only reference to the request's extensions;
`HttpRequest::extensions_mut()` returns a mutable reference.
- Instead of
`use actix_web::middleware::{CookieSessionBackend, CookieSessionError, RequestSession, Session, SessionBackend, SessionImpl, SessionStorage};`
use the `actix_web::middleware::session` module:
`use actix_web::middleware::session::{CookieSessionBackend, CookieSessionError, RequestSession, Session, SessionBackend, SessionImpl, SessionStorage};`
- `FromRequest::from_request()` accepts mutable reference to a request
- `FromRequest::Result` has to implement `Into<Reply<Self>>`
- [`Responder::respond_to()`](
https://actix.rs/actix-web/actix_web/trait.Responder.html#tymethod.respond_to)
is generic over `S`
- Use the `Query` extractor instead of `HttpRequest::query()`.
```rust
fn index(q: Query<HashMap<String, String>>) -> Result<..> {
...
}
```
or
```rust
let q = Query::<HashMap<String, String>>::extract(req);
```
- WebSocket operations are implemented as the `WsWriter` trait;
you need to `use actix_web::ws::WsWriter`.
## 0.5
- `HttpResponseBuilder::body()`, `.finish()`, `.json()`
methods return `HttpResponse` instead of `Result<HttpResponse>`
- `actix_web::Method`, `actix_web::StatusCode`, `actix_web::Version`
moved to `actix_web::http` module
- `actix_web::header` moved to `actix_web::http::header`
- `NormalizePath` moved to `actix_web::http` module
- `HttpServer` moved to `actix_web::server`; the new `actix_web::server::new()` function is a
shortcut for `actix_web::server::HttpServer::new()`
- `DefaultHeaders` middleware no longer uses a separate builder; all builder methods moved to the type itself
- `StaticFiles::new()`'s show_index parameter removed, use `show_files_listing()` method instead.
- `CookieSessionBackendBuilder` removed, all methods moved to `CookieSessionBackend` type
- `actix_web::httpcodes` module is deprecated, `HttpResponse::Ok()`, `HttpResponse::Found()` and other `HttpResponse::XXX()`
functions should be used instead
- `ClientRequestBuilder::body()` returns `Result<_, actix_web::Error>`
instead of `Result<_, http::Error>`
- `Application` renamed to `App`
- `actix_web::Reply`, `actix_web::Resource` moved to `actix_web::dev`

View File

@ -1 +0,0 @@
actix-web/README.md

109
README.md Normal file
View File

@ -0,0 +1,109 @@
<div align="center">
<h1>Actix Web</h1>
<p>
<strong>Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust</strong>
</p>
<p>
[![crates.io](https://img.shields.io/crates/v/actix-web?label=latest)](https://crates.io/crates/actix-web)
[![Documentation](https://docs.rs/actix-web/badge.svg?version=4.0.0-beta.20)](https://docs.rs/actix-web/4.0.0-beta.20)
![MSRV](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-web.svg)
[![Dependency Status](https://deps.rs/crate/actix-web/4.0.0-beta.20/status.svg)](https://deps.rs/crate/actix-web/4.0.0-beta.20)
<br />
[![CI](https://github.com/actix/actix-web/actions/workflows/ci.yml/badge.svg)](https://github.com/actix/actix-web/actions/workflows/ci.yml)
[![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web)
![downloads](https://img.shields.io/crates/d/actix-web.svg)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
</p>
</div>
## Features
- Supports *HTTP/1.x* and *HTTP/2*
- Streaming and pipelining
- Keep-alive and slow requests handling
- Client/server [WebSockets](https://actix.rs/docs/websockets/) support
- Transparent content compression/decompression (br, gzip, deflate, zstd)
- Powerful [request routing](https://actix.rs/docs/url-dispatch/)
- Multipart streams
- Static assets
- SSL support using OpenSSL or Rustls
- Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/))
- Includes an async [HTTP client](https://docs.rs/awc/)
- Runs on stable Rust 1.54+
## Documentation
- [Website & User Guide](https://actix.rs)
- [Examples Repository](https://github.com/actix/examples)
- [API Documentation](https://docs.rs/actix-web)
- [API Documentation (master branch)](https://actix.rs/actix-web/actix_web)
## Example
Dependencies:
```toml
[dependencies]
actix-web = "3"
```
Code:
```rust
use actix_web::{get, web, App, HttpServer, Responder};
#[get("/{id}/{name}/index.html")]
async fn index(web::Path((id, name)): web::Path<(u32, String)>) -> impl Responder {
format!("Hello {}! id:{}", name, id)
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
HttpServer::new(|| App::new().service(index))
.bind("127.0.0.1:8080")?
.run()
.await
}
```
### More examples
- [Basic Setup](https://github.com/actix/examples/tree/master/basics/basics/)
- [Application State](https://github.com/actix/examples/tree/master/basics/state/)
- [JSON Handling](https://github.com/actix/examples/tree/master/json/json/)
- [Multipart Streams](https://github.com/actix/examples/tree/master/forms/multipart/)
- [Diesel Integration](https://github.com/actix/examples/tree/master/database_interactions/diesel/)
- [r2d2 Integration](https://github.com/actix/examples/tree/master/database_interactions/r2d2/)
- [Simple WebSocket](https://github.com/actix/examples/tree/master/websockets/websocket/)
- [Tera Templates](https://github.com/actix/examples/tree/master/template_engines/tera/)
- [Askama Templates](https://github.com/actix/examples/tree/master/template_engines/askama/)
- [HTTPS using Rustls](https://github.com/actix/examples/tree/master/security/rustls/)
- [HTTPS using OpenSSL](https://github.com/actix/examples/tree/master/security/openssl/)
- [WebSocket Chat](https://github.com/actix/examples/tree/master/websockets/chat/)
You may consider checking out
[this directory](https://github.com/actix/examples/tree/master/) for more examples.
## Benchmarks
One of the fastest web frameworks available according to the
[TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r20&test=composite).
## License
This project is licensed under either of
- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
- MIT license ([LICENSE-MIT](LICENSE-MIT) or
[http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
at your option.
## Code of Conduct
Contribution to the actix-web repo is organized under the terms of the Contributor Covenant.
The Actix team promises to intervene to uphold that code of conduct.

View File

@ -1,42 +1,6 @@
# Changes
## Unreleased - 2022-xx-xx
## 0.6.3 - 2023-01-21
- XHTML files now use `Content-Disposition: inline` instead of `attachment`. [#2903]
- Minimum supported Rust version (MSRV) is now 1.59 due to transitive `time` dependency.
- Update `tokio-uring` dependency to `0.4`.
[#2903]: https://github.com/actix/actix-web/pull/2903
## 0.6.2 - 2022-07-23
- Allow partial range responses for video content to start streaming sooner. [#2817]
- Minimum supported Rust version (MSRV) is now 1.57 due to transitive `time` dependency.
[#2817]: https://github.com/actix/actix-web/pull/2817
## 0.6.1 - 2022-06-11
- Add `NamedFile::{modified, metadata, content_type, content_disposition, encoding}()` getters. [#2021]
- Update `tokio-uring` dependency to `0.3`.
- Audio files now use `Content-Disposition: inline` instead of `attachment`. [#2645]
- Minimum supported Rust version (MSRV) is now 1.56 due to transitive `hashbrown` dependency.
[#2021]: https://github.com/actix/actix-web/pull/2021
[#2645]: https://github.com/actix/actix-web/pull/2645
## 0.6.0 - 2022-02-25
- No significant changes since `0.6.0-beta.16`.
## 0.6.0-beta.16 - 2022-01-31
- No significant changes since `0.6.0-beta.15`.
## 0.6.0-beta.15 - 2022-01-21
- No significant changes since `0.6.0-beta.14`.
## Unreleased - 2021-xx-xx
## 0.6.0-beta.14 - 2022-01-14

View File

@ -1,8 +1,9 @@
[package]
name = "actix-files"
version = "0.6.3"
version = "0.6.0-beta.14"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"fakeshadow <24548779@qq.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "Static file serving for Actix Web"
@ -21,30 +22,27 @@ path = "src/lib.rs"
experimental-io-uring = ["actix-web/experimental-io-uring", "tokio-uring"]
[dependencies]
actix-http = "3"
actix-http = "3.0.0-beta.18"
actix-service = "2"
actix-utils = "3"
actix-web = { version = "4", default-features = false }
actix-web = { version = "4.0.0-beta.20", default-features = false }
askama_escape = "0.10"
bitflags = "1"
bytes = "1"
derive_more = "0.99.5"
futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] }
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
http-range = "0.1.4"
log = "0.4"
mime = "0.3"
mime_guess = "2.0.1"
percent-encoding = "2.1"
pin-project-lite = "0.2.7"
v_htmlescape= "0.15"
# experimental-io-uring
[target.'cfg(target_os = "linux")'.dependencies]
tokio-uring = { version = "0.4", optional = true, features = ["bytes"] }
actix-server = { version = "2.2", optional = true } # ensure matching tokio-uring versions
tokio-uring = { version = "0.2", optional = true, features = ["bytes"] }
[dev-dependencies]
actix-rt = "2.7"
actix-test = "0.1"
actix-web = "4"
actix-rt = "2.2"
actix-test = "0.1.0-beta.11"
actix-web = "4.0.0-beta.20"
tempfile = "3.2"

View File

@ -3,16 +3,16 @@
> Static file serving for Actix Web
[![crates.io](https://img.shields.io/crates/v/actix-files?label=latest)](https://crates.io/crates/actix-files)
[![Documentation](https://docs.rs/actix-files/badge.svg?version=0.6.3)](https://docs.rs/actix-files/0.6.3)
![Version](https://img.shields.io/badge/rustc-1.59+-ab6000.svg)
[![Documentation](https://docs.rs/actix-files/badge.svg?version=0.6.0-beta.14)](https://docs.rs/actix-files/0.6.0-beta.14)
[![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html)
![License](https://img.shields.io/crates/l/actix-files.svg)
<br />
[![dependency status](https://deps.rs/crate/actix-files/0.6.3/status.svg)](https://deps.rs/crate/actix-files/0.6.3)
[![dependency status](https://deps.rs/crate/actix-files/0.6.0-beta.14/status.svg)](https://deps.rs/crate/actix-files/0.6.0-beta.14)
[![Download](https://img.shields.io/crates/d/actix-files.svg)](https://crates.io/crates/actix-files)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
## Documentation & Resources
- [API Documentation](https://docs.rs/actix-files)
- [Example Project](https://github.com/actix/examples/tree/master/basics/static-files)
- [API Documentation](https://docs.rs/actix-files/)
- [Example Project](https://github.com/actix/examples/tree/master/basics/static_index)
- Minimum Supported Rust Version (MSRV): 1.54

View File

@ -81,7 +81,7 @@ async fn chunked_read_file_callback(
) -> Result<(File, Bytes), Error> {
use io::{Read as _, Seek as _};
let res = actix_web::web::block(move || {
let res = actix_web::rt::task::spawn_blocking(move || {
let mut buf = Vec::with_capacity(max_bytes);
file.seek(io::SeekFrom::Start(offset))?;
@ -94,7 +94,8 @@ async fn chunked_read_file_callback(
Ok((file, Bytes::from(buf)))
}
})
.await??;
.await
.map_err(|_| actix_web::error::BlockingError)??;
Ok(res)
}

View File

@ -1,8 +1,8 @@
use std::{fmt::Write, fs::DirEntry, io, path::Path, path::PathBuf};
use actix_web::{dev::ServiceResponse, HttpRequest, HttpResponse};
use askama_escape::{escape as escape_html_entity, Html};
use percent_encoding::{utf8_percent_encode, CONTROLS};
use v_htmlescape::escape as escape_html_entity;
/// A directory; responds with the generated directory listing.
#[derive(Debug)]
@ -59,7 +59,7 @@ macro_rules! encode_file_url {
/// ```
macro_rules! encode_file_name {
($entry:ident) => {
escape_html_entity(&$entry.file_name().to_string_lossy())
escape_html_entity(&$entry.file_name().to_string_lossy(), Html)
};
}
@ -75,7 +75,7 @@ pub(crate) fn directory_listing(
if dir.is_visible(&entry) {
let entry = entry.unwrap();
let p = match entry.path().strip_prefix(&dir.path) {
Ok(p) if cfg!(windows) => base.join(p).to_string_lossy().replace('\\', "/"),
Ok(p) if cfg!(windows) => base.join(p).to_string_lossy().replace("\\", "/"),
Ok(p) => base.join(p).to_string_lossy().into_owned(),
Err(_) => continue,
};

View File

@ -2,7 +2,7 @@ use actix_web::{http::StatusCode, ResponseError};
use derive_more::Display;
/// Errors which can occur when serving static files.
#[derive(Debug, PartialEq, Eq, Display)]
#[derive(Display, Debug, PartialEq)]
pub enum FilesError {
/// Path is not a directory
#[allow(dead_code)]
@ -22,7 +22,7 @@ impl ResponseError for FilesError {
}
#[allow(clippy::enum_variant_names)]
#[derive(Debug, PartialEq, Eq, Display)]
#[derive(Display, Debug, PartialEq)]
#[non_exhaustive]
pub enum UriSegmentError {
/// The segment started with the wrapped invalid character.

View File

@ -37,7 +37,7 @@ use crate::{
/// .service(Files::new("/static", "."));
/// ```
pub struct Files {
mount_path: String,
path: String,
directory: PathBuf,
index: Option<String>,
show_index: bool,
@ -68,7 +68,7 @@ impl Clone for Files {
default: self.default.clone(),
renderer: self.renderer.clone(),
file_flags: self.file_flags,
mount_path: self.mount_path.clone(),
path: self.path.clone(),
mime_override: self.mime_override.clone(),
path_filter: self.path_filter.clone(),
use_guards: self.use_guards.clone(),
@ -107,7 +107,7 @@ impl Files {
};
Files {
mount_path: mount_path.trim_end_matches('/').to_owned(),
path: mount_path.trim_end_matches('/').to_owned(),
directory: dir,
index: None,
show_index: false,
@ -342,9 +342,9 @@ impl HttpServiceFactory for Files {
}
let rdef = if config.is_root() {
ResourceDef::root_prefix(&self.mount_path)
ResourceDef::root_prefix(&self.path)
} else {
ResourceDef::prefix(&self.mount_path)
ResourceDef::prefix(&self.path)
};
config.register_service(rdef, guards, self, None)

View File

@ -2,7 +2,7 @@
//!
//! Provides a non-blocking service for serving static files from disk.
//!
//! # Examples
//! # Example
//! ```
//! use actix_web::App;
//! use actix_files::Files;
@ -13,7 +13,6 @@
#![deny(rust_2018_idioms, nonstandard_style)]
#![warn(future_incompatible, missing_docs, missing_debug_implementations)]
#![allow(clippy::uninlined_format_args)]
use actix_service::boxed::{BoxService, BoxServiceFactory};
use actix_web::{
@ -107,7 +106,7 @@ mod tests {
let req = TestRequest::default()
.insert_header((header::IF_MODIFIED_SINCE, since))
.to_http_request();
let resp = file.respond_to(&req);
let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_MODIFIED);
}
@ -119,7 +118,7 @@ mod tests {
let req = TestRequest::default()
.insert_header((header::IF_MODIFIED_SINCE, since))
.to_http_request();
let resp = file.respond_to(&req);
let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_MODIFIED);
}
@ -132,7 +131,7 @@ mod tests {
.insert_header((header::IF_NONE_MATCH, "miss_etag"))
.insert_header((header::IF_MODIFIED_SINCE, since))
.to_http_request();
let resp = file.respond_to(&req);
let resp = file.respond_to(&req).await.unwrap();
assert_ne!(resp.status(), StatusCode::NOT_MODIFIED);
}
@ -144,7 +143,7 @@ mod tests {
let req = TestRequest::default()
.insert_header((header::IF_UNMODIFIED_SINCE, since))
.to_http_request();
let resp = file.respond_to(&req);
let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
}
@ -156,7 +155,7 @@ mod tests {
let req = TestRequest::default()
.insert_header((header::IF_UNMODIFIED_SINCE, since))
.to_http_request();
let resp = file.respond_to(&req);
let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::PRECONDITION_FAILED);
}
@ -173,7 +172,7 @@ mod tests {
}
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req);
let resp = file.respond_to(&req).await.unwrap();
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/x-toml"
@ -197,7 +196,7 @@ mod tests {
}
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req);
let resp = file.respond_to(&req).await.unwrap();
assert_eq!(
resp.headers().get(header::CONTENT_DISPOSITION).unwrap(),
"inline; filename=\"Cargo.toml\""
@ -208,7 +207,7 @@ mod tests {
.unwrap()
.disable_content_disposition();
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req);
let resp = file.respond_to(&req).await.unwrap();
assert!(resp.headers().get(header::CONTENT_DISPOSITION).is_none());
}
@ -236,7 +235,7 @@ mod tests {
}
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req);
let resp = file.respond_to(&req).await.unwrap();
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/x-toml"
@ -262,7 +261,7 @@ mod tests {
}
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req);
let resp = file.respond_to(&req).await.unwrap();
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/xml"
@ -285,7 +284,7 @@ mod tests {
}
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req);
let resp = file.respond_to(&req).await.unwrap();
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"image/png"
@ -301,7 +300,7 @@ mod tests {
let file = NamedFile::open_async("tests/test.js").await.unwrap();
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req);
let resp = file.respond_to(&req).await.unwrap();
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"application/javascript; charset=utf-8"
@ -331,7 +330,7 @@ mod tests {
}
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req);
let resp = file.respond_to(&req).await.unwrap();
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"image/png"
@ -354,7 +353,7 @@ mod tests {
}
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req);
let resp = file.respond_to(&req).await.unwrap();
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"application/octet-stream"
@ -365,45 +364,22 @@ mod tests {
);
}
#[allow(deprecated)]
#[actix_rt::test]
async fn status_code_customize_same_output() {
let file1 = NamedFile::open_async("Cargo.toml")
async fn test_named_file_status_code_text() {
let mut file = NamedFile::open_async("Cargo.toml")
.await
.unwrap()
.set_status_code(StatusCode::NOT_FOUND);
let file2 = NamedFile::open_async("Cargo.toml")
.await
.unwrap()
.customize()
.with_status(StatusCode::NOT_FOUND);
let req = TestRequest::default().to_http_request();
let res1 = file1.respond_to(&req);
let res2 = file2.respond_to(&req);
assert_eq!(res1.status(), StatusCode::NOT_FOUND);
assert_eq!(res2.status(), StatusCode::NOT_FOUND);
}
#[actix_rt::test]
async fn test_named_file_status_code_text() {
let mut file = NamedFile::open_async("Cargo.toml").await.unwrap();
{
file.file();
let _f: &File = &file;
}
{
let _f: &mut File = &mut file;
}
let file = file.customize().with_status(StatusCode::NOT_FOUND);
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req);
let resp = file.respond_to(&req).await.unwrap();
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/x-toml"
@ -657,7 +633,7 @@ mod tests {
async fn test_named_file_allowed_method() {
let req = TestRequest::default().method(Method::GET).to_http_request();
let file = NamedFile::open_async("Cargo.toml").await.unwrap();
let resp = file.respond_to(&req);
let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
}

View File

@ -23,7 +23,6 @@ use actix_web::{
use bitflags::bitflags;
use derive_more::{Deref, DerefMut};
use futures_core::future::LocalBoxFuture;
use mime::Mime;
use mime_guess::from_path;
use crate::{encoding::equiv_utf8_text, range::HttpRange};
@ -77,8 +76,8 @@ pub struct NamedFile {
pub(crate) md: Metadata,
pub(crate) flags: Flags,
pub(crate) status_code: StatusCode,
pub(crate) content_type: Mime,
pub(crate) content_disposition: ContentDisposition,
pub(crate) content_type: mime::Mime,
pub(crate) content_disposition: header::ContentDisposition,
pub(crate) encoding: Option<ContentEncoding>,
}
@ -97,18 +96,18 @@ impl NamedFile {
///
/// # Examples
/// ```ignore
/// use std::{
/// io::{self, Write as _},
/// env,
/// fs::File
/// };
/// use actix_files::NamedFile;
/// use std::io::{self, Write};
/// use std::env;
/// use std::fs::File;
///
/// let mut file = File::create("foo.txt")?;
/// file.write_all(b"Hello, world!")?;
/// let named_file = NamedFile::from_file(file, "bar.txt")?;
/// # std::fs::remove_file("foo.txt");
/// Ok(())
/// fn main() -> io::Result<()> {
/// let mut file = File::create("foo.txt")?;
/// file.write_all(b"Hello, world!")?;
/// let named_file = NamedFile::from_file(file, "bar.txt")?;
/// # std::fs::remove_file("foo.txt");
/// Ok(())
/// }
/// ```
pub fn from_file<P: AsRef<Path>>(file: File, path: P) -> io::Result<NamedFile> {
let path = path.as_ref().to_path_buf();
@ -129,10 +128,10 @@ impl NamedFile {
let ct = from_path(&path).first_or_octet_stream();
let disposition = match ct.type_() {
mime::IMAGE | mime::TEXT | mime::AUDIO | mime::VIDEO => DispositionType::Inline,
mime::IMAGE | mime::TEXT | mime::VIDEO => DispositionType::Inline,
mime::APPLICATION => match ct.subtype() {
mime::JAVASCRIPT | mime::JSON => DispositionType::Inline,
name if name == "wasm" || name == "xhtml" => DispositionType::Inline,
name if name == "wasm" => DispositionType::Inline,
_ => DispositionType::Attachment,
},
_ => DispositionType::Attachment,
@ -212,8 +211,8 @@ impl NamedFile {
/// Attempts to open a file asynchronously in read-only mode.
///
/// When the `experimental-io-uring` crate feature is enabled, this will be async. Otherwise, it
/// will behave just like `open`.
/// When the `experimental-io-uring` crate feature is enabled, this will be async.
/// Otherwise, it will be just like [`open`][Self::open].
///
/// # Examples
/// ```
@ -238,13 +237,13 @@ impl NamedFile {
Self::from_file(file, path)
}
/// Returns reference to the underlying file object.
/// Returns reference to the underlying `File` object.
#[inline]
pub fn file(&self) -> &File {
&self.file
}
/// Returns the filesystem path to this file.
/// Retrieve the path of this file.
///
/// # Examples
/// ```
@ -262,53 +261,16 @@ impl NamedFile {
self.path.as_path()
}
/// Returns the time the file was last modified.
///
/// Returns `None` only on unsupported platforms; see [`std::fs::Metadata::modified()`].
/// Therefore, it is usually safe to unwrap this.
#[inline]
pub fn modified(&self) -> Option<SystemTime> {
self.modified
}
/// Returns the filesystem metadata associated with this file.
#[inline]
pub fn metadata(&self) -> &Metadata {
&self.md
}
/// Returns the `Content-Type` header that will be used when serving this file.
#[inline]
pub fn content_type(&self) -> &Mime {
&self.content_type
}
/// Returns the `Content-Disposition` that will be used when serving this file.
#[inline]
pub fn content_disposition(&self) -> &ContentDisposition {
&self.content_disposition
}
/// Returns the `Content-Encoding` that will be used when serving this file.
///
/// A return value of `None` indicates that the content is not already using a compressed
/// representation and may be subject to compression downstream.
#[inline]
pub fn content_encoding(&self) -> Option<ContentEncoding> {
self.encoding
}
/// Set response status code.
#[deprecated(since = "0.7.0", note = "Prefer `Responder::customize()`.")]
/// Set response **Status Code**
pub fn set_status_code(mut self, status: StatusCode) -> Self {
self.status_code = status;
self
}
/// Sets the `Content-Type` header that will be used when serving this file. By default the
/// `Content-Type` is inferred from the filename extension.
/// Set the MIME Content-Type for serving this file. By default the Content-Type is inferred
/// from the filename extension.
#[inline]
pub fn set_content_type(mut self, mime_type: Mime) -> Self {
pub fn set_content_type(mut self, mime_type: mime::Mime) -> Self {
self.content_type = mime_type;
self
}
@ -321,26 +283,24 @@ impl NamedFile {
/// filename is taken from the path provided in the `open` method after converting it to UTF-8
/// (using `to_string_lossy`).
#[inline]
pub fn set_content_disposition(mut self, cd: ContentDisposition) -> Self {
pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self {
self.content_disposition = cd;
self.flags.insert(Flags::CONTENT_DISPOSITION);
self
}
/// Disables `Content-Disposition` header.
/// Disable `Content-Disposition` header.
///
/// By default, the `Content-Disposition` header is sent.
/// By default Content-Disposition` header is enabled.
#[inline]
pub fn disable_content_disposition(mut self) -> Self {
self.flags.remove(Flags::CONTENT_DISPOSITION);
self
}
/// Sets content encoding for this file.
/// Set content encoding for serving this file
///
/// This prevents the `Compress` middleware from modifying the file contents and signals to
/// browsers/clients how to decode it. For example, if serving a compressed HTML file (e.g.,
/// `index.html.gz`) then use `.set_content_encoding(ContentEncoding::Gzip)`.
/// Must be used with [`actix_web::middleware::Compress`] to take effect.
#[inline]
pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self {
self.encoding = Some(enc);
@ -528,26 +488,11 @@ impl NamedFile {
length = ranges[0].length;
offset = ranges[0].start;
// When a Content-Encoding header is present in a 206 partial content response
// for video content, it prevents browser video players from starting playback
// before loading the whole video and also prevents seeking.
//
// See: https://github.com/actix/actix-web/issues/2815
//
// The assumption of this fix is that the video player knows to not send an
// Accept-Encoding header for this request and that downstream middleware will
// not attempt compression for requests without it.
//
// TODO: Solve question around what to do if self.encoding is set and partial
// range is requested. Reject request? Ignoring self.encoding seems wrong, too.
// In practice, it should not come up.
if req.headers().contains_key(&header::ACCEPT_ENCODING) {
// don't allow compression middleware to modify partial content
res.insert_header((
header::CONTENT_ENCODING,
HeaderValue::from_static("identity"),
));
}
// don't allow compression middleware to modify partial content
res.insert_header((
header::CONTENT_ENCODING,
HeaderValue::from_static("identity"),
));
res.insert_header((
header::CONTENT_RANGE,

View File

@ -59,8 +59,6 @@ impl PathBufWrap {
continue;
} else if cfg!(windows) && segment.contains('\\') {
return Err(UriSegmentError::BadChar('\\'));
} else if cfg!(windows) && segment.contains(':') {
return Err(UriSegmentError::BadChar(':'));
} else {
buf.push(segment)
}
@ -68,11 +66,7 @@ impl PathBufWrap {
// make sure we agree with stdlib parser
for (i, component) in buf.components().enumerate() {
assert!(
matches!(component, Component::Normal(_)),
"component `{:?}` is not normal",
component
);
assert!(matches!(component, Component::Normal(_)));
assert!(i < segment_count);
}
@ -91,7 +85,7 @@ impl FromRequest for PathBufWrap {
type Future = Ready<Result<Self, Self::Error>>;
fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future {
ready(req.match_info().unprocessed().parse())
ready(req.match_info().path().parse())
}
}
@ -165,26 +159,4 @@ mod tests {
PathBuf::from_iter(vec!["etc/passwd"])
);
}
#[test]
#[cfg_attr(windows, should_panic)]
fn windows_drive_traversal() {
// detect issues in windows that could lead to path traversal
// see <https://github.com/SergioBenitez/Rocket/issues/1949
assert_eq!(
PathBufWrap::parse_path("C:test.txt", false).unwrap().0,
PathBuf::from_iter(vec!["C:test.txt"])
);
assert_eq!(
PathBufWrap::parse_path("C:../whatever", false).unwrap().0,
PathBuf::from_iter(vec!["C:../whatever"])
);
assert_eq!(
PathBufWrap::parse_path(":test.txt", false).unwrap().0,
PathBuf::from_iter(vec![":test.txt"])
);
}
}


@ -23,7 +23,7 @@ impl Deref for FilesService {
type Target = FilesServiceInner;
fn deref(&self) -> &Self::Target {
&self.0
&*self.0
}
}
@ -120,16 +120,14 @@ impl Service<ServiceRequest> for FilesService {
));
}
let path_on_disk = match PathBufWrap::parse_path(
req.match_info().unprocessed(),
this.hidden_files,
) {
Ok(item) => item,
Err(err) => return Ok(req.error_response(err)),
};
let real_path =
match PathBufWrap::parse_path(req.match_info().path(), this.hidden_files) {
Ok(item) => item,
Err(err) => return Ok(req.error_response(err)),
};
if let Some(filter) = &this.path_filter {
if !filter(path_on_disk.as_ref(), req.head()) {
if !filter(real_path.as_ref(), req.head()) {
if let Some(ref default) = this.default {
return default.call(req).await;
} else {
@ -139,7 +137,7 @@ impl Service<ServiceRequest> for FilesService {
}
// full file path
let path = this.directory.join(&path_on_disk);
let path = this.directory.join(&real_path);
if let Err(err) = path.canonicalize() {
return this.handle_err(err, req).await;
}
@ -168,7 +166,7 @@ impl Service<ServiceRequest> for FilesService {
}
}
None if this.show_index => Ok(this.show_index(req, path)),
None => Ok(ServiceResponse::from_err(
_ => Ok(ServiceResponse::from_err(
FilesError::IsDirectory,
req.into_parts().0,
)),
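For context, a minimal sketch of how the path filter consulted above is supplied by the caller (the mount point, directory, and extension check are illustrative): paths rejected by the closure fall through to the default service, or produce a not-found response when none is set.

```rust
use actix_files::Files;
use actix_web::{App, HttpServer};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new().service(
            // the filter receives the sanitised relative path plus the request head
            Files::new("/static", "./assets").path_filter(|path, _head| {
                path.extension()
                    .map_or(false, |ext| ext == "txt" || ext == "html")
            }),
        )
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}
```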


@ -1,11 +1,11 @@
use actix_files::{Files, NamedFile};
use actix_files::Files;
use actix_web::{
http::{
header::{self, HeaderValue},
StatusCode,
},
test::{self, TestRequest},
web, App,
App,
};
#[actix_web::test]
@ -36,31 +36,3 @@ async fn test_utf8_file_contents() {
Some(&HeaderValue::from_static("text/plain")),
);
}
#[actix_web::test]
async fn partial_range_response_encoding() {
let srv = test::init_service(App::new().default_service(web::to(|| async {
NamedFile::open_async("./tests/test.binary").await.unwrap()
})))
.await;
// range request without accept-encoding returns no content-encoding header
let req = TestRequest::with_uri("/")
.append_header((header::RANGE, "bytes=10-20"))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::PARTIAL_CONTENT);
assert!(!res.headers().contains_key(header::CONTENT_ENCODING));
// range request with accept-encoding returns a content-encoding header
let req = TestRequest::with_uri("/")
.append_header((header::RANGE, "bytes=10-20"))
.append_header((header::ACCEPT_ENCODING, "identity"))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::PARTIAL_CONTENT);
assert_eq!(
res.headers().get(header::CONTENT_ENCODING).unwrap(),
"identity"
);
}


@ -1,34 +1,6 @@
# Changes
## Unreleased - 2022-xx-xx
## 3.1.0 - 2023-01-21
- Minimum supported Rust version (MSRV) is now 1.59.
## 3.0.0 - 2022-07-24
- `TestServer::stop` is now async and will wait for the server and system to shutdown. [#2442]
- Added `TestServer::client_headers` method. [#2097]
- Update `actix-server` dependency to `2`.
- Update `actix-tls` dependency to `3`.
- Update `bytes` to `1.0`. [#1813]
- Minimum supported Rust version (MSRV) is now 1.57.
[#2442]: https://github.com/actix/actix-web/pull/2442
[#2097]: https://github.com/actix/actix-web/pull/2097
[#1813]: https://github.com/actix/actix-web/pull/1813
<details>
<summary>3.0.0 Pre-Releases</summary>
## 3.0.0-beta.13 - 2022-02-16
- No significant changes since `3.0.0-beta.12`.
## 3.0.0-beta.12 - 2022-01-31
- No significant changes since `3.0.0-beta.11`.
## Unreleased - 2021-xx-xx
## 3.0.0-beta.11 - 2022-01-04
@ -88,7 +60,6 @@
[#1813]: https://github.com/actix/actix-web/pull/1813
</details>
## 2.1.0 - 2020-11-25
- Add ability to set address for `TestServer`. [#1645]


@ -1,6 +1,6 @@
[package]
name = "actix-http-test"
version = "3.1.0"
version = "3.0.0-beta.11"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Various helpers for Actix applications to use during testing"
keywords = ["http", "web", "framework", "async", "futures"]
@ -29,16 +29,17 @@ default = []
openssl = ["tls-openssl", "awc/openssl"]
[dependencies]
actix-service = "2"
actix-codec = "0.5"
actix-tls = "3"
actix-utils = "3"
actix-service = "2.0.0"
actix-codec = "0.4.1"
actix-tls = "3.0.0"
actix-utils = "3.0.0"
actix-rt = "2.2"
actix-server = "2"
awc = { version = "3", default-features = false }
actix-server = "2.0.0-rc.2"
awc = { version = "3.0.0-beta.18", default-features = false }
base64 = "0.13"
bytes = "1"
futures-core = { version = "0.3.17", default-features = false }
futures-core = { version = "0.3.7", default-features = false }
http = "0.2.5"
log = "0.4"
socket2 = "0.4"
@ -47,8 +48,8 @@ serde_json = "1.0"
slab = "0.4"
serde_urlencoded = "0.7"
tls-openssl = { version = "0.10.9", package = "openssl", optional = true }
tokio = { version = "1.18.4", features = ["sync"] }
tokio = { version = "1.8.4", features = ["sync"] }
[dev-dependencies]
actix-web = { version = "4", default-features = false, features = ["cookies"] }
actix-http = "3"
actix-web = { version = "4.0.0-beta.20", default-features = false, features = ["cookies"] }
actix-http = "3.0.0-beta.18"


@ -3,11 +3,11 @@
> Various helpers for Actix applications to use during testing.
[![crates.io](https://img.shields.io/crates/v/actix-http-test?label=latest)](https://crates.io/crates/actix-http-test)
[![Documentation](https://docs.rs/actix-http-test/badge.svg?version=3.1.0)](https://docs.rs/actix-http-test/3.1.0)
![Version](https://img.shields.io/badge/rustc-1.59+-ab6000.svg)
[![Documentation](https://docs.rs/actix-http-test/badge.svg?version=3.0.0-beta.11)](https://docs.rs/actix-http-test/3.0.0-beta.11)
[![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http-test)
<br>
[![Dependency Status](https://deps.rs/crate/actix-http-test/3.1.0/status.svg)](https://deps.rs/crate/actix-http-test/3.1.0)
[![Dependency Status](https://deps.rs/crate/actix-http-test/3.0.0-beta.11/status.svg)](https://deps.rs/crate/actix-http-test/3.0.0-beta.11)
[![Download](https://img.shields.io/crates/d/actix-http-test.svg)](https://crates.io/crates/actix-http-test)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)


@ -2,7 +2,6 @@
#![deny(rust_2018_idioms, nonstandard_style)]
#![warn(future_incompatible)]
#![allow(clippy::uninlined_format_args)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
@ -88,7 +87,6 @@ pub async fn test_server_with_addr<F: ServerServiceFactory<TcpStream>>(
// notify TestServer that server and system have shut down
// all thread managed resources should be dropped at this point
#[allow(clippy::let_underscore_future)]
let _ = thread_stop_tx.send(());
});
@ -296,7 +294,6 @@ impl Drop for TestServer {
// without needing to await anything
// signal server to stop
#[allow(clippy::let_underscore_future)]
let _ = self.server.stop(true);
// signal system to stop


@ -1,461 +1,13 @@
# Changes
## Unreleased - 2022-xx-xx
## 3.3.0 - 2023-01-21
### Added
- Implement `MessageBody` for `Cow<'static, str>` and `Cow<'static, [u8]>`. [#2959]
- Implement `MessageBody` for `&mut B` where `B: MessageBody + Unpin`. [#2868]
- Implement `MessageBody` for `Pin<B>` where `B::Target: MessageBody`. [#2868]
- Automatic h2c detection via new service finalizer `HttpService::tcp_auto_h2c()`. [#2957]
- `HeaderMap::retain()` [#2955].
- Header name constants in `header` module. [#2956] [#2968]
- `CACHE_STATUS`
- `CDN_CACHE_CONTROL`
- `CROSS_ORIGIN_EMBEDDER_POLICY`
- `CROSS_ORIGIN_OPENER_POLICY`
- `PERMISSIONS_POLICY`
- `X_FORWARDED_FOR`
- `X_FORWARDED_HOST`
- `X_FORWARDED_PROTO`
### Fixed
- Fix non-empty body of HTTP/2 HEAD responses. [#2920]
### Performance
- Improve overall performance of operations on `Extensions`. [#2890]
[#2959]: https://github.com/actix/actix-web/pull/2959
[#2868]: https://github.com/actix/actix-web/pull/2868
[#2890]: https://github.com/actix/actix-web/pull/2890
[#2920]: https://github.com/actix/actix-web/pull/2920
[#2957]: https://github.com/actix/actix-web/pull/2957
[#2955]: https://github.com/actix/actix-web/pull/2955
[#2956]: https://github.com/actix/actix-web/pull/2956
[#2968]: https://github.com/actix/actix-web/pull/2968
## 3.2.2 - 2022-09-11
### Changed
- Minimum supported Rust version (MSRV) is now 1.59 due to transitive `time` dependency.
### Fixed
- Avoid possibility of dispatcher getting stuck while back-pressuring I/O. [#2369]
[#2369]: https://github.com/actix/actix-web/pull/2369
## 3.2.1 - 2022-07-02
### Fixed
- Fix parsing ambiguity in Transfer-Encoding and Content-Length headers for HTTP/1.0 requests. [#2794]
[#2794]: https://github.com/actix/actix-web/pull/2794
## 3.2.0 - 2022-06-30
### Changed
- Minimum supported Rust version (MSRV) is now 1.57 due to transitive `time` dependency.
### Fixed
- Websocket parser no longer throws endless overflow errors after receiving an oversized frame. [#2790]
- Retain previously set Vary headers when using compression encoder. [#2798]
[#2790]: https://github.com/actix/actix-web/pull/2790
[#2798]: https://github.com/actix/actix-web/pull/2798
## 3.1.0 - 2022-06-11
### Changed
- Minimum supported Rust version (MSRV) is now 1.56 due to transitive `hashbrown` dependency.
### Fixed
- Revert broken fix in [#2624] that caused erroneous 500 error responses. Temporarily re-introduces [#2357] bug. [#2779]
[#2624]: https://github.com/actix/actix-web/pull/2624
[#2357]: https://github.com/actix/actix-web/issues/2357
[#2779]: https://github.com/actix/actix-web/pull/2779
## 3.0.4 - 2022-03-09
### Fixed
- Document on docs.rs with `ws` feature enabled.
## 3.0.3 - 2022-03-08
### Fixed
- Allow spaces between header name and colon when parsing responses. [#2684]
[#2684]: https://github.com/actix/actix-web/pull/2684
## 3.0.2 - 2022-03-05
### Fixed
- Fix encoding camel-case header names with more than one hyphen. [#2683]
[#2683]: https://github.com/actix/actix-web/pull/2683
## 3.0.1 - 2022-03-04
- Fix panic in H1 dispatcher when pipelining is used with keep-alive. [#2678]
[#2678]: https://github.com/actix/actix-web/issues/2678
## 3.0.0 - 2022-02-25
### Dependencies
- Updated `actix-*` to Tokio v1-based versions. [#1813]
- Updated `bytes` to `1.0`. [#1813]
- Updated `h2` to `0.3`. [#1813]
- Updated `rustls` to `0.20.0`. [#2414]
- Updated `language-tags` to `0.3`.
- Updated `tokio` to `1`.
### Added
- Crate Features:
- `ws`; disabled by default. [#2618]
- `http2`; disabled by default. [#2618]
- `compress-brotli`; disabled by default. [#2618]
- `compress-gzip`; disabled by default. [#2618]
- `compress-zstd`; disabled by default. [#2618]
- Functions:
- `body::to_bytes` for async collecting message body into Bytes. [#2158]
- Traits:
- `TryIntoHeaderPair`; allows using typed and untyped headers in the same methods. [#1869]
- Types:
- `body::BoxBody`; a boxed message body with boxed errors. [#2183]
- `body::EitherBody` enum. [#2468]
- `body::None` struct. [#2468]
- Re-export `http` crate's `Error` type as `error::HttpError`. [#2171]
- Variants:
- `ContentEncoding::Zstd`. [#2244]
- `Protocol::Http3` for future compatibility and also mark `#[non_exhaustive]`. [00ba8d55]
- Methods:
- `ContentEncoding::to_header_value()`. [#2501]
- `header::QualityItem::{max, min}()`. [#2486]
- `header::QualityItem::zero()` that uses `Quality::ZERO`. [#2501]
- `HeaderMap::drain()` as an efficient draining iterator. [#1964]
- `HeaderMap::len_keys()` has the behavior of the old `len` method. [#1964]
- `MessageBody::boxed` trait method for boxing body types efficiently. [#2520]
- `MessageBody::try_into_bytes` trait method, with default implementation, for optimizations on body types that complete in exactly one poll. [#2522]
- `Request::conn_data()`. [#2491]
- `Request::take_conn_data()`. [#2491]
- `Request::take_req_data()`. [#2487]
- `Response::{ok, bad_request, not_found, internal_server_error}()`. [#2159]
- `Response::into_body()` that consumes response and returns body type. [#2201]
- `Response::map_into_boxed_body()`. [#2468]
- `ResponseBuilder::append_header()` method which allows using typed and untyped headers. [#1869]
- `ResponseBuilder::insert_header()` method which allows using typed and untyped headers. [#1869]
- `ResponseHead::set_camel_case_headers()`. [#2587]
- `TestRequest::insert_header()` method which allows using typed and untyped headers. [#1869]
- Implementations:
- Implement `Clone for ws::HandshakeError`. [#2468]
- Implement `Clone` for `body::AnyBody<S> where S: Clone`. [#2448]
- Implement `Clone` for `RequestHead`. [#2487]
- Implement `Clone` for `ResponseHead`. [#2585]
- Implement `Copy` for `QualityItem<T> where T: Copy`. [#2501]
- Implement `Default` for `ContentEncoding`. [#1912]
- Implement `Default` for `HttpServiceBuilder`. [#2611]
- Implement `Default` for `KeepAlive`. [#2611]
- Implement `Default` for `Response`. [#2201]
- Implement `Default` for `ws::Codec`. [#1920]
- Implement `Display` for `header::Quality`. [#2486]
- Implement `Eq` for `header::ContentEncoding`. [#2501]
- Implement `ExactSizeIterator` and `FusedIterator` for all `HeaderMap` iterators. [#2470]
- Implement `From<Duration>` for `KeepAlive`. [#2611]
- Implement `From<Option<Duration>>` for `KeepAlive`. [#2611]
- Implement `From<Vec<u8>>` for `Response<Vec<u8>>`. [#2625]
- Implement `FromStr` for `ContentEncoding`. [#1912]
- Implement `Header` for `ContentEncoding`. [#1912]
- Implement `IntoHeaderValue` for `ContentEncoding`. [#1912]
- Implement `IntoIterator` for `HeaderMap`. [#1964]
- Implement `MessageBody` for `bytestring::ByteString`. [#2468]
- Implement `MessageBody` for `Pin<Box<T>> where T: MessageBody`. [#2152]
- Misc:
- Re-export `StatusCode`, `Method`, `Version` and `Uri` at the crate root. [#2171]
- Re-export `ContentEncoding` and `ConnectionType` at the crate root. [#2171]
- `Quality::ZERO` associated constant equivalent to `q=0`. [#2501]
- `header::Quality::{MAX, MIN}` associated constants equivalent to `q=1` and `q=0.001`, respectively. [#2486]
- Timeout for canceling HTTP/2 server side connection handshake. Configurable with `ServiceConfig::client_timeout`; defaults to 5 seconds. [#2483]
- `#[must_use]` for `ws::Codec` to prevent subtle bugs. [#1920]
### Changed
- Traits:
- Rename `IntoHeaderValue => TryIntoHeaderValue`. [#2510]
- `MessageBody` now has an associated `Error` type. [#2183]
- Types:
- `Protocol` enum is now marked `#[non_exhaustive]`.
- `error::DispatcherError` enum is now marked `#[non_exhaustive]`. [#2624]
- `ContentEncoding` is now marked `#[non_exhaustive]`. [#2377]
- Error enums are marked `#[non_exhaustive]`. [#2161]
- Rename `PayloadStream` to `BoxedPayloadStream`. [#2545]
- The body type parameter of `Response` no longer has a default. [#2152]
- Enum Variants:
- Rename `ContentEncoding::{Br => Brotli}`. [#2501]
- `Payload` inner fields are now named. [#2545]
- `ws::Message::Text` now contains a `bytestring::ByteString`. [#1864]
- Methods:
- Rename `ServiceConfig::{client_timer_expire => client_request_deadline}`. [#2611]
- Rename `ServiceConfig::{client_disconnect_timer => client_disconnect_deadline}`. [#2611]
- Rename `h1::Codec::{keepalive => keep_alive}`. [#2611]
- Rename `h1::Codec::{keepalive_enabled => keep_alive_enabled}`. [#2611]
- Rename `h1::ClientCodec::{keepalive => keep_alive}`. [#2611]
- Rename `h1::ClientPayloadCodec::{keepalive => keep_alive}`. [#2611]
- Rename `header::EntityTag::{weak => new_weak, strong => new_strong}`. [#2565]
- Rename `TryIntoHeaderValue::{try_into => try_into_value}` to avoid ambiguity with std `TryInto` trait. [#1894]
- Deadline methods in `ServiceConfig` now return `std::time::Instant`s instead of Tokio's wrapper type. [#2611]
- Places in `Response` where `ResponseBody<B>` was received or returned now simply use `B`. [#2201]
- `encoding::Encoder::response` now returns `AnyBody<Encoder<B>>`. [#2448]
- `Extensions::insert` returns replaced item. [#1904]
- `HeaderMap::get_all` now returns a `std::slice::Iter`. [#2527]
- `HeaderMap::insert` now returns iterator of removed values. [#1964]
- `HeaderMap::len` now returns number of values instead of number of keys. [#1964]
- `HeaderMap::remove` now returns iterator of removed values. [#1964]
- `ResponseBuilder::body(B)` now returns `Response<EitherBody<B>>`. [#2468]
- `ResponseBuilder::content_type` now takes an `impl TryIntoHeaderValue` to support using typed `mime` types. [#1894]
- `ResponseBuilder::finish()` now returns `Response<EitherBody<()>>`. [#2468]
- `ResponseBuilder::json` now takes `impl Serialize`. [#2052]
- `ResponseBuilder::message_body` now returns a `Result`. [#2201]
- `ServiceConfig::keep_alive` now returns a `KeepAlive`. [#2611]
- `ws::hash_key` now returns array. [#2035]
- Trait Implementations:
- Implementation of `Stream` for `Payload` no longer requires the `Stream` variant be `Unpin`. [#2545]
- Implementation of `Future` for `h1::SendResponse` no longer requires the body type be `Unpin`. [#2545]
- Implementation of `Stream` for `encoding::Decoder` no longer requires the stream type be `Unpin`. [#2545]
- Implementation of `From` for error types now return a `Response<BoxBody>`. [#2468]
- Misc:
- `header` module is now public. [#2171]
- `uri` module is now public. [#2171]
- Request-local data container is no longer part of a `RequestHead`. Instead it is a distinct part of a `Request`. [#2487]
- All error trait bounds in server service builders have changed from `Into<Error>` to `Into<Response<BoxBody>>`. [#2253]
- All error trait bounds in message body and stream impls changed from `Into<Error>` to `Into<Box<dyn std::error::Error>>`. [#2253]
- Guarantee ordering of `header::GetAll` iterator to be same as insertion order. [#2467]
- Connection data set through the `on_connect_ext` callbacks is now accessible only from the new `Request::conn_data()` method. [#2491]
- Brotli (de)compression support is now provided by the `brotli` crate. [#2538]
- Minimum supported Rust version (MSRV) is now 1.54.
### Fixed
- A `Vary` header is now correctly sent along with compressed content. [#2501]
- HTTP/1.1 dispatcher correctly uses client request timeout. [#2611]
- Fixed issue where handlers that took a payload but then dropped it without reading to EOF would cause keep-alive connections to become stuck. [#2624]
- `ContentEncoding`'s `Identity` variant can now be parsed from a string. [#2501]
- `HttpServer::{listen_rustls(), bind_rustls()}` now honor the ALPN protocols in the configuration parameter. [#2226]
- Remove unnecessary `Into<Error>` bound on `Encoder` body types. [#2375]
- Remove unnecessary `Unpin` bound on `ResponseBuilder::streaming`. [#2253]
- `BodyStream` and `SizedStream` are no longer restricted to `Unpin` types. [#2152]
- Fixed slice creation pointing to potential uninitialized data on h1 encoder. [#2364]
- Fixed quality parse error in Accept-Encoding header. [#2344]
### Removed
- Crate Features:
- `compress` feature. [#2065]
- `cookies` feature. [#2065]
- `trust-dns` feature. [#2425]
- `actors` optional feature and trait implementation for `actix` types. [#1969]
- Functions:
- `header::qitem` helper. Replaced with `header::QualityItem::max`. [#2486]
- Types:
- `body::Body`; replaced with `EitherBody` and `BoxBody`. [#2468]
- `body::ResponseBody`. [#2446]
- `ConnectError::SslHandshakeError` and re-export of `HandshakeError`. Due to the removal of this type from `tokio-openssl` crate. OpenSSL handshake error now returns `ConnectError::SslError`. [#1813]
- `error::Canceled` re-export. [#1994]
- `error::Result` type alias. [#2201]
- `error::BlockingError` [#2660]
- `InternalError` and all the error types it constructed were moved up to `actix-web`. [#2215]
- Typed HTTP headers; they have moved up to `actix-web`. [#2094]
- Re-export of `http` crate's `HeaderMap` types in addition to ours. [#2171]
- Enum Variants:
- `body::BodySize::Empty`; an empty body can now only be represented as a `Sized(0)` variant. [#2446]
- `ContentEncoding::Auto`. [#2501]
- `EncoderError::Boxed`. [#2446]
- Methods:
- `ContentEncoding::is_compression()`. [#2501]
- `h1::Payload::readany()`. [#2545]
- `HttpMessage::cookie[s]()` trait methods. [#2065]
- `HttpServiceBuilder::new()`; use `default` instead. [#2611]
- `on_connect` (previously deprecated) methods have been removed; use `on_connect_ext`. [#1857]
- `Response::build_from()`. [#2159]
- `Response::error()` [#2205]
- `Response::take_body()` and old `Response::into_body()` method that casted body type. [#2201]
- `Response`'s status code builders. [#2159]
- `ResponseBuilder::{if_true, if_some}()` (previously deprecated). [#2148]
- `ResponseBuilder::{set, set_header}()`; use `ResponseBuilder::insert_header()`. [#1869]
- `ResponseBuilder::extensions[_mut]()`. [#2585]
- `ResponseBuilder::header()`; use `ResponseBuilder::append_header()`. [#1869]
- `ResponseBuilder::json()`. [#2148]
- `ResponseBuilder::json2()`. [#1903]
- `ResponseBuilder::streaming()`. [#2468]
- `ResponseHead::extensions[_mut]()`. [#2585]
- `ServiceConfig::{client_timer, keep_alive_timer}()`. [#2611]
- `TestRequest::with_hdr()`; use `TestRequest::default().insert_header()`. [#1869]
- `TestRequest::with_header()`; use `TestRequest::default().insert_header()`. [#1869]
- Trait implementations:
- Implementation of `Copy` for `ws::Codec`. [#1920]
- Implementation of `From<Option<usize>> for KeepAlive`; use `Duration`s instead. [#2611]
- Implementation of `From<serde_json::Value>` for `Body`. [#2148]
- Implementation of `From<usize> for KeepAlive`; use `Duration`s instead. [#2611]
- Implementation of `Future` for `Response`. [#2201]
- Implementation of `Future` for `ResponseBuilder`. [#2468]
- Implementation of `Into<Error>` for `Response<Body>`. [#2215]
- Implementation of `Into<Error>` for `ResponseBuilder`. [#2215]
- Implementation of `ResponseError` for `actix_utils::timeout::TimeoutError`. [#2127]
- Implementation of `ResponseError` for `CookieParseError`. [#2065]
- Implementation of `TryFrom<u16>` for `header::Quality`. [#2486]
- Misc:
- `http` module; most everything it contained is exported at the crate root. [#2488]
- `cookies` module (re-export). [#2065]
- `client` module. Connector types now live in `awc`. [#2425]
- `error` field from `Response`. [#2205]
- `downcast` and `downcast_get_type_id` macros. [#2291]
- Down-casting for `MessageBody` types; use standard `Any` trait. [#2183]
[#1813]: https://github.com/actix/actix-web/pull/1813
[#1845]: https://github.com/actix/actix-web/pull/1845
[#1857]: https://github.com/actix/actix-web/pull/1857
[#1864]: https://github.com/actix/actix-web/pull/1864
[#1869]: https://github.com/actix/actix-web/pull/1869
[#1878]: https://github.com/actix/actix-web/pull/1878
[#1894]: https://github.com/actix/actix-web/pull/1894
[#1903]: https://github.com/actix/actix-web/pull/1903
[#1904]: https://github.com/actix/actix-web/pull/1904
[#1912]: https://github.com/actix/actix-web/pull/1912
[#1920]: https://github.com/actix/actix-web/pull/1920
[#1964]: https://github.com/actix/actix-web/pull/1964
[#1969]: https://github.com/actix/actix-web/pull/1969
[#1981]: https://github.com/actix/actix-web/pull/1981
[#1994]: https://github.com/actix/actix-web/pull/1994
[#2035]: https://github.com/actix/actix-web/pull/2035
[#2052]: https://github.com/actix/actix-web/pull/2052
[#2065]: https://github.com/actix/actix-web/pull/2065
[#2094]: https://github.com/actix/actix-web/pull/2094
[#2127]: https://github.com/actix/actix-web/pull/2127
[#2148]: https://github.com/actix/actix-web/pull/2148
[#2152]: https://github.com/actix/actix-web/pull/2152
[#2158]: https://github.com/actix/actix-web/pull/2158
[#2159]: https://github.com/actix/actix-web/pull/2159
[#2161]: https://github.com/actix/actix-web/pull/2161
[#2171]: https://github.com/actix/actix-web/pull/2171
[#2183]: https://github.com/actix/actix-web/pull/2183
[#2196]: https://github.com/actix/actix-web/pull/2196
[#2201]: https://github.com/actix/actix-web/pull/2201
[#2205]: https://github.com/actix/actix-web/pull/2205
[#2215]: https://github.com/actix/actix-web/pull/2215
[#2244]: https://github.com/actix/actix-web/pull/2244
[#2250]: https://github.com/actix/actix-web/pull/2250
[#2253]: https://github.com/actix/actix-web/pull/2253
[#2291]: https://github.com/actix/actix-web/pull/2291
[#2344]: https://github.com/actix/actix-web/pull/2344
[#2364]: https://github.com/actix/actix-web/pull/2364
[#2375]: https://github.com/actix/actix-web/pull/2375
[#2377]: https://github.com/actix/actix-web/pull/2377
[#2414]: https://github.com/actix/actix-web/pull/2414
[#2425]: https://github.com/actix/actix-web/pull/2425
[#2442]: https://github.com/actix/actix-web/pull/2442
[#2446]: https://github.com/actix/actix-web/pull/2446
[#2448]: https://github.com/actix/actix-web/pull/2448
[#2456]: https://github.com/actix/actix-web/pull/2456
[#2467]: https://github.com/actix/actix-web/pull/2467
[#2468]: https://github.com/actix/actix-web/pull/2468
[#2470]: https://github.com/actix/actix-web/pull/2470
[#2474]: https://github.com/actix/actix-web/pull/2474
[#2483]: https://github.com/actix/actix-web/pull/2483
[#2486]: https://github.com/actix/actix-web/pull/2486
[#2487]: https://github.com/actix/actix-web/pull/2487
[#2488]: https://github.com/actix/actix-web/pull/2488
[#2491]: https://github.com/actix/actix-web/pull/2491
[#2497]: https://github.com/actix/actix-web/pull/2497
[#2501]: https://github.com/actix/actix-web/pull/2501
[#2510]: https://github.com/actix/actix-web/pull/2510
[#2520]: https://github.com/actix/actix-web/pull/2520
[#2522]: https://github.com/actix/actix-web/pull/2522
[#2527]: https://github.com/actix/actix-web/pull/2527
[#2538]: https://github.com/actix/actix-web/pull/2538
[#2545]: https://github.com/actix/actix-web/pull/2545
[#2565]: https://github.com/actix/actix-web/pull/2565
[#2585]: https://github.com/actix/actix-web/pull/2585
[#2587]: https://github.com/actix/actix-web/pull/2587
[#2611]: https://github.com/actix/actix-web/pull/2611
[#2618]: https://github.com/actix/actix-web/pull/2618
[#2624]: https://github.com/actix/actix-web/pull/2624
[#2625]: https://github.com/actix/actix-web/pull/2625
[#2660]: https://github.com/actix/actix-web/pull/2660
[00ba8d55]: https://github.com/actix/actix-web/commit/00ba8d55492284581695d824648590715a8bd386
<details>
<summary>3.0.0 Pre-Releases</summary>
## 3.0.0-rc.4 - 2022-02-22
### Fixed
- Fix h1 dispatcher panic. [1ce58ecb]
[1ce58ecb]: https://github.com/actix/actix-web/commit/1ce58ecb305c60e51db06e6c913b7a1344e229ca
## 3.0.0-rc.3 - 2022-02-16
- No significant changes since `3.0.0-rc.2`.
## 3.0.0-rc.2 - 2022-02-08
### Added
- Implement `From<Vec<u8>>` for `Response<Vec<u8>>`. [#2625]
### Changed
- `error::DispatcherError` enum is now marked `#[non_exhaustive]`. [#2624]
### Fixed
- Issue where handlers that took a payload but then dropped it without reading to EOF would cause keep-alive connections to become stuck. [#2624]
[#2624]: https://github.com/actix/actix-web/pull/2624
[#2625]: https://github.com/actix/actix-web/pull/2625
## 3.0.0-rc.1 - 2022-01-31
### Added
- Implement `Default` for `KeepAlive`. [#2611]
- Implement `From<Duration>` for `KeepAlive`. [#2611]
- Implement `From<Option<Duration>>` for `KeepAlive`. [#2611]
- Implement `Default` for `HttpServiceBuilder`. [#2611]
- Crate `ws` feature flag, disabled by default. [#2618]
- Crate `http2` feature flag, disabled by default. [#2618]
### Changed
- Rename `ServiceConfig::{client_timer_expire => client_request_deadline}`. [#2611]
- Rename `ServiceConfig::{client_disconnect_timer => client_disconnect_deadline}`. [#2611]
- Deadline methods in `ServiceConfig` now return `std::time::Instant`s instead of Tokio's wrapper type. [#2611]
- Rename `h1::Codec::{keepalive => keep_alive}`. [#2611]
- Rename `h1::Codec::{keepalive_enabled => keep_alive_enabled}`. [#2611]
- Rename `h1::ClientCodec::{keepalive => keep_alive}`. [#2611]
- Rename `h1::ClientPayloadCodec::{keepalive => keep_alive}`. [#2611]
- `ServiceConfig::keep_alive` now returns a `KeepAlive`. [#2611]
### Fixed
- HTTP/1.1 dispatcher correctly uses client request timeout. [#2611]
### Removed
- `ServiceConfig::{client_timer, keep_alive_timer}`. [#2611]
- `impl From<usize> for KeepAlive`; use `Duration`s instead. [#2611]
- `impl From<Option<usize>> for KeepAlive`; use `Duration`s instead. [#2611]
- `HttpServiceBuilder::new`; use `default` instead. [#2611]
[#2611]: https://github.com/actix/actix-web/pull/2611
[#2618]: https://github.com/actix/actix-web/pull/2618
## 3.0.0-beta.19 - 2022-01-21
## Unreleased - 2021-xx-xx
### Added
- Response headers can be sent as camel case using `res.head_mut().set_camel_case_headers(true)`. [#2587]
- `ResponseHead` now implements `Clone`. [#2585]
### Changed
- Brotli (de)compression support is now provided by the `brotli` crate. [#2538]
### Removed
- `ResponseHead::extensions[_mut]()`. [#2585]
- `ResponseBuilder::extensions[_mut]()`. [#2585]
[#2538]: https://github.com/actix/actix-web/pull/2538
[#2585]: https://github.com/actix/actix-web/pull/2585
[#2587]: https://github.com/actix/actix-web/pull/2587
@ -487,7 +39,7 @@
## 3.0.0-beta.17 - 2021-12-27
### Changed
### Changes
- `HeaderMap::get_all` now returns a `std::slice::Iter`. [#2527]
- `Payload` inner fields are now named. [#2545]
- `impl Stream` for `Payload` no longer requires the `Stream` variant be `Unpin`. [#2545]
@ -710,7 +262,7 @@
- `Response::{ok, bad_request, not_found, internal_server_error}`. [#2159]
- Helper `body::to_bytes` for async collecting message body into Bytes. [#2158]
### Changed
### Changes
- The type parameter of `Response` no longer has a default. [#2152]
- The `Message` variant of `body::Body` is now `Pin<Box<dyn MessageBody>>`. [#2152]
- `BodyStream` and `SizedStream` are no longer restricted to Unpin types. [#2152]
@ -843,10 +395,10 @@
- Remove `ResponseError` impl for `actix::actors::resolver::ResolverError`
due to deprecate of resolver actor. [#1813]
- Remove `ConnectError::SslHandshakeError` and re-export of `HandshakeError`.
due to the removal of this type from `tokio-openssl` crate. openssl handshake
error would return as `ConnectError::SslError`. [#1813]
- Remove `actix-threadpool` dependency. Use `actix_rt::task::spawn_blocking`.
Due to this change `actix_threadpool::BlockingError` type is moved into
`actix_http::error` module. [#1878]
[#1813]: https://github.com/actix/actix-web/pull/1813
@ -854,15 +406,6 @@
[#1864]: https://github.com/actix/actix-web/pull/1864
[#1878]: https://github.com/actix/actix-web/pull/1878
</details>
## 2.2.2 - 2022-01-21
### Changed
- Migrate to `brotli` crate. [ad7e3c06]
[ad7e3c06]: https://github.com/actix/actix-web/commit/ad7e3c06
## 2.2.1 - 2021-08-09
### Fixed


@ -1,10 +1,7 @@
[package]
name = "actix-http"
version = "3.3.0"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
version = "3.0.0-beta.18"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "HTTP primitives for the Actix ecosystem"
keywords = ["actix", "http", "framework", "async", "futures"]
homepage = "https://actix.rs"
@ -20,7 +17,7 @@ edition = "2018"
[package.metadata.docs.rs]
# features that docs.rs will build with
features = ["http2", "ws", "openssl", "rustls", "compress-brotli", "compress-gzip", "compress-zstd"]
features = ["openssl", "rustls", "compress-brotli", "compress-gzip", "compress-zstd"]
[lib]
name = "actix_http"
@ -29,101 +26,82 @@ path = "src/lib.rs"
[features]
default = []
# HTTP/2 protocol support
http2 = ["h2"]
# WebSocket protocol implementation
ws = [
"local-channel",
"base64",
"rand",
"sha1",
]
# TLS via OpenSSL
# openssl
openssl = ["actix-tls/accept", "actix-tls/openssl"]
# TLS via Rustls
# rustls support
rustls = ["actix-tls/accept", "actix-tls/rustls"]
# Compression codecs
compress-brotli = ["__compress", "brotli"]
compress-gzip = ["__compress", "flate2"]
compress-zstd = ["__compress", "zstd"]
# enable compression support
compress-brotli = ["brotli", "__compress"]
compress-gzip = ["flate2", "__compress"]
compress-zstd = ["zstd", "__compress"]
# Internal (PRIVATE!) features used to aid testing and checking feature status.
# Don't rely on these whatsoever. They are semver-exempt and may disappear at any time.
# Don't rely on these whatsoever. They may disappear at any time.
__compress = []
[dependencies]
actix-service = "2"
actix-codec = "0.5"
actix-utils = "3"
actix-service = "2.0.0"
actix-codec = "0.4.1"
actix-utils = "3.0.0"
actix-rt = { version = "2.2", default-features = false }
ahash = "0.7"
base64 = "0.13"
bitflags = "1.2"
bytes = "1"
bytestring = "1"
derive_more = "0.99.5"
encoding_rs = "0.8"
futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] }
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
h2 = "0.3.9"
http = "0.2.5"
httparse = "1.5.1"
httpdate = "1.0.1"
itoa = "1"
language-tags = "0.3"
local-channel = "0.1"
log = "0.4"
mime = "0.3"
percent-encoding = "2.1"
pin-project-lite = "0.2"
rand = "0.8"
sha-1 = "0.10"
smallvec = "1.6.1"
tokio = { version = "1.18.4", features = [] }
tokio-util = { version = "0.7", features = ["io", "codec"] }
tracing = { version = "0.1.30", default-features = false, features = ["log"] }
# http2
h2 = { version = "0.3.9", optional = true }
# tls
actix-tls = { version = "3.0.0", default-features = false, optional = true }
# websockets
local-channel = { version = "0.1", optional = true }
base64 = { version = "0.21", optional = true }
rand = { version = "0.8", optional = true }
sha1 = { version = "0.10", optional = true }
# openssl/rustls
actix-tls = { version = "3", default-features = false, optional = true }
# compress-*
# compression
brotli = { version = "3.3.3", optional = true }
flate2 = { version = "1.0.13", optional = true }
zstd = { version = "0.12", optional = true }
zstd = { version = "0.9", optional = true }
[dev-dependencies]
actix-http-test = { version = "3", features = ["openssl"] }
actix-server = "2"
actix-tls = { version = "3", features = ["openssl"] }
actix-web = "4"
actix-http-test = { version = "3.0.0-beta.11", features = ["openssl"] }
actix-server = "2.0.0-rc.2"
actix-tls = { version = "3.0.0", features = ["openssl"] }
actix-web = "4.0.0-beta.20"
async-stream = "0.3"
criterion = { version = "0.4", features = ["html_reports"] }
criterion = { version = "0.3", features = ["html_reports"] }
env_logger = "0.9"
futures-util = { version = "0.3.17", default-features = false, features = ["alloc"] }
futures-util = { version = "0.3.7", default-features = false, features = ["alloc"] }
memchr = "2.4"
once_cell = "1.9"
rcgen = "0.9"
rcgen = "0.8"
regex = "1.3"
rustversion = "1"
rustls-pemfile = "1"
rustls-pemfile = "0.2"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
static_assertions = "1"
tls-openssl = { package = "openssl", version = "0.10.9" }
tls-rustls = { package = "rustls", version = "0.20.0" }
tokio = { version = "1.18.4", features = ["net", "rt", "macros"] }
tokio = { version = "1.8.4", features = ["net", "rt", "macros"] }
[[example]]
name = "ws"
required-features = ["ws", "rustls"]
required-features = ["rustls"]
[[bench]]
name = "write-camel-case"


@ -3,11 +3,11 @@
> HTTP primitives for the Actix ecosystem.
[![crates.io](https://img.shields.io/crates/v/actix-http?label=latest)](https://crates.io/crates/actix-http)
[![Documentation](https://docs.rs/actix-http/badge.svg?version=3.3.0)](https://docs.rs/actix-http/3.3.0)
![Version](https://img.shields.io/badge/rustc-1.59+-ab6000.svg)
[![Documentation](https://docs.rs/actix-http/badge.svg?version=3.0.0-beta.18)](https://docs.rs/actix-http/3.0.0-beta.18)
[![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http.svg)
<br />
[![dependency status](https://deps.rs/crate/actix-http/3.3.0/status.svg)](https://deps.rs/crate/actix-http/3.3.0)
[![dependency status](https://deps.rs/crate/actix-http/3.0.0-beta.18/status.svg)](https://deps.rs/crate/actix-http/3.0.0-beta.18)
[![Download](https://img.shields.io/crates/d/actix-http.svg)](https://crates.io/crates/actix-http)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
@ -25,7 +25,7 @@ use actix_http::{HttpService, Response};
use actix_server::Server;
use futures_util::future;
use http::header::HeaderValue;
use tracing::info;
use log::info;
#[actix_rt::main]
async fn main() -> io::Result<()> {


@ -1,5 +1,3 @@
#![allow(clippy::uninlined_format_args)]
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
const CODES: &[u16] = &[0, 1000, 201, 800, 550];


@ -114,12 +114,11 @@ mod _original {
use std::mem::MaybeUninit;
pub fn parse_headers(src: &mut BytesMut) -> usize {
#![allow(invalid_value, clippy::uninit_assumed_init)]
#![allow(clippy::uninit_assumed_init)]
let mut headers: [HeaderIndex; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() };
#[allow(invalid_value)]
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() };


@ -18,8 +18,7 @@ async fn main() -> std::io::Result<()> {
HttpService::build()
// pass the app to service builder
// map_config is used to map App's configuration to ServiceBuilder
// h1 will configure server to only use HTTP/1.1
.h1(map_config(app, |_| AppConfig::default()))
.finish(map_config(app, |_| AppConfig::default()))
.tcp()
})?
.run()


@ -1,27 +0,0 @@
use std::{convert::Infallible, io, time::Duration};
use actix_http::{HttpService, Request, Response, StatusCode};
use actix_server::Server;
use once_cell::sync::Lazy;
static STR: Lazy<String> = Lazy::new(|| "HELLO WORLD ".repeat(20));
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("dispatcher-benchmark", ("127.0.0.1", 8080), || {
HttpService::build()
.client_request_timeout(Duration::from_secs(1))
.finish(|_: Request| async move {
let mut res = Response::build(StatusCode::OK);
Ok::<_, Infallible>(res.body(&**STR))
})
.tcp()
})?
// limiting number of workers so that bench client is not sharing as many resources
.workers(4)
.run()
.await
}


@ -1,11 +1,10 @@
use std::{io, time::Duration};
use std::io;
use actix_http::{Error, HttpService, Request, Response, StatusCode};
use actix_server::Server;
use bytes::BytesMut;
use futures_util::StreamExt as _;
use http::header::HeaderValue;
use tracing::info;
#[actix_rt::main]
async fn main() -> io::Result<()> {
@ -14,24 +13,22 @@ async fn main() -> io::Result<()> {
Server::build()
.bind("echo", ("127.0.0.1", 8080), || {
HttpService::build()
.client_request_timeout(Duration::from_secs(1))
.client_disconnect_timeout(Duration::from_secs(1))
// handles HTTP/1.1 and HTTP/2
.client_timeout(1000)
.client_disconnect(1000)
.finish(|mut req: Request| async move {
let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await {
body.extend_from_slice(&item?);
}
info!("request body: {:?}", body);
log::info!("request body: {:?}", body);
let res = Response::build(StatusCode::OK)
.insert_header(("x-head", HeaderValue::from_static("dummy value!")))
.body(body);
Ok::<_, Error>(res)
Ok::<_, Error>(
Response::build(StatusCode::OK)
.insert_header(("x-head", HeaderValue::from_static("dummy value!")))
.body(body),
)
})
// No TLS
.tcp()
})?
.run()


@ -1,34 +1,32 @@
use std::io;
use actix_http::{
body::{BodyStream, MessageBody},
header, Error, HttpMessage, HttpService, Request, Response, StatusCode,
body::MessageBody, header::HeaderValue, Error, HttpService, Request, Response, StatusCode,
};
use actix_server::Server;
use bytes::BytesMut;
use futures_util::StreamExt as _;
async fn handle_request(mut req: Request) -> Result<Response<impl MessageBody>, Error> {
let mut res = Response::build(StatusCode::OK);
if let Some(ct) = req.headers().get(header::CONTENT_TYPE) {
res.insert_header((header::CONTENT_TYPE, ct));
let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await {
body.extend_from_slice(&item?)
}
// echo request payload stream as (chunked) response body
let res = res.message_body(BodyStream::new(req.payload().take()))?;
log::info!("request body: {:?}", body);
Ok(res)
Ok(Response::build(StatusCode::OK)
.insert_header(("x-head", HeaderValue::from_static("dummy value!")))
.body(body))
}
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
actix_server::Server::build()
Server::build()
.bind("echo", ("127.0.0.1", 8080), || {
HttpService::build()
// handles HTTP/1.1 only
.h1(handle_request)
// No TLS
.tcp()
HttpService::build().finish(handle_request).tcp()
})?
.run()
.await


@ -1,29 +0,0 @@
//! An example that supports automatic selection of plaintext h1/h2c connections.
//!
//! Notably, both the following commands will work.
//! ```console
//! $ curl --http1.1 'http://localhost:8080/'
//! $ curl --http2-prior-knowledge 'http://localhost:8080/'
//! ```
use std::{convert::Infallible, io};
use actix_http::{HttpService, Request, Response, StatusCode};
use actix_server::Server;
#[tokio::main(flavor = "current_thread")]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("h2c-detect", ("127.0.0.1", 8080), || {
HttpService::build()
.finish(|_req: Request| async move {
Ok::<_, Infallible>(Response::build(StatusCode::OK).body("Hello!"))
})
.tcp_auto_h2c()
})?
.workers(2)
.run()
.await
}


@ -1,25 +0,0 @@
use std::{convert::Infallible, io};
use actix_http::{HttpService, Request, Response, StatusCode};
use actix_server::Server;
use once_cell::sync::Lazy;
static STR: Lazy<String> = Lazy::new(|| "HELLO WORLD ".repeat(100));
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("h2spec", ("127.0.0.1", 8080), || {
HttpService::build()
.h2(|_: Request| async move {
let mut res = Response::build(StatusCode::OK);
Ok::<_, Infallible>(res.body(&**STR))
})
.tcp()
})?
.workers(4)
.run()
.await
}


@ -1,8 +1,9 @@
use std::{convert::Infallible, io, time::Duration};
use std::{convert::Infallible, io};
use actix_http::{header::HeaderValue, HttpService, Request, Response, StatusCode};
use actix_http::{
header::HeaderValue, HttpMessage, HttpService, Request, Response, StatusCode,
};
use actix_server::Server;
use tracing::info;
#[actix_rt::main]
async fn main() -> io::Result<()> {
@ -11,18 +12,18 @@ async fn main() -> io::Result<()> {
Server::build()
.bind("hello-world", ("127.0.0.1", 8080), || {
HttpService::build()
.client_request_timeout(Duration::from_secs(1))
.client_disconnect_timeout(Duration::from_secs(1))
.client_timeout(1000)
.client_disconnect(1000)
.on_connect_ext(|_, ext| {
ext.insert(42u32);
})
.finish(|req: Request| async move {
info!("{:?}", req);
log::info!("{:?}", req);
let mut res = Response::build(StatusCode::OK);
res.insert_header(("x-head", HeaderValue::from_static("dummy value!")));
let forty_two = req.conn_data::<u32>().unwrap().to_string();
let forty_two = req.extensions().get::<u32>().unwrap().to_string();
res.insert_header((
"x-forty-two",
HeaderValue::from_str(&forty_two).unwrap(),


@ -12,7 +12,6 @@ use actix_http::{body::BodyStream, HttpService, Response};
use actix_server::Server;
use async_stream::stream;
use bytes::Bytes;
use tracing::info;
#[actix_rt::main]
async fn main() -> io::Result<()> {
@ -22,7 +21,7 @@ async fn main() -> io::Result<()> {
.bind("streaming-error", ("127.0.0.1", 8080), || {
HttpService::build()
.finish(|req| async move {
info!("{:?}", req);
log::info!("{:?}", req);
let res = Response::ok();
Ok::<_, Infallible>(res.set_body(BodyStream::new(stream! {


@ -10,14 +10,13 @@ use std::{
time::Duration,
};
use actix_codec::Encoder;
use actix_http::{body::BodyStream, error::Error, ws, HttpService, Request, Response};
use actix_rt::time::{interval, Interval};
use actix_server::Server;
use bytes::{Bytes, BytesMut};
use bytestring::ByteString;
use futures_core::{ready, Stream};
use tokio_util::codec::Encoder;
use tracing::{info, trace};
#[actix_rt::main]
async fn main() -> io::Result<()> {
@ -35,13 +34,13 @@ async fn main() -> io::Result<()> {
}
async fn handler(req: Request) -> Result<Response<BodyStream<Heartbeat>>, Error> {
info!("handshaking");
log::info!("handshaking");
let mut res = ws::handshake(req.head())?;
// handshake will always fail under HTTP/2
info!("responding");
res.message_body(BodyStream::new(Heartbeat::new(ws::Codec::new())))
log::info!("responding");
Ok(res.message_body(BodyStream::new(Heartbeat::new(ws::Codec::new())))?)
}
struct Heartbeat {
@ -62,7 +61,7 @@ impl Stream for Heartbeat {
type Item = Result<Bytes, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
trace!("poll");
log::trace!("poll");
ready!(self.as_mut().interval.poll_tick(cx));


@ -80,7 +80,7 @@ mod tests {
use futures_core::ready;
use futures_util::{stream, FutureExt as _};
use pin_project_lite::pin_project;
use static_assertions::{assert_impl_all, assert_not_impl_any};
use static_assertions::{assert_impl_all, assert_not_impl_all};
use super::*;
use crate::body::to_bytes;
@ -91,10 +91,10 @@ mod tests {
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);
assert_not_impl_any!(BodyStream<stream::Empty<Bytes>>: MessageBody);
assert_not_impl_any!(BodyStream<stream::Repeat<Bytes>>: MessageBody);
assert_not_impl_all!(BodyStream<stream::Empty<Bytes>>: MessageBody);
assert_not_impl_all!(BodyStream<stream::Repeat<Bytes>>: MessageBody);
// crate::Error is not Clone
assert_not_impl_any!(BodyStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
assert_not_impl_all!(BodyStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
#[actix_rt::test]
async fn skips_empty_chunks() {


@ -31,7 +31,7 @@ impl fmt::Debug for BoxBodyInner {
}
impl BoxBody {
/// Boxes body type, erasing type information.
/// Same as `MessageBody::boxed`.
///
/// If the body type to wrap is unknown or generic it is better to use [`MessageBody::boxed`] to
/// avoid double boxing.
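A minimal sketch of the distinction drawn above (function names are illustrative): `MessageBody::boxed` is a no-op on an already-boxed body, so it is the safer choice in generic code, while `BoxBody::new` is fine when the concrete type is known.

```rust
use actix_http::body::{BoxBody, MessageBody};

// Preferred for unknown/generic body types: no risk of double boxing.
fn erase_generic<B>(body: B) -> BoxBody
where
    B: MessageBody + 'static,
{
    body.boxed()
}

// Fine when the concrete type is known not to be a `BoxBody` already.
fn erase_string(body: String) -> BoxBody {
    BoxBody::new(body)
}
```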
@ -105,13 +105,14 @@ impl MessageBody for BoxBody {
#[cfg(test)]
mod tests {
use static_assertions::{assert_impl_all, assert_not_impl_any};
use static_assertions::{assert_impl_all, assert_not_impl_all};
use super::*;
use crate::body::to_bytes;
assert_impl_all!(BoxBody: fmt::Debug, MessageBody, Unpin);
assert_not_impl_any!(BoxBody: Send, Sync);
assert_impl_all!(BoxBody: MessageBody, fmt::Debug, Unpin);
assert_not_impl_all!(BoxBody: Send, Sync, Unpin);
#[actix_rt::test]
async fn nested_boxed_body() {


@ -10,17 +10,6 @@ use super::{BodySize, BoxBody, MessageBody};
use crate::Error;
pin_project! {
/// An "either" type specialized for body types.
///
/// It is common, in middleware especially, to conditionally return an inner service's unknown/
/// generic body `B` type or return early with a new response. This type's "right" variant
/// defaults to `BoxBody` since error responses are the common case.
///
/// For example, middleware will often have `type Response = ServiceResponse<EitherBody<B>>`.
/// This means that the inner service's response body type maps to the `Left` variant and the
/// middleware's own error responses use the default `Right` variant of `BoxBody`. Of course,
/// there's no reason it couldn't use `EitherBody<B, String>` instead if its alternative
/// responses have a known type.
#[project = EitherBodyProj]
#[derive(Debug, Clone)]
pub enum EitherBody<L, R = BoxBody> {
@ -33,10 +22,7 @@ pin_project! {
}
impl<L> EitherBody<L, BoxBody> {
/// Creates new `EitherBody` left variant with a boxed right variant.
///
/// If the expected `R` type will be inferred and is not `BoxBody` then use the
/// [`left`](Self::left) constructor instead.
/// Creates new `EitherBody` using left variant and boxed right variant.
#[inline]
pub fn new(body: L) -> Self {
Self::Left { body }
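A minimal sketch of the middleware-style pattern the removed docs describe (the gating flag and error message are illustrative): the inner body maps to the `Left` variant, while an early error response uses the default `Right = BoxBody` variant.

```rust
use actix_http::{
    body::{BoxBody, EitherBody, MessageBody},
    Response, StatusCode,
};

fn forward_or_reject<B>(allowed: bool, inner: Response<B>) -> Response<EitherBody<B>>
where
    B: MessageBody + 'static,
{
    if allowed {
        // forward the inner service's body as the `Left` variant
        inner.map_body(|_head, body| EitherBody::new(body))
    } else {
        // reply early with a boxed body, the default `Right` variant
        Response::with_body(
            StatusCode::FORBIDDEN,
            EitherBody::right(BoxBody::new("forbidden")),
        )
    }
}
```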


@ -14,44 +14,8 @@ use pin_project_lite::pin_project;
use super::{BodySize, BoxBody};
/// An interface for types that can be used as a response body.
///
/// It is not usually necessary to create custom body types, this trait is already [implemented for
/// a large number of sensible body types](#foreign-impls) including:
/// - Empty body: `()`
/// - Text-based: `String`, `&'static str`, [`ByteString`](https://docs.rs/bytestring/1).
/// - Byte-based: `Bytes`, `BytesMut`, `Vec<u8>`, `&'static [u8]`;
/// - Streams: [`BodyStream`](super::BodyStream), [`SizedStream`](super::SizedStream)
///
/// # Examples
/// ```
/// # use std::convert::Infallible;
/// # use std::task::{Poll, Context};
/// # use std::pin::Pin;
/// # use bytes::Bytes;
/// # use actix_http::body::{BodySize, MessageBody};
/// struct Repeat {
/// chunk: String,
/// n_times: usize,
/// }
///
/// impl MessageBody for Repeat {
/// type Error = Infallible;
///
/// fn size(&self) -> BodySize {
/// BodySize::Sized((self.chunk.len() * self.n_times) as u64)
/// }
///
/// fn poll_next(
/// self: Pin<&mut Self>,
/// _cx: &mut Context<'_>,
/// ) -> Poll<Option<Result<Bytes, Self::Error>>> {
/// let payload_string = self.chunk.repeat(self.n_times);
/// let payload_bytes = Bytes::from(payload_string);
/// Poll::Ready(Some(Ok(payload_bytes)))
/// }
/// }
/// ```
/// An interface for types that can be converted to bytes and used as response bodies.
// TODO: examples
pub trait MessageBody {
/// The type of error that will be returned if streaming body fails.
///
@ -65,22 +29,7 @@ pub trait MessageBody {
fn size(&self) -> BodySize;
/// Attempt to pull out the next chunk of body bytes.
///
/// # Return Value
/// Similar to the `Stream` interface, there are several possible return values, each indicating
/// a distinct state:
/// - `Poll::Pending` means that this body's next chunk is not ready yet. Implementations must
/// ensure that the current task will be notified when the next chunk may be ready.
/// - `Poll::Ready(Some(val))` means that the body has successfully produced a chunk, `val`,
/// and may produce further values on subsequent `poll_next` calls.
/// - `Poll::Ready(None)` means that the body is complete, and `poll_next` should not be
/// invoked again.
///
/// # Panics
/// Once a body is complete (i.e., `poll_next` returned `Ready(None)`), calling its `poll_next`
/// method again may panic, block forever, or cause other kinds of problems; this trait places
/// no requirements on the effects of such a call. However, as the `poll_next` method is not
/// marked unsafe, Rust's usual rules apply: calls must never cause UB, regardless of its state.
// TODO: expand documentation
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
@ -88,7 +37,7 @@ pub trait MessageBody {
/// Try to convert into the complete chunk of body bytes.
///
/// Override this method if the complete body can be trivially extracted. This is useful for
/// Implement this method if the entire body can be trivially extracted. This is useful for
/// optimizations where `poll_next` calls can be avoided.
///
/// Body types with [`BodySize::None`] are allowed to return empty `Bytes`. Although, if calling
@ -105,11 +54,7 @@ pub trait MessageBody {
Err(self)
}
/// Wraps this body into a `BoxBody`.
///
/// No-op when called on a `BoxBody`, meaning there is no risk of double boxing when calling
/// this on a generic `MessageBody`. Prefer this over [`BoxBody::new`] when a boxed body
/// is required.
/// Converts this body into `BoxBody`.
#[inline]
fn boxed(self) -> BoxBody
where
@ -120,28 +65,8 @@ pub trait MessageBody {
}
mod foreign_impls {
use std::{borrow::Cow, ops::DerefMut};
use super::*;
impl<B> MessageBody for &mut B
where
B: MessageBody + Unpin + ?Sized,
{
type Error = B::Error;
fn size(&self) -> BodySize {
(**self).size()
}
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Pin::new(&mut **self).poll_next(cx)
}
}
impl MessageBody for Infallible {
type Error = Infallible;
@ -199,9 +124,8 @@ mod foreign_impls {
}
}
impl<T, B> MessageBody for Pin<T>
impl<B> MessageBody for Pin<Box<B>>
where
T: DerefMut<Target = B> + Unpin,
B: MessageBody + ?Sized,
{
type Error = B::Error;
@ -324,39 +248,6 @@ mod foreign_impls {
}
}
impl MessageBody for Cow<'static, [u8]> {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
let bytes = match mem::take(self.get_mut()) {
Cow::Borrowed(b) => Bytes::from_static(b),
Cow::Owned(b) => Bytes::from(b),
};
Poll::Ready(Some(Ok(bytes)))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
match self {
Cow::Borrowed(b) => Ok(Bytes::from_static(b)),
Cow::Owned(b) => Ok(Bytes::from(b)),
}
}
}
impl MessageBody for &'static str {
type Error = Infallible;
@ -412,39 +303,6 @@ mod foreign_impls {
}
}
impl MessageBody for Cow<'static, str> {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
let bytes = match mem::take(self.get_mut()) {
Cow::Borrowed(s) => Bytes::from_static(s.as_bytes()),
Cow::Owned(s) => Bytes::from(s.into_bytes()),
};
Poll::Ready(Some(Ok(bytes)))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
match self {
Cow::Borrowed(s) => Ok(Bytes::from_static(s.as_bytes())),
Cow::Owned(s) => Ok(Bytes::from(s.into_bytes())),
}
}
}
impl MessageBody for bytestring::ByteString {
type Error = Infallible;
@ -532,7 +390,6 @@ mod tests {
use actix_rt::pin;
use actix_utils::future::poll_fn;
use bytes::{Bytes, BytesMut};
use futures_util::stream;
use super::*;
use crate::body::{self, EitherBody};
@ -569,35 +426,6 @@ mod tests {
assert_poll_next_none!(pl);
}
#[actix_rt::test]
async fn mut_equivalence() {
assert_eq!(().size(), BodySize::Sized(0));
assert_eq!(().size(), (&(&mut ())).size());
let pl = &mut ();
pin!(pl);
assert_poll_next_none!(pl);
let pl = &mut Box::new(());
pin!(pl);
assert_poll_next_none!(pl);
let mut body = body::SizedStream::new(
8,
stream::iter([
Ok::<_, std::io::Error>(Bytes::from("1234")),
Ok(Bytes::from("5678")),
]),
);
let body = &mut body;
assert_eq!(body.size(), BodySize::Sized(8));
pin!(body);
assert_poll_next!(body, Bytes::from_static(b"1234"));
assert_poll_next!(body, Bytes::from_static(b"5678"));
assert_poll_next_none!(body);
}
#[allow(clippy::let_unit_value)]
#[actix_rt::test]
async fn test_unit() {
let pl = ();
@ -723,18 +551,4 @@ mod tests {
let not_body = resp_body.downcast_ref::<()>();
assert!(not_body.is_none());
}
#[actix_rt::test]
async fn non_owning_to_bytes() {
let mut body = BoxBody::new(());
let bytes = body::to_bytes(&mut body).await.unwrap();
assert_eq!(bytes, Bytes::new());
let mut body = body::BodyStream::new(stream::iter([
Ok::<_, std::io::Error>(Bytes::from("1234")),
Ok(Bytes::from("5678")),
]));
let bytes = body::to_bytes(&mut body).await.unwrap();
assert_eq!(bytes, Bytes::from_static(b"12345678"));
}
}
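As a usage note for the `poll_next` contract documented earlier in this file's diff, here is a minimal sketch (the helper name is illustrative) that drives a body by hand using the same `pin!`/`poll_fn` pattern as these tests:

```rust
use actix_http::body::MessageBody;
use actix_rt::pin;
use actix_utils::future::poll_fn;
use bytes::Bytes;

// Collect every chunk a body yields until it returns `Poll::Ready(None)`.
async fn drain<B: MessageBody>(body: B) -> Result<Vec<Bytes>, B::Error> {
    pin!(body);
    let mut chunks = Vec::new();

    while let Some(chunk) = poll_fn(|cx| body.as_mut().poll_next(cx)).await {
        chunks.push(chunk?);
    }

    Ok(chunks)
}
```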


@ -1,9 +1,4 @@
//! Traits and structures to aid consuming and writing HTTP payloads.
//!
//! "Body" and "payload" are used somewhat interchangeably in this documentation.
// Though the spec kinda reads like "payload" is the possibly-transfer-encoded part of the message
// and the "body" is the intended possibly-decoded version of that.
mod body_stream;
mod boxed;


@ -10,12 +10,9 @@ use super::{BodySize, MessageBody};
/// Body type for responses that forbid payloads.
///
/// This is distinct from an "empty" response which _would_ contain a `Content-Length` header.
/// For an "empty" body, use `()` or `Bytes::new()`.
/// Distinct from an empty response which would contain a Content-Length header.
///
/// For example, the HTTP spec forbids a payload to be sent with a `204 No Content` response.
/// In this case, the payload (or lack thereof) is implicit from the status code, so a
/// `Content-Length` header is not required.
/// For an "empty" body, use `()` or `Bytes::new()`.
#[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive]
pub struct None;
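As a usage sketch (assuming `body::None::new()` and `Response::with_body` as in the current crate API), a `204 No Content` response pairs the status with this body type rather than an empty one:

use actix_http::{body, Response, StatusCode};

fn main() {
    // 204 No Content must not carry a payload, so no Content-Length header is emitted.
    let res = Response::with_body(StatusCode::NO_CONTENT, body::None::new());
    assert_eq!(res.status(), StatusCode::NO_CONTENT);
}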

View File

@ -44,7 +44,7 @@ where
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.size)
BodySize::Sized(self.size as u64)
}
/// Attempts to pull out the next value of the underlying [`Stream`].
@ -76,7 +76,7 @@ mod tests {
use actix_rt::pin;
use actix_utils::future::poll_fn;
use futures_util::stream;
use static_assertions::{assert_impl_all, assert_not_impl_any};
use static_assertions::{assert_impl_all, assert_not_impl_all};
use super::*;
use crate::body::to_bytes;
@ -87,10 +87,10 @@ mod tests {
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);
assert_not_impl_any!(SizedStream<stream::Empty<Bytes>>: MessageBody);
assert_not_impl_any!(SizedStream<stream::Repeat<Bytes>>: MessageBody);
assert_not_impl_all!(SizedStream<stream::Empty<Bytes>>: MessageBody);
assert_not_impl_all!(SizedStream<stream::Repeat<Bytes>>: MessageBody);
// crate::Error is not Clone
assert_not_impl_any!(SizedStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
assert_not_impl_all!(SizedStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
#[actix_rt::test]
async fn skips_empty_chunks() {

View File

@ -42,7 +42,7 @@ pub async fn to_bytes<B: MessageBody>(body: B) -> Result<Bytes, B::Error> {
let body = body.as_mut();
match ready!(body.poll_next(cx)) {
Some(Ok(bytes)) => buf.extend_from_slice(&bytes),
Some(Ok(bytes)) => buf.extend_from_slice(&*bytes),
None => return Poll::Ready(Ok(())),
Some(Err(err)) => return Poll::Ready(Err(err)),
}
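A short usage sketch for `to_bytes`, collecting a stream-backed body into one buffer (assumes `BodyStream` and the `futures-util` crate, mirroring the body-stream tests earlier in this comparison):

use actix_http::body::{self, BodyStream};
use bytes::Bytes;
use futures_util::stream;

#[actix_rt::main]
async fn main() {
    let body = BodyStream::new(stream::iter([
        Ok::<_, std::io::Error>(Bytes::from_static(b"12")),
        Ok(Bytes::from_static(b"34")),
    ]));
    // Chunks are polled to completion and concatenated into a single contiguous buffer.
    assert_eq!(body::to_bytes(body).await.unwrap(), Bytes::from_static(b"1234"));
}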

View File

@ -1,22 +1,25 @@
use std::{fmt, marker::PhantomData, net, rc::Rc, time::Duration};
use std::{fmt, marker::PhantomData, net, rc::Rc};
use actix_codec::Framed;
use actix_service::{IntoServiceFactory, Service, ServiceFactory};
use crate::{
body::{BoxBody, MessageBody},
config::{KeepAlive, ServiceConfig},
h1::{self, ExpectHandler, H1Service, UpgradeHandler},
h2::H2Service,
service::HttpService,
ConnectCallback, Extensions, KeepAlive, Request, Response, ServiceConfig,
ConnectCallback, Extensions, Request, Response,
};
/// An HTTP service builder.
/// A HTTP service builder
///
/// This type can construct an instance of [`HttpService`] through a builder-like pattern.
/// This type can be used to construct an instance of [`HttpService`] through a
/// builder-like pattern.
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> {
keep_alive: KeepAlive,
client_request_timeout: Duration,
client_disconnect_timeout: Duration,
client_timeout: u64,
client_disconnect: u64,
secure: bool,
local_addr: Option<net::SocketAddr>,
expect: X,
@ -25,23 +28,22 @@ pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> {
_phantom: PhantomData<S>,
}
impl<T, S> Default for HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler>
impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler>
where
S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Response<BoxBody>> + 'static,
S::InitError: fmt::Debug,
<S::Service as Service<Request>>::Future: 'static,
{
fn default() -> Self {
/// Create a new instance of `HttpServiceBuilder`.
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
HttpServiceBuilder {
// ServiceConfig parts (make sure defaults match)
keep_alive: KeepAlive::default(),
client_request_timeout: Duration::from_secs(5),
client_disconnect_timeout: Duration::ZERO,
keep_alive: KeepAlive::Timeout(5),
client_timeout: 5000,
client_disconnect: 0,
secure: false,
local_addr: None,
// dispatcher parts
expect: ExpectHandler,
upgrade: None,
on_connect_ext: None,
@ -63,11 +65,9 @@ where
U::Error: fmt::Display,
U::InitError: fmt::Debug,
{
/// Set connection keep-alive setting.
/// Set server keep-alive setting.
///
/// Applies to HTTP/1.1 keep-alive and HTTP/2 ping-pong.
///
/// By default keep-alive is 5 seconds.
/// By default keep alive is set to a 5 seconds.
pub fn keep_alive<W: Into<KeepAlive>>(mut self, val: W) -> Self {
self.keep_alive = val.into();
self
@ -85,45 +85,33 @@ where
self
}
/// Set client request timeout (for first request).
/// Set server client timeout in milliseconds for first request.
///
/// Defines a timeout for reading client request header. If the client does not transmit the
/// request head within this duration, the connection is terminated with a `408 Request Timeout`
/// response error.
/// Defines a timeout for reading client request header. If a client does not transmit
/// the entire set headers within this time, the request is terminated with
/// the 408 (Request Time-out) error.
///
/// A duration of zero disables the timeout.
/// To disable timeout set value to 0.
///
/// By default, the client timeout is 5 seconds.
pub fn client_request_timeout(mut self, dur: Duration) -> Self {
self.client_request_timeout = dur;
/// By default client timeout is set to 5000 milliseconds.
pub fn client_timeout(mut self, val: u64) -> Self {
self.client_timeout = val;
self
}
#[doc(hidden)]
#[deprecated(since = "3.0.0", note = "Renamed to `client_request_timeout`.")]
pub fn client_timeout(self, dur: Duration) -> Self {
self.client_request_timeout(dur)
}
/// Set client connection disconnect timeout.
/// Set server connection disconnect timeout in milliseconds.
///
/// Defines a timeout for the connection disconnect procedure. If the disconnect does not
/// complete within this time, the request is dropped. This timeout affects secure connections.
///
/// A duration of zero disables the timeout.
/// To disable timeout set value to 0.
///
/// By default, the disconnect timeout is disabled.
pub fn client_disconnect_timeout(mut self, dur: Duration) -> Self {
self.client_disconnect_timeout = dur;
/// By default disconnect timeout is set to 0.
pub fn client_disconnect(mut self, val: u64) -> Self {
self.client_disconnect = val;
self
}
#[doc(hidden)]
#[deprecated(since = "3.0.0", note = "Renamed to `client_disconnect_timeout`.")]
pub fn client_disconnect(self, dur: Duration) -> Self {
self.client_disconnect_timeout(dur)
}
/// Provide service for `EXPECT: 100-Continue` support.
///
/// The service is called with requests that contain an `EXPECT` header.
@ -138,8 +126,8 @@ where
{
HttpServiceBuilder {
keep_alive: self.keep_alive,
client_request_timeout: self.client_request_timeout,
client_disconnect_timeout: self.client_disconnect_timeout,
client_timeout: self.client_timeout,
client_disconnect: self.client_disconnect,
secure: self.secure,
local_addr: self.local_addr,
expect: expect.into_factory(),
@ -162,8 +150,8 @@ where
{
HttpServiceBuilder {
keep_alive: self.keep_alive,
client_request_timeout: self.client_request_timeout,
client_disconnect_timeout: self.client_disconnect_timeout,
client_timeout: self.client_timeout,
client_disconnect: self.client_disconnect,
secure: self.secure,
local_addr: self.local_addr,
expect: self.expect,
@ -186,7 +174,7 @@ where
self
}
/// Finish service configuration and create a service for the HTTP/1 protocol.
/// Finish service configuration and create a HTTP Service for HTTP/1 protocol.
pub fn h1<F, B>(self, service: F) -> H1Service<T, S, B, X, U>
where
B: MessageBody,
@ -197,8 +185,8 @@ where
{
let cfg = ServiceConfig::new(
self.keep_alive,
self.client_request_timeout,
self.client_disconnect_timeout,
self.client_timeout,
self.client_disconnect,
self.secure,
self.local_addr,
);
@ -209,10 +197,8 @@ where
.on_connect_ext(self.on_connect_ext)
}
/// Finish service configuration and create a service for the HTTP/2 protocol.
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn h2<F, B>(self, service: F) -> crate::h2::H2Service<T, S, B>
/// Finish service configuration and create a HTTP service for HTTP/2 protocol.
pub fn h2<F, B>(self, service: F) -> H2Service<T, S, B>
where
F: IntoServiceFactory<S, Request>,
S::Error: Into<Response<BoxBody>> + 'static,
@ -223,14 +209,13 @@ where
{
let cfg = ServiceConfig::new(
self.keep_alive,
self.client_request_timeout,
self.client_disconnect_timeout,
self.client_timeout,
self.client_disconnect,
self.secure,
self.local_addr,
);
crate::h2::H2Service::with_config(cfg, service.into_factory())
.on_connect_ext(self.on_connect_ext)
H2Service::with_config(cfg, service.into_factory()).on_connect_ext(self.on_connect_ext)
}
/// Finish service configuration and create `HttpService` instance.
@ -245,8 +230,8 @@ where
{
let cfg = ServiceConfig::new(
self.keep_alive,
self.client_request_timeout,
self.client_disconnect_timeout,
self.client_timeout,
self.client_disconnect,
self.secure,
self.local_addr,
);
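Putting the builder together, a sketch of typical usage against the `Duration`-based API on one side of this comparison; `actix-server`, the `actix-rt` macros, and the `tcp()` finalizer are assumed to be available, and the handler is illustrative:

use std::time::Duration;

use actix_http::{Error, HttpService, KeepAlive, Request, Response};
use actix_server::Server;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    Server::build()
        .bind("builder-demo", ("127.0.0.1", 8080), || {
            HttpService::build()
                // applies to HTTP/1.1 keep-alive and HTTP/2 ping-pong
                .keep_alive(KeepAlive::Timeout(Duration::from_secs(5)))
                // respond with 408 if the request head is not received in time
                .client_request_timeout(Duration::from_secs(5))
                // handle both HTTP/1.x and HTTP/2 with a single service
                .finish(|_req: Request| async { Ok::<_, Error>(Response::ok()) })
                .tcp()
        })?
        .run()
        .await
}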

View File

@ -1,59 +1,106 @@
use std::{
cell::Cell,
fmt::{self, Write},
net,
rc::Rc,
time::{Duration, Instant},
time::{Duration, SystemTime},
};
use actix_rt::{
task::JoinHandle,
time::{interval, sleep_until, Instant, Sleep},
};
use bytes::BytesMut;
use crate::{date::DateService, KeepAlive};
/// "Sun, 06 Nov 1994 08:49:37 GMT".len()
pub(crate) const DATE_VALUE_LENGTH: usize = 29;
/// HTTP service configuration.
#[derive(Debug, Clone)]
#[derive(Debug, PartialEq, Clone, Copy)]
/// Server keep-alive setting
pub enum KeepAlive {
/// Keep alive in seconds
Timeout(usize),
/// Rely on OS to shutdown tcp connection
Os,
/// Disabled
Disabled,
}
impl From<usize> for KeepAlive {
fn from(keepalive: usize) -> Self {
KeepAlive::Timeout(keepalive)
}
}
impl From<Option<usize>> for KeepAlive {
fn from(keepalive: Option<usize>) -> Self {
if let Some(keepalive) = keepalive {
KeepAlive::Timeout(keepalive)
} else {
KeepAlive::Disabled
}
}
}
/// Http service configuration
pub struct ServiceConfig(Rc<Inner>);
#[derive(Debug)]
struct Inner {
keep_alive: KeepAlive,
client_request_timeout: Duration,
client_disconnect_timeout: Duration,
keep_alive: Option<Duration>,
client_timeout: u64,
client_disconnect: u64,
ka_enabled: bool,
secure: bool,
local_addr: Option<std::net::SocketAddr>,
date_service: DateService,
}
impl Clone for ServiceConfig {
fn clone(&self) -> Self {
ServiceConfig(self.0.clone())
}
}
impl Default for ServiceConfig {
fn default() -> Self {
Self::new(
KeepAlive::default(),
Duration::from_secs(5),
Duration::ZERO,
false,
None,
)
Self::new(KeepAlive::Timeout(5), 0, 0, false, None)
}
}
impl ServiceConfig {
/// Create instance of `ServiceConfig`.
/// Create instance of `ServiceConfig`
pub fn new(
keep_alive: KeepAlive,
client_request_timeout: Duration,
client_disconnect_timeout: Duration,
client_timeout: u64,
client_disconnect: u64,
secure: bool,
local_addr: Option<net::SocketAddr>,
) -> ServiceConfig {
let (keep_alive, ka_enabled) = match keep_alive {
KeepAlive::Timeout(val) => (val as u64, true),
KeepAlive::Os => (0, true),
KeepAlive::Disabled => (0, false),
};
let keep_alive = if ka_enabled && keep_alive > 0 {
Some(Duration::from_secs(keep_alive))
} else {
None
};
ServiceConfig(Rc::new(Inner {
keep_alive: keep_alive.normalize(),
client_request_timeout,
client_disconnect_timeout,
keep_alive,
ka_enabled,
client_timeout,
client_disconnect,
secure,
local_addr,
date_service: DateService::new(),
}))
}
/// Returns `true` if connection is secure (i.e., using TLS / HTTPS).
/// Returns true if connection is secure (HTTPS)
#[inline]
pub fn secure(&self) -> bool {
self.0.secure
@ -67,97 +114,235 @@ impl ServiceConfig {
self.0.local_addr
}
/// Connection keep-alive setting.
/// Keep alive duration if configured.
#[inline]
pub fn keep_alive(&self) -> KeepAlive {
pub fn keep_alive(&self) -> Option<Duration> {
self.0.keep_alive
}
/// Creates a time object representing the deadline for this connection's keep-alive period, if
/// enabled.
///
/// When [`KeepAlive::Os`] or [`KeepAlive::Disabled`] is set, this will return `None`.
pub fn keep_alive_deadline(&self) -> Option<Instant> {
match self.keep_alive() {
KeepAlive::Timeout(dur) => Some(self.now() + dur),
KeepAlive::Os => None,
KeepAlive::Disabled => None,
/// Return state of connection keep-alive functionality
#[inline]
pub fn keep_alive_enabled(&self) -> bool {
self.0.ka_enabled
}
/// Client timeout for first request.
#[inline]
pub fn client_timer(&self) -> Option<Sleep> {
let delay_time = self.0.client_timeout;
if delay_time != 0 {
Some(sleep_until(self.now() + Duration::from_millis(delay_time)))
} else {
None
}
}
/// Creates a time object representing the deadline for the client to finish sending the head of
/// its first request.
///
/// Returns `None` if this `ServiceConfig` was constructed with `client_request_timeout: 0`.
pub fn client_request_deadline(&self) -> Option<Instant> {
let timeout = self.0.client_request_timeout;
(timeout != Duration::ZERO).then(|| self.now() + timeout)
/// Client timeout for first request.
pub fn client_timer_expire(&self) -> Option<Instant> {
let delay = self.0.client_timeout;
if delay != 0 {
Some(self.now() + Duration::from_millis(delay))
} else {
None
}
}
/// Creates a time object representing the deadline for the client to disconnect.
pub fn client_disconnect_deadline(&self) -> Option<Instant> {
let timeout = self.0.client_disconnect_timeout;
(timeout != Duration::ZERO).then(|| self.now() + timeout)
/// Client disconnect timer
pub fn client_disconnect_timer(&self) -> Option<Instant> {
let delay = self.0.client_disconnect;
if delay != 0 {
Some(self.now() + Duration::from_millis(delay))
} else {
None
}
}
/// Returns the keep-alive timer delay, if configured.
#[inline]
pub fn keep_alive_timer(&self) -> Option<Sleep> {
self.keep_alive().map(|ka| sleep_until(self.now() + ka))
}
/// Keep-alive expire time
pub fn keep_alive_expire(&self) -> Option<Instant> {
self.keep_alive().map(|ka| self.now() + ka)
}
#[inline]
pub(crate) fn now(&self) -> Instant {
self.0.date_service.now()
}
/// Writes date header to `dst` buffer.
///
/// Low-level method that utilizes the built-in efficient date service, requiring fewer syscalls
/// than normal. Note that a CRLF (`\r\n`) is included in what is written.
#[doc(hidden)]
pub fn write_date_header(&self, dst: &mut BytesMut, camel_case: bool) {
let mut buf: [u8; 37] = [0; 37];
buf[..6].copy_from_slice(if camel_case { b"Date: " } else { b"date: " });
pub fn set_date(&self, dst: &mut BytesMut) {
let mut buf: [u8; 39] = [0; 39];
buf[..6].copy_from_slice(b"date: ");
self.0
.date_service
.with_date(|date| buf[6..35].copy_from_slice(&date.bytes));
buf[35..].copy_from_slice(b"\r\n");
.set_date(|date| buf[6..35].copy_from_slice(&date.bytes));
buf[35..].copy_from_slice(b"\r\n\r\n");
dst.extend_from_slice(&buf);
}
#[allow(unused)] // used with `http2` feature flag
pub(crate) fn write_date_header_value(&self, dst: &mut BytesMut) {
pub(crate) fn set_date_header(&self, dst: &mut BytesMut) {
self.0
.date_service
.with_date(|date| dst.extend_from_slice(&date.bytes));
.set_date(|date| dst.extend_from_slice(&date.bytes));
}
}
#[derive(Copy, Clone)]
struct Date {
bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
}
impl Date {
fn new() -> Date {
let mut date = Date {
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
};
date.update();
date
}
fn update(&mut self) {
self.pos = 0;
write!(self, "{}", httpdate::fmt_http_date(SystemTime::now())).unwrap();
}
}
impl fmt::Write for Date {
fn write_str(&mut self, s: &str) -> fmt::Result {
let len = s.len();
self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
self.pos += len;
Ok(())
}
}
/// Service that updates the Date and Instant periodically, at a 500 ms interval.
struct DateService {
current: Rc<Cell<(Date, Instant)>>,
handle: JoinHandle<()>,
}
impl Drop for DateService {
fn drop(&mut self) {
// stop the timer update async task on drop.
self.handle.abort();
}
}
impl DateService {
fn new() -> Self {
// shared date and timer for DateService and update async task.
let current = Rc::new(Cell::new((Date::new(), Instant::now())));
let current_clone = Rc::clone(&current);
// spawn an async task sleep for 500 milli and update current date/timer in a loop.
// handle is used to stop the task on DateService drop.
let handle = actix_rt::spawn(async move {
#[cfg(test)]
let _notify = notify_on_drop::NotifyOnDrop::new();
let mut interval = interval(Duration::from_millis(500));
loop {
let now = interval.tick().await;
let date = Date::new();
current_clone.set((date, now));
}
});
DateService { current, handle }
}
fn now(&self) -> Instant {
self.current.get().1
}
fn set_date<F: FnMut(&Date)>(&self, mut f: F) {
f(&self.current.get().0);
}
}
// TODO: move to a util module for testing all spawn handle drop style tasks.
/// Test module for checking the drop state of certain async tasks that are spawned
/// with `actix_rt::spawn`.
///
/// The target task must explicitly create a `NotifyOnDrop` instance when it is spawned.
#[cfg(test)]
mod notify_on_drop {
use std::cell::RefCell;
thread_local! {
static NOTIFY_DROPPED: RefCell<Option<bool>> = RefCell::new(None);
}
/// Check if the spawned task is dropped.
///
/// # Panics
/// Panics when there was no `NotifyOnDrop` instance on current thread.
pub(crate) fn is_dropped() -> bool {
NOTIFY_DROPPED.with(|bool| {
bool.borrow()
.expect("No NotifyOnDrop existed on current thread")
})
}
pub(crate) struct NotifyOnDrop;
impl NotifyOnDrop {
/// # Panics
///
/// Panics when constructing multiple instances on any given thread.
pub(crate) fn new() -> Self {
NOTIFY_DROPPED.with(|bool| {
let mut bool = bool.borrow_mut();
if bool.is_some() {
panic!("NotifyOnDrop existed on current thread");
} else {
*bool = Some(false);
}
});
NotifyOnDrop
}
}
impl Drop for NotifyOnDrop {
fn drop(&mut self) {
NOTIFY_DROPPED.with(|bool| {
if let Some(b) = bool.borrow_mut().as_mut() {
*b = true;
}
});
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{date::DATE_VALUE_LENGTH, notify_on_drop};
use actix_rt::{
task::yield_now,
time::{sleep, sleep_until},
};
use memchr::memmem;
use actix_rt::{task::yield_now, time::sleep};
#[actix_rt::test]
async fn test_date_service_update() {
let settings =
ServiceConfig::new(KeepAlive::Os, Duration::ZERO, Duration::ZERO, false, None);
let settings = ServiceConfig::new(KeepAlive::Os, 0, 0, false, None);
yield_now().await;
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf1, false);
settings.set_date(&mut buf1);
let now1 = settings.now();
sleep_until((Instant::now() + Duration::from_secs(2)).into()).await;
sleep_until(Instant::now() + Duration::from_secs(2)).await;
yield_now().await;
let now2 = settings.now();
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf2, false);
settings.set_date(&mut buf2);
assert_ne!(now1, now2);
@ -210,27 +395,11 @@ mod tests {
#[actix_rt::test]
async fn test_date() {
let settings = ServiceConfig::default();
let settings = ServiceConfig::new(KeepAlive::Os, 0, 0, false, None);
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf1, false);
settings.set_date(&mut buf1);
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf2, false);
settings.set_date(&mut buf2);
assert_eq!(buf1, buf2);
}
#[actix_rt::test]
async fn test_date_camel_case() {
let settings = ServiceConfig::default();
let mut buf = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf, false);
assert!(memmem::find(&buf, b"date:").is_some());
let mut buf = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf, true);
assert!(memmem::find(&buf, b"Date:").is_some());
}
}
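For reference, constructing the config directly with the `Duration`-based signature from this comparison could look like the sketch below (values mirror the builder defaults; assumes `ServiceConfig` and `KeepAlive` are re-exported from the crate root):

use std::time::Duration;

use actix_http::{KeepAlive, ServiceConfig};

fn main() {
    let cfg = ServiceConfig::new(
        KeepAlive::Timeout(Duration::from_secs(5)), // keep-alive
        Duration::from_secs(5),                     // client request timeout
        Duration::ZERO,                             // client disconnect timeout (disabled)
        false,                                      // secure
        None,                                       // local addr
    );
    assert!(!cfg.secure());
    assert!(cfg.keep_alive_deadline().is_some());
}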

View File

@ -1,92 +0,0 @@
use std::{
cell::Cell,
fmt::{self, Write},
rc::Rc,
time::{Duration, Instant, SystemTime},
};
use actix_rt::{task::JoinHandle, time::interval};
/// "Thu, 01 Jan 1970 00:00:00 GMT".len()
pub(crate) const DATE_VALUE_LENGTH: usize = 29;
#[derive(Clone, Copy)]
pub(crate) struct Date {
pub(crate) bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
}
impl Date {
fn new() -> Date {
let mut date = Date {
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
};
date.update();
date
}
fn update(&mut self) {
self.pos = 0;
write!(self, "{}", httpdate::fmt_http_date(SystemTime::now())).unwrap();
}
}
impl fmt::Write for Date {
fn write_str(&mut self, s: &str) -> fmt::Result {
let len = s.len();
self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
self.pos += len;
Ok(())
}
}
/// Service that updates the Date and Instant periodically, at a 500 ms interval.
pub(crate) struct DateService {
current: Rc<Cell<(Date, Instant)>>,
handle: JoinHandle<()>,
}
impl DateService {
pub(crate) fn new() -> Self {
// shared date and timer for DateService and update async task.
let current = Rc::new(Cell::new((Date::new(), Instant::now())));
let current_clone = Rc::clone(&current);
// spawn an async task sleep for 500 millis and update current date/timer in a loop.
// handle is used to stop the task on DateService drop.
let handle = actix_rt::spawn(async move {
#[cfg(test)]
let _notify = crate::notify_on_drop::NotifyOnDrop::new();
let mut interval = interval(Duration::from_millis(500));
loop {
let now = interval.tick().await;
let date = Date::new();
current_clone.set((date, now.into_std()));
}
});
DateService { current, handle }
}
pub(crate) fn now(&self) -> Instant {
self.current.get().1
}
pub(crate) fn with_date<F: FnMut(&Date)>(&self, mut f: F) {
f(&self.current.get().0);
}
}
impl fmt::Debug for DateService {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DateService").finish_non_exhaustive()
}
}
impl Drop for DateService {
fn drop(&mut self) {
// stop the timer update async task on drop.
self.handle.abort();
}
}
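The idea behind `DateService` (format the HTTP date on a timer instead of on every response) can be sketched independently of this crate with tokio primitives; the watch channel, 500 ms tick, and function name below are illustrative choices, not the crate's internals (assumes the `tokio` and `httpdate` crates):

use std::time::{Duration, SystemTime};

use tokio::{sync::watch, time::interval};

fn spawn_date_cache() -> watch::Receiver<String> {
    let (tx, rx) = watch::channel(httpdate::fmt_http_date(SystemTime::now()));
    tokio::spawn(async move {
        let mut tick = interval(Duration::from_millis(500));
        loop {
            tick.tick().await;
            // Refresh the cached header value; ignore send errors once all receivers are gone.
            let _ = tx.send(httpdate::fmt_http_date(SystemTime::now()));
        }
    });
    rx
}

#[tokio::main]
async fn main() {
    let date = spawn_date_cache();
    println!("date: {}", *date.borrow());
}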

View File

@ -19,7 +19,7 @@ use zstd::stream::write::Decoder as ZstdDecoder;
use crate::{
encoding::Writer,
error::PayloadError,
error::{BlockingError, PayloadError},
header::{ContentEncoding, HeaderMap, CONTENT_ENCODING},
};
@ -47,17 +47,14 @@ where
ContentEncoding::Brotli => Some(ContentDecoder::Brotli(Box::new(
brotli::DecompressorWriter::new(Writer::new(), 8_096),
))),
#[cfg(feature = "compress-gzip")]
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(
ZlibDecoder::new(Writer::new()),
))),
#[cfg(feature = "compress-gzip")]
ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(GzDecoder::new(
Writer::new(),
)))),
#[cfg(feature = "compress-zstd")]
ContentEncoding::Zstd => Some(ContentDecoder::Zstd(Box::new(
ZstdDecoder::new(Writer::new()).expect(
@ -101,12 +98,8 @@ where
loop {
if let Some(ref mut fut) = this.fut {
let (chunk, decoder) = ready!(Pin::new(fut).poll(cx)).map_err(|_| {
PayloadError::Io(io::Error::new(
io::ErrorKind::Other,
"Blocking task was cancelled unexpectedly",
))
})??;
let (chunk, decoder) =
ready!(Pin::new(fut).poll(cx)).map_err(|_| BlockingError)??;
*this.decoder = Some(decoder);
this.fut.take();
@ -166,13 +159,10 @@ where
enum ContentDecoder {
#[cfg(feature = "compress-gzip")]
Deflate(Box<ZlibDecoder<Writer>>),
#[cfg(feature = "compress-gzip")]
Gzip(Box<GzDecoder<Writer>>),
#[cfg(feature = "compress-brotli")]
Brotli(Box<brotli::DecompressorWriter<Writer>>),
// We need explicit 'static lifetime here because ZstdDecoder need lifetime
// argument, and we use `spawn_blocking` in `Decoder::poll_next` that require `FnOnce() -> R + Send + 'static`
#[cfg(feature = "compress-zstd")]

View File

@ -17,13 +17,13 @@ use pin_project_lite::pin_project;
#[cfg(feature = "compress-gzip")]
use flate2::write::{GzEncoder, ZlibEncoder};
use tracing::trace;
#[cfg(feature = "compress-zstd")]
use zstd::stream::write::Encoder as ZstdEncoder;
use super::Writer;
use crate::{
body::{self, BodySize, MessageBody},
error::BlockingError,
header::{self, ContentEncoding, HeaderValue, CONTENT_ENCODING},
ResponseHead, StatusCode,
};
@ -173,12 +173,7 @@ where
if let Some(ref mut fut) = this.fut {
let mut encoder = ready!(Pin::new(fut).poll(cx))
.map_err(|_| {
EncoderError::Io(io::Error::new(
io::ErrorKind::Other,
"Blocking task was cancelled unexpectedly",
))
})?
.map_err(|_| EncoderError::Blocking(BlockingError))?
.map_err(EncoderError::Io)?;
let chunk = encoder.take();
@ -257,7 +252,7 @@ fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) {
head.headers_mut()
.insert(header::CONTENT_ENCODING, encoding.to_header_value());
head.headers_mut()
.append(header::VARY, HeaderValue::from_static("accept-encoding"));
.insert(header::VARY, HeaderValue::from_static("accept-encoding"));
head.no_chunking(false);
}
@ -396,20 +391,21 @@ impl ContentEncoder {
fn new_brotli_compressor() -> Box<brotli::CompressorWriter<Writer>> {
Box::new(brotli::CompressorWriter::new(
Writer::new(),
32 * 1024, // 32 KiB buffer
3, // BROTLI_PARAM_QUALITY
22, // BROTLI_PARAM_LGWIN
8 * 1024, // 32 KiB buffer
3, // BROTLI_PARAM_QUALITY
22, // BROTLI_PARAM_LGWIN
))
}
#[derive(Debug, Display)]
#[non_exhaustive]
pub enum EncoderError {
/// Wrapped body stream error.
#[display(fmt = "body")]
Body(Box<dyn StdError>),
/// Generic I/O error.
#[display(fmt = "blocking")]
Blocking(BlockingError),
#[display(fmt = "io")]
Io(io::Error),
}
@ -418,6 +414,7 @@ impl StdError for EncoderError {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
EncoderError::Body(err) => Some(&**err),
EncoderError::Blocking(err) => Some(err),
EncoderError::Io(err) => Some(err),
}
}

View File

@ -5,7 +5,7 @@ use std::{error::Error as StdError, fmt, io, str::Utf8Error, string::FromUtf8Err
use derive_more::{Display, Error, From};
use http::{uri::InvalidUri, StatusCode};
use crate::{body::BoxBody, Response};
use crate::{body::BoxBody, ws, Response};
pub use http::Error as HttpError;
@ -51,7 +51,7 @@ impl Error {
Self::new(Kind::SendResponse)
}
#[allow(unused)] // available for future use
#[allow(unused)] // reserved for future use (TODO: remove allow when being used)
pub(crate) fn new_io() -> Self {
Self::new(Kind::Io)
}
@ -61,7 +61,6 @@ impl Error {
Self::new(Kind::Encoder)
}
#[allow(unused)] // used with `ws` feature flag
pub(crate) fn new_ws() -> Self {
Self::new(Kind::Ws)
}
@ -108,10 +107,8 @@ pub(crate) enum Kind {
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("actix_http::Error")
.field("kind", &self.inner.kind)
.field("cause", &self.inner.cause)
.finish()
// TODO: more detail
f.write_str("actix_http::Error")
}
}
@ -142,16 +139,14 @@ impl From<HttpError> for Error {
}
}
#[cfg(feature = "ws")]
impl From<crate::ws::HandshakeError> for Error {
fn from(err: crate::ws::HandshakeError) -> Self {
impl From<ws::HandshakeError> for Error {
fn from(err: ws::HandshakeError) -> Self {
Self::new_ws().with_cause(err)
}
}
#[cfg(feature = "ws")]
impl From<crate::ws::ProtocolError> for Error {
fn from(err: crate::ws::ProtocolError) -> Self {
impl From<ws::ProtocolError> for Error {
fn from(err: ws::ProtocolError) -> Self {
Self::new_ws().with_cause(err)
}
}
@ -252,6 +247,12 @@ impl From<ParseError> for Response<BoxBody> {
}
}
/// A set of errors that can occur running blocking tasks in thread pool.
#[derive(Debug, Display, Error)]
#[display(fmt = "Blocking thread pool is gone")]
// TODO: non-exhaustive
pub struct BlockingError;
/// A set of errors that can occur during payload parsing.
#[derive(Debug, Display)]
#[non_exhaustive]
@ -276,9 +277,8 @@ pub enum PayloadError {
UnknownLength,
/// HTTP/2 payload error.
#[cfg(feature = "http2")]
#[display(fmt = "{}", _0)]
Http2Payload(::h2::Error),
Http2Payload(h2::Error),
/// Generic I/O error.
#[display(fmt = "{}", _0)]
@ -289,21 +289,18 @@ impl std::error::Error for PayloadError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
PayloadError::Incomplete(None) => None,
PayloadError::Incomplete(Some(err)) => Some(err),
PayloadError::Incomplete(Some(err)) => Some(err as &dyn std::error::Error),
PayloadError::EncodingCorrupted => None,
PayloadError::Overflow => None,
PayloadError::UnknownLength => None,
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
PayloadError::Http2Payload(err) => Some(err),
PayloadError::Io(err) => Some(err),
PayloadError::Http2Payload(err) => Some(err as &dyn std::error::Error),
PayloadError::Io(err) => Some(err as &dyn std::error::Error),
}
}
}
#[cfg(feature = "http2")]
impl From<::h2::Error> for PayloadError {
fn from(err: ::h2::Error) -> Self {
impl From<h2::Error> for PayloadError {
fn from(err: h2::Error) -> Self {
PayloadError::Http2Payload(err)
}
}
@ -320,6 +317,15 @@ impl From<io::Error> for PayloadError {
}
}
impl From<BlockingError> for PayloadError {
fn from(_: BlockingError) -> Self {
PayloadError::Io(io::Error::new(
io::ErrorKind::Other,
"Operation is canceled",
))
}
}
impl From<PayloadError> for Error {
fn from(err: PayloadError) -> Self {
Self::new_payload().with_cause(err)
@ -328,7 +334,6 @@ impl From<PayloadError> for Error {
/// A set of errors that can occur during dispatching HTTP requests.
#[derive(Debug, Display, From)]
#[non_exhaustive]
pub enum DispatchError {
/// Service error.
#[display(fmt = "Service Error")]
@ -351,8 +356,6 @@ pub enum DispatchError {
/// HTTP/2 error.
#[display(fmt = "{}", _0)]
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
H2(h2::Error),
/// The first request did not complete within the specified timeout.
@ -363,10 +366,6 @@ pub enum DispatchError {
#[display(fmt = "Connection shutdown timeout")]
DisconnectTimeout,
/// Handler dropped payload before reading EOF.
#[display(fmt = "Handler dropped payload before reading EOF")]
HandlerDroppedPayload,
/// Internal error.
#[display(fmt = "Internal error")]
InternalError,
@ -375,14 +374,12 @@ pub enum DispatchError {
impl StdError for DispatchError {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
// TODO: error source extraction?
DispatchError::Service(_res) => None,
DispatchError::Body(err) => Some(&**err),
DispatchError::Io(err) => Some(err),
DispatchError::Parse(err) => Some(err),
#[cfg(feature = "http2")]
DispatchError::H2(err) => Some(err),
_ => None,
}
}
@ -390,7 +387,6 @@ impl StdError for DispatchError {
/// A set of error that can occur during parsing content type.
#[derive(Debug, Display, Error)]
#[cfg_attr(test, derive(PartialEq, Eq))]
#[non_exhaustive]
pub enum ContentTypeError {
/// Can not parse content type
@ -403,13 +399,27 @@ pub enum ContentTypeError {
}
#[cfg(test)]
mod tests {
use std::io;
use http::{Error as HttpError, StatusCode};
mod content_type_test_impls {
use super::*;
impl std::cmp::PartialEq for ContentTypeError {
fn eq(&self, other: &Self) -> bool {
match self {
Self::ParseError => matches!(other, ContentTypeError::ParseError),
Self::UnknownEncoding => {
matches!(other, ContentTypeError::UnknownEncoding)
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use http::{Error as HttpError, StatusCode};
use std::io;
#[test]
fn test_into_response() {
let resp: Response<BoxBody> = ParseError::Incomplete.into();
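For context on how parse failures surface to clients, the conversion exercised by this test turns a `ParseError` into a `400` response; a short sketch assuming the `From<ParseError> for Response<BoxBody>` impl referenced earlier in this file:

use actix_http::{body::BoxBody, error::ParseError, Response, StatusCode};

fn main() {
    let resp: Response<BoxBody> = ParseError::Incomplete.into();
    // Malformed or incomplete requests are answered with 400 Bad Request.
    assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
}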

View File

@ -1,30 +1,9 @@
use std::{
any::{Any, TypeId},
collections::HashMap,
fmt,
hash::{BuildHasherDefault, Hasher},
};
/// A hasher for `TypeId`s that takes advantage of its known characteristics.
///
/// Author of `anymap` crate has done research on the topic:
/// https://github.com/chris-morgan/anymap/blob/2e9a5704/src/lib.rs#L599
#[derive(Debug, Default)]
struct NoOpHasher(u64);
impl Hasher for NoOpHasher {
fn write(&mut self, _bytes: &[u8]) {
unimplemented!("This NoOpHasher can only handle u64s")
}
fn write_u64(&mut self, i: u64) {
self.0 = i;
}
fn finish(&self) -> u64 {
self.0
}
}
use ahash::AHashMap;
/// A type map for request extensions.
///
@ -32,7 +11,7 @@ impl Hasher for NoOpHasher {
#[derive(Default)]
pub struct Extensions {
/// Use AHasher with a std HashMap with for faster lookups on the small `TypeId` keys.
map: HashMap<TypeId, Box<dyn Any>, BuildHasherDefault<NoOpHasher>>,
map: AHashMap<TypeId, Box<dyn Any>>,
}
impl Extensions {
@ -40,7 +19,7 @@ impl Extensions {
#[inline]
pub fn new() -> Extensions {
Extensions {
map: HashMap::default(),
map: AHashMap::new(),
}
}
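Whichever hashing strategy is used (the no-op `TypeId` hasher on one side, `ahash` on the other), the public behaviour of `Extensions` is a by-type map; a quick usage sketch:

use actix_http::Extensions;

fn main() {
    let mut ext = Extensions::new();
    ext.insert(42u32);
    ext.insert("state");
    // Values are stored and looked up by their type.
    assert_eq!(ext.get::<u32>(), Some(&42u32));
    assert_eq!(ext.get::<&str>(), Some(&"state"));
    assert!(ext.get::<bool>().is_none());
}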

View File

@ -1,7 +1,6 @@
use std::{io, task::Poll};
use bytes::{Buf as _, Bytes, BytesMut};
use tracing::{debug, trace};
macro_rules! byte (
($rdr:ident) => ({
@ -15,7 +14,7 @@ macro_rules! byte (
})
);
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, PartialEq, Clone)]
pub(super) enum ChunkedState {
Size,
SizeLws,
@ -71,13 +70,13 @@ impl ChunkedState {
match size.checked_mul(radix) {
Some(n) => {
*size = n;
*size = n as u64;
*size += rem as u64;
Poll::Ready(Ok(ChunkedState::Size))
}
None => {
debug!("chunk size would overflow u64");
log::debug!("chunk size would overflow u64");
Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size line: Size is too big",
@ -125,7 +124,7 @@ impl ChunkedState {
rem: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<Result<ChunkedState, io::Error>> {
trace!("Chunked read, remaining={:?}", rem);
log::trace!("Chunked read, remaining={:?}", rem);
let len = rdr.len() as u64;
if len == 0 {

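The size-accumulation step above uses `checked_mul` so a chunk size that would overflow `u64` is rejected instead of wrapping. A standalone illustration of that guard (the helper name is invented for this sketch):

fn push_hex_digit(size: u64, digit: u8) -> Option<u64> {
    // Same guard as above: fail instead of wrapping when the size would overflow u64.
    let rem = (digit as char).to_digit(16)? as u64;
    size.checked_mul(16)?.checked_add(rem)
}

fn main() {
    assert_eq!(push_hex_digit(0xF, b'f'), Some(0xFF));
    assert_eq!(push_hex_digit(u64::MAX, b'0'), None);
}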
View File

@ -1,9 +1,9 @@
use std::{fmt, io};
use std::io;
use actix_codec::{Decoder, Encoder};
use bitflags::bitflags;
use bytes::{Bytes, BytesMut};
use http::{Method, Version};
use tokio_util::codec::{Decoder, Encoder};
use super::{
decoder::{self, PayloadDecoder, PayloadItem, PayloadType},
@ -17,9 +17,9 @@ use crate::{
bitflags! {
struct Flags: u8 {
const HEAD = 0b0000_0001;
const KEEP_ALIVE_ENABLED = 0b0000_1000;
const STREAM = 0b0001_0000;
const HEAD = 0b0000_0001;
const KEEPALIVE_ENABLED = 0b0000_1000;
const STREAM = 0b0001_0000;
}
}
@ -38,7 +38,7 @@ struct ClientCodecInner {
decoder: decoder::MessageDecoder<ResponseHead>,
payload: Option<PayloadDecoder>,
version: Version,
conn_type: ConnectionType,
ctype: ConnectionType,
// encoder part
flags: Flags,
@ -51,32 +51,23 @@ impl Default for ClientCodec {
}
}
impl fmt::Debug for ClientCodec {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("h1::ClientCodec")
.field("flags", &self.inner.flags)
.finish_non_exhaustive()
}
}
impl ClientCodec {
/// Create HTTP/1 codec.
///
/// The `keepalive_enabled` flag controls how the response `Connection` header is generated.
pub fn new(config: ServiceConfig) -> Self {
let flags = if config.keep_alive().enabled() {
Flags::KEEP_ALIVE_ENABLED
let flags = if config.keep_alive_enabled() {
Flags::KEEPALIVE_ENABLED
} else {
Flags::empty()
};
ClientCodec {
inner: ClientCodecInner {
config,
decoder: decoder::MessageDecoder::default(),
payload: None,
version: Version::HTTP_11,
conn_type: ConnectionType::Close,
ctype: ConnectionType::Close,
flags,
encoder: encoder::MessageEncoder::default(),
@ -86,12 +77,12 @@ impl ClientCodec {
/// Check if request is upgrade
pub fn upgrade(&self) -> bool {
self.inner.conn_type == ConnectionType::Upgrade
self.inner.ctype == ConnectionType::Upgrade
}
/// Check if last response is keep-alive
pub fn keep_alive(&self) -> bool {
self.inner.conn_type == ConnectionType::KeepAlive
pub fn keepalive(&self) -> bool {
self.inner.ctype == ConnectionType::KeepAlive
}
/// Check last request's message type
@ -113,8 +104,8 @@ impl ClientCodec {
impl ClientPayloadCodec {
/// Check if last response is keep-alive
pub fn keep_alive(&self) -> bool {
self.inner.conn_type == ConnectionType::KeepAlive
pub fn keepalive(&self) -> bool {
self.inner.ctype == ConnectionType::KeepAlive
}
/// Transform payload codec to a message codec
@ -128,18 +119,15 @@ impl Decoder for ClientCodec {
type Error = ParseError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
debug_assert!(
self.inner.payload.is_none(),
"Payload decoder should not be set"
);
debug_assert!(!self.inner.payload.is_some(), "Payload decoder is set");
if let Some((req, payload)) = self.inner.decoder.decode(src)? {
if let Some(conn_type) = req.conn_type() {
if let Some(ctype) = req.conn_type() {
// do not use peer's keep-alive
self.inner.conn_type = if conn_type == ConnectionType::KeepAlive {
self.inner.conn_type
self.inner.ctype = if ctype == ConnectionType::KeepAlive {
self.inner.ctype
} else {
conn_type
ctype
};
}
@ -204,9 +192,9 @@ impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
.set(Flags::HEAD, head.as_ref().method == Method::HEAD);
// connection status
inner.conn_type = match head.as_ref().connection_type() {
inner.ctype = match head.as_ref().connection_type() {
ConnectionType::KeepAlive => {
if inner.flags.contains(Flags::KEEP_ALIVE_ENABLED) {
if inner.flags.contains(Flags::KEEPALIVE_ENABLED) {
ConnectionType::KeepAlive
} else {
ConnectionType::Close
@ -223,7 +211,7 @@ impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
false,
inner.version,
length,
inner.conn_type,
inner.ctype,
&inner.config,
)?;
}

View File

@ -1,9 +1,9 @@
use std::{fmt, io};
use actix_codec::{Decoder, Encoder};
use bitflags::bitflags;
use bytes::BytesMut;
use http::{Method, Version};
use tokio_util::codec::{Decoder, Encoder};
use super::{
decoder::{self, PayloadDecoder, PayloadItem, PayloadType},
@ -15,9 +15,9 @@ use crate::{
bitflags! {
struct Flags: u8 {
const HEAD = 0b0000_0001;
const KEEP_ALIVE_ENABLED = 0b0000_0010;
const STREAM = 0b0000_0100;
const HEAD = 0b0000_0001;
const KEEPALIVE_ENABLED = 0b0000_0010;
const STREAM = 0b0000_0100;
}
}
@ -42,9 +42,7 @@ impl Default for Codec {
impl fmt::Debug for Codec {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("h1::Codec")
.field("flags", &self.flags)
.finish_non_exhaustive()
write!(f, "h1::Codec({:?})", self.flags)
}
}
@ -53,8 +51,8 @@ impl Codec {
///
/// The `keepalive_enabled` flag controls how the response `Connection` header is generated.
pub fn new(config: ServiceConfig) -> Self {
let flags = if config.keep_alive().enabled() {
Flags::KEEP_ALIVE_ENABLED
let flags = if config.keep_alive_enabled() {
Flags::KEEPALIVE_ENABLED
} else {
Flags::empty()
};
@ -78,14 +76,14 @@ impl Codec {
/// Check if last response is keep-alive.
#[inline]
pub fn keep_alive(&self) -> bool {
pub fn keepalive(&self) -> bool {
self.conn_type == ConnectionType::KeepAlive
}
/// Check if keep-alive enabled on server level.
#[inline]
pub fn keep_alive_enabled(&self) -> bool {
self.flags.contains(Flags::KEEP_ALIVE_ENABLED)
pub fn keepalive_enabled(&self) -> bool {
self.flags.contains(Flags::KEEPALIVE_ENABLED)
}
/// Check last request's message type.
@ -125,13 +123,11 @@ impl Decoder for Codec {
self.flags.set(Flags::HEAD, head.method == Method::HEAD);
self.version = head.version;
self.conn_type = head.connection_type();
if self.conn_type == ConnectionType::KeepAlive
&& !self.flags.contains(Flags::KEEP_ALIVE_ENABLED)
&& !self.flags.contains(Flags::KEEPALIVE_ENABLED)
{
self.conn_type = ConnectionType::Close
}
match payload {
PayloadType::None => self.payload = None,
PayloadType::Payload(pl) => self.payload = Some(pl),
@ -183,11 +179,9 @@ impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
&self.config,
)?;
}
Message::Chunk(Some(bytes)) => {
self.encoder.encode_chunk(bytes.as_ref(), dst)?;
}
Message::Chunk(None) => {
self.encoder.encode_eof(dst)?;
}

View File

@ -6,7 +6,7 @@ use http::{
header::{self, HeaderName, HeaderValue},
Method, StatusCode, Uri, Version,
};
use tracing::{debug, error, trace};
use log::{debug, error, trace};
use super::chunked::ChunkedState;
use crate::{error::ParseError, header::HeaderMap, ConnectionType, Request, ResponseHead};
@ -46,23 +46,6 @@ pub(crate) enum PayloadLength {
None,
}
impl PayloadLength {
/// Returns true if variant is `None`.
fn is_none(&self) -> bool {
matches!(self, Self::None)
}
/// Returns true if the variant represents a zero-length (not none) payload.
fn is_zero(&self) -> bool {
matches!(
self,
PayloadLength::Payload(PayloadType::Payload(PayloadDecoder {
kind: Kind::Length(0)
}))
)
}
}
pub(crate) trait MessageType: Sized {
fn set_connection_type(&mut self, conn_type: Option<ConnectionType>);
@ -76,7 +59,6 @@ pub(crate) trait MessageType: Sized {
&mut self,
slice: &Bytes,
raw_headers: &[HeaderIndex],
version: Version,
) -> Result<PayloadLength, ParseError> {
let mut ka = None;
let mut has_upgrade_websocket = false;
@ -105,23 +87,21 @@ pub(crate) trait MessageType: Sized {
return Err(ParseError::Header);
}
header::CONTENT_LENGTH => match value.to_str().map(str::trim) {
Ok(val) if val.starts_with('+') => {
debug!("illegal Content-Length: {:?}", val);
header::CONTENT_LENGTH => match value.to_str() {
Ok(s) if s.trim().starts_with('+') => {
debug!("illegal Content-Length: {:?}", s);
return Err(ParseError::Header);
}
Ok(val) => {
if let Ok(len) = val.parse::<u64>() {
// accept 0 lengths here and remove them in `decode` after all
// headers have been processed to prevent request smuggling issues
content_length = Some(len);
Ok(s) => {
if let Ok(len) = s.parse::<u64>() {
if len != 0 {
content_length = Some(len);
}
} else {
debug!("illegal Content-Length: {:?}", val);
debug!("illegal Content-Length: {:?}", s);
return Err(ParseError::Header);
}
}
Err(_) => {
debug!("illegal Content-Length: {:?}", value);
return Err(ParseError::Header);
@ -134,23 +114,22 @@ pub(crate) trait MessageType: Sized {
return Err(ParseError::Header);
}
header::TRANSFER_ENCODING if version == Version::HTTP_11 => {
header::TRANSFER_ENCODING => {
seen_te = true;
if let Ok(val) = value.to_str().map(str::trim) {
if val.eq_ignore_ascii_case("chunked") {
if let Ok(s) = value.to_str().map(str::trim) {
if s.eq_ignore_ascii_case("chunked") {
chunked = true;
} else if val.eq_ignore_ascii_case("identity") {
} else if s.eq_ignore_ascii_case("identity") {
// allow silently since multiple TE headers are already checked
} else {
debug!("illegal Transfer-Encoding: {:?}", val);
debug!("illegal Transfer-Encoding: {:?}", s);
return Err(ParseError::Header);
}
} else {
return Err(ParseError::Header);
}
}
// connection keep-alive state
header::CONNECTION => {
ka = if let Ok(conn) = value.to_str().map(str::trim) {
@ -167,7 +146,6 @@ pub(crate) trait MessageType: Sized {
None
};
}
header::UPGRADE => {
if let Ok(val) = value.to_str().map(str::trim) {
if val.eq_ignore_ascii_case("websocket") {
@ -175,23 +153,19 @@ pub(crate) trait MessageType: Sized {
}
}
}
header::EXPECT => {
let bytes = value.as_bytes();
if bytes.len() >= 4 && &bytes[0..4] == b"100-" {
expect = true;
}
}
_ => {}
}
headers.append(name, value);
}
}
self.set_connection_type(ka);
if expect {
self.set_expect()
}
@ -235,16 +209,15 @@ impl MessageType for Request {
let (len, method, uri, ver, h_len) = {
// SAFETY:
// Create an uninitialized array of `MaybeUninit`. The `assume_init` is safe because the
// type we are claiming to have initialized here is a bunch of `MaybeUninit`s, which
// do not require initialization.
// Create an uninitialized array of `MaybeUninit`. The `assume_init` is
// safe because the type we are claiming to have initialized here is a
// bunch of `MaybeUninit`s, which do not require initialization.
let mut parsed = unsafe {
MaybeUninit::<[MaybeUninit<httparse::Header<'_>>; MAX_HEADERS]>::uninit()
.assume_init()
};
let mut req = httparse::Request::new(&mut []);
match req.parse_with_uninit_headers(src, &mut parsed)? {
httparse::Status::Complete(len) => {
let method = Method::from_bytes(req.method.unwrap().as_bytes())
@ -259,7 +232,6 @@ impl MessageType for Request {
(len, method, uri, version, req.headers.len())
}
httparse::Status::Partial => {
return if src.len() >= MAX_BUFFER_SIZE {
trace!("MAX_BUFFER_SIZE unprocessed data reached, closing");
@ -275,22 +247,7 @@ impl MessageType for Request {
let mut msg = Request::new();
// convert headers
let mut length =
msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len], ver)?;
// disallow HTTP/1.0 POST requests that do not contain a Content-Length headers
// see https://datatracker.ietf.org/doc/html/rfc1945#section-7.2.2
if ver == Version::HTTP_10 && method == Method::POST && length.is_none() {
debug!("no Content-Length specified for HTTP/1.0 POST request");
return Err(ParseError::Header);
}
// Remove CL value if 0 now that all headers and HTTP/1.0 special cases are processed.
// Protects against some request smuggling attacks.
// See https://github.com/actix/actix-web/issues/2767.
if length.is_zero() {
length = PayloadLength::None;
}
let length = msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len])?;
// payload decoder
let decoder = match length {
@ -334,35 +291,22 @@ impl MessageType for ResponseHead {
let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
let (len, ver, status, h_len) = {
// SAFETY:
// Create an uninitialized array of `MaybeUninit`. The `assume_init` is safe because the
// type we are claiming to have initialized here is a bunch of `MaybeUninit`s, which
// do not require initialization.
let mut parsed = unsafe {
MaybeUninit::<[MaybeUninit<httparse::Header<'_>>; MAX_HEADERS]>::uninit()
.assume_init()
};
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
let mut res = httparse::Response::new(&mut []);
let mut config = httparse::ParserConfig::default();
config.allow_spaces_after_header_name_in_responses(true);
match config.parse_response_with_uninit_headers(&mut res, src, &mut parsed)? {
let mut res = httparse::Response::new(&mut parsed);
match res.parse(src)? {
httparse::Status::Complete(len) => {
let version = if res.version.unwrap() == 1 {
Version::HTTP_11
} else {
Version::HTTP_10
};
let status = StatusCode::from_u16(res.code.unwrap())
.map_err(|_| ParseError::Status)?;
HeaderIndex::record(src, res.headers, &mut headers);
(len, version, status, res.headers.len())
}
httparse::Status::Partial => {
return if src.len() >= MAX_BUFFER_SIZE {
error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
@ -378,15 +322,7 @@ impl MessageType for ResponseHead {
msg.version = ver;
// convert headers
let mut length =
msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len], ver)?;
// Remove CL value if 0 now that all headers and HTTP/1.0 special cases are processed.
// Protects against some request smuggling attacks.
// See https://github.com/actix/actix-web/issues/2767.
if length.is_zero() {
length = PayloadLength::None;
}
let length = msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len])?;
// message payload
let decoder = if let PayloadLength::Payload(pl) = length {
@ -422,6 +358,9 @@ pub(crate) const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex {
pub(crate) const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] =
[EMPTY_HEADER_INDEX; MAX_HEADERS];
pub(crate) const EMPTY_HEADER_ARRAY: [httparse::Header<'static>; MAX_HEADERS] =
[httparse::EMPTY_HEADER; MAX_HEADERS];
impl HeaderIndex {
pub(crate) fn record(
bytes: &[u8],
@ -440,64 +379,61 @@ impl HeaderIndex {
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
/// Chunk type yielded while decoding a payload.
#[derive(Debug, Clone, PartialEq)]
/// Http payload item
pub enum PayloadItem {
Chunk(Bytes),
Eof,
}
/// Decoder that can handle different payload types.
/// Decoders to handle different Transfer-Encodings.
///
/// If a message body does not use `Transfer-Encoding`, it should include a `Content-Length`.
#[derive(Debug, Clone, PartialEq, Eq)]
/// If a message body does not include a Transfer-Encoding, it *should*
/// include a Content-Length header.
#[derive(Debug, Clone, PartialEq)]
pub struct PayloadDecoder {
kind: Kind,
}
impl PayloadDecoder {
/// Constructs a fixed-length payload decoder.
pub fn length(x: u64) -> PayloadDecoder {
PayloadDecoder {
kind: Kind::Length(x),
}
}
/// Constructs a chunked encoding decoder.
pub fn chunked() -> PayloadDecoder {
PayloadDecoder {
kind: Kind::Chunked(ChunkedState::Size, 0),
}
}
/// Creates an decoder that yields chunks until the stream returns EOF.
pub fn eof() -> PayloadDecoder {
PayloadDecoder { kind: Kind::Eof }
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone, PartialEq)]
enum Kind {
/// A reader used when a `Content-Length` header is passed with a positive integer.
/// A Reader used when a Content-Length header is passed with a positive
/// integer.
Length(u64),
/// A reader used when `Transfer-Encoding` is `chunked`.
/// A Reader used when Transfer-Encoding is `chunked`.
Chunked(ChunkedState, u64),
/// A reader used for responses that don't indicate a length or chunked.
/// A Reader used for responses that don't indicate a length or chunked.
///
/// Note: This should only be used for `Response`s. It is illegal for a `Request` to omit both
/// `Content-Length` and `Transfer-Encoding: chunked`, as explained in [RFC 7230 §3.3.3]:
/// Note: This should only used for `Response`s. It is illegal for a
/// `Request` to be made with both `Content-Length` and
/// `Transfer-Encoding: chunked` missing, as explained from the spec:
///
/// > If a Transfer-Encoding header field is present in a response and the chunked transfer
/// > coding is not the final encoding, the message body length is determined by reading the
/// > connection until it is closed by the server. If a Transfer-Encoding header field is
/// > present in a request and the chunked transfer coding is not the final encoding, the
/// > message body length cannot be determined reliably; the server MUST respond with the 400
/// > (Bad Request) status code and then close the connection.
///
/// [RFC 7230 §3.3.3]: https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.3
/// > If a Transfer-Encoding header field is present in a response and
/// > the chunked transfer coding is not the final encoding, the
/// > message body length is determined by reading the connection until
/// > it is closed by the server. If a Transfer-Encoding header field
/// > is present in a request and the chunked transfer coding is not
/// > the final encoding, the message body length cannot be determined
/// > reliably; the server MUST respond with the 400 (Bad Request)
/// > status code and then close the connection.
Eof,
}
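To restate the three kinds in plain terms, here is a rough, non-crate sketch of how a server picks the framing for a request body; read-until-EOF is deliberately absent because, per the quoted RFC text, it only applies to responses, and a zero-length `Content-Length` is treated as no body, echoing the zero-length handling discussed above:

// Illustrative only; not the crate's API.
enum Framing {
    Length(u64), // fixed Content-Length
    Chunked,     // Transfer-Encoding: chunked
    None,        // no message body
}

fn request_framing(chunked: bool, content_length: Option<u64>) -> Framing {
    match (chunked, content_length) {
        (true, _) => Framing::Chunked,
        (false, Some(len)) if len > 0 => Framing::Length(len),
        (false, _) => Framing::None,
    }
}

fn main() {
    assert!(matches!(request_framing(false, Some(4)), Framing::Length(4)));
    assert!(matches!(request_framing(true, None), Framing::Chunked));
    assert!(matches!(request_framing(false, Some(0)), Framing::None));
}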
@ -527,7 +463,6 @@ impl Decoder for PayloadDecoder {
Ok(Some(PayloadItem::Chunk(buf)))
}
}
Kind::Chunked(ref mut state, ref mut size) => {
loop {
let mut buf = None;
@ -553,7 +488,6 @@ impl Decoder for PayloadDecoder {
}
}
}
Kind::Eof => {
if src.is_empty() {
Ok(None)
@ -655,100 +589,14 @@ mod tests {
}
#[test]
fn parse_h09_reject() {
let mut buf = BytesMut::from(
"GET /test1 HTTP/0.9\r\n\
\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
reader.decode(&mut buf).unwrap_err();
let mut buf = BytesMut::from(
"POST /test2 HTTP/0.9\r\n\
Content-Length: 3\r\n\
\r\n
abc",
);
let mut reader = MessageDecoder::<Request>::default();
reader.decode(&mut buf).unwrap_err();
}
#[test]
fn parse_h10_get() {
let mut buf = BytesMut::from(
"GET /test1 HTTP/1.0\r\n\
\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
assert_eq!(req.version(), Version::HTTP_10);
assert_eq!(*req.method(), Method::GET);
assert_eq!(req.path(), "/test1");
let mut buf = BytesMut::from(
"GET /test2 HTTP/1.0\r\n\
Content-Length: 0\r\n\
\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
assert_eq!(req.version(), Version::HTTP_10);
assert_eq!(*req.method(), Method::GET);
assert_eq!(req.path(), "/test2");
let mut buf = BytesMut::from(
"GET /test3 HTTP/1.0\r\n\
Content-Length: 3\r\n\
\r\n
abc",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
assert_eq!(req.version(), Version::HTTP_10);
assert_eq!(*req.method(), Method::GET);
assert_eq!(req.path(), "/test3");
}
#[test]
fn parse_h10_post() {
let mut buf = BytesMut::from(
"POST /test1 HTTP/1.0\r\n\
Content-Length: 3\r\n\
\r\n\
abc",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
assert_eq!(req.version(), Version::HTTP_10);
assert_eq!(*req.method(), Method::POST);
assert_eq!(req.path(), "/test1");
let mut buf = BytesMut::from(
"POST /test2 HTTP/1.0\r\n\
Content-Length: 0\r\n\
\r\n",
);
fn test_parse_post() {
let mut buf = BytesMut::from("POST /test2 HTTP/1.0\r\n\r\n");
let mut reader = MessageDecoder::<Request>::default();
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
assert_eq!(req.version(), Version::HTTP_10);
assert_eq!(*req.method(), Method::POST);
assert_eq!(req.path(), "/test2");
let mut buf = BytesMut::from(
"POST /test3 HTTP/1.0\r\n\
\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let err = reader.decode(&mut buf).unwrap_err();
assert!(err.to_string().contains("Header"))
}
#[test]
@ -844,98 +692,121 @@ mod tests {
#[test]
fn test_conn_default_1_0() {
let req = parse_ready!(&mut BytesMut::from("GET /test HTTP/1.0\r\n\r\n"));
let mut buf = BytesMut::from("GET /test HTTP/1.0\r\n\r\n");
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::Close);
}
#[test]
fn test_conn_default_1_1() {
let req = parse_ready!(&mut BytesMut::from("GET /test HTTP/1.1\r\n\r\n"));
let mut buf = BytesMut::from("GET /test HTTP/1.1\r\n\r\n");
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive);
}
#[test]
fn test_conn_close() {
let req = parse_ready!(&mut BytesMut::from(
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
connection: close\r\n\r\n",
));
);
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::Close);
let req = parse_ready!(&mut BytesMut::from(
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
connection: Close\r\n\r\n",
));
);
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::Close);
}
#[test]
fn test_conn_close_1_0() {
let req = parse_ready!(&mut BytesMut::from(
let mut buf = BytesMut::from(
"GET /test HTTP/1.0\r\n\
connection: close\r\n\r\n",
));
);
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::Close);
}
#[test]
fn test_conn_keep_alive_1_0() {
let req = parse_ready!(&mut BytesMut::from(
let mut buf = BytesMut::from(
"GET /test HTTP/1.0\r\n\
connection: keep-alive\r\n\r\n",
));
);
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive);
let req = parse_ready!(&mut BytesMut::from(
let mut buf = BytesMut::from(
"GET /test HTTP/1.0\r\n\
connection: Keep-Alive\r\n\r\n",
));
);
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive);
}
#[test]
fn test_conn_keep_alive_1_1() {
let req = parse_ready!(&mut BytesMut::from(
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
connection: keep-alive\r\n\r\n",
));
);
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive);
}
#[test]
fn test_conn_other_1_0() {
let req = parse_ready!(&mut BytesMut::from(
let mut buf = BytesMut::from(
"GET /test HTTP/1.0\r\n\
connection: other\r\n\r\n",
));
);
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::Close);
}
#[test]
fn test_conn_other_1_1() {
let req = parse_ready!(&mut BytesMut::from(
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
connection: other\r\n\r\n",
));
);
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive);
}
#[test]
fn test_conn_upgrade() {
let req = parse_ready!(&mut BytesMut::from(
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
upgrade: websockets\r\n\
connection: upgrade\r\n\r\n",
));
);
let req = parse_ready!(&mut buf);
assert!(req.upgrade());
assert_eq!(req.head().connection_type(), ConnectionType::Upgrade);
let req = parse_ready!(&mut BytesMut::from(
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
upgrade: Websockets\r\n\
connection: Upgrade\r\n\r\n",
));
);
let req = parse_ready!(&mut buf);
assert!(req.upgrade());
assert_eq!(req.head().connection_type(), ConnectionType::Upgrade);
@ -943,62 +814,59 @@ mod tests {
#[test]
fn test_conn_upgrade_connect_method() {
let req = parse_ready!(&mut BytesMut::from(
let mut buf = BytesMut::from(
"CONNECT /test HTTP/1.1\r\n\
content-type: text/plain\r\n\r\n",
));
);
let req = parse_ready!(&mut buf);
assert!(req.upgrade());
}
#[test]
fn test_headers_bad_content_length() {
// string CL
expect_parse_err!(&mut BytesMut::from(
fn test_headers_content_length_err_1() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
content-length: line\r\n\r\n",
));
);
// negative CL
expect_parse_err!(&mut BytesMut::from(
"GET /test HTTP/1.1\r\n\
content-length: -1\r\n\r\n",
));
expect_parse_err!(&mut buf)
}
#[test]
fn octal_ish_cl_parsed_as_decimal() {
fn test_headers_content_length_err_2() {
let mut buf = BytesMut::from(
"POST /test HTTP/1.1\r\n\
content-length: 011\r\n\r\n",
"GET /test HTTP/1.1\r\n\
content-length: -1\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (_req, pl) = reader.decode(&mut buf).unwrap().unwrap();
assert!(matches!(
pl,
PayloadType::Payload(pl) if pl == PayloadDecoder::length(11)
));
expect_parse_err!(&mut buf);
}
#[test]
fn test_invalid_header() {
expect_parse_err!(&mut BytesMut::from(
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
test line\r\n\r\n",
));
);
expect_parse_err!(&mut buf);
}
#[test]
fn test_invalid_name() {
expect_parse_err!(&mut BytesMut::from(
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
test[]: line\r\n\r\n",
));
);
expect_parse_err!(&mut buf);
}
#[test]
fn test_http_request_bad_status_line() {
expect_parse_err!(&mut BytesMut::from("getpath \r\n\r\n"));
let mut buf = BytesMut::from("getpath \r\n\r\n");
expect_parse_err!(&mut buf);
}
#[test]
@ -1038,10 +906,11 @@ mod tests {
#[test]
fn test_http_request_parser_utf8() {
let req = parse_ready!(&mut BytesMut::from(
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
x-test: тест\r\n\r\n",
));
);
let req = parse_ready!(&mut buf);
assert_eq!(
req.headers().get("x-test").unwrap().as_bytes(),
@ -1051,18 +920,24 @@ mod tests {
#[test]
fn test_http_request_parser_two_slashes() {
let req = parse_ready!(&mut BytesMut::from("GET //path HTTP/1.1\r\n\r\n"));
let mut buf = BytesMut::from("GET //path HTTP/1.1\r\n\r\n");
let req = parse_ready!(&mut buf);
assert_eq!(req.path(), "//path");
}
#[test]
fn test_http_request_parser_bad_method() {
expect_parse_err!(&mut BytesMut::from("!12%()+=~$ /get HTTP/1.1\r\n\r\n"));
let mut buf = BytesMut::from("!12%()+=~$ /get HTTP/1.1\r\n\r\n");
expect_parse_err!(&mut buf);
}
#[test]
fn test_http_request_parser_bad_version() {
expect_parse_err!(&mut BytesMut::from("GET //get HT/11\r\n\r\n"));
let mut buf = BytesMut::from("GET //get HT/11\r\n\r\n");
expect_parse_err!(&mut buf);
}
#[test]
@ -1079,66 +954,29 @@ mod tests {
#[test]
fn hrs_multiple_content_length() {
expect_parse_err!(&mut BytesMut::from(
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Content-Length: 4\r\n\
Content-Length: 2\r\n\
\r\n\
abcd",
));
);
expect_parse_err!(&mut BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Content-Length: 0\r\n\
Content-Length: 2\r\n\
\r\n\
ab",
));
expect_parse_err!(&mut buf);
}
#[test]
fn hrs_content_length_plus() {
expect_parse_err!(&mut BytesMut::from(
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Content-Length: +3\r\n\
\r\n\
000",
));
}
#[test]
fn hrs_te_http10() {
// in HTTP/1.0, transfer encoding is ignored, so the request must contain a Content-Length header instead
expect_parse_err!(&mut BytesMut::from(
"POST / HTTP/1.0\r\n\
Host: example.com\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
3\r\n\
aaa\r\n\
0\r\n\
",
));
}
#[test]
fn hrs_cl_and_te_http10() {
// in HTTP/1.0 transfer encoding is simply ignored so it's fine to have both
let mut buf = BytesMut::from(
"GET / HTTP/1.0\r\n\
Host: example.com\r\n\
Content-Length: 3\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
000",
);
parse_ready!(&mut buf);
expect_parse_err!(&mut buf);
}
#[test]



@ -1,976 +0,0 @@
use std::{future::Future, str, task::Poll, time::Duration};
use actix_rt::{pin, time::sleep};
use actix_service::fn_service;
use actix_utils::future::{ready, Ready};
use bytes::Bytes;
use futures_util::future::lazy;
use actix_codec::Framed;
use actix_service::Service;
use bytes::{Buf, BytesMut};
use super::dispatcher::{Dispatcher, DispatcherState, DispatcherStateProj, Flags};
use crate::{
body::MessageBody,
config::ServiceConfig,
h1::{Codec, ExpectHandler, UpgradeHandler},
service::HttpFlow,
test::{TestBuffer, TestSeqBuffer},
Error, HttpMessage, KeepAlive, Method, OnConnectData, Request, Response, StatusCode,
};
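// Finds `needle` in `haystack` starting at offset `from`; the returned position is relative to `from`.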
fn find_slice(haystack: &[u8], needle: &[u8], from: usize) -> Option<usize> {
memchr::memmem::find(&haystack[from..], needle)
}
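// Overwrites every `date` header value in a serialized response with a fixed timestamp so that
// assertions against the write buffer are deterministic.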
fn stabilize_date_header(payload: &mut [u8]) {
let mut from = 0;
while let Some(pos) = find_slice(payload, b"date", from) {
payload[(from + pos)..(from + pos + 35)]
.copy_from_slice(b"date: Thu, 01 Jan 1970 12:34:56 UTC");
from += 35;
}
}
fn ok_service() -> impl Service<Request, Response = Response<impl MessageBody>, Error = Error> {
status_service(StatusCode::OK)
}
fn status_service(
status: StatusCode,
) -> impl Service<Request, Response = Response<impl MessageBody>, Error = Error> {
fn_service(move |_req: Request| ready(Ok::<_, Error>(Response::new(status))))
}
fn echo_path_service(
) -> impl Service<Request, Response = Response<impl MessageBody>, Error = Error> {
fn_service(|req: Request| {
let path = req.path().as_bytes();
ready(Ok::<_, Error>(
Response::ok().set_body(Bytes::copy_from_slice(path)),
))
})
}
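// Service that takes the request payload and drops it without reading it, responding with a fixed body.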
fn drop_payload_service(
) -> impl Service<Request, Response = Response<&'static str>, Error = Error> {
fn_service(|mut req: Request| async move {
let _ = req.take_payload();
Ok::<_, Error>(Response::with_body(StatusCode::OK, "payload dropped"))
})
}
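// Service that reads the entire request payload and echoes it back as the response body.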
fn echo_payload_service() -> impl Service<Request, Response = Response<Bytes>, Error = Error> {
fn_service(|mut req: Request| {
Box::pin(async move {
use futures_util::StreamExt as _;
let mut pl = req.take_payload();
let mut body = BytesMut::new();
while let Some(chunk) = pl.next().await {
body.extend_from_slice(chunk.unwrap().chunk())
}
Ok::<_, Error>(Response::ok().set_body(body.freeze()))
})
})
}
#[actix_rt::test]
async fn late_request() {
let mut buf = TestBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(ok_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Ready(_) => panic!("first poll should not be ready"),
Poll::Pending => {}
}
// polls: initial
assert_eq!(h1.poll_count, 1);
buf.extend_read_buf("GET /abcd HTTP/1.1\r\nConnection: close\r\n\r\n");
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("second poll should not be pending"),
Poll::Ready(res) => assert!(res.is_ok()),
}
// polls: initial pending => handle req => shutdown
assert_eq!(h1.poll_count, 3);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 0\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn oneshot_connection() {
let buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n");
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_ok()),
}
// polls: initial => shutdown
assert_eq!(h1.poll_count, 2);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = http_msg(
r"
HTTP/1.1 200 OK
content-length: 5
connection: close
date: Thu, 01 Jan 1970 12:34:56 UTC
/abcd
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
})
.await;
}
#[actix_rt::test]
async fn keep_alive_timeout() {
let buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n");
let cfg = ServiceConfig::new(
KeepAlive::Timeout(Duration::from_millis(200)),
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
assert!(
h1.as_mut().poll(cx).is_pending(),
"keep-alive should prevent poll from resolving"
);
// polls: initial
assert_eq!(h1.poll_count, 1);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
// sleep slightly longer than keep-alive timeout
sleep(Duration::from_millis(250)).await;
lazy(|cx| {
assert!(
h1.as_mut().poll(cx).is_ready(),
"keep-alive should have resolved",
);
// polls: initial => keep-alive wake-up => shutdown
assert_eq!(h1.poll_count, 2);
if let DispatcherStateProj::Normal { inner } = h1.project().inner.project() {
// connection closed
assert!(inner.flags.contains(Flags::SHUTDOWN));
assert!(inner.flags.contains(Flags::WRITE_DISCONNECT));
// and nothing added to write buffer
assert!(buf.write_buf_slice().is_empty());
}
})
.await;
}
#[actix_rt::test]
async fn keep_alive_follow_up_req() {
let mut buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n");
let cfg = ServiceConfig::new(
KeepAlive::Timeout(Duration::from_millis(500)),
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
assert!(
h1.as_mut().poll(cx).is_pending(),
"keep-alive should prevent poll from resolving"
);
// polls: initial
assert_eq!(h1.poll_count, 1);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
// sleep for less than KA timeout
sleep(Duration::from_millis(100)).await;
lazy(|cx| {
assert!(
h1.as_mut().poll(cx).is_pending(),
"keep-alive should not have resolved dispatcher yet",
);
// polls: initial => manual
assert_eq!(h1.poll_count, 2);
if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() {
// connection not closed
assert!(!inner.flags.contains(Flags::SHUTDOWN));
assert!(!inner.flags.contains(Flags::WRITE_DISCONNECT));
// and nothing added to write buffer
assert!(buf.write_buf_slice().is_empty());
}
})
.await;
lazy(|cx| {
buf.extend_read_buf(
"\
GET /efg HTTP/1.1\r\n\
Connection: close\r\n\
\r\n\r\n",
);
assert!(
h1.as_mut().poll(cx).is_ready(),
"connection close header should override keep-alive setting",
);
// polls: initial => manual => follow-up req => shutdown
assert_eq!(h1.poll_count, 4);
if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() {
// connection closed
assert!(inner.flags.contains(Flags::SHUTDOWN));
assert!(!inner.flags.contains(Flags::WRITE_DISCONNECT));
}
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 4\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/efg\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn req_parse_err() {
lazy(|cx| {
let buf = TestBuffer::new("GET /test HTTP/1\r\n\r\n");
let services = HttpFlow::new(ok_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
ServiceConfig::default(),
None,
OnConnectData::default(),
);
pin!(h1);
match h1.as_mut().poll(cx) {
Poll::Pending => panic!(),
Poll::Ready(res) => assert!(res.is_err()),
}
if let DispatcherStateProj::Normal { inner } = h1.project().inner.project() {
assert!(inner.flags.contains(Flags::READ_DISCONNECT));
assert_eq!(
&buf.write_buf_slice()[..26],
b"HTTP/1.1 400 Bad Request\r\n"
);
}
})
.await;
}
#[actix_rt::test]
async fn pipelining_ok_then_ok() {
lazy(|cx| {
let buf = TestBuffer::new(
"\
GET /abcd HTTP/1.1\r\n\r\n\
GET /def HTTP/1.1\r\n\r\n\
",
);
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(1),
Duration::from_millis(1),
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_ok()),
}
// polls: initial => shutdown
assert_eq!(h1.poll_count, 2);
let mut res = buf.write_buf_slice_mut();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
HTTP/1.1 200 OK\r\n\
content-length: 4\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/def\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn pipelining_ok_then_bad() {
lazy(|cx| {
let buf = TestBuffer::new(
"\
GET /abcd HTTP/1.1\r\n\r\n\
GET /def HTTP/1\r\n\r\n\
",
);
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(1),
Duration::from_millis(1),
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_err()),
}
// polls: initial => shutdown
assert_eq!(h1.poll_count, 1);
let mut res = buf.write_buf_slice_mut();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
HTTP/1.1 400 Bad Request\r\n\
content-length: 0\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn expect_handling() {
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::ZERO,
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_payload_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
buf.extend_read_buf(
"\
POST /upload HTTP/1.1\r\n\
Content-Length: 5\r\n\
Expect: 100-continue\r\n\
\r\n\
",
);
pin!(h1);
assert!(h1.as_mut().poll(cx).is_pending());
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
// polls: manual
assert_eq!(h1.poll_count, 1);
if let DispatcherState::Normal { ref inner } = h1.inner {
let io = inner.io.as_ref().unwrap();
let res = &io.write_buf()[..];
assert_eq!(
str::from_utf8(res).unwrap(),
"HTTP/1.1 100 Continue\r\n\r\n"
);
}
buf.extend_read_buf("12345");
assert!(h1.as_mut().poll(cx).is_ready());
// polls: manual => manual => shutdown
assert_eq!(h1.poll_count, 3);
if let DispatcherState::Normal { ref inner } = h1.inner {
let io = inner.io.as_ref().unwrap();
let mut res = io.write_buf()[..].to_owned();
stabilize_date_header(&mut res);
assert_eq!(
str::from_utf8(&res).unwrap(),
"\
HTTP/1.1 100 Continue\r\n\
\r\n\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\
\r\n\
12345\
"
);
}
})
.await;
}
#[actix_rt::test]
async fn expect_eager() {
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::ZERO,
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
buf.extend_read_buf(
"\
POST /upload HTTP/1.1\r\n\
Content-Length: 5\r\n\
Expect: 100-continue\r\n\
\r\n\
",
);
pin!(h1);
assert!(h1.as_mut().poll(cx).is_ready());
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
// polls: manual => shutdown
assert_eq!(h1.poll_count, 2);
if let DispatcherState::Normal { ref inner } = h1.inner {
let io = inner.io.as_ref().unwrap();
let mut res = io.write_buf()[..].to_owned();
stabilize_date_header(&mut res);
// Despite the content-length header and even though the request payload has not
// been sent, this test expects a complete service response since the payload
// is not used at all. The service passed to dispatcher is path echo and doesn't
// consume payload bytes.
assert_eq!(
str::from_utf8(&res).unwrap(),
"\
HTTP/1.1 100 Continue\r\n\
\r\n\
HTTP/1.1 200 OK\r\n\
content-length: 7\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\
\r\n\
/upload\
"
);
}
})
.await;
}
#[actix_rt::test]
async fn upgrade_handling() {
struct TestUpgrade;
impl<T> Service<(Request, Framed<T, Codec>)> for TestUpgrade {
type Response = ();
type Error = Error;
type Future = Ready<Result<Self::Response, Self::Error>>;
actix_service::always_ready!();
fn call(&self, (req, _framed): (Request, Framed<T, Codec>)) -> Self::Future {
assert_eq!(req.method(), Method::GET);
assert!(req.upgrade());
assert_eq!(req.headers().get("upgrade").unwrap(), "websocket");
ready(Ok(()))
}
}
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::ZERO,
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(ok_service(), ExpectHandler, Some(TestUpgrade));
let h1 = Dispatcher::<_, _, _, _, TestUpgrade>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
buf.extend_read_buf(
"\
GET /ws HTTP/1.1\r\n\
Connection: Upgrade\r\n\
Upgrade: websocket\r\n\
\r\n\
",
);
pin!(h1);
assert!(h1.as_mut().poll(cx).is_ready());
assert!(matches!(&h1.inner, DispatcherState::Upgrade { .. }));
// polls: manual => shutdown
assert_eq!(h1.poll_count, 2);
})
.await;
}
// fix in #2624 reverted temporarily
// complete fix tracked in #2745
#[ignore]
#[actix_rt::test]
async fn handler_drop_payload() {
let _ = env_logger::try_init();
let mut buf = TestBuffer::new(http_msg(
r"
POST /drop-payload HTTP/1.1
Content-Length: 3
abc
",
));
let services = HttpFlow::new(
drop_payload_service(),
ExpectHandler,
None::<UpgradeHandler>,
);
let h1 = Dispatcher::new(
buf.clone(),
services,
ServiceConfig::default(),
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(h1.as_mut().poll(cx).is_pending());
// polls: manual
assert_eq!(h1.poll_count, 1);
let mut res = BytesMut::from(buf.take_write_buf().as_ref());
stabilize_date_header(&mut res);
let res = &res[..];
let exp = http_msg(
r"
HTTP/1.1 200 OK
content-length: 15
date: Thu, 01 Jan 1970 12:34:56 UTC
payload dropped
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() {
assert!(inner.state.is_none());
}
})
.await;
lazy(|cx| {
// add message that claims to have payload longer than provided
buf.extend_read_buf(http_msg(
r"
POST /drop-payload HTTP/1.1
Content-Length: 200
abc
",
));
assert!(h1.as_mut().poll(cx).is_pending());
// polls: manual => manual
assert_eq!(h1.poll_count, 2);
let mut res = BytesMut::from(buf.take_write_buf().as_ref());
stabilize_date_header(&mut res);
let res = &res[..];
// expect response immediately even though request side has not finished reading payload
let exp = http_msg(
r"
HTTP/1.1 200 OK
content-length: 15
date: Thu, 01 Jan 1970 12:34:56 UTC
payload dropped
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
})
.await;
lazy(|cx| {
assert!(h1.as_mut().poll(cx).is_ready());
// polls: manual => manual => manual
assert_eq!(h1.poll_count, 3);
let mut res = BytesMut::from(buf.take_write_buf().as_ref());
stabilize_date_header(&mut res);
let res = &res[..];
// expect that an unrequested error response is sent back since the connection could not be cleaned up
let exp = http_msg(
r"
HTTP/1.1 500 Internal Server Error
content-length: 0
connection: close
date: Thu, 01 Jan 1970 12:34:56 UTC
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
})
.await;
}
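// Builds an HTTP message from an indented raw string: trims leading whitespace from each line,
// normalizes line endings to CRLF, and appends the blank line terminating the headers if it is missing.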
fn http_msg(msg: impl AsRef<str>) -> BytesMut {
let mut msg = msg
.as_ref()
.trim()
.split('\n')
.into_iter()
.map(|line| [line.trim_start(), "\r"].concat())
.collect::<Vec<_>>()
.join("\n");
// remove trailing \r
msg.pop();
if !msg.is_empty() && !msg.contains("\r\n\r\n") {
msg.push_str("\r\n\r\n");
}
BytesMut::from(msg.as_bytes())
}
#[test]
fn http_msg_creates_msg() {
assert_eq!(http_msg(r""), "");
assert_eq!(
http_msg(
r"
POST / HTTP/1.1
Content-Length: 3
abc
"
),
"POST / HTTP/1.1\r\nContent-Length: 3\r\n\r\nabc"
);
assert_eq!(
http_msg(
r"
GET / HTTP/1.1
Content-Length: 3
"
),
"GET / HTTP/1.1\r\nContent-Length: 3\r\n\r\n"
);
}


@ -105,7 +105,7 @@ pub(crate) trait MessageType: Sized {
}
BodySize::Sized(0) if camel_case => dst.put_slice(b"\r\nContent-Length: 0\r\n"),
BodySize::Sized(0) => dst.put_slice(b"\r\ncontent-length: 0\r\n"),
BodySize::Sized(len) => helpers::write_content_length(len, dst, camel_case),
BodySize::Sized(len) => helpers::write_content_length(len, dst),
BodySize::None => dst.put_slice(b"\r\n"),
}
@ -152,6 +152,7 @@ pub(crate) trait MessageType: Sized {
let k = key.as_str().as_bytes();
let k_len = k.len();
// TODO: drain?
for val in value.iter() {
let v = val.as_ref();
let v_len = v.len();
@ -210,14 +211,14 @@ pub(crate) trait MessageType: Sized {
dst.advance_mut(pos);
}
// optimized date header, set_date writes \r\n
if !has_date {
// optimized date header, write_date_header writes its own \r\n
config.write_date_header(dst, camel_case);
config.set_date(dst);
} else {
// msg eof
dst.extend_from_slice(b"\r\n");
}
// end-of-headers marker
dst.extend_from_slice(b"\r\n");
Ok(())
}
@ -318,17 +319,16 @@ impl MessageType for RequestHeadType {
}
impl<T: MessageType> MessageEncoder<T> {
/// Encode chunk.
/// Encode message
pub fn encode_chunk(&mut self, msg: &[u8], buf: &mut BytesMut) -> io::Result<bool> {
self.te.encode(msg, buf)
}
/// Encode EOF.
/// Encode eof
pub fn encode_eof(&mut self, buf: &mut BytesMut) -> io::Result<()> {
self.te.encode_eof(buf)
}
/// Encode message.
pub fn encode(
&mut self,
dst: &mut BytesMut,
@ -450,7 +450,7 @@ impl TransferEncoding {
buf.extend_from_slice(&msg[..len as usize]);
*remaining -= len;
*remaining -= len as u64;
Ok(*remaining == 0)
} else {
Ok(true)
@ -517,7 +517,6 @@ unsafe fn write_camel_case(value: &[u8], buf: *mut u8, len: usize) {
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[index] = c & 0b1101_1111;
}
index += 1;
}
index += 1;
@ -529,7 +528,7 @@ mod tests {
use std::rc::Rc;
use bytes::Bytes;
use http::header::{AUTHORIZATION, UPGRADE_INSECURE_REQUESTS};
use http::header::AUTHORIZATION;
use super::*;
use crate::{
@ -560,9 +559,6 @@ mod tests {
head.headers
.insert(CONTENT_TYPE, HeaderValue::from_static("plain/text"));
head.headers
.insert(UPGRADE_INSECURE_REQUESTS, HeaderValue::from_static("1"));
let mut head = RequestHeadType::Owned(head);
let _ = head.encode_headers(
@ -578,7 +574,6 @@ mod tests {
assert!(data.contains("Connection: close\r\n"));
assert!(data.contains("Content-Type: plain/text\r\n"));
assert!(data.contains("Date: date\r\n"));
assert!(data.contains("Upgrade-Insecure-Requests: 1\r\n"));
let _ = head.encode_headers(
&mut bytes,


@ -7,13 +7,10 @@ mod client;
mod codec;
mod decoder;
mod dispatcher;
#[cfg(test)]
mod dispatcher_tests;
mod encoder;
mod expect;
mod payload;
mod service;
mod timer;
mod upgrade;
mod utils;
@ -29,10 +26,9 @@ pub use self::utils::SendResponse;
#[derive(Debug)]
/// Codec message
pub enum Message<T> {
/// HTTP message.
/// Http message
Item(T),
/// Payload chunk.
/// Payload chunk
Chunk(Option<Bytes>),
}


@ -16,7 +16,7 @@ use crate::error::PayloadError;
/// max buffer size 32k
pub(crate) const MAX_BUFFER_SIZE: usize = 32_768;
#[derive(Debug, PartialEq, Eq)]
#[derive(Debug, PartialEq)]
pub enum PayloadStatus {
Read,
Pause,
@ -252,15 +252,18 @@ impl Inner {
#[cfg(test)]
mod tests {
use std::panic::{RefUnwindSafe, UnwindSafe};
use actix_utils::future::poll_fn;
use static_assertions::{assert_impl_all, assert_not_impl_any};
use super::*;
assert_impl_all!(Payload: Unpin);
assert_not_impl_any!(Payload: Send, Sync);
assert_not_impl_any!(Payload: Send, Sync, UnwindSafe, RefUnwindSafe);
assert_impl_all!(Inner: Unpin, Send, Sync);
assert_not_impl_any!(Inner: UnwindSafe, RefUnwindSafe);
#[actix_rt::test]
async fn test_unread_data() {


@ -13,7 +13,6 @@ use actix_service::{
};
use actix_utils::future::ready;
use futures_core::future::LocalBoxFuture;
use tracing::error;
use crate::{
body::{BoxBody, MessageBody},
@ -134,7 +133,6 @@ mod openssl {
U::InitError: fmt::Debug,
{
/// Create OpenSSL based service.
#[cfg_attr(docsrs, doc(cfg(feature = "openssl")))]
pub fn openssl(
self,
acceptor: SslAcceptor,
@ -197,7 +195,6 @@ mod rustls {
U::InitError: fmt::Debug,
{
/// Create Rustls based service.
#[cfg_attr(docsrs, doc(cfg(feature = "rustls")))]
pub fn rustls(
self,
config: ServerConfig,
@ -308,13 +305,13 @@ where
Box::pin(async move {
let expect = expect
.await
.map_err(|e| error!("Init http expect service error: {:?}", e))?;
.map_err(|e| log::error!("Init http expect service error: {:?}", e))?;
let upgrade = match upgrade {
Some(upgrade) => {
let upgrade = upgrade
.await
.map_err(|e| error!("Init http upgrade service error: {:?}", e))?;
.map_err(|e| log::error!("Init http upgrade service error: {:?}", e))?;
Some(upgrade)
}
None => None,
@ -322,7 +319,7 @@ where
let service = service
.await
.map_err(|e| error!("Init http service error: {:?}", e))?;
.map_err(|e| log::error!("Init http service error: {:?}", e))?;
Ok(H1ServiceHandler::new(
cfg,
@ -360,7 +357,7 @@ where
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self._poll_ready(cx).map_err(|err| {
error!("HTTP/1 service readiness error: {:?}", err);
log::error!("HTTP/1 service readiness error: {:?}", err);
DispatchError::Service(err)
})
}


@ -1,81 +0,0 @@
use std::{fmt, future::Future, pin::Pin, task::Context};
use actix_rt::time::{Instant, Sleep};
use tracing::trace;
#[derive(Debug)]
pub(super) enum TimerState {
Disabled,
Inactive,
Active { timer: Pin<Box<Sleep>> },
}
impl TimerState {
pub(super) fn new(enabled: bool) -> Self {
if enabled {
Self::Inactive
} else {
Self::Disabled
}
}
pub(super) fn is_enabled(&self) -> bool {
matches!(self, Self::Active { .. } | Self::Inactive)
}
pub(super) fn set(&mut self, timer: Sleep, line: u32) {
if matches!(self, Self::Disabled) {
trace!("setting disabled timer from line {}", line);
}
*self = Self::Active {
timer: Box::pin(timer),
};
}
pub(super) fn set_and_init(&mut self, cx: &mut Context<'_>, timer: Sleep, line: u32) {
self.set(timer, line);
self.init(cx);
}
pub(super) fn clear(&mut self, line: u32) {
if matches!(self, Self::Disabled) {
trace!("trying to clear a disabled timer from line {}", line);
}
if matches!(self, Self::Inactive) {
trace!("trying to clear an inactive timer from line {}", line);
}
*self = Self::Inactive;
}
pub(super) fn init(&mut self, cx: &mut Context<'_>) {
if let TimerState::Active { timer } = self {
let _ = timer.as_mut().poll(cx);
}
}
}
impl fmt::Display for TimerState {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
TimerState::Disabled => f.write_str("timer is disabled"),
TimerState::Inactive => f.write_str("timer is inactive"),
TimerState::Active { timer } => {
let deadline = timer.deadline();
let now = Instant::now();
if deadline < now {
f.write_str("timer is active and has reached deadline")
} else {
write!(
f,
"timer is active and due to expire in {} milliseconds",
((deadline - now).as_secs_f32() * 1000.0)
)
}
}
}
}
}


@ -19,17 +19,15 @@ use h2::{
server::{Connection, SendResponse},
Ping, PingPong,
};
use log::{error, trace};
use pin_project_lite::pin_project;
use tracing::{error, trace, warn};
use crate::{
body::{BodySize, BoxBody, MessageBody},
config::ServiceConfig,
header::{
HeaderName, HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING, UPGRADE,
},
header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING},
service::HttpFlow,
Extensions, Method, OnConnectData, Payload, Request, Response, ResponseHead,
Extensions, OnConnectData, Payload, Request, Response, ResponseHead,
};
const CHUNK_SIZE: usize = 16_384;
@ -59,15 +57,15 @@ where
conn_data: OnConnectData,
timer: Option<Pin<Box<Sleep>>>,
) -> Self {
let ping_pong = config.keep_alive().duration().map(|dur| H2PingPong {
let ping_pong = config.keep_alive().map(|dur| H2PingPong {
timer: timer
.map(|mut timer| {
// reuse timer slot if it was initialized for handshake
timer.as_mut().reset((config.now() + dur).into());
// reset timer if it's received from new function.
timer.as_mut().reset(config.now() + dur);
timer
})
.unwrap_or_else(|| Box::pin(sleep(dur))),
in_flight: false,
on_flight: false,
ping_pong: conn.ping_pong().unwrap(),
});
@ -84,14 +82,9 @@ where
}
struct H2PingPong {
/// Handle to send ping frames from the peer.
ping_pong: PingPong,
/// True when a ping has been sent and is waiting for a reply.
in_flight: bool,
/// Timeout for pong response.
timer: Pin<Box<Sleep>>,
on_flight: bool,
ping_pong: PingPong,
}
impl<T, S, B, X, U> Future for Dispatcher<T, S, B, X, U>
@ -118,7 +111,6 @@ where
let payload = crate::h2::Payload::new(body);
let pl = Payload::H2 { payload };
let mut req = Request::with_payload(pl);
let head_req = parts.method == Method::HEAD;
let head = req.head_mut();
head.uri = parts.uri;
@ -136,10 +128,10 @@ where
actix_rt::spawn(async move {
// resolve service call and send response.
let res = match fut.await {
Ok(res) => handle_response(res.into(), tx, config, head_req).await,
Ok(res) => handle_response(res.into(), tx, config).await,
Err(err) => {
let res: Response<BoxBody> = err.into();
handle_response(res, tx, config, head_req).await
handle_response(res, tx, config).await
}
};
@ -158,36 +150,34 @@ where
});
}
Poll::Ready(None) => return Poll::Ready(Ok(())),
Poll::Pending => match this.ping_pong.as_mut() {
Some(ping_pong) => loop {
if ping_pong.in_flight {
// When there is an in-flight ping-pong, poll the pong and the keep-alive
// timer. When a pong is received, update the keep-alive timer to
// determine the timing of the next ping-pong.
if ping_pong.on_flight {
// When have on flight ping pong. poll pong and and keep alive timer.
// on success pong received update keep alive timer to determine the next timing of
// ping pong.
match ping_pong.ping_pong.poll_pong(cx)? {
Poll::Ready(_) => {
ping_pong.in_flight = false;
ping_pong.on_flight = false;
let dead_line = this.config.keep_alive_deadline().unwrap();
ping_pong.timer.as_mut().reset(dead_line.into());
let dead_line = this.config.keep_alive_expire().unwrap();
ping_pong.timer.as_mut().reset(dead_line);
}
Poll::Pending => {
return ping_pong.timer.as_mut().poll(cx).map(|_| Ok(()));
return ping_pong.timer.as_mut().poll(cx).map(|_| Ok(()))
}
}
} else {
// When there is no in-flight ping-pong, keep-alive timer is used to
// wait for next timing of ping-pong. Therefore, at this point it serves
// as an interval instead.
// When there is no on flight ping pong. keep alive timer is used to wait for next
// timing of ping pong. Therefore at this point it serves as an interval instead.
ready!(ping_pong.timer.as_mut().poll(cx));
ping_pong.ping_pong.send_ping(Ping::opaque())?;
let dead_line = this.config.keep_alive_deadline().unwrap();
ping_pong.timer.as_mut().reset(dead_line.into());
let dead_line = this.config.keep_alive_expire().unwrap();
ping_pong.timer.as_mut().reset(dead_line);
ping_pong.in_flight = true;
ping_pong.on_flight = true;
}
},
None => return Poll::Pending,
@ -207,7 +197,6 @@ async fn handle_response<B>(
res: Response<B>,
mut tx: SendResponse<Bytes>,
config: ServiceConfig,
head_req: bool,
) -> Result<(), DispatchError>
where
B: MessageBody,
@ -217,14 +206,14 @@ where
// prepare response.
let mut size = body.size();
let res = prepare_response(config, res.head(), &mut size);
let eof_or_head = size.is_eof() || head_req;
let eof = size.is_eof();
// send response head and return on eof.
let mut stream = tx
.send_response(res, eof_or_head)
.send_response(res, eof)
.map_err(DispatchError::SendResponse)?;
if eof_or_head {
if eof {
return Ok(());
}
@ -296,13 +285,13 @@ fn prepare_response(
_ => {}
}
match size {
BodySize::None | BodySize::Stream => {}
let _ = match size {
BodySize::None | BodySize::Stream => None,
BodySize::Sized(0) => {
#[allow(clippy::declare_interior_mutable_const)]
const HV_ZERO: HeaderValue = HeaderValue::from_static("0");
res.headers_mut().insert(CONTENT_LENGTH, HV_ZERO);
res.headers_mut().insert(CONTENT_LENGTH, HV_ZERO)
}
BodySize::Sized(len) => {
@ -311,28 +300,19 @@ fn prepare_response(
res.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::from_str(buf.format(*len)).unwrap(),
);
)
}
};
// copy headers
for (key, value) in head.headers.iter() {
match key {
// omit HTTP/1.x only headers according to:
// https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2
&CONNECTION | &TRANSFER_ENCODING | &UPGRADE => continue,
&CONTENT_LENGTH if skip_len => continue,
&DATE => has_date = true,
// omit HTTP/1.x only headers according to:
// https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2
hdr if hdr == HeaderName::from_static("keep-alive")
|| hdr == HeaderName::from_static("proxy-connection") =>
{
continue
}
match *key {
// TODO: consider skipping other headers according to:
// https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2
// omit HTTP/1.x only headers
CONNECTION | TRANSFER_ENCODING => continue,
CONTENT_LENGTH if skip_len => continue,
DATE => has_date = true,
_ => {}
}
@ -342,7 +322,7 @@ fn prepare_response(
// set date header
if !has_date {
let mut bytes = BytesMut::with_capacity(29);
config.write_date_header_value(&mut bytes);
config.set_date_header(&mut bytes);
res.headers_mut().insert(
DATE,
// SAFETY: serialized date-times are known ASCII strings


@ -7,7 +7,7 @@ use std::{
};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::time::{sleep_until, Sleep};
use actix_rt::time::Sleep;
use bytes::Bytes;
use futures_core::{ready, Stream};
use h2::{
@ -15,17 +15,17 @@ use h2::{
RecvStream,
};
use crate::{
config::ServiceConfig,
error::{DispatchError, PayloadError},
};
mod dispatcher;
mod service;
pub use self::dispatcher::Dispatcher;
pub use self::service::H2Service;
use crate::{
config::ServiceConfig,
error::{DispatchError, PayloadError},
};
/// HTTP/2 peer stream.
pub struct Payload {
stream: RecvStream,
@ -67,9 +67,7 @@ where
{
HandshakeWithTimeout {
handshake: handshake(io),
timer: config
.client_request_deadline()
.map(|deadline| Box::pin(sleep_until(deadline.into()))),
timer: config.client_timer().map(Box::pin),
}
}
@ -88,7 +86,7 @@ where
let this = self.get_mut();
match Pin::new(&mut this.handshake).poll(cx)? {
// return the timer on success handshake; its slot can be re-used for h2 ping-pong
// return the timer on success handshake. It can be re-used for h2 ping-pong.
Poll::Ready(conn) => Poll::Ready(Ok((conn, this.timer.take()))),
Poll::Pending => match this.timer.as_mut() {
Some(timer) => {
@ -103,9 +101,11 @@ where
#[cfg(test)]
mod tests {
use std::panic::{RefUnwindSafe, UnwindSafe};
use static_assertions::assert_impl_all;
use super::*;
assert_impl_all!(Payload: Unpin, Send, Sync);
assert_impl_all!(Payload: Unpin, Send, Sync, UnwindSafe, RefUnwindSafe);
}


@ -14,7 +14,7 @@ use actix_service::{
};
use actix_utils::future::ready;
use futures_core::{future::LocalBoxFuture, ready};
use tracing::{error, trace};
use log::error;
use crate::{
body::{BoxBody, MessageBody},
@ -117,7 +117,6 @@ mod openssl {
B: MessageBody + 'static,
{
/// Create OpenSSL based service.
#[cfg_attr(docsrs, doc(cfg(feature = "openssl")))]
pub fn openssl(
self,
acceptor: SslAcceptor,
@ -165,7 +164,6 @@ mod rustls {
B: MessageBody + 'static,
{
/// Create Rustls based service.
#[cfg_attr(docsrs, doc(cfg(feature = "rustls")))]
pub fn rustls(
self,
mut config: ServerConfig,


@ -1,51 +0,0 @@
//! Common header names not defined in [`http`].
//!
//! Any headers added to this file will need to be re-exported from the list at `crate::headers`.
use http::header::HeaderName;
/// Response header field that indicates how caches have handled that response and its corresponding
/// request.
///
/// See [RFC 9211](https://www.rfc-editor.org/rfc/rfc9211) for full semantics.
pub const CACHE_STATUS: HeaderName = HeaderName::from_static("cache-status");
/// Response header field that allows origin servers to control the behavior of CDN caches
/// interposed between them and clients separately from other caches that might handle the response.
///
/// See [RFC 9213](https://www.rfc-editor.org/rfc/rfc9213) for full semantics.
pub const CDN_CACHE_CONTROL: HeaderName = HeaderName::from_static("cdn-cache-control");
/// Response header that prevents a document from loading any cross-origin resources that don't
/// explicitly grant the document permission (using [CORP] or [CORS]).
///
/// [CORP]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Cross-Origin_Resource_Policy_(CORP)
/// [CORS]: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS
pub const CROSS_ORIGIN_EMBEDDER_POLICY: HeaderName =
HeaderName::from_static("cross-origin-embedder-policy");
/// Response header that allows you to ensure a top-level document does not share a browsing context
/// group with cross-origin documents.
pub const CROSS_ORIGIN_OPENER_POLICY: HeaderName =
HeaderName::from_static("cross-origin-opener-policy");
/// Response header that conveys a desire that the browser blocks no-cors cross-origin/cross-site
/// requests to the given resource.
pub const CROSS_ORIGIN_RESOURCE_POLICY: HeaderName =
HeaderName::from_static("cross-origin-resource-policy");
/// Response header that provides a mechanism to allow and deny the use of browser features in a
/// document or within any `<iframe>` elements in the document.
pub const PERMISSIONS_POLICY: HeaderName = HeaderName::from_static("permissions-policy");
/// Request header (de-facto standard) for identifying the originating IP address of a client
/// connecting to a web server through a proxy server.
pub const X_FORWARDED_FOR: HeaderName = HeaderName::from_static("x-forwarded-for");
/// Request header (de-facto standard) for identifying the original host requested by the client in
/// the `Host` HTTP request header.
pub const X_FORWARDED_HOST: HeaderName = HeaderName::from_static("x-forwarded-host");
/// Request header (de-facto standard) for identifying the protocol that a client used to connect to
/// your proxy or load balancer.
pub const X_FORWARDED_PROTO: HeaderName = HeaderName::from_static("x-forwarded-proto");


@ -150,7 +150,9 @@ impl HeaderMap {
/// assert_eq!(map.len(), 3);
/// ```
pub fn len(&self) -> usize {
self.inner.values().map(|vals| vals.len()).sum()
self.inner
.iter()
.fold(0, |acc, (_, values)| acc + values.len())
}
/// Returns the number of _keys_ stored in the map.
@ -307,7 +309,7 @@ impl HeaderMap {
pub fn get_all(&self, key: impl AsHeaderName) -> std::slice::Iter<'_, HeaderValue> {
match self.get_value(key) {
Some(value) => value.iter(),
None => [].iter(),
None => (&[]).iter(),
}
}
@ -550,39 +552,6 @@ impl HeaderMap {
Keys(self.inner.keys())
}
/// Retains only the headers specified by the predicate.
///
/// In other words, removes all headers `(name, val)` for which `retain_fn(&name, &mut val)`
/// returns false.
///
/// The order in which headers are visited should be considered arbitrary.
///
/// # Examples
/// ```
/// # use actix_http::header::{self, HeaderMap, HeaderValue};
/// let mut map = HeaderMap::new();
///
/// map.append(header::HOST, HeaderValue::from_static("duck.com"));
/// map.append(header::SET_COOKIE, HeaderValue::from_static("one=1"));
/// map.append(header::SET_COOKIE, HeaderValue::from_static("two=2"));
///
/// map.retain(|name, val| val.as_bytes().starts_with(b"one"));
///
/// assert_eq!(map.len(), 1);
/// assert!(map.contains_key(&header::SET_COOKIE));
/// ```
pub fn retain<F>(&mut self, mut retain_fn: F)
where
F: FnMut(&HeaderName, &mut HeaderValue) -> bool,
{
self.inner.retain(|name, vals| {
vals.inner.retain(|val| retain_fn(name, val));
// invariant: make sure newly empty value lists are removed
!vals.is_empty()
})
}
/// Clears the map, returning all name-value sets as an iterator.
///
/// Header names will only be yielded for the first value in each set. All items that are
@ -661,7 +630,7 @@ impl Removed {
/// Returns true if iterator contains no elements, without consuming it.
///
/// If called immediately after [`HeaderMap::insert`] or [`HeaderMap::remove`], it will indicate
/// whether any items were actually replaced or removed, respectively.
/// wether any items were actually replaced or removed, respectively.
pub fn is_empty(&self) -> bool {
match self.inner {
// size hint lower bound of smallvec is the correct length
@ -974,55 +943,6 @@ mod tests {
assert!(map.is_empty());
}
#[test]
fn retain() {
let mut map = HeaderMap::new();
map.append(header::LOCATION, HeaderValue::from_static("/test"));
map.append(header::HOST, HeaderValue::from_static("duck.com"));
map.append(header::COOKIE, HeaderValue::from_static("one=1"));
map.append(header::COOKIE, HeaderValue::from_static("two=2"));
assert_eq!(map.len(), 4);
// by value
map.retain(|_, val| !val.as_bytes().contains(&b'/'));
assert_eq!(map.len(), 3);
// by name
map.retain(|name, _| name.as_str() != "cookie");
assert_eq!(map.len(), 1);
// keep but mutate value
map.retain(|_, val| {
*val = HeaderValue::from_static("replaced");
true
});
assert_eq!(map.len(), 1);
assert_eq!(map.get("host").unwrap(), "replaced");
}
#[test]
fn retain_removes_empty_value_lists() {
let mut map = HeaderMap::with_capacity(3);
map.append(header::HOST, HeaderValue::from_static("duck.com"));
map.append(header::HOST, HeaderValue::from_static("duck.com"));
assert_eq!(map.len(), 2);
assert_eq!(map.len_keys(), 1);
assert_eq!(map.inner.len(), 1);
assert_eq!(map.capacity(), 3);
// remove everything
map.retain(|_n, _v| false);
assert_eq!(map.len(), 0);
assert_eq!(map.len_keys(), 0);
assert_eq!(map.inner.len(), 0);
assert_eq!(map.capacity(), 3);
}
#[test]
fn entries_into_iter() {
let mut map = HeaderMap::new();


@ -1,18 +1,14 @@
//! Pre-defined `HeaderName`s, traits for parsing and conversion, and other header utility methods.
// declaring new header consts will yield this error
#![allow(clippy::declare_interior_mutable_const)]
use percent_encoding::{AsciiSet, CONTROLS};
// re-export from http except header map related items
pub use ::http::header::{
pub use http::header::{
HeaderName, HeaderValue, InvalidHeaderName, InvalidHeaderValue, ToStrError,
};
// re-export const header names, list is explicit so that any updates to `common` module do not
// conflict with this set
pub use ::http::header::{
// re-export const header names
pub use http::header::{
ACCEPT, ACCEPT_CHARSET, ACCEPT_ENCODING, ACCEPT_LANGUAGE, ACCEPT_RANGES,
ACCESS_CONTROL_ALLOW_CREDENTIALS, ACCESS_CONTROL_ALLOW_HEADERS,
ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ACCESS_CONTROL_EXPOSE_HEADERS,
@ -34,30 +30,22 @@ pub use ::http::header::{
use crate::{error::ParseError, HttpMessage};
mod as_name;
mod common;
mod into_pair;
mod into_value;
pub mod map;
mod shared;
mod utils;
pub use self::{
as_name::AsHeaderName,
into_pair::TryIntoHeaderPair,
into_value::TryIntoHeaderValue,
map::HeaderMap,
shared::{
parse_extended_value, q, Charset, ContentEncoding, ExtendedValue, HttpDate,
LanguageTag, Quality, QualityItem,
},
utils::{fmt_comma_delimited, from_comma_delimited, from_one_raw_str, http_percent_encode},
pub use self::as_name::AsHeaderName;
pub use self::into_pair::TryIntoHeaderPair;
pub use self::into_value::TryIntoHeaderValue;
pub use self::map::HeaderMap;
pub use self::shared::{
parse_extended_value, q, Charset, ContentEncoding, ExtendedValue, HttpDate, LanguageTag,
Quality, QualityItem,
};
// re-export list is explicit so that any updates to `http` do not conflict with this set
pub use self::common::{
CACHE_STATUS, CDN_CACHE_CONTROL, CROSS_ORIGIN_EMBEDDER_POLICY, CROSS_ORIGIN_OPENER_POLICY,
CROSS_ORIGIN_RESOURCE_POLICY, PERMISSIONS_POLICY, X_FORWARDED_FOR, X_FORWARDED_HOST,
X_FORWARDED_PROTO,
pub use self::utils::{
fmt_comma_delimited, from_comma_delimited, from_one_raw_str, http_percent_encode,
};
/// An interface for types that already represent a valid header.


@ -12,7 +12,7 @@ use crate::header::{Charset, HTTP_VALUE};
/// - A character sequence representing the actual value (`value`), separated by single quotes.
///
/// It is defined in [RFC 5987 §3.2](https://datatracker.ietf.org/doc/html/rfc5987#section-3.2).
#[derive(Clone, Debug, PartialEq, Eq)]
#[derive(Clone, Debug, PartialEq)]
pub struct ExtendedValue {
/// The character set that is used to encode the `value` to a string.
pub charset: Charset,


@ -4,7 +4,8 @@ use bytes::BytesMut;
use http::header::{HeaderValue, InvalidHeaderValue};
use crate::{
date::DATE_VALUE_LENGTH, error::ParseError, header::TryIntoHeaderValue, helpers::MutWriter,
config::DATE_VALUE_LENGTH, error::ParseError, header::TryIntoHeaderValue,
helpers::MutWriter,
};
/// A timestamp with HTTP-style formatting and parsing.


@ -147,7 +147,7 @@ mod tests {
// copy of encoding from actix-web headers
#[allow(clippy::enum_variant_names)] // allow Encoding prefix on EncodingExt
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Clone, PartialEq, Debug)]
pub enum Encoding {
Chunked,
Brotli,


@ -30,25 +30,15 @@ pub(crate) fn write_status_line<B: BufMut>(version: Version, n: u16, buf: &mut B
/// Write out content length header.
///
/// Buffer must to contain enough space or be implicitly extendable.
pub fn write_content_length<B: BufMut>(n: u64, buf: &mut B, camel_case: bool) {
pub fn write_content_length<B: BufMut>(n: u64, buf: &mut B) {
if n == 0 {
if camel_case {
buf.put_slice(b"\r\nContent-Length: 0\r\n");
} else {
buf.put_slice(b"\r\ncontent-length: 0\r\n");
}
buf.put_slice(b"\r\ncontent-length: 0\r\n");
return;
}
let mut buffer = itoa::Buffer::new();
if camel_case {
buf.put_slice(b"\r\nContent-Length: ");
} else {
buf.put_slice(b"\r\ncontent-length: ");
}
buf.put_slice(b"\r\ncontent-length: ");
buf.put_slice(buffer.format(n).as_bytes());
buf.put_slice(b"\r\n");
}
@ -105,88 +95,77 @@ mod tests {
fn test_write_content_length() {
let mut bytes = BytesMut::new();
bytes.reserve(50);
write_content_length(0, &mut bytes, false);
write_content_length(0, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 0\r\n"[..]);
bytes.reserve(50);
write_content_length(9, &mut bytes, false);
write_content_length(9, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 9\r\n"[..]);
bytes.reserve(50);
write_content_length(10, &mut bytes, false);
write_content_length(10, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 10\r\n"[..]);
bytes.reserve(50);
write_content_length(99, &mut bytes, false);
write_content_length(99, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 99\r\n"[..]);
bytes.reserve(50);
write_content_length(100, &mut bytes, false);
write_content_length(100, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 100\r\n"[..]);
bytes.reserve(50);
write_content_length(101, &mut bytes, false);
write_content_length(101, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 101\r\n"[..]);
bytes.reserve(50);
write_content_length(998, &mut bytes, false);
write_content_length(998, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 998\r\n"[..]);
bytes.reserve(50);
write_content_length(1000, &mut bytes, false);
write_content_length(1000, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 1000\r\n"[..]);
bytes.reserve(50);
write_content_length(1001, &mut bytes, false);
write_content_length(1001, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 1001\r\n"[..]);
bytes.reserve(50);
write_content_length(5909, &mut bytes, false);
write_content_length(5909, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 5909\r\n"[..]);
bytes.reserve(50);
write_content_length(9999, &mut bytes, false);
write_content_length(9999, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 9999\r\n"[..]);
bytes.reserve(50);
write_content_length(10001, &mut bytes, false);
write_content_length(10001, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 10001\r\n"[..]);
bytes.reserve(50);
write_content_length(59094, &mut bytes, false);
write_content_length(59094, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 59094\r\n"[..]);
bytes.reserve(50);
write_content_length(99999, &mut bytes, false);
write_content_length(99999, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 99999\r\n"[..]);
bytes.reserve(50);
write_content_length(590947, &mut bytes, false);
write_content_length(590947, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 590947\r\n"[..]
);
bytes.reserve(50);
write_content_length(999999, &mut bytes, false);
write_content_length(999999, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 999999\r\n"[..]
);
bytes.reserve(50);
write_content_length(5909471, &mut bytes, false);
write_content_length(5909471, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 5909471\r\n"[..]
);
bytes.reserve(50);
write_content_length(59094718, &mut bytes, false);
write_content_length(59094718, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 59094718\r\n"[..]
);
bytes.reserve(50);
write_content_length(4294973728, &mut bytes, false);
write_content_length(4294973728, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 4294973728\r\n"[..]
);
}
#[test]
fn write_content_length_camel_case() {
let mut bytes = BytesMut::new();
write_content_length(0, &mut bytes, false);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 0\r\n"[..]);
let mut bytes = BytesMut::new();
write_content_length(0, &mut bytes, true);
assert_eq!(bytes.split().freeze(), b"\r\nContent-Length: 0\r\n"[..]);
}
}


@ -25,10 +25,10 @@ pub trait HttpMessage: Sized {
/// Message payload stream
fn take_payload(&mut self) -> Payload<Self::Stream>;
/// Returns a reference to the request-local data/extensions container.
/// Request's extensions container
fn extensions(&self) -> Ref<'_, Extensions>;
/// Returns a mutable reference to the request-local data/extensions container.
/// Mutable reference to a the request's extensions container
fn extensions_mut(&self) -> RefMut<'_, Extensions>;
/// Get a header.
@ -55,7 +55,7 @@ pub trait HttpMessage: Sized {
""
}
/// Get content type encoding.
/// Get content type encoding
///
/// UTF-8 is used by default if the request charset is not set.
fn encoding(&self) -> Result<&'static Encoding, ContentTypeError> {


@ -1,84 +0,0 @@
use std::time::Duration;
/// Connection keep-alive config.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum KeepAlive {
/// Keep-alive duration.
///
/// `KeepAlive::Timeout(Duration::ZERO)` is mapped to `KeepAlive::Disabled`.
Timeout(Duration),
/// Rely on OS to shutdown TCP connection.
///
/// Some defaults can be very long, check your OS documentation.
Os,
/// Keep-alive is disabled.
///
/// Connections will be closed immediately.
Disabled,
}
impl KeepAlive {
pub(crate) fn enabled(&self) -> bool {
!matches!(self, Self::Disabled)
}
#[allow(unused)] // used with `http2` feature flag
pub(crate) fn duration(&self) -> Option<Duration> {
match self {
KeepAlive::Timeout(dur) => Some(*dur),
_ => None,
}
}
/// Map zero duration to disabled.
pub(crate) fn normalize(self) -> KeepAlive {
match self {
KeepAlive::Timeout(Duration::ZERO) => KeepAlive::Disabled,
ka => ka,
}
}
}
impl Default for KeepAlive {
fn default() -> Self {
Self::Timeout(Duration::from_secs(5))
}
}
impl From<Duration> for KeepAlive {
fn from(dur: Duration) -> Self {
KeepAlive::Timeout(dur).normalize()
}
}
impl From<Option<Duration>> for KeepAlive {
fn from(ka_dur: Option<Duration>) -> Self {
match ka_dur {
Some(dur) => KeepAlive::from(dur),
None => KeepAlive::Disabled,
}
.normalize()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn from_impls() {
let test: KeepAlive = Duration::from_secs(1).into();
assert_eq!(test, KeepAlive::Timeout(Duration::from_secs(1)));
let test: KeepAlive = Duration::from_secs(0).into();
assert_eq!(test, KeepAlive::Disabled);
let test: KeepAlive = Some(Duration::from_secs(0)).into();
assert_eq!(test, KeepAlive::Disabled);
let test: KeepAlive = None.into();
assert_eq!(test, KeepAlive::Disabled);
}
}


@ -3,7 +3,6 @@
//! ## Crate Features
//! | Feature | Functionality |
//! | ------------------- | ------------------------------------------- |
//! | `http2` | HTTP/2 support via [h2]. |
//! | `openssl` | TLS support via [OpenSSL]. |
//! | `rustls` | TLS support via [rustls]. |
//! | `compress-brotli` | Payload compression support: Brotli. |
@ -11,7 +10,6 @@
//! | `compress-zstd` | Payload compression support: Zstd. |
//! | `trust-dns` | Use [trust-dns] as the client DNS resolver. |
//!
//! [h2]: https://crates.io/crates/h2
//! [OpenSSL]: https://crates.io/crates/openssl
//! [rustls]: https://crates.io/crates/rustls
//! [trust-dns]: https://crates.io/crates/trust-dns
@ -21,12 +19,13 @@
#![allow(
clippy::type_complexity,
clippy::too_many_arguments,
clippy::borrow_interior_mutable_const,
clippy::uninlined_format_args
clippy::borrow_interior_mutable_const
)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
#![cfg_attr(docsrs, feature(doc_cfg))]
#[macro_use]
extern crate log;
pub use ::http::{uri, uri::Uri};
pub use ::http::{Method, StatusCode, Version};
@ -34,38 +33,29 @@ pub use ::http::{Method, StatusCode, Version};
pub mod body;
mod builder;
mod config;
mod date;
#[cfg(feature = "__compress")]
pub mod encoding;
pub mod error;
mod extensions;
pub mod h1;
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub mod h2;
pub mod header;
mod helpers;
mod http_message;
mod keep_alive;
mod message;
#[cfg(test)]
mod notify_on_drop;
mod payload;
mod requests;
mod responses;
mod service;
pub mod test;
#[cfg(feature = "ws")]
#[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
pub mod ws;
pub use self::builder::HttpServiceBuilder;
pub use self::config::ServiceConfig;
pub use self::config::{KeepAlive, ServiceConfig};
pub use self::error::Error;
pub use self::extensions::Extensions;
pub use self::header::ContentEncoding;
pub use self::http_message::HttpMessage;
pub use self::keep_alive::KeepAlive;
pub use self::message::ConnectionType;
pub use self::message::Message;
#[allow(deprecated)]
@ -73,9 +63,6 @@ pub use self::payload::{BoxedPayloadStream, Payload, PayloadStream};
pub use self::requests::{Request, RequestHead, RequestHeadType};
pub use self::responses::{Response, ResponseBuilder, ResponseHead};
pub use self::service::HttpService;
#[cfg(any(feature = "openssl", feature = "rustls"))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "openssl", feature = "rustls"))))]
pub use self::service::TlsAcceptorConfig;
/// A major HTTP protocol version.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]


@ -3,15 +3,15 @@ use std::{cell::RefCell, ops, rc::Rc};
use bitflags::bitflags;
/// Represents various types of connection
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum ConnectionType {
/// Close connection after response.
/// Close connection after response
Close,
/// Keep connection alive after response.
/// Keep connection alive after response
KeepAlive,
/// Connection is upgraded to different type.
/// Connection is upgraded to different type
Upgrade,
}
@ -69,8 +69,8 @@ impl<T: Head> Drop for Message<T> {
}
}
/// Generic `Head` object pool.
#[doc(hidden)]
/// Request's objects pool
pub struct MessagePool<T: Head>(RefCell<Vec<Rc<T>>>);
impl<T: Head> MessagePool<T> {

View File

@ -1,49 +0,0 @@
/// Test Module for checking the drop state of certain async tasks that are spawned
/// with `actix_rt::spawn`
///
/// The target task must explicitly create a `NotifyOnDrop` when spawning the task
use std::cell::RefCell;
thread_local! {
static NOTIFY_DROPPED: RefCell<Option<bool>> = RefCell::new(None);
}
/// Check if the spawned task is dropped.
///
/// # Panics
/// Panics when there was no `NotifyOnDrop` instance on current thread.
pub(crate) fn is_dropped() -> bool {
NOTIFY_DROPPED.with(|bool| {
bool.borrow()
.expect("No NotifyOnDrop existed on current thread")
})
}
pub(crate) struct NotifyOnDrop;
impl NotifyOnDrop {
/// # Panics
/// Panics when constructing multiple instances on any given thread.
pub(crate) fn new() -> Self {
NOTIFY_DROPPED.with(|bool| {
let mut bool = bool.borrow_mut();
if bool.is_some() {
panic!("NotifyOnDrop existed on current thread");
} else {
*bool = Some(false);
}
});
NotifyOnDrop
}
}
impl Drop for NotifyOnDrop {
fn drop(&mut self) {
NOTIFY_DROPPED.with(|bool| {
if let Some(b) = bool.borrow_mut().as_mut() {
*b = true;
}
});
}
}
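
A sketch of the intended usage pattern, written as a hypothetical crate-internal test (the module is `pub(crate)` and `#[cfg(test)]`, so this only compiles inside actix-http); the single-threaded `actix_rt` runtime is what lets the thread-local flag be observed after the spawned task finishes:

#[actix_rt::test]
async fn spawned_task_guard_is_dropped() {
    actix_rt::spawn(async {
        // the observed task creates the guard itself
        let _guard = crate::notify_on_drop::NotifyOnDrop::new();
        // ... task body ...
    })
    .await
    .unwrap();

    // the guard was dropped when the spawned task finished
    assert!(crate::notify_on_drop::is_dropped());
}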

View File

@ -6,30 +6,16 @@ use std::{
use bytes::Bytes;
use futures_core::Stream;
use pin_project_lite::pin_project;
use crate::error::PayloadError;
/// A boxed payload stream.
pub type BoxedPayloadStream = Pin<Box<dyn Stream<Item = Result<Bytes, PayloadError>>>>;
#[doc(hidden)]
#[deprecated(since = "3.0.0", note = "Renamed to `BoxedPayloadStream`.")]
#[deprecated(since = "4.0.0", note = "Renamed to `BoxedPayloadStream`.")]
pub type PayloadStream = BoxedPayloadStream;
#[cfg(not(feature = "http2"))]
pin_project! {
/// A streaming payload.
#[project = PayloadProj]
pub enum Payload<S = BoxedPayloadStream> {
None,
H1 { payload: crate::h1::Payload },
Stream { #[pin] payload: S },
}
}
#[cfg(feature = "http2")]
pin_project! {
pin_project_lite::pin_project! {
/// A streaming payload.
#[project = PayloadProj]
pub enum Payload<S = BoxedPayloadStream> {
@ -46,16 +32,14 @@ impl<S> From<crate::h1::Payload> for Payload<S> {
}
}
#[cfg(feature = "http2")]
impl<S> From<crate::h2::Payload> for Payload<S> {
fn from(payload: crate::h2::Payload) -> Self {
Payload::H2 { payload }
}
}
#[cfg(feature = "http2")]
impl<S> From<::h2::RecvStream> for Payload<S> {
fn from(stream: ::h2::RecvStream) -> Self {
impl<S> From<h2::RecvStream> for Payload<S> {
fn from(stream: h2::RecvStream) -> Self {
Payload::H2 {
payload: crate::h2::Payload::new(stream),
}
@ -86,10 +70,7 @@ where
match self.project() {
PayloadProj::None => Poll::Ready(None),
PayloadProj::H1 { payload } => Pin::new(payload).poll_next(cx),
#[cfg(feature = "http2")]
PayloadProj::H2 { payload } => Pin::new(payload).poll_next(cx),
PayloadProj::Stream { payload } => payload.poll_next(cx),
}
}
@ -97,10 +78,12 @@ where
#[cfg(test)]
mod tests {
use std::panic::{RefUnwindSafe, UnwindSafe};
use static_assertions::{assert_impl_all, assert_not_impl_any};
use super::*;
assert_impl_all!(Payload: Unpin);
assert_not_impl_any!(Payload: Send, Sync);
assert_not_impl_any!(Payload: Send, Sync, UnwindSafe, RefUnwindSafe);
}
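
A small sketch of driving the `Stream` variant by hand; it assumes `futures-util` and the `actix-rt` macros are available as dev-dependencies, and relies on the variant fields being public because the enum is:

use actix_http::{error::PayloadError, Payload};
use bytes::Bytes;
use futures_util::{stream, StreamExt as _};

#[actix_rt::main]
async fn main() {
    let chunks = stream::iter(vec![Ok::<_, PayloadError>(Bytes::from_static(b"hello"))]);
    let mut payload = Payload::Stream { payload: chunks };

    // `Payload` itself implements `Stream<Item = Result<Bytes, PayloadError>>`
    while let Some(chunk) = payload.next().await {
        assert_eq!(chunk.unwrap(), Bytes::from_static(b"hello"));
    }
}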

View File

@ -130,8 +130,8 @@ impl RequestHead {
}
}
/// Request contains `EXPECT` header.
#[inline]
/// Request contains `EXPECT` header
pub fn expect(&self) -> bool {
self.flags.contains(Flags::EXPECT)
}
@ -142,8 +142,8 @@ impl RequestHead {
}
}
#[allow(clippy::large_enum_variant)]
#[derive(Debug)]
#[allow(clippy::large_enum_variant)]
pub enum RequestHeadType {
Owned(RequestHead),
Rc(Rc<RequestHead>, Option<HeaderMap>),

View File

@ -19,7 +19,7 @@ pub struct Request<P = BoxedPayloadStream> {
pub(crate) payload: Payload<P>,
pub(crate) head: Message<RequestHead>,
pub(crate) conn_data: Option<Rc<Extensions>>,
pub(crate) extensions: RefCell<Extensions>,
pub(crate) req_data: RefCell<Extensions>,
}
impl<P> HttpMessage for Request<P> {
@ -34,14 +34,16 @@ impl<P> HttpMessage for Request<P> {
mem::replace(&mut self.payload, Payload::None)
}
/// Request extensions
#[inline]
fn extensions(&self) -> Ref<'_, Extensions> {
self.extensions.borrow()
self.req_data.borrow()
}
/// Mutable reference to the request's extensions
#[inline]
fn extensions_mut(&self) -> RefMut<'_, Extensions> {
self.extensions.borrow_mut()
self.req_data.borrow_mut()
}
}
@ -50,7 +52,7 @@ impl From<Message<RequestHead>> for Request<BoxedPayloadStream> {
Request {
head,
payload: Payload::None,
extensions: RefCell::new(Extensions::default()),
req_data: RefCell::new(Extensions::default()),
conn_data: None,
}
}
@ -63,7 +65,7 @@ impl Request<BoxedPayloadStream> {
Request {
head: Message::new(),
payload: Payload::None,
extensions: RefCell::new(Extensions::default()),
req_data: RefCell::new(Extensions::default()),
conn_data: None,
}
}
@ -75,7 +77,7 @@ impl<P> Request<P> {
Request {
payload,
head: Message::new(),
extensions: RefCell::new(Extensions::default()),
req_data: RefCell::new(Extensions::default()),
conn_data: None,
}
}
@ -88,7 +90,7 @@ impl<P> Request<P> {
Request {
payload,
head: self.head,
extensions: self.extensions,
req_data: self.req_data,
conn_data: self.conn_data,
},
pl,
@ -113,14 +115,14 @@ impl<P> Request<P> {
#[inline]
/// Http message part of the request
pub fn head(&self) -> &RequestHead {
&self.head
&*self.head
}
#[inline]
#[doc(hidden)]
/// Mutable reference to an HTTP message part of the request
pub fn head_mut(&mut self) -> &mut RequestHead {
&mut self.head
&mut *self.head
}
/// Mutable reference to the message's headers.
@ -193,17 +195,16 @@ impl<P> Request<P> {
.and_then(|container| container.get::<T>())
}
/// Returns the connection-level data/extensions container if an [on-connect] callback was
/// registered, leaving an empty one in its place.
/// Returns the connection data container if an [on-connect] callback was registered.
///
/// [on-connect]: crate::HttpServiceBuilder::on_connect_ext
pub fn take_conn_data(&mut self) -> Option<Rc<Extensions>> {
self.conn_data.take()
}
/// Returns the request-local data/extensions container, leaving an empty one in its place.
/// Returns the request data container, leaving an empty one in its place.
pub fn take_req_data(&mut self) -> Extensions {
mem::take(self.extensions.get_mut())
mem::take(self.req_data.get_mut())
}
}
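
A short sketch of the request-local container in action, built with the crate's own `TestRequest` helper; the `Flag` type is purely a hypothetical marker for the example:

use actix_http::{test::TestRequest, HttpMessage as _};

#[derive(Debug, PartialEq)]
struct Flag(u32);

fn main() {
    let mut req = TestRequest::default().finish();

    // store and read back request-local data
    req.extensions_mut().insert(Flag(42));
    assert_eq!(req.extensions().get::<Flag>(), Some(&Flag(42)));

    // taking the container leaves an empty one behind
    let data = req.take_req_data();
    assert!(data.get::<Flag>().is_some());
    assert!(req.extensions().get::<Flag>().is_none());
}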

View File

@ -1,6 +1,9 @@
//! HTTP response builder.
use std::{cell::RefCell, fmt, str};
use std::{
cell::{Ref, RefMut},
fmt, str,
};
use crate::{
body::{EitherBody, MessageBody},
@ -144,7 +147,7 @@ impl ResponseBuilder {
self
}
/// Set connection type to `Upgrade`.
/// Set connection type to Upgrade
#[inline]
pub fn upgrade<V>(&mut self, value: V) -> &mut Self
where
@ -161,7 +164,7 @@ impl ResponseBuilder {
self
}
/// Force-close connection, even if it is marked as keep-alive.
/// Force close connection, even if it is marked as keep-alive
#[inline]
pub fn force_close(&mut self) -> &mut Self {
if let Some(parts) = self.inner() {
@ -199,6 +202,20 @@ impl ResponseBuilder {
self
}
/// Responses extensions
#[inline]
pub fn extensions(&self) -> Ref<'_, Extensions> {
let head = self.head.as_ref().expect("cannot reuse response builder");
head.extensions.borrow()
}
/// Mutable reference to the response's extensions
#[inline]
pub fn extensions_mut(&mut self) -> RefMut<'_, Extensions> {
let head = self.head.as_ref().expect("cannot reuse response builder");
head.extensions.borrow_mut()
}
/// Generate response with a wrapped body.
///
/// This `ResponseBuilder` will be left in a useless state.
@ -221,12 +238,7 @@ impl ResponseBuilder {
}
let head = self.head.take().expect("cannot reuse response builder");
Ok(Response {
head,
body,
extensions: RefCell::new(Extensions::new()),
})
Ok(Response { head, body })
}
/// Generate response with an empty body.
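
A minimal sketch of the builder in use, assuming the 3.x-style `insert_header` tuple API:

use actix_http::{Response, StatusCode};

fn main() {
    let res = Response::build(StatusCode::OK)
        .force_close()
        .insert_header(("content-type", "text/plain; charset=utf-8"))
        .body("hello world");

    assert_eq!(res.status(), StatusCode::OK);
    assert!(!res.keep_alive());
    assert!(res.headers().contains_key("content-type"));
}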

View File

@ -1,19 +1,25 @@
//! Response head type and caching pool.
use std::{cell::RefCell, ops};
use std::{
cell::{Ref, RefCell, RefMut},
ops,
};
use crate::{header::HeaderMap, message::Flags, ConnectionType, StatusCode, Version};
use crate::{
header::HeaderMap, message::Flags, ConnectionType, Extensions, StatusCode, Version,
};
thread_local! {
static RESPONSE_POOL: BoxedResponsePool = BoxedResponsePool::create();
}
#[derive(Debug, Clone)]
#[derive(Debug)]
pub struct ResponseHead {
pub version: Version,
pub status: StatusCode,
pub headers: HeaderMap,
pub reason: Option<&'static str>,
pub(crate) extensions: RefCell<Extensions>,
pub(crate) flags: Flags,
}
@ -27,22 +33,23 @@ impl ResponseHead {
headers: HeaderMap::with_capacity(12),
reason: None,
flags: Flags::empty(),
extensions: RefCell::new(Extensions::new()),
}
}
/// Read the message headers.
#[inline]
/// Read the message headers.
pub fn headers(&self) -> &HeaderMap {
&self.headers
}
/// Mutable reference to the message headers.
#[inline]
/// Mutable reference to the message headers.
pub fn headers_mut(&mut self) -> &mut HeaderMap {
&mut self.headers
}
/// Sets the flag that controls whether to send headers formatted as Camel-Case.
/// Sets the flag that controls wether to send headers formatted as Camel-Case.
///
/// Only applicable to HTTP/1.x responses; HTTP/2 header names are always lowercase.
#[inline]
@ -54,8 +61,20 @@ impl ResponseHead {
}
}
/// Set connection type of the message
/// Message extensions
#[inline]
pub fn extensions(&self) -> Ref<'_, Extensions> {
self.extensions.borrow()
}
/// Mutable reference to the message's extensions
#[inline]
pub fn extensions_mut(&self) -> RefMut<'_, Extensions> {
self.extensions.borrow_mut()
}
#[inline]
/// Set connection type of the message
pub fn set_connection_type(&mut self, ctype: ConnectionType) {
match ctype {
ConnectionType::Close => self.flags.insert(Flags::CLOSE),
@ -114,14 +133,14 @@ impl ResponseHead {
}
}
/// Get response body chunking state
#[inline]
/// Get response body chunking state
pub fn chunked(&self) -> bool {
!self.flags.contains(Flags::NO_CHUNKING)
}
/// Set no chunking for payload
#[inline]
/// Set no chunking for payload
pub fn no_chunking(&mut self, val: bool) {
if val {
self.flags.insert(Flags::NO_CHUNKING);
@ -164,7 +183,7 @@ impl Drop for BoxedResponseHead {
}
}
/// Response head object pool.
/// Request's objects pool
#[doc(hidden)]
pub struct BoxedResponsePool(#[allow(clippy::vec_box)] RefCell<Vec<Box<ResponseHead>>>);
@ -173,7 +192,7 @@ impl BoxedResponsePool {
BoxedResponsePool(RefCell::new(Vec::with_capacity(128)))
}
/// Get message from the pool.
/// Get message from the pool
#[inline]
fn get_message(&self, status: StatusCode) -> BoxedResponseHead {
if let Some(mut head) = self.0.borrow_mut().pop() {
@ -189,12 +208,12 @@ impl BoxedResponsePool {
}
}
/// Release request instance.
/// Release request instance
#[inline]
fn release(&self, msg: Box<ResponseHead>) {
fn release(&self, mut msg: Box<ResponseHead>) {
let pool = &mut self.0.borrow_mut();
if pool.len() < 128 {
msg.extensions.get_mut().clear();
pool.push(msg);
}
}
@ -210,15 +229,14 @@ mod tests {
use memchr::memmem;
use crate::{
h1::H1Service,
header::{HeaderName, HeaderValue},
Error, Request, Response, ServiceConfig,
Error, HttpService, Request, Response,
};
#[actix_rt::test]
async fn camel_case_headers() {
let mut srv = actix_http_test::test_server(|| {
H1Service::with_config(ServiceConfig::default(), |req: Request| async move {
HttpService::new(|req: Request| async move {
let mut res = Response::ok();
if req.path().contains("camel") {
@ -229,7 +247,6 @@ mod tests {
HeaderName::from_static("foo-bar"),
HeaderValue::from_static("baz"),
);
Ok::<_, Error>(res)
})
.tcp()
@ -237,32 +254,20 @@ mod tests {
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
stream
.write_all(b"GET /camel HTTP/1.1\r\nConnection: Close\r\n\r\n")
.unwrap();
let mut data = vec![];
let _ = stream.read_to_end(&mut data).unwrap();
let _ = stream.write_all(b"GET /camel HTTP/1.1\r\nConnection: Close\r\n\r\n");
let mut data = vec![0; 1024];
let _ = stream.read(&mut data);
assert_eq!(&data[..17], b"HTTP/1.1 200 OK\r\n");
assert!(memmem::find(&data, b"Foo-Bar").is_some());
assert!(memmem::find(&data, b"foo-bar").is_none());
assert!(memmem::find(&data, b"Date").is_some());
assert!(memmem::find(&data, b"date").is_none());
assert!(memmem::find(&data, b"Content-Length").is_some());
assert!(memmem::find(&data, b"content-length").is_none());
assert!(!memmem::find(&data, b"foo-bar").is_some());
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
stream
.write_all(b"GET /lower HTTP/1.1\r\nConnection: Close\r\n\r\n")
.unwrap();
let mut data = vec![];
let _ = stream.read_to_end(&mut data).unwrap();
let _ = stream.write_all(b"GET /lower HTTP/1.1\r\nConnection: Close\r\n\r\n");
let mut data = vec![0; 1024];
let _ = stream.read(&mut data);
assert_eq!(&data[..17], b"HTTP/1.1 200 OK\r\n");
assert!(memmem::find(&data, b"Foo-Bar").is_none());
assert!(!memmem::find(&data, b"Foo-Bar").is_some());
assert!(memmem::find(&data, b"foo-bar").is_some());
assert!(memmem::find(&data, b"Date").is_none());
assert!(memmem::find(&data, b"date").is_some());
assert!(memmem::find(&data, b"Content-Length").is_none());
assert!(memmem::find(&data, b"content-length").is_some());
srv.stop().await;
}
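
For reference, a sketch of flipping these flags directly on a response head; `Response::new` and `set_camel_case_headers` are the assumed names of the constructor/setter whose signatures are elided in the hunks above:

use actix_http::{ConnectionType, Response, StatusCode};

fn main() {
    let mut res = Response::new(StatusCode::OK);
    let head = res.head_mut();

    // close the connection and skip chunked transfer encoding
    head.set_connection_type(ConnectionType::Close);
    head.no_chunking(true);
    // assumed setter name; the actual signature is elided in the diff above
    head.set_camel_case_headers(true);

    assert!(!head.keep_alive());
    assert!(!head.chunked());
}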

View File

@ -1,7 +1,7 @@
//! HTTP response.
use std::{
cell::{Ref, RefCell, RefMut},
cell::{Ref, RefMut},
fmt, str,
};
@ -9,7 +9,7 @@ use bytes::{Bytes, BytesMut};
use bytestring::ByteString;
use crate::{
body::{BoxBody, EitherBody, MessageBody},
body::{BoxBody, MessageBody},
header::{self, HeaderMap, TryIntoHeaderValue},
responses::BoxedResponseHead,
Error, Extensions, ResponseBuilder, ResponseHead, StatusCode,
@ -19,7 +19,6 @@ use crate::{
pub struct Response<B> {
pub(crate) head: BoxedResponseHead,
pub(crate) body: B,
pub(crate) extensions: RefCell<Extensions>,
}
impl Response<BoxBody> {
@ -29,7 +28,6 @@ impl Response<BoxBody> {
Response {
head: BoxedResponseHead::new(status),
body: BoxBody::new(()),
extensions: RefCell::new(Extensions::new()),
}
}
@ -76,20 +74,19 @@ impl<B> Response<B> {
Response {
head: BoxedResponseHead::new(status),
body,
extensions: RefCell::new(Extensions::new()),
}
}
/// Returns a reference to the head of this response.
#[inline]
pub fn head(&self) -> &ResponseHead {
&self.head
&*self.head
}
/// Returns a mutable reference to the head of this response.
#[inline]
pub fn head_mut(&mut self) -> &mut ResponseHead {
&mut self.head
&mut *self.head
}
/// Returns the status code of this response.
@ -123,21 +120,20 @@ impl<B> Response<B> {
}
/// Returns true if keep-alive is enabled.
#[inline]
pub fn keep_alive(&self) -> bool {
self.head.keep_alive()
}
/// Returns a reference to the request-local data/extensions container.
/// Returns a reference to the extensions of this response.
#[inline]
pub fn extensions(&self) -> Ref<'_, Extensions> {
self.extensions.borrow()
self.head.extensions.borrow()
}
/// Returns a mutable reference to the request-local data/extensions container.
/// Returns a mutable reference to the extensions of this response.
#[inline]
pub fn extensions_mut(&mut self) -> RefMut<'_, Extensions> {
self.extensions.borrow_mut()
self.head.extensions.borrow_mut()
}
/// Returns a reference to the body of this response.
@ -147,29 +143,24 @@ impl<B> Response<B> {
}
/// Sets new body.
#[inline]
pub fn set_body<B2>(self, body: B2) -> Response<B2> {
Response {
head: self.head,
body,
extensions: self.extensions,
}
}
/// Drops body and returns new response.
#[inline]
pub fn drop_body(self) -> Response<()> {
self.set_body(())
}
/// Sets new body, returning new response and previous body value.
#[inline]
pub(crate) fn replace_body<B2>(self, body: B2) -> (Response<B2>, B) {
(
Response {
head: self.head,
body,
extensions: self.extensions,
},
self.body,
)
@ -180,15 +171,11 @@ impl<B> Response<B> {
/// # Implementation Notes
/// Due to internal performance optimizations, the first element of the returned tuple is a
/// `Response` as well but only contains the head of the response this was called on.
#[inline]
pub fn into_parts(self) -> (Response<()>, B) {
self.replace_body(())
}
/// Map the current body type to another using a closure, returning a new response.
///
/// Closure receives the response head and the current body type.
#[inline]
/// Returns new response with mapped body.
pub fn map_body<F, B2>(mut self, f: F) -> Response<B2>
where
F: FnOnce(&mut ResponseHead, B) -> B2,
@ -198,11 +185,9 @@ impl<B> Response<B> {
Response {
head: self.head,
body,
extensions: self.extensions,
}
}
/// Map the current body to a type-erased `BoxBody`.
#[inline]
pub fn map_into_boxed_body(self) -> Response<BoxBody>
where
@ -211,8 +196,7 @@ impl<B> Response<B> {
self.map_body(|_, body| body.boxed())
}
/// Returns the response body, dropping all other parts.
#[inline]
/// Returns body, consuming this response.
pub fn into_body(self) -> B {
self.body
}
@ -255,9 +239,9 @@ impl<I: Into<Response<BoxBody>>, E: Into<Error>> From<Result<I, E>> for Response
}
}
impl From<ResponseBuilder> for Response<EitherBody<()>> {
impl From<ResponseBuilder> for Response<BoxBody> {
fn from(mut builder: ResponseBuilder) -> Self {
builder.finish()
builder.finish().map_into_boxed_body()
}
}
@ -285,24 +269,6 @@ impl From<&'static [u8]> for Response<&'static [u8]> {
}
}
impl From<Vec<u8>> for Response<Vec<u8>> {
fn from(val: Vec<u8>) -> Self {
let mut res = Response::with_body(StatusCode::OK, val);
let mime = mime::APPLICATION_OCTET_STREAM.try_into_value().unwrap();
res.headers_mut().insert(header::CONTENT_TYPE, mime);
res
}
}
impl From<&Vec<u8>> for Response<Vec<u8>> {
fn from(val: &Vec<u8>) -> Self {
let mut res = Response::with_body(StatusCode::OK, val.clone());
let mime = mime::APPLICATION_OCTET_STREAM.try_into_value().unwrap();
res.headers_mut().insert(header::CONTENT_TYPE, mime);
res
}
}
impl From<String> for Response<String> {
fn from(val: String) -> Self {
let mut res = Response::with_body(StatusCode::OK, val);
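
A sketch of the head/body plumbing described in this file, splitting a response apart and re-attaching a body of a different type (`bytes` is assumed to be available, as it already is for the crate):

use actix_http::{Response, StatusCode};
use bytes::Bytes;

fn main() {
    let res = Response::with_body(StatusCode::OK, "hello".to_owned());

    // split the head from the body...
    let (head_only, body) = res.into_parts();
    assert_eq!(body, "hello");
    assert_eq!(head_only.status(), StatusCode::OK);

    // ...then attach a body of a different type
    let res = head_only.set_body(Bytes::from_static(b"world"));
    assert_eq!(res.into_body(), Bytes::from_static(b"world"));
}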

View File

@ -15,48 +15,16 @@ use actix_service::{
};
use futures_core::{future::LocalBoxFuture, ready};
use pin_project_lite::pin_project;
use tracing::error;
use crate::{
body::{BoxBody, MessageBody},
builder::HttpServiceBuilder,
config::{KeepAlive, ServiceConfig},
error::DispatchError,
h1, ConnectCallback, OnConnectData, Protocol, Request, Response, ServiceConfig,
h1, h2, ConnectCallback, OnConnectData, Protocol, Request, Response,
};
/// A [`ServiceFactory`] for HTTP/1.1 and HTTP/2 connections.
///
/// Use [`build`](Self::build) to begin constructing service. Also see [`HttpServiceBuilder`].
///
/// # Automatic HTTP Version Selection
/// There are two ways to select the HTTP version of an incoming connection:
/// - One is to rely on the ALPN information that is provided when using a TLS (HTTPS); both
/// versions are supported automatically when using either of the `.rustls()` or `.openssl()`
/// finalizing methods.
/// - The other is to read the first few bytes of the TCP stream. This is the only viable approach
/// for supporting H2C, which allows the HTTP/2 protocol to work over plaintext connections. Use
/// the `.tcp_auto_h2c()` finalizing method to enable this behavior.
///
/// # Examples
/// ```
/// # use std::convert::Infallible;
/// use actix_http::{HttpService, Request, Response, StatusCode};
///
/// // this service would be constructed in an actix_server::Server
///
/// # actix_rt::System::new().block_on(async {
/// HttpService::build()
/// // the builder finalizing method, other finalizers would not return an `HttpService`
/// .finish(|_req: Request| async move {
/// Ok::<_, Infallible>(
/// Response::build(StatusCode::OK).body("Hello!")
/// )
/// })
/// // the service finalizing method
/// // you can use `.tcp_auto_h2c()`, `.rustls()`, or `.openssl()` instead of `.tcp()`
/// .tcp();
/// # })
/// ```
/// A `ServiceFactory` for HTTP/1.1 or HTTP/2 protocol.
pub struct HttpService<T, S, B, X = h1::ExpectHandler, U = h1::UpgradeHandler> {
srv: S,
cfg: ServiceConfig,
@ -75,9 +43,9 @@ where
<S::Service as Service<Request>>::Future: 'static,
B: MessageBody + 'static,
{
/// Constructs builder for `HttpService` instance.
/// Create builder for `HttpService` instance.
pub fn build() -> HttpServiceBuilder<T, S> {
HttpServiceBuilder::default()
HttpServiceBuilder::new()
}
}
@ -90,10 +58,12 @@ where
<S::Service as Service<Request>>::Future: 'static,
B: MessageBody + 'static,
{
/// Constructs new `HttpService` instance from service with default config.
/// Create new `HttpService` instance.
pub fn new<F: IntoServiceFactory<S, Request>>(service: F) -> Self {
let cfg = ServiceConfig::new(KeepAlive::Timeout(5), 5000, 0, false, None);
HttpService {
cfg: ServiceConfig::default(),
cfg,
srv: service.into_factory(),
expect: h1::ExpectHandler,
upgrade: None,
@ -102,7 +72,7 @@ where
}
}
/// Constructs new `HttpService` instance from config and service.
/// Create new `HttpService` instance with config.
pub(crate) fn with_config<F: IntoServiceFactory<S, Request>>(
cfg: ServiceConfig,
service: F,
@ -127,10 +97,11 @@ where
<S::Service as Service<Request>>::Future: 'static,
B: MessageBody,
{
/// Sets service for `Expect: 100-Continue` handling.
/// Provide service for `EXPECT: 100-Continue` support.
///
/// An expect service is called with requests that contain an `Expect` header. A successful
/// response type is also a request which will be forwarded to the main service.
/// Service get called with request that contains `EXPECT` header.
/// Service must return request in case of success, in that case
/// request will be forwarded to main service.
pub fn expect<X1>(self, expect: X1) -> HttpService<T, S, B, X1, U>
where
X1: ServiceFactory<Request, Config = (), Response = Request>,
@ -147,10 +118,10 @@ where
}
}
/// Sets service for custom `Connection: Upgrade` handling.
/// Provide service for custom `Connection: UPGRADE` support.
///
/// If service is provided then normal requests handling get halted and this service get called
/// with original request and framed object.
/// If service is provided then normal requests handling get halted
/// and this service get called with original request and framed object.
pub fn upgrade<U1>(self, upgrade: Option<U1>) -> HttpService<T, S, B, X, U1>
where
U1: ServiceFactory<(Request, Framed<T, h1::Codec>), Config = (), Response = ()>,
@ -195,9 +166,7 @@ where
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
/// Creates TCP stream service from HTTP service.
///
/// The resulting service only supports HTTP/1.x.
/// Create simple tcp stream service
pub fn tcp(
self,
) -> impl ServiceFactory<
@ -213,61 +182,6 @@ where
})
.and_then(self)
}
/// Creates TCP stream service from HTTP service that automatically selects HTTP/1.x or HTTP/2
/// on plaintext connections.
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn tcp_auto_h2c(
self,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Response = (),
Error = DispatchError,
InitError = (),
> {
fn_service(move |io: TcpStream| async move {
// subset of HTTP/2 preface defined by RFC 9113 §3.4
// this subset was chosen to maximize likelihood that peeking only once will allow us to
// reliably determine the version, or else fall back to h1 and fail quickly if data
// on the wire is junk
const H2_PREFACE: &[u8] = b"PRI * HTTP/2";
let mut buf = [0; 12];
io.peek(&mut buf).await?;
let proto = if buf == H2_PREFACE {
Protocol::Http2
} else {
Protocol::Http1
};
let peer_addr = io.peer_addr().ok();
Ok((io, proto, peer_addr))
})
.and_then(self)
}
}
/// Configuration options used when accepting TLS connection.
#[cfg(any(feature = "openssl", feature = "rustls"))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "openssl", feature = "rustls"))))]
#[derive(Debug, Default)]
pub struct TlsAcceptorConfig {
pub(crate) handshake_timeout: Option<std::time::Duration>,
}
#[cfg(any(feature = "openssl", feature = "rustls"))]
impl TlsAcceptorConfig {
/// Set TLS handshake timeout duration.
pub fn handshake_timeout(self, dur: std::time::Duration) -> Self {
Self {
handshake_timeout: Some(dur),
// ..self
}
}
}
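
As a usage sketch (the type is gated on the `openssl`/`rustls` features): the config is built separately and then handed to one of the `*_with_config` finalizers below.

use std::time::Duration;

use actix_http::TlsAcceptorConfig;

fn main() {
    // give clients three seconds to complete the TLS handshake
    let _tls_config = TlsAcceptorConfig::default().handshake_timeout(Duration::from_secs(3));
    // then pass it to `.rustls_with_config(...)` or `.openssl_with_config(...)`
}
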
#[cfg(feature = "openssl")]
@ -309,7 +223,6 @@ mod openssl {
U::InitError: fmt::Debug,
{
/// Create OpenSSL based service.
#[cfg_attr(docsrs, doc(cfg(feature = "openssl")))]
pub fn openssl(
self,
acceptor: SslAcceptor,
@ -320,29 +233,7 @@ mod openssl {
Error = TlsError<SslError, DispatchError>,
InitError = (),
> {
self.openssl_with_config(acceptor, TlsAcceptorConfig::default())
}
/// Create OpenSSL based service with custom TLS acceptor configuration.
#[cfg_attr(docsrs, doc(cfg(feature = "openssl")))]
pub fn openssl_with_config(
self,
acceptor: SslAcceptor,
tls_acceptor_config: TlsAcceptorConfig,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Response = (),
Error = TlsError<SslError, DispatchError>,
InitError = (),
> {
let mut acceptor = Acceptor::new(acceptor);
if let Some(handshake_timeout) = tls_acceptor_config.handshake_timeout {
acceptor.set_handshake_timeout(handshake_timeout);
}
acceptor
Acceptor::new(acceptor)
.map_init_err(|_| {
unreachable!("TLS acceptor service factory does not error on init")
})
@ -404,26 +295,9 @@ mod rustls {
U::InitError: fmt::Debug,
{
/// Create Rustls based service.
#[cfg_attr(docsrs, doc(cfg(feature = "rustls")))]
pub fn rustls(
self,
config: ServerConfig,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Response = (),
Error = TlsError<io::Error, DispatchError>,
InitError = (),
> {
self.rustls_with_config(config, TlsAcceptorConfig::default())
}
/// Create Rustls based service with custom TLS acceptor configuration.
#[cfg_attr(docsrs, doc(cfg(feature = "rustls")))]
pub fn rustls_with_config(
self,
mut config: ServerConfig,
tls_acceptor_config: TlsAcceptorConfig,
) -> impl ServiceFactory<
TcpStream,
Config = (),
@ -435,13 +309,7 @@ mod rustls {
protos.extend_from_slice(&config.alpn_protocols);
config.alpn_protocols = protos;
let mut acceptor = Acceptor::new(config);
if let Some(handshake_timeout) = tls_acceptor_config.handshake_timeout {
acceptor.set_handshake_timeout(handshake_timeout);
}
acceptor
Acceptor::new(config)
.map_init_err(|_| {
unreachable!("TLS acceptor service factory does not error on init")
})
@ -505,13 +373,13 @@ where
Box::pin(async move {
let expect = expect
.await
.map_err(|e| error!("Init http expect service error: {:?}", e))?;
.map_err(|e| log::error!("Init http expect service error: {:?}", e))?;
let upgrade = match upgrade {
Some(upgrade) => {
let upgrade = upgrade
.await
.map_err(|e| error!("Init http upgrade service error: {:?}", e))?;
.map_err(|e| log::error!("Init http upgrade service error: {:?}", e))?;
Some(upgrade)
}
None => None,
@ -519,7 +387,7 @@ where
let service = service
.await
.map_err(|e| error!("Init http service error: {:?}", e))?;
.map_err(|e| log::error!("Init http service error: {:?}", e))?;
Ok(HttpServiceHandler::new(
cfg,
@ -626,7 +494,7 @@ where
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self._poll_ready(cx).map_err(|err| {
error!("HTTP service readiness error: {:?}", err);
log::error!("HTTP service readiness error: {:?}", err);
DispatchError::Service(err)
})
}
@ -638,11 +506,10 @@ where
let conn_data = OnConnectData::from_io(&io, self.on_connect_ext.as_deref());
match proto {
#[cfg(feature = "http2")]
Protocol::Http2 => HttpServiceHandlerResponse {
state: State::H2Handshake {
handshake: Some((
crate::h2::handshake_with_timeout(io, &self.cfg),
h2::handshake_with_timeout(io, &self.cfg),
self.cfg.clone(),
self.flow.clone(),
conn_data,
@ -651,11 +518,6 @@ where
},
},
#[cfg(not(feature = "http2"))]
Protocol::Http2 => {
panic!("HTTP/2 support is disabled (enable with the `http2` feature flag)")
}
Protocol::Http1 => HttpServiceHandlerResponse {
state: State::H1 {
dispatcher: h1::Dispatcher::new(
@ -673,7 +535,6 @@ where
}
}
#[cfg(not(feature = "http2"))]
pin_project! {
#[project = StateProj]
enum State<T, S, B, X, U>
@ -695,37 +556,10 @@ pin_project! {
U::Error: fmt::Display,
{
H1 { #[pin] dispatcher: h1::Dispatcher<T, S, B, X, U> },
}
}
#[cfg(feature = "http2")]
pin_project! {
#[project = StateProj]
enum State<T, S, B, X, U>
where
T: AsyncRead,
T: AsyncWrite,
T: Unpin,
S: Service<Request>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>>,
B: MessageBody,
X: Service<Request, Response = Request>,
X::Error: Into<Response<BoxBody>>,
U: Service<(Request, Framed<T, h1::Codec>), Response = ()>,
U::Error: fmt::Display,
{
H1 { #[pin] dispatcher: h1::Dispatcher<T, S, B, X, U> },
H2 { #[pin] dispatcher: crate::h2::Dispatcher<T, S, B, X, U> },
H2 { #[pin] dispatcher: h2::Dispatcher<T, S, B, X, U> },
H2Handshake {
handshake: Option<(
crate::h2::HandshakeWithTimeout<T>,
h2::HandshakeWithTimeout<T>,
ServiceConfig,
Rc<HttpFlow<S, X, U>>,
OnConnectData,
@ -784,25 +618,21 @@ where
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.as_mut().project().state.project() {
StateProj::H1 { dispatcher } => dispatcher.poll(cx),
#[cfg(feature = "http2")]
StateProj::H2 { dispatcher } => dispatcher.poll(cx),
#[cfg(feature = "http2")]
StateProj::H2Handshake { handshake: data } => {
match ready!(Pin::new(&mut data.as_mut().unwrap().0).poll(cx)) {
Ok((conn, timer)) => {
let (_, config, flow, conn_data, peer_addr) = data.take().unwrap();
self.as_mut().project().state.set(State::H2 {
dispatcher: crate::h2::Dispatcher::new(
dispatcher: h2::Dispatcher::new(
conn, flow, config, peer_addr, conn_data, timer,
),
});
self.poll(cx)
}
Err(err) => {
tracing::trace!("H2 handshake error: {}", err);
trace!("H2 handshake error: {}", err);
Poll::Ready(Err(err))
}
}
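
Tying the pieces of this file together, a sketch of wiring a custom expect handler with the finalizers described above; it mirrors the crate's own doc example and assumes `Infallible` is acceptable to the service error bounds:

use std::convert::Infallible;

use actix_http::{HttpService, Request, Response, StatusCode};

fn main() {
    actix_rt::System::new().block_on(async {
        let _factory = HttpService::build()
            // called only for requests that carry an `Expect` header;
            // returning the request lets normal handling continue
            .expect(|req: Request| async move { Ok::<_, Infallible>(req) })
            .finish(|_req: Request| async move {
                Ok::<_, Infallible>(Response::build(StatusCode::OK).body("ok"))
            })
            .tcp();
    });
}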

View File

@ -1,7 +1,7 @@
//! Various testing helpers for use in internal and app tests.
use std::{
cell::{Ref, RefCell, RefMut},
cell::{Ref, RefCell},
io::{self, Read, Write},
pin::Pin,
rc::Rc,
@ -19,7 +19,29 @@ use crate::{
Request,
};
/// Test `Request` builder.
/// Test `Request` builder
///
/// ```ignore
/// # use http::{header, StatusCode};
/// # use actix_web::*;
/// use actix_web::test::TestRequest;
///
/// fn index(req: &HttpRequest) -> Response {
/// if let Some(hdr) = req.headers().get(header::CONTENT_TYPE) {
/// Response::Ok().into()
/// } else {
/// Response::BadRequest().into()
/// }
/// }
///
/// let resp = TestRequest::default().insert_header("content-type", "text/plain")
/// .run(&index)
/// .unwrap();
/// assert_eq!(resp.status(), StatusCode::OK);
///
/// let resp = TestRequest::default().run(&index).unwrap();
/// assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
/// ```
pub struct TestRequest(Option<Inner>);
struct Inner {
@ -135,11 +157,10 @@ fn parts(parts: &mut Option<Inner>) -> &mut Inner {
}
/// Async I/O test buffer.
#[derive(Debug)]
pub struct TestBuffer {
pub read_buf: Rc<RefCell<BytesMut>>,
pub write_buf: Rc<RefCell<BytesMut>>,
pub err: Option<Rc<io::Error>>,
pub read_buf: BytesMut,
pub write_buf: BytesMut,
pub err: Option<io::Error>,
}
impl TestBuffer {
@ -149,69 +170,34 @@ impl TestBuffer {
T: Into<BytesMut>,
{
Self {
read_buf: Rc::new(RefCell::new(data.into())),
write_buf: Rc::new(RefCell::new(BytesMut::new())),
read_buf: data.into(),
write_buf: BytesMut::new(),
err: None,
}
}
// intentionally not using Clone trait
#[allow(dead_code)]
pub(crate) fn clone(&self) -> Self {
Self {
read_buf: self.read_buf.clone(),
write_buf: self.write_buf.clone(),
err: self.err.clone(),
}
}
/// Create new empty `TestBuffer` instance.
pub fn empty() -> Self {
Self::new("")
}
#[allow(dead_code)]
pub(crate) fn read_buf_slice(&self) -> Ref<'_, [u8]> {
Ref::map(self.read_buf.borrow(), |b| b.as_ref())
}
#[allow(dead_code)]
pub(crate) fn read_buf_slice_mut(&self) -> RefMut<'_, [u8]> {
RefMut::map(self.read_buf.borrow_mut(), |b| b.as_mut())
}
#[allow(dead_code)]
pub(crate) fn write_buf_slice(&self) -> Ref<'_, [u8]> {
Ref::map(self.write_buf.borrow(), |b| b.as_ref())
}
#[allow(dead_code)]
pub(crate) fn write_buf_slice_mut(&self) -> RefMut<'_, [u8]> {
RefMut::map(self.write_buf.borrow_mut(), |b| b.as_mut())
}
#[allow(dead_code)]
pub(crate) fn take_write_buf(&self) -> Bytes {
self.write_buf.borrow_mut().split().freeze()
}
/// Add data to read buffer.
pub fn extend_read_buf<T: AsRef<[u8]>>(&mut self, data: T) {
self.read_buf.borrow_mut().extend_from_slice(data.as_ref())
self.read_buf.extend_from_slice(data.as_ref())
}
}
impl io::Read for TestBuffer {
fn read(&mut self, dst: &mut [u8]) -> Result<usize, io::Error> {
if self.read_buf.borrow().is_empty() {
if self.read_buf.is_empty() {
if self.err.is_some() {
Err(Rc::try_unwrap(self.err.take().unwrap()).unwrap())
Err(self.err.take().unwrap())
} else {
Err(io::Error::new(io::ErrorKind::WouldBlock, ""))
}
} else {
let size = std::cmp::min(self.read_buf.borrow().len(), dst.len());
let b = self.read_buf.borrow_mut().split_to(size);
let size = std::cmp::min(self.read_buf.len(), dst.len());
let b = self.read_buf.split_to(size);
dst[..size].copy_from_slice(&b);
Ok(size)
}
@ -220,7 +206,7 @@ impl io::Read for TestBuffer {
impl io::Write for TestBuffer {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.write_buf.borrow_mut().extend(buf);
self.write_buf.extend(buf);
Ok(buf.len())
}
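
A small sketch of driving `TestBuffer` through its blocking `io::Read` impl; once the seeded data is drained, reads return `WouldBlock`:

use std::io::{ErrorKind, Read as _};

use actix_http::test::TestBuffer;

fn main() {
    let mut buf = TestBuffer::new("GET ");
    buf.extend_read_buf("/ HTTP/1.1\r\n\r\n");

    let mut out = Vec::new();
    let mut chunk = [0u8; 8];
    loop {
        match buf.read(&mut chunk) {
            Ok(n) => out.extend_from_slice(&chunk[..n]),
            Err(err) if err.kind() == ErrorKind::WouldBlock => break,
            Err(err) => panic!("unexpected error: {}", err),
        }
    }

    assert_eq!(out, b"GET / HTTP/1.1\r\n\r\n".to_vec());
}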

View File

@ -1,17 +1,14 @@
use actix_codec::{Decoder, Encoder};
use bitflags::bitflags;
use bytes::{Bytes, BytesMut};
use bytestring::ByteString;
use tokio_util::codec::{Decoder, Encoder};
use tracing::error;
use super::{
frame::Parser,
proto::{CloseReason, OpCode},
ProtocolError,
};
use super::frame::Parser;
use super::proto::{CloseReason, OpCode};
use super::ProtocolError;
/// A WebSocket message.
#[derive(Debug, PartialEq, Eq)]
#[derive(Debug, PartialEq)]
pub enum Message {
/// Text message.
Text(ByteString),
@ -36,7 +33,7 @@ pub enum Message {
}
/// A WebSocket frame.
#[derive(Debug, PartialEq, Eq)]
#[derive(Debug, PartialEq)]
pub enum Frame {
/// Text frame. Note that the codec does not validate UTF-8 encoding.
Text(Bytes),
@ -58,7 +55,7 @@ pub enum Frame {
}
/// A WebSocket continuation item.
#[derive(Debug, PartialEq, Eq)]
#[derive(Debug, PartialEq)]
pub enum Item {
FirstText(Bytes),
FirstBinary(Bytes),
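
A round-trip sketch through the codec, assuming the `ws` feature plus the `Codec::new`/`client_mode` constructors, with the `Encoder`/`Decoder` traits taken from `tokio-util` (which `actix-codec` re-exports):

use actix_http::ws::{Codec, Frame, Message};
use bytes::BytesMut;
use tokio_util::codec::{Decoder as _, Encoder as _};

fn main() {
    let mut server = Codec::new();               // writes unmasked frames
    let mut client = Codec::new().client_mode(); // accepts unmasked frames from the server

    let mut buf = BytesMut::new();
    server.encode(Message::Text("ping".into()), &mut buf).unwrap();

    match client.decode(&mut buf).unwrap() {
        Some(Frame::Text(payload)) => assert_eq!(payload, "ping"),
        other => panic!("unexpected frame: {:?}", other),
    }
}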

View File

@ -1,8 +1,6 @@
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_service::{IntoService, Service};
@ -73,12 +71,10 @@ mod inner {
use actix_service::{IntoService, Service};
use futures_core::stream::Stream;
use local_channel::mpsc;
use log::debug;
use pin_project_lite::pin_project;
use tracing::debug;
use actix_codec::Framed;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::codec::{Decoder, Encoder};
use actix_codec::{AsyncRead, AsyncWrite, Decoder, Encoder, Framed};
use crate::{body::BoxBody, Response};

View File

@ -1,13 +1,11 @@
use std::convert::TryFrom;
use bytes::{Buf, BufMut, BytesMut};
use tracing::debug;
use log::debug;
use super::{
mask::apply_mask,
proto::{CloseCode, CloseReason, OpCode},
ProtocolError,
};
use crate::ws::mask::apply_mask;
use crate::ws::proto::{CloseCode, CloseReason, OpCode};
use crate::ws::ProtocolError;
/// A struct representing a WebSocket frame.
#[derive(Debug)]
@ -17,6 +15,7 @@ impl Parser {
fn parse_metadata(
src: &[u8],
server: bool,
max_size: usize,
) -> Result<Option<(usize, bool, OpCode, usize, Option<[u8; 4]>)>, ProtocolError> {
let chunk_len = src.len();
@ -59,12 +58,20 @@ impl Parser {
return Ok(None);
}
let len = u64::from_be_bytes(TryFrom::try_from(&src[idx..idx + 8]).unwrap());
if len > max_size as u64 {
return Err(ProtocolError::Overflow);
}
idx += 8;
len as usize
} else {
len as usize
};
// check for max allowed size
if length > max_size {
return Err(ProtocolError::Overflow);
}
let mask = if server {
if chunk_len < idx + 4 {
return Ok(None);
@ -89,10 +96,11 @@ impl Parser {
max_size: usize,
) -> Result<Option<(bool, OpCode, Option<BytesMut>)>, ProtocolError> {
// try to parse ws frame metadata
let (idx, finished, opcode, length, mask) = match Parser::parse_metadata(src, server)? {
None => return Ok(None),
Some(res) => res,
};
let (idx, finished, opcode, length, mask) =
match Parser::parse_metadata(src, server, max_size)? {
None => return Ok(None),
Some(res) => res,
};
// not enough data
if src.len() < idx + length {
@ -102,13 +110,6 @@ impl Parser {
// remove prefix
src.advance(idx);
// check for max allowed size
if length > max_size {
// drop the payload
src.advance(length);
return Err(ProtocolError::Overflow);
}
// no need for body
if length == 0 {
return Ok(Some((finished, opcode, None)));
@ -313,7 +314,7 @@ mod tests {
#[test]
fn test_parse_frame_no_mask() {
let mut buf = BytesMut::from(&[0b0000_0001u8, 0b0000_0001u8][..]);
buf.extend([1u8]);
buf.extend(&[1u8]);
assert!(Parser::parse(&mut buf, true, 1024).is_err());
@ -326,7 +327,7 @@ mod tests {
#[test]
fn test_parse_frame_max_size() {
let mut buf = BytesMut::from(&[0b0000_0001u8, 0b0000_0010u8][..]);
buf.extend([1u8, 1u8]);
buf.extend(&[1u8, 1u8]);
assert!(Parser::parse(&mut buf, true, 1).is_err());
@ -336,30 +337,6 @@ mod tests {
}
}
#[test]
fn test_parse_frame_max_size_recoverability() {
let mut buf = BytesMut::new();
// The first text frame with length == 2, payload doesn't matter.
buf.extend([0b0000_0001u8, 0b0000_0010u8, 0b0000_0000u8, 0b0000_0000u8]);
// Next binary frame with length == 2 and payload == `[0x1111_1111u8, 0x1111_1111u8]`.
buf.extend([0b0000_0010u8, 0b0000_0010u8, 0b1111_1111u8, 0b1111_1111u8]);
assert_eq!(buf.len(), 8);
assert!(matches!(
Parser::parse(&mut buf, false, 1),
Err(ProtocolError::Overflow)
));
assert_eq!(buf.len(), 4);
let frame = extract(Parser::parse(&mut buf, false, 2));
assert!(!frame.finished);
assert_eq!(frame.opcode, OpCode::Binary);
assert_eq!(
frame.payload,
Bytes::from(vec![0b1111_1111u8, 0b1111_1111u8])
);
assert_eq!(buf.len(), 0);
}
#[test]
fn test_ping_frame() {
let mut buf = BytesMut::new();
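
The length handling above follows RFC 6455 §5.2; as a standalone illustration (not the crate's parser), the 7-bit/16-bit/64-bit rule looks like this:

/// Decode the payload length from a (sufficiently long) frame header,
/// mirroring the 7-bit / 16-bit / 64-bit rules of RFC 6455 §5.2.
fn payload_length(src: &[u8]) -> Option<u64> {
    let len = (src.get(1)? & 0b0111_1111) as u64;
    match len {
        126 => Some(u16::from_be_bytes([*src.get(2)?, *src.get(3)?]) as u64),
        127 => {
            let mut ext = [0u8; 8];
            ext.copy_from_slice(src.get(2..10)?);
            Some(u64::from_be_bytes(ext))
        }
        len => Some(len),
    }
}

fn main() {
    // 2-byte header, length 2 (as in the max-size tests above)
    assert_eq!(payload_length(&[0b0000_0001, 0b0000_0010]), Some(2));
    // 126 switches to a 16-bit extended length field
    assert_eq!(payload_length(&[0b0000_0001, 126, 0x01, 0x00]), Some(256));
}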

View File

@ -47,6 +47,40 @@ pub fn apply_mask_fast32(buf: &mut [u8], mask: [u8; 4]) {
mod tests {
use super::*;
// legacy test kept from the old apply-mask tests for backwards-compatibility coverage.
// TODO: remove it in favor of the other test.
#[test]
fn test_apply_mask_legacy() {
let mask = [0x6d, 0xb6, 0xb2, 0x80];
let unmasked = vec![
0xf3, 0x00, 0x01, 0x02, 0x03, 0x80, 0x81, 0x82, 0xff, 0xfe, 0x00, 0x17, 0x74, 0xf9,
0x12, 0x03,
];
// Check masking with proper alignment.
{
let mut masked = unmasked.clone();
apply_mask_fallback(&mut masked, mask);
let mut masked_fast = unmasked.clone();
apply_mask(&mut masked_fast, mask);
assert_eq!(masked, masked_fast);
}
// Check masking without alignment.
{
let mut masked = unmasked.clone();
apply_mask_fallback(&mut masked[1..], mask);
let mut masked_fast = unmasked;
apply_mask(&mut masked_fast[1..], mask);
assert_eq!(masked, masked_fast);
}
}
#[test]
fn test_apply_mask() {
let mask = [0x6d, 0xb6, 0xb2, 0x80];
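
For orientation, a reference sketch equivalent to `apply_mask_fallback` above: RFC 6455 §5.3 masking XORs each payload byte with the key, cycling through its four bytes, and is its own inverse:

/// Reference masking: XOR every payload byte with the 4-byte key, cycling.
fn xor_mask(buf: &mut [u8], mask: [u8; 4]) {
    for (i, byte) in buf.iter_mut().enumerate() {
        *byte ^= mask[i % 4];
    }
}

fn main() {
    let mask = [0x6d, 0xb6, 0xb2, 0x80];
    let mut data = *b"hello";

    xor_mask(&mut data, mask);
    xor_mask(&mut data, mask);

    // applying the mask twice restores the original payload
    assert_eq!(&data, b"hello");
}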

View File

@ -67,7 +67,7 @@ pub enum ProtocolError {
}
/// WebSocket handshake errors
#[derive(Debug, Clone, Copy, PartialEq, Eq, Display, Error)]
#[derive(Debug, Clone, Copy, PartialEq, Display, Error)]
pub enum HandshakeError {
/// Only get method is allowed.
#[display(fmt = "Method not allowed.")]

View File

@ -3,9 +3,6 @@ use std::{
fmt,
};
use base64::prelude::*;
use tracing::error;
/// Operation codes defined in [RFC 6455 §11.8].
///
/// [RFC 6455]: https://datatracker.ietf.org/doc/html/rfc6455#section-11.8
@ -61,7 +58,7 @@ impl From<OpCode> for u8 {
Ping => 9,
Pong => 10,
Bad => {
error!("Attempted to convert invalid opcode to u8. This is a bug.");
log::error!("Attempted to convert invalid opcode to u8. This is a bug.");
8 // if this somehow happens, a close frame will help us tear down quickly
}
}
@ -245,7 +242,7 @@ pub fn hash_key(key: &[u8]) -> [u8; 28] {
};
let mut hash_b64 = [0; 28];
let n = BASE64_STANDARD.encode_slice(hash, &mut hash_b64).unwrap();
let n = base64::encode_config_slice(&hash, base64::STANDARD, &mut hash_b64);
assert_eq!(n, 28);
hash_b64
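
Assuming `hash_key` stays re-exported from the `ws` module (and the `ws` feature is enabled), the worked example from RFC 6455 §1.3 can be checked directly:

use actix_http::ws::hash_key;

fn main() {
    // the worked example from RFC 6455 §1.3
    let accept = hash_key(b"dGhlIHNhbXBsZSBub25jZQ==");
    assert_eq!(&accept, b"s3pPLMBiTxaQ9kYGzzhZRbK+xOo=");
}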

View File

@ -31,7 +31,7 @@ const STR: &str = "Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World";
#[actix_rt::test]
async fn h1_v2() {
async fn test_h1_v2() {
let srv = test_server(move || {
HttpService::build()
.finish(|_| future::ok::<_, Infallible>(Response::ok().set_body(STR)))
@ -59,7 +59,7 @@ async fn h1_v2() {
}
#[actix_rt::test]
async fn connection_close() {
async fn test_connection_close() {
let srv = test_server(move || {
HttpService::build()
.finish(|_| future::ok::<_, Infallible>(Response::ok().set_body(STR)))
@ -73,7 +73,7 @@ async fn connection_close() {
}
#[actix_rt::test]
async fn with_query_parameter() {
async fn test_with_query_parameter() {
let srv = test_server(move || {
HttpService::build()
.finish(|req: Request| async move {
@ -104,7 +104,7 @@ impl From<ExpectFailed> for Response<BoxBody> {
}
#[actix_rt::test]
async fn h1_expect() {
async fn test_h1_expect() {
let srv = test_server(move || {
HttpService::build()
.expect(|req: Request| async {

View File

@ -1,4 +1,4 @@
use std::{io, time::Duration};
use std::io;
use actix_http::{error::Error, HttpService, Response};
use actix_server::Server;
@ -19,7 +19,7 @@ async fn h2_ping_pong() -> io::Result<()> {
.workers(1)
.listen("h2_ping_pong", lst, || {
HttpService::build()
.keep_alive(Duration::from_secs(3))
.keep_alive(3)
.h2(|_| async { Ok::<_, Error>(Response::ok()) })
.tcp()
})?
@ -92,10 +92,10 @@ async fn h2_handshake_timeout() -> io::Result<()> {
.workers(1)
.listen("h2_ping_pong", lst, || {
HttpService::build()
.keep_alive(Duration::from_secs(30))
.keep_alive(30)
// set first request timeout to 5 seconds.
// this is the timeout used for http2 handshake.
.client_request_timeout(Duration::from_secs(5))
.client_timeout(5000)
.h2(|_| async { Ok::<_, Error>(Response::ok()) })
.tcp()
})?

Some files were not shown because too many files have changed in this diff.