mirror of https://github.com/fafhrd91/actix-web synced 2025-07-16 14:45:47 +02:00

Compare commits

1 commit

Author: Rob Ede
Commit: 2ee953a118 "remove either crate conversions"
Date: 2021-12-14 19:01:14 +00:00
290 changed files with 11875 additions and 17042 deletions


@@ -6,12 +6,9 @@ lint-all = "clippy --workspace --all-features --tests --examples --bins -- -Dcli
ci-check-min = "hack --workspace check --no-default-features"
ci-check-default = "hack --workspace check"
ci-check-default-tests = "check --workspace --tests"
-ci-check-all-feature-powerset="hack --workspace --feature-powerset --skip=__compress,experimental-io-uring check"
+ci-check-all-feature-powerset="hack --workspace --feature-powerset --skip=__compress,io-uring check"
ci-check-all-feature-powerset-linux="hack --workspace --feature-powerset --skip=__compress check"
# testing
ci-doctest-default = "test --workspace --doc --no-fail-fast -- --nocapture"
ci-doctest = "test --workspace --all-features --doc --no-fail-fast -- --nocapture"
# compile docs as docs.rs would
# RUSTDOCFLAGS="--cfg=docsrs" cargo +nightly doc --no-deps --workspace

.github/FUNDING.yml

@@ -1,3 +0,0 @@
# These are supported funding model platforms
github: [robjtede]


@@ -33,5 +33,5 @@ Please search on the [Actix Web issue tracker](https://github.com/actix/actix-we
## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
-- Rust Version (I.e, output of `rustc -V`):
+* Rust Version (I.e, output of `rustc -V`):
-- Actix Web Version:
+* Actix Web Version:


@@ -1,6 +1,8 @@
name: Benchmark
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master


@@ -1,185 +0,0 @@
name: CI (post-merge)
on:
push:
branches: [master]
jobs:
build_and_test_nightly:
strategy:
fail-fast: false
matrix:
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-2022, triple: x86_64-pc-windows-msvc }
version:
- nightly
name: ${{ matrix.target.name }} / ${{ matrix.version }}
runs-on: ${{ matrix.target.os }}
env:
CI: 1
CARGO_INCREMENTAL: 0
VCPKGRS_DYNAMIC: 1
steps:
- uses: actions/checkout@v2
# install OpenSSL on Windows
# TODO: GitHub actions docs state that OpenSSL is
# already installed on these Windows machines somewhere
- name: Set vcpkg root
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install OpenSSL
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: vcpkg install openssl:x64-windows
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check minimal
uses: actions-rs/cargo@v1
with: { command: ci-check-min }
- name: check default
uses: actions-rs/cargo@v1
with: { command: ci-check-default }
- name: tests
timeout-minutes: 60
run: |
cargo test --lib --tests -p=actix-router --all-features
cargo test --lib --tests -p=actix-http --all-features
cargo test --lib --tests -p=actix-web --features=rustls,openssl -- --skip=test_reading_deflate_encoding_large_random_rustls
cargo test --lib --tests -p=actix-web-codegen --all-features
cargo test --lib --tests -p=awc --all-features
cargo test --lib --tests -p=actix-http-test --all-features
cargo test --lib --tests -p=actix-test --all-features
cargo test --lib --tests -p=actix-files
cargo test --lib --tests -p=actix-multipart --all-features
cargo test --lib --tests -p=actix-web-actors --all-features
- name: tests (io-uring)
if: matrix.target.os == 'ubuntu-latest'
timeout-minutes: 60
run: >
sudo bash -c "ulimit -Sl 512
&& ulimit -Hl 512
&& PATH=$PATH:/usr/share/rust/.cargo/bin
&& RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo test --lib --tests -p=actix-files --all-features"
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.6.3 --no-default-features --features ci-autoclean
cargo-cache
ci_feature_powerset_check:
name: Verify Feature Combinations
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check feature combinations
uses: actions-rs/cargo@v1
with: { command: ci-check-all-feature-powerset }
- name: check feature combinations
uses: actions-rs/cargo@v1
with: { command: ci-check-all-feature-powerset-linux }
# job currently (1st Feb 2022) segfaults
# coverage:
# name: coverage
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v2
# - name: Install stable
# uses: actions-rs/toolchain@v1
# with:
# toolchain: stable-x86_64-unknown-linux-gnu
# profile: minimal
# override: true
# - name: Generate Cargo.lock
# uses: actions-rs/cargo@v1
# with: { command: generate-lockfile }
# - name: Cache Dependencies
# uses: Swatinem/rust-cache@v1.2.0
# - name: Generate coverage file
# run: |
# cargo install cargo-tarpaulin --vers "^0.13"
# cargo tarpaulin --workspace --features=rustls,openssl --out Xml --verbose
# - name: Upload to Codecov
# uses: codecov/codecov-action@v1
# with: { file: cobertura.xml }
nextest:
name: nextest
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.3.0
- name: Install cargo-nextest
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-nextest
- name: Test with cargo-nextest
uses: actions-rs/cargo@v1
with:
command: nextest
args: run


@@ -16,8 +16,9 @@ jobs:
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-2022, triple: x86_64-pc-windows-msvc }
version:
-- 1.54.0 # MSRV
+- 1.52.0 # MSRV
- stable
- nightly
name: ${{ matrix.target.name }} / ${{ matrix.version }}
runs-on: ${{ matrix.target.os }}
@@ -95,6 +96,68 @@ jobs:
cargo install cargo-cache --version 0.6.3 --no-default-features --features ci-autoclean
cargo-cache
ci_feature_powerset_check:
name: Verify Feature Combinations
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check feature combinations
uses: actions-rs/cargo@v1
with: { command: ci-check-all-feature-powerset }
- name: check feature combinations
uses: actions-rs/cargo@v1
with: { command: ci-check-all-feature-powerset-linux }
coverage:
name: coverage
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Generate coverage file
if: github.ref == 'refs/heads/master'
run: |
cargo install cargo-tarpaulin --vers "^0.13"
cargo tarpaulin --workspace --features=rustls,openssl --out Xml --verbose
- name: Upload to Codecov
if: github.ref == 'refs/heads/master'
uses: codecov/codecov-action@v1
with: { file: cobertura.xml }
rustdoc:
name: doc tests
runs-on: ubuntu-latest


@@ -14,7 +14,6 @@ jobs:
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
components: rustfmt
- name: Check with rustfmt
uses: actions-rs/cargo@v1
@@ -31,36 +30,10 @@ jobs:
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
components: clippy
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Check with Clippy
uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
-args: --workspace --tests --examples --all-features
+args: --workspace --all-features --tests
lint-docs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
components: rust-docs
- name: Check for broken intra-doc links
uses: actions-rs/cargo@v1
env:
RUSTDOCFLAGS: "-D warnings"
with:
command: doc
args: --no-deps --all-features --workspace


@@ -1,3 +0,0 @@
{
  "proseWrap": "never"
}
}


@@ -1,5 +1,903 @@
-# Changelog
-Changelogs are kept separately for each crate in this repo.
-Actix Web changelog [is now here &rarr;](./actix-web/CHANGES.md).
# Changes
## Unreleased - 2021-xx-xx
### Added
* Method on `Responder` trait (`customize`) for customizing responders and `CustomizeResponder` struct (see the sketch below). [#2510]
* Implement `Debug` for `DefaultHeaders`. [#2510]
### Changed
* Align `DefaultHeader` method terminology, deprecating previous methods. [#2510]
### Removed
* Top-level `EitherExtractError` export. [#2510]
* Conversion implementations for `either` crate. [#2516]
[#2510]: https://github.com/actix/actix-web/pull/2510
[#2516]: https://github.com/actix/actix-web/pull/2516
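A rough sketch of what the `customize` call from #2510 might look like in a handler; the handler body, status, and header below are illustrative only and assume the API as it later stabilized, not text from the changelog:

```rust
use actix_web::{http::StatusCode, Responder};

async fn index() -> impl Responder {
    // wrap any responder so its status code and headers can be adjusted
    // before the response is written out
    "hello world"
        .customize()
        .with_status(StatusCode::ACCEPTED)
        .insert_header(("x-hello", "world"))
}
```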
## 4.0.0-beta.14 - 2021-12-11
### Added
* Methods on `AcceptLanguage`: `ranked` and `preference`. [#2480]
* `AcceptEncoding` typed header. [#2482]
* `Range` typed header. [#2485]
* `HttpResponse::map_into_{left,right}_body` and `HttpResponse::map_into_boxed_body`. [#2468]
* `ServiceResponse::map_into_{left,right}_body` and `HttpResponse::map_into_boxed_body`. [#2468]
* Connection data set through the `HttpServer::on_connect` callback is now accessible only from the new `HttpRequest::conn_data()` and `ServiceRequest::conn_data()` methods. [#2491]
* `HttpRequest::{req_data,req_data_mut}`. [#2487]
* `ServiceResponse::into_parts`. [#2499]
### Changed
* Rename `Accept::{mime_precedence => ranked}`. [#2480]
* Rename `Accept::{mime_preference => preference}`. [#2480]
* Un-deprecate `App::data_factory`. [#2484]
* `HttpRequest::url_for` no longer constructs URLs with query or fragment components. [#2430]
* Remove `B` (body) type parameter on `App`. [#2493]
* Add `B` (body) type parameter on `Scope`. [#2492]
* Request-local data container is no longer part of a `RequestHead`. Instead it is a distinct part of a `Request`. [#2487]
### Fixed
* Accept wildcard `*` items in `AcceptLanguage`. [#2480]
* Re-exports `dev::{BodySize, MessageBody, SizedStream}`. They are exposed through the `body` module. [#2468]
* Typed headers containing lists that require one or more items now enforce this minimum. [#2482]
### Removed
* `ConnectionInfo::get`. [#2487]
[#2430]: https://github.com/actix/actix-web/pull/2430
[#2468]: https://github.com/actix/actix-web/pull/2468
[#2480]: https://github.com/actix/actix-web/pull/2480
[#2482]: https://github.com/actix/actix-web/pull/2482
[#2484]: https://github.com/actix/actix-web/pull/2484
[#2485]: https://github.com/actix/actix-web/pull/2485
[#2487]: https://github.com/actix/actix-web/pull/2487
[#2491]: https://github.com/actix/actix-web/pull/2491
[#2492]: https://github.com/actix/actix-web/pull/2492
[#2493]: https://github.com/actix/actix-web/pull/2493
[#2499]: https://github.com/actix/actix-web/pull/2499
## 4.0.0-beta.13 - 2021-11-30
### Changed
* Update `actix-tls` to `3.0.0-rc.1`. [#2474]
[#2474]: https://github.com/actix/actix-web/pull/2474
## 4.0.0-beta.12 - 2021-11-22
### Changed
* Compress middleware's response type is now `AnyBody<Encoder<B>>`. [#2448]
### Fixed
* Relax `Unpin` bound on `S` (stream) parameter of `HttpResponseBuilder::streaming`. [#2448]
### Removed
* `dev::ResponseBody` re-export; is function is replaced by the new `dev::AnyBody` enum. [#2446]
[#2446]: https://github.com/actix/actix-web/pull/2446
[#2448]: https://github.com/actix/actix-web/pull/2448
## 4.0.0-beta.11 - 2021-11-15
### Added
* Re-export `dev::ServerHandle` from `actix-server`. [#2442]
### Changed
* `ContentType::html` now produces `text/html; charset=utf-8` instead of `text/html`. [#2423]
* Update `actix-server` to `2.0.0-beta.9`. [#2442]
[#2423]: https://github.com/actix/actix-web/pull/2423
[#2442]: https://github.com/actix/actix-web/pull/2442
## 4.0.0-beta.10 - 2021-10-20
### Added
* Option to allow `Json` extractor to work without a `Content-Type` header present. [#2362]
* `#[actix_web::test]` macro for setting up tests with a runtime. [#2409]
### Changed
* Associated type `FromRequest::Config` was removed. [#2233]
* Inner field made private on `web::Payload`. [#2384]
* `Data::into_inner` and `Data::get_ref` no longer requires `T: Sized`. [#2403]
* Updated rustls to v0.20. [#2414]
* Minimum supported Rust version (MSRV) is now 1.52.
### Removed
* Useless `ServiceResponse::checked_expr` method. [#2401]
[#2233]: https://github.com/actix/actix-web/pull/2233
[#2362]: https://github.com/actix/actix-web/pull/2362
[#2384]: https://github.com/actix/actix-web/pull/2384
[#2401]: https://github.com/actix/actix-web/pull/2401
[#2403]: https://github.com/actix/actix-web/pull/2403
[#2409]: https://github.com/actix/actix-web/pull/2409
[#2414]: https://github.com/actix/actix-web/pull/2414
## 4.0.0-beta.9 - 2021-09-09
### Added
* Re-export actix-service `ServiceFactory` in `dev` module. [#2325]
### Changed
* Compress middleware will return 406 Not Acceptable when no content encoding is acceptable to the client. [#2344]
* Move `BaseHttpResponse` to `dev::Response`. [#2379]
* Enable `TestRequest::param` to accept more than just static strings. [#2172]
* Minimum supported Rust version (MSRV) is now 1.51.
### Fixed
* Fix quality parse error in Accept-Encoding header. [#2344]
* Re-export correct type at `web::HttpResponse`. [#2379]
[#2172]: https://github.com/actix/actix-web/pull/2172
[#2325]: https://github.com/actix/actix-web/pull/2325
[#2344]: https://github.com/actix/actix-web/pull/2344
[#2379]: https://github.com/actix/actix-web/pull/2379
## 4.0.0-beta.8 - 2021-06-26
### Added
* Add `ServiceRequest::parts_mut`. [#2177]
* Add extractors for `Uri` and `Method`. [#2263]
* Add extractors for `ConnectionInfo` and `PeerAddr`. [#2263]
* Add `Route::service` for using hand-written services as handlers. [#2262]
### Changed
* Change compression algorithm features flags. [#2250]
* Deprecate `App::data` and `App::data_factory`. [#2271]
* Smarter extraction of `ConnectionInfo` parts. [#2282]
### Fixed
* Scope and Resource middleware can access data items set on their own layer. [#2288]
[#2177]: https://github.com/actix/actix-web/pull/2177
[#2250]: https://github.com/actix/actix-web/pull/2250
[#2271]: https://github.com/actix/actix-web/pull/2271
[#2262]: https://github.com/actix/actix-web/pull/2262
[#2263]: https://github.com/actix/actix-web/pull/2263
[#2282]: https://github.com/actix/actix-web/pull/2282
[#2288]: https://github.com/actix/actix-web/pull/2288
## 4.0.0-beta.7 - 2021-06-17
### Added
* `HttpServer::worker_max_blocking_threads` for setting block thread pool. [#2200]
### Changed
* Adjusted default JSON payload limit to 2MB (from 32kb) and included size and limits in the `JsonPayloadError::Overflow` error variant. [#2162]
[#2162]: (https://github.com/actix/actix-web/pull/2162)
* `ServiceResponse::error_response` now uses body type of `Body`. [#2201]
* `ServiceResponse::checked_expr` now returns a `Result`. [#2201]
* Update `language-tags` to `0.3`.
* `ServiceResponse::take_body`. [#2201]
* `ServiceResponse::map_body` closure receives and returns `B` instead of `ResponseBody<B>` types. [#2201]
* All error trait bounds in server service builders have changed from `Into<Error>` to `Into<Response<AnyBody>>`. [#2253]
* All error trait bounds in message body and stream impls changed from `Into<Error>` to `Into<Box<dyn std::error::Error>>`. [#2253]
* `HttpServer::{listen_rustls(), bind_rustls()}` now honor the ALPN protocols in the configuation parameter. [#2226]
* `middleware::normalize` now will not try to normalize URIs with no valid path [#2246]
### Removed
* `HttpResponse::take_body` and old `HttpResponse::into_body` method that casted body type. [#2201]
[#2200]: https://github.com/actix/actix-web/pull/2200
[#2201]: https://github.com/actix/actix-web/pull/2201
[#2253]: https://github.com/actix/actix-web/pull/2253
[#2246]: https://github.com/actix/actix-web/pull/2246
## 4.0.0-beta.6 - 2021-04-17
### Added
* `HttpResponse` and `HttpResponseBuilder` structs. [#2065]
### Changed
* Most error types are now marked `#[non_exhaustive]`. [#2148]
* Methods on `ContentDisposition` that took `T: AsRef<str>` now take `impl AsRef<str>`.
[#2065]: https://github.com/actix/actix-web/pull/2065
[#2148]: https://github.com/actix/actix-web/pull/2148
## 4.0.0-beta.5 - 2021-04-02
### Added
* `Header` extractor for extracting common HTTP headers in handlers. [#2094]
* Added `TestServer::client_headers` method. [#2097]
### Fixed
* Double ampersand in Logger format is escaped correctly. [#2067]
### Changed
* `CustomResponder` would return error as `HttpResponse` when `CustomResponder::with_header` failed
instead of skipping. (Only the first error is kept when multiple error occur) [#2093]
### Removed
* The `client` mod was removed. Clients should now use `awc` directly.
[871ca5e4](https://github.com/actix/actix-web/commit/871ca5e4ae2bdc22d1ea02701c2992fa8d04aed7)
* Integration testing was moved to new `actix-test` crate. Namely these items from the `test`
module: `TestServer`, `TestServerConfig`, `start`, `start_with`, and `unused_addr`. [#2112]
[#2067]: https://github.com/actix/actix-web/pull/2067
[#2093]: https://github.com/actix/actix-web/pull/2093
[#2094]: https://github.com/actix/actix-web/pull/2094
[#2097]: https://github.com/actix/actix-web/pull/2097
[#2112]: https://github.com/actix/actix-web/pull/2112
## 4.0.0-beta.4 - 2021-03-09
### Changed
* Feature `cookies` is now optional and enabled by default. [#1981]
* `JsonBody::new` returns a default limit of 32kB to be consistent with `JsonConfig` and the default
behaviour of the `web::Json<T>` extractor. [#2010]
[#1981]: https://github.com/actix/actix-web/pull/1981
[#2010]: https://github.com/actix/actix-web/pull/2010
## 4.0.0-beta.3 - 2021-02-10
* Update `actix-web-codegen` to `0.5.0-beta.1`.
## 4.0.0-beta.2 - 2021-02-10
### Added
* The method `Either<web::Json<T>, web::Form<T>>::into_inner()` which returns the inner type for
whichever variant was created. Also works for `Either<web::Form<T>, web::Json<T>>`. [#1894]
* Add `services!` macro for helping register multiple services to `App`. [#1933]
* Enable registering a vec of services of the same type to `App` [#1933]
### Changed
* Rework `Responder` trait to be sync and returns `Response`/`HttpResponse` directly.
Making it simpler and more performant. [#1891]
* `ServiceRequest::into_parts` and `ServiceRequest::from_parts` can no longer fail. [#1893]
* `ServiceRequest::from_request` can no longer fail. [#1893]
* Our `Either` type now uses `Left`/`Right` variants (instead of `A`/`B`) [#1894]
* `test::{call_service, read_response, read_response_json, send_request}` take `&Service`
in argument [#1905]
* `App::wrap_fn`, `Resource::wrap_fn` and `Scope::wrap_fn` provide `&Service` in closure
argument. [#1905]
* `web::block` no longer requires the output is a Result. [#1957]
### Fixed
* Multiple calls to `App::data` with the same type now keeps the latest call's data. [#1906]
### Removed
* Public field of `web::Path` has been made private. [#1894]
* Public field of `web::Query` has been made private. [#1894]
* `TestRequest::with_header`; use `TestRequest::default().insert_header()`. [#1869]
* `AppService::set_service_data`; for custom HTTP service factories adding application data, use the
layered data model by calling `ServiceRequest::add_data_container` when handling
requests instead. [#1906]
[#1891]: https://github.com/actix/actix-web/pull/1891
[#1893]: https://github.com/actix/actix-web/pull/1893
[#1894]: https://github.com/actix/actix-web/pull/1894
[#1869]: https://github.com/actix/actix-web/pull/1869
[#1905]: https://github.com/actix/actix-web/pull/1905
[#1906]: https://github.com/actix/actix-web/pull/1906
[#1933]: https://github.com/actix/actix-web/pull/1933
[#1957]: https://github.com/actix/actix-web/pull/1957
## 4.0.0-beta.1 - 2021-01-07
### Added
* `Compat` middleware enabling generic response body/error type of middlewares like `Logger` and
`Compress` to be used in `middleware::Condition` and `Resource`, `Scope` services. [#1865]
### Changed
* Update `actix-*` dependencies to tokio `1.0` based versions. [#1813]
* Bumped `rand` to `0.8`.
* Update `rust-tls` to `0.19`. [#1813]
* Rename `Handler` to `HandlerService` and rename `Factory` to `Handler`. [#1852]
* The default `TrailingSlash` is now `Trim`, in line with existing documentation. See migration
guide for implications. [#1875]
* Rename `DefaultHeaders::{content_type => add_content_type}`. [#1875]
* MSRV is now 1.46.0.
### Fixed
* Added the underlying parse error to `test::read_body_json`'s panic message. [#1812]
### Removed
* Public modules `middleware::{normalize, err_handlers}`. All necessary middleware structs are now
exposed directly by the `middleware` module.
* Remove `actix-threadpool` as dependency. `actix_threadpool::BlockingError` error type can be imported
from `actix_web::error` module. [#1878]
[#1812]: https://github.com/actix/actix-web/pull/1812
[#1813]: https://github.com/actix/actix-web/pull/1813
[#1852]: https://github.com/actix/actix-web/pull/1852
[#1865]: https://github.com/actix/actix-web/pull/1865
[#1875]: https://github.com/actix/actix-web/pull/1875
[#1878]: https://github.com/actix/actix-web/pull/1878
## 3.3.2 - 2020-12-01
### Fixed
* Removed an occasional `unwrap` on `None` panic in `NormalizePathNormalization`. [#1762]
* Fix `match_pattern()` returning `None` for scope with empty path resource. [#1798]
* Increase minimum `socket2` version. [#1803]
[#1762]: https://github.com/actix/actix-web/pull/1762
[#1798]: https://github.com/actix/actix-web/pull/1798
[#1803]: https://github.com/actix/actix-web/pull/1803
## 3.3.1 - 2020-11-29
* Ensure `actix-http` dependency uses same `serde_urlencoded`.
## 3.3.0 - 2020-11-25
### Added
* Add `Either<A, B>` extractor helper. [#1788]
### Changed
* Upgrade `serde_urlencoded` to `0.7`. [#1773]
[#1773]: https://github.com/actix/actix-web/pull/1773
[#1788]: https://github.com/actix/actix-web/pull/1788
## 3.2.0 - 2020-10-30
### Added
* Implement `exclude_regex` for Logger middleware. [#1723]
* Add request-local data extractor `web::ReqData`. [#1748]
* Add ability to register closure for request middleware logging. [#1749]
* Add `app_data` to `ServiceConfig`. [#1757]
* Expose `on_connect` for access to the connection stream before request is handled. [#1754]
### Changed
* Updated actix-web-codegen dependency for access to new `#[route(...)]` multi-method macro.
* Print non-configured `Data<T>` type when attempting extraction. [#1743]
* Re-export bytes::Buf{Mut} in web module. [#1750]
* Upgrade `pin-project` to `1.0`.
[#1723]: https://github.com/actix/actix-web/pull/1723
[#1743]: https://github.com/actix/actix-web/pull/1743
[#1748]: https://github.com/actix/actix-web/pull/1748
[#1750]: https://github.com/actix/actix-web/pull/1750
[#1754]: https://github.com/actix/actix-web/pull/1754
[#1749]: https://github.com/actix/actix-web/pull/1749
## 3.1.0 - 2020-09-29
### Changed
* Add `TrailingSlash::MergeOnly` behaviour to `NormalizePath`, which allows `NormalizePath`
to retain any trailing slashes. [#1695]
* Remove bound `std::marker::Sized` from `web::Data` to support storing `Arc<dyn Trait>`
via `web::Data::from` [#1710]
### Fixed
* `ResourceMap` debug printing is no longer infinitely recursive. [#1708]
[#1695]: https://github.com/actix/actix-web/pull/1695
[#1708]: https://github.com/actix/actix-web/pull/1708
[#1710]: https://github.com/actix/actix-web/pull/1710
## 3.0.2 - 2020-09-15
### Fixed
* `NormalizePath` when used with `TrailingSlash::Trim` no longer trims the root path "/". [#1678]
[#1678]: https://github.com/actix/actix-web/pull/1678
## 3.0.1 - 2020-09-13
### Changed
* `middleware::normalize::TrailingSlash` enum is now accessible. [#1673]
[#1673]: https://github.com/actix/actix-web/pull/1673
## 3.0.0 - 2020-09-11
* No significant changes from `3.0.0-beta.4`.
## 3.0.0-beta.4 - 2020-09-09
### Added
* `middleware::NormalizePath` now has configurable behavior for either always having a trailing
slash, or as the new addition, always trimming trailing slashes. [#1639]
### Changed
* Update actix-codec and actix-utils dependencies. [#1634]
* `FormConfig` and `JsonConfig` configurations are now also considered when set
using `App::data`. [#1641]
* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`. [#1655]
* `HttpServer::maxconnrate` is renamed to the more expressive
`HttpServer::max_connection_rate`. [#1655]
[#1639]: https://github.com/actix/actix-web/pull/1639
[#1641]: https://github.com/actix/actix-web/pull/1641
[#1634]: https://github.com/actix/actix-web/pull/1634
[#1655]: https://github.com/actix/actix-web/pull/1655
## 3.0.0-beta.3 - 2020-08-17
### Changed
* Update `rustls` to 0.18
## 3.0.0-beta.2 - 2020-08-17
### Changed
* `PayloadConfig` is now also considered in `Bytes` and `String` extractors when set
using `App::data`. [#1610]
* `web::Path` now has a public representation: `web::Path(pub T)` that enables
destructuring. [#1594]
* `ServiceRequest::app_data` allows retrieval of non-Data data without splitting into parts to
access `HttpRequest` which already allows this. [#1618]
* Re-export all error types from `awc`. [#1621]
* MSRV is now 1.42.0.
### Fixed
* Memory leak of app data in pooled requests. [#1609]
[#1594]: https://github.com/actix/actix-web/pull/1594
[#1609]: https://github.com/actix/actix-web/pull/1609
[#1610]: https://github.com/actix/actix-web/pull/1610
[#1618]: https://github.com/actix/actix-web/pull/1618
[#1621]: https://github.com/actix/actix-web/pull/1621
## 3.0.0-beta.1 - 2020-07-13
### Added
* Re-export `actix_rt::main` as `actix_web::main`.
* `HttpRequest::match_pattern` and `ServiceRequest::match_pattern` for extracting the matched
resource pattern.
* `HttpRequest::match_name` and `ServiceRequest::match_name` for extracting matched resource name.
### Changed
* Fix actix_http::h1::dispatcher so it returns when HW_BUFFER_SIZE is reached. Should reduce peak memory consumption during large uploads. [#1550]
* Migrate cookie handling to `cookie` crate. Actix-web no longer requires `ring` dependency.
* MSRV is now 1.41.1
### Fixed
* `NormalizePath` improved consistency when path needs slashes added _and_ removed.
## 3.0.0-alpha.3 - 2020-05-21
### Added
* Add option to create `Data<T>` from `Arc<T>` [#1509]
### Changed
* Resources and Scopes can now access non-overridden data types set on App (or containing scopes) when setting their own data. [#1486]
* Fix audit issue logging by default peer address [#1485]
* Bump minimum supported Rust version to 1.40
* Replace deprecated `net2` crate with `socket2`
[#1485]: https://github.com/actix/actix-web/pull/1485
[#1509]: https://github.com/actix/actix-web/pull/1509
## [3.0.0-alpha.2] - 2020-05-08
### Changed
* `{Resource,Scope}::default_service(f)` handlers now support app data extraction. [#1452]
* Implement `std::error::Error` for our custom errors [#1422]
* NormalizePath middleware now appends trailing / so that routes of form /example/ respond to /example requests. [#1433]
* Remove the `failure` feature and support.
[#1422]: https://github.com/actix/actix-web/pull/1422
[#1433]: https://github.com/actix/actix-web/pull/1433
[#1452]: https://github.com/actix/actix-web/pull/1452
[#1486]: https://github.com/actix/actix-web/pull/1486
## [3.0.0-alpha.1] - 2020-03-11
### Added
* Add helper function for creating routes with `TRACE` method guard `web::trace()`
* Add convenience functions `test::read_body_json()` and `test::TestRequest::send_request()` for testing.
### Changed
* Use `sha-1` crate instead of unmaintained `sha1` crate
* Skip empty chunks when returning response from a `Stream` [#1308]
* Update the `time` dependency to 0.2.7
* Update `actix-tls` dependency to 2.0.0-alpha.1
* Update `rustls` dependency to 0.17
[#1308]: https://github.com/actix/actix-web/pull/1308
## [2.0.0] - 2019-12-25
### Changed
* Rename `HttpServer::start()` to `HttpServer::run()`
* Allow to gracefully stop test server via `TestServer::stop()`
* Allow to specify multi-patterns for resources
## [2.0.0-rc] - 2019-12-20
### Changed
* Move `BodyEncoding` to `dev` module #1220
* Allow to set `peer_addr` for TestRequest #1074
* Make web::Data deref to Arc<T> #1214
* Rename `App::register_data()` to `App::app_data()`
* `HttpRequest::app_data<T>()` returns `Option<&T>` instead of `Option<&Data<T>>`
### Fixed
* Fix `AppConfig::secure()` is always false. #1202
## [2.0.0-alpha.6] - 2019-12-15
### Fixed
* Fixed compilation with default features off
## [2.0.0-alpha.5] - 2019-12-13
### Added
* Add test server, `test::start()` and `test::start_with()`
## [2.0.0-alpha.4] - 2019-12-08
### Deleted
* Delete HttpServer::run(), it is not useful with async/await
## [2.0.0-alpha.3] - 2019-12-07
### Changed
* Migrate to tokio 0.2
## [2.0.0-alpha.1] - 2019-11-22
### Changed
* Migrated to `std::future`
* Remove implementation of `Responder` for `()`. (#1167)
## [1.0.9] - 2019-11-14
### Added
* Add `Payload::into_inner` method and make stored `def::Payload` public. (#1110)
### Changed
* Support `Host` guards when the `Host` header is unset (e.g. HTTP/2 requests) (#1129)
## [1.0.8] - 2019-09-25
### Added
* Add `Scope::register_data` and `Resource::register_data` methods, parallel to
`App::register_data`.
* Add `middleware::Condition` that conditionally enables another middleware
* Allow to re-construct `ServiceRequest` from `HttpRequest` and `Payload`
* Add `HttpServer::listen_uds` for ability to listen on UDS FD rather than path,
which is useful for example with systemd.
### Changed
* Make UrlEncodedError::Overflow more informative
* Use actix-testing for testing utils
## [1.0.7] - 2019-08-29
### Fixed
* Request Extensions leak #1062
## [1.0.6] - 2019-08-28
### Added
* Re-implement Host predicate (#989)
* Form implements Responder, returning a `application/x-www-form-urlencoded` response
* Add `into_inner` to `Data`
* Add `test::TestRequest::set_form()` convenience method to automatically serialize data and set
the header in test requests.
### Changed
* `Query` payload made `pub`. Allows user to pattern-match the payload.
* Enable `rust-tls` feature for client #1045
* Update serde_urlencoded to 0.6.1
* Update url to 2.1
## [1.0.5] - 2019-07-18
### Added
* Unix domain sockets (HttpServer::bind_uds) #92
* Actix now logs errors resulting in "internal server error" responses always, with the `error`
logging level
### Fixed
* Restored logging of errors through the `Logger` middleware
## [1.0.4] - 2019-07-17
### Added
* Add `Responder` impl for `(T, StatusCode) where T: Responder`
* Allow to access app's resource map via
`ServiceRequest::resource_map()` and `HttpRequest::resource_map()` methods.
### Changed
* Upgrade `rand` dependency version to 0.7
## [1.0.3] - 2019-06-28
### Added
* Support asynchronous data factories #850
### Changed
* Use `encoding_rs` crate instead of unmaintained `encoding` crate
## [1.0.2] - 2019-06-17
### Changed
* Move cors middleware to `actix-cors` crate.
* Move identity middleware to `actix-identity` crate.
## [1.0.1] - 2019-06-17
### Added
* Add support for PathConfig #903
* Add `middleware::identity::RequestIdentity` trait to `get_identity` from `HttpMessage`.
### Changed
* Move cors middleware to `actix-cors` crate.
* Move identity middleware to `actix-identity` crate.
* Disable default feature `secure-cookies`.
* Allow to test an app that uses async actors #897
* Re-apply patch from #637 #894
### Fixed
* HttpRequest::url_for is broken with nested scopes #915
## [1.0.0] - 2019-06-05
### Added
* Add `Scope::configure()` method.
* Add `ServiceRequest::set_payload()` method.
* Add `test::TestRequest::set_json()` convenience method to automatically
serialize data and set header in test requests.
* Add macros for head, options, trace, connect and patch http methods
### Changed
* Drop an unnecessary `Option<_>` indirection around `ServerBuilder` from `HttpServer`. #863
### Fixed
* Fix Logger request time format, and use rfc3339. #867
* Clear http requests pool on app service drop #860
## [1.0.0-rc] - 2019-05-18
### Added
* Add `Query<T>::from_query()` to extract parameters from a query string. #846
* `QueryConfig`, similar to `JsonConfig` for customizing error handling of query extractors.
### Changed
* `JsonConfig` is now `Send + Sync`, this implies that `error_handler` must be `Send + Sync` too.
### Fixed
* Codegen with parameters in the path only resolves the first registered endpoint #841
## [1.0.0-beta.4] - 2019-05-12
### Added
* Allow to set/override app data on scope level
### Changed
* `App::configure` take an `FnOnce` instead of `Fn`
* Upgrade actix-net crates
## [1.0.0-beta.3] - 2019-05-04
### Added
* Add helper function for executing futures `test::block_fn()`
### Changed
* Extractor configuration could be registered with `App::data()`
or with `Resource::data()` #775
* Route data is unified with app data, `Route::data()` moved to resource
level to `Resource::data()`
* CORS handling without headers #702
* Allow constructing `Data` instances to avoid double `Arc` for `Send + Sync` types.
### Fixed
* Fix `NormalizePath` middleware impl #806
### Deleted
* `App::data_factory()` is deleted.
## [1.0.0-beta.2] - 2019-04-24
### Added
* Add raw services support via `web::service()`
* Add helper functions for reading response body `test::read_body()`
* Add support for `remainder match` (i.e "/path/{tail}*")
* Extend `Responder` trait, allow to override status code and headers.
* Store visit and login timestamp in the identity cookie #502
### Changed
* `.to_async()` handler can return `Responder` type #792
### Fixed
* Fix async web::Data factory handling
## [1.0.0-beta.1] - 2019-04-20
### Added
* Add helper functions for reading test response body,
`test::read_response()` and test::read_response_json()`
* Add `.peer_addr()` #744
* Add `NormalizePath` middleware
### Changed
* Rename `RouterConfig` to `ServiceConfig`
* Rename `test::call_success` to `test::call_service`
* Removed `ServiceRequest::from_parts()` as it is unsafe to create from parts.
* `CookieIdentityPolicy::max_age()` accepts value in seconds
### Fixed
* Fixed `TestRequest::app_data()`
## [1.0.0-alpha.6] - 2019-04-14
### Changed
* Allow using any service as default service.
* Remove generic type for request payload, always use default.
* Removed `Decompress` middleware. Bytes, String, Json, Form extractors
automatically decompress payload.
* Make extractor config type explicit. Add `FromRequest::Config` associated type.
## [1.0.0-alpha.5] - 2019-04-12
### Added
* Added async io `TestBuffer` for testing.
### Deleted
* Removed native-tls support
## [1.0.0-alpha.4] - 2019-04-08
### Added
* `App::configure()` allow to offload app configuration to different methods
* Added `URLPath` option for logger
* Added `ServiceRequest::app_data()`, returns `Data<T>`
* Added `ServiceFromRequest::app_data()`, returns `Data<T>`
### Changed
* `FromRequest` trait refactoring
* Move multipart support to actix-multipart crate
### Fixed
* Fix body propagation in Response::from_error. #760
## [1.0.0-alpha.3] - 2019-04-02
### Changed
* Renamed `TestRequest::to_service()` to `TestRequest::to_srv_request()`
* Renamed `TestRequest::to_response()` to `TestRequest::to_srv_response()`
* Removed `Deref` impls
### Removed
* Removed unused `actix_web::web::md()`
## [1.0.0-alpha.2] - 2019-03-29
### Added
* Rustls support
### Changed
* Use forked cookie
* Multipart::Field renamed to MultipartField
## [1.0.0-alpha.1] - 2019-03-28
### Changed
* Complete architecture re-design.
* Return 405 response if no matching route found within resource #538


@@ -8,19 +8,19 @@ In the interest of fostering an open and welcoming environment, we as contributo
Examples of behavior that contributes to creating a positive environment include:
-- Using welcoming and inclusive language
+* Using welcoming and inclusive language
-- Being respectful of differing viewpoints and experiences
+* Being respectful of differing viewpoints and experiences
-- Gracefully accepting constructive criticism
+* Gracefully accepting constructive criticism
-- Focusing on what is best for the community
+* Focusing on what is best for the community
-- Showing empathy towards other community members
+* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
-- The use of sexualized language or imagery and unwelcome sexual attention or advances
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
-- Trolling, insulting/derogatory comments, and personal or political attacks
+* Trolling, insulting/derogatory comments, and personal or political attacks
-- Public or private harassment
+* Public or private harassment
-- Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
-- Other conduct which could reasonably be considered inappropriate in a professional setting
+* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities


@@ -1,18 +1,126 @@
[package]
name = "actix-web"
version = "4.0.0-beta.14"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust"
keywords = ["actix", "http", "web", "framework", "async"]
categories = [
"network-programming",
"asynchronous",
"web-programming::http-server",
"web-programming::websocket"
]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
license = "MIT OR Apache-2.0"
edition = "2018"
[package.metadata.docs.rs]
# features that docs.rs will build with
features = ["openssl", "rustls", "compress-brotli", "compress-gzip", "compress-zstd", "cookies", "secure-cookies"]
rustdoc-args = ["--cfg", "docsrs"]
[lib]
name = "actix_web"
path = "src/lib.rs"
[workspace]
resolver = "2"
members = [
-"actix-files",
-"actix-http-test",
-"actix-http",
-"actix-multipart",
-"actix-router",
-"actix-test",
-"actix-web-actors",
-"actix-web-codegen",
-"actix-web",
-"awc",
+".",
+"awc",
+"actix-http",
+"actix-files",
+"actix-multipart",
+"actix-web-actors",
+"actix-web-codegen",
+"actix-http-test",
+"actix-test",
+"actix-router",
]
[features]
default = ["compress-brotli", "compress-gzip", "compress-zstd", "cookies"]
# Brotli algorithm content-encoding support
compress-brotli = ["actix-http/compress-brotli", "__compress"]
# Gzip and deflate algorithms content-encoding support
compress-gzip = ["actix-http/compress-gzip", "__compress"]
# Zstd algorithm content-encoding support
compress-zstd = ["actix-http/compress-zstd", "__compress"]
# support for cookies
cookies = ["cookie"]
# secure cookies feature
secure-cookies = ["cookie/secure"]
# openssl
openssl = ["actix-http/openssl", "actix-tls/accept", "actix-tls/openssl"]
# rustls
rustls = ["actix-http/rustls", "actix-tls/accept", "actix-tls/rustls"]
# Internal (PRIVATE!) features used to aid testing and checking feature status.
# Don't rely on these whatsoever. They may disappear at anytime.
__compress = []
# io-uring feature only avaiable for Linux OSes.
experimental-io-uring = ["actix-server/io-uring"]
[dependencies]
actix-codec = "0.4.1"
actix-macros = "0.2.3"
actix-rt = "2.3"
actix-server = "2.0.0-rc.1"
actix-service = "2.0.0"
actix-utils = "3.0.0"
actix-tls = { version = "3.0.0-rc.1", default-features = false, optional = true }
actix-http = "3.0.0-beta.15"
actix-router = "0.5.0-beta.2"
actix-web-codegen = "0.5.0-beta.6"
ahash = "0.7"
bytes = "1"
cfg-if = "1"
cookie = { version = "0.15", features = ["percent-encode"], optional = true }
derive_more = "0.99.5"
encoding_rs = "0.8"
futures-core = { version = "0.3.7", default-features = false }
futures-util = { version = "0.3.7", default-features = false }
itoa = "0.4"
language-tags = "0.3"
once_cell = "1.5"
log = "0.4"
mime = "0.3"
paste = "1"
pin-project-lite = "0.2.7"
regex = "1.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_urlencoded = "0.7"
smallvec = "1.6.1"
socket2 = "0.4.0"
time = { version = "0.3", default-features = false, features = ["formatting"] }
url = "2.1"
[dev-dependencies]
actix-test = { version = "0.1.0-beta.8", features = ["openssl", "rustls"] }
awc = { version = "3.0.0-beta.13", features = ["openssl"] }
brotli2 = "0.3.2"
criterion = { version = "0.3", features = ["html_reports"] }
env_logger = "0.9"
flate2 = "1.0.13"
futures-util = { version = "0.3.7", default-features = false, features = ["std"] }
rand = "0.8"
rcgen = "0.8"
rustls-pemfile = "0.2"
tls-openssl = { package = "openssl", version = "0.10.9" }
tls-rustls = { package = "rustls", version = "0.20.0" }
zstd = "0.9"
[profile.dev]
# Disabling debug info speeds up builds a bunch and we don't rely on it for debugging that much.
debug = 0
@@ -29,7 +137,7 @@ actix-http-test = { path = "actix-http-test" }
actix-multipart = { path = "actix-multipart" }
actix-router = { path = "actix-router" }
actix-test = { path = "actix-test" }
-actix-web = { path = "actix-web" }
+actix-web = { path = "." }
actix-web-actors = { path = "actix-web-actors" }
actix-web-codegen = { path = "actix-web-codegen" }
awc = { path = "awc" }
@@ -42,3 +150,31 @@ awc = { path = "awc" }
# actix-utils = { path = "../actix-net/actix-utils" }
# actix-tls = { path = "../actix-net/actix-tls" }
# actix-server = { path = "../actix-net/actix-server" }
[[test]]
name = "test_server"
required-features = ["compress-brotli", "compress-gzip", "compress-zstd", "cookies"]
[[example]]
name = "basic"
required-features = ["compress-gzip"]
[[example]]
name = "uds"
required-features = ["compress-gzip"]
[[example]]
name = "on_connect"
required-features = []
[[bench]]
name = "server"
harness = false
[[bench]]
name = "service"
harness = false
[[bench]]
name = "responder"
harness = false

MIGRATION.md (new file)

@@ -0,0 +1,677 @@
## Unreleased
* The default `NormalizePath` behavior now strips trailing slashes by default. This was
previously documented to be the case in v3 but the behavior now matches. The effect is that
routes defined with trailing slashes will become inaccessible when
using `NormalizePath::default()`. As such, calling `NormalizePath::default()` will log a warning.
It is advised that the `new` method be used instead.
Before: `#[get("/test/")]`
After: `#[get("/test")]`
Alternatively, explicitly require trailing slashes: `NormalizePath::new(TrailingSlash::Always)`.
* The `type Config` of `FromRequest` was removed.
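A minimal sketch of a hand-rolled extractor without that associated type; the `ClientIp` extractor here is made up for illustration, and per-extractor configuration is expected to live in app data instead:
```rust
use std::future::{ready, Ready};
use std::net::IpAddr;

use actix_web::{dev::Payload, Error, FromRequest, HttpRequest};

struct ClientIp(Option<IpAddr>);

impl FromRequest for ClientIp {
    type Error = Error;
    type Future = Ready<Result<Self, Self::Error>>;
    // note: no `type Config` item anymore

    fn from_request(req: &HttpRequest, _payload: &mut Payload) -> Self::Future {
        ready(Ok(ClientIp(req.peer_addr().map(|addr| addr.ip()))))
    }
}
```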
* Feature flag `compress` has been split into its supported algorithm (brotli, gzip, zstd).
By default all compression algorithms are enabled.
To select algorithm you want to include with `middleware::Compress` use following flags:
- `compress-brotli`
- `compress-gzip`
- `compress-zstd`
If you have set in your `Cargo.toml` dedicated `actix-web` features and you still want
to have compression enabled. Please change features selection like bellow:
Before: `"compress"`
After: `"compress-brotli", "compress-gzip", "compress-zstd"`
## 3.0.0
* The return type for `ServiceRequest::app_data::<T>()` was changed from returning a `Data<T>` to
simply a `T`. To access a `Data<T>` use `ServiceRequest::app_data::<Data<T>>()`.
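For example, a rough sketch against the v3 API; `MyConfig` is a stand-in for whatever type was registered as app data:
```rust
use actix_web::{dev::ServiceRequest, web::Data};

struct MyConfig {
    greeting: &'static str,
}

fn read_config(req: &ServiceRequest) {
    // registered via `App::app_data(MyConfig { .. })`: ask for the type itself
    if let Some(cfg) = req.app_data::<MyConfig>() {
        println!("{}", cfg.greeting);
    }

    // registered via `App::data(..)` (wrapped in `Data`): name the wrapper explicitly
    if let Some(cfg) = req.app_data::<Data<MyConfig>>() {
        println!("{}", cfg.greeting);
    }
}
```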
* Cookie handling has been offloaded to the `cookie` crate:
* `USERINFO_ENCODE_SET` is no longer exposed. Percent-encoding is still supported; check docs.
* Some types now require lifetime parameters.
* The time crate was updated to `v0.2`, a major breaking change to the time crate, which affects
any `actix-web` method previously expecting a time v0.1 input.
* Setting a cookie's SameSite property, explicitly, to `SameSite::None` will now
result in `SameSite=None` being sent with the response Set-Cookie header.
To create a cookie without a SameSite attribute, remove any calls setting same_site.
* actix-http support for Actors messages was moved to actix-http crate and is enabled
with feature `actors`
* content_length function is removed from actix-http.
You can set Content-Length by normally setting the response body or calling no_chunking function.
* `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
`u64` instead of a `usize`.
* Code that was using `path.<index>` to access a `web::Path<(A, B, C)>`s elements now needs to use
destructuring or `.into_inner()`. For example:
```rust
// Previously:
async fn some_route(path: web::Path<(String, String)>) -> String {
format!("Hello, {} {}", path.0, path.1)
}
// Now (this also worked before):
async fn some_route(path: web::Path<(String, String)>) -> String {
let (first_name, last_name) = path.into_inner();
format!("Hello, {} {}", first_name, last_name)
}
// Or (this wasn't previously supported):
async fn some_route(web::Path((first_name, last_name)): web::Path<(String, String)>) -> String {
format!("Hello, {} {}", first_name, last_name)
}
```
* `middleware::NormalizePath` can now also be configured to trim trailing slashes instead of always keeping one.
It will need `middleware::normalize::TrailingSlash` when being constructed with `NormalizePath::new(...)`,
or for an easier migration you can replace `wrap(middleware::NormalizePath)` with `wrap(middleware::NormalizePath::new(TrailingSlash::MergeOnly))`.
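A sketch of that drop-in replacement wired into an app; the bind address and route are placeholders, and `TrailingSlash::MergeOnly` assumes actix-web 3.1 or later:
```rust
use actix_web::middleware::normalize::{NormalizePath, TrailingSlash};
use actix_web::{web, App, HttpResponse, HttpServer};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new()
            // `MergeOnly` merges duplicate slashes but keeps trailing slashes as they are
            .wrap(NormalizePath::new(TrailingSlash::MergeOnly))
            .route("/example", web::get().to(|| async { HttpResponse::Ok().finish() }))
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}
```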
* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`.
* `HttpServer::maxconnrate` is renamed to the more expressive `HttpServer::max_connection_rate`.
## 2.0.0
* `HttpServer::start()` renamed to `HttpServer::run()`. It also possible to
`.await` on `run` method result, in that case it awaits server exit.
* `App::register_data()` renamed to `App::app_data()` and accepts any type `T: 'static`.
Stored data is available via `HttpRequest::app_data()` method at runtime.
* Extractor configuration must be registered with `App::app_data()` instead of `App::data()`
* Sync handlers has been removed. `.to_async()` method has been renamed to `.to()`
replace `fn` with `async fn` to convert sync handler to async
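A minimal sketch of the change; the handler name and the `build` helper are placeholders, assuming the 2.0-era API:
```rust
use actix_web::{web, App, HttpRequest, HttpResponse};

// was (1.x): a synchronous `fn index(req: HttpRequest) -> HttpResponse`
async fn index(_req: HttpRequest) -> HttpResponse {
    HttpResponse::Ok().body("hello")
}

fn build() {
    // `.to_async()` is gone; `.to()` now takes the async handler directly
    let _app = App::new().route("/", web::get().to(index));
}
```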
* `actix_http_test::TestServer` moved to `actix_web::test` module. To start
test server use `test::start()` or `test_start_with_config()` methods
* `ResponseError` trait has been reafctored. `ResponseError::error_response()` renders
http response.
* Feature `rust-tls` renamed to `rustls`
instead of
```rust
actix-web = { version = "2.0.0", features = ["rust-tls"] }
```
use
```rust
actix-web = { version = "2.0.0", features = ["rustls"] }
```
* Feature `ssl` renamed to `openssl`
instead of
```rust
actix-web = { version = "2.0.0", features = ["ssl"] }
```
use
```rust
actix-web = { version = "2.0.0", features = ["openssl"] }
```
* `Cors` builder now requires that you call `.finish()` to construct the middleware
## 1.0.1
* Cors middleware has been moved to `actix-cors` crate
instead of
```rust
use actix_web::middleware::cors::Cors;
```
use
```rust
use actix_cors::Cors;
```
* Identity middleware has been moved to `actix-identity` crate
instead of
```rust
use actix_web::middleware::identity::{Identity, CookieIdentityPolicy, IdentityService};
```
use
```rust
use actix_identity::{Identity, CookieIdentityPolicy, IdentityService};
```
## 1.0.0
* Extractor configuration. In version 1.0 this is handled with the new `Data` mechanism for both setting and retrieving the configuration
instead of
```rust
#[derive(Default)]
struct ExtractorConfig {
config: String,
}
impl FromRequest for YourExtractor {
type Config = ExtractorConfig;
type Result = Result<YourExtractor, Error>;
fn from_request(req: &HttpRequest, cfg: &Self::Config) -> Self::Result {
println!("use the config: {:?}", cfg.config);
...
}
}
App::new().resource("/route_with_config", |r| {
r.post().with_config(handler_fn, |cfg| {
cfg.0.config = "test".to_string();
})
})
```
use the HttpRequest to get the configuration like any other `Data` with `req.app_data::<C>()` and set it with the `data()` method on the `resource`
```rust
#[derive(Default)]
struct ExtractorConfig {
config: String,
}
impl FromRequest for YourExtractor {
type Error = Error;
type Future = Result<Self, Self::Error>;
type Config = ExtractorConfig;
fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future {
let cfg = req.app_data::<ExtractorConfig>();
println!("config data?: {:?}", cfg.unwrap().role);
...
}
}
App::new().service(
resource("/route_with_config")
.data(ExtractorConfig {
config: "test".to_string(),
})
.route(post().to(handler_fn)),
)
```
* Resource registration. 1.0 version uses generalized resource
registration via `.service()` method.
instead of
```rust
App.new().resource("/welcome", |r| r.f(welcome))
```
use App's or Scope's `.service()` method. `.service()` method accepts
object that implements `HttpServiceFactory` trait. By default
actix-web provides `Resource` and `Scope` services.
```rust
App.new().service(
web::resource("/welcome")
.route(web::get().to(welcome))
.route(web::post().to(post_handler))
```
* Scope registration.
instead of
```rust
let app = App::new().scope("/{project_id}", |scope| {
scope
.resource("/path1", |r| r.f(|_| HttpResponse::Ok()))
.resource("/path2", |r| r.f(|_| HttpResponse::Ok()))
.resource("/path3", |r| r.f(|_| HttpResponse::MethodNotAllowed()))
});
```
use `.service()` for registration and `web::scope()` as scope object factory.
```rust
let app = App::new().service(
web::scope("/{project_id}")
.service(web::resource("/path1").to(|| HttpResponse::Ok()))
.service(web::resource("/path2").to(|| HttpResponse::Ok()))
.service(web::resource("/path3").to(|| HttpResponse::MethodNotAllowed()))
);
```
* `.with()`, `.with_async()` registration methods have been renamed to `.to()` and `.to_async()`.
instead of
```rust
App.new().resource("/welcome", |r| r.with(welcome))
```
use `.to()` or `.to_async()` methods
```rust
App.new().service(web::resource("/welcome").to(welcome))
```
* Passing arguments to handler with extractors, multiple arguments are allowed
instead of
```rust
fn welcome((body, req): (Bytes, HttpRequest)) -> ... {
...
}
```
use multiple arguments
```rust
fn welcome(body: Bytes, req: HttpRequest) -> ... {
...
}
```
* `.f()`, `.a()` and `.h()` handler registration methods have been removed.
Use `.to()` for handlers and `.to_async()` for async handlers. Handler function
must use extractors.
instead of
```rust
App.new().resource("/welcome", |r| r.f(welcome))
```
use App's `to()` or `to_async()` methods
```rust
App.new().service(web::resource("/welcome").to(welcome))
```
* `HttpRequest` does not provide access to request's payload stream.
instead of
```rust
fn index(req: &HttpRequest) -> Box<Future<Item=HttpResponse, Error=Error>> {
req
.payload()
.from_err()
.fold((), |_, chunk| {
...
})
.map(|_| HttpResponse::Ok().finish())
.responder()
}
```
use `Payload` extractor
```rust
fn index(stream: web::Payload) -> impl Future<Item=HttpResponse, Error=Error> {
stream
.from_err()
.fold((), |_, chunk| {
...
})
.map(|_| HttpResponse::Ok().finish())
}
```
* `State` is now `Data`. You register Data during the App initialization process
and then access it from handlers either using a Data extractor or using
HttpRequest's api.
instead of
```rust
App.with_state(T)
```
use App's `data` method
```rust
App.new()
.data(T)
```
and either use the Data extractor within your handler
```rust
use actix_web::web::Data;
fn endpoint_handler(Data<T>)){
...
}
```
.. or access your Data element from the HttpRequest
```rust
fn endpoint_handler(req: HttpRequest) {
let data: Option<Data<T>> = req.app_data::<T>();
}
```
* AsyncResponder is removed, use `.to_async()` registration method and `impl Future<>` as result type.
instead of
```rust
use actix_web::AsyncResponder;
fn endpoint_handler(...) -> impl Future<Item=HttpResponse, Error=Error>{
...
.responder()
}
```
.. simply omit AsyncResponder and the corresponding responder() finish method
* Middleware
instead of
```rust
let app = App::new()
.middleware(middleware::Logger::default())
```
use `.wrap()` method
```rust
let app = App::new()
.wrap(middleware::Logger::default())
.route("/index.html", web::get().to(index));
```
* `HttpRequest::body()`, `HttpRequest::urlencoded()`, `HttpRequest::json()`, `HttpRequest::multipart()`
method have been removed. Use `Bytes`, `String`, `Form`, `Json`, `Multipart` extractors instead.
instead of
```rust
fn index(req: &HttpRequest) -> Responder {
req.body()
.and_then(|body| {
...
})
}
```
use
```rust
fn index(body: Bytes) -> Responder {
...
}
```
* `actix_web::server` module has been removed. To start http server use `actix_web::HttpServer` type
* StaticFiles and NamedFile have been moved to a separate crate.
instead of `use actix_web::fs::StaticFile`
use `use actix_files::Files`
instead of `use actix_web::fs::Namedfile`
use `use actix_files::NamedFile`
* Multipart has been moved to a separate crate.
instead of `use actix_web::multipart::Multipart`
use `use actix_multipart::Multipart`
* Response compression is not enabled by default.
To enable, use `Compress` middleware, `App::new().wrap(Compress::default())`.
* Session middleware moved to actix-session crate
* Actors support have been moved to `actix-web-actors` crate
* Custom Error
Instead of error_response method alone, ResponseError now provides two methods: error_response and render_response respectively. Where, error_response creates the error response and render_response returns the error response to the caller.
Simplest migration from 0.7 to 1.0 shall include below method to the custom implementation of ResponseError:
```rust
fn render_response(&self) -> HttpResponse {
self.error_response()
}
```
## 0.7.15
* The `' '` character is not percent decoded anymore before matching routes. If you need to use it in
your routes, you should use `%20`.
instead of
```rust
fn main() {
let app = App::new().resource("/my index", |r| {
r.method(http::Method::GET)
.with(index);
});
}
```
use
```rust
fn main() {
let app = App::new().resource("/my%20index", |r| {
r.method(http::Method::GET)
.with(index);
});
}
```
* If you used `AsyncResult::async`, you need to replace it with `AsyncResult::future`
## 0.7.4
* `Route::with_config()`/`Route::with_async_config()` always pass configuration objects as a tuple,
even for handlers with a single parameter.
## 0.7
* `HttpRequest` no longer implements `Stream`. If you need to read the request payload,
use the `HttpMessage::payload()` method.
instead of
```rust
fn index(req: HttpRequest) -> impl Responder {
req
.from_err()
.fold(...)
....
}
```
use `.payload()`
```rust
fn index(req: HttpRequest) -> impl Responder {
req
.payload() // <- get request payload stream
.from_err()
.fold(...)
....
}
```
* [Middleware](https://actix.rs/actix-web/actix_web/middleware/trait.Middleware.html)
trait uses `&HttpRequest` instead of `&mut HttpRequest`.
* Removed `Route::with2()` and `Route::with3()`; use a tuple of extractors instead.
instead of
```rust
fn index(query: Query<..>, info: Json<MyStruct>) -> impl Responder {}
```
use a tuple of extractors and use `.with()` for registration:
```rust
fn index((query, json): (Query<..>, Json<MyStruct>)) -> impl Responder {}
```
* `Handler::handle()` uses `&self` instead of `&mut self`
* `Handler::handle()` accepts a reference to `HttpRequest<_>` instead of a value
* Removed deprecated `HttpServer::threads()`, use
[HttpServer::workers()](https://actix.rs/actix-web/actix_web/server/struct.HttpServer.html#method.workers) instead.
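For example, a 0.7-style sketch (the app factory is left empty for brevity):
```rust
use actix_web::{server, App};

server::new(|| App::new())
    .workers(4) // number of worker threads, previously `threads(4)`
    .bind("127.0.0.1:8080")
    .unwrap()
    .run();
```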
* Renamed `client::ClientConnectorError::Connector` to
`client::ClientConnectorError::Resolver`
* `Route::with()` does not return `ExtractorConfig`; to configure
an extractor, use `Route::with_config()`
instead of
```rust
fn main() {
let app = App::new().resource("/index.html", |r| {
r.method(http::Method::GET)
.with(index)
.limit(4096); // <- limit size of the payload
});
}
```
use
```rust
fn main() {
let app = App::new().resource("/index.html", |r| {
r.method(http::Method::GET)
.with_config(index, |cfg| { // <- register handler
cfg.limit(4096); // <- limit size of the payload
})
});
}
```
* `Route::with_async()` does not return `ExtractorConfig`; to configure
an extractor, use `Route::with_async_config()`
## 0.6
* The `Path<T>` extractor returns `ErrorNotFound` on failure instead of `ErrorBadRequest`
* `ws::Message::Close` now includes an optional close reason.
`ws::CloseCode::Status` and `ws::CloseCode::Empty` have been removed.
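For example, a close frame can now carry a reason; the `CloseReason` field names below are an assumption about this release:
```rust
let msg = ws::Message::Close(Some(ws::CloseReason {
    code: ws::CloseCode::Normal,
    description: Some("session finished".to_owned()),
}));
```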
* `HttpServer::threads()` renamed to `HttpServer::workers()`.
* `HttpServer::start_ssl()` and `HttpServer::start_tls()` deprecated.
Use `HttpServer::bind_ssl()` and `HttpServer::bind_tls()` instead.
* `HttpRequest::extensions()` returns a read-only reference to the request's extensions;
`HttpRequest::extensions_mut()` returns a mutable reference.
* Instead of
`use actix_web::middleware::{
CookieSessionBackend, CookieSessionError, RequestSession,
Session, SessionBackend, SessionImpl, SessionStorage};`
use the `actix_web::middleware::session` module:
`use actix_web::middleware::session::{CookieSessionBackend, CookieSessionError,
RequestSession, Session, SessionBackend, SessionImpl, SessionStorage};`
* `FromRequest::from_request()` accepts a mutable reference to a request
* `FromRequest::Result` has to implement `Into<Reply<Self>>`
* [`Responder::respond_to()`](
https://actix.rs/actix-web/actix_web/trait.Responder.html#tymethod.respond_to)
is generic over `S`
* Use the `Query` extractor instead of `HttpRequest::query()`.
```rust
fn index(q: Query<HashMap<String, String>>) -> Result<..> {
...
}
```
or
```rust
let q = Query::<HashMap<String, String>>::extract(req);
```
* WebSocket operations are implemented as the `WsWriter` trait;
bring it into scope with `use actix_web::ws::WsWriter;`
## 0.5
* `HttpResponseBuilder::body()`, `.finish()`, `.json()`
methods return `HttpResponse` instead of `Result<HttpResponse>`
* `actix_web::Method`, `actix_web::StatusCode`, `actix_web::Version`
moved to `actix_web::http` module
* `actix_web::header` moved to `actix_web::http::header`
* `NormalizePath` moved to `actix_web::http` module
* `HttpServer` moved to `actix_web::server`; a new `actix_web::server::new()` function was added as a
shortcut for `actix_web::server::HttpServer::new()`, as sketched below
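For example, a sketch in the style of this release's examples; `index` is a placeholder handler:
```rust
use actix_web::{server, App};

fn main() {
    server::new(|| App::new().resource("/", |r| r.f(index)))
        .bind("127.0.0.1:8080")
        .unwrap()
        .run();
}
```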
* `DefaultHeaders` middleware no longer uses a separate builder; all builder methods moved to the type itself
* `StaticFiles::new()`'s `show_index` parameter was removed; use the `show_files_listing()` method instead.
* `CookieSessionBackendBuilder` was removed; all methods moved to the `CookieSessionBackend` type
* `actix_web::httpcodes` module is deprecated; `HttpResponse::Ok()`, `HttpResponse::Found()`, and the other `HttpResponse::XXX()`
functions should be used instead
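For example (a sketch; handlers in this release still take `HttpRequest` by value):
```rust
use actix_web::{HttpRequest, HttpResponse};

fn index(_req: HttpRequest) -> HttpResponse {
    HttpResponse::Ok().body("Hello world!")
}
```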
* `ClientRequestBuilder::body()` returns `Result<_, actix_web::Error>`
instead of `Result<_, http::Error>`
* `Application` renamed to `App`
* `actix_web::Reply`, `actix_web::Resource` moved to `actix_web::dev`


@@ -1 +0,0 @@
actix-web/README.md

109
README.md Normal file

@@ -0,0 +1,109 @@
<div align="center">
<h1>Actix Web</h1>
<p>
<strong>Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust</strong>
</p>
<p>
[![crates.io](https://img.shields.io/crates/v/actix-web?label=latest)](https://crates.io/crates/actix-web)
[![Documentation](https://docs.rs/actix-web/badge.svg?version=4.0.0-beta.14)](https://docs.rs/actix-web/4.0.0-beta.14)
[![Version](https://img.shields.io/badge/rustc-1.52+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.52.0.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-web.svg)
[![Dependency Status](https://deps.rs/crate/actix-web/4.0.0-beta.14/status.svg)](https://deps.rs/crate/actix-web/4.0.0-beta.14)
<br />
[![build status](https://github.com/actix/actix-web/workflows/CI%20%28Linux%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-web/actions)
[![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web)
![downloads](https://img.shields.io/crates/d/actix-web.svg)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
</p>
</div>
## Features
* Supports *HTTP/1.x* and *HTTP/2*
* Streaming and pipelining
* Keep-alive and slow requests handling
* Client/server [WebSockets](https://actix.rs/docs/websockets/) support
* Transparent content compression/decompression (br, gzip, deflate, zstd)
* Powerful [request routing](https://actix.rs/docs/url-dispatch/)
* Multipart streams
* Static assets
* SSL support using OpenSSL or Rustls
* Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/))
* Includes an async [HTTP client](https://docs.rs/awc/)
* Runs on stable Rust 1.52+
## Documentation
* [Website & User Guide](https://actix.rs)
* [Examples Repository](https://github.com/actix/examples)
* [API Documentation](https://docs.rs/actix-web)
* [API Documentation (master branch)](https://actix.rs/actix-web/actix_web)
## Example
Dependencies:
```toml
[dependencies]
actix-web = "3"
```
Code:
```rust
use actix_web::{get, web, App, HttpServer, Responder};
#[get("/{id}/{name}/index.html")]
async fn index(web::Path((id, name)): web::Path<(u32, String)>) -> impl Responder {
format!("Hello {}! id:{}", name, id)
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
HttpServer::new(|| App::new().service(index))
.bind("127.0.0.1:8080")?
.run()
.await
}
```
### More examples
* [Basic Setup](https://github.com/actix/examples/tree/master/basics/basics/)
* [Application State](https://github.com/actix/examples/tree/master/basics/state/)
* [JSON Handling](https://github.com/actix/examples/tree/master/json/json/)
* [Multipart Streams](https://github.com/actix/examples/tree/master/forms/multipart/)
* [Diesel Integration](https://github.com/actix/examples/tree/master/database_interactions/diesel/)
* [r2d2 Integration](https://github.com/actix/examples/tree/master/database_interactions/r2d2/)
* [Simple WebSocket](https://github.com/actix/examples/tree/master/websockets/websocket/)
* [Tera Templates](https://github.com/actix/examples/tree/master/template_engines/tera/)
* [Askama Templates](https://github.com/actix/examples/tree/master/template_engines/askama/)
* [HTTPS using Rustls](https://github.com/actix/examples/tree/master/security/rustls/)
* [HTTPS using OpenSSL](https://github.com/actix/examples/tree/master/security/openssl/)
* [WebSocket Chat](https://github.com/actix/examples/tree/master/websockets/chat/)
You may consider checking out
[this directory](https://github.com/actix/examples/tree/master/) for more examples.
## Benchmarks
One of the fastest web frameworks available according to the
[TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r20&test=composite).
## License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
  [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
  [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
at your option.
## Code of Conduct
Contribution to the actix-web repo is organized under the terms of the Contributor Covenant.
The Actix team promises to intervene to uphold that code of conduct.


@@ -3,73 +3,43 @@
## Unreleased - 2021-xx-xx ## Unreleased - 2021-xx-xx
## 0.6.0-beta.16 - 2022-01-31
- No significant changes since `0.6.0-beta.15`.
## 0.6.0-beta.15 - 2022-01-21
- No significant changes since `0.6.0-beta.14`.
## 0.6.0-beta.14 - 2022-01-14
- The `prefer_utf8` option introduced in `0.4.0` is now true by default. [#2583]
[#2583]: https://github.com/actix/actix-web/pull/2583
## 0.6.0-beta.13 - 2022-01-04
- The `Files` service now rejects requests with URL paths that include `%2F` (decoded: `/`). [#2398]
- The `Files` service now correctly decodes `%25` in the URL path to `%` for the file path. [#2398]
- Minimum supported Rust version (MSRV) is now 1.54.
[#2398]: https://github.com/actix/actix-web/pull/2398
## 0.6.0-beta.12 - 2021-12-29
- No significant changes since `0.6.0-beta.11`.
## 0.6.0-beta.11 - 2021-12-27
- No significant changes since `0.6.0-beta.10`.
## 0.6.0-beta.10 - 2021-12-11 ## 0.6.0-beta.10 - 2021-12-11
- No significant changes since `0.6.0-beta.9`. * No significant changes since `0.6.0-beta.9`.
## 0.6.0-beta.9 - 2021-11-22 ## 0.6.0-beta.9 - 2021-11-22
- Add crate feature `experimental-io-uring`, enabling async file I/O to be utilized. This feature is only available on Linux OSes with recent kernel versions. This feature is semver-exempt. [#2408] * Add crate feature `experimental-io-uring`, enabling async file I/O to be utilized. This feature is only available on Linux OSes with recent kernel versions. This feature is semver-exempt. [#2408]
- Add `NamedFile::open_async`. [#2408] * Add `NamedFile::open_async`. [#2408]
- Fix 304 Not Modified responses to omit the Content-Length header, as per the spec. [#2453] * Fix 304 Not Modified responses to omit the Content-Length header, as per the spec. [#2453]
- The `Responder` impl for `NamedFile` now has a boxed future associated type. [#2408] * The `Responder` impl for `NamedFile` now has a boxed future associated type. [#2408]
- The `Service` impl for `NamedFileService` now has a boxed future associated type. [#2408] * The `Service` impl for `NamedFileService` now has a boxed future associated type. [#2408]
- Add `impl Clone` for `FilesService`. [#2408] * Add `impl Clone` for `FilesService`. [#2408]
[#2408]: https://github.com/actix/actix-web/pull/2408 [#2408]: https://github.com/actix/actix-web/pull/2408
[#2453]: https://github.com/actix/actix-web/pull/2453 [#2453]: https://github.com/actix/actix-web/pull/2453
## 0.6.0-beta.8 - 2021-10-20 ## 0.6.0-beta.8 - 2021-10-20
- Minimum supported Rust version (MSRV) is now 1.52. * Minimum supported Rust version (MSRV) is now 1.52.
## 0.6.0-beta.7 - 2021-09-09 ## 0.6.0-beta.7 - 2021-09-09
- Minimum supported Rust version (MSRV) is now 1.51. * Minimum supported Rust version (MSRV) is now 1.51.
## 0.6.0-beta.6 - 2021-06-26 ## 0.6.0-beta.6 - 2021-06-26
- Added `Files::path_filter()`. [#2274] * Added `Files::path_filter()`. [#2274]
- `Files::show_files_listing()` can now be used with `Files::index_file()` to show files listing as a fallback when the index file is not found. [#2228] * `Files::show_files_listing()` can now be used with `Files::index_file()` to show files listing as a fallback when the index file is not found. [#2228]
[#2274]: https://github.com/actix/actix-web/pull/2274 [#2274]: https://github.com/actix/actix-web/pull/2274
[#2228]: https://github.com/actix/actix-web/pull/2228 [#2228]: https://github.com/actix/actix-web/pull/2228
## 0.6.0-beta.5 - 2021-06-17 ## 0.6.0-beta.5 - 2021-06-17
- `NamedFile` now implements `ServiceFactory` and `HttpServiceFactory` making it much more useful in routing. For example, it can be used directly as a default service. [#2135] * `NamedFile` now implements `ServiceFactory` and `HttpServiceFactory` making it much more useful in routing. For example, it can be used directly as a default service. [#2135]
- For symbolic links, `Content-Disposition` header no longer shows the filename of the original file. [#2156] * For symbolic links, `Content-Disposition` header no longer shows the filename of the original file. [#2156]
- `Files::redirect_to_slash_directory()` now works as expected when used with `Files::show_files_listing()`. [#2225] * `Files::redirect_to_slash_directory()` now works as expected when used with `Files::show_files_listing()`. [#2225]
- `application/{javascript, json, wasm}` mime type now have `inline` disposition by default. [#2257] * `application/{javascript, json, wasm}` mime type now have `inline` disposition by default. [#2257]
[#2135]: https://github.com/actix/actix-web/pull/2135 [#2135]: https://github.com/actix/actix-web/pull/2135
[#2156]: https://github.com/actix/actix-web/pull/2156 [#2156]: https://github.com/actix/actix-web/pull/2156
@@ -78,130 +48,130 @@
## 0.6.0-beta.4 - 2021-04-02 ## 0.6.0-beta.4 - 2021-04-02
- Add support for `.guard` in `Files` to selectively filter `Files` services. [#2046] * Add support for `.guard` in `Files` to selectively filter `Files` services. [#2046]
[#2046]: https://github.com/actix/actix-web/pull/2046 [#2046]: https://github.com/actix/actix-web/pull/2046
## 0.6.0-beta.3 - 2021-03-09 ## 0.6.0-beta.3 - 2021-03-09
- No notable changes. * No notable changes.
## 0.6.0-beta.2 - 2021-02-10 ## 0.6.0-beta.2 - 2021-02-10
- Fix If-Modified-Since and If-Unmodified-Since to not compare using sub-second timestamps. [#1887] * Fix If-Modified-Since and If-Unmodified-Since to not compare using sub-second timestamps. [#1887]
- Replace `v_htmlescape` with `askama_escape`. [#1953] * Replace `v_htmlescape` with `askama_escape`. [#1953]
[#1887]: https://github.com/actix/actix-web/pull/1887 [#1887]: https://github.com/actix/actix-web/pull/1887
[#1953]: https://github.com/actix/actix-web/pull/1953 [#1953]: https://github.com/actix/actix-web/pull/1953
## 0.6.0-beta.1 - 2021-01-07 ## 0.6.0-beta.1 - 2021-01-07
- `HttpRange::parse` now has its own error type. * `HttpRange::parse` now has its own error type.
- Update `bytes` to `1.0`. [#1813] * Update `bytes` to `1.0`. [#1813]
[#1813]: https://github.com/actix/actix-web/pull/1813 [#1813]: https://github.com/actix/actix-web/pull/1813
## 0.5.0 - 2020-12-26 ## 0.5.0 - 2020-12-26
- Optionally support hidden files/directories. [#1811] * Optionally support hidden files/directories. [#1811]
[#1811]: https://github.com/actix/actix-web/pull/1811 [#1811]: https://github.com/actix/actix-web/pull/1811
## 0.4.1 - 2020-11-24 ## 0.4.1 - 2020-11-24
- Clarify order of parameters in `Files::new` and improve docs. * Clarify order of parameters in `Files::new` and improve docs.
## 0.4.0 - 2020-10-06 ## 0.4.0 - 2020-10-06
- Add `Files::prefer_utf8` option that adds UTF-8 charset on certain response types. [#1714] * Add `Files::prefer_utf8` option that adds UTF-8 charset on certain response types. [#1714]
[#1714]: https://github.com/actix/actix-web/pull/1714 [#1714]: https://github.com/actix/actix-web/pull/1714
## 0.3.0 - 2020-09-11 ## 0.3.0 - 2020-09-11
- No significant changes from 0.3.0-beta.1. * No significant changes from 0.3.0-beta.1.
## 0.3.0-beta.1 - 2020-07-15 ## 0.3.0-beta.1 - 2020-07-15
- Update `v_htmlescape` to 0.10 * Update `v_htmlescape` to 0.10
- Update `actix-web` and `actix-http` dependencies to beta.1 * Update `actix-web` and `actix-http` dependencies to beta.1
## 0.3.0-alpha.1 - 2020-05-23 ## 0.3.0-alpha.1 - 2020-05-23
- Update `actix-web` and `actix-http` dependencies to alpha * Update `actix-web` and `actix-http` dependencies to alpha
- Fix some typos in the docs * Fix some typos in the docs
- Bump minimum supported Rust version to 1.40 * Bump minimum supported Rust version to 1.40
- Support sending Content-Length when Content-Range is specified [#1384] * Support sending Content-Length when Content-Range is specified [#1384]
[#1384]: https://github.com/actix/actix-web/pull/1384 [#1384]: https://github.com/actix/actix-web/pull/1384
## 0.2.1 - 2019-12-22 ## 0.2.1 - 2019-12-22
- Use the same format for file URLs regardless of platforms * Use the same format for file URLs regardless of platforms
## 0.2.0 - 2019-12-20 ## 0.2.0 - 2019-12-20
- Fix BodyEncoding trait import #1220 * Fix BodyEncoding trait import #1220
## 0.2.0-alpha.1 - 2019-12-07 ## 0.2.0-alpha.1 - 2019-12-07
- Migrate to `std::future` * Migrate to `std::future`
## 0.1.7 - 2019-11-06 ## 0.1.7 - 2019-11-06
- Add an additional `filename*` param in the `Content-Disposition` header of * Add an additional `filename*` param in the `Content-Disposition` header of
`actix_files::NamedFile` to be more compatible. (#1151) `actix_files::NamedFile` to be more compatible. (#1151)
## 0.1.6 - 2019-10-14 ## 0.1.6 - 2019-10-14
- Add option to redirect to a slash-ended path `Files` #1132 * Add option to redirect to a slash-ended path `Files` #1132
## 0.1.5 - 2019-10-08 ## 0.1.5 - 2019-10-08
- Bump up `mime_guess` crate version to 2.0.1 * Bump up `mime_guess` crate version to 2.0.1
- Bump up `percent-encoding` crate version to 2.1 * Bump up `percent-encoding` crate version to 2.1
- Allow user defined request guards for `Files` #1113 * Allow user defined request guards for `Files` #1113
## 0.1.4 - 2019-07-20 ## 0.1.4 - 2019-07-20
- Allow to disable `Content-Disposition` header #686 * Allow to disable `Content-Disposition` header #686
## 0.1.3 - 2019-06-28 ## 0.1.3 - 2019-06-28
- Do not set `Content-Length` header, let actix-http set it #930 * Do not set `Content-Length` header, let actix-http set it #930
## 0.1.2 - 2019-06-13 ## 0.1.2 - 2019-06-13
- Content-Length is 0 for NamedFile HEAD request #914 * Content-Length is 0 for NamedFile HEAD request #914
- Fix ring dependency from actix-web default features for #741 * Fix ring dependency from actix-web default features for #741
## 0.1.1 - 2019-06-01 ## 0.1.1 - 2019-06-01
- Static files are incorrectly served as both chunked and with length #812 * Static files are incorrectly served as both chunked and with length #812
## 0.1.0 - 2019-05-25 ## 0.1.0 - 2019-05-25
- NamedFile last-modified check always fails due to nano-seconds in file modified date #820 * NamedFile last-modified check always fails due to nano-seconds in file modified date #820
## 0.1.0-beta.4 - 2019-05-12 ## 0.1.0-beta.4 - 2019-05-12
- Update actix-web to beta.4 * Update actix-web to beta.4
## 0.1.0-beta.1 - 2019-04-20 ## 0.1.0-beta.1 - 2019-04-20
- Update actix-web to beta.1 * Update actix-web to beta.1
## 0.1.0-alpha.6 - 2019-04-14 ## 0.1.0-alpha.6 - 2019-04-14
- Update actix-web to alpha6 * Update actix-web to alpha6
## 0.1.0-alpha.4 - 2019-04-08 ## 0.1.0-alpha.4 - 2019-04-08
- Update actix-web to alpha4 * Update actix-web to alpha4
## 0.1.0-alpha.2 - 2019-04-02 ## 0.1.0-alpha.2 - 2019-04-02
- Add default handler support * Add default handler support
## 0.1.0-alpha.1 - 2019-03-28 ## 0.1.0-alpha.1 - 2019-03-28
- Initial impl * Initial impl


@@ -1,6 +1,6 @@
[package] [package]
name = "actix-files" name = "actix-files"
version = "0.6.0-beta.16" version = "0.6.0-beta.10"
authors = [ authors = [
"Nikolay Kim <fafhrd91@gmail.com>", "Nikolay Kim <fafhrd91@gmail.com>",
"fakeshadow <24548779@qq.com>", "fakeshadow <24548779@qq.com>",
@@ -22,10 +22,10 @@ path = "src/lib.rs"
experimental-io-uring = ["actix-web/experimental-io-uring", "tokio-uring"] experimental-io-uring = ["actix-web/experimental-io-uring", "tokio-uring"]
[dependencies] [dependencies]
actix-http = "3.0.0" actix-http = "3.0.0-beta.15"
actix-service = "2" actix-service = "2"
actix-utils = "3" actix-utils = "3"
actix-web = { version = "4.0.0", default-features = false } actix-web = { version = "4.0.0-beta.14", default-features = false }
askama_escape = "0.10" askama_escape = "0.10"
bitflags = "1" bitflags = "1"
@@ -39,10 +39,9 @@ mime_guess = "2.0.1"
percent-encoding = "2.1" percent-encoding = "2.1"
pin-project-lite = "0.2.7" pin-project-lite = "0.2.7"
tokio-uring = { version = "0.2", optional = true, features = ["bytes"] } tokio-uring = { version = "0.1", optional = true }
[dev-dependencies] [dev-dependencies]
actix-rt = "2.2" actix-rt = "2.2"
actix-test = "0.1.0-beta.13" actix-test = "0.1.0-beta.8"
actix-web = "4.0.0" actix-web = "4.0.0-beta.14"
tempfile = "3.2"


@@ -3,16 +3,16 @@
> Static file serving for Actix Web > Static file serving for Actix Web
[![crates.io](https://img.shields.io/crates/v/actix-files?label=latest)](https://crates.io/crates/actix-files) [![crates.io](https://img.shields.io/crates/v/actix-files?label=latest)](https://crates.io/crates/actix-files)
[![Documentation](https://docs.rs/actix-files/badge.svg?version=0.6.0-beta.16)](https://docs.rs/actix-files/0.6.0-beta.16) [![Documentation](https://docs.rs/actix-files/badge.svg?version=0.6.0-beta.10)](https://docs.rs/actix-files/0.6.0-beta.10)
[![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html) [![Version](https://img.shields.io/badge/rustc-1.52+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.52.0.html)
![License](https://img.shields.io/crates/l/actix-files.svg) ![License](https://img.shields.io/crates/l/actix-files.svg)
<br /> <br />
[![dependency status](https://deps.rs/crate/actix-files/0.6.0-beta.16/status.svg)](https://deps.rs/crate/actix-files/0.6.0-beta.16) [![dependency status](https://deps.rs/crate/actix-files/0.6.0-beta.10/status.svg)](https://deps.rs/crate/actix-files/0.6.0-beta.10)
[![Download](https://img.shields.io/crates/d/actix-files.svg)](https://crates.io/crates/actix-files) [![Download](https://img.shields.io/crates/d/actix-files.svg)](https://crates.io/crates/actix-files)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) [![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
## Documentation & Resources ## Documentation & Resources
- [API Documentation](https://docs.rs/actix-files) - [API Documentation](https://docs.rs/actix-files/)
- [Example Project](https://github.com/actix/examples/tree/master/basics/static-files) - [Example Project](https://github.com/actix/examples/tree/master/basics/static_index)
- Minimum Supported Rust Version (MSRV): 1.54 - Minimum Supported Rust Version (MSRV): 1.52


@@ -10,9 +10,6 @@ use actix_web::{error::Error, web::Bytes};
use futures_core::{ready, Stream}; use futures_core::{ready, Stream};
use pin_project_lite::pin_project; use pin_project_lite::pin_project;
#[cfg(feature = "experimental-io-uring")]
use bytes::BytesMut;
use super::named::File; use super::named::File;
pin_project! { pin_project! {
@@ -81,7 +78,7 @@ async fn chunked_read_file_callback(
) -> Result<(File, Bytes), Error> { ) -> Result<(File, Bytes), Error> {
use io::{Read as _, Seek as _}; use io::{Read as _, Seek as _};
let res = actix_web::web::block(move || { let res = actix_web::rt::task::spawn_blocking(move || {
let mut buf = Vec::with_capacity(max_bytes); let mut buf = Vec::with_capacity(max_bytes);
file.seek(io::SeekFrom::Start(offset))?; file.seek(io::SeekFrom::Start(offset))?;
@@ -94,7 +91,8 @@ async fn chunked_read_file_callback(
Ok((file, Bytes::from(buf))) Ok((file, Bytes::from(buf)))
} }
}) })
.await??; .await
.map_err(|_| actix_web::error::BlockingError)??;
Ok(res) Ok(res)
} }
@@ -216,3 +214,64 @@ where
} }
} }
} }
#[cfg(feature = "experimental-io-uring")]
use bytes_mut::BytesMut;
// TODO: remove new type and use bytes::BytesMut directly
#[doc(hidden)]
#[cfg(feature = "experimental-io-uring")]
mod bytes_mut {
use std::ops::{Deref, DerefMut};
use tokio_uring::buf::{IoBuf, IoBufMut};
#[derive(Debug)]
pub struct BytesMut(bytes::BytesMut);
impl BytesMut {
pub(super) fn new() -> Self {
Self(bytes::BytesMut::new())
}
}
impl Deref for BytesMut {
type Target = bytes::BytesMut;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for BytesMut {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
unsafe impl IoBuf for BytesMut {
fn stable_ptr(&self) -> *const u8 {
self.0.as_ptr()
}
fn bytes_init(&self) -> usize {
self.0.len()
}
fn bytes_total(&self) -> usize {
self.0.capacity()
}
}
unsafe impl IoBufMut for BytesMut {
fn stable_mut_ptr(&mut self) -> *mut u8 {
self.0.as_mut_ptr()
}
unsafe fn set_init(&mut self, init_len: usize) {
if self.len() < init_len {
self.0.set_len(init_len);
}
}
}
}


@@ -40,23 +40,14 @@ impl Directory {
pub(crate) type DirectoryRenderer = pub(crate) type DirectoryRenderer =
dyn Fn(&Directory, &HttpRequest) -> Result<ServiceResponse, io::Error>; dyn Fn(&Directory, &HttpRequest) -> Result<ServiceResponse, io::Error>;
/// Returns percent encoded file URL path. // show file url as relative to static path
macro_rules! encode_file_url { macro_rules! encode_file_url {
($path:ident) => { ($path:ident) => {
utf8_percent_encode(&$path, CONTROLS) utf8_percent_encode(&$path, CONTROLS)
}; };
} }
/// Returns HTML entity encoded formatter. // " -- &quot; & -- &amp; ' -- &#x27; < -- &lt; > -- &gt; / -- &#x2f;
///
/// ```plain
/// " => &quot;
/// & => &amp;
/// ' => &#x27;
/// < => &lt;
/// > => &gt;
/// / => &#x2f;
/// ```
macro_rules! encode_file_name { macro_rules! encode_file_name {
($entry:ident) => { ($entry:ident) => {
escape_html_entity(&$entry.file_name().to_string_lossy(), Html) escape_html_entity(&$entry.file_name().to_string_lossy(), Html)
@@ -75,7 +66,7 @@ pub(crate) fn directory_listing(
if dir.is_visible(&entry) { if dir.is_visible(&entry) {
let entry = entry.unwrap(); let entry = entry.unwrap();
let p = match entry.path().strip_prefix(&dir.path) { let p = match entry.path().strip_prefix(&dir.path) {
Ok(p) if cfg!(windows) => base.join(p).to_string_lossy().replace('\\', "/"), Ok(p) if cfg!(windows) => base.join(p).to_string_lossy().replace("\\", "/"),
Ok(p) => base.join(p).to_string_lossy().into_owned(), Ok(p) => base.join(p).to_string_lossy().into_owned(),
Err(_) => continue, Err(_) => continue,
}; };


@@ -23,23 +23,16 @@ impl ResponseError for FilesError {
#[allow(clippy::enum_variant_names)] #[allow(clippy::enum_variant_names)]
#[derive(Display, Debug, PartialEq)] #[derive(Display, Debug, PartialEq)]
#[non_exhaustive]
pub enum UriSegmentError { pub enum UriSegmentError {
/// The segment started with the wrapped invalid character. /// The segment started with the wrapped invalid character.
#[display(fmt = "The segment started with the wrapped invalid character")] #[display(fmt = "The segment started with the wrapped invalid character")]
BadStart(char), BadStart(char),
/// The segment contained the wrapped invalid character. /// The segment contained the wrapped invalid character.
#[display(fmt = "The segment contained the wrapped invalid character")] #[display(fmt = "The segment contained the wrapped invalid character")]
BadChar(char), BadChar(char),
/// The segment ended with the wrapped invalid character. /// The segment ended with the wrapped invalid character.
#[display(fmt = "The segment ended with the wrapped invalid character")] #[display(fmt = "The segment ended with the wrapped invalid character")]
BadEnd(char), BadEnd(char),
/// The path is not a valid UTF-8 string after doing percent decoding.
#[display(fmt = "The path is not a valid UTF-8 string after percent-decoding")]
NotValidUtf8,
} }
/// Return `BadRequest` for `UriSegmentError` /// Return `BadRequest` for `UriSegmentError`


@@ -28,7 +28,6 @@ use crate::{
/// ///
/// `Files` service must be registered with `App::service()` method. /// `Files` service must be registered with `App::service()` method.
/// ///
/// # Examples
/// ``` /// ```
/// use actix_web::App; /// use actix_web::App;
/// use actix_files::Files; /// use actix_files::Files;
@@ -37,7 +36,7 @@ use crate::{
/// .service(Files::new("/static", ".")); /// .service(Files::new("/static", "."));
/// ``` /// ```
pub struct Files { pub struct Files {
mount_path: String, path: String,
directory: PathBuf, directory: PathBuf,
index: Option<String>, index: Option<String>,
show_index: bool, show_index: bool,
@@ -68,7 +67,7 @@ impl Clone for Files {
default: self.default.clone(), default: self.default.clone(),
renderer: self.renderer.clone(), renderer: self.renderer.clone(),
file_flags: self.file_flags, file_flags: self.file_flags,
mount_path: self.mount_path.clone(), path: self.path.clone(),
mime_override: self.mime_override.clone(), mime_override: self.mime_override.clone(),
path_filter: self.path_filter.clone(), path_filter: self.path_filter.clone(),
use_guards: self.use_guards.clone(), use_guards: self.use_guards.clone(),
@@ -107,7 +106,7 @@ impl Files {
}; };
Files { Files {
mount_path: mount_path.trim_end_matches('/').to_owned(), path: mount_path.trim_end_matches('/').to_owned(),
directory: dir, directory: dir,
index: None, index: None,
show_index: false, show_index: false,
@@ -342,9 +341,9 @@ impl HttpServiceFactory for Files {
} }
let rdef = if config.is_root() { let rdef = if config.is_root() {
ResourceDef::root_prefix(&self.mount_path) ResourceDef::root_prefix(&self.path)
} else { } else {
ResourceDef::prefix(&self.mount_path) ResourceDef::prefix(&self.path)
}; };
config.register_service(rdef, guards, self, None) config.register_service(rdef, guards, self, None)


@@ -2,7 +2,7 @@
//! //!
//! Provides a non-blocking service for serving static files from disk. //! Provides a non-blocking service for serving static files from disk.
//! //!
//! # Examples //! # Example
//! ``` //! ```
//! use actix_web::App; //! use actix_web::App;
//! use actix_files::Files; //! use actix_files::Files;
@@ -67,8 +67,8 @@ mod tests {
time::{Duration, SystemTime}, time::{Duration, SystemTime},
}; };
use actix_service::ServiceFactory;
use actix_web::{ use actix_web::{
dev::ServiceFactory,
guard, guard,
http::{ http::{
header::{self, ContentDisposition, DispositionParam, DispositionType}, header::{self, ContentDisposition, DispositionParam, DispositionType},
@@ -106,7 +106,7 @@ mod tests {
let req = TestRequest::default() let req = TestRequest::default()
.insert_header((header::IF_MODIFIED_SINCE, since)) .insert_header((header::IF_MODIFIED_SINCE, since))
.to_http_request(); .to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_MODIFIED); assert_eq!(resp.status(), StatusCode::NOT_MODIFIED);
} }
@@ -118,7 +118,7 @@ mod tests {
let req = TestRequest::default() let req = TestRequest::default()
.insert_header((header::IF_MODIFIED_SINCE, since)) .insert_header((header::IF_MODIFIED_SINCE, since))
.to_http_request(); .to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_MODIFIED); assert_eq!(resp.status(), StatusCode::NOT_MODIFIED);
} }
@@ -131,7 +131,7 @@ mod tests {
.insert_header((header::IF_NONE_MATCH, "miss_etag")) .insert_header((header::IF_NONE_MATCH, "miss_etag"))
.insert_header((header::IF_MODIFIED_SINCE, since)) .insert_header((header::IF_MODIFIED_SINCE, since))
.to_http_request(); .to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_ne!(resp.status(), StatusCode::NOT_MODIFIED); assert_ne!(resp.status(), StatusCode::NOT_MODIFIED);
} }
@@ -143,7 +143,7 @@ mod tests {
let req = TestRequest::default() let req = TestRequest::default()
.insert_header((header::IF_UNMODIFIED_SINCE, since)) .insert_header((header::IF_UNMODIFIED_SINCE, since))
.to_http_request(); .to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK); assert_eq!(resp.status(), StatusCode::OK);
} }
@@ -155,7 +155,7 @@ mod tests {
let req = TestRequest::default() let req = TestRequest::default()
.insert_header((header::IF_UNMODIFIED_SINCE, since)) .insert_header((header::IF_UNMODIFIED_SINCE, since))
.to_http_request(); .to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::PRECONDITION_FAILED); assert_eq!(resp.status(), StatusCode::PRECONDITION_FAILED);
} }
@@ -172,7 +172,7 @@ mod tests {
} }
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(), resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/x-toml" "text/x-toml"
@@ -196,7 +196,7 @@ mod tests {
} }
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_DISPOSITION).unwrap(), resp.headers().get(header::CONTENT_DISPOSITION).unwrap(),
"inline; filename=\"Cargo.toml\"" "inline; filename=\"Cargo.toml\""
@@ -207,7 +207,7 @@ mod tests {
.unwrap() .unwrap()
.disable_content_disposition(); .disable_content_disposition();
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert!(resp.headers().get(header::CONTENT_DISPOSITION).is_none()); assert!(resp.headers().get(header::CONTENT_DISPOSITION).is_none());
} }
@@ -235,7 +235,7 @@ mod tests {
} }
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(), resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/x-toml" "text/x-toml"
@@ -261,7 +261,7 @@ mod tests {
} }
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(), resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/xml" "text/xml"
@@ -284,7 +284,7 @@ mod tests {
} }
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(), resp.headers().get(header::CONTENT_TYPE).unwrap(),
"image/png" "image/png"
@@ -300,10 +300,10 @@ mod tests {
let file = NamedFile::open_async("tests/test.js").await.unwrap(); let file = NamedFile::open_async("tests/test.js").await.unwrap();
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(), resp.headers().get(header::CONTENT_TYPE).unwrap(),
"application/javascript; charset=utf-8" "application/javascript"
); );
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_DISPOSITION).unwrap(), resp.headers().get(header::CONTENT_DISPOSITION).unwrap(),
@@ -330,7 +330,7 @@ mod tests {
} }
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(), resp.headers().get(header::CONTENT_TYPE).unwrap(),
"image/png" "image/png"
@@ -353,7 +353,7 @@ mod tests {
} }
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(), resp.headers().get(header::CONTENT_TYPE).unwrap(),
"application/octet-stream" "application/octet-stream"
@@ -379,7 +379,7 @@ mod tests {
} }
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(), resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/x-toml" "text/x-toml"
@@ -597,8 +597,7 @@ mod tests {
.to_request(); .to_request();
let res = test::call_service(&srv, request).await; let res = test::call_service(&srv, request).await;
assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.status(), StatusCode::OK);
assert!(res.headers().contains_key(header::CONTENT_ENCODING)); assert!(!res.headers().contains_key(header::CONTENT_ENCODING));
assert!(!test::read_body(res).await.is_empty());
} }
#[actix_rt::test] #[actix_rt::test]
@@ -633,7 +632,7 @@ mod tests {
async fn test_named_file_allowed_method() { async fn test_named_file_allowed_method() {
let req = TestRequest::default().method(Method::GET).to_http_request(); let req = TestRequest::default().method(Method::GET).to_http_request();
let file = NamedFile::open_async("Cargo.toml").await.unwrap(); let file = NamedFile::open_async("Cargo.toml").await.unwrap();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK); assert_eq!(resp.status(), StatusCode::OK);
} }
@@ -803,38 +802,6 @@ mod tests {
let req = TestRequest::get().uri("/test/%43argo.toml").to_request(); let req = TestRequest::get().uri("/test/%43argo.toml").to_request();
let res = test::call_service(&srv, req).await; let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.status(), StatusCode::OK);
// `%2F` == `/`
let req = TestRequest::get().uri("/test%2Ftest.binary").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
let req = TestRequest::get().uri("/test/Cargo.toml%00").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
}
#[actix_rt::test]
async fn test_percent_encoding_2() {
let tmpdir = tempfile::tempdir().unwrap();
let filename = match cfg!(unix) {
true => "ض:?#[]{}<>()@!$&'`|*+,;= %20.test",
false => "ض#[]{}()@!$&'`+,;= %20.test",
};
let filename_encoded = filename
.as_bytes()
.iter()
.map(|c| format!("%{:02X}", c))
.collect::<String>();
std::fs::File::create(tmpdir.path().join(filename)).unwrap();
let srv = test::init_service(App::new().service(Files::new("", tmpdir.path()))).await;
let req = TestRequest::get()
.uri(&format!("/{}", filename_encoded))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
} }
#[actix_rt::test] #[actix_rt::test]


@@ -1,20 +1,22 @@
use std::{ use std::{
fmt,
fs::Metadata, fs::Metadata,
io, io,
path::{Path, PathBuf}, path::{Path, PathBuf},
time::{SystemTime, UNIX_EPOCH}, time::{SystemTime, UNIX_EPOCH},
}; };
use actix_service::{Service, ServiceFactory};
use actix_web::{ use actix_web::{
body::{self, BoxBody, SizedStream}, body::{self, BoxBody, SizedStream},
dev::{ dev::{
self, AppService, HttpServiceFactory, ResourceDef, Service, ServiceFactory, AppService, BodyEncoding, HttpServiceFactory, ResourceDef, ServiceRequest,
ServiceRequest, ServiceResponse, ServiceResponse,
}, },
http::{ http::{
header::{ header::{
self, Charset, ContentDisposition, ContentEncoding, DispositionParam, self, Charset, ContentDisposition, ContentEncoding, DispositionParam,
DispositionType, ExtendedValue, HeaderValue, DispositionType, ExtendedValue,
}, },
StatusCode, StatusCode,
}, },
@@ -38,7 +40,7 @@ bitflags! {
impl Default for Flags { impl Default for Flags {
fn default() -> Self { fn default() -> Self {
Flags::from_bits_truncate(0b0000_1111) Flags::from_bits_truncate(0b0000_0111)
} }
} }
@@ -66,12 +68,12 @@ impl Default for Flags {
/// NamedFile::open_async("./static/index.html").await /// NamedFile::open_async("./static/index.html").await
/// } /// }
/// ``` /// ```
#[derive(Debug, Deref, DerefMut)] #[derive(Deref, DerefMut)]
pub struct NamedFile { pub struct NamedFile {
path: PathBuf,
#[deref] #[deref]
#[deref_mut] #[deref_mut]
file: File, file: File,
path: PathBuf,
modified: Option<SystemTime>, modified: Option<SystemTime>,
pub(crate) md: Metadata, pub(crate) md: Metadata,
pub(crate) flags: Flags, pub(crate) flags: Flags,
@@ -81,6 +83,32 @@ pub struct NamedFile {
pub(crate) encoding: Option<ContentEncoding>, pub(crate) encoding: Option<ContentEncoding>,
} }
impl fmt::Debug for NamedFile {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("NamedFile")
.field("path", &self.path)
.field(
"file",
#[cfg(feature = "experimental-io-uring")]
{
&"tokio_uring::File"
},
#[cfg(not(feature = "experimental-io-uring"))]
{
&self.file
},
)
.field("modified", &self.modified)
.field("md", &self.md)
.field("flags", &self.flags)
.field("status_code", &self.status_code)
.field("content_type", &self.content_type)
.field("content_disposition", &self.content_disposition)
.field("encoding", &self.encoding)
.finish()
}
}
#[cfg(not(feature = "experimental-io-uring"))] #[cfg(not(feature = "experimental-io-uring"))]
pub(crate) use std::fs::File; pub(crate) use std::fs::File;
#[cfg(feature = "experimental-io-uring")] #[cfg(feature = "experimental-io-uring")]
@@ -96,18 +124,18 @@ impl NamedFile {
/// ///
/// # Examples /// # Examples
/// ```ignore /// ```ignore
/// use std::{
/// io::{self, Write as _},
/// env,
/// fs::File
/// };
/// use actix_files::NamedFile; /// use actix_files::NamedFile;
/// use std::io::{self, Write};
/// use std::env;
/// use std::fs::File;
/// ///
/// let mut file = File::create("foo.txt")?; /// fn main() -> io::Result<()> {
/// file.write_all(b"Hello, world!")?; /// let mut file = File::create("foo.txt")?;
/// let named_file = NamedFile::from_file(file, "bar.txt")?; /// file.write_all(b"Hello, world!")?;
/// # std::fs::remove_file("foo.txt"); /// let named_file = NamedFile::from_file(file, "bar.txt")?;
/// Ok(()) /// # std::fs::remove_file("foo.txt");
/// Ok(())
/// }
/// ``` /// ```
pub fn from_file<P: AsRef<Path>>(file: File, path: P) -> io::Result<NamedFile> { pub fn from_file<P: AsRef<Path>>(file: File, path: P) -> io::Result<NamedFile> {
let path = path.as_ref().to_path_buf(); let path = path.as_ref().to_path_buf();
@@ -196,6 +224,7 @@ impl NamedFile {
}) })
} }
#[cfg(not(feature = "experimental-io-uring"))]
/// Attempts to open a file in read-only mode. /// Attempts to open a file in read-only mode.
/// ///
/// # Examples /// # Examples
@@ -203,13 +232,11 @@ impl NamedFile {
/// use actix_files::NamedFile; /// use actix_files::NamedFile;
/// let file = NamedFile::open("foo.txt"); /// let file = NamedFile::open("foo.txt");
/// ``` /// ```
#[cfg(not(feature = "experimental-io-uring"))]
pub fn open<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> { pub fn open<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> {
let file = File::open(&path)?; let file = File::open(&path)?;
Self::from_file(file, path) Self::from_file(file, path)
} }
#[allow(rustdoc::broken_intra_doc_links)]
/// Attempts to open a file asynchronously in read-only mode. /// Attempts to open a file asynchronously in read-only mode.
/// ///
/// When the `experimental-io-uring` crate feature is enabled, this will be async. /// When the `experimental-io-uring` crate feature is enabled, this will be async.
@@ -268,21 +295,23 @@ impl NamedFile {
self self
} }
/// Set the MIME Content-Type for serving this file. By default the Content-Type is inferred /// Set the MIME Content-Type for serving this file. By default
/// from the filename extension. /// the Content-Type is inferred from the filename extension.
#[inline] #[inline]
pub fn set_content_type(mut self, mime_type: mime::Mime) -> Self { pub fn set_content_type(mut self, mime_type: mime::Mime) -> Self {
self.content_type = mime_type; self.content_type = mime_type;
self self
} }
/// Set the Content-Disposition for serving this file. This allows changing the /// Set the Content-Disposition for serving this file. This allows
/// `inline/attachment` disposition as well as the filename sent to the peer. /// changing the inline/attachment disposition as well as the filename
/// sent to the peer.
/// ///
/// By default the disposition is `inline` for `text/*`, `image/*`, `video/*` and /// By default the disposition is `inline` for `text/*`, `image/*`, `video/*` and
/// `application/{javascript, json, wasm}` mime types, and `attachment` otherwise, and the /// `application/{javascript, json, wasm}` mime types, and `attachment` otherwise,
/// filename is taken from the path provided in the `open` method after converting it to UTF-8 /// and the filename is taken from the path provided in the `open` method
/// (using `to_string_lossy`). /// after converting it to UTF-8 using.
/// [`std::ffi::OsStr::to_string_lossy`]
#[inline] #[inline]
pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self { pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self {
self.content_disposition = cd; self.content_disposition = cd;
@@ -299,18 +328,16 @@ impl NamedFile {
self self
} }
/// Sets content encoding for this file. /// Set content encoding for serving this file
/// ///
/// This prevents the `Compress` middleware from modifying the file contents and signals to /// Must be used with [`actix_web::middleware::Compress`] to take effect.
/// browsers/clients how to decode it. For example, if serving a compressed HTML file (e.g.,
/// `index.html.gz`) then use `.set_content_encoding(ContentEncoding::Gzip)`.
#[inline] #[inline]
pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self { pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self {
self.encoding = Some(enc); self.encoding = Some(enc);
self self
} }
/// Specifies whether to return `ETag` header in response. /// Specifies whether to use ETag or not.
/// ///
/// Default is true. /// Default is true.
#[inline] #[inline]
@@ -319,7 +346,7 @@ impl NamedFile {
self self
} }
/// Specifies whether to return `Last-Modified` header in response. /// Specifies whether to use Last-Modified or not.
/// ///
/// Default is true. /// Default is true.
#[inline] #[inline]
@@ -337,7 +364,7 @@ impl NamedFile {
self self
} }
/// Creates an `ETag` in a format is similar to Apache's. /// Creates a etag in a format is similar to Apache's.
pub(crate) fn etag(&self) -> Option<header::EntityTag> { pub(crate) fn etag(&self) -> Option<header::EntityTag> {
self.modified.as_ref().map(|mtime| { self.modified.as_ref().map(|mtime| {
let ino = { let ino = {
@@ -359,7 +386,7 @@ impl NamedFile {
.duration_since(UNIX_EPOCH) .duration_since(UNIX_EPOCH)
.expect("modification time must be after epoch"); .expect("modification time must be after epoch");
header::EntityTag::new_strong(format!( header::EntityTag::strong(format!(
"{:x}:{:x}:{:x}:{:x}", "{:x}:{:x}:{:x}:{:x}",
ino, ino,
self.md.len(), self.md.len(),
@@ -378,13 +405,12 @@ impl NamedFile {
if self.status_code != StatusCode::OK { if self.status_code != StatusCode::OK {
let mut res = HttpResponse::build(self.status_code); let mut res = HttpResponse::build(self.status_code);
let ct = if self.flags.contains(Flags::PREFER_UTF8) { if self.flags.contains(Flags::PREFER_UTF8) {
equiv_utf8_text(self.content_type.clone()) let ct = equiv_utf8_text(self.content_type.clone());
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
} else { } else {
self.content_type res.insert_header((header::CONTENT_TYPE, self.content_type.to_string()));
}; }
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
if self.flags.contains(Flags::CONTENT_DISPOSITION) { if self.flags.contains(Flags::CONTENT_DISPOSITION) {
res.insert_header(( res.insert_header((
@@ -394,7 +420,7 @@ impl NamedFile {
} }
if let Some(current_encoding) = self.encoding { if let Some(current_encoding) = self.encoding {
res.insert_header((header::CONTENT_ENCODING, current_encoding.as_str())); res.encoding(current_encoding);
} }
let reader = chunked::new_chunked_read(self.md.len(), 0, self.file); let reader = chunked::new_chunked_read(self.md.len(), 0, self.file);
@@ -452,13 +478,12 @@ impl NamedFile {
let mut res = HttpResponse::build(self.status_code); let mut res = HttpResponse::build(self.status_code);
let ct = if self.flags.contains(Flags::PREFER_UTF8) { if self.flags.contains(Flags::PREFER_UTF8) {
equiv_utf8_text(self.content_type.clone()) let ct = equiv_utf8_text(self.content_type.clone());
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
} else { } else {
self.content_type res.insert_header((header::CONTENT_TYPE, self.content_type.to_string()));
}; }
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
if self.flags.contains(Flags::CONTENT_DISPOSITION) { if self.flags.contains(Flags::CONTENT_DISPOSITION) {
res.insert_header(( res.insert_header((
@@ -467,8 +492,9 @@ impl NamedFile {
)); ));
} }
// default compressing
if let Some(current_encoding) = self.encoding { if let Some(current_encoding) = self.encoding {
res.insert_header((header::CONTENT_ENCODING, current_encoding.as_str())); res.encoding(current_encoding);
} }
if let Some(lm) = last_modified { if let Some(lm) = last_modified {
@@ -491,12 +517,7 @@ impl NamedFile {
length = ranges[0].length; length = ranges[0].length;
offset = ranges[0].start; offset = ranges[0].start;
// don't allow compression middleware to modify partial content res.encoding(ContentEncoding::Identity);
res.insert_header((
header::CONTENT_ENCODING,
HeaderValue::from_static("identity"),
));
res.insert_header(( res.insert_header((
header::CONTENT_RANGE, header::CONTENT_RANGE,
format!("bytes {}-{}/{}", offset, offset + length - 1, self.md.len()), format!("bytes {}-{}/{}", offset, offset + length - 1, self.md.len()),
@@ -605,7 +626,7 @@ impl Service<ServiceRequest> for NamedFileService {
type Error = Error; type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>; type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
dev::always_ready!(); actix_service::always_ready!();
fn call(&self, req: ServiceRequest) -> Self::Future { fn call(&self, req: ServiceRequest) -> Self::Future {
let (req, _) = req.into_parts(); let (req, _) = req.into_parts();

View File

@@ -1,5 +1,5 @@
use std::{ use std::{
path::{Component, Path, PathBuf}, path::{Path, PathBuf},
str::FromStr, str::FromStr,
}; };
@@ -26,23 +26,8 @@ impl PathBufWrap {
pub fn parse_path(path: &str, hidden_files: bool) -> Result<Self, UriSegmentError> { pub fn parse_path(path: &str, hidden_files: bool) -> Result<Self, UriSegmentError> {
let mut buf = PathBuf::new(); let mut buf = PathBuf::new();
// equivalent to `path.split('/').count()`
let mut segment_count = path.matches('/').count() + 1;
// we can decode the whole path here (instead of per-segment decoding)
// because we will reject `%2F` in paths using `segement_count`.
let path = percent_encoding::percent_decode_str(path)
.decode_utf8()
.map_err(|_| UriSegmentError::NotValidUtf8)?;
// disallow decoding `%2F` into `/`
if segment_count != path.matches('/').count() + 1 {
return Err(UriSegmentError::BadChar('/'));
}
for segment in path.split('/') { for segment in path.split('/') {
if segment == ".." { if segment == ".." {
segment_count -= 1;
buf.pop(); buf.pop();
} else if !hidden_files && segment.starts_with('.') { } else if !hidden_files && segment.starts_with('.') {
return Err(UriSegmentError::BadStart('.')); return Err(UriSegmentError::BadStart('.'));
@@ -55,27 +40,14 @@ impl PathBufWrap {
} else if segment.ends_with('<') { } else if segment.ends_with('<') {
return Err(UriSegmentError::BadEnd('<')); return Err(UriSegmentError::BadEnd('<'));
} else if segment.is_empty() { } else if segment.is_empty() {
segment_count -= 1;
continue; continue;
} else if cfg!(windows) && segment.contains('\\') { } else if cfg!(windows) && segment.contains('\\') {
return Err(UriSegmentError::BadChar('\\')); return Err(UriSegmentError::BadChar('\\'));
} else if cfg!(windows) && segment.contains(':') {
return Err(UriSegmentError::BadChar(':'));
} else { } else {
buf.push(segment) buf.push(segment)
} }
} }
// make sure we agree with stdlib parser
for (i, component) in buf.components().enumerate() {
assert!(
matches!(component, Component::Normal(_)),
"component `{:?}` is not normal",
component
);
assert!(i < segment_count);
}
Ok(PathBufWrap(buf)) Ok(PathBufWrap(buf))
} }
} }
@@ -91,7 +63,7 @@ impl FromRequest for PathBufWrap {
type Future = Ready<Result<Self, Self::Error>>; type Future = Ready<Result<Self, Self::Error>>;
fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future { fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future {
ready(req.match_info().unprocessed().parse()) ready(req.match_info().path().parse())
} }
} }
@@ -165,26 +137,4 @@ mod tests {
PathBuf::from_iter(vec!["etc/passwd"]) PathBuf::from_iter(vec!["etc/passwd"])
); );
} }
#[test]
#[cfg_attr(windows, should_panic)]
fn windows_drive_traversal() {
// detect issues in windows that could lead to path traversal
// see <https://github.com/SergioBenitez/Rocket/issues/1949
assert_eq!(
PathBufWrap::parse_path("C:test.txt", false).unwrap().0,
PathBuf::from_iter(vec!["C:test.txt"])
);
assert_eq!(
PathBufWrap::parse_path("C:../whatever", false).unwrap().0,
PathBuf::from_iter(vec!["C:../whatever"])
);
assert_eq!(
PathBufWrap::parse_path(":test.txt", false).unwrap().0,
PathBuf::from_iter(vec![":test.txt"])
);
}
} }


@@ -1,8 +1,8 @@
use std::{fmt, io, ops::Deref, path::PathBuf, rc::Rc}; use std::{fmt, io, ops::Deref, path::PathBuf, rc::Rc};
use actix_service::Service;
use actix_web::{ use actix_web::{
body::BoxBody, dev::{ServiceRequest, ServiceResponse},
dev::{self, Service, ServiceRequest, ServiceResponse},
error::Error, error::Error,
guard::Guard, guard::Guard,
http::{header, Method}, http::{header, Method},
@@ -94,16 +94,16 @@ impl fmt::Debug for FilesService {
} }
impl Service<ServiceRequest> for FilesService { impl Service<ServiceRequest> for FilesService {
type Response = ServiceResponse<BoxBody>; type Response = ServiceResponse;
type Error = Error; type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>; type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
dev::always_ready!(); actix_service::always_ready!();
fn call(&self, req: ServiceRequest) -> Self::Future { fn call(&self, req: ServiceRequest) -> Self::Future {
let is_method_valid = if let Some(guard) = &self.guards { let is_method_valid = if let Some(guard) = &self.guards {
// execute user defined guards // execute user defined guards
(**guard).check(&req.guard_ctx()) (**guard).check(req.head())
} else { } else {
// default behavior // default behavior
matches!(*req.method(), Method::HEAD | Method::GET) matches!(*req.method(), Method::HEAD | Method::GET)
@@ -114,32 +114,32 @@ impl Service<ServiceRequest> for FilesService {
Box::pin(async move { Box::pin(async move {
if !is_method_valid { if !is_method_valid {
return Ok(req.into_response( return Ok(req.into_response(
HttpResponse::MethodNotAllowed() actix_web::HttpResponse::MethodNotAllowed()
.insert_header(header::ContentType(mime::TEXT_PLAIN_UTF_8)) .insert_header(header::ContentType(mime::TEXT_PLAIN_UTF_8))
.body("Request did not meet this resource's requirements."), .body("Request did not meet this resource's requirements."),
)); ));
} }
let path_on_disk = match PathBufWrap::parse_path( let real_path =
req.match_info().unprocessed(), match PathBufWrap::parse_path(req.match_info().path(), this.hidden_files) {
this.hidden_files, Ok(item) => item,
) { Err(e) => return Ok(req.error_response(e)),
Ok(item) => item, };
Err(err) => return Ok(req.error_response(err)),
};
if let Some(filter) = &this.path_filter { if let Some(filter) = &this.path_filter {
if !filter(path_on_disk.as_ref(), req.head()) { if !filter(real_path.as_ref(), req.head()) {
if let Some(ref default) = this.default { if let Some(ref default) = this.default {
return default.call(req).await; return default.call(req).await;
} else { } else {
return Ok(req.into_response(HttpResponse::NotFound().finish())); return Ok(
req.into_response(actix_web::HttpResponse::NotFound().finish())
);
} }
} }
} }
// full file path // full file path
let path = this.directory.join(&path_on_disk); let path = this.directory.join(&real_path);
if let Err(err) = path.canonicalize() { if let Err(err) = path.canonicalize() {
return this.handle_err(err, req).await; return this.handle_err(err, req).await;
} }
@@ -168,7 +168,7 @@ impl Service<ServiceRequest> for FilesService {
} }
} }
None if this.show_index => Ok(this.show_index(req, path)), None if this.show_index => Ok(this.show_index(req, path)),
None => Ok(ServiceResponse::from_err( _ => Ok(ServiceResponse::from_err(
FilesError::IsDirectory, FilesError::IsDirectory,
req.into_parts().0, req.into_parts().0,
)), )),
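The service logic above runs an optional user-supplied guard (defaulting to allowing only GET and HEAD), parses the unprocessed match path with `PathBufWrap::parse_path`, and then consults an optional path filter before resolving anything on disk. A minimal sketch of configuring those hooks from the application side; the `method_guard` and `path_filter` builder methods on `actix_files::Files` are assumed here, and the underscore policy is purely hypothetical:

use actix_files::Files;
use actix_web::{guard, App, HttpServer};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new().service(
            Files::new("/static", "./static")
                // make the default method check explicit: only GET passes the guard
                .method_guard(guard::Get())
                // hypothetical policy: refuse any segment starting with an underscore
                .path_filter(|path, _head| {
                    path.iter().all(|seg| !seg.to_string_lossy().starts_with('_'))
                }),
        )
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}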

View File

@@ -19,12 +19,12 @@ async fn test_utf8_file_contents() {
assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.status(), StatusCode::OK);
assert_eq!( assert_eq!(
res.headers().get(header::CONTENT_TYPE), res.headers().get(header::CONTENT_TYPE),
Some(&HeaderValue::from_static("text/plain; charset=utf-8")), Some(&HeaderValue::from_static("text/plain")),
); );
// disable UTF-8 attribute // prefer UTF-8 encoding
let srv = let srv =
test::init_service(App::new().service(Files::new("/", "./tests").prefer_utf8(false))) test::init_service(App::new().service(Files::new("/", "./tests").prefer_utf8(true)))
.await; .await;
let req = TestRequest::with_uri("/utf8.txt").to_request(); let req = TestRequest::with_uri("/utf8.txt").to_request();
@@ -33,6 +33,6 @@ async fn test_utf8_file_contents() {
assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.status(), StatusCode::OK);
assert_eq!( assert_eq!(
res.headers().get(header::CONTENT_TYPE), res.headers().get(header::CONTENT_TYPE),
Some(&HeaderValue::from_static("text/plain")), Some(&HeaderValue::from_static("text/plain; charset=utf-8")),
); );
} }

View File

@@ -3,144 +3,126 @@
## Unreleased - 2021-xx-xx ## Unreleased - 2021-xx-xx
## 3.0.0-beta.13 - 2022-02-16
- No significant changes since `3.0.0-beta.12`.
## 3.0.0-beta.12 - 2022-01-31
- No significant changes since `3.0.0-beta.11`.
## 3.0.0-beta.11 - 2022-01-04
- Minimum supported Rust version (MSRV) is now 1.54.
## 3.0.0-beta.10 - 2021-12-27
- Update `actix-server` to `2.0.0-rc.2`. [#2550]
[#2550]: https://github.com/actix/actix-web/pull/2550
## 3.0.0-beta.9 - 2021-12-11 ## 3.0.0-beta.9 - 2021-12-11
- No significant changes since `3.0.0-beta.8`. * No significant changes since `3.0.0-beta.8`.
## 3.0.0-beta.8 - 2021-11-30 ## 3.0.0-beta.8 - 2021-11-30
- Update `actix-tls` to `3.0.0-rc.1`. [#2474] * Update `actix-tls` to `3.0.0-rc.1`. [#2474]
[#2474]: https://github.com/actix/actix-web/pull/2474 [#2474]: https://github.com/actix/actix-web/pull/2474
## 3.0.0-beta.7 - 2021-11-22 ## 3.0.0-beta.7 - 2021-11-22
- Fix compatibility with experimental `io-uring` feature of `actix-rt`. [#2408] * Fix compatibility with experimental `io-uring` feature of `actix-rt`. [#2408]
[#2408]: https://github.com/actix/actix-web/pull/2408 [#2408]: https://github.com/actix/actix-web/pull/2408
## 3.0.0-beta.6 - 2021-11-15 ## 3.0.0-beta.6 - 2021-11-15
- `TestServer::stop` is now async and will wait for the server and system to shutdown. [#2442] * `TestServer::stop` is now async and will wait for the server and system to shutdown. [#2442]
- Update `actix-server` to `2.0.0-beta.9`. [#2442] * Update `actix-server` to `2.0.0-beta.9`. [#2442]
- Minimum supported Rust version (MSRV) is now 1.52. * Minimum supported Rust version (MSRV) is now 1.52.
[#2442]: https://github.com/actix/actix-web/pull/2442 [#2442]: https://github.com/actix/actix-web/pull/2442
## 3.0.0-beta.5 - 2021-09-09 ## 3.0.0-beta.5 - 2021-09-09
- Minimum supported Rust version (MSRV) is now 1.51. * Minimum supported Rust version (MSRV) is now 1.51.
## 3.0.0-beta.4 - 2021-04-02 ## 3.0.0-beta.4 - 2021-04-02
- Added `TestServer::client_headers` method. [#2097] * Added `TestServer::client_headers` method. [#2097]
[#2097]: https://github.com/actix/actix-web/pull/2097 [#2097]: https://github.com/actix/actix-web/pull/2097
## 3.0.0-beta.3 - 2021-03-09 ## 3.0.0-beta.3 - 2021-03-09
- No notable changes. * No notable changes.
## 3.0.0-beta.2 - 2021-02-10 ## 3.0.0-beta.2 - 2021-02-10
- No notable changes. * No notable changes.
## 3.0.0-beta.1 - 2021-01-07 ## 3.0.0-beta.1 - 2021-01-07
- Update `bytes` to `1.0`. [#1813] * Update `bytes` to `1.0`. [#1813]
[#1813]: https://github.com/actix/actix-web/pull/1813 [#1813]: https://github.com/actix/actix-web/pull/1813
## 2.1.0 - 2020-11-25 ## 2.1.0 - 2020-11-25
- Add ability to set address for `TestServer`. [#1645] * Add ability to set address for `TestServer`. [#1645]
- Upgrade `base64` to `0.13`. * Upgrade `base64` to `0.13`.
- Upgrade `serde_urlencoded` to `0.7`. [#1773] * Upgrade `serde_urlencoded` to `0.7`. [#1773]
[#1773]: https://github.com/actix/actix-web/pull/1773 [#1773]: https://github.com/actix/actix-web/pull/1773
[#1645]: https://github.com/actix/actix-web/pull/1645 [#1645]: https://github.com/actix/actix-web/pull/1645
## 2.0.0 - 2020-09-11 ## 2.0.0 - 2020-09-11
- Update actix-codec and actix-utils dependencies. * Update actix-codec and actix-utils dependencies.
## 2.0.0-alpha.1 - 2020-05-23 ## 2.0.0-alpha.1 - 2020-05-23
- Update the `time` dependency to 0.2.7 * Update the `time` dependency to 0.2.7
- Update `actix-connect` dependency to 2.0.0-alpha.2 * Update `actix-connect` dependency to 2.0.0-alpha.2
- Make `test_server` `async` fn. * Make `test_server` `async` fn.
- Bump minimum supported Rust version to 1.40 * Bump minimum supported Rust version to 1.40
- Replace deprecated `net2` crate with `socket2` * Replace deprecated `net2` crate with `socket2`
- Update `base64` dependency to 0.12 * Update `base64` dependency to 0.12
- Update `env_logger` dependency to 0.7 * Update `env_logger` dependency to 0.7
## 1.0.0 - 2019-12-13 ## 1.0.0 - 2019-12-13
- Replaced `TestServer::start()` with `test_server()` * Replaced `TestServer::start()` with `test_server()`
## 1.0.0-alpha.3 - 2019-12-07 ## 1.0.0-alpha.3 - 2019-12-07
- Migrate to `std::future` * Migrate to `std::future`
## 0.2.5 - 2019-09-17 ## 0.2.5 - 2019-09-17
- Update serde_urlencoded to "0.6.1" * Update serde_urlencoded to "0.6.1"
- Increase TestServerRuntime timeouts from 500ms to 3000ms * Increase TestServerRuntime timeouts from 500ms to 3000ms
- Do not override current `System` * Do not override current `System`
## 0.2.4 - 2019-07-18 ## 0.2.4 - 2019-07-18
- Update actix-server to 0.6 * Update actix-server to 0.6
## 0.2.3 - 2019-07-16 ## 0.2.3 - 2019-07-16
- Add `delete`, `options`, `patch` methods to `TestServerRunner` * Add `delete`, `options`, `patch` methods to `TestServerRunner`
## 0.2.2 - 2019-06-16 ## 0.2.2 - 2019-06-16
- Add .put() and .sput() methods * Add .put() and .sput() methods
## 0.2.1 - 2019-06-05 ## 0.2.1 - 2019-06-05
- Add license files * Add license files
## 0.2.0 - 2019-05-12 ## 0.2.0 - 2019-05-12
- Update awc and actix-http deps * Update awc and actix-http deps
## 0.1.1 - 2019-04-24 ## 0.1.1 - 2019-04-24
- Always make new connection for http client * Always make new connection for http client
## 0.1.0 - 2019-04-16 ## 0.1.0 - 2019-04-16
- No changes * No changes
## 0.1.0-alpha.3 - 2019-04-02 ## 0.1.0-alpha.3 - 2019-04-02
- Request functions accept path #743 * Request functions accept path #743
## 0.1.0-alpha.2 - 2019-03-29 ## 0.1.0-alpha.2 - 2019-03-29
- Added TestServerRuntime::load_body() method * Added TestServerRuntime::load_body() method
- Update actix-http and awc libraries * Update actix-http and awc libraries
## 0.1.0-alpha.1 - 2019-03-28 ## 0.1.0-alpha.1 - 2019-03-28
- Initial impl * Initial impl

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "actix-http-test" name = "actix-http-test"
version = "3.0.0-beta.13" version = "3.0.0-beta.9"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"] authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Various helpers for Actix applications to use during testing" description = "Various helpers for Actix applications to use during testing"
keywords = ["http", "web", "framework", "async", "futures"] keywords = ["http", "web", "framework", "async", "futures"]
@@ -30,12 +30,12 @@ openssl = ["tls-openssl", "awc/openssl"]
[dependencies] [dependencies]
actix-service = "2.0.0" actix-service = "2.0.0"
actix-codec = "0.5" actix-codec = "0.4.1"
actix-tls = "3" actix-tls = "3.0.0-rc.1"
actix-utils = "3.0.0" actix-utils = "3.0.0"
actix-rt = "2.2" actix-rt = "2.2"
actix-server = "2" actix-server = "2.0.0-rc.1"
awc = { version = "3.0.0-beta.21", default-features = false } awc = { version = "3.0.0-beta.13", default-features = false }
base64 = "0.13" base64 = "0.13"
bytes = "1" bytes = "1"
@@ -48,8 +48,8 @@ serde_json = "1.0"
slab = "0.4" slab = "0.4"
serde_urlencoded = "0.7" serde_urlencoded = "0.7"
tls-openssl = { version = "0.10.9", package = "openssl", optional = true } tls-openssl = { version = "0.10.9", package = "openssl", optional = true }
tokio = { version = "1.8.4", features = ["sync"] } tokio = { version = "1.2", features = ["sync"] }
[dev-dependencies] [dev-dependencies]
actix-web = { version = "4.0.0", default-features = false, features = ["cookies"] } actix-web = { version = "4.0.0-beta.14", default-features = false, features = ["cookies"] }
actix-http = "3.0.0" actix-http = "3.0.0-beta.15"

View File

@@ -3,15 +3,15 @@
> Various helpers for Actix applications to use during testing. > Various helpers for Actix applications to use during testing.
[![crates.io](https://img.shields.io/crates/v/actix-http-test?label=latest)](https://crates.io/crates/actix-http-test) [![crates.io](https://img.shields.io/crates/v/actix-http-test?label=latest)](https://crates.io/crates/actix-http-test)
[![Documentation](https://docs.rs/actix-http-test/badge.svg?version=3.0.0-beta.13)](https://docs.rs/actix-http-test/3.0.0-beta.13) [![Documentation](https://docs.rs/actix-http-test/badge.svg?version=3.0.0-beta.9)](https://docs.rs/actix-http-test/3.0.0-beta.9)
[![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html) [![Version](https://img.shields.io/badge/rustc-1.52+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.52.0.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http-test) ![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http-test)
<br> <br>
[![Dependency Status](https://deps.rs/crate/actix-http-test/3.0.0-beta.13/status.svg)](https://deps.rs/crate/actix-http-test/3.0.0-beta.13) [![Dependency Status](https://deps.rs/crate/actix-http-test/3.0.0-beta.9/status.svg)](https://deps.rs/crate/actix-http-test/3.0.0-beta.9)
[![Download](https://img.shields.io/crates/d/actix-http-test.svg)](https://crates.io/crates/actix-http-test) [![Download](https://img.shields.io/crates/d/actix-http-test.svg)](https://crates.io/crates/actix-http-test)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) [![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
## Documentation & Resources ## Documentation & Resources
- [API Documentation](https://docs.rs/actix-http-test) - [API Documentation](https://docs.rs/actix-http-test)
- Minimum Supported Rust Version (MSRV): 1.54 - Minimum Supported Rust Version (MSRV): 1.52

View File

@@ -12,7 +12,7 @@ use std::{net, thread, time::Duration};
use actix_codec::{AsyncRead, AsyncWrite, Framed}; use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_rt::{net::TcpStream, System}; use actix_rt::{net::TcpStream, System};
use actix_server::{Server, ServerServiceFactory}; use actix_server::{Server, ServiceFactory};
use awc::{ use awc::{
error::PayloadError, http::header::HeaderMap, ws, Client, ClientRequest, ClientResponse, error::PayloadError, http::header::HeaderMap, ws, Client, ClientRequest, ClientResponse,
Connector, Connector,
@@ -51,13 +51,13 @@ use tokio::sync::mpsc;
/// assert!(response.status().is_success()); /// assert!(response.status().is_success());
/// } /// }
/// ``` /// ```
pub async fn test_server<F: ServerServiceFactory<TcpStream>>(factory: F) -> TestServer { pub async fn test_server<F: ServiceFactory<TcpStream>>(factory: F) -> TestServer {
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap(); let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
test_server_with_addr(tcp, factory).await test_server_with_addr(tcp, factory).await
} }
/// Start [`test server`](test_server()) on an existing address binding. /// Start [`test server`](test_server()) on an existing address binding.
pub async fn test_server_with_addr<F: ServerServiceFactory<TcpStream>>( pub async fn test_server_with_addr<F: ServiceFactory<TcpStream>>(
tcp: net::TcpListener, tcp: net::TcpListener,
factory: F, factory: F,
) -> TestServer { ) -> TestServer {
@@ -107,7 +107,7 @@ pub async fn test_server_with_addr<F: ServerServiceFactory<TcpStream>>(
Connector::new() Connector::new()
.conn_lifetime(Duration::from_secs(0)) .conn_lifetime(Duration::from_secs(0))
.timeout(Duration::from_millis(30000)) .timeout(Duration::from_millis(30000))
.openssl(builder.build()) .ssl(builder.build())
}; };
#[cfg(not(feature = "openssl"))] #[cfg(not(feature = "openssl"))]

File diff suppressed because it is too large

View File

@@ -1,10 +1,7 @@
[package] [package]
name = "actix-http" name = "actix-http"
version = "3.0.0" version = "3.0.0-beta.15"
authors = [ authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "HTTP primitives for the Actix ecosystem" description = "HTTP primitives for the Actix ecosystem"
keywords = ["actix", "http", "framework", "async", "futures"] keywords = ["actix", "http", "framework", "async", "futures"]
homepage = "https://actix.rs" homepage = "https://actix.rs"
@@ -20,7 +17,7 @@ edition = "2018"
[package.metadata.docs.rs] [package.metadata.docs.rs]
# features that docs.rs will build with # features that docs.rs will build with
features = ["http2", "openssl", "rustls", "compress-brotli", "compress-gzip", "compress-zstd"] features = ["openssl", "rustls", "compress-brotli", "compress-gzip", "compress-zstd"]
[lib] [lib]
name = "actix_http" name = "actix_http"
@@ -29,85 +26,69 @@ path = "src/lib.rs"
[features] [features]
default = [] default = []
# HTTP/2 protocol support # openssl
http2 = ["h2"]
# WebSocket protocol implementation
ws = [
"local-channel",
"base64",
"rand",
"sha-1",
]
# TLS via OpenSSL
openssl = ["actix-tls/accept", "actix-tls/openssl"] openssl = ["actix-tls/accept", "actix-tls/openssl"]
# TLS via Rustls # rustls support
rustls = ["actix-tls/accept", "actix-tls/rustls"] rustls = ["actix-tls/accept", "actix-tls/rustls"]
# Compression codecs # enable compression support
compress-brotli = ["__compress", "brotli"] compress-brotli = ["brotli2", "__compress"]
compress-gzip = ["__compress", "flate2"] compress-gzip = ["flate2", "__compress"]
compress-zstd = ["__compress", "zstd"] compress-zstd = ["zstd", "__compress"]
# Internal (PRIVATE!) features used to aid testing and checking feature status. # Internal (PRIVATE!) features used to aid testing and checking feature status.
# Don't rely on these whatsoever. They are semver-exempt and may disappear at any time. # Don't rely on these whatsoever. They may disappear at any time.
__compress = [] __compress = []
[dependencies] [dependencies]
actix-service = "2" actix-service = "2.0.0"
actix-codec = "0.5" actix-codec = "0.4.1"
actix-utils = "3" actix-utils = "3.0.0"
actix-rt = { version = "2.2", default-features = false } actix-rt = "2.2"
ahash = "0.7" ahash = "0.7"
base64 = "0.13"
bitflags = "1.2" bitflags = "1.2"
bytes = "1" bytes = "1"
bytestring = "1" bytestring = "1"
derive_more = "0.99.5" derive_more = "0.99.5"
encoding_rs = "0.8" encoding_rs = "0.8"
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] } futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
futures-util = { version = "0.3.7", default-features = false, features = ["alloc", "sink"] }
h2 = "0.3.9"
http = "0.2.5" http = "0.2.5"
httparse = "1.5.1" httparse = "1.5.1"
httpdate = "1.0.1" httpdate = "1.0.1"
itoa = "1" itoa = "0.4"
language-tags = "0.3" language-tags = "0.3"
local-channel = "0.1"
log = "0.4" log = "0.4"
mime = "0.3" mime = "0.3"
percent-encoding = "2.1" percent-encoding = "2.1"
pin-project = "1.0.0"
pin-project-lite = "0.2" pin-project-lite = "0.2"
rand = "0.8"
sha-1 = "0.9"
smallvec = "1.6.1" smallvec = "1.6.1"
# http2 # tls
h2 = { version = "0.3.9", optional = true } actix-tls = { version = "3.0.0-rc.1", default-features = false, optional = true }
# websockets # compression
local-channel = { version = "0.1", optional = true } brotli2 = { version="0.3.2", optional = true }
base64 = { version = "0.13", optional = true }
rand = { version = "0.8", optional = true }
sha-1 = { version = "0.10", optional = true }
# openssl/rustls
actix-tls = { version = "3", default-features = false, optional = true }
# compress-*
brotli = { version = "3.3.3", optional = true }
flate2 = { version = "1.0.13", optional = true } flate2 = { version = "1.0.13", optional = true }
zstd = { version = "0.10", optional = true } zstd = { version = "0.9", optional = true }
[dev-dependencies] [dev-dependencies]
actix-http-test = { version = "3.0.0-beta.13", features = ["openssl"] } actix-http-test = { version = "3.0.0-beta.9", features = ["openssl"] }
actix-server = "2" actix-server = "2.0.0-rc.1"
actix-tls = { version = "3", features = ["openssl"] } actix-tls = { version = "3.0.0-rc.1", features = ["openssl"] }
actix-web = "4.0.0" actix-web = "4.0.0-beta.14"
async-stream = "0.3" async-stream = "0.3"
criterion = { version = "0.3", features = ["html_reports"] } criterion = { version = "0.3", features = ["html_reports"] }
env_logger = "0.9" env_logger = "0.9"
futures-util = { version = "0.3.7", default-features = false, features = ["alloc"] }
memchr = "2.4"
once_cell = "1.9"
rcgen = "0.8" rcgen = "0.8"
regex = "1.3" regex = "1.3"
rustls-pemfile = "0.2" rustls-pemfile = "0.2"
@@ -116,7 +97,7 @@ serde_json = "1.0"
static_assertions = "1" static_assertions = "1"
tls-openssl = { package = "openssl", version = "0.10.9" } tls-openssl = { package = "openssl", version = "0.10.9" }
tls-rustls = { package = "rustls", version = "0.20.0" } tls-rustls = { package = "rustls", version = "0.20.0" }
tokio = { version = "1.8.4", features = ["net", "rt", "macros"] } tokio = { version = "1.2", features = ["net", "rt", "macros"] }
[[example]] [[example]]
name = "ws" name = "ws"

View File

@@ -3,18 +3,18 @@
> HTTP primitives for the Actix ecosystem. > HTTP primitives for the Actix ecosystem.
[![crates.io](https://img.shields.io/crates/v/actix-http?label=latest)](https://crates.io/crates/actix-http) [![crates.io](https://img.shields.io/crates/v/actix-http?label=latest)](https://crates.io/crates/actix-http)
[![Documentation](https://docs.rs/actix-http/badge.svg?version=3.0.0)](https://docs.rs/actix-http/3.0.0) [![Documentation](https://docs.rs/actix-http/badge.svg?version=3.0.0-beta.15)](https://docs.rs/actix-http/3.0.0-beta.15)
[![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html) [![Version](https://img.shields.io/badge/rustc-1.52+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.52.0.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http.svg) ![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http.svg)
<br /> <br />
[![dependency status](https://deps.rs/crate/actix-http/3.0.0/status.svg)](https://deps.rs/crate/actix-http/3.0.0) [![dependency status](https://deps.rs/crate/actix-http/3.0.0-beta.15/status.svg)](https://deps.rs/crate/actix-http/3.0.0-beta.15)
[![Download](https://img.shields.io/crates/d/actix-http.svg)](https://crates.io/crates/actix-http) [![Download](https://img.shields.io/crates/d/actix-http.svg)](https://crates.io/crates/actix-http)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) [![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
## Documentation & Resources ## Documentation & Resources
- [API Documentation](https://docs.rs/actix-http) - [API Documentation](https://docs.rs/actix-http)
- Minimum Supported Rust Version (MSRV): 1.54 - Minimum Supported Rust Version (MSRV): 1.52
## Example ## Example
@@ -54,8 +54,8 @@ async fn main() -> io::Result<()> {
This project is licensed under either of This project is licensed under either of
- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)) * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
- MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT)) * MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
at your option. at your option.

View File

@@ -42,37 +42,32 @@ mod _new {
if x < 10 { if x < 10 {
f.write_str("00")?; f.write_str("00")?;
// 0 is handled so it's not possible to have a trailing 0, we can just return // 0 is handled so it's not possible to have a trailing 0, we can just return
itoa_fmt(f, x) itoa::fmt(f, x)
} else if x < 100 { } else if x < 100 {
f.write_str("0")?; f.write_str("0")?;
if x % 10 == 0 { if x % 10 == 0 {
// trailing 0, divide by 10 and write // trailing 0, divide by 10 and write
itoa_fmt(f, x / 10) itoa::fmt(f, x / 10)
} else { } else {
itoa_fmt(f, x) itoa::fmt(f, x)
} }
} else { } else {
// x is in range 101–999 // x is in range 101–999
if x % 100 == 0 { if x % 100 == 0 {
// two trailing 0s, divide by 100 and write // two trailing 0s, divide by 100 and write
itoa_fmt(f, x / 100) itoa::fmt(f, x / 100)
} else if x % 10 == 0 { } else if x % 10 == 0 {
// one trailing 0, divide by 10 and write // one trailing 0, divide by 10 and write
itoa_fmt(f, x / 10) itoa::fmt(f, x / 10)
} else { } else {
itoa_fmt(f, x) itoa::fmt(f, x)
} }
} }
} }
} }
} }
} }
pub fn itoa_fmt<W: fmt::Write, V: itoa::Integer>(mut wr: W, value: V) -> fmt::Result {
let mut buf = itoa::Buffer::new();
wr.write_str(buf.format(value))
}
} }
mod _naive { mod _naive {

View File

@@ -1,27 +0,0 @@
use std::{convert::Infallible, io, time::Duration};
use actix_http::{HttpService, Request, Response, StatusCode};
use actix_server::Server;
use once_cell::sync::Lazy;
static STR: Lazy<String> = Lazy::new(|| "HELLO WORLD ".repeat(20));
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("dispatcher-benchmark", ("127.0.0.1", 8080), || {
HttpService::build()
.client_request_timeout(Duration::from_secs(1))
.finish(|_: Request| async move {
let mut res = Response::build(StatusCode::OK);
Ok::<_, Infallible>(res.body(&**STR))
})
.tcp()
})?
// limiting number of workers so that bench client is not sharing as many resources
.workers(4)
.run()
.await
}

View File

@@ -1,4 +1,4 @@
use std::{io, time::Duration}; use std::io;
use actix_http::{Error, HttpService, Request, Response, StatusCode}; use actix_http::{Error, HttpService, Request, Response, StatusCode};
use actix_server::Server; use actix_server::Server;
@@ -13,9 +13,8 @@ async fn main() -> io::Result<()> {
Server::build() Server::build()
.bind("echo", ("127.0.0.1", 8080), || { .bind("echo", ("127.0.0.1", 8080), || {
HttpService::build() HttpService::build()
.client_request_timeout(Duration::from_secs(1)) .client_timeout(1000)
.client_disconnect_timeout(Duration::from_secs(1)) .client_disconnect(1000)
// handles HTTP/1.1 and HTTP/2
.finish(|mut req: Request| async move { .finish(|mut req: Request| async move {
let mut body = BytesMut::new(); let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await { while let Some(item) = req.payload().next().await {
@@ -24,13 +23,12 @@ async fn main() -> io::Result<()> {
log::info!("request body: {:?}", body); log::info!("request body: {:?}", body);
let res = Response::build(StatusCode::OK) Ok::<_, Error>(
.insert_header(("x-head", HeaderValue::from_static("dummy value!"))) Response::build(StatusCode::OK)
.body(body); .insert_header(("x-head", HeaderValue::from_static("dummy value!")))
.body(body),
Ok::<_, Error>(res) )
}) })
// No TLS
.tcp() .tcp()
})? })?
.run() .run()

View File

@@ -1,34 +1,32 @@
use std::io; use std::io;
use actix_http::{ use actix_http::{
body::{BodyStream, MessageBody}, body::MessageBody, header::HeaderValue, Error, HttpService, Request, Response, StatusCode,
header, Error, HttpMessage, HttpService, Request, Response, StatusCode,
}; };
use actix_server::Server;
use bytes::BytesMut;
use futures_util::StreamExt as _;
async fn handle_request(mut req: Request) -> Result<Response<impl MessageBody>, Error> { async fn handle_request(mut req: Request) -> Result<Response<impl MessageBody>, Error> {
let mut res = Response::build(StatusCode::OK); let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await {
if let Some(ct) = req.headers().get(header::CONTENT_TYPE) { body.extend_from_slice(&item?)
res.insert_header((header::CONTENT_TYPE, ct));
} }
// echo request payload stream as (chunked) response body log::info!("request body: {:?}", body);
let res = res.message_body(BodyStream::new(req.payload().take()))?;
Ok(res) Ok(Response::build(StatusCode::OK)
.insert_header(("x-head", HeaderValue::from_static("dummy value!")))
.body(body))
} }
#[actix_rt::main] #[actix_rt::main]
async fn main() -> io::Result<()> { async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
actix_server::Server::build() Server::build()
.bind("echo", ("127.0.0.1", 8080), || { .bind("echo", ("127.0.0.1", 8080), || {
HttpService::build() HttpService::build().finish(handle_request).tcp()
// handles HTTP/1.1 only
.h1(handle_request)
// No TLS
.tcp()
})? })?
.run() .run()
.await .await

View File

@@ -1,25 +0,0 @@
use std::{convert::Infallible, io};
use actix_http::{HttpService, Request, Response, StatusCode};
use actix_server::Server;
use once_cell::sync::Lazy;
static STR: Lazy<String> = Lazy::new(|| "HELLO WORLD ".repeat(100));
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("h2spec", ("127.0.0.1", 8080), || {
HttpService::build()
.h2(|_: Request| async move {
let mut res = Response::build(StatusCode::OK);
Ok::<_, Infallible>(res.body(&**STR))
})
.tcp()
})?
.workers(4)
.run()
.await
}

View File

@@ -1,4 +1,4 @@
use std::{convert::Infallible, io, time::Duration}; use std::{convert::Infallible, io};
use actix_http::{ use actix_http::{
header::HeaderValue, HttpMessage, HttpService, Request, Response, StatusCode, header::HeaderValue, HttpMessage, HttpService, Request, Response, StatusCode,
@@ -12,8 +12,8 @@ async fn main() -> io::Result<()> {
Server::build() Server::build()
.bind("hello-world", ("127.0.0.1", 8080), || { .bind("hello-world", ("127.0.0.1", 8080), || {
HttpService::build() HttpService::build()
.client_request_timeout(Duration::from_secs(1)) .client_timeout(1000)
.client_disconnect_timeout(Duration::from_secs(1)) .client_disconnect(1000)
.on_connect_ext(|_, ext| { .on_connect_ext(|_, ext| {
ext.insert(42u32); ext.insert(42u32);
}) })

View File

@@ -80,7 +80,7 @@ mod tests {
use futures_core::ready; use futures_core::ready;
use futures_util::{stream, FutureExt as _}; use futures_util::{stream, FutureExt as _};
use pin_project_lite::pin_project; use pin_project_lite::pin_project;
use static_assertions::{assert_impl_all, assert_not_impl_any}; use static_assertions::{assert_impl_all, assert_not_impl_all};
use super::*; use super::*;
use crate::body::to_bytes; use crate::body::to_bytes;
@@ -91,10 +91,10 @@ mod tests {
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody); assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody); assert_impl_all!(BodyStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);
assert_not_impl_any!(BodyStream<stream::Empty<Bytes>>: MessageBody); assert_not_impl_all!(BodyStream<stream::Empty<Bytes>>: MessageBody);
assert_not_impl_any!(BodyStream<stream::Repeat<Bytes>>: MessageBody); assert_not_impl_all!(BodyStream<stream::Repeat<Bytes>>: MessageBody);
// crate::Error is not Clone // crate::Error is not Clone
assert_not_impl_any!(BodyStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody); assert_not_impl_all!(BodyStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
#[actix_rt::test] #[actix_rt::test]
async fn skips_empty_chunks() { async fn skips_empty_chunks() {

View File

@@ -8,110 +8,90 @@ use std::{
use bytes::Bytes; use bytes::Bytes;
use super::{BodySize, MessageBody, MessageBodyMapErr}; use super::{BodySize, MessageBody, MessageBodyMapErr};
use crate::body; use crate::Error;
/// A boxed message body with boxed errors. /// A boxed message body with boxed errors.
#[derive(Debug)] pub struct BoxBody(Pin<Box<dyn MessageBody<Error = Box<dyn StdError>>>>);
pub struct BoxBody(BoxBodyInner);
enum BoxBodyInner {
None(body::None),
Bytes(Bytes),
Stream(Pin<Box<dyn MessageBody<Error = Box<dyn StdError>>>>),
}
impl fmt::Debug for BoxBodyInner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::None(arg0) => f.debug_tuple("None").field(arg0).finish(),
Self::Bytes(arg0) => f.debug_tuple("Bytes").field(arg0).finish(),
Self::Stream(_) => f.debug_tuple("Stream").field(&"dyn MessageBody").finish(),
}
}
}
impl BoxBody { impl BoxBody {
/// Boxes body type, erasing type information. /// Boxes a `MessageBody` and any errors it generates.
///
/// If the body type to wrap is unknown or generic it is better to use [`MessageBody::boxed`] to
/// avoid double boxing.
#[inline]
pub fn new<B>(body: B) -> Self pub fn new<B>(body: B) -> Self
where where
B: MessageBody + 'static, B: MessageBody + 'static,
{ {
match body.size() { let body = MessageBodyMapErr::new(body, Into::into);
BodySize::None => Self(BoxBodyInner::None(body::None)), Self(Box::pin(body))
_ => match body.try_into_bytes() {
Ok(bytes) => Self(BoxBodyInner::Bytes(bytes)),
Err(body) => {
let body = MessageBodyMapErr::new(body, Into::into);
Self(BoxBodyInner::Stream(Box::pin(body)))
}
},
}
} }
/// Returns a mutable pinned reference to the inner message body type. /// Returns a mutable pinned reference to the inner message body type.
#[inline] pub fn as_pin_mut(&mut self) -> Pin<&mut (dyn MessageBody<Error = Box<dyn StdError>>)> {
pub fn as_pin_mut(&mut self) -> Pin<&mut Self> { self.0.as_mut()
Pin::new(self) }
}
impl fmt::Debug for BoxBody {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("BoxBody(dyn MessageBody)")
} }
} }
impl MessageBody for BoxBody { impl MessageBody for BoxBody {
type Error = Box<dyn StdError>; type Error = Error;
#[inline]
fn size(&self) -> BodySize { fn size(&self) -> BodySize {
match &self.0 { self.0.size()
BoxBodyInner::None(none) => none.size(),
BoxBodyInner::Bytes(bytes) => bytes.size(),
BoxBodyInner::Stream(stream) => stream.size(),
}
} }
#[inline]
fn poll_next( fn poll_next(
mut self: Pin<&mut Self>, mut self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> { ) -> Poll<Option<Result<Bytes, Self::Error>>> {
match &mut self.0 { self.0
BoxBodyInner::None(body) => { .as_mut()
Pin::new(body).poll_next(cx).map_err(|err| match err {}) .poll_next(cx)
} .map_err(|err| Error::new_body().with_cause(err))
BoxBodyInner::Bytes(body) => {
Pin::new(body).poll_next(cx).map_err(|err| match err {})
}
BoxBodyInner::Stream(body) => Pin::new(body).poll_next(cx),
}
} }
#[inline] fn is_complete_body(&self) -> bool {
fn try_into_bytes(self) -> Result<Bytes, Self> { self.0.is_complete_body()
match self.0 {
BoxBodyInner::None(body) => Ok(body.try_into_bytes().unwrap()),
BoxBodyInner::Bytes(body) => Ok(body.try_into_bytes().unwrap()),
_ => Err(self),
}
} }
#[inline] fn take_complete_body(&mut self) -> Bytes {
fn boxed(self) -> BoxBody { debug_assert!(
self self.is_complete_body(),
"boxed type does not allow taking complete body; caller should make sure to \
call `is_complete_body` first",
);
// we do not have DerefMut access to call take_complete_body directly but since
// is_complete_body is true we should expect the entire bytes chunk in one poll_next
let waker = futures_util::task::noop_waker();
let mut cx = Context::from_waker(&waker);
match self.as_pin_mut().poll_next(&mut cx) {
Poll::Ready(Some(Ok(data))) => data,
_ => {
panic!(
"boxed type indicated it allows taking complete body but failed to \
return Bytes when polled",
);
}
}
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use static_assertions::{assert_impl_all, assert_not_impl_any}; use static_assertions::{assert_impl_all, assert_not_impl_all};
use super::*; use super::*;
use crate::body::to_bytes; use crate::body::to_bytes;
assert_impl_all!(BoxBody: fmt::Debug, MessageBody, Unpin); assert_impl_all!(BoxBody: MessageBody, fmt::Debug, Unpin);
assert_not_impl_any!(BoxBody: Send, Sync);
assert_not_impl_all!(BoxBody: Send, Sync, Unpin);
#[actix_rt::test] #[actix_rt::test]
async fn nested_boxed_body() { async fn nested_boxed_body() {
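The reworked `BoxBody::new` above stores `body::None` and already-complete bodies without boxing and only pins a `dyn MessageBody` for true streams, while its doc comment points generic code at `MessageBody::boxed` to avoid double boxing. A small sketch of both entry points; `actix-rt` with its default macros feature is assumed for the async main:

use actix_http::body::{self, BoxBody, MessageBody};

// For an unknown or generic body type, prefer `.boxed()`: it is a no-op on an
// existing `BoxBody`, so nothing gets boxed twice.
fn erase<B>(body: B) -> BoxBody
where
    B: MessageBody + 'static,
{
    body.boxed()
}

#[actix_rt::main]
async fn main() {
    // Concrete, known type: `BoxBody::new` is fine here.
    let direct = BoxBody::new("hello");
    assert_eq!(body::to_bytes(direct).await.unwrap(), b"hello".as_ref());

    // Already boxed: `erase` forwards it without wrapping it in another box.
    let nested = erase(BoxBody::new("hello"));
    assert_eq!(body::to_bytes(nested).await.unwrap(), b"hello".as_ref());
}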

View File

@@ -10,17 +10,6 @@ use super::{BodySize, BoxBody, MessageBody};
use crate::Error; use crate::Error;
pin_project! { pin_project! {
/// An "either" type specialized for body types.
///
/// It is common, in middleware especially, to conditionally return an inner service's unknown/
/// generic body `B` type or return early with a new response. This type's "right" variant
/// defaults to `BoxBody` since error responses are the common case.
///
/// For example, middleware will often have `type Response = ServiceResponse<EitherBody<B>>`.
/// This means that the inner service's response body type maps to the `Left` variant and the
/// middleware's own error responses use the default `Right` variant of `BoxBody`. Of course,
/// there's no reason it couldn't use `EitherBody<B, String>` instead if its alternative
/// responses have a known type.
#[project = EitherBodyProj] #[project = EitherBodyProj]
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub enum EitherBody<L, R = BoxBody> { pub enum EitherBody<L, R = BoxBody> {
@@ -33,10 +22,7 @@ pin_project! {
} }
impl<L> EitherBody<L, BoxBody> { impl<L> EitherBody<L, BoxBody> {
/// Creates new `EitherBody` left variant with a boxed right variant. /// Creates new `EitherBody` using left variant and boxed right variant.
///
/// If the expected `R` type will be inferred and is not `BoxBody` then use the
/// [`left`](Self::left) constructor instead.
#[inline] #[inline]
pub fn new(body: L) -> Self { pub fn new(body: L) -> Self {
Self::Left { body } Self::Left { body }
@@ -88,22 +74,18 @@ where
} }
#[inline] #[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> { fn is_complete_body(&self) -> bool {
match self { match self {
EitherBody::Left { body } => body EitherBody::Left { body } => body.is_complete_body(),
.try_into_bytes() EitherBody::Right { body } => body.is_complete_body(),
.map_err(|body| EitherBody::Left { body }),
EitherBody::Right { body } => body
.try_into_bytes()
.map_err(|body| EitherBody::Right { body }),
} }
} }
#[inline] #[inline]
fn boxed(self) -> BoxBody { fn take_complete_body(&mut self) -> Bytes {
match self { match self {
EitherBody::Left { body } => body.boxed(), EitherBody::Left { body } => body.take_complete_body(),
EitherBody::Right { body } => body.boxed(), EitherBody::Right { body } => body.take_complete_body(),
} }
} }
} }

View File

@@ -12,110 +12,70 @@ use bytes::{Bytes, BytesMut};
use futures_core::ready; use futures_core::ready;
use pin_project_lite::pin_project; use pin_project_lite::pin_project;
use super::{BodySize, BoxBody}; use super::BodySize;
/// An interface for types that can be used as a response body. /// An interface for types that can be converted to bytes and used as response bodies.
/// // TODO: examples
/// It is not usually necessary to create custom body types, this trait is already [implemented for
/// a large number of sensible body types](#foreign-impls) including:
/// - Empty body: `()`
/// - Text-based: `String`, `&'static str`, [`ByteString`](https://docs.rs/bytestring/1).
/// - Byte-based: `Bytes`, `BytesMut`, `Vec<u8>`, `&'static [u8]`;
/// - Streams: [`BodyStream`](super::BodyStream), [`SizedStream`](super::SizedStream)
///
/// # Examples
/// ```
/// # use std::convert::Infallible;
/// # use std::task::{Poll, Context};
/// # use std::pin::Pin;
/// # use bytes::Bytes;
/// # use actix_http::body::{BodySize, MessageBody};
/// struct Repeat {
/// chunk: String,
/// n_times: usize,
/// }
///
/// impl MessageBody for Repeat {
/// type Error = Infallible;
///
/// fn size(&self) -> BodySize {
/// BodySize::Sized((self.chunk.len() * self.n_times) as u64)
/// }
///
/// fn poll_next(
/// self: Pin<&mut Self>,
/// _cx: &mut Context<'_>,
/// ) -> Poll<Option<Result<Bytes, Self::Error>>> {
/// let payload_string = self.chunk.repeat(self.n_times);
/// let payload_bytes = Bytes::from(payload_string);
/// Poll::Ready(Some(Ok(payload_bytes)))
/// }
/// }
/// ```
pub trait MessageBody { pub trait MessageBody {
/// The type of error that will be returned if streaming body fails. // TODO: consider this bound to only fmt::Display since the error type is not really used
/// // and there is an impl for Into<Box<StdError>> on String
/// Since it is not appropriate to generate a response mid-stream, it only requires `Error` for
/// internal use and logging.
type Error: Into<Box<dyn StdError>>; type Error: Into<Box<dyn StdError>>;
/// Body size hint. /// Body size hint.
///
/// If [`BodySize::None`] is returned, optimizations that skip reading the body are allowed.
fn size(&self) -> BodySize; fn size(&self) -> BodySize;
/// Attempt to pull out the next chunk of body bytes. /// Attempt to pull out the next chunk of body bytes.
/// // TODO: expand documentation
/// # Return Value
/// Similar to the `Stream` interface, there are several possible return values, each indicating
/// a distinct state:
/// - `Poll::Pending` means that this body's next chunk is not ready yet. Implementations must
/// ensure that the current task will be notified when the next chunk may be ready.
/// - `Poll::Ready(Some(val))` means that the body has successfully produced a chunk, `val`,
/// and may produce further values on subsequent `poll_next` calls.
/// - `Poll::Ready(None)` means that the body is complete, and `poll_next` should not be
/// invoked again.
///
/// # Panics
/// Once a body is complete (i.e., `poll_next` returned `Ready(None)`), calling its `poll_next`
/// method again may panic, block forever, or cause other kinds of problems; this trait places
/// no requirements on the effects of such a call. However, as the `poll_next` method is not
/// marked unsafe, Rusts usual rules apply: calls must never cause UB, regardless of its state.
fn poll_next( fn poll_next(
self: Pin<&mut Self>, self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>>; ) -> Poll<Option<Result<Bytes, Self::Error>>>;
/// Try to convert into the complete chunk of body bytes. /// Returns true if entire body bytes chunk is obtainable in one call to `poll_next`.
/// ///
/// Override this method if the complete body can be trivially extracted. This is useful for /// This method's implementation should agree with [`take_complete_body`] and should always be
/// optimizations where `poll_next` calls can be avoided. /// checked before taking the body.
/// ///
/// Body types with [`BodySize::None`] are allowed to return empty `Bytes`. Although, if calling /// The default implementation returns `false`.
/// this method, it is recommended to check `size` first and return early.
/// ///
/// # Errors /// [`take_complete_body`]: MessageBody::take_complete_body
/// The default implementation will error and return the original type back to the caller for fn is_complete_body(&self) -> bool {
/// further use. false
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self>
where
Self: Sized,
{
Err(self)
} }
/// Wraps this body into a `BoxBody`. /// Returns the complete chunk of body bytes.
/// ///
/// No-op when called on a `BoxBody`, meaning there is no risk of double boxing when calling /// Implementors of this method should note the following:
/// this on a generic `MessageBody`. Prefer this over [`BoxBody::new`] when a boxed body /// - It is acceptable to skip the omit checks of [`is_complete_body`]. The responsibility of
/// is required. /// performing this check is delegated to the caller.
#[inline] /// - If the result of [`is_complete_body`] is conditional, that condition should be given
fn boxed(self) -> BoxBody /// equivalent attention here.
where /// - A second call call to [`take_complete_body`] should return an empty `Bytes` or panic.
Self: Sized + 'static, /// - A call to [`poll_next`] after calling [`take_complete_body`] should return `None` unless
{ /// the chunk is guaranteed to be empty.
BoxBody::new(self) ///
/// The default implementation panics unconditionally, indicating a control flow bug in the
/// calling code.
///
/// # Panics
/// With a correct implementation, panics if called without first checking [`is_complete_body`].
///
/// [`is_complete_body`]: MessageBody::is_complete_body
/// [`take_complete_body`]: MessageBody::take_complete_body
/// [`poll_next`]: MessageBody::poll_next
fn take_complete_body(&mut self) -> Bytes {
assert!(
self.is_complete_body(),
"type ({}) allows taking complete body but did not provide an implementation \
of `take_complete_body`",
std::any::type_name::<Self>()
);
unimplemented!(
"type ({}) does not allow taking complete body; caller should make sure to \
check `is_complete_body` first",
std::any::type_name::<Self>()
);
} }
} }
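`try_into_bytes` above is an optimization hook: bodies that already hold their complete payload hand it over directly, and the default implementation returns the body unchanged so callers can fall back to polling. A short sketch of that fallback pattern, assuming the `body::to_bytes` helper used by the tests elsewhere in this diff:

use actix_http::body::{self, MessageBody};
use bytes::Bytes;

// Cheap path first; if the body cannot give up a complete chunk, poll it instead.
async fn collect<B>(body: B) -> Result<Bytes, B::Error>
where
    B: MessageBody,
{
    match body.try_into_bytes() {
        Ok(bytes) => Ok(bytes),
        Err(body) => body::to_bytes(body).await,
    }
}

With this shape, `collect(Bytes::from_static(b"x"))` never calls `poll_next`, while a streaming body goes through the polling branch.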
@@ -135,6 +95,14 @@ mod foreign_impls {
) -> Poll<Option<Result<Bytes, Self::Error>>> { ) -> Poll<Option<Result<Bytes, Self::Error>>> {
match *self {} match *self {}
} }
fn is_complete_body(&self) -> bool {
true
}
fn take_complete_body(&mut self) -> Bytes {
match *self {}
}
} }
impl MessageBody for () { impl MessageBody for () {
@@ -154,14 +122,19 @@ mod foreign_impls {
} }
#[inline] #[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> { fn is_complete_body(&self) -> bool {
Ok(Bytes::new()) true
}
#[inline]
fn take_complete_body(&mut self) -> Bytes {
Bytes::new()
} }
} }
impl<B> MessageBody for Box<B> impl<B> MessageBody for Box<B>
where where
B: MessageBody + Unpin + ?Sized, B: MessageBody + Unpin,
{ {
type Error = B::Error; type Error = B::Error;
@@ -177,11 +150,21 @@ mod foreign_impls {
) -> Poll<Option<Result<Bytes, Self::Error>>> { ) -> Poll<Option<Result<Bytes, Self::Error>>> {
Pin::new(self.get_mut().as_mut()).poll_next(cx) Pin::new(self.get_mut().as_mut()).poll_next(cx)
} }
#[inline]
fn is_complete_body(&self) -> bool {
self.as_ref().is_complete_body()
}
#[inline]
fn take_complete_body(&mut self) -> Bytes {
self.as_mut().take_complete_body()
}
} }
impl<B> MessageBody for Pin<Box<B>> impl<B> MessageBody for Pin<Box<B>>
where where
B: MessageBody + ?Sized, B: MessageBody,
{ {
type Error = B::Error; type Error = B::Error;
@@ -192,10 +175,42 @@ mod foreign_impls {
#[inline] #[inline]
fn poll_next( fn poll_next(
self: Pin<&mut Self>, mut self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> { ) -> Poll<Option<Result<Bytes, Self::Error>>> {
self.get_mut().as_mut().poll_next(cx) self.as_mut().poll_next(cx)
}
#[inline]
fn is_complete_body(&self) -> bool {
self.as_ref().is_complete_body()
}
#[inline]
fn take_complete_body(&mut self) -> Bytes {
debug_assert!(
self.is_complete_body(),
"inner type \"{}\" does not allow taking complete body; caller should make sure to \
call `is_complete_body` first",
std::any::type_name::<B>(),
);
// we do not have DerefMut access to call take_complete_body directly but since
// is_complete_body is true we should expect the entire bytes chunk in one poll_next
let waker = futures_util::task::noop_waker();
let mut cx = Context::from_waker(&waker);
match self.as_mut().poll_next(&mut cx) {
Poll::Ready(Some(Ok(data))) => data,
_ => {
panic!(
"inner type \"{}\" indicated it allows taking complete body but failed to \
return Bytes when polled",
std::any::type_name::<B>()
);
}
}
} }
} }
@@ -207,21 +222,25 @@ mod foreign_impls {
BodySize::Sized(self.len() as u64) BodySize::Sized(self.len() as u64)
} }
#[inline]
fn poll_next( fn poll_next(
self: Pin<&mut Self>, mut self: Pin<&mut Self>,
_cx: &mut Context<'_>, _cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> { ) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() { if self.is_empty() {
Poll::Ready(None) Poll::Ready(None)
} else { } else {
Poll::Ready(Some(Ok(Bytes::from_static(mem::take(self.get_mut()))))) Poll::Ready(Some(Ok(self.take_complete_body())))
} }
} }
#[inline] #[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> { fn is_complete_body(&self) -> bool {
Ok(Bytes::from_static(self)) true
}
#[inline]
fn take_complete_body(&mut self) -> Bytes {
Bytes::from_static(mem::take(self))
} }
} }
@@ -233,21 +252,25 @@ mod foreign_impls {
BodySize::Sized(self.len() as u64) BodySize::Sized(self.len() as u64)
} }
#[inline]
fn poll_next( fn poll_next(
self: Pin<&mut Self>, mut self: Pin<&mut Self>,
_cx: &mut Context<'_>, _cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> { ) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() { if self.is_empty() {
Poll::Ready(None) Poll::Ready(None)
} else { } else {
Poll::Ready(Some(Ok(mem::take(self.get_mut())))) Poll::Ready(Some(Ok(self.take_complete_body())))
} }
} }
#[inline] #[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> { fn is_complete_body(&self) -> bool {
Ok(self) true
}
#[inline]
fn take_complete_body(&mut self) -> Bytes {
mem::take(self)
} }
} }
@@ -259,21 +282,25 @@ mod foreign_impls {
BodySize::Sized(self.len() as u64) BodySize::Sized(self.len() as u64)
} }
#[inline]
fn poll_next( fn poll_next(
self: Pin<&mut Self>, mut self: Pin<&mut Self>,
_cx: &mut Context<'_>, _cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> { ) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() { if self.is_empty() {
Poll::Ready(None) Poll::Ready(None)
} else { } else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()).freeze()))) Poll::Ready(Some(Ok(self.take_complete_body())))
} }
} }
#[inline] #[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> { fn is_complete_body(&self) -> bool {
Ok(self.freeze()) true
}
#[inline]
fn take_complete_body(&mut self) -> Bytes {
mem::take(self).freeze()
} }
} }
@@ -285,21 +312,25 @@ mod foreign_impls {
BodySize::Sized(self.len() as u64) BodySize::Sized(self.len() as u64)
} }
#[inline]
fn poll_next( fn poll_next(
self: Pin<&mut Self>, mut self: Pin<&mut Self>,
_cx: &mut Context<'_>, _cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> { ) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() { if self.is_empty() {
Poll::Ready(None) Poll::Ready(None)
} else { } else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()).into()))) Poll::Ready(Some(Ok(self.take_complete_body())))
} }
} }
#[inline] #[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> { fn is_complete_body(&self) -> bool {
Ok(Bytes::from(self)) true
}
#[inline]
fn take_complete_body(&mut self) -> Bytes {
Bytes::from(mem::take(self))
} }
} }
@@ -311,7 +342,6 @@ mod foreign_impls {
BodySize::Sized(self.len() as u64) BodySize::Sized(self.len() as u64)
} }
#[inline]
fn poll_next( fn poll_next(
self: Pin<&mut Self>, self: Pin<&mut Self>,
_cx: &mut Context<'_>, _cx: &mut Context<'_>,
@@ -326,8 +356,13 @@ mod foreign_impls {
} }
#[inline] #[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> { fn is_complete_body(&self) -> bool {
Ok(Bytes::from_static(self.as_bytes())) true
}
#[inline]
fn take_complete_body(&mut self) -> Bytes {
Bytes::from_static(mem::take(self).as_bytes())
} }
} }
@@ -339,7 +374,6 @@ mod foreign_impls {
BodySize::Sized(self.len() as u64) BodySize::Sized(self.len() as u64)
} }
#[inline]
fn poll_next( fn poll_next(
self: Pin<&mut Self>, self: Pin<&mut Self>,
_cx: &mut Context<'_>, _cx: &mut Context<'_>,
@@ -353,8 +387,13 @@ mod foreign_impls {
} }
#[inline] #[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> { fn is_complete_body(&self) -> bool {
Ok(Bytes::from(self)) true
}
#[inline]
fn take_complete_body(&mut self) -> Bytes {
Bytes::from(mem::take(self))
} }
} }
@@ -366,7 +405,6 @@ mod foreign_impls {
BodySize::Sized(self.len() as u64) BodySize::Sized(self.len() as u64)
} }
#[inline]
fn poll_next( fn poll_next(
self: Pin<&mut Self>, self: Pin<&mut Self>,
_cx: &mut Context<'_>, _cx: &mut Context<'_>,
@@ -376,8 +414,13 @@ mod foreign_impls {
} }
#[inline] #[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> { fn is_complete_body(&self) -> bool {
Ok(self.into_bytes()) true
}
#[inline]
fn take_complete_body(&mut self) -> Bytes {
mem::take(self).into_bytes()
} }
} }
} }
@@ -432,12 +475,6 @@ where
None => Poll::Ready(None), None => Poll::Ready(None),
} }
} }
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
let Self { body, mapper } = self;
body.try_into_bytes().map_err(|body| Self { body, mapper })
}
} }
#[cfg(test)] #[cfg(test)]
@@ -447,7 +484,6 @@ mod tests {
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
use super::*; use super::*;
use crate::body::{self, EitherBody};
macro_rules! assert_poll_next { macro_rules! assert_poll_next {
($pin:expr, $exp:expr) => { ($pin:expr, $exp:expr) => {
@@ -549,45 +585,49 @@ mod tests {
assert_poll_next!(pl, Bytes::from("test")); assert_poll_next!(pl, Bytes::from("test"));
} }
#[actix_rt::test] #[test]
async fn complete_body_combinators() { fn take_string() {
let body = Bytes::from_static(b"test"); let mut data = "test".repeat(2);
let body = BoxBody::new(body); let data_bytes = Bytes::from(data.clone());
let body = EitherBody::<_, ()>::left(body); assert!(data.is_complete_body());
let body = EitherBody::<(), _>::right(body); assert_eq!(data.take_complete_body(), data_bytes);
// Do not support try_into_bytes:
// let body = Box::new(body);
// let body = Box::pin(body);
assert_eq!(body.try_into_bytes().unwrap(), Bytes::from("test")); let mut big_data = "test".repeat(64 * 1024);
let data_bytes = Bytes::from(big_data.clone());
assert!(big_data.is_complete_body());
assert_eq!(big_data.take_complete_body(), data_bytes);
} }
#[actix_rt::test] #[test]
async fn complete_body_combinators_poll() { fn take_boxed_equivalence() {
let body = Bytes::from_static(b"test"); let mut data = Bytes::from_static(b"test");
let body = BoxBody::new(body); assert!(data.is_complete_body());
let body = EitherBody::<_, ()>::left(body); assert_eq!(data.take_complete_body(), b"test".as_ref());
let body = EitherBody::<(), _>::right(body);
let mut body = body;
assert_eq!(body.size(), BodySize::Sized(4)); let mut data = Box::new(Bytes::from_static(b"test"));
assert_poll_next!(Pin::new(&mut body), Bytes::from("test")); assert!(data.is_complete_body());
assert_poll_next_none!(Pin::new(&mut body)); assert_eq!(data.take_complete_body(), b"test".as_ref());
let mut data = Box::pin(Bytes::from_static(b"test"));
assert!(data.is_complete_body());
assert_eq!(data.take_complete_body(), b"test".as_ref());
} }
#[actix_rt::test] #[test]
async fn none_body_combinators() { fn take_policy() {
fn none_body() -> BoxBody { let mut data = Bytes::from_static(b"test");
let body = body::None; // first call returns chunk
let body = BoxBody::new(body); assert_eq!(data.take_complete_body(), b"test".as_ref());
let body = EitherBody::<_, ()>::left(body); // second call returns empty
let body = EitherBody::<(), _>::right(body); assert_eq!(data.take_complete_body(), b"".as_ref());
body.boxed()
}
assert_eq!(none_body().size(), BodySize::None); let waker = futures_util::task::noop_waker();
assert_eq!(none_body().try_into_bytes().unwrap(), Bytes::new()); let mut cx = Context::from_waker(&waker);
assert_poll_next_none!(Pin::new(&mut none_body())); let mut data = Bytes::from_static(b"test");
// take returns whole chunk
assert_eq!(data.take_complete_body(), b"test".as_ref());
// subsequent poll_next returns None
assert_eq!(Pin::new(&mut data).poll_next(&mut cx), Poll::Ready(None));
} }
// down-casting used to be done with a method on MessageBody trait // down-casting used to be done with a method on MessageBody trait

View File

@@ -1,9 +1,4 @@
//! Traits and structures to aid consuming and writing HTTP payloads. //! Traits and structures to aid consuming and writing HTTP payloads.
//!
//! "Body" and "payload" are used somewhat interchangeably in this documentation.
// Though the spec kinda reads like "payload" is the possibly-transfer-encoded part of the message
// and the "body" is the intended possibly-decoded version of that.
mod body_stream; mod body_stream;
mod boxed; mod boxed;

View File

@@ -10,12 +10,9 @@ use super::{BodySize, MessageBody};
/// Body type for responses that forbid payloads. /// Body type for responses that forbid payloads.
/// ///
/// This is distinct from an "empty" response which _would_ contain a `Content-Length` header. /// Distinct from an empty response which would contain a Content-Length header.
/// For an "empty" body, use `()` or `Bytes::new()`.
/// ///
/// For example, the HTTP spec forbids a payload to be sent with a `204 No Content` response. /// For an "empty" body, use `()` or `Bytes::new()`.
/// In this case, the payload (or lack thereof) is implicit from the status code, so a
/// `Content-Length` header is not required.
#[derive(Debug, Clone, Copy, Default)] #[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive] #[non_exhaustive]
pub struct None; pub struct None;
@@ -45,7 +42,12 @@ impl MessageBody for None {
} }
#[inline] #[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> { fn is_complete_body(&self) -> bool {
Ok(Bytes::new()) true
}
#[inline]
fn take_complete_body(&mut self) -> Bytes {
Bytes::new()
} }
} }
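The clarified doc above separates a body that does not exist at all (size `BodySize::None`, so no `Content-Length` is emitted, as with `204 No Content`) from a body that is merely empty (`()` or `Bytes::new()`, which advertises `Content-Length: 0`). A small sketch of both cases; `Response::with_body` and the `body::None::new()` constructor are assumed from actix-http 3:

use actix_http::{body, Response, StatusCode};

// 204: the absence of a payload is implied by the status code, so no Content-Length.
fn no_content() -> Response<body::None> {
    Response::with_body(StatusCode::NO_CONTENT, body::None::new())
}

// 200 with an empty but present body: this one carries `Content-Length: 0`.
fn empty_ok() -> Response<()> {
    Response::with_body(StatusCode::OK, ())
}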

View File

@@ -76,7 +76,7 @@ mod tests {
use actix_rt::pin; use actix_rt::pin;
use actix_utils::future::poll_fn; use actix_utils::future::poll_fn;
use futures_util::stream; use futures_util::stream;
use static_assertions::{assert_impl_all, assert_not_impl_any}; use static_assertions::{assert_impl_all, assert_not_impl_all};
use super::*; use super::*;
use crate::body::to_bytes; use crate::body::to_bytes;
@@ -87,10 +87,10 @@ mod tests {
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody); assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody); assert_impl_all!(SizedStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);
assert_not_impl_any!(SizedStream<stream::Empty<Bytes>>: MessageBody); assert_not_impl_all!(SizedStream<stream::Empty<Bytes>>: MessageBody);
assert_not_impl_any!(SizedStream<stream::Repeat<Bytes>>: MessageBody); assert_not_impl_all!(SizedStream<stream::Repeat<Bytes>>: MessageBody);
// crate::Error is not Clone // crate::Error is not Clone
assert_not_impl_any!(SizedStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody); assert_not_impl_all!(SizedStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
#[actix_rt::test] #[actix_rt::test]
async fn skips_empty_chunks() { async fn skips_empty_chunks() {

View File

@@ -1,22 +1,25 @@
use std::{fmt, marker::PhantomData, net, rc::Rc, time::Duration}; use std::{fmt, marker::PhantomData, net, rc::Rc};
use actix_codec::Framed; use actix_codec::Framed;
use actix_service::{IntoServiceFactory, Service, ServiceFactory}; use actix_service::{IntoServiceFactory, Service, ServiceFactory};
use crate::{ use crate::{
body::{BoxBody, MessageBody}, body::{BoxBody, MessageBody},
config::{KeepAlive, ServiceConfig},
h1::{self, ExpectHandler, H1Service, UpgradeHandler}, h1::{self, ExpectHandler, H1Service, UpgradeHandler},
h2::H2Service,
service::HttpService, service::HttpService,
ConnectCallback, Extensions, KeepAlive, Request, Response, ServiceConfig, ConnectCallback, Extensions, Request, Response,
}; };
/// An HTTP service builder. /// A HTTP service builder
/// ///
/// This type can construct an instance of [`HttpService`] through a builder-like pattern. /// This type can be used to construct an instance of [`HttpService`] through a
/// builder-like pattern.
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> { pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> {
keep_alive: KeepAlive, keep_alive: KeepAlive,
client_request_timeout: Duration, client_timeout: u64,
client_disconnect_timeout: Duration, client_disconnect: u64,
secure: bool, secure: bool,
local_addr: Option<net::SocketAddr>, local_addr: Option<net::SocketAddr>,
expect: X, expect: X,
@@ -25,23 +28,21 @@ pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> {
_phantom: PhantomData<S>, _phantom: PhantomData<S>,
} }
impl<T, S> Default for HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler> impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler>
where where
S: ServiceFactory<Request, Config = ()>, S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Response<BoxBody>> + 'static, S::Error: Into<Response<BoxBody>> + 'static,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
<S::Service as Service<Request>>::Future: 'static, <S::Service as Service<Request>>::Future: 'static,
{ {
fn default() -> Self { /// Create instance of `ServiceConfigBuilder`
pub fn new() -> Self {
HttpServiceBuilder { HttpServiceBuilder {
// ServiceConfig parts (make sure defaults match) keep_alive: KeepAlive::Timeout(5),
keep_alive: KeepAlive::default(), client_timeout: 5000,
client_request_timeout: Duration::from_secs(5), client_disconnect: 0,
client_disconnect_timeout: Duration::ZERO,
secure: false, secure: false,
local_addr: None, local_addr: None,
// dispatcher parts
expect: ExpectHandler, expect: ExpectHandler,
upgrade: None, upgrade: None,
on_connect_ext: None, on_connect_ext: None,
@@ -63,11 +64,9 @@ where
U::Error: fmt::Display, U::Error: fmt::Display,
U::InitError: fmt::Debug, U::InitError: fmt::Debug,
{ {
/// Set connection keep-alive setting. /// Set server keep-alive setting.
/// ///
/// Applies to HTTP/1.1 keep-alive and HTTP/2 ping-pong. /// By default keep alive is set to a 5 seconds.
///
/// By default keep-alive is 5 seconds.
pub fn keep_alive<W: Into<KeepAlive>>(mut self, val: W) -> Self { pub fn keep_alive<W: Into<KeepAlive>>(mut self, val: W) -> Self {
self.keep_alive = val.into(); self.keep_alive = val.into();
self self
@@ -85,45 +84,33 @@ where
self self
} }
/// Set client request timeout (for first request). /// Set server client timeout in milliseconds for first request.
/// ///
/// Defines a timeout for reading client request header. If the client does not transmit the /// Defines a timeout for reading client request header. If a client does not transmit
/// request head within this duration, the connection is terminated with a `408 Request Timeout` /// the entire set headers within this time, the request is terminated with
/// response error. /// the 408 (Request Time-out) error.
/// ///
/// A duration of zero disables the timeout. /// To disable timeout set value to 0.
/// ///
/// By default, the client timeout is 5 seconds. /// By default client timeout is set to 5000 milliseconds.
pub fn client_request_timeout(mut self, dur: Duration) -> Self { pub fn client_timeout(mut self, val: u64) -> Self {
self.client_request_timeout = dur; self.client_timeout = val;
self self
} }
#[doc(hidden)] /// Set server connection disconnect timeout in milliseconds.
#[deprecated(since = "3.0.0", note = "Renamed to `client_request_timeout`.")]
pub fn client_timeout(self, dur: Duration) -> Self {
self.client_request_timeout(dur)
}
/// Set client connection disconnect timeout.
/// ///
/// Defines a timeout for disconnect connection. If a disconnect procedure does not complete /// Defines a timeout for disconnect connection. If a disconnect procedure does not complete
/// within this time, the request get dropped. This timeout affects secure connections. /// within this time, the request get dropped. This timeout affects secure connections.
/// ///
/// A duration of zero disables the timeout. /// To disable timeout set value to 0.
/// ///
/// By default, the disconnect timeout is disabled. /// By default disconnect timeout is set to 0.
pub fn client_disconnect_timeout(mut self, dur: Duration) -> Self { pub fn client_disconnect(mut self, val: u64) -> Self {
self.client_disconnect_timeout = dur; self.client_disconnect = val;
self self
} }
#[doc(hidden)]
#[deprecated(since = "3.0.0", note = "Renamed to `client_disconnect_timeout`.")]
pub fn client_disconnect(self, dur: Duration) -> Self {
self.client_disconnect_timeout(dur)
}
/// Provide service for `EXPECT: 100-Continue` support. /// Provide service for `EXPECT: 100-Continue` support.
/// ///
/// Service get called with request that contains `EXPECT` header. /// Service get called with request that contains `EXPECT` header.
@@ -138,8 +125,8 @@ where
{ {
HttpServiceBuilder { HttpServiceBuilder {
keep_alive: self.keep_alive, keep_alive: self.keep_alive,
client_request_timeout: self.client_request_timeout, client_timeout: self.client_timeout,
client_disconnect_timeout: self.client_disconnect_timeout, client_disconnect: self.client_disconnect,
secure: self.secure, secure: self.secure,
local_addr: self.local_addr, local_addr: self.local_addr,
expect: expect.into_factory(), expect: expect.into_factory(),
@@ -162,8 +149,8 @@ where
{ {
HttpServiceBuilder { HttpServiceBuilder {
keep_alive: self.keep_alive, keep_alive: self.keep_alive,
client_request_timeout: self.client_request_timeout, client_timeout: self.client_timeout,
client_disconnect_timeout: self.client_disconnect_timeout, client_disconnect: self.client_disconnect,
secure: self.secure, secure: self.secure,
local_addr: self.local_addr, local_addr: self.local_addr,
expect: self.expect, expect: self.expect,
@@ -197,8 +184,8 @@ where
{ {
let cfg = ServiceConfig::new( let cfg = ServiceConfig::new(
self.keep_alive, self.keep_alive,
self.client_request_timeout, self.client_timeout,
self.client_disconnect_timeout, self.client_disconnect,
self.secure, self.secure,
self.local_addr, self.local_addr,
); );
@@ -210,8 +197,7 @@ where
} }
/// Finish service configuration and create a HTTP service for HTTP/2 protocol. /// Finish service configuration and create a HTTP service for HTTP/2 protocol.
#[cfg(feature = "http2")] pub fn h2<F, B>(self, service: F) -> H2Service<T, S, B>
pub fn h2<F, B>(self, service: F) -> crate::h2::H2Service<T, S, B>
where where
F: IntoServiceFactory<S, Request>, F: IntoServiceFactory<S, Request>,
S::Error: Into<Response<BoxBody>> + 'static, S::Error: Into<Response<BoxBody>> + 'static,
@@ -222,14 +208,13 @@ where
{ {
let cfg = ServiceConfig::new( let cfg = ServiceConfig::new(
self.keep_alive, self.keep_alive,
self.client_request_timeout, self.client_timeout,
self.client_disconnect_timeout, self.client_disconnect,
self.secure, self.secure,
self.local_addr, self.local_addr,
); );
crate::h2::H2Service::with_config(cfg, service.into_factory()) H2Service::with_config(cfg, service.into_factory()).on_connect_ext(self.on_connect_ext)
.on_connect_ext(self.on_connect_ext)
} }
/// Finish service configuration and create `HttpService` instance. /// Finish service configuration and create `HttpService` instance.
@@ -244,8 +229,8 @@ where
{ {
let cfg = ServiceConfig::new( let cfg = ServiceConfig::new(
self.keep_alive, self.keep_alive,
self.client_request_timeout, self.client_timeout,
self.client_disconnect_timeout, self.client_disconnect,
self.secure, self.secure,
self.local_addr, self.local_addr,
); );
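Taken together, the builder changes in this file replace the millisecond-valued `client_timeout`/`client_disconnect` setters with `Duration`-based `client_request_timeout`/`client_disconnect_timeout`, keeping the old names as deprecated shims. A hedged usage sketch of the renamed API; the trivial handler and the `Error` conversion mirror the crate's usual examples and are illustrative only:

use std::time::Duration;

use actix_http::{Error, HttpService, KeepAlive, Request, Response, StatusCode};

// Previously: .keep_alive(5), .client_timeout(5000), .client_disconnect(0) in ms.
let service = HttpService::build()
    .keep_alive(KeepAlive::Timeout(Duration::from_secs(5)))
    .client_request_timeout(Duration::from_secs(5))
    .client_disconnect_timeout(Duration::ZERO) // a zero duration disables the timeout
    .finish(|_req: Request| async move {
        Ok::<_, Error>(Response::new(StatusCode::OK))
    });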


@@ -1,36 +1,71 @@
use std::{ use std::{
cell::Cell,
fmt::{self, Write},
net, net,
rc::Rc, rc::Rc,
time::{Duration, Instant}, time::{Duration, SystemTime},
}; };
use actix_rt::{
task::JoinHandle,
time::{interval, sleep_until, Instant, Sleep},
};
use bytes::BytesMut; use bytes::BytesMut;
use crate::{date::DateService, KeepAlive}; /// "Sun, 06 Nov 1994 08:49:37 GMT".len()
pub(crate) const DATE_VALUE_LENGTH: usize = 29;
/// HTTP service configuration. #[derive(Debug, PartialEq, Clone, Copy)]
#[derive(Debug, Clone)] /// Server keep-alive setting
pub enum KeepAlive {
/// Keep alive in seconds
Timeout(usize),
/// Rely on OS to shutdown tcp connection
Os,
/// Disabled
Disabled,
}
impl From<usize> for KeepAlive {
fn from(keepalive: usize) -> Self {
KeepAlive::Timeout(keepalive)
}
}
impl From<Option<usize>> for KeepAlive {
fn from(keepalive: Option<usize>) -> Self {
if let Some(keepalive) = keepalive {
KeepAlive::Timeout(keepalive)
} else {
KeepAlive::Disabled
}
}
}
/// Http service configuration
pub struct ServiceConfig(Rc<Inner>); pub struct ServiceConfig(Rc<Inner>);
#[derive(Debug)]
struct Inner { struct Inner {
keep_alive: KeepAlive, keep_alive: Option<Duration>,
client_request_timeout: Duration, client_timeout: u64,
client_disconnect_timeout: Duration, client_disconnect: u64,
ka_enabled: bool,
secure: bool, secure: bool,
local_addr: Option<std::net::SocketAddr>, local_addr: Option<std::net::SocketAddr>,
date_service: DateService, date_service: DateService,
} }
impl Clone for ServiceConfig {
fn clone(&self) -> Self {
ServiceConfig(self.0.clone())
}
}
impl Default for ServiceConfig { impl Default for ServiceConfig {
fn default() -> Self { fn default() -> Self {
Self::new( Self::new(KeepAlive::Timeout(5), 0, 0, false, None)
KeepAlive::default(),
Duration::from_secs(5),
Duration::ZERO,
false,
None,
)
} }
} }
@@ -38,22 +73,34 @@ impl ServiceConfig {
/// Create instance of `ServiceConfig` /// Create instance of `ServiceConfig`
pub fn new( pub fn new(
keep_alive: KeepAlive, keep_alive: KeepAlive,
client_request_timeout: Duration, client_timeout: u64,
client_disconnect_timeout: Duration, client_disconnect: u64,
secure: bool, secure: bool,
local_addr: Option<net::SocketAddr>, local_addr: Option<net::SocketAddr>,
) -> ServiceConfig { ) -> ServiceConfig {
let (keep_alive, ka_enabled) = match keep_alive {
KeepAlive::Timeout(val) => (val as u64, true),
KeepAlive::Os => (0, true),
KeepAlive::Disabled => (0, false),
};
let keep_alive = if ka_enabled && keep_alive > 0 {
Some(Duration::from_secs(keep_alive))
} else {
None
};
ServiceConfig(Rc::new(Inner { ServiceConfig(Rc::new(Inner {
keep_alive: keep_alive.normalize(), keep_alive,
client_request_timeout, ka_enabled,
client_disconnect_timeout, client_timeout,
client_disconnect,
secure, secure,
local_addr, local_addr,
date_service: DateService::new(), date_service: DateService::new(),
})) }))
} }
/// Returns `true` if connection is secure (i.e., using TLS / HTTPS). /// Returns true if connection is secure (HTTPS)
#[inline] #[inline]
pub fn secure(&self) -> bool { pub fn secure(&self) -> bool {
self.0.secure self.0.secure
@@ -67,97 +114,235 @@ impl ServiceConfig {
self.0.local_addr self.0.local_addr
} }
/// Connection keep-alive setting. /// Keep alive duration if configured.
#[inline] #[inline]
pub fn keep_alive(&self) -> KeepAlive { pub fn keep_alive(&self) -> Option<Duration> {
self.0.keep_alive self.0.keep_alive
} }
/// Creates a time object representing the deadline for this connection's keep-alive period, if /// Return state of connection keep-alive functionality
/// enabled. #[inline]
/// pub fn keep_alive_enabled(&self) -> bool {
/// When [`KeepAlive::Os`] or [`KeepAlive::Disabled`] is set, this will return `None`. self.0.ka_enabled
pub fn keep_alive_deadline(&self) -> Option<Instant> { }
match self.keep_alive() {
KeepAlive::Timeout(dur) => Some(self.now() + dur), /// Client timeout for first request.
KeepAlive::Os => None, #[inline]
KeepAlive::Disabled => None, pub fn client_timer(&self) -> Option<Sleep> {
let delay_time = self.0.client_timeout;
if delay_time != 0 {
Some(sleep_until(self.now() + Duration::from_millis(delay_time)))
} else {
None
} }
} }
/// Creates a time object representing the deadline for the client to finish sending the head of /// Client timeout for first request.
/// its first request. pub fn client_timer_expire(&self) -> Option<Instant> {
/// let delay = self.0.client_timeout;
/// Returns `None` if this `ServiceConfig was` constructed with `client_request_timeout: 0`. if delay != 0 {
pub fn client_request_deadline(&self) -> Option<Instant> { Some(self.now() + Duration::from_millis(delay))
let timeout = self.0.client_request_timeout; } else {
(timeout != Duration::ZERO).then(|| self.now() + timeout) None
}
} }
/// Creates a time object representing the deadline for the client to disconnect. /// Client disconnect timer
pub fn client_disconnect_deadline(&self) -> Option<Instant> { pub fn client_disconnect_timer(&self) -> Option<Instant> {
let timeout = self.0.client_disconnect_timeout; let delay = self.0.client_disconnect;
(timeout != Duration::ZERO).then(|| self.now() + timeout) if delay != 0 {
Some(self.now() + Duration::from_millis(delay))
} else {
None
}
} }
/// Return keep-alive timer delay is configured.
#[inline]
pub fn keep_alive_timer(&self) -> Option<Sleep> {
self.keep_alive().map(|ka| sleep_until(self.now() + ka))
}
/// Keep-alive expire time
pub fn keep_alive_expire(&self) -> Option<Instant> {
self.keep_alive().map(|ka| self.now() + ka)
}
#[inline]
pub(crate) fn now(&self) -> Instant { pub(crate) fn now(&self) -> Instant {
self.0.date_service.now() self.0.date_service.now()
} }
/// Writes date header to `dst` buffer.
///
/// Low-level method that utilizes the built-in efficient date service, requiring fewer syscalls
/// than normal. Note that a CRLF (`\r\n`) is included in what is written.
#[doc(hidden)] #[doc(hidden)]
pub fn write_date_header(&self, dst: &mut BytesMut, camel_case: bool) { pub fn set_date(&self, dst: &mut BytesMut) {
let mut buf: [u8; 37] = [0; 37]; let mut buf: [u8; 39] = [0; 39];
buf[..6].copy_from_slice(b"date: ");
buf[..6].copy_from_slice(if camel_case { b"Date: " } else { b"date: " });
self.0 self.0
.date_service .date_service
.with_date(|date| buf[6..35].copy_from_slice(&date.bytes)); .set_date(|date| buf[6..35].copy_from_slice(&date.bytes));
buf[35..].copy_from_slice(b"\r\n\r\n");
buf[35..].copy_from_slice(b"\r\n");
dst.extend_from_slice(&buf); dst.extend_from_slice(&buf);
} }
#[allow(unused)] // used with `http2` feature flag pub(crate) fn set_date_header(&self, dst: &mut BytesMut) {
pub(crate) fn write_date_header_value(&self, dst: &mut BytesMut) {
self.0 self.0
.date_service .date_service
.with_date(|date| dst.extend_from_slice(&date.bytes)); .set_date(|date| dst.extend_from_slice(&date.bytes));
}
}
#[derive(Copy, Clone)]
struct Date {
bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
}
impl Date {
fn new() -> Date {
let mut date = Date {
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
};
date.update();
date
}
fn update(&mut self) {
self.pos = 0;
write!(self, "{}", httpdate::fmt_http_date(SystemTime::now())).unwrap();
}
}
impl fmt::Write for Date {
fn write_str(&mut self, s: &str) -> fmt::Result {
let len = s.len();
self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
self.pos += len;
Ok(())
}
}
/// Service for update Date and Instant periodically at 500 millis interval.
struct DateService {
current: Rc<Cell<(Date, Instant)>>,
handle: JoinHandle<()>,
}
impl Drop for DateService {
fn drop(&mut self) {
// stop the timer update async task on drop.
self.handle.abort();
}
}
impl DateService {
fn new() -> Self {
// shared date and timer for DateService and update async task.
let current = Rc::new(Cell::new((Date::new(), Instant::now())));
let current_clone = Rc::clone(&current);
// spawn an async task sleep for 500 milli and update current date/timer in a loop.
// handle is used to stop the task on DateService drop.
let handle = actix_rt::spawn(async move {
#[cfg(test)]
let _notify = notify_on_drop::NotifyOnDrop::new();
let mut interval = interval(Duration::from_millis(500));
loop {
let now = interval.tick().await;
let date = Date::new();
current_clone.set((date, now));
}
});
DateService { current, handle }
}
fn now(&self) -> Instant {
self.current.get().1
}
fn set_date<F: FnMut(&Date)>(&self, mut f: F) {
f(&self.current.get().0);
}
}
// TODO: move to a util module for testing all spawn handle drop style tasks.
/// Test Module for checking the drop state of certain async tasks that are spawned
/// with `actix_rt::spawn`
///
/// The target task must explicitly generate `NotifyOnDrop` when spawn the task
#[cfg(test)]
mod notify_on_drop {
use std::cell::RefCell;
thread_local! {
static NOTIFY_DROPPED: RefCell<Option<bool>> = RefCell::new(None);
}
/// Check if the spawned task is dropped.
///
/// # Panics
/// Panics when there was no `NotifyOnDrop` instance on current thread.
pub(crate) fn is_dropped() -> bool {
NOTIFY_DROPPED.with(|bool| {
bool.borrow()
.expect("No NotifyOnDrop existed on current thread")
})
}
pub(crate) struct NotifyOnDrop;
impl NotifyOnDrop {
/// # Panic:
///
/// When construct multiple instances on any given thread.
pub(crate) fn new() -> Self {
NOTIFY_DROPPED.with(|bool| {
let mut bool = bool.borrow_mut();
if bool.is_some() {
panic!("NotifyOnDrop existed on current thread");
} else {
*bool = Some(false);
}
});
NotifyOnDrop
}
}
impl Drop for NotifyOnDrop {
fn drop(&mut self) {
NOTIFY_DROPPED.with(|bool| {
if let Some(b) = bool.borrow_mut().as_mut() {
*b = true;
}
});
}
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::{date::DATE_VALUE_LENGTH, notify_on_drop};
use actix_rt::{ use actix_rt::{task::yield_now, time::sleep};
task::yield_now,
time::{sleep, sleep_until},
};
use memchr::memmem;
#[actix_rt::test] #[actix_rt::test]
async fn test_date_service_update() { async fn test_date_service_update() {
let settings = let settings = ServiceConfig::new(KeepAlive::Os, 0, 0, false, None);
ServiceConfig::new(KeepAlive::Os, Duration::ZERO, Duration::ZERO, false, None);
yield_now().await; yield_now().await;
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf1, false); settings.set_date(&mut buf1);
let now1 = settings.now(); let now1 = settings.now();
sleep_until((Instant::now() + Duration::from_secs(2)).into()).await; sleep_until(Instant::now() + Duration::from_secs(2)).await;
yield_now().await; yield_now().await;
let now2 = settings.now(); let now2 = settings.now();
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf2, false); settings.set_date(&mut buf2);
assert_ne!(now1, now2); assert_ne!(now1, now2);
@@ -210,27 +395,11 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_date() { async fn test_date() {
let settings = ServiceConfig::default(); let settings = ServiceConfig::new(KeepAlive::Os, 0, 0, false, None);
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf1, false); settings.set_date(&mut buf1);
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf2, false); settings.set_date(&mut buf2);
assert_eq!(buf1, buf2); assert_eq!(buf1, buf2);
} }
#[actix_rt::test]
async fn test_date_camel_case() {
let settings = ServiceConfig::default();
let mut buf = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf, false);
assert!(memmem::find(&buf, b"date:").is_some());
let mut buf = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf, true);
assert!(memmem::find(&buf, b"Date:").is_some());
}
} }
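Because `ServiceConfig::new` now takes `Duration`s and exposes deadline helpers instead of raw timers, a test-style sketch of the new surface (run under an actix runtime, since constructing the config spawns the shared date task; the assertions reflect the "zero disables" rule described above):

use std::time::Duration;

use actix_http::{KeepAlive, ServiceConfig};

#[actix_rt::test]
async fn duration_based_service_config() {
    let cfg = ServiceConfig::new(
        KeepAlive::Timeout(Duration::from_secs(5)),
        Duration::from_secs(5), // client request (head read) timeout
        Duration::ZERO,         // client disconnect timeout: disabled
        false,                  // secure
        None,                   // local address
    );

    assert!(cfg.client_request_deadline().is_some());
    assert!(cfg.client_disconnect_deadline().is_none());
}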


@@ -1,92 +0,0 @@
use std::{
cell::Cell,
fmt::{self, Write},
rc::Rc,
time::{Duration, Instant, SystemTime},
};
use actix_rt::{task::JoinHandle, time::interval};
/// "Thu, 01 Jan 1970 00:00:00 GMT".len()
pub(crate) const DATE_VALUE_LENGTH: usize = 29;
#[derive(Clone, Copy)]
pub(crate) struct Date {
pub(crate) bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
}
impl Date {
fn new() -> Date {
let mut date = Date {
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
};
date.update();
date
}
fn update(&mut self) {
self.pos = 0;
write!(self, "{}", httpdate::fmt_http_date(SystemTime::now())).unwrap();
}
}
impl fmt::Write for Date {
fn write_str(&mut self, s: &str) -> fmt::Result {
let len = s.len();
self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
self.pos += len;
Ok(())
}
}
/// Service for update Date and Instant periodically at 500 millis interval.
pub(crate) struct DateService {
current: Rc<Cell<(Date, Instant)>>,
handle: JoinHandle<()>,
}
impl DateService {
pub(crate) fn new() -> Self {
// shared date and timer for DateService and update async task.
let current = Rc::new(Cell::new((Date::new(), Instant::now())));
let current_clone = Rc::clone(&current);
// spawn an async task sleep for 500 millis and update current date/timer in a loop.
// handle is used to stop the task on DateService drop.
let handle = actix_rt::spawn(async move {
#[cfg(test)]
let _notify = crate::notify_on_drop::NotifyOnDrop::new();
let mut interval = interval(Duration::from_millis(500));
loop {
let now = interval.tick().await;
let date = Date::new();
current_clone.set((date, now.into_std()));
}
});
DateService { current, handle }
}
pub(crate) fn now(&self) -> Instant {
self.current.get().1
}
pub(crate) fn with_date<F: FnMut(&Date)>(&self, mut f: F) {
f(&self.current.get().0);
}
}
impl fmt::Debug for DateService {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DateService").finish_non_exhaustive()
}
}
impl Drop for DateService {
fn drop(&mut self) {
// stop the timer update async task on drop.
self.handle.abort();
}
}


@@ -11,6 +11,9 @@ use actix_rt::task::{spawn_blocking, JoinHandle};
use bytes::Bytes; use bytes::Bytes;
use futures_core::{ready, Stream}; use futures_core::{ready, Stream};
#[cfg(feature = "compress-brotli")]
use brotli2::write::BrotliDecoder;
#[cfg(feature = "compress-gzip")] #[cfg(feature = "compress-gzip")]
use flate2::write::{GzDecoder, ZlibDecoder}; use flate2::write::{GzDecoder, ZlibDecoder};
@@ -19,20 +22,17 @@ use zstd::stream::write::Decoder as ZstdDecoder;
use crate::{ use crate::{
encoding::Writer, encoding::Writer,
error::PayloadError, error::{BlockingError, PayloadError},
header::{ContentEncoding, HeaderMap, CONTENT_ENCODING}, header::{ContentEncoding, HeaderMap, CONTENT_ENCODING},
}; };
const MAX_CHUNK_SIZE_DECODE_IN_PLACE: usize = 2049; const MAX_CHUNK_SIZE_DECODE_IN_PLACE: usize = 2049;
pin_project_lite::pin_project! { pub struct Decoder<S> {
pub struct Decoder<S> { decoder: Option<ContentDecoder>,
decoder: Option<ContentDecoder>, stream: S,
#[pin] eof: bool,
stream: S, fut: Option<JoinHandle<Result<(Option<Bytes>, ContentDecoder), io::Error>>>,
eof: bool,
fut: Option<JoinHandle<Result<(Option<Bytes>, ContentDecoder), io::Error>>>,
}
} }
impl<S> Decoder<S> impl<S> Decoder<S>
@@ -44,20 +44,17 @@ where
pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> { pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> {
let decoder = match encoding { let decoder = match encoding {
#[cfg(feature = "compress-brotli")] #[cfg(feature = "compress-brotli")]
ContentEncoding::Brotli => Some(ContentDecoder::Brotli(Box::new( ContentEncoding::Br => Some(ContentDecoder::Br(Box::new(BrotliDecoder::new(
brotli::DecompressorWriter::new(Writer::new(), 8_096), Writer::new(),
))), )))),
#[cfg(feature = "compress-gzip")] #[cfg(feature = "compress-gzip")]
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new( ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(
ZlibDecoder::new(Writer::new()), ZlibDecoder::new(Writer::new()),
))), ))),
#[cfg(feature = "compress-gzip")] #[cfg(feature = "compress-gzip")]
ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(GzDecoder::new( ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(GzDecoder::new(
Writer::new(), Writer::new(),
)))), )))),
#[cfg(feature = "compress-zstd")] #[cfg(feature = "compress-zstd")]
ContentEncoding::Zstd => Some(ContentDecoder::Zstd(Box::new( ContentEncoding::Zstd => Some(ContentDecoder::Zstd(Box::new(
ZstdDecoder::new(Writer::new()).expect( ZstdDecoder::new(Writer::new()).expect(
@@ -92,48 +89,42 @@ where
impl<S> Stream for Decoder<S> impl<S> Stream for Decoder<S>
where where
S: Stream<Item = Result<Bytes, PayloadError>>, S: Stream<Item = Result<Bytes, PayloadError>> + Unpin,
{ {
type Item = Result<Bytes, PayloadError>; type Item = Result<Bytes, PayloadError>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.project();
loop { loop {
if let Some(ref mut fut) = this.fut { if let Some(ref mut fut) = self.fut {
let (chunk, decoder) = ready!(Pin::new(fut).poll(cx)).map_err(|_| { let (chunk, decoder) =
PayloadError::Io(io::Error::new( ready!(Pin::new(fut).poll(cx)).map_err(|_| BlockingError)??;
io::ErrorKind::Other,
"Blocking task was cancelled unexpectedly",
))
})??;
*this.decoder = Some(decoder); self.decoder = Some(decoder);
this.fut.take(); self.fut.take();
if let Some(chunk) = chunk { if let Some(chunk) = chunk {
return Poll::Ready(Some(Ok(chunk))); return Poll::Ready(Some(Ok(chunk)));
} }
} }
if *this.eof { if self.eof {
return Poll::Ready(None); return Poll::Ready(None);
} }
match ready!(this.stream.as_mut().poll_next(cx)) { match ready!(Pin::new(&mut self.stream).poll_next(cx)) {
Some(Err(err)) => return Poll::Ready(Some(Err(err))), Some(Err(err)) => return Poll::Ready(Some(Err(err))),
Some(Ok(chunk)) => { Some(Ok(chunk)) => {
if let Some(mut decoder) = this.decoder.take() { if let Some(mut decoder) = self.decoder.take() {
if chunk.len() < MAX_CHUNK_SIZE_DECODE_IN_PLACE { if chunk.len() < MAX_CHUNK_SIZE_DECODE_IN_PLACE {
let chunk = decoder.feed_data(chunk)?; let chunk = decoder.feed_data(chunk)?;
*this.decoder = Some(decoder); self.decoder = Some(decoder);
if let Some(chunk) = chunk { if let Some(chunk) = chunk {
return Poll::Ready(Some(Ok(chunk))); return Poll::Ready(Some(Ok(chunk)));
} }
} else { } else {
*this.fut = Some(spawn_blocking(move || { self.fut = Some(spawn_blocking(move || {
let chunk = decoder.feed_data(chunk)?; let chunk = decoder.feed_data(chunk)?;
Ok((chunk, decoder)) Ok((chunk, decoder))
})); }));
@@ -146,9 +137,9 @@ where
} }
None => { None => {
*this.eof = true; self.eof = true;
return if let Some(mut decoder) = this.decoder.take() { return if let Some(mut decoder) = self.decoder.take() {
match decoder.feed_eof() { match decoder.feed_eof() {
Ok(Some(res)) => Poll::Ready(Some(Ok(res))), Ok(Some(res)) => Poll::Ready(Some(Ok(res))),
Ok(None) => Poll::Ready(None), Ok(None) => Poll::Ready(None),
@@ -166,13 +157,10 @@ where
enum ContentDecoder { enum ContentDecoder {
#[cfg(feature = "compress-gzip")] #[cfg(feature = "compress-gzip")]
Deflate(Box<ZlibDecoder<Writer>>), Deflate(Box<ZlibDecoder<Writer>>),
#[cfg(feature = "compress-gzip")] #[cfg(feature = "compress-gzip")]
Gzip(Box<GzDecoder<Writer>>), Gzip(Box<GzDecoder<Writer>>),
#[cfg(feature = "compress-brotli")] #[cfg(feature = "compress-brotli")]
Brotli(Box<brotli::DecompressorWriter<Writer>>), Br(Box<BrotliDecoder<Writer>>),
// We need explicit 'static lifetime here because ZstdDecoder need lifetime // We need explicit 'static lifetime here because ZstdDecoder need lifetime
// argument, and we use `spawn_blocking` in `Decoder::poll_next` that require `FnOnce() -> R + Send + 'static` // argument, and we use `spawn_blocking` in `Decoder::poll_next` that require `FnOnce() -> R + Send + 'static`
#[cfg(feature = "compress-zstd")] #[cfg(feature = "compress-zstd")]
@@ -183,7 +171,7 @@ impl ContentDecoder {
fn feed_eof(&mut self) -> io::Result<Option<Bytes>> { fn feed_eof(&mut self) -> io::Result<Option<Bytes>> {
match self { match self {
#[cfg(feature = "compress-brotli")] #[cfg(feature = "compress-brotli")]
ContentDecoder::Brotli(ref mut decoder) => match decoder.flush() { ContentDecoder::Br(ref mut decoder) => match decoder.flush() {
Ok(()) => { Ok(()) => {
let b = decoder.get_mut().take(); let b = decoder.get_mut().take();
@@ -241,7 +229,7 @@ impl ContentDecoder {
fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> { fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> {
match self { match self {
#[cfg(feature = "compress-brotli")] #[cfg(feature = "compress-brotli")]
ContentDecoder::Brotli(ref mut decoder) => match decoder.write_all(&data) { ContentDecoder::Br(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => { Ok(_) => {
decoder.flush()?; decoder.flush()?;
let b = decoder.get_mut().take(); let b = decoder.get_mut().take();
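One behavior this file keeps through the rewrite is the size threshold: chunks smaller than `MAX_CHUNK_SIZE_DECODE_IN_PLACE` (2049 bytes) are decompressed inline on the async task, while larger chunks are handed to `spawn_blocking`. A standalone sketch of that decision with placeholder names; the real logic lives in `Decoder::poll_next`:

use actix_rt::task::spawn_blocking;
use bytes::Bytes;

const MAX_CHUNK_SIZE_DECODE_IN_PLACE: usize = 2049;

async fn feed_chunk<F>(chunk: Bytes, mut decode: F) -> Bytes
where
    F: FnMut(Bytes) -> Bytes + Send + 'static,
{
    if chunk.len() < MAX_CHUNK_SIZE_DECODE_IN_PLACE {
        // Small chunk: decode in place without leaving the async task.
        decode(chunk)
    } else {
        // Large chunk: push the CPU-bound work onto the blocking thread pool.
        spawn_blocking(move || decode(chunk))
            .await
            .expect("blocking decode task panicked or was cancelled")
    }
}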


@@ -14,6 +14,9 @@ use derive_more::Display;
use futures_core::ready; use futures_core::ready;
use pin_project_lite::pin_project; use pin_project_lite::pin_project;
#[cfg(feature = "compress-brotli")]
use brotli2::write::BrotliEncoder;
#[cfg(feature = "compress-gzip")] #[cfg(feature = "compress-gzip")]
use flate2::write::{GzEncoder, ZlibEncoder}; use flate2::write::{GzEncoder, ZlibEncoder};
@@ -22,7 +25,8 @@ use zstd::stream::write::Encoder as ZstdEncoder;
use super::Writer; use super::Writer;
use crate::{ use crate::{
body::{self, BodySize, MessageBody}, body::{BodySize, MessageBody},
error::BlockingError,
header::{self, ContentEncoding, HeaderValue, CONTENT_ENCODING}, header::{self, ContentEncoding, HeaderValue, CONTENT_ENCODING},
ResponseHead, StatusCode, ResponseHead, StatusCode,
}; };
@@ -42,34 +46,35 @@ pin_project! {
impl<B: MessageBody> Encoder<B> { impl<B: MessageBody> Encoder<B> {
fn none() -> Self { fn none() -> Self {
Encoder { Encoder {
body: EncoderBody::None { body: EncoderBody::None,
body: body::None::new(),
},
encoder: None, encoder: None,
fut: None, fut: None,
eof: true, eof: true,
} }
} }
pub fn response(encoding: ContentEncoding, head: &mut ResponseHead, body: B) -> Self { pub fn response(encoding: ContentEncoding, head: &mut ResponseHead, mut body: B) -> Self {
let can_encode = !(head.headers().contains_key(&CONTENT_ENCODING)
|| head.status == StatusCode::SWITCHING_PROTOCOLS
|| head.status == StatusCode::NO_CONTENT
|| encoding == ContentEncoding::Identity
|| encoding == ContentEncoding::Auto);
// no need to compress an empty body // no need to compress an empty body
if matches!(body.size(), BodySize::None) { if matches!(body.size(), BodySize::None) {
return Self::none(); return Self::none();
} }
let should_encode = !(head.headers().contains_key(&CONTENT_ENCODING) let body = if body.is_complete_body() {
|| head.status == StatusCode::SWITCHING_PROTOCOLS let body = body.take_complete_body();
|| head.status == StatusCode::NO_CONTENT EncoderBody::Full { body }
|| encoding == ContentEncoding::Identity); } else {
EncoderBody::Stream { body }
let body = match body.try_into_bytes() {
Ok(body) => EncoderBody::Full { body },
Err(body) => EncoderBody::Stream { body },
}; };
if should_encode { if can_encode {
// wrap body only if encoder is feature-enabled // Modify response body only if encoder is set
if let Some(enc) = ContentEncoder::select(encoding) { if let Some(enc) = ContentEncoder::encoder(encoding) {
update_head(encoding, head); update_head(encoding, head);
return Encoder { return Encoder {
@@ -93,7 +98,7 @@ impl<B: MessageBody> Encoder<B> {
pin_project! { pin_project! {
#[project = EncoderBodyProj] #[project = EncoderBodyProj]
enum EncoderBody<B> { enum EncoderBody<B> {
None { body: body::None }, None,
Full { body: Bytes }, Full { body: Bytes },
Stream { #[pin] body: B }, Stream { #[pin] body: B },
} }
@@ -105,10 +110,9 @@ where
{ {
type Error = EncoderError; type Error = EncoderError;
#[inline]
fn size(&self) -> BodySize { fn size(&self) -> BodySize {
match self { match self {
EncoderBody::None { body } => body.size(), EncoderBody::None => BodySize::None,
EncoderBody::Full { body } => body.size(), EncoderBody::Full { body } => body.size(),
EncoderBody::Stream { body } => body.size(), EncoderBody::Stream { body } => body.size(),
} }
@@ -119,9 +123,7 @@ where
cx: &mut Context<'_>, cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> { ) -> Poll<Option<Result<Bytes, Self::Error>>> {
match self.project() { match self.project() {
EncoderBodyProj::None { body } => { EncoderBodyProj::None => Poll::Ready(None),
Pin::new(body).poll_next(cx).map_err(|err| match err {})
}
EncoderBodyProj::Full { body } => { EncoderBodyProj::Full { body } => {
Pin::new(body).poll_next(cx).map_err(|err| match err {}) Pin::new(body).poll_next(cx).map_err(|err| match err {})
} }
@@ -131,15 +133,21 @@ where
} }
} }
#[inline] fn is_complete_body(&self) -> bool {
fn try_into_bytes(self) -> Result<Bytes, Self>
where
Self: Sized,
{
match self { match self {
EncoderBody::None { body } => Ok(body.try_into_bytes().unwrap()), EncoderBody::None => true,
EncoderBody::Full { body } => Ok(body.try_into_bytes().unwrap()), EncoderBody::Full { .. } => true,
_ => Err(self), EncoderBody::Stream { .. } => false,
}
}
fn take_complete_body(&mut self) -> Bytes {
match self {
EncoderBody::None => Bytes::new(),
EncoderBody::Full { body } => body.take_complete_body(),
EncoderBody::Stream { .. } => {
panic!("EncoderBody::Stream variant cannot be taken")
}
} }
} }
} }
@@ -150,7 +158,6 @@ where
{ {
type Error = EncoderError; type Error = EncoderError;
#[inline]
fn size(&self) -> BodySize { fn size(&self) -> BodySize {
if self.encoder.is_some() { if self.encoder.is_some() {
BodySize::Stream BodySize::Stream
@@ -164,7 +171,6 @@ where
cx: &mut Context<'_>, cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> { ) -> Poll<Option<Result<Bytes, Self::Error>>> {
let mut this = self.project(); let mut this = self.project();
loop { loop {
if *this.eof { if *this.eof {
return Poll::Ready(None); return Poll::Ready(None);
@@ -172,12 +178,7 @@ where
if let Some(ref mut fut) = this.fut { if let Some(ref mut fut) = this.fut {
let mut encoder = ready!(Pin::new(fut).poll(cx)) let mut encoder = ready!(Pin::new(fut).poll(cx))
.map_err(|_| { .map_err(|_| EncoderError::Blocking(BlockingError))?
EncoderError::Io(io::Error::new(
io::ErrorKind::Other,
"Blocking task was cancelled unexpectedly",
))
})?
.map_err(EncoderError::Io)?; .map_err(EncoderError::Io)?;
let chunk = encoder.take(); let chunk = encoder.take();
@@ -233,30 +234,28 @@ where
} }
} }
#[inline] fn is_complete_body(&self) -> bool {
fn try_into_bytes(mut self) -> Result<Bytes, Self>
where
Self: Sized,
{
if self.encoder.is_some() { if self.encoder.is_some() {
Err(self) false
} else { } else {
match self.body.try_into_bytes() { self.body.is_complete_body()
Ok(body) => Ok(body), }
Err(body) => { }
self.body = body;
Err(self) fn take_complete_body(&mut self) -> Bytes {
} if self.encoder.is_some() {
} panic!("compressed body stream cannot be taken")
} else {
self.body.take_complete_body()
} }
} }
} }
fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) { fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) {
head.headers_mut() head.headers_mut().insert(
.insert(header::CONTENT_ENCODING, encoding.to_header_value()); header::CONTENT_ENCODING,
head.headers_mut() HeaderValue::from_static(encoding.as_str()),
.insert(header::VARY, HeaderValue::from_static("accept-encoding")); );
head.no_chunking(false); head.no_chunking(false);
} }
@@ -269,7 +268,7 @@ enum ContentEncoder {
Gzip(GzEncoder<Writer>), Gzip(GzEncoder<Writer>),
#[cfg(feature = "compress-brotli")] #[cfg(feature = "compress-brotli")]
Brotli(Box<brotli::CompressorWriter<Writer>>), Br(BrotliEncoder<Writer>),
// Wwe need explicit 'static lifetime here because ZstdEncoder needs a lifetime argument and we // Wwe need explicit 'static lifetime here because ZstdEncoder needs a lifetime argument and we
// use `spawn_blocking` in `Encoder::poll_next` that requires `FnOnce() -> R + Send + 'static`. // use `spawn_blocking` in `Encoder::poll_next` that requires `FnOnce() -> R + Send + 'static`.
@@ -278,7 +277,7 @@ enum ContentEncoder {
} }
impl ContentEncoder { impl ContentEncoder {
fn select(encoding: ContentEncoding) -> Option<Self> { fn encoder(encoding: ContentEncoding) -> Option<Self> {
match encoding { match encoding {
#[cfg(feature = "compress-gzip")] #[cfg(feature = "compress-gzip")]
ContentEncoding::Deflate => Some(ContentEncoder::Deflate(ZlibEncoder::new( ContentEncoding::Deflate => Some(ContentEncoder::Deflate(ZlibEncoder::new(
@@ -293,7 +292,9 @@ impl ContentEncoder {
))), ))),
#[cfg(feature = "compress-brotli")] #[cfg(feature = "compress-brotli")]
ContentEncoding::Brotli => Some(ContentEncoder::Brotli(new_brotli_compressor())), ContentEncoding::Br => {
Some(ContentEncoder::Br(BrotliEncoder::new(Writer::new(), 3)))
}
#[cfg(feature = "compress-zstd")] #[cfg(feature = "compress-zstd")]
ContentEncoding::Zstd => { ContentEncoding::Zstd => {
@@ -309,7 +310,7 @@ impl ContentEncoder {
pub(crate) fn take(&mut self) -> Bytes { pub(crate) fn take(&mut self) -> Bytes {
match *self { match *self {
#[cfg(feature = "compress-brotli")] #[cfg(feature = "compress-brotli")]
ContentEncoder::Brotli(ref mut encoder) => encoder.get_mut().take(), ContentEncoder::Br(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-gzip")] #[cfg(feature = "compress-gzip")]
ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(), ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(),
@@ -325,8 +326,8 @@ impl ContentEncoder {
fn finish(self) -> Result<Bytes, io::Error> { fn finish(self) -> Result<Bytes, io::Error> {
match self { match self {
#[cfg(feature = "compress-brotli")] #[cfg(feature = "compress-brotli")]
ContentEncoder::Brotli(mut encoder) => match encoder.flush() { ContentEncoder::Br(encoder) => match encoder.finish() {
Ok(()) => Ok(encoder.into_inner().buf.freeze()), Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err), Err(err) => Err(err),
}, },
@@ -353,10 +354,10 @@ impl ContentEncoder {
fn write(&mut self, data: &[u8]) -> Result<(), io::Error> { fn write(&mut self, data: &[u8]) -> Result<(), io::Error> {
match *self { match *self {
#[cfg(feature = "compress-brotli")] #[cfg(feature = "compress-brotli")]
ContentEncoder::Brotli(ref mut encoder) => match encoder.write_all(data) { ContentEncoder::Br(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()), Ok(_) => Ok(()),
Err(err) => { Err(err) => {
log::trace!("Error decoding br encoding: {}", err); trace!("Error decoding br encoding: {}", err);
Err(err) Err(err)
} }
}, },
@@ -365,7 +366,7 @@ impl ContentEncoder {
ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) { ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()), Ok(_) => Ok(()),
Err(err) => { Err(err) => {
log::trace!("Error decoding gzip encoding: {}", err); trace!("Error decoding gzip encoding: {}", err);
Err(err) Err(err)
} }
}, },
@@ -374,7 +375,7 @@ impl ContentEncoder {
ContentEncoder::Deflate(ref mut encoder) => match encoder.write_all(data) { ContentEncoder::Deflate(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()), Ok(_) => Ok(()),
Err(err) => { Err(err) => {
log::trace!("Error decoding deflate encoding: {}", err); trace!("Error decoding deflate encoding: {}", err);
Err(err) Err(err)
} }
}, },
@@ -383,7 +384,7 @@ impl ContentEncoder {
ContentEncoder::Zstd(ref mut encoder) => match encoder.write_all(data) { ContentEncoder::Zstd(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()), Ok(_) => Ok(()),
Err(err) => { Err(err) => {
log::trace!("Error decoding ztsd encoding: {}", err); trace!("Error decoding ztsd encoding: {}", err);
Err(err) Err(err)
} }
}, },
@@ -391,24 +392,15 @@ impl ContentEncoder {
} }
} }
#[cfg(feature = "compress-brotli")]
fn new_brotli_compressor() -> Box<brotli::CompressorWriter<Writer>> {
Box::new(brotli::CompressorWriter::new(
Writer::new(),
32 * 1024, // 32 KiB buffer
3, // BROTLI_PARAM_QUALITY
22, // BROTLI_PARAM_LGWIN
))
}
#[derive(Debug, Display)] #[derive(Debug, Display)]
#[non_exhaustive] #[non_exhaustive]
pub enum EncoderError { pub enum EncoderError {
/// Wrapped body stream error.
#[display(fmt = "body")] #[display(fmt = "body")]
Body(Box<dyn StdError>), Body(Box<dyn StdError>),
/// Generic I/O error. #[display(fmt = "blocking")]
Blocking(BlockingError),
#[display(fmt = "io")] #[display(fmt = "io")]
Io(io::Error), Io(io::Error),
} }
@@ -417,6 +409,7 @@ impl StdError for EncoderError {
fn source(&self) -> Option<&(dyn StdError + 'static)> { fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self { match self {
EncoderError::Body(err) => Some(&**err), EncoderError::Body(err) => Some(&**err),
EncoderError::Blocking(err) => Some(err),
EncoderError::Io(err) => Some(err), EncoderError::Io(err) => Some(err),
} }
} }
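The brotli2 → brotli migration above also fixes the compressor parameters (32 KiB buffer, quality 3, log-window 22). A self-contained sketch of the underlying `brotli` crate call, independent of the actix writer types:

use std::io::Write as _;

fn brotli_compress_sketch(input: &[u8]) -> std::io::Result<Vec<u8>> {
    // Same parameter order as `new_brotli_compressor`: buffer size, quality, log-window.
    let mut enc = brotli::CompressorWriter::new(Vec::new(), 32 * 1024, 3, 22);
    enc.write_all(input)?;
    enc.flush()?;
    Ok(enc.into_inner())
}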


@@ -5,7 +5,7 @@ use std::{error::Error as StdError, fmt, io, str::Utf8Error, string::FromUtf8Err
use derive_more::{Display, Error, From}; use derive_more::{Display, Error, From};
use http::{uri::InvalidUri, StatusCode}; use http::{uri::InvalidUri, StatusCode};
use crate::{body::BoxBody, Response}; use crate::{body::BoxBody, ws, Response};
pub use http::Error as HttpError; pub use http::Error as HttpError;
@@ -51,7 +51,7 @@ impl Error {
Self::new(Kind::SendResponse) Self::new(Kind::SendResponse)
} }
#[allow(unused)] // available for future use #[allow(unused)] // reserved for future use (TODO: remove allow when being used)
pub(crate) fn new_io() -> Self { pub(crate) fn new_io() -> Self {
Self::new(Kind::Io) Self::new(Kind::Io)
} }
@@ -61,7 +61,6 @@ impl Error {
Self::new(Kind::Encoder) Self::new(Kind::Encoder)
} }
#[allow(unused)] // used with `ws` feature flag
pub(crate) fn new_ws() -> Self { pub(crate) fn new_ws() -> Self {
Self::new(Kind::Ws) Self::new(Kind::Ws)
} }
@@ -108,10 +107,8 @@ pub(crate) enum Kind {
impl fmt::Debug for Error { impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("actix_http::Error") // TODO: more detail
.field("kind", &self.inner.kind) f.write_str("actix_http::Error")
.field("cause", &self.inner.cause)
.finish()
} }
} }
@@ -142,16 +139,14 @@ impl From<HttpError> for Error {
} }
} }
#[cfg(feature = "ws")] impl From<ws::HandshakeError> for Error {
impl From<crate::ws::HandshakeError> for Error { fn from(err: ws::HandshakeError) -> Self {
fn from(err: crate::ws::HandshakeError) -> Self {
Self::new_ws().with_cause(err) Self::new_ws().with_cause(err)
} }
} }
#[cfg(feature = "ws")] impl From<ws::ProtocolError> for Error {
impl From<crate::ws::ProtocolError> for Error { fn from(err: ws::ProtocolError) -> Self {
fn from(err: crate::ws::ProtocolError) -> Self {
Self::new_ws().with_cause(err) Self::new_ws().with_cause(err)
} }
} }
@@ -252,6 +247,11 @@ impl From<ParseError> for Response<BoxBody> {
} }
} }
/// A set of errors that can occur running blocking tasks in thread pool.
#[derive(Debug, Display, Error)]
#[display(fmt = "Blocking thread pool is gone")]
pub struct BlockingError;
/// A set of errors that can occur during payload parsing. /// A set of errors that can occur during payload parsing.
#[derive(Debug, Display)] #[derive(Debug, Display)]
#[non_exhaustive] #[non_exhaustive]
@@ -276,9 +276,8 @@ pub enum PayloadError {
UnknownLength, UnknownLength,
/// HTTP/2 payload error. /// HTTP/2 payload error.
#[cfg(feature = "http2")]
#[display(fmt = "{}", _0)] #[display(fmt = "{}", _0)]
Http2Payload(::h2::Error), Http2Payload(h2::Error),
/// Generic I/O error. /// Generic I/O error.
#[display(fmt = "{}", _0)] #[display(fmt = "{}", _0)]
@@ -289,20 +288,18 @@ impl std::error::Error for PayloadError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self { match self {
PayloadError::Incomplete(None) => None, PayloadError::Incomplete(None) => None,
PayloadError::Incomplete(Some(err)) => Some(err), PayloadError::Incomplete(Some(err)) => Some(err as &dyn std::error::Error),
PayloadError::EncodingCorrupted => None, PayloadError::EncodingCorrupted => None,
PayloadError::Overflow => None, PayloadError::Overflow => None,
PayloadError::UnknownLength => None, PayloadError::UnknownLength => None,
#[cfg(feature = "http2")] PayloadError::Http2Payload(err) => Some(err as &dyn std::error::Error),
PayloadError::Http2Payload(err) => Some(err), PayloadError::Io(err) => Some(err as &dyn std::error::Error),
PayloadError::Io(err) => Some(err),
} }
} }
} }
#[cfg(feature = "http2")] impl From<h2::Error> for PayloadError {
impl From<::h2::Error> for PayloadError { fn from(err: h2::Error) -> Self {
fn from(err: ::h2::Error) -> Self {
PayloadError::Http2Payload(err) PayloadError::Http2Payload(err)
} }
} }
@@ -319,6 +316,15 @@ impl From<io::Error> for PayloadError {
} }
} }
impl From<BlockingError> for PayloadError {
fn from(_: BlockingError) -> Self {
PayloadError::Io(io::Error::new(
io::ErrorKind::Other,
"Operation is canceled",
))
}
}
impl From<PayloadError> for Error { impl From<PayloadError> for Error {
fn from(err: PayloadError) -> Self { fn from(err: PayloadError) -> Self {
Self::new_payload().with_cause(err) Self::new_payload().with_cause(err)
@@ -326,31 +332,32 @@ impl From<PayloadError> for Error {
} }
/// A set of errors that can occur during dispatching HTTP requests. /// A set of errors that can occur during dispatching HTTP requests.
#[derive(Debug, Display, From)] #[derive(Debug, Display, Error, From)]
#[non_exhaustive] #[non_exhaustive]
pub enum DispatchError { pub enum DispatchError {
/// Service error. /// Service error
// FIXME: display and error type
#[display(fmt = "Service Error")] #[display(fmt = "Service Error")]
Service(Response<BoxBody>), Service(#[error(not(source))] Response<BoxBody>),
/// Body streaming error. /// Body error
#[display(fmt = "Body error: {}", _0)] // FIXME: display and error type
Body(Box<dyn StdError>), #[display(fmt = "Body Error")]
Body(#[error(not(source))] Box<dyn StdError>),
/// Upgrade service error. /// Upgrade service error
Upgrade, Upgrade,
/// An `io::Error` that occurred while trying to read or write to a network stream. /// An `io::Error` that occurred while trying to read or write to a network stream.
#[display(fmt = "IO error: {}", _0)] #[display(fmt = "IO error: {}", _0)]
Io(io::Error), Io(io::Error),
/// Request parse error. /// Http request parse error.
#[display(fmt = "Request parse error: {}", _0)] #[display(fmt = "Parse error: {}", _0)]
Parse(ParseError), Parse(ParseError),
/// HTTP/2 error. /// Http/2 error
#[display(fmt = "{}", _0)] #[display(fmt = "{}", _0)]
#[cfg(feature = "http2")]
H2(h2::Error), H2(h2::Error),
/// The first request did not complete within the specified timeout. /// The first request did not complete within the specified timeout.
@@ -361,34 +368,25 @@ pub enum DispatchError {
#[display(fmt = "Connection shutdown timeout")] #[display(fmt = "Connection shutdown timeout")]
DisconnectTimeout, DisconnectTimeout,
/// Handler dropped payload before reading EOF. /// Payload is not consumed
#[display(fmt = "Handler dropped payload before reading EOF")] #[display(fmt = "Task is completed but request's payload is not consumed")]
HandlerDroppedPayload, PayloadIsNotConsumed,
/// Internal error. /// Malformed request
#[display(fmt = "Malformed request")]
MalformedRequest,
/// Internal error
#[display(fmt = "Internal error")] #[display(fmt = "Internal error")]
InternalError, InternalError,
}
impl StdError for DispatchError { /// Unknown error
fn source(&self) -> Option<&(dyn StdError + 'static)> { #[display(fmt = "Unknown error")]
match self { Unknown,
DispatchError::Service(_res) => None,
DispatchError::Body(err) => Some(&**err),
DispatchError::Io(err) => Some(err),
DispatchError::Parse(err) => Some(err),
#[cfg(feature = "http2")]
DispatchError::H2(err) => Some(err),
_ => None,
}
}
} }
/// A set of error that can occur during parsing content type. /// A set of error that can occur during parsing content type.
#[derive(Debug, Display, Error)] #[derive(Debug, Display, Error)]
#[cfg_attr(test, derive(PartialEq))]
#[non_exhaustive] #[non_exhaustive]
pub enum ContentTypeError { pub enum ContentTypeError {
/// Can not parse content type /// Can not parse content type
@@ -401,13 +399,27 @@ pub enum ContentTypeError {
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod content_type_test_impls {
use std::io;
use http::{Error as HttpError, StatusCode};
use super::*; use super::*;
impl std::cmp::PartialEq for ContentTypeError {
fn eq(&self, other: &Self) -> bool {
match self {
Self::ParseError => matches!(other, ContentTypeError::ParseError),
Self::UnknownEncoding => {
matches!(other, ContentTypeError::UnknownEncoding)
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use http::{Error as HttpError, StatusCode};
use std::io;
#[test] #[test]
fn test_into_response() { fn test_into_response() {
let resp: Response<BoxBody> = ParseError::Incomplete.into(); let resp: Response<BoxBody> = ParseError::Incomplete.into();
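The feature-gating changes keep the `ParseError` → `Response<BoxBody>` conversion exercised by the test shown above; as a quick illustration, the expected status follows the existing test suite:

use actix_http::{body::BoxBody, error::ParseError, Response, StatusCode};

#[test]
fn parse_error_becomes_bad_request() {
    // Malformed-request errors convert directly into a client-error response.
    let res: Response<BoxBody> = ParseError::Incomplete.into();
    assert_eq!(res.status(), StatusCode::BAD_REQUEST);
}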


@@ -1,25 +1,23 @@
use std::{fmt, io}; use std::io;
use actix_codec::{Decoder, Encoder}; use actix_codec::{Decoder, Encoder};
use bitflags::bitflags; use bitflags::bitflags;
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
use http::{Method, Version}; use http::{Method, Version};
use super::{ use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
decoder::{self, PayloadDecoder, PayloadItem, PayloadType}, use super::{decoder, encoder, reserve_readbuf};
encoder, reserve_readbuf, Message, MessageType, use super::{Message, MessageType};
}; use crate::body::BodySize;
use crate::{ use crate::config::ServiceConfig;
body::BodySize, use crate::error::{ParseError, PayloadError};
error::{ParseError, PayloadError}, use crate::message::{ConnectionType, RequestHeadType, ResponseHead};
ConnectionType, RequestHeadType, ResponseHead, ServiceConfig,
};
bitflags! { bitflags! {
struct Flags: u8 { struct Flags: u8 {
const HEAD = 0b0000_0001; const HEAD = 0b0000_0001;
const KEEP_ALIVE_ENABLED = 0b0000_1000; const KEEPALIVE_ENABLED = 0b0000_1000;
const STREAM = 0b0001_0000; const STREAM = 0b0001_0000;
} }
} }
@@ -38,7 +36,7 @@ struct ClientCodecInner {
decoder: decoder::MessageDecoder<ResponseHead>, decoder: decoder::MessageDecoder<ResponseHead>,
payload: Option<PayloadDecoder>, payload: Option<PayloadDecoder>,
version: Version, version: Version,
conn_type: ConnectionType, ctype: ConnectionType,
// encoder part // encoder part
flags: Flags, flags: Flags,
@@ -51,32 +49,23 @@ impl Default for ClientCodec {
} }
} }
impl fmt::Debug for ClientCodec {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("h1::ClientCodec")
.field("flags", &self.inner.flags)
.finish_non_exhaustive()
}
}
impl ClientCodec { impl ClientCodec {
/// Create HTTP/1 codec. /// Create HTTP/1 codec.
/// ///
/// `keepalive_enabled` how response `connection` header get generated. /// `keepalive_enabled` how response `connection` header get generated.
pub fn new(config: ServiceConfig) -> Self { pub fn new(config: ServiceConfig) -> Self {
let flags = if config.keep_alive().enabled() { let flags = if config.keep_alive_enabled() {
Flags::KEEP_ALIVE_ENABLED Flags::KEEPALIVE_ENABLED
} else { } else {
Flags::empty() Flags::empty()
}; };
ClientCodec { ClientCodec {
inner: ClientCodecInner { inner: ClientCodecInner {
config, config,
decoder: decoder::MessageDecoder::default(), decoder: decoder::MessageDecoder::default(),
payload: None, payload: None,
version: Version::HTTP_11, version: Version::HTTP_11,
conn_type: ConnectionType::Close, ctype: ConnectionType::Close,
flags, flags,
encoder: encoder::MessageEncoder::default(), encoder: encoder::MessageEncoder::default(),
@@ -86,12 +75,12 @@ impl ClientCodec {
/// Check if request is upgrade /// Check if request is upgrade
pub fn upgrade(&self) -> bool { pub fn upgrade(&self) -> bool {
self.inner.conn_type == ConnectionType::Upgrade self.inner.ctype == ConnectionType::Upgrade
} }
/// Check if last response is keep-alive /// Check if last response is keep-alive
pub fn keep_alive(&self) -> bool { pub fn keepalive(&self) -> bool {
self.inner.conn_type == ConnectionType::KeepAlive self.inner.ctype == ConnectionType::KeepAlive
} }
/// Check last request's message type /// Check last request's message type
@@ -113,8 +102,8 @@ impl ClientCodec {
impl ClientPayloadCodec { impl ClientPayloadCodec {
/// Check if last response is keep-alive /// Check if last response is keep-alive
pub fn keep_alive(&self) -> bool { pub fn keepalive(&self) -> bool {
self.inner.conn_type == ConnectionType::KeepAlive self.inner.ctype == ConnectionType::KeepAlive
} }
/// Transform payload codec to a message codec /// Transform payload codec to a message codec
@@ -128,18 +117,15 @@ impl Decoder for ClientCodec {
type Error = ParseError; type Error = ParseError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> { fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
debug_assert!( debug_assert!(!self.inner.payload.is_some(), "Payload decoder is set");
self.inner.payload.is_none(),
"Payload decoder should not be set"
);
if let Some((req, payload)) = self.inner.decoder.decode(src)? { if let Some((req, payload)) = self.inner.decoder.decode(src)? {
if let Some(conn_type) = req.conn_type() { if let Some(ctype) = req.conn_type() {
// do not use peer's keep-alive // do not use peer's keep-alive
self.inner.conn_type = if conn_type == ConnectionType::KeepAlive { self.inner.ctype = if ctype == ConnectionType::KeepAlive {
self.inner.conn_type self.inner.ctype
} else { } else {
conn_type ctype
}; };
} }
@@ -204,9 +190,9 @@ impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
.set(Flags::HEAD, head.as_ref().method == Method::HEAD); .set(Flags::HEAD, head.as_ref().method == Method::HEAD);
// connection status // connection status
inner.conn_type = match head.as_ref().connection_type() { inner.ctype = match head.as_ref().connection_type() {
ConnectionType::KeepAlive => { ConnectionType::KeepAlive => {
if inner.flags.contains(Flags::KEEP_ALIVE_ENABLED) { if inner.flags.contains(Flags::KEEPALIVE_ENABLED) {
ConnectionType::KeepAlive ConnectionType::KeepAlive
} else { } else {
ConnectionType::Close ConnectionType::Close
@@ -223,7 +209,7 @@ impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
false, false,
inner.version, inner.version,
length, length,
inner.conn_type, inner.ctype,
&inner.config, &inner.config,
)?; )?;
} }


@@ -5,19 +5,21 @@ use bitflags::bitflags;
use bytes::BytesMut; use bytes::BytesMut;
use http::{Method, Version}; use http::{Method, Version};
use super::{ use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
decoder::{self, PayloadDecoder, PayloadItem, PayloadType}, use super::{decoder, encoder};
encoder, Message, MessageType, use super::{Message, MessageType};
}; use crate::body::BodySize;
use crate::{ use crate::config::ServiceConfig;
body::BodySize, error::ParseError, ConnectionType, Request, Response, ServiceConfig, use crate::error::ParseError;
}; use crate::message::ConnectionType;
use crate::request::Request;
use crate::response::Response;
bitflags! { bitflags! {
struct Flags: u8 { struct Flags: u8 {
const HEAD = 0b0000_0001; const HEAD = 0b0000_0001;
const KEEP_ALIVE_ENABLED = 0b0000_0010; const KEEPALIVE_ENABLED = 0b0000_0010;
const STREAM = 0b0000_0100; const STREAM = 0b0000_0100;
} }
} }
@@ -42,9 +44,7 @@ impl Default for Codec {
impl fmt::Debug for Codec { impl fmt::Debug for Codec {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("h1::Codec") write!(f, "h1::Codec({:?})", self.flags)
.field("flags", &self.flags)
.finish_non_exhaustive()
} }
} }
@@ -53,8 +53,8 @@ impl Codec {
/// ///
/// `keepalive_enabled` how response `connection` header get generated. /// `keepalive_enabled` how response `connection` header get generated.
pub fn new(config: ServiceConfig) -> Self { pub fn new(config: ServiceConfig) -> Self {
let flags = if config.keep_alive().enabled() { let flags = if config.keep_alive_enabled() {
Flags::KEEP_ALIVE_ENABLED Flags::KEEPALIVE_ENABLED
} else { } else {
Flags::empty() Flags::empty()
}; };
@@ -78,14 +78,14 @@ impl Codec {
/// Check if last response is keep-alive. /// Check if last response is keep-alive.
#[inline] #[inline]
pub fn keep_alive(&self) -> bool { pub fn keepalive(&self) -> bool {
self.conn_type == ConnectionType::KeepAlive self.conn_type == ConnectionType::KeepAlive
} }
/// Check if keep-alive enabled on server level. /// Check if keep-alive enabled on server level.
#[inline] #[inline]
pub fn keep_alive_enabled(&self) -> bool { pub fn keepalive_enabled(&self) -> bool {
self.flags.contains(Flags::KEEP_ALIVE_ENABLED) self.flags.contains(Flags::KEEPALIVE_ENABLED)
} }
/// Check last request's message type. /// Check last request's message type.
@@ -125,13 +125,11 @@ impl Decoder for Codec {
self.flags.set(Flags::HEAD, head.method == Method::HEAD); self.flags.set(Flags::HEAD, head.method == Method::HEAD);
self.version = head.version; self.version = head.version;
self.conn_type = head.connection_type(); self.conn_type = head.connection_type();
if self.conn_type == ConnectionType::KeepAlive if self.conn_type == ConnectionType::KeepAlive
&& !self.flags.contains(Flags::KEEP_ALIVE_ENABLED) && !self.flags.contains(Flags::KEEPALIVE_ENABLED)
{ {
self.conn_type = ConnectionType::Close self.conn_type = ConnectionType::Close
} }
match payload { match payload {
PayloadType::None => self.payload = None, PayloadType::None => self.payload = None,
PayloadType::Payload(pl) => self.payload = Some(pl), PayloadType::Payload(pl) => self.payload = Some(pl),
@@ -183,11 +181,9 @@ impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
&self.config, &self.config,
)?; )?;
} }
Message::Chunk(Some(bytes)) => { Message::Chunk(Some(bytes)) => {
self.encoder.encode_chunk(bytes.as_ref(), dst)?; self.encoder.encode_chunk(bytes.as_ref(), dst)?;
} }
Message::Chunk(None) => { Message::Chunk(None) => {
self.encoder.encode_eof(dst)?; self.encoder.encode_eof(dst)?;
} }
@@ -203,7 +199,7 @@ mod tests {
use http::Method; use http::Method;
use super::*; use super::*;
use crate::HttpMessage as _; use crate::HttpMessage;
#[actix_rt::test] #[actix_rt::test]
async fn test_http_request_chunked_payload_and_next_message() { async fn test_http_request_chunked_payload_and_next_message() {
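
For context on the codec hunks above: the renamed `conn_type` field and `KEEP_ALIVE_ENABLED` flag implement one small rule — the peer's `Connection: keep-alive` preference is only honoured when keep-alive is enabled on the server side, while `Connection: close` is always respected. A minimal standalone sketch of that rule (simplified types, not the actix-http code):

```rust
/// Simplified stand-in for the codec's connection state.
#[derive(Clone, Copy, PartialEq, Debug)]
enum ConnectionType {
    Close,
    KeepAlive,
}

/// Mirrors the decode-path rule: a client's keep-alive request is only honoured
/// when keep-alive is enabled server-side; anything else is taken as-is.
fn effective_connection_type(peer: ConnectionType, keep_alive_enabled: bool) -> ConnectionType {
    if peer == ConnectionType::KeepAlive && !keep_alive_enabled {
        ConnectionType::Close
    } else {
        peer
    }
}

fn main() {
    assert_eq!(
        effective_connection_type(ConnectionType::KeepAlive, false),
        ConnectionType::Close
    );
    assert_eq!(
        effective_connection_type(ConnectionType::KeepAlive, true),
        ConnectionType::KeepAlive
    );
    assert_eq!(
        effective_connection_type(ConnectionType::Close, true),
        ConnectionType::Close
    );
}
```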


@@ -2,14 +2,17 @@ use std::{convert::TryFrom, io, marker::PhantomData, mem::MaybeUninit, task::Pol
use actix_codec::Decoder; use actix_codec::Decoder;
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
use http::{ use http::header::{HeaderName, HeaderValue};
header::{self, HeaderName, HeaderValue}, use http::{header, Method, StatusCode, Uri, Version};
Method, StatusCode, Uri, Version,
};
use log::{debug, error, trace}; use log::{debug, error, trace};
use super::chunked::ChunkedState; use super::chunked::ChunkedState;
use crate::{error::ParseError, header::HeaderMap, ConnectionType, Request, ResponseHead}; use crate::{
error::ParseError,
header::HeaderMap,
message::{ConnectionType, ResponseHead},
request::Request,
};
pub(crate) const MAX_BUFFER_SIZE: usize = 131_072; pub(crate) const MAX_BUFFER_SIZE: usize = 131_072;
const MAX_HEADERS: usize = 96; const MAX_HEADERS: usize = 96;
@@ -47,7 +50,7 @@ pub(crate) enum PayloadLength {
} }
pub(crate) trait MessageType: Sized { pub(crate) trait MessageType: Sized {
fn set_connection_type(&mut self, conn_type: Option<ConnectionType>); fn set_connection_type(&mut self, ctype: Option<ConnectionType>);
fn set_expect(&mut self); fn set_expect(&mut self);
@@ -190,8 +193,8 @@ pub(crate) trait MessageType: Sized {
} }
impl MessageType for Request { impl MessageType for Request {
fn set_connection_type(&mut self, conn_type: Option<ConnectionType>) { fn set_connection_type(&mut self, ctype: Option<ConnectionType>) {
if let Some(ctype) = conn_type { if let Some(ctype) = ctype {
self.head_mut().set_connection_type(ctype); self.head_mut().set_connection_type(ctype);
} }
} }
@@ -209,16 +212,15 @@ impl MessageType for Request {
let (len, method, uri, ver, h_len) = { let (len, method, uri, ver, h_len) = {
// SAFETY: // SAFETY:
// Create an uninitialized array of `MaybeUninit`. The `assume_init` is safe because the // Create an uninitialized array of `MaybeUninit`. The `assume_init` is
// type we are claiming to have initialized here is a bunch of `MaybeUninit`s, which // safe because the type we are claiming to have initialized here is a
// do not require initialization. // bunch of `MaybeUninit`s, which do not require initialization.
let mut parsed = unsafe { let mut parsed = unsafe {
MaybeUninit::<[MaybeUninit<httparse::Header<'_>>; MAX_HEADERS]>::uninit() MaybeUninit::<[MaybeUninit<httparse::Header<'_>>; MAX_HEADERS]>::uninit()
.assume_init() .assume_init()
}; };
let mut req = httparse::Request::new(&mut []); let mut req = httparse::Request::new(&mut []);
match req.parse_with_uninit_headers(src, &mut parsed)? { match req.parse_with_uninit_headers(src, &mut parsed)? {
httparse::Status::Complete(len) => { httparse::Status::Complete(len) => {
let method = Method::from_bytes(req.method.unwrap().as_bytes()) let method = Method::from_bytes(req.method.unwrap().as_bytes())
@@ -233,7 +235,6 @@ impl MessageType for Request {
(len, method, uri, version, req.headers.len()) (len, method, uri, version, req.headers.len())
} }
httparse::Status::Partial => { httparse::Status::Partial => {
return if src.len() >= MAX_BUFFER_SIZE { return if src.len() >= MAX_BUFFER_SIZE {
trace!("MAX_BUFFER_SIZE unprocessed data reached, closing"); trace!("MAX_BUFFER_SIZE unprocessed data reached, closing");
@@ -277,8 +278,8 @@ impl MessageType for Request {
} }
impl MessageType for ResponseHead { impl MessageType for ResponseHead {
fn set_connection_type(&mut self, conn_type: Option<ConnectionType>) { fn set_connection_type(&mut self, ctype: Option<ConnectionType>) {
if let Some(ctype) = conn_type { if let Some(ctype) = ctype {
ResponseHead::set_connection_type(self, ctype); ResponseHead::set_connection_type(self, ctype);
} }
} }
@@ -382,36 +383,34 @@ impl HeaderIndex {
} }
#[derive(Debug, Clone, PartialEq)] #[derive(Debug, Clone, PartialEq)]
/// Chunk type yielded while decoding a payload. /// Http payload item
pub enum PayloadItem { pub enum PayloadItem {
Chunk(Bytes), Chunk(Bytes),
Eof, Eof,
} }
/// Decoder that can handle different payload types. /// Decoders to handle different Transfer-Encodings.
/// ///
/// If a message body does not use `Transfer-Encoding`, it should include a `Content-Length`. /// If a message body does not include a Transfer-Encoding, it *should*
/// include a Content-Length header.
#[derive(Debug, Clone, PartialEq)] #[derive(Debug, Clone, PartialEq)]
pub struct PayloadDecoder { pub struct PayloadDecoder {
kind: Kind, kind: Kind,
} }
impl PayloadDecoder { impl PayloadDecoder {
/// Constructs a fixed-length payload decoder.
pub fn length(x: u64) -> PayloadDecoder { pub fn length(x: u64) -> PayloadDecoder {
PayloadDecoder { PayloadDecoder {
kind: Kind::Length(x), kind: Kind::Length(x),
} }
} }
/// Constructs a chunked encoding decoder.
pub fn chunked() -> PayloadDecoder { pub fn chunked() -> PayloadDecoder {
PayloadDecoder { PayloadDecoder {
kind: Kind::Chunked(ChunkedState::Size, 0), kind: Kind::Chunked(ChunkedState::Size, 0),
} }
} }
/// Creates an decoder that yields chunks until the stream returns EOF.
pub fn eof() -> PayloadDecoder { pub fn eof() -> PayloadDecoder {
PayloadDecoder { kind: Kind::Eof } PayloadDecoder { kind: Kind::Eof }
} }
@@ -419,26 +418,25 @@ impl PayloadDecoder {
#[derive(Debug, Clone, PartialEq)] #[derive(Debug, Clone, PartialEq)]
enum Kind { enum Kind {
/// A reader used when a `Content-Length` header is passed with a positive integer. /// A Reader used when a Content-Length header is passed with a positive
/// integer.
Length(u64), Length(u64),
/// A Reader used when Transfer-Encoding is `chunked`.
/// A reader used when `Transfer-Encoding` is `chunked`.
Chunked(ChunkedState, u64), Chunked(ChunkedState, u64),
/// A Reader used for responses that don't indicate a length or chunked.
/// A reader used for responses that don't indicate a length or chunked.
/// ///
/// Note: This should only used for `Response`s. It is illegal for a `Request` to be made /// Note: This should only used for `Response`s. It is illegal for a
/// without either of `Content-Length` and `Transfer-Encoding: chunked` missing, as explained /// `Request` to be made with both `Content-Length` and
/// in [RFC 7230 §3.3.3]: /// `Transfer-Encoding: chunked` missing, as explained from the spec:
/// ///
/// > If a Transfer-Encoding header field is present in a response and the chunked transfer /// > If a Transfer-Encoding header field is present in a response and
/// > coding is not the final encoding, the message body length is determined by reading the /// > the chunked transfer coding is not the final encoding, the
/// > connection until it is closed by the server. If a Transfer-Encoding header field is /// > message body length is determined by reading the connection until
/// > present in a request and the chunked transfer coding is not the final encoding, the /// > it is closed by the server. If a Transfer-Encoding header field
/// > message body length cannot be determined reliably; the server MUST respond with the 400 /// > is present in a request and the chunked transfer coding is not
/// > (Bad Request) status code and then close the connection. /// > the final encoding, the message body length cannot be determined
/// /// > reliably; the server MUST respond with the 400 (Bad Request)
/// [RFC 7230 §3.3.3]: https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.3 /// > status code and then close the connection.
Eof, Eof,
} }
@@ -468,7 +466,6 @@ impl Decoder for PayloadDecoder {
Ok(Some(PayloadItem::Chunk(buf))) Ok(Some(PayloadItem::Chunk(buf)))
} }
} }
Kind::Chunked(ref mut state, ref mut size) => { Kind::Chunked(ref mut state, ref mut size) => {
loop { loop {
let mut buf = None; let mut buf = None;
@@ -494,7 +491,6 @@ impl Decoder for PayloadDecoder {
} }
} }
} }
Kind::Eof => { Kind::Eof => {
if src.is_empty() { if src.is_empty() {
Ok(None) Ok(None)
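
As context for the `PayloadDecoder` doc comments above (`Kind::Length`, `Kind::Chunked`, `Kind::Eof`), body framing is chosen from the already-parsed headers roughly as follows. This is a simplified standalone sketch of the RFC 7230 §3.3.3 decision, not the actual actix-http decoder:

```rust
/// Simplified framing decision, loosely following RFC 7230 §3.3.3.
#[derive(Debug, PartialEq)]
enum Framing {
    /// `Content-Length: n` — read exactly n bytes.
    Length(u64),
    /// `Transfer-Encoding: chunked` — decode chunk by chunk.
    Chunked,
    /// Responses without either header — read until the connection closes.
    UntilEof,
}

fn choose_framing(chunked: bool, content_length: Option<u64>, is_response: bool) -> Framing {
    if chunked {
        Framing::Chunked // chunked takes precedence over any Content-Length
    } else if let Some(len) = content_length {
        Framing::Length(len)
    } else if is_response {
        Framing::UntilEof // read-to-EOF framing is only legal for responses
    } else {
        Framing::Length(0) // a request with no framing information has no body
    }
}

fn main() {
    assert_eq!(choose_framing(true, Some(10), false), Framing::Chunked);
    assert_eq!(choose_framing(false, Some(10), false), Framing::Length(10));
    assert_eq!(choose_framing(false, None, true), Framing::UntilEof);
}
```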

File diff suppressed because it is too large


@@ -1,973 +0,0 @@
use std::{future::Future, str, task::Poll, time::Duration};
use actix_rt::{pin, time::sleep};
use actix_service::fn_service;
use actix_utils::future::{ready, Ready};
use bytes::Bytes;
use futures_util::future::lazy;
use actix_codec::Framed;
use actix_service::Service;
use bytes::{Buf, BytesMut};
use super::dispatcher::{Dispatcher, DispatcherState, DispatcherStateProj, Flags};
use crate::{
body::MessageBody,
config::ServiceConfig,
h1::{Codec, ExpectHandler, UpgradeHandler},
service::HttpFlow,
test::{TestBuffer, TestSeqBuffer},
Error, HttpMessage, KeepAlive, Method, OnConnectData, Request, Response, StatusCode,
};
fn find_slice(haystack: &[u8], needle: &[u8], from: usize) -> Option<usize> {
memchr::memmem::find(&haystack[from..], needle)
}
fn stabilize_date_header(payload: &mut [u8]) {
let mut from = 0;
while let Some(pos) = find_slice(payload, b"date", from) {
payload[(from + pos)..(from + pos + 35)]
.copy_from_slice(b"date: Thu, 01 Jan 1970 12:34:56 UTC");
from += 35;
}
}
fn ok_service() -> impl Service<Request, Response = Response<impl MessageBody>, Error = Error> {
status_service(StatusCode::OK)
}
fn status_service(
status: StatusCode,
) -> impl Service<Request, Response = Response<impl MessageBody>, Error = Error> {
fn_service(move |_req: Request| ready(Ok::<_, Error>(Response::new(status))))
}
fn echo_path_service(
) -> impl Service<Request, Response = Response<impl MessageBody>, Error = Error> {
fn_service(|req: Request| {
let path = req.path().as_bytes();
ready(Ok::<_, Error>(
Response::ok().set_body(Bytes::copy_from_slice(path)),
))
})
}
fn drop_payload_service(
) -> impl Service<Request, Response = Response<&'static str>, Error = Error> {
fn_service(|mut req: Request| async move {
let _ = req.take_payload();
Ok::<_, Error>(Response::with_body(StatusCode::OK, "payload dropped"))
})
}
fn echo_payload_service() -> impl Service<Request, Response = Response<Bytes>, Error = Error> {
fn_service(|mut req: Request| {
Box::pin(async move {
use futures_util::stream::StreamExt as _;
let mut pl = req.take_payload();
let mut body = BytesMut::new();
while let Some(chunk) = pl.next().await {
body.extend_from_slice(chunk.unwrap().chunk())
}
Ok::<_, Error>(Response::ok().set_body(body.freeze()))
})
})
}
#[actix_rt::test]
async fn late_request() {
let mut buf = TestBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(ok_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Ready(_) => panic!("first poll should not be ready"),
Poll::Pending => {}
}
// polls: initial
assert_eq!(h1.poll_count, 1);
buf.extend_read_buf("GET /abcd HTTP/1.1\r\nConnection: close\r\n\r\n");
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("second poll should not be pending"),
Poll::Ready(res) => assert!(res.is_ok()),
}
// polls: initial pending => handle req => shutdown
assert_eq!(h1.poll_count, 3);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 0\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn oneshot_connection() {
let buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n");
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_ok()),
}
// polls: initial => shutdown
assert_eq!(h1.poll_count, 2);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = http_msg(
r"
HTTP/1.1 200 OK
content-length: 5
connection: close
date: Thu, 01 Jan 1970 12:34:56 UTC
/abcd
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
})
.await;
}
#[actix_rt::test]
async fn keep_alive_timeout() {
let buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n");
let cfg = ServiceConfig::new(
KeepAlive::Timeout(Duration::from_millis(200)),
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
assert!(
h1.as_mut().poll(cx).is_pending(),
"keep-alive should prevent poll from resolving"
);
// polls: initial
assert_eq!(h1.poll_count, 1);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
// sleep slightly longer than keep-alive timeout
sleep(Duration::from_millis(250)).await;
lazy(|cx| {
assert!(
h1.as_mut().poll(cx).is_ready(),
"keep-alive should have resolved",
);
// polls: initial => keep-alive wake-up shutdown
assert_eq!(h1.poll_count, 2);
if let DispatcherStateProj::Normal { inner } = h1.project().inner.project() {
// connection closed
assert!(inner.flags.contains(Flags::SHUTDOWN));
assert!(inner.flags.contains(Flags::WRITE_DISCONNECT));
// and nothing added to write buffer
assert!(buf.write_buf_slice().is_empty());
}
})
.await;
}
#[actix_rt::test]
async fn keep_alive_follow_up_req() {
let mut buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n");
let cfg = ServiceConfig::new(
KeepAlive::Timeout(Duration::from_millis(500)),
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
assert!(
h1.as_mut().poll(cx).is_pending(),
"keep-alive should prevent poll from resolving"
);
// polls: initial
assert_eq!(h1.poll_count, 1);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
// sleep for less than KA timeout
sleep(Duration::from_millis(100)).await;
lazy(|cx| {
assert!(
h1.as_mut().poll(cx).is_pending(),
"keep-alive should not have resolved dispatcher yet",
);
// polls: initial => manual
assert_eq!(h1.poll_count, 2);
if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() {
// connection not closed
assert!(!inner.flags.contains(Flags::SHUTDOWN));
assert!(!inner.flags.contains(Flags::WRITE_DISCONNECT));
// and nothing added to write buffer
assert!(buf.write_buf_slice().is_empty());
}
})
.await;
lazy(|cx| {
buf.extend_read_buf(
"\
GET /efg HTTP/1.1\r\n\
Connection: close\r\n\
\r\n\r\n",
);
assert!(
h1.as_mut().poll(cx).is_ready(),
"connection close header should override keep-alive setting",
);
// polls: initial => manual => follow-up req => shutdown
assert_eq!(h1.poll_count, 4);
if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() {
// connection closed
assert!(inner.flags.contains(Flags::SHUTDOWN));
assert!(!inner.flags.contains(Flags::WRITE_DISCONNECT));
}
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 4\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/efg\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn req_parse_err() {
lazy(|cx| {
let buf = TestBuffer::new("GET /test HTTP/1\r\n\r\n");
let services = HttpFlow::new(ok_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
ServiceConfig::default(),
None,
OnConnectData::default(),
);
pin!(h1);
match h1.as_mut().poll(cx) {
Poll::Pending => panic!(),
Poll::Ready(res) => assert!(res.is_err()),
}
if let DispatcherStateProj::Normal { inner } = h1.project().inner.project() {
assert!(inner.flags.contains(Flags::READ_DISCONNECT));
assert_eq!(
&buf.write_buf_slice()[..26],
b"HTTP/1.1 400 Bad Request\r\n"
);
}
})
.await;
}
#[actix_rt::test]
async fn pipelining_ok_then_ok() {
lazy(|cx| {
let buf = TestBuffer::new(
"\
GET /abcd HTTP/1.1\r\n\r\n\
GET /def HTTP/1.1\r\n\r\n\
",
);
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(1),
Duration::from_millis(1),
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_ok()),
}
// polls: initial => shutdown
assert_eq!(h1.poll_count, 2);
let mut res = buf.write_buf_slice_mut();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
HTTP/1.1 200 OK\r\n\
content-length: 4\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/def\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn pipelining_ok_then_bad() {
lazy(|cx| {
let buf = TestBuffer::new(
"\
GET /abcd HTTP/1.1\r\n\r\n\
GET /def HTTP/1\r\n\r\n\
",
);
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(1),
Duration::from_millis(1),
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_err()),
}
// polls: initial => shutdown
assert_eq!(h1.poll_count, 1);
let mut res = buf.write_buf_slice_mut();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
HTTP/1.1 400 Bad Request\r\n\
content-length: 0\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn expect_handling() {
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::ZERO,
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_payload_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
buf.extend_read_buf(
"\
POST /upload HTTP/1.1\r\n\
Content-Length: 5\r\n\
Expect: 100-continue\r\n\
\r\n\
",
);
pin!(h1);
assert!(h1.as_mut().poll(cx).is_pending());
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
// polls: manual
assert_eq!(h1.poll_count, 1);
if let DispatcherState::Normal { ref inner } = h1.inner {
let io = inner.io.as_ref().unwrap();
let res = &io.write_buf()[..];
assert_eq!(
str::from_utf8(res).unwrap(),
"HTTP/1.1 100 Continue\r\n\r\n"
);
}
buf.extend_read_buf("12345");
assert!(h1.as_mut().poll(cx).is_ready());
// polls: manual manual shutdown
assert_eq!(h1.poll_count, 3);
if let DispatcherState::Normal { ref inner } = h1.inner {
let io = inner.io.as_ref().unwrap();
let mut res = (&io.write_buf()[..]).to_owned();
stabilize_date_header(&mut res);
assert_eq!(
str::from_utf8(&res).unwrap(),
"\
HTTP/1.1 100 Continue\r\n\
\r\n\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\
\r\n\
12345\
"
);
}
})
.await;
}
#[actix_rt::test]
async fn expect_eager() {
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::ZERO,
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
buf.extend_read_buf(
"\
POST /upload HTTP/1.1\r\n\
Content-Length: 5\r\n\
Expect: 100-continue\r\n\
\r\n\
",
);
pin!(h1);
assert!(h1.as_mut().poll(cx).is_ready());
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
// polls: manual shutdown
assert_eq!(h1.poll_count, 2);
if let DispatcherState::Normal { ref inner } = h1.inner {
let io = inner.io.as_ref().unwrap();
let mut res = (&io.write_buf()[..]).to_owned();
stabilize_date_header(&mut res);
// Despite the content-length header and even though the request payload has not
// been sent, this test expects a complete service response since the payload
// is not used at all. The service passed to dispatcher is path echo and doesn't
// consume payload bytes.
assert_eq!(
str::from_utf8(&res).unwrap(),
"\
HTTP/1.1 100 Continue\r\n\
\r\n\
HTTP/1.1 200 OK\r\n\
content-length: 7\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\
\r\n\
/upload\
"
);
}
})
.await;
}
#[actix_rt::test]
async fn upgrade_handling() {
struct TestUpgrade;
impl<T> Service<(Request, Framed<T, Codec>)> for TestUpgrade {
type Response = ();
type Error = Error;
type Future = Ready<Result<Self::Response, Self::Error>>;
actix_service::always_ready!();
fn call(&self, (req, _framed): (Request, Framed<T, Codec>)) -> Self::Future {
assert_eq!(req.method(), Method::GET);
assert!(req.upgrade());
assert_eq!(req.headers().get("upgrade").unwrap(), "websocket");
ready(Ok(()))
}
}
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::ZERO,
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(ok_service(), ExpectHandler, Some(TestUpgrade));
let h1 = Dispatcher::<_, _, _, _, TestUpgrade>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
buf.extend_read_buf(
"\
GET /ws HTTP/1.1\r\n\
Connection: Upgrade\r\n\
Upgrade: websocket\r\n\
\r\n\
",
);
pin!(h1);
assert!(h1.as_mut().poll(cx).is_ready());
assert!(matches!(&h1.inner, DispatcherState::Upgrade { .. }));
// polls: manual shutdown
assert_eq!(h1.poll_count, 2);
})
.await;
}
#[actix_rt::test]
async fn handler_drop_payload() {
let _ = env_logger::try_init();
let mut buf = TestBuffer::new(http_msg(
r"
POST /drop-payload HTTP/1.1
Content-Length: 3
abc
",
));
let services = HttpFlow::new(
drop_payload_service(),
ExpectHandler,
None::<UpgradeHandler>,
);
let h1 = Dispatcher::new(
buf.clone(),
services,
ServiceConfig::default(),
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(h1.as_mut().poll(cx).is_pending());
// polls: manual
assert_eq!(h1.poll_count, 1);
let mut res = BytesMut::from(buf.take_write_buf().as_ref());
stabilize_date_header(&mut res);
let res = &res[..];
let exp = http_msg(
r"
HTTP/1.1 200 OK
content-length: 15
date: Thu, 01 Jan 1970 12:34:56 UTC
payload dropped
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() {
assert!(inner.state.is_none());
}
})
.await;
lazy(|cx| {
// add message that claims to have payload longer than provided
buf.extend_read_buf(http_msg(
r"
POST /drop-payload HTTP/1.1
Content-Length: 200
abc
",
));
assert!(h1.as_mut().poll(cx).is_pending());
// polls: manual => manual
assert_eq!(h1.poll_count, 2);
let mut res = BytesMut::from(buf.take_write_buf().as_ref());
stabilize_date_header(&mut res);
let res = &res[..];
// expect response immediately even though request side has not finished reading payload
let exp = http_msg(
r"
HTTP/1.1 200 OK
content-length: 15
date: Thu, 01 Jan 1970 12:34:56 UTC
payload dropped
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
})
.await;
lazy(|cx| {
assert!(h1.as_mut().poll(cx).is_ready());
// polls: manual => manual => manual
assert_eq!(h1.poll_count, 3);
let mut res = BytesMut::from(buf.take_write_buf().as_ref());
stabilize_date_header(&mut res);
let res = &res[..];
// expect that unrequested error response is sent back since connection could not be cleaned
let exp = http_msg(
r"
HTTP/1.1 500 Internal Server Error
content-length: 0
connection: close
date: Thu, 01 Jan 1970 12:34:56 UTC
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
})
.await;
}
fn http_msg(msg: impl AsRef<str>) -> BytesMut {
let mut msg = msg
.as_ref()
.trim()
.split('\n')
.into_iter()
.map(|line| [line.trim_start(), "\r"].concat())
.collect::<Vec<_>>()
.join("\n");
// remove trailing \r
msg.pop();
if !msg.is_empty() && !msg.contains("\r\n\r\n") {
msg.push_str("\r\n\r\n");
}
BytesMut::from(msg.as_bytes())
}
#[test]
fn http_msg_creates_msg() {
assert_eq!(http_msg(r""), "");
assert_eq!(
http_msg(
r"
POST / HTTP/1.1
Content-Length: 3
abc
"
),
"POST / HTTP/1.1\r\nContent-Length: 3\r\n\r\nabc"
);
assert_eq!(
http_msg(
r"
GET / HTTP/1.1
Content-Length: 3
"
),
"GET / HTTP/1.1\r\nContent-Length: 3\r\n\r\n"
);
}


@@ -1,19 +1,19 @@
use std::{ use std::io::Write;
cmp, use std::marker::PhantomData;
io::{self, Write as _}, use std::ptr::copy_nonoverlapping;
marker::PhantomData, use std::slice::from_raw_parts_mut;
ptr::copy_nonoverlapping, use std::{cmp, io};
slice::from_raw_parts_mut,
};
use bytes::{BufMut, BytesMut}; use bytes::{BufMut, BytesMut};
use crate::{ use crate::{
body::BodySize, body::BodySize,
header::{ config::ServiceConfig,
map::Value, HeaderMap, HeaderName, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING, header::{map::Value, HeaderMap, HeaderName},
}, header::{CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING},
helpers, ConnectionType, RequestHeadType, Response, ServiceConfig, StatusCode, Version, helpers,
message::{ConnectionType, RequestHeadType},
Response, StatusCode, Version,
}; };
const AVERAGE_HEADER_SIZE: usize = 30; const AVERAGE_HEADER_SIZE: usize = 30;
@@ -105,7 +105,7 @@ pub(crate) trait MessageType: Sized {
} }
BodySize::Sized(0) if camel_case => dst.put_slice(b"\r\nContent-Length: 0\r\n"), BodySize::Sized(0) if camel_case => dst.put_slice(b"\r\nContent-Length: 0\r\n"),
BodySize::Sized(0) => dst.put_slice(b"\r\ncontent-length: 0\r\n"), BodySize::Sized(0) => dst.put_slice(b"\r\ncontent-length: 0\r\n"),
BodySize::Sized(len) => helpers::write_content_length(len, dst, camel_case), BodySize::Sized(len) => helpers::write_content_length(len, dst),
BodySize::None => dst.put_slice(b"\r\n"), BodySize::None => dst.put_slice(b"\r\n"),
} }
@@ -152,6 +152,7 @@ pub(crate) trait MessageType: Sized {
let k = key.as_str().as_bytes(); let k = key.as_str().as_bytes();
let k_len = k.len(); let k_len = k.len();
// TODO: drain?
for val in value.iter() { for val in value.iter() {
let v = val.as_ref(); let v = val.as_ref();
let v_len = v.len(); let v_len = v.len();
@@ -210,14 +211,14 @@ pub(crate) trait MessageType: Sized {
dst.advance_mut(pos); dst.advance_mut(pos);
} }
// optimized date header, set_date writes \r\n
if !has_date { if !has_date {
// optimized date header, write_date_header writes its own \r\n config.set_date(dst);
config.write_date_header(dst, camel_case); } else {
// msg eof
dst.extend_from_slice(b"\r\n");
} }
// end-of-headers marker
dst.extend_from_slice(b"\r\n");
Ok(()) Ok(())
} }
@@ -257,12 +258,6 @@ impl MessageType for Response<()> {
None None
} }
fn camel_case(&self) -> bool {
self.head()
.flags
.contains(crate::message::Flags::CAMEL_CASE)
}
fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()> { fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()> {
let head = self.head(); let head = self.head();
let reason = head.reason().as_bytes(); let reason = head.reason().as_bytes();
@@ -318,17 +313,16 @@ impl MessageType for RequestHeadType {
} }
impl<T: MessageType> MessageEncoder<T> { impl<T: MessageType> MessageEncoder<T> {
/// Encode chunk. /// Encode message
pub fn encode_chunk(&mut self, msg: &[u8], buf: &mut BytesMut) -> io::Result<bool> { pub fn encode_chunk(&mut self, msg: &[u8], buf: &mut BytesMut) -> io::Result<bool> {
self.te.encode(msg, buf) self.te.encode(msg, buf)
} }
/// Encode EOF. /// Encode eof
pub fn encode_eof(&mut self, buf: &mut BytesMut) -> io::Result<()> { pub fn encode_eof(&mut self, buf: &mut BytesMut) -> io::Result<()> {
self.te.encode_eof(buf) self.te.encode_eof(buf)
} }
/// Encode message.
pub fn encode( pub fn encode(
&mut self, &mut self,
dst: &mut BytesMut, dst: &mut BytesMut,


@@ -1,7 +1,8 @@
use actix_service::{Service, ServiceFactory}; use actix_service::{Service, ServiceFactory};
use actix_utils::future::{ready, Ready}; use actix_utils::future::{ready, Ready};
use crate::{Error, Request}; use crate::error::Error;
use crate::request::Request;
pub struct ExpectHandler; pub struct ExpectHandler;


@@ -7,13 +7,10 @@ mod client;
mod codec; mod codec;
mod decoder; mod decoder;
mod dispatcher; mod dispatcher;
#[cfg(test)]
mod dispatcher_tests;
mod encoder; mod encoder;
mod expect; mod expect;
mod payload; mod payload;
mod service; mod service;
mod timer;
mod upgrade; mod upgrade;
mod utils; mod utils;
@@ -29,10 +26,9 @@ pub use self::utils::SendResponse;
#[derive(Debug)] #[derive(Debug)]
/// Codec message /// Codec message
pub enum Message<T> { pub enum Message<T> {
/// HTTP message. /// Http message
Item(T), Item(T),
/// Payload chunk
/// Payload chunk.
Chunk(Option<Bytes>), Chunk(Option<Bytes>),
} }
@@ -63,7 +59,7 @@ pub(crate) fn reserve_readbuf(src: &mut BytesMut) {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::Request; use crate::request::Request;
impl Message<Request> { impl Message<Request> {
pub fn message(self) -> Request { pub fn message(self) -> Request {


@@ -1,12 +1,9 @@
//! Payload stream //! Payload stream
use std::cell::RefCell;
use std::{ use std::collections::VecDeque;
cell::RefCell, use std::pin::Pin;
collections::VecDeque, use std::rc::{Rc, Weak};
pin::Pin, use std::task::{Context, Poll, Waker};
rc::{Rc, Weak},
task::{Context, Poll, Waker},
};
use bytes::Bytes; use bytes::Bytes;
use futures_core::Stream; use futures_core::Stream;
@@ -25,32 +22,39 @@ pub enum PayloadStatus {
/// Buffered stream of bytes chunks /// Buffered stream of bytes chunks
/// ///
/// Payload stores chunks in a vector. First chunk can be received with `poll_next`. Payload does /// Payload stores chunks in a vector. First chunk can be received with
/// not notify current task when new data is available. /// `.readany()` method. Payload stream is not thread safe. Payload does not
/// notify current task when new data is available.
/// ///
/// Payload can be used as `Response` body stream. /// Payload stream can be used as `Response` body stream.
#[derive(Debug)] #[derive(Debug)]
pub struct Payload { pub struct Payload {
inner: Rc<RefCell<Inner>>, inner: Rc<RefCell<Inner>>,
} }
impl Payload { impl Payload {
/// Creates a payload stream. /// Create payload stream.
/// ///
/// This method construct two objects responsible for bytes stream generation: /// This method construct two objects responsible for bytes stream
/// - `PayloadSender` - *Sender* side of the stream /// generation.
/// - `Payload` - *Receiver* side of the stream ///
/// * `PayloadSender` - *Sender* side of the stream
///
/// * `Payload` - *Receiver* side of the stream
pub fn create(eof: bool) -> (PayloadSender, Payload) { pub fn create(eof: bool) -> (PayloadSender, Payload) {
let shared = Rc::new(RefCell::new(Inner::new(eof))); let shared = Rc::new(RefCell::new(Inner::new(eof)));
( (
PayloadSender::new(Rc::downgrade(&shared)), PayloadSender {
inner: Rc::downgrade(&shared),
},
Payload { inner: shared }, Payload { inner: shared },
) )
} }
/// Creates an empty payload. /// Create empty payload
pub(crate) fn empty() -> Payload { #[doc(hidden)]
pub fn empty() -> Payload {
Payload { Payload {
inner: Rc::new(RefCell::new(Inner::new(true))), inner: Rc::new(RefCell::new(Inner::new(true))),
} }
@@ -73,6 +77,14 @@ impl Payload {
pub fn unread_data(&mut self, data: Bytes) { pub fn unread_data(&mut self, data: Bytes) {
self.inner.borrow_mut().unread_data(data); self.inner.borrow_mut().unread_data(data);
} }
#[inline]
pub fn readany(
&mut self,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> {
self.inner.borrow_mut().readany(cx)
}
} }
impl Stream for Payload { impl Stream for Payload {
@@ -82,7 +94,7 @@ impl Stream for Payload {
self: Pin<&mut Self>, self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> { ) -> Poll<Option<Result<Bytes, PayloadError>>> {
Pin::new(&mut *self.inner.borrow_mut()).poll_next(cx) self.inner.borrow_mut().readany(cx)
} }
} }
@@ -92,10 +104,6 @@ pub struct PayloadSender {
} }
impl PayloadSender { impl PayloadSender {
fn new(inner: Weak<RefCell<Inner>>) -> Self {
Self { inner }
}
#[inline] #[inline]
pub fn set_error(&mut self, err: PayloadError) { pub fn set_error(&mut self, err: PayloadError) {
if let Some(shared) = self.inner.upgrade() { if let Some(shared) = self.inner.upgrade() {
@@ -219,10 +227,7 @@ impl Inner {
self.len self.len
} }
fn poll_next( fn readany(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, PayloadError>>> {
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> {
if let Some(data) = self.items.pop_front() { if let Some(data) = self.items.pop_front() {
self.len -= data.len(); self.len -= data.len();
self.need_read = self.len < MAX_BUFFER_SIZE; self.need_read = self.len < MAX_BUFFER_SIZE;
@@ -252,18 +257,8 @@ impl Inner {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::panic::{RefUnwindSafe, UnwindSafe};
use actix_utils::future::poll_fn;
use static_assertions::{assert_impl_all, assert_not_impl_any};
use super::*; use super::*;
use actix_utils::future::poll_fn;
assert_impl_all!(Payload: Unpin);
assert_not_impl_any!(Payload: Send, Sync, UnwindSafe, RefUnwindSafe);
assert_impl_all!(Inner: Unpin, Send, Sync);
assert_not_impl_any!(Inner: UnwindSafe, RefUnwindSafe);
#[actix_rt::test] #[actix_rt::test]
async fn test_unread_data() { async fn test_unread_data() {
@@ -275,10 +270,7 @@ mod tests {
assert_eq!( assert_eq!(
Bytes::from("data"), Bytes::from("data"),
poll_fn(|cx| Pin::new(&mut payload).poll_next(cx)) poll_fn(|cx| payload.readany(cx)).await.unwrap().unwrap()
.await
.unwrap()
.unwrap()
); );
} }
} }
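
The payload.rs hunks above all revolve around one pattern: a single-threaded channel in which `PayloadSender` pushes chunks into a shared buffer and `Payload` drains it as a stream, with the sender holding only a `Weak` reference so feeding stops once the receiver is dropped. A minimal standalone sketch of that pattern using `Rc<RefCell<…>>` (illustrative only, without the waker bookkeeping and not the actix-http types):

```rust
use std::{
    cell::RefCell,
    collections::VecDeque,
    rc::{Rc, Weak},
};

/// Shared buffer between the sending and receiving halves.
#[derive(Default)]
struct Inner {
    items: VecDeque<Vec<u8>>,
    eof: bool,
}

struct Sender {
    inner: Weak<RefCell<Inner>>,
}

struct Receiver {
    inner: Rc<RefCell<Inner>>,
}

fn channel() -> (Sender, Receiver) {
    let shared = Rc::new(RefCell::new(Inner::default()));
    (
        Sender { inner: Rc::downgrade(&shared) },
        Receiver { inner: shared },
    )
}

impl Sender {
    /// Feed a chunk; silently dropped if the receiver is gone (the Weak upgrade fails).
    fn feed(&self, chunk: &[u8]) {
        if let Some(shared) = self.inner.upgrade() {
            shared.borrow_mut().items.push_back(chunk.to_vec());
        }
    }

    fn feed_eof(&self) {
        if let Some(shared) = self.inner.upgrade() {
            shared.borrow_mut().eof = true;
        }
    }
}

impl Receiver {
    /// `None` means "no data yet"; `Some(None)` means end of stream.
    fn poll_chunk(&self) -> Option<Option<Vec<u8>>> {
        let mut inner = self.inner.borrow_mut();
        match inner.items.pop_front() {
            Some(chunk) => Some(Some(chunk)),
            None if inner.eof => Some(None),
            None => None,
        }
    }
}

fn main() {
    let (tx, rx) = channel();
    tx.feed(b"hello");
    tx.feed_eof();
    assert_eq!(rx.poll_chunk(), Some(Some(b"hello".to_vec())));
    assert_eq!(rx.poll_chunk(), Some(None));
}
```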


@@ -356,9 +356,9 @@ where
type Future = Dispatcher<T, S, B, X, U>; type Future = Dispatcher<T, S, B, X, U>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self._poll_ready(cx).map_err(|err| { self._poll_ready(cx).map_err(|e| {
log::error!("HTTP/1 service readiness error: {:?}", err); log::error!("HTTP/1 service readiness error: {:?}", e);
DispatchError::Service(err) DispatchError::Service(e)
}) })
} }


@@ -1,80 +0,0 @@
use std::{fmt, future::Future, pin::Pin, task::Context};
use actix_rt::time::{Instant, Sleep};
#[derive(Debug)]
pub(super) enum TimerState {
Disabled,
Inactive,
Active { timer: Pin<Box<Sleep>> },
}
impl TimerState {
pub(super) fn new(enabled: bool) -> Self {
if enabled {
Self::Inactive
} else {
Self::Disabled
}
}
pub(super) fn is_enabled(&self) -> bool {
matches!(self, Self::Active { .. } | Self::Inactive)
}
pub(super) fn set(&mut self, timer: Sleep, line: u32) {
if matches!(self, Self::Disabled) {
log::trace!("setting disabled timer from line {}", line);
}
*self = Self::Active {
timer: Box::pin(timer),
};
}
pub(super) fn set_and_init(&mut self, cx: &mut Context<'_>, timer: Sleep, line: u32) {
self.set(timer, line);
self.init(cx);
}
pub(super) fn clear(&mut self, line: u32) {
if matches!(self, Self::Disabled) {
log::trace!("trying to clear a disabled timer from line {}", line);
}
if matches!(self, Self::Inactive) {
log::trace!("trying to clear an inactive timer from line {}", line);
}
*self = Self::Inactive;
}
pub(super) fn init(&mut self, cx: &mut Context<'_>) {
if let TimerState::Active { timer } = self {
let _ = timer.as_mut().poll(cx);
}
}
}
impl fmt::Display for TimerState {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
TimerState::Disabled => f.write_str("timer is disabled"),
TimerState::Inactive => f.write_str("timer is inactive"),
TimerState::Active { timer } => {
let deadline = timer.deadline();
let now = Instant::now();
if deadline < now {
f.write_str("timer is active and has reached deadline")
} else {
write!(
f,
"timer is active and due to expire in {} milliseconds",
((deadline - now).as_secs_f32() * 1000.0)
)
}
}
}
}
}


@@ -2,7 +2,9 @@ use actix_codec::Framed;
use actix_service::{Service, ServiceFactory}; use actix_service::{Service, ServiceFactory};
use futures_core::future::LocalBoxFuture; use futures_core::future::LocalBoxFuture;
use crate::{h1::Codec, Error, Request}; use crate::error::Error;
use crate::h1::Codec;
use crate::request::Request;
pub struct UpgradeHandler; pub struct UpgradeHandler;


@@ -9,8 +9,9 @@ use pin_project_lite::pin_project;
use crate::{ use crate::{
body::{BodySize, MessageBody}, body::{BodySize, MessageBody},
error::Error,
h1::{Codec, Message}, h1::{Codec, Message},
Error, Response, response::Response,
}; };
pin_project! { pin_project! {
@@ -45,7 +46,7 @@ where
impl<T, B> Future for SendResponse<T, B> impl<T, B> Future for SendResponse<T, B>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
B: MessageBody, B: MessageBody + Unpin,
B::Error: Into<Error>, B::Error: Into<Error>,
{ {
type Output = Result<Framed<T, Codec>, Error>; type Output = Result<Framed<T, Codec>, Error>;
@@ -81,7 +82,7 @@ where
// body is done when item is None // body is done when item is None
body_done = item.is_none(); body_done = item.is_none();
if body_done { if body_done {
this.body.set(None); let _ = this.body.take();
} }
let framed = this.framed.as_mut().as_pin_mut().unwrap(); let framed = this.framed.as_mut().as_pin_mut().unwrap();
framed framed


@@ -25,9 +25,7 @@ use pin_project_lite::pin_project;
use crate::{ use crate::{
body::{BodySize, BoxBody, MessageBody}, body::{BodySize, BoxBody, MessageBody},
config::ServiceConfig, config::ServiceConfig,
header::{ header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING},
HeaderName, HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING, UPGRADE,
},
service::HttpFlow, service::HttpFlow,
Extensions, OnConnectData, Payload, Request, Response, ResponseHead, Extensions, OnConnectData, Payload, Request, Response, ResponseHead,
}; };
@@ -59,11 +57,11 @@ where
conn_data: OnConnectData, conn_data: OnConnectData,
timer: Option<Pin<Box<Sleep>>>, timer: Option<Pin<Box<Sleep>>>,
) -> Self { ) -> Self {
let ping_pong = config.keep_alive().duration().map(|dur| H2PingPong { let ping_pong = config.keep_alive().map(|dur| H2PingPong {
timer: timer timer: timer
.map(|mut timer| { .map(|mut timer| {
// reuse timer slot if it was initialized for handshake // reset timer if it's received from new function.
timer.as_mut().reset((config.now() + dur).into()); timer.as_mut().reset(config.now() + dur);
timer timer
}) })
.unwrap_or_else(|| Box::pin(sleep(dur))), .unwrap_or_else(|| Box::pin(sleep(dur))),
@@ -110,8 +108,8 @@ where
match Pin::new(&mut this.connection).poll_accept(cx)? { match Pin::new(&mut this.connection).poll_accept(cx)? {
Poll::Ready(Some((req, tx))) => { Poll::Ready(Some((req, tx))) => {
let (parts, body) = req.into_parts(); let (parts, body) = req.into_parts();
let payload = crate::h2::Payload::new(body); let pl = crate::h2::Payload::new(body);
let pl = Payload::H2 { payload }; let pl = Payload::H2(pl);
let mut req = Request::with_payload(pl); let mut req = Request::with_payload(pl);
let head = req.head_mut(); let head = req.head_mut();
@@ -143,7 +141,7 @@ where
DispatchError::SendResponse(err) => { DispatchError::SendResponse(err) => {
trace!("Error sending HTTP/2 response: {:?}", err) trace!("Error sending HTTP/2 response: {:?}", err)
} }
DispatchError::SendData(err) => log::warn!("{:?}", err), DispatchError::SendData(err) => warn!("{:?}", err),
DispatchError::ResponseBody(err) => { DispatchError::ResponseBody(err) => {
error!("Response payload stream error: {:?}", err) error!("Response payload stream error: {:?}", err)
} }
@@ -162,8 +160,8 @@ where
Poll::Ready(_) => { Poll::Ready(_) => {
ping_pong.on_flight = false; ping_pong.on_flight = false;
let dead_line = this.config.keep_alive_deadline().unwrap(); let dead_line = this.config.keep_alive_expire().unwrap();
ping_pong.timer.as_mut().reset(dead_line.into()); ping_pong.timer.as_mut().reset(dead_line);
} }
Poll::Pending => { Poll::Pending => {
return ping_pong.timer.as_mut().poll(cx).map(|_| Ok(())) return ping_pong.timer.as_mut().poll(cx).map(|_| Ok(()))
@@ -176,8 +174,8 @@ where
ping_pong.ping_pong.send_ping(Ping::opaque())?; ping_pong.ping_pong.send_ping(Ping::opaque())?;
let dead_line = this.config.keep_alive_deadline().unwrap(); let dead_line = this.config.keep_alive_expire().unwrap();
ping_pong.timer.as_mut().reset(dead_line.into()); ping_pong.timer.as_mut().reset(dead_line);
ping_pong.on_flight = true; ping_pong.on_flight = true;
} }
@@ -290,11 +288,9 @@ fn prepare_response(
let _ = match size { let _ = match size {
BodySize::None | BodySize::Stream => None, BodySize::None | BodySize::Stream => None,
BodySize::Sized(0) => { BodySize::Sized(0) => res
#[allow(clippy::declare_interior_mutable_const)] .headers_mut()
const HV_ZERO: HeaderValue = HeaderValue::from_static("0"); .insert(CONTENT_LENGTH, HeaderValue::from_static("0")),
res.headers_mut().insert(CONTENT_LENGTH, HV_ZERO)
}
BodySize::Sized(len) => { BodySize::Sized(len) => {
let mut buf = itoa::Buffer::new(); let mut buf = itoa::Buffer::new();
@@ -308,22 +304,13 @@ fn prepare_response(
// copy headers // copy headers
for (key, value) in head.headers.iter() { for (key, value) in head.headers.iter() {
match key { match *key {
// omit HTTP/1.x only headers according to: // TODO: consider skipping other headers according to:
// https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2 // https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2
&CONNECTION | &TRANSFER_ENCODING | &UPGRADE => continue, // omit HTTP/1.x only headers
CONNECTION | TRANSFER_ENCODING => continue,
&CONTENT_LENGTH if skip_len => continue, CONTENT_LENGTH if skip_len => continue,
&DATE => has_date = true, DATE => has_date = true,
// omit HTTP/1.x only headers according to:
// https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2
hdr if hdr == HeaderName::from_static("keep-alive")
|| hdr == HeaderName::from_static("proxy-connection") =>
{
continue
}
_ => {} _ => {}
} }
@@ -333,7 +320,7 @@ fn prepare_response(
// set date header // set date header
if !has_date { if !has_date {
let mut bytes = BytesMut::with_capacity(29); let mut bytes = BytesMut::with_capacity(29);
config.write_date_header_value(&mut bytes); config.set_date_header(&mut bytes);
res.headers_mut().insert( res.headers_mut().insert(
DATE, DATE,
// SAFETY: serialized date-times are known ASCII strings // SAFETY: serialized date-times are known ASCII strings
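
The `prepare_response` hunk above widens the set of hop-by-hop headers dropped when a response head is translated onto an HTTP/2 stream, since RFC 7540 §8.1.2.2 forbids connection-specific header fields. A standalone sketch of that filtering step using plain string names (illustrative shape only, not the exact actix-http code):

```rust
/// Header names that only make sense on an HTTP/1.x connection and must not be
/// forwarded on an HTTP/2 stream (RFC 7540 §8.1.2.2).
fn is_connection_specific(name: &str) -> bool {
    matches!(
        name.to_ascii_lowercase().as_str(),
        "connection" | "transfer-encoding" | "upgrade" | "keep-alive" | "proxy-connection"
    )
}

fn main() {
    let h1_headers = [
        ("connection", "keep-alive"),
        ("keep-alive", "timeout=5"),
        ("content-type", "text/plain"),
        ("date", "Thu, 01 Jan 1970 12:34:56 UTC"),
    ];

    // only headers that are not connection-specific survive the HTTP/1 -> HTTP/2 translation
    let forwarded: Vec<_> = h1_headers
        .iter()
        .filter(|(name, _)| !is_connection_specific(name))
        .collect();

    assert_eq!(forwarded.len(), 2);
}
```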


@@ -7,7 +7,7 @@ use std::{
}; };
use actix_codec::{AsyncRead, AsyncWrite}; use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::time::{sleep_until, Sleep}; use actix_rt::time::Sleep;
use bytes::Bytes; use bytes::Bytes;
use futures_core::{ready, Stream}; use futures_core::{ready, Stream};
use h2::{ use h2::{
@@ -15,17 +15,17 @@ use h2::{
RecvStream, RecvStream,
}; };
use crate::{
config::ServiceConfig,
error::{DispatchError, PayloadError},
};
mod dispatcher; mod dispatcher;
mod service; mod service;
pub use self::dispatcher::Dispatcher; pub use self::dispatcher::Dispatcher;
pub use self::service::H2Service; pub use self::service::H2Service;
use crate::{
config::ServiceConfig,
error::{DispatchError, PayloadError},
};
/// HTTP/2 peer stream. /// HTTP/2 peer stream.
pub struct Payload { pub struct Payload {
stream: RecvStream, stream: RecvStream,
@@ -67,9 +67,7 @@ where
{ {
HandshakeWithTimeout { HandshakeWithTimeout {
handshake: handshake(io), handshake: handshake(io),
timer: config timer: config.client_timer().map(Box::pin),
.client_request_deadline()
.map(|deadline| Box::pin(sleep_until(deadline.into()))),
} }
} }
@@ -88,7 +86,7 @@ where
let this = self.get_mut(); let this = self.get_mut();
match Pin::new(&mut this.handshake).poll(cx)? { match Pin::new(&mut this.handshake).poll(cx)? {
// return the timer on success handshake; its slot can be re-used for h2 ping-pong // return the timer on success handshake. It can be re-used for h2 ping-pong.
Poll::Ready(conn) => Poll::Ready(Ok((conn, this.timer.take()))), Poll::Ready(conn) => Poll::Ready(Ok((conn, this.timer.take()))),
Poll::Pending => match this.timer.as_mut() { Poll::Pending => match this.timer.as_mut() {
Some(timer) => { Some(timer) => {
@@ -100,14 +98,3 @@ where
} }
} }
} }
#[cfg(test)]
mod tests {
use std::panic::{RefUnwindSafe, UnwindSafe};
use static_assertions::assert_impl_all;
use super::*;
assert_impl_all!(Payload: Unpin, Send, Sync, UnwindSafe, RefUnwindSafe);
}
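
The `HandshakeWithTimeout` future above races the HTTP/2 handshake against an optional client-request deadline and hands the timer back on success so its slot can be reused for ping-pong. A rough standalone equivalent of the racing part, sketched with `tokio` (assumed as a dependency; actix-rt's timer is tokio-based), with a stub standing in for the `h2` crate's server handshake:

```rust
use std::time::Duration;

use tokio::time::timeout;

/// Stand-in for the h2 server handshake future used in the real code.
async fn fake_handshake() -> Result<&'static str, &'static str> {
    Ok("connection")
}

#[tokio::main(flavor = "current_thread")]
async fn main() {
    // With no deadline configured the handshake is awaited directly; with one,
    // it is raced against a sleep, mirroring HandshakeWithTimeout's poll loop.
    let deadline = Some(Duration::from_secs(5));

    let conn = match deadline {
        Some(dur) => timeout(dur, fake_handshake())
            .await
            .expect("handshake timed out"),
        None => fake_handshake().await,
    };

    assert_eq!(conn, Ok("connection"));
}
```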


@@ -355,7 +355,7 @@ where
} }
Err(err) => { Err(err) => {
log::trace!("H2 handshake error: {}", err); trace!("H2 handshake error: {}", err);
Poll::Ready(Err(err)) Poll::Ready(Err(err))
} }
}, },


@@ -6,7 +6,7 @@ use ahash::AHashMap;
use http::header::{HeaderName, HeaderValue}; use http::header::{HeaderName, HeaderValue};
use smallvec::{smallvec, SmallVec}; use smallvec::{smallvec, SmallVec};
use super::AsHeaderName; use crate::header::AsHeaderName;
/// A multi-map of HTTP headers. /// A multi-map of HTTP headers.
/// ///
@@ -306,11 +306,8 @@ impl HeaderMap {
/// assert_eq!(set_cookies_iter.next().unwrap(), "two=2"); /// assert_eq!(set_cookies_iter.next().unwrap(), "two=2");
/// assert!(set_cookies_iter.next().is_none()); /// assert!(set_cookies_iter.next().is_none());
/// ``` /// ```
pub fn get_all(&self, key: impl AsHeaderName) -> std::slice::Iter<'_, HeaderValue> { pub fn get_all(&self, key: impl AsHeaderName) -> GetAll<'_> {
match self.get_value(key) { GetAll::new(self.get_value(key))
Some(value) => value.iter(),
None => (&[]).iter(),
}
} }
// TODO: get_all_mut ? // TODO: get_all_mut ?
@@ -605,13 +602,52 @@ impl<'a> IntoIterator for &'a HeaderMap {
} }
} }
/// Convert `http::HeaderMap` to our `HeaderMap`. /// Iterator over borrowed values with the same associated name.
impl From<http::HeaderMap> for HeaderMap { ///
fn from(mut map: http::HeaderMap) -> HeaderMap { /// See [`HeaderMap::get_all`].
HeaderMap::from_drain(map.drain()) #[derive(Debug)]
pub struct GetAll<'a> {
idx: usize,
value: Option<&'a Value>,
}
impl<'a> GetAll<'a> {
fn new(value: Option<&'a Value>) -> Self {
Self { idx: 0, value }
} }
} }
impl<'a> Iterator for GetAll<'a> {
type Item = &'a HeaderValue;
fn next(&mut self) -> Option<Self::Item> {
let val = self.value?;
match val.get(self.idx) {
Some(val) => {
self.idx += 1;
Some(val)
}
None => {
// current index is none; remove value to fast-path future next calls
self.value = None;
None
}
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
match self.value {
Some(val) => (val.len(), Some(val.len())),
None => (0, Some(0)),
}
}
}
impl ExactSizeIterator for GetAll<'_> {}
impl iter::FusedIterator for GetAll<'_> {}
/// Iterator over removed, owned values with the same associated name. /// Iterator over removed, owned values with the same associated name.
/// ///
/// Returned from methods that remove or replace items. See [`HeaderMap::insert`] /// Returned from methods that remove or replace items. See [`HeaderMap::insert`]
@@ -630,7 +666,7 @@ impl Removed {
/// Returns true if iterator contains no elements, without consuming it. /// Returns true if iterator contains no elements, without consuming it.
/// ///
/// If called immediately after [`HeaderMap::insert`] or [`HeaderMap::remove`], it will indicate /// If called immediately after [`HeaderMap::insert`] or [`HeaderMap::remove`], it will indicate
/// whether any items were actually replaced or removed, respectively. /// wether any items were actually replaced or removed, respectively.
pub fn is_empty(&self) -> bool { pub fn is_empty(&self) -> bool {
match self.inner { match self.inner {
// size hint lower bound of smallvec is the correct length // size hint lower bound of smallvec is the correct length
@@ -859,7 +895,7 @@ mod tests {
assert_impl_all!(HeaderMap: IntoIterator); assert_impl_all!(HeaderMap: IntoIterator);
assert_impl_all!(Keys<'_>: Iterator, ExactSizeIterator, FusedIterator); assert_impl_all!(Keys<'_>: Iterator, ExactSizeIterator, FusedIterator);
assert_impl_all!(std::slice::Iter<'_, HeaderValue>: Iterator, ExactSizeIterator, FusedIterator); assert_impl_all!(GetAll<'_>: Iterator, ExactSizeIterator, FusedIterator);
assert_impl_all!(Removed: Iterator, ExactSizeIterator, FusedIterator); assert_impl_all!(Removed: Iterator, ExactSizeIterator, FusedIterator);
assert_impl_all!(Iter<'_>: Iterator, ExactSizeIterator, FusedIterator); assert_impl_all!(Iter<'_>: Iterator, ExactSizeIterator, FusedIterator);
assert_impl_all!(IntoIter: Iterator, ExactSizeIterator, FusedIterator); assert_impl_all!(IntoIter: Iterator, ExactSizeIterator, FusedIterator);
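
One detail of the map.rs change above worth noting: the custom `GetAll` iterator can be replaced by `std::slice::Iter` because an empty slice yields an iterator of the exact same type, so the "key not found" case needs no separate iterator type. A tiny standalone illustration of that trick (generic container types, not the actual `HeaderMap` internals):

```rust
/// Returns an iterator over the values for a key, or an empty iterator of the
/// same type when the key is absent — no custom iterator type required.
fn values_for<'a>(found: Option<&'a Vec<String>>) -> std::slice::Iter<'a, String> {
    match found {
        Some(values) => values.iter(),
        // the empty array is promoted to a 'static borrow, so this arm unifies
        // with the borrowed arm above
        None => (&[]).iter(),
    }
}

fn main() {
    let stored = vec!["one=1".to_owned(), "two=2".to_owned()];

    assert_eq!(values_for(Some(&stored)).count(), 2);
    assert_eq!(values_for(None).count(), 0);
}
```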


@@ -50,13 +50,20 @@ pub use self::utils::{
/// An interface for types that already represent a valid header. /// An interface for types that already represent a valid header.
pub trait Header: TryIntoHeaderValue { pub trait Header: TryIntoHeaderValue {
/// Returns the name of the header field. /// Returns the name of the header field
fn name() -> HeaderName; fn name() -> HeaderName;
/// Parse the header from a HTTP message. /// Parse a header
fn parse<M: HttpMessage>(msg: &M) -> Result<Self, ParseError>; fn parse<M: HttpMessage>(msg: &M) -> Result<Self, ParseError>;
} }
/// Convert `http::HeaderMap` to our `HeaderMap`.
impl From<http::HeaderMap> for HeaderMap {
fn from(mut map: http::HeaderMap) -> HeaderMap {
HeaderMap::from_drain(map.drain())
}
}
/// This encode set is used for HTTP header values and is defined at /// This encode set is used for HTTP header values and is defined at
/// <https://datatracker.ietf.org/doc/html/rfc5987#section-3.2>. /// <https://datatracker.ietf.org/doc/html/rfc5987#section-3.2>.
pub(crate) const HTTP_VALUE: &AsciiSet = &CONTROLS pub(crate) const HTTP_VALUE: &AsciiSet = &CONTROLS


@@ -20,16 +20,14 @@ pub struct ContentEncodingParseError;
/// See [IANA HTTP Content Coding Registry]. /// See [IANA HTTP Content Coding Registry].
/// ///
/// [IANA HTTP Content Coding Registry]: https://www.iana.org/assignments/http-parameters/http-parameters.xhtml /// [IANA HTTP Content Coding Registry]: https://www.iana.org/assignments/http-parameters/http-parameters.xhtml
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] #[derive(Debug, Clone, Copy, PartialEq)]
#[non_exhaustive] #[non_exhaustive]
pub enum ContentEncoding { pub enum ContentEncoding {
/// Indicates the no-op identity encoding. /// Automatically select encoding based on encoding negotiation.
/// Auto,
/// I.e., no compression or modification.
Identity,
/// A format using the Brotli algorithm. /// A format using the Brotli algorithm.
Brotli, Br,
/// A format using the zlib structure with deflate algorithm. /// A format using the zlib structure with deflate algorithm.
Deflate, Deflate,
@@ -39,36 +37,32 @@ pub enum ContentEncoding {
/// Zstd algorithm. /// Zstd algorithm.
Zstd, Zstd,
/// Indicates the identity function (i.e. no compression, nor modification).
Identity,
} }
impl ContentEncoding { impl ContentEncoding {
/// Is the content compressed?
#[inline]
pub fn is_compression(self) -> bool {
matches!(self, ContentEncoding::Identity | ContentEncoding::Auto)
}
/// Convert content encoding to string. /// Convert content encoding to string.
#[inline] #[inline]
pub const fn as_str(self) -> &'static str { pub fn as_str(self) -> &'static str {
match self { match self {
ContentEncoding::Brotli => "br", ContentEncoding::Br => "br",
ContentEncoding::Gzip => "gzip", ContentEncoding::Gzip => "gzip",
ContentEncoding::Deflate => "deflate", ContentEncoding::Deflate => "deflate",
ContentEncoding::Zstd => "zstd", ContentEncoding::Zstd => "zstd",
ContentEncoding::Identity => "identity", ContentEncoding::Identity | ContentEncoding::Auto => "identity",
}
}
/// Convert content encoding to header value.
#[inline]
pub const fn to_header_value(self) -> HeaderValue {
match self {
ContentEncoding::Brotli => HeaderValue::from_static("br"),
ContentEncoding::Gzip => HeaderValue::from_static("gzip"),
ContentEncoding::Deflate => HeaderValue::from_static("deflate"),
ContentEncoding::Zstd => HeaderValue::from_static("zstd"),
ContentEncoding::Identity => HeaderValue::from_static("identity"),
} }
} }
} }
impl Default for ContentEncoding { impl Default for ContentEncoding {
#[inline]
fn default() -> Self { fn default() -> Self {
Self::Identity Self::Identity
} }
@@ -77,18 +71,16 @@ impl Default for ContentEncoding {
impl FromStr for ContentEncoding { impl FromStr for ContentEncoding {
type Err = ContentEncodingParseError; type Err = ContentEncodingParseError;
fn from_str(enc: &str) -> Result<Self, Self::Err> { fn from_str(val: &str) -> Result<Self, Self::Err> {
let enc = enc.trim(); let val = val.trim();
if enc.eq_ignore_ascii_case("br") { if val.eq_ignore_ascii_case("br") {
Ok(ContentEncoding::Brotli) Ok(ContentEncoding::Br)
} else if enc.eq_ignore_ascii_case("gzip") { } else if val.eq_ignore_ascii_case("gzip") {
Ok(ContentEncoding::Gzip) Ok(ContentEncoding::Gzip)
} else if enc.eq_ignore_ascii_case("deflate") { } else if val.eq_ignore_ascii_case("deflate") {
Ok(ContentEncoding::Deflate) Ok(ContentEncoding::Deflate)
} else if enc.eq_ignore_ascii_case("identity") { } else if val.eq_ignore_ascii_case("zstd") {
Ok(ContentEncoding::Identity)
} else if enc.eq_ignore_ascii_case("zstd") {
Ok(ContentEncoding::Zstd) Ok(ContentEncoding::Zstd)
} else { } else {
Err(ContentEncodingParseError) Err(ContentEncodingParseError)
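
A quick sketch of how these codings round-trip, assuming the `ContentEncoding` type on the new (left) side of this diff, where `Br` has been renamed to `Brotli`; parsing via `FromStr` trims whitespace and is case-insensitive:

```rust
use std::str::FromStr;

// Parse a coding token as it might appear in an Accept-Encoding header.
let enc = ContentEncoding::from_str(" BR ").unwrap();
assert_eq!(enc, ContentEncoding::Brotli);

// Serialize back out for a Content-Encoding header value.
assert_eq!(enc.as_str(), "br");

// Unknown tokens are rejected rather than silently mapped.
assert!("bogus".parse::<ContentEncoding>().is_err());
```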

View File

@@ -4,7 +4,8 @@ use bytes::BytesMut;
use http::header::{HeaderValue, InvalidHeaderValue}; use http::header::{HeaderValue, InvalidHeaderValue};
use crate::{ use crate::{
date::DATE_VALUE_LENGTH, error::ParseError, header::TryIntoHeaderValue, helpers::MutWriter, config::DATE_VALUE_LENGTH, error::ParseError, header::TryIntoHeaderValue,
helpers::MutWriter,
}; };
/// A timestamp with HTTP-style formatting and parsing. /// A timestamp with HTTP-style formatting and parsing.

View File

@@ -27,8 +27,7 @@ const MAX_QUALITY_FLOAT: f32 = 1.0;
/// ///
/// assert_eq!(q(0.42).to_string(), "0.42"); /// assert_eq!(q(0.42).to_string(), "0.42");
/// assert_eq!(q(1.0).to_string(), "1"); /// assert_eq!(q(1.0).to_string(), "1");
/// assert_eq!(Quality::MIN.to_string(), "0.001"); /// assert_eq!(Quality::MIN.to_string(), "0");
/// assert_eq!(Quality::ZERO.to_string(), "0");
/// ``` /// ```
/// ///
/// [RFC 7231 §5.3.1]: https://datatracker.ietf.org/doc/html/rfc7231#section-5.3.1 /// [RFC 7231 §5.3.1]: https://datatracker.ietf.org/doc/html/rfc7231#section-5.3.1
@@ -39,11 +38,8 @@ impl Quality {
/// The maximum quality value, equivalent to `q=1.0`. /// The maximum quality value, equivalent to `q=1.0`.
pub const MAX: Quality = Quality(MAX_QUALITY_INT); pub const MAX: Quality = Quality(MAX_QUALITY_INT);
/// The minimum, non-zero quality value, equivalent to `q=0.001`. /// The minimum quality value, equivalent to `q=0.0`.
pub const MIN: Quality = Quality(1); pub const MIN: Quality = Quality(0);
/// The zero quality value, equivalent to `q=0.0`.
pub const ZERO: Quality = Quality(0);
/// Converts a float in the range 0.01.0 to a `Quality`. /// Converts a float in the range 0.01.0 to a `Quality`.
/// ///
@@ -55,7 +51,7 @@ impl Quality {
// Check that `value` is within range should be done before calling this method. // Check that `value` is within range should be done before calling this method.
// Just in case, this debug_assert should catch if we were forgetful. // Just in case, this debug_assert should catch if we were forgetful.
debug_assert!( debug_assert!(
(0.0..=MAX_QUALITY_FLOAT).contains(&value), (0.0f32..=1.0f32).contains(&value),
"q value must be between 0.0 and 1.0" "q value must be between 0.0 and 1.0"
); );
@@ -91,7 +87,7 @@ impl fmt::Display for Quality {
// 0 is already handled so it's not possible to have a trailing 0 in this range // 0 is already handled so it's not possible to have a trailing 0 in this range
// we can just write the integer // we can just write the integer
itoa_fmt(f, x) itoa::fmt(f, x)
} else if x < 100 { } else if x < 100 {
// x in is range 1099 // x in is range 1099
@@ -99,21 +95,21 @@ impl fmt::Display for Quality {
if x % 10 == 0 { if x % 10 == 0 {
// trailing 0, divide by 10 and write // trailing 0, divide by 10 and write
itoa_fmt(f, x / 10) itoa::fmt(f, x / 10)
} else { } else {
itoa_fmt(f, x) itoa::fmt(f, x)
} }
} else { } else {
// x is in range 100999 // x is in range 100999
if x % 100 == 0 { if x % 100 == 0 {
// two trailing 0s, divide by 100 and write // two trailing 0s, divide by 100 and write
itoa_fmt(f, x / 100) itoa::fmt(f, x / 100)
} else if x % 10 == 0 { } else if x % 10 == 0 {
// one trailing 0, divide by 10 and write // one trailing 0, divide by 10 and write
itoa_fmt(f, x / 10) itoa::fmt(f, x / 10)
} else { } else {
itoa_fmt(f, x) itoa::fmt(f, x)
} }
} }
} }
@@ -121,12 +117,6 @@ impl fmt::Display for Quality {
} }
} }
/// Write integer to a `fmt::Write`.
pub fn itoa_fmt<W: fmt::Write, V: itoa::Integer>(mut wr: W, value: V) -> fmt::Result {
let mut buf = itoa::Buffer::new();
wr.write_str(buf.format(value))
}
#[derive(Debug, Clone, Display, Error)] #[derive(Debug, Clone, Display, Error)]
#[display(fmt = "quality out of bounds")] #[display(fmt = "quality out of bounds")]
#[non_exhaustive] #[non_exhaustive]
@@ -158,13 +148,10 @@ impl TryFrom<f32> for Quality {
/// let q1 = q(1.0); /// let q1 = q(1.0);
/// assert_eq!(q1, Quality::MAX); /// assert_eq!(q1, Quality::MAX);
/// ///
/// let q2 = q(0.001); /// let q2 = q(0.0);
/// assert_eq!(q2, Quality::MIN); /// assert_eq!(q2, Quality::MIN);
/// ///
/// let q3 = q(0.0); /// let q3 = q(0.42);
/// assert_eq!(q3, Quality::ZERO);
///
/// let q4 = q(0.42);
/// ``` /// ```
/// ///
/// An out-of-range `f32` quality will panic. /// An out-of-range `f32` quality will panic.
@@ -192,10 +179,6 @@ mod tests {
#[test] #[test]
fn display_output() { fn display_output() {
assert_eq!(Quality::ZERO.to_string(), "0");
assert_eq!(Quality::MIN.to_string(), "0.001");
assert_eq!(Quality::MAX.to_string(), "1");
assert_eq!(q(0.0).to_string(), "0"); assert_eq!(q(0.0).to_string(), "0");
assert_eq!(q(1.0).to_string(), "1"); assert_eq!(q(1.0).to_string(), "1");
assert_eq!(q(0.001).to_string(), "0.001"); assert_eq!(q(0.001).to_string(), "0.001");
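
The trailing-zero trimming above can be summarized in a small standalone sketch (not the crate's API; `q_thousandths` is a hypothetical helper) that formats a q-value stored as integer thousandths the same way the `Display` impl does:

```rust
// q stored as integer thousandths in 0..=1000; trim trailing zeros.
fn q_thousandths(x: u16) -> String {
    match x {
        0 => "0".to_owned(),
        1000 => "1".to_owned(),
        x if x % 100 == 0 => format!("0.{}", x / 100),  // e.g. 500 -> "0.5"
        x if x % 10 == 0 => format!("0.{:02}", x / 10), // e.g. 420 -> "0.42"
        x => format!("0.{:03}", x),                     // e.g. 1 -> "0.001"
    }
}

fn main() {
    assert_eq!(q_thousandths(420), "0.42");
    assert_eq!(q_thousandths(1), "0.001");
    assert_eq!(q_thousandths(1000), "1");
}
```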

View File

@@ -31,7 +31,7 @@ use super::Quality;
/// let q_item_fallback: QualityItem<String> = "abc;q=0.1".parse().unwrap(); /// let q_item_fallback: QualityItem<String> = "abc;q=0.1".parse().unwrap();
/// assert!(q_item > q_item_fallback); /// assert!(q_item > q_item_fallback);
/// ``` /// ```
#[derive(Debug, Clone, Copy, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
pub struct QualityItem<T> { pub struct QualityItem<T> {
/// The wrapped contents of the field. /// The wrapped contents of the field.
pub item: T, pub item: T,
@@ -53,15 +53,10 @@ impl<T> QualityItem<T> {
Self::new(item, Quality::MAX) Self::new(item, Quality::MAX)
} }
/// Constructs a new `QualityItem` from an item, using the minimum, non-zero q-value. /// Constructs a new `QualityItem` from an item, using the minimum q-value.
pub fn min(item: T) -> Self { pub fn min(item: T) -> Self {
Self::new(item, Quality::MIN) Self::new(item, Quality::MIN)
} }
/// Constructs a new `QualityItem` from an item, using zero q-value of zero.
pub fn zero(item: T) -> Self {
Self::new(item, Quality::ZERO)
}
} }
impl<T: PartialEq> PartialOrd for QualityItem<T> { impl<T: PartialEq> PartialOrd for QualityItem<T> {
@@ -78,10 +73,7 @@ impl<T: fmt::Display> fmt::Display for QualityItem<T> {
// q-factor value is implied for max value // q-factor value is implied for max value
Quality::MAX => Ok(()), Quality::MAX => Ok(()),
// fast path for zero Quality::MIN => f.write_str("; q=0"),
Quality::ZERO => f.write_str("; q=0"),
// quality formatting is already using itoa
q => write!(f, "; q={}", q), q => write!(f, "; q={}", q),
} }
} }
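
As a rough usage sketch (assuming the `QualityItem::max`/`QualityItem::new` constructors and the `q` helper referenced above), a maximal q-value is implied and omitted from the rendered value, while other q-values are printed compactly:

```rust
let preferred = QualityItem::max("gzip".to_owned());
let fallback = QualityItem::new("identity".to_owned(), q(0.5));

// A q of 1.0 is implied and therefore not written out.
assert_eq!(preferred.to_string(), "gzip");
assert_eq!(fallback.to_string(), "identity; q=0.5");
```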

View File

@@ -30,25 +30,15 @@ pub(crate) fn write_status_line<B: BufMut>(version: Version, n: u16, buf: &mut B
/// Write out content length header. /// Write out content length header.
/// ///
/// Buffer must contain enough space or be implicitly extendable. /// Buffer must contain enough space or be implicitly extendable.
pub fn write_content_length<B: BufMut>(n: u64, buf: &mut B, camel_case: bool) { pub fn write_content_length<B: BufMut>(n: u64, buf: &mut B) {
if n == 0 { if n == 0 {
if camel_case { buf.put_slice(b"\r\ncontent-length: 0\r\n");
buf.put_slice(b"\r\nContent-Length: 0\r\n");
} else {
buf.put_slice(b"\r\ncontent-length: 0\r\n");
}
return; return;
} }
let mut buffer = itoa::Buffer::new(); let mut buffer = itoa::Buffer::new();
if camel_case { buf.put_slice(b"\r\ncontent-length: ");
buf.put_slice(b"\r\nContent-Length: ");
} else {
buf.put_slice(b"\r\ncontent-length: ");
}
buf.put_slice(buffer.format(n).as_bytes()); buf.put_slice(buffer.format(n).as_bytes());
buf.put_slice(b"\r\n"); buf.put_slice(b"\r\n");
} }
@@ -105,88 +95,77 @@ mod tests {
fn test_write_content_length() { fn test_write_content_length() {
let mut bytes = BytesMut::new(); let mut bytes = BytesMut::new();
bytes.reserve(50); bytes.reserve(50);
write_content_length(0, &mut bytes, false); write_content_length(0, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 0\r\n"[..]); assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 0\r\n"[..]);
bytes.reserve(50); bytes.reserve(50);
write_content_length(9, &mut bytes, false); write_content_length(9, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 9\r\n"[..]); assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 9\r\n"[..]);
bytes.reserve(50); bytes.reserve(50);
write_content_length(10, &mut bytes, false); write_content_length(10, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 10\r\n"[..]); assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 10\r\n"[..]);
bytes.reserve(50); bytes.reserve(50);
write_content_length(99, &mut bytes, false); write_content_length(99, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 99\r\n"[..]); assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 99\r\n"[..]);
bytes.reserve(50); bytes.reserve(50);
write_content_length(100, &mut bytes, false); write_content_length(100, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 100\r\n"[..]); assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 100\r\n"[..]);
bytes.reserve(50); bytes.reserve(50);
write_content_length(101, &mut bytes, false); write_content_length(101, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 101\r\n"[..]); assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 101\r\n"[..]);
bytes.reserve(50); bytes.reserve(50);
write_content_length(998, &mut bytes, false); write_content_length(998, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 998\r\n"[..]); assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 998\r\n"[..]);
bytes.reserve(50); bytes.reserve(50);
write_content_length(1000, &mut bytes, false); write_content_length(1000, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 1000\r\n"[..]); assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 1000\r\n"[..]);
bytes.reserve(50); bytes.reserve(50);
write_content_length(1001, &mut bytes, false); write_content_length(1001, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 1001\r\n"[..]); assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 1001\r\n"[..]);
bytes.reserve(50); bytes.reserve(50);
write_content_length(5909, &mut bytes, false); write_content_length(5909, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 5909\r\n"[..]); assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 5909\r\n"[..]);
bytes.reserve(50); bytes.reserve(50);
write_content_length(9999, &mut bytes, false); write_content_length(9999, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 9999\r\n"[..]); assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 9999\r\n"[..]);
bytes.reserve(50); bytes.reserve(50);
write_content_length(10001, &mut bytes, false); write_content_length(10001, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 10001\r\n"[..]); assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 10001\r\n"[..]);
bytes.reserve(50); bytes.reserve(50);
write_content_length(59094, &mut bytes, false); write_content_length(59094, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 59094\r\n"[..]); assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 59094\r\n"[..]);
bytes.reserve(50); bytes.reserve(50);
write_content_length(99999, &mut bytes, false); write_content_length(99999, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 99999\r\n"[..]); assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 99999\r\n"[..]);
bytes.reserve(50); bytes.reserve(50);
write_content_length(590947, &mut bytes, false); write_content_length(590947, &mut bytes);
assert_eq!( assert_eq!(
bytes.split().freeze(), bytes.split().freeze(),
b"\r\ncontent-length: 590947\r\n"[..] b"\r\ncontent-length: 590947\r\n"[..]
); );
bytes.reserve(50); bytes.reserve(50);
write_content_length(999999, &mut bytes, false); write_content_length(999999, &mut bytes);
assert_eq!( assert_eq!(
bytes.split().freeze(), bytes.split().freeze(),
b"\r\ncontent-length: 999999\r\n"[..] b"\r\ncontent-length: 999999\r\n"[..]
); );
bytes.reserve(50); bytes.reserve(50);
write_content_length(5909471, &mut bytes, false); write_content_length(5909471, &mut bytes);
assert_eq!( assert_eq!(
bytes.split().freeze(), bytes.split().freeze(),
b"\r\ncontent-length: 5909471\r\n"[..] b"\r\ncontent-length: 5909471\r\n"[..]
); );
bytes.reserve(50); bytes.reserve(50);
write_content_length(59094718, &mut bytes, false); write_content_length(59094718, &mut bytes);
assert_eq!( assert_eq!(
bytes.split().freeze(), bytes.split().freeze(),
b"\r\ncontent-length: 59094718\r\n"[..] b"\r\ncontent-length: 59094718\r\n"[..]
); );
bytes.reserve(50); bytes.reserve(50);
write_content_length(4294973728, &mut bytes, false); write_content_length(4294973728, &mut bytes);
assert_eq!( assert_eq!(
bytes.split().freeze(), bytes.split().freeze(),
b"\r\ncontent-length: 4294973728\r\n"[..] b"\r\ncontent-length: 4294973728\r\n"[..]
); );
} }
#[test]
fn write_content_length_camel_case() {
let mut bytes = BytesMut::new();
write_content_length(0, &mut bytes, false);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 0\r\n"[..]);
let mut bytes = BytesMut::new();
write_content_length(0, &mut bytes, true);
assert_eq!(bytes.split().freeze(), b"\r\nContent-Length: 0\r\n"[..]);
}
} }
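
These helpers are internal to the crate; as a rough equivalent sketch (assuming the `bytes` and `itoa` crates as dependencies), the non-allocating formatting boils down to:

```rust
use bytes::{BufMut, BytesMut};

fn put_content_length(buf: &mut BytesMut, n: u64, camel_case: bool) {
    // Header name casing only matters for HTTP/1.x wire output.
    if camel_case {
        buf.put_slice(b"\r\nContent-Length: ");
    } else {
        buf.put_slice(b"\r\ncontent-length: ");
    }

    // itoa formats the integer on the stack, avoiding a heap allocation.
    let mut itoa_buf = itoa::Buffer::new();
    buf.put_slice(itoa_buf.format(n).as_bytes());
    buf.put_slice(b"\r\n");
}

fn main() {
    let mut buf = BytesMut::with_capacity(64);
    put_content_length(&mut buf, 1024, true);
    assert_eq!(&buf[..], &b"\r\nContent-Length: 1024\r\n"[..]);
}
```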

View File

@@ -25,10 +25,10 @@ pub trait HttpMessage: Sized {
/// Message payload stream /// Message payload stream
fn take_payload(&mut self) -> Payload<Self::Stream>; fn take_payload(&mut self) -> Payload<Self::Stream>;
/// Returns a reference to the request-local data/extensions container. /// Request's extensions container
fn extensions(&self) -> Ref<'_, Extensions>; fn extensions(&self) -> Ref<'_, Extensions>;
/// Returns a mutable reference to the request-local data/extensions container. /// Mutable reference to the request's extensions container
fn extensions_mut(&self) -> RefMut<'_, Extensions>; fn extensions_mut(&self) -> RefMut<'_, Extensions>;
/// Get a header. /// Get a header.
@@ -55,7 +55,7 @@ pub trait HttpMessage: Sized {
"" ""
} }
/// Get content type encoding. /// Get content type encoding
/// ///
/// UTF-8 is used by default if the request charset is not set. /// UTF-8 is used by default if the request charset is not set.
fn encoding(&self) -> Result<&'static Encoding, ContentTypeError> { fn encoding(&self) -> Result<&'static Encoding, ContentTypeError> {

View File

@@ -1,84 +0,0 @@
use std::time::Duration;
/// Connection keep-alive config.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum KeepAlive {
/// Keep-alive duration.
///
/// `KeepAlive::Timeout(Duration::ZERO)` is mapped to `KeepAlive::Disabled`.
Timeout(Duration),
/// Rely on OS to shutdown TCP connection.
///
/// Some defaults can be very long, check your OS documentation.
Os,
/// Keep-alive is disabled.
///
/// Connections will be closed immediately.
Disabled,
}
impl KeepAlive {
pub(crate) fn enabled(&self) -> bool {
!matches!(self, Self::Disabled)
}
#[allow(unused)] // used with `http2` feature flag
pub(crate) fn duration(&self) -> Option<Duration> {
match self {
KeepAlive::Timeout(dur) => Some(*dur),
_ => None,
}
}
/// Map zero duration to disabled.
pub(crate) fn normalize(self) -> KeepAlive {
match self {
KeepAlive::Timeout(Duration::ZERO) => KeepAlive::Disabled,
ka => ka,
}
}
}
impl Default for KeepAlive {
fn default() -> Self {
Self::Timeout(Duration::from_secs(5))
}
}
impl From<Duration> for KeepAlive {
fn from(dur: Duration) -> Self {
KeepAlive::Timeout(dur).normalize()
}
}
impl From<Option<Duration>> for KeepAlive {
fn from(ka_dur: Option<Duration>) -> Self {
match ka_dur {
Some(dur) => KeepAlive::from(dur),
None => KeepAlive::Disabled,
}
.normalize()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn from_impls() {
let test: KeepAlive = Duration::from_secs(1).into();
assert_eq!(test, KeepAlive::Timeout(Duration::from_secs(1)));
let test: KeepAlive = Duration::from_secs(0).into();
assert_eq!(test, KeepAlive::Disabled);
let test: KeepAlive = Some(Duration::from_secs(0)).into();
assert_eq!(test, KeepAlive::Disabled);
let test: KeepAlive = None.into();
assert_eq!(test, KeepAlive::Disabled);
}
}

View File

@@ -3,7 +3,6 @@
//! ## Crate Features //! ## Crate Features
//! | Feature | Functionality | //! | Feature | Functionality |
//! | ------------------- | ------------------------------------------- | //! | ------------------- | ------------------------------------------- |
//! | `http2` | HTTP/2 support via [h2]. |
//! | `openssl` | TLS support via [OpenSSL]. | //! | `openssl` | TLS support via [OpenSSL]. |
//! | `rustls` | TLS support via [rustls]. | //! | `rustls` | TLS support via [rustls]. |
//! | `compress-brotli` | Payload compression support: Brotli. | //! | `compress-brotli` | Payload compression support: Brotli. |
@@ -11,7 +10,6 @@
//! | `compress-zstd` | Payload compression support: Zstd. | //! | `compress-zstd` | Payload compression support: Zstd. |
//! | `trust-dns` | Use [trust-dns] as the client DNS resolver. | //! | `trust-dns` | Use [trust-dns] as the client DNS resolver. |
//! //!
//! [h2]: https://crates.io/crates/h2
//! [OpenSSL]: https://crates.io/crates/openssl //! [OpenSSL]: https://crates.io/crates/openssl
//! [rustls]: https://crates.io/crates/rustls //! [rustls]: https://crates.io/crates/rustls
//! [trust-dns]: https://crates.io/crates/trust-dns //! [trust-dns]: https://crates.io/crates/trust-dns
@@ -21,55 +19,55 @@
#![allow( #![allow(
clippy::type_complexity, clippy::type_complexity,
clippy::too_many_arguments, clippy::too_many_arguments,
clippy::new_without_default,
clippy::borrow_interior_mutable_const clippy::borrow_interior_mutable_const
)] )]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")] #![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")] #![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
pub use ::http::{uri, uri::Uri}; #[macro_use]
pub use ::http::{Method, StatusCode, Version}; extern crate log;
pub mod body; pub mod body;
mod builder; mod builder;
mod config; mod config;
mod date;
#[cfg(feature = "__compress")] #[cfg(feature = "__compress")]
pub mod encoding; pub mod encoding;
pub mod error;
mod extensions; mod extensions;
pub mod h1;
#[cfg(feature = "http2")]
pub mod h2;
pub mod header; pub mod header;
mod helpers; mod helpers;
mod http_message; mod http_message;
mod keep_alive;
mod message; mod message;
#[cfg(test)]
mod notify_on_drop;
mod payload; mod payload;
mod requests; mod request;
mod responses; mod response;
mod response_builder;
mod service; mod service;
pub mod error;
pub mod h1;
pub mod h2;
pub mod test; pub mod test;
#[cfg(feature = "ws")]
pub mod ws; pub mod ws;
pub use self::builder::HttpServiceBuilder; pub use self::builder::HttpServiceBuilder;
pub use self::config::ServiceConfig; pub use self::config::{KeepAlive, ServiceConfig};
pub use self::error::Error; pub use self::error::Error;
pub use self::extensions::Extensions; pub use self::extensions::Extensions;
pub use self::header::ContentEncoding; pub use self::header::ContentEncoding;
pub use self::http_message::HttpMessage; pub use self::http_message::HttpMessage;
pub use self::keep_alive::KeepAlive;
pub use self::message::ConnectionType; pub use self::message::ConnectionType;
pub use self::message::Message; pub use self::message::{Message, RequestHead, RequestHeadType, ResponseHead};
#[allow(deprecated)] pub use self::payload::{Payload, PayloadStream};
pub use self::payload::{BoxedPayloadStream, Payload, PayloadStream}; pub use self::request::Request;
pub use self::requests::{Request, RequestHead, RequestHeadType}; pub use self::response::Response;
pub use self::responses::{Response, ResponseBuilder, ResponseHead}; pub use self::response_builder::ResponseBuilder;
pub use self::service::HttpService; pub use self::service::HttpService;
pub use ::http::{uri, uri::Uri};
pub use ::http::{Method, StatusCode, Version};
/// A major HTTP protocol version. /// A major HTTP protocol version.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[non_exhaustive] #[non_exhaustive]

View File

@@ -1,17 +1,26 @@
use std::{cell::RefCell, ops, rc::Rc}; use std::{
cell::{Ref, RefCell, RefMut},
net,
rc::Rc,
};
use bitflags::bitflags; use bitflags::bitflags;
use crate::{
header::{self, HeaderMap},
Extensions, Method, StatusCode, Uri, Version,
};
/// Represents various types of connection /// Represents various types of connection
#[derive(Copy, Clone, PartialEq, Debug)] #[derive(Copy, Clone, PartialEq, Debug)]
pub enum ConnectionType { pub enum ConnectionType {
/// Close connection after response. /// Close connection after response
Close, Close,
/// Keep connection alive after response. /// Keep connection alive after response
KeepAlive, KeepAlive,
/// Connection is upgraded to different type. /// Connection is upgraded to different type
Upgrade, Upgrade,
} }
@@ -35,6 +44,294 @@ pub trait Head: Default + 'static {
F: FnOnce(&MessagePool<Self>) -> R; F: FnOnce(&MessagePool<Self>) -> R;
} }
#[derive(Debug, Clone)]
pub struct RequestHead {
pub method: Method,
pub uri: Uri,
pub version: Version,
pub headers: HeaderMap,
pub peer_addr: Option<net::SocketAddr>,
flags: Flags,
}
impl Default for RequestHead {
fn default() -> RequestHead {
RequestHead {
method: Method::default(),
uri: Uri::default(),
version: Version::HTTP_11,
headers: HeaderMap::with_capacity(16),
peer_addr: None,
flags: Flags::empty(),
}
}
}
impl Head for RequestHead {
fn clear(&mut self) {
self.flags = Flags::empty();
self.headers.clear();
}
fn with_pool<F, R>(f: F) -> R
where
F: FnOnce(&MessagePool<Self>) -> R,
{
REQUEST_POOL.with(|p| f(p))
}
}
impl RequestHead {
/// Read the message headers.
pub fn headers(&self) -> &HeaderMap {
&self.headers
}
/// Mutable reference to the message headers.
pub fn headers_mut(&mut self) -> &mut HeaderMap {
&mut self.headers
}
/// Whether to send headers formatted as Camel-Case.
/// Default is `false`.
#[inline]
pub fn camel_case_headers(&self) -> bool {
self.flags.contains(Flags::CAMEL_CASE)
}
/// Set `true` to send headers which are formatted as Camel-Case.
#[inline]
pub fn set_camel_case_headers(&mut self, val: bool) {
if val {
self.flags.insert(Flags::CAMEL_CASE);
} else {
self.flags.remove(Flags::CAMEL_CASE);
}
}
#[inline]
/// Set connection type of the message
pub fn set_connection_type(&mut self, ctype: ConnectionType) {
match ctype {
ConnectionType::Close => self.flags.insert(Flags::CLOSE),
ConnectionType::KeepAlive => self.flags.insert(Flags::KEEP_ALIVE),
ConnectionType::Upgrade => self.flags.insert(Flags::UPGRADE),
}
}
#[inline]
/// Connection type
pub fn connection_type(&self) -> ConnectionType {
if self.flags.contains(Flags::CLOSE) {
ConnectionType::Close
} else if self.flags.contains(Flags::KEEP_ALIVE) {
ConnectionType::KeepAlive
} else if self.flags.contains(Flags::UPGRADE) {
ConnectionType::Upgrade
} else if self.version < Version::HTTP_11 {
ConnectionType::Close
} else {
ConnectionType::KeepAlive
}
}
/// Connection upgrade status
pub fn upgrade(&self) -> bool {
self.headers()
.get(header::CONNECTION)
.map(|hdr| {
if let Ok(s) = hdr.to_str() {
s.to_ascii_lowercase().contains("upgrade")
} else {
false
}
})
.unwrap_or(false)
}
#[inline]
/// Get response body chunking state
pub fn chunked(&self) -> bool {
!self.flags.contains(Flags::NO_CHUNKING)
}
#[inline]
pub fn no_chunking(&mut self, val: bool) {
if val {
self.flags.insert(Flags::NO_CHUNKING);
} else {
self.flags.remove(Flags::NO_CHUNKING);
}
}
#[inline]
/// Request contains `EXPECT` header
pub fn expect(&self) -> bool {
self.flags.contains(Flags::EXPECT)
}
#[inline]
pub(crate) fn set_expect(&mut self) {
self.flags.insert(Flags::EXPECT);
}
}
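
A rough sketch of the fallback logic in `connection_type`, using the types in this file: explicit flags win, otherwise the HTTP version decides.

```rust
let mut head = RequestHead::default();

// No flags set: HTTP/1.1 defaults to keep-alive...
assert_eq!(head.connection_type(), ConnectionType::KeepAlive);

// ...while anything below HTTP/1.1 defaults to close.
head.version = Version::HTTP_10;
assert_eq!(head.connection_type(), ConnectionType::Close);

// An explicit flag overrides the version-based default.
head.set_connection_type(ConnectionType::Upgrade);
assert_eq!(head.connection_type(), ConnectionType::Upgrade);
```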
#[derive(Debug)]
#[allow(clippy::large_enum_variant)]
pub enum RequestHeadType {
Owned(RequestHead),
Rc(Rc<RequestHead>, Option<HeaderMap>),
}
impl RequestHeadType {
pub fn extra_headers(&self) -> Option<&HeaderMap> {
match self {
RequestHeadType::Owned(_) => None,
RequestHeadType::Rc(_, headers) => headers.as_ref(),
}
}
}
impl AsRef<RequestHead> for RequestHeadType {
fn as_ref(&self) -> &RequestHead {
match self {
RequestHeadType::Owned(head) => head,
RequestHeadType::Rc(head, _) => head.as_ref(),
}
}
}
impl From<RequestHead> for RequestHeadType {
fn from(head: RequestHead) -> Self {
RequestHeadType::Owned(head)
}
}
#[derive(Debug)]
pub struct ResponseHead {
pub version: Version,
pub status: StatusCode,
pub headers: HeaderMap,
pub reason: Option<&'static str>,
pub(crate) extensions: RefCell<Extensions>,
flags: Flags,
}
impl ResponseHead {
/// Create new instance of `ResponseHead` type
#[inline]
pub fn new(status: StatusCode) -> ResponseHead {
ResponseHead {
status,
version: Version::default(),
headers: HeaderMap::with_capacity(12),
reason: None,
flags: Flags::empty(),
extensions: RefCell::new(Extensions::new()),
}
}
/// Message extensions
#[inline]
pub fn extensions(&self) -> Ref<'_, Extensions> {
self.extensions.borrow()
}
/// Mutable reference to the message's extensions
#[inline]
pub fn extensions_mut(&self) -> RefMut<'_, Extensions> {
self.extensions.borrow_mut()
}
#[inline]
/// Read the message headers.
pub fn headers(&self) -> &HeaderMap {
&self.headers
}
#[inline]
/// Mutable reference to the message headers.
pub fn headers_mut(&mut self) -> &mut HeaderMap {
&mut self.headers
}
#[inline]
/// Set connection type of the message
pub fn set_connection_type(&mut self, ctype: ConnectionType) {
match ctype {
ConnectionType::Close => self.flags.insert(Flags::CLOSE),
ConnectionType::KeepAlive => self.flags.insert(Flags::KEEP_ALIVE),
ConnectionType::Upgrade => self.flags.insert(Flags::UPGRADE),
}
}
#[inline]
pub fn connection_type(&self) -> ConnectionType {
if self.flags.contains(Flags::CLOSE) {
ConnectionType::Close
} else if self.flags.contains(Flags::KEEP_ALIVE) {
ConnectionType::KeepAlive
} else if self.flags.contains(Flags::UPGRADE) {
ConnectionType::Upgrade
} else if self.version < Version::HTTP_11 {
ConnectionType::Close
} else {
ConnectionType::KeepAlive
}
}
/// Check if keep-alive is enabled
#[inline]
pub fn keep_alive(&self) -> bool {
self.connection_type() == ConnectionType::KeepAlive
}
/// Check upgrade status of this message
#[inline]
pub fn upgrade(&self) -> bool {
self.connection_type() == ConnectionType::Upgrade
}
/// Get custom reason for the response
#[inline]
pub fn reason(&self) -> &str {
self.reason.unwrap_or_else(|| {
self.status
.canonical_reason()
.unwrap_or("<unknown status code>")
})
}
#[inline]
pub(crate) fn conn_type(&self) -> Option<ConnectionType> {
if self.flags.contains(Flags::CLOSE) {
Some(ConnectionType::Close)
} else if self.flags.contains(Flags::KEEP_ALIVE) {
Some(ConnectionType::KeepAlive)
} else if self.flags.contains(Flags::UPGRADE) {
Some(ConnectionType::Upgrade)
} else {
None
}
}
#[inline]
/// Get response body chunking state
pub fn chunked(&self) -> bool {
!self.flags.contains(Flags::NO_CHUNKING)
}
#[inline]
/// Set no chunking for payload
pub fn no_chunking(&mut self, val: bool) {
if val {
self.flags.insert(Flags::NO_CHUNKING);
} else {
self.flags.remove(Flags::NO_CHUNKING);
}
}
}
pub struct Message<T: Head> { pub struct Message<T: Head> {
/// Rc here should not be cloned by anyone. /// Rc here should not be cloned by anyone.
/// It's used to reuse allocation of T and no shared ownership is allowed. /// It's used to reuse allocation of T and no shared ownership is allowed.
@@ -43,13 +340,12 @@ pub struct Message<T: Head> {
impl<T: Head> Message<T> { impl<T: Head> Message<T> {
/// Get new message from the pool of objects /// Get new message from the pool of objects
#[allow(clippy::new_without_default)]
pub fn new() -> Self { pub fn new() -> Self {
T::with_pool(MessagePool::get_message) T::with_pool(MessagePool::get_message)
} }
} }
impl<T: Head> ops::Deref for Message<T> { impl<T: Head> std::ops::Deref for Message<T> {
type Target = T; type Target = T;
fn deref(&self) -> &Self::Target { fn deref(&self) -> &Self::Target {
@@ -57,7 +353,7 @@ impl<T: Head> ops::Deref for Message<T> {
} }
} }
impl<T: Head> ops::DerefMut for Message<T> { impl<T: Head> std::ops::DerefMut for Message<T> {
fn deref_mut(&mut self) -> &mut Self::Target { fn deref_mut(&mut self) -> &mut Self::Target {
Rc::get_mut(&mut self.head).expect("Multiple copies exist") Rc::get_mut(&mut self.head).expect("Multiple copies exist")
} }
@@ -69,12 +365,53 @@ impl<T: Head> Drop for Message<T> {
} }
} }
/// Generic `Head` object pool. pub(crate) struct BoxedResponseHead {
head: Option<Box<ResponseHead>>,
}
impl BoxedResponseHead {
/// Get new message from the pool of objects
pub fn new(status: StatusCode) -> Self {
RESPONSE_POOL.with(|p| p.get_message(status))
}
}
impl std::ops::Deref for BoxedResponseHead {
type Target = ResponseHead;
fn deref(&self) -> &Self::Target {
self.head.as_ref().unwrap()
}
}
impl std::ops::DerefMut for BoxedResponseHead {
fn deref_mut(&mut self) -> &mut Self::Target {
self.head.as_mut().unwrap()
}
}
impl Drop for BoxedResponseHead {
fn drop(&mut self) {
if let Some(head) = self.head.take() {
RESPONSE_POOL.with(move |p| p.release(head))
}
}
}
#[doc(hidden)] #[doc(hidden)]
/// Request's objects pool
pub struct MessagePool<T: Head>(RefCell<Vec<Rc<T>>>); pub struct MessagePool<T: Head>(RefCell<Vec<Rc<T>>>);
#[doc(hidden)]
#[allow(clippy::vec_box)]
/// Request's objects pool
pub struct BoxedResponsePool(RefCell<Vec<Box<ResponseHead>>>);
thread_local!(static REQUEST_POOL: MessagePool<RequestHead> = MessagePool::<RequestHead>::create());
thread_local!(static RESPONSE_POOL: BoxedResponsePool = BoxedResponsePool::create());
impl<T: Head> MessagePool<T> { impl<T: Head> MessagePool<T> {
pub(crate) fn create() -> MessagePool<T> { fn create() -> MessagePool<T> {
MessagePool(RefCell::new(Vec::with_capacity(128))) MessagePool(RefCell::new(Vec::with_capacity(128)))
} }
@@ -96,11 +433,43 @@ impl<T: Head> MessagePool<T> {
} }
#[inline] #[inline]
/// Release message instance /// Release request instance
fn release(&self, msg: Rc<T>) { fn release(&self, msg: Rc<T>) {
let pool = &mut self.0.borrow_mut(); let v = &mut self.0.borrow_mut();
if pool.len() < 128 { if v.len() < 128 {
pool.push(msg); v.push(msg);
}
}
}
impl BoxedResponsePool {
fn create() -> BoxedResponsePool {
BoxedResponsePool(RefCell::new(Vec::with_capacity(128)))
}
/// Get message from the pool
#[inline]
fn get_message(&self, status: StatusCode) -> BoxedResponseHead {
if let Some(mut head) = self.0.borrow_mut().pop() {
head.reason = None;
head.status = status;
head.headers.clear();
head.flags = Flags::empty();
BoxedResponseHead { head: Some(head) }
} else {
BoxedResponseHead {
head: Some(Box::new(ResponseHead::new(status))),
}
}
}
#[inline]
/// Release request instance
fn release(&self, mut msg: Box<ResponseHead>) {
let v = &mut self.0.borrow_mut();
if v.len() < 128 {
msg.extensions.get_mut().clear();
v.push(msg);
} }
} }
} }
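
The pooling used here follows a common thread-local pattern; a minimal generic sketch (a hypothetical `Pool` type, not part of the crate) is below. Note that the real pools also reset stale state (status, headers, flags) before handing an instance back out.

```rust
use std::cell::RefCell;

struct Pool<T>(RefCell<Vec<Box<T>>>);

impl<T: Default> Pool<T> {
    fn get(&self) -> Box<T> {
        // Reuse a boxed value if one is available, otherwise allocate.
        self.0.borrow_mut().pop().unwrap_or_default()
    }

    fn release(&self, item: Box<T>) {
        // Cap the pool so a burst of traffic cannot grow it without bound.
        let mut pool = self.0.borrow_mut();
        if pool.len() < 128 {
            pool.push(item);
        }
    }
}

thread_local! {
    // One pool per thread, so no synchronization is needed.
    static HEAD_POOL: Pool<String> = Pool(RefCell::new(Vec::with_capacity(128)));
}
```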

View File

@@ -1,49 +0,0 @@
/// Test Module for checking the drop state of certain async tasks that are spawned
/// with `actix_rt::spawn`
///
/// The target task must explicitly create a `NotifyOnDrop` guard when the task is spawned
use std::cell::RefCell;
thread_local! {
static NOTIFY_DROPPED: RefCell<Option<bool>> = RefCell::new(None);
}
/// Check if the spawned task is dropped.
///
/// # Panics
/// Panics when there was no `NotifyOnDrop` instance on current thread.
pub(crate) fn is_dropped() -> bool {
NOTIFY_DROPPED.with(|bool| {
bool.borrow()
.expect("No NotifyOnDrop existed on current thread")
})
}
pub(crate) struct NotifyOnDrop;
impl NotifyOnDrop {
/// # Panics
/// Panics when constructing multiple instances on any given thread.
pub(crate) fn new() -> Self {
NOTIFY_DROPPED.with(|bool| {
let mut bool = bool.borrow_mut();
if bool.is_some() {
panic!("NotifyOnDrop existed on current thread");
} else {
*bool = Some(false);
}
});
NotifyOnDrop
}
}
impl Drop for NotifyOnDrop {
fn drop(&mut self) {
NOTIFY_DROPPED.with(|bool| {
if let Some(b) = bool.borrow_mut().as_mut() {
*b = true;
}
});
}
}
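
A usage sketch from inside the crate's own tests (hedged; the exact runtime plumbing may differ): spawn a task that owns the guard, wait for it to finish, then assert the guard was dropped.

```rust
#[actix_rt::test]
async fn spawned_task_is_dropped() {
    actix_rt::spawn(async {
        let _guard = NotifyOnDrop::new();
        // ... body of the task under test ...
    })
    .await
    .unwrap();

    // The single-threaded runtime keeps the task on this thread, so the
    // thread-local flag set in Drop is visible here.
    assert!(is_dropped());
}
```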

View File

@@ -1,107 +1,67 @@
use std::{ use std::pin::Pin;
mem, use std::task::{Context, Poll};
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes; use bytes::Bytes;
use futures_core::Stream; use futures_core::Stream;
use pin_project_lite::pin_project; use h2::RecvStream;
use crate::error::PayloadError; use crate::error::PayloadError;
/// A boxed payload stream. /// A boxed payload.
pub type BoxedPayloadStream = Pin<Box<dyn Stream<Item = Result<Bytes, PayloadError>>>>; pub type PayloadStream = Pin<Box<dyn Stream<Item = Result<Bytes, PayloadError>>>>;
#[deprecated(since = "4.0.0", note = "Renamed to `BoxedPayloadStream`.")] /// A streaming payload.
pub type PayloadStream = BoxedPayloadStream; pub enum Payload<S = PayloadStream> {
None,
#[cfg(not(feature = "http2"))] H1(crate::h1::Payload),
pin_project! { H2(crate::h2::Payload),
/// A streaming payload. Stream(S),
#[project = PayloadProj]
pub enum Payload<S = BoxedPayloadStream> {
None,
H1 { payload: crate::h1::Payload },
Stream { #[pin] payload: S },
}
}
#[cfg(feature = "http2")]
pin_project! {
/// A streaming payload.
#[project = PayloadProj]
pub enum Payload<S = BoxedPayloadStream> {
None,
H1 { payload: crate::h1::Payload },
H2 { payload: crate::h2::Payload },
Stream { #[pin] payload: S },
}
} }
impl<S> From<crate::h1::Payload> for Payload<S> { impl<S> From<crate::h1::Payload> for Payload<S> {
fn from(payload: crate::h1::Payload) -> Self { fn from(v: crate::h1::Payload) -> Self {
Payload::H1 { payload } Payload::H1(v)
} }
} }
#[cfg(feature = "http2")]
impl<S> From<crate::h2::Payload> for Payload<S> { impl<S> From<crate::h2::Payload> for Payload<S> {
fn from(payload: crate::h2::Payload) -> Self { fn from(v: crate::h2::Payload) -> Self {
Payload::H2 { payload } Payload::H2(v)
} }
} }
#[cfg(feature = "http2")] impl<S> From<RecvStream> for Payload<S> {
impl<S> From<::h2::RecvStream> for Payload<S> { fn from(v: RecvStream) -> Self {
fn from(stream: ::h2::RecvStream) -> Self { Payload::H2(crate::h2::Payload::new(v))
Payload::H2 {
payload: crate::h2::Payload::new(stream),
}
} }
} }
impl From<BoxedPayloadStream> for Payload { impl From<PayloadStream> for Payload {
fn from(payload: BoxedPayloadStream) -> Self { fn from(pl: PayloadStream) -> Self {
Payload::Stream { payload } Payload::Stream(pl)
} }
} }
impl<S> Payload<S> { impl<S> Payload<S> {
/// Takes current payload and replaces it with `None` value /// Takes current payload and replaces it with `None` value
pub fn take(&mut self) -> Payload<S> { pub fn take(&mut self) -> Payload<S> {
mem::replace(self, Payload::None) std::mem::replace(self, Payload::None)
} }
} }
impl<S> Stream for Payload<S> impl<S> Stream for Payload<S>
where where
S: Stream<Item = Result<Bytes, PayloadError>>, S: Stream<Item = Result<Bytes, PayloadError>> + Unpin,
{ {
type Item = Result<Bytes, PayloadError>; type Item = Result<Bytes, PayloadError>;
#[inline] #[inline]
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
match self.project() { match self.get_mut() {
PayloadProj::None => Poll::Ready(None), Payload::None => Poll::Ready(None),
PayloadProj::H1 { payload } => Pin::new(payload).poll_next(cx), Payload::H1(ref mut pl) => pl.readany(cx),
Payload::H2(ref mut pl) => Pin::new(pl).poll_next(cx),
#[cfg(feature = "http2")] Payload::Stream(ref mut pl) => Pin::new(pl).poll_next(cx),
PayloadProj::H2 { payload } => Pin::new(payload).poll_next(cx),
PayloadProj::Stream { payload } => payload.poll_next(cx),
} }
} }
} }
#[cfg(test)]
mod tests {
use std::panic::{RefUnwindSafe, UnwindSafe};
use static_assertions::{assert_impl_all, assert_not_impl_any};
use super::*;
assert_impl_all!(Payload: Unpin);
assert_not_impl_any!(Payload: Send, Sync, UnwindSafe, RefUnwindSafe);
}
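
Since `Payload` implements `Stream`, it can be drained like any other byte stream; a hedged sketch, assuming `futures-util` for `StreamExt::next` and the crate types above:

```rust
use bytes::{Bytes, BytesMut};
use futures_util::StreamExt as _;

async fn collect_payload<S>(mut payload: Payload<S>) -> Result<BytesMut, PayloadError>
where
    S: futures_core::Stream<Item = Result<Bytes, PayloadError>> + Unpin,
{
    let mut buf = BytesMut::new();

    // `next()` needs `Unpin`, which holds for the boxed default stream type.
    while let Some(chunk) = payload.next().await {
        buf.extend_from_slice(&chunk?);
    }

    Ok(buf)
}
```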

View File

@@ -10,16 +10,19 @@ use std::{
use http::{header, Method, Uri, Version}; use http::{header, Method, Uri, Version};
use crate::{ use crate::{
header::HeaderMap, BoxedPayloadStream, Extensions, HttpMessage, Message, Payload, extensions::Extensions,
RequestHead, header::HeaderMap,
message::{Message, RequestHead},
payload::{Payload, PayloadStream},
HttpMessage,
}; };
/// An HTTP request. /// An HTTP request.
pub struct Request<P = BoxedPayloadStream> { pub struct Request<P = PayloadStream> {
pub(crate) payload: Payload<P>, pub(crate) payload: Payload<P>,
pub(crate) head: Message<RequestHead>, pub(crate) head: Message<RequestHead>,
pub(crate) conn_data: Option<Rc<Extensions>>, pub(crate) conn_data: Option<Rc<Extensions>>,
pub(crate) extensions: RefCell<Extensions>, pub(crate) req_data: RefCell<Extensions>,
} }
impl<P> HttpMessage for Request<P> { impl<P> HttpMessage for Request<P> {
@@ -34,36 +37,37 @@ impl<P> HttpMessage for Request<P> {
mem::replace(&mut self.payload, Payload::None) mem::replace(&mut self.payload, Payload::None)
} }
/// Request extensions
#[inline] #[inline]
fn extensions(&self) -> Ref<'_, Extensions> { fn extensions(&self) -> Ref<'_, Extensions> {
self.extensions.borrow() self.req_data.borrow()
} }
/// Mutable reference to the request's extensions
#[inline] #[inline]
fn extensions_mut(&self) -> RefMut<'_, Extensions> { fn extensions_mut(&self) -> RefMut<'_, Extensions> {
self.extensions.borrow_mut() self.req_data.borrow_mut()
} }
} }
impl From<Message<RequestHead>> for Request<BoxedPayloadStream> { impl From<Message<RequestHead>> for Request<PayloadStream> {
fn from(head: Message<RequestHead>) -> Self { fn from(head: Message<RequestHead>) -> Self {
Request { Request {
head, head,
payload: Payload::None, payload: Payload::None,
extensions: RefCell::new(Extensions::default()), req_data: RefCell::new(Extensions::default()),
conn_data: None, conn_data: None,
} }
} }
} }
impl Request<BoxedPayloadStream> { impl Request<PayloadStream> {
/// Create new Request instance /// Create new Request instance
#[allow(clippy::new_without_default)] pub fn new() -> Request<PayloadStream> {
pub fn new() -> Request<BoxedPayloadStream> {
Request { Request {
head: Message::new(), head: Message::new(),
payload: Payload::None, payload: Payload::None,
extensions: RefCell::new(Extensions::default()), req_data: RefCell::new(Extensions::default()),
conn_data: None, conn_data: None,
} }
} }
@@ -75,7 +79,7 @@ impl<P> Request<P> {
Request { Request {
payload, payload,
head: Message::new(), head: Message::new(),
extensions: RefCell::new(Extensions::default()), req_data: RefCell::new(Extensions::default()),
conn_data: None, conn_data: None,
} }
} }
@@ -88,7 +92,7 @@ impl<P> Request<P> {
Request { Request {
payload, payload,
head: self.head, head: self.head,
extensions: self.extensions, req_data: self.req_data,
conn_data: self.conn_data, conn_data: self.conn_data,
}, },
pl, pl,
@@ -193,17 +197,16 @@ impl<P> Request<P> {
.and_then(|container| container.get::<T>()) .and_then(|container| container.get::<T>())
} }
/// Returns the connection-level data/extensions container if an [on-connect] callback was /// Returns the connection data container if an [on-connect] callback was registered.
/// registered, leaving an empty one in its place.
/// ///
/// [on-connect]: crate::HttpServiceBuilder::on_connect_ext /// [on-connect]: crate::HttpServiceBuilder::on_connect_ext
pub fn take_conn_data(&mut self) -> Option<Rc<Extensions>> { pub fn take_conn_data(&mut self) -> Option<Rc<Extensions>> {
self.conn_data.take() self.conn_data.take()
} }
/// Returns the request-local data/extensions container, leaving an empty one in its place. /// Returns the request data container, leaving an empty one in its place.
pub fn take_req_data(&mut self) -> Extensions { pub fn take_req_data(&mut self) -> Extensions {
mem::take(self.extensions.get_mut()) mem::take(&mut self.req_data.get_mut())
} }
} }
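
A small usage sketch of the two containers, using the new method names on the left side of this diff and assuming the `HttpMessage` trait is in scope:

```rust
let mut req = Request::new();

// Request-local data lives for the duration of this request.
req.extensions_mut().insert(41u32);
assert_eq!(req.take_req_data().get::<u32>(), Some(&41));

// Connection data is only present when an on-connect callback was registered.
assert!(req.take_conn_data().is_none());
```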

View File

@@ -1,174 +0,0 @@
use std::{net, rc::Rc};
use crate::{
header::{self, HeaderMap},
message::{Flags, Head, MessagePool},
ConnectionType, Method, Uri, Version,
};
thread_local! {
static REQUEST_POOL: MessagePool<RequestHead> = MessagePool::<RequestHead>::create()
}
#[derive(Debug, Clone)]
pub struct RequestHead {
pub method: Method,
pub uri: Uri,
pub version: Version,
pub headers: HeaderMap,
pub peer_addr: Option<net::SocketAddr>,
flags: Flags,
}
impl Default for RequestHead {
fn default() -> RequestHead {
RequestHead {
method: Method::default(),
uri: Uri::default(),
version: Version::HTTP_11,
headers: HeaderMap::with_capacity(16),
peer_addr: None,
flags: Flags::empty(),
}
}
}
impl Head for RequestHead {
fn clear(&mut self) {
self.flags = Flags::empty();
self.headers.clear();
}
fn with_pool<F, R>(f: F) -> R
where
F: FnOnce(&MessagePool<Self>) -> R,
{
REQUEST_POOL.with(|p| f(p))
}
}
impl RequestHead {
/// Read the message headers.
pub fn headers(&self) -> &HeaderMap {
&self.headers
}
/// Mutable reference to the message headers.
pub fn headers_mut(&mut self) -> &mut HeaderMap {
&mut self.headers
}
/// Whether to send headers formatted as Camel-Case.
/// Default is `false`.
#[inline]
pub fn camel_case_headers(&self) -> bool {
self.flags.contains(Flags::CAMEL_CASE)
}
/// Set `true` to send headers which are formatted as Camel-Case.
#[inline]
pub fn set_camel_case_headers(&mut self, val: bool) {
if val {
self.flags.insert(Flags::CAMEL_CASE);
} else {
self.flags.remove(Flags::CAMEL_CASE);
}
}
#[inline]
/// Set connection type of the message
pub fn set_connection_type(&mut self, ctype: ConnectionType) {
match ctype {
ConnectionType::Close => self.flags.insert(Flags::CLOSE),
ConnectionType::KeepAlive => self.flags.insert(Flags::KEEP_ALIVE),
ConnectionType::Upgrade => self.flags.insert(Flags::UPGRADE),
}
}
#[inline]
/// Connection type
pub fn connection_type(&self) -> ConnectionType {
if self.flags.contains(Flags::CLOSE) {
ConnectionType::Close
} else if self.flags.contains(Flags::KEEP_ALIVE) {
ConnectionType::KeepAlive
} else if self.flags.contains(Flags::UPGRADE) {
ConnectionType::Upgrade
} else if self.version < Version::HTTP_11 {
ConnectionType::Close
} else {
ConnectionType::KeepAlive
}
}
/// Connection upgrade status
pub fn upgrade(&self) -> bool {
self.headers()
.get(header::CONNECTION)
.map(|hdr| {
if let Ok(s) = hdr.to_str() {
s.to_ascii_lowercase().contains("upgrade")
} else {
false
}
})
.unwrap_or(false)
}
#[inline]
/// Get response body chunking state
pub fn chunked(&self) -> bool {
!self.flags.contains(Flags::NO_CHUNKING)
}
#[inline]
pub fn no_chunking(&mut self, val: bool) {
if val {
self.flags.insert(Flags::NO_CHUNKING);
} else {
self.flags.remove(Flags::NO_CHUNKING);
}
}
/// Request contains `EXPECT` header.
#[inline]
pub fn expect(&self) -> bool {
self.flags.contains(Flags::EXPECT)
}
#[inline]
pub(crate) fn set_expect(&mut self) {
self.flags.insert(Flags::EXPECT);
}
}
#[allow(clippy::large_enum_variant)]
#[derive(Debug)]
pub enum RequestHeadType {
Owned(RequestHead),
Rc(Rc<RequestHead>, Option<HeaderMap>),
}
impl RequestHeadType {
pub fn extra_headers(&self) -> Option<&HeaderMap> {
match self {
RequestHeadType::Owned(_) => None,
RequestHeadType::Rc(_, headers) => headers.as_ref(),
}
}
}
impl AsRef<RequestHead> for RequestHeadType {
fn as_ref(&self) -> &RequestHead {
match self {
RequestHeadType::Owned(head) => head,
RequestHeadType::Rc(head, _) => head.as_ref(),
}
}
}
impl From<RequestHead> for RequestHeadType {
fn from(head: RequestHead) -> Self {
RequestHeadType::Owned(head)
}
}

View File

@@ -1,7 +0,0 @@
//! HTTP requests.
mod head;
mod request;
pub use self::head::{RequestHead, RequestHeadType};
pub use self::request::Request;

View File

@@ -1,7 +1,7 @@
//! HTTP response. //! HTTP response.
use std::{ use std::{
cell::{Ref, RefCell, RefMut}, cell::{Ref, RefMut},
fmt, str, fmt, str,
}; };
@@ -9,17 +9,17 @@ use bytes::{Bytes, BytesMut};
use bytestring::ByteString; use bytestring::ByteString;
use crate::{ use crate::{
body::{BoxBody, EitherBody, MessageBody}, body::{BoxBody, MessageBody},
extensions::Extensions,
header::{self, HeaderMap, TryIntoHeaderValue}, header::{self, HeaderMap, TryIntoHeaderValue},
responses::BoxedResponseHead, message::{BoxedResponseHead, ResponseHead},
Error, Extensions, ResponseBuilder, ResponseHead, StatusCode, Error, ResponseBuilder, StatusCode,
}; };
/// An HTTP response. /// An HTTP response.
pub struct Response<B> { pub struct Response<B> {
pub(crate) head: BoxedResponseHead, pub(crate) head: BoxedResponseHead,
pub(crate) body: B, pub(crate) body: B,
pub(crate) extensions: RefCell<Extensions>,
} }
impl Response<BoxBody> { impl Response<BoxBody> {
@@ -29,7 +29,6 @@ impl Response<BoxBody> {
Response { Response {
head: BoxedResponseHead::new(status), head: BoxedResponseHead::new(status),
body: BoxBody::new(()), body: BoxBody::new(()),
extensions: RefCell::new(Extensions::new()),
} }
} }
@@ -76,7 +75,6 @@ impl<B> Response<B> {
Response { Response {
head: BoxedResponseHead::new(status), head: BoxedResponseHead::new(status),
body, body,
extensions: RefCell::new(Extensions::new()),
} }
} }
@@ -123,21 +121,20 @@ impl<B> Response<B> {
} }
/// Returns true if keep-alive is enabled. /// Returns true if keep-alive is enabled.
#[inline]
pub fn keep_alive(&self) -> bool { pub fn keep_alive(&self) -> bool {
self.head.keep_alive() self.head.keep_alive()
} }
/// Returns a reference to the request-local data/extensions container. /// Returns a reference to the extensions of this response.
#[inline] #[inline]
pub fn extensions(&self) -> Ref<'_, Extensions> { pub fn extensions(&self) -> Ref<'_, Extensions> {
self.extensions.borrow() self.head.extensions.borrow()
} }
/// Returns a mutable reference to the request-local data/extensions container. /// Returns a mutable reference to the extensions of this response.
#[inline] #[inline]
pub fn extensions_mut(&mut self) -> RefMut<'_, Extensions> { pub fn extensions_mut(&mut self) -> RefMut<'_, Extensions> {
self.extensions.borrow_mut() self.head.extensions.borrow_mut()
} }
/// Returns a reference to the body of this response. /// Returns a reference to the body of this response.
@@ -147,29 +144,24 @@ impl<B> Response<B> {
} }
/// Sets new body. /// Sets new body.
#[inline]
pub fn set_body<B2>(self, body: B2) -> Response<B2> { pub fn set_body<B2>(self, body: B2) -> Response<B2> {
Response { Response {
head: self.head, head: self.head,
body, body,
extensions: self.extensions,
} }
} }
/// Drops body and returns new response. /// Drops body and returns new response.
#[inline]
pub fn drop_body(self) -> Response<()> { pub fn drop_body(self) -> Response<()> {
self.set_body(()) self.set_body(())
} }
/// Sets new body, returning new response and previous body value. /// Sets new body, returning new response and previous body value.
#[inline]
pub(crate) fn replace_body<B2>(self, body: B2) -> (Response<B2>, B) { pub(crate) fn replace_body<B2>(self, body: B2) -> (Response<B2>, B) {
( (
Response { Response {
head: self.head, head: self.head,
body, body,
extensions: self.extensions,
}, },
self.body, self.body,
) )
@@ -178,17 +170,13 @@ impl<B> Response<B> {
/// Returns split head and body. /// Returns split head and body.
/// ///
/// # Implementation Notes /// # Implementation Notes
/// Due to internal performance optimizations, the first element of the returned tuple is a /// Due to internal performance optimisations, the first element of the returned tuple is a
/// `Response` as well but only contains the head of the response this was called on. /// `Response` as well but only contains the head of the response this was called on.
#[inline]
pub fn into_parts(self) -> (Response<()>, B) { pub fn into_parts(self) -> (Response<()>, B) {
self.replace_body(()) self.replace_body(())
} }
/// Map the current body type to another using a closure, returning a new response. /// Returns new response with mapped body.
///
/// Closure receives the response head and the current body type.
#[inline]
pub fn map_body<F, B2>(mut self, f: F) -> Response<B2> pub fn map_body<F, B2>(mut self, f: F) -> Response<B2>
where where
F: FnOnce(&mut ResponseHead, B) -> B2, F: FnOnce(&mut ResponseHead, B) -> B2,
@@ -198,21 +186,18 @@ impl<B> Response<B> {
Response { Response {
head: self.head, head: self.head,
body, body,
extensions: self.extensions,
} }
} }
/// Map the current body to a type-erased `BoxBody`.
#[inline] #[inline]
pub fn map_into_boxed_body(self) -> Response<BoxBody> pub fn map_into_boxed_body(self) -> Response<BoxBody>
where where
B: MessageBody + 'static, B: MessageBody + 'static,
{ {
self.map_body(|_, body| body.boxed()) self.map_body(|_, body| BoxBody::new(body))
} }
/// Returns the response body, dropping all other parts. /// Returns body, consuming this response.
#[inline]
pub fn into_body(self) -> B { pub fn into_body(self) -> B {
self.body self.body
} }
@@ -255,9 +240,9 @@ impl<I: Into<Response<BoxBody>>, E: Into<Error>> From<Result<I, E>> for Response
} }
} }
impl From<ResponseBuilder> for Response<EitherBody<()>> { impl From<ResponseBuilder> for Response<BoxBody> {
fn from(mut builder: ResponseBuilder) -> Self { fn from(mut builder: ResponseBuilder) -> Self {
builder.finish() builder.finish().map_into_boxed_body()
} }
} }
@@ -285,24 +270,6 @@ impl From<&'static [u8]> for Response<&'static [u8]> {
} }
} }
impl From<Vec<u8>> for Response<Vec<u8>> {
fn from(val: Vec<u8>) -> Self {
let mut res = Response::with_body(StatusCode::OK, val);
let mime = mime::APPLICATION_OCTET_STREAM.try_into_value().unwrap();
res.headers_mut().insert(header::CONTENT_TYPE, mime);
res
}
}
impl From<&Vec<u8>> for Response<Vec<u8>> {
fn from(val: &Vec<u8>) -> Self {
let mut res = Response::with_body(StatusCode::OK, val.clone());
let mime = mime::APPLICATION_OCTET_STREAM.try_into_value().unwrap();
res.headers_mut().insert(header::CONTENT_TYPE, mime);
res
}
}
impl From<String> for Response<String> { impl From<String> for Response<String> {
fn from(val: String) -> Self { fn from(val: String) -> Self {
let mut res = Response::with_body(StatusCode::OK, val); let mut res = Response::with_body(StatusCode::OK, val);

View File

@@ -1,13 +1,16 @@
//! HTTP response builder. //! HTTP response builder.
use std::{cell::RefCell, fmt, str}; use std::{
cell::{Ref, RefMut},
fmt, str,
};
use crate::{ use crate::{
body::{EitherBody, MessageBody}, body::{EitherBody, MessageBody},
error::{Error, HttpError}, error::{Error, HttpError},
header::{self, TryIntoHeaderPair, TryIntoHeaderValue}, header::{self, TryIntoHeaderPair, TryIntoHeaderValue},
responses::{BoxedResponseHead, ResponseHead}, message::{BoxedResponseHead, ConnectionType, ResponseHead},
ConnectionType, Extensions, Response, StatusCode, Extensions, Response, StatusCode,
}; };
/// An HTTP response builder. /// An HTTP response builder.
@@ -199,6 +202,20 @@ impl ResponseBuilder {
self self
} }
/// Response's extensions container
#[inline]
pub fn extensions(&self) -> Ref<'_, Extensions> {
let head = self.head.as_ref().expect("cannot reuse response builder");
head.extensions.borrow()
}
/// Mutable reference to the response's extensions
#[inline]
pub fn extensions_mut(&mut self) -> RefMut<'_, Extensions> {
let head = self.head.as_ref().expect("cannot reuse response builder");
head.extensions.borrow_mut()
}
/// Generate response with a wrapped body. /// Generate response with a wrapped body.
/// ///
/// This `ResponseBuilder` will be left in a useless state. /// This `ResponseBuilder` will be left in a useless state.
@@ -221,12 +238,7 @@ impl ResponseBuilder {
} }
let head = self.head.take().expect("cannot reuse response builder"); let head = self.head.take().expect("cannot reuse response builder");
Ok(Response { head, body })
Ok(Response {
head,
body,
extensions: RefCell::new(Extensions::new()),
})
} }
/// Generate response with an empty body. /// Generate response with an empty body.

View File

@@ -1,269 +0,0 @@
//! Response head type and caching pool.
use std::{cell::RefCell, ops};
use crate::{header::HeaderMap, message::Flags, ConnectionType, StatusCode, Version};
thread_local! {
static RESPONSE_POOL: BoxedResponsePool = BoxedResponsePool::create();
}
#[derive(Debug, Clone)]
pub struct ResponseHead {
pub version: Version,
pub status: StatusCode,
pub headers: HeaderMap,
pub reason: Option<&'static str>,
pub(crate) flags: Flags,
}
impl ResponseHead {
/// Create new instance of `ResponseHead` type
#[inline]
pub fn new(status: StatusCode) -> ResponseHead {
ResponseHead {
status,
version: Version::HTTP_11,
headers: HeaderMap::with_capacity(12),
reason: None,
flags: Flags::empty(),
}
}
/// Read the message headers.
#[inline]
pub fn headers(&self) -> &HeaderMap {
&self.headers
}
/// Mutable reference to the message headers.
#[inline]
pub fn headers_mut(&mut self) -> &mut HeaderMap {
&mut self.headers
}
/// Sets the flag that controls whether to send headers formatted as Camel-Case.
///
/// Only applicable to HTTP/1.x responses; HTTP/2 header names are always lowercase.
#[inline]
pub fn set_camel_case_headers(&mut self, camel_case: bool) {
if camel_case {
self.flags.insert(Flags::CAMEL_CASE);
} else {
self.flags.remove(Flags::CAMEL_CASE);
}
}
/// Set connection type of the message
#[inline]
pub fn set_connection_type(&mut self, ctype: ConnectionType) {
match ctype {
ConnectionType::Close => self.flags.insert(Flags::CLOSE),
ConnectionType::KeepAlive => self.flags.insert(Flags::KEEP_ALIVE),
ConnectionType::Upgrade => self.flags.insert(Flags::UPGRADE),
}
}
#[inline]
pub fn connection_type(&self) -> ConnectionType {
if self.flags.contains(Flags::CLOSE) {
ConnectionType::Close
} else if self.flags.contains(Flags::KEEP_ALIVE) {
ConnectionType::KeepAlive
} else if self.flags.contains(Flags::UPGRADE) {
ConnectionType::Upgrade
} else if self.version < Version::HTTP_11 {
ConnectionType::Close
} else {
ConnectionType::KeepAlive
}
}
/// Check if keep-alive is enabled
#[inline]
pub fn keep_alive(&self) -> bool {
self.connection_type() == ConnectionType::KeepAlive
}
/// Check upgrade status of this message
#[inline]
pub fn upgrade(&self) -> bool {
self.connection_type() == ConnectionType::Upgrade
}
/// Get custom reason for the response
#[inline]
pub fn reason(&self) -> &str {
self.reason.unwrap_or_else(|| {
self.status
.canonical_reason()
.unwrap_or("<unknown status code>")
})
}
#[inline]
pub(crate) fn conn_type(&self) -> Option<ConnectionType> {
if self.flags.contains(Flags::CLOSE) {
Some(ConnectionType::Close)
} else if self.flags.contains(Flags::KEEP_ALIVE) {
Some(ConnectionType::KeepAlive)
} else if self.flags.contains(Flags::UPGRADE) {
Some(ConnectionType::Upgrade)
} else {
None
}
}
/// Get response body chunking state
#[inline]
pub fn chunked(&self) -> bool {
!self.flags.contains(Flags::NO_CHUNKING)
}
/// Set no chunking for payload
#[inline]
pub fn no_chunking(&mut self, val: bool) {
if val {
self.flags.insert(Flags::NO_CHUNKING);
} else {
self.flags.remove(Flags::NO_CHUNKING);
}
}
}
pub(crate) struct BoxedResponseHead {
head: Option<Box<ResponseHead>>,
}
impl BoxedResponseHead {
/// Get new message from the pool of objects
pub fn new(status: StatusCode) -> Self {
RESPONSE_POOL.with(|p| p.get_message(status))
}
}
impl ops::Deref for BoxedResponseHead {
type Target = ResponseHead;
fn deref(&self) -> &Self::Target {
self.head.as_ref().unwrap()
}
}
impl ops::DerefMut for BoxedResponseHead {
fn deref_mut(&mut self) -> &mut Self::Target {
self.head.as_mut().unwrap()
}
}
impl Drop for BoxedResponseHead {
fn drop(&mut self) {
if let Some(head) = self.head.take() {
RESPONSE_POOL.with(move |p| p.release(head))
}
}
}
/// Response head object pool.
#[doc(hidden)]
pub struct BoxedResponsePool(#[allow(clippy::vec_box)] RefCell<Vec<Box<ResponseHead>>>);
impl BoxedResponsePool {
fn create() -> BoxedResponsePool {
BoxedResponsePool(RefCell::new(Vec::with_capacity(128)))
}
/// Get message from the pool.
#[inline]
fn get_message(&self, status: StatusCode) -> BoxedResponseHead {
if let Some(mut head) = self.0.borrow_mut().pop() {
head.reason = None;
head.status = status;
head.headers.clear();
head.flags = Flags::empty();
BoxedResponseHead { head: Some(head) }
} else {
BoxedResponseHead {
head: Some(Box::new(ResponseHead::new(status))),
}
}
}
/// Release a response head back into the pool.
#[inline]
fn release(&self, msg: Box<ResponseHead>) {
let pool = &mut self.0.borrow_mut();
if pool.len() < 128 {
pool.push(msg);
}
}
}
#[cfg(test)]
mod tests {
use std::{
io::{Read as _, Write as _},
net,
};
use memchr::memmem;
use crate::{
h1::H1Service,
header::{HeaderName, HeaderValue},
Error, Request, Response, ServiceConfig,
};
#[actix_rt::test]
async fn camel_case_headers() {
let mut srv = actix_http_test::test_server(|| {
H1Service::with_config(ServiceConfig::default(), |req: Request| async move {
let mut res = Response::ok();
if req.path().contains("camel") {
res.head_mut().set_camel_case_headers(true);
}
res.headers_mut().insert(
HeaderName::from_static("foo-bar"),
HeaderValue::from_static("baz"),
);
Ok::<_, Error>(res)
})
.tcp()
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream
.write_all(b"GET /camel HTTP/1.1\r\nConnection: Close\r\n\r\n")
.unwrap();
let mut data = vec![];
let _ = stream.read_to_end(&mut data).unwrap();
assert_eq!(&data[..17], b"HTTP/1.1 200 OK\r\n");
assert!(memmem::find(&data, b"Foo-Bar").is_some());
assert!(memmem::find(&data, b"foo-bar").is_none());
assert!(memmem::find(&data, b"Date").is_some());
assert!(memmem::find(&data, b"date").is_none());
assert!(memmem::find(&data, b"Content-Length").is_some());
assert!(memmem::find(&data, b"content-length").is_none());
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream
.write_all(b"GET /lower HTTP/1.1\r\nConnection: Close\r\n\r\n")
.unwrap();
let mut data = vec![];
let _ = stream.read_to_end(&mut data).unwrap();
assert_eq!(&data[..17], b"HTTP/1.1 200 OK\r\n");
assert!(memmem::find(&data, b"Foo-Bar").is_none());
assert!(memmem::find(&data, b"foo-bar").is_some());
assert!(memmem::find(&data, b"Date").is_none());
assert!(memmem::find(&data, b"date").is_some());
assert!(memmem::find(&data, b"Content-Length").is_none());
assert!(memmem::find(&data, b"content-length").is_some());
srv.stop().await;
}
}

View File

@@ -1,11 +0,0 @@
//! HTTP response.
mod builder;
mod head;
#[allow(clippy::module_inception)]
mod response;
pub use self::builder::ResponseBuilder;
pub(crate) use self::head::BoxedResponseHead;
pub use self::head::ResponseHead;
pub use self::response::Response;

View File

@@ -19,8 +19,9 @@ use pin_project_lite::pin_project;
 use crate::{
     body::{BoxBody, MessageBody},
     builder::HttpServiceBuilder,
+    config::{KeepAlive, ServiceConfig},
     error::DispatchError,
-    h1, ConnectCallback, OnConnectData, Protocol, Request, Response, ServiceConfig,
+    h1, h2, ConnectCallback, OnConnectData, Protocol, Request, Response,
 };

 /// A `ServiceFactory` for HTTP/1.1 or HTTP/2 protocol.
@@ -42,9 +42,9 @@ where
     <S::Service as Service<Request>>::Future: 'static,
     B: MessageBody + 'static,
 {
-    /// Constructs builder for `HttpService` instance.
+    /// Create builder for `HttpService` instance.
     pub fn build() -> HttpServiceBuilder<T, S> {
-        HttpServiceBuilder::default()
+        HttpServiceBuilder::new()
     }
 }
@@ -57,10 +58,12 @@ where
     <S::Service as Service<Request>>::Future: 'static,
     B: MessageBody + 'static,
 {
-    /// Constructs new `HttpService` instance from service with default config.
+    /// Create new `HttpService` instance.
     pub fn new<F: IntoServiceFactory<S, Request>>(service: F) -> Self {
+        let cfg = ServiceConfig::new(KeepAlive::Timeout(5), 5000, 0, false, None);
         HttpService {
-            cfg: ServiceConfig::default(),
+            cfg,
             srv: service.into_factory(),
             expect: h1::ExpectHandler,
             upgrade: None,
@@ -69,7 +72,7 @@ where
         }
     }

-    /// Constructs new `HttpService` instance from config and service.
+    /// Create new `HttpService` instance with config.
    pub(crate) fn with_config<F: IntoServiceFactory<S, Request>>(
        cfg: ServiceConfig,
        service: F,
@@ -94,10 +97,11 @@ where
     <S::Service as Service<Request>>::Future: 'static,
     B: MessageBody,
 {
-    /// Sets service for `Expect: 100-Continue` handling.
+    /// Provide service for `EXPECT: 100-Continue` support.
     ///
-    /// An expect service is called with requests that contain an `Expect` header. A successful
-    /// response type is also a request which will be forwarded to the main service.
+    /// Service get called with request that contains `EXPECT` header.
+    /// Service must return request in case of success, in that case
+    /// request will be forwarded to main service.
     pub fn expect<X1>(self, expect: X1) -> HttpService<T, S, B, X1, U>
     where
         X1: ServiceFactory<Request, Config = (), Response = Request>,
@@ -114,10 +118,10 @@ where
         }
     }

-    /// Sets service for custom `Connection: Upgrade` handling.
+    /// Provide service for custom `Connection: UPGRADE` support.
     ///
-    /// If service is provided then normal requests handling get halted and this service get called
-    /// with original request and framed object.
+    /// If service is provided then normal requests handling get halted
+    /// and this service get called with original request and framed object.
     pub fn upgrade<U1>(self, upgrade: Option<U1>) -> HttpService<T, S, B, X, U1>
     where
         U1: ServiceFactory<(Request, Framed<T, h1::Codec>), Config = (), Response = ()>,
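For context, the expect hook documented in this hunk is what the `expect_continue` tests further down exercise: the expect service sees the request before the payload is transmitted and either hands the request back (the dispatcher then sends `100 Continue` and forwards it to the main service) or fails with an error convertible into a response. A minimal sketch in the style of those tests; the `ExpectFailed` error type and the `auth` header check are illustrative assumptions:

    HttpService::build()
        .expect(fn_service(|req: Request| async move {
            // decide before the client uploads the body
            if req.headers().contains_key("auth") {
                Ok(req) // forwarded to the main service after `100 Continue`
            } else {
                Err(ExpectFailed) // anything implementing `Into<Response<BoxBody>>`
            }
        }))
        .h1(|_| async { Ok::<_, Infallible>(Response::ok()) })
        .tcp();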
@@ -489,9 +493,9 @@ where
type Future = HttpServiceHandlerResponse<T, S, B, X, U>; type Future = HttpServiceHandlerResponse<T, S, B, X, U>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self._poll_ready(cx).map_err(|err| { self._poll_ready(cx).map_err(|e| {
log::error!("HTTP service readiness error: {:?}", err); log::error!("HTTP service readiness error: {:?}", e);
DispatchError::Service(err) DispatchError::Service(e)
}) })
} }
@@ -502,11 +506,10 @@ where
let conn_data = OnConnectData::from_io(&io, self.on_connect_ext.as_deref()); let conn_data = OnConnectData::from_io(&io, self.on_connect_ext.as_deref());
match proto { match proto {
#[cfg(feature = "http2")]
Protocol::Http2 => HttpServiceHandlerResponse { Protocol::Http2 => HttpServiceHandlerResponse {
state: State::H2Handshake { state: State::H2Handshake {
handshake: Some(( handshake: Some((
crate::h2::handshake_with_timeout(io, &self.cfg), h2::handshake_with_timeout(io, &self.cfg),
self.cfg.clone(), self.cfg.clone(),
self.flow.clone(), self.flow.clone(),
conn_data, conn_data,
@@ -515,11 +518,6 @@ where
}, },
}, },
#[cfg(not(feature = "http2"))]
Protocol::Http2 => {
panic!("HTTP/2 support is disabled (enable with the `http2` feature flag)")
}
Protocol::Http1 => HttpServiceHandlerResponse { Protocol::Http1 => HttpServiceHandlerResponse {
state: State::H1 { state: State::H1 {
dispatcher: h1::Dispatcher::new( dispatcher: h1::Dispatcher::new(
@@ -537,7 +535,6 @@ where
} }
} }
#[cfg(not(feature = "http2"))]
pin_project! { pin_project! {
#[project = StateProj] #[project = StateProj]
enum State<T, S, B, X, U> enum State<T, S, B, X, U>
@@ -559,37 +556,10 @@ pin_project! {
U::Error: fmt::Display, U::Error: fmt::Display,
{ {
H1 { #[pin] dispatcher: h1::Dispatcher<T, S, B, X, U> }, H1 { #[pin] dispatcher: h1::Dispatcher<T, S, B, X, U> },
} H2 { #[pin] dispatcher: h2::Dispatcher<T, S, B, X, U> },
}
#[cfg(feature = "http2")]
pin_project! {
#[project = StateProj]
enum State<T, S, B, X, U>
where
T: AsyncRead,
T: AsyncWrite,
T: Unpin,
S: Service<Request>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>>,
B: MessageBody,
X: Service<Request, Response = Request>,
X::Error: Into<Response<BoxBody>>,
U: Service<(Request, Framed<T, h1::Codec>), Response = ()>,
U::Error: fmt::Display,
{
H1 { #[pin] dispatcher: h1::Dispatcher<T, S, B, X, U> },
H2 { #[pin] dispatcher: crate::h2::Dispatcher<T, S, B, X, U> },
H2Handshake { H2Handshake {
handshake: Option<( handshake: Option<(
crate::h2::HandshakeWithTimeout<T>, h2::HandshakeWithTimeout<T>,
ServiceConfig, ServiceConfig,
Rc<HttpFlow<S, X, U>>, Rc<HttpFlow<S, X, U>>,
OnConnectData, OnConnectData,
@@ -648,25 +618,21 @@ where
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.as_mut().project().state.project() { match self.as_mut().project().state.project() {
StateProj::H1 { dispatcher } => dispatcher.poll(cx), StateProj::H1 { dispatcher } => dispatcher.poll(cx),
#[cfg(feature = "http2")]
StateProj::H2 { dispatcher } => dispatcher.poll(cx), StateProj::H2 { dispatcher } => dispatcher.poll(cx),
#[cfg(feature = "http2")]
StateProj::H2Handshake { handshake: data } => { StateProj::H2Handshake { handshake: data } => {
match ready!(Pin::new(&mut data.as_mut().unwrap().0).poll(cx)) { match ready!(Pin::new(&mut data.as_mut().unwrap().0).poll(cx)) {
Ok((conn, timer)) => { Ok((conn, timer)) => {
let (_, config, flow, conn_data, peer_addr) = data.take().unwrap(); let (_, config, flow, conn_data, peer_addr) = data.take().unwrap();
self.as_mut().project().state.set(State::H2 { self.as_mut().project().state.set(State::H2 {
dispatcher: crate::h2::Dispatcher::new( dispatcher: h2::Dispatcher::new(
conn, flow, config, peer_addr, conn_data, timer, conn, flow, config, peer_addr, conn_data, timer,
), ),
}); });
self.poll(cx) self.poll(cx)
} }
Err(err) => { Err(err) => {
log::trace!("H2 handshake error: {}", err); trace!("H2 handshake error: {}", err);
Poll::Ready(Err(err)) Poll::Ready(Err(err))
} }
} }
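Which dispatcher state gets built above depends on the `Protocol` the connection acceptor negotiated, which for TLS listeners normally comes from ALPN. A minimal sketch in the style of the rustls tests later in this diff; the `tls_config()` helper and the `rustls` finalizer (available with the crate's `rustls` feature) are assumed from those tests:

    let mut config = tls_config();
    config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];

    HttpService::build()
        .finish(|_| async { Ok::<_, Infallible>(Response::ok()) })
        .rustls(config); // connections that negotiate "h2" use the H2 dispatcher, others fall back to H1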

View File

@@ -1,7 +1,7 @@
 //! Various testing helpers for use in internal and app tests.

 use std::{
-    cell::{Ref, RefCell, RefMut},
+    cell::{Ref, RefCell},
     io::{self, Read, Write},
     pin::Pin,
     rc::Rc,
@@ -120,7 +120,7 @@ impl TestRequest {
     }

     /// Set request payload.
-    pub fn set_payload(&mut self, data: impl Into<Bytes>) -> &mut Self {
+    pub fn set_payload<B: Into<Bytes>>(&mut self, data: B) -> &mut Self {
         let mut payload = crate::h1::Payload::empty();
         payload.unread_data(data.into());
         parts(&mut self.0).payload = Some(payload.into());
@@ -157,11 +157,10 @@ fn parts(parts: &mut Option<Inner>) -> &mut Inner {
 }

 /// Async I/O test buffer.
-#[derive(Debug)]
 pub struct TestBuffer {
-    pub read_buf: Rc<RefCell<BytesMut>>,
-    pub write_buf: Rc<RefCell<BytesMut>>,
-    pub err: Option<Rc<io::Error>>,
+    pub read_buf: BytesMut,
+    pub write_buf: BytesMut,
+    pub err: Option<io::Error>,
 }

 impl TestBuffer {
@@ -171,69 +170,34 @@ impl TestBuffer {
T: Into<BytesMut>, T: Into<BytesMut>,
{ {
Self { Self {
read_buf: Rc::new(RefCell::new(data.into())), read_buf: data.into(),
write_buf: Rc::new(RefCell::new(BytesMut::new())), write_buf: BytesMut::new(),
err: None, err: None,
} }
} }
// intentionally not using Clone trait
#[allow(dead_code)]
pub(crate) fn clone(&self) -> Self {
Self {
read_buf: self.read_buf.clone(),
write_buf: self.write_buf.clone(),
err: self.err.clone(),
}
}
/// Create new empty `TestBuffer` instance. /// Create new empty `TestBuffer` instance.
pub fn empty() -> Self { pub fn empty() -> Self {
Self::new("") Self::new("")
} }
#[allow(dead_code)]
pub(crate) fn read_buf_slice(&self) -> Ref<'_, [u8]> {
Ref::map(self.read_buf.borrow(), |b| b.as_ref())
}
#[allow(dead_code)]
pub(crate) fn read_buf_slice_mut(&self) -> RefMut<'_, [u8]> {
RefMut::map(self.read_buf.borrow_mut(), |b| b.as_mut())
}
#[allow(dead_code)]
pub(crate) fn write_buf_slice(&self) -> Ref<'_, [u8]> {
Ref::map(self.write_buf.borrow(), |b| b.as_ref())
}
#[allow(dead_code)]
pub(crate) fn write_buf_slice_mut(&self) -> RefMut<'_, [u8]> {
RefMut::map(self.write_buf.borrow_mut(), |b| b.as_mut())
}
#[allow(dead_code)]
pub(crate) fn take_write_buf(&self) -> Bytes {
self.write_buf.borrow_mut().split().freeze()
}
/// Add data to read buffer. /// Add data to read buffer.
pub fn extend_read_buf<T: AsRef<[u8]>>(&mut self, data: T) { pub fn extend_read_buf<T: AsRef<[u8]>>(&mut self, data: T) {
self.read_buf.borrow_mut().extend_from_slice(data.as_ref()) self.read_buf.extend_from_slice(data.as_ref())
} }
} }
impl io::Read for TestBuffer { impl io::Read for TestBuffer {
fn read(&mut self, dst: &mut [u8]) -> Result<usize, io::Error> { fn read(&mut self, dst: &mut [u8]) -> Result<usize, io::Error> {
if self.read_buf.borrow().is_empty() { if self.read_buf.is_empty() {
if self.err.is_some() { if self.err.is_some() {
Err(Rc::try_unwrap(self.err.take().unwrap()).unwrap()) Err(self.err.take().unwrap())
} else { } else {
Err(io::Error::new(io::ErrorKind::WouldBlock, "")) Err(io::Error::new(io::ErrorKind::WouldBlock, ""))
} }
} else { } else {
let size = std::cmp::min(self.read_buf.borrow().len(), dst.len()); let size = std::cmp::min(self.read_buf.len(), dst.len());
let b = self.read_buf.borrow_mut().split_to(size); let b = self.read_buf.split_to(size);
dst[..size].copy_from_slice(&b); dst[..size].copy_from_slice(&b);
Ok(size) Ok(size)
} }
@@ -242,7 +206,7 @@ impl io::Read for TestBuffer {
impl io::Write for TestBuffer { impl io::Write for TestBuffer {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.write_buf.borrow_mut().extend(buf); self.write_buf.extend(buf);
Ok(buf.len()) Ok(buf.len())
} }
@@ -300,7 +264,7 @@ impl TestSeqBuffer {
     /// Create new empty `TestBuffer` instance.
     pub fn empty() -> Self {
-        Self::new(BytesMut::new())
+        Self::new("")
     }

     pub fn read_buf(&self) -> Ref<'_, BytesMut> {
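The removed (newer) shape of `TestBuffer` keeps both buffers behind `Rc<RefCell<..>>` so a test can hold a second handle onto the same buffers while the dispatcher under test owns the I/O object. A rough usage sketch, assuming the crate-internal `clone()` and `write_buf_slice()` helpers from the removed lines (they are `pub(crate)`, so this only works from the crate's own tests):

    let io = TestBuffer::new("GET / HTTP/1.1\r\n\r\n");
    let handle = io.clone();                 // shares the same read/write buffers
    // ... hand `io` to the dispatcher/codec under test and drive it ...
    let written = handle.write_buf_slice();  // observe everything that was written
    assert!(written.starts_with(b"HTTP/1.1"));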

View File

@@ -3,11 +3,9 @@ use bitflags::bitflags;
 use bytes::{Bytes, BytesMut};
 use bytestring::ByteString;

-use super::{
-    frame::Parser,
-    proto::{CloseReason, OpCode},
-    ProtocolError,
-};
+use super::frame::Parser;
+use super::proto::{CloseReason, OpCode};
+use super::ProtocolError;

 /// A WebSocket message.
 #[derive(Debug, PartialEq)]
@@ -253,7 +251,7 @@ impl Decoder for Codec {
             }
         }
         _ => {
-            log::error!("Unfinished fragment {:?}", opcode);
+            error!("Unfinished fragment {:?}", opcode);
             Err(ProtocolError::ContinuationFragment(opcode))
         }
     };

View File

@@ -1,8 +1,6 @@
use std::{ use std::future::Future;
future::Future, use std::pin::Pin;
pin::Pin, use std::task::{Context, Poll};
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite, Framed}; use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_service::{IntoService, Service}; use actix_service::{IntoService, Service};

View File

@@ -3,11 +3,9 @@ use std::convert::TryFrom;
use bytes::{Buf, BufMut, BytesMut}; use bytes::{Buf, BufMut, BytesMut};
use log::debug; use log::debug;
use super::{ use crate::ws::mask::apply_mask;
mask::apply_mask, use crate::ws::proto::{CloseCode, CloseReason, OpCode};
proto::{CloseCode, CloseReason, OpCode}, use crate::ws::ProtocolError;
ProtocolError,
};
/// A struct representing a WebSocket frame. /// A struct representing a WebSocket frame.
#[derive(Debug)] #[derive(Debug)]

View File

@@ -47,6 +47,40 @@ pub fn apply_mask_fast32(buf: &mut [u8], mask: [u8; 4]) {
mod tests { mod tests {
use super::*; use super::*;
// legacy test from old apply mask test. kept for now for back compat test.
// TODO: remove it and favor the other test.
#[test]
fn test_apply_mask_legacy() {
let mask = [0x6d, 0xb6, 0xb2, 0x80];
let unmasked = vec![
0xf3, 0x00, 0x01, 0x02, 0x03, 0x80, 0x81, 0x82, 0xff, 0xfe, 0x00, 0x17, 0x74, 0xf9,
0x12, 0x03,
];
// Check masking with proper alignment.
{
let mut masked = unmasked.clone();
apply_mask_fallback(&mut masked, mask);
let mut masked_fast = unmasked.clone();
apply_mask(&mut masked_fast, mask);
assert_eq!(masked, masked_fast);
}
// Check masking without alignment.
{
let mut masked = unmasked.clone();
apply_mask_fallback(&mut masked[1..], mask);
let mut masked_fast = unmasked;
apply_mask(&mut masked_fast[1..], mask);
assert_eq!(masked, masked_fast);
}
}
#[test] #[test]
fn test_apply_mask() { fn test_apply_mask() {
let mask = [0x6d, 0xb6, 0xb2, 0x80]; let mask = [0x6d, 0xb6, 0xb2, 0x80];
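For reference, the fallback routine these tests compare against is just the RFC 6455 masking rule: XOR every payload byte with the corresponding byte of the 4-byte mask key, regardless of alignment. A minimal sketch of such a fallback (the crate's actual `apply_mask_fallback`/`apply_mask_fast32` may differ in detail):

    fn apply_mask_fallback(buf: &mut [u8], mask: [u8; 4]) {
        for (i, byte) in buf.iter_mut().enumerate() {
            *byte ^= mask[i & 3];
        }
    }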

View File

@@ -9,7 +9,7 @@ use derive_more::{Display, Error, From};
 use http::{header, Method, StatusCode};

 use crate::body::BoxBody;
-use crate::{header::HeaderValue, RequestHead, Response, ResponseBuilder};
+use crate::{header::HeaderValue, message::RequestHead, response::Response, ResponseBuilder};

 mod codec;
 mod dispatcher;
@@ -99,9 +99,8 @@ impl From<HandshakeError> for Response<BoxBody> {
         match err {
             HandshakeError::GetMethodRequired => {
                 let mut res = Response::new(StatusCode::METHOD_NOT_ALLOWED);
-                #[allow(clippy::declare_interior_mutable_const)]
-                const HV_GET: HeaderValue = HeaderValue::from_static("GET");
-                res.headers_mut().insert(header::ALLOW, HV_GET);
+                res.headers_mut()
+                    .insert(header::ALLOW, HeaderValue::from_static("GET"));
                 res
             }
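That `From<HandshakeError>` impl lets server code turn a failed WebSocket upgrade into the right response in one step. A minimal sketch, assuming the crate's `ws::handshake(&RequestHead)` validator; the `ws_or_error` helper name is made up for illustration:

    fn ws_or_error(head: &RequestHead) -> Response<BoxBody> {
        match ws::handshake(head) {
            // success: a builder already carrying `101 Switching Protocols` and `Sec-WebSocket-Accept`
            Ok(mut builder) => builder.finish(),
            // failure: e.g. `GetMethodRequired` becomes 405 with `Allow: GET` via the impl above
            Err(err) => Response::from(err),
        }
    }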

View File

@@ -31,7 +31,7 @@ const STR: &str = "Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World"; Hello World Hello World Hello World Hello World Hello World";
#[actix_rt::test] #[actix_rt::test]
async fn h1_v2() { async fn test_h1_v2() {
let srv = test_server(move || { let srv = test_server(move || {
HttpService::build() HttpService::build()
.finish(|_| future::ok::<_, Infallible>(Response::ok().set_body(STR))) .finish(|_| future::ok::<_, Infallible>(Response::ok().set_body(STR)))
@@ -59,7 +59,7 @@ async fn h1_v2() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn connection_close() { async fn test_connection_close() {
let srv = test_server(move || { let srv = test_server(move || {
HttpService::build() HttpService::build()
.finish(|_| future::ok::<_, Infallible>(Response::ok().set_body(STR))) .finish(|_| future::ok::<_, Infallible>(Response::ok().set_body(STR)))
@@ -73,7 +73,7 @@ async fn connection_close() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn with_query_parameter() { async fn test_with_query_parameter() {
let srv = test_server(move || { let srv = test_server(move || {
HttpService::build() HttpService::build()
.finish(|req: Request| async move { .finish(|req: Request| async move {
@@ -104,7 +104,7 @@ impl From<ExpectFailed> for Response<BoxBody> {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h1_expect() { async fn test_h1_expect() {
let srv = test_server(move || { let srv = test_server(move || {
HttpService::build() HttpService::build()
.expect(|req: Request| async { .expect(|req: Request| async {

View File

@@ -1,4 +1,4 @@
-use std::{io, time::Duration};
+use std::io;

 use actix_http::{error::Error, HttpService, Response};
 use actix_server::Server;
@@ -19,7 +19,7 @@ async fn h2_ping_pong() -> io::Result<()> {
         .workers(1)
         .listen("h2_ping_pong", lst, || {
             HttpService::build()
-                .keep_alive(Duration::from_secs(3))
+                .keep_alive(3)
                 .h2(|_| async { Ok::<_, Error>(Response::ok()) })
                 .tcp()
         })?
@@ -92,10 +92,10 @@ async fn h2_handshake_timeout() -> io::Result<()> {
         .workers(1)
         .listen("h2_ping_pong", lst, || {
             HttpService::build()
-                .keep_alive(Duration::from_secs(30))
+                .keep_alive(30)
                 // set first request timeout to 5 seconds.
                 // this is the timeout used for http2 handshake.
-                .client_request_timeout(Duration::from_secs(5))
+                .client_timeout(5000)
                 .h2(|_| async { Ok::<_, Error>(Response::ok()) })
                 .tcp()
         })?
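The removed side of this hunk reflects the newer builder API, where keep-alive and client timeouts are plain `std::time::Duration`s rather than bare seconds/milliseconds (`keep_alive(3)`, `client_timeout(5000)` on the added side). A minimal sketch of the newer style, taken from the removed lines:

    HttpService::build()
        .keep_alive(Duration::from_secs(30))            // idle connection lifetime
        .client_request_timeout(Duration::from_secs(5)) // also bounds the HTTP/2 handshake
        .h2(|_| async { Ok::<_, Error>(Response::ok()) })
        .tcp();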

View File

@@ -66,7 +66,7 @@ fn tls_config() -> SslAcceptor {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2() -> io::Result<()> { async fn test_h2() -> io::Result<()> {
let srv = test_server(move || { let srv = test_server(move || {
HttpService::build() HttpService::build()
.h2(|_| ok::<_, Error>(Response::ok())) .h2(|_| ok::<_, Error>(Response::ok()))
@@ -81,7 +81,7 @@ async fn h2() -> io::Result<()> {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_1() -> io::Result<()> { async fn test_h2_1() -> io::Result<()> {
let srv = test_server(move || { let srv = test_server(move || {
HttpService::build() HttpService::build()
.finish(|req: Request| { .finish(|req: Request| {
@@ -100,7 +100,7 @@ async fn h2_1() -> io::Result<()> {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_body() -> io::Result<()> { async fn test_h2_body() -> io::Result<()> {
let data = "HELLOWORLD".to_owned().repeat(64 * 1024); // 640 KiB let data = "HELLOWORLD".to_owned().repeat(64 * 1024); // 640 KiB
let mut srv = test_server(move || { let mut srv = test_server(move || {
HttpService::build() HttpService::build()
@@ -122,7 +122,7 @@ async fn h2_body() -> io::Result<()> {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_content_length() { async fn test_h2_content_length() {
let srv = test_server(move || { let srv = test_server(move || {
HttpService::build() HttpService::build()
.h2(|req: Request| { .h2(|req: Request| {
@@ -164,7 +164,7 @@ async fn h2_content_length() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_headers() { async fn test_h2_headers() {
let data = STR.repeat(10); let data = STR.repeat(10);
let data2 = data.clone(); let data2 = data.clone();
@@ -229,7 +229,7 @@ const STR: &str = "Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World"; Hello World Hello World Hello World Hello World Hello World";
#[actix_rt::test] #[actix_rt::test]
async fn h2_body2() { async fn test_h2_body2() {
let mut srv = test_server(move || { let mut srv = test_server(move || {
HttpService::build() HttpService::build()
.h2(|_| ok::<_, Infallible>(Response::ok().set_body(STR))) .h2(|_| ok::<_, Infallible>(Response::ok().set_body(STR)))
@@ -247,7 +247,7 @@ async fn h2_body2() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_head_empty() { async fn test_h2_head_empty() {
let mut srv = test_server(move || { let mut srv = test_server(move || {
HttpService::build() HttpService::build()
.finish(|_| ok::<_, Infallible>(Response::ok().set_body(STR))) .finish(|_| ok::<_, Infallible>(Response::ok().set_body(STR)))
@@ -271,7 +271,7 @@ async fn h2_head_empty() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_head_binary() { async fn test_h2_head_binary() {
let mut srv = test_server(move || { let mut srv = test_server(move || {
HttpService::build() HttpService::build()
.h2(|_| ok::<_, Infallible>(Response::ok().set_body(STR))) .h2(|_| ok::<_, Infallible>(Response::ok().set_body(STR)))
@@ -294,7 +294,7 @@ async fn h2_head_binary() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_head_binary2() { async fn test_h2_head_binary2() {
let srv = test_server(move || { let srv = test_server(move || {
HttpService::build() HttpService::build()
.h2(|_| ok::<_, Infallible>(Response::ok().set_body(STR))) .h2(|_| ok::<_, Infallible>(Response::ok().set_body(STR)))
@@ -313,7 +313,7 @@ async fn h2_head_binary2() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_body_length() { async fn test_h2_body_length() {
let mut srv = test_server(move || { let mut srv = test_server(move || {
HttpService::build() HttpService::build()
.h2(|_| async { .h2(|_| async {
@@ -338,7 +338,7 @@ async fn h2_body_length() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_body_chunked_explicit() { async fn test_h2_body_chunked_explicit() {
let mut srv = test_server(move || { let mut srv = test_server(move || {
HttpService::build() HttpService::build()
.h2(|_| { .h2(|_| {
@@ -366,7 +366,7 @@ async fn h2_body_chunked_explicit() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_response_http_error_handling() { async fn test_h2_response_http_error_handling() {
let mut srv = test_server(move || { let mut srv = test_server(move || {
HttpService::build() HttpService::build()
.h2(fn_service(|_| { .h2(fn_service(|_| {
@@ -406,7 +406,7 @@ impl From<BadRequest> for Response<BoxBody> {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_service_error() { async fn test_h2_service_error() {
let mut srv = test_server(move || { let mut srv = test_server(move || {
HttpService::build() HttpService::build()
.h2(|_| err::<Response<BoxBody>, _>(BadRequest)) .h2(|_| err::<Response<BoxBody>, _>(BadRequest))
@@ -424,7 +424,7 @@ async fn h2_service_error() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_on_connect() { async fn test_h2_on_connect() {
let srv = test_server(move || { let srv = test_server(move || {
HttpService::build() HttpService::build()
.on_connect_ext(|_, data| { .on_connect_ext(|_, data| {

View File

@@ -7,7 +7,6 @@ use std::{
     io::{self, BufReader, Write},
     net::{SocketAddr, TcpStream as StdTcpStream},
     sync::Arc,
-    task::Poll,
 };

 use actix_http::{
@@ -17,37 +16,25 @@ use actix_http::{
     Error, HttpService, Method, Request, Response, StatusCode, Version,
 };
 use actix_http_test::test_server;
-use actix_rt::pin;
 use actix_service::{fn_factory_with_config, fn_service};
 use actix_tls::connect::rustls::webpki_roots_cert_store;
-use actix_utils::future::{err, ok, poll_fn};
+use actix_utils::future::{err, ok};
 use bytes::{Bytes, BytesMut};
 use derive_more::{Display, Error};
-use futures_core::{ready, Stream};
-use futures_util::stream::once;
+use futures_core::Stream;
+use futures_util::stream::{once, StreamExt as _};
 use rustls::{Certificate, PrivateKey, ServerConfig as RustlsServerConfig, ServerName};
 use rustls_pemfile::{certs, pkcs8_private_keys};

-async fn load_body<S>(stream: S) -> Result<BytesMut, PayloadError>
+async fn load_body<S>(mut stream: S) -> Result<BytesMut, PayloadError>
 where
-    S: Stream<Item = Result<Bytes, PayloadError>>,
+    S: Stream<Item = Result<Bytes, PayloadError>> + Unpin,
 {
-    let mut buf = BytesMut::new();
-    pin!(stream);
-    poll_fn(|cx| loop {
-        let body = stream.as_mut();
-        match ready!(body.poll_next(cx)) {
-            Some(Ok(bytes)) => buf.extend_from_slice(&*bytes),
-            None => return Poll::Ready(Ok(())),
-            Some(Err(err)) => return Poll::Ready(Err(err)),
-        }
-    })
-    .await?;
-    Ok(buf)
+    let mut body = BytesMut::new();
+    while let Some(item) = stream.next().await {
+        body.extend_from_slice(&item?)
+    }
+    Ok(body)
 }

 fn tls_config() -> RustlsServerConfig {
@@ -106,7 +93,7 @@ pub fn get_negotiated_alpn_protocol(
} }
#[actix_rt::test] #[actix_rt::test]
async fn h1() -> io::Result<()> { async fn test_h1() -> io::Result<()> {
let srv = test_server(move || { let srv = test_server(move || {
HttpService::build() HttpService::build()
.h1(|_| ok::<_, Error>(Response::ok())) .h1(|_| ok::<_, Error>(Response::ok()))
@@ -120,7 +107,7 @@ async fn h1() -> io::Result<()> {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2() -> io::Result<()> { async fn test_h2() -> io::Result<()> {
let srv = test_server(move || { let srv = test_server(move || {
HttpService::build() HttpService::build()
.h2(|_| ok::<_, Error>(Response::ok())) .h2(|_| ok::<_, Error>(Response::ok()))
@@ -134,7 +121,7 @@ async fn h2() -> io::Result<()> {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h1_1() -> io::Result<()> { async fn test_h1_1() -> io::Result<()> {
let srv = test_server(move || { let srv = test_server(move || {
HttpService::build() HttpService::build()
.h1(|req: Request| { .h1(|req: Request| {
@@ -152,7 +139,7 @@ async fn h1_1() -> io::Result<()> {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_1() -> io::Result<()> { async fn test_h2_1() -> io::Result<()> {
let srv = test_server(move || { let srv = test_server(move || {
HttpService::build() HttpService::build()
.finish(|req: Request| { .finish(|req: Request| {
@@ -170,7 +157,7 @@ async fn h2_1() -> io::Result<()> {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_body1() -> io::Result<()> { async fn test_h2_body1() -> io::Result<()> {
let data = "HELLOWORLD".to_owned().repeat(64 * 1024); let data = "HELLOWORLD".to_owned().repeat(64 * 1024);
let mut srv = test_server(move || { let mut srv = test_server(move || {
HttpService::build() HttpService::build()
@@ -191,7 +178,7 @@ async fn h2_body1() -> io::Result<()> {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_content_length() { async fn test_h2_content_length() {
let srv = test_server(move || { let srv = test_server(move || {
HttpService::build() HttpService::build()
.h2(|req: Request| { .h2(|req: Request| {
@@ -245,7 +232,7 @@ async fn h2_content_length() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_headers() { async fn test_h2_headers() {
let data = STR.repeat(10); let data = STR.repeat(10);
let data2 = data.clone(); let data2 = data.clone();
@@ -309,7 +296,7 @@ const STR: &str = "Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World"; Hello World Hello World Hello World Hello World Hello World";
#[actix_rt::test] #[actix_rt::test]
async fn h2_body2() { async fn test_h2_body2() {
let mut srv = test_server(move || { let mut srv = test_server(move || {
HttpService::build() HttpService::build()
.h2(|_| ok::<_, Infallible>(Response::ok().set_body(STR))) .h2(|_| ok::<_, Infallible>(Response::ok().set_body(STR)))
@@ -326,7 +313,7 @@ async fn h2_body2() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_head_empty() { async fn test_h2_head_empty() {
let mut srv = test_server(move || { let mut srv = test_server(move || {
HttpService::build() HttpService::build()
.finish(|_| ok::<_, Infallible>(Response::ok().set_body(STR))) .finish(|_| ok::<_, Infallible>(Response::ok().set_body(STR)))
@@ -352,7 +339,7 @@ async fn h2_head_empty() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_head_binary() { async fn test_h2_head_binary() {
let mut srv = test_server(move || { let mut srv = test_server(move || {
HttpService::build() HttpService::build()
.h2(|_| ok::<_, Infallible>(Response::ok().set_body(STR))) .h2(|_| ok::<_, Infallible>(Response::ok().set_body(STR)))
@@ -377,7 +364,7 @@ async fn h2_head_binary() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_head_binary2() { async fn test_h2_head_binary2() {
let srv = test_server(move || { let srv = test_server(move || {
HttpService::build() HttpService::build()
.h2(|_| ok::<_, Infallible>(Response::ok().set_body(STR))) .h2(|_| ok::<_, Infallible>(Response::ok().set_body(STR)))
@@ -398,7 +385,7 @@ async fn h2_head_binary2() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_body_length() { async fn test_h2_body_length() {
let mut srv = test_server(move || { let mut srv = test_server(move || {
HttpService::build() HttpService::build()
.h2(|_| { .h2(|_| {
@@ -420,7 +407,7 @@ async fn h2_body_length() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_body_chunked_explicit() { async fn test_h2_body_chunked_explicit() {
let mut srv = test_server(move || { let mut srv = test_server(move || {
HttpService::build() HttpService::build()
.h2(|_| { .h2(|_| {
@@ -447,7 +434,7 @@ async fn h2_body_chunked_explicit() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_response_http_error_handling() { async fn test_h2_response_http_error_handling() {
let mut srv = test_server(move || { let mut srv = test_server(move || {
HttpService::build() HttpService::build()
.h2(fn_factory_with_config(|_: ()| { .h2(fn_factory_with_config(|_: ()| {
@@ -486,7 +473,7 @@ impl From<BadRequest> for Response<BoxBody> {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h2_service_error() { async fn test_h2_service_error() {
let mut srv = test_server(move || { let mut srv = test_server(move || {
HttpService::build() HttpService::build()
.h2(|_| err::<Response<BoxBody>, _>(BadRequest)) .h2(|_| err::<Response<BoxBody>, _>(BadRequest))
@@ -503,7 +490,7 @@ async fn h2_service_error() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h1_service_error() { async fn test_h1_service_error() {
let mut srv = test_server(move || { let mut srv = test_server(move || {
HttpService::build() HttpService::build()
.h1(|_| err::<Response<BoxBody>, _>(BadRequest)) .h1(|_| err::<Response<BoxBody>, _>(BadRequest))
@@ -524,7 +511,7 @@ const HTTP1_1_ALPN_PROTOCOL: &[u8] = b"http/1.1";
const CUSTOM_ALPN_PROTOCOL: &[u8] = b"custom"; const CUSTOM_ALPN_PROTOCOL: &[u8] = b"custom";
#[actix_rt::test] #[actix_rt::test]
async fn alpn_h1() -> io::Result<()> { async fn test_alpn_h1() -> io::Result<()> {
let srv = test_server(move || { let srv = test_server(move || {
let mut config = tls_config(); let mut config = tls_config();
config.alpn_protocols.push(CUSTOM_ALPN_PROTOCOL.to_vec()); config.alpn_protocols.push(CUSTOM_ALPN_PROTOCOL.to_vec());
@@ -546,7 +533,7 @@ async fn alpn_h1() -> io::Result<()> {
} }
#[actix_rt::test] #[actix_rt::test]
async fn alpn_h2() -> io::Result<()> { async fn test_alpn_h2() -> io::Result<()> {
let srv = test_server(move || { let srv = test_server(move || {
let mut config = tls_config(); let mut config = tls_config();
config.alpn_protocols.push(CUSTOM_ALPN_PROTOCOL.to_vec()); config.alpn_protocols.push(CUSTOM_ALPN_PROTOCOL.to_vec());
@@ -572,7 +559,7 @@ async fn alpn_h2() -> io::Result<()> {
} }
#[actix_rt::test] #[actix_rt::test]
async fn alpn_h2_1() -> io::Result<()> { async fn test_alpn_h2_1() -> io::Result<()> {
let srv = test_server(move || { let srv = test_server(move || {
let mut config = tls_config(); let mut config = tls_config();
config.alpn_protocols.push(CUSTOM_ALPN_PROTOCOL.to_vec()); config.alpn_protocols.push(CUSTOM_ALPN_PROTOCOL.to_vec());

View File

@@ -2,7 +2,7 @@ use std::{
     convert::Infallible,
     io::{Read, Write},
     net, thread,
-    time::{Duration, Instant},
+    time::Duration,
 };

 use actix_http::{
@@ -22,12 +22,12 @@ use futures_util::{
use regex::Regex; use regex::Regex;
#[actix_rt::test] #[actix_rt::test]
async fn h1_basic() { async fn test_h1() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.keep_alive(KeepAlive::Disabled) .keep_alive(KeepAlive::Disabled)
.client_request_timeout(Duration::from_secs(1)) .client_timeout(1000)
.client_disconnect_timeout(Duration::from_secs(1)) .client_disconnect(1000)
.h1(|req: Request| { .h1(|req: Request| {
assert!(req.peer_addr().is_some()); assert!(req.peer_addr().is_some());
ok::<_, Infallible>(Response::ok()) ok::<_, Infallible>(Response::ok())
@@ -43,12 +43,12 @@ async fn h1_basic() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h1_2() { async fn test_h1_2() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.keep_alive(KeepAlive::Disabled) .keep_alive(KeepAlive::Disabled)
.client_request_timeout(Duration::from_secs(1)) .client_timeout(1000)
.client_disconnect_timeout(Duration::from_secs(1)) .client_disconnect(1000)
.finish(|req: Request| { .finish(|req: Request| {
assert!(req.peer_addr().is_some()); assert!(req.peer_addr().is_some());
assert_eq!(req.version(), http::Version::HTTP_11); assert_eq!(req.version(), http::Version::HTTP_11);
@@ -75,7 +75,7 @@ impl From<ExpectFailed> for Response<BoxBody> {
} }
#[actix_rt::test] #[actix_rt::test]
async fn expect_continue() { async fn test_expect_continue() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.expect(fn_service(|req: Request| { .expect(fn_service(|req: Request| {
@@ -106,7 +106,7 @@ async fn expect_continue() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn expect_continue_h1() { async fn test_expect_continue_h1() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.expect(fn_service(|req: Request| { .expect(fn_service(|req: Request| {
@@ -139,7 +139,7 @@ async fn expect_continue_h1() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn chunked_payload() { async fn test_chunked_payload() {
let chunk_sizes = vec![32768, 32, 32768]; let chunk_sizes = vec![32768, 32, 32768];
let total_size: usize = chunk_sizes.iter().sum(); let total_size: usize = chunk_sizes.iter().sum();
@@ -197,43 +197,26 @@
 }

 #[actix_rt::test]
-async fn slow_request_408() {
+async fn test_slow_request() {
     let mut srv = test_server(|| {
         HttpService::build()
-            .client_request_timeout(Duration::from_millis(200))
-            .keep_alive(Duration::from_secs(2))
+            .client_timeout(100)
             .finish(|_| ok::<_, Infallible>(Response::ok()))
             .tcp()
     })
     .await;

-    let start = Instant::now();
     let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
-    let _ = stream.write_all(b"GET /test HTTP/1.1\r\n");
+    let _ = stream.write_all(b"GET /test/tests/test HTTP/1.1\r\n");
     let mut data = String::new();
     let _ = stream.read_to_string(&mut data);
-    assert!(
-        data.starts_with("HTTP/1.1 408 Request Timeout"),
-        "response was not 408: {}",
-        data
-    );
-    let diff = start.elapsed();
-    if diff < Duration::from_secs(1) {
-        // test success
-    } else if diff < Duration::from_secs(3) {
-        panic!("request seems to have wrongly timed-out according to keep-alive");
-    } else {
-        panic!("request took way too long to time out");
-    }
+    assert!(data.starts_with("HTTP/1.1 408 Request Timeout"));

     srv.stop().await;
 }
#[actix_rt::test] #[actix_rt::test]
async fn http1_malformed_request() { async fn test_http1_malformed_request() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.h1(|_| ok::<_, Infallible>(Response::ok())) .h1(|_| ok::<_, Infallible>(Response::ok()))
@@ -251,7 +234,7 @@ async fn http1_malformed_request() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn http1_keepalive() { async fn test_http1_keepalive() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.h1(|_| ok::<_, Infallible>(Response::ok())) .h1(|_| ok::<_, Infallible>(Response::ok()))
@@ -274,25 +257,23 @@ async fn http1_keepalive() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn http1_keepalive_timeout() { async fn test_http1_keepalive_timeout() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.keep_alive(Duration::from_secs(1)) .keep_alive(1)
.h1(|_| ok::<_, Infallible>(Response::ok())) .h1(|_| ok::<_, Infallible>(Response::ok()))
.tcp() .tcp()
}) })
.await; .await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap(); let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream.write_all(b"GET /test/tests/test HTTP/1.1\r\n\r\n");
let _ = stream.write_all(b"GET /test HTTP/1.1\r\n\r\n"); let mut data = vec![0; 1024];
let mut data = vec![0; 256];
let _ = stream.read(&mut data); let _ = stream.read(&mut data);
assert_eq!(&data[..17], b"HTTP/1.1 200 OK\r\n"); assert_eq!(&data[..17], b"HTTP/1.1 200 OK\r\n");
thread::sleep(Duration::from_millis(1100)); thread::sleep(Duration::from_millis(1100));
let mut data = vec![0; 256]; let mut data = vec![0; 1024];
let res = stream.read(&mut data).unwrap(); let res = stream.read(&mut data).unwrap();
assert_eq!(res, 0); assert_eq!(res, 0);
@@ -300,7 +281,7 @@ async fn http1_keepalive_timeout() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn http1_keepalive_close() { async fn test_http1_keepalive_close() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.h1(|_| ok::<_, Infallible>(Response::ok())) .h1(|_| ok::<_, Infallible>(Response::ok()))
@@ -322,7 +303,7 @@ async fn http1_keepalive_close() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn http10_keepalive_default_close() { async fn test_http10_keepalive_default_close() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.h1(|_| ok::<_, Infallible>(Response::ok())) .h1(|_| ok::<_, Infallible>(Response::ok()))
@@ -344,7 +325,7 @@ async fn http10_keepalive_default_close() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn http10_keepalive() { async fn test_http10_keepalive() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.h1(|_| ok::<_, Infallible>(Response::ok())) .h1(|_| ok::<_, Infallible>(Response::ok()))
@@ -373,7 +354,7 @@ async fn http10_keepalive() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn http1_keepalive_disabled() { async fn test_http1_keepalive_disabled() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.keep_alive(KeepAlive::Disabled) .keep_alive(KeepAlive::Disabled)
@@ -396,7 +377,7 @@ async fn http1_keepalive_disabled() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn content_length() { async fn test_content_length() {
use actix_http::{ use actix_http::{
header::{HeaderName, HeaderValue}, header::{HeaderName, HeaderValue},
StatusCode, StatusCode,
@@ -445,7 +426,7 @@ async fn content_length() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h1_headers() { async fn test_h1_headers() {
let data = STR.repeat(10); let data = STR.repeat(10);
let data2 = data.clone(); let data2 = data.clone();
@@ -511,7 +492,7 @@ const STR: &str = "Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World"; Hello World Hello World Hello World Hello World Hello World";
#[actix_rt::test] #[actix_rt::test]
async fn h1_body() { async fn test_h1_body() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.h1(|_| ok::<_, Infallible>(Response::ok().set_body(STR))) .h1(|_| ok::<_, Infallible>(Response::ok().set_body(STR)))
@@ -530,7 +511,7 @@ async fn h1_body() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h1_head_empty() { async fn test_h1_head_empty() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.h1(|_| ok::<_, Infallible>(Response::ok().set_body(STR))) .h1(|_| ok::<_, Infallible>(Response::ok().set_body(STR)))
@@ -557,7 +538,7 @@ async fn h1_head_empty() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h1_head_binary() { async fn test_h1_head_binary() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.h1(|_| ok::<_, Infallible>(Response::ok().set_body(STR))) .h1(|_| ok::<_, Infallible>(Response::ok().set_body(STR)))
@@ -584,7 +565,7 @@ async fn h1_head_binary() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h1_head_binary2() { async fn test_h1_head_binary2() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.h1(|_| ok::<_, Infallible>(Response::ok().set_body(STR))) .h1(|_| ok::<_, Infallible>(Response::ok().set_body(STR)))
@@ -607,7 +588,7 @@ async fn h1_head_binary2() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h1_body_length() { async fn test_h1_body_length() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.h1(|_| { .h1(|_| {
@@ -631,7 +612,7 @@ async fn h1_body_length() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h1_body_chunked_explicit() { async fn test_h1_body_chunked_explicit() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.h1(|_| { .h1(|_| {
@@ -668,7 +649,7 @@ async fn h1_body_chunked_explicit() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h1_body_chunked_implicit() { async fn test_h1_body_chunked_implicit() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.h1(|_| { .h1(|_| {
@@ -699,7 +680,7 @@ async fn h1_body_chunked_implicit() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h1_response_http_error_handling() { async fn test_h1_response_http_error_handling() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.h1(fn_service(|_| { .h1(fn_service(|_| {
@@ -738,7 +719,7 @@ impl From<BadRequest> for Response<BoxBody> {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h1_service_error() { async fn test_h1_service_error() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.h1(|_| err::<Response<()>, _>(BadRequest)) .h1(|_| err::<Response<()>, _>(BadRequest))
@@ -757,7 +738,7 @@ async fn h1_service_error() {
} }
#[actix_rt::test] #[actix_rt::test]
async fn h1_on_connect() { async fn test_h1_on_connect() {
let mut srv = test_server(|| { let mut srv = test_server(|| {
HttpService::build() HttpService::build()
.on_connect_ext(|_, data| { .on_connect_ext(|_, data| {
@@ -780,7 +761,7 @@ async fn h1_on_connect() {
/// Tests compliance with 304 Not Modified spec in RFC 7232 §4.1. /// Tests compliance with 304 Not Modified spec in RFC 7232 §4.1.
/// https://datatracker.ietf.org/doc/html/rfc7232#section-4.1 /// https://datatracker.ietf.org/doc/html/rfc7232#section-4.1
#[actix_rt::test] #[actix_rt::test]
async fn not_modified_spec_h1() { async fn test_not_modified_spec_h1() {
// TODO: this test needing a few seconds to complete reveals some weirdness with either the // TODO: this test needing a few seconds to complete reveals some weirdness with either the
// dispatcher or the client, though similar hangs occur on other tests in this file, only // dispatcher or the client, though similar hangs occur on other tests in this file, only
// succeeding, it seems, because of the keepalive timer // succeeding, it seems, because of the keepalive timer
@@ -850,8 +831,7 @@ async fn not_modified_spec_h1() {
Some(&header::HeaderValue::from_static("4")), Some(&header::HeaderValue::from_static("4")),
); );
// server does not prevent payload from being sent but clients may choose not to read it // server does not prevent payload from being sent but clients may choose not to read it
// TODO: this is probably a bug in the client, especially since CL header can differ in length // TODO: this is probably a bug, especially since CL header can differ in length from the body
// from the body
assert!(!srv.load_body(res).await.unwrap().is_empty()); assert!(!srv.load_body(res).await.unwrap().is_empty());
// TODO: add stream response tests // TODO: add stream response tests

Some files were not shown because too many files have changed in this diff.