mirror of https://github.com/fafhrd91/actix-web synced 2025-04-21 20:56:50 +02:00

Compare commits


No commits in common. "master" and "http-test-v3.0.0-beta.11" have entirely different histories.

397 changed files with 12763 additions and 30917 deletions

.cargo/config.toml Normal file

@ -0,0 +1,14 @@
[alias]
lint = "clippy --workspace --tests --examples --bins -- -Dclippy::todo"
lint-all = "clippy --workspace --all-features --tests --examples --bins -- -Dclippy::todo"
# lib checking
ci-check-min = "hack --workspace check --no-default-features"
ci-check-default = "hack --workspace check"
ci-check-default-tests = "check --workspace --tests"
ci-check-all-feature-powerset="hack --workspace --feature-powerset --skip=__compress,io-uring check"
ci-check-all-feature-powerset-linux="hack --workspace --feature-powerset --skip=__compress check"
# testing
ci-doctest-default = "test --workspace --doc --no-fail-fast -- --nocapture"
ci-doctest = "test --workspace --all-features --doc --no-fail-fast -- --nocapture"


@ -1,7 +0,0 @@
disallowed-names = [
"e", # no single letter error bindings
]
disallowed-methods = [
"std::cell::RefCell::default()",
"std::rc::Rc::default()",
]


@ -1,3 +0,0 @@
version: "0.2"
words:
- actix


@ -3,40 +3,34 @@ name: Bug Report
about: Create a bug report.
---
Your issue may already be reported! Please search on the [Actix Web issue tracker](https://github.com/actix/actix-web/issues) before creating one.
## Expected Behavior
<!--- If you're describing a bug, tell us what should happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->
## Current Behavior
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
## Possible Solution
<!--- Not obligatory, but suggest a fix/reason for the bug, -->
<!--- or ideas how to implement the addition or change -->
## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
1.
2.
3.
4.
## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->
## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
- Rust Version (I.e, output of `rustc -V`):


@ -2,14 +2,12 @@
<!-- Please fill out the following to get your PR reviewed quicker. -->
## PR Type
<!-- What kind of change does this PR make? -->
<!-- Bug Fix / Feature / Refactor / Code Style / Other -->
PR_TYPE
## PR Checklist
<!-- Check your PR fulfills the following items. -->
<!-- For draft PRs check the boxes as you complete them. -->
@ -19,10 +17,11 @@ PR_TYPE
- [ ] Format code with the latest stable rustfmt.
- [ ] (Team) Label with affected crates and semver status.
## Overview
<!-- Describe the current and new behavior. -->
<!-- Emphasize any breaking changes. -->
<!-- If this PR fixes or closes an issue, reference it here. -->
<!-- Closes #000 -->


@ -1,10 +0,0 @@
version: 2
updates:
- package-ecosystem: cargo
directory: /
schedule:
interval: weekly
- package-ecosystem: github-actions
directory: /
schedule:
interval: weekly


@ -2,27 +2,25 @@ name: Benchmark

master:
on:
push:
branches: [master]
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
check_benchmark:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
run: |
rustup set profile minimal
rustup install nightly
rustup override set nightly
- name: Check benchmark
run: cargo bench --bench=server -- --sample-size=15

http-test-v3.0.0-beta.11:
on:
push:
branches:
- master
jobs:
check_benchmark:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
profile: minimal
override: true
- name: Check benchmark
uses: actions-rs/cargo@v1
with:
command: bench
args: --bench=server -- --sample-size=15

.github/workflows/ci-master.yml vendored Normal file

@ -0,0 +1,153 @@
name: CI (master only)
on:
push:
branches: [master]
jobs:
build_and_test_nightly:
strategy:
fail-fast: false
matrix:
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-2022, triple: x86_64-pc-windows-msvc }
version:
- nightly
name: ${{ matrix.target.name }} / ${{ matrix.version }}
runs-on: ${{ matrix.target.os }}
env:
CI: 1
CARGO_INCREMENTAL: 0
VCPKGRS_DYNAMIC: 1
steps:
- uses: actions/checkout@v2
# install OpenSSL on Windows
# TODO: GitHub actions docs state that OpenSSL is
# already installed on these Windows machines somewhere
- name: Set vcpkg root
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install OpenSSL
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: vcpkg install openssl:x64-windows
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check minimal
uses: actions-rs/cargo@v1
with: { command: ci-check-min }
- name: check default
uses: actions-rs/cargo@v1
with: { command: ci-check-default }
- name: tests
timeout-minutes: 60
run: |
cargo test --lib --tests -p=actix-router --all-features
cargo test --lib --tests -p=actix-http --all-features
cargo test --lib --tests -p=actix-web --features=rustls,openssl -- --skip=test_reading_deflate_encoding_large_random_rustls
cargo test --lib --tests -p=actix-web-codegen --all-features
cargo test --lib --tests -p=awc --all-features
cargo test --lib --tests -p=actix-http-test --all-features
cargo test --lib --tests -p=actix-test --all-features
cargo test --lib --tests -p=actix-files
cargo test --lib --tests -p=actix-multipart --all-features
cargo test --lib --tests -p=actix-web-actors --all-features
- name: tests (io-uring)
if: matrix.target.os == 'ubuntu-latest'
timeout-minutes: 60
run: >
sudo bash -c "ulimit -Sl 512
&& ulimit -Hl 512
&& PATH=$PATH:/usr/share/rust/.cargo/bin
&& RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo test --lib --tests -p=actix-files --all-features"
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.6.3 --no-default-features --features ci-autoclean
cargo-cache
ci_feature_powerset_check:
name: Verify Feature Combinations
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check feature combinations
uses: actions-rs/cargo@v1
with: { command: ci-check-all-feature-powerset }
- name: check feature combinations
uses: actions-rs/cargo@v1
with: { command: ci-check-all-feature-powerset-linux }
coverage:
name: coverage
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Generate coverage file
run: |
cargo install cargo-tarpaulin --vers "^0.13"
cargo tarpaulin --workspace --features=rustls,openssl --out Xml --verbose
- name: Upload to Codecov
uses: codecov/codecov-action@v1
with: { file: cobertura.xml }


@ -1,91 +0,0 @@
name: CI (post-merge)
on:
push:
branches: [master]
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build_and_test_nightly:
strategy:
fail-fast: false
matrix:
# prettier-ignore
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
version:
- { name: nightly, version: nightly }
name: ${{ matrix.target.name }} / ${{ matrix.version.name }}
runs-on: ${{ matrix.target.os }}
steps:
- uses: actions/checkout@v4
- name: Install nasm
if: matrix.target.os == 'windows-latest'
uses: ilammy/setup-nasm@v1.5.2
- name: Install OpenSSL
if: matrix.target.os == 'windows-latest'
shell: bash
run: |
set -e
choco install openssl --version=1.1.1.2100 -y --no-progress
echo 'OPENSSL_DIR=C:\Program Files\OpenSSL' >> $GITHUB_ENV
echo "RUSTFLAGS=-C target-feature=+crt-static" >> $GITHUB_ENV
- name: Install Rust (${{ matrix.version.name }})
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: ${{ matrix.version.version }}
- name: Install just, cargo-hack, cargo-nextest, cargo-ci-cache-clean
uses: taiki-e/install-action@v2.49.45
with:
tool: just,cargo-hack,cargo-nextest,cargo-ci-cache-clean
- name: check minimal
run: just check-min
- name: check default
run: just check-default
- name: tests
timeout-minutes: 60
run: just test
- name: CI cache clean
run: cargo-ci-cache-clean
ci_feature_powerset_check:
name: Verify Feature Combinations
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Free Disk Space
run: ./scripts/free-disk-space.sh
- name: Setup mold linker
uses: rui314/setup-mold@v1
- name: Install Rust
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
- name: Install just, cargo-hack
uses: taiki-e/install-action@v2.49.45
with:
tool: just,cargo-hack
- name: Check feature combinations
run: just check-feature-combinations


@ -3,119 +3,118 @@ name: CI

master:
on:
pull_request:
types: [opened, synchronize, reopened]
merge_group:
types: [checks_requested]
push:
branches: [master]
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
read_msrv:
name: Read MSRV
uses: actions-rust-lang/msrv/.github/workflows/msrv.yml@v0.1.0
build_and_test:
needs: read_msrv
strategy:
fail-fast: false
matrix:
# prettier-ignore
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
version:
- { name: msrv, version: "${{ needs.read_msrv.outputs.msrv }}" }
- { name: stable, version: stable }
name: ${{ matrix.target.name }} / ${{ matrix.version.name }}
runs-on: ${{ matrix.target.os }}
steps:
- uses: actions/checkout@v4
- name: Install nasm
if: matrix.target.os == 'windows-latest'
uses: ilammy/setup-nasm@v1.5.2
- name: Install OpenSSL
if: matrix.target.os == 'windows-latest'
shell: bash
run: |
set -e
choco install openssl --version=1.1.1.2100 -y --no-progress
echo 'OPENSSL_DIR=C:\Program Files\OpenSSL' >> $GITHUB_ENV
echo "RUSTFLAGS=-C target-feature=+crt-static" >> $GITHUB_ENV
- name: Setup mold linker
if: matrix.target.os == 'ubuntu-latest'
uses: rui314/setup-mold@v1
- name: Install Rust (${{ matrix.version.name }})
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: ${{ matrix.version.version }}
- name: Install just, cargo-hack, cargo-nextest, cargo-ci-cache-clean
uses: taiki-e/install-action@v2.49.45
with:
tool: just,cargo-hack,cargo-nextest,cargo-ci-cache-clean
- name: workaround MSRV issues
if: matrix.version.name == 'msrv'
run: just downgrade-for-msrv
- name: check minimal
run: just check-min
- name: check default
run: just check-default
- name: tests
timeout-minutes: 60
run: just test
- name: CI cache clean
run: cargo-ci-cache-clean
io-uring:
name: io-uring tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: nightly
- name: tests (io-uring)
timeout-minutes: 60
run: >
sudo bash -c "ulimit -Sl 512 && ulimit -Hl 512 && PATH=$PATH:/usr/share/rust/.cargo/bin && RUSTUP_TOOLCHAIN=stable cargo test --lib --tests -p=actix-files --all-features"
rustdoc:
name: doc tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust (nightly)
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: nightly
- name: Install just
uses: taiki-e/install-action@v2.49.45
with:
tool: just
- name: doc tests
run: just test-docs

http-test-v3.0.0-beta.11:
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches: [master]
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-2022, triple: x86_64-pc-windows-msvc }
version:
- 1.54.0 # MSRV
- stable
name: ${{ matrix.target.name }} / ${{ matrix.version }}
runs-on: ${{ matrix.target.os }}
env:
CI: 1
CARGO_INCREMENTAL: 0
VCPKGRS_DYNAMIC: 1
steps:
- uses: actions/checkout@v2
# install OpenSSL on Windows
# TODO: GitHub actions docs state that OpenSSL is
# already installed on these Windows machines somewhere
- name: Set vcpkg root
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install OpenSSL
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: vcpkg install openssl:x64-windows
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check minimal
uses: actions-rs/cargo@v1
with: { command: ci-check-min }
- name: check default
uses: actions-rs/cargo@v1
with: { command: ci-check-default }
- name: tests
timeout-minutes: 60
run: |
cargo test --lib --tests -p=actix-router --all-features
cargo test --lib --tests -p=actix-http --all-features
cargo test --lib --tests -p=actix-web --features=rustls,openssl -- --skip=test_reading_deflate_encoding_large_random_rustls
cargo test --lib --tests -p=actix-web-codegen --all-features
cargo test --lib --tests -p=awc --all-features
cargo test --lib --tests -p=actix-http-test --all-features
cargo test --lib --tests -p=actix-test --all-features
cargo test --lib --tests -p=actix-files
cargo test --lib --tests -p=actix-multipart --all-features
cargo test --lib --tests -p=actix-web-actors --all-features
- name: tests (io-uring)
if: matrix.target.os == 'ubuntu-latest'
timeout-minutes: 60
run: >
sudo bash -c "ulimit -Sl 512
&& ulimit -Hl 512
&& PATH=$PATH:/usr/share/rust/.cargo/bin
&& RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo test --lib --tests -p=actix-files --all-features"
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.6.3 --no-default-features --features ci-autoclean
cargo-cache
rustdoc:
name: doc tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust (nightly)
uses: actions-rs/toolchain@v1
with:
toolchain: nightly-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.3.0
- name: doc tests
uses: actions-rs/cargo@v1
timeout-minutes: 60
with: { command: ci-doctest }

.github/workflows/clippy-fmt.yml vendored Normal file

@ -0,0 +1,48 @@
name: Lint
on:
pull_request:
types: [opened, synchronize, reopened]
jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
components: rustfmt
- name: Check with rustfmt
uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
clippy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
components: clippy
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Check with Clippy
uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --workspace --tests --examples --all-features


@ -1,40 +0,0 @@
name: Coverage
on:
push:
branches: [master]
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
coverage:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust (nightly)
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: nightly
components: llvm-tools
- name: Install just, cargo-llvm-cov, cargo-nextest
uses: taiki-e/install-action@v2.49.45
with:
tool: just,cargo-llvm-cov,cargo-nextest
- name: Generate code coverage
run: just test-coverage-codecov
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v5.4.0
with:
files: codecov.json
fail_ci_if_error: true
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}


@ -1,90 +0,0 @@
name: Lint
on:
pull_request:
types: [opened, synchronize, reopened]
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust (nightly)
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: nightly
components: rustfmt
- name: Check with Rustfmt
run: cargo fmt --all -- --check
clippy:
permissions:
contents: read
checks: write # to add clippy checks to PR diffs
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
components: clippy
- name: Check with Clippy
uses: giraffate/clippy-action@v1.0.1
with:
reporter: github-pr-check
github_token: ${{ secrets.GITHUB_TOKEN }}
clippy_flags: >-
--workspace --all-features --tests --examples --bins --
-A unknown_lints -D clippy::todo -D clippy::dbg_macro
lint-docs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust (nightly)
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: nightly
components: rust-docs
- name: Check for broken intra-doc links
env:
RUSTDOCFLAGS: -D warnings
run: cargo +nightly doc --no-deps --workspace --all-features
check-external-types:
if: false # rustdoc mismatch currently
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust (${{ vars.RUST_VERSION_EXTERNAL_TYPES }})
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: ${{ vars.RUST_VERSION_EXTERNAL_TYPES }}
- name: Install just
uses: taiki-e/install-action@v2.49.45
with:
tool: just
- name: Install cargo-check-external-types
uses: taiki-e/cache-cargo-install-action@v2.1.1
with:
tool: cargo-check-external-types
- name: check external types
run: just check-external-types-all +${{ vars.RUST_VERSION_EXTERNAL_TYPES }}

.github/workflows/upload-doc.yml vendored Normal file

@ -0,0 +1,35 @@
name: Upload Documentation
on:
push:
branches: [master]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Build Docs
uses: actions-rs/cargo@v1
with:
command: doc
args: --workspace --all-features --no-deps
- name: Tweak HTML
run: echo '<meta http-equiv="refresh" content="0;url=actix_web/index.html">' > target/doc/index.html
- name: Deploy to GitHub Pages
uses: JamesIves/github-pages-deploy-action@3.7.1
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages
FOLDER: target/doc

.gitignore vendored

@ -1,3 +1,4 @@
master:
target/
guide/build/
/gh-pages
http-test-v3.0.0-beta.11:
Cargo.lock
target/
guide/build/
/gh-pages
@ -18,7 +19,3 @@ guide/build/
master:
# Configuration directory generated by VSCode
.vscode
# code coverage
/lcov.info
/codecov.json
http-test-v3.0.0-beta.11:
# Configuration directory generated by VSCode
.vscode


@ -1,5 +0,0 @@
overrides:
- files: "*.md"
options:
printWidth: 9999
proseWrap: never


@ -1,3 +0,0 @@
group_imports = "StdExternalCrate"
imports_granularity = "Crate"
use_field_init_shorthand = true


@ -1,5 +1,985 @@
master:
# Changelog
Changelogs are kept separately for each crate in this repo.
Actix Web changelog [is now here &rarr;](./actix-web/CHANGES.md).
http-test-v3.0.0-beta.11:
# Changes
## Unreleased - 2021-xx-xx
## 4.0.0-beta.19 - 2022-01-04
### Added
- `impl Hash` for `http::header::Encoding`. [#2501]
- `AcceptEncoding::negotiate()`. [#2501]
### Changed
- `AcceptEncoding::preference` now returns `Option<Preference<Encoding>>`. [#2501]
- Rename methods `BodyEncoding::{encoding => encode_with, get_encoding => preferred_encoding}`. [#2501]
- `http::header::Encoding` now only represents `Content-Encoding` types. [#2501]
### Fixed
- Auto-negotiation of content encoding is more fault-tolerant when using the `Compress` middleware. [#2501]
### Removed
- `Compress::new`; restricting compression algorithm is done through feature flags. [#2501]
- `BodyEncoding` trait; signalling content encoding is now only done via the `Content-Encoding` header. [#2565]
[#2501]: https://github.com/actix/actix-web/pull/2501
[#2565]: https://github.com/actix/actix-web/pull/2565
## 4.0.0-beta.18 - 2021-12-29
### Changed
- Update `cookie` dependency (re-exported) to `0.16`. [#2555]
- Minimum supported Rust version (MSRV) is now 1.54.
### Security
- `cookie` upgrade addresses [`RUSTSEC-2020-0071`].
[#2555]: https://github.com/actix/actix-web/pull/2555
[`RUSTSEC-2020-0071`]: https://rustsec.org/advisories/RUSTSEC-2020-0071.html
## 4.0.0-beta.17 - 2021-12-29
### Added
- `guard::GuardContext` for use with the `Guard` trait. [#2552]
- `ServiceRequest::guard_ctx` for obtaining a guard context. [#2552]
### Changed
- `Guard` trait now receives a `&GuardContext`. [#2552]
- `guard::fn_guard` functions now receive a `&GuardContext` (see the sketch below). [#2552]
- Some guards now return `impl Guard` and their concrete types are made private: `guard::Header` and all the method guards. [#2552]
- The `Not` guard is now generic over the type of guard it wraps. [#2552]
### Fixed
- Rename `ConnectionInfo::{remote_addr => peer_addr}`, deprecating the old name. [#2554]
- `ConnectionInfo::peer_addr` will not return the port number. [#2554]
- `ConnectionInfo::realip_remote_addr` will not return the port number if sourcing the IP from the peer's socket address. [#2554]
[#2552]: https://github.com/actix/actix-web/pull/2552
[#2554]: https://github.com/actix/actix-web/pull/2554
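
A minimal sketch of the closure-guard shape described above, assuming the post-#2552 API in which the closure passed to `guard::fn_guard` receives a `&GuardContext`; the route, header name, and handler here are illustrative only:

```rust
use actix_web::{guard, web, App, HttpResponse, HttpServer};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new().route(
            "/internal",
            web::route()
                // the closure now receives a &GuardContext instead of a &RequestHead
                .guard(guard::fn_guard(|ctx| ctx.head().headers().contains_key("x-internal")))
                .to(|| async { HttpResponse::Ok().finish() }),
        )
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}
```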
## 4.0.0-beta.16 - 2021-12-27
### Changed
- No longer require `Scope` service body type to be boxed. [#2523]
- No longer require `Resource` service body type to be boxed. [#2526]
[#2523]: https://github.com/actix/actix-web/pull/2523
[#2526]: https://github.com/actix/actix-web/pull/2526
## 4.0.0-beta.15 - 2021-12-17
### Added
- Method on `Responder` trait (`customize`) for customizing responders, and a `CustomizeResponder` struct (see the sketch below). [#2510]
- Implement `Debug` for `DefaultHeaders`. [#2510]
### Changed
- Align `DefaultHeader` method terminology, deprecating previous methods. [#2510]
- Response service types in `ErrorHandlers` middleware now use `ServiceResponse<EitherBody<B>>` to allow changing the body type. [#2515]
- Both variants in `ErrorHandlerResponse` now use `ServiceResponse<EitherBody<B>>`. [#2515]
- Rename `test::{default_service => simple_service}`. Old name is deprecated. [#2518]
- Rename `test::{read_response_json => call_and_read_body_json}`. Old name is deprecated. [#2518]
- Rename `test::{read_response => call_and_read_body}`. Old name is deprecated. [#2518]
- Relax body type and error bounds on test utilities. [#2518]
### Removed
- Top-level `EitherExtractError` export. [#2510]
- Conversion implementations for `either` crate. [#2516]
- `test::load_stream` and `test::load_body`; replace usage with `body::to_bytes`. [#2518]
[#2510]: https://github.com/actix/actix-web/pull/2510
[#2515]: https://github.com/actix/actix-web/pull/2515
[#2516]: https://github.com/actix/actix-web/pull/2516
[#2518]: https://github.com/actix/actix-web/pull/2518
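
A hedged sketch of the `customize` API mentioned above, assuming `CustomizeResponder` exposes `with_status` and `insert_header` as in later 4.x releases; the handler name and header values are invented for illustration:

```rust
use actix_web::{
    http::{header::ContentType, StatusCode},
    Responder,
};

// Wraps a plain string responder and tweaks its status and headers before it
// is converted into a response.
async fn create_item() -> impl Responder {
    "item stored"
        .customize()
        .with_status(StatusCode::CREATED)
        .insert_header(ContentType::plaintext())
}
```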
## 4.0.0-beta.14 - 2021-12-11
### Added
- Methods on `AcceptLanguage`: `ranked` and `preference`. [#2480]
- `AcceptEncoding` typed header. [#2482]
- `Range` typed header. [#2485]
- `HttpResponse::map_into_{left,right}_body` and `HttpResponse::map_into_boxed_body`. [#2468]
- `ServiceResponse::map_into_{left,right}_body` and `HttpResponse::map_into_boxed_body`. [#2468]
- Connection data set through the `HttpServer::on_connect` callback is now accessible only from the new `HttpRequest::conn_data()` and `ServiceRequest::conn_data()` methods. [#2491]
- `HttpRequest::{req_data,req_data_mut}`. [#2487]
- `ServiceResponse::into_parts`. [#2499]
### Changed
- Rename `Accept::{mime_precedence => ranked}`. [#2480]
- Rename `Accept::{mime_preference => preference}`. [#2480]
- Un-deprecate `App::data_factory`. [#2484]
- `HttpRequest::url_for` no longer constructs URLs with query or fragment components. [#2430]
- Remove `B` (body) type parameter on `App`. [#2493]
- Add `B` (body) type parameter on `Scope`. [#2492]
- Request-local data container is no longer part of a `RequestHead`. Instead it is a distinct part of a `Request`. [#2487]
### Fixed
- Accept wildcard `*` items in `AcceptLanguage`. [#2480]
- Re-exports `dev::{BodySize, MessageBody, SizedStream}`. They are exposed through the `body` module. [#2468]
- Typed headers containing lists that require one or more items now enforce this minimum. [#2482]
### Removed
- `ConnectionInfo::get`. [#2487]
[#2430]: https://github.com/actix/actix-web/pull/2430
[#2468]: https://github.com/actix/actix-web/pull/2468
[#2480]: https://github.com/actix/actix-web/pull/2480
[#2482]: https://github.com/actix/actix-web/pull/2482
[#2484]: https://github.com/actix/actix-web/pull/2484
[#2485]: https://github.com/actix/actix-web/pull/2485
[#2487]: https://github.com/actix/actix-web/pull/2487
[#2491]: https://github.com/actix/actix-web/pull/2491
[#2492]: https://github.com/actix/actix-web/pull/2492
[#2493]: https://github.com/actix/actix-web/pull/2493
[#2499]: https://github.com/actix/actix-web/pull/2499
## 4.0.0-beta.13 - 2021-11-30
### Changed
- Update `actix-tls` to `3.0.0-rc.1`. [#2474]
[#2474]: https://github.com/actix/actix-web/pull/2474
## 4.0.0-beta.12 - 2021-11-22
### Changed
- Compress middleware's response type is now `AnyBody<Encoder<B>>`. [#2448]
### Fixed
- Relax `Unpin` bound on `S` (stream) parameter of `HttpResponseBuilder::streaming`. [#2448]
### Removed
- `dev::ResponseBody` re-export; its function is replaced by the new `dev::AnyBody` enum. [#2446]
[#2446]: https://github.com/actix/actix-web/pull/2446
[#2448]: https://github.com/actix/actix-web/pull/2448
## 4.0.0-beta.11 - 2021-11-15
### Added
- Re-export `dev::ServerHandle` from `actix-server`. [#2442]
### Changed
- `ContentType::html` now produces `text/html; charset=utf-8` instead of `text/html`. [#2423]
- Update `actix-server` to `2.0.0-beta.9`. [#2442]
[#2423]: https://github.com/actix/actix-web/pull/2423
[#2442]: https://github.com/actix/actix-web/pull/2442
## 4.0.0-beta.10 - 2021-10-20
### Added
- Option to allow `Json` extractor to work without a `Content-Type` header present. [#2362]
- `#[actix_web::test]` macro for setting up tests with a runtime (see the sketch below). [#2409]
### Changed
- Associated type `FromRequest::Config` was removed. [#2233]
- Inner field made private on `web::Payload`. [#2384]
- `Data::into_inner` and `Data::get_ref` no longer requires `T: Sized`. [#2403]
- Updated rustls to v0.20. [#2414]
- Minimum supported Rust version (MSRV) is now 1.52.
### Removed
- Useless `ServiceResponse::checked_expr` method. [#2401]
[#2233]: https://github.com/actix/actix-web/pull/2233
[#2362]: https://github.com/actix/actix-web/pull/2362
[#2384]: https://github.com/actix/actix-web/pull/2384
[#2401]: https://github.com/actix/actix-web/pull/2401
[#2403]: https://github.com/actix/actix-web/pull/2403
[#2409]: https://github.com/actix/actix-web/pull/2409
[#2414]: https://github.com/actix/actix-web/pull/2414
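
A sketch of how the new test macro can be used, assuming the familiar `test::init_service`/`call_service` helpers; the route and assertion are arbitrary:

```rust
use actix_web::{test, web, App, HttpResponse};

// #[actix_web::test] sets up an async runtime for the test body, mirroring
// what #[actix_web::main] does for binaries.
#[actix_web::test]
async fn index_responds_ok() {
    let app = test::init_service(
        App::new().route("/", web::get().to(|| async { HttpResponse::Ok().finish() })),
    )
    .await;

    let req = test::TestRequest::get().uri("/").to_request();
    let res = test::call_service(&app, req).await;
    assert!(res.status().is_success());
}
```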
## 4.0.0-beta.9 - 2021-09-09
### Added
- Re-export actix-service `ServiceFactory` in `dev` module. [#2325]
### Changed
- Compress middleware will return 406 Not Acceptable when no content encoding is acceptable to the client. [#2344]
- Move `BaseHttpResponse` to `dev::Response`. [#2379]
- Enable `TestRequest::param` to accept more than just static strings. [#2172]
- Minimum supported Rust version (MSRV) is now 1.51.
### Fixed
- Fix quality parse error in Accept-Encoding header. [#2344]
- Re-export correct type at `web::HttpResponse`. [#2379]
[#2172]: https://github.com/actix/actix-web/pull/2172
[#2325]: https://github.com/actix/actix-web/pull/2325
[#2344]: https://github.com/actix/actix-web/pull/2344
[#2379]: https://github.com/actix/actix-web/pull/2379
## 4.0.0-beta.8 - 2021-06-26
### Added
- Add `ServiceRequest::parts_mut`. [#2177]
- Add extractors for `Uri` and `Method`. [#2263]
- Add extractors for `ConnectionInfo` and `PeerAddr`. [#2263]
- Add `Route::service` for using hand-written services as handlers. [#2262]
### Changed
- Change compression algorithm features flags. [#2250]
- Deprecate `App::data` and `App::data_factory`. [#2271]
- Smarter extraction of `ConnectionInfo` parts. [#2282]
### Fixed
- Scope and Resource middleware can access data items set on their own layer. [#2288]
[#2177]: https://github.com/actix/actix-web/pull/2177
[#2250]: https://github.com/actix/actix-web/pull/2250
[#2271]: https://github.com/actix/actix-web/pull/2271
[#2262]: https://github.com/actix/actix-web/pull/2262
[#2263]: https://github.com/actix/actix-web/pull/2263
[#2282]: https://github.com/actix/actix-web/pull/2282
[#2288]: https://github.com/actix/actix-web/pull/2288
## 4.0.0-beta.7 - 2021-06-17
### Added
- `HttpServer::worker_max_blocking_threads` for setting block thread pool. [#2200]
### Changed
- Adjusted default JSON payload limit to 2MB (from 32kb) and included size and limits in the `JsonPayloadError::Overflow` error variant. [#2162]
[#2162]: https://github.com/actix/actix-web/pull/2162
- `ServiceResponse::error_response` now uses body type of `Body`. [#2201]
- `ServiceResponse::checked_expr` now returns a `Result`. [#2201]
- Update `language-tags` to `0.3`.
- `ServiceResponse::take_body`. [#2201]
- `ServiceResponse::map_body` closure receives and returns `B` instead of `ResponseBody<B>` types. [#2201]
- All error trait bounds in server service builders have changed from `Into<Error>` to `Into<Response<AnyBody>>`. [#2253]
- All error trait bounds in message body and stream impls changed from `Into<Error>` to `Into<Box<dyn std::error::Error>>`. [#2253]
- `HttpServer::{listen_rustls(), bind_rustls()}` now honor the ALPN protocols in the configuration parameter. [#2226]
- `middleware::normalize` now will not try to normalize URIs with no valid path [#2246]
### Removed
- `HttpResponse::take_body` and old `HttpResponse::into_body` method that casted body type. [#2201]
[#2200]: https://github.com/actix/actix-web/pull/2200
[#2201]: https://github.com/actix/actix-web/pull/2201
[#2253]: https://github.com/actix/actix-web/pull/2253
[#2246]: https://github.com/actix/actix-web/pull/2246
## 4.0.0-beta.6 - 2021-04-17
### Added
- `HttpResponse` and `HttpResponseBuilder` structs. [#2065]
### Changed
- Most error types are now marked `#[non_exhaustive]`. [#2148]
- Methods on `ContentDisposition` that took `T: AsRef<str>` now take `impl AsRef<str>`.
[#2065]: https://github.com/actix/actix-web/pull/2065
[#2148]: https://github.com/actix/actix-web/pull/2148
## 4.0.0-beta.5 - 2021-04-02
### Added
- `Header` extractor for extracting common HTTP headers in handlers (see the sketch below). [#2094]
- Added `TestServer::client_headers` method. [#2097]
### Fixed
- Double ampersand in Logger format is escaped correctly. [#2067]
### Changed
- `CustomResponder` now returns the error as an `HttpResponse` when `CustomResponder::with_header` fails, instead of skipping it. (Only the first error is kept when multiple errors occur.) [#2093]
### Removed
- The `client` mod was removed. Clients should now use `awc` directly.
[871ca5e4](https://github.com/actix/actix-web/commit/871ca5e4ae2bdc22d1ea02701c2992fa8d04aed7)
- Integration testing was moved to new `actix-test` crate. Namely these items from the `test`
module: `TestServer`, `TestServerConfig`, `start`, `start_with`, and `unused_addr`. [#2112]
[#2067]: https://github.com/actix/actix-web/pull/2067
[#2093]: https://github.com/actix/actix-web/pull/2093
[#2094]: https://github.com/actix/actix-web/pull/2094
[#2097]: https://github.com/actix/actix-web/pull/2097
[#2112]: https://github.com/actix/actix-web/pull/2112
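
A brief sketch of the `Header` extractor mentioned above, assuming it is a tuple struct that can be destructured and that `ContentType` is one of the supported typed headers; the handler is illustrative:

```rust
use actix_web::{http::header::ContentType, web::Header, Responder};

// Responds with 400 Bad Request automatically if the Content-Type header is
// missing or cannot be parsed.
async fn show_content_type(Header(content_type): Header<ContentType>) -> impl Responder {
    format!("payload content type: {}", content_type.0)
}
```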
## 4.0.0-beta.4 - 2021-03-09
### Changed
- Feature `cookies` is now optional and enabled by default. [#1981]
- `JsonBody::new` returns a default limit of 32kB to be consistent with `JsonConfig` and the default
behaviour of the `web::Json<T>` extractor. [#2010]
[#1981]: https://github.com/actix/actix-web/pull/1981
[#2010]: https://github.com/actix/actix-web/pull/2010
## 4.0.0-beta.3 - 2021-02-10
- Update `actix-web-codegen` to `0.5.0-beta.1`.
## 4.0.0-beta.2 - 2021-02-10
### Added
- Add the method `Either<web::Json<T>, web::Form<T>>::into_inner()`, which returns the inner type for whichever variant was created (see the sketch below). Also works for `Either<web::Form<T>, web::Json<T>>`. [#1894]
- Add `services!` macro for helping register multiple services to `App`. [#1933]
- Enable registering a vec of services of the same type to `App` [#1933]
### Changed
- Rework `Responder` trait to be sync and returns `Response`/`HttpResponse` directly.
Making it simpler and more performant. [#1891]
- `ServiceRequest::into_parts` and `ServiceRequest::from_parts` can no longer fail. [#1893]
- `ServiceRequest::from_request` can no longer fail. [#1893]
- Our `Either` type now uses `Left`/`Right` variants (instead of `A`/`B`) [#1894]
- `test::{call_service, read_response, read_response_json, send_request}` take `&Service`
in argument [#1905]
- `App::wrap_fn`, `Resource::wrap_fn` and `Scope::wrap_fn` provide `&Service` in closure
argument. [#1905]
- `web::block` no longer requires the output is a Result. [#1957]
### Fixed
- Multiple calls to `App::data` with the same type now keeps the latest call's data. [#1906]
### Removed
- Public field of `web::Path` has been made private. [#1894]
- Public field of `web::Query` has been made private. [#1894]
- `TestRequest::with_header`; use `TestRequest::default().insert_header()`. [#1869]
- `AppService::set_service_data`; for custom HTTP service factories adding application data, use the
layered data model by calling `ServiceRequest::add_data_container` when handling
requests instead. [#1906]
[#1891]: https://github.com/actix/actix-web/pull/1891
[#1893]: https://github.com/actix/actix-web/pull/1893
[#1894]: https://github.com/actix/actix-web/pull/1894
[#1869]: https://github.com/actix/actix-web/pull/1869
[#1905]: https://github.com/actix/actix-web/pull/1905
[#1906]: https://github.com/actix/actix-web/pull/1906
[#1933]: https://github.com/actix/actix-web/pull/1933
[#1957]: https://github.com/actix/actix-web/pull/1957
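
A rough sketch of the `Either` extractor and `into_inner()` described above; the `Register` type and handler are invented for illustration and assume `serde`'s derive feature is enabled:

```rust
use actix_web::{web, Either, Responder};
use serde::Deserialize;

#[derive(Deserialize)]
struct Register {
    username: String,
}

// Accepts the body as either JSON or a URL-encoded form; into_inner() yields
// the Register value regardless of which variant matched.
async fn register(payload: Either<web::Json<Register>, web::Form<Register>>) -> impl Responder {
    let Register { username } = payload.into_inner();
    format!("registered {}", username)
}
```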
## 4.0.0-beta.1 - 2021-01-07
### Added
- `Compat` middleware enabling generic response body/error type of middlewares like `Logger` and `Compress` to be used in `middleware::Condition` and `Resource`, `Scope` services (see the sketch below). [#1865]
### Changed
- Update `actix-*` dependencies to tokio `1.0` based versions. [#1813]
- Bumped `rand` to `0.8`.
- Update `rust-tls` to `0.19`. [#1813]
- Rename `Handler` to `HandlerService` and rename `Factory` to `Handler`. [#1852]
- The default `TrailingSlash` is now `Trim`, in line with existing documentation. See migration
guide for implications. [#1875]
- Rename `DefaultHeaders::{content_type => add_content_type}`. [#1875]
- MSRV is now 1.46.0.
### Fixed
- Added the underlying parse error to `test::read_body_json`'s panic message. [#1812]
### Removed
- Public modules `middleware::{normalize, err_handlers}`. All necessary middleware structs are now
exposed directly by the `middleware` module.
- Remove `actix-threadpool` as dependency. `actix_threadpool::BlockingError` error type can be imported
from `actix_web::error` module. [#1878]
[#1812]: https://github.com/actix/actix-web/pull/1812
[#1813]: https://github.com/actix/actix-web/pull/1813
[#1852]: https://github.com/actix/actix-web/pull/1852
[#1865]: https://github.com/actix/actix-web/pull/1865
[#1875]: https://github.com/actix/actix-web/pull/1875
[#1878]: https://github.com/actix/actix-web/pull/1878
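
A short sketch of `Compat` combined with `Condition`, assuming `Compat::new` and `Condition::new` keep the shapes shown here; the flag is illustrative:

```rust
use actix_web::{
    middleware::{Compat, Condition, Logger},
    App,
};

// Compat boxes the wrapped middleware's body/error types so that Logger can be
// used inside Condition, which needs a single concrete middleware type.
fn build(enable_logging: bool) {
    let _app = App::new().wrap(Condition::new(enable_logging, Compat::new(Logger::default())));
}
```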
## 3.3.3 - 2021-12-18
### Changed
- Soft-deprecate `NormalizePath::default()`, noting upcoming behavior change in v4. [#2529]
[#2529]: https://github.com/actix/actix-web/pull/2529
## 3.3.2 - 2020-12-01
### Fixed
- Removed an occasional `unwrap` on `None` panic in `NormalizePathNormalization`. [#1762]
- Fix `match_pattern()` returning `None` for scope with empty path resource. [#1798]
- Increase minimum `socket2` version. [#1803]
[#1762]: https://github.com/actix/actix-web/pull/1762
[#1798]: https://github.com/actix/actix-web/pull/1798
[#1803]: https://github.com/actix/actix-web/pull/1803
## 3.3.1 - 2020-11-29
- Ensure `actix-http` dependency uses same `serde_urlencoded`.
## 3.3.0 - 2020-11-25
### Added
- Add `Either<A, B>` extractor helper. [#1788]
### Changed
- Upgrade `serde_urlencoded` to `0.7`. [#1773]
[#1773]: https://github.com/actix/actix-web/pull/1773
[#1788]: https://github.com/actix/actix-web/pull/1788
## 3.2.0 - 2020-10-30
### Added
- Implement `exclude_regex` for Logger middleware. [#1723]
- Add request-local data extractor `web::ReqData`. [#1748]
- Add ability to register closure for request middleware logging. [#1749]
- Add `app_data` to `ServiceConfig`. [#1757]
- Expose `on_connect` for access to the connection stream before request is handled. [#1754]
### Changed
- Updated actix-web-codegen dependency for access to new `#[route(...)]` multi-method macro.
- Print non-configured `Data<T>` type when attempting extraction. [#1743]
- Re-export bytes::Buf{Mut} in web module. [#1750]
- Upgrade `pin-project` to `1.0`.
[#1723]: https://github.com/actix/actix-web/pull/1723
[#1743]: https://github.com/actix/actix-web/pull/1743
[#1748]: https://github.com/actix/actix-web/pull/1748
[#1750]: https://github.com/actix/actix-web/pull/1750
[#1754]: https://github.com/actix/actix-web/pull/1754
[#1749]: https://github.com/actix/actix-web/pull/1749
## 3.1.0 - 2020-09-29
### Changed
- Add `TrailingSlash::MergeOnly` behaviour to `NormalizePath`, which allows `NormalizePath`
to retain any trailing slashes. [#1695]
- Remove bound `std::marker::Sized` from `web::Data` to support storing `Arc<dyn Trait>` via `web::Data::from` (see the sketch below). [#1710]
### Fixed
- `ResourceMap` debug printing is no longer infinitely recursive. [#1708]
[#1695]: https://github.com/actix/actix-web/pull/1695
[#1708]: https://github.com/actix/actix-web/pull/1708
[#1710]: https://github.com/actix/actix-web/pull/1710
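
A sketch of storing a trait object via `web::Data::from`, as enabled by the `Sized` bound removal above; the `Greeter` trait and its implementation are illustrative:

```rust
use std::sync::Arc;

use actix_web::{web, App};

trait Greeter: Send + Sync {
    fn greet(&self) -> String;
}

struct EnglishGreeter;

impl Greeter for EnglishGreeter {
    fn greet(&self) -> String {
        "hello".to_owned()
    }
}

// Data<dyn Greeter> can now be constructed from an Arc and shared as app data.
fn build_app() {
    let greeter: Arc<dyn Greeter> = Arc::new(EnglishGreeter);
    let greeter: web::Data<dyn Greeter> = web::Data::from(greeter);
    let _app = App::new().app_data(greeter);
}
```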
## 3.0.2 - 2020-09-15
### Fixed
- `NormalizePath` when used with `TrailingSlash::Trim` no longer trims the root path "/". [#1678]
[#1678]: https://github.com/actix/actix-web/pull/1678
## 3.0.1 - 2020-09-13
### Changed
- `middleware::normalize::TrailingSlash` enum is now accessible. [#1673]
[#1673]: https://github.com/actix/actix-web/pull/1673
## 3.0.0 - 2020-09-11
- No significant changes from `3.0.0-beta.4`.
## 3.0.0-beta.4 - 2020-09-09
### Added
- `middleware::NormalizePath` now has configurable behavior for either always having a trailing
slash, or as the new addition, always trimming trailing slashes. [#1639]
### Changed
- Update actix-codec and actix-utils dependencies. [#1634]
- `FormConfig` and `JsonConfig` configurations are now also considered when set
using `App::data`. [#1641]
- `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`. [#1655]
- `HttpServer::maxconnrate` is renamed to the more expressive
`HttpServer::max_connection_rate`. [#1655]
[#1639]: https://github.com/actix/actix-web/pull/1639
[#1641]: https://github.com/actix/actix-web/pull/1641
[#1634]: https://github.com/actix/actix-web/pull/1634
[#1655]: https://github.com/actix/actix-web/pull/1655
## 3.0.0-beta.3 - 2020-08-17
### Changed
- Update `rustls` to 0.18
## 3.0.0-beta.2 - 2020-08-17
### Changed
- `PayloadConfig` is now also considered in `Bytes` and `String` extractors when set
using `App::data`. [#1610]
- `web::Path` now has a public representation: `web::Path(pub T)` that enables
destructuring. [#1594]
- `ServiceRequest::app_data` allows retrieval of non-Data data without splitting into parts to
access `HttpRequest` which already allows this. [#1618]
- Re-export all error types from `awc`. [#1621]
- MSRV is now 1.42.0.
### Fixed
- Memory leak of app data in pooled requests. [#1609]
[#1594]: https://github.com/actix/actix-web/pull/1594
[#1609]: https://github.com/actix/actix-web/pull/1609
[#1610]: https://github.com/actix/actix-web/pull/1610
[#1618]: https://github.com/actix/actix-web/pull/1618
[#1621]: https://github.com/actix/actix-web/pull/1621
## 3.0.0-beta.1 - 2020-07-13
### Added
- Re-export `actix_rt::main` as `actix_web::main`.
- `HttpRequest::match_pattern` and `ServiceRequest::match_pattern` for extracting the matched resource pattern (see the sketch below).
- `HttpRequest::match_name` and `ServiceRequest::match_name` for extracting matched resource name.
### Changed
- Fix actix_http::h1::dispatcher so it returns when HW_BUFFER_SIZE is reached. Should reduce peak memory consumption during large uploads. [#1550]
- Migrate cookie handling to `cookie` crate. Actix-web no longer requires `ring` dependency.
- MSRV is now 1.41.1
### Fixed
- `NormalizePath` improved consistency when path needs slashes added _and_ removed.
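
A small sketch of the new pattern/name accessors, assuming a handler that simply echoes them; the route setup is omitted:

```rust
use actix_web::{HttpRequest, Responder};

// match_pattern() returns the route pattern that matched (e.g. "/users/{id}"),
// and match_name() returns the resource name if one was set.
async fn which_route(req: HttpRequest) -> impl Responder {
    format!(
        "pattern: {:?}, name: {:?}",
        req.match_pattern(),
        req.match_name()
    )
}
```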
## 3.0.0-alpha.3 - 2020-05-21
### Added
- Add option to create `Data<T>` from `Arc<T>` [#1509]
### Changed
- Resources and Scopes can now access non-overridden data types set on App (or containing scopes) when setting their own data. [#1486]
- Fix audit issue logging by default peer address [#1485]
- Bump minimum supported Rust version to 1.40
- Replace deprecated `net2` crate with `socket2`
[#1485]: https://github.com/actix/actix-web/pull/1485
[#1509]: https://github.com/actix/actix-web/pull/1509
## [3.0.0-alpha.2] - 2020-05-08
### Changed
- `{Resource,Scope}::default_service(f)` handlers now support app data extraction. [#1452]
- Implement `std::error::Error` for our custom errors [#1422]
- NormalizePath middleware now appends trailing / so that routes of form /example/ respond to /example requests. [#1433]
- Remove the `failure` feature and support.
[#1422]: https://github.com/actix/actix-web/pull/1422
[#1433]: https://github.com/actix/actix-web/pull/1433
[#1452]: https://github.com/actix/actix-web/pull/1452
[#1486]: https://github.com/actix/actix-web/pull/1486
## [3.0.0-alpha.1] - 2020-03-11
### Added
- Add helper function for creating routes with `TRACE` method guard `web::trace()`
- Add convenience functions `test::read_body_json()` and `test::TestRequest::send_request()` for testing.
### Changed
- Use `sha-1` crate instead of unmaintained `sha1` crate
- Skip empty chunks when returning response from a `Stream` [#1308]
- Update the `time` dependency to 0.2.7
- Update `actix-tls` dependency to 2.0.0-alpha.1
- Update `rustls` dependency to 0.17
[#1308]: https://github.com/actix/actix-web/pull/1308
## [2.0.0] - 2019-12-25
### Changed
- Rename `HttpServer::start()` to `HttpServer::run()`
- Allow to gracefully stop test server via `TestServer::stop()`
- Allow to specify multi-patterns for resources
## [2.0.0-rc] - 2019-12-20
### Changed
- Move `BodyEncoding` to `dev` module #1220
- Allow to set `peer_addr` for TestRequest #1074
- Make web::Data deref to Arc<T> #1214
- Rename `App::register_data()` to `App::app_data()`
- `HttpRequest::app_data<T>()` returns `Option<&T>` instead of `Option<&Data<T>>`
### Fixed
- Fix `AppConfig::secure()` is always false. #1202
## [2.0.0-alpha.6] - 2019-12-15
### Fixed
- Fixed compilation with default features off
## [2.0.0-alpha.5] - 2019-12-13
### Added
- Add test server, `test::start()` and `test::start_with()`
## [2.0.0-alpha.4] - 2019-12-08
### Deleted
- Delete HttpServer::run(), it is not useful with async/await
## [2.0.0-alpha.3] - 2019-12-07
### Changed
- Migrate to tokio 0.2
## [2.0.0-alpha.1] - 2019-11-22
### Changed
- Migrated to `std::future`
- Remove implementation of `Responder` for `()`. (#1167)
## [1.0.9] - 2019-11-14
### Added
- Add `Payload::into_inner` method and make stored `def::Payload` public. (#1110)
### Changed
- Support `Host` guards when the `Host` header is unset (e.g. HTTP/2 requests) (#1129)
## [1.0.8] - 2019-09-25
### Added
- Add `Scope::register_data` and `Resource::register_data` methods, parallel to
`App::register_data`.
- Add `middleware::Condition` that conditionally enables another middleware
- Allow to re-construct `ServiceRequest` from `HttpRequest` and `Payload`
- Add `HttpServer::listen_uds` for ability to listen on UDS FD rather than path,
which is useful for example with systemd.
### Changed
- Make UrlEncodedError::Overflow more informative
- Use actix-testing for testing utils
## [1.0.7] - 2019-08-29
### Fixed
- Request Extensions leak #1062
## [1.0.6] - 2019-08-28
### Added
- Re-implement Host predicate (#989)
- Form implements Responder, returning a `application/x-www-form-urlencoded` response
- Add `into_inner` to `Data`
- Add `test::TestRequest::set_form()` convenience method to automatically serialize data and set
the header in test requests.
### Changed
- `Query` payload made `pub`. Allows user to pattern-match the payload.
- Enable `rust-tls` feature for client #1045
- Update serde_urlencoded to 0.6.1
- Update url to 2.1
## [1.0.5] - 2019-07-18
### Added
- Unix domain sockets (HttpServer::bind_uds) #92
- Actix now logs errors resulting in "internal server error" responses always, with the `error`
logging level
### Fixed
- Restored logging of errors through the `Logger` middleware
## [1.0.4] - 2019-07-17
### Added
- Add `Responder` impl for `(T, StatusCode) where T: Responder` (see the sketch below)
- Allow to access app's resource map via
`ServiceRequest::resource_map()` and `HttpRequest::resource_map()` methods.
### Changed
- Upgrade `rand` dependency version to 0.7
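
A minimal sketch of the tuple responder added above; the body and status code are arbitrary:

```rust
use actix_web::{http::StatusCode, Responder};

// The string is used as the response body while the status code overrides the
// default 200 OK.
async fn create() -> impl Responder {
    ("created", StatusCode::CREATED)
}
```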
## [1.0.3] - 2019-06-28
### Added
- Support asynchronous data factories #850
### Changed
- Use `encoding_rs` crate instead of unmaintained `encoding` crate
## [1.0.2] - 2019-06-17
### Changed
- Move cors middleware to `actix-cors` crate.
- Move identity middleware to `actix-identity` crate.
## [1.0.1] - 2019-06-17
### Added
- Add support for PathConfig #903
- Add `middleware::identity::RequestIdentity` trait to `get_identity` from `HttpMessage`.
### Changed
- Move cors middleware to `actix-cors` crate.
- Move identity middleware to `actix-identity` crate.
- Disable default feature `secure-cookies`.
- Allow to test an app that uses async actors #897
- Re-apply patch from #637 #894
### Fixed
- HttpRequest::url_for is broken with nested scopes #915
## [1.0.0] - 2019-06-05
### Added
- Add `Scope::configure()` method.
- Add `ServiceRequest::set_payload()` method.
- Add `test::TestRequest::set_json()` convenience method to automatically
serialize data and set header in test requests.
- Add macros for head, options, trace, connect and patch http methods
### Changed
- Drop an unnecessary `Option<_>` indirection around `ServerBuilder` from `HttpServer`. #863
### Fixed
- Fix Logger request time format, and use rfc3339. #867
- Clear http requests pool on app service drop #860
## [1.0.0-rc] - 2019-05-18
### Added
- Add `Query<T>::from_query()` to extract parameters from a query string (see the sketch below). #846
- `QueryConfig`, similar to `JsonConfig` for customizing error handling of query extractors.
### Changed
- `JsonConfig` is now `Send + Sync`, this implies that `error_handler` must be `Send + Sync` too.
### Fixed
- Codegen with parameters in the path only resolves the first registered endpoint #841
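
A short sketch of `Query<T>::from_query()`, assuming a `serde`-deserializable parameter struct; the query string and types are made up:

```rust
use actix_web::web::Query;
use serde::Deserialize;

#[derive(Deserialize)]
struct Pagination {
    page: u32,
}

// Parses a raw query string outside of the usual extractor flow; Query derefs
// to the inner Pagination value.
fn parse_page() -> u32 {
    let params = Query::<Pagination>::from_query("page=3").unwrap();
    params.page
}
```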
## [1.0.0-beta.4] - 2019-05-12
### Added
- Allow to set/override app data on scope level
### Changed
- `App::configure` take an `FnOnce` instead of `Fn`
- Upgrade actix-net crates
## [1.0.0-beta.3] - 2019-05-04
### Added
- Add helper function for executing futures `test::block_fn()`
### Changed
- Extractor configuration could be registered with `App::data()`
or with `Resource::data()` #775
- Route data is unified with app data, `Route::data()` moved to resource
level to `Resource::data()`
- CORS handling without headers #702
- Allow constructing `Data` instances to avoid double `Arc` for `Send + Sync` types.
### Fixed
- Fix `NormalizePath` middleware impl #806
### Deleted
- `App::data_factory()` is deleted.
## [1.0.0-beta.2] - 2019-04-24
### Added
- Add raw services support via `web::service()`
- Add helper functions for reading response body `test::read_body()`
- Add support for `remainder match` (i.e "/path/{tail}*")
- Extend `Responder` trait, allow to override status code and headers.
- Store visit and login timestamp in the identity cookie #502
### Changed
- `.to_async()` handler can return `Responder` type #792
### Fixed
- Fix async web::Data factory handling
## [1.0.0-beta.1] - 2019-04-20
### Added
- Add helper functions for reading test response body,
`test::read_response()` and `test::read_response_json()`
- Add `.peer_addr()` #744
- Add `NormalizePath` middleware
### Changed
- Rename `RouterConfig` to `ServiceConfig`
- Rename `test::call_success` to `test::call_service`
- Removed `ServiceRequest::from_parts()` as it is unsafe to create from parts.
- `CookieIdentityPolicy::max_age()` accepts value in seconds
### Fixed
- Fixed `TestRequest::app_data()`
## [1.0.0-alpha.6] - 2019-04-14
### Changed
- Allow using any service as default service.
- Remove generic type for request payload, always use default.
- Removed `Decompress` middleware. Bytes, String, Json, Form extractors
automatically decompress payload.
- Make extractor config type explicit. Add `FromRequest::Config` associated type.
## [1.0.0-alpha.5] - 2019-04-12
### Added
- Added async io `TestBuffer` for testing.
### Deleted
- Removed native-tls support
## [1.0.0-alpha.4] - 2019-04-08
### Added
- `App::configure()` allow to offload app configuration to different methods
- Added `URLPath` option for logger
- Added `ServiceRequest::app_data()`, returns `Data<T>`
- Added `ServiceFromRequest::app_data()`, returns `Data<T>`
### Changed
- `FromRequest` trait refactoring
- Move multipart support to actix-multipart crate
### Fixed
- Fix body propagation in Response::from_error. #760
## [1.0.0-alpha.3] - 2019-04-02
### Changed
- Renamed `TestRequest::to_service()` to `TestRequest::to_srv_request()`
- Renamed `TestRequest::to_response()` to `TestRequest::to_srv_response()`
- Removed `Deref` impls
### Removed
- Removed unused `actix_web::web::md()`
## [1.0.0-alpha.2] - 2019-03-29
### Added
- Rustls support
### Changed
- Use forked cookie
- Multipart::Field renamed to MultipartField
## [1.0.0-alpha.1] - 2019-03-28
### Changed
- Complete architecture re-design.
- Return 405 response if no matching route found within resource #538

Cargo.lock generated

File diff suppressed because it is too large.


@ -1,25 +1,126 @@

master:
[workspace]
resolver = "2"
members = [
"actix-files",
"actix-http-test",
"actix-http",
"actix-multipart",
"actix-multipart-derive",
"actix-router",
"actix-test",
"actix-web-actors",
"actix-web-codegen",
"actix-web",
"awc",
]
[workspace.package]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web"
license = "MIT OR Apache-2.0"
edition = "2021"
rust-version = "1.75"

http-test-v3.0.0-beta.11:
[package]
name = "actix-web"
version = "4.0.0-beta.19"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust"
keywords = ["actix", "http", "web", "framework", "async"]
categories = [
"network-programming",
"asynchronous",
"web-programming::http-server",
"web-programming::websocket"
]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
license = "MIT OR Apache-2.0"
edition = "2018"
[package.metadata.docs.rs]
# features that docs.rs will build with
features = ["openssl", "rustls", "compress-brotli", "compress-gzip", "compress-zstd", "cookies", "secure-cookies"]
rustdoc-args = ["--cfg", "docsrs"]
[lib]
name = "actix_web"
path = "src/lib.rs"
[workspace]
resolver = "2"
members = [
".",
"actix-files",
"actix-http-test",
"actix-http",
"actix-multipart",
"actix-router",
"actix-test",
"actix-web-actors",
"actix-web-codegen",
"awc",
]
[features]
default = ["compress-brotli", "compress-gzip", "compress-zstd", "cookies"]
# Brotli algorithm content-encoding support
compress-brotli = ["actix-http/compress-brotli", "__compress"]
# Gzip and deflate algorithms content-encoding support
compress-gzip = ["actix-http/compress-gzip", "__compress"]
# Zstd algorithm content-encoding support
compress-zstd = ["actix-http/compress-zstd", "__compress"]
# support for cookies
cookies = ["cookie"]
# secure cookies feature
secure-cookies = ["cookie/secure"]
# openssl
openssl = ["actix-http/openssl", "actix-tls/accept", "actix-tls/openssl"]
# rustls
rustls = ["actix-http/rustls", "actix-tls/accept", "actix-tls/rustls"]
# Internal (PRIVATE!) features used to aid testing and checking feature status.
# Don't rely on these whatsoever. They may disappear at anytime.
__compress = []
# io-uring feature only available for Linux OSes.
experimental-io-uring = ["actix-server/io-uring"]
[dependencies]
actix-codec = "0.4.1"
actix-macros = "0.2.3"
actix-rt = "2.3"
actix-server = "2.0.0-rc.2"
actix-service = "2.0.0"
actix-utils = "3.0.0"
actix-tls = { version = "3.0.0", default-features = false, optional = true }
actix-http = "3.0.0-beta.18"
actix-router = "0.5.0-beta.4"
actix-web-codegen = "0.5.0-rc.1"
ahash = "0.7"
bytes = "1"
cfg-if = "1"
cookie = { version = "0.16", features = ["percent-encode"], optional = true }
derive_more = "0.99.5"
encoding_rs = "0.8"
futures-core = { version = "0.3.7", default-features = false }
futures-util = { version = "0.3.7", default-features = false }
itoa = "1"
language-tags = "0.3"
once_cell = "1.5"
log = "0.4"
mime = "0.3"
pin-project-lite = "0.2.7"
regex = "1.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_urlencoded = "0.7"
smallvec = "1.6.1"
socket2 = "0.4.0"
time = { version = "0.3", default-features = false, features = ["formatting"] }
url = "2.1"
[dev-dependencies]
actix-files = "0.6.0-beta.13"
actix-test = { version = "0.1.0-beta.11", features = ["openssl", "rustls"] }
awc = { version = "3.0.0-beta.18", features = ["openssl"] }
brotli2 = "0.3.2"
const-str = "0.3"
criterion = { version = "0.3", features = ["html_reports"] }
env_logger = "0.9"
flate2 = "1.0.13"
futures-util = { version = "0.3.7", default-features = false, features = ["std"] }
rand = "0.8"
rcgen = "0.8"
rustls-pemfile = "0.2"
tls-openssl = { package = "openssl", version = "0.10.9" }
tls-rustls = { package = "rustls", version = "0.20.0" }
zstd = "0.9"
[profile.dev] [profile.dev]
# Disabling debug info speeds up builds a bunch and we don't rely on it for debugging that much. # Disabling debug info speeds up builds a bunch and we don't rely on it for debugging that much.
@ -35,10 +136,9 @@ actix-files = { path = "actix-files" }
actix-http = { path = "actix-http" } actix-http = { path = "actix-http" }
actix-http-test = { path = "actix-http-test" } actix-http-test = { path = "actix-http-test" }
actix-multipart = { path = "actix-multipart" } actix-multipart = { path = "actix-multipart" }
actix-multipart-derive = { path = "actix-multipart-derive" }
actix-router = { path = "actix-router" } actix-router = { path = "actix-router" }
actix-test = { path = "actix-test" } actix-test = { path = "actix-test" }
actix-web = { path = "actix-web" } actix-web = { path = "." }
actix-web-actors = { path = "actix-web-actors" } actix-web-actors = { path = "actix-web-actors" }
actix-web-codegen = { path = "actix-web-codegen" } actix-web-codegen = { path = "actix-web-codegen" }
awc = { path = "awc" } awc = { path = "awc" }
@ -52,10 +152,30 @@ awc = { path = "awc" }
# actix-tls = { path = "../actix-net/actix-tls" } # actix-tls = { path = "../actix-net/actix-tls" }
# actix-server = { path = "../actix-net/actix-server" } # actix-server = { path = "../actix-net/actix-server" }
[workspace.lints.rust] [[test]]
rust_2018_idioms = { level = "deny" } name = "test_server"
future_incompatible = { level = "deny" } required-features = ["compress-brotli", "compress-gzip", "compress-zstd", "cookies"]
nonstandard_style = { level = "deny" }
[workspace.lints.clippy] [[example]]
# clone_on_ref_ptr = { level = "deny" } name = "basic"
required-features = ["compress-gzip"]
[[example]]
name = "uds"
required-features = ["compress-gzip"]
[[example]]
name = "on-connect"
required-features = []
[[bench]]
name = "server"
harness = false
[[bench]]
name = "service"
harness = false
[[bench]]
name = "responder"
harness = false

677
MIGRATION.md Normal file
View File

@ -0,0 +1,677 @@
## Unreleased
- The default `NormalizePath` behavior now strips trailing slashes. This was
previously documented to be the case in v3; the behavior now matches that documentation. The effect is that
routes defined with trailing slashes will become inaccessible when
using `NormalizePath::default()`. As such, calling `NormalizePath::default()` will log a warning.
It is advised that the `new` method be used instead.
Before: `#[get("/test/")]`
After: `#[get("/test")]`
Alternatively, explicitly require trailing slashes: `NormalizePath::new(TrailingSlash::Always)`.
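As a rough sketch of the recommended setup (the route and handler here are only placeholders):
```rust
use actix_web::{middleware::{NormalizePath, TrailingSlash}, web, App, HttpResponse};

let app = App::new()
    // Explicitly trim trailing slashes so requests to `/test/` reach the `/test` route.
    .wrap(NormalizePath::new(TrailingSlash::Trim))
    .route("/test", web::get().to(|| async { HttpResponse::Ok() }));
```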
- The `type Config` of `FromRequest` was removed.
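As an illustration of the trait shape without `Config`, a minimal sketch (the `ClientIp` extractor is hypothetical):
```rust
use std::future::{ready, Ready};

use actix_web::{dev::Payload, FromRequest, HttpRequest};

// Hypothetical extractor, shown only to illustrate the trait without `type Config`.
struct ClientIp(String);

impl FromRequest for ClientIp {
    type Error = actix_web::Error;
    type Future = Ready<Result<Self, Self::Error>>;
    // No `type Config` any more; read configuration from app data instead.

    fn from_request(req: &HttpRequest, _payload: &mut Payload) -> Self::Future {
        let ip = req
            .peer_addr()
            .map(|addr| addr.ip().to_string())
            .unwrap_or_else(|| "unknown".to_owned());

        ready(Ok(ClientIp(ip)))
    }
}
```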
- Feature flag `compress` has been split into its supported algorithm (brotli, gzip, zstd).
By default all compression algorithms are enabled.
To select the algorithms to include with `middleware::Compress`, use the following flags:
- `compress-brotli`
- `compress-gzip`
- `compress-zstd`
If you have set dedicated `actix-web` features in your `Cargo.toml` and still want
compression enabled, change the feature selection as below:
Before: `"compress"`
After: `"compress-brotli", "compress-gzip", "compress-zstd"`
## 3.0.0
- The return type for `ServiceRequest::app_data::<T>()` was changed from returning a `Data<T>` to
simply a `T`. To access a `Data<T>` use `ServiceRequest::app_data::<Data<T>>()`.
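A sketch of the difference, assuming a hypothetical `AppState` type registered as app data:
```rust
use actix_web::{dev::ServiceRequest, web::Data};

// Hypothetical application state, for illustration only.
struct AppState;

fn inspect(req: &ServiceRequest) {
    // Returns the bare value if a plain `AppState` was registered.
    let _plain: Option<&AppState> = req.app_data::<AppState>();

    // To get the wrapper registered via `App::data(...)`, ask for `Data<T>` explicitly.
    let _wrapped: Option<&Data<AppState>> = req.app_data::<Data<AppState>>();
}
```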
- Cookie handling has been offloaded to the `cookie` crate:
* `USERINFO_ENCODE_SET` is no longer exposed. Percent-encoding is still supported; check docs.
* Some types now require lifetime parameters.
- The time crate was updated to `v0.2`, a major breaking change which affects
any `actix-web` method previously expecting a time v0.1 input.
- Explicitly setting a cookie's SameSite property to `SameSite::None` will now
result in `SameSite=None` being sent with the response Set-Cookie header.
To create a cookie without a SameSite attribute, remove any calls setting same_site.
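A short sketch of both cases (requires the default `cookies` feature):
```rust
use actix_web::cookie::{Cookie, SameSite};

// Explicit SameSite::None is now written out as `SameSite=None` in Set-Cookie.
let cross_site = Cookie::build("session", "abc123")
    .same_site(SameSite::None)
    .finish();

// Omitting same_site() produces a cookie with no SameSite attribute at all.
let unspecified = Cookie::build("session", "abc123").finish();
```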
- Support for Actors messages was moved to the `actix-http` crate and is enabled
with the `actors` feature.
- The content_length function is removed from actix-http.
You can set Content-Length by setting the response body as usual or by calling the no_chunking function.
- `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
`u64` instead of a `usize`.
- Code that was using `path.<index>` to access a `web::Path<(A, B, C)>`'s elements now needs to use
destructuring or `.into_inner()`. For example:
```rust
// Previously:
async fn some_route(path: web::Path<(String, String)>) -> String {
format!("Hello, {} {}", path.0, path.1)
}
// Now (this also worked before):
async fn some_route(path: web::Path<(String, String)>) -> String {
let (first_name, last_name) = path.into_inner();
format!("Hello, {} {}", first_name, last_name)
}
// Or (this wasn't previously supported):
async fn some_route(web::Path((first_name, last_name)): web::Path<(String, String)>) -> String {
format!("Hello, {} {}", first_name, last_name)
}
```
- `middleware::NormalizePath` can now also be configured to trim trailing slashes instead of always keeping one.
It will need `middleware::normalize::TrailingSlash` when being constructed with `NormalizePath::new(...)`,
or for an easier migration you can replace `wrap(middleware::NormalizePath)` with `wrap(middleware::NormalizePath::new(TrailingSlash::MergeOnly))`.
- `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`.
- `HttpServer::maxconnrate` is renamed to the more expressive `HttpServer::max_connection_rate`.
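A sketch of the renamed builder methods:
```rust
use actix_web::{App, HttpServer};

let server = HttpServer::new(|| App::new())
    .max_connections(25_000) // was maxconn()
    .max_connection_rate(256); // was maxconnrate()
```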
## 2.0.0
- `HttpServer::start()` renamed to `HttpServer::run()`. It is also possible to
`.await` the result of `run()`, in which case it waits for the server to exit.
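A minimal sketch (v2 used the `#[actix_rt::main]` attribute; later versions also provide `#[actix_web::main]`):
```rust
use actix_web::{App, HttpServer};

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    // run() replaces start(); awaiting the returned server waits for it to exit.
    HttpServer::new(|| App::new())
        .bind("127.0.0.1:8080")?
        .run()
        .await
}
```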
- `App::register_data()` renamed to `App::app_data()` and accepts any type `T: 'static`.
Stored data is available via `HttpRequest::app_data()` method at runtime.
- Extractor configuration must be registered with `App::app_data()` instead of `App::data()`
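For example, a sketch of registering the JSON extractor configuration under the new API:
```rust
use actix_web::{web, App, HttpResponse};

let app = App::new()
    // Extractor configuration now goes through app_data(), not data().
    .app_data(web::JsonConfig::default().limit(4096))
    .route("/", web::post().to(|| async { HttpResponse::Ok() }));
```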
- Sync handlers have been removed. The `.to_async()` method has been renamed to `.to()`;
replace `fn` with `async fn` to convert a sync handler to an async one.
- `actix_http_test::TestServer` moved to the `actix_web::test` module. To start a
test server use the `test::start()` or `test::start_with_config()` methods.
- The `ResponseError` trait has been refactored. `ResponseError::error_response()` renders the
HTTP response.
- Feature `rust-tls` renamed to `rustls`
instead of
```toml
actix-web = { version = "2.0.0", features = ["rust-tls"] }
```
use
```toml
actix-web = { version = "2.0.0", features = ["rustls"] }
```
- Feature `ssl` renamed to `openssl`
instead of
```toml
actix-web = { version = "2.0.0", features = ["ssl"] }
```
use
```toml
actix-web = { version = "2.0.0", features = ["openssl"] }
```
- `Cors` builder now requires that you call `.finish()` to construct the middleware
## 1.0.1
- Cors middleware has been moved to `actix-cors` crate
instead of
```rust
use actix_web::middleware::cors::Cors;
```
use
```rust
use actix_cors::Cors;
```
- Identity middleware has been moved to `actix-identity` crate
instead of
```rust
use actix_web::middleware::identity::{Identity, CookieIdentityPolicy, IdentityService};
```
use
```rust
use actix_identity::{Identity, CookieIdentityPolicy, IdentityService};
```
## 1.0.0
- Extractor configuration. In version 1.0 this is handled with the new `Data` mechanism for both setting and retrieving the configuration
instead of
```rust
#[derive(Default)]
struct ExtractorConfig {
config: String,
}
impl FromRequest for YourExtractor {
type Config = ExtractorConfig;
type Result = Result<YourExtractor, Error>;
fn from_request(req: &HttpRequest, cfg: &Self::Config) -> Self::Result {
println!("use the config: {:?}", cfg.config);
...
}
}
App::new().resource("/route_with_config", |r| {
r.post().with_config(handler_fn, |cfg| {
cfg.0.config = "test".to_string();
})
})
```
use the `HttpRequest` to get the configuration like any other `Data` with `req.app_data::<C>()`, and set it with the `data()` method on the `resource`:
```rust
#[derive(Default)]
struct ExtractorConfig {
config: String,
}
impl FromRequest for YourExtractor {
type Error = Error;
type Future = Result<Self, Self::Error>;
type Config = ExtractorConfig;
fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future {
let cfg = req.app_data::<ExtractorConfig>();
println!("config data?: {:?}", cfg.unwrap().role);
...
}
}
App::new().service(
resource("/route_with_config")
.data(ExtractorConfig {
config: "test".to_string(),
})
.route(post().to(handler_fn)),
)
```
- Resource registration. Version 1.0 uses generalized resource
registration via the `.service()` method.
instead of
```rust
App.new().resource("/welcome", |r| r.f(welcome))
```
use App's or Scope's `.service()` method. The `.service()` method accepts an
object that implements the `HttpServiceFactory` trait. By default,
actix-web provides `Resource` and `Scope` services.
```rust
App::new().service(
    web::resource("/welcome")
        .route(web::get().to(welcome))
        .route(web::post().to(post_handler)),
);
```
- Scope registration.
instead of
```rust
let app = App::new().scope("/{project_id}", |scope| {
scope
.resource("/path1", |r| r.f(|_| HttpResponse::Ok()))
.resource("/path2", |r| r.f(|_| HttpResponse::Ok()))
.resource("/path3", |r| r.f(|_| HttpResponse::MethodNotAllowed()))
});
```
use `.service()` for registration and `web::scope()` as scope object factory.
```rust
let app = App::new().service(
web::scope("/{project_id}")
.service(web::resource("/path1").to(|| HttpResponse::Ok()))
.service(web::resource("/path2").to(|| HttpResponse::Ok()))
.service(web::resource("/path3").to(|| HttpResponse::MethodNotAllowed()))
);
```
- `.with()`, `.with_async()` registration methods have been renamed to `.to()` and `.to_async()`.
instead of
```rust
App.new().resource("/welcome", |r| r.with(welcome))
```
use `.to()` or `.to_async()` methods
```rust
App::new().service(web::resource("/welcome").to(welcome))
```
- Passing arguments to a handler with extractors; multiple arguments are allowed.
instead of
```rust
fn welcome((body, req): (Bytes, HttpRequest)) -> ... {
...
}
```
use multiple arguments
```rust
fn welcome(body: Bytes, req: HttpRequest) -> ... {
...
}
```
- `.f()`, `.a()` and `.h()` handler registration methods have been removed.
Use `.to()` for handlers and `.to_async()` for async handlers. Handler functions
must use extractors.
instead of
```rust
App.new().resource("/welcome", |r| r.f(welcome))
```
use App's `to()` or `to_async()` methods
```rust
App::new().service(web::resource("/welcome").to(welcome))
```
- `HttpRequest` does not provide access to the request's payload stream.
instead of
```rust
fn index(req: &HttpRequest) -> Box<Future<Item=HttpResponse, Error=Error>> {
req
.payload()
.from_err()
.fold((), |_, chunk| {
...
})
.map(|_| HttpResponse::Ok().finish())
.responder()
}
```
use `Payload` extractor
```rust
fn index(stream: web::Payload) -> impl Future<Item=HttpResponse, Error=Error> {
stream
.from_err()
.fold((), |_, chunk| {
...
})
.map(|_| HttpResponse::Ok().finish())
}
```
- `State` is now `Data`. You register `Data` during the App initialization process
and then access it from handlers either using a `Data` extractor or using
`HttpRequest`'s API.
instead of
```rust
App::with_state(T)
```
use App's `data` method
```rust
App::new()
    .data(T)
```
and either use the Data extractor within your handler
```rust
use actix_web::web::Data;
fn endpoint_handler(data: Data<T>) {
...
}
```
.. or access your Data element from the HttpRequest
```rust
fn endpoint_handler(req: HttpRequest) {
let data: Option<Data<T>> = req.app_data::<T>();
}
```
- AsyncResponder is removed; use the `.to_async()` registration method and `impl Future<>` as the result type.
instead of
```rust
use actix_web::AsyncResponder;
fn endpoint_handler(...) -> impl Future<Item=HttpResponse, Error=Error>{
...
.responder()
}
```
.. simply omit AsyncResponder and the corresponding responder() finish method
- Middleware
instead of
```rust
let app = App::new()
.middleware(middleware::Logger::default())
```
use `.wrap()` method
```rust
let app = App::new()
.wrap(middleware::Logger::default())
.route("/index.html", web::get().to(index));
```
- The `HttpRequest::body()`, `HttpRequest::urlencoded()`, `HttpRequest::json()`, `HttpRequest::multipart()`
methods have been removed. Use the `Bytes`, `String`, `Form`, `Json`, `Multipart` extractors instead.
instead of
```rust
fn index(req: &HttpRequest) -> Responder {
req.body()
.and_then(|body| {
...
})
}
```
use
```rust
fn index(body: Bytes) -> Responder {
...
}
```
- The `actix_web::server` module has been removed. To start an HTTP server use the `actix_web::HttpServer` type
- StaticFiles and NamedFile have been moved to a separate crate.
instead of `use actix_web::fs::StaticFiles`
use `use actix_files::Files`
instead of `use actix_web::fs::NamedFile`
use `use actix_files::NamedFile`
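A minimal sketch of the replacement, assuming a local `./static` directory:
```rust
use actix_files::Files;
use actix_web::App;

let app = App::new()
    // Files replaces actix_web::fs::StaticFiles; NamedFile lives in the same crate.
    .service(Files::new("/static", "./static").show_files_listing());
```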
- Multipart has been moved to a separate crate.
instead of `use actix_web::multipart::Multipart`
use `use actix_multipart::Multipart`
- Response compression is not enabled by default.
To enable, use `Compress` middleware, `App::new().wrap(Compress::default())`.
- Session middleware moved to actix-session crate
- Actors support has been moved to the `actix-web-actors` crate
- Custom Error
Instead of the error_response method alone, ResponseError now provides two methods: error_response and render_response. error_response creates the error response, and render_response returns that error response to the caller.
The simplest migration from 0.7 to 1.0 is to add the method below to your custom implementation of ResponseError:
```rust
fn render_response(&self) -> HttpResponse {
self.error_response()
}
```
## 0.7.15
- The `' '` character is not percent decoded anymore before matching routes. If you need to use it in
your routes, you should use `%20`.
instead of
```rust
fn main() {
let app = App::new().resource("/my index", |r| {
r.method(http::Method::GET)
.with(index);
});
}
```
use
```rust
fn main() {
let app = App::new().resource("/my%20index", |r| {
r.method(http::Method::GET)
.with(index);
});
}
```
- If you used `AsyncResult::async` you need to replace it with `AsyncResult::future`
## 0.7.4
- `Route::with_config()`/`Route::with_async_config()` always passes configuration objects as a tuple,
even for handlers with one parameter.
## 0.7
- `HttpRequest` does not implement `Stream` anymore. If you need to read the request payload,
use the `HttpMessage::payload()` method.
instead of
```rust
fn index(req: HttpRequest) -> impl Responder {
req
.from_err()
.fold(...)
....
}
```
use `.payload()`
```rust
fn index(req: HttpRequest) -> impl Responder {
req
.payload() // <- get request payload stream
.from_err()
.fold(...)
....
}
```
- [Middleware](https://actix.rs/actix-web/actix_web/middleware/trait.Middleware.html)
trait uses `&HttpRequest` instead of `&mut HttpRequest`.
- Removed `Route::with2()` and `Route::with3()`; use a tuple of extractors instead.
instead of
```rust
fn index(query: Query<..>, info: Json<MyStruct>) -> impl Responder {}
```
use a tuple of extractors and `.with()` for registration:
```rust
fn index((query, json): (Query<..>, Json<MyStruct>)) -> impl Responder {}
```
- `Handler::handle()` uses `&self` instead of `&mut self`
- `Handler::handle()` accepts reference to `HttpRequest<_>` instead of value
- Removed deprecated `HttpServer::threads()`, use
[HttpServer::workers()](https://actix.rs/actix-web/actix_web/server/struct.HttpServer.html#method.workers) instead.
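A one-line sketch against the 0.7-era API:
```rust
use actix_web::{server, App};

// workers() replaces the deprecated threads().
let srv = server::new(|| App::new()).workers(4);
```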
- Renamed `client::ClientConnectorError::Connector` to
`client::ClientConnectorError::Resolver`
- `Route::with()` does not return `ExtractorConfig`; to configure the
extractor use `Route::with_config()`
instead of
```rust
fn main() {
let app = App::new().resource("/index.html", |r| {
r.method(http::Method::GET)
.with(index)
.limit(4096); // <- limit size of the payload
});
}
```
use
```rust
fn main() {
let app = App::new().resource("/index.html", |r| {
r.method(http::Method::GET)
.with_config(index, |cfg| { // <- register handler
cfg.limit(4096); // <- limit size of the payload
})
});
}
```
- `Route::with_async()` does not return `ExtractorConfig`; to configure the
extractor use `Route::with_async_config()`
## 0.6
- `Path<T>` extractor returns `ErrorNotFound` on failure instead of `ErrorBadRequest`
- `ws::Message::Close` now includes optional close reason.
`ws::CloseCode::Status` and `ws::CloseCode::Empty` have been removed.
- `HttpServer::threads()` renamed to `HttpServer::workers()`.
- `HttpServer::start_ssl()` and `HttpServer::start_tls()` deprecated.
Use `HttpServer::bind_ssl()` and `HttpServer::bind_tls()` instead.
- `HttpRequest::extensions()` returns a read-only reference to the request's extensions;
`HttpRequest::extensions_mut()` returns a mutable reference.
- Instead of
`use actix_web::middleware::{
CookieSessionBackend, CookieSessionError, RequestSession,
Session, SessionBackend, SessionImpl, SessionStorage};`
use the `actix_web::middleware::session` module:
`use actix_web::middleware::session::{CookieSessionBackend, CookieSessionError,
RequestSession, Session, SessionBackend, SessionImpl, SessionStorage};`
- `FromRequest::from_request()` accepts mutable reference to a request
- `FromRequest::Result` has to implement `Into<Reply<Self>>`
- [`Responder::respond_to()`](
https://actix.rs/actix-web/actix_web/trait.Responder.html#tymethod.respond_to)
is generic over `S`
- Use the `Query` extractor instead of `HttpRequest::query()`.
```rust
fn index(q: Query<HashMap<String, String>>) -> Result<..> {
...
}
```
or
```rust
let q = Query::<HashMap<String, String>>::extract(req);
```
- WebSocket operations are implemented as the `WsWriter` trait;
you need to `use actix_web::ws::WsWriter`
## 0.5
- `HttpResponseBuilder::body()`, `.finish()`, `.json()`
methods return `HttpResponse` instead of `Result<HttpResponse>`
- `actix_web::Method`, `actix_web::StatusCode`, `actix_web::Version`
moved to `actix_web::http` module
- `actix_web::header` moved to `actix_web::http::header`
- `NormalizePath` moved to `actix_web::http` module
- `HttpServer` moved to `actix_web::server`, added new `actix_web::server::new()` function,
shortcut for `actix_web::server::HttpServer::new()`
- `DefaultHeaders` middleware does not use separate builder, all builder methods moved to type itself
- `StaticFiles::new()`'s show_index parameter was removed; use the `show_files_listing()` method instead.
- `CookieSessionBackendBuilder` removed, all methods moved to `CookieSessionBackend` type
- The `actix_web::httpcodes` module is deprecated; `HttpResponse::Ok()`, `HttpResponse::Found()` and other `HttpResponse::XXX()`
functions should be used instead
- `ClientRequestBuilder::body()` returns `Result<_, actix_web::Error>`
instead of `Result<_, http::Error>`
- `Application` renamed to `App`
- `actix_web::Reply`, `actix_web::Resource` moved to `actix_web::dev`

View File

@ -1 +0,0 @@
actix-web/README.md

109
README.md Normal file
View File

@ -0,0 +1,109 @@
<div align="center">
<h1>Actix Web</h1>
<p>
<strong>Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust</strong>
</p>
<p>
[![crates.io](https://img.shields.io/crates/v/actix-web?label=latest)](https://crates.io/crates/actix-web)
[![Documentation](https://docs.rs/actix-web/badge.svg?version=4.0.0-beta.19)](https://docs.rs/actix-web/4.0.0-beta.19)
[![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-web.svg)
[![Dependency Status](https://deps.rs/crate/actix-web/4.0.0-beta.19/status.svg)](https://deps.rs/crate/actix-web/4.0.0-beta.19)
<br />
[![CI](https://github.com/actix/actix-web/actions/workflows/ci.yml/badge.svg)](https://github.com/actix/actix-web/actions/workflows/ci.yml)
[![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web)
![downloads](https://img.shields.io/crates/d/actix-web.svg)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
</p>
</div>
## Features
- Supports *HTTP/1.x* and *HTTP/2*
- Streaming and pipelining
- Keep-alive and slow requests handling
- Client/server [WebSockets](https://actix.rs/docs/websockets/) support
- Transparent content compression/decompression (br, gzip, deflate, zstd)
- Powerful [request routing](https://actix.rs/docs/url-dispatch/)
- Multipart streams
- Static assets
- SSL support using OpenSSL or Rustls
- Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/))
- Includes an async [HTTP client](https://docs.rs/awc/)
- Runs on stable Rust 1.54+
## Documentation
- [Website & User Guide](https://actix.rs)
- [Examples Repository](https://github.com/actix/examples)
- [API Documentation](https://docs.rs/actix-web)
- [API Documentation (master branch)](https://actix.rs/actix-web/actix_web)
## Example
Dependencies:
```toml
[dependencies]
actix-web = "3"
```
Code:
```rust
use actix_web::{get, web, App, HttpServer, Responder};
#[get("/{id}/{name}/index.html")]
async fn index(web::Path((id, name)): web::Path<(u32, String)>) -> impl Responder {
format!("Hello {}! id:{}", name, id)
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
HttpServer::new(|| App::new().service(index))
.bind("127.0.0.1:8080")?
.run()
.await
}
```
### More examples
- [Basic Setup](https://github.com/actix/examples/tree/master/basics/basics/)
- [Application State](https://github.com/actix/examples/tree/master/basics/state/)
- [JSON Handling](https://github.com/actix/examples/tree/master/json/json/)
- [Multipart Streams](https://github.com/actix/examples/tree/master/forms/multipart/)
- [Diesel Integration](https://github.com/actix/examples/tree/master/database_interactions/diesel/)
- [r2d2 Integration](https://github.com/actix/examples/tree/master/database_interactions/r2d2/)
- [Simple WebSocket](https://github.com/actix/examples/tree/master/websockets/websocket/)
- [Tera Templates](https://github.com/actix/examples/tree/master/template_engines/tera/)
- [Askama Templates](https://github.com/actix/examples/tree/master/template_engines/askama/)
- [HTTPS using Rustls](https://github.com/actix/examples/tree/master/security/rustls/)
- [HTTPS using OpenSSL](https://github.com/actix/examples/tree/master/security/openssl/)
- [WebSocket Chat](https://github.com/actix/examples/tree/master/websockets/chat/)
You may consider checking out
[this directory](https://github.com/actix/examples/tree/master/) for more examples.
## Benchmarks
One of the fastest web frameworks available according to the
[TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r20&test=composite).
## License
This project is licensed under either of
- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
  [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
- MIT license ([LICENSE-MIT](LICENSE-MIT) or
  [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
at your option.
## Code of Conduct
Contribution to the actix-web repo is organized under the terms of the Contributor Covenant.
The Actix team promises to intervene to uphold that code of conduct.

View File

@ -1,88 +1,29 @@
# Changes # Changes
## Unreleased ## Unreleased - 2021-xx-xx
- Minimum supported Rust version (MSRV) is now 1.75.
## 0.6.6
- Update `tokio-uring` dependency to `0.4`.
- Minimum supported Rust version (MSRV) is now 1.72.
## 0.6.5
- Fix handling of special characters in filenames.
## 0.6.4
- Fix handling of newlines in filenames.
- Minimum supported Rust version (MSRV) is now 1.68 due to transitive `time` dependency.
## 0.6.3
- XHTML files now use `Content-Disposition: inline` instead of `attachment`. [#2903]
- Minimum supported Rust version (MSRV) is now 1.59 due to transitive `time` dependency.
- Update `tokio-uring` dependency to `0.4`.
[#2903]: https://github.com/actix/actix-web/pull/2903
## 0.6.2
- Allow partial range responses for video content to start streaming sooner. [#2817]
- Minimum supported Rust version (MSRV) is now 1.57 due to transitive `time` dependency.
[#2817]: https://github.com/actix/actix-web/pull/2817
## 0.6.1
- Add `NamedFile::{modified, metadata, content_type, content_disposition, encoding}()` getters. [#2021]
- Update `tokio-uring` dependency to `0.3`.
- Audio files now use `Content-Disposition: inline` instead of `attachment`. [#2645]
- Minimum supported Rust version (MSRV) is now 1.56 due to transitive `hashbrown` dependency.
[#2021]: https://github.com/actix/actix-web/pull/2021
[#2645]: https://github.com/actix/actix-web/pull/2645
## 0.6.0
- No significant changes since `0.6.0-beta.16`.
## 0.6.0-beta.16
- No significant changes since `0.6.0-beta.15`.
## 0.6.0-beta.15
- No significant changes since `0.6.0-beta.14`.
## 0.6.0-beta.14
- The `prefer_utf8` option introduced in `0.4.0` is now true by default. [#2583]
[#2583]: https://github.com/actix/actix-web/pull/2583
## 0.6.0-beta.13
## 0.6.0-beta.13 - 2022-01-04
- The `Files` service now rejects requests with URL paths that include `%2F` (decoded: `/`). [#2398] - The `Files` service now rejects requests with URL paths that include `%2F` (decoded: `/`). [#2398]
- The `Files` service now correctly decodes `%25` in the URL path to `%` for the file path. [#2398] - The `Files` service now correctly decodes `%25` in the URL path to `%` for the file path. [#2398]
- Minimum supported Rust version (MSRV) is now 1.54. - Minimum supported Rust version (MSRV) is now 1.54.
[#2398]: https://github.com/actix/actix-web/pull/2398 [#2398]: https://github.com/actix/actix-web/pull/2398
## 0.6.0-beta.12
## 0.6.0-beta.12 - 2021-12-29
- No significant changes since `0.6.0-beta.11`. - No significant changes since `0.6.0-beta.11`.
## 0.6.0-beta.11
## 0.6.0-beta.11 - 2021-12-27
- No significant changes since `0.6.0-beta.10`. - No significant changes since `0.6.0-beta.10`.
## 0.6.0-beta.10
## 0.6.0-beta.10 - 2021-12-11
- No significant changes since `0.6.0-beta.9`. - No significant changes since `0.6.0-beta.9`.
## 0.6.0-beta.9
## 0.6.0-beta.9 - 2021-11-22
- Add crate feature `experimental-io-uring`, enabling async file I/O to be utilized. This feature is only available on Linux OSes with recent kernel versions. This feature is semver-exempt. [#2408] - Add crate feature `experimental-io-uring`, enabling async file I/O to be utilized. This feature is only available on Linux OSes with recent kernel versions. This feature is semver-exempt. [#2408]
- Add `NamedFile::open_async`. [#2408] - Add `NamedFile::open_async`. [#2408]
- Fix 304 Not Modified responses to omit the Content-Length header, as per the spec. [#2453] - Fix 304 Not Modified responses to omit the Content-Length header, as per the spec. [#2453]
@ -93,24 +34,24 @@
[#2408]: https://github.com/actix/actix-web/pull/2408 [#2408]: https://github.com/actix/actix-web/pull/2408
[#2453]: https://github.com/actix/actix-web/pull/2453 [#2453]: https://github.com/actix/actix-web/pull/2453
## 0.6.0-beta.8
## 0.6.0-beta.8 - 2021-10-20
- Minimum supported Rust version (MSRV) is now 1.52. - Minimum supported Rust version (MSRV) is now 1.52.
## 0.6.0-beta.7
## 0.6.0-beta.7 - 2021-09-09
- Minimum supported Rust version (MSRV) is now 1.51. - Minimum supported Rust version (MSRV) is now 1.51.
## 0.6.0-beta.6
## 0.6.0-beta.6 - 2021-06-26
- Added `Files::path_filter()`. [#2274] - Added `Files::path_filter()`. [#2274]
- `Files::show_files_listing()` can now be used with `Files::index_file()` to show files listing as a fallback when the index file is not found. [#2228] - `Files::show_files_listing()` can now be used with `Files::index_file()` to show files listing as a fallback when the index file is not found. [#2228]
[#2274]: https://github.com/actix/actix-web/pull/2274 [#2274]: https://github.com/actix/actix-web/pull/2274
[#2228]: https://github.com/actix/actix-web/pull/2228 [#2228]: https://github.com/actix/actix-web/pull/2228
## 0.6.0-beta.5
## 0.6.0-beta.5 - 2021-06-17
- `NamedFile` now implements `ServiceFactory` and `HttpServiceFactory` making it much more useful in routing. For example, it can be used directly as a default service. [#2135] - `NamedFile` now implements `ServiceFactory` and `HttpServiceFactory` making it much more useful in routing. For example, it can be used directly as a default service. [#2135]
- For symbolic links, `Content-Disposition` header no longer shows the filename of the original file. [#2156] - For symbolic links, `Content-Disposition` header no longer shows the filename of the original file. [#2156]
- `Files::redirect_to_slash_directory()` now works as expected when used with `Files::show_files_listing()`. [#2225] - `Files::redirect_to_slash_directory()` now works as expected when used with `Files::show_files_listing()`. [#2225]
@ -121,58 +62,58 @@
[#2225]: https://github.com/actix/actix-web/pull/2225 [#2225]: https://github.com/actix/actix-web/pull/2225
[#2257]: https://github.com/actix/actix-web/pull/2257 [#2257]: https://github.com/actix/actix-web/pull/2257
## 0.6.0-beta.4
## 0.6.0-beta.4 - 2021-04-02
- Add support for `.guard` in `Files` to selectively filter `Files` services. [#2046] - Add support for `.guard` in `Files` to selectively filter `Files` services. [#2046]
[#2046]: https://github.com/actix/actix-web/pull/2046 [#2046]: https://github.com/actix/actix-web/pull/2046
## 0.6.0-beta.3
## 0.6.0-beta.3 - 2021-03-09
- No notable changes. - No notable changes.
## 0.6.0-beta.2
## 0.6.0-beta.2 - 2021-02-10
- Fix If-Modified-Since and If-Unmodified-Since to not compare using sub-second timestamps. [#1887] - Fix If-Modified-Since and If-Unmodified-Since to not compare using sub-second timestamps. [#1887]
- Replace `v_htmlescape` with `askama_escape`. [#1953] - Replace `v_htmlescape` with `askama_escape`. [#1953]
[#1887]: https://github.com/actix/actix-web/pull/1887 [#1887]: https://github.com/actix/actix-web/pull/1887
[#1953]: https://github.com/actix/actix-web/pull/1953 [#1953]: https://github.com/actix/actix-web/pull/1953
## 0.6.0-beta.1
## 0.6.0-beta.1 - 2021-01-07
- `HttpRange::parse` now has its own error type. - `HttpRange::parse` now has its own error type.
- Update `bytes` to `1.0`. [#1813] - Update `bytes` to `1.0`. [#1813]
[#1813]: https://github.com/actix/actix-web/pull/1813 [#1813]: https://github.com/actix/actix-web/pull/1813
## 0.5.0
## 0.5.0 - 2020-12-26
- Optionally support hidden files/directories. [#1811] - Optionally support hidden files/directories. [#1811]
[#1811]: https://github.com/actix/actix-web/pull/1811 [#1811]: https://github.com/actix/actix-web/pull/1811
## 0.4.1
## 0.4.1 - 2020-11-24
- Clarify order of parameters in `Files::new` and improve docs. - Clarify order of parameters in `Files::new` and improve docs.
## 0.4.0
## 0.4.0 - 2020-10-06
- Add `Files::prefer_utf8` option that adds UTF-8 charset on certain response types. [#1714] - Add `Files::prefer_utf8` option that adds UTF-8 charset on certain response types. [#1714]
[#1714]: https://github.com/actix/actix-web/pull/1714 [#1714]: https://github.com/actix/actix-web/pull/1714
## 0.3.0
## 0.3.0 - 2020-09-11
- No significant changes from 0.3.0-beta.1. - No significant changes from 0.3.0-beta.1.
## 0.3.0-beta.1
## 0.3.0-beta.1 - 2020-07-15
- Update `v_htmlescape` to 0.10 - Update `v_htmlescape` to 0.10
- Update `actix-web` and `actix-http` dependencies to beta.1 - Update `actix-web` and `actix-http` dependencies to beta.1
## 0.3.0-alpha.1
## 0.3.0-alpha.1 - 2020-05-23
- Update `actix-web` and `actix-http` dependencies to alpha - Update `actix-web` and `actix-http` dependencies to alpha
- Fix some typos in the docs - Fix some typos in the docs
- Bump minimum supported Rust version to 1.40 - Bump minimum supported Rust version to 1.40
@ -180,73 +121,73 @@
[#1384]: https://github.com/actix/actix-web/pull/1384 [#1384]: https://github.com/actix/actix-web/pull/1384
## 0.2.1
## 0.2.1 - 2019-12-22
- Use the same format for file URLs regardless of platforms - Use the same format for file URLs regardless of platforms
## 0.2.0
## 0.2.0 - 2019-12-20
- Fix BodyEncoding trait import #1220 - Fix BodyEncoding trait import #1220
## 0.2.0-alpha.1
## 0.2.0-alpha.1 - 2019-12-07
- Migrate to `std::future` - Migrate to `std::future`
## 0.1.7
- Add an additional `filename*` param in the `Content-Disposition` header of `actix_files::NamedFile` to be more compatible. (#1151) ## 0.1.7 - 2019-11-06
- Add an additional `filename*` param in the `Content-Disposition` header of
## 0.1.6 `actix_files::NamedFile` to be more compatible. (#1151)
## 0.1.6 - 2019-10-14
- Add option to redirect to a slash-ended path `Files` #1132 - Add option to redirect to a slash-ended path `Files` #1132
## 0.1.5
## 0.1.5 - 2019-10-08
- Bump up `mime_guess` crate version to 2.0.1 - Bump up `mime_guess` crate version to 2.0.1
- Bump up `percent-encoding` crate version to 2.1 - Bump up `percent-encoding` crate version to 2.1
- Allow user defined request guards for `Files` #1113 - Allow user defined request guards for `Files` #1113
## 0.1.4
## 0.1.4 - 2019-07-20
- Allow to disable `Content-Disposition` header #686 - Allow to disable `Content-Disposition` header #686
## 0.1.3
## 0.1.3 - 2019-06-28
- Do not set `Content-Length` header, let actix-http set it #930 - Do not set `Content-Length` header, let actix-http set it #930
## 0.1.2
## 0.1.2 - 2019-06-13
- Content-Length is 0 for NamedFile HEAD request #914 - Content-Length is 0 for NamedFile HEAD request #914
- Fix ring dependency from actix-web default features for #741 - Fix ring dependency from actix-web default features for #741
## 0.1.1
## 0.1.1 - 2019-06-01
- Static files are incorrectly served as both chunked and with length #812 - Static files are incorrectly served as both chunked and with length #812
## 0.1.0
## 0.1.0 - 2019-05-25
- NamedFile last-modified check always fails due to nano-seconds in file modified date #820 - NamedFile last-modified check always fails due to nano-seconds in file modified date #820
## 0.1.0-beta.4
## 0.1.0-beta.4 - 2019-05-12
- Update actix-web to beta.4 - Update actix-web to beta.4
## 0.1.0-beta.1
## 0.1.0-beta.1 - 2019-04-20
- Update actix-web to beta.1 - Update actix-web to beta.1
## 0.1.0-alpha.6
## 0.1.0-alpha.6 - 2019-04-14
- Update actix-web to alpha6 - Update actix-web to alpha6
## 0.1.0-alpha.4
## 0.1.0-alpha.4 - 2019-04-08
- Update actix-web to alpha4 - Update actix-web to alpha4
## 0.1.0-alpha.2
## 0.1.0-alpha.2 - 2019-04-02
- Add default handler support - Add default handler support
## 0.1.0-alpha.1
## 0.1.0-alpha.1 - 2019-03-28
- Initial impl - Initial impl

View File

@ -1,8 +1,9 @@
[package] [package]
name = "actix-files" name = "actix-files"
version = "0.6.6" version = "0.6.0-beta.13"
authors = [ authors = [
"Nikolay Kim <fafhrd91@gmail.com>", "Nikolay Kim <fafhrd91@gmail.com>",
"fakeshadow <24548779@qq.com>",
"Rob Ede <robjtede@icloud.com>", "Rob Ede <robjtede@icloud.com>",
] ]
description = "Static file serving for Actix Web" description = "Static file serving for Actix Web"
@ -11,49 +12,37 @@ homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web" repository = "https://github.com/actix/actix-web"
categories = ["asynchronous", "web-programming::http-server"] categories = ["asynchronous", "web-programming::http-server"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
edition = "2021" edition = "2018"
[package.metadata.cargo_check_external_types] [lib]
allowed_external_types = [ name = "actix_files"
"actix_http::*", path = "src/lib.rs"
"actix_service::*",
"actix_web::*",
"http::*",
"mime::*",
]
[features] [features]
experimental-io-uring = ["actix-web/experimental-io-uring", "tokio-uring"] experimental-io-uring = ["actix-web/experimental-io-uring", "tokio-uring"]
[dependencies] [dependencies]
actix-http = "3" actix-http = "3.0.0-beta.18"
actix-service = "2" actix-service = "2"
actix-utils = "3" actix-utils = "3"
actix-web = { version = "4", default-features = false } actix-web = { version = "4.0.0-beta.19", default-features = false }
bitflags = "2" askama_escape = "0.10"
bitflags = "1"
bytes = "1" bytes = "1"
derive_more = { version = "2", features = ["display", "error", "from"] } derive_more = "0.99.5"
futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] } futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
http-range = "0.1.4" http-range = "0.1.4"
log = "0.4" log = "0.4"
mime = "0.3.9" mime = "0.3"
mime_guess = "2.0.1" mime_guess = "2.0.1"
percent-encoding = "2.1" percent-encoding = "2.1"
pin-project-lite = "0.2.7" pin-project-lite = "0.2.7"
v_htmlescape = "0.15.5"
# experimental-io-uring tokio-uring = { version = "0.1", optional = true }
[target.'cfg(target_os = "linux")'.dependencies]
tokio-uring = { version = "0.5", optional = true, features = ["bytes"] }
actix-server = { version = "2.4", optional = true } # ensure matching tokio-uring versions
[dev-dependencies] [dev-dependencies]
actix-rt = "2.7" actix-rt = "2.2"
actix-test = "0.1" actix-test = "0.1.0-beta.11"
actix-web = "4" actix-web = "4.0.0-beta.19"
env_logger = "0.11"
tempfile = "3.2" tempfile = "3.2"
[lints]
workspace = true

View File

@ -1,32 +1,18 @@
# `actix-files` # actix-files
<!-- prettier-ignore-start --> > Static file serving for Actix Web
[![crates.io](https://img.shields.io/crates/v/actix-files?label=latest)](https://crates.io/crates/actix-files) [![crates.io](https://img.shields.io/crates/v/actix-files?label=latest)](https://crates.io/crates/actix-files)
[![Documentation](https://docs.rs/actix-files/badge.svg?version=0.6.6)](https://docs.rs/actix-files/0.6.6) [![Documentation](https://docs.rs/actix-files/badge.svg?version=0.6.0-beta.13)](https://docs.rs/actix-files/0.6.0-beta.13)
![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg) [![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html)
![License](https://img.shields.io/crates/l/actix-files.svg) ![License](https://img.shields.io/crates/l/actix-files.svg)
<br /> <br />
[![dependency status](https://deps.rs/crate/actix-files/0.6.6/status.svg)](https://deps.rs/crate/actix-files/0.6.6) [![dependency status](https://deps.rs/crate/actix-files/0.6.0-beta.13/status.svg)](https://deps.rs/crate/actix-files/0.6.0-beta.13)
[![Download](https://img.shields.io/crates/d/actix-files.svg)](https://crates.io/crates/actix-files) [![Download](https://img.shields.io/crates/d/actix-files.svg)](https://crates.io/crates/actix-files)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) [![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
<!-- prettier-ignore-end --> ## Documentation & Resources
<!-- cargo-rdme start --> - [API Documentation](https://docs.rs/actix-files/)
- [Example Project](https://github.com/actix/examples/tree/master/basics/static_index)
Static file serving for Actix Web. - Minimum Supported Rust Version (MSRV): 1.54
Provides a non-blocking service for serving static files from disk.
## Examples
```rust
use actix_web::App;
use actix_files::Files;
let app = App::new()
.service(Files::new("/static", ".").prefer_utf8(true));
```
<!-- cargo-rdme end -->

View File

@ -1,33 +0,0 @@
use actix_files::Files;
use actix_web::{get, guard, middleware, App, HttpServer, Responder};
const EXAMPLES_DIR: &str = concat![env!("CARGO_MANIFEST_DIR"), "/examples"];
#[get("/")]
async fn index() -> impl Responder {
"Hello world!"
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
log::info!("starting HTTP server at http://localhost:8080");
HttpServer::new(|| {
App::new()
.service(index)
.service(
Files::new("/assets", EXAMPLES_DIR)
.show_files_listing()
.guard(guard::Header("show-listing", "?1")),
)
.service(Files::new("/assets", EXAMPLES_DIR))
.wrap(middleware::Compress::default())
.wrap(middleware::Logger::default())
})
.bind(("127.0.0.1", 8080))?
.workers(2)
.run()
.await
}

View File

@ -7,8 +7,6 @@ use std::{
}; };
use actix_web::{error::Error, web::Bytes}; use actix_web::{error::Error, web::Bytes};
#[cfg(feature = "experimental-io-uring")]
use bytes::BytesMut;
use futures_core::{ready, Stream}; use futures_core::{ready, Stream};
use pin_project_lite::pin_project; use pin_project_lite::pin_project;
@ -80,7 +78,7 @@ async fn chunked_read_file_callback(
) -> Result<(File, Bytes), Error> { ) -> Result<(File, Bytes), Error> {
use io::{Read as _, Seek as _}; use io::{Read as _, Seek as _};
let res = actix_web::web::block(move || { let res = actix_web::rt::task::spawn_blocking(move || {
let mut buf = Vec::with_capacity(max_bytes); let mut buf = Vec::with_capacity(max_bytes);
file.seek(io::SeekFrom::Start(offset))?; file.seek(io::SeekFrom::Start(offset))?;
@ -93,7 +91,8 @@ async fn chunked_read_file_callback(
Ok((file, Bytes::from(buf))) Ok((file, Bytes::from(buf)))
} }
}) })
.await??; .await
.map_err(|_| actix_web::error::BlockingError)??;
Ok(res) Ok(res)
} }
@ -215,3 +214,64 @@ where
} }
} }
} }
#[cfg(feature = "experimental-io-uring")]
use bytes_mut::BytesMut;
// TODO: remove new type and use bytes::BytesMut directly
#[doc(hidden)]
#[cfg(feature = "experimental-io-uring")]
mod bytes_mut {
use std::ops::{Deref, DerefMut};
use tokio_uring::buf::{IoBuf, IoBufMut};
#[derive(Debug)]
pub struct BytesMut(bytes::BytesMut);
impl BytesMut {
pub(super) fn new() -> Self {
Self(bytes::BytesMut::new())
}
}
impl Deref for BytesMut {
type Target = bytes::BytesMut;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for BytesMut {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
unsafe impl IoBuf for BytesMut {
fn stable_ptr(&self) -> *const u8 {
self.0.as_ptr()
}
fn bytes_init(&self) -> usize {
self.0.len()
}
fn bytes_total(&self) -> usize {
self.0.capacity()
}
}
unsafe impl IoBufMut for BytesMut {
fn stable_mut_ptr(&mut self) -> *mut u8 {
self.0.as_mut_ptr()
}
unsafe fn set_init(&mut self, init_len: usize) {
if self.len() < init_len {
self.0.set_len(init_len);
}
}
}
}

View File

@ -1,13 +1,8 @@
use std::{ use std::{fmt::Write, fs::DirEntry, io, path::Path, path::PathBuf};
fmt::Write,
fs::DirEntry,
io,
path::{Path, PathBuf},
};
use actix_web::{dev::ServiceResponse, HttpRequest, HttpResponse}; use actix_web::{dev::ServiceResponse, HttpRequest, HttpResponse};
use askama_escape::{escape as escape_html_entity, Html};
use percent_encoding::{utf8_percent_encode, CONTROLS}; use percent_encoding::{utf8_percent_encode, CONTROLS};
use v_htmlescape::escape as escape_html_entity;
/// A directory; responds with the generated directory listing. /// A directory; responds with the generated directory listing.
#[derive(Debug)] #[derive(Debug)]
@ -64,7 +59,7 @@ macro_rules! encode_file_url {
/// ``` /// ```
macro_rules! encode_file_name { macro_rules! encode_file_name {
($entry:ident) => { ($entry:ident) => {
escape_html_entity(&$entry.file_name().to_string_lossy()) escape_html_entity(&$entry.file_name().to_string_lossy(), Html)
}; };
} }
@ -80,7 +75,7 @@ pub(crate) fn directory_listing(
if dir.is_visible(&entry) { if dir.is_visible(&entry) {
let entry = entry.unwrap(); let entry = entry.unwrap();
let p = match entry.path().strip_prefix(&dir.path) { let p = match entry.path().strip_prefix(&dir.path) {
Ok(p) if cfg!(windows) => base.join(p).to_string_lossy().replace('\\', "/"), Ok(p) if cfg!(windows) => base.join(p).to_string_lossy().replace("\\", "/"),
Ok(p) => base.join(p).to_string_lossy().into_owned(), Ok(p) => base.join(p).to_string_lossy().into_owned(),
Err(_) => continue, Err(_) => continue,
}; };

View File

@ -2,47 +2,48 @@ use actix_web::{http::StatusCode, ResponseError};
use derive_more::Display; use derive_more::Display;
/// Errors which can occur when serving static files. /// Errors which can occur when serving static files.
#[derive(Debug, PartialEq, Eq, Display)] #[derive(Display, Debug, PartialEq)]
pub enum FilesError { pub enum FilesError {
/// Path is not a directory. /// Path is not a directory
#[allow(dead_code)] #[allow(dead_code)]
#[display("path is not a directory. Unable to serve static files")] #[display(fmt = "Path is not a directory. Unable to serve static files")]
IsNotDirectory, IsNotDirectory,
/// Cannot render directory. /// Cannot render directory
#[display("unable to render directory without index file")] #[display(fmt = "Unable to render directory without index file")]
IsDirectory, IsDirectory,
} }
/// Return `NotFound` for `FilesError`
impl ResponseError for FilesError { impl ResponseError for FilesError {
/// Returns `404 Not Found`.
fn status_code(&self) -> StatusCode { fn status_code(&self) -> StatusCode {
StatusCode::NOT_FOUND StatusCode::NOT_FOUND
} }
} }
#[derive(Debug, PartialEq, Eq, Display)] #[allow(clippy::enum_variant_names)]
#[derive(Display, Debug, PartialEq)]
#[non_exhaustive] #[non_exhaustive]
pub enum UriSegmentError { pub enum UriSegmentError {
/// Segment started with the wrapped invalid character. /// The segment started with the wrapped invalid character.
#[display("segment started with invalid character: ('{_0}')")] #[display(fmt = "The segment started with the wrapped invalid character")]
BadStart(char), BadStart(char),
/// Segment contained the wrapped invalid character. /// The segment contained the wrapped invalid character.
#[display("segment contained invalid character ('{_0}')")] #[display(fmt = "The segment contained the wrapped invalid character")]
BadChar(char), BadChar(char),
/// Segment ended with the wrapped invalid character. /// The segment ended with the wrapped invalid character.
#[display("segment ended with invalid character: ('{_0}')")] #[display(fmt = "The segment ended with the wrapped invalid character")]
BadEnd(char), BadEnd(char),
/// Path is not a valid UTF-8 string after percent-decoding. /// The path is not a valid UTF-8 string after doing percent decoding.
#[display("path is not a valid UTF-8 string after percent-decoding")] #[display(fmt = "The path is not a valid UTF-8 string after percent-decoding")]
NotValidUtf8, NotValidUtf8,
} }
/// Return `BadRequest` for `UriSegmentError`
impl ResponseError for UriSegmentError { impl ResponseError for UriSegmentError {
/// Returns `400 Bad Request`.
fn status_code(&self) -> StatusCode { fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST StatusCode::BAD_REQUEST
} }

View File

@ -8,7 +8,8 @@ use std::{
use actix_service::{boxed, IntoServiceFactory, ServiceFactory, ServiceFactoryExt}; use actix_service::{boxed, IntoServiceFactory, ServiceFactory, ServiceFactoryExt};
use actix_web::{ use actix_web::{
dev::{ dev::{
AppService, HttpServiceFactory, RequestHead, ResourceDef, ServiceRequest, ServiceResponse, AppService, HttpServiceFactory, RequestHead, ResourceDef, ServiceRequest,
ServiceResponse,
}, },
error::Error, error::Error,
guard::Guard, guard::Guard,
@ -36,7 +37,7 @@ use crate::{
/// .service(Files::new("/static", ".")); /// .service(Files::new("/static", "."));
/// ``` /// ```
pub struct Files { pub struct Files {
mount_path: String, path: String,
directory: PathBuf, directory: PathBuf,
index: Option<String>, index: Option<String>,
show_index: bool, show_index: bool,
@ -67,7 +68,7 @@ impl Clone for Files {
default: self.default.clone(), default: self.default.clone(),
renderer: self.renderer.clone(), renderer: self.renderer.clone(),
file_flags: self.file_flags, file_flags: self.file_flags,
mount_path: self.mount_path.clone(), path: self.path.clone(),
mime_override: self.mime_override.clone(), mime_override: self.mime_override.clone(),
path_filter: self.path_filter.clone(), path_filter: self.path_filter.clone(),
use_guards: self.use_guards.clone(), use_guards: self.use_guards.clone(),
@ -106,7 +107,7 @@ impl Files {
}; };
Files { Files {
mount_path: mount_path.trim_end_matches('/').to_owned(), path: mount_path.trim_end_matches('/').to_owned(),
directory: dir, directory: dir,
index: None, index: None,
show_index: false, show_index: false,
@ -141,7 +142,7 @@ impl Files {
self self
} }
/// Set custom directory renderer. /// Set custom directory renderer
pub fn files_listing_renderer<F>(mut self, f: F) -> Self pub fn files_listing_renderer<F>(mut self, f: F) -> Self
where where
for<'r, 's> F: for<'r, 's> F:
@ -151,7 +152,7 @@ impl Files {
self self
} }
/// Specifies MIME override callback. /// Specifies mime override callback
pub fn mime_override<F>(mut self, f: F) -> Self pub fn mime_override<F>(mut self, f: F) -> Self
where where
F: Fn(&mime::Name<'_>) -> DispositionType + 'static, F: Fn(&mime::Name<'_>) -> DispositionType + 'static,
@ -235,7 +236,7 @@ impl Files {
/// request starts being handled by the file service, it will not be able to back-out and try /// request starts being handled by the file service, it will not be able to back-out and try
/// the next service, you will simply get a 404 (or 405) error response. /// the next service, you will simply get a 404 (or 405) error response.
/// ///
/// To allow `POST` requests to retrieve files, see [`Files::method_guard()`]. /// To allow `POST` requests to retrieve files, see [`Files::use_guards`].
/// ///
/// # Examples /// # Examples
/// ``` /// ```
@ -300,8 +301,12 @@ impl Files {
pub fn default_handler<F, U>(mut self, f: F) -> Self pub fn default_handler<F, U>(mut self, f: F) -> Self
where where
F: IntoServiceFactory<U, ServiceRequest>, F: IntoServiceFactory<U, ServiceRequest>,
U: ServiceFactory<ServiceRequest, Config = (), Response = ServiceResponse, Error = Error> U: ServiceFactory<
+ 'static, ServiceRequest,
Config = (),
Response = ServiceResponse,
Error = Error,
> + 'static,
{ {
// create and configure default resource // create and configure default resource
self.default = Rc::new(RefCell::new(Some(Rc::new(boxed::factory( self.default = Rc::new(RefCell::new(Some(Rc::new(boxed::factory(
@ -337,9 +342,9 @@ impl HttpServiceFactory for Files {
} }
let rdef = if config.is_root() { let rdef = if config.is_root() {
ResourceDef::root_prefix(&self.mount_path) ResourceDef::root_prefix(&self.path)
} else { } else {
ResourceDef::prefix(&self.mount_path) ResourceDef::prefix(&self.path)
}; };
config.register_service(rdef, guards, self, None) config.register_service(rdef, guards, self, None)
@ -385,46 +390,3 @@ impl ServiceFactory<ServiceRequest> for Files {
} }
} }
} }
#[cfg(test)]
mod tests {
use actix_web::{
http::StatusCode,
test::{self, TestRequest},
App, HttpResponse,
};
use super::*;
#[actix_web::test]
async fn custom_files_listing_renderer() {
let srv = test::init_service(
App::new().service(
Files::new("/", "./tests")
.show_files_listing()
.files_listing_renderer(|dir, req| {
Ok(ServiceResponse::new(
req.clone(),
HttpResponse::Ok().body(dir.path.to_str().unwrap().to_owned()),
))
}),
),
)
.await;
let req = TestRequest::with_uri("/").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
let body = test::read_body(res).await;
let body_str = std::str::from_utf8(&body).unwrap();
let actual_path = Path::new(&body_str);
let expected_path = Path::new("actix-files/tests");
assert!(
actual_path.ends_with(expected_path),
"body {:?} does not end with {:?}",
actual_path,
expected_path
);
}
}

View File

@ -2,7 +2,7 @@
//! //!
//! Provides a non-blocking service for serving static files from disk. //! Provides a non-blocking service for serving static files from disk.
//! //!
//! # Examples //! # Example
//! ``` //! ```
//! use actix_web::App; //! use actix_web::App;
//! use actix_files::Files; //! use actix_files::Files;
@ -11,12 +11,8 @@
//! .service(Files::new("/static", ".").prefer_utf8(true)); //! .service(Files::new("/static", ".").prefer_utf8(true));
//! ``` //! ```
#![warn(missing_docs, missing_debug_implementations)] #![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")] #![warn(future_incompatible, missing_docs, missing_debug_implementations)]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
use std::path::Path;
use actix_service::boxed::{BoxService, BoxServiceFactory}; use actix_service::boxed::{BoxService, BoxServiceFactory};
use actix_web::{ use actix_web::{
@ -25,6 +21,7 @@ use actix_web::{
http::header::DispositionType, http::header::DispositionType,
}; };
use mime_guess::from_ext; use mime_guess::from_ext;
use std::path::Path;
mod chunked; mod chunked;
mod directory; mod directory;
@ -36,15 +33,16 @@ mod path_buf;
mod range; mod range;
mod service; mod service;
pub use self::{ pub use self::chunked::ChunkedReadFile;
chunked::ChunkedReadFile, directory::Directory, files::Files, named::NamedFile, pub use self::directory::Directory;
range::HttpRange, service::FilesService, pub use self::files::Files;
}; pub use self::named::NamedFile;
use self::{ pub use self::range::HttpRange;
directory::{directory_listing, DirectoryRenderer}, pub use self::service::FilesService;
error::FilesError,
path_buf::PathBufWrap, use self::directory::{directory_listing, DirectoryRenderer};
}; use self::error::FilesError;
use self::path_buf::PathBufWrap;
type HttpService = BoxService<ServiceRequest, ServiceResponse, Error>; type HttpService = BoxService<ServiceRequest, ServiceResponse, Error>;
type HttpNewService = BoxServiceFactory<(), ServiceRequest, ServiceResponse, Error, ()>; type HttpNewService = BoxServiceFactory<(), ServiceRequest, ServiceResponse, Error, ()>;
@ -64,17 +62,16 @@ type PathFilter = dyn Fn(&Path, &RequestHead) -> bool;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::{ use std::{
fmt::Write as _,
fs::{self}, fs::{self},
ops::Add, ops::Add,
time::{Duration, SystemTime}, time::{Duration, SystemTime},
}; };
use actix_service::ServiceFactory;
use actix_web::{ use actix_web::{
dev::ServiceFactory,
guard, guard,
http::{ http::{
header::{self, ContentDisposition, DispositionParam}, header::{self, ContentDisposition, DispositionParam, DispositionType},
Method, StatusCode, Method, StatusCode,
}, },
middleware::Compress, middleware::Compress,
@ -109,7 +106,7 @@ mod tests {
let req = TestRequest::default() let req = TestRequest::default()
.insert_header((header::IF_MODIFIED_SINCE, since)) .insert_header((header::IF_MODIFIED_SINCE, since))
.to_http_request(); .to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_MODIFIED); assert_eq!(resp.status(), StatusCode::NOT_MODIFIED);
} }
@ -121,7 +118,7 @@ mod tests {
let req = TestRequest::default() let req = TestRequest::default()
.insert_header((header::IF_MODIFIED_SINCE, since)) .insert_header((header::IF_MODIFIED_SINCE, since))
.to_http_request(); .to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_MODIFIED); assert_eq!(resp.status(), StatusCode::NOT_MODIFIED);
} }
@ -134,7 +131,7 @@ mod tests {
.insert_header((header::IF_NONE_MATCH, "miss_etag")) .insert_header((header::IF_NONE_MATCH, "miss_etag"))
.insert_header((header::IF_MODIFIED_SINCE, since)) .insert_header((header::IF_MODIFIED_SINCE, since))
.to_http_request(); .to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_ne!(resp.status(), StatusCode::NOT_MODIFIED); assert_ne!(resp.status(), StatusCode::NOT_MODIFIED);
} }
@ -146,7 +143,7 @@ mod tests {
let req = TestRequest::default() let req = TestRequest::default()
.insert_header((header::IF_UNMODIFIED_SINCE, since)) .insert_header((header::IF_UNMODIFIED_SINCE, since))
.to_http_request(); .to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK); assert_eq!(resp.status(), StatusCode::OK);
} }
@ -158,7 +155,7 @@ mod tests {
let req = TestRequest::default() let req = TestRequest::default()
.insert_header((header::IF_UNMODIFIED_SINCE, since)) .insert_header((header::IF_UNMODIFIED_SINCE, since))
.to_http_request(); .to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::PRECONDITION_FAILED); assert_eq!(resp.status(), StatusCode::PRECONDITION_FAILED);
} }
@ -175,7 +172,7 @@ mod tests {
} }
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(), resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/x-toml" "text/x-toml"
@ -199,7 +196,7 @@ mod tests {
} }
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_DISPOSITION).unwrap(), resp.headers().get(header::CONTENT_DISPOSITION).unwrap(),
"inline; filename=\"Cargo.toml\"" "inline; filename=\"Cargo.toml\""
@ -210,7 +207,7 @@ mod tests {
.unwrap() .unwrap()
.disable_content_disposition(); .disable_content_disposition();
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert!(resp.headers().get(header::CONTENT_DISPOSITION).is_none()); assert!(resp.headers().get(header::CONTENT_DISPOSITION).is_none());
} }
@ -238,7 +235,7 @@ mod tests {
} }
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(), resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/x-toml" "text/x-toml"
@ -264,7 +261,7 @@ mod tests {
} }
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(), resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/xml" "text/xml"
@ -287,7 +284,7 @@ mod tests {
} }
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(), resp.headers().get(header::CONTENT_TYPE).unwrap(),
"image/png" "image/png"
@ -303,14 +300,14 @@ mod tests {
let file = NamedFile::open_async("tests/test.js").await.unwrap(); let file = NamedFile::open_async("tests/test.js").await.unwrap();
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(), resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/javascript", "application/javascript"
); );
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_DISPOSITION).unwrap(), resp.headers().get(header::CONTENT_DISPOSITION).unwrap(),
"inline; filename=\"test.js\"", "inline; filename=\"test.js\""
); );
} }
@ -333,7 +330,7 @@ mod tests {
} }
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(), resp.headers().get(header::CONTENT_TYPE).unwrap(),
"image/png" "image/png"
@ -356,7 +353,7 @@ mod tests {
} }
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(), resp.headers().get(header::CONTENT_TYPE).unwrap(),
"application/octet-stream" "application/octet-stream"
@ -367,45 +364,22 @@ mod tests {
); );
} }
#[allow(deprecated)]
#[actix_rt::test] #[actix_rt::test]
async fn status_code_customize_same_output() { async fn test_named_file_status_code_text() {
let file1 = NamedFile::open_async("Cargo.toml") let mut file = NamedFile::open_async("Cargo.toml")
.await .await
.unwrap() .unwrap()
.set_status_code(StatusCode::NOT_FOUND); .set_status_code(StatusCode::NOT_FOUND);
let file2 = NamedFile::open_async("Cargo.toml")
.await
.unwrap()
.customize()
.with_status(StatusCode::NOT_FOUND);
let req = TestRequest::default().to_http_request();
let res1 = file1.respond_to(&req);
let res2 = file2.respond_to(&req);
assert_eq!(res1.status(), StatusCode::NOT_FOUND);
assert_eq!(res2.status(), StatusCode::NOT_FOUND);
}
#[actix_rt::test]
async fn test_named_file_status_code_text() {
let mut file = NamedFile::open_async("Cargo.toml").await.unwrap();
{ {
file.file(); file.file();
let _f: &File = &file; let _f: &File = &file;
} }
{ {
let _f: &mut File = &mut file; let _f: &mut File = &mut file;
} }
let file = file.customize().with_status(StatusCode::NOT_FOUND);
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(), resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/x-toml" "text/x-toml"
@ -553,9 +527,10 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_static_files_with_spaces() { async fn test_static_files_with_spaces() {
let srv = let srv = test::init_service(
test::init_service(App::new().service(Files::new("/", ".").index_file("Cargo.toml"))) App::new().service(Files::new("/", ".").index_file("Cargo.toml")),
.await; )
.await;
let request = TestRequest::get() let request = TestRequest::get()
.uri("/tests/test%20space.binary") .uri("/tests/test%20space.binary")
.to_request(); .to_request();
@ -567,30 +542,6 @@ mod tests {
assert_eq!(bytes, data); assert_eq!(bytes, data);
} }
#[cfg(not(target_os = "windows"))]
#[actix_rt::test]
async fn test_static_files_with_special_characters() {
// Create the file we want to test against ad-hoc. We can't check it in as otherwise
// Windows can't even checkout this repository.
let temp_dir = tempfile::tempdir().unwrap();
let file_with_newlines = temp_dir.path().join("test\n\x0B\x0C\rnewline.text");
fs::write(&file_with_newlines, "Look at my newlines").unwrap();
let srv = test::init_service(
App::new().service(Files::new("/", temp_dir.path()).index_file("Cargo.toml")),
)
.await;
let request = TestRequest::get()
.uri("/test%0A%0B%0C%0Dnewline.text")
.to_request();
let response = test::call_service(&srv, request).await;
assert_eq!(response.status(), StatusCode::OK);
let bytes = test::read_body(response).await;
let data = web::Bytes::from(fs::read(file_with_newlines).unwrap());
assert_eq!(bytes, data);
}
#[actix_rt::test] #[actix_rt::test]
async fn test_files_not_allowed() { async fn test_files_not_allowed() {
let srv = test::init_service(App::new().service(Files::new("/", "."))).await; let srv = test::init_service(App::new().service(Files::new("/", "."))).await;
@ -682,14 +633,15 @@ mod tests {
async fn test_named_file_allowed_method() { async fn test_named_file_allowed_method() {
let req = TestRequest::default().method(Method::GET).to_http_request(); let req = TestRequest::default().method(Method::GET).to_http_request();
let file = NamedFile::open_async("Cargo.toml").await.unwrap(); let file = NamedFile::open_async("Cargo.toml").await.unwrap();
let resp = file.respond_to(&req); let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK); assert_eq!(resp.status(), StatusCode::OK);
} }
#[actix_rt::test] #[actix_rt::test]
async fn test_static_files() { async fn test_static_files() {
let srv = let srv =
test::init_service(App::new().service(Files::new("/", ".").show_files_listing())).await; test::init_service(App::new().service(Files::new("/", ".").show_files_listing()))
.await;
let req = TestRequest::with_uri("/missing").to_request(); let req = TestRequest::with_uri("/missing").to_request();
let resp = test::call_service(&srv, req).await; let resp = test::call_service(&srv, req).await;
@ -702,7 +654,8 @@ mod tests {
assert_eq!(resp.status(), StatusCode::NOT_FOUND); assert_eq!(resp.status(), StatusCode::NOT_FOUND);
let srv = let srv =
test::init_service(App::new().service(Files::new("/", ".").show_files_listing())).await; test::init_service(App::new().service(Files::new("/", ".").show_files_listing()))
.await;
let req = TestRequest::with_uri("/tests").to_request(); let req = TestRequest::with_uri("/tests").to_request();
let resp = test::call_service(&srv, req).await; let resp = test::call_service(&srv, req).await;
assert_eq!( assert_eq!(
@ -863,21 +816,19 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_percent_encoding_2() { async fn test_percent_encoding_2() {
let temp_dir = tempfile::tempdir().unwrap(); let tmpdir = tempfile::tempdir().unwrap();
let filename = match cfg!(unix) { let filename = match cfg!(unix) {
true => "ض:?#[]{}<>()@!$&'`|*+,;= %20\n.test", true => "ض:?#[]{}<>()@!$&'`|*+,;= %20.test",
false => "ض#[]{}()@!$&'`+,;= %20.test", false => "ض#[]{}()@!$&'`+,;= %20.test",
}; };
let filename_encoded = filename let filename_encoded = filename
.as_bytes() .as_bytes()
.iter() .iter()
.fold(String::new(), |mut buf, c| { .map(|c| format!("%{:02X}", c))
write!(&mut buf, "%{:02X}", c).unwrap(); .collect::<String>();
buf std::fs::File::create(tmpdir.path().join(filename)).unwrap();
});
std::fs::File::create(temp_dir.path().join(filename)).unwrap();
let srv = test::init_service(App::new().service(Files::new("/", temp_dir.path()))).await; let srv = test::init_service(App::new().service(Files::new("", tmpdir.path()))).await;
let req = TestRequest::get() let req = TestRequest::get()
.uri(&format!("/{}", filename_encoded)) .uri(&format!("/{}", filename_encoded))
View File
@ -1,20 +1,19 @@
use std::{ use std::{
fmt,
fs::Metadata, fs::Metadata,
io, io,
path::{Path, PathBuf}, path::{Path, PathBuf},
time::{SystemTime, UNIX_EPOCH}, time::{SystemTime, UNIX_EPOCH},
}; };
use actix_service::{Service, ServiceFactory};
use actix_web::{ use actix_web::{
body::{self, BoxBody, SizedStream}, body::{self, BoxBody, SizedStream},
dev::{ dev::{AppService, HttpServiceFactory, ResourceDef, ServiceRequest, ServiceResponse},
self, AppService, HttpServiceFactory, ResourceDef, Service, ServiceFactory, ServiceRequest,
ServiceResponse,
},
http::{ http::{
header::{ header::{
self, Charset, ContentDisposition, ContentEncoding, DispositionParam, DispositionType, self, Charset, ContentDisposition, ContentEncoding, DispositionParam,
ExtendedValue, HeaderValue, DispositionType, ExtendedValue, HeaderValue,
}, },
StatusCode, StatusCode,
}, },
@ -23,12 +22,11 @@ use actix_web::{
use bitflags::bitflags; use bitflags::bitflags;
use derive_more::{Deref, DerefMut}; use derive_more::{Deref, DerefMut};
use futures_core::future::LocalBoxFuture; use futures_core::future::LocalBoxFuture;
use mime::Mime; use mime_guess::from_path;
use crate::{encoding::equiv_utf8_text, range::HttpRange}; use crate::{encoding::equiv_utf8_text, range::HttpRange};
bitflags! { bitflags! {
#[derive(Debug, Clone, Copy)]
pub(crate) struct Flags: u8 { pub(crate) struct Flags: u8 {
const ETAG = 0b0000_0001; const ETAG = 0b0000_0001;
const LAST_MD = 0b0000_0010; const LAST_MD = 0b0000_0010;
@ -39,7 +37,7 @@ bitflags! {
impl Default for Flags { impl Default for Flags {
fn default() -> Self { fn default() -> Self {
Flags::from_bits_truncate(0b0000_1111) Flags::from_bits_truncate(0b0000_0111)
} }
} }
@ -67,24 +65,49 @@ impl Default for Flags {
/// NamedFile::open_async("./static/index.html").await /// NamedFile::open_async("./static/index.html").await
/// } /// }
/// ``` /// ```
#[derive(Debug, Deref, DerefMut)] #[derive(Deref, DerefMut)]
pub struct NamedFile { pub struct NamedFile {
path: PathBuf,
#[deref] #[deref]
#[deref_mut] #[deref_mut]
file: File, file: File,
path: PathBuf,
modified: Option<SystemTime>, modified: Option<SystemTime>,
pub(crate) md: Metadata, pub(crate) md: Metadata,
pub(crate) flags: Flags, pub(crate) flags: Flags,
pub(crate) status_code: StatusCode, pub(crate) status_code: StatusCode,
pub(crate) content_type: Mime, pub(crate) content_type: mime::Mime,
pub(crate) content_disposition: ContentDisposition, pub(crate) content_disposition: header::ContentDisposition,
pub(crate) encoding: Option<ContentEncoding>, pub(crate) encoding: Option<ContentEncoding>,
} }
impl fmt::Debug for NamedFile {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("NamedFile")
.field("path", &self.path)
.field(
"file",
#[cfg(feature = "experimental-io-uring")]
{
&"tokio_uring::File"
},
#[cfg(not(feature = "experimental-io-uring"))]
{
&self.file
},
)
.field("modified", &self.modified)
.field("md", &self.md)
.field("flags", &self.flags)
.field("status_code", &self.status_code)
.field("content_type", &self.content_type)
.field("content_disposition", &self.content_disposition)
.field("encoding", &self.encoding)
.finish()
}
}
#[cfg(not(feature = "experimental-io-uring"))] #[cfg(not(feature = "experimental-io-uring"))]
pub(crate) use std::fs::File; pub(crate) use std::fs::File;
#[cfg(feature = "experimental-io-uring")] #[cfg(feature = "experimental-io-uring")]
pub(crate) use tokio_uring::fs::File; pub(crate) use tokio_uring::fs::File;
@ -98,18 +121,18 @@ impl NamedFile {
/// ///
/// # Examples /// # Examples
/// ```ignore /// ```ignore
/// use std::{
/// io::{self, Write as _},
/// env,
/// fs::File
/// };
/// use actix_files::NamedFile; /// use actix_files::NamedFile;
/// use std::io::{self, Write};
/// use std::env;
/// use std::fs::File;
/// ///
/// let mut file = File::create("foo.txt")?; /// fn main() -> io::Result<()> {
/// file.write_all(b"Hello, world!")?; /// let mut file = File::create("foo.txt")?;
/// let named_file = NamedFile::from_file(file, "bar.txt")?; /// file.write_all(b"Hello, world!")?;
/// # std::fs::remove_file("foo.txt"); /// let named_file = NamedFile::from_file(file, "bar.txt")?;
/// Ok(()) /// # std::fs::remove_file("foo.txt");
/// Ok(())
/// }
/// ``` /// ```
pub fn from_file<P: AsRef<Path>>(file: File, path: P) -> io::Result<NamedFile> { pub fn from_file<P: AsRef<Path>>(file: File, path: P) -> io::Result<NamedFile> {
let path = path.as_ref().to_path_buf(); let path = path.as_ref().to_path_buf();
@ -127,25 +150,20 @@ impl NamedFile {
} }
}; };
let ct = mime_guess::from_path(&path).first_or_octet_stream(); let ct = from_path(&path).first_or_octet_stream();
let disposition = match ct.type_() { let disposition = match ct.type_() {
mime::IMAGE | mime::TEXT | mime::AUDIO | mime::VIDEO => DispositionType::Inline, mime::IMAGE | mime::TEXT | mime::VIDEO => DispositionType::Inline,
mime::APPLICATION => match ct.subtype() { mime::APPLICATION => match ct.subtype() {
mime::JAVASCRIPT | mime::JSON => DispositionType::Inline, mime::JAVASCRIPT | mime::JSON => DispositionType::Inline,
name if name == "wasm" || name == "xhtml" => DispositionType::Inline, name if name == "wasm" => DispositionType::Inline,
_ => DispositionType::Attachment, _ => DispositionType::Attachment,
}, },
_ => DispositionType::Attachment, _ => DispositionType::Attachment,
}; };
// replace special characters in filenames which could occur on some filesystems let mut parameters =
let filename_s = filename vec![DispositionParam::Filename(String::from(filename.as_ref()))];
.replace('\n', "%0A") // \n line break
.replace('\x0B', "%0B") // \v vertical tab
.replace('\x0C', "%0C") // \f form feed
.replace('\r', "%0D"); // \r carriage return
let mut parameters = vec![DispositionParam::Filename(filename_s)];
if !filename.is_ascii() { if !filename.is_ascii() {
parameters.push(DispositionParam::FilenameExt(ExtendedValue { parameters.push(DispositionParam::FilenameExt(ExtendedValue {
@ -218,8 +236,8 @@ impl NamedFile {
/// Attempts to open a file asynchronously in read-only mode. /// Attempts to open a file asynchronously in read-only mode.
/// ///
/// When the `experimental-io-uring` crate feature is enabled, this will be async. Otherwise, it /// When the `experimental-io-uring` crate feature is enabled, this will be async.
/// will behave just like `open`. /// Otherwise, it will be just like [`open`][Self::open].
/// ///
/// # Examples /// # Examples
/// ``` /// ```
@ -244,13 +262,13 @@ impl NamedFile {
Self::from_file(file, path) Self::from_file(file, path)
} }
/// Returns reference to the underlying file object. /// Returns reference to the underlying `File` object.
#[inline] #[inline]
pub fn file(&self) -> &File { pub fn file(&self) -> &File {
&self.file &self.file
} }
/// Returns the filesystem path to this file. /// Retrieve the path of this file.
/// ///
/// # Examples /// # Examples
/// ``` /// ```
@ -268,53 +286,16 @@ impl NamedFile {
self.path.as_path() self.path.as_path()
} }
/// Returns the time the file was last modified. /// Set response **Status Code**
///
/// Returns `None` only on unsupported platforms; see [`std::fs::Metadata::modified()`].
/// Therefore, it is usually safe to unwrap this.
#[inline]
pub fn modified(&self) -> Option<SystemTime> {
self.modified
}
/// Returns the filesystem metadata associated with this file.
#[inline]
pub fn metadata(&self) -> &Metadata {
&self.md
}
/// Returns the `Content-Type` header that will be used when serving this file.
#[inline]
pub fn content_type(&self) -> &Mime {
&self.content_type
}
/// Returns the `Content-Disposition` that will be used when serving this file.
#[inline]
pub fn content_disposition(&self) -> &ContentDisposition {
&self.content_disposition
}
/// Returns the `Content-Encoding` that will be used when serving this file.
///
/// A return value of `None` indicates that the content is not already using a compressed
/// representation and may be subject to compression downstream.
#[inline]
pub fn content_encoding(&self) -> Option<ContentEncoding> {
self.encoding
}
/// Set response status code.
#[deprecated(since = "0.7.0", note = "Prefer `Responder::customize()`.")]
pub fn set_status_code(mut self, status: StatusCode) -> Self { pub fn set_status_code(mut self, status: StatusCode) -> Self {
self.status_code = status; self.status_code = status;
self self
} }
/// Sets the `Content-Type` header that will be used when serving this file. By default the /// Set the MIME Content-Type for serving this file. By default the Content-Type is inferred
/// `Content-Type` is inferred from the filename extension. /// from the filename extension.
#[inline] #[inline]
pub fn set_content_type(mut self, mime_type: Mime) -> Self { pub fn set_content_type(mut self, mime_type: mime::Mime) -> Self {
self.content_type = mime_type; self.content_type = mime_type;
self self
} }
@ -327,26 +308,24 @@ impl NamedFile {
/// filename is taken from the path provided in the `open` method after converting it to UTF-8 /// filename is taken from the path provided in the `open` method after converting it to UTF-8
/// (using `to_string_lossy`). /// (using `to_string_lossy`).
#[inline] #[inline]
pub fn set_content_disposition(mut self, cd: ContentDisposition) -> Self { pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self {
self.content_disposition = cd; self.content_disposition = cd;
self.flags.insert(Flags::CONTENT_DISPOSITION); self.flags.insert(Flags::CONTENT_DISPOSITION);
self self
} }
/// Disables `Content-Disposition` header. /// Disable `Content-Disposition` header.
/// ///
/// By default, the `Content-Disposition` header is sent. /// By default Content-Disposition` header is enabled.
#[inline] #[inline]
pub fn disable_content_disposition(mut self) -> Self { pub fn disable_content_disposition(mut self) -> Self {
self.flags.remove(Flags::CONTENT_DISPOSITION); self.flags.remove(Flags::CONTENT_DISPOSITION);
self self
} }
/// Sets content encoding for this file. /// Set content encoding for serving this file
/// ///
/// This prevents the `Compress` middleware from modifying the file contents and signals to /// Must be used with [`actix_web::middleware::Compress`] to take effect.
/// browsers/clients how to decode it. For example, if serving a compressed HTML file (e.g.,
/// `index.html.gz`) then use `.set_content_encoding(ContentEncoding::Gzip)`.
#[inline] #[inline]
pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self { pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self {
self.encoding = Some(enc); self.encoding = Some(enc);
@ -534,26 +513,11 @@ impl NamedFile {
length = ranges[0].length; length = ranges[0].length;
offset = ranges[0].start; offset = ranges[0].start;
// When a Content-Encoding header is present in a 206 partial content response // don't allow compression middleware to modify partial content
// for video content, it prevents browser video players from starting playback res.insert_header((
// before loading the whole video and also prevents seeking. header::CONTENT_ENCODING,
// HeaderValue::from_static("identity"),
// See: https://github.com/actix/actix-web/issues/2815 ));
//
// The assumption of this fix is that the video player knows to not send an
// Accept-Encoding header for this request and that downstream middleware will
// not attempt compression for requests without it.
//
// TODO: Solve question around what to do if self.encoding is set and partial
// range is requested. Reject request? Ignoring self.encoding seems wrong, too.
// In practice, it should not come up.
if req.headers().contains_key(&header::ACCEPT_ENCODING) {
// don't allow compression middleware to modify partial content
res.insert_header((
header::CONTENT_ENCODING,
HeaderValue::from_static("identity"),
));
}
res.insert_header(( res.insert_header((
header::CONTENT_RANGE, header::CONTENT_RANGE,
@ -663,7 +627,7 @@ impl Service<ServiceRequest> for NamedFileService {
type Error = Error; type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>; type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
dev::always_ready!(); actix_service::always_ready!();
fn call(&self, req: ServiceRequest) -> Self::Future { fn call(&self, req: ServiceRequest) -> Self::Future {
let (req, _) = req.into_parts(); let (req, _) = req.into_parts();
View File
@ -30,7 +30,7 @@ impl PathBufWrap {
let mut segment_count = path.matches('/').count() + 1; let mut segment_count = path.matches('/').count() + 1;
// we can decode the whole path here (instead of per-segment decoding) // we can decode the whole path here (instead of per-segment decoding)
// because we will reject `%2F` in paths using `segment_count`. // because we will reject `%2F` in paths using `segement_count`.
let path = percent_encoding::percent_decode_str(path) let path = percent_encoding::percent_decode_str(path)
.decode_utf8() .decode_utf8()
.map_err(|_| UriSegmentError::NotValidUtf8)?; .map_err(|_| UriSegmentError::NotValidUtf8)?;
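A small self-contained sketch (not from the repository) of why counting `/` before decoding is enough to catch a smuggled `%2F`, as the comment above describes; the example path is made up and the `percent-encoding` crate is assumed to be available:

```rust
use std::path::Path;

use percent_encoding::percent_decode_str;

fn main() {
    // Raw URL path as received, before percent-decoding.
    let raw = "assets%2Fsecret.txt";

    // Segments the client appears to send: no literal '/', so just one.
    let segment_count = raw.matches('/').count() + 1;

    // Decoding afterwards reveals the separator hidden behind `%2F`.
    let decoded = percent_decode_str(raw).decode_utf8().unwrap();
    let components = Path::new(&*decoded).components().count();

    // Two path components against one permitted segment: the
    // `i < segment_count` check in `parse_path` catches the mismatch,
    // which is how a smuggled `%2F` gets rejected.
    assert_eq!(segment_count, 1);
    assert_eq!(components, 2);
    assert!(components > segment_count);
}
```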
@ -59,8 +59,6 @@ impl PathBufWrap {
continue; continue;
} else if cfg!(windows) && segment.contains('\\') { } else if cfg!(windows) && segment.contains('\\') {
return Err(UriSegmentError::BadChar('\\')); return Err(UriSegmentError::BadChar('\\'));
} else if cfg!(windows) && segment.contains(':') {
return Err(UriSegmentError::BadChar(':'));
} else { } else {
buf.push(segment) buf.push(segment)
} }
@ -68,11 +66,7 @@ impl PathBufWrap {
// make sure we agree with stdlib parser // make sure we agree with stdlib parser
for (i, component) in buf.components().enumerate() { for (i, component) in buf.components().enumerate() {
assert!( assert!(matches!(component, Component::Normal(_)));
matches!(component, Component::Normal(_)),
"component `{:?}` is not normal",
component
);
assert!(i < segment_count); assert!(i < segment_count);
} }
@ -91,12 +85,14 @@ impl FromRequest for PathBufWrap {
type Future = Ready<Result<Self, Self::Error>>; type Future = Ready<Result<Self, Self::Error>>;
fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future { fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future {
ready(req.match_info().unprocessed().parse()) ready(req.match_info().path().parse())
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::iter::FromIterator;
use super::*; use super::*;
#[test] #[test]
@ -163,26 +159,4 @@ mod tests {
PathBuf::from_iter(vec!["etc/passwd"]) PathBuf::from_iter(vec!["etc/passwd"])
); );
} }
#[test]
#[cfg_attr(windows, should_panic)]
fn windows_drive_traversal() {
// detect issues in windows that could lead to path traversal
// see <https://github.com/SergioBenitez/Rocket/issues/1949
assert_eq!(
PathBufWrap::parse_path("C:test.txt", false).unwrap().0,
PathBuf::from_iter(vec!["C:test.txt"])
);
assert_eq!(
PathBufWrap::parse_path("C:../whatever", false).unwrap().0,
PathBuf::from_iter(vec!["C:../whatever"])
);
assert_eq!(
PathBufWrap::parse_path(":test.txt", false).unwrap().0,
PathBuf::from_iter(vec![":test.txt"])
);
}
} }
View File
@ -1,36 +1,4 @@
use std::fmt; use derive_more::{Display, Error};
use derive_more::Error;
/// Copy of `http_range::HttpRangeParseError`.
#[derive(Debug, Clone)]
enum HttpRangeParseError {
InvalidRange,
NoOverlap,
}
impl From<http_range::HttpRangeParseError> for HttpRangeParseError {
fn from(err: http_range::HttpRangeParseError) -> Self {
match err {
http_range::HttpRangeParseError::InvalidRange => Self::InvalidRange,
http_range::HttpRangeParseError::NoOverlap => Self::NoOverlap,
}
}
}
#[derive(Debug, Clone, Error)]
#[non_exhaustive]
pub struct ParseRangeErr(#[error(not(source))] HttpRangeParseError);
impl fmt::Display for ParseRangeErr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("invalid Range header: ")?;
f.write_str(match self.0 {
HttpRangeParseError::InvalidRange => "invalid syntax",
HttpRangeParseError::NoOverlap => "range starts after end of content",
})
}
}
/// HTTP Range header representation. /// HTTP Range header representation.
#[derive(Debug, Clone, Copy)] #[derive(Debug, Clone, Copy)]
@ -42,22 +10,26 @@ pub struct HttpRange {
pub length: u64, pub length: u64,
} }
#[derive(Debug, Clone, Display, Error)]
#[display(fmt = "Parse HTTP Range failed")]
pub struct ParseRangeErr(#[error(not(source))] ());
impl HttpRange { impl HttpRange {
/// Parses Range HTTP header string as per RFC 2616. /// Parses Range HTTP header string as per RFC 2616.
/// ///
/// `header` is HTTP Range header (e.g. `bytes=0-9`). /// `header` is HTTP Range header (e.g. `bytes=0-9`).
/// `size` is full size of response (file). /// `size` is full size of response (file).
pub fn parse(header: &str, size: u64) -> Result<Vec<HttpRange>, ParseRangeErr> { pub fn parse(header: &str, size: u64) -> Result<Vec<HttpRange>, ParseRangeErr> {
let ranges = match http_range::HttpRange::parse(header, size) {
http_range::HttpRange::parse(header, size).map_err(|err| ParseRangeErr(err.into()))?; Ok(ranges) => Ok(ranges
.iter()
Ok(ranges .map(|range| HttpRange {
.iter() start: range.start,
.map(|range| HttpRange { length: range.length,
start: range.start, })
length: range.length, .collect()),
}) Err(_) => Err(ParseRangeErr(())),
.collect()) }
} }
} }
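For reference, a brief sketch (not taken from the diff) of how `HttpRange::parse` is typically used; the header value and content size here are arbitrary:

```rust
use actix_files::HttpRange;

fn main() {
    // A satisfiable request for the first ten bytes of a 100-byte file.
    let ranges = HttpRange::parse("bytes=0-9", 100).unwrap();
    assert_eq!(ranges[0].start, 0);
    assert_eq!(ranges[0].length, 10);

    // A range that starts past the end of the content is rejected,
    // surfaced as the `ParseRangeErr` defined in this module.
    assert!(HttpRange::parse("bytes=200-", 100).is_err());
}
```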
View File
@ -2,7 +2,7 @@ use std::{fmt, io, ops::Deref, path::PathBuf, rc::Rc};
use actix_web::{ use actix_web::{
body::BoxBody, body::BoxBody,
dev::{self, Service, ServiceRequest, ServiceResponse}, dev::{Service, ServiceRequest, ServiceResponse},
error::Error, error::Error,
guard::Guard, guard::Guard,
http::{header, Method}, http::{header, Method},
@ -23,7 +23,7 @@ impl Deref for FilesService {
type Target = FilesServiceInner; type Target = FilesServiceInner;
fn deref(&self) -> &Self::Target { fn deref(&self) -> &Self::Target {
&self.0 &*self.0
} }
} }
@ -62,7 +62,11 @@ impl FilesService {
} }
} }
fn serve_named_file(&self, req: ServiceRequest, mut named_file: NamedFile) -> ServiceResponse { fn serve_named_file(
&self,
req: ServiceRequest,
mut named_file: NamedFile,
) -> ServiceResponse {
if let Some(ref mime_override) = self.mime_override { if let Some(ref mime_override) = self.mime_override {
let new_disposition = mime_override(&named_file.content_type.type_()); let new_disposition = mime_override(&named_file.content_type.type_());
named_file.content_disposition.disposition = new_disposition; named_file.content_disposition.disposition = new_disposition;
@ -79,7 +83,7 @@ impl FilesService {
let (req, _) = req.into_parts(); let (req, _) = req.into_parts();
(self.renderer)(&dir, &req).unwrap_or_else(|err| ServiceResponse::from_err(err, req)) (self.renderer)(&dir, &req).unwrap_or_else(|e| ServiceResponse::from_err(e, req))
} }
} }
@ -94,7 +98,7 @@ impl Service<ServiceRequest> for FilesService {
type Error = Error; type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>; type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
dev::always_ready!(); actix_service::always_ready!();
fn call(&self, req: ServiceRequest) -> Self::Future { fn call(&self, req: ServiceRequest) -> Self::Future {
let is_method_valid = if let Some(guard) = &self.guards { let is_method_valid = if let Some(guard) = &self.guards {
@ -116,14 +120,14 @@ impl Service<ServiceRequest> for FilesService {
)); ));
} }
let path_on_disk = let real_path =
match PathBufWrap::parse_path(req.match_info().unprocessed(), this.hidden_files) { match PathBufWrap::parse_path(req.match_info().path(), this.hidden_files) {
Ok(item) => item, Ok(item) => item,
Err(err) => return Ok(req.error_response(err)), Err(err) => return Ok(req.error_response(err)),
}; };
if let Some(filter) = &this.path_filter { if let Some(filter) = &this.path_filter {
if !filter(path_on_disk.as_ref(), req.head()) { if !filter(real_path.as_ref(), req.head()) {
if let Some(ref default) = this.default { if let Some(ref default) = this.default {
return default.call(req).await; return default.call(req).await;
} else { } else {
@ -133,7 +137,7 @@ impl Service<ServiceRequest> for FilesService {
} }
// full file path // full file path
let path = this.directory.join(&path_on_disk); let path = this.directory.join(&real_path);
if let Err(err) = path.canonicalize() { if let Err(err) = path.canonicalize() {
return this.handle_err(err, req).await; return this.handle_err(err, req).await;
} }
@ -162,7 +166,7 @@ impl Service<ServiceRequest> for FilesService {
} }
} }
None if this.show_index => Ok(this.show_index(req, path)), None if this.show_index => Ok(this.show_index(req, path)),
None => Ok(ServiceResponse::from_err( _ => Ok(ServiceResponse::from_err(
FilesError::IsDirectory, FilesError::IsDirectory,
req.into_parts().0, req.into_parts().0,
)), )),
@ -171,7 +175,8 @@ impl Service<ServiceRequest> for FilesService {
match NamedFile::open_async(&path).await { match NamedFile::open_async(&path).await {
Ok(mut named_file) => { Ok(mut named_file) => {
if let Some(ref mime_override) = this.mime_override { if let Some(ref mime_override) = this.mime_override {
let new_disposition = mime_override(&named_file.content_type.type_()); let new_disposition =
mime_override(&named_file.content_type.type_());
named_file.content_disposition.disposition = new_disposition; named_file.content_disposition.disposition = new_disposition;
} }
named_file.flags = this.file_flags; named_file.flags = this.file_flags;
View File
@ -1,11 +1,11 @@
use actix_files::{Files, NamedFile}; use actix_files::Files;
use actix_web::{ use actix_web::{
http::{ http::{
header::{self, HeaderValue}, header::{self, HeaderValue},
StatusCode, StatusCode,
}, },
test::{self, TestRequest}, test::{self, TestRequest},
web, App, App,
}; };
#[actix_web::test] #[actix_web::test]
@ -19,12 +19,13 @@ async fn test_utf8_file_contents() {
assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.status(), StatusCode::OK);
assert_eq!( assert_eq!(
res.headers().get(header::CONTENT_TYPE), res.headers().get(header::CONTENT_TYPE),
Some(&HeaderValue::from_static("text/plain; charset=utf-8")), Some(&HeaderValue::from_static("text/plain")),
); );
// disable UTF-8 attribute // prefer UTF-8 encoding
let srv = let srv =
test::init_service(App::new().service(Files::new("/", "./tests").prefer_utf8(false))).await; test::init_service(App::new().service(Files::new("/", "./tests").prefer_utf8(true)))
.await;
let req = TestRequest::with_uri("/utf8.txt").to_request(); let req = TestRequest::with_uri("/utf8.txt").to_request();
let res = test::call_service(&srv, req).await; let res = test::call_service(&srv, req).await;
@ -32,34 +33,6 @@ async fn test_utf8_file_contents() {
assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.status(), StatusCode::OK);
assert_eq!( assert_eq!(
res.headers().get(header::CONTENT_TYPE), res.headers().get(header::CONTENT_TYPE),
Some(&HeaderValue::from_static("text/plain")), Some(&HeaderValue::from_static("text/plain; charset=utf-8")),
);
}
#[actix_web::test]
async fn partial_range_response_encoding() {
let srv = test::init_service(App::new().default_service(web::to(|| async {
NamedFile::open_async("./tests/test.binary").await.unwrap()
})))
.await;
// range request without accept-encoding returns no content-encoding header
let req = TestRequest::with_uri("/")
.append_header((header::RANGE, "bytes=10-20"))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::PARTIAL_CONTENT);
assert!(!res.headers().contains_key(header::CONTENT_ENCODING));
// range request with accept-encoding returns a content-encoding header
let req = TestRequest::with_uri("/")
.append_header((header::RANGE, "bytes=10-20"))
.append_header((header::ACCEPT_ENCODING, "identity"))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::PARTIAL_CONTENT);
assert_eq!(
res.headers().get(header::CONTENT_ENCODING).unwrap(),
"identity"
); );
} }
View File
@ -12,7 +12,9 @@ async fn test_guard_filter() {
let srv = test::init_service( let srv = test::init_service(
App::new() App::new()
.service(Files::new("/", "./tests/fixtures/guards/first").guard(Host("first.com"))) .service(Files::new("/", "./tests/fixtures/guards/first").guard(Host("first.com")))
.service(Files::new("/", "./tests/fixtures/guards/second").guard(Host("second.com"))), .service(
Files::new("/", "./tests/fixtures/guards/second").guard(Host("second.com")),
),
) )
.await; .await;
View File
@ -9,7 +9,8 @@ use actix_web::{
async fn test_directory_traversal_prevention() { async fn test_directory_traversal_prevention() {
let srv = test::init_service(App::new().service(Files::new("/", "./tests"))).await; let srv = test::init_service(App::new().service(Files::new("/", "./tests"))).await;
let req = TestRequest::with_uri("/../../../../../../../../../../../etc/passwd").to_request(); let req =
TestRequest::with_uri("/../../../../../../../../../../../etc/passwd").to_request();
let res = test::call_service(&srv, req).await; let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.status(), StatusCode::NOT_FOUND);
View File
@ -1,103 +1,67 @@
# Changes # Changes
## Unreleased ## Unreleased - 2021-xx-xx
- Minimum supported Rust version (MSRV) is now 1.72.
## 3.2.0
- Minimum supported Rust version (MSRV) is now 1.68 due to transitive `time` dependency.
## 3.1.0
- Minimum supported Rust version (MSRV) is now 1.59.
## 3.0.0
- `TestServer::stop` is now async and will wait for the server and system to shutdown. [#2442]
- Added `TestServer::client_headers` method. [#2097]
- Update `actix-server` dependency to `2`.
- Update `actix-tls` dependency to `3`.
- Update `bytes` to `1.0`. [#1813]
- Minimum supported Rust version (MSRV) is now 1.57.
[#2442]: https://github.com/actix/actix-web/pull/2442
[#2097]: https://github.com/actix/actix-web/pull/2097
[#1813]: https://github.com/actix/actix-web/pull/1813
<details>
<summary>3.0.0 Pre-Releases</summary>
## 3.0.0-beta.13
- No significant changes since `3.0.0-beta.12`.
## 3.0.0-beta.12
- No significant changes since `3.0.0-beta.11`.
## 3.0.0-beta.11
## 3.0.0-beta.11 - 2022-01-04
- Minimum supported Rust version (MSRV) is now 1.54. - Minimum supported Rust version (MSRV) is now 1.54.
## 3.0.0-beta.10
## 3.0.0-beta.10 - 2021-12-27
- Update `actix-server` to `2.0.0-rc.2`. [#2550] - Update `actix-server` to `2.0.0-rc.2`. [#2550]
[#2550]: https://github.com/actix/actix-web/pull/2550 [#2550]: https://github.com/actix/actix-web/pull/2550
## 3.0.0-beta.9
## 3.0.0-beta.9 - 2021-12-11
- No significant changes since `3.0.0-beta.8`. - No significant changes since `3.0.0-beta.8`.
## 3.0.0-beta.8
## 3.0.0-beta.8 - 2021-11-30
- Update `actix-tls` to `3.0.0-rc.1`. [#2474] - Update `actix-tls` to `3.0.0-rc.1`. [#2474]
[#2474]: https://github.com/actix/actix-web/pull/2474 [#2474]: https://github.com/actix/actix-web/pull/2474
## 3.0.0-beta.7
## 3.0.0-beta.7 - 2021-11-22
- Fix compatibility with experimental `io-uring` feature of `actix-rt`. [#2408] - Fix compatibility with experimental `io-uring` feature of `actix-rt`. [#2408]
[#2408]: https://github.com/actix/actix-web/pull/2408 [#2408]: https://github.com/actix/actix-web/pull/2408
## 3.0.0-beta.6
## 3.0.0-beta.6 - 2021-11-15
- `TestServer::stop` is now async and will wait for the server and system to shutdown. [#2442] - `TestServer::stop` is now async and will wait for the server and system to shutdown. [#2442]
- Update `actix-server` to `2.0.0-beta.9`. [#2442] - Update `actix-server` to `2.0.0-beta.9`. [#2442]
- Minimum supported Rust version (MSRV) is now 1.52. - Minimum supported Rust version (MSRV) is now 1.52.
[#2442]: https://github.com/actix/actix-web/pull/2442 [#2442]: https://github.com/actix/actix-web/pull/2442
## 3.0.0-beta.5
## 3.0.0-beta.5 - 2021-09-09
- Minimum supported Rust version (MSRV) is now 1.51. - Minimum supported Rust version (MSRV) is now 1.51.
## 3.0.0-beta.4
## 3.0.0-beta.4 - 2021-04-02
- Added `TestServer::client_headers` method. [#2097] - Added `TestServer::client_headers` method. [#2097]
[#2097]: https://github.com/actix/actix-web/pull/2097 [#2097]: https://github.com/actix/actix-web/pull/2097
## 3.0.0-beta.3
## 3.0.0-beta.3 - 2021-03-09
- No notable changes. - No notable changes.
## 3.0.0-beta.2
## 3.0.0-beta.2 - 2021-02-10
- No notable changes. - No notable changes.
## 3.0.0-beta.1
## 3.0.0-beta.1 - 2021-01-07
- Update `bytes` to `1.0`. [#1813] - Update `bytes` to `1.0`. [#1813]
[#1813]: https://github.com/actix/actix-web/pull/1813 [#1813]: https://github.com/actix/actix-web/pull/1813
</details>
## 2.1.0
## 2.1.0 - 2020-11-25
- Add ability to set address for `TestServer`. [#1645] - Add ability to set address for `TestServer`. [#1645]
- Upgrade `base64` to `0.13`. - Upgrade `base64` to `0.13`.
- Upgrade `serde_urlencoded` to `0.7`. [#1773] - Upgrade `serde_urlencoded` to `0.7`. [#1773]
@ -105,12 +69,12 @@
[#1773]: https://github.com/actix/actix-web/pull/1773 [#1773]: https://github.com/actix/actix-web/pull/1773
[#1645]: https://github.com/actix/actix-web/pull/1645 [#1645]: https://github.com/actix/actix-web/pull/1645
## 2.0.0
## 2.0.0 - 2020-09-11
- Update actix-codec and actix-utils dependencies. - Update actix-codec and actix-utils dependencies.
## 2.0.0-alpha.1
## 2.0.0-alpha.1 - 2020-05-23
- Update the `time` dependency to 0.2.7 - Update the `time` dependency to 0.2.7
- Update `actix-connect` dependency to 2.0.0-alpha.2 - Update `actix-connect` dependency to 2.0.0-alpha.2
- Make `test_server` `async` fn. - Make `test_server` `async` fn.
@ -119,57 +83,56 @@
- Update `base64` dependency to 0.12 - Update `base64` dependency to 0.12
- Update `env_logger` dependency to 0.7 - Update `env_logger` dependency to 0.7
## 1.0.0 ## 1.0.0 - 2019-12-13
- Replaced `TestServer::start()` with `test_server()` - Replaced `TestServer::start()` with `test_server()`
## 1.0.0-alpha.3
## 1.0.0-alpha.3 - 2019-12-07
- Migrate to `std::future` - Migrate to `std::future`
## 0.2.5
## 0.2.5 - 2019-09-17
- Update serde_urlencoded to "0.6.1" - Update serde_urlencoded to "0.6.1"
- Increase TestServerRuntime timeouts from 500ms to 3000ms - Increase TestServerRuntime timeouts from 500ms to 3000ms
- Do not override current `System` - Do not override current `System`
## 0.2.4
## 0.2.4 - 2019-07-18
- Update actix-server to 0.6 - Update actix-server to 0.6
## 0.2.3
## 0.2.3 - 2019-07-16
- Add `delete`, `options`, `patch` methods to `TestServerRunner` - Add `delete`, `options`, `patch` methods to `TestServerRunner`
## 0.2.2
## 0.2.2 - 2019-06-16
- Add .put() and .sput() methods - Add .put() and .sput() methods
## 0.2.1
## 0.2.1 - 2019-06-05
- Add license files - Add license files
## 0.2.0
## 0.2.0 - 2019-05-12
- Update awc and actix-http deps - Update awc and actix-http deps
## 0.1.1
## 0.1.1 - 2019-04-24
- Always make new connection for http client - Always make new connection for http client
## 0.1.0
## 0.1.0 - 2019-04-16
- No changes - No changes
## 0.1.0-alpha.3
## 0.1.0-alpha.3 - 2019-04-02
- Request functions accept path #743 - Request functions accept path #743
## 0.1.0-alpha.2
## 0.1.0-alpha.2 - 2019-03-29
- Added TestServerRuntime::load_body() method - Added TestServerRuntime::load_body() method
- Update actix-http and awc libraries - Update actix-http and awc libraries
## 0.1.0-alpha.1
## 0.1.0-alpha.1 - 2019-03-28
- Initial impl - Initial impl
View File
@ -1,11 +1,11 @@
[package] [package]
name = "actix-http-test" name = "actix-http-test"
version = "3.2.0" version = "3.0.0-beta.11"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"] authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Various helpers for Actix applications to use during testing" description = "Various helpers for Actix applications to use during testing"
keywords = ["http", "web", "framework", "async", "futures"] keywords = ["http", "web", "framework", "async", "futures"]
homepage = "https://actix.rs" homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web" repository = "https://github.com/actix/actix-web.git"
categories = [ categories = [
"network-programming", "network-programming",
"asynchronous", "asynchronous",
@ -13,22 +13,14 @@ categories = [
"web-programming::websocket", "web-programming::websocket",
] ]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
edition = "2021" edition = "2018"
[package.metadata.docs.rs] [package.metadata.docs.rs]
features = [] features = []
[package.metadata.cargo_check_external_types] [lib]
allowed_external_types = [ name = "actix_http_test"
"actix_codec::*", path = "src/lib.rs"
"actix_http::*",
"actix_server::*",
"awc::*",
"bytes::*",
"futures_core::*",
"http::*",
"tokio::*",
]
[features] [features]
default = [] default = []
@ -37,28 +29,27 @@ default = []
openssl = ["tls-openssl", "awc/openssl"] openssl = ["tls-openssl", "awc/openssl"]
[dependencies] [dependencies]
actix-service = "2" actix-service = "2.0.0"
actix-codec = "0.5" actix-codec = "0.4.1"
actix-tls = "3" actix-tls = "3.0.0"
actix-utils = "3" actix-utils = "3.0.0"
actix-rt = "2.2" actix-rt = "2.2"
actix-server = "2" actix-server = "2.0.0-rc.2"
awc = { version = "3", default-features = false } awc = { version = "3.0.0-beta.18", default-features = false }
base64 = "0.13"
bytes = "1" bytes = "1"
futures-core = { version = "0.3.17", default-features = false } futures-core = { version = "0.3.7", default-features = false }
http = "0.2.7" http = "0.2.5"
log = "0.4" log = "0.4"
socket2 = "0.5" socket2 = "0.4"
serde = "1" serde = "1.0"
serde_json = "1" serde_json = "1.0"
slab = "0.4" slab = "0.4"
serde_urlencoded = "0.7" serde_urlencoded = "0.7"
tls-openssl = { version = "0.10.55", package = "openssl", optional = true } tls-openssl = { version = "0.10.9", package = "openssl", optional = true }
tokio = { version = "1.24.2", features = ["sync"] } tokio = { version = "1.8.4", features = ["sync"] }
[dev-dependencies] [dev-dependencies]
actix-http = "3" actix-web = { version = "4.0.0-beta.19", default-features = false, features = ["cookies"] }
actix-http = "3.0.0-beta.18"
[lints]
workspace = true
View File
@ -1,20 +1,17 @@
# `actix-http-test` # actix-http-test
<!-- prettier-ignore-start --> > Various helpers for Actix applications to use during testing.
[![crates.io](https://img.shields.io/crates/v/actix-http-test?label=latest)](https://crates.io/crates/actix-http-test) [![crates.io](https://img.shields.io/crates/v/actix-http-test?label=latest)](https://crates.io/crates/actix-http-test)
[![Documentation](https://docs.rs/actix-http-test/badge.svg?version=3.2.0)](https://docs.rs/actix-http-test/3.2.0) [![Documentation](https://docs.rs/actix-http-test/badge.svg?version=3.0.0-beta.11)](https://docs.rs/actix-http-test/3.0.0-beta.11)
![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg) [![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http-test) ![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http-test)
<br> <br>
[![Dependency Status](https://deps.rs/crate/actix-http-test/3.2.0/status.svg)](https://deps.rs/crate/actix-http-test/3.2.0) [![Dependency Status](https://deps.rs/crate/actix-http-test/3.0.0-beta.11/status.svg)](https://deps.rs/crate/actix-http-test/3.0.0-beta.11)
[![Download](https://img.shields.io/crates/d/actix-http-test.svg)](https://crates.io/crates/actix-http-test) [![Download](https://img.shields.io/crates/d/actix-http-test.svg)](https://crates.io/crates/actix-http-test)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) [![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
<!-- prettier-ignore-end --> ## Documentation & Resources
<!-- cargo-rdme start --> - [API Documentation](https://docs.rs/actix-http-test)
- Minimum Supported Rust Version (MSRV): 1.54
Various helpers for Actix applications to use during testing.
<!-- cargo-rdme end -->
View File
@ -1,8 +1,9 @@
//! Various helpers for Actix applications to use during testing. //! Various helpers for Actix applications to use during testing.
#![deny(rust_2018_idioms, nonstandard_style)]
#![warn(future_incompatible)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")] #![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")] #![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#[cfg(feature = "openssl")] #[cfg(feature = "openssl")]
extern crate tls_openssl as openssl; extern crate tls_openssl as openssl;
@ -28,31 +29,27 @@ use tokio::sync::mpsc;
/// for HTTP applications. /// for HTTP applications.
/// ///
/// # Examples /// # Examples
/// /// ```no_run
/// ``` /// use actix_http::HttpService;
/// use actix_http::{HttpService, Response, Error, StatusCode};
/// use actix_http_test::test_server; /// use actix_http_test::test_server;
/// use actix_service::{fn_service, map_config, ServiceFactoryExt as _}; /// use actix_web::{web, App, HttpResponse, Error};
/// ///
/// #[actix_rt::test] /// async fn my_handler() -> Result<HttpResponse, Error> {
/// # async fn hidden_test() {} /// Ok(HttpResponse::Ok().into())
/// }
///
/// #[actix_web::test]
/// async fn test_example() { /// async fn test_example() {
/// let srv = test_server(|| { /// let mut srv = TestServer::start(||
/// HttpService::build() /// HttpService::new(
/// .h1(fn_service(|req| async move { /// App::new().service(web::resource("/").to(my_handler))
/// Ok::<_, Error>(Response::ok()) /// )
/// })) /// );
/// .tcp()
/// .map_err(|_| ())
/// })
/// .await;
/// ///
/// let req = srv.get("/"); /// let req = srv.get("/");
/// let response = req.send().await.unwrap(); /// let response = req.send().await.unwrap();
/// /// assert!(response.status().is_success());
/// assert_eq!(response.status(), StatusCode::OK);
/// } /// }
/// # actix_rt::System::new().block_on(test_example());
/// ``` /// ```
pub async fn test_server<F: ServerServiceFactory<TcpStream>>(factory: F) -> TestServer { pub async fn test_server<F: ServerServiceFactory<TcpStream>>(factory: F) -> TestServer {
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap(); let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
@ -90,7 +87,6 @@ pub async fn test_server_with_addr<F: ServerServiceFactory<TcpStream>>(
// notify TestServer that server and system have shut down // notify TestServer that server and system have shut down
// all thread managed resources should be dropped at this point // all thread managed resources should be dropped at this point
#[allow(clippy::let_underscore_future)]
let _ = thread_stop_tx.send(()); let _ = thread_stop_tx.send(());
}); });
@ -106,7 +102,7 @@ pub async fn test_server_with_addr<F: ServerServiceFactory<TcpStream>>(
builder.set_verify(SslVerifyMode::NONE); builder.set_verify(SslVerifyMode::NONE);
let _ = builder let _ = builder
.set_alpn_protos(b"\x02h2\x08http/1.1") .set_alpn_protos(b"\x02h2\x08http/1.1")
.map_err(|err| log::error!("Can not set ALPN protocol: {err}")); .map_err(|e| log::error!("Can not set alpn protocol: {:?}", e));
Connector::new() Connector::new()
.conn_lifetime(Duration::from_secs(0)) .conn_lifetime(Duration::from_secs(0))
@ -298,7 +294,6 @@ impl Drop for TestServer {
// without needing to await anything // without needing to await anything
// signal server to stop // signal server to stop
#[allow(clippy::let_underscore_future)]
let _ = self.server.stop(true); let _ = self.server.stop(true);
// signal system to stop // signal system to stop

File diff suppressed because it is too large
View File
@ -1,188 +1,119 @@
[package] [package]
name = "actix-http" name = "actix-http"
version = "3.10.0" version = "3.0.0-beta.18"
authors = [ authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
"Nikolay Kim <fafhrd91@gmail.com>", description = "HTTP primitives for the Actix ecosystem"
"Rob Ede <robjtede@icloud.com>",
]
description = "HTTP types and services for the Actix ecosystem"
keywords = ["actix", "http", "framework", "async", "futures"] keywords = ["actix", "http", "framework", "async", "futures"]
homepage = "https://actix.rs" homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web" repository = "https://github.com/actix/actix-web.git"
categories = [ categories = [
"network-programming", "network-programming",
"asynchronous", "asynchronous",
"web-programming::http-server", "web-programming::http-server",
"web-programming::websocket", "web-programming::websocket",
] ]
license.workspace = true license = "MIT OR Apache-2.0"
edition.workspace = true edition = "2018"
rust-version.workspace = true
[package.metadata.docs.rs] [package.metadata.docs.rs]
rustdoc-args = ["--cfg", "docsrs"] # features that docs.rs will build with
features = [ features = ["openssl", "rustls", "compress-brotli", "compress-gzip", "compress-zstd"]
"http2",
"ws",
"openssl",
"rustls-0_20",
"rustls-0_21",
"rustls-0_22",
"rustls-0_23",
"compress-brotli",
"compress-gzip",
"compress-zstd",
]
[package.metadata.cargo_check_external_types] [lib]
allowed_external_types = [ name = "actix_http"
"actix_codec::*", path = "src/lib.rs"
"actix_service::*",
"actix_tls::*",
"actix_utils::*",
"bytes::*",
"bytestring::*",
"encoding_rs::*",
"futures_core::*",
"h2::*",
"http::*",
"httparse::*",
"language_tags::*",
"mime::*",
"openssl::*",
"rustls::*",
"tokio_util::*",
"tokio::*",
]
[features] [features]
default = [] default = []
# HTTP/2 protocol support # openssl
http2 = ["dep:h2"] openssl = ["actix-tls/accept", "actix-tls/openssl"]
# WebSocket protocol implementation # rustls support
ws = [ rustls = ["actix-tls/accept", "actix-tls/rustls"]
"dep:local-channel",
"dep:base64",
"dep:rand",
"dep:sha1",
]
# TLS via OpenSSL # enable compression support
openssl = ["__tls", "actix-tls/accept", "actix-tls/openssl"] compress-brotli = ["brotli2", "__compress"]
compress-gzip = ["flate2", "__compress"]
compress-zstd = ["zstd", "__compress"]
# TLS via Rustls v0.20 # Internal (PRIVATE!) features used to aid testing and cheking feature status.
rustls = ["__tls", "rustls-0_20"] # Don't rely on these whatsoever. They may disappear at anytime.
# TLS via Rustls v0.20
rustls-0_20 = ["__tls", "actix-tls/accept", "actix-tls/rustls-0_20"]
# TLS via Rustls v0.21
rustls-0_21 = ["__tls", "actix-tls/accept", "actix-tls/rustls-0_21"]
# TLS via Rustls v0.22
rustls-0_22 = ["__tls", "actix-tls/accept", "actix-tls/rustls-0_22"]
# TLS via Rustls v0.23
rustls-0_23 = ["__tls", "actix-tls/accept", "actix-tls/rustls-0_23"]
# Compression codecs
compress-brotli = ["__compress", "dep:brotli"]
compress-gzip = ["__compress", "dep:flate2"]
compress-zstd = ["__compress", "dep:zstd"]
# Internal (PRIVATE!) features used to aid testing and checking feature status.
# Don't rely on these whatsoever. They are semver-exempt and may disappear at anytime.
__compress = [] __compress = []
# Internal (PRIVATE!) features used to aid checking feature status.
# Don't rely on these whatsoever. They may disappear at anytime.
__tls = []
[dependencies] [dependencies]
actix-service = "2" actix-service = "2.0.0"
actix-codec = "0.5" actix-codec = "0.4.1"
actix-utils = "3" actix-utils = "3.0.0"
actix-rt = { version = "2.2", default-features = false } actix-rt = { version = "2.2", default-features = false }
bitflags = "2" ahash = "0.7"
base64 = "0.13"
bitflags = "1.2"
bytes = "1" bytes = "1"
bytestring = "1" bytestring = "1"
derive_more = { version = "2", features = ["as_ref", "deref", "deref_mut", "display", "error", "from"] } derive_more = "0.99.5"
encoding_rs = "0.8" encoding_rs = "0.8"
foldhash = "0.1" futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] } h2 = "0.3.9"
http = "0.2.7" http = "0.2.5"
httparse = "1.5.1" httparse = "1.5.1"
httpdate = "1.0.1" httpdate = "1.0.1"
itoa = "1" itoa = "1"
language-tags = "0.3" language-tags = "0.3"
mime = "0.3.4" local-channel = "0.1"
log = "0.4"
mime = "0.3"
percent-encoding = "2.1" percent-encoding = "2.1"
pin-project-lite = "0.2" pin-project-lite = "0.2"
rand = "0.8"
sha-1 = "0.10"
smallvec = "1.6.1" smallvec = "1.6.1"
tokio = { version = "1.24.2", features = [] }
tokio-util = { version = "0.7", features = ["io", "codec"] }
tracing = { version = "0.1.30", default-features = false, features = ["log"] }
# http2 # tls
h2 = { version = "0.3.26", optional = true } actix-tls = { version = "3.0.0", default-features = false, optional = true }
# websockets # compression
local-channel = { version = "0.1", optional = true } brotli2 = { version="0.3.2", optional = true }
base64 = { version = "0.22", optional = true }
rand = { version = "0.9", optional = true }
sha1 = { version = "0.10", optional = true }
# openssl/rustls
actix-tls = { version = "3.4", default-features = false, optional = true }
# compress-*
brotli = { version = "7", optional = true }
flate2 = { version = "1.0.13", optional = true } flate2 = { version = "1.0.13", optional = true }
zstd = { version = "0.13", optional = true } zstd = { version = "0.9", optional = true }
[dev-dependencies] [dev-dependencies]
actix-http-test = { version = "3", features = ["openssl"] } actix-http-test = { version = "3.0.0-beta.11", features = ["openssl"] }
actix-server = "2" actix-server = "2.0.0-rc.2"
actix-tls = { version = "3.4", features = ["openssl", "rustls-0_23-webpki-roots"] } actix-tls = { version = "3.0.0", features = ["openssl"] }
actix-web = "4" actix-web = "4.0.0-beta.19"
async-stream = "0.3" async-stream = "0.3"
criterion = { version = "0.5", features = ["html_reports"] } criterion = { version = "0.3", features = ["html_reports"] }
divan = "0.1.8" env_logger = "0.9"
env_logger = "0.11" futures-util = { version = "0.3.7", default-features = false, features = ["alloc"] }
futures-util = { version = "0.3.17", default-features = false, features = ["alloc"] } rcgen = "0.8"
memchr = "2.4"
once_cell = "1.21"
rcgen = "0.13"
regex = "1.3" regex = "1.3"
rustversion = "1" rustls-pemfile = "0.2"
rustls-pemfile = "2" serde = { version = "1.0", features = ["derive"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1.0" serde_json = "1.0"
static_assertions = "1" static_assertions = "1"
tls-openssl = { package = "openssl", version = "0.10.55" } tls-openssl = { package = "openssl", version = "0.10.9" }
tls-rustls_023 = { package = "rustls", version = "0.23" } tls-rustls = { package = "rustls", version = "0.20.0" }
tokio = { version = "1.24.2", features = ["net", "rt", "macros"] } tokio = { version = "1.8.4", features = ["net", "rt", "macros"] }
[lints]
workspace = true
[[example]] [[example]]
name = "ws" name = "ws"
required-features = ["ws", "rustls-0_23"] required-features = ["rustls"]
[[example]]
name = "tls_rustls"
required-features = ["http2", "rustls-0_23"]
[[bench]] [[bench]]
name = "response-body-compression" name = "write-camel-case"
harness = false harness = false
required-features = ["compress-brotli", "compress-gzip", "compress-zstd"]
[[bench]] [[bench]]
name = "date-formatting" name = "status-line"
harness = false
[[bench]]
name = "uninit-headers"
harness = false
[[bench]]
name = "quality-value"
harness = false harness = false
View File
@ -1,21 +1,22 @@
# `actix-http` # actix-http
> HTTP types and services for the Actix ecosystem. > HTTP primitives for the Actix ecosystem.
<!-- prettier-ignore-start -->
[![crates.io](https://img.shields.io/crates/v/actix-http?label=latest)](https://crates.io/crates/actix-http) [![crates.io](https://img.shields.io/crates/v/actix-http?label=latest)](https://crates.io/crates/actix-http)
[![Documentation](https://docs.rs/actix-http/badge.svg?version=3.10.0)](https://docs.rs/actix-http/3.10.0) [![Documentation](https://docs.rs/actix-http/badge.svg?version=3.0.0-beta.18)](https://docs.rs/actix-http/3.0.0-beta.18)
![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg) [![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http.svg) ![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http.svg)
<br /> <br />
[![dependency status](https://deps.rs/crate/actix-http/3.10.0/status.svg)](https://deps.rs/crate/actix-http/3.10.0) [![dependency status](https://deps.rs/crate/actix-http/3.0.0-beta.18/status.svg)](https://deps.rs/crate/actix-http/3.0.0-beta.18)
[![Download](https://img.shields.io/crates/d/actix-http.svg)](https://crates.io/crates/actix-http) [![Download](https://img.shields.io/crates/d/actix-http.svg)](https://crates.io/crates/actix-http)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) [![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
<!-- prettier-ignore-end --> ## Documentation & Resources
## Examples - [API Documentation](https://docs.rs/actix-http)
- Minimum Supported Rust Version (MSRV): 1.54
## Example
```rust ```rust
use std::{env, io}; use std::{env, io};
@ -24,7 +25,7 @@ use actix_http::{HttpService, Response};
use actix_server::Server; use actix_server::Server;
use futures_util::future; use futures_util::future;
use http::header::HeaderValue; use http::header::HeaderValue;
use tracing::info; use log::info;
#[actix_rt::main] #[actix_rt::main]
async fn main() -> io::Result<()> { async fn main() -> io::Result<()> {
@ -48,3 +49,18 @@ async fn main() -> io::Result<()> {
.await .await
} }
``` ```
## License
This project is licensed under either of
- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
- MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
at your option.
## Code of Conduct
Contribution to the actix-http crate is organized under the terms of the
Contributor Covenant, the maintainer of actix-http, @fafhrd91, promises to
intervene to uphold that code of conduct.


@ -1,20 +0,0 @@
use std::time::SystemTime;
use actix_http::header::HttpDate;
use divan::{black_box, AllocProfiler, Bencher};
#[global_allocator]
static ALLOC: AllocProfiler = AllocProfiler::system();
#[divan::bench]
fn date_formatting(b: Bencher<'_, '_>) {
let now = SystemTime::now();
b.bench(|| {
black_box(HttpDate::from(black_box(now)).to_string());
})
}
fn main() {
divan::main();
}


@ -0,0 +1,90 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
const CODES: &[u16] = &[0, 1000, 201, 800, 550];
fn bench_quality_display_impls(c: &mut Criterion) {
let mut group = c.benchmark_group("quality value display impls");
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("New (fast?)", i), i, |b, &i| {
b.iter(|| _new::Quality(i).to_string())
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| _naive::Quality(i).to_string())
});
}
group.finish();
}
criterion_group!(benches, bench_quality_display_impls);
criterion_main!(benches);
mod _new {
use std::fmt;
pub struct Quality(pub(crate) u16);
impl fmt::Display for Quality {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0 {
0 => f.write_str("0"),
1000 => f.write_str("1"),
// some number in the range 1–999
x => {
f.write_str("0.")?;
// this implementation avoids string allocation otherwise required
// for `.trim_end_matches('0')`
if x < 10 {
f.write_str("00")?;
// 0 is handled above, so a trailing 0 is impossible; we can just return
itoa::fmt(f, x)
} else if x < 100 {
f.write_str("0")?;
if x % 10 == 0 {
// trailing 0, divide by 10 and write
itoa::fmt(f, x / 10)
} else {
itoa::fmt(f, x)
}
} else {
// x is in range 101–999
if x % 100 == 0 {
// two trailing 0s, divide by 100 and write
itoa::fmt(f, x / 100)
} else if x % 10 == 0 {
// one trailing 0, divide by 10 and write
itoa::fmt(f, x / 10)
} else {
itoa::fmt(f, x)
}
}
}
}
}
}
}
mod _naive {
use std::fmt;
pub struct Quality(pub(crate) u16);
impl fmt::Display for Quality {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0 {
0 => f.write_str("0"),
1000 => f.write_str("1"),
x => {
write!(f, "{}", format!("{:03}", x).trim_end_matches('0'))
}
}
}
}
}


@ -1,88 +0,0 @@
use std::convert::Infallible;
use actix_http::{encoding::Encoder, ContentEncoding, Request, Response, StatusCode};
use actix_service::{fn_service, Service as _};
use criterion::{black_box, criterion_group, criterion_main, Criterion};
static BODY: &[u8] = include_bytes!("../Cargo.toml");
fn compression_responses(c: &mut Criterion) {
let mut group = c.benchmark_group("compression responses");
group.bench_function("identity", |b| {
let rt = actix_rt::Runtime::new().unwrap();
let identity_svc = fn_service(|_: Request| async move {
let mut res = Response::with_body(StatusCode::OK, ());
let body = black_box(Encoder::response(
ContentEncoding::Identity,
res.head_mut(),
BODY,
));
Ok::<_, Infallible>(black_box(res.set_body(black_box(body))))
});
b.iter(|| {
rt.block_on(identity_svc.call(Request::new())).unwrap();
});
});
group.bench_function("gzip", |b| {
let rt = actix_rt::Runtime::new().unwrap();
let identity_svc = fn_service(|_: Request| async move {
let mut res = Response::with_body(StatusCode::OK, ());
let body = black_box(Encoder::response(
ContentEncoding::Gzip,
res.head_mut(),
BODY,
));
Ok::<_, Infallible>(black_box(res.set_body(black_box(body))))
});
b.iter(|| {
rt.block_on(identity_svc.call(Request::new())).unwrap();
});
});
group.bench_function("br", |b| {
let rt = actix_rt::Runtime::new().unwrap();
let identity_svc = fn_service(|_: Request| async move {
let mut res = Response::with_body(StatusCode::OK, ());
let body = black_box(Encoder::response(
ContentEncoding::Brotli,
res.head_mut(),
BODY,
));
Ok::<_, Infallible>(black_box(res.set_body(black_box(body))))
});
b.iter(|| {
rt.block_on(identity_svc.call(Request::new())).unwrap();
});
});
group.bench_function("zstd", |b| {
let rt = actix_rt::Runtime::new().unwrap();
let identity_svc = fn_service(|_: Request| async move {
let mut res = Response::with_body(StatusCode::OK, ());
let body = black_box(Encoder::response(
ContentEncoding::Zstd,
res.head_mut(),
BODY,
));
Ok::<_, Infallible>(black_box(res.set_body(black_box(body))))
});
b.iter(|| {
rt.block_on(identity_svc.call(Request::new())).unwrap();
});
});
group.finish();
}
criterion_group!(benches, compression_responses);
criterion_main!(benches);


@ -0,0 +1,214 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use bytes::BytesMut;
use http::Version;
const CODES: &[u16] = &[201, 303, 404, 515];
fn bench_write_status_line_11(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v1.1");
let version = Version::HTTP_11;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
fn bench_write_status_line_10(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v1.0");
let version = Version::HTTP_10;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
fn bench_write_status_line_09(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v0.9");
let version = Version::HTTP_09;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
criterion_group!(
benches,
bench_write_status_line_11,
bench_write_status_line_10,
bench_write_status_line_09
);
criterion_main!(benches);
mod _naive {
use bytes::{BufMut, BytesMut};
use http::Version;
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
match version {
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
_ => {
// other HTTP version handlers do not use this method
}
}
bytes.put_slice(n.to_string().as_bytes());
}
}
mod _new {
use bytes::{BufMut, BytesMut};
use http::Version;
const DIGITS_START: u8 = b'0';
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
match version {
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
_ => {
// other HTTP version handlers do not use this method
}
}
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
bytes.put_u8(b' ');
}
}
mod _original {
use std::ptr;
use bytes::{BufMut, BytesMut};
use http::Version;
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
pub(crate) const STATUS_LINE_BUF_SIZE: usize = 13;
pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesMut) {
let mut buf: [u8; STATUS_LINE_BUF_SIZE] = *b"HTTP/1.1 ";
match version {
Version::HTTP_2 => buf[5] = b'2',
Version::HTTP_10 => buf[7] = b'0',
Version::HTTP_09 => {
buf[5] = b'0';
buf[7] = b'9';
}
_ => {}
}
let mut curr: isize = 12;
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
let four = n > 999;
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2);
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2);
}
}
bytes.put_slice(&buf);
if four {
bytes.put_u8(b' ');
}
}
}


@ -0,0 +1,134 @@
use criterion::{criterion_group, criterion_main, Criterion};
use bytes::BytesMut;
// A Miri run detects UB, seen on this playground:
// https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=f5d9aa166aa48df8dca05fce2b6c3915
fn bench_header_parsing(c: &mut Criterion) {
c.bench_function("Original (Unsound) [short]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ_SHORT);
_original::parse_headers(&mut buf);
})
});
c.bench_function("New (safe) [short]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ_SHORT);
_new::parse_headers(&mut buf);
})
});
c.bench_function("Original (Unsound) [realistic]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ);
_original::parse_headers(&mut buf);
})
});
c.bench_function("New (safe) [realistic]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ);
_new::parse_headers(&mut buf);
})
});
}
criterion_group!(benches, bench_header_parsing);
criterion_main!(benches);
const MAX_HEADERS: usize = 96;
const EMPTY_HEADER_ARRAY: [httparse::Header<'static>; MAX_HEADERS] =
[httparse::EMPTY_HEADER; MAX_HEADERS];
#[derive(Clone, Copy)]
struct HeaderIndex {
name: (usize, usize),
value: (usize, usize),
}
const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex {
name: (0, 0),
value: (0, 0),
};
const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] = [EMPTY_HEADER_INDEX; MAX_HEADERS];
impl HeaderIndex {
fn record(bytes: &[u8], headers: &[httparse::Header<'_>], indices: &mut [HeaderIndex]) {
let bytes_ptr = bytes.as_ptr() as usize;
for (header, indices) in headers.iter().zip(indices.iter_mut()) {
let name_start = header.name.as_ptr() as usize - bytes_ptr;
let name_end = name_start + header.name.len();
indices.name = (name_start, name_end);
let value_start = header.value.as_ptr() as usize - bytes_ptr;
let value_end = value_start + header.value.len();
indices.value = (value_start, value_end);
}
}
}
// test cases taken from:
// https://github.com/seanmonstar/httparse/blob/master/benches/parse.rs
const REQ_SHORT: &[u8] = b"\
GET / HTTP/1.0\r\n\
Host: example.com\r\n\
Cookie: session=60; user_id=1\r\n\r\n";
const REQ: &[u8] = b"\
GET /wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg HTTP/1.1\r\n\
Host: www.kittyhell.com\r\n\
User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 Pathtraq/0.9\r\n\
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n\
Accept-Language: ja,en-us;q=0.7,en;q=0.3\r\n\
Accept-Encoding: gzip,deflate\r\n\
Accept-Charset: Shift_JIS,utf-8;q=0.7,*;q=0.7\r\n\
Keep-Alive: 115\r\n\
Connection: keep-alive\r\n\
Cookie: wp_ozh_wsa_visits=2; wp_ozh_wsa_visit_lasttime=xxxxxxxxxx; __utma=xxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.x; __utmz=xxxxxxxxx.xxxxxxxxxx.x.x.utmccn=(referral)|utmcsr=reader.livedoor.com|utmcct=/reader/|utmcmd=referral|padding=under256\r\n\r\n";
mod _new {
use super::*;
pub fn parse_headers(src: &mut BytesMut) -> usize {
let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
let mut req = httparse::Request::new(&mut parsed);
match req.parse(src).unwrap() {
httparse::Status::Complete(_len) => {
HeaderIndex::record(src, req.headers, &mut headers);
req.headers.len()
}
_ => unreachable!(),
}
}
}
mod _original {
use super::*;
use std::mem::MaybeUninit;
pub fn parse_headers(src: &mut BytesMut) -> usize {
#![allow(clippy::uninit_assumed_init)]
let mut headers: [HeaderIndex; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() };
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() };
let mut req = httparse::Request::new(&mut parsed);
match req.parse(src).unwrap() {
httparse::Status::Complete(_len) => {
HeaderIndex::record(src, req.headers, &mut headers);
req.headers.len()
}
_ => unreachable!(),
}
}
}


@ -0,0 +1,93 @@
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
fn bench_write_camel_case(c: &mut Criterion) {
let mut group = c.benchmark_group("write_camel_case");
let names = ["connection", "Transfer-Encoding", "transfer-encoding"];
for &i in &names {
let bts = i.as_bytes();
group.bench_with_input(BenchmarkId::new("Original", i), bts, |b, bts| {
b.iter(|| {
let mut buf = black_box([0; 24]);
_original::write_camel_case(black_box(bts), &mut buf)
});
});
group.bench_with_input(BenchmarkId::new("New", i), bts, |b, bts| {
b.iter(|| {
let mut buf = black_box([0; 24]);
let len = black_box(bts.len());
_new::write_camel_case(black_box(bts), buf.as_mut_ptr(), len)
});
});
}
group.finish();
}
criterion_group!(benches, bench_write_camel_case);
criterion_main!(benches);
mod _new {
pub fn write_camel_case(value: &[u8], buf: *mut u8, len: usize) {
// first copy entire (potentially wrong) slice to output
let buffer = unsafe {
std::ptr::copy_nonoverlapping(value.as_ptr(), buf, len);
std::slice::from_raw_parts_mut(buf, len)
};
let mut iter = value.iter();
// first character should be uppercase
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[0] = c & 0b1101_1111;
}
// track 1 ahead of the current position since that's the location being assigned to
let mut index = 2;
// remaining characters after hyphens should also be uppercase
while let Some(&c) = iter.next() {
if c == b'-' {
// advance iter by one and uppercase if needed
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[index] = c & 0b1101_1111;
}
}
index += 1;
}
}
}
mod _original {
pub fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
let mut index = 0;
let key = value;
let mut key_iter = key.iter();
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
} else {
return;
}
while let Some(c) = key_iter.next() {
buffer[index] = *c;
index += 1;
if *c == b'-' {
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
}
}
}
}
}


@ -1,10 +1,10 @@
use actix_http::HttpService; use actix_http::HttpService;
use actix_server::Server; use actix_server::Server;
use actix_service::map_config; use actix_service::map_config;
use actix_web::{dev::AppConfig, get, App, Responder}; use actix_web::{dev::AppConfig, get, App};
#[get("/")] #[get("/")]
async fn index() -> impl Responder { async fn index() -> &'static str {
"Hello, world. From Actix Web!" "Hello, world. From Actix Web!"
} }
@ -18,8 +18,7 @@ async fn main() -> std::io::Result<()> {
HttpService::build() HttpService::build()
// pass the app to service builder // pass the app to service builder
// map_config is used to map App's configuration to ServiceBuilder // map_config is used to map App's configuration to ServiceBuilder
// h1 will configure server to only use HTTP/1.1 .finish(map_config(app, |_| AppConfig::default()))
.h1(map_config(app, |_| AppConfig::default()))
.tcp() .tcp()
})? })?
.run() .run()


@ -1,27 +0,0 @@
use std::{convert::Infallible, io, time::Duration};
use actix_http::{HttpService, Request, Response, StatusCode};
use actix_server::Server;
use once_cell::sync::Lazy;
static STR: Lazy<String> = Lazy::new(|| "HELLO WORLD ".repeat(20));
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("dispatcher-benchmark", ("127.0.0.1", 8080), || {
HttpService::build()
.client_request_timeout(Duration::from_secs(1))
.finish(|_: Request| async move {
let mut res = Response::build(StatusCode::OK);
Ok::<_, Infallible>(res.body(&**STR))
})
.tcp()
})?
// limiting number of workers so that bench client is not sharing as many resources
.workers(4)
.run()
.await
}


@ -1,11 +1,10 @@
use std::{io, time::Duration}; use std::io;
use actix_http::{Error, HttpService, Request, Response, StatusCode}; use actix_http::{Error, HttpService, Request, Response, StatusCode};
use actix_server::Server; use actix_server::Server;
use bytes::BytesMut; use bytes::BytesMut;
use futures_util::StreamExt as _; use futures_util::StreamExt as _;
use http::header::HeaderValue; use http::header::HeaderValue;
use tracing::info;
#[actix_rt::main] #[actix_rt::main]
async fn main() -> io::Result<()> { async fn main() -> io::Result<()> {
@ -14,24 +13,23 @@ async fn main() -> io::Result<()> {
Server::build() Server::build()
.bind("echo", ("127.0.0.1", 8080), || { .bind("echo", ("127.0.0.1", 8080), || {
HttpService::build() HttpService::build()
.client_request_timeout(Duration::from_secs(1)) .client_timeout(1000)
.client_disconnect_timeout(Duration::from_secs(1)) .client_disconnect(1000)
// handles HTTP/1.1 and HTTP/2
.finish(|mut req: Request| async move { .finish(|mut req: Request| async move {
let mut body = BytesMut::new(); let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await { while let Some(item) = req.payload().next().await {
body.extend_from_slice(&item?); body.extend_from_slice(&item?);
} }
info!("request body: {body:?}"); log::info!("request body: {:?}", body);
let res = Response::build(StatusCode::OK) Ok::<_, Error>(
.insert_header(("x-head", HeaderValue::from_static("dummy value!"))) Response::build(StatusCode::OK)
.body(body); .insert_header(("x-head", HeaderValue::from_static("dummy value!")))
.body(body),
Ok::<_, Error>(res) )
}) })
.tcp() // No TLS .tcp()
})? })?
.run() .run()
.await .await


@ -1,34 +1,32 @@
use std::io; use std::io;
use actix_http::{ use actix_http::{
body::{BodyStream, MessageBody}, body::MessageBody, header::HeaderValue, Error, HttpService, Request, Response, StatusCode,
header, Error, HttpMessage, HttpService, Request, Response, StatusCode,
}; };
use actix_server::Server;
use bytes::BytesMut;
use futures_util::StreamExt as _;
async fn handle_request(mut req: Request) -> Result<Response<impl MessageBody>, Error> { async fn handle_request(mut req: Request) -> Result<Response<impl MessageBody>, Error> {
let mut res = Response::build(StatusCode::OK); let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await {
if let Some(ct) = req.headers().get(header::CONTENT_TYPE) { body.extend_from_slice(&item?)
res.insert_header((header::CONTENT_TYPE, ct));
} }
// echo request payload stream as (chunked) response body log::info!("request body: {:?}", body);
let res = res.message_body(BodyStream::new(req.payload().take()))?;
Ok(res) Ok(Response::build(StatusCode::OK)
.insert_header(("x-head", HeaderValue::from_static("dummy value!")))
.body(body))
} }
#[actix_rt::main] #[actix_rt::main]
async fn main() -> io::Result<()> { async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
actix_server::Server::build() Server::build()
.bind("echo", ("127.0.0.1", 8080), || { .bind("echo", ("127.0.0.1", 8080), || {
HttpService::build() HttpService::build().finish(handle_request).tcp()
// handles HTTP/1.1 only
.h1(handle_request)
// No TLS
.tcp()
})? })?
.run() .run()
.await .await


@ -1,34 +0,0 @@
//! An example that supports automatic selection of plaintext h1/h2c connections.
//!
//! Notably, both the following commands will work.
//! ```console
//! $ curl --http1.1 'http://localhost:8080/'
//! $ curl --http2-prior-knowledge 'http://localhost:8080/'
//! ```
use std::{convert::Infallible, io};
use actix_http::{body::BodyStream, HttpService, Request, Response, StatusCode};
use actix_server::Server;
#[tokio::main(flavor = "current_thread")]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("h2c-detect", ("127.0.0.1", 8080), || {
HttpService::build()
.finish(|_req: Request| async move {
Ok::<_, Infallible>(Response::build(StatusCode::OK).body(BodyStream::new(
futures_util::stream::iter([
Ok::<_, String>("123".into()),
Err("wertyuikmnbvcxdfty6t".to_owned()),
]),
)))
})
.tcp_auto_h2c()
})?
.workers(2)
.run()
.await
}

View File

@ -1,25 +0,0 @@
use std::{convert::Infallible, io};
use actix_http::{HttpService, Request, Response, StatusCode};
use actix_server::Server;
use once_cell::sync::Lazy;
static STR: Lazy<String> = Lazy::new(|| "HELLO WORLD ".repeat(100));
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("h2spec", ("127.0.0.1", 8080), || {
HttpService::build()
.h2(|_: Request| async move {
let mut res = Response::build(StatusCode::OK);
Ok::<_, Infallible>(res.body(&**STR))
})
.tcp()
})?
.workers(4)
.run()
.await
}


@ -1,8 +1,9 @@
use std::{convert::Infallible, io, time::Duration}; use std::{convert::Infallible, io};
use actix_http::{header::HeaderValue, HttpService, Request, Response, StatusCode}; use actix_http::{
header::HeaderValue, HttpMessage, HttpService, Request, Response, StatusCode,
};
use actix_server::Server; use actix_server::Server;
use tracing::info;
#[actix_rt::main] #[actix_rt::main]
async fn main() -> io::Result<()> { async fn main() -> io::Result<()> {
@ -11,19 +12,22 @@ async fn main() -> io::Result<()> {
Server::build() Server::build()
.bind("hello-world", ("127.0.0.1", 8080), || { .bind("hello-world", ("127.0.0.1", 8080), || {
HttpService::build() HttpService::build()
.client_request_timeout(Duration::from_secs(1)) .client_timeout(1000)
.client_disconnect_timeout(Duration::from_secs(1)) .client_disconnect(1000)
.on_connect_ext(|_, ext| { .on_connect_ext(|_, ext| {
ext.insert(42u32); ext.insert(42u32);
}) })
.finish(|req: Request| async move { .finish(|req: Request| async move {
info!("{req:?}"); log::info!("{:?}", req);
let mut res = Response::build(StatusCode::OK); let mut res = Response::build(StatusCode::OK);
res.insert_header(("x-head", HeaderValue::from_static("dummy value!"))); res.insert_header(("x-head", HeaderValue::from_static("dummy value!")));
let forty_two = req.conn_data::<u32>().unwrap().to_string(); let forty_two = req.extensions().get::<u32>().unwrap().to_string();
res.insert_header(("x-forty-two", HeaderValue::from_str(&forty_two).unwrap())); res.insert_header((
"x-forty-two",
HeaderValue::from_str(&forty_two).unwrap(),
));
Ok::<_, Infallible>(res.body("Hello world!")) Ok::<_, Infallible>(res.body("Hello world!"))
}) })


@ -12,7 +12,6 @@ use actix_http::{body::BodyStream, HttpService, Response};
use actix_server::Server; use actix_server::Server;
use async_stream::stream; use async_stream::stream;
use bytes::Bytes; use bytes::Bytes;
use tracing::info;
#[actix_rt::main] #[actix_rt::main]
async fn main() -> io::Result<()> { async fn main() -> io::Result<()> {
@ -22,16 +21,16 @@ async fn main() -> io::Result<()> {
.bind("streaming-error", ("127.0.0.1", 8080), || { .bind("streaming-error", ("127.0.0.1", 8080), || {
HttpService::build() HttpService::build()
.finish(|req| async move { .finish(|req| async move {
info!("{req:?}"); log::info!("{:?}", req);
let res = Response::ok(); let res = Response::ok();
Ok::<_, Infallible>(res.set_body(BodyStream::new(stream! { Ok::<_, Infallible>(res.set_body(BodyStream::new(stream! {
yield Ok(Bytes::from("123")); yield Ok(Bytes::from("123"));
yield Ok(Bytes::from("456")); yield Ok(Bytes::from("456"));
actix_rt::time::sleep(Duration::from_secs(1)).await; actix_rt::time::sleep(Duration::from_millis(1000)).await;
yield Err(io::Error::new(io::ErrorKind::Other, "abc")); yield Err(io::Error::new(io::ErrorKind::Other, ""));
}))) })))
}) })
.tcp() .tcp()


@ -1,76 +0,0 @@
//! Demonstrates TLS configuration (via Rustls) for HTTP/1.1 and HTTP/2 connections.
//!
//! Test using cURL:
//!
//! ```console
//! $ curl --insecure https://127.0.0.1:8443
//! Hello World!
//! Protocol: HTTP/2.0
//!
//! $ curl --insecure --http1.1 https://127.0.0.1:8443
//! Hello World!
//! Protocol: HTTP/1.1
//! ```
extern crate tls_rustls_023 as rustls;
use std::io;
use actix_http::{Error, HttpService, Request, Response};
use actix_utils::future::ok;
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
tracing::info!("starting HTTP server at https://127.0.0.1:8443");
actix_server::Server::build()
.bind("echo", ("127.0.0.1", 8443), || {
HttpService::build()
.finish(|req: Request| {
let body = format!(
"Hello World!\n\
Protocol: {:?}",
req.head().version
);
ok::<_, Error>(Response::ok().set_body(body))
})
.rustls_0_23(rustls_config())
})?
.run()
.await
}
fn rustls_config() -> rustls::ServerConfig {
let rcgen::CertifiedKey { cert, key_pair } =
rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap();
let cert_file = cert.pem();
let key_file = key_pair.serialize_pem();
let cert_file = &mut io::BufReader::new(cert_file.as_bytes());
let key_file = &mut io::BufReader::new(key_file.as_bytes());
let cert_chain = rustls_pemfile::certs(cert_file)
.collect::<Result<Vec<_>, _>>()
.unwrap();
let mut keys = rustls_pemfile::pkcs8_private_keys(key_file)
.collect::<Result<Vec<_>, _>>()
.unwrap();
let mut config = rustls::ServerConfig::builder()
.with_no_client_auth()
.with_single_cert(
cert_chain,
rustls::pki_types::PrivateKeyDer::Pkcs8(keys.remove(0)),
)
.unwrap();
const H1_ALPN: &[u8] = b"http/1.1";
const H2_ALPN: &[u8] = b"h2";
config.alpn_protocols.push(H2_ALPN.to_vec());
config.alpn_protocols.push(H1_ALPN.to_vec());
config
}


@ -1,7 +1,7 @@
//! Sets up a WebSocket server over TCP and TLS. //! Sets up a WebSocket server over TCP and TLS.
//! Sends a heartbeat message every 4 seconds but does not respond to any incoming frames. //! Sends a heartbeat message every 4 seconds but does not respond to any incoming frames.
extern crate tls_rustls_023 as rustls; extern crate tls_rustls as rustls;
use std::{ use std::{
io, io,
@ -10,13 +10,13 @@ use std::{
time::Duration, time::Duration,
}; };
use actix_codec::Encoder;
use actix_http::{body::BodyStream, error::Error, ws, HttpService, Request, Response}; use actix_http::{body::BodyStream, error::Error, ws, HttpService, Request, Response};
use actix_rt::time::{interval, Interval}; use actix_rt::time::{interval, Interval};
use actix_server::Server; use actix_server::Server;
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
use bytestring::ByteString; use bytestring::ByteString;
use futures_core::{ready, Stream}; use futures_core::{ready, Stream};
use tokio_util::codec::Encoder;
#[actix_rt::main] #[actix_rt::main]
async fn main() -> io::Result<()> { async fn main() -> io::Result<()> {
@ -27,22 +27,20 @@ async fn main() -> io::Result<()> {
HttpService::build().h1(handler).tcp() HttpService::build().h1(handler).tcp()
})? })?
.bind("tls", ("127.0.0.1", 8443), || { .bind("tls", ("127.0.0.1", 8443), || {
HttpService::build() HttpService::build().finish(handler).rustls(tls_config())
.finish(handler)
.rustls_0_23(tls_config())
})? })?
.run() .run()
.await .await
} }
async fn handler(req: Request) -> Result<Response<BodyStream<Heartbeat>>, Error> { async fn handler(req: Request) -> Result<Response<BodyStream<Heartbeat>>, Error> {
tracing::info!("handshaking"); log::info!("handshaking");
let mut res = ws::handshake(req.head())?; let mut res = ws::handshake(req.head())?;
// handshake will always fail under HTTP/2 // handshake will always fail under HTTP/2
tracing::info!("responding"); log::info!("responding");
res.message_body(BodyStream::new(Heartbeat::new(ws::Codec::new()))) Ok(res.message_body(BodyStream::new(Heartbeat::new(ws::Codec::new())))?)
} }
struct Heartbeat { struct Heartbeat {
@ -63,7 +61,7 @@ impl Stream for Heartbeat {
type Item = Result<Bytes, Error>; type Item = Result<Bytes, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
tracing::trace!("poll"); log::trace!("poll");
ready!(self.as_mut().interval.poll_tick(cx)); ready!(self.as_mut().interval.poll_tick(cx));
@ -84,27 +82,27 @@ impl Stream for Heartbeat {
fn tls_config() -> rustls::ServerConfig { fn tls_config() -> rustls::ServerConfig {
use std::io::BufReader; use std::io::BufReader;
use rustls::{Certificate, PrivateKey};
use rustls_pemfile::{certs, pkcs8_private_keys}; use rustls_pemfile::{certs, pkcs8_private_keys};
let rcgen::CertifiedKey { cert, key_pair } = let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_owned()]).unwrap();
rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap(); let cert_file = cert.serialize_pem().unwrap();
let cert_file = cert.pem(); let key_file = cert.serialize_private_key_pem();
let key_file = key_pair.serialize_pem();
let cert_file = &mut BufReader::new(cert_file.as_bytes()); let cert_file = &mut BufReader::new(cert_file.as_bytes());
let key_file = &mut BufReader::new(key_file.as_bytes()); let key_file = &mut BufReader::new(key_file.as_bytes());
let cert_chain = certs(cert_file).collect::<Result<Vec<_>, _>>().unwrap(); let cert_chain = certs(cert_file)
let mut keys = pkcs8_private_keys(key_file) .unwrap()
.collect::<Result<Vec<_>, _>>() .into_iter()
.unwrap(); .map(Certificate)
.collect();
let mut keys = pkcs8_private_keys(key_file).unwrap();
let mut config = rustls::ServerConfig::builder() let mut config = rustls::ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth() .with_no_client_auth()
.with_single_cert( .with_single_cert(cert_chain, PrivateKey(keys.remove(0)))
cert_chain,
rustls::pki_types::PrivateKeyDer::Pkcs8(keys.remove(0)),
)
.unwrap(); .unwrap();
config.alpn_protocols.push(b"http/1.1".to_vec()); config.alpn_protocols.push(b"http/1.1".to_vec());


@ -47,8 +47,9 @@ where
/// Attempts to pull out the next value of the underlying [`Stream`]. /// Attempts to pull out the next value of the underlying [`Stream`].
/// ///
/// Empty values are skipped to prevent [`BodyStream`]'s transmission being ended on a /// Empty values are skipped to prevent [`BodyStream`]'s transmission being
/// zero-length chunk, but rather proceed until the underlying [`Stream`] ends. /// ended on a zero-length chunk, but rather proceed until the underlying
/// [`Stream`] ends.
fn poll_next( fn poll_next(
mut self: Pin<&mut Self>, mut self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
@ -79,7 +80,7 @@ mod tests {
use futures_core::ready; use futures_core::ready;
use futures_util::{stream, FutureExt as _}; use futures_util::{stream, FutureExt as _};
use pin_project_lite::pin_project; use pin_project_lite::pin_project;
use static_assertions::{assert_impl_all, assert_not_impl_any}; use static_assertions::{assert_impl_all, assert_not_impl_all};
use super::*; use super::*;
use crate::body::to_bytes; use crate::body::to_bytes;
@ -90,10 +91,10 @@ mod tests {
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody); assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody); assert_impl_all!(BodyStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);
assert_not_impl_any!(BodyStream<stream::Empty<Bytes>>: MessageBody); assert_not_impl_all!(BodyStream<stream::Empty<Bytes>>: MessageBody);
assert_not_impl_any!(BodyStream<stream::Repeat<Bytes>>: MessageBody); assert_not_impl_all!(BodyStream<stream::Repeat<Bytes>>: MessageBody);
// crate::Error is not Clone // crate::Error is not Clone
assert_not_impl_any!(BodyStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody); assert_not_impl_all!(BodyStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
#[actix_rt::test] #[actix_rt::test]
async fn skips_empty_chunks() { async fn skips_empty_chunks() {
@ -131,7 +132,7 @@ mod tests {
assert_eq!(to_bytes(body).await.ok(), Some(Bytes::from("12"))); assert_eq!(to_bytes(body).await.ok(), Some(Bytes::from("12")));
} }
#[derive(Debug, Display, Error)] #[derive(Debug, Display, Error)]
#[display("stream error")] #[display(fmt = "stream error")]
struct StreamErr; struct StreamErr;
#[actix_rt::test] #[actix_rt::test]


@ -31,7 +31,7 @@ impl fmt::Debug for BoxBodyInner {
} }
impl BoxBody { impl BoxBody {
/// Boxes body type, erasing type information. /// Same as `MessageBody::boxed`.
/// ///
/// If the body type to wrap is unknown or generic it is better to use [`MessageBody::boxed`] to /// If the body type to wrap is unknown or generic it is better to use [`MessageBody::boxed`] to
/// avoid double boxing. /// avoid double boxing.
@ -77,8 +77,12 @@ impl MessageBody for BoxBody {
cx: &mut Context<'_>, cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> { ) -> Poll<Option<Result<Bytes, Self::Error>>> {
match &mut self.0 { match &mut self.0 {
BoxBodyInner::None(body) => Pin::new(body).poll_next(cx).map_err(|err| match err {}), BoxBodyInner::None(body) => {
BoxBodyInner::Bytes(body) => Pin::new(body).poll_next(cx).map_err(|err| match err {}), Pin::new(body).poll_next(cx).map_err(|err| match err {})
}
BoxBodyInner::Bytes(body) => {
Pin::new(body).poll_next(cx).map_err(|err| match err {})
}
BoxBodyInner::Stream(body) => Pin::new(body).poll_next(cx), BoxBodyInner::Stream(body) => Pin::new(body).poll_next(cx),
} }
} }
@ -100,13 +104,15 @@ impl MessageBody for BoxBody {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use static_assertions::{assert_impl_all, assert_not_impl_any};
use static_assertions::{assert_impl_all, assert_not_impl_all};
use super::*; use super::*;
use crate::body::to_bytes; use crate::body::to_bytes;
assert_impl_all!(BoxBody: fmt::Debug, MessageBody, Unpin); assert_impl_all!(BoxBody: MessageBody, fmt::Debug, Unpin);
assert_not_impl_any!(BoxBody: Send, Sync);
assert_not_impl_all!(BoxBody: Send, Sync, Unpin);
#[actix_rt::test] #[actix_rt::test]
async fn nested_boxed_body() { async fn nested_boxed_body() {


@ -10,17 +10,6 @@ use super::{BodySize, BoxBody, MessageBody};
use crate::Error; use crate::Error;
pin_project! { pin_project! {
/// An "either" type specialized for body types.
///
/// It is common, in middleware especially, to conditionally return an inner service's unknown/
/// generic body `B` type or return early with a new response. This type's "right" variant
/// defaults to `BoxBody` since error responses are the common case.
///
/// For example, middleware will often have `type Response = ServiceResponse<EitherBody<B>>`.
/// This means that the inner service's response body type maps to the `Left` variant and the
/// middleware's own error responses use the default `Right` variant of `BoxBody`. Of course,
/// there's no reason it couldn't use `EitherBody<B, String>` instead if its alternative
/// responses have a known type.
#[project = EitherBodyProj] #[project = EitherBodyProj]
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub enum EitherBody<L, R = BoxBody> { pub enum EitherBody<L, R = BoxBody> {
@ -33,10 +22,7 @@ pin_project! {
} }
impl<L> EitherBody<L, BoxBody> { impl<L> EitherBody<L, BoxBody> {
/// Creates new `EitherBody` left variant with a boxed right variant. /// Creates new `EitherBody` using left variant and boxed right variant.
///
/// If the expected `R` type will be inferred and is not `BoxBody` then use the
/// [`left`](Self::left) constructor instead.
#[inline] #[inline]
pub fn new(body: L) -> Self { pub fn new(body: L) -> Self {
Self::Left { body } Self::Left { body }
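
The `EitherBody` documentation above describes the common middleware pattern. As a rough illustrative sketch (not part of this diff, and assuming the crate's `Response::with_body`, `Response::map_body`, and an `EitherBody::right` constructor alongside `new`), a service wrapper could look like this:

```rust
use actix_http::{
    body::{BoxBody, EitherBody, MessageBody},
    Response, StatusCode,
};

// Pass an inner service's generic body `B` through as the `Left` variant, or
// short-circuit with an early response whose boxed body uses the default `Right` variant.
fn pass_or_reject<B: MessageBody + 'static>(
    authorized: bool,
    inner: Response<B>,
) -> Response<EitherBody<B>> {
    if authorized {
        // `EitherBody::new` wraps the inner body in the `Left` variant.
        inner.map_body(|_, body| EitherBody::new(body))
    } else {
        // Early rejection: a boxed static body in the `Right` variant (constructor assumed).
        Response::with_body(
            StatusCode::FORBIDDEN,
            EitherBody::right(BoxBody::new("forbidden")),
        )
    }
}
```

Here the inner body type stays generic on the success path, while the rejection path reuses the default boxed right variant, which is exactly the shape the doc comment recommends for middleware response types.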


@ -14,44 +14,8 @@ use pin_project_lite::pin_project;
use super::{BodySize, BoxBody}; use super::{BodySize, BoxBody};
/// An interface for types that can be used as a response body. /// An interface types that can converted to bytes and used as response bodies.
/// // TODO: examples
/// It is not usually necessary to create custom body types, this trait is already [implemented for
/// a large number of sensible body types](#foreign-impls) including:
/// - Empty body: `()`
/// - Text-based: `String`, `&'static str`, [`ByteString`](https://docs.rs/bytestring/1).
/// - Byte-based: `Bytes`, `BytesMut`, `Vec<u8>`, `&'static [u8]`;
/// - Streams: [`BodyStream`](super::BodyStream), [`SizedStream`](super::SizedStream)
///
/// # Examples
/// ```
/// # use std::convert::Infallible;
/// # use std::task::{Poll, Context};
/// # use std::pin::Pin;
/// # use bytes::Bytes;
/// # use actix_http::body::{BodySize, MessageBody};
/// struct Repeat {
/// chunk: String,
/// n_times: usize,
/// }
///
/// impl MessageBody for Repeat {
/// type Error = Infallible;
///
/// fn size(&self) -> BodySize {
/// BodySize::Sized((self.chunk.len() * self.n_times) as u64)
/// }
///
/// fn poll_next(
/// self: Pin<&mut Self>,
/// _cx: &mut Context<'_>,
/// ) -> Poll<Option<Result<Bytes, Self::Error>>> {
/// let payload_string = self.chunk.repeat(self.n_times);
/// let payload_bytes = Bytes::from(payload_string);
/// Poll::Ready(Some(Ok(payload_bytes)))
/// }
/// }
/// ```
pub trait MessageBody { pub trait MessageBody {
/// The type of error that will be returned if streaming body fails. /// The type of error that will be returned if streaming body fails.
/// ///
@ -65,22 +29,7 @@ pub trait MessageBody {
fn size(&self) -> BodySize; fn size(&self) -> BodySize;
/// Attempt to pull out the next chunk of body bytes. /// Attempt to pull out the next chunk of body bytes.
/// // TODO: expand documentation
/// # Return Value
/// Similar to the `Stream` interface, there are several possible return values, each indicating
/// a distinct state:
/// - `Poll::Pending` means that this body's next chunk is not ready yet. Implementations must
/// ensure that the current task will be notified when the next chunk may be ready.
/// - `Poll::Ready(Some(val))` means that the body has successfully produced a chunk, `val`,
/// and may produce further values on subsequent `poll_next` calls.
/// - `Poll::Ready(None)` means that the body is complete, and `poll_next` should not be
/// invoked again.
///
/// # Panics
/// Once a body is complete (i.e., `poll_next` returned `Ready(None)`), calling its `poll_next`
/// method again may panic, block forever, or cause other kinds of problems; this trait places
/// no requirements on the effects of such a call. However, as the `poll_next` method is not
/// marked unsafe, Rust's usual rules apply: calls must never cause UB, regardless of its state.
fn poll_next( fn poll_next(
self: Pin<&mut Self>, self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
@ -88,7 +37,7 @@ pub trait MessageBody {
/// Try to convert into the complete chunk of body bytes. /// Try to convert into the complete chunk of body bytes.
/// ///
/// Override this method if the complete body can be trivially extracted. This is useful for /// Implement this method if the entire body can be trivially extracted. This is useful for
/// optimizations where `poll_next` calls can be avoided. /// optimizations where `poll_next` calls can be avoided.
/// ///
/// Body types with [`BodySize::None`] are allowed to return empty `Bytes`. Although, if calling /// Body types with [`BodySize::None`] are allowed to return empty `Bytes`. Although, if calling
@ -105,11 +54,7 @@ pub trait MessageBody {
Err(self) Err(self)
} }
/// Wraps this body into a `BoxBody`. /// Converts this body into `BoxBody`.
///
/// No-op when called on a `BoxBody`, meaning there is no risk of double boxing when calling
/// this on a generic `MessageBody`. Prefer this over [`BoxBody::new`] when a boxed body
/// is required.
#[inline] #[inline]
fn boxed(self) -> BoxBody fn boxed(self) -> BoxBody
where where
@ -120,28 +65,8 @@ pub trait MessageBody {
} }
mod foreign_impls { mod foreign_impls {
use std::{borrow::Cow, ops::DerefMut};
use super::*; use super::*;
impl<B> MessageBody for &mut B
where
B: MessageBody + Unpin + ?Sized,
{
type Error = B::Error;
fn size(&self) -> BodySize {
(**self).size()
}
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Pin::new(&mut **self).poll_next(cx)
}
}
impl MessageBody for Infallible { impl MessageBody for Infallible {
type Error = Infallible; type Error = Infallible;
@ -199,9 +124,8 @@ mod foreign_impls {
} }
} }
impl<T, B> MessageBody for Pin<T> impl<B> MessageBody for Pin<Box<B>>
where where
T: DerefMut<Target = B> + Unpin,
B: MessageBody + ?Sized, B: MessageBody + ?Sized,
{ {
type Error = B::Error; type Error = B::Error;
@ -324,39 +248,6 @@ mod foreign_impls {
} }
} }
impl MessageBody for Cow<'static, [u8]> {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
let bytes = match mem::take(self.get_mut()) {
Cow::Borrowed(b) => Bytes::from_static(b),
Cow::Owned(b) => Bytes::from(b),
};
Poll::Ready(Some(Ok(bytes)))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
match self {
Cow::Borrowed(b) => Ok(Bytes::from_static(b)),
Cow::Owned(b) => Ok(Bytes::from(b)),
}
}
}
impl MessageBody for &'static str { impl MessageBody for &'static str {
type Error = Infallible; type Error = Infallible;
@ -412,39 +303,6 @@ mod foreign_impls {
} }
} }
impl MessageBody for Cow<'static, str> {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
let bytes = match mem::take(self.get_mut()) {
Cow::Borrowed(s) => Bytes::from_static(s.as_bytes()),
Cow::Owned(s) => Bytes::from(s.into_bytes()),
};
Poll::Ready(Some(Ok(bytes)))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
match self {
Cow::Borrowed(s) => Ok(Bytes::from_static(s.as_bytes())),
Cow::Owned(s) => Ok(Bytes::from(s.into_bytes())),
}
}
}
impl MessageBody for bytestring::ByteString { impl MessageBody for bytestring::ByteString {
type Error = Infallible; type Error = Infallible;
@ -531,7 +389,7 @@ where
mod tests { mod tests {
use actix_rt::pin; use actix_rt::pin;
use actix_utils::future::poll_fn; use actix_utils::future::poll_fn;
use futures_util::stream; use bytes::{Bytes, BytesMut};
use super::*; use super::*;
use crate::body::{self, EitherBody}; use crate::body::{self, EitherBody};
@ -554,7 +412,6 @@ mod tests {
}; };
} }
#[allow(unused_allocation)] // triggered by `Box::new(()).size()`
#[actix_rt::test] #[actix_rt::test]
async fn boxing_equivalence() { async fn boxing_equivalence() {
assert_eq!(().size(), BodySize::Sized(0)); assert_eq!(().size(), BodySize::Sized(0));
@ -569,35 +426,6 @@ mod tests {
assert_poll_next_none!(pl); assert_poll_next_none!(pl);
} }
#[actix_rt::test]
async fn mut_equivalence() {
assert_eq!(().size(), BodySize::Sized(0));
assert_eq!(().size(), (&(&mut ())).size());
let pl = &mut ();
pin!(pl);
assert_poll_next_none!(pl);
let pl = &mut Box::new(());
pin!(pl);
assert_poll_next_none!(pl);
let mut body = body::SizedStream::new(
8,
stream::iter([
Ok::<_, std::io::Error>(Bytes::from("1234")),
Ok(Bytes::from("5678")),
]),
);
let body = &mut body;
assert_eq!(body.size(), BodySize::Sized(8));
pin!(body);
assert_poll_next!(body, Bytes::from_static(b"1234"));
assert_poll_next!(body, Bytes::from_static(b"5678"));
assert_poll_next_none!(body);
}
#[allow(clippy::let_unit_value)]
#[actix_rt::test] #[actix_rt::test]
async fn test_unit() { async fn test_unit() {
let pl = (); let pl = ();
@ -723,18 +551,4 @@ mod tests {
let not_body = resp_body.downcast_ref::<()>(); let not_body = resp_body.downcast_ref::<()>();
assert!(not_body.is_none()); assert!(not_body.is_none());
} }
#[actix_rt::test]
async fn non_owning_to_bytes() {
let mut body = BoxBody::new(());
let bytes = body::to_bytes(&mut body).await.unwrap();
assert_eq!(bytes, Bytes::new());
let mut body = body::BodyStream::new(stream::iter([
Ok::<_, std::io::Error>(Bytes::from("1234")),
Ok(Bytes::from("5678")),
]));
let bytes = body::to_bytes(&mut body).await.unwrap();
assert_eq!(bytes, Bytes::from_static(b"12345678"));
}
} }


@ -1,9 +1,4 @@
//! Traits and structures to aid consuming and writing HTTP payloads. //! Traits and structures to aid consuming and writing HTTP payloads.
//!
//! "Body" and "payload" are used somewhat interchangeably in this documentation.
// Though the spec kinda reads like "payload" is the possibly-transfer-encoded part of the message
// and the "body" is the intended possibly-decoded version of that.
mod body_stream; mod body_stream;
mod boxed; mod boxed;
@ -14,14 +9,12 @@ mod size;
mod sized_stream; mod sized_stream;
mod utils; mod utils;
pub use self::body_stream::BodyStream;
pub use self::boxed::BoxBody;
pub use self::either::EitherBody;
pub use self::message_body::MessageBody;
pub(crate) use self::message_body::MessageBodyMapErr; pub(crate) use self::message_body::MessageBodyMapErr;
pub use self::{ pub use self::none::None;
body_stream::BodyStream, pub use self::size::BodySize;
boxed::BoxBody, pub use self::sized_stream::SizedStream;
either::EitherBody, pub use self::utils::to_bytes;
message_body::MessageBody,
none::None,
size::BodySize,
sized_stream::SizedStream,
utils::{to_bytes, to_bytes_limited, BodyLimitExceeded},
};
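
As a hedged orientation sketch (not part of this diff; it assumes the `actix-rt`, `bytes`, and `futures-util` dev-dependencies listed earlier), the re-exported body types can be exercised like so:

```rust
use actix_http::body::{self, BodyStream, MessageBody as _, SizedStream};
use bytes::Bytes;
use futures_util::stream;

#[actix_rt::main]
async fn main() {
    // Chunked body: total size is unknown up front (`BodySize::Stream`).
    let chunked = BodyStream::new(stream::iter([
        Ok::<_, std::convert::Infallible>(Bytes::from_static(b"Hello ")),
        Ok(Bytes::from_static(b"world")),
    ]));
    assert_eq!(body::to_bytes(chunked).await.unwrap(), "Hello world");

    // Sized body: the declared length lets responses advertise a Content-Length
    // instead of falling back to chunked transfer encoding.
    let sized = SizedStream::new(
        5,
        stream::iter([Ok::<_, std::convert::Infallible>(Bytes::from_static(b"hello"))]),
    );
    assert_eq!(sized.size(), body::BodySize::Sized(5));
}
```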


@ -10,12 +10,9 @@ use super::{BodySize, MessageBody};
/// Body type for responses that forbid payloads. /// Body type for responses that forbid payloads.
/// ///
/// This is distinct from an "empty" response which _would_ contain a `Content-Length` header. /// Distinct from an empty response which would contain a Content-Length header.
/// For an "empty" body, use `()` or `Bytes::new()`.
/// ///
/// For example, the HTTP spec forbids a payload to be sent with a `204 No Content` response. /// For an "empty" body, use `()` or `Bytes::new()`.
/// In this case, the payload (or lack thereof) is implicit from the status code, so a
/// `Content-Length` header is not required.
#[derive(Debug, Clone, Copy, Default)] #[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive] #[non_exhaustive]
pub struct None; pub struct None;
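
A minimal sketch (not from this diff) of how `body::None` might be paired with a status code that forbids a payload:

```rust
use actix_http::{body, Response, StatusCode};

// A 204 No Content response must not carry a payload, so attach `body::None`
// rather than an empty body; unlike an empty body, it implies no Content-Length header.
fn no_content() -> Response<body::None> {
    Response::with_body(StatusCode::NO_CONTENT, body::None::new())
}
```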


@ -44,7 +44,7 @@ where
#[inline] #[inline]
fn size(&self) -> BodySize { fn size(&self) -> BodySize {
BodySize::Sized(self.size) BodySize::Sized(self.size as u64)
} }
/// Attempts to pull out the next value of the underlying [`Stream`]. /// Attempts to pull out the next value of the underlying [`Stream`].
@ -76,7 +76,7 @@ mod tests {
use actix_rt::pin; use actix_rt::pin;
use actix_utils::future::poll_fn; use actix_utils::future::poll_fn;
use futures_util::stream; use futures_util::stream;
use static_assertions::{assert_impl_all, assert_not_impl_any}; use static_assertions::{assert_impl_all, assert_not_impl_all};
use super::*; use super::*;
use crate::body::to_bytes; use crate::body::to_bytes;
@ -87,10 +87,10 @@ mod tests {
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody); assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody); assert_impl_all!(SizedStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);
assert_not_impl_any!(SizedStream<stream::Empty<Bytes>>: MessageBody); assert_not_impl_all!(SizedStream<stream::Empty<Bytes>>: MessageBody);
assert_not_impl_any!(SizedStream<stream::Repeat<Bytes>>: MessageBody); assert_not_impl_all!(SizedStream<stream::Repeat<Bytes>>: MessageBody);
// crate::Error is not Clone // crate::Error is not Clone
assert_not_impl_any!(SizedStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody); assert_not_impl_all!(SizedStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
#[actix_rt::test] #[actix_rt::test]
async fn skips_empty_chunks() { async fn skips_empty_chunks() {


@ -3,196 +3,75 @@ use std::task::Poll;
use actix_rt::pin; use actix_rt::pin;
use actix_utils::future::poll_fn; use actix_utils::future::poll_fn;
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
use derive_more::{Display, Error};
use futures_core::ready; use futures_core::ready;
use super::{BodySize, MessageBody}; use super::{BodySize, MessageBody};
/// Collects all the bytes produced by `body`. /// Collects the body produced by a `MessageBody` implementation into `Bytes`.
/// ///
/// Any errors produced by the body stream are returned immediately. /// Any errors produced by the body stream are returned immediately.
/// ///
/// Consider using [`to_bytes_limited`] instead to protect against memory exhaustion.
///
/// # Examples /// # Examples
///
/// ``` /// ```
/// use actix_http::body::{self, to_bytes}; /// use actix_http::body::{self, to_bytes};
/// use bytes::Bytes; /// use bytes::Bytes;
/// ///
/// # actix_rt::System::new().block_on(async { /// # async fn test_to_bytes() {
/// let body = body::None::new(); /// let body = body::None::new();
/// let bytes = to_bytes(body).await.unwrap(); /// let bytes = to_bytes(body).await.unwrap();
/// assert!(bytes.is_empty()); /// assert!(bytes.is_empty());
/// ///
/// let body = Bytes::from_static(b"123"); /// let body = Bytes::from_static(b"123");
/// let bytes = to_bytes(body).await.unwrap(); /// let bytes = to_bytes(body).await.unwrap();
/// assert_eq!(bytes, "123"); /// assert_eq!(bytes, b"123"[..]);
/// # }); /// # }
/// ``` /// ```
pub async fn to_bytes<B: MessageBody>(body: B) -> Result<Bytes, B::Error> { pub async fn to_bytes<B: MessageBody>(body: B) -> Result<Bytes, B::Error> {
to_bytes_limited(body, usize::MAX)
.await
.expect("body should never yield more than usize::MAX bytes")
}
/// Error type returned from [`to_bytes_limited`] when body produced exceeds limit.
#[derive(Debug, Display, Error)]
#[display("limit exceeded while collecting body bytes")]
#[non_exhaustive]
pub struct BodyLimitExceeded;
/// Collects the bytes produced by `body`, up to `limit` bytes.
///
/// If a chunk read from `poll_next` causes the total number of bytes read to exceed `limit`, an
/// `Err(BodyLimitExceeded)` is returned.
///
/// Any errors produced by the body stream are returned immediately as `Ok(Err(B::Error))`.
///
/// # Examples
///
/// ```
/// use actix_http::body::{self, to_bytes_limited};
/// use bytes::Bytes;
///
/// # actix_rt::System::new().block_on(async {
/// let body = body::None::new();
/// let bytes = to_bytes_limited(body, 10).await.unwrap().unwrap();
/// assert!(bytes.is_empty());
///
/// let body = Bytes::from_static(b"123");
/// let bytes = to_bytes_limited(body, 10).await.unwrap().unwrap();
/// assert_eq!(bytes, "123");
///
/// let body = Bytes::from_static(b"123");
/// assert!(to_bytes_limited(body, 2).await.is_err());
/// # });
/// ```
pub async fn to_bytes_limited<B: MessageBody>(
body: B,
limit: usize,
) -> Result<Result<Bytes, B::Error>, BodyLimitExceeded> {
/// Sensible default (32kB) for initial, bounded allocation when collecting body bytes.
const INITIAL_ALLOC_BYTES: usize = 32 * 1024;
let cap = match body.size() { let cap = match body.size() {
BodySize::None | BodySize::Sized(0) => return Ok(Ok(Bytes::new())), BodySize::None | BodySize::Sized(0) => return Ok(Bytes::new()),
BodySize::Sized(size) if size as usize > limit => return Err(BodyLimitExceeded), BodySize::Sized(size) => size as usize,
BodySize::Sized(size) => (size as usize).min(INITIAL_ALLOC_BYTES), // good enough first guess for chunk size
BodySize::Stream => INITIAL_ALLOC_BYTES, BodySize::Stream => 32_768,
}; };
let mut exceeded_limit = false;
let mut buf = BytesMut::with_capacity(cap); let mut buf = BytesMut::with_capacity(cap);
pin!(body); pin!(body);
match poll_fn(|cx| loop { poll_fn(|cx| loop {
let body = body.as_mut(); let body = body.as_mut();
match ready!(body.poll_next(cx)) { match ready!(body.poll_next(cx)) {
Some(Ok(bytes)) => { Some(Ok(bytes)) => buf.extend_from_slice(&*bytes),
// if limit is exceeded...
if buf.len() + bytes.len() > limit {
// ...set flag to true and break out of poll_fn
exceeded_limit = true;
return Poll::Ready(Ok(()));
}
buf.extend_from_slice(&bytes)
}
None => return Poll::Ready(Ok(())), None => return Poll::Ready(Ok(())),
Some(Err(err)) => return Poll::Ready(Err(err)), Some(Err(err)) => return Poll::Ready(Err(err)),
} }
}) })
.await .await?;
{
// propagate error returned from body poll
Err(err) => Ok(Err(err)),
// limit was exceeded while reading body Ok(buf.freeze())
Ok(()) if exceeded_limit => Err(BodyLimitExceeded),
// otherwise return body buffer
Ok(()) => Ok(Ok(buf.freeze())),
}
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod test {
use std::io;
use futures_util::{stream, StreamExt as _}; use futures_util::{stream, StreamExt as _};
use super::*; use super::*;
use crate::{ use crate::{body::BodyStream, Error};
body::{BodyStream, SizedStream},
Error,
};
#[actix_rt::test] #[actix_rt::test]
async fn to_bytes_complete() { async fn test_to_bytes() {
let bytes = to_bytes(()).await.unwrap(); let bytes = to_bytes(()).await.unwrap();
assert!(bytes.is_empty()); assert!(bytes.is_empty());
let body = Bytes::from_static(b"123"); let body = Bytes::from_static(b"123");
let bytes = to_bytes(body).await.unwrap(); let bytes = to_bytes(body).await.unwrap();
assert_eq!(bytes, b"123"[..]); assert_eq!(bytes, b"123"[..]);
}
#[actix_rt::test]
async fn to_bytes_streams() {
let stream = stream::iter(vec![Bytes::from_static(b"123"), Bytes::from_static(b"abc")]) let stream = stream::iter(vec![Bytes::from_static(b"123"), Bytes::from_static(b"abc")])
.map(Ok::<_, Error>); .map(Ok::<_, Error>);
let body = BodyStream::new(stream); let body = BodyStream::new(stream);
let bytes = to_bytes(body).await.unwrap(); let bytes = to_bytes(body).await.unwrap();
assert_eq!(bytes, b"123abc"[..]); assert_eq!(bytes, b"123abc"[..]);
} }
#[actix_rt::test]
async fn to_bytes_limited_complete() {
let bytes = to_bytes_limited((), 0).await.unwrap().unwrap();
assert!(bytes.is_empty());
let bytes = to_bytes_limited((), 1).await.unwrap().unwrap();
assert!(bytes.is_empty());
assert!(to_bytes_limited(Bytes::from_static(b"12"), 0)
.await
.is_err());
assert!(to_bytes_limited(Bytes::from_static(b"12"), 1)
.await
.is_err());
assert!(to_bytes_limited(Bytes::from_static(b"12"), 2).await.is_ok());
assert!(to_bytes_limited(Bytes::from_static(b"12"), 3).await.is_ok());
}
#[actix_rt::test]
async fn to_bytes_limited_streams() {
// hinting a larger body fails
let body = SizedStream::new(8, stream::empty().map(Ok::<_, Error>));
assert!(to_bytes_limited(body, 3).await.is_err());
// hinting a smaller body is okay
let body = SizedStream::new(3, stream::empty().map(Ok::<_, Error>));
assert!(to_bytes_limited(body, 3).await.unwrap().unwrap().is_empty());
// hinting a smaller body then returning a larger one fails
let stream = stream::iter(vec![Bytes::from_static(b"1234")]).map(Ok::<_, Error>);
let body = SizedStream::new(3, stream);
assert!(to_bytes_limited(body, 3).await.is_err());
let stream = stream::iter(vec![Bytes::from_static(b"123"), Bytes::from_static(b"abc")])
.map(Ok::<_, Error>);
let body = BodyStream::new(stream);
assert!(to_bytes_limited(body, 3).await.is_err());
}
#[actix_rt::test]
async fn to_body_limit_error() {
let err_stream = stream::once(async { Err(io::Error::new(io::ErrorKind::Other, "")) });
let body = SizedStream::new(8, err_stream);
// not too big, but propagates error from body stream
assert!(to_bytes_limited(body, 10).await.unwrap().is_err());
}
} }
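A short caller-side sketch of handling both layers of the nested result returned by `to_bytes_limited` (the outer limit error and the inner body-stream error). The runtime wrapper mirrors the doc example above; the handler logic itself is illustrative only.

use actix_http::body::to_bytes_limited;
use bytes::Bytes;

fn main() {
    actix_rt::System::new().block_on(async {
        // 5 bytes of payload against a 4-byte limit
        let body = Bytes::from_static(b"hello");

        match to_bytes_limited(body, 4).await {
            // outer error: the limit was exceeded before the body finished
            Err(_limit_exceeded) => println!("body exceeded the 4-byte limit"),
            // inner error: the body stream itself failed while being polled
            Ok(Err(err)) => println!("body stream error: {:?}", err),
            // success: the whole body fit within the limit
            Ok(Ok(bytes)) => println!("collected {} bytes", bytes.len()),
        }
    });
}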
View File
@ -1,22 +1,25 @@
use std::{fmt, marker::PhantomData, net, rc::Rc, time::Duration}; use std::{fmt, marker::PhantomData, net, rc::Rc};
use actix_codec::Framed; use actix_codec::Framed;
use actix_service::{IntoServiceFactory, Service, ServiceFactory}; use actix_service::{IntoServiceFactory, Service, ServiceFactory};
use crate::{ use crate::{
body::{BoxBody, MessageBody}, body::{BoxBody, MessageBody},
config::{KeepAlive, ServiceConfig},
h1::{self, ExpectHandler, H1Service, UpgradeHandler}, h1::{self, ExpectHandler, H1Service, UpgradeHandler},
h2::H2Service,
service::HttpService, service::HttpService,
ConnectCallback, Extensions, KeepAlive, Request, Response, ServiceConfig, ConnectCallback, Extensions, Request, Response,
}; };
/// An HTTP service builder. /// A HTTP service builder
/// ///
/// This type can construct an instance of [`HttpService`] through a builder-like pattern. /// This type can be used to construct an instance of [`HttpService`] through a
/// builder-like pattern.
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> { pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> {
keep_alive: KeepAlive, keep_alive: KeepAlive,
client_request_timeout: Duration, client_timeout: u64,
client_disconnect_timeout: Duration, client_disconnect: u64,
secure: bool, secure: bool,
local_addr: Option<net::SocketAddr>, local_addr: Option<net::SocketAddr>,
expect: X, expect: X,
@ -25,23 +28,22 @@ pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> {
_phantom: PhantomData<S>, _phantom: PhantomData<S>,
} }
impl<T, S> Default for HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler> impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler>
where where
S: ServiceFactory<Request, Config = ()>, S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Response<BoxBody>> + 'static, S::Error: Into<Response<BoxBody>> + 'static,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
<S::Service as Service<Request>>::Future: 'static, <S::Service as Service<Request>>::Future: 'static,
{ {
fn default() -> Self { /// Create instance of `ServiceConfigBuilder`
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
HttpServiceBuilder { HttpServiceBuilder {
// ServiceConfig parts (make sure defaults match) keep_alive: KeepAlive::Timeout(5),
keep_alive: KeepAlive::default(), client_timeout: 5000,
client_request_timeout: Duration::from_secs(5), client_disconnect: 0,
client_disconnect_timeout: Duration::ZERO,
secure: false, secure: false,
local_addr: None, local_addr: None,
// dispatcher parts
expect: ExpectHandler, expect: ExpectHandler,
upgrade: None, upgrade: None,
on_connect_ext: None, on_connect_ext: None,
@ -63,11 +65,9 @@ where
U::Error: fmt::Display, U::Error: fmt::Display,
U::InitError: fmt::Debug, U::InitError: fmt::Debug,
{ {
/// Set connection keep-alive setting. /// Set server keep-alive setting.
/// ///
/// Applies to HTTP/1.1 keep-alive and HTTP/2 ping-pong. /// By default keep alive is set to a 5 seconds.
///
/// By default keep-alive is 5 seconds.
pub fn keep_alive<W: Into<KeepAlive>>(mut self, val: W) -> Self { pub fn keep_alive<W: Into<KeepAlive>>(mut self, val: W) -> Self {
self.keep_alive = val.into(); self.keep_alive = val.into();
self self
@ -85,45 +85,33 @@ where
self self
} }
/// Set client request timeout (for first request). /// Set server client timeout in milliseconds for first request.
/// ///
/// Defines a timeout for reading client request header. If the client does not transmit the /// Defines a timeout for reading client request header. If a client does not transmit
/// request head within this duration, the connection is terminated with a `408 Request Timeout` /// the entire set headers within this time, the request is terminated with
/// response error. /// the 408 (Request Time-out) error.
/// ///
/// A duration of zero disables the timeout. /// To disable timeout set value to 0.
/// ///
/// By default, the client timeout is 5 seconds. /// By default client timeout is set to 5000 milliseconds.
pub fn client_request_timeout(mut self, dur: Duration) -> Self { pub fn client_timeout(mut self, val: u64) -> Self {
self.client_request_timeout = dur; self.client_timeout = val;
self self
} }
#[doc(hidden)] /// Set server connection disconnect timeout in milliseconds.
#[deprecated(since = "3.0.0", note = "Renamed to `client_request_timeout`.")]
pub fn client_timeout(self, dur: Duration) -> Self {
self.client_request_timeout(dur)
}
/// Set client connection disconnect timeout.
/// ///
/// Defines a timeout for disconnect connection. If a disconnect procedure does not complete /// Defines a timeout for disconnect connection. If a disconnect procedure does not complete
/// within this time, the request get dropped. This timeout affects secure connections. /// within this time, the request get dropped. This timeout affects secure connections.
/// ///
/// A duration of zero disables the timeout. /// To disable timeout set value to 0.
/// ///
/// By default, the disconnect timeout is disabled. /// By default disconnect timeout is set to 0.
pub fn client_disconnect_timeout(mut self, dur: Duration) -> Self { pub fn client_disconnect(mut self, val: u64) -> Self {
self.client_disconnect_timeout = dur; self.client_disconnect = val;
self self
} }
#[doc(hidden)]
#[deprecated(since = "3.0.0", note = "Renamed to `client_disconnect_timeout`.")]
pub fn client_disconnect(self, dur: Duration) -> Self {
self.client_disconnect_timeout(dur)
}
/// Provide service for `EXPECT: 100-Continue` support. /// Provide service for `EXPECT: 100-Continue` support.
/// ///
/// Service get called with request that contains `EXPECT` header. /// Service get called with request that contains `EXPECT` header.
@ -138,8 +126,8 @@ where
{ {
HttpServiceBuilder { HttpServiceBuilder {
keep_alive: self.keep_alive, keep_alive: self.keep_alive,
client_request_timeout: self.client_request_timeout, client_timeout: self.client_timeout,
client_disconnect_timeout: self.client_disconnect_timeout, client_disconnect: self.client_disconnect,
secure: self.secure, secure: self.secure,
local_addr: self.local_addr, local_addr: self.local_addr,
expect: expect.into_factory(), expect: expect.into_factory(),
@ -162,8 +150,8 @@ where
{ {
HttpServiceBuilder { HttpServiceBuilder {
keep_alive: self.keep_alive, keep_alive: self.keep_alive,
client_request_timeout: self.client_request_timeout, client_timeout: self.client_timeout,
client_disconnect_timeout: self.client_disconnect_timeout, client_disconnect: self.client_disconnect,
secure: self.secure, secure: self.secure,
local_addr: self.local_addr, local_addr: self.local_addr,
expect: self.expect, expect: self.expect,
@ -186,7 +174,7 @@ where
self self
} }
/// Finish service configuration and create a service for the HTTP/1 protocol. /// Finish service configuration and create a HTTP Service for HTTP/1 protocol.
pub fn h1<F, B>(self, service: F) -> H1Service<T, S, B, X, U> pub fn h1<F, B>(self, service: F) -> H1Service<T, S, B, X, U>
where where
B: MessageBody, B: MessageBody,
@ -197,8 +185,8 @@ where
{ {
let cfg = ServiceConfig::new( let cfg = ServiceConfig::new(
self.keep_alive, self.keep_alive,
self.client_request_timeout, self.client_timeout,
self.client_disconnect_timeout, self.client_disconnect,
self.secure, self.secure,
self.local_addr, self.local_addr,
); );
@ -209,9 +197,8 @@ where
.on_connect_ext(self.on_connect_ext) .on_connect_ext(self.on_connect_ext)
} }
/// Finish service configuration and create a service for the HTTP/2 protocol. /// Finish service configuration and create a HTTP service for HTTP/2 protocol.
#[cfg(feature = "http2")] pub fn h2<F, B>(self, service: F) -> H2Service<T, S, B>
pub fn h2<F, B>(self, service: F) -> crate::h2::H2Service<T, S, B>
where where
F: IntoServiceFactory<S, Request>, F: IntoServiceFactory<S, Request>,
S::Error: Into<Response<BoxBody>> + 'static, S::Error: Into<Response<BoxBody>> + 'static,
@ -222,14 +209,13 @@ where
{ {
let cfg = ServiceConfig::new( let cfg = ServiceConfig::new(
self.keep_alive, self.keep_alive,
self.client_request_timeout, self.client_timeout,
self.client_disconnect_timeout, self.client_disconnect,
self.secure, self.secure,
self.local_addr, self.local_addr,
); );
crate::h2::H2Service::with_config(cfg, service.into_factory()) H2Service::with_config(cfg, service.into_factory()).on_connect_ext(self.on_connect_ext)
.on_connect_ext(self.on_connect_ext)
} }
/// Finish service configuration and create `HttpService` instance. /// Finish service configuration and create `HttpService` instance.
@ -244,8 +230,8 @@ where
{ {
let cfg = ServiceConfig::new( let cfg = ServiceConfig::new(
self.keep_alive, self.keep_alive,
self.client_request_timeout, self.client_timeout,
self.client_disconnect_timeout, self.client_disconnect,
self.secure, self.secure,
self.local_addr, self.local_addr,
); );
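For reference, the renamed `Duration`-based setters in this builder compose as below. This is a minimal sketch, assuming the usual `HttpService::build()` entry point, an `actix_service::fn_service` handler, and the `.tcp()` finalizer, none of which appear in the hunks above.

use std::time::Duration;

use actix_http::{HttpService, KeepAlive, Request, Response, StatusCode};
use actix_service::fn_service;

fn build_h1_service() {
    let _factory = HttpService::build()
        .keep_alive(KeepAlive::Timeout(Duration::from_secs(5)))
        .client_request_timeout(Duration::from_secs(5))
        .client_disconnect_timeout(Duration::ZERO)
        // attach a trivial HTTP/1 handler; real code would pass an app factory
        .h1(fn_service(|_req: Request| async {
            Ok::<_, actix_http::Error>(Response::new(StatusCode::OK))
        }))
        // finalize into a plain TCP service factory (assumed helper)
        .tcp();
}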
View File
@ -1,59 +1,106 @@
use std::{ use std::{
cell::Cell,
fmt::{self, Write},
net, net,
rc::Rc, rc::Rc,
time::{Duration, Instant}, time::{Duration, SystemTime},
}; };
use actix_rt::{
task::JoinHandle,
time::{interval, sleep_until, Instant, Sleep},
};
use bytes::BytesMut; use bytes::BytesMut;
use crate::{date::DateService, KeepAlive}; /// "Sun, 06 Nov 1994 08:49:37 GMT".len()
pub(crate) const DATE_VALUE_LENGTH: usize = 29;
/// HTTP service configuration. #[derive(Debug, PartialEq, Clone, Copy)]
#[derive(Debug, Clone)] /// Server keep-alive setting
pub enum KeepAlive {
/// Keep alive in seconds
Timeout(usize),
/// Rely on OS to shutdown tcp connection
Os,
/// Disabled
Disabled,
}
impl From<usize> for KeepAlive {
fn from(keepalive: usize) -> Self {
KeepAlive::Timeout(keepalive)
}
}
impl From<Option<usize>> for KeepAlive {
fn from(keepalive: Option<usize>) -> Self {
if let Some(keepalive) = keepalive {
KeepAlive::Timeout(keepalive)
} else {
KeepAlive::Disabled
}
}
}
/// Http service configuration
pub struct ServiceConfig(Rc<Inner>); pub struct ServiceConfig(Rc<Inner>);
#[derive(Debug)]
struct Inner { struct Inner {
keep_alive: KeepAlive, keep_alive: Option<Duration>,
client_request_timeout: Duration, client_timeout: u64,
client_disconnect_timeout: Duration, client_disconnect: u64,
ka_enabled: bool,
secure: bool, secure: bool,
local_addr: Option<std::net::SocketAddr>, local_addr: Option<std::net::SocketAddr>,
date_service: DateService, date_service: DateService,
} }
impl Clone for ServiceConfig {
fn clone(&self) -> Self {
ServiceConfig(self.0.clone())
}
}
impl Default for ServiceConfig { impl Default for ServiceConfig {
fn default() -> Self { fn default() -> Self {
Self::new( Self::new(KeepAlive::Timeout(5), 0, 0, false, None)
KeepAlive::default(),
Duration::from_secs(5),
Duration::ZERO,
false,
None,
)
} }
} }
impl ServiceConfig { impl ServiceConfig {
/// Create instance of `ServiceConfig`. /// Create instance of `ServiceConfig`
pub fn new( pub fn new(
keep_alive: KeepAlive, keep_alive: KeepAlive,
client_request_timeout: Duration, client_timeout: u64,
client_disconnect_timeout: Duration, client_disconnect: u64,
secure: bool, secure: bool,
local_addr: Option<net::SocketAddr>, local_addr: Option<net::SocketAddr>,
) -> ServiceConfig { ) -> ServiceConfig {
let (keep_alive, ka_enabled) = match keep_alive {
KeepAlive::Timeout(val) => (val as u64, true),
KeepAlive::Os => (0, true),
KeepAlive::Disabled => (0, false),
};
let keep_alive = if ka_enabled && keep_alive > 0 {
Some(Duration::from_secs(keep_alive))
} else {
None
};
ServiceConfig(Rc::new(Inner { ServiceConfig(Rc::new(Inner {
keep_alive: keep_alive.normalize(), keep_alive,
client_request_timeout, ka_enabled,
client_disconnect_timeout, client_timeout,
client_disconnect,
secure, secure,
local_addr, local_addr,
date_service: DateService::new(), date_service: DateService::new(),
})) }))
} }
/// Returns `true` if connection is secure (i.e., using TLS / HTTPS). /// Returns true if connection is secure (HTTPS)
#[inline] #[inline]
pub fn secure(&self) -> bool { pub fn secure(&self) -> bool {
self.0.secure self.0.secure
@ -67,97 +114,235 @@ impl ServiceConfig {
self.0.local_addr self.0.local_addr
} }
/// Connection keep-alive setting. /// Keep alive duration if configured.
#[inline] #[inline]
pub fn keep_alive(&self) -> KeepAlive { pub fn keep_alive(&self) -> Option<Duration> {
self.0.keep_alive self.0.keep_alive
} }
/// Creates a time object representing the deadline for this connection's keep-alive period, if /// Return state of connection keep-alive functionality
/// enabled. #[inline]
/// pub fn keep_alive_enabled(&self) -> bool {
/// When [`KeepAlive::Os`] or [`KeepAlive::Disabled`] is set, this will return `None`. self.0.ka_enabled
pub fn keep_alive_deadline(&self) -> Option<Instant> { }
match self.keep_alive() {
KeepAlive::Timeout(dur) => Some(self.now() + dur), /// Client timeout for first request.
KeepAlive::Os => None, #[inline]
KeepAlive::Disabled => None, pub fn client_timer(&self) -> Option<Sleep> {
let delay_time = self.0.client_timeout;
if delay_time != 0 {
Some(sleep_until(self.now() + Duration::from_millis(delay_time)))
} else {
None
} }
} }
/// Creates a time object representing the deadline for the client to finish sending the head of /// Client timeout for first request.
/// its first request. pub fn client_timer_expire(&self) -> Option<Instant> {
/// let delay = self.0.client_timeout;
/// Returns `None` if this `ServiceConfig was` constructed with `client_request_timeout: 0`. if delay != 0 {
pub fn client_request_deadline(&self) -> Option<Instant> { Some(self.now() + Duration::from_millis(delay))
let timeout = self.0.client_request_timeout; } else {
(timeout != Duration::ZERO).then(|| self.now() + timeout) None
}
} }
/// Creates a time object representing the deadline for the client to disconnect. /// Client disconnect timer
pub fn client_disconnect_deadline(&self) -> Option<Instant> { pub fn client_disconnect_timer(&self) -> Option<Instant> {
let timeout = self.0.client_disconnect_timeout; let delay = self.0.client_disconnect;
(timeout != Duration::ZERO).then(|| self.now() + timeout) if delay != 0 {
Some(self.now() + Duration::from_millis(delay))
} else {
None
}
} }
/// Return keep-alive timer delay is configured.
#[inline]
pub fn keep_alive_timer(&self) -> Option<Sleep> {
self.keep_alive().map(|ka| sleep_until(self.now() + ka))
}
/// Keep-alive expire time
pub fn keep_alive_expire(&self) -> Option<Instant> {
self.keep_alive().map(|ka| self.now() + ka)
}
#[inline]
pub(crate) fn now(&self) -> Instant { pub(crate) fn now(&self) -> Instant {
self.0.date_service.now() self.0.date_service.now()
} }
/// Writes date header to `dst` buffer.
///
/// Low-level method that utilizes the built-in efficient date service, requiring fewer syscalls
/// than normal. Note that a CRLF (`\r\n`) is included in what is written.
#[doc(hidden)] #[doc(hidden)]
pub fn write_date_header(&self, dst: &mut BytesMut, camel_case: bool) { pub fn set_date(&self, dst: &mut BytesMut) {
let mut buf: [u8; 37] = [0; 37]; let mut buf: [u8; 39] = [0; 39];
buf[..6].copy_from_slice(b"date: ");
buf[..6].copy_from_slice(if camel_case { b"Date: " } else { b"date: " });
self.0 self.0
.date_service .date_service
.with_date(|date| buf[6..35].copy_from_slice(&date.bytes)); .set_date(|date| buf[6..35].copy_from_slice(&date.bytes));
buf[35..].copy_from_slice(b"\r\n\r\n");
buf[35..].copy_from_slice(b"\r\n");
dst.extend_from_slice(&buf); dst.extend_from_slice(&buf);
} }
#[allow(unused)] // used with `http2` feature flag pub(crate) fn set_date_header(&self, dst: &mut BytesMut) {
pub(crate) fn write_date_header_value(&self, dst: &mut BytesMut) {
self.0 self.0
.date_service .date_service
.with_date(|date| dst.extend_from_slice(&date.bytes)); .set_date(|date| dst.extend_from_slice(&date.bytes));
}
}
#[derive(Copy, Clone)]
struct Date {
bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
}
impl Date {
fn new() -> Date {
let mut date = Date {
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
};
date.update();
date
}
fn update(&mut self) {
self.pos = 0;
write!(self, "{}", httpdate::fmt_http_date(SystemTime::now())).unwrap();
}
}
impl fmt::Write for Date {
fn write_str(&mut self, s: &str) -> fmt::Result {
let len = s.len();
self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
self.pos += len;
Ok(())
}
}
/// Service for update Date and Instant periodically at 500 millis interval.
struct DateService {
current: Rc<Cell<(Date, Instant)>>,
handle: JoinHandle<()>,
}
impl Drop for DateService {
fn drop(&mut self) {
// stop the timer update async task on drop.
self.handle.abort();
}
}
impl DateService {
fn new() -> Self {
// shared date and timer for DateService and update async task.
let current = Rc::new(Cell::new((Date::new(), Instant::now())));
let current_clone = Rc::clone(&current);
// spawn an async task sleep for 500 milli and update current date/timer in a loop.
// handle is used to stop the task on DateService drop.
let handle = actix_rt::spawn(async move {
#[cfg(test)]
let _notify = notify_on_drop::NotifyOnDrop::new();
let mut interval = interval(Duration::from_millis(500));
loop {
let now = interval.tick().await;
let date = Date::new();
current_clone.set((date, now));
}
});
DateService { current, handle }
}
fn now(&self) -> Instant {
self.current.get().1
}
fn set_date<F: FnMut(&Date)>(&self, mut f: F) {
f(&self.current.get().0);
}
}
// TODO: move to a util module for testing all spawn handle drop style tasks.
/// Test Module for checking the drop state of certain async tasks that are spawned
/// with `actix_rt::spawn`
///
/// The target task must explicitly generate `NotifyOnDrop` when spawn the task
#[cfg(test)]
mod notify_on_drop {
use std::cell::RefCell;
thread_local! {
static NOTIFY_DROPPED: RefCell<Option<bool>> = RefCell::new(None);
}
/// Check if the spawned task is dropped.
///
/// # Panics
/// Panics when there was no `NotifyOnDrop` instance on current thread.
pub(crate) fn is_dropped() -> bool {
NOTIFY_DROPPED.with(|bool| {
bool.borrow()
.expect("No NotifyOnDrop existed on current thread")
})
}
pub(crate) struct NotifyOnDrop;
impl NotifyOnDrop {
/// # Panic:
///
/// When construct multiple instances on any given thread.
pub(crate) fn new() -> Self {
NOTIFY_DROPPED.with(|bool| {
let mut bool = bool.borrow_mut();
if bool.is_some() {
panic!("NotifyOnDrop existed on current thread");
} else {
*bool = Some(false);
}
});
NotifyOnDrop
}
}
impl Drop for NotifyOnDrop {
fn drop(&mut self) {
NOTIFY_DROPPED.with(|bool| {
if let Some(b) = bool.borrow_mut().as_mut() {
*b = true;
}
});
}
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use actix_rt::{
task::yield_now,
time::{sleep, sleep_until},
};
use memchr::memmem;
use super::*; use super::*;
use crate::{date::DATE_VALUE_LENGTH, notify_on_drop};
use actix_rt::{task::yield_now, time::sleep};
#[actix_rt::test] #[actix_rt::test]
async fn test_date_service_update() { async fn test_date_service_update() {
let settings = let settings = ServiceConfig::new(KeepAlive::Os, 0, 0, false, None);
ServiceConfig::new(KeepAlive::Os, Duration::ZERO, Duration::ZERO, false, None);
yield_now().await; yield_now().await;
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf1, false); settings.set_date(&mut buf1);
let now1 = settings.now(); let now1 = settings.now();
sleep_until((Instant::now() + Duration::from_secs(2)).into()).await; sleep_until(Instant::now() + Duration::from_secs(2)).await;
yield_now().await; yield_now().await;
let now2 = settings.now(); let now2 = settings.now();
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf2, false); settings.set_date(&mut buf2);
assert_ne!(now1, now2); assert_ne!(now1, now2);
@ -210,27 +395,11 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_date() { async fn test_date() {
let settings = ServiceConfig::default(); let settings = ServiceConfig::new(KeepAlive::Os, 0, 0, false, None);
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf1, false); settings.set_date(&mut buf1);
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf2, false); settings.set_date(&mut buf2);
assert_eq!(buf1, buf2); assert_eq!(buf1, buf2);
} }
#[actix_rt::test]
async fn test_date_camel_case() {
let settings = ServiceConfig::default();
let mut buf = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf, false);
assert!(memmem::find(&buf, b"date:").is_some());
let mut buf = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf, true);
assert!(memmem::find(&buf, b"Date:").is_some());
}
} }
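To make the new constructor signature concrete, a small sketch of building a `ServiceConfig` and querying the deadline helpers shown above. It assumes an `actix_rt` runtime is available, since the config spawns its internal date task.

use std::time::Duration;

use actix_http::{KeepAlive, ServiceConfig};

fn main() {
    actix_rt::System::new().block_on(async {
        let cfg = ServiceConfig::new(
            KeepAlive::Timeout(Duration::from_secs(5)), // connection keep-alive
            Duration::from_secs(5),                     // client request (head read) timeout
            Duration::ZERO,                             // client disconnect timeout (disabled)
            false,                                      // secure (TLS) connection?
            None,                                       // local socket address
        );

        // deadlines are only produced for settings that are actually enabled
        assert!(cfg.keep_alive_deadline().is_some());
        assert!(cfg.client_request_deadline().is_some());
        assert!(cfg.client_disconnect_deadline().is_none());
    });
}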
View File
@ -1,92 +0,0 @@
use std::{
cell::Cell,
fmt::{self, Write},
rc::Rc,
time::{Duration, Instant, SystemTime},
};
use actix_rt::{task::JoinHandle, time::interval};
/// "Thu, 01 Jan 1970 00:00:00 GMT".len()
pub(crate) const DATE_VALUE_LENGTH: usize = 29;
#[derive(Clone, Copy)]
pub(crate) struct Date {
pub(crate) bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
}
impl Date {
fn new() -> Date {
let mut date = Date {
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
};
date.update();
date
}
fn update(&mut self) {
self.pos = 0;
write!(self, "{}", httpdate::HttpDate::from(SystemTime::now())).unwrap();
}
}
impl fmt::Write for Date {
fn write_str(&mut self, s: &str) -> fmt::Result {
let len = s.len();
self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
self.pos += len;
Ok(())
}
}
/// Service that updates the Date and Instant pair periodically at a 500 millisecond interval.
pub(crate) struct DateService {
current: Rc<Cell<(Date, Instant)>>,
handle: JoinHandle<()>,
}
impl DateService {
pub(crate) fn new() -> Self {
// shared date and timer for DateService and update async task.
let current = Rc::new(Cell::new((Date::new(), Instant::now())));
let current_clone = Rc::clone(&current);
// spawn an async task sleep for 500 millis and update current date/timer in a loop.
// handle is used to stop the task on DateService drop.
let handle = actix_rt::spawn(async move {
#[cfg(test)]
let _notify = crate::notify_on_drop::NotifyOnDrop::new();
let mut interval = interval(Duration::from_millis(500));
loop {
let now = interval.tick().await;
let date = Date::new();
current_clone.set((date, now.into_std()));
}
});
DateService { current, handle }
}
pub(crate) fn now(&self) -> Instant {
self.current.get().1
}
pub(crate) fn with_date<F: FnMut(&Date)>(&self, mut f: F) {
f(&self.current.get().0);
}
}
impl fmt::Debug for DateService {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DateService").finish_non_exhaustive()
}
}
impl Drop for DateService {
fn drop(&mut self) {
// stop the timer update async task on drop.
self.handle.abort();
}
}
View File
@ -9,15 +9,20 @@ use std::{
use actix_rt::task::{spawn_blocking, JoinHandle}; use actix_rt::task::{spawn_blocking, JoinHandle};
use bytes::Bytes; use bytes::Bytes;
use futures_core::{ready, Stream};
#[cfg(feature = "compress-brotli")]
use brotli2::write::BrotliDecoder;
#[cfg(feature = "compress-gzip")] #[cfg(feature = "compress-gzip")]
use flate2::write::{GzDecoder, ZlibDecoder}; use flate2::write::{GzDecoder, ZlibDecoder};
use futures_core::{ready, Stream};
#[cfg(feature = "compress-zstd")] #[cfg(feature = "compress-zstd")]
use zstd::stream::write::Decoder as ZstdDecoder; use zstd::stream::write::Decoder as ZstdDecoder;
use crate::{ use crate::{
encoding::Writer, encoding::Writer,
error::PayloadError, error::{BlockingError, PayloadError},
header::{ContentEncoding, HeaderMap, CONTENT_ENCODING}, header::{ContentEncoding, HeaderMap, CONTENT_ENCODING},
}; };
@ -43,19 +48,16 @@ where
let decoder = match encoding { let decoder = match encoding {
#[cfg(feature = "compress-brotli")] #[cfg(feature = "compress-brotli")]
ContentEncoding::Brotli => Some(ContentDecoder::Brotli(Box::new( ContentEncoding::Brotli => Some(ContentDecoder::Brotli(Box::new(
brotli::DecompressorWriter::new(Writer::new(), 8_096), BrotliDecoder::new(Writer::new()),
))), ))),
#[cfg(feature = "compress-gzip")] #[cfg(feature = "compress-gzip")]
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(ZlibDecoder::new( ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(
Writer::new(), ZlibDecoder::new(Writer::new()),
)))), ))),
#[cfg(feature = "compress-gzip")] #[cfg(feature = "compress-gzip")]
ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(GzDecoder::new( ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(GzDecoder::new(
Writer::new(), Writer::new(),
)))), )))),
#[cfg(feature = "compress-zstd")] #[cfg(feature = "compress-zstd")]
ContentEncoding::Zstd => Some(ContentDecoder::Zstd(Box::new( ContentEncoding::Zstd => Some(ContentDecoder::Zstd(Box::new(
ZstdDecoder::new(Writer::new()).expect( ZstdDecoder::new(Writer::new()).expect(
@ -99,12 +101,8 @@ where
loop { loop {
if let Some(ref mut fut) = this.fut { if let Some(ref mut fut) = this.fut {
let (chunk, decoder) = ready!(Pin::new(fut).poll(cx)).map_err(|_| { let (chunk, decoder) =
PayloadError::Io(io::Error::new( ready!(Pin::new(fut).poll(cx)).map_err(|_| BlockingError)??;
io::ErrorKind::Other,
"Blocking task was cancelled unexpectedly",
))
})??;
*this.decoder = Some(decoder); *this.decoder = Some(decoder);
this.fut.take(); this.fut.take();
@ -164,13 +162,10 @@ where
enum ContentDecoder { enum ContentDecoder {
#[cfg(feature = "compress-gzip")] #[cfg(feature = "compress-gzip")]
Deflate(Box<ZlibDecoder<Writer>>), Deflate(Box<ZlibDecoder<Writer>>),
#[cfg(feature = "compress-gzip")] #[cfg(feature = "compress-gzip")]
Gzip(Box<GzDecoder<Writer>>), Gzip(Box<GzDecoder<Writer>>),
#[cfg(feature = "compress-brotli")] #[cfg(feature = "compress-brotli")]
Brotli(Box<brotli::DecompressorWriter<Writer>>), Brotli(Box<BrotliDecoder<Writer>>),
// We need explicit 'static lifetime here because ZstdDecoder needs a lifetime // We need explicit 'static lifetime here because ZstdDecoder needs a lifetime
// argument, and we use `spawn_blocking` in `Decoder::poll_next` that requires `FnOnce() -> R + Send + 'static` // argument, and we use `spawn_blocking` in `Decoder::poll_next` that requires `FnOnce() -> R + Send + 'static`
#[cfg(feature = "compress-zstd")] #[cfg(feature = "compress-zstd")]
@ -191,7 +186,7 @@ impl ContentDecoder {
Ok(None) Ok(None)
} }
} }
Err(err) => Err(err), Err(e) => Err(e),
}, },
#[cfg(feature = "compress-gzip")] #[cfg(feature = "compress-gzip")]
@ -205,7 +200,7 @@ impl ContentDecoder {
Ok(None) Ok(None)
} }
} }
Err(err) => Err(err), Err(e) => Err(e),
}, },
#[cfg(feature = "compress-gzip")] #[cfg(feature = "compress-gzip")]
@ -218,7 +213,7 @@ impl ContentDecoder {
Ok(None) Ok(None)
} }
} }
Err(err) => Err(err), Err(e) => Err(e),
}, },
#[cfg(feature = "compress-zstd")] #[cfg(feature = "compress-zstd")]
@ -231,7 +226,7 @@ impl ContentDecoder {
Ok(None) Ok(None)
} }
} }
Err(err) => Err(err), Err(e) => Err(e),
}, },
} }
} }
@ -250,7 +245,7 @@ impl ContentDecoder {
Ok(None) Ok(None)
} }
} }
Err(err) => Err(err), Err(e) => Err(e),
}, },
#[cfg(feature = "compress-gzip")] #[cfg(feature = "compress-gzip")]
@ -265,7 +260,7 @@ impl ContentDecoder {
Ok(None) Ok(None)
} }
} }
Err(err) => Err(err), Err(e) => Err(e),
}, },
#[cfg(feature = "compress-gzip")] #[cfg(feature = "compress-gzip")]
@ -280,7 +275,7 @@ impl ContentDecoder {
Ok(None) Ok(None)
} }
} }
Err(err) => Err(err), Err(e) => Err(e),
}, },
#[cfg(feature = "compress-zstd")] #[cfg(feature = "compress-zstd")]
@ -295,7 +290,7 @@ impl ContentDecoder {
Ok(None) Ok(None)
} }
} }
Err(err) => Err(err), Err(e) => Err(e),
}, },
} }
} }
View File
@ -11,17 +11,22 @@ use std::{
use actix_rt::task::{spawn_blocking, JoinHandle}; use actix_rt::task::{spawn_blocking, JoinHandle};
use bytes::Bytes; use bytes::Bytes;
use derive_more::Display; use derive_more::Display;
#[cfg(feature = "compress-gzip")]
use flate2::write::{GzEncoder, ZlibEncoder};
use futures_core::ready; use futures_core::ready;
use pin_project_lite::pin_project; use pin_project_lite::pin_project;
use tracing::trace;
#[cfg(feature = "compress-brotli")]
use brotli2::write::BrotliEncoder;
#[cfg(feature = "compress-gzip")]
use flate2::write::{GzEncoder, ZlibEncoder};
#[cfg(feature = "compress-zstd")] #[cfg(feature = "compress-zstd")]
use zstd::stream::write::Encoder as ZstdEncoder; use zstd::stream::write::Encoder as ZstdEncoder;
use super::Writer; use super::Writer;
use crate::{ use crate::{
body::{self, BodySize, MessageBody}, body::{self, BodySize, MessageBody},
error::BlockingError,
header::{self, ContentEncoding, HeaderValue, CONTENT_ENCODING}, header::{self, ContentEncoding, HeaderValue, CONTENT_ENCODING},
ResponseHead, StatusCode, ResponseHead, StatusCode,
}; };
@ -50,21 +55,10 @@ impl<B: MessageBody> Encoder<B> {
} }
} }
fn empty() -> Self {
Encoder {
body: EncoderBody::Full { body: Bytes::new() },
encoder: None,
fut: None,
eof: true,
}
}
pub fn response(encoding: ContentEncoding, head: &mut ResponseHead, body: B) -> Self { pub fn response(encoding: ContentEncoding, head: &mut ResponseHead, body: B) -> Self {
// no need to compress empty bodies // no need to compress an empty body
match body.size() { if matches!(body.size(), BodySize::None) {
BodySize::None => return Self::none(), return Self::none();
BodySize::Sized(0) => return Self::empty(),
_ => {}
} }
let should_encode = !(head.headers().contains_key(&CONTENT_ENCODING) let should_encode = !(head.headers().contains_key(&CONTENT_ENCODING)
@ -182,12 +176,7 @@ where
if let Some(ref mut fut) = this.fut { if let Some(ref mut fut) = this.fut {
let mut encoder = ready!(Pin::new(fut).poll(cx)) let mut encoder = ready!(Pin::new(fut).poll(cx))
.map_err(|_| { .map_err(|_| EncoderError::Blocking(BlockingError))?
EncoderError::Io(io::Error::new(
io::ErrorKind::Other,
"Blocking task was cancelled unexpectedly",
))
})?
.map_err(EncoderError::Io)?; .map_err(EncoderError::Io)?;
let chunk = encoder.take(); let chunk = encoder.take();
@ -266,7 +255,7 @@ fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) {
head.headers_mut() head.headers_mut()
.insert(header::CONTENT_ENCODING, encoding.to_header_value()); .insert(header::CONTENT_ENCODING, encoding.to_header_value());
head.headers_mut() head.headers_mut()
.append(header::VARY, HeaderValue::from_static("accept-encoding")); .insert(header::VARY, HeaderValue::from_static("accept-encoding"));
head.no_chunking(false); head.no_chunking(false);
} }
@ -279,7 +268,7 @@ enum ContentEncoder {
Gzip(GzEncoder<Writer>), Gzip(GzEncoder<Writer>),
#[cfg(feature = "compress-brotli")] #[cfg(feature = "compress-brotli")]
Brotli(Box<brotli::CompressorWriter<Writer>>), Brotli(BrotliEncoder<Writer>),
// We need explicit 'static lifetime here because ZstdEncoder needs a lifetime argument and we // We need explicit 'static lifetime here because ZstdEncoder needs a lifetime argument and we
// use `spawn_blocking` in `Encoder::poll_next` that requires `FnOnce() -> R + Send + 'static`. // use `spawn_blocking` in `Encoder::poll_next` that requires `FnOnce() -> R + Send + 'static`.
@ -303,7 +292,9 @@ impl ContentEncoder {
))), ))),
#[cfg(feature = "compress-brotli")] #[cfg(feature = "compress-brotli")]
ContentEncoding::Brotli => Some(ContentEncoder::Brotli(new_brotli_compressor())), ContentEncoding::Brotli => {
Some(ContentEncoder::Brotli(BrotliEncoder::new(Writer::new(), 3)))
}
#[cfg(feature = "compress-zstd")] #[cfg(feature = "compress-zstd")]
ContentEncoding::Zstd => { ContentEncoding::Zstd => {
@ -335,8 +326,8 @@ impl ContentEncoder {
fn finish(self) -> Result<Bytes, io::Error> { fn finish(self) -> Result<Bytes, io::Error> {
match self { match self {
#[cfg(feature = "compress-brotli")] #[cfg(feature = "compress-brotli")]
ContentEncoder::Brotli(mut encoder) => match encoder.flush() { ContentEncoder::Brotli(encoder) => match encoder.finish() {
Ok(()) => Ok(encoder.into_inner().buf.freeze()), Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err), Err(err) => Err(err),
}, },
@ -401,25 +392,16 @@ impl ContentEncoder {
} }
} }
#[cfg(feature = "compress-brotli")]
fn new_brotli_compressor() -> Box<brotli::CompressorWriter<Writer>> {
Box::new(brotli::CompressorWriter::new(
Writer::new(),
32 * 1024, // 32 KiB buffer
3, // BROTLI_PARAM_QUALITY
22, // BROTLI_PARAM_LGWIN
))
}
#[derive(Debug, Display)] #[derive(Debug, Display)]
#[non_exhaustive] #[non_exhaustive]
pub enum EncoderError { pub enum EncoderError {
/// Wrapped body stream error. #[display(fmt = "body")]
#[display("body")]
Body(Box<dyn StdError>), Body(Box<dyn StdError>),
/// Generic I/O error. #[display(fmt = "blocking")]
#[display("io")] Blocking(BlockingError),
#[display(fmt = "io")]
Io(io::Error), Io(io::Error),
} }
@ -427,6 +409,7 @@ impl StdError for EncoderError {
fn source(&self) -> Option<&(dyn StdError + 'static)> { fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self { match self {
EncoderError::Body(err) => Some(&**err), EncoderError::Body(err) => Some(&**err),
EncoderError::Blocking(err) => Some(err),
EncoderError::Io(err) => Some(err), EncoderError::Io(err) => Some(err),
} }
} }
View File
@ -7,12 +7,13 @@ use bytes::{Bytes, BytesMut};
mod decoder; mod decoder;
mod encoder; mod encoder;
pub use self::{decoder::Decoder, encoder::Encoder}; pub use self::decoder::Decoder;
pub use self::encoder::Encoder;
/// Special-purpose writer for streaming (de-)compression. /// Special-purpose writer for streaming (de-)compression.
/// ///
/// Pre-allocates 8KiB of capacity. /// Pre-allocates 8KiB of capacity.
struct Writer { pub(self) struct Writer {
buf: BytesMut, buf: BytesMut,
} }
View File
@ -3,10 +3,11 @@
use std::{error::Error as StdError, fmt, io, str::Utf8Error, string::FromUtf8Error}; use std::{error::Error as StdError, fmt, io, str::Utf8Error, string::FromUtf8Error};
use derive_more::{Display, Error, From}; use derive_more::{Display, Error, From};
pub use http::{status::InvalidStatusCode, Error as HttpError};
use http::{uri::InvalidUri, StatusCode}; use http::{uri::InvalidUri, StatusCode};
use crate::{body::BoxBody, Response}; use crate::{body::BoxBody, ws, Response};
pub use http::Error as HttpError;
pub struct Error { pub struct Error {
inner: Box<ErrorInner>, inner: Box<ErrorInner>,
@ -50,7 +51,7 @@ impl Error {
Self::new(Kind::SendResponse) Self::new(Kind::SendResponse)
} }
#[allow(unused)] // available for future use #[allow(unused)] // reserved for future use (TODO: remove allow when being used)
pub(crate) fn new_io() -> Self { pub(crate) fn new_io() -> Self {
Self::new(Kind::Io) Self::new(Kind::Io)
} }
@ -60,7 +61,6 @@ impl Error {
Self::new(Kind::Encoder) Self::new(Kind::Encoder)
} }
#[allow(unused)] // used with `ws` feature flag
pub(crate) fn new_ws() -> Self { pub(crate) fn new_ws() -> Self {
Self::new(Kind::Ws) Self::new(Kind::Ws)
} }
@ -80,37 +80,35 @@ impl From<Error> for Response<BoxBody> {
#[derive(Debug, Clone, Copy, PartialEq, Eq, Display)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Display)]
pub(crate) enum Kind { pub(crate) enum Kind {
#[display("error processing HTTP")] #[display(fmt = "error processing HTTP")]
Http, Http,
#[display("error parsing HTTP message")] #[display(fmt = "error parsing HTTP message")]
Parse, Parse,
#[display("request payload read error")] #[display(fmt = "request payload read error")]
Payload, Payload,
#[display("response body write error")] #[display(fmt = "response body write error")]
Body, Body,
#[display("send response error")] #[display(fmt = "send response error")]
SendResponse, SendResponse,
#[display("error in WebSocket process")] #[display(fmt = "error in WebSocket process")]
Ws, Ws,
#[display("connection error")] #[display(fmt = "connection error")]
Io, Io,
#[display("encoder error")] #[display(fmt = "encoder error")]
Encoder, Encoder,
} }
impl fmt::Debug for Error { impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("actix_http::Error") // TODO: more detail
.field("kind", &self.inner.kind) f.write_str("actix_http::Error")
.field("cause", &self.inner.cause)
.finish()
} }
} }
@ -141,16 +139,14 @@ impl From<HttpError> for Error {
} }
} }
#[cfg(feature = "ws")] impl From<ws::HandshakeError> for Error {
impl From<crate::ws::HandshakeError> for Error { fn from(err: ws::HandshakeError) -> Self {
fn from(err: crate::ws::HandshakeError) -> Self {
Self::new_ws().with_cause(err) Self::new_ws().with_cause(err)
} }
} }
#[cfg(feature = "ws")] impl From<ws::ProtocolError> for Error {
impl From<crate::ws::ProtocolError> for Error { fn from(err: ws::ProtocolError) -> Self {
fn from(err: crate::ws::ProtocolError) -> Self {
Self::new_ws().with_cause(err) Self::new_ws().with_cause(err)
} }
} }
@ -160,44 +156,44 @@ impl From<crate::ws::ProtocolError> for Error {
#[non_exhaustive] #[non_exhaustive]
pub enum ParseError { pub enum ParseError {
/// An invalid `Method`, such as `GE.T`. /// An invalid `Method`, such as `GE.T`.
#[display("invalid method specified")] #[display(fmt = "Invalid Method specified")]
Method, Method,
/// An invalid `Uri`, such as `exam ple.domain`. /// An invalid `Uri`, such as `exam ple.domain`.
#[display("URI error: {}", _0)] #[display(fmt = "Uri error: {}", _0)]
Uri(InvalidUri), Uri(InvalidUri),
/// An invalid `HttpVersion`, such as `HTP/1.1` /// An invalid `HttpVersion`, such as `HTP/1.1`
#[display("invalid HTTP version specified")] #[display(fmt = "Invalid HTTP version specified")]
Version, Version,
/// An invalid `Header`. /// An invalid `Header`.
#[display("invalid Header provided")] #[display(fmt = "Invalid Header provided")]
Header, Header,
/// A message head is too large to be reasonable. /// A message head is too large to be reasonable.
#[display("message head is too large")] #[display(fmt = "Message head is too large")]
TooLarge, TooLarge,
/// A message reached EOF, but is not complete. /// A message reached EOF, but is not complete.
#[display("message is incomplete")] #[display(fmt = "Message is incomplete")]
Incomplete, Incomplete,
/// An invalid `Status`, such as `1337 ELITE`. /// An invalid `Status`, such as `1337 ELITE`.
#[display("invalid status provided")] #[display(fmt = "Invalid Status provided")]
Status, Status,
/// A timeout occurred waiting for an IO event. /// A timeout occurred waiting for an IO event.
#[allow(dead_code)] #[allow(dead_code)]
#[display("timeout")] #[display(fmt = "Timeout")]
Timeout, Timeout,
/// An I/O error that occurred while trying to read or write to a network stream. /// An `io::Error` that occurred while trying to read or write to a network stream.
#[display("I/O error: {}", _0)] #[display(fmt = "IO error: {}", _0)]
Io(io::Error), Io(io::Error),
/// Parsing a field as string failed. /// Parsing a field as string failed.
#[display("UTF-8 error: {}", _0)] #[display(fmt = "UTF8 error: {}", _0)]
Utf8(Utf8Error), Utf8(Utf8Error),
} }
@ -251,33 +247,41 @@ impl From<ParseError> for Response<BoxBody> {
} }
} }
/// A set of errors that can occur running blocking tasks in thread pool.
#[derive(Debug, Display, Error)]
#[display(fmt = "Blocking thread pool is gone")]
// TODO: non-exhaustive
pub struct BlockingError;
/// A set of errors that can occur during payload parsing. /// A set of errors that can occur during payload parsing.
#[derive(Debug, Display)] #[derive(Debug, Display)]
#[non_exhaustive] #[non_exhaustive]
pub enum PayloadError { pub enum PayloadError {
/// A payload reached EOF, but is not complete. /// A payload reached EOF, but is not complete.
#[display("payload reached EOF before completing: {:?}", _0)] #[display(
fmt = "A payload reached EOF, but is not complete. Inner error: {:?}",
_0
)]
Incomplete(Option<io::Error>), Incomplete(Option<io::Error>),
/// Content encoding stream corruption. /// Content encoding stream corruption.
#[display("can not decode content-encoding")] #[display(fmt = "Can not decode content-encoding.")]
EncodingCorrupted, EncodingCorrupted,
/// Payload reached size limit. /// Payload reached size limit.
#[display("payload reached size limit")] #[display(fmt = "Payload reached size limit.")]
Overflow, Overflow,
/// Payload length is unknown. /// Payload length is unknown.
#[display("payload length is unknown")] #[display(fmt = "Payload length is unknown.")]
UnknownLength, UnknownLength,
/// HTTP/2 payload error. /// HTTP/2 payload error.
#[cfg(feature = "http2")] #[display(fmt = "{}", _0)]
#[display("{}", _0)] Http2Payload(h2::Error),
Http2Payload(::h2::Error),
/// Generic I/O error. /// Generic I/O error.
#[display("{}", _0)] #[display(fmt = "{}", _0)]
Io(io::Error), Io(io::Error),
} }
@ -285,20 +289,18 @@ impl std::error::Error for PayloadError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self { match self {
PayloadError::Incomplete(None) => None, PayloadError::Incomplete(None) => None,
PayloadError::Incomplete(Some(err)) => Some(err), PayloadError::Incomplete(Some(err)) => Some(err as &dyn std::error::Error),
PayloadError::EncodingCorrupted => None, PayloadError::EncodingCorrupted => None,
PayloadError::Overflow => None, PayloadError::Overflow => None,
PayloadError::UnknownLength => None, PayloadError::UnknownLength => None,
#[cfg(feature = "http2")] PayloadError::Http2Payload(err) => Some(err as &dyn std::error::Error),
PayloadError::Http2Payload(err) => Some(err), PayloadError::Io(err) => Some(err as &dyn std::error::Error),
PayloadError::Io(err) => Some(err),
} }
} }
} }
#[cfg(feature = "http2")] impl From<h2::Error> for PayloadError {
impl From<::h2::Error> for PayloadError { fn from(err: h2::Error) -> Self {
fn from(err: ::h2::Error) -> Self {
PayloadError::Http2Payload(err) PayloadError::Http2Payload(err)
} }
} }
@ -315,6 +317,15 @@ impl From<io::Error> for PayloadError {
} }
} }
impl From<BlockingError> for PayloadError {
fn from(_: BlockingError) -> Self {
PayloadError::Io(io::Error::new(
io::ErrorKind::Other,
"Operation is canceled",
))
}
}
impl From<PayloadError> for Error { impl From<PayloadError> for Error {
fn from(err: PayloadError) -> Self { fn from(err: PayloadError) -> Self {
Self::new_payload().with_cause(err) Self::new_payload().with_cause(err)
@ -323,61 +334,52 @@ impl From<PayloadError> for Error {
/// A set of errors that can occur during dispatching HTTP requests. /// A set of errors that can occur during dispatching HTTP requests.
#[derive(Debug, Display, From)] #[derive(Debug, Display, From)]
#[non_exhaustive]
pub enum DispatchError { pub enum DispatchError {
/// Service error. /// Service error.
#[display("service error")] #[display(fmt = "Service Error")]
Service(Response<BoxBody>), Service(Response<BoxBody>),
/// Body streaming error. /// Body streaming error.
#[display("body error: {}", _0)] #[display(fmt = "Body error: {}", _0)]
Body(Box<dyn StdError>), Body(Box<dyn StdError>),
/// Upgrade service error. /// Upgrade service error.
#[display("upgrade error")]
Upgrade, Upgrade,
/// An `io::Error` that occurred while trying to read or write to a network stream. /// An `io::Error` that occurred while trying to read or write to a network stream.
#[display("I/O error: {}", _0)] #[display(fmt = "IO error: {}", _0)]
Io(io::Error), Io(io::Error),
/// Request parse error. /// Request parse error.
#[display("request parse error: {}", _0)] #[display(fmt = "Request parse error: {}", _0)]
Parse(ParseError), Parse(ParseError),
/// HTTP/2 error. /// HTTP/2 error.
#[display("{}", _0)] #[display(fmt = "{}", _0)]
#[cfg(feature = "http2")]
H2(h2::Error), H2(h2::Error),
/// The first request did not complete within the specified timeout. /// The first request did not complete within the specified timeout.
#[display("request did not complete within the specified timeout")] #[display(fmt = "The first request did not complete within the specified timeout")]
SlowRequestTimeout, SlowRequestTimeout,
/// Disconnect timeout. Makes sense for TLS streams. /// Disconnect timeout. Makes sense for ssl streams.
#[display("connection shutdown timeout")] #[display(fmt = "Connection shutdown timeout")]
DisconnectTimeout, DisconnectTimeout,
/// Handler dropped payload before reading EOF.
#[display("handler dropped payload before reading EOF")]
HandlerDroppedPayload,
/// Internal error. /// Internal error.
#[display("internal error")] #[display(fmt = "Internal error")]
InternalError, InternalError,
} }
impl StdError for DispatchError { impl StdError for DispatchError {
fn source(&self) -> Option<&(dyn StdError + 'static)> { fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self { match self {
// TODO: error source extraction?
DispatchError::Service(_res) => None, DispatchError::Service(_res) => None,
DispatchError::Body(err) => Some(&**err), DispatchError::Body(err) => Some(&**err),
DispatchError::Io(err) => Some(err), DispatchError::Io(err) => Some(err),
DispatchError::Parse(err) => Some(err), DispatchError::Parse(err) => Some(err),
#[cfg(feature = "http2")]
DispatchError::H2(err) => Some(err), DispatchError::H2(err) => Some(err),
_ => None, _ => None,
} }
} }
@ -385,24 +387,39 @@ impl StdError for DispatchError {
/// A set of error that can occur during parsing content type. /// A set of error that can occur during parsing content type.
#[derive(Debug, Display, Error)] #[derive(Debug, Display, Error)]
#[cfg_attr(test, derive(PartialEq, Eq))]
#[non_exhaustive] #[non_exhaustive]
pub enum ContentTypeError { pub enum ContentTypeError {
/// Can not parse content type. /// Can not parse content type
#[display("could not parse content type")] #[display(fmt = "Can not parse content type")]
ParseError, ParseError,
/// Unknown content encoding. /// Unknown content encoding
#[display("unknown content encoding")] #[display(fmt = "Unknown content encoding")]
UnknownEncoding, UnknownEncoding,
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod content_type_test_impls {
use http::Error as HttpError;
use super::*; use super::*;
impl std::cmp::PartialEq for ContentTypeError {
fn eq(&self, other: &Self) -> bool {
match self {
Self::ParseError => matches!(other, ContentTypeError::ParseError),
Self::UnknownEncoding => {
matches!(other, ContentTypeError::UnknownEncoding)
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use http::{Error as HttpError, StatusCode};
use std::io;
#[test] #[test]
fn test_into_response() { fn test_into_response() {
let resp: Response<BoxBody> = ParseError::Incomplete.into(); let resp: Response<BoxBody> = ParseError::Incomplete.into();
@ -419,7 +436,7 @@ mod tests {
let err: Error = ParseError::Io(orig).into(); let err: Error = ParseError::Io(orig).into();
assert_eq!( assert_eq!(
format!("{}", err), format!("{}", err),
"error parsing HTTP message: I/O error: other" "error parsing HTTP message: IO error: other"
); );
} }
@ -446,7 +463,7 @@ mod tests {
let err = PayloadError::Incomplete(None); let err = PayloadError::Incomplete(None);
assert_eq!( assert_eq!(
err.to_string(), err.to_string(),
"payload reached EOF before completing: None" "A payload reached EOF, but is not complete. Inner error: None"
); );
} }
@ -466,7 +483,7 @@ mod tests {
match ParseError::from($from) { match ParseError::from($from) {
e @ $error => { e @ $error => {
let desc = format!("{}", e); let desc = format!("{}", e);
assert_eq!(desc, format!("I/O error: {}", $from)); assert_eq!(desc, format!("IO error: {}", $from));
} }
_ => unreachable!("{:?}", $from), _ => unreachable!("{:?}", $from),
} }
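The attribute changes above follow derive_more's newer syntax, where the format string is passed to `#[display]` directly instead of via `fmt = "..."`. A minimal illustration with a hypothetical error type, assuming derive_more 1.x:

use derive_more::Display;

// hypothetical enum mirroring the new attribute style used in the error types above;
// positional fields are referenced as `_0`, `_1`, ...
#[derive(Debug, Display)]
enum ExampleError {
    #[display("I/O error: {}", _0)]
    Io(std::io::Error),

    #[display("payload reached size limit")]
    Overflow,
}

fn main() {
    let err = ExampleError::Io(std::io::Error::new(std::io::ErrorKind::Other, "boom"));
    assert_eq!(err.to_string(), "I/O error: boom");
    assert_eq!(ExampleError::Overflow.to_string(), "payload reached size limit");
}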
View File
@ -1,38 +1,17 @@
use std::{ use std::{
any::{Any, TypeId}, any::{Any, TypeId},
collections::HashMap,
fmt, fmt,
hash::{BuildHasherDefault, Hasher},
}; };
/// A hasher for `TypeId`s that takes advantage of its known characteristics. use ahash::AHashMap;
///
/// Author of `anymap` crate has done research on the topic:
/// https://github.com/chris-morgan/anymap/blob/2e9a5704/src/lib.rs#L599
#[derive(Debug, Default)]
struct NoOpHasher(u64);
impl Hasher for NoOpHasher {
fn write(&mut self, _bytes: &[u8]) {
unimplemented!("This NoOpHasher can only handle u64s")
}
fn write_u64(&mut self, i: u64) {
self.0 = i;
}
fn finish(&self) -> u64 {
self.0
}
}
/// A type map for request extensions. /// A type map for request extensions.
/// ///
/// All entries into this map must be owned types (or static references). /// All entries into this map must be owned types (or static references).
#[derive(Default)] #[derive(Default)]
pub struct Extensions { pub struct Extensions {
// use no-op hasher with a std HashMap with for faster lookups on the small `TypeId` keys /// Use AHasher with a std HashMap with for faster lookups on the small `TypeId` keys.
map: HashMap<TypeId, Box<dyn Any>, BuildHasherDefault<NoOpHasher>>, map: AHashMap<TypeId, Box<dyn Any>>,
} }
impl Extensions { impl Extensions {
@ -40,7 +19,7 @@ impl Extensions {
#[inline] #[inline]
pub fn new() -> Extensions { pub fn new() -> Extensions {
Extensions { Extensions {
map: HashMap::default(), map: AHashMap::new(),
} }
} }
@ -104,46 +83,6 @@ impl Extensions {
.and_then(|boxed| boxed.downcast_mut()) .and_then(|boxed| boxed.downcast_mut())
} }
/// Inserts the given `value` into the extensions if it is not present, then returns a reference
/// to the value in the extensions.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// assert_eq!(map.get::<Vec<u32>>(), None);
///
/// map.get_or_insert(Vec::<u32>::new()).push(1);
/// assert_eq!(map.get::<Vec<u32>>(), Some(&vec![1]));
///
/// map.get_or_insert(Vec::<u32>::new()).push(2);
/// assert_eq!(map.get::<Vec<u32>>(), Some(&vec![1,2]));
/// ```
pub fn get_or_insert<T: 'static>(&mut self, value: T) -> &mut T {
self.get_or_insert_with(|| value)
}
/// Inserts a value computed from `f` into the extensions if the given `value` is not present,
/// then returns a reference to the value in the extensions.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// assert_eq!(map.get::<Vec<u32>>(), None);
///
/// map.get_or_insert_with(Vec::<u32>::new).push(1);
/// assert_eq!(map.get::<Vec<u32>>(), Some(&vec![1]));
///
/// map.get_or_insert_with(Vec::<u32>::new).push(2);
/// assert_eq!(map.get::<Vec<u32>>(), Some(&vec![1,2]));
/// ```
pub fn get_or_insert_with<T: 'static, F: FnOnce() -> T>(&mut self, default: F) -> &mut T {
self.map
.entry(TypeId::of::<T>())
.or_insert_with(|| Box::new(default()))
.downcast_mut()
.expect("extensions map should now contain a T value")
}
/// Remove an item from the map of a given type. /// Remove an item from the map of a given type.
/// ///
/// If an item of this type was already stored, it will be returned. /// If an item of this type was already stored, it will be returned.
View File
@ -1,7 +1,6 @@
use std::{io, task::Poll}; use std::{io, task::Poll};
use bytes::{Buf as _, Bytes, BytesMut}; use bytes::{Buf as _, Bytes, BytesMut};
use tracing::{debug, trace};
macro_rules! byte ( macro_rules! byte (
($rdr:ident) => ({ ($rdr:ident) => ({
@ -15,7 +14,7 @@ macro_rules! byte (
}) })
); );
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, PartialEq, Clone)]
pub(super) enum ChunkedState { pub(super) enum ChunkedState {
Size, Size,
SizeLws, SizeLws,
@ -71,13 +70,13 @@ impl ChunkedState {
match size.checked_mul(radix) { match size.checked_mul(radix) {
Some(n) => { Some(n) => {
*size = n; *size = n as u64;
*size += rem as u64; *size += rem as u64;
Poll::Ready(Ok(ChunkedState::Size)) Poll::Ready(Ok(ChunkedState::Size))
} }
None => { None => {
debug!("chunk size would overflow u64"); log::debug!("chunk size would overflow u64");
Poll::Ready(Err(io::Error::new( Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput, io::ErrorKind::InvalidInput,
"Invalid chunk size line: Size is too big", "Invalid chunk size line: Size is too big",
@ -125,7 +124,7 @@ impl ChunkedState {
rem: &mut u64, rem: &mut u64,
buf: &mut Option<Bytes>, buf: &mut Option<Bytes>,
) -> Poll<Result<ChunkedState, io::Error>> { ) -> Poll<Result<ChunkedState, io::Error>> {
trace!("Chunked read, remaining={:?}", rem); log::trace!("Chunked read, remaining={:?}", rem);
let len = rdr.len() as u64; let len = rdr.len() as u64;
if len == 0 { if len == 0 {
View File
@ -1,9 +1,9 @@
use std::{fmt, io}; use std::io;
use actix_codec::{Decoder, Encoder};
use bitflags::bitflags; use bitflags::bitflags;
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
use http::{Method, Version}; use http::{Method, Version};
use tokio_util::codec::{Decoder, Encoder};
use super::{ use super::{
decoder::{self, PayloadDecoder, PayloadItem, PayloadType}, decoder::{self, PayloadDecoder, PayloadItem, PayloadType},
@ -16,11 +16,10 @@ use crate::{
}; };
bitflags! { bitflags! {
#[derive(Debug, Clone, Copy)]
struct Flags: u8 { struct Flags: u8 {
const HEAD = 0b0000_0001; const HEAD = 0b0000_0001;
const KEEP_ALIVE_ENABLED = 0b0000_1000; const KEEPALIVE_ENABLED = 0b0000_1000;
const STREAM = 0b0001_0000; const STREAM = 0b0001_0000;
} }
} }
@ -39,7 +38,7 @@ struct ClientCodecInner {
decoder: decoder::MessageDecoder<ResponseHead>, decoder: decoder::MessageDecoder<ResponseHead>,
payload: Option<PayloadDecoder>, payload: Option<PayloadDecoder>,
version: Version, version: Version,
conn_type: ConnectionType, ctype: ConnectionType,
// encoder part // encoder part
flags: Flags, flags: Flags,
@ -52,32 +51,23 @@ impl Default for ClientCodec {
} }
} }
impl fmt::Debug for ClientCodec {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("h1::ClientCodec")
.field("flags", &self.inner.flags)
.finish_non_exhaustive()
}
}
impl ClientCodec { impl ClientCodec {
/// Create HTTP/1 codec. /// Create HTTP/1 codec.
/// ///
/// `keepalive_enabled` how response `connection` header get generated. /// `keepalive_enabled` how response `connection` header get generated.
pub fn new(config: ServiceConfig) -> Self { pub fn new(config: ServiceConfig) -> Self {
let flags = if config.keep_alive().enabled() { let flags = if config.keep_alive_enabled() {
Flags::KEEP_ALIVE_ENABLED Flags::KEEPALIVE_ENABLED
} else { } else {
Flags::empty() Flags::empty()
}; };
ClientCodec { ClientCodec {
inner: ClientCodecInner { inner: ClientCodecInner {
config, config,
decoder: decoder::MessageDecoder::default(), decoder: decoder::MessageDecoder::default(),
payload: None, payload: None,
version: Version::HTTP_11, version: Version::HTTP_11,
conn_type: ConnectionType::Close, ctype: ConnectionType::Close,
flags, flags,
encoder: encoder::MessageEncoder::default(), encoder: encoder::MessageEncoder::default(),
@ -87,12 +77,12 @@ impl ClientCodec {
/// Check if request is upgrade /// Check if request is upgrade
pub fn upgrade(&self) -> bool { pub fn upgrade(&self) -> bool {
self.inner.conn_type == ConnectionType::Upgrade self.inner.ctype == ConnectionType::Upgrade
} }
/// Check if last response is keep-alive /// Check if last response is keep-alive
pub fn keep_alive(&self) -> bool { pub fn keepalive(&self) -> bool {
self.inner.conn_type == ConnectionType::KeepAlive self.inner.ctype == ConnectionType::KeepAlive
} }
/// Check last request's message type /// Check last request's message type
@ -114,8 +104,8 @@ impl ClientCodec {
impl ClientPayloadCodec { impl ClientPayloadCodec {
/// Check if last response is keep-alive /// Check if last response is keep-alive
pub fn keep_alive(&self) -> bool { pub fn keepalive(&self) -> bool {
self.inner.conn_type == ConnectionType::KeepAlive self.inner.ctype == ConnectionType::KeepAlive
} }
/// Transform payload codec to a message codec /// Transform payload codec to a message codec
@ -129,18 +119,15 @@ impl Decoder for ClientCodec {
type Error = ParseError; type Error = ParseError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> { fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
debug_assert!( debug_assert!(!self.inner.payload.is_some(), "Payload decoder is set");
self.inner.payload.is_none(),
"Payload decoder should not be set"
);
if let Some((req, payload)) = self.inner.decoder.decode(src)? { if let Some((req, payload)) = self.inner.decoder.decode(src)? {
if let Some(conn_type) = req.conn_type() { if let Some(ctype) = req.conn_type() {
// do not use peer's keep-alive // do not use peer's keep-alive
self.inner.conn_type = if conn_type == ConnectionType::KeepAlive { self.inner.ctype = if ctype == ConnectionType::KeepAlive {
self.inner.conn_type self.inner.ctype
} else { } else {
conn_type ctype
}; };
} }
@ -205,9 +192,9 @@ impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
.set(Flags::HEAD, head.as_ref().method == Method::HEAD); .set(Flags::HEAD, head.as_ref().method == Method::HEAD);
// connection status // connection status
inner.conn_type = match head.as_ref().connection_type() { inner.ctype = match head.as_ref().connection_type() {
ConnectionType::KeepAlive => { ConnectionType::KeepAlive => {
if inner.flags.contains(Flags::KEEP_ALIVE_ENABLED) { if inner.flags.contains(Flags::KEEPALIVE_ENABLED) {
ConnectionType::KeepAlive ConnectionType::KeepAlive
} else { } else {
ConnectionType::Close ConnectionType::Close
@ -224,7 +211,7 @@ impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
false, false,
inner.version, inner.version,
length, length,
inner.conn_type, inner.ctype,
&inner.config, &inner.config,
)?; )?;
} }


@ -1,22 +1,23 @@
use std::{fmt, io}; use std::{fmt, io};
use actix_codec::{Decoder, Encoder};
use bitflags::bitflags; use bitflags::bitflags;
use bytes::BytesMut; use bytes::BytesMut;
use http::{Method, Version}; use http::{Method, Version};
use tokio_util::codec::{Decoder, Encoder};
use super::{ use super::{
decoder::{self, PayloadDecoder, PayloadItem, PayloadType}, decoder::{self, PayloadDecoder, PayloadItem, PayloadType},
encoder, Message, MessageType, encoder, Message, MessageType,
}; };
use crate::{body::BodySize, error::ParseError, ConnectionType, Request, Response, ServiceConfig}; use crate::{
body::BodySize, error::ParseError, ConnectionType, Request, Response, ServiceConfig,
};
bitflags! { bitflags! {
#[derive(Debug, Clone, Copy)]
struct Flags: u8 { struct Flags: u8 {
const HEAD = 0b0000_0001; const HEAD = 0b0000_0001;
const KEEP_ALIVE_ENABLED = 0b0000_0010; const KEEPALIVE_ENABLED = 0b0000_0010;
const STREAM = 0b0000_0100; const STREAM = 0b0000_0100;
} }
} }
@ -41,9 +42,7 @@ impl Default for Codec {
impl fmt::Debug for Codec { impl fmt::Debug for Codec {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("h1::Codec") write!(f, "h1::Codec({:?})", self.flags)
.field("flags", &self.flags)
.finish_non_exhaustive()
} }
} }
@ -52,8 +51,8 @@ impl Codec {
/// ///
    /// `keepalive_enabled` controls how the response `connection` header is generated. /// `keepalive_enabled` controls how the response `connection` header is generated.
pub fn new(config: ServiceConfig) -> Self { pub fn new(config: ServiceConfig) -> Self {
let flags = if config.keep_alive().enabled() { let flags = if config.keep_alive_enabled() {
Flags::KEEP_ALIVE_ENABLED Flags::KEEPALIVE_ENABLED
} else { } else {
Flags::empty() Flags::empty()
}; };
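
Both `Codec::new` and `ClientCodec::new` now read the keep-alive setting from `ServiceConfig` via `config.keep_alive().enabled()`. A sketch of constructing such a codec, mirroring the `ServiceConfig::new` call used by the dispatcher tests later in this diff (all timeout values are illustrative):

use std::time::Duration;

use actix_http::{h1::Codec, KeepAlive, ServiceConfig};

fn make_codec() -> Codec {
    let config = ServiceConfig::new(
        KeepAlive::Timeout(Duration::from_secs(5)), // keep-alive policy
        Duration::from_secs(5),                     // client request timeout
        Duration::ZERO,                             // client disconnect timeout
        false,                                      // secure (TLS) connection?
        None,                                       // local socket address
    );

    Codec::new(config)
}
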
@ -77,14 +76,14 @@ impl Codec {
/// Check if last response is keep-alive. /// Check if last response is keep-alive.
#[inline] #[inline]
pub fn keep_alive(&self) -> bool { pub fn keepalive(&self) -> bool {
self.conn_type == ConnectionType::KeepAlive self.conn_type == ConnectionType::KeepAlive
} }
/// Check if keep-alive enabled on server level. /// Check if keep-alive enabled on server level.
#[inline] #[inline]
pub fn keep_alive_enabled(&self) -> bool { pub fn keepalive_enabled(&self) -> bool {
self.flags.contains(Flags::KEEP_ALIVE_ENABLED) self.flags.contains(Flags::KEEPALIVE_ENABLED)
} }
/// Check last request's message type. /// Check last request's message type.
@ -124,13 +123,11 @@ impl Decoder for Codec {
self.flags.set(Flags::HEAD, head.method == Method::HEAD); self.flags.set(Flags::HEAD, head.method == Method::HEAD);
self.version = head.version; self.version = head.version;
self.conn_type = head.connection_type(); self.conn_type = head.connection_type();
if self.conn_type == ConnectionType::KeepAlive if self.conn_type == ConnectionType::KeepAlive
&& !self.flags.contains(Flags::KEEP_ALIVE_ENABLED) && !self.flags.contains(Flags::KEEPALIVE_ENABLED)
{ {
self.conn_type = ConnectionType::Close self.conn_type = ConnectionType::Close
} }
match payload { match payload {
PayloadType::None => self.payload = None, PayloadType::None => self.payload = None,
PayloadType::Payload(pl) => self.payload = Some(pl), PayloadType::Payload(pl) => self.payload = Some(pl),
@ -182,11 +179,9 @@ impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
&self.config, &self.config,
)?; )?;
} }
Message::Chunk(Some(bytes)) => { Message::Chunk(Some(bytes)) => {
self.encoder.encode_chunk(bytes.as_ref(), dst)?; self.encoder.encode_chunk(bytes.as_ref(), dst)?;
} }
Message::Chunk(None) => { Message::Chunk(None) => {
self.encoder.encode_eof(dst)?; self.encoder.encode_eof(dst)?;
} }
@ -198,6 +193,9 @@ impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use bytes::BytesMut;
use http::Method;
use super::*; use super::*;
use crate::HttpMessage as _; use crate::HttpMessage as _;


@ -1,4 +1,4 @@
use std::{io, marker::PhantomData, mem::MaybeUninit, task::Poll}; use std::{convert::TryFrom, io, marker::PhantomData, mem::MaybeUninit, task::Poll};
use actix_codec::Decoder; use actix_codec::Decoder;
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
@ -6,7 +6,7 @@ use http::{
header::{self, HeaderName, HeaderValue}, header::{self, HeaderName, HeaderValue},
Method, StatusCode, Uri, Version, Method, StatusCode, Uri, Version,
}; };
use tracing::{debug, error, trace}; use log::{debug, error, trace};
use super::chunked::ChunkedState; use super::chunked::ChunkedState;
use crate::{error::ParseError, header::HeaderMap, ConnectionType, Request, ResponseHead}; use crate::{error::ParseError, header::HeaderMap, ConnectionType, Request, ResponseHead};
@ -46,23 +46,6 @@ pub(crate) enum PayloadLength {
None, None,
} }
impl PayloadLength {
/// Returns true if variant is `None`.
fn is_none(&self) -> bool {
matches!(self, Self::None)
}
/// Returns true if variant is represents zero-length (not none) payload.
fn is_zero(&self) -> bool {
matches!(
self,
PayloadLength::Payload(PayloadType::Payload(PayloadDecoder {
kind: Kind::Length(0)
}))
)
}
}
pub(crate) trait MessageType: Sized { pub(crate) trait MessageType: Sized {
fn set_connection_type(&mut self, conn_type: Option<ConnectionType>); fn set_connection_type(&mut self, conn_type: Option<ConnectionType>);
@ -76,7 +59,6 @@ pub(crate) trait MessageType: Sized {
&mut self, &mut self,
slice: &Bytes, slice: &Bytes,
raw_headers: &[HeaderIndex], raw_headers: &[HeaderIndex],
version: Version,
) -> Result<PayloadLength, ParseError> { ) -> Result<PayloadLength, ParseError> {
let mut ka = None; let mut ka = None;
let mut has_upgrade_websocket = false; let mut has_upgrade_websocket = false;
@ -94,7 +76,9 @@ pub(crate) trait MessageType: Sized {
// SAFETY: httparse already checks header value is only visible ASCII bytes // SAFETY: httparse already checks header value is only visible ASCII bytes
// from_maybe_shared_unchecked contains debug assertions so they are omitted here // from_maybe_shared_unchecked contains debug assertions so they are omitted here
let value = unsafe { let value = unsafe {
HeaderValue::from_maybe_shared_unchecked(slice.slice(idx.value.0..idx.value.1)) HeaderValue::from_maybe_shared_unchecked(
slice.slice(idx.value.0..idx.value.1),
)
}; };
match name { match name {
@ -103,23 +87,21 @@ pub(crate) trait MessageType: Sized {
return Err(ParseError::Header); return Err(ParseError::Header);
} }
header::CONTENT_LENGTH => match value.to_str().map(str::trim) { header::CONTENT_LENGTH => match value.to_str() {
Ok(val) if val.starts_with('+') => { Ok(s) if s.trim().starts_with('+') => {
debug!("illegal Content-Length: {:?}", val); debug!("illegal Content-Length: {:?}", s);
return Err(ParseError::Header); return Err(ParseError::Header);
} }
Ok(s) => {
Ok(val) => { if let Ok(len) = s.parse::<u64>() {
if let Ok(len) = val.parse::<u64>() { if len != 0 {
// accept 0 lengths here and remove them in `decode` after all content_length = Some(len);
// headers have been processed to prevent request smuggling issues }
content_length = Some(len);
} else { } else {
debug!("illegal Content-Length: {:?}", val); debug!("illegal Content-Length: {:?}", s);
return Err(ParseError::Header); return Err(ParseError::Header);
} }
} }
Err(_) => { Err(_) => {
debug!("illegal Content-Length: {:?}", value); debug!("illegal Content-Length: {:?}", value);
return Err(ParseError::Header); return Err(ParseError::Header);
@ -132,23 +114,22 @@ pub(crate) trait MessageType: Sized {
return Err(ParseError::Header); return Err(ParseError::Header);
} }
header::TRANSFER_ENCODING if version == Version::HTTP_11 => { header::TRANSFER_ENCODING => {
seen_te = true; seen_te = true;
if let Ok(val) = value.to_str().map(str::trim) { if let Ok(s) = value.to_str().map(str::trim) {
if val.eq_ignore_ascii_case("chunked") { if s.eq_ignore_ascii_case("chunked") {
chunked = true; chunked = true;
} else if val.eq_ignore_ascii_case("identity") { } else if s.eq_ignore_ascii_case("identity") {
// allow silently since multiple TE headers are already checked // allow silently since multiple TE headers are already checked
} else { } else {
debug!("illegal Transfer-Encoding: {:?}", val); debug!("illegal Transfer-Encoding: {:?}", s);
return Err(ParseError::Header); return Err(ParseError::Header);
} }
} else { } else {
return Err(ParseError::Header); return Err(ParseError::Header);
} }
} }
// connection keep-alive state // connection keep-alive state
header::CONNECTION => { header::CONNECTION => {
ka = if let Ok(conn) = value.to_str().map(str::trim) { ka = if let Ok(conn) = value.to_str().map(str::trim) {
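
On master the `Transfer-Encoding` arm above only takes effect for HTTP/1.1 and accepts exactly `chunked` or `identity` (case-insensitively), rejecting anything else. A sketch of that decision in isolation (function name illustrative):

use http::Version;

/// Some(true) => chunked body, Some(false) => ignore the header, None => reject the request.
fn is_chunked(version: Version, value: &str) -> Option<bool> {
    // HTTP/1.0 peers do not understand transfer codings, so the header is not honored there.
    if version != Version::HTTP_11 {
        return Some(false);
    }

    let value = value.trim();
    if value.eq_ignore_ascii_case("chunked") {
        Some(true)
    } else if value.eq_ignore_ascii_case("identity") {
        // allowed, but means "no transfer coding applied"
        Some(false)
    } else {
        None
    }
}
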
@ -165,7 +146,6 @@ pub(crate) trait MessageType: Sized {
None None
}; };
} }
header::UPGRADE => { header::UPGRADE => {
if let Ok(val) = value.to_str().map(str::trim) { if let Ok(val) = value.to_str().map(str::trim) {
if val.eq_ignore_ascii_case("websocket") { if val.eq_ignore_ascii_case("websocket") {
@ -173,23 +153,19 @@ pub(crate) trait MessageType: Sized {
} }
} }
} }
header::EXPECT => { header::EXPECT => {
let bytes = value.as_bytes(); let bytes = value.as_bytes();
if bytes.len() >= 4 && &bytes[0..4] == b"100-" { if bytes.len() >= 4 && &bytes[0..4] == b"100-" {
expect = true; expect = true;
} }
} }
_ => {} _ => {}
} }
headers.append(name, value); headers.append(name, value);
} }
} }
self.set_connection_type(ka); self.set_connection_type(ka);
if expect { if expect {
self.set_expect() self.set_expect()
} }
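
The `Expect` arm above only inspects the first four bytes, so any `100-`-prefixed token (in practice `100-continue`) switches the dispatcher into the continue handshake. The equivalent check in isolation:

/// Mirrors the prefix test above: matches `100-continue` and any other `100-` token.
fn expects_continue(value: &[u8]) -> bool {
    value.len() >= 4 && &value[..4] == b"100-"
}
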
@ -233,16 +209,15 @@ impl MessageType for Request {
let (len, method, uri, ver, h_len) = { let (len, method, uri, ver, h_len) = {
// SAFETY: // SAFETY:
// Create an uninitialized array of `MaybeUninit`. The `assume_init` is safe because the // Create an uninitialized array of `MaybeUninit`. The `assume_init` is
// type we are claiming to have initialized here is a bunch of `MaybeUninit`s, which // safe because the type we are claiming to have initialized here is a
// do not require initialization. // bunch of `MaybeUninit`s, which do not require initialization.
let mut parsed = unsafe { let mut parsed = unsafe {
MaybeUninit::<[MaybeUninit<httparse::Header<'_>>; MAX_HEADERS]>::uninit() MaybeUninit::<[MaybeUninit<httparse::Header<'_>>; MAX_HEADERS]>::uninit()
.assume_init() .assume_init()
}; };
let mut req = httparse::Request::new(&mut []); let mut req = httparse::Request::new(&mut []);
match req.parse_with_uninit_headers(src, &mut parsed)? { match req.parse_with_uninit_headers(src, &mut parsed)? {
httparse::Status::Complete(len) => { httparse::Status::Complete(len) => {
let method = Method::from_bytes(req.method.unwrap().as_bytes()) let method = Method::from_bytes(req.method.unwrap().as_bytes())
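
For comparison with the `MaybeUninit` header array used above, here is a minimal request parse using the safe, initialized-headers form of the `httparse` API (the fixed capacity of 96 stands in for this module's `MAX_HEADERS` and is illustrative):

fn parse_request_line(buf: &[u8]) -> Result<Option<(String, String)>, httparse::Error> {
    // Stack-allocated, fully initialized header slots instead of MaybeUninit.
    let mut headers = [httparse::EMPTY_HEADER; 96];
    let mut req = httparse::Request::new(&mut headers);

    match req.parse(buf)? {
        // Complete: the method and path are set by the parser.
        httparse::Status::Complete(_len) => Ok(Some((
            req.method.unwrap_or("").to_owned(),
            req.path.unwrap_or("").to_owned(),
        ))),
        // Partial: more bytes are needed before the head can be parsed.
        httparse::Status::Partial => Ok(None),
    }
}
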
@ -257,7 +232,6 @@ impl MessageType for Request {
(len, method, uri, version, req.headers.len()) (len, method, uri, version, req.headers.len())
} }
httparse::Status::Partial => { httparse::Status::Partial => {
return if src.len() >= MAX_BUFFER_SIZE { return if src.len() >= MAX_BUFFER_SIZE {
trace!("MAX_BUFFER_SIZE unprocessed data reached, closing"); trace!("MAX_BUFFER_SIZE unprocessed data reached, closing");
@ -273,21 +247,7 @@ impl MessageType for Request {
let mut msg = Request::new(); let mut msg = Request::new();
// convert headers // convert headers
let mut length = msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len], ver)?; let length = msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len])?;
// disallow HTTP/1.0 POST requests that do not contain a Content-Length headers
// see https://datatracker.ietf.org/doc/html/rfc1945#section-7.2.2
if ver == Version::HTTP_10 && method == Method::POST && length.is_none() {
debug!("no Content-Length specified for HTTP/1.0 POST request");
return Err(ParseError::Header);
}
// Remove CL value if 0 now that all headers and HTTP/1.0 special cases are processed.
// Protects against some request smuggling attacks.
// See https://github.com/actix/actix-web/issues/2767.
if length.is_zero() {
length = PayloadLength::None;
}
// payload decoder // payload decoder
let decoder = match length { let decoder = match length {
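
The two master-only checks above run once header parsing is finished: an HTTP/1.0 POST with no `Content-Length` is rejected, and an explicit `Content-Length: 0` is normalized to "no payload" to close the request-smuggling vector tracked in actix/actix-web#2767. A compact sketch of that post-processing (types and names illustrative):

use http::{Method, Version};

#[derive(Debug, PartialEq)]
enum Length {
    None,
    Sized(u64),
    Chunked,
}

fn finalize_length(ver: Version, method: &Method, len: Length) -> Result<Length, &'static str> {
    // HTTP/1.0 has no chunked coding, so a POST body must be delimited by Content-Length.
    if ver == Version::HTTP_10 && *method == Method::POST && len == Length::None {
        return Err("no Content-Length specified for HTTP/1.0 POST request");
    }

    // Once all headers are known, treat an explicit zero length like a missing body.
    Ok(if len == Length::Sized(0) {
        Length::None
    } else {
        len
    })
}
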
@ -331,35 +291,22 @@ impl MessageType for ResponseHead {
let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY; let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
let (len, ver, status, h_len) = { let (len, ver, status, h_len) = {
// SAFETY: let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
// Create an uninitialized array of `MaybeUninit`. The `assume_init` is safe because the
// type we are claiming to have initialized here is a bunch of `MaybeUninit`s, which
// do not require initialization.
let mut parsed = unsafe {
MaybeUninit::<[MaybeUninit<httparse::Header<'_>>; MAX_HEADERS]>::uninit()
.assume_init()
};
let mut res = httparse::Response::new(&mut []); let mut res = httparse::Response::new(&mut parsed);
match res.parse(src)? {
let mut config = httparse::ParserConfig::default();
config.allow_spaces_after_header_name_in_responses(true);
match config.parse_response_with_uninit_headers(&mut res, src, &mut parsed)? {
httparse::Status::Complete(len) => { httparse::Status::Complete(len) => {
let version = if res.version.unwrap() == 1 { let version = if res.version.unwrap() == 1 {
Version::HTTP_11 Version::HTTP_11
} else { } else {
Version::HTTP_10 Version::HTTP_10
}; };
let status = StatusCode::from_u16(res.code.unwrap())
let status = .map_err(|_| ParseError::Status)?;
StatusCode::from_u16(res.code.unwrap()).map_err(|_| ParseError::Status)?;
HeaderIndex::record(src, res.headers, &mut headers); HeaderIndex::record(src, res.headers, &mut headers);
(len, version, status, res.headers.len()) (len, version, status, res.headers.len())
} }
httparse::Status::Partial => { httparse::Status::Partial => {
return if src.len() >= MAX_BUFFER_SIZE { return if src.len() >= MAX_BUFFER_SIZE {
error!("MAX_BUFFER_SIZE unprocessed data reached, closing"); error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
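
The response path on master parses through `httparse::ParserConfig` so that stray spaces after a header name (for example `Server : nginx`) are tolerated. A minimal sketch of enabling that leniency, using the plain initialized-headers entry point rather than the uninit variant above:

fn parse_status(buf: &[u8]) -> Result<Option<u16>, httparse::Error> {
    let mut headers = [httparse::EMPTY_HEADER; 96];
    let mut res = httparse::Response::new(&mut headers);

    let mut config = httparse::ParserConfig::default();
    config.allow_spaces_after_header_name_in_responses(true);

    match config.parse_response(&mut res, buf)? {
        httparse::Status::Complete(_) => Ok(res.code), // status code of the parsed head
        httparse::Status::Partial => Ok(None),         // need more bytes
    }
}
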
@ -375,14 +322,7 @@ impl MessageType for ResponseHead {
msg.version = ver; msg.version = ver;
// convert headers // convert headers
let mut length = msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len], ver)?; let length = msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len])?;
// Remove CL value if 0 now that all headers and HTTP/1.0 special cases are processed.
// Protects against some request smuggling attacks.
// See https://github.com/actix/actix-web/issues/2767.
if length.is_zero() {
length = PayloadLength::None;
}
// message payload // message payload
let decoder = if let PayloadLength::Payload(pl) = length { let decoder = if let PayloadLength::Payload(pl) = length {
@ -418,6 +358,9 @@ pub(crate) const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex {
pub(crate) const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] = pub(crate) const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] =
[EMPTY_HEADER_INDEX; MAX_HEADERS]; [EMPTY_HEADER_INDEX; MAX_HEADERS];
pub(crate) const EMPTY_HEADER_ARRAY: [httparse::Header<'static>; MAX_HEADERS] =
[httparse::EMPTY_HEADER; MAX_HEADERS];
impl HeaderIndex { impl HeaderIndex {
pub(crate) fn record( pub(crate) fn record(
bytes: &[u8], bytes: &[u8],
@ -436,64 +379,61 @@ impl HeaderIndex {
} }
} }
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq)]
/// Chunk type yielded while decoding a payload. /// Http payload item
pub enum PayloadItem { pub enum PayloadItem {
Chunk(Bytes), Chunk(Bytes),
Eof, Eof,
} }
/// Decoder that can handle different payload types. /// Decoders to handle different Transfer-Encodings.
/// ///
/// If a message body does not use `Transfer-Encoding`, it should include a `Content-Length`. /// If a message body does not include a Transfer-Encoding, it *should*
#[derive(Debug, Clone, PartialEq, Eq)] /// include a Content-Length header.
#[derive(Debug, Clone, PartialEq)]
pub struct PayloadDecoder { pub struct PayloadDecoder {
kind: Kind, kind: Kind,
} }
impl PayloadDecoder { impl PayloadDecoder {
/// Constructs a fixed-length payload decoder.
pub fn length(x: u64) -> PayloadDecoder { pub fn length(x: u64) -> PayloadDecoder {
PayloadDecoder { PayloadDecoder {
kind: Kind::Length(x), kind: Kind::Length(x),
} }
} }
/// Constructs a chunked encoding decoder.
pub fn chunked() -> PayloadDecoder { pub fn chunked() -> PayloadDecoder {
PayloadDecoder { PayloadDecoder {
kind: Kind::Chunked(ChunkedState::Size, 0), kind: Kind::Chunked(ChunkedState::Size, 0),
} }
} }
    /// Creates a decoder that yields chunks until the stream returns EOF.
pub fn eof() -> PayloadDecoder { pub fn eof() -> PayloadDecoder {
PayloadDecoder { kind: Kind::Eof } PayloadDecoder { kind: Kind::Eof }
} }
} }
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq)]
enum Kind { enum Kind {
/// A reader used when a `Content-Length` header is passed with a positive integer. /// A Reader used when a Content-Length header is passed with a positive
/// integer.
Length(u64), Length(u64),
/// A Reader used when Transfer-Encoding is `chunked`.
/// A reader used when `Transfer-Encoding` is `chunked`.
Chunked(ChunkedState, u64), Chunked(ChunkedState, u64),
/// A Reader used for responses that don't indicate a length or chunked.
/// A reader used for responses that don't indicate a length or chunked.
/// ///
    /// Note: This should only be used for `Response`s. It is illegal for a `Request` to be sent /// Note: This should only used for `Response`s. It is illegal for a
    /// with both `Content-Length` and `Transfer-Encoding: chunked` missing, as explained /// `Request` to be made with both `Content-Length` and
/// in [RFC 7230 §3.3.3]: /// `Transfer-Encoding: chunked` missing, as explained from the spec:
/// ///
/// > If a Transfer-Encoding header field is present in a response and the chunked transfer /// > If a Transfer-Encoding header field is present in a response and
/// > coding is not the final encoding, the message body length is determined by reading the /// > the chunked transfer coding is not the final encoding, the
/// > connection until it is closed by the server. If a Transfer-Encoding header field is /// > message body length is determined by reading the connection until
/// > present in a request and the chunked transfer coding is not the final encoding, the /// > it is closed by the server. If a Transfer-Encoding header field
/// > message body length cannot be determined reliably; the server MUST respond with the 400 /// > is present in a request and the chunked transfer coding is not
/// > (Bad Request) status code and then close the connection. /// > the final encoding, the message body length cannot be determined
/// /// > reliably; the server MUST respond with the 400 (Bad Request)
/// [RFC 7230 §3.3.3]: https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.3 /// > status code and then close the connection.
Eof, Eof,
} }
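
The three `Kind`s map onto the three framing strategies RFC 7230 §3.3.3 allows: an exact `Content-Length`, chunked transfer coding, or (for responses only) read-until-close. A sketch of picking a decoder from parsed framing information, as it would look inside this module (the helper itself is illustrative):

/// Choose a payload decoder the way the request/response parsers above do.
fn decoder_for(chunked: bool, content_length: Option<u64>, is_response: bool) -> Option<PayloadDecoder> {
    if chunked {
        Some(PayloadDecoder::chunked())
    } else if let Some(len) = content_length {
        Some(PayloadDecoder::length(len))
    } else if is_response {
        // only responses may be delimited by closing the connection
        Some(PayloadDecoder::eof())
    } else {
        // a request with no framing information has no body
        None
    }
}
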
@ -523,7 +463,6 @@ impl Decoder for PayloadDecoder {
Ok(Some(PayloadItem::Chunk(buf))) Ok(Some(PayloadItem::Chunk(buf)))
} }
} }
Kind::Chunked(ref mut state, ref mut size) => { Kind::Chunked(ref mut state, ref mut size) => {
loop { loop {
let mut buf = None; let mut buf = None;
@ -532,7 +471,7 @@ impl Decoder for PayloadDecoder {
*state = match state.step(src, size, &mut buf) { *state = match state.step(src, size, &mut buf) {
Poll::Pending => return Ok(None), Poll::Pending => return Ok(None),
Poll::Ready(Ok(state)) => state, Poll::Ready(Ok(state)) => state,
Poll::Ready(Err(err)) => return Err(err), Poll::Ready(Err(e)) => return Err(e),
}; };
if *state == ChunkedState::End { if *state == ChunkedState::End {
@ -549,7 +488,6 @@ impl Decoder for PayloadDecoder {
} }
} }
} }
Kind::Eof => { Kind::Eof => {
if src.is_empty() { if src.is_empty() {
Ok(None) Ok(None)
@ -563,8 +501,15 @@ impl Decoder for PayloadDecoder {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use bytes::{Bytes, BytesMut};
use http::{Method, Version};
use super::*; use super::*;
use crate::{header::SET_COOKIE, HttpMessage as _}; use crate::{
error::ParseError,
header::{HeaderName, SET_COOKIE},
HttpMessage as _,
};
impl PayloadType { impl PayloadType {
pub(crate) fn unwrap(self) -> PayloadDecoder { pub(crate) fn unwrap(self) -> PayloadDecoder {
@ -644,100 +589,14 @@ mod tests {
} }
#[test] #[test]
fn parse_h09_reject() { fn test_parse_post() {
let mut buf = BytesMut::from( let mut buf = BytesMut::from("POST /test2 HTTP/1.0\r\n\r\n");
"GET /test1 HTTP/0.9\r\n\
\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
reader.decode(&mut buf).unwrap_err();
let mut buf = BytesMut::from(
"POST /test2 HTTP/0.9\r\n\
Content-Length: 3\r\n\
\r\n
abc",
);
let mut reader = MessageDecoder::<Request>::default();
reader.decode(&mut buf).unwrap_err();
}
#[test]
fn parse_h10_get() {
let mut buf = BytesMut::from(
"GET /test1 HTTP/1.0\r\n\
\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
assert_eq!(req.version(), Version::HTTP_10);
assert_eq!(*req.method(), Method::GET);
assert_eq!(req.path(), "/test1");
let mut buf = BytesMut::from(
"GET /test2 HTTP/1.0\r\n\
Content-Length: 0\r\n\
\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
assert_eq!(req.version(), Version::HTTP_10);
assert_eq!(*req.method(), Method::GET);
assert_eq!(req.path(), "/test2");
let mut buf = BytesMut::from(
"GET /test3 HTTP/1.0\r\n\
Content-Length: 3\r\n\
\r\n
abc",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
assert_eq!(req.version(), Version::HTTP_10);
assert_eq!(*req.method(), Method::GET);
assert_eq!(req.path(), "/test3");
}
#[test]
fn parse_h10_post() {
let mut buf = BytesMut::from(
"POST /test1 HTTP/1.0\r\n\
Content-Length: 3\r\n\
\r\n\
abc",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
assert_eq!(req.version(), Version::HTTP_10);
assert_eq!(*req.method(), Method::POST);
assert_eq!(req.path(), "/test1");
let mut buf = BytesMut::from(
"POST /test2 HTTP/1.0\r\n\
Content-Length: 0\r\n\
\r\n",
);
let mut reader = MessageDecoder::<Request>::default(); let mut reader = MessageDecoder::<Request>::default();
let (req, _) = reader.decode(&mut buf).unwrap().unwrap(); let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
assert_eq!(req.version(), Version::HTTP_10); assert_eq!(req.version(), Version::HTTP_10);
assert_eq!(*req.method(), Method::POST); assert_eq!(*req.method(), Method::POST);
assert_eq!(req.path(), "/test2"); assert_eq!(req.path(), "/test2");
let mut buf = BytesMut::from(
"POST /test3 HTTP/1.0\r\n\
\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let err = reader.decode(&mut buf).unwrap_err();
assert!(err.to_string().contains("Header"))
} }
#[test] #[test]
@ -833,98 +692,121 @@ mod tests {
#[test] #[test]
fn test_conn_default_1_0() { fn test_conn_default_1_0() {
let req = parse_ready!(&mut BytesMut::from("GET /test HTTP/1.0\r\n\r\n")); let mut buf = BytesMut::from("GET /test HTTP/1.0\r\n\r\n");
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::Close); assert_eq!(req.head().connection_type(), ConnectionType::Close);
} }
#[test] #[test]
fn test_conn_default_1_1() { fn test_conn_default_1_1() {
let req = parse_ready!(&mut BytesMut::from("GET /test HTTP/1.1\r\n\r\n")); let mut buf = BytesMut::from("GET /test HTTP/1.1\r\n\r\n");
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive); assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive);
} }
#[test] #[test]
fn test_conn_close() { fn test_conn_close() {
let req = parse_ready!(&mut BytesMut::from( let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\ "GET /test HTTP/1.1\r\n\
connection: close\r\n\r\n", connection: close\r\n\r\n",
)); );
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::Close); assert_eq!(req.head().connection_type(), ConnectionType::Close);
let req = parse_ready!(&mut BytesMut::from( let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\ "GET /test HTTP/1.1\r\n\
connection: Close\r\n\r\n", connection: Close\r\n\r\n",
)); );
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::Close); assert_eq!(req.head().connection_type(), ConnectionType::Close);
} }
#[test] #[test]
fn test_conn_close_1_0() { fn test_conn_close_1_0() {
let req = parse_ready!(&mut BytesMut::from( let mut buf = BytesMut::from(
"GET /test HTTP/1.0\r\n\ "GET /test HTTP/1.0\r\n\
connection: close\r\n\r\n", connection: close\r\n\r\n",
)); );
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::Close); assert_eq!(req.head().connection_type(), ConnectionType::Close);
} }
#[test] #[test]
fn test_conn_keep_alive_1_0() { fn test_conn_keep_alive_1_0() {
let req = parse_ready!(&mut BytesMut::from( let mut buf = BytesMut::from(
"GET /test HTTP/1.0\r\n\ "GET /test HTTP/1.0\r\n\
connection: keep-alive\r\n\r\n", connection: keep-alive\r\n\r\n",
)); );
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive); assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive);
let req = parse_ready!(&mut BytesMut::from( let mut buf = BytesMut::from(
"GET /test HTTP/1.0\r\n\ "GET /test HTTP/1.0\r\n\
connection: Keep-Alive\r\n\r\n", connection: Keep-Alive\r\n\r\n",
)); );
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive); assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive);
} }
#[test] #[test]
fn test_conn_keep_alive_1_1() { fn test_conn_keep_alive_1_1() {
let req = parse_ready!(&mut BytesMut::from( let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\ "GET /test HTTP/1.1\r\n\
connection: keep-alive\r\n\r\n", connection: keep-alive\r\n\r\n",
)); );
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive); assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive);
} }
#[test] #[test]
fn test_conn_other_1_0() { fn test_conn_other_1_0() {
let req = parse_ready!(&mut BytesMut::from( let mut buf = BytesMut::from(
"GET /test HTTP/1.0\r\n\ "GET /test HTTP/1.0\r\n\
connection: other\r\n\r\n", connection: other\r\n\r\n",
)); );
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::Close); assert_eq!(req.head().connection_type(), ConnectionType::Close);
} }
#[test] #[test]
fn test_conn_other_1_1() { fn test_conn_other_1_1() {
let req = parse_ready!(&mut BytesMut::from( let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\ "GET /test HTTP/1.1\r\n\
connection: other\r\n\r\n", connection: other\r\n\r\n",
)); );
let req = parse_ready!(&mut buf);
assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive); assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive);
} }
#[test] #[test]
fn test_conn_upgrade() { fn test_conn_upgrade() {
let req = parse_ready!(&mut BytesMut::from( let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\ "GET /test HTTP/1.1\r\n\
upgrade: websockets\r\n\ upgrade: websockets\r\n\
connection: upgrade\r\n\r\n", connection: upgrade\r\n\r\n",
)); );
let req = parse_ready!(&mut buf);
assert!(req.upgrade()); assert!(req.upgrade());
assert_eq!(req.head().connection_type(), ConnectionType::Upgrade); assert_eq!(req.head().connection_type(), ConnectionType::Upgrade);
let req = parse_ready!(&mut BytesMut::from( let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\ "GET /test HTTP/1.1\r\n\
upgrade: Websockets\r\n\ upgrade: Websockets\r\n\
connection: Upgrade\r\n\r\n", connection: Upgrade\r\n\r\n",
)); );
let req = parse_ready!(&mut buf);
assert!(req.upgrade()); assert!(req.upgrade());
assert_eq!(req.head().connection_type(), ConnectionType::Upgrade); assert_eq!(req.head().connection_type(), ConnectionType::Upgrade);
@ -932,62 +814,59 @@ mod tests {
#[test] #[test]
fn test_conn_upgrade_connect_method() { fn test_conn_upgrade_connect_method() {
let req = parse_ready!(&mut BytesMut::from( let mut buf = BytesMut::from(
"CONNECT /test HTTP/1.1\r\n\ "CONNECT /test HTTP/1.1\r\n\
content-type: text/plain\r\n\r\n", content-type: text/plain\r\n\r\n",
)); );
let req = parse_ready!(&mut buf);
assert!(req.upgrade()); assert!(req.upgrade());
} }
#[test] #[test]
fn test_headers_bad_content_length() { fn test_headers_content_length_err_1() {
// string CL let mut buf = BytesMut::from(
expect_parse_err!(&mut BytesMut::from(
"GET /test HTTP/1.1\r\n\ "GET /test HTTP/1.1\r\n\
content-length: line\r\n\r\n", content-length: line\r\n\r\n",
)); );
// negative CL expect_parse_err!(&mut buf)
expect_parse_err!(&mut BytesMut::from(
"GET /test HTTP/1.1\r\n\
content-length: -1\r\n\r\n",
));
} }
#[test] #[test]
fn octal_ish_cl_parsed_as_decimal() { fn test_headers_content_length_err_2() {
let mut buf = BytesMut::from( let mut buf = BytesMut::from(
"POST /test HTTP/1.1\r\n\ "GET /test HTTP/1.1\r\n\
content-length: 011\r\n\r\n", content-length: -1\r\n\r\n",
); );
let mut reader = MessageDecoder::<Request>::default();
let (_req, pl) = reader.decode(&mut buf).unwrap().unwrap(); expect_parse_err!(&mut buf);
assert!(matches!(
pl,
PayloadType::Payload(pl) if pl == PayloadDecoder::length(11)
));
} }
#[test] #[test]
fn test_invalid_header() { fn test_invalid_header() {
expect_parse_err!(&mut BytesMut::from( let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\ "GET /test HTTP/1.1\r\n\
test line\r\n\r\n", test line\r\n\r\n",
)); );
expect_parse_err!(&mut buf);
} }
#[test] #[test]
fn test_invalid_name() { fn test_invalid_name() {
expect_parse_err!(&mut BytesMut::from( let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\ "GET /test HTTP/1.1\r\n\
test[]: line\r\n\r\n", test[]: line\r\n\r\n",
)); );
expect_parse_err!(&mut buf);
} }
#[test] #[test]
fn test_http_request_bad_status_line() { fn test_http_request_bad_status_line() {
expect_parse_err!(&mut BytesMut::from("getpath \r\n\r\n")); let mut buf = BytesMut::from("getpath \r\n\r\n");
expect_parse_err!(&mut buf);
} }
#[test] #[test]
@ -1027,10 +906,11 @@ mod tests {
#[test] #[test]
fn test_http_request_parser_utf8() { fn test_http_request_parser_utf8() {
let req = parse_ready!(&mut BytesMut::from( let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\ "GET /test HTTP/1.1\r\n\
x-test: тест\r\n\r\n", x-test: тест\r\n\r\n",
)); );
let req = parse_ready!(&mut buf);
assert_eq!( assert_eq!(
req.headers().get("x-test").unwrap().as_bytes(), req.headers().get("x-test").unwrap().as_bytes(),
@ -1040,18 +920,24 @@ mod tests {
#[test] #[test]
fn test_http_request_parser_two_slashes() { fn test_http_request_parser_two_slashes() {
let req = parse_ready!(&mut BytesMut::from("GET //path HTTP/1.1\r\n\r\n")); let mut buf = BytesMut::from("GET //path HTTP/1.1\r\n\r\n");
let req = parse_ready!(&mut buf);
assert_eq!(req.path(), "//path"); assert_eq!(req.path(), "//path");
} }
#[test] #[test]
fn test_http_request_parser_bad_method() { fn test_http_request_parser_bad_method() {
expect_parse_err!(&mut BytesMut::from("!12%()+=~$ /get HTTP/1.1\r\n\r\n")); let mut buf = BytesMut::from("!12%()+=~$ /get HTTP/1.1\r\n\r\n");
expect_parse_err!(&mut buf);
} }
#[test] #[test]
fn test_http_request_parser_bad_version() { fn test_http_request_parser_bad_version() {
expect_parse_err!(&mut BytesMut::from("GET //get HT/11\r\n\r\n")); let mut buf = BytesMut::from("GET //get HT/11\r\n\r\n");
expect_parse_err!(&mut buf);
} }
#[test] #[test]
@ -1068,66 +954,29 @@ mod tests {
#[test] #[test]
fn hrs_multiple_content_length() { fn hrs_multiple_content_length() {
expect_parse_err!(&mut BytesMut::from( let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\ "GET / HTTP/1.1\r\n\
Host: example.com\r\n\ Host: example.com\r\n\
Content-Length: 4\r\n\ Content-Length: 4\r\n\
Content-Length: 2\r\n\ Content-Length: 2\r\n\
\r\n\ \r\n\
abcd", abcd",
)); );
expect_parse_err!(&mut BytesMut::from( expect_parse_err!(&mut buf);
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Content-Length: 0\r\n\
Content-Length: 2\r\n\
\r\n\
ab",
));
} }
#[test] #[test]
fn hrs_content_length_plus() { fn hrs_content_length_plus() {
expect_parse_err!(&mut BytesMut::from( let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\ "GET / HTTP/1.1\r\n\
Host: example.com\r\n\ Host: example.com\r\n\
Content-Length: +3\r\n\ Content-Length: +3\r\n\
\r\n\ \r\n\
000", 000",
));
}
#[test]
fn hrs_te_http10() {
// in HTTP/1.0 transfer encoding is ignored, so the request must contain a CL header
expect_parse_err!(&mut BytesMut::from(
"POST / HTTP/1.0\r\n\
Host: example.com\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
3\r\n\
aaa\r\n\
0\r\n\
",
));
}
#[test]
fn hrs_cl_and_te_http10() {
// in HTTP/1.0 transfer encoding is simply ignored so it's fine to have both
let mut buf = BytesMut::from(
"GET / HTTP/1.0\r\n\
Host: example.com\r\n\
Content-Length: 3\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
000",
); );
parse_ready!(&mut buf); expect_parse_err!(&mut buf);
} }
#[test] #[test]

File diff suppressed because it is too large


@ -1,972 +0,0 @@
use std::{future::Future, str, task::Poll, time::Duration};
use actix_codec::Framed;
use actix_rt::{pin, time::sleep};
use actix_service::{fn_service, Service};
use actix_utils::future::{ready, Ready};
use bytes::{Buf, Bytes, BytesMut};
use futures_util::future::lazy;
use super::dispatcher::{Dispatcher, DispatcherState, DispatcherStateProj, Flags};
use crate::{
body::MessageBody,
config::ServiceConfig,
h1::{Codec, ExpectHandler, UpgradeHandler},
service::HttpFlow,
test::{TestBuffer, TestSeqBuffer},
Error, HttpMessage, KeepAlive, Method, OnConnectData, Request, Response, StatusCode,
};
fn find_slice(haystack: &[u8], needle: &[u8], from: usize) -> Option<usize> {
memchr::memmem::find(&haystack[from..], needle)
}
fn stabilize_date_header(payload: &mut [u8]) {
let mut from = 0;
while let Some(pos) = find_slice(payload, b"date", from) {
payload[(from + pos)..(from + pos + 35)]
.copy_from_slice(b"date: Thu, 01 Jan 1970 12:34:56 UTC");
from += 35;
}
}
fn ok_service() -> impl Service<Request, Response = Response<impl MessageBody>, Error = Error> {
status_service(StatusCode::OK)
}
fn status_service(
status: StatusCode,
) -> impl Service<Request, Response = Response<impl MessageBody>, Error = Error> {
fn_service(move |_req: Request| ready(Ok::<_, Error>(Response::new(status))))
}
fn echo_path_service() -> impl Service<Request, Response = Response<impl MessageBody>, Error = Error>
{
fn_service(|req: Request| {
let path = req.path().as_bytes();
ready(Ok::<_, Error>(
Response::ok().set_body(Bytes::copy_from_slice(path)),
))
})
}
fn drop_payload_service() -> impl Service<Request, Response = Response<&'static str>, Error = Error>
{
fn_service(|mut req: Request| async move {
let _ = req.take_payload();
Ok::<_, Error>(Response::with_body(StatusCode::OK, "payload dropped"))
})
}
fn echo_payload_service() -> impl Service<Request, Response = Response<Bytes>, Error = Error> {
fn_service(|mut req: Request| {
Box::pin(async move {
use futures_util::StreamExt as _;
let mut pl = req.take_payload();
let mut body = BytesMut::new();
while let Some(chunk) = pl.next().await {
body.extend_from_slice(chunk.unwrap().chunk())
}
Ok::<_, Error>(Response::ok().set_body(body.freeze()))
})
})
}
#[actix_rt::test]
async fn late_request() {
let mut buf = TestBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(ok_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Ready(_) => panic!("first poll should not be ready"),
Poll::Pending => {}
}
// polls: initial
assert_eq!(h1.poll_count, 1);
buf.extend_read_buf("GET /abcd HTTP/1.1\r\nConnection: close\r\n\r\n");
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("second poll should not be pending"),
Poll::Ready(res) => assert!(res.is_ok()),
}
// polls: initial pending => handle req => shutdown
assert_eq!(h1.poll_count, 3);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 0\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn oneshot_connection() {
let buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n");
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_ok()),
}
// polls: initial => shutdown
assert_eq!(h1.poll_count, 2);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = http_msg(
r"
HTTP/1.1 200 OK
content-length: 5
connection: close
date: Thu, 01 Jan 1970 12:34:56 UTC
/abcd
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
})
.await;
}
#[actix_rt::test]
async fn keep_alive_timeout() {
let buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n");
let cfg = ServiceConfig::new(
KeepAlive::Timeout(Duration::from_millis(200)),
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
assert!(
h1.as_mut().poll(cx).is_pending(),
"keep-alive should prevent poll from resolving"
);
// polls: initial
assert_eq!(h1.poll_count, 1);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
// sleep slightly longer than keep-alive timeout
sleep(Duration::from_millis(250)).await;
lazy(|cx| {
assert!(
h1.as_mut().poll(cx).is_ready(),
"keep-alive should have resolved",
);
// polls: initial => keep-alive wake-up shutdown
assert_eq!(h1.poll_count, 2);
if let DispatcherStateProj::Normal { inner } = h1.project().inner.project() {
// connection closed
assert!(inner.flags.contains(Flags::SHUTDOWN));
assert!(inner.flags.contains(Flags::WRITE_DISCONNECT));
// and nothing added to write buffer
assert!(buf.write_buf_slice().is_empty());
}
})
.await;
}
#[actix_rt::test]
async fn keep_alive_follow_up_req() {
let mut buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n");
let cfg = ServiceConfig::new(
KeepAlive::Timeout(Duration::from_millis(500)),
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
assert!(
h1.as_mut().poll(cx).is_pending(),
"keep-alive should prevent poll from resolving"
);
// polls: initial
assert_eq!(h1.poll_count, 1);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
// sleep for less than KA timeout
sleep(Duration::from_millis(100)).await;
lazy(|cx| {
assert!(
h1.as_mut().poll(cx).is_pending(),
"keep-alive should not have resolved dispatcher yet",
);
// polls: initial => manual
assert_eq!(h1.poll_count, 2);
if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() {
// connection not closed
assert!(!inner.flags.contains(Flags::SHUTDOWN));
assert!(!inner.flags.contains(Flags::WRITE_DISCONNECT));
// and nothing added to write buffer
assert!(buf.write_buf_slice().is_empty());
}
})
.await;
lazy(|cx| {
buf.extend_read_buf(
"\
GET /efg HTTP/1.1\r\n\
Connection: close\r\n\
\r\n\r\n",
);
assert!(
h1.as_mut().poll(cx).is_ready(),
"connection close header should override keep-alive setting",
);
// polls: initial => manual => follow-up req => shutdown
assert_eq!(h1.poll_count, 4);
if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() {
// connection closed
assert!(inner.flags.contains(Flags::SHUTDOWN));
assert!(!inner.flags.contains(Flags::WRITE_DISCONNECT));
}
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 4\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/efg\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn req_parse_err() {
lazy(|cx| {
let buf = TestBuffer::new("GET /test HTTP/1\r\n\r\n");
let services = HttpFlow::new(ok_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
ServiceConfig::default(),
None,
OnConnectData::default(),
);
pin!(h1);
match h1.as_mut().poll(cx) {
Poll::Pending => panic!(),
Poll::Ready(res) => assert!(res.is_err()),
}
if let DispatcherStateProj::Normal { inner } = h1.project().inner.project() {
assert!(inner.flags.contains(Flags::READ_DISCONNECT));
assert_eq!(
&buf.write_buf_slice()[..26],
b"HTTP/1.1 400 Bad Request\r\n"
);
}
})
.await;
}
#[actix_rt::test]
async fn pipelining_ok_then_ok() {
lazy(|cx| {
let buf = TestBuffer::new(
"\
GET /abcd HTTP/1.1\r\n\r\n\
GET /def HTTP/1.1\r\n\r\n\
",
);
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(1),
Duration::from_millis(1),
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_ok()),
}
// polls: initial => shutdown
assert_eq!(h1.poll_count, 2);
let mut res = buf.write_buf_slice_mut();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
HTTP/1.1 200 OK\r\n\
content-length: 4\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/def\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn pipelining_ok_then_bad() {
lazy(|cx| {
let buf = TestBuffer::new(
"\
GET /abcd HTTP/1.1\r\n\r\n\
GET /def HTTP/1\r\n\r\n\
",
);
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(1),
Duration::from_millis(1),
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_err()),
}
// polls: initial => shutdown
assert_eq!(h1.poll_count, 1);
let mut res = buf.write_buf_slice_mut();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
HTTP/1.1 400 Bad Request\r\n\
content-length: 0\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn expect_handling() {
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::ZERO,
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_payload_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
buf.extend_read_buf(
"\
POST /upload HTTP/1.1\r\n\
Content-Length: 5\r\n\
Expect: 100-continue\r\n\
\r\n\
",
);
pin!(h1);
assert!(h1.as_mut().poll(cx).is_pending());
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
// polls: manual
assert_eq!(h1.poll_count, 1);
if let DispatcherState::Normal { ref inner } = h1.inner {
let io = inner.io.as_ref().unwrap();
let res = &io.write_buf()[..];
assert_eq!(
str::from_utf8(res).unwrap(),
"HTTP/1.1 100 Continue\r\n\r\n"
);
}
buf.extend_read_buf("12345");
assert!(h1.as_mut().poll(cx).is_ready());
// polls: manual manual shutdown
assert_eq!(h1.poll_count, 3);
if let DispatcherState::Normal { ref inner } = h1.inner {
let io = inner.io.as_ref().unwrap();
let mut res = io.write_buf()[..].to_owned();
stabilize_date_header(&mut res);
assert_eq!(
str::from_utf8(&res).unwrap(),
"\
HTTP/1.1 100 Continue\r\n\
\r\n\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\
\r\n\
12345\
"
);
}
})
.await;
}
#[actix_rt::test]
async fn expect_eager() {
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::ZERO,
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
buf.extend_read_buf(
"\
POST /upload HTTP/1.1\r\n\
Content-Length: 5\r\n\
Expect: 100-continue\r\n\
\r\n\
",
);
pin!(h1);
assert!(h1.as_mut().poll(cx).is_ready());
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
// polls: manual shutdown
assert_eq!(h1.poll_count, 2);
if let DispatcherState::Normal { ref inner } = h1.inner {
let io = inner.io.as_ref().unwrap();
let mut res = io.write_buf()[..].to_owned();
stabilize_date_header(&mut res);
// Despite the content-length header and even though the request payload has not
// been sent, this test expects a complete service response since the payload
// is not used at all. The service passed to dispatcher is path echo and doesn't
// consume payload bytes.
assert_eq!(
str::from_utf8(&res).unwrap(),
"\
HTTP/1.1 100 Continue\r\n\
\r\n\
HTTP/1.1 200 OK\r\n\
content-length: 7\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\
\r\n\
/upload\
"
);
}
})
.await;
}
#[actix_rt::test]
async fn upgrade_handling() {
struct TestUpgrade;
impl<T> Service<(Request, Framed<T, Codec>)> for TestUpgrade {
type Response = ();
type Error = Error;
type Future = Ready<Result<Self::Response, Self::Error>>;
actix_service::always_ready!();
fn call(&self, (req, _framed): (Request, Framed<T, Codec>)) -> Self::Future {
assert_eq!(req.method(), Method::GET);
assert!(req.upgrade());
assert_eq!(req.headers().get("upgrade").unwrap(), "websocket");
ready(Ok(()))
}
}
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::ZERO,
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(ok_service(), ExpectHandler, Some(TestUpgrade));
let h1 = Dispatcher::<_, _, _, _, TestUpgrade>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
buf.extend_read_buf(
"\
GET /ws HTTP/1.1\r\n\
Connection: Upgrade\r\n\
Upgrade: websocket\r\n\
\r\n\
",
);
pin!(h1);
assert!(h1.as_mut().poll(cx).is_ready());
assert!(matches!(&h1.inner, DispatcherState::Upgrade { .. }));
// polls: manual shutdown
assert_eq!(h1.poll_count, 2);
})
.await;
}
// fix in #2624 reverted temporarily
// complete fix tracked in #2745
#[ignore]
#[actix_rt::test]
async fn handler_drop_payload() {
let _ = env_logger::try_init();
let mut buf = TestBuffer::new(http_msg(
r"
POST /drop-payload HTTP/1.1
Content-Length: 3
abc
",
));
let services = HttpFlow::new(
drop_payload_service(),
ExpectHandler,
None::<UpgradeHandler>,
);
let h1 = Dispatcher::new(
buf.clone(),
services,
ServiceConfig::default(),
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(h1.as_mut().poll(cx).is_pending());
// polls: manual
assert_eq!(h1.poll_count, 1);
let mut res = BytesMut::from(buf.take_write_buf().as_ref());
stabilize_date_header(&mut res);
let res = &res[..];
let exp = http_msg(
r"
HTTP/1.1 200 OK
content-length: 15
date: Thu, 01 Jan 1970 12:34:56 UTC
payload dropped
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() {
assert!(inner.state.is_none());
}
})
.await;
lazy(|cx| {
// add message that claims to have payload longer than provided
buf.extend_read_buf(http_msg(
r"
POST /drop-payload HTTP/1.1
Content-Length: 200
abc
",
));
assert!(h1.as_mut().poll(cx).is_pending());
// polls: manual => manual
assert_eq!(h1.poll_count, 2);
let mut res = BytesMut::from(buf.take_write_buf().as_ref());
stabilize_date_header(&mut res);
let res = &res[..];
// expect response immediately even though request side has not finished reading payload
let exp = http_msg(
r"
HTTP/1.1 200 OK
content-length: 15
date: Thu, 01 Jan 1970 12:34:56 UTC
payload dropped
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
})
.await;
lazy(|cx| {
assert!(h1.as_mut().poll(cx).is_ready());
// polls: manual => manual => manual
assert_eq!(h1.poll_count, 3);
let mut res = BytesMut::from(buf.take_write_buf().as_ref());
stabilize_date_header(&mut res);
let res = &res[..];
// expect that unrequested error response is sent back since connection could not be cleaned
let exp = http_msg(
r"
HTTP/1.1 500 Internal Server Error
content-length: 0
connection: close
date: Thu, 01 Jan 1970 12:34:56 UTC
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
})
.await;
}
fn http_msg(msg: impl AsRef<str>) -> BytesMut {
let mut msg = msg
.as_ref()
.trim()
.split('\n')
.map(|line| [line.trim_start(), "\r"].concat())
.collect::<Vec<_>>()
.join("\n");
// remove trailing \r
msg.pop();
if !msg.is_empty() && !msg.contains("\r\n\r\n") {
msg.push_str("\r\n\r\n");
}
BytesMut::from(msg.as_bytes())
}
#[test]
fn http_msg_creates_msg() {
assert_eq!(http_msg(r""), "");
assert_eq!(
http_msg(
r"
POST / HTTP/1.1
Content-Length: 3
abc
"
),
"POST / HTTP/1.1\r\nContent-Length: 3\r\n\r\nabc"
);
assert_eq!(
http_msg(
r"
GET / HTTP/1.1
Content-Length: 3
"
),
"GET / HTTP/1.1\r\nContent-Length: 3\r\n\r\n"
);
}


@ -105,7 +105,7 @@ pub(crate) trait MessageType: Sized {
} }
BodySize::Sized(0) if camel_case => dst.put_slice(b"\r\nContent-Length: 0\r\n"), BodySize::Sized(0) if camel_case => dst.put_slice(b"\r\nContent-Length: 0\r\n"),
BodySize::Sized(0) => dst.put_slice(b"\r\ncontent-length: 0\r\n"), BodySize::Sized(0) => dst.put_slice(b"\r\ncontent-length: 0\r\n"),
BodySize::Sized(len) => helpers::write_content_length(len, dst, camel_case), BodySize::Sized(len) => helpers::write_content_length(len, dst),
BodySize::None => dst.put_slice(b"\r\n"), BodySize::None => dst.put_slice(b"\r\n"),
} }
@ -152,6 +152,7 @@ pub(crate) trait MessageType: Sized {
let k = key.as_str().as_bytes(); let k = key.as_str().as_bytes();
let k_len = k.len(); let k_len = k.len();
// TODO: drain?
for val in value.iter() { for val in value.iter() {
let v = val.as_ref(); let v = val.as_ref();
let v_len = v.len(); let v_len = v.len();
@ -210,14 +211,14 @@ pub(crate) trait MessageType: Sized {
dst.advance_mut(pos); dst.advance_mut(pos);
} }
// optimized date header, set_date writes \r\n
if !has_date { if !has_date {
// optimized date header, write_date_header writes its own \r\n config.set_date(dst);
config.write_date_header(dst, camel_case); } else {
// msg eof
dst.extend_from_slice(b"\r\n");
} }
// end-of-headers marker
dst.extend_from_slice(b"\r\n");
Ok(()) Ok(())
} }
@ -257,12 +258,6 @@ impl MessageType for Response<()> {
None None
} }
fn camel_case(&self) -> bool {
self.head()
.flags
.contains(crate::message::Flags::CAMEL_CASE)
}
fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()> { fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()> {
let head = self.head(); let head = self.head();
let reason = head.reason().as_bytes(); let reason = head.reason().as_bytes();
@ -313,22 +308,21 @@ impl MessageType for RequestHeadType {
_ => return Err(io::Error::new(io::ErrorKind::Other, "unsupported version")), _ => return Err(io::Error::new(io::ErrorKind::Other, "unsupported version")),
} }
) )
.map_err(|err| io::Error::new(io::ErrorKind::Other, err)) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))
} }
} }
impl<T: MessageType> MessageEncoder<T> { impl<T: MessageType> MessageEncoder<T> {
/// Encode chunk. /// Encode message
pub fn encode_chunk(&mut self, msg: &[u8], buf: &mut BytesMut) -> io::Result<bool> { pub fn encode_chunk(&mut self, msg: &[u8], buf: &mut BytesMut) -> io::Result<bool> {
self.te.encode(msg, buf) self.te.encode(msg, buf)
} }
/// Encode EOF. /// Encode eof
pub fn encode_eof(&mut self, buf: &mut BytesMut) -> io::Result<()> { pub fn encode_eof(&mut self, buf: &mut BytesMut) -> io::Result<()> {
self.te.encode_eof(buf) self.te.encode_eof(buf)
} }
/// Encode message.
pub fn encode( pub fn encode(
&mut self, &mut self,
dst: &mut BytesMut, dst: &mut BytesMut,
@ -433,7 +427,7 @@ impl TransferEncoding {
buf.extend_from_slice(b"0\r\n\r\n"); buf.extend_from_slice(b"0\r\n\r\n");
} else { } else {
writeln!(helpers::MutWriter(buf), "{:X}\r", msg.len()) writeln!(helpers::MutWriter(buf), "{:X}\r", msg.len())
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
buf.reserve(msg.len() + 2); buf.reserve(msg.len() + 2);
buf.extend_from_slice(msg); buf.extend_from_slice(msg);
@ -450,7 +444,7 @@ impl TransferEncoding {
buf.extend_from_slice(&msg[..len as usize]); buf.extend_from_slice(&msg[..len as usize]);
*remaining -= len; *remaining -= len as u64;
Ok(*remaining == 0) Ok(*remaining == 0)
} else { } else {
Ok(true) Ok(true)
@ -517,7 +511,6 @@ unsafe fn write_camel_case(value: &[u8], buf: *mut u8, len: usize) {
if let Some(c @ b'a'..=b'z') = iter.next() { if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[index] = c & 0b1101_1111; buffer[index] = c & 0b1101_1111;
} }
index += 1;
} }
index += 1; index += 1;
@ -529,7 +522,7 @@ mod tests {
use std::rc::Rc; use std::rc::Rc;
use bytes::Bytes; use bytes::Bytes;
use http::header::{AUTHORIZATION, UPGRADE_INSECURE_REQUESTS}; use http::header::AUTHORIZATION;
use super::*; use super::*;
use crate::{ use crate::{
@ -560,9 +553,6 @@ mod tests {
head.headers head.headers
.insert(CONTENT_TYPE, HeaderValue::from_static("plain/text")); .insert(CONTENT_TYPE, HeaderValue::from_static("plain/text"));
head.headers
.insert(UPGRADE_INSECURE_REQUESTS, HeaderValue::from_static("1"));
let mut head = RequestHeadType::Owned(head); let mut head = RequestHeadType::Owned(head);
let _ = head.encode_headers( let _ = head.encode_headers(
@ -578,7 +568,6 @@ mod tests {
assert!(data.contains("Connection: close\r\n")); assert!(data.contains("Connection: close\r\n"));
assert!(data.contains("Content-Type: plain/text\r\n")); assert!(data.contains("Content-Type: plain/text\r\n"));
assert!(data.contains("Date: date\r\n")); assert!(data.contains("Date: date\r\n"));
assert!(data.contains("Upgrade-Insecure-Requests: 1\r\n"));
let _ = head.encode_headers( let _ = head.encode_headers(
&mut bytes, &mut bytes,
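The encoder hunk above also exercises `write_camel_case`, which rewrites lowercase header names into their Camel-Case form. The core trick is `c & 0b1101_1111`: clearing bit 0x20 maps ASCII 'a'..='z' onto 'A'..='Z'. A safe, standalone sketch of the same idea (the crate's version writes through a raw pointer for speed):

fn camel_case_header(name: &str) -> String {
    let mut out = String::with_capacity(name.len());
    let mut upper_next = true;

    for &byte in name.as_bytes() {
        if upper_next && byte.is_ascii_lowercase() {
            // clear bit 0x20 to upper-case the first letter of each segment
            out.push((byte & 0b1101_1111) as char);
        } else {
            out.push(byte as char);
        }
        upper_next = byte == b'-';
    }

    out
}

fn main() {
    assert_eq!(camel_case_header("content-length"), "Content-Length");
    assert_eq!(
        camel_case_header("upgrade-insecure-requests"),
        "Upgrade-Insecure-Requests"
    );
}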


@ -7,34 +7,28 @@ mod client;
mod codec; mod codec;
mod decoder; mod decoder;
mod dispatcher; mod dispatcher;
#[cfg(test)]
mod dispatcher_tests;
mod encoder; mod encoder;
mod expect; mod expect;
mod payload; mod payload;
mod service; mod service;
mod timer;
mod upgrade; mod upgrade;
mod utils; mod utils;
pub use self::{ pub use self::client::{ClientCodec, ClientPayloadCodec};
client::{ClientCodec, ClientPayloadCodec}, pub use self::codec::Codec;
codec::Codec, pub use self::dispatcher::Dispatcher;
dispatcher::Dispatcher, pub use self::expect::ExpectHandler;
expect::ExpectHandler, pub use self::payload::Payload;
payload::Payload, pub use self::service::{H1Service, H1ServiceHandler};
service::{H1Service, H1ServiceHandler}, pub use self::upgrade::UpgradeHandler;
upgrade::UpgradeHandler, pub use self::utils::SendResponse;
utils::SendResponse,
};
#[derive(Debug)] #[derive(Debug)]
/// Codec message /// Codec message
pub enum Message<T> { pub enum Message<T> {
/// HTTP message. /// Http message
Item(T), Item(T),
/// Payload chunk
/// Payload chunk.
Chunk(Option<Bytes>), Chunk(Option<Bytes>),
} }
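`Message<T>` is what the HTTP/1 codec yields while decoding: either a complete head or a payload chunk. A small consuming sketch, assuming `Chunk(None)` marks the end of the payload; the enum is mirrored locally so the snippet stands alone.

use bytes::Bytes;

// Local mirror of the enum above, only for illustration.
enum Message<T> {
    Item(T),
    Chunk(Option<Bytes>),
}

fn describe(msg: Message<&str>) -> String {
    match msg {
        Message::Item(head) => format!("decoded head: {head}"),
        Message::Chunk(Some(data)) => format!("payload chunk of {} bytes", data.len()),
        Message::Chunk(None) => "end of payload".to_owned(),
    }
}

fn main() {
    assert_eq!(describe(Message::Item("GET /")), "decoded head: GET /");
    assert_eq!(
        describe(Message::Chunk(Some(Bytes::from_static(b"abc")))),
        "payload chunk of 3 bytes"
    );
    assert_eq!(describe(Message::<&str>::Chunk(None)), "end of payload");
}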


@ -16,7 +16,7 @@ use crate::error::PayloadError;
/// max buffer size 32k /// max buffer size 32k
pub(crate) const MAX_BUFFER_SIZE: usize = 32_768; pub(crate) const MAX_BUFFER_SIZE: usize = 32_768;
#[derive(Debug, PartialEq, Eq)] #[derive(Debug, PartialEq)]
pub enum PayloadStatus { pub enum PayloadStatus {
Read, Read,
Pause, Pause,
@ -117,7 +117,6 @@ impl PayloadSender {
} }
} }
#[allow(clippy::needless_pass_by_ref_mut)]
#[inline] #[inline]
pub fn need_read(&self, cx: &mut Context<'_>) -> PayloadStatus { pub fn need_read(&self, cx: &mut Context<'_>) -> PayloadStatus {
// we check need_read only if Payload (other side) is alive, // we check need_read only if Payload (other side) is alive,
@ -175,7 +174,7 @@ impl Inner {
/// Register future waiting data from payload. /// Register future waiting data from payload.
/// Waker would be used in `Inner::wake` /// Waker would be used in `Inner::wake`
fn register(&mut self, cx: &Context<'_>) { fn register(&mut self, cx: &mut Context<'_>) {
if self if self
.task .task
.as_ref() .as_ref()
@ -187,7 +186,7 @@ impl Inner {
// Register future feeding data to payload. // Register future feeding data to payload.
/// Waker would be used in `Inner::wake_io` /// Waker would be used in `Inner::wake_io`
fn register_io(&mut self, cx: &Context<'_>) { fn register_io(&mut self, cx: &mut Context<'_>) {
if self if self
.io_task .io_task
.as_ref() .as_ref()
@ -222,7 +221,7 @@ impl Inner {
fn poll_next( fn poll_next(
mut self: Pin<&mut Self>, mut self: Pin<&mut Self>,
cx: &Context<'_>, cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> { ) -> Poll<Option<Result<Bytes, PayloadError>>> {
if let Some(data) = self.items.pop_front() { if let Some(data) = self.items.pop_front() {
self.len -= data.len(); self.len -= data.len();
@ -253,15 +252,18 @@ impl Inner {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::panic::{RefUnwindSafe, UnwindSafe};
use actix_utils::future::poll_fn; use actix_utils::future::poll_fn;
use static_assertions::{assert_impl_all, assert_not_impl_any}; use static_assertions::{assert_impl_all, assert_not_impl_any};
use super::*; use super::*;
assert_impl_all!(Payload: Unpin); assert_impl_all!(Payload: Unpin);
assert_not_impl_any!(Payload: Send, Sync); assert_not_impl_any!(Payload: Send, Sync, UnwindSafe, RefUnwindSafe);
assert_impl_all!(Inner: Unpin, Send, Sync); assert_impl_all!(Inner: Unpin, Send, Sync);
assert_not_impl_any!(Inner: UnwindSafe, RefUnwindSafe);
#[actix_rt::test] #[actix_rt::test]
async fn test_unread_data() { async fn test_unread_data() {
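The `register`/`register_io` methods above follow the usual waker-registration pattern: keep at most one `Waker` per interested task, skip the clone when the stored waker would already wake that task (`will_wake`), and wake it when state changes. A minimal sketch of the pattern, detached from the payload types (names here are illustrative):

use std::task::{Context, Waker};

// One waker slot, as kept for the reader task and for the I/O task above.
#[derive(Default)]
struct WakerSlot {
    waker: Option<Waker>,
}

impl WakerSlot {
    // Remember the current task, avoiding a clone if it is already registered.
    fn register(&mut self, cx: &Context<'_>) {
        let already_registered = self
            .waker
            .as_ref()
            .map_or(false, |waker| waker.will_wake(cx.waker()));

        if !already_registered {
            self.waker = Some(cx.waker().clone());
        }
    }

    // Wake the remembered task, if any, e.g. after new payload bytes arrive.
    fn wake(&mut self) {
        if let Some(waker) = self.waker.take() {
            waker.wake();
        }
    }
}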


@ -13,9 +13,7 @@ use actix_service::{
}; };
use actix_utils::future::ready; use actix_utils::future::ready;
use futures_core::future::LocalBoxFuture; use futures_core::future::LocalBoxFuture;
use tracing::error;
use super::{codec::Codec, dispatcher::Dispatcher, ExpectHandler, UpgradeHandler};
use crate::{ use crate::{
body::{BoxBody, MessageBody}, body::{BoxBody, MessageBody},
config::ServiceConfig, config::ServiceConfig,
@ -24,6 +22,8 @@ use crate::{
ConnectCallback, OnConnectData, Request, Response, ConnectCallback, OnConnectData, Request, Response,
}; };
use super::{codec::Codec, dispatcher::Dispatcher, ExpectHandler, UpgradeHandler};
/// `ServiceFactory` implementation for HTTP1 transport /// `ServiceFactory` implementation for HTTP1 transport
pub struct H1Service<T, S, B, X = ExpectHandler, U = UpgradeHandler> { pub struct H1Service<T, S, B, X = ExpectHandler, U = UpgradeHandler> {
srv: S, srv: S,
@ -81,8 +81,13 @@ where
/// Create simple tcp stream service /// Create simple tcp stream service
pub fn tcp( pub fn tcp(
self, self,
) -> impl ServiceFactory<TcpStream, Config = (), Response = (), Error = DispatchError, InitError = ()> ) -> impl ServiceFactory<
{ TcpStream,
Config = (),
Response = (),
Error = DispatchError,
InitError = (),
> {
fn_service(|io: TcpStream| { fn_service(|io: TcpStream| {
let peer_addr = io.peer_addr().ok(); let peer_addr = io.peer_addr().ok();
ready(Ok((io, peer_addr))) ready(Ok((io, peer_addr)))
@ -93,6 +98,8 @@ where
#[cfg(feature = "openssl")] #[cfg(feature = "openssl")]
mod openssl { mod openssl {
use super::*;
use actix_tls::accept::{ use actix_tls::accept::{
openssl::{ openssl::{
reexports::{Error as SslError, SslAcceptor}, reexports::{Error as SslError, SslAcceptor},
@ -101,8 +108,6 @@ mod openssl {
TlsError, TlsError,
}; };
use super::*;
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U> impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
where where
S: ServiceFactory<Request, Config = ()>, S: ServiceFactory<Request, Config = ()>,
@ -152,13 +157,14 @@ mod openssl {
} }
} }
#[cfg(feature = "rustls-0_20")] #[cfg(feature = "rustls")]
mod rustls_0_20 { mod rustls {
use std::io; use std::io;
use actix_service::ServiceFactoryExt as _; use actix_service::ServiceFactoryExt as _;
use actix_tls::accept::{ use actix_tls::accept::{
rustls_0_20::{reexports::ServerConfig, Acceptor, TlsStream}, rustls::{reexports::ServerConfig, Acceptor, TlsStream},
TlsError, TlsError,
}; };
@ -188,7 +194,7 @@ mod rustls_0_20 {
U::Error: fmt::Display + Into<Response<BoxBody>>, U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug, U::InitError: fmt::Debug,
{ {
/// Create Rustls v0.20 based service. /// Create Rustls based service.
pub fn rustls( pub fn rustls(
self, self,
config: ServerConfig, config: ServerConfig,
@ -213,189 +219,6 @@ mod rustls_0_20 {
} }
} }
#[cfg(feature = "rustls-0_21")]
mod rustls_0_21 {
use std::io;
use actix_service::ServiceFactoryExt as _;
use actix_tls::accept::{
rustls_0_21::{reexports::ServerConfig, Acceptor, TlsStream},
TlsError,
};
use super::*;
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
where
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<
(Request, Framed<TlsStream<TcpStream>, Codec>),
Config = (),
Response = (),
>,
U::Future: 'static,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
/// Create Rustls v0.21 based service.
pub fn rustls_021(
self,
config: ServerConfig,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Response = (),
Error = TlsError<io::Error, DispatchError>,
InitError = (),
> {
Acceptor::new(config)
.map_init_err(|_| {
unreachable!("TLS acceptor service factory does not error on init")
})
.map_err(TlsError::into_service_error)
.map(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok();
(io, peer_addr)
})
.and_then(self.map_err(TlsError::Service))
}
}
}
#[cfg(feature = "rustls-0_22")]
mod rustls_0_22 {
use std::io;
use actix_service::ServiceFactoryExt as _;
use actix_tls::accept::{
rustls_0_22::{reexports::ServerConfig, Acceptor, TlsStream},
TlsError,
};
use super::*;
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
where
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<
(Request, Framed<TlsStream<TcpStream>, Codec>),
Config = (),
Response = (),
>,
U::Future: 'static,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
/// Create Rustls v0.22 based service.
pub fn rustls_0_22(
self,
config: ServerConfig,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Response = (),
Error = TlsError<io::Error, DispatchError>,
InitError = (),
> {
Acceptor::new(config)
.map_init_err(|_| {
unreachable!("TLS acceptor service factory does not error on init")
})
.map_err(TlsError::into_service_error)
.map(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok();
(io, peer_addr)
})
.and_then(self.map_err(TlsError::Service))
}
}
}
#[cfg(feature = "rustls-0_23")]
mod rustls_0_23 {
use std::io;
use actix_service::ServiceFactoryExt as _;
use actix_tls::accept::{
rustls_0_23::{reexports::ServerConfig, Acceptor, TlsStream},
TlsError,
};
use super::*;
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
where
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<
(Request, Framed<TlsStream<TcpStream>, Codec>),
Config = (),
Response = (),
>,
U::Future: 'static,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
/// Create Rustls v0.23 based service.
pub fn rustls_0_23(
self,
config: ServerConfig,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Response = (),
Error = TlsError<io::Error, DispatchError>,
InitError = (),
> {
Acceptor::new(config)
.map_init_err(|_| {
unreachable!("TLS acceptor service factory does not error on init")
})
.map_err(TlsError::into_service_error)
.map(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok();
(io, peer_addr)
})
.and_then(self.map_err(TlsError::Service))
}
}
}
impl<T, S, B, X, U> H1Service<T, S, B, X, U> impl<T, S, B, X, U> H1Service<T, S, B, X, U>
where where
S: ServiceFactory<Request, Config = ()>, S: ServiceFactory<Request, Config = ()>,
@ -480,15 +303,15 @@ where
let cfg = self.cfg.clone(); let cfg = self.cfg.clone();
Box::pin(async move { Box::pin(async move {
let expect = expect.await.map_err(|err| { let expect = expect
tracing::error!("Initialization of HTTP expect service error: {err:?}"); .await
})?; .map_err(|e| log::error!("Init http expect service error: {:?}", e))?;
let upgrade = match upgrade { let upgrade = match upgrade {
Some(upgrade) => { Some(upgrade) => {
let upgrade = upgrade.await.map_err(|err| { let upgrade = upgrade
tracing::error!("Initialization of HTTP upgrade service error: {err:?}"); .await
})?; .map_err(|e| log::error!("Init http upgrade service error: {:?}", e))?;
Some(upgrade) Some(upgrade)
} }
None => None, None => None,
@ -496,7 +319,7 @@ where
let service = service let service = service
.await .await
.map_err(|err| error!("Initialization of HTTP service error: {err:?}"))?; .map_err(|e| log::error!("Init http service error: {:?}", e))?;
Ok(H1ServiceHandler::new( Ok(H1ServiceHandler::new(
cfg, cfg,
@ -534,13 +357,13 @@ where
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self._poll_ready(cx).map_err(|err| { self._poll_ready(cx).map_err(|err| {
error!("HTTP/1 service readiness error: {:?}", err); log::error!("HTTP/1 service readiness error: {:?}", err);
DispatchError::Service(err) DispatchError::Service(err)
}) })
} }
fn call(&self, (io, addr): (T, Option<net::SocketAddr>)) -> Self::Future { fn call(&self, (io, addr): (T, Option<net::SocketAddr>)) -> Self::Future {
let conn_data = OnConnectData::from_io(&io, self.on_connect_ext.as_deref()); let conn_data = OnConnectData::from_io(&io, self.on_connect_ext.as_deref());
Dispatcher::new(io, Rc::clone(&self.flow), self.cfg.clone(), addr, conn_data) Dispatcher::new(io, self.flow.clone(), self.cfg.clone(), addr, conn_data)
} }
} }


@ -1,81 +0,0 @@
use std::{fmt, future::Future, pin::Pin, task::Context};
use actix_rt::time::{Instant, Sleep};
use tracing::trace;
#[derive(Debug)]
pub(super) enum TimerState {
Disabled,
Inactive,
Active { timer: Pin<Box<Sleep>> },
}
impl TimerState {
pub(super) fn new(enabled: bool) -> Self {
if enabled {
Self::Inactive
} else {
Self::Disabled
}
}
pub(super) fn is_enabled(&self) -> bool {
matches!(self, Self::Active { .. } | Self::Inactive)
}
pub(super) fn set(&mut self, timer: Sleep, line: u32) {
if matches!(self, Self::Disabled) {
trace!("setting disabled timer from line {}", line);
}
*self = Self::Active {
timer: Box::pin(timer),
};
}
pub(super) fn set_and_init(&mut self, cx: &mut Context<'_>, timer: Sleep, line: u32) {
self.set(timer, line);
self.init(cx);
}
pub(super) fn clear(&mut self, line: u32) {
if matches!(self, Self::Disabled) {
trace!("trying to clear a disabled timer from line {}", line);
}
if matches!(self, Self::Inactive) {
trace!("trying to clear an inactive timer from line {}", line);
}
*self = Self::Inactive;
}
pub(super) fn init(&mut self, cx: &mut Context<'_>) {
if let TimerState::Active { timer } = self {
let _ = timer.as_mut().poll(cx);
}
}
}
impl fmt::Display for TimerState {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
TimerState::Disabled => f.write_str("timer is disabled"),
TimerState::Inactive => f.write_str("timer is inactive"),
TimerState::Active { timer } => {
let deadline = timer.deadline();
let now = Instant::now();
if deadline < now {
f.write_str("timer is active and has reached deadline")
} else {
write!(
f,
"timer is active and due to expire in {} milliseconds",
((deadline - now).as_secs_f32() * 1000.0)
)
}
}
}
}
}


@ -4,7 +4,7 @@ use std::{
future::Future, future::Future,
marker::PhantomData, marker::PhantomData,
net, net,
pin::{pin, Pin}, pin::Pin,
rc::Rc, rc::Rc,
task::{Context, Poll}, task::{Context, Poll},
}; };
@ -19,16 +19,15 @@ use h2::{
server::{Connection, SendResponse}, server::{Connection, SendResponse},
Ping, PingPong, Ping, PingPong,
}; };
use log::{error, trace};
use pin_project_lite::pin_project; use pin_project_lite::pin_project;
use crate::{ use crate::{
body::{BodySize, BoxBody, MessageBody}, body::{BodySize, BoxBody, MessageBody},
config::ServiceConfig, config::ServiceConfig,
header::{ header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING},
HeaderName, HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING, UPGRADE,
},
service::HttpFlow, service::HttpFlow,
Extensions, Method, OnConnectData, Payload, Request, Response, ResponseHead, Extensions, OnConnectData, Payload, Request, Response, ResponseHead,
}; };
const CHUNK_SIZE: usize = 16_384; const CHUNK_SIZE: usize = 16_384;
@ -58,15 +57,15 @@ where
conn_data: OnConnectData, conn_data: OnConnectData,
timer: Option<Pin<Box<Sleep>>>, timer: Option<Pin<Box<Sleep>>>,
) -> Self { ) -> Self {
let ping_pong = config.keep_alive().duration().map(|dur| H2PingPong { let ping_pong = config.keep_alive().map(|dur| H2PingPong {
timer: timer timer: timer
.map(|mut timer| { .map(|mut timer| {
// reuse timer slot if it was initialized for handshake // reset timer if it's received from new function.
timer.as_mut().reset((config.now() + dur).into()); timer.as_mut().reset(config.now() + dur);
timer timer
}) })
.unwrap_or_else(|| Box::pin(sleep(dur))), .unwrap_or_else(|| Box::pin(sleep(dur))),
in_flight: false, on_flight: false,
ping_pong: conn.ping_pong().unwrap(), ping_pong: conn.ping_pong().unwrap(),
}); });
@ -83,14 +82,9 @@ where
} }
struct H2PingPong { struct H2PingPong {
/// Handle to send ping frames from the peer.
ping_pong: PingPong,
/// True when a ping has been sent and is waiting for a reply.
in_flight: bool,
/// Timeout for pong response.
timer: Pin<Box<Sleep>>, timer: Pin<Box<Sleep>>,
on_flight: bool,
ping_pong: PingPong,
} }
impl<T, S, B, X, U> Future for Dispatcher<T, S, B, X, U> impl<T, S, B, X, U> Future for Dispatcher<T, S, B, X, U>
@ -117,7 +111,6 @@ where
let payload = crate::h2::Payload::new(body); let payload = crate::h2::Payload::new(body);
let pl = Payload::H2 { payload }; let pl = Payload::H2 { payload };
let mut req = Request::with_payload(pl); let mut req = Request::with_payload(pl);
let head_req = parts.method == Method::HEAD;
let head = req.head_mut(); let head = req.head_mut();
head.uri = parts.uri; head.uri = parts.uri;
@ -126,7 +119,7 @@ where
head.headers = parts.headers.into(); head.headers = parts.headers.into();
head.peer_addr = this.peer_addr; head.peer_addr = this.peer_addr;
req.conn_data.clone_from(&this.conn_data); req.conn_data = this.conn_data.as_ref().map(Rc::clone);
let fut = this.flow.service.call(req); let fut = this.flow.service.call(req);
let config = this.config.clone(); let config = this.config.clone();
@ -135,10 +128,10 @@ where
actix_rt::spawn(async move { actix_rt::spawn(async move {
// resolve service call and send response. // resolve service call and send response.
let res = match fut.await { let res = match fut.await {
Ok(res) => handle_response(res.into(), tx, config, head_req).await, Ok(res) => handle_response(res.into(), tx, config).await,
Err(err) => { Err(err) => {
let res: Response<BoxBody> = err.into(); let res: Response<BoxBody> = err.into();
handle_response(res, tx, config, head_req).await handle_response(res, tx, config).await
} }
}; };
@ -146,49 +139,45 @@ where
if let Err(err) = res { if let Err(err) = res {
match err { match err {
DispatchError::SendResponse(err) => { DispatchError::SendResponse(err) => {
tracing::trace!("Error sending response: {err:?}"); trace!("Error sending HTTP/2 response: {:?}", err)
}
DispatchError::SendData(err) => {
tracing::warn!("Send data error: {err:?}");
} }
DispatchError::SendData(err) => warn!("{:?}", err),
DispatchError::ResponseBody(err) => { DispatchError::ResponseBody(err) => {
tracing::error!("Response payload stream error: {err:?}"); error!("Response payload stream error: {:?}", err)
} }
} }
} }
}); });
} }
Poll::Ready(None) => return Poll::Ready(Ok(())), Poll::Ready(None) => return Poll::Ready(Ok(())),
Poll::Pending => match this.ping_pong.as_mut() { Poll::Pending => match this.ping_pong.as_mut() {
Some(ping_pong) => loop { Some(ping_pong) => loop {
if ping_pong.in_flight { if ping_pong.on_flight {
// When there is an in-flight ping-pong, poll pong and and keep-alive // When have on flight ping pong. poll pong and and keep alive timer.
// timer. On successful pong received, update keep-alive timer to // on success pong received update keep alive timer to determine the next timing of
// determine the next timing of ping pong. // ping pong.
match ping_pong.ping_pong.poll_pong(cx)? { match ping_pong.ping_pong.poll_pong(cx)? {
Poll::Ready(_) => { Poll::Ready(_) => {
ping_pong.in_flight = false; ping_pong.on_flight = false;
let dead_line = this.config.keep_alive_deadline().unwrap(); let dead_line = this.config.keep_alive_expire().unwrap();
ping_pong.timer.as_mut().reset(dead_line.into()); ping_pong.timer.as_mut().reset(dead_line);
} }
Poll::Pending => { Poll::Pending => {
return ping_pong.timer.as_mut().poll(cx).map(|_| Ok(())); return ping_pong.timer.as_mut().poll(cx).map(|_| Ok(()))
} }
} }
} else { } else {
// When there is no in-flight ping-pong, keep-alive timer is used to // When there is no on flight ping pong. keep alive timer is used to wait for next
// wait for next timing of ping-pong. Therefore, at this point it serves // timing of ping pong. Therefore at this point it serves as an interval instead.
// as an interval instead.
ready!(ping_pong.timer.as_mut().poll(cx)); ready!(ping_pong.timer.as_mut().poll(cx));
ping_pong.ping_pong.send_ping(Ping::opaque())?; ping_pong.ping_pong.send_ping(Ping::opaque())?;
let dead_line = this.config.keep_alive_deadline().unwrap(); let dead_line = this.config.keep_alive_expire().unwrap();
ping_pong.timer.as_mut().reset(dead_line.into()); ping_pong.timer.as_mut().reset(dead_line);
ping_pong.in_flight = true; ping_pong.on_flight = true;
} }
}, },
None => return Poll::Pending, None => return Poll::Pending,
@ -208,7 +197,6 @@ async fn handle_response<B>(
res: Response<B>, res: Response<B>,
mut tx: SendResponse<Bytes>, mut tx: SendResponse<Bytes>,
config: ServiceConfig, config: ServiceConfig,
head_req: bool,
) -> Result<(), DispatchError> ) -> Result<(), DispatchError>
where where
B: MessageBody, B: MessageBody,
@ -218,20 +206,20 @@ where
// prepare response. // prepare response.
let mut size = body.size(); let mut size = body.size();
let res = prepare_response(config, res.head(), &mut size); let res = prepare_response(config, res.head(), &mut size);
let eof_or_head = size.is_eof() || head_req; let eof = size.is_eof();
// send response head and return on eof. // send response head and return on eof.
let mut stream = tx let mut stream = tx
.send_response(res, eof_or_head) .send_response(res, eof)
.map_err(DispatchError::SendResponse)?; .map_err(DispatchError::SendResponse)?;
if eof_or_head { if eof {
return Ok(()); return Ok(());
} }
let mut body = pin!(body);
// poll response body and send chunks to client // poll response body and send chunks to client
actix_rt::pin!(body);
while let Some(res) = poll_fn(|cx| body.as_mut().poll_next(cx)).await { while let Some(res) = poll_fn(|cx| body.as_mut().poll_next(cx)).await {
let mut chunk = res.map_err(|err| DispatchError::ResponseBody(err.into()))?; let mut chunk = res.map_err(|err| DispatchError::ResponseBody(err.into()))?;
@ -297,13 +285,13 @@ fn prepare_response(
_ => {} _ => {}
} }
match size { let _ = match size {
BodySize::None | BodySize::Stream => {} BodySize::None | BodySize::Stream => None,
BodySize::Sized(0) => { BodySize::Sized(0) => {
#[allow(clippy::declare_interior_mutable_const)] #[allow(clippy::declare_interior_mutable_const)]
const HV_ZERO: HeaderValue = HeaderValue::from_static("0"); const HV_ZERO: HeaderValue = HeaderValue::from_static("0");
res.headers_mut().insert(CONTENT_LENGTH, HV_ZERO); res.headers_mut().insert(CONTENT_LENGTH, HV_ZERO)
} }
BodySize::Sized(len) => { BodySize::Sized(len) => {
@ -312,28 +300,19 @@ fn prepare_response(
res.headers_mut().insert( res.headers_mut().insert(
CONTENT_LENGTH, CONTENT_LENGTH,
HeaderValue::from_str(buf.format(*len)).unwrap(), HeaderValue::from_str(buf.format(*len)).unwrap(),
); )
} }
}; };
// copy headers // copy headers
for (key, value) in head.headers.iter() { for (key, value) in head.headers.iter() {
match key { match *key {
// omit HTTP/1.x only headers according to: // TODO: consider skipping other headers according to:
// https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2 // https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2
&CONNECTION | &TRANSFER_ENCODING | &UPGRADE => continue, // omit HTTP/1.x only headers
CONNECTION | TRANSFER_ENCODING => continue,
&CONTENT_LENGTH if skip_len => continue, CONTENT_LENGTH if skip_len => continue,
&DATE => has_date = true, DATE => has_date = true,
// omit HTTP/1.x only headers according to:
// https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2
hdr if hdr == HeaderName::from_static("keep-alive")
|| hdr == HeaderName::from_static("proxy-connection") =>
{
continue
}
_ => {} _ => {}
} }
@ -343,7 +322,7 @@ fn prepare_response(
// set date header // set date header
if !has_date { if !has_date {
let mut bytes = BytesMut::with_capacity(29); let mut bytes = BytesMut::with_capacity(29);
config.write_date_header_value(&mut bytes); config.set_date_header(&mut bytes);
res.headers_mut().insert( res.headers_mut().insert(
DATE, DATE,
// SAFETY: serialized date-times are known ASCII strings // SAFETY: serialized date-times are known ASCII strings
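The ping-pong branch above reuses a single timer for two jobs: between pings it is the keep-alive interval, and once a ping is in flight it is the pong timeout. A minimal synchronous sketch of that state machine (type and field names here are illustrative, not the crate's):

use std::time::{Duration, Instant};

enum Action {
    // send an opaque PING frame to the peer
    SendPing,
    // no pong arrived before the deadline; give up on the connection
    Shutdown,
}

struct KeepAlive {
    interval: Duration,
    deadline: Instant,
    in_flight: bool,
}

impl KeepAlive {
    fn new(interval: Duration) -> Self {
        Self {
            interval,
            deadline: Instant::now() + interval,
            in_flight: false,
        }
    }

    // A pong was received: clear the in-flight flag and schedule the next ping.
    fn on_pong(&mut self) {
        self.in_flight = false;
        self.deadline = Instant::now() + self.interval;
    }

    // The shared timer fired: either send the next ping and start waiting for the
    // pong, or conclude that the pong never came.
    fn on_timer(&mut self) -> Action {
        if self.in_flight {
            Action::Shutdown
        } else {
            self.in_flight = true;
            self.deadline = Instant::now() + self.interval;
            Action::SendPing
        }
    }
}

In the dispatcher this logic is driven by polling; updating `deadline` corresponds to resetting the boxed `Sleep` shown above.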


@ -7,7 +7,7 @@ use std::{
}; };
use actix_codec::{AsyncRead, AsyncWrite}; use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::time::{sleep_until, Sleep}; use actix_rt::time::Sleep;
use bytes::Bytes; use bytes::Bytes;
use futures_core::{ready, Stream}; use futures_core::{ready, Stream};
use h2::{ use h2::{
@ -15,16 +15,17 @@ use h2::{
RecvStream, RecvStream,
}; };
mod dispatcher;
mod service;
pub use self::dispatcher::Dispatcher;
pub use self::service::H2Service;
use crate::{ use crate::{
config::ServiceConfig, config::ServiceConfig,
error::{DispatchError, PayloadError}, error::{DispatchError, PayloadError},
}; };
mod dispatcher;
mod service;
pub use self::{dispatcher::Dispatcher, service::H2Service};
/// HTTP/2 peer stream. /// HTTP/2 peer stream.
pub struct Payload { pub struct Payload {
stream: RecvStream, stream: RecvStream,
@ -57,15 +58,16 @@ impl Stream for Payload {
} }
} }
pub(crate) fn handshake_with_timeout<T>(io: T, config: &ServiceConfig) -> HandshakeWithTimeout<T> pub(crate) fn handshake_with_timeout<T>(
io: T,
config: &ServiceConfig,
) -> HandshakeWithTimeout<T>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
{ {
HandshakeWithTimeout { HandshakeWithTimeout {
handshake: handshake(io), handshake: handshake(io),
timer: config timer: config.client_timer().map(Box::pin),
.client_request_deadline()
.map(|deadline| Box::pin(sleep_until(deadline.into()))),
} }
} }
@ -84,7 +86,7 @@ where
let this = self.get_mut(); let this = self.get_mut();
match Pin::new(&mut this.handshake).poll(cx)? { match Pin::new(&mut this.handshake).poll(cx)? {
// return the timer on success handshake; its slot can be re-used for h2 ping-pong // return the timer on success handshake. It can be re-used for h2 ping-pong.
Poll::Ready(conn) => Poll::Ready(Ok((conn, this.timer.take()))), Poll::Ready(conn) => Poll::Ready(Ok((conn, this.timer.take()))),
Poll::Pending => match this.timer.as_mut() { Poll::Pending => match this.timer.as_mut() {
Some(timer) => { Some(timer) => {
@ -99,9 +101,11 @@ where
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::panic::{RefUnwindSafe, UnwindSafe};
use static_assertions::assert_impl_all; use static_assertions::assert_impl_all;
use super::*; use super::*;
assert_impl_all!(Payload: Unpin, Send, Sync); assert_impl_all!(Payload: Unpin, Send, Sync, UnwindSafe, RefUnwindSafe);
} }


@ -14,9 +14,8 @@ use actix_service::{
}; };
use actix_utils::future::ready; use actix_utils::future::ready;
use futures_core::{future::LocalBoxFuture, ready}; use futures_core::{future::LocalBoxFuture, ready};
use tracing::{error, trace}; use log::error;
use super::{dispatcher::Dispatcher, handshake_with_timeout, HandshakeWithTimeout};
use crate::{ use crate::{
body::{BoxBody, MessageBody}, body::{BoxBody, MessageBody},
config::ServiceConfig, config::ServiceConfig,
@ -25,6 +24,8 @@ use crate::{
ConnectCallback, OnConnectData, Request, Response, ConnectCallback, OnConnectData, Request, Response,
}; };
use super::{dispatcher::Dispatcher, handshake_with_timeout, HandshakeWithTimeout};
/// `ServiceFactory` implementation for HTTP/2 transport /// `ServiceFactory` implementation for HTTP/2 transport
pub struct H2Service<T, S, B> { pub struct H2Service<T, S, B> {
srv: S, srv: S,
@ -140,8 +141,8 @@ mod openssl {
} }
} }
#[cfg(feature = "rustls-0_20")] #[cfg(feature = "rustls")]
mod rustls_0_20 { mod rustls {
use std::io; use std::io;
use actix_service::ServiceFactoryExt as _; use actix_service::ServiceFactoryExt as _;
@ -162,7 +163,7 @@ mod rustls_0_20 {
B: MessageBody + 'static, B: MessageBody + 'static,
{ {
/// Create Rustls v0.20 based service. /// Create Rustls based service.
pub fn rustls( pub fn rustls(
self, self,
mut config: ServerConfig, mut config: ServerConfig,
@ -191,159 +192,6 @@ mod rustls_0_20 {
} }
} }
#[cfg(feature = "rustls-0_21")]
mod rustls_0_21 {
use std::io;
use actix_service::ServiceFactoryExt as _;
use actix_tls::accept::{
rustls_0_21::{reexports::ServerConfig, Acceptor, TlsStream},
TlsError,
};
use super::*;
impl<S, B> H2Service<TlsStream<TcpStream>, S, B>
where
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>> + 'static,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service<Request>>::Future: 'static,
B: MessageBody + 'static,
{
/// Create Rustls v0.21 based service.
pub fn rustls_021(
self,
mut config: ServerConfig,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Response = (),
Error = TlsError<io::Error, DispatchError>,
InitError = S::InitError,
> {
let mut protos = vec![b"h2".to_vec()];
protos.extend_from_slice(&config.alpn_protocols);
config.alpn_protocols = protos;
Acceptor::new(config)
.map_init_err(|_| {
unreachable!("TLS acceptor service factory does not error on init")
})
.map_err(TlsError::into_service_error)
.map(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok();
(io, peer_addr)
})
.and_then(self.map_err(TlsError::Service))
}
}
}
#[cfg(feature = "rustls-0_22")]
mod rustls_0_22 {
use std::io;
use actix_service::ServiceFactoryExt as _;
use actix_tls::accept::{
rustls_0_22::{reexports::ServerConfig, Acceptor, TlsStream},
TlsError,
};
use super::*;
impl<S, B> H2Service<TlsStream<TcpStream>, S, B>
where
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>> + 'static,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service<Request>>::Future: 'static,
B: MessageBody + 'static,
{
/// Create Rustls v0.22 based service.
pub fn rustls_0_22(
self,
mut config: ServerConfig,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Response = (),
Error = TlsError<io::Error, DispatchError>,
InitError = S::InitError,
> {
let mut protos = vec![b"h2".to_vec()];
protos.extend_from_slice(&config.alpn_protocols);
config.alpn_protocols = protos;
Acceptor::new(config)
.map_init_err(|_| {
unreachable!("TLS acceptor service factory does not error on init")
})
.map_err(TlsError::into_service_error)
.map(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok();
(io, peer_addr)
})
.and_then(self.map_err(TlsError::Service))
}
}
}
#[cfg(feature = "rustls-0_23")]
mod rustls_0_23 {
use std::io;
use actix_service::ServiceFactoryExt as _;
use actix_tls::accept::{
rustls_0_23::{reexports::ServerConfig, Acceptor, TlsStream},
TlsError,
};
use super::*;
impl<S, B> H2Service<TlsStream<TcpStream>, S, B>
where
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>> + 'static,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service<Request>>::Future: 'static,
B: MessageBody + 'static,
{
/// Create Rustls v0.23 based service.
pub fn rustls_0_23(
self,
mut config: ServerConfig,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Response = (),
Error = TlsError<io::Error, DispatchError>,
InitError = S::InitError,
> {
let mut protos = vec![b"h2".to_vec()];
protos.extend_from_slice(&config.alpn_protocols);
config.alpn_protocols = protos;
Acceptor::new(config)
.map_init_err(|_| {
unreachable!("TLS acceptor service factory does not error on init")
})
.map_err(TlsError::into_service_error)
.map(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok();
(io, peer_addr)
})
.and_then(self.map_err(TlsError::Service))
}
}
}
impl<T, S, B> ServiceFactory<(T, Option<net::SocketAddr>)> for H2Service<T, S, B> impl<T, S, B> ServiceFactory<(T, Option<net::SocketAddr>)> for H2Service<T, S, B>
where where
T: AsyncRead + AsyncWrite + Unpin + 'static, T: AsyncRead + AsyncWrite + Unpin + 'static,
@ -434,7 +282,7 @@ where
H2ServiceHandlerResponse { H2ServiceHandlerResponse {
state: State::Handshake( state: State::Handshake(
Some(Rc::clone(&self.flow)), Some(self.flow.clone()),
Some(self.cfg.clone()), Some(self.cfg.clone()),
addr, addr,
on_connect_data, on_connect_data,
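Every rustls variant above prepares ALPN the same way: prepend `h2` to whatever protocol list the caller configured so HTTP/2 is offered first. A tiny sketch of just that step, using a plain `Vec<Vec<u8>>` like rustls' `alpn_protocols` field:

// Put "h2" in front of the caller's ALPN protocols, keeping their entries after it.
fn prepend_h2(alpn_protocols: &mut Vec<Vec<u8>>) {
    let mut protos: Vec<Vec<u8>> = vec![b"h2".to_vec()];
    protos.extend_from_slice(&alpn_protocols[..]);
    *alpn_protocols = protos;
}

fn main() {
    let mut alpn = vec![b"http/1.1".to_vec()];
    prepend_h2(&mut alpn);
    assert_eq!(alpn, vec![b"h2".to_vec(), b"http/1.1".to_vec()]);
}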


@ -1,61 +0,0 @@
//! Common header names not defined in [`http`].
//!
//! Any headers added to this file will need to be re-exported from the list at `crate::headers`.
use http::header::HeaderName;
/// Response header field that indicates how caches have handled that response and its corresponding
/// request.
///
/// See [RFC 9211](https://www.rfc-editor.org/rfc/rfc9211) for full semantics.
// TODO(breaking): replace with http's version
pub const CACHE_STATUS: HeaderName = HeaderName::from_static("cache-status");
/// Response header field that allows origin servers to control the behavior of CDN caches
/// interposed between them and clients separately from other caches that might handle the response.
///
/// See [RFC 9213](https://www.rfc-editor.org/rfc/rfc9213) for full semantics.
// TODO(breaking): replace with http's version
pub const CDN_CACHE_CONTROL: HeaderName = HeaderName::from_static("cdn-cache-control");
/// Response header field that sends a signal to the user agent that it ought to remove all data of
/// a certain set of types.
///
/// See the [W3C Clear-Site-Data spec] for full semantics.
///
/// [W3C Clear-Site-Data spec]: https://www.w3.org/TR/clear-site-data/#header
pub const CLEAR_SITE_DATA: HeaderName = HeaderName::from_static("clear-site-data");
/// Response header that prevents a document from loading any cross-origin resources that don't
/// explicitly grant the document permission (using [CORP] or [CORS]).
///
/// [CORP]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Cross-Origin_Resource_Policy_(CORP)
/// [CORS]: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS
pub const CROSS_ORIGIN_EMBEDDER_POLICY: HeaderName =
HeaderName::from_static("cross-origin-embedder-policy");
/// Response header that allows you to ensure a top-level document does not share a browsing context
/// group with cross-origin documents.
pub const CROSS_ORIGIN_OPENER_POLICY: HeaderName =
HeaderName::from_static("cross-origin-opener-policy");
/// Response header that conveys a desire that the browser blocks no-cors cross-origin/cross-site
/// requests to the given resource.
pub const CROSS_ORIGIN_RESOURCE_POLICY: HeaderName =
HeaderName::from_static("cross-origin-resource-policy");
/// Response header that provides a mechanism to allow and deny the use of browser features in a
/// document or within any `<iframe>` elements in the document.
pub const PERMISSIONS_POLICY: HeaderName = HeaderName::from_static("permissions-policy");
/// Request header (de-facto standard) for identifying the originating IP address of a client
/// connecting to a web server through a proxy server.
pub const X_FORWARDED_FOR: HeaderName = HeaderName::from_static("x-forwarded-for");
/// Request header (de-facto standard) for identifying the original host requested by the client in
/// the `Host` HTTP request header.
pub const X_FORWARDED_HOST: HeaderName = HeaderName::from_static("x-forwarded-host");
/// Request header (de-facto standard) for identifying the protocol that a client used to connect to
/// your proxy or load balancer.
pub const X_FORWARDED_PROTO: HeaderName = HeaderName::from_static("x-forwarded-proto");
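These constants are ordinary `HeaderName`s for names that the `http` crate does not export itself. A short usage sketch follows; for brevity it uses `http`'s own `HeaderMap` rather than this crate's, and the header value is made up:

use http::header::{HeaderMap, HeaderName, HeaderValue};

fn main() {
    // same pattern as the constants above, which declare these names at module scope
    let x_forwarded_for = HeaderName::from_static("x-forwarded-for");

    let mut headers = HeaderMap::new();
    headers.insert(x_forwarded_for, HeaderValue::from_static("203.0.113.7"));

    assert_eq!(headers.get("x-forwarded-for").unwrap(), "203.0.113.7");
}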


@ -1,5 +1,7 @@
//! [`TryIntoHeaderPair`] trait and implementations. //! [`TryIntoHeaderPair`] trait and implementations.
use std::convert::TryFrom as _;
use super::{ use super::{
Header, HeaderName, HeaderValue, InvalidHeaderName, InvalidHeaderValue, TryIntoHeaderValue, Header, HeaderName, HeaderValue, InvalidHeaderName, InvalidHeaderValue, TryIntoHeaderValue,
}; };


@ -1,5 +1,7 @@
//! [`TryIntoHeaderValue`] trait and implementations. //! [`TryIntoHeaderValue`] trait and implementations.
use std::convert::TryFrom as _;
use bytes::Bytes; use bytes::Bytes;
use http::{header::InvalidHeaderValue, Error as HttpError, HeaderValue}; use http::{header::InvalidHeaderValue, Error as HttpError, HeaderValue};
use mime::Mime; use mime::Mime;


@ -2,7 +2,7 @@
use std::{borrow::Cow, collections::hash_map, iter, ops}; use std::{borrow::Cow, collections::hash_map, iter, ops};
use foldhash::{HashMap as FoldHashMap, HashMapExt as _}; use ahash::AHashMap;
use http::header::{HeaderName, HeaderValue}; use http::header::{HeaderName, HeaderValue};
use smallvec::{smallvec, SmallVec}; use smallvec::{smallvec, SmallVec};
@ -13,9 +13,8 @@ use super::AsHeaderName;
/// `HeaderMap` is a "multi-map" of [`HeaderName`] to one or more [`HeaderValue`]s. /// `HeaderMap` is a "multi-map" of [`HeaderName`] to one or more [`HeaderValue`]s.
/// ///
/// # Examples /// # Examples
///
/// ``` /// ```
/// # use actix_http::header::{self, HeaderMap, HeaderValue}; /// use actix_http::header::{self, HeaderMap, HeaderValue};
/// ///
/// let mut map = HeaderMap::new(); /// let mut map = HeaderMap::new();
/// ///
@ -30,24 +29,9 @@ use super::AsHeaderName;
/// ///
/// assert!(!map.contains_key(header::ORIGIN)); /// assert!(!map.contains_key(header::ORIGIN));
/// ``` /// ```
///
/// Construct a header map using the [`FromIterator`] implementation. Note that it uses the append
/// strategy, so duplicate header names are preserved.
///
/// ```
/// use actix_http::header::{self, HeaderMap, HeaderValue};
///
/// let headers = HeaderMap::from_iter([
/// (header::CONTENT_TYPE, HeaderValue::from_static("text/plain")),
/// (header::COOKIE, HeaderValue::from_static("foo=1")),
/// (header::COOKIE, HeaderValue::from_static("bar=1")),
/// ]);
///
/// assert_eq!(headers.len(), 3);
/// ```
#[derive(Debug, Clone, Default)] #[derive(Debug, Clone, Default)]
pub struct HeaderMap { pub struct HeaderMap {
pub(crate) inner: FoldHashMap<HeaderName, Value>, pub(crate) inner: AHashMap<HeaderName, Value>,
} }
/// A bespoke non-empty list for HeaderMap values. /// A bespoke non-empty list for HeaderMap values.
@ -116,7 +100,7 @@ impl HeaderMap {
/// ``` /// ```
pub fn with_capacity(capacity: usize) -> Self { pub fn with_capacity(capacity: usize) -> Self {
HeaderMap { HeaderMap {
inner: FoldHashMap::with_capacity(capacity), inner: AHashMap::with_capacity(capacity),
} }
} }
@ -166,7 +150,9 @@ impl HeaderMap {
/// assert_eq!(map.len(), 3); /// assert_eq!(map.len(), 3);
/// ``` /// ```
pub fn len(&self) -> usize { pub fn len(&self) -> usize {
self.inner.values().map(|vals| vals.len()).sum() self.inner
.iter()
.fold(0, |acc, (_, values)| acc + values.len())
} }
/// Returns the number of _keys_ stored in the map. /// Returns the number of _keys_ stored in the map.
@ -323,7 +309,7 @@ impl HeaderMap {
pub fn get_all(&self, key: impl AsHeaderName) -> std::slice::Iter<'_, HeaderValue> { pub fn get_all(&self, key: impl AsHeaderName) -> std::slice::Iter<'_, HeaderValue> {
match self.get_value(key) { match self.get_value(key) {
Some(value) => value.iter(), Some(value) => value.iter(),
None => [].iter(), None => (&[]).iter(),
} }
} }
@ -384,8 +370,8 @@ impl HeaderMap {
/// let removed = map.insert(header::ACCEPT, HeaderValue::from_static("text/html")); /// let removed = map.insert(header::ACCEPT, HeaderValue::from_static("text/html"));
/// assert!(!removed.is_empty()); /// assert!(!removed.is_empty());
/// ``` /// ```
pub fn insert(&mut self, name: HeaderName, val: HeaderValue) -> Removed { pub fn insert(&mut self, key: HeaderName, val: HeaderValue) -> Removed {
let value = self.inner.insert(name, Value::one(val)); let value = self.inner.insert(key, Value::one(val));
Removed::new(value) Removed::new(value)
} }
@ -566,39 +552,6 @@ impl HeaderMap {
Keys(self.inner.keys()) Keys(self.inner.keys())
} }
/// Retains only the headers specified by the predicate.
///
/// In other words, removes all headers `(name, val)` for which `retain_fn(&name, &mut val)`
/// returns false.
///
/// The order in which headers are visited should be considered arbitrary.
///
/// # Examples
/// ```
/// # use actix_http::header::{self, HeaderMap, HeaderValue};
/// let mut map = HeaderMap::new();
///
/// map.append(header::HOST, HeaderValue::from_static("duck.com"));
/// map.append(header::SET_COOKIE, HeaderValue::from_static("one=1"));
/// map.append(header::SET_COOKIE, HeaderValue::from_static("two=2"));
///
/// map.retain(|name, val| val.as_bytes().starts_with(b"one"));
///
/// assert_eq!(map.len(), 1);
/// assert!(map.contains_key(&header::SET_COOKIE));
/// ```
pub fn retain<F>(&mut self, mut retain_fn: F)
where
F: FnMut(&HeaderName, &mut HeaderValue) -> bool,
{
self.inner.retain(|name, vals| {
vals.inner.retain(|val| retain_fn(name, val));
// invariant: make sure newly empty value lists are removed
!vals.is_empty()
})
}
/// Clears the map, returning all name-value sets as an iterator. /// Clears the map, returning all name-value sets as an iterator.
/// ///
/// Header names will only be yielded for the first value in each set. All items that are /// Header names will only be yielded for the first value in each set. All items that are
@ -652,34 +605,10 @@ impl<'a> IntoIterator for &'a HeaderMap {
} }
} }
impl FromIterator<(HeaderName, HeaderValue)> for HeaderMap { /// Convert `http::HeaderMap` to our `HeaderMap`.
fn from_iter<T: IntoIterator<Item = (HeaderName, HeaderValue)>>(iter: T) -> Self {
iter.into_iter()
.fold(Self::new(), |mut map, (name, value)| {
map.append(name, value);
map
})
}
}
/// Convert a `http::HeaderMap` to our `HeaderMap`.
impl From<http::HeaderMap> for HeaderMap { impl From<http::HeaderMap> for HeaderMap {
fn from(mut map: http::HeaderMap) -> Self { fn from(mut map: http::HeaderMap) -> HeaderMap {
Self::from_drain(map.drain()) HeaderMap::from_drain(map.drain())
}
}
/// Convert our `HeaderMap` to a `http::HeaderMap`.
impl From<HeaderMap> for http::HeaderMap {
fn from(map: HeaderMap) -> Self {
Self::from_iter(map)
}
}
/// Convert our `&HeaderMap` to a `http::HeaderMap`.
impl From<&HeaderMap> for http::HeaderMap {
fn from(map: &HeaderMap) -> Self {
map.to_owned().into()
} }
} }
@ -701,7 +630,7 @@ impl Removed {
/// Returns true if iterator contains no elements, without consuming it. /// Returns true if iterator contains no elements, without consuming it.
/// ///
/// If called immediately after [`HeaderMap::insert`] or [`HeaderMap::remove`], it will indicate /// If called immediately after [`HeaderMap::insert`] or [`HeaderMap::remove`], it will indicate
/// whether any items were actually replaced or removed, respectively. /// wether any items were actually replaced or removed, respectively.
pub fn is_empty(&self) -> bool { pub fn is_empty(&self) -> bool {
match self.inner { match self.inner {
// size hint lower bound of smallvec is the correct length // size hint lower bound of smallvec is the correct length
@ -830,7 +759,7 @@ impl<'a> Drain<'a> {
} }
} }
impl Iterator for Drain<'_> { impl<'a> Iterator for Drain<'a> {
type Item = (Option<HeaderName>, HeaderValue); type Item = (Option<HeaderName>, HeaderValue);
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {
@ -1014,55 +943,6 @@ mod tests {
assert!(map.is_empty()); assert!(map.is_empty());
} }
#[test]
fn retain() {
let mut map = HeaderMap::new();
map.append(header::LOCATION, HeaderValue::from_static("/test"));
map.append(header::HOST, HeaderValue::from_static("duck.com"));
map.append(header::COOKIE, HeaderValue::from_static("one=1"));
map.append(header::COOKIE, HeaderValue::from_static("two=2"));
assert_eq!(map.len(), 4);
// by value
map.retain(|_, val| !val.as_bytes().contains(&b'/'));
assert_eq!(map.len(), 3);
// by name
map.retain(|name, _| name.as_str() != "cookie");
assert_eq!(map.len(), 1);
// keep but mutate value
map.retain(|_, val| {
*val = HeaderValue::from_static("replaced");
true
});
assert_eq!(map.len(), 1);
assert_eq!(map.get("host").unwrap(), "replaced");
}
#[test]
fn retain_removes_empty_value_lists() {
let mut map = HeaderMap::with_capacity(3);
map.append(header::HOST, HeaderValue::from_static("duck.com"));
map.append(header::HOST, HeaderValue::from_static("duck.com"));
assert_eq!(map.len(), 2);
assert_eq!(map.len_keys(), 1);
assert_eq!(map.inner.len(), 1);
assert_eq!(map.capacity(), 3);
// remove everything
map.retain(|_n, _v| false);
assert_eq!(map.len(), 0);
assert_eq!(map.len_keys(), 0);
assert_eq!(map.inner.len(), 0);
assert_eq!(map.capacity(), 3);
}
#[test] #[test]
fn entries_into_iter() { fn entries_into_iter() {
let mut map = HeaderMap::new(); let mut map = HeaderMap::new();
@ -1160,7 +1040,9 @@ mod tests {
assert!(vals.next().is_none()); assert!(vals.next().is_none());
} }
fn owned_pair<'a>((name, val): (&'a HeaderName, &'a HeaderValue)) -> (HeaderName, HeaderValue) { fn owned_pair<'a>(
(name, val): (&'a HeaderName, &'a HeaderValue),
) -> (HeaderName, HeaderValue) {
(name.clone(), val.clone()) (name.clone(), val.clone())
} }
} }
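Because the map above is a multi-map, `len()` counts values rather than names: `append` accumulates values under one name, while `insert` replaces the whole set and returns the previous values as `Removed`. A short usage sketch against the API shown in this diff:

use actix_http::header::{self, HeaderMap, HeaderValue};

fn main() {
    let mut map = HeaderMap::new();

    // append keeps every value registered under the name
    map.append(header::SET_COOKIE, HeaderValue::from_static("one=1"));
    map.append(header::SET_COOKIE, HeaderValue::from_static("two=2"));
    assert_eq!(map.len(), 2);

    // insert replaces the whole value set and hands back what was there
    let removed = map.insert(header::SET_COOKIE, HeaderValue::from_static("three=3"));
    assert!(!removed.is_empty());
    assert_eq!(map.len(), 1);
}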


@ -1,59 +1,51 @@
//! Pre-defined `HeaderName`s, traits for parsing and conversion, and other header utility methods. //! Pre-defined `HeaderName`s, traits for parsing and conversion, and other header utility methods.
// declaring new header consts will yield this error use percent_encoding::{AsciiSet, CONTROLS};
#![allow(clippy::declare_interior_mutable_const)]
// re-export from http except header map related items // re-export from http except header map related items
pub use ::http::header::{ pub use http::header::{
HeaderName, HeaderValue, InvalidHeaderName, InvalidHeaderValue, ToStrError, HeaderName, HeaderValue, InvalidHeaderName, InvalidHeaderValue, ToStrError,
}; };
// re-export const header names, list is explicit so that any updates to `common` module do not
// conflict with this set // re-export const header names
pub use ::http::header::{ pub use http::header::{
ACCEPT, ACCEPT_CHARSET, ACCEPT_ENCODING, ACCEPT_LANGUAGE, ACCEPT_RANGES, ACCEPT, ACCEPT_CHARSET, ACCEPT_ENCODING, ACCEPT_LANGUAGE, ACCEPT_RANGES,
ACCESS_CONTROL_ALLOW_CREDENTIALS, ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_CREDENTIALS, ACCESS_CONTROL_ALLOW_HEADERS,
ACCESS_CONTROL_ALLOW_ORIGIN, ACCESS_CONTROL_EXPOSE_HEADERS, ACCESS_CONTROL_MAX_AGE, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ACCESS_CONTROL_EXPOSE_HEADERS,
ACCESS_CONTROL_REQUEST_HEADERS, ACCESS_CONTROL_REQUEST_METHOD, AGE, ALLOW, ALT_SVC, ACCESS_CONTROL_MAX_AGE, ACCESS_CONTROL_REQUEST_HEADERS, ACCESS_CONTROL_REQUEST_METHOD, AGE,
AUTHORIZATION, CACHE_CONTROL, CONNECTION, CONTENT_DISPOSITION, CONTENT_ENCODING, ALLOW, ALT_SVC, AUTHORIZATION, CACHE_CONTROL, CONNECTION, CONTENT_DISPOSITION,
CONTENT_LANGUAGE, CONTENT_LENGTH, CONTENT_LOCATION, CONTENT_RANGE, CONTENT_SECURITY_POLICY, CONTENT_ENCODING, CONTENT_LANGUAGE, CONTENT_LENGTH, CONTENT_LOCATION, CONTENT_RANGE,
CONTENT_SECURITY_POLICY_REPORT_ONLY, CONTENT_TYPE, COOKIE, DATE, DNT, ETAG, EXPECT, EXPIRES, CONTENT_SECURITY_POLICY, CONTENT_SECURITY_POLICY_REPORT_ONLY, CONTENT_TYPE, COOKIE, DATE,
FORWARDED, FROM, HOST, IF_MATCH, IF_MODIFIED_SINCE, IF_NONE_MATCH, IF_RANGE, DNT, ETAG, EXPECT, EXPIRES, FORWARDED, FROM, HOST, IF_MATCH, IF_MODIFIED_SINCE,
IF_UNMODIFIED_SINCE, LAST_MODIFIED, LINK, LOCATION, MAX_FORWARDS, ORIGIN, PRAGMA, IF_NONE_MATCH, IF_RANGE, IF_UNMODIFIED_SINCE, LAST_MODIFIED, LINK, LOCATION, MAX_FORWARDS,
PROXY_AUTHENTICATE, PROXY_AUTHORIZATION, PUBLIC_KEY_PINS, PUBLIC_KEY_PINS_REPORT_ONLY, RANGE, ORIGIN, PRAGMA, PROXY_AUTHENTICATE, PROXY_AUTHORIZATION, PUBLIC_KEY_PINS,
REFERER, REFERRER_POLICY, REFRESH, RETRY_AFTER, SEC_WEBSOCKET_ACCEPT, SEC_WEBSOCKET_EXTENSIONS, PUBLIC_KEY_PINS_REPORT_ONLY, RANGE, REFERER, REFERRER_POLICY, REFRESH, RETRY_AFTER,
SEC_WEBSOCKET_KEY, SEC_WEBSOCKET_PROTOCOL, SEC_WEBSOCKET_VERSION, SERVER, SET_COOKIE, SEC_WEBSOCKET_ACCEPT, SEC_WEBSOCKET_EXTENSIONS, SEC_WEBSOCKET_KEY, SEC_WEBSOCKET_PROTOCOL,
STRICT_TRANSPORT_SECURITY, TE, TRAILER, TRANSFER_ENCODING, UPGRADE, UPGRADE_INSECURE_REQUESTS, SEC_WEBSOCKET_VERSION, SERVER, SET_COOKIE, STRICT_TRANSPORT_SECURITY, TE, TRAILER,
USER_AGENT, VARY, VIA, WARNING, WWW_AUTHENTICATE, X_CONTENT_TYPE_OPTIONS, TRANSFER_ENCODING, UPGRADE, UPGRADE_INSECURE_REQUESTS, USER_AGENT, VARY, VIA, WARNING,
X_DNS_PREFETCH_CONTROL, X_FRAME_OPTIONS, X_XSS_PROTECTION, WWW_AUTHENTICATE, X_CONTENT_TYPE_OPTIONS, X_DNS_PREFETCH_CONTROL, X_FRAME_OPTIONS,
X_XSS_PROTECTION,
}; };
use percent_encoding::{AsciiSet, CONTROLS};
use crate::{error::ParseError, HttpMessage}; use crate::{error::ParseError, HttpMessage};
mod as_name; mod as_name;
mod common;
mod into_pair; mod into_pair;
mod into_value; mod into_value;
pub mod map; pub mod map;
mod shared; mod shared;
mod utils; mod utils;
pub use self::{ pub use self::as_name::AsHeaderName;
as_name::AsHeaderName, pub use self::into_pair::TryIntoHeaderPair;
// re-export list is explicit so that any updates to `http` do not conflict with this set pub use self::into_value::TryIntoHeaderValue;
common::{ pub use self::map::HeaderMap;
CACHE_STATUS, CDN_CACHE_CONTROL, CLEAR_SITE_DATA, CROSS_ORIGIN_EMBEDDER_POLICY, pub use self::shared::{
CROSS_ORIGIN_OPENER_POLICY, CROSS_ORIGIN_RESOURCE_POLICY, PERMISSIONS_POLICY, parse_extended_value, q, Charset, ContentEncoding, ExtendedValue, HttpDate, LanguageTag,
X_FORWARDED_FOR, X_FORWARDED_HOST, X_FORWARDED_PROTO, Quality, QualityItem,
}, };
into_pair::TryIntoHeaderPair, pub use self::utils::{
into_value::TryIntoHeaderValue, fmt_comma_delimited, from_comma_delimited, from_one_raw_str, http_percent_encode,
map::HeaderMap,
shared::{
parse_extended_value, q, Charset, ContentEncoding, ExtendedValue, HttpDate, LanguageTag,
Quality, QualityItem,
},
utils::{fmt_comma_delimited, from_comma_delimited, from_one_raw_str, http_percent_encode},
}; };
/// An interface for types that already represent a valid header. /// An interface for types that already represent a valid header.


@ -1,4 +1,4 @@
use std::str::FromStr; use std::{convert::TryFrom, str::FromStr};
use derive_more::{Display, Error}; use derive_more::{Display, Error};
use http::header::InvalidHeaderValue; use http::header::InvalidHeaderValue;
@ -11,7 +11,7 @@ use crate::{
/// Error returned when a content encoding is unknown. /// Error returned when a content encoding is unknown.
#[derive(Debug, Display, Error)] #[derive(Debug, Display, Error)]
#[display("unsupported content encoding")] #[display(fmt = "unsupported content encoding")]
pub struct ContentEncodingParseError; pub struct ContentEncodingParseError;
/// Represents a supported content encoding. /// Represents a supported content encoding.


@ -12,7 +12,7 @@ use crate::header::{Charset, HTTP_VALUE};
/// - A character sequence representing the actual value (`value`), separated by single quotes. /// - A character sequence representing the actual value (`value`), separated by single quotes.
/// ///
/// It is defined in [RFC 5987 §3.2](https://datatracker.ietf.org/doc/html/rfc5987#section-3.2). /// It is defined in [RFC 5987 §3.2](https://datatracker.ietf.org/doc/html/rfc5987#section-3.2).
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq)]
pub struct ExtendedValue { pub struct ExtendedValue {
/// The character set that is used to encode the `value` to a string. /// The character set that is used to encode the `value` to a string.
pub charset: Charset, pub charset: Charset,


@ -4,7 +4,8 @@ use bytes::BytesMut;
use http::header::{HeaderValue, InvalidHeaderValue}; use http::header::{HeaderValue, InvalidHeaderValue};
use crate::{ use crate::{
date::DATE_VALUE_LENGTH, error::ParseError, header::TryIntoHeaderValue, helpers::MutWriter, config::DATE_VALUE_LENGTH, error::ParseError, header::TryIntoHeaderValue,
helpers::MutWriter,
}; };
/// A timestamp with HTTP-style formatting and parsing. /// A timestamp with HTTP-style formatting and parsing.
@ -24,7 +25,8 @@ impl FromStr for HttpDate {
impl fmt::Display for HttpDate { impl fmt::Display for HttpDate {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
httpdate::HttpDate::from(self.0).fmt(f) let date_str = httpdate::fmt_http_date(self.0);
f.write_str(&date_str)
} }
} }
@ -36,7 +38,7 @@ impl TryIntoHeaderValue for HttpDate {
let mut wrt = MutWriter(&mut buf); let mut wrt = MutWriter(&mut buf);
// unwrap: date output is known to be well formed and of known length // unwrap: date output is known to be well formed and of known length
write!(wrt, "{}", self).unwrap(); write!(wrt, "{}", httpdate::fmt_http_date(self.0)).unwrap();
HeaderValue::from_maybe_shared(buf.split().freeze()) HeaderValue::from_maybe_shared(buf.split().freeze())
} }
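The `Display` hunk above swaps between two ways of rendering the same value: `httpdate::fmt_http_date` allocates a `String`, while going through `httpdate::HttpDate` writes directly into the formatter. A small sketch showing that both produce the same RFC 7231 date text, assuming the `httpdate` crate:

use std::time::{Duration, UNIX_EPOCH};

fn main() {
    let when = UNIX_EPOCH + Duration::from_secs(45_296); // 1970-01-01 12:34:56 UTC

    // allocating form
    let formatted = httpdate::fmt_http_date(when);

    // formatter-driven form, via the `Display` impl of the wrapper type
    let wrapped = httpdate::HttpDate::from(when);

    assert_eq!(formatted, wrapped.to_string());
    assert_eq!(formatted, "Thu, 01 Jan 1970 12:34:56 GMT");
}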

Some files were not shown because too many files have changed in this diff.