mirror of https://github.com/fafhrd91/actix-web synced 2025-07-12 13:33:41 +02:00

Compare commits


5 Commits

Author SHA1 Message Date
bc54cdd772 prepare actix-http release 2.2.2 2022-01-21 21:23:22 +00:00
ad7e3c06e7 migrate to brotli crate 2022-01-21 21:16:23 +00:00
0669ed0f06 soft-deprecate NormalizePath::default in v3 (#2529) 2021-12-18 22:57:23 +00:00
c9c36679e4 bump actix http version 2021-08-11 19:21:40 +01:00
655d7b4f05 sec fixes 2021-08-08 21:21:48 +01:00
360 changed files with 26169 additions and 42439 deletions


@ -1,14 +0,0 @@
[alias]
lint = "clippy --workspace --tests --examples --bins -- -Dclippy::todo"
lint-all = "clippy --workspace --all-features --tests --examples --bins -- -Dclippy::todo"
# lib checking
ci-check-min = "hack --workspace check --no-default-features"
ci-check-default = "hack --workspace check"
ci-check-default-tests = "check --workspace --tests"
ci-check-all-feature-powerset="hack --workspace --feature-powerset --skip=__compress,io-uring check"
ci-check-all-feature-powerset-linux="hack --workspace --feature-powerset --skip=__compress check"
# testing
ci-doctest-default = "test --workspace --doc --no-fail-fast -- --nocapture"
ci-doctest = "test --workspace --all-features --doc --no-fail-fast -- --nocapture"

.github/FUNDING.yml vendored

@ -1,3 +0,0 @@
# These are supported funding model platforms
github: [robjtede]


@ -33,5 +33,5 @@ Please search on the [Actix Web issue tracker](https://github.com/actix/actix-we
## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
-- Rust Version (I.e, output of `rustc -V`):
-- Actix Web Version:
+* Rust Version (I.e, output of `rustc -V`):
+* Actix Web Version:


@ -1,8 +1,15 @@
blank_issues_enabled: true
contact_links:
-  - name: Actix Discord
-    url: https://discord.gg/NWpN5mmg3x
-    about: Actix developer discussion and community chat
  - name: GitHub Discussions
    url: https://github.com/actix/actix-web/discussions
    about: Actix Web Q&A
+  - name: Gitter chat (actix-web)
+    url: https://gitter.im/actix/actix-web
+    about: Actix Web Q&A
+  - name: Gitter chat (actix)
+    url: https://gitter.im/actix/actix
+    about: Actix (actor framework) Q&A
+  - name: Actix Discord
+    url: https://discord.gg/NWpN5mmg3x
+    about: Actix developer discussion and community chat


@ -1,21 +1,21 @@
<!-- Thanks for considering contributing actix! -->
-<!-- Please fill out the following to get your PR reviewed quicker. -->
+<!-- Please fill out the following to make our reviews easy. -->
## PR Type
<!-- What kind of change does this PR make? -->
<!-- Bug Fix / Feature / Refactor / Code Style / Other -->
-PR_TYPE
+INSERT_PR_TYPE
## PR Checklist
-<!-- Check your PR fulfills the following items. -->
+Check your PR fulfills the following:
<!-- For draft PRs check the boxes as you complete them. -->
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] A changelog entry has been made for the appropriate packages.
-- [ ] Format code with the latest stable rustfmt.
-- [ ] (Team) Label with affected crates and semver status.
+- [ ] Format code with the latest stable rustfmt
## Overview


@ -1,6 +1,8 @@
-name: Benchmark
+name: Benchmark (Linux)
on:
+  pull_request:
+    types: [opened, synchronize, reopened]
  push:
    branches:
      - master


@ -1,153 +0,0 @@
name: CI (master only)
on:
push:
branches: [master]
jobs:
build_and_test_nightly:
strategy:
fail-fast: false
matrix:
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-2022, triple: x86_64-pc-windows-msvc }
version:
- nightly
name: ${{ matrix.target.name }} / ${{ matrix.version }}
runs-on: ${{ matrix.target.os }}
env:
CI: 1
CARGO_INCREMENTAL: 0
VCPKGRS_DYNAMIC: 1
steps:
- uses: actions/checkout@v2
# install OpenSSL on Windows
# TODO: GitHub actions docs state that OpenSSL is
# already installed on these Windows machines somewhere
- name: Set vcpkg root
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install OpenSSL
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: vcpkg install openssl:x64-windows
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check minimal
uses: actions-rs/cargo@v1
with: { command: ci-check-min }
- name: check default
uses: actions-rs/cargo@v1
with: { command: ci-check-default }
- name: tests
timeout-minutes: 60
run: |
cargo test --lib --tests -p=actix-router --all-features
cargo test --lib --tests -p=actix-http --all-features
cargo test --lib --tests -p=actix-web --features=rustls,openssl -- --skip=test_reading_deflate_encoding_large_random_rustls
cargo test --lib --tests -p=actix-web-codegen --all-features
cargo test --lib --tests -p=awc --all-features
cargo test --lib --tests -p=actix-http-test --all-features
cargo test --lib --tests -p=actix-test --all-features
cargo test --lib --tests -p=actix-files
cargo test --lib --tests -p=actix-multipart --all-features
cargo test --lib --tests -p=actix-web-actors --all-features
- name: tests (io-uring)
if: matrix.target.os == 'ubuntu-latest'
timeout-minutes: 60
run: >
sudo bash -c "ulimit -Sl 512
&& ulimit -Hl 512
&& PATH=$PATH:/usr/share/rust/.cargo/bin
&& RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo test --lib --tests -p=actix-files --all-features"
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.6.3 --no-default-features --features ci-autoclean
cargo-cache
ci_feature_powerset_check:
name: Verify Feature Combinations
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check feature combinations
uses: actions-rs/cargo@v1
with: { command: ci-check-all-feature-powerset }
- name: check feature combinations
uses: actions-rs/cargo@v1
with: { command: ci-check-all-feature-powerset-linux }
coverage:
name: coverage
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Generate coverage file
run: |
cargo install cargo-tarpaulin --vers "^0.13"
cargo tarpaulin --workspace --features=rustls,openssl --out Xml --verbose
- name: Upload to Codecov
uses: codecov/codecov-action@v1
with: { file: cobertura.xml }


@ -1,120 +0,0 @@
name: CI
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches: [master]
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-2022, triple: x86_64-pc-windows-msvc }
version:
- 1.54.0 # MSRV
- stable
name: ${{ matrix.target.name }} / ${{ matrix.version }}
runs-on: ${{ matrix.target.os }}
env:
CI: 1
CARGO_INCREMENTAL: 0
VCPKGRS_DYNAMIC: 1
steps:
- uses: actions/checkout@v2
# install OpenSSL on Windows
# TODO: GitHub actions docs state that OpenSSL is
# already installed on these Windows machines somewhere
- name: Set vcpkg root
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install OpenSSL
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: vcpkg install openssl:x64-windows
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check minimal
uses: actions-rs/cargo@v1
with: { command: ci-check-min }
- name: check default
uses: actions-rs/cargo@v1
with: { command: ci-check-default }
- name: tests
timeout-minutes: 60
run: |
cargo test --lib --tests -p=actix-router --all-features
cargo test --lib --tests -p=actix-http --all-features
cargo test --lib --tests -p=actix-web --features=rustls,openssl -- --skip=test_reading_deflate_encoding_large_random_rustls
cargo test --lib --tests -p=actix-web-codegen --all-features
cargo test --lib --tests -p=awc --all-features
cargo test --lib --tests -p=actix-http-test --all-features
cargo test --lib --tests -p=actix-test --all-features
cargo test --lib --tests -p=actix-files
cargo test --lib --tests -p=actix-multipart --all-features
cargo test --lib --tests -p=actix-web-actors --all-features
- name: tests (io-uring)
if: matrix.target.os == 'ubuntu-latest'
timeout-minutes: 60
run: >
sudo bash -c "ulimit -Sl 512
&& ulimit -Hl 512
&& PATH=$PATH:/usr/share/rust/.cargo/bin
&& RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo test --lib --tests -p=actix-files --all-features"
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.6.3 --no-default-features --features ci-autoclean
cargo-cache
rustdoc:
name: doc tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust (nightly)
uses: actions-rs/toolchain@v1
with:
toolchain: nightly-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.3.0
- name: doc tests
uses: actions-rs/cargo@v1
timeout-minutes: 60
with: { command: ci-doctest }


@ -1,48 +1,32 @@
-name: Lint
on:
  pull_request:
    types: [opened, synchronize, reopened]
+name: Clippy and rustfmt Check
jobs:
-  fmt:
+  clippy_check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
-      - name: Install Rust
-        uses: actions-rs/toolchain@v1
+      - uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
-          profile: minimal
          components: rustfmt
-          override: true
      - name: Check with rustfmt
        uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --all -- --check
-  clippy:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - name: Install Rust
-        uses: actions-rs/toolchain@v1
+      - uses: actions-rs/toolchain@v1
        with:
-          toolchain: stable
+          toolchain: nightly
-          profile: minimal
          components: clippy
          override: true
-      - name: Generate Cargo.lock
-        uses: actions-rs/cargo@v1
-        with: { command: generate-lockfile }
-      - name: Cache Dependencies
-        uses: Swatinem/rust-cache@v1.2.0
      - name: Check with Clippy
        uses: actions-rs/clippy-check@v1
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
-          args: --workspace --tests --examples --all-features
+          args: --all-features --all --tests

.github/workflows/linux.yml vendored Normal file

@ -0,0 +1,69 @@
name: CI (Linux)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- 1.42.0 # MSRV
- stable
- nightly
name: ${{ matrix.version }} - x86_64-unknown-linux-gnu
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
- name: tests (actix-http)
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --package=actix-http --no-default-features --features=rustls -- --nocapture
- name: tests (awc)
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --package=awc --no-default-features --features=rustls -- --nocapture
- name: Generate coverage file
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
run: |
cargo install cargo-tarpaulin --vers "^0.13"
cargo tarpaulin --out Xml
- name: Upload to Codecov
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
uses: codecov/codecov-action@v1
with:
file: cobertura.xml

.github/workflows/macos.yml vendored Normal file

@ -0,0 +1,44 @@
name: CI (macOS)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
name: ${{ matrix.version }} - x86_64-apple-darwin
runs-on: macOS-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-apple-darwin
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
--skip=test_h2_content_length
--skip=test_reading_deflate_encoding_large_random_rustls


@ -1,12 +1,14 @@
-name: Upload Documentation
+name: Upload documentation
on:
  push:
-    branches: [master]
+    branches:
+      - master
jobs:
  build:
    runs-on: ubuntu-latest
-    if: github.repository == 'actix/actix-web'
    steps:
      - uses: actions/checkout@v2
@ -18,14 +20,14 @@ jobs:
          profile: minimal
          override: true
-      - name: Build Docs
+      - name: check build
        uses: actions-rs/cargo@v1
        with:
          command: doc
-          args: --workspace --all-features --no-deps
+          args: --no-deps --workspace --all-features
      - name: Tweak HTML
-        run: echo '<meta http-equiv="refresh" content="0;url=actix_web/index.html">' > target/doc/index.html
+        run: echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html
      - name: Deploy to GitHub Pages
        uses: JamesIves/github-pages-deploy-action@3.7.1

.github/workflows/windows.yml vendored Normal file

@ -0,0 +1,64 @@
name: CI (Windows)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
env:
VCPKGRS_DYNAMIC: 1
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
name: ${{ matrix.version }} - x86_64-pc-windows-msvc
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-pc-windows-msvc
profile: minimal
override: true
- name: Install OpenSSL
run: |
vcpkg integrate install
vcpkg install openssl:x64-windows
Copy-Item C:\vcpkg\installed\x64-windows\bin\libcrypto-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libcrypto.dll
Copy-Item C:\vcpkg\installed\x64-windows\bin\libssl-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libssl.dll
Get-ChildItem C:\vcpkg\installed\x64-windows\bin
Get-ChildItem C:\vcpkg\installed\x64-windows\lib
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
--skip=test_h2_content_length
--skip=test_reading_deflate_encoding_large_random_rustls
--skip=test_params
--skip=test_simple
--skip=test_expect_continue
--skip=test_http10_keepalive
--skip=test_slow_request
--skip=test_connection_force_close
--skip=test_connection_server_close
--skip=test_connection_wait_queue_force_close

.gitignore vendored

@ -16,6 +16,3 @@ guide/build/
# Configuration directory generated by CLion
.idea
-# Configuration directory generated by VSCode
-.vscode

File diff suppressed because it is too large.


@ -8,19 +8,19 @@ In the interest of fostering an open and welcoming environment, we as contributo
Examples of behavior that contributes to creating a positive environment include:
-- Using welcoming and inclusive language
-- Being respectful of differing viewpoints and experiences
-- Gracefully accepting constructive criticism
-- Focusing on what is best for the community
-- Showing empathy towards other community members
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
-- The use of sexualized language or imagery and unwelcome sexual attention or advances
-- Trolling, insulting/derogatory comments, and personal or political attacks
-- Public or private harassment
-- Publishing others' private information, such as a physical or electronic address, without explicit permission
-- Other conduct which could reasonably be considered inappropriate in a professional setting
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities


@ -1,130 +1,123 @@
[package] [package]
name = "actix-web" name = "actix-web"
version = "4.0.0-beta.19" version = "3.3.3"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"] authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust" description = "Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust"
readme = "README.md"
keywords = ["actix", "http", "web", "framework", "async"] keywords = ["actix", "http", "web", "framework", "async"]
categories = [
"network-programming",
"asynchronous",
"web-programming::http-server",
"web-programming::websocket"
]
homepage = "https://actix.rs" homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git" repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-web/"
categories = ["network-programming", "asynchronous",
"web-programming::http-server",
"web-programming::websocket"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
edition = "2018" edition = "2018"
[package.metadata.docs.rs] [package.metadata.docs.rs]
# features that docs.rs will build with features = ["openssl", "rustls", "compress", "secure-cookies"]
features = ["openssl", "rustls", "compress-brotli", "compress-gzip", "compress-zstd", "cookies", "secure-cookies"]
rustdoc-args = ["--cfg", "docsrs"] [badges]
travis-ci = { repository = "actix/actix-web", branch = "master" }
codecov = { repository = "actix/actix-web", branch = "master", service = "github" }
[lib] [lib]
name = "actix_web" name = "actix_web"
path = "src/lib.rs" path = "src/lib.rs"
[workspace] [workspace]
resolver = "2"
members = [ members = [
".", ".",
"actix-files", "awc",
"actix-http-test",
"actix-http", "actix-http",
"actix-files",
"actix-multipart", "actix-multipart",
"actix-router",
"actix-test",
"actix-web-actors", "actix-web-actors",
"actix-web-codegen", "actix-web-codegen",
"awc", "test-server",
] ]
[features] [features]
default = ["compress-brotli", "compress-gzip", "compress-zstd", "cookies"] default = ["compress"]
# Brotli algorithm content-encoding support # content-encoding support
compress-brotli = ["actix-http/compress-brotli", "__compress"] compress = ["actix-http/compress", "awc/compress"]
# Gzip and deflate algorithms content-encoding support
compress-gzip = ["actix-http/compress-gzip", "__compress"]
# Zstd algorithm content-encoding support
compress-zstd = ["actix-http/compress-zstd", "__compress"]
# support for cookies # sessions feature
cookies = ["cookie"] secure-cookies = ["actix-http/secure-cookies"]
# secure cookies feature
secure-cookies = ["cookie/secure"]
# openssl # openssl
openssl = ["actix-http/openssl", "actix-tls/accept", "actix-tls/openssl"] openssl = ["actix-tls/openssl", "awc/openssl", "open-ssl"]
# rustls # rustls
rustls = ["actix-http/rustls", "actix-tls/accept", "actix-tls/rustls"] rustls = ["actix-tls/rustls", "awc/rustls", "rust-tls"]
# Internal (PRIVATE!) features used to aid testing and checking feature status. [[example]]
# Don't rely on these whatsoever. They may disappear at anytime. name = "basic"
__compress = [] required-features = ["compress"]
# io-uring feature only avaiable for Linux OSes. [[example]]
experimental-io-uring = ["actix-server/io-uring"] name = "uds"
required-features = ["compress"]
[[test]]
name = "test_server"
required-features = ["compress"]
[[example]]
name = "on_connect"
required-features = []
[[example]]
name = "client"
required-features = ["rustls"]
[dependencies] [dependencies]
actix-codec = "0.4.1" actix-codec = "0.3.0"
actix-macros = "0.2.3" actix-service = "1.0.6"
actix-rt = "2.3" actix-utils = "2.0.0"
actix-server = "2.0.0-rc.2" actix-router = "0.2.4"
actix-service = "2.0.0" actix-rt = "1.1.1"
actix-utils = "3.0.0" actix-server = "1.0.0"
actix-tls = { version = "3.0.0", default-features = false, optional = true } actix-testing = "1.0.0"
actix-macros = "0.1.0"
actix-threadpool = "0.3.1"
actix-tls = "2.0.0"
actix-http = "3.0.0-beta.18" actix-web-codegen = "0.4.0"
actix-router = "0.5.0-beta.4" actix-http = "2.2.0"
actix-web-codegen = "0.5.0-rc.1" awc = { version = "2.0.3", default-features = false }
ahash = "0.7" bytes = "0.5.3"
bytes = "1"
cfg-if = "1"
cookie = { version = "0.16", features = ["percent-encode"], optional = true }
derive_more = "0.99.5" derive_more = "0.99.5"
encoding_rs = "0.8" encoding_rs = "0.8"
futures-core = { version = "0.3.7", default-features = false } futures-channel = { version = "0.3.5", default-features = false }
futures-util = { version = "0.3.7", default-features = false } futures-core = { version = "0.3.5", default-features = false }
itoa = "1" futures-util = { version = "0.3.5", default-features = false }
language-tags = "0.3" fxhash = "0.2.1"
once_cell = "1.5"
log = "0.4" log = "0.4"
mime = "0.3" mime = "0.3"
pin-project-lite = "0.2.7" socket2 = "0.3.16"
pin-project = "1.0.0"
regex = "1.4" regex = "1.4"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0" serde_json = "1.0"
serde_urlencoded = "0.7" serde_urlencoded = "0.7"
smallvec = "1.6.1" time = { version = "0.2.7", default-features = false, features = ["std"] }
socket2 = "0.4.0"
time = { version = "0.3", default-features = false, features = ["formatting"] }
url = "2.1" url = "2.1"
open-ssl = { package = "openssl", version = "0.10", optional = true }
rust-tls = { package = "rustls", version = "0.18.0", optional = true }
tinyvec = { version = "1", features = ["alloc"] }
[dev-dependencies] [dev-dependencies]
actix-files = "0.6.0-beta.13" actix = "0.10.0"
actix-test = { version = "0.1.0-beta.11", features = ["openssl", "rustls"] } actix-http = { version = "2.1.0", features = ["actors"] }
awc = { version = "3.0.0-beta.18", features = ["openssl"] } rand = "0.7"
env_logger = "0.8"
brotli2 = "0.3.2" serde_derive = "1.0"
const-str = "0.3" brotli = "3.3.3"
criterion = { version = "0.3", features = ["html_reports"] }
env_logger = "0.9"
flate2 = "1.0.13" flate2 = "1.0.13"
futures-util = { version = "0.3.7", default-features = false, features = ["std"] } criterion = "0.3"
rand = "0.8"
rcgen = "0.8"
rustls-pemfile = "0.2"
tls-openssl = { package = "openssl", version = "0.10.9" }
tls-rustls = { package = "rustls", version = "0.20.0" }
zstd = "0.9"
[profile.dev]
# Disabling debug info speeds up builds a bunch and we don't rely on it for debugging that much.
debug = 0
[profile.release] [profile.release]
lto = true lto = true
@ -132,42 +125,14 @@ opt-level = 3
codegen-units = 1 codegen-units = 1
[patch.crates-io] [patch.crates-io]
actix-files = { path = "actix-files" }
actix-http = { path = "actix-http" }
actix-http-test = { path = "actix-http-test" }
actix-multipart = { path = "actix-multipart" }
actix-router = { path = "actix-router" }
actix-test = { path = "actix-test" }
actix-web = { path = "." } actix-web = { path = "." }
actix-web-actors = { path = "actix-web-actors" } actix-http = { path = "actix-http" }
actix-http-test = { path = "test-server" }
actix-web-codegen = { path = "actix-web-codegen" } actix-web-codegen = { path = "actix-web-codegen" }
actix-files = { path = "actix-files" }
actix-multipart = { path = "actix-multipart" }
awc = { path = "awc" } awc = { path = "awc" }
# uncomment for quick testing against local actix-net repo
# actix-service = { path = "../actix-net/actix-service" }
# actix-macros = { path = "../actix-net/actix-macros" }
# actix-rt = { path = "../actix-net/actix-rt" }
# actix-codec = { path = "../actix-net/actix-codec" }
# actix-utils = { path = "../actix-net/actix-utils" }
# actix-tls = { path = "../actix-net/actix-tls" }
# actix-server = { path = "../actix-net/actix-server" }
[[test]]
name = "test_server"
required-features = ["compress-brotli", "compress-gzip", "compress-zstd", "cookies"]
[[example]]
name = "basic"
required-features = ["compress-gzip"]
[[example]]
name = "uds"
required-features = ["compress-gzip"]
[[example]]
name = "on-connect"
required-features = []
[[bench]] [[bench]]
name = "server" name = "server"
harness = false harness = false
@ -175,7 +140,3 @@ harness = false
[[bench]] [[bench]]
name = "service" name = "service"
harness = false harness = false
[[bench]]
name = "responder"
harness = false


@ -1,4 +1,4 @@
-Copyright (c) 2017-NOW Actix Team
+Copyright (c) 2017 Actix Team
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated


@ -1,57 +1,32 @@
## Unreleased
- The default `NormalizePath` behavior now strips trailing slashes. This was
previously documented to be the case in v3, but the behavior now actually matches the docs.
The effect is that routes defined with trailing slashes become inaccessible when
using `NormalizePath::default()`, so calling `NormalizePath::default()` now logs a warning.
It is advised that the `new` method be used instead (see the sketch after this section).
Before: `#[get("/test/")]`
After: `#[get("/test")]`
Alternatively, explicitly require trailing slashes: `NormalizePath::new(TrailingSlash::Always)`.
- The `type Config` of `FromRequest` was removed.
- Feature flag `compress` has been split into one flag per supported algorithm (brotli, gzip, zstd).
By default, all compression algorithms are enabled.
To select which algorithms `middleware::Compress` should include, use the following flags:
- `compress-brotli`
- `compress-gzip`
- `compress-zstd`
If you have set dedicated `actix-web` features in your `Cargo.toml` and still want
compression enabled, change the feature selection as below:
Before: `"compress"`
After: `"compress-brotli", "compress-gzip", "compress-zstd"`
## 3.0.0
- The return type for `ServiceRequest::app_data::<T>()` was changed from returning a `Data<T>` to * The return type for `ServiceRequest::app_data::<T>()` was changed from returning a `Data<T>` to
simply a `T`. To access a `Data<T>` use `ServiceRequest::app_data::<Data<T>>()`. simply a `T`. To access a `Data<T>` use `ServiceRequest::app_data::<Data<T>>()`.
- Cookie handling has been offloaded to the `cookie` crate: * Cookie handling has been offloaded to the `cookie` crate:
* `USERINFO_ENCODE_SET` is no longer exposed. Percent-encoding is still supported; check docs. * `USERINFO_ENCODE_SET` is no longer exposed. Percent-encoding is still supported; check docs.
* Some types now require lifetime parameters. * Some types now require lifetime parameters.
- The time crate was updated to `v0.2`, a major breaking change to the time crate, which affects * The time crate was updated to `v0.2`, a major breaking change to the time crate, which affects
any `actix-web` method previously expecting a time v0.1 input. any `actix-web` method previously expecting a time v0.1 input.
- Setting a cookie's SameSite property, explicitly, to `SameSite::None` will now * Setting a cookie's SameSite property, explicitly, to `SameSite::None` will now
result in `SameSite=None` being sent with the response Set-Cookie header. result in `SameSite=None` being sent with the response Set-Cookie header.
To create a cookie without a SameSite attribute, remove any calls setting same_site. To create a cookie without a SameSite attribute, remove any calls setting same_site.
- actix-http support for Actors messages was moved to actix-http crate and is enabled * actix-http support for Actors messages was moved to actix-http crate and is enabled
with feature `actors` with feature `actors`
- content_length function is removed from actix-http. * content_length function is removed from actix-http.
You can set Content-Length by normally setting the response body or calling no_chunking function. You can set Content-Length by normally setting the response body or calling no_chunking function.
- `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a * `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
`u64` instead of a `usize`. `u64` instead of a `usize`.
- Code that was using `path.<index>` to access a `web::Path<(A, B, C)>`s elements now needs to use * Code that was using `path.<index>` to access a `web::Path<(A, B, C)>`s elements now needs to use
destructuring or `.into_inner()`. For example: destructuring or `.into_inner()`. For example:
```rust ```rust
@ -71,35 +46,35 @@
} }
``` ```
- `middleware::NormalizePath` can now also be configured to trim trailing slashes instead of always keeping one. * `middleware::NormalizePath` can now also be configured to trim trailing slashes instead of always keeping one.
It will need `middleware::normalize::TrailingSlash` when being constructed with `NormalizePath::new(...)`, It will need `middleware::normalize::TrailingSlash` when being constructed with `NormalizePath::new(...)`,
or for an easier migration you can replace `wrap(middleware::NormalizePath)` with `wrap(middleware::NormalizePath::new(TrailingSlash::MergeOnly))`. or for an easier migration you can replace `wrap(middleware::NormalizePath)` with `wrap(middleware::NormalizePath::new(TrailingSlash::MergeOnly))`.
- `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`. * `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`.
- `HttpServer::maxconnrate` is renamed to the more expressive `HttpServer::max_connection_rate`. * `HttpServer::maxconnrate` is renamed to the more expressive `HttpServer::max_connection_rate`.
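A minimal sketch of the `ServiceRequest::app_data` change listed at the top of the 3.0.0 notes (assuming a `Data<u32>` counter registered on the app; the names are placeholders):

```rust
use actix_web::dev::ServiceRequest;
use actix_web::web::Data;

// Since v3, `ServiceRequest::app_data::<T>()` returns the stored `T` itself,
// so state registered as `Data<u32>` must be requested as `Data<u32>`.
fn read_counter(req: &ServiceRequest) -> Option<u32> {
    req.app_data::<Data<u32>>().map(|data| *data.get_ref())
}
```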
## 2.0.0 ## 2.0.0
- `HttpServer::start()` renamed to `HttpServer::run()`. It also possible to * `HttpServer::start()` renamed to `HttpServer::run()`. It also possible to
`.await` on `run` method result, in that case it awaits server exit. `.await` on `run` method result, in that case it awaits server exit.
- `App::register_data()` renamed to `App::app_data()` and accepts any type `T: 'static`. * `App::register_data()` renamed to `App::app_data()` and accepts any type `T: 'static`.
Stored data is available via `HttpRequest::app_data()` method at runtime. Stored data is available via `HttpRequest::app_data()` method at runtime.
- Extractor configuration must be registered with `App::app_data()` instead of `App::data()` * Extractor configuration must be registered with `App::app_data()` instead of `App::data()`
- Sync handlers has been removed. `.to_async()` method has been renamed to `.to()` * Sync handlers has been removed. `.to_async()` method has been renamed to `.to()`
replace `fn` with `async fn` to convert sync handler to async replace `fn` with `async fn` to convert sync handler to async
- `actix_http_test::TestServer` moved to `actix_web::test` module. To start * `actix_http_test::TestServer` moved to `actix_web::test` module. To start
test server use `test::start()` or `test_start_with_config()` methods test server use `test::start()` or `test_start_with_config()` methods
- `ResponseError` trait has been reafctored. `ResponseError::error_response()` renders * `ResponseError` trait has been reafctored. `ResponseError::error_response()` renders
http response. http response.
- Feature `rust-tls` renamed to `rustls` * Feature `rust-tls` renamed to `rustls`
instead of instead of
@ -113,7 +88,7 @@
actix-web = { version = "2.0.0", features = ["rustls"] } actix-web = { version = "2.0.0", features = ["rustls"] }
``` ```
- Feature `ssl` renamed to `openssl` * Feature `ssl` renamed to `openssl`
instead of instead of
@ -126,11 +101,11 @@
```rust ```rust
actix-web = { version = "2.0.0", features = ["openssl"] } actix-web = { version = "2.0.0", features = ["openssl"] }
``` ```
- `Cors` builder now requires that you call `.finish()` to construct the middleware * `Cors` builder now requires that you call `.finish()` to construct the middleware
## 1.0.1 ## 1.0.1
- Cors middleware has been moved to `actix-cors` crate * Cors middleware has been moved to `actix-cors` crate
instead of instead of
@ -144,7 +119,7 @@
use actix_cors::Cors; use actix_cors::Cors;
``` ```
- Identity middleware has been moved to `actix-identity` crate * Identity middleware has been moved to `actix-identity` crate
instead of instead of
@ -161,7 +136,7 @@
## 1.0.0 ## 1.0.0
- Extractor configuration. In version 1.0 this is handled with the new `Data` mechanism for both setting and retrieving the configuration * Extractor configuration. In version 1.0 this is handled with the new `Data` mechanism for both setting and retrieving the configuration
instead of instead of
@ -219,7 +194,7 @@
) )
``` ```
- Resource registration. 1.0 version uses generalized resource * Resource registration. 1.0 version uses generalized resource
registration via `.service()` method. registration via `.service()` method.
instead of instead of
@ -239,7 +214,7 @@
.route(web::post().to(post_handler)) .route(web::post().to(post_handler))
``` ```
- Scope registration. * Scope registration.
instead of instead of
@ -263,7 +238,7 @@
); );
``` ```
- `.with()`, `.with_async()` registration methods have been renamed to `.to()` and `.to_async()`. * `.with()`, `.with_async()` registration methods have been renamed to `.to()` and `.to_async()`.
instead of instead of
@ -277,7 +252,7 @@
App.new().service(web::resource("/welcome").to(welcome)) App.new().service(web::resource("/welcome").to(welcome))
``` ```
- Passing arguments to handler with extractors, multiple arguments are allowed * Passing arguments to handler with extractors, multiple arguments are allowed
instead of instead of
@ -295,7 +270,7 @@
} }
``` ```
- `.f()`, `.a()` and `.h()` handler registration methods have been removed. * `.f()`, `.a()` and `.h()` handler registration methods have been removed.
Use `.to()` for handlers and `.to_async()` for async handlers. Handler function Use `.to()` for handlers and `.to_async()` for async handlers. Handler function
must use extractors. must use extractors.
@ -311,7 +286,7 @@
App.new().service(web::resource("/welcome").to(welcome)) App.new().service(web::resource("/welcome").to(welcome))
``` ```
- `HttpRequest` does not provide access to request's payload stream. * `HttpRequest` does not provide access to request's payload stream.
instead of instead of
@ -341,7 +316,7 @@
} }
``` ```
- `State` is now `Data`. You register Data during the App initialization process * `State` is now `Data`. You register Data during the App initialization process
and then access it from handlers either using a Data extractor or using and then access it from handlers either using a Data extractor or using
HttpRequest's api. HttpRequest's api.
@ -377,7 +352,7 @@
``` ```
- AsyncResponder is removed, use `.to_async()` registration method and `impl Future<>` as result type. * AsyncResponder is removed, use `.to_async()` registration method and `impl Future<>` as result type.
instead of instead of
@ -393,7 +368,7 @@
.. simply omit AsyncResponder and the corresponding responder() finish method .. simply omit AsyncResponder and the corresponding responder() finish method
- Middleware * Middleware
instead of instead of
@ -410,7 +385,7 @@
.route("/index.html", web::get().to(index)); .route("/index.html", web::get().to(index));
``` ```
- `HttpRequest::body()`, `HttpRequest::urlencoded()`, `HttpRequest::json()`, `HttpRequest::multipart()` * `HttpRequest::body()`, `HttpRequest::urlencoded()`, `HttpRequest::json()`, `HttpRequest::multipart()`
method have been removed. Use `Bytes`, `String`, `Form`, `Json`, `Multipart` extractors instead. method have been removed. Use `Bytes`, `String`, `Form`, `Json`, `Multipart` extractors instead.
instead of instead of
@ -432,9 +407,9 @@
} }
``` ```
- `actix_web::server` module has been removed. To start http server use `actix_web::HttpServer` type * `actix_web::server` module has been removed. To start http server use `actix_web::HttpServer` type
- StaticFiles and NamedFile have been moved to a separate crate. * StaticFiles and NamedFile have been moved to a separate crate.
instead of `use actix_web::fs::StaticFile` instead of `use actix_web::fs::StaticFile`
@ -444,20 +419,20 @@
use `use actix_files::NamedFile` use `use actix_files::NamedFile`
- Multipart has been moved to a separate crate. * Multipart has been moved to a separate crate.
instead of `use actix_web::multipart::Multipart` instead of `use actix_web::multipart::Multipart`
use `use actix_multipart::Multipart` use `use actix_multipart::Multipart`
- Response compression is not enabled by default. * Response compression is not enabled by default.
To enable, use `Compress` middleware, `App::new().wrap(Compress::default())`. To enable, use `Compress` middleware, `App::new().wrap(Compress::default())`.
- Session middleware moved to actix-session crate * Session middleware moved to actix-session crate
- Actors support have been moved to `actix-web-actors` crate * Actors support have been moved to `actix-web-actors` crate
- Custom Error * Custom Error
Instead of error_response method alone, ResponseError now provides two methods: error_response and render_response respectively. Where, error_response creates the error response and render_response returns the error response to the caller. Instead of error_response method alone, ResponseError now provides two methods: error_response and render_response respectively. Where, error_response creates the error response and render_response returns the error response to the caller.
@ -471,7 +446,7 @@
## 0.7.15 ## 0.7.15
- The `' '` character is not percent decoded anymore before matching routes. If you need to use it in * The `' '` character is not percent decoded anymore before matching routes. If you need to use it in
your routes, you should use `%20`. your routes, you should use `%20`.
instead of instead of
@ -496,18 +471,18 @@
} }
``` ```
- If you used `AsyncResult::async` you need to replace it with `AsyncResult::future` * If you used `AsyncResult::async` you need to replace it with `AsyncResult::future`
## 0.7.4 ## 0.7.4
- `Route::with_config()`/`Route::with_async_config()` always passes configuration objects as tuple * `Route::with_config()`/`Route::with_async_config()` always passes configuration objects as tuple
even for handler with one parameter. even for handler with one parameter.
## 0.7 ## 0.7
- `HttpRequest` does not implement `Stream` anymore. If you need to read request payload * `HttpRequest` does not implement `Stream` anymore. If you need to read request payload
use `HttpMessage::payload()` method. use `HttpMessage::payload()` method.
instead of instead of
@ -533,10 +508,10 @@
} }
``` ```
- [Middleware](https://actix.rs/actix-web/actix_web/middleware/trait.Middleware.html) * [Middleware](https://actix.rs/actix-web/actix_web/middleware/trait.Middleware.html)
trait uses `&HttpRequest` instead of `&mut HttpRequest`. trait uses `&HttpRequest` instead of `&mut HttpRequest`.
- Removed `Route::with2()` and `Route::with3()` use tuple of extractors instead. * Removed `Route::with2()` and `Route::with3()` use tuple of extractors instead.
instead of instead of
@ -550,17 +525,17 @@
fn index((query, json): (Query<..>, Json<MyStruct)) -> impl Responder {} fn index((query, json): (Query<..>, Json<MyStruct)) -> impl Responder {}
``` ```
- `Handler::handle()` uses `&self` instead of `&mut self` * `Handler::handle()` uses `&self` instead of `&mut self`
- `Handler::handle()` accepts reference to `HttpRequest<_>` instead of value * `Handler::handle()` accepts reference to `HttpRequest<_>` instead of value
- Removed deprecated `HttpServer::threads()`, use * Removed deprecated `HttpServer::threads()`, use
[HttpServer::workers()](https://actix.rs/actix-web/actix_web/server/struct.HttpServer.html#method.workers) instead. [HttpServer::workers()](https://actix.rs/actix-web/actix_web/server/struct.HttpServer.html#method.workers) instead.
- Renamed `client::ClientConnectorError::Connector` to * Renamed `client::ClientConnectorError::Connector` to
`client::ClientConnectorError::Resolver` `client::ClientConnectorError::Resolver`
- `Route::with()` does not return `ExtractorConfig`, to configure * `Route::with()` does not return `ExtractorConfig`, to configure
extractor use `Route::with_config()` extractor use `Route::with_config()`
instead of instead of
@ -589,26 +564,26 @@
} }
``` ```
- `Route::with_async()` does not return `ExtractorConfig`, to configure * `Route::with_async()` does not return `ExtractorConfig`, to configure
extractor use `Route::with_async_config()` extractor use `Route::with_async_config()`
## 0.6 ## 0.6
- `Path<T>` extractor return `ErrorNotFound` on failure instead of `ErrorBadRequest` * `Path<T>` extractor return `ErrorNotFound` on failure instead of `ErrorBadRequest`
- `ws::Message::Close` now includes optional close reason. * `ws::Message::Close` now includes optional close reason.
`ws::CloseCode::Status` and `ws::CloseCode::Empty` have been removed. `ws::CloseCode::Status` and `ws::CloseCode::Empty` have been removed.
- `HttpServer::threads()` renamed to `HttpServer::workers()`. * `HttpServer::threads()` renamed to `HttpServer::workers()`.
- `HttpServer::start_ssl()` and `HttpServer::start_tls()` deprecated. * `HttpServer::start_ssl()` and `HttpServer::start_tls()` deprecated.
Use `HttpServer::bind_ssl()` and `HttpServer::bind_tls()` instead. Use `HttpServer::bind_ssl()` and `HttpServer::bind_tls()` instead.
- `HttpRequest::extensions()` returns read only reference to the request's Extension * `HttpRequest::extensions()` returns read only reference to the request's Extension
`HttpRequest::extensions_mut()` returns mutable reference. `HttpRequest::extensions_mut()` returns mutable reference.
- Instead of * Instead of
`use actix_web::middleware::{ `use actix_web::middleware::{
CookieSessionBackend, CookieSessionError, RequestSession, CookieSessionBackend, CookieSessionError, RequestSession,
@ -619,15 +594,15 @@
`use actix_web::middleware::session{CookieSessionBackend, CookieSessionError, `use actix_web::middleware::session{CookieSessionBackend, CookieSessionError,
RequestSession, Session, SessionBackend, SessionImpl, SessionStorage};` RequestSession, Session, SessionBackend, SessionImpl, SessionStorage};`
- `FromRequest::from_request()` accepts mutable reference to a request * `FromRequest::from_request()` accepts mutable reference to a request
- `FromRequest::Result` has to implement `Into<Reply<Self>>` * `FromRequest::Result` has to implement `Into<Reply<Self>>`
- [`Responder::respond_to()`]( * [`Responder::respond_to()`](
https://actix.rs/actix-web/actix_web/trait.Responder.html#tymethod.respond_to) https://actix.rs/actix-web/actix_web/trait.Responder.html#tymethod.respond_to)
is generic over `S` is generic over `S`
- Use `Query` extractor instead of HttpRequest::query()`. * Use `Query` extractor instead of HttpRequest::query()`.
```rust ```rust
fn index(q: Query<HashMap<String, String>>) -> Result<..> { fn index(q: Query<HashMap<String, String>>) -> Result<..> {
@ -641,37 +616,37 @@
let q = Query::<HashMap<String, String>>::extract(req); let q = Query::<HashMap<String, String>>::extract(req);
``` ```
- Websocket operations are implemented as `WsWriter` trait. * Websocket operations are implemented as `WsWriter` trait.
you need to use `use actix_web::ws::WsWriter` you need to use `use actix_web::ws::WsWriter`
## 0.5 ## 0.5
- `HttpResponseBuilder::body()`, `.finish()`, `.json()` * `HttpResponseBuilder::body()`, `.finish()`, `.json()`
methods return `HttpResponse` instead of `Result<HttpResponse>` methods return `HttpResponse` instead of `Result<HttpResponse>`
- `actix_web::Method`, `actix_web::StatusCode`, `actix_web::Version` * `actix_web::Method`, `actix_web::StatusCode`, `actix_web::Version`
moved to `actix_web::http` module moved to `actix_web::http` module
- `actix_web::header` moved to `actix_web::http::header` * `actix_web::header` moved to `actix_web::http::header`
- `NormalizePath` moved to `actix_web::http` module * `NormalizePath` moved to `actix_web::http` module
- `HttpServer` moved to `actix_web::server`, added new `actix_web::server::new()` function, * `HttpServer` moved to `actix_web::server`, added new `actix_web::server::new()` function,
shortcut for `actix_web::server::HttpServer::new()` shortcut for `actix_web::server::HttpServer::new()`
- `DefaultHeaders` middleware does not use separate builder, all builder methods moved to type itself * `DefaultHeaders` middleware does not use separate builder, all builder methods moved to type itself
- `StaticFiles::new()`'s show_index parameter removed, use `show_files_listing()` method instead. * `StaticFiles::new()`'s show_index parameter removed, use `show_files_listing()` method instead.
- `CookieSessionBackendBuilder` removed, all methods moved to `CookieSessionBackend` type * `CookieSessionBackendBuilder` removed, all methods moved to `CookieSessionBackend` type
- `actix_web::httpcodes` module is deprecated, `HttpResponse::Ok()`, `HttpResponse::Found()` and other `HttpResponse::XXX()` * `actix_web::httpcodes` module is deprecated, `HttpResponse::Ok()`, `HttpResponse::Found()` and other `HttpResponse::XXX()`
functions should be used instead functions should be used instead
- `ClientRequestBuilder::body()` returns `Result<_, actix_web::Error>` * `ClientRequestBuilder::body()` returns `Result<_, actix_web::Error>`
instead of `Result<_, http::Error>` instead of `Result<_, http::Error>`
- `Application` renamed to a `App` * `Application` renamed to a `App`
- `actix_web::Reply`, `actix_web::Resource` moved to `actix_web::dev` * `actix_web::Reply`, `actix_web::Resource` moved to `actix_web::dev`


@ -1,19 +1,20 @@
<div align="center"> <div align="center">
<h1>Actix Web</h1> <h1>Actix web</h1>
<p> <p>
<strong>Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust</strong> <strong>Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust</strong>
</p> </p>
<p> <p>
[![crates.io](https://img.shields.io/crates/v/actix-web?label=latest)](https://crates.io/crates/actix-web) [![crates.io](https://img.shields.io/crates/v/actix-web?label=latest)](https://crates.io/crates/actix-web)
[![Documentation](https://docs.rs/actix-web/badge.svg?version=4.0.0-beta.19)](https://docs.rs/actix-web/4.0.0-beta.19) [![Documentation](https://docs.rs/actix-web/badge.svg?version=3.3.3)](https://docs.rs/actix-web/3.3.3)
[![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html) [![Version](https://img.shields.io/badge/rustc-1.42+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.42.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-web.svg) ![License](https://img.shields.io/crates/l/actix-web.svg)
[![Dependency Status](https://deps.rs/crate/actix-web/4.0.0-beta.19/status.svg)](https://deps.rs/crate/actix-web/4.0.0-beta.19) [![Dependency Status](https://deps.rs/crate/actix-web/3.3.3/status.svg)](https://deps.rs/crate/actix-web/3.3.3)
<br /> <br />
[![CI](https://github.com/actix/actix-web/actions/workflows/ci.yml/badge.svg)](https://github.com/actix/actix-web/actions/workflows/ci.yml) [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web)
[![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web)
![downloads](https://img.shields.io/crates/d/actix-web.svg) [![Download](https://img.shields.io/crates/d/actix-web.svg)](https://crates.io/crates/actix-web)
[![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) [![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
</p> </p>
@ -21,25 +22,26 @@
## Features ## Features
- Supports *HTTP/1.x* and *HTTP/2* * Supports *HTTP/1.x* and *HTTP/2*
- Streaming and pipelining * Streaming and pipelining
- Keep-alive and slow requests handling * Keep-alive and slow requests handling
- Client/server [WebSockets](https://actix.rs/docs/websockets/) support * Client/server [WebSockets](https://actix.rs/docs/websockets/) support
- Transparent content compression/decompression (br, gzip, deflate, zstd) * Transparent content compression/decompression (br, gzip, deflate)
- Powerful [request routing](https://actix.rs/docs/url-dispatch/) * Powerful [request routing](https://actix.rs/docs/url-dispatch/)
- Multipart streams * Multipart streams
- Static assets * Static assets
- SSL support using OpenSSL or Rustls * SSL support using OpenSSL or Rustls
- Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/)) * Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/))
- Includes an async [HTTP client](https://docs.rs/awc/) * Includes an async [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html)
- Runs on stable Rust 1.54+ * Supports [Actix actor framework](https://github.com/actix/actix)
* Runs on stable Rust 1.42+
## Documentation ## Documentation
- [Website & User Guide](https://actix.rs) * [Website & User Guide](https://actix.rs)
- [Examples Repository](https://github.com/actix/examples) * [Examples Repository](https://github.com/actix/examples)
- [API Documentation](https://docs.rs/actix-web) * [API Documentation](https://docs.rs/actix-web)
- [API Documentation (master branch)](https://actix.rs/actix-web/actix_web) * [API Documentation (master branch)](https://actix.rs/actix-web/actix_web)
## Example ## Example
@ -71,18 +73,18 @@ async fn main() -> std::io::Result<()> {
### More examples ### More examples
- [Basic Setup](https://github.com/actix/examples/tree/master/basics/basics/) * [Basic Setup](https://github.com/actix/examples/tree/master/basics/)
- [Application State](https://github.com/actix/examples/tree/master/basics/state/) * [Application State](https://github.com/actix/examples/tree/master/state/)
- [JSON Handling](https://github.com/actix/examples/tree/master/json/json/) * [JSON Handling](https://github.com/actix/examples/tree/master/json/)
- [Multipart Streams](https://github.com/actix/examples/tree/master/forms/multipart/) * [Multipart Streams](https://github.com/actix/examples/tree/master/multipart/)
- [Diesel Integration](https://github.com/actix/examples/tree/master/database_interactions/diesel/) * [Diesel Integration](https://github.com/actix/examples/tree/master/diesel/)
- [r2d2 Integration](https://github.com/actix/examples/tree/master/database_interactions/r2d2/) * [r2d2 Integration](https://github.com/actix/examples/tree/master/r2d2/)
- [Simple WebSocket](https://github.com/actix/examples/tree/master/websockets/websocket/) * [Simple WebSocket](https://github.com/actix/examples/tree/master/websocket/)
- [Tera Templates](https://github.com/actix/examples/tree/master/template_engines/tera/) * [Tera Templates](https://github.com/actix/examples/tree/master/template_tera/)
- [Askama Templates](https://github.com/actix/examples/tree/master/template_engines/askama/) * [Askama Templates](https://github.com/actix/examples/tree/master/template_askama/)
- [HTTPS using Rustls](https://github.com/actix/examples/tree/master/security/rustls/) * [HTTPS using Rustls](https://github.com/actix/examples/tree/master/rustls/)
- [HTTPS using OpenSSL](https://github.com/actix/examples/tree/master/security/openssl/) * [HTTPS using OpenSSL](https://github.com/actix/examples/tree/master/openssl/)
- [WebSocket Chat](https://github.com/actix/examples/tree/master/websockets/chat/) * [WebSocket Chat](https://github.com/actix/examples/tree/master/websocket-chat/)
You may consider checking out You may consider checking out
[this directory](https://github.com/actix/examples/tree/master/) for more examples. [this directory](https://github.com/actix/examples/tree/master/) for more examples.
@ -90,20 +92,20 @@ You may consider checking out
## Benchmarks ## Benchmarks
One of the fastest web frameworks available according to the One of the fastest web frameworks available according to the
[TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r20&test=composite). [TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r19).
## License ## License
This project is licensed under either of This project is licensed under either of
- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
[http://www.apache.org/licenses/LICENSE-2.0]) [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
- MIT license ([LICENSE-MIT](LICENSE-MIT) or * MIT license ([LICENSE-MIT](LICENSE-MIT) or
[http://opensource.org/licenses/MIT]) [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
at your option. at your option.
## Code of Conduct ## Code of Conduct
Contribution to the actix-web repo is organized under the terms of the Contributor Covenant. Contribution to the actix-web crate is organized under the terms of the Contributor Covenant, the
The Actix team promises to intervene to uphold that code of conduct. maintainers of Actix web, promises to intervene to uphold that code of conduct.
View File
@ -1,193 +1,102 @@
# Changes # Changes
## Unreleased - 2021-xx-xx ## Unreleased - 2020-xx-xx
## 0.6.0-beta.13 - 2022-01-04
- The `Files` service now rejects requests with URL paths that include `%2F` (decoded: `/`). [#2398]
- The `Files` service now correctly decodes `%25` in the URL path to `%` for the file path. [#2398]
- Minimum supported Rust version (MSRV) is now 1.54.
[#2398]: https://github.com/actix/actix-web/pull/2398
## 0.6.0-beta.12 - 2021-12-29
- No significant changes since `0.6.0-beta.11`.
## 0.6.0-beta.11 - 2021-12-27
- No significant changes since `0.6.0-beta.10`.
## 0.6.0-beta.10 - 2021-12-11
- No significant changes since `0.6.0-beta.9`.
## 0.6.0-beta.9 - 2021-11-22
- Add crate feature `experimental-io-uring`, enabling async file I/O to be utilized. This feature is only available on Linux OSes with recent kernel versions. This feature is semver-exempt. [#2408]
- Add `NamedFile::open_async`. [#2408]
- Fix 304 Not Modified responses to omit the Content-Length header, as per the spec. [#2453]
- The `Responder` impl for `NamedFile` now has a boxed future associated type. [#2408]
- The `Service` impl for `NamedFileService` now has a boxed future associated type. [#2408]
- Add `impl Clone` for `FilesService`. [#2408]
[#2408]: https://github.com/actix/actix-web/pull/2408
[#2453]: https://github.com/actix/actix-web/pull/2453
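A minimal usage sketch for the `NamedFile::open_async` addition noted above; the handler name and file path are illustrative placeholders:

```rust
use actix_files::NamedFile;

// `open_async` opens the file without blocking the async runtime; with the
// `experimental-io-uring` feature enabled (Linux only), reads are async too.
async fn index() -> actix_web::Result<NamedFile> {
    Ok(NamedFile::open_async("./static/index.html").await?)
}
```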
## 0.6.0-beta.8 - 2021-10-20
- Minimum supported Rust version (MSRV) is now 1.52.
## 0.6.0-beta.7 - 2021-09-09
- Minimum supported Rust version (MSRV) is now 1.51.
## 0.6.0-beta.6 - 2021-06-26
- Added `Files::path_filter()`. [#2274]
- `Files::show_files_listing()` can now be used with `Files::index_file()` to show files listing as a fallback when the index file is not found. [#2228]
[#2274]: https://github.com/actix/actix-web/pull/2274
[#2228]: https://github.com/actix/actix-web/pull/2228
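A minimal sketch of combining the two options from this release; the mount point and directory are placeholders:

```rust
use actix_files::Files;

// When `index.html` is missing in a requested directory, the generated
// listing is served as a fallback instead of a 404.
fn static_files() -> Files {
    Files::new("/static", "./static")
        .index_file("index.html")
        .show_files_listing()
}
```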
## 0.6.0-beta.5 - 2021-06-17
- `NamedFile` now implements `ServiceFactory` and `HttpServiceFactory` making it much more useful in routing. For example, it can be used directly as a default service. [#2135]
- For symbolic links, the `Content-Disposition` header no longer shows the filename of the original file. [#2156]
- `Files::redirect_to_slash_directory()` now works as expected when used with `Files::show_files_listing()`. [#2225]
- `application/{javascript, json, wasm}` mime types now have `inline` disposition by default. [#2257]
[#2135]: https://github.com/actix/actix-web/pull/2135
[#2156]: https://github.com/actix/actix-web/pull/2156
[#2225]: https://github.com/actix/actix-web/pull/2225
[#2257]: https://github.com/actix/actix-web/pull/2257
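A hedged sketch of what the new `ServiceFactory` impl enables, assuming an opened `NamedFile` can be passed straight to `Files::default_handler` as the fallback service; the paths are placeholders:

```rust
use actix_files::{Files, NamedFile};

// Serve `./static`, falling back to a pre-opened 404 page for unmatched paths.
async fn static_site() -> std::io::Result<Files> {
    Ok(Files::new("/", "./static")
        .index_file("index.html")
        .default_handler(NamedFile::open_async("./static/404.html").await?))
}
```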
## 0.6.0-beta.4 - 2021-04-02
- Add support for `.guard` in `Files` to selectively filter `Files` services. [#2046]
[#2046]: https://github.com/actix/actix-web/pull/2046
## 0.6.0-beta.3 - 2021-03-09
- No notable changes.
## 0.6.0-beta.2 - 2021-02-10
- Fix If-Modified-Since and If-Unmodified-Since to not compare using sub-second timestamps. [#1887]
- Replace `v_htmlescape` with `askama_escape`. [#1953]
[#1887]: https://github.com/actix/actix-web/pull/1887
[#1953]: https://github.com/actix/actix-web/pull/1953
## 0.6.0-beta.1 - 2021-01-07
- `HttpRange::parse` now has its own error type.
- Update `bytes` to `1.0`. [#1813]
[#1813]: https://github.com/actix/actix-web/pull/1813
## 0.5.0 - 2020-12-26
- Optionally support hidden files/directories. [#1811]
[#1811]: https://github.com/actix/actix-web/pull/1811
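A minimal sketch of opting in, using the `use_hidden_files` builder method as it appears in current versions; the directory is a placeholder:

```rust
use actix_files::Files;

// By default, path segments starting with a dot are rejected; this opts in
// to serving them (e.g. `/.well-known/...`).
fn hidden_files() -> Files {
    Files::new("/", "./static").use_hidden_files()
}
```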
## 0.4.1 - 2020-11-24 ## 0.4.1 - 2020-11-24
- Clarify order of parameters in `Files::new` and improve docs. * Clarify order of parameters in `Files::new` and improve docs.
## 0.4.0 - 2020-10-06 ## 0.4.0 - 2020-10-06
- Add `Files::prefer_utf8` option that adds UTF-8 charset on certain response types. [#1714] * Add `Files::prefer_utf8` option that adds UTF-8 charset on certain response types. [#1714]
[#1714]: https://github.com/actix/actix-web/pull/1714 [#1714]: https://github.com/actix/actix-web/pull/1714
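A minimal sketch of enabling the option; the mount point is a placeholder:

```rust
use actix_files::Files;

// Serve the current directory under `/static`, adding a UTF-8 charset to the
// Content-Type of text responses.
fn utf8_files() -> Files {
    Files::new("/static", ".").prefer_utf8(true)
}
```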
## 0.3.0 - 2020-09-11 ## 0.3.0 - 2020-09-11
- No significant changes from 0.3.0-beta.1. * No significant changes from 0.3.0-beta.1.
## 0.3.0-beta.1 - 2020-07-15 ## 0.3.0-beta.1 - 2020-07-15
- Update `v_htmlescape` to 0.10 * Update `v_htmlescape` to 0.10
- Update `actix-web` and `actix-http` dependencies to beta.1 * Update `actix-web` and `actix-http` dependencies to beta.1
## 0.3.0-alpha.1 - 2020-05-23 ## 0.3.0-alpha.1 - 2020-05-23
- Update `actix-web` and `actix-http` dependencies to alpha * Update `actix-web` and `actix-http` dependencies to alpha
- Fix some typos in the docs * Fix some typos in the docs
- Bump minimum supported Rust version to 1.40 * Bump minimum supported Rust version to 1.40
- Support sending Content-Length when Content-Range is specified [#1384] * Support sending Content-Length when Content-Range is specified [#1384]
[#1384]: https://github.com/actix/actix-web/pull/1384 [#1384]: https://github.com/actix/actix-web/pull/1384
## 0.2.1 - 2019-12-22 ## 0.2.1 - 2019-12-22
- Use the same format for file URLs regardless of platforms * Use the same format for file URLs regardless of platforms
## 0.2.0 - 2019-12-20 ## 0.2.0 - 2019-12-20
- Fix BodyEncoding trait import #1220 * Fix BodyEncoding trait import #1220
## 0.2.0-alpha.1 - 2019-12-07 ## 0.2.0-alpha.1 - 2019-12-07
- Migrate to `std::future` * Migrate to `std::future`
## 0.1.7 - 2019-11-06 ## 0.1.7 - 2019-11-06
- Add an additional `filename*` param in the `Content-Disposition` header of * Add an additional `filename*` param in the `Content-Disposition` header of
`actix_files::NamedFile` to be more compatible. (#1151) `actix_files::NamedFile` to be more compatible. (#1151)
## 0.1.6 - 2019-10-14 ## 0.1.6 - 2019-10-14
- Add option to redirect to a slash-ended path `Files` #1132 * Add option to redirect to a slash-ended path `Files` #1132
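A minimal sketch using the builder method as it is named in current releases; the directory is a placeholder:

```rust
use actix_files::Files;

// A request for `/docs` is redirected to the slash-ended `/docs/`, so
// relative links inside the directory resolve correctly.
fn slash_redirect() -> Files {
    Files::new("/", "./static").redirect_to_slash_directory()
}
```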
## 0.1.5 - 2019-10-08 ## 0.1.5 - 2019-10-08
- Bump up `mime_guess` crate version to 2.0.1 * Bump up `mime_guess` crate version to 2.0.1
- Bump up `percent-encoding` crate version to 2.1 * Bump up `percent-encoding` crate version to 2.1
- Allow user defined request guards for `Files` #1113 * Allow user defined request guards for `Files` #1113
## 0.1.4 - 2019-07-20 ## 0.1.4 - 2019-07-20
- Allow to disable `Content-Disposition` header #686 * Allow to disable `Content-Disposition` header #686
## 0.1.3 - 2019-06-28 ## 0.1.3 - 2019-06-28
- Do not set `Content-Length` header, let actix-http set it #930 * Do not set `Content-Length` header, let actix-http set it #930
## 0.1.2 - 2019-06-13 ## 0.1.2 - 2019-06-13
- Content-Length is 0 for NamedFile HEAD request #914 * Content-Length is 0 for NamedFile HEAD request #914
- Fix ring dependency from actix-web default features for #741 * Fix ring dependency from actix-web default features for #741
## 0.1.1 - 2019-06-01 ## 0.1.1 - 2019-06-01
- Static files are incorrectly served as both chunked and with length #812 * Static files are incorrectly served as both chunked and with length #812
## 0.1.0 - 2019-05-25 ## 0.1.0 - 2019-05-25
- NamedFile last-modified check always fails due to nano-seconds in file modified date #820 * NamedFile last-modified check always fails due to nano-seconds in file modified date #820
## 0.1.0-beta.4 - 2019-05-12 ## 0.1.0-beta.4 - 2019-05-12
- Update actix-web to beta.4 * Update actix-web to beta.4
## 0.1.0-beta.1 - 2019-04-20 ## 0.1.0-beta.1 - 2019-04-20
- Update actix-web to beta.1 * Update actix-web to beta.1
## 0.1.0-alpha.6 - 2019-04-14 ## 0.1.0-alpha.6 - 2019-04-14
- Update actix-web to alpha6 * Update actix-web to alpha6
## 0.1.0-alpha.4 - 2019-04-08 ## 0.1.0-alpha.4 - 2019-04-08
- Update actix-web to alpha4 * Update actix-web to alpha4
## 0.1.0-alpha.2 - 2019-04-02 ## 0.1.0-alpha.2 - 2019-04-02
- Add default handler support * Add default handler support
## 0.1.0-alpha.1 - 2019-03-28 ## 0.1.0-alpha.1 - 2019-03-28
- Initial impl * Initial impl
View File
@ -1,15 +1,13 @@
[package] [package]
name = "actix-files" name = "actix-files"
version = "0.6.0-beta.13" version = "0.4.1"
authors = [ authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
"Nikolay Kim <fafhrd91@gmail.com>",
"fakeshadow <24548779@qq.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "Static file serving for Actix Web" description = "Static file serving for Actix Web"
readme = "README.md"
keywords = ["actix", "http", "async", "futures"] keywords = ["actix", "http", "async", "futures"]
homepage = "https://actix.rs" homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web" repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-files/"
categories = ["asynchronous", "web-programming::http-server"] categories = ["asynchronous", "web-programming::http-server"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
edition = "2018" edition = "2018"
@ -18,31 +16,20 @@ edition = "2018"
name = "actix_files" name = "actix_files"
path = "src/lib.rs" path = "src/lib.rs"
[features]
experimental-io-uring = ["actix-web/experimental-io-uring", "tokio-uring"]
[dependencies] [dependencies]
actix-http = "3.0.0-beta.18" actix-web = { version = "3.0.0", default-features = false }
actix-service = "2" actix-service = "1.0.6"
actix-utils = "3"
actix-web = { version = "4.0.0-beta.19", default-features = false }
askama_escape = "0.10"
bitflags = "1" bitflags = "1"
bytes = "1" bytes = "0.5.3"
derive_more = "0.99.5" futures-core = { version = "0.3.7", default-features = false }
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] } futures-util = { version = "0.3.7", default-features = false }
http-range = "0.1.4" derive_more = "0.99.2"
log = "0.4" log = "0.4"
mime = "0.3" mime = "0.3"
mime_guess = "2.0.1" mime_guess = "2.0.1"
percent-encoding = "2.1" percent-encoding = "2.1"
pin-project-lite = "0.2.7" v_htmlescape = "0.11"
tokio-uring = { version = "0.1", optional = true }
[dev-dependencies] [dev-dependencies]
actix-rt = "2.2" actix-rt = "1.0.0"
actix-test = "0.1.0-beta.11" actix-web = { version = "3.0.0", features = ["openssl"] }
actix-web = "4.0.0-beta.19"
tempfile = "3.2"
View File
@ -3,16 +3,17 @@
> Static file serving for Actix Web > Static file serving for Actix Web
[![crates.io](https://img.shields.io/crates/v/actix-files?label=latest)](https://crates.io/crates/actix-files) [![crates.io](https://img.shields.io/crates/v/actix-files?label=latest)](https://crates.io/crates/actix-files)
[![Documentation](https://docs.rs/actix-files/badge.svg?version=0.6.0-beta.13)](https://docs.rs/actix-files/0.6.0-beta.13) [![Documentation](https://docs.rs/actix-files/badge.svg?version=0.4.1)](https://docs.rs/actix-files/0.4.1)
[![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html) [![Version](https://img.shields.io/badge/rustc-1.42+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.42.html)
![License](https://img.shields.io/crates/l/actix-files.svg) ![License](https://img.shields.io/crates/l/actix-files.svg)
<br /> <br />
[![dependency status](https://deps.rs/crate/actix-files/0.6.0-beta.13/status.svg)](https://deps.rs/crate/actix-files/0.6.0-beta.13) [![dependency status](https://deps.rs/crate/actix-files/0.4.1/status.svg)](https://deps.rs/crate/actix-files/0.4.1)
[![Download](https://img.shields.io/crates/d/actix-files.svg)](https://crates.io/crates/actix-files) [![Download](https://img.shields.io/crates/d/actix-files.svg)](https://crates.io/crates/actix-files)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
## Documentation & Resources ## Documentation & Resources
- [API Documentation](https://docs.rs/actix-files/) - [API Documentation](https://docs.rs/actix-files/)
- [Example Project](https://github.com/actix/examples/tree/master/basics/static_index) - [Example Project](https://github.com/actix/examples/tree/master/static_index)
- Minimum Supported Rust Version (MSRV): 1.54 - [Chat on Gitter](https://gitter.im/actix/actix-web)
- Minimum supported Rust version: 1.42 or later
View File
@ -1,277 +1,94 @@
use std::{ use std::{
cmp, fmt, cmp, fmt,
fs::File,
future::Future, future::Future,
io, io::{self, Read, Seek},
pin::Pin, pin::Pin,
task::{Context, Poll}, task::{Context, Poll},
}; };
use actix_web::{error::Error, web::Bytes}; use actix_web::{
error::{BlockingError, Error},
web,
};
use bytes::Bytes;
use futures_core::{ready, Stream}; use futures_core::{ready, Stream};
use pin_project_lite::pin_project; use futures_util::future::{FutureExt, LocalBoxFuture};
use super::named::File; use crate::handle_error;
pin_project! { type ChunkedBoxFuture =
/// Adapter to read a `std::fs::File` in chunks. LocalBoxFuture<'static, Result<(File, Bytes), BlockingError<io::Error>>>;
#[doc(hidden)]
pub struct ChunkedReadFile<F, Fut> { #[doc(hidden)]
size: u64, /// A helper created from a `std::fs::File` which reads the file
offset: u64, /// chunk-by-chunk on a `ThreadPool`.
#[pin] pub struct ChunkedReadFile {
state: ChunkedReadFileState<Fut>, pub(crate) size: u64,
counter: u64, pub(crate) offset: u64,
callback: F, pub(crate) file: Option<File>,
} pub(crate) fut: Option<ChunkedBoxFuture>,
pub(crate) counter: u64,
} }
#[cfg(not(feature = "experimental-io-uring"))] impl fmt::Debug for ChunkedReadFile {
pin_project! {
#[project = ChunkedReadFileStateProj]
#[project_replace = ChunkedReadFileStateProjReplace]
enum ChunkedReadFileState<Fut> {
File { file: Option<File>, },
Future { #[pin] fut: Fut },
}
}
#[cfg(feature = "experimental-io-uring")]
pin_project! {
#[project = ChunkedReadFileStateProj]
#[project_replace = ChunkedReadFileStateProjReplace]
enum ChunkedReadFileState<Fut> {
File { file: Option<(File, BytesMut)> },
Future { #[pin] fut: Fut },
}
}
impl<F, Fut> fmt::Debug for ChunkedReadFile<F, Fut> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("ChunkedReadFile") f.write_str("ChunkedReadFile")
} }
} }
pub(crate) fn new_chunked_read( impl Stream for ChunkedReadFile {
size: u64, type Item = Result<Bytes, Error>;
offset: u64,
file: File, fn poll_next(
) -> impl Stream<Item = Result<Bytes, Error>> { mut self: Pin<&mut Self>,
ChunkedReadFile { cx: &mut Context<'_>,
size, ) -> Poll<Option<Self::Item>> {
offset, if let Some(ref mut fut) = self.fut {
#[cfg(not(feature = "experimental-io-uring"))] return match ready!(Pin::new(fut).poll(cx)) {
state: ChunkedReadFileState::File { file: Some(file) }, Ok((file, bytes)) => {
#[cfg(feature = "experimental-io-uring")] self.fut.take();
state: ChunkedReadFileState::File { self.file = Some(file);
file: Some((file, BytesMut::new())),
}, self.offset += bytes.len() as u64;
counter: 0, self.counter += bytes.len() as u64;
callback: chunked_read_file_callback,
Poll::Ready(Some(Ok(bytes)))
}
Err(e) => Poll::Ready(Some(Err(handle_error(e)))),
};
} }
}
#[cfg(not(feature = "experimental-io-uring"))] let size = self.size;
async fn chunked_read_file_callback( let offset = self.offset;
mut file: File, let counter = self.counter;
offset: u64,
max_bytes: usize, if size == counter {
) -> Result<(File, Bytes), Error> { Poll::Ready(None)
use io::{Read as _, Seek as _}; } else {
let mut file = self.file.take().expect("Use after completion");
self.fut = Some(
web::block(move || {
let max_bytes =
cmp::min(size.saturating_sub(counter), 65_536) as usize;
let res = actix_web::rt::task::spawn_blocking(move || {
let mut buf = Vec::with_capacity(max_bytes); let mut buf = Vec::with_capacity(max_bytes);
file.seek(io::SeekFrom::Start(offset))?; file.seek(io::SeekFrom::Start(offset))?;
let n_bytes = file.by_ref().take(max_bytes as u64).read_to_end(&mut buf)?; let n_bytes =
file.by_ref().take(max_bytes as u64).read_to_end(&mut buf)?;
if n_bytes == 0 {
Err(io::Error::from(io::ErrorKind::UnexpectedEof))
} else {
Ok((file, Bytes::from(buf)))
}
})
.await
.map_err(|_| actix_web::error::BlockingError)??;
Ok(res)
}
#[cfg(feature = "experimental-io-uring")]
async fn chunked_read_file_callback(
file: File,
offset: u64,
max_bytes: usize,
mut bytes_mut: BytesMut,
) -> io::Result<(File, Bytes, BytesMut)> {
bytes_mut.reserve(max_bytes);
let (res, mut bytes_mut) = file.read_at(bytes_mut, offset).await;
let n_bytes = res?;
if n_bytes == 0 { if n_bytes == 0 {
return Err(io::ErrorKind::UnexpectedEof.into()); return Err(io::ErrorKind::UnexpectedEof.into());
} }
let bytes = bytes_mut.split_to(n_bytes).freeze(); Ok((file, Bytes::from(buf)))
})
Ok((file, bytes, bytes_mut)) .boxed_local(),
} );
#[cfg(feature = "experimental-io-uring")]
impl<F, Fut> Stream for ChunkedReadFile<F, Fut>
where
F: Fn(File, u64, usize, BytesMut) -> Fut,
Fut: Future<Output = io::Result<(File, Bytes, BytesMut)>>,
{
type Item = Result<Bytes, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
ChunkedReadFileStateProj::File { file } => {
let size = *this.size;
let offset = *this.offset;
let counter = *this.counter;
if size == counter {
Poll::Ready(None)
} else {
let max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize;
let (file, bytes_mut) = file
.take()
.expect("ChunkedReadFile polled after completion");
let fut = (this.callback)(file, offset, max_bytes, bytes_mut);
this.state
.project_replace(ChunkedReadFileState::Future { fut });
self.poll_next(cx) self.poll_next(cx)
} }
} }
ChunkedReadFileStateProj::Future { fut } => {
let (file, bytes, bytes_mut) = ready!(fut.poll(cx))?;
this.state.project_replace(ChunkedReadFileState::File {
file: Some((file, bytes_mut)),
});
*this.offset += bytes.len() as u64;
*this.counter += bytes.len() as u64;
Poll::Ready(Some(Ok(bytes)))
}
}
}
}
#[cfg(not(feature = "experimental-io-uring"))]
impl<F, Fut> Stream for ChunkedReadFile<F, Fut>
where
F: Fn(File, u64, usize) -> Fut,
Fut: Future<Output = Result<(File, Bytes), Error>>,
{
type Item = Result<Bytes, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
ChunkedReadFileStateProj::File { file } => {
let size = *this.size;
let offset = *this.offset;
let counter = *this.counter;
if size == counter {
Poll::Ready(None)
} else {
let max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize;
let file = file
.take()
.expect("ChunkedReadFile polled after completion");
let fut = (this.callback)(file, offset, max_bytes);
this.state
.project_replace(ChunkedReadFileState::Future { fut });
self.poll_next(cx)
}
}
ChunkedReadFileStateProj::Future { fut } => {
let (file, bytes) = ready!(fut.poll(cx))?;
this.state
.project_replace(ChunkedReadFileState::File { file: Some(file) });
*this.offset += bytes.len() as u64;
*this.counter += bytes.len() as u64;
Poll::Ready(Some(Ok(bytes)))
}
}
}
}
#[cfg(feature = "experimental-io-uring")]
use bytes_mut::BytesMut;
// TODO: remove new type and use bytes::BytesMut directly
#[doc(hidden)]
#[cfg(feature = "experimental-io-uring")]
mod bytes_mut {
use std::ops::{Deref, DerefMut};
use tokio_uring::buf::{IoBuf, IoBufMut};
#[derive(Debug)]
pub struct BytesMut(bytes::BytesMut);
impl BytesMut {
pub(super) fn new() -> Self {
Self(bytes::BytesMut::new())
}
}
impl Deref for BytesMut {
type Target = bytes::BytesMut;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for BytesMut {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
unsafe impl IoBuf for BytesMut {
fn stable_ptr(&self) -> *const u8 {
self.0.as_ptr()
}
fn bytes_init(&self) -> usize {
self.0.len()
}
fn bytes_total(&self) -> usize {
self.0.capacity()
}
}
unsafe impl IoBufMut for BytesMut {
fn stable_mut_ptr(&mut self) -> *mut u8 {
self.0.as_mut_ptr()
}
unsafe fn set_init(&mut self, init_len: usize) {
if self.len() < init_len {
self.0.set_len(init_len);
}
}
}
} }
View File
@ -1,8 +1,8 @@
use std::{fmt::Write, fs::DirEntry, io, path::Path, path::PathBuf}; use std::{fmt::Write, fs::DirEntry, io, path::Path, path::PathBuf};
use actix_web::{dev::ServiceResponse, HttpRequest, HttpResponse}; use actix_web::{dev::ServiceResponse, HttpRequest, HttpResponse};
use askama_escape::{escape as escape_html_entity, Html};
use percent_encoding::{utf8_percent_encode, CONTROLS}; use percent_encoding::{utf8_percent_encode, CONTROLS};
use v_htmlescape::escape as escape_html_entity;
/// A directory; responds with the generated directory listing. /// A directory; responds with the generated directory listing.
#[derive(Debug)] #[derive(Debug)]
@ -40,26 +40,17 @@ impl Directory {
pub(crate) type DirectoryRenderer = pub(crate) type DirectoryRenderer =
dyn Fn(&Directory, &HttpRequest) -> Result<ServiceResponse, io::Error>; dyn Fn(&Directory, &HttpRequest) -> Result<ServiceResponse, io::Error>;
/// Returns percent encoded file URL path. // show file url as relative to static path
macro_rules! encode_file_url { macro_rules! encode_file_url {
($path:ident) => { ($path:ident) => {
utf8_percent_encode(&$path, CONTROLS) utf8_percent_encode(&$path, CONTROLS)
}; };
} }
/// Returns HTML entity encoded formatter. // " -- &quot; & -- &amp; ' -- &#x27; < -- &lt; > -- &gt; / -- &#x2f;
///
/// ```plain
/// " => &quot;
/// & => &amp;
/// ' => &#x27;
/// < => &lt;
/// > => &gt;
/// / => &#x2f;
/// ```
macro_rules! encode_file_name { macro_rules! encode_file_name {
($entry:ident) => { ($entry:ident) => {
escape_html_entity(&$entry.file_name().to_string_lossy(), Html) escape_html_entity(&$entry.file_name().to_string_lossy())
}; };
} }
@ -75,7 +66,9 @@ pub(crate) fn directory_listing(
if dir.is_visible(&entry) { if dir.is_visible(&entry) {
let entry = entry.unwrap(); let entry = entry.unwrap();
let p = match entry.path().strip_prefix(&dir.path) { let p = match entry.path().strip_prefix(&dir.path) {
Ok(p) if cfg!(windows) => base.join(p).to_string_lossy().replace("\\", "/"), Ok(p) if cfg!(windows) => {
base.join(p).to_string_lossy().replace("\\", "/")
}
Ok(p) => base.join(p).to_string_lossy().into_owned(), Ok(p) => base.join(p).to_string_lossy().into_owned(),
Err(_) => continue, Err(_) => continue,
}; };
View File
@ -1,4 +1,4 @@
use actix_web::{http::StatusCode, ResponseError}; use actix_web::{http::StatusCode, HttpResponse, ResponseError};
use derive_more::Display; use derive_more::Display;
/// Errors which can occur when serving static files. /// Errors which can occur when serving static files.
@ -16,30 +16,22 @@ pub enum FilesError {
/// Return `NotFound` for `FilesError` /// Return `NotFound` for `FilesError`
impl ResponseError for FilesError { impl ResponseError for FilesError {
fn status_code(&self) -> StatusCode { fn error_response(&self) -> HttpResponse {
StatusCode::NOT_FOUND HttpResponse::new(StatusCode::NOT_FOUND)
} }
} }
#[allow(clippy::enum_variant_names)]
#[derive(Display, Debug, PartialEq)] #[derive(Display, Debug, PartialEq)]
#[non_exhaustive]
pub enum UriSegmentError { pub enum UriSegmentError {
/// The segment started with the wrapped invalid character. /// The segment started with the wrapped invalid character.
#[display(fmt = "The segment started with the wrapped invalid character")] #[display(fmt = "The segment started with the wrapped invalid character")]
BadStart(char), BadStart(char),
/// The segment contained the wrapped invalid character. /// The segment contained the wrapped invalid character.
#[display(fmt = "The segment contained the wrapped invalid character")] #[display(fmt = "The segment contained the wrapped invalid character")]
BadChar(char), BadChar(char),
/// The segment ended with the wrapped invalid character. /// The segment ended with the wrapped invalid character.
#[display(fmt = "The segment ended with the wrapped invalid character")] #[display(fmt = "The segment ended with the wrapped invalid character")]
BadEnd(char), BadEnd(char),
/// The path is not a valid UTF-8 string after doing percent decoding.
#[display(fmt = "The path is not a valid UTF-8 string after percent-decoding")]
NotValidUtf8,
} }
/// Return `BadRequest` for `UriSegmentError` /// Return `BadRequest` for `UriSegmentError`
View File
@ -1,35 +1,27 @@
use std::{ use std::{cell::RefCell, fmt, io, path::PathBuf, rc::Rc};
cell::RefCell,
fmt, io,
path::{Path, PathBuf},
rc::Rc,
};
use actix_service::{boxed, IntoServiceFactory, ServiceFactory, ServiceFactoryExt}; use actix_service::{boxed, IntoServiceFactory, ServiceFactory};
use actix_web::{ use actix_web::{
dev::{ dev::{
AppService, HttpServiceFactory, RequestHead, ResourceDef, ServiceRequest, AppService, HttpServiceFactory, ResourceDef, ServiceRequest, ServiceResponse,
ServiceResponse,
}, },
error::Error, error::Error,
guard::Guard, guard::Guard,
http::header::DispositionType, http::header::DispositionType,
HttpRequest, HttpRequest,
}; };
use futures_core::future::LocalBoxFuture; use futures_util::future::{ok, FutureExt, LocalBoxFuture};
use crate::{ use crate::{
directory_listing, named, directory_listing, named, Directory, DirectoryRenderer, FilesService,
service::{FilesService, FilesServiceInner}, HttpNewService, MimeOverride,
Directory, DirectoryRenderer, HttpNewService, MimeOverride, PathFilter,
}; };
/// Static files handling service. /// Static files handling service.
/// ///
/// `Files` service must be registered with `App::service()` method. /// `Files` service must be registered with `App::service()` method.
/// ///
/// # Examples /// ```rust
/// ```
/// use actix_web::App; /// use actix_web::App;
/// use actix_files::Files; /// use actix_files::Files;
/// ///
@ -45,11 +37,8 @@ pub struct Files {
default: Rc<RefCell<Option<Rc<HttpNewService>>>>, default: Rc<RefCell<Option<Rc<HttpNewService>>>>,
renderer: Rc<DirectoryRenderer>, renderer: Rc<DirectoryRenderer>,
mime_override: Option<Rc<MimeOverride>>, mime_override: Option<Rc<MimeOverride>>,
path_filter: Option<Rc<PathFilter>>,
file_flags: named::Flags, file_flags: named::Flags,
use_guards: Option<Rc<dyn Guard>>, guards: Option<Rc<dyn Guard>>,
guards: Vec<Rc<dyn Guard>>,
hidden_files: bool,
} }
impl fmt::Debug for Files { impl fmt::Debug for Files {
@ -70,10 +59,7 @@ impl Clone for Files {
file_flags: self.file_flags, file_flags: self.file_flags,
path: self.path.clone(), path: self.path.clone(),
mime_override: self.mime_override.clone(), mime_override: self.mime_override.clone(),
path_filter: self.path_filter.clone(),
use_guards: self.use_guards.clone(),
guards: self.guards.clone(), guards: self.guards.clone(),
hidden_files: self.hidden_files,
} }
} }
} }
@ -93,9 +79,9 @@ impl Files {
/// If the mount path is set as the root path `/`, services registered after this one will /// If the mount path is set as the root path `/`, services registered after this one will
/// be inaccessible. Register more specific handlers and services first. /// be inaccessible. Register more specific handlers and services first.
/// ///
/// `Files` utilizes the existing Tokio thread-pool for blocking filesystem operations. /// `Files` uses a threadpool for blocking filesystem operations. By default, the pool uses a
/// The number of running threads is adjusted over time as needed, up to a maximum of 512 times /// number of threads equal to 5x the number of available logical CPUs. Pool size can be changed
/// the number of server [workers](actix_web::HttpServer::workers), by default. /// by setting ACTIX_THREADPOOL environment variable.
pub fn new<T: Into<PathBuf>>(mount_path: &str, serve_from: T) -> Files { pub fn new<T: Into<PathBuf>>(mount_path: &str, serve_from: T) -> Files {
let orig_dir = serve_from.into(); let orig_dir = serve_from.into();
let dir = match orig_dir.canonicalize() { let dir = match orig_dir.canonicalize() {
@ -107,7 +93,7 @@ impl Files {
}; };
Files { Files {
path: mount_path.trim_end_matches('/').to_owned(), path: mount_path.to_owned(),
directory: dir, directory: dir,
index: None, index: None,
show_index: false, show_index: false,
@ -115,20 +101,14 @@ impl Files {
default: Rc::new(RefCell::new(None)), default: Rc::new(RefCell::new(None)),
renderer: Rc::new(directory_listing), renderer: Rc::new(directory_listing),
mime_override: None, mime_override: None,
path_filter: None,
file_flags: named::Flags::default(), file_flags: named::Flags::default(),
use_guards: None, guards: None,
guards: Vec::new(),
hidden_files: false,
} }
} }
/// Show files listing for directories. /// Show files listing for directories.
/// ///
/// By default show files listing is disabled. /// By default show files listing is disabled.
///
/// When used with [`Files::index_file()`], files listing is shown as a fallback
/// when the index file is not found.
pub fn show_files_listing(mut self) -> Self { pub fn show_files_listing(mut self) -> Self {
self.show_index = true; self.show_index = true;
self self
@ -145,8 +125,8 @@ impl Files {
/// Set custom directory renderer /// Set custom directory renderer
pub fn files_listing_renderer<F>(mut self, f: F) -> Self pub fn files_listing_renderer<F>(mut self, f: F) -> Self
where where
for<'r, 's> F: for<'r, 's> F: Fn(&'r Directory, &'s HttpRequest) -> Result<ServiceResponse, io::Error>
Fn(&'r Directory, &'s HttpRequest) -> Result<ServiceResponse, io::Error> + 'static, + 'static,
{ {
self.renderer = Rc::new(f); self.renderer = Rc::new(f);
self self
@ -161,45 +141,10 @@ impl Files {
self self
} }
/// Sets path filtering closure.
///
/// The path provided to the closure is relative to `serve_from` path.
/// You can safely join this path with the `serve_from` path to get the real path.
/// However, the real path may not exist since the filter is called before checking path existence.
///
/// When a path doesn't pass the filter, [`Files::default_handler`] is called if set, otherwise,
/// `404 Not Found` is returned.
///
/// # Examples
/// ```
/// use std::path::Path;
/// use actix_files::Files;
///
/// // prevent searching subdirectories and following symlinks
/// let files_service = Files::new("/", "./static").path_filter(|path, _| {
/// path.components().count() == 1
/// && Path::new("./static")
/// .join(path)
/// .symlink_metadata()
/// .map(|m| !m.file_type().is_symlink())
/// .unwrap_or(false)
/// });
/// ```
pub fn path_filter<F>(mut self, f: F) -> Self
where
F: Fn(&Path, &RequestHead) -> bool + 'static,
{
self.path_filter = Some(Rc::new(f));
self
}
/// Set index file /// Set index file
/// ///
/// Shows specific index file for directories instead of /// Shows specific index file for directory "/" instead of
/// showing files listing. /// showing files listing.
///
/// If the index file is not found, files listing is shown as a fallback if
/// [`Files::show_files_listing()`] is set.
pub fn index_file<T: Into<String>>(mut self, index: T) -> Self { pub fn index_file<T: Into<String>>(mut self, index: T) -> Self {
self.index = Some(index.into()); self.index = Some(index.into());
self self
@ -208,6 +153,7 @@ impl Files {
/// Specifies whether to use ETag or not. /// Specifies whether to use ETag or not.
/// ///
/// Default is true. /// Default is true.
#[inline]
pub fn use_etag(mut self, value: bool) -> Self { pub fn use_etag(mut self, value: bool) -> Self {
self.file_flags.set(named::Flags::ETAG, value); self.file_flags.set(named::Flags::ETAG, value);
self self
@ -216,6 +162,7 @@ impl Files {
/// Specifies whether to use Last-Modified or not. /// Specifies whether to use Last-Modified or not.
/// ///
/// Default is true. /// Default is true.
#[inline]
pub fn use_last_modified(mut self, value: bool) -> Self { pub fn use_last_modified(mut self, value: bool) -> Self {
self.file_flags.set(named::Flags::LAST_MD, value); self.file_flags.set(named::Flags::LAST_MD, value);
self self
@ -224,86 +171,37 @@ impl Files {
/// Specifies whether text responses should signal a UTF-8 encoding. /// Specifies whether text responses should signal a UTF-8 encoding.
/// ///
/// Default is false (but will default to true in a future version). /// Default is false (but will default to true in a future version).
#[inline]
pub fn prefer_utf8(mut self, value: bool) -> Self { pub fn prefer_utf8(mut self, value: bool) -> Self {
self.file_flags.set(named::Flags::PREFER_UTF8, value); self.file_flags.set(named::Flags::PREFER_UTF8, value);
self self
} }
/// Adds a routing guard. /// Specifies custom guards to use for directory listings and files.
/// ///
/// Use this to allow multiple chained file services that respond to strictly different /// Default behaviour allows GET and HEAD.
/// properties of a request. Due to the way routing works, if a guard check returns true and the #[inline]
/// request starts being handled by the file service, it will not be able to back-out and try pub fn use_guards<G: Guard + 'static>(mut self, guards: G) -> Self {
/// the next service, you will simply get a 404 (or 405) error response. self.guards = Some(Rc::new(guards));
///
/// To allow `POST` requests to retrieve files, see [`Files::use_guards`].
///
/// # Examples
/// ```
/// use actix_web::{guard::Header, App};
/// use actix_files::Files;
///
/// App::new().service(
/// Files::new("/","/my/site/files")
/// .guard(Header("Host", "example.com"))
/// );
/// ```
pub fn guard<G: Guard + 'static>(mut self, guard: G) -> Self {
self.guards.push(Rc::new(guard));
self self
} }
/// Specifies guard to check before fetching directory listings or files.
///
/// Note that this guard has no effect on routing; its main use is to guard on the request's
/// method just before serving the file, only allowing `GET` and `HEAD` requests by default.
/// See [`Files::guard`] for routing guards.
pub fn method_guard<G: Guard + 'static>(mut self, guard: G) -> Self {
self.use_guards = Some(Rc::new(guard));
self
}
/// See [`Files::method_guard`].
#[doc(hidden)]
#[deprecated(since = "0.6.0", note = "Renamed to `method_guard`.")]
pub fn use_guards<G: Guard + 'static>(self, guard: G) -> Self {
self.method_guard(guard)
}
/// Disable `Content-Disposition` header. /// Disable `Content-Disposition` header.
/// ///
/// By default the `Content-Disposition` header is enabled. /// By default the `Content-Disposition` header is enabled.
#[inline]
pub fn disable_content_disposition(mut self) -> Self { pub fn disable_content_disposition(mut self) -> Self {
self.file_flags.remove(named::Flags::CONTENT_DISPOSITION); self.file_flags.remove(named::Flags::CONTENT_DISPOSITION);
self self
} }
/// Sets default handler which is used when no matched file could be found. /// Sets default handler which is used when no matched file could be found.
///
/// # Examples
/// Setting a fallback static file handler:
/// ```
/// use actix_files::{Files, NamedFile};
/// use actix_web::dev::{ServiceRequest, ServiceResponse, fn_service};
///
/// # fn run() -> Result<(), actix_web::Error> {
/// let files = Files::new("/", "./static")
/// .index_file("index.html")
/// .default_handler(fn_service(|req: ServiceRequest| async {
/// let (req, _) = req.into_parts();
/// let file = NamedFile::open_async("./static/404.html").await?;
/// let res = file.into_response(&req);
/// Ok(ServiceResponse::new(req, res))
/// }));
/// # Ok(())
/// # }
/// ```
pub fn default_handler<F, U>(mut self, f: F) -> Self pub fn default_handler<F, U>(mut self, f: F) -> Self
where where
F: IntoServiceFactory<U, ServiceRequest>, F: IntoServiceFactory<U>,
U: ServiceFactory< U: ServiceFactory<
ServiceRequest,
Config = (), Config = (),
Request = ServiceRequest,
Response = ServiceResponse, Response = ServiceResponse,
Error = Error, Error = Error,
> + 'static, > + 'static,
@ -315,28 +213,10 @@ impl Files {
self self
} }
/// Enables serving hidden files and directories, allowing leading dots in URL path segments.
pub fn use_hidden_files(mut self) -> Self {
self.hidden_files = true;
self
}
} }
impl HttpServiceFactory for Files { impl HttpServiceFactory for Files {
fn register(mut self, config: &mut AppService) { fn register(self, config: &mut AppService) {
let guards = if self.guards.is_empty() {
None
} else {
let guards = std::mem::take(&mut self.guards);
Some(
guards
.into_iter()
.map(|guard| -> Box<dyn Guard> { Box::new(guard) })
.collect::<Vec<_>>(),
)
};
if self.default.borrow().is_none() { if self.default.borrow().is_none() {
*self.default.borrow_mut() = Some(config.default_service()); *self.default.borrow_mut() = Some(config.default_service());
} }
@ -347,11 +227,12 @@ impl HttpServiceFactory for Files {
ResourceDef::prefix(&self.path) ResourceDef::prefix(&self.path)
}; };
config.register_service(rdef, guards, self, None) config.register_service(rdef, None, self, None)
} }
} }
impl ServiceFactory<ServiceRequest> for Files { impl ServiceFactory for Files {
type Request = ServiceRequest;
type Response = ServiceResponse; type Response = ServiceResponse;
type Error = Error; type Error = Error;
type Config = (); type Config = ();
@ -360,7 +241,7 @@ impl ServiceFactory<ServiceRequest> for Files {
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>; type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future { fn new_service(&self, _: ()) -> Self::Future {
let mut inner = FilesServiceInner { let mut srv = FilesService {
directory: self.directory.clone(), directory: self.directory.clone(),
index: self.index.clone(), index: self.index.clone(),
show_index: self.show_index, show_index: self.show_index,
@ -368,25 +249,23 @@ impl ServiceFactory<ServiceRequest> for Files {
default: None, default: None,
renderer: self.renderer.clone(), renderer: self.renderer.clone(),
mime_override: self.mime_override.clone(), mime_override: self.mime_override.clone(),
path_filter: self.path_filter.clone(),
file_flags: self.file_flags, file_flags: self.file_flags,
guards: self.use_guards.clone(), guards: self.guards.clone(),
hidden_files: self.hidden_files,
}; };
if let Some(ref default) = *self.default.borrow() { if let Some(ref default) = *self.default.borrow() {
let fut = default.new_service(()); default
Box::pin(async { .new_service(())
match fut.await { .map(move |result| match result {
Ok(default) => { Ok(default) => {
inner.default = Some(default); srv.default = Some(default);
Ok(FilesService(Rc::new(inner))) Ok(srv)
} }
Err(_) => Err(()), Err(_) => Err(()),
}
}) })
.boxed_local()
} else { } else {
Box::pin(async move { Ok(FilesService(Rc::new(inner))) }) ok(srv).boxed_local()
} }
} }
} }
View File
@ -3,7 +3,7 @@
//! Provides a non-blocking service for serving static files from disk. //! Provides a non-blocking service for serving static files from disk.
//! //!
//! # Example //! # Example
//! ``` //! ```rust
//! use actix_web::App; //! use actix_web::App;
//! use actix_files::Files; //! use actix_files::Files;
//! //!
@ -11,17 +11,18 @@
//! .service(Files::new("/static", ".").prefer_utf8(true)); //! .service(Files::new("/static", ".").prefer_utf8(true));
//! ``` //! ```
#![deny(rust_2018_idioms, nonstandard_style)] #![deny(rust_2018_idioms)]
#![warn(future_incompatible, missing_docs, missing_debug_implementations)] #![warn(missing_docs, missing_debug_implementations)]
use std::io;
use actix_service::boxed::{BoxService, BoxServiceFactory}; use actix_service::boxed::{BoxService, BoxServiceFactory};
use actix_web::{ use actix_web::{
dev::{RequestHead, ServiceRequest, ServiceResponse}, dev::{ServiceRequest, ServiceResponse},
error::Error, error::{BlockingError, Error, ErrorInternalServerError},
http::header::DispositionType, http::header::DispositionType,
}; };
use mime_guess::from_ext; use mime_guess::from_ext;
use std::path::Path;
mod chunked; mod chunked;
mod directory; mod directory;
@ -33,12 +34,12 @@ mod path_buf;
mod range; mod range;
mod service; mod service;
pub use self::chunked::ChunkedReadFile; pub use crate::chunked::ChunkedReadFile;
pub use self::directory::Directory; pub use crate::directory::Directory;
pub use self::files::Files; pub use crate::files::Files;
pub use self::named::NamedFile; pub use crate::named::NamedFile;
pub use self::range::HttpRange; pub use crate::range::HttpRange;
pub use self::service::FilesService; pub use crate::service::FilesService;
use self::directory::{directory_listing, DirectoryRenderer}; use self::directory::{directory_listing, DirectoryRenderer};
use self::error::FilesError; use self::error::FilesError;
@ -55,14 +56,19 @@ pub fn file_extension_to_mime(ext: &str) -> mime::Mime {
from_ext(ext).first_or_octet_stream() from_ext(ext).first_or_octet_stream()
} }
type MimeOverride = dyn Fn(&mime::Name<'_>) -> DispositionType; pub(crate) fn handle_error(err: BlockingError<io::Error>) -> Error {
match err {
BlockingError::Error(err) => err.into(),
BlockingError::Canceled => ErrorInternalServerError("Unexpected error"),
}
}
type PathFilter = dyn Fn(&Path, &RequestHead) -> bool; type MimeOverride = dyn Fn(&mime::Name<'_>) -> DispositionType;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::{ use std::{
fs::{self}, fs::{self, File},
ops::Add, ops::Add,
time::{Duration, SystemTime}, time::{Duration, SystemTime},
}; };
@ -76,14 +82,13 @@ mod tests {
}, },
middleware::Compress, middleware::Compress,
test::{self, TestRequest}, test::{self, TestRequest},
web::{self, Bytes}, web, App, HttpResponse, Responder,
App, HttpResponse, Responder,
}; };
use futures_util::future::ok;
use super::*; use super::*;
use crate::named::File;
#[actix_web::test] #[actix_rt::test]
async fn test_file_extension_to_mime() { async fn test_file_extension_to_mime() {
let m = file_extension_to_mime(""); let m = file_extension_to_mime("");
assert_eq!(m, mime::APPLICATION_OCTET_STREAM); assert_eq!(m, mime::APPLICATION_OCTET_STREAM);
@ -100,23 +105,12 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_if_modified_since_without_if_none_match() { async fn test_if_modified_since_without_if_none_match() {
let file = NamedFile::open_async("Cargo.toml").await.unwrap(); let file = NamedFile::open("Cargo.toml").unwrap();
let since = header::HttpDate::from(SystemTime::now().add(Duration::from_secs(60))); let since =
header::HttpDate::from(SystemTime::now().add(Duration::from_secs(60)));
let req = TestRequest::default() let req = TestRequest::default()
.insert_header((header::IF_MODIFIED_SINCE, since)) .header(header::IF_MODIFIED_SINCE, since)
.to_http_request();
let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_MODIFIED);
}
#[actix_rt::test]
async fn test_if_modified_since_without_if_none_match_same() {
let file = NamedFile::open_async("Cargo.toml").await.unwrap();
let since = file.last_modified().unwrap();
let req = TestRequest::default()
.insert_header((header::IF_MODIFIED_SINCE, since))
.to_http_request(); .to_http_request();
let resp = file.respond_to(&req).await.unwrap(); let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_MODIFIED); assert_eq!(resp.status(), StatusCode::NOT_MODIFIED);
@ -124,45 +118,22 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_if_modified_since_with_if_none_match() { async fn test_if_modified_since_with_if_none_match() {
let file = NamedFile::open_async("Cargo.toml").await.unwrap(); let file = NamedFile::open("Cargo.toml").unwrap();
let since = header::HttpDate::from(SystemTime::now().add(Duration::from_secs(60))); let since =
header::HttpDate::from(SystemTime::now().add(Duration::from_secs(60)));
let req = TestRequest::default() let req = TestRequest::default()
.insert_header((header::IF_NONE_MATCH, "miss_etag")) .header(header::IF_NONE_MATCH, "miss_etag")
.insert_header((header::IF_MODIFIED_SINCE, since)) .header(header::IF_MODIFIED_SINCE, since)
.to_http_request(); .to_http_request();
let resp = file.respond_to(&req).await.unwrap(); let resp = file.respond_to(&req).await.unwrap();
assert_ne!(resp.status(), StatusCode::NOT_MODIFIED); assert_ne!(resp.status(), StatusCode::NOT_MODIFIED);
} }
#[actix_rt::test]
async fn test_if_unmodified_since() {
let file = NamedFile::open_async("Cargo.toml").await.unwrap();
let since = file.last_modified().unwrap();
let req = TestRequest::default()
.insert_header((header::IF_UNMODIFIED_SINCE, since))
.to_http_request();
let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
}
#[actix_rt::test]
async fn test_if_unmodified_since_failed() {
let file = NamedFile::open_async("Cargo.toml").await.unwrap();
let since = header::HttpDate::from(SystemTime::UNIX_EPOCH);
let req = TestRequest::default()
.insert_header((header::IF_UNMODIFIED_SINCE, since))
.to_http_request();
let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::PRECONDITION_FAILED);
}
#[actix_rt::test] #[actix_rt::test]
async fn test_named_file_text() { async fn test_named_file_text() {
assert!(NamedFile::open_async("test--").await.is_err()); assert!(NamedFile::open("test--").is_err());
let mut file = NamedFile::open_async("Cargo.toml").await.unwrap(); let mut file = NamedFile::open("Cargo.toml").unwrap();
{ {
file.file(); file.file();
let _f: &File = &file; let _f: &File = &file;
@ -185,8 +156,8 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_named_file_content_disposition() { async fn test_named_file_content_disposition() {
assert!(NamedFile::open_async("test--").await.is_err()); assert!(NamedFile::open("test--").is_err());
let mut file = NamedFile::open_async("Cargo.toml").await.unwrap(); let mut file = NamedFile::open("Cargo.toml").unwrap();
{ {
file.file(); file.file();
let _f: &File = &file; let _f: &File = &file;
@ -202,8 +173,7 @@ mod tests {
"inline; filename=\"Cargo.toml\"" "inline; filename=\"Cargo.toml\""
); );
let file = NamedFile::open_async("Cargo.toml") let file = NamedFile::open("Cargo.toml")
.await
.unwrap() .unwrap()
.disable_content_disposition(); .disable_content_disposition();
let req = TestRequest::default().to_http_request(); let req = TestRequest::default().to_http_request();
@ -213,19 +183,9 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_named_file_non_ascii_file_name() { async fn test_named_file_non_ascii_file_name() {
let file = { let mut file =
#[cfg(feature = "experimental-io-uring")] NamedFile::from_file(File::open("Cargo.toml").unwrap(), "貨物.toml")
{ .unwrap();
crate::named::File::open("Cargo.toml").await.unwrap()
}
#[cfg(not(feature = "experimental-io-uring"))]
{
crate::named::File::open("Cargo.toml").unwrap()
}
};
let mut file = NamedFile::from_file(file, "貨物.toml").unwrap();
{ {
file.file(); file.file();
let _f: &File = &file; let _f: &File = &file;
@ -248,8 +208,7 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_named_file_set_content_type() { async fn test_named_file_set_content_type() {
let mut file = NamedFile::open_async("Cargo.toml") let mut file = NamedFile::open("Cargo.toml")
.await
.unwrap() .unwrap()
.set_content_type(mime::TEXT_XML); .set_content_type(mime::TEXT_XML);
{ {
@ -274,7 +233,7 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_named_file_image() { async fn test_named_file_image() {
let mut file = NamedFile::open_async("tests/test.png").await.unwrap(); let mut file = NamedFile::open("tests/test.png").unwrap();
{ {
file.file(); file.file();
let _f: &File = &file; let _f: &File = &file;
@ -295,30 +254,13 @@ mod tests {
); );
} }
#[actix_rt::test]
async fn test_named_file_javascript() {
let file = NamedFile::open_async("tests/test.js").await.unwrap();
let req = TestRequest::default().to_http_request();
let resp = file.respond_to(&req).await.unwrap();
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"application/javascript"
);
assert_eq!(
resp.headers().get(header::CONTENT_DISPOSITION).unwrap(),
"inline; filename=\"test.js\""
);
}
#[actix_rt::test] #[actix_rt::test]
async fn test_named_file_image_attachment() { async fn test_named_file_image_attachment() {
let cd = ContentDisposition { let cd = ContentDisposition {
disposition: DispositionType::Attachment, disposition: DispositionType::Attachment,
parameters: vec![DispositionParam::Filename(String::from("test.png"))], parameters: vec![DispositionParam::Filename(String::from("test.png"))],
}; };
let mut file = NamedFile::open_async("tests/test.png") let mut file = NamedFile::open("tests/test.png")
.await
.unwrap() .unwrap()
.set_content_disposition(cd); .set_content_disposition(cd);
{ {
@ -343,7 +285,7 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_named_file_binary() { async fn test_named_file_binary() {
let mut file = NamedFile::open_async("tests/test.binary").await.unwrap(); let mut file = NamedFile::open("tests/test.binary").unwrap();
{ {
file.file(); file.file();
let _f: &File = &file; let _f: &File = &file;
@ -366,8 +308,7 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_named_file_status_code_text() { async fn test_named_file_status_code_text() {
let mut file = NamedFile::open_async("Cargo.toml") let mut file = NamedFile::open("Cargo.toml")
.await
.unwrap() .unwrap()
.set_status_code(StatusCode::NOT_FOUND); .set_status_code(StatusCode::NOT_FOUND);
{ {
@ -397,7 +338,7 @@ mod tests {
DispositionType::Attachment DispositionType::Attachment
} }
let srv = test::init_service( let mut srv = test::init_service(
App::new().service( App::new().service(
Files::new("/", ".") Files::new("/", ".")
.mime_override(all_attachment) .mime_override(all_attachment)
@ -407,7 +348,7 @@ mod tests {
.await; .await;
let request = TestRequest::get().uri("/").to_request(); let request = TestRequest::get().uri("/").to_request();
let response = test::call_service(&srv, request).await; let response = test::call_service(&mut srv, request).await;
assert_eq!(response.status(), StatusCode::OK); assert_eq!(response.status(), StatusCode::OK);
let content_disposition = response let content_disposition = response
@ -422,7 +363,7 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_named_file_ranges_status_code() { async fn test_named_file_ranges_status_code() {
let srv = test::init_service( let mut srv = test::init_service(
App::new().service(Files::new("/test", ".").index_file("Cargo.toml")), App::new().service(Files::new("/test", ".").index_file("Cargo.toml")),
) )
.await; .await;
@ -430,29 +371,29 @@ mod tests {
// Valid range header // Valid range header
let request = TestRequest::get() let request = TestRequest::get()
.uri("/t%65st/Cargo.toml") .uri("/t%65st/Cargo.toml")
.insert_header((header::RANGE, "bytes=10-20")) .header(header::RANGE, "bytes=10-20")
.to_request(); .to_request();
let response = test::call_service(&srv, request).await; let response = test::call_service(&mut srv, request).await;
assert_eq!(response.status(), StatusCode::PARTIAL_CONTENT); assert_eq!(response.status(), StatusCode::PARTIAL_CONTENT);
// Invalid range header // Invalid range header
let request = TestRequest::get() let request = TestRequest::get()
.uri("/t%65st/Cargo.toml") .uri("/t%65st/Cargo.toml")
.insert_header((header::RANGE, "bytes=1-0")) .header(header::RANGE, "bytes=1-0")
.to_request(); .to_request();
let response = test::call_service(&srv, request).await; let response = test::call_service(&mut srv, request).await;
assert_eq!(response.status(), StatusCode::RANGE_NOT_SATISFIABLE); assert_eq!(response.status(), StatusCode::RANGE_NOT_SATISFIABLE);
} }
#[actix_rt::test] #[actix_rt::test]
async fn test_named_file_content_range_headers() { async fn test_named_file_content_range_headers() {
let srv = actix_test::start(|| App::new().service(Files::new("/", "."))); let srv = test::start(|| App::new().service(Files::new("/", ".")));
// Valid range header // Valid range header
let response = srv let response = srv
.get("/tests/test.binary") .get("/tests/test.binary")
.insert_header((header::RANGE, "bytes=10-20")) .header(header::RANGE, "bytes=10-20")
.send() .send()
.await .await
.unwrap(); .unwrap();
@ -462,7 +403,7 @@ mod tests {
// Invalid range header // Invalid range header
let response = srv let response = srv
.get("/tests/test.binary") .get("/tests/test.binary")
.insert_header((header::RANGE, "bytes=10-5")) .header(header::RANGE, "bytes=10-5")
.send() .send()
.await .await
.unwrap(); .unwrap();
@ -472,12 +413,12 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_named_file_content_length_headers() { async fn test_named_file_content_length_headers() {
let srv = actix_test::start(|| App::new().service(Files::new("/", "."))); let srv = test::start(|| App::new().service(Files::new("/", ".")));
// Valid range header // Valid range header
let response = srv let response = srv
.get("/tests/test.binary") .get("/tests/test.binary")
.insert_header((header::RANGE, "bytes=10-20")) .header(header::RANGE, "bytes=10-20")
.send() .send()
.await .await
.unwrap(); .unwrap();
@ -487,7 +428,7 @@ mod tests {
// Valid range header, starting from 0 // Valid range header, starting from 0
let response = srv let response = srv
.get("/tests/test.binary") .get("/tests/test.binary")
.insert_header((header::RANGE, "bytes=0-20")) .header(header::RANGE, "bytes=0-20")
.send() .send()
.await .await
.unwrap(); .unwrap();
@ -511,7 +452,7 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_head_content_length_headers() { async fn test_head_content_length_headers() {
let srv = actix_test::start(|| App::new().service(Files::new("/", "."))); let srv = test::start(|| App::new().service(Files::new("/", ".")));
let response = srv.head("/tests/test.binary").send().await.unwrap(); let response = srv.head("/tests/test.binary").send().await.unwrap();
@ -527,14 +468,14 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_static_files_with_spaces() { async fn test_static_files_with_spaces() {
let srv = test::init_service( let mut srv = test::init_service(
App::new().service(Files::new("/", ".").index_file("Cargo.toml")), App::new().service(Files::new("/", ".").index_file("Cargo.toml")),
) )
.await; .await;
let request = TestRequest::get() let request = TestRequest::get()
.uri("/tests/test%20space.binary") .uri("/tests/test%20space.binary")
.to_request(); .to_request();
let response = test::call_service(&srv, request).await; let response = test::call_service(&mut srv, request).await;
assert_eq!(response.status(), StatusCode::OK); assert_eq!(response.status(), StatusCode::OK);
let bytes = test::read_body(response).await; let bytes = test::read_body(response).await;
@ -544,29 +485,29 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_files_not_allowed() { async fn test_files_not_allowed() {
let srv = test::init_service(App::new().service(Files::new("/", "."))).await; let mut srv = test::init_service(App::new().service(Files::new("/", "."))).await;
let req = TestRequest::default() let req = TestRequest::default()
.uri("/Cargo.toml") .uri("/Cargo.toml")
.method(Method::POST) .method(Method::POST)
.to_request(); .to_request();
let resp = test::call_service(&srv, req).await; let resp = test::call_service(&mut srv, req).await;
assert_eq!(resp.status(), StatusCode::METHOD_NOT_ALLOWED); assert_eq!(resp.status(), StatusCode::METHOD_NOT_ALLOWED);
let srv = test::init_service(App::new().service(Files::new("/", "."))).await; let mut srv = test::init_service(App::new().service(Files::new("/", "."))).await;
let req = TestRequest::default() let req = TestRequest::default()
.method(Method::PUT) .method(Method::PUT)
.uri("/Cargo.toml") .uri("/Cargo.toml")
.to_request(); .to_request();
let resp = test::call_service(&srv, req).await; let resp = test::call_service(&mut srv, req).await;
assert_eq!(resp.status(), StatusCode::METHOD_NOT_ALLOWED); assert_eq!(resp.status(), StatusCode::METHOD_NOT_ALLOWED);
} }
#[actix_rt::test] #[actix_rt::test]
async fn test_files_guards() { async fn test_files_guards() {
let srv = test::init_service( let mut srv = test::init_service(
App::new().service(Files::new("/", ".").method_guard(guard::Post())), App::new().service(Files::new("/", ".").use_guards(guard::Post())),
) )
.await; .await;
@ -575,16 +516,15 @@ mod tests {
.method(Method::POST) .method(Method::POST)
.to_request(); .to_request();
let resp = test::call_service(&srv, req).await; let resp = test::call_service(&mut srv, req).await;
assert_eq!(resp.status(), StatusCode::OK); assert_eq!(resp.status(), StatusCode::OK);
} }
#[actix_rt::test] #[actix_rt::test]
async fn test_named_file_content_encoding() { async fn test_named_file_content_encoding() {
let srv = test::init_service(App::new().wrap(Compress::default()).service( let mut srv = test::init_service(App::new().wrap(Compress::default()).service(
web::resource("/").to(|| async { web::resource("/").to(|| async {
NamedFile::open_async("Cargo.toml") NamedFile::open("Cargo.toml")
.await
.unwrap() .unwrap()
.set_content_encoding(header::ContentEncoding::Identity) .set_content_encoding(header::ContentEncoding::Identity)
}), }),
@ -593,20 +533,18 @@ mod tests {
let request = TestRequest::get() let request = TestRequest::get()
.uri("/") .uri("/")
.insert_header((header::ACCEPT_ENCODING, "gzip")) .header(header::ACCEPT_ENCODING, "gzip")
.to_request(); .to_request();
let res = test::call_service(&srv, request).await; let res = test::call_service(&mut srv, request).await;
assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.status(), StatusCode::OK);
assert!(res.headers().contains_key(header::CONTENT_ENCODING)); assert!(!res.headers().contains_key(header::CONTENT_ENCODING));
assert!(!test::read_body(res).await.is_empty());
} }
#[actix_rt::test] #[actix_rt::test]
async fn test_named_file_content_encoding_gzip() { async fn test_named_file_content_encoding_gzip() {
let srv = test::init_service(App::new().wrap(Compress::default()).service( let mut srv = test::init_service(App::new().wrap(Compress::default()).service(
web::resource("/").to(|| async { web::resource("/").to(|| async {
NamedFile::open_async("Cargo.toml") NamedFile::open("Cargo.toml")
.await
.unwrap() .unwrap()
.set_content_encoding(header::ContentEncoding::Gzip) .set_content_encoding(header::ContentEncoding::Gzip)
}), }),
@ -615,9 +553,9 @@ mod tests {
let request = TestRequest::get() let request = TestRequest::get()
.uri("/") .uri("/")
.insert_header((header::ACCEPT_ENCODING, "gzip")) .header(header::ACCEPT_ENCODING, "gzip")
.to_request(); .to_request();
let res = test::call_service(&srv, request).await; let res = test::call_service(&mut srv, request).await;
assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.status(), StatusCode::OK);
assert_eq!( assert_eq!(
res.headers() res.headers()
@ -632,32 +570,34 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_named_file_allowed_method() { async fn test_named_file_allowed_method() {
let req = TestRequest::default().method(Method::GET).to_http_request(); let req = TestRequest::default().method(Method::GET).to_http_request();
let file = NamedFile::open_async("Cargo.toml").await.unwrap(); let file = NamedFile::open("Cargo.toml").unwrap();
let resp = file.respond_to(&req).await.unwrap(); let resp = file.respond_to(&req).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK); assert_eq!(resp.status(), StatusCode::OK);
} }
#[actix_rt::test] #[actix_rt::test]
async fn test_static_files() { async fn test_static_files() {
let srv = let mut srv = test::init_service(
test::init_service(App::new().service(Files::new("/", ".").show_files_listing())) App::new().service(Files::new("/", ".").show_files_listing()),
)
.await; .await;
let req = TestRequest::with_uri("/missing").to_request(); let req = TestRequest::with_uri("/missing").to_request();
let resp = test::call_service(&srv, req).await; let resp = test::call_service(&mut srv, req).await;
assert_eq!(resp.status(), StatusCode::NOT_FOUND); assert_eq!(resp.status(), StatusCode::NOT_FOUND);
let srv = test::init_service(App::new().service(Files::new("/", "."))).await; let mut srv = test::init_service(App::new().service(Files::new("/", "."))).await;
let req = TestRequest::default().to_request(); let req = TestRequest::default().to_request();
let resp = test::call_service(&srv, req).await; let resp = test::call_service(&mut srv, req).await;
assert_eq!(resp.status(), StatusCode::NOT_FOUND); assert_eq!(resp.status(), StatusCode::NOT_FOUND);
let srv = let mut srv = test::init_service(
test::init_service(App::new().service(Files::new("/", ".").show_files_listing())) App::new().service(Files::new("/", ".").show_files_listing()),
)
.await; .await;
let req = TestRequest::with_uri("/tests").to_request(); let req = TestRequest::with_uri("/tests").to_request();
let resp = test::call_service(&srv, req).await; let resp = test::call_service(&mut srv, req).await;
assert_eq!( assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(), resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/html; charset=utf-8" "text/html; charset=utf-8"
@ -669,17 +609,17 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_redirect_to_slash_directory() { async fn test_redirect_to_slash_directory() {
// should not redirect if no index and files listing is disabled // should not redirect if no index
let srv = test::init_service( let mut srv = test::init_service(
App::new().service(Files::new("/", ".").redirect_to_slash_directory()), App::new().service(Files::new("/", ".").redirect_to_slash_directory()),
) )
.await; .await;
let req = TestRequest::with_uri("/tests").to_request(); let req = TestRequest::with_uri("/tests").to_request();
let resp = test::call_service(&srv, req).await; let resp = test::call_service(&mut srv, req).await;
assert_eq!(resp.status(), StatusCode::NOT_FOUND); assert_eq!(resp.status(), StatusCode::NOT_FOUND);
// should redirect if index present // should redirect if index present
let srv = test::init_service( let mut srv = test::init_service(
App::new().service( App::new().service(
Files::new("/", ".") Files::new("/", ".")
.index_file("test.png") .index_file("test.png")
@ -688,302 +628,149 @@ mod tests {
) )
.await; .await;
let req = TestRequest::with_uri("/tests").to_request(); let req = TestRequest::with_uri("/tests").to_request();
let resp = test::call_service(&srv, req).await; let resp = test::call_service(&mut srv, req).await;
assert_eq!(resp.status(), StatusCode::FOUND);
// should redirect if files listing is enabled
let srv = test::init_service(
App::new().service(
Files::new("/", ".")
.show_files_listing()
.redirect_to_slash_directory(),
),
)
.await;
let req = TestRequest::with_uri("/tests").to_request();
let resp = test::call_service(&srv, req).await;
assert_eq!(resp.status(), StatusCode::FOUND); assert_eq!(resp.status(), StatusCode::FOUND);
// should not redirect if the path is wrong // should not redirect if the path is wrong
let req = TestRequest::with_uri("/not_existing").to_request(); let req = TestRequest::with_uri("/not_existing").to_request();
let resp = test::call_service(&srv, req).await; let resp = test::call_service(&mut srv, req).await;
assert_eq!(resp.status(), StatusCode::NOT_FOUND); assert_eq!(resp.status(), StatusCode::NOT_FOUND);
} }
#[actix_rt::test] #[actix_rt::test]
async fn test_static_files_bad_directory() { async fn test_static_files_bad_directory() {
let service = Files::new("/", "./missing").new_service(()).await.unwrap(); let _st: Files = Files::new("/", "missing");
let _st: Files = Files::new("/", "Cargo.toml");
let req = TestRequest::with_uri("/").to_srv_request();
let resp = test::call_service(&service, req).await;
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
} }
#[actix_rt::test] #[actix_rt::test]
async fn test_default_handler_file_missing() { async fn test_default_handler_file_missing() {
let st = Files::new("/", ".") let mut st = Files::new("/", ".")
.default_handler(|req: ServiceRequest| async { .default_handler(|req: ServiceRequest| {
Ok(req.into_response(HttpResponse::Ok().body("default content"))) ok(req.into_response(HttpResponse::Ok().body("default content")))
}) })
.new_service(()) .new_service(())
.await .await
.unwrap(); .unwrap();
let req = TestRequest::with_uri("/missing").to_srv_request(); let req = TestRequest::with_uri("/missing").to_srv_request();
let resp = test::call_service(&st, req).await;
let resp = test::call_service(&mut st, req).await;
assert_eq!(resp.status(), StatusCode::OK); assert_eq!(resp.status(), StatusCode::OK);
let bytes = test::read_body(resp).await; let bytes = test::read_body(resp).await;
assert_eq!(bytes, web::Bytes::from_static(b"default content")); assert_eq!(bytes, web::Bytes::from_static(b"default content"));
} }
#[actix_rt::test] // #[actix_rt::test]
async fn test_serve_index_nested() { // async fn test_serve_index() {
let service = Files::new(".", ".") // let st = Files::new(".").index_file("test.binary");
.index_file("lib.rs") // let req = TestRequest::default().uri("/tests").finish();
.new_service(())
.await
.unwrap();
let req = TestRequest::default().uri("/src").to_srv_request(); // let resp = st.handle(&req).respond_to(&req).unwrap();
let resp = test::call_service(&service, req).await; // let resp = resp.as_msg();
// assert_eq!(resp.status(), StatusCode::OK);
// assert_eq!(
// resp.headers()
// .get(header::CONTENT_TYPE)
// .expect("content type"),
// "application/octet-stream"
// );
// assert_eq!(
// resp.headers()
// .get(header::CONTENT_DISPOSITION)
// .expect("content disposition"),
// "attachment; filename=\"test.binary\""
// );
assert_eq!(resp.status(), StatusCode::OK); // let req = TestRequest::default().uri("/tests/").finish();
assert_eq!( // let resp = st.handle(&req).respond_to(&req).unwrap();
resp.headers().get(header::CONTENT_TYPE).unwrap(), // let resp = resp.as_msg();
"text/x-rust" // assert_eq!(resp.status(), StatusCode::OK);
); // assert_eq!(
assert_eq!( // resp.headers().get(header::CONTENT_TYPE).unwrap(),
resp.headers().get(header::CONTENT_DISPOSITION).unwrap(), // "application/octet-stream"
"inline; filename=\"lib.rs\"" // );
); // assert_eq!(
} // resp.headers().get(header::CONTENT_DISPOSITION).unwrap(),
// "attachment; filename=\"test.binary\""
// );
#[actix_rt::test] // // nonexistent index file
async fn integration_serve_index() { // let req = TestRequest::default().uri("/tests/unknown").finish();
let srv = test::init_service( // let resp = st.handle(&req).respond_to(&req).unwrap();
App::new().service(Files::new("test", ".").index_file("Cargo.toml")), // let resp = resp.as_msg();
) // assert_eq!(resp.status(), StatusCode::NOT_FOUND);
.await;
let req = TestRequest::get().uri("/test").to_request(); // let req = TestRequest::default().uri("/tests/unknown/").finish();
let res = test::call_service(&srv, req).await; // let resp = st.handle(&req).respond_to(&req).unwrap();
assert_eq!(res.status(), StatusCode::OK); // let resp = resp.as_msg();
// assert_eq!(resp.status(), StatusCode::NOT_FOUND);
// }
let bytes = test::read_body(res).await; // #[actix_rt::test]
// async fn test_serve_index_nested() {
// let st = Files::new(".").index_file("mod.rs");
// let req = TestRequest::default().uri("/src/client").finish();
// let resp = st.handle(&req).respond_to(&req).unwrap();
// let resp = resp.as_msg();
// assert_eq!(resp.status(), StatusCode::OK);
// assert_eq!(
// resp.headers().get(header::CONTENT_TYPE).unwrap(),
// "text/x-rust"
// );
// assert_eq!(
// resp.headers().get(header::CONTENT_DISPOSITION).unwrap(),
// "inline; filename=\"mod.rs\""
// );
// }
let data = Bytes::from(fs::read("Cargo.toml").unwrap()); // #[actix_rt::test]
assert_eq!(bytes, data); // fn integration_serve_index() {
// let mut srv = test::TestServer::with_factory(|| {
// App::new().handler(
// "test",
// Files::new(".").index_file("Cargo.toml"),
// )
// });
let req = TestRequest::get().uri("/test/").to_request(); // let request = srv.get().uri(srv.url("/test")).finish().unwrap();
let res = test::call_service(&srv, req).await; // let response = srv.execute(request.send()).unwrap();
assert_eq!(res.status(), StatusCode::OK); // assert_eq!(response.status(), StatusCode::OK);
// let bytes = srv.execute(response.body()).unwrap();
// let data = Bytes::from(fs::read("Cargo.toml").unwrap());
// assert_eq!(bytes, data);
let bytes = test::read_body(res).await; // let request = srv.get().uri(srv.url("/test/")).finish().unwrap();
let data = Bytes::from(fs::read("Cargo.toml").unwrap()); // let response = srv.execute(request.send()).unwrap();
assert_eq!(bytes, data); // assert_eq!(response.status(), StatusCode::OK);
// let bytes = srv.execute(response.body()).unwrap();
// let data = Bytes::from(fs::read("Cargo.toml").unwrap());
// assert_eq!(bytes, data);
// nonexistent index file // // nonexistent index file
let req = TestRequest::get().uri("/test/unknown").to_request(); // let request = srv.get().uri(srv.url("/test/unknown")).finish().unwrap();
let res = test::call_service(&srv, req).await; // let response = srv.execute(request.send()).unwrap();
assert_eq!(res.status(), StatusCode::NOT_FOUND); // assert_eq!(response.status(), StatusCode::NOT_FOUND);
let req = TestRequest::get().uri("/test/unknown/").to_request(); // let request = srv.get().uri(srv.url("/test/unknown/")).finish().unwrap();
let res = test::call_service(&srv, req).await; // let response = srv.execute(request.send()).unwrap();
assert_eq!(res.status(), StatusCode::NOT_FOUND); // assert_eq!(response.status(), StatusCode::NOT_FOUND);
} // }
#[actix_rt::test] // #[actix_rt::test]
async fn integration_percent_encoded() { // fn integration_percent_encoded() {
let srv = test::init_service( // let mut srv = test::TestServer::with_factory(|| {
App::new().service(Files::new("test", ".").index_file("Cargo.toml")), // App::new().handler(
) // "test",
.await; // Files::new(".").index_file("Cargo.toml"),
// )
// });
let req = TestRequest::get().uri("/test/%43argo.toml").to_request(); // let request = srv
let res = test::call_service(&srv, req).await; // .get()
assert_eq!(res.status(), StatusCode::OK); // .uri(srv.url("/test/%43argo.toml"))
// .finish()
// `%2F` == `/` // .unwrap();
let req = TestRequest::get().uri("/test%2Ftest.binary").to_request(); // let response = srv.execute(request.send()).unwrap();
let res = test::call_service(&srv, req).await; // assert_eq!(response.status(), StatusCode::OK);
assert_eq!(res.status(), StatusCode::NOT_FOUND); // }
let req = TestRequest::get().uri("/test/Cargo.toml%00").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
}
#[actix_rt::test]
async fn test_percent_encoding_2() {
let tmpdir = tempfile::tempdir().unwrap();
let filename = match cfg!(unix) {
true => "ض:?#[]{}<>()@!$&'`|*+,;= %20.test",
false => "ض#[]{}()@!$&'`+,;= %20.test",
};
let filename_encoded = filename
.as_bytes()
.iter()
.map(|c| format!("%{:02X}", c))
.collect::<String>();
std::fs::File::create(tmpdir.path().join(filename)).unwrap();
let srv = test::init_service(App::new().service(Files::new("", tmpdir.path()))).await;
let req = TestRequest::get()
.uri(&format!("/{}", filename_encoded))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
}
#[actix_rt::test]
async fn test_serve_named_file() {
let factory = NamedFile::open_async("Cargo.toml").await.unwrap();
let srv = test::init_service(App::new().service(factory)).await;
let req = TestRequest::get().uri("/Cargo.toml").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
let bytes = test::read_body(res).await;
let data = Bytes::from(fs::read("Cargo.toml").unwrap());
assert_eq!(bytes, data);
let req = TestRequest::get().uri("/test/unknown").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
}
#[actix_rt::test]
async fn test_serve_named_file_prefix() {
let factory = NamedFile::open_async("Cargo.toml").await.unwrap();
let srv =
test::init_service(App::new().service(web::scope("/test").service(factory))).await;
let req = TestRequest::get().uri("/test/Cargo.toml").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
let bytes = test::read_body(res).await;
let data = Bytes::from(fs::read("Cargo.toml").unwrap());
assert_eq!(bytes, data);
let req = TestRequest::get().uri("/Cargo.toml").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
}
#[actix_rt::test]
async fn test_named_file_default_service() {
let factory = NamedFile::open_async("Cargo.toml").await.unwrap();
let srv = test::init_service(App::new().default_service(factory)).await;
for route in ["/foobar", "/baz", "/"].iter() {
let req = TestRequest::get().uri(route).to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
let bytes = test::read_body(res).await;
let data = Bytes::from(fs::read("Cargo.toml").unwrap());
assert_eq!(bytes, data);
}
}
#[actix_rt::test]
async fn test_default_handler_named_file() {
let factory = NamedFile::open_async("Cargo.toml").await.unwrap();
let st = Files::new("/", ".")
.default_handler(factory)
.new_service(())
.await
.unwrap();
let req = TestRequest::with_uri("/missing").to_srv_request();
let resp = test::call_service(&st, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let bytes = test::read_body(resp).await;
let data = Bytes::from(fs::read("Cargo.toml").unwrap());
assert_eq!(bytes, data);
}
#[actix_rt::test]
async fn test_symlinks() {
let srv = test::init_service(App::new().service(Files::new("test", "."))).await;
let req = TestRequest::get()
.uri("/test/tests/symlink-test.png")
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(
res.headers().get(header::CONTENT_DISPOSITION).unwrap(),
"inline; filename=\"symlink-test.png\""
);
}
#[actix_rt::test]
async fn test_index_with_show_files_listing() {
let service = Files::new(".", ".")
.index_file("lib.rs")
.show_files_listing()
.new_service(())
.await
.unwrap();
// Serve the index if exists
let req = TestRequest::default().uri("/src").to_srv_request();
let resp = test::call_service(&service, req).await;
assert_eq!(resp.status(), StatusCode::OK);
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/x-rust"
);
// Show files listing, otherwise.
let req = TestRequest::default().uri("/tests").to_srv_request();
let resp = test::call_service(&service, req).await;
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/html; charset=utf-8"
);
let bytes = test::read_body(resp).await;
assert!(format!("{:?}", bytes).contains("/tests/test.png"));
}
#[actix_rt::test]
async fn test_path_filter() {
// prevent searching subdirectories
let st = Files::new("/", ".")
.path_filter(|path, _| path.components().count() == 1)
.new_service(())
.await
.unwrap();
let req = TestRequest::with_uri("/Cargo.toml").to_srv_request();
let resp = test::call_service(&st, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let req = TestRequest::with_uri("/src/lib.rs").to_srv_request();
let resp = test::call_service(&st, req).await;
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
}
#[actix_rt::test]
async fn test_default_handler_filter() {
let st = Files::new("/", ".")
.default_handler(|req: ServiceRequest| async {
Ok(req.into_response(HttpResponse::Ok().body("default content")))
})
.path_filter(|path, _| path.extension() == Some("png".as_ref()))
.new_service(())
.await
.unwrap();
let req = TestRequest::with_uri("/Cargo.toml").to_srv_request();
let resp = test::call_service(&st, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let bytes = test::read_body(resp).await;
assert_eq!(bytes, web::Bytes::from_static(b"default content"));
}
} }
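
The tests above exercise the rewritten `Files` builder (`index_file`, `show_files_listing`, `path_filter`, `default_handler`, `method_guard`). A minimal sketch of wiring such a configuration into an app, assuming the post-migration actix-files 0.6-style API shown in this diff; the mount point, directory, and port are placeholders:

```rust
use actix_files::Files;
use actix_web::{App, HttpServer};

// Sketch only: "./static" and the bind address are illustrative.
#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new().service(
            Files::new("/static", "./static")
                // serve ./static/index.html for directory requests
                .index_file("index.html")
                // fall back to a generated listing when no index file exists
                .show_files_listing()
                // reject hidden files (names starting with a dot)
                .path_filter(|path, _head| {
                    path.file_name()
                        .and_then(|name| name.to_str())
                        .map(|name| !name.starts_with('.'))
                        .unwrap_or(false)
                }),
        )
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}
```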

View File

@ -1,29 +1,28 @@
use std::{ use std::fs::{File, Metadata};
fmt, use std::io;
fs::Metadata, use std::ops::{Deref, DerefMut};
io, use std::path::{Path, PathBuf};
path::{Path, PathBuf}, use std::time::{SystemTime, UNIX_EPOCH};
time::{SystemTime, UNIX_EPOCH},
}; #[cfg(unix)]
use std::os::unix::fs::MetadataExt;
use actix_service::{Service, ServiceFactory};
use actix_web::{ use actix_web::{
body::{self, BoxBody, SizedStream}, dev::{BodyEncoding, SizedStream},
dev::{AppService, HttpServiceFactory, ResourceDef, ServiceRequest, ServiceResponse},
http::{ http::{
header::{ header::{
self, Charset, ContentDisposition, ContentEncoding, DispositionParam, self, Charset, ContentDisposition, DispositionParam, DispositionType,
DispositionType, ExtendedValue, HeaderValue, ExtendedValue,
}, },
StatusCode, ContentEncoding, StatusCode,
}, },
Error, HttpMessage, HttpRequest, HttpResponse, Responder, Error, HttpMessage, HttpRequest, HttpResponse, Responder,
}; };
use bitflags::bitflags; use bitflags::bitflags;
use derive_more::{Deref, DerefMut}; use futures_util::future::{ready, Ready};
use futures_core::future::LocalBoxFuture;
use mime_guess::from_path; use mime_guess::from_path;
use crate::ChunkedReadFile;
use crate::{encoding::equiv_utf8_text, range::HttpRange}; use crate::{encoding::equiv_utf8_text, range::HttpRange};
bitflags! { bitflags! {
@ -42,34 +41,9 @@ impl Default for Flags {
} }
/// A file with an associated name. /// A file with an associated name.
/// #[derive(Debug)]
/// `NamedFile` can be registered as services:
/// ```
/// use actix_web::App;
/// use actix_files::NamedFile;
///
/// # async fn run() -> Result<(), Box<dyn std::error::Error>> {
/// let file = NamedFile::open_async("./static/index.html").await?;
/// let app = App::new().service(file);
/// # Ok(())
/// # }
/// ```
///
/// They can also be returned from handlers:
/// ```
/// use actix_web::{Responder, get};
/// use actix_files::NamedFile;
///
/// #[get("/")]
/// async fn index() -> impl Responder {
/// NamedFile::open_async("./static/index.html").await
/// }
/// ```
#[derive(Deref, DerefMut)]
pub struct NamedFile { pub struct NamedFile {
path: PathBuf, path: PathBuf,
#[deref]
#[deref_mut]
file: File, file: File,
modified: Option<SystemTime>, modified: Option<SystemTime>,
pub(crate) md: Metadata, pub(crate) md: Metadata,
@ -80,39 +54,6 @@ pub struct NamedFile {
pub(crate) encoding: Option<ContentEncoding>, pub(crate) encoding: Option<ContentEncoding>,
} }
impl fmt::Debug for NamedFile {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("NamedFile")
.field("path", &self.path)
.field(
"file",
#[cfg(feature = "experimental-io-uring")]
{
&"tokio_uring::File"
},
#[cfg(not(feature = "experimental-io-uring"))]
{
&self.file
},
)
.field("modified", &self.modified)
.field("md", &self.md)
.field("flags", &self.flags)
.field("status_code", &self.status_code)
.field("content_type", &self.content_type)
.field("content_disposition", &self.content_disposition)
.field("encoding", &self.encoding)
.finish()
}
}
#[cfg(not(feature = "experimental-io-uring"))]
pub(crate) use std::fs::File;
#[cfg(feature = "experimental-io-uring")]
pub(crate) use tokio_uring::fs::File;
use super::chunked;
impl NamedFile { impl NamedFile {
/// Creates an instance from a previously opened file. /// Creates an instance from a previously opened file.
/// ///
@ -120,7 +61,8 @@ impl NamedFile {
/// `ContentDisposition` headers. /// `ContentDisposition` headers.
/// ///
/// # Examples /// # Examples
/// ```ignore ///
/// ```rust
/// use actix_files::NamedFile; /// use actix_files::NamedFile;
/// use std::io::{self, Write}; /// use std::io::{self, Write};
/// use std::env; /// use std::env;
@ -154,11 +96,6 @@ impl NamedFile {
let disposition = match ct.type_() { let disposition = match ct.type_() {
mime::IMAGE | mime::TEXT | mime::VIDEO => DispositionType::Inline, mime::IMAGE | mime::TEXT | mime::VIDEO => DispositionType::Inline,
mime::APPLICATION => match ct.subtype() {
mime::JAVASCRIPT | mime::JSON => DispositionType::Inline,
name if name == "wasm" => DispositionType::Inline,
_ => DispositionType::Attachment,
},
_ => DispositionType::Attachment, _ => DispositionType::Attachment,
}; };
@ -181,30 +118,7 @@ impl NamedFile {
(ct, cd) (ct, cd)
}; };
let md = { let md = file.metadata()?;
#[cfg(not(feature = "experimental-io-uring"))]
{
file.metadata()?
}
#[cfg(feature = "experimental-io-uring")]
{
use std::os::unix::prelude::{AsRawFd, FromRawFd};
let fd = file.as_raw_fd();
// SAFETY: fd is borrowed and lives longer than the unsafe block
unsafe {
let file = std::fs::File::from_raw_fd(fd);
let md = file.metadata();
// SAFETY: forget the fd before exiting block in success or error case but don't
// run destructor (that would close file handle)
std::mem::forget(file);
md?
}
}
};
let modified = md.modified().ok(); let modified = md.modified().ok();
let encoding = None; let encoding = None;
@ -224,42 +138,14 @@ impl NamedFile {
/// Attempts to open a file in read-only mode. /// Attempts to open a file in read-only mode.
/// ///
/// # Examples /// # Examples
/// ``` ///
/// ```rust
/// use actix_files::NamedFile; /// use actix_files::NamedFile;
///
/// let file = NamedFile::open("foo.txt"); /// let file = NamedFile::open("foo.txt");
/// ``` /// ```
#[cfg(not(feature = "experimental-io-uring"))]
pub fn open<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> { pub fn open<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> {
let file = File::open(&path)?; Self::from_file(File::open(&path)?, path)
Self::from_file(file, path)
}
/// Attempts to open a file asynchronously in read-only mode.
///
/// When the `experimental-io-uring` crate feature is enabled, this will be async.
/// Otherwise, it will be just like [`open`][Self::open].
///
/// # Examples
/// ```
/// use actix_files::NamedFile;
/// # async fn open() {
/// let file = NamedFile::open_async("foo.txt").await.unwrap();
/// # }
/// ```
pub async fn open_async<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> {
let file = {
#[cfg(not(feature = "experimental-io-uring"))]
{
File::open(&path)?
}
#[cfg(feature = "experimental-io-uring")]
{
File::open(&path).await?
}
};
Self::from_file(file, path)
} }
/// Returns reference to the underlying `File` object. /// Returns reference to the underlying `File` object.
@ -271,12 +157,13 @@ impl NamedFile {
/// Retrieve the path of this file. /// Retrieve the path of this file.
/// ///
/// # Examples /// # Examples
/// ``` ///
/// ```rust
/// # use std::io; /// # use std::io;
/// use actix_files::NamedFile; /// use actix_files::NamedFile;
/// ///
/// # async fn path() -> io::Result<()> { /// # fn path() -> io::Result<()> {
/// let file = NamedFile::open_async("test.txt").await?; /// let file = NamedFile::open("test.txt")?;
/// assert_eq!(file.path().as_os_str(), "test.txt"); /// assert_eq!(file.path().as_os_str(), "test.txt");
/// # Ok(()) /// # Ok(())
/// # } /// # }
@ -292,21 +179,21 @@ impl NamedFile {
self self
} }
/// Set the MIME Content-Type for serving this file. By default the Content-Type is inferred /// Set the MIME Content-Type for serving this file. By default
/// from the filename extension. /// the Content-Type is inferred from the filename extension.
#[inline] #[inline]
pub fn set_content_type(mut self, mime_type: mime::Mime) -> Self { pub fn set_content_type(mut self, mime_type: mime::Mime) -> Self {
self.content_type = mime_type; self.content_type = mime_type;
self self
} }
/// Set the Content-Disposition for serving this file. This allows changing the /// Set the Content-Disposition for serving this file. This allows
/// `inline/attachment` disposition as well as the filename sent to the peer. /// changing the inline/attachment disposition as well as the filename
/// /// sent to the peer. By default the disposition is `inline` for text,
/// By default the disposition is `inline` for `text/*`, `image/*`, `video/*` and /// image, and video content types, and `attachment` otherwise, and
/// `application/{javascript, json, wasm}` mime types, and `attachment` otherwise, and the /// the filename is taken from the path provided in the `open` method
/// filename is taken from the path provided in the `open` method after converting it to UTF-8 /// after converting it to UTF-8 using.
/// (using `to_string_lossy`). /// [`std::ffi::OsStr::to_string_lossy`]
#[inline] #[inline]
pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self { pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self {
self.content_disposition = cd; self.content_disposition = cd;
@ -324,15 +211,13 @@ impl NamedFile {
} }
/// Set content encoding for serving this file /// Set content encoding for serving this file
///
/// Must be used with [`actix_web::middleware::Compress`] to take effect.
#[inline] #[inline]
pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self { pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self {
self.encoding = Some(enc); self.encoding = Some(enc);
self self
} }
/// Specifies whether to return `ETag` header in response. /// Specifies whether to use ETag or not.
/// ///
/// Default is true. /// Default is true.
#[inline] #[inline]
@ -341,7 +226,7 @@ impl NamedFile {
self self
} }
/// Specifies whether to return `Last-Modified` header in response. /// Specifies whether to use Last-Modified or not.
/// ///
/// Default is true. /// Default is true.
#[inline] #[inline]
@ -359,18 +244,14 @@ impl NamedFile {
self self
} }
/// Creates an `ETag` in a format similar to Apache's.
pub(crate) fn etag(&self) -> Option<header::EntityTag> { pub(crate) fn etag(&self) -> Option<header::EntityTag> {
// This etag format is similar to Apache's.
self.modified.as_ref().map(|mtime| { self.modified.as_ref().map(|mtime| {
let ino = { let ino = {
#[cfg(unix)] #[cfg(unix)]
{ {
#[cfg(unix)]
use std::os::unix::fs::MetadataExt as _;
self.md.ino() self.md.ino()
} }
#[cfg(not(unix))] #[cfg(not(unix))]
{ {
0 0
@ -381,7 +262,7 @@ impl NamedFile {
.duration_since(UNIX_EPOCH) .duration_since(UNIX_EPOCH)
.expect("modification time must be after epoch"); .expect("modification time must be after epoch");
header::EntityTag::new_strong(format!( header::EntityTag::strong(format!(
"{:x}:{:x}:{:x}:{:x}", "{:x}:{:x}:{:x}:{:x}",
ino, ino,
self.md.len(), self.md.len(),
@ -396,32 +277,37 @@ impl NamedFile {
} }
/// Creates an `HttpResponse` with file as a streaming body. /// Creates an `HttpResponse` with file as a streaming body.
pub fn into_response(self, req: &HttpRequest) -> HttpResponse<BoxBody> { pub fn into_response(self, req: &HttpRequest) -> Result<HttpResponse, Error> {
if self.status_code != StatusCode::OK { if self.status_code != StatusCode::OK {
let mut res = HttpResponse::build(self.status_code); let mut res = HttpResponse::build(self.status_code);
let ct = if self.flags.contains(Flags::PREFER_UTF8) { if self.flags.contains(Flags::PREFER_UTF8) {
equiv_utf8_text(self.content_type.clone()) let ct = equiv_utf8_text(self.content_type.clone());
res.header(header::CONTENT_TYPE, ct.to_string());
} else { } else {
self.content_type res.header(header::CONTENT_TYPE, self.content_type.to_string());
}; }
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
if self.flags.contains(Flags::CONTENT_DISPOSITION) { if self.flags.contains(Flags::CONTENT_DISPOSITION) {
res.insert_header(( res.header(
header::CONTENT_DISPOSITION, header::CONTENT_DISPOSITION,
self.content_disposition.to_string(), self.content_disposition.to_string(),
)); );
} }
if let Some(current_encoding) = self.encoding { if let Some(current_encoding) = self.encoding {
res.insert_header((header::CONTENT_ENCODING, current_encoding.as_str())); res.encoding(current_encoding);
} }
let reader = chunked::new_chunked_read(self.md.len(), 0, self.file); let reader = ChunkedReadFile {
size: self.md.len(),
offset: 0,
file: Some(self.file),
fut: None,
counter: 0,
};
return res.streaming(reader); return Ok(res.streaming(reader));
} }
let etag = if self.flags.contains(Flags::ETAG) { let etag = if self.flags.contains(Flags::ETAG) {
@ -442,11 +328,11 @@ impl NamedFile {
} else if let (Some(ref m), Some(header::IfUnmodifiedSince(ref since))) = } else if let (Some(ref m), Some(header::IfUnmodifiedSince(ref since))) =
(last_modified, req.get_header()) (last_modified, req.get_header())
{ {
let t1: SystemTime = (*m).into(); let t1: SystemTime = m.clone().into();
let t2: SystemTime = (*since).into(); let t2: SystemTime = since.clone().into();
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) { match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
(Ok(t1), Ok(t2)) => t1.as_secs() > t2.as_secs(), (Ok(t1), Ok(t2)) => t1 > t2,
_ => false, _ => false,
} }
} else { } else {
@ -461,47 +347,47 @@ impl NamedFile {
} else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) = } else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) =
(last_modified, req.get_header()) (last_modified, req.get_header())
{ {
let t1: SystemTime = (*m).into(); let t1: SystemTime = m.clone().into();
let t2: SystemTime = (*since).into(); let t2: SystemTime = since.clone().into();
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) { match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
(Ok(t1), Ok(t2)) => t1.as_secs() <= t2.as_secs(), (Ok(t1), Ok(t2)) => t1 <= t2,
_ => false, _ => false,
} }
} else { } else {
false false
}; };
let mut res = HttpResponse::build(self.status_code); let mut resp = HttpResponse::build(self.status_code);
let ct = if self.flags.contains(Flags::PREFER_UTF8) { if self.flags.contains(Flags::PREFER_UTF8) {
equiv_utf8_text(self.content_type.clone()) let ct = equiv_utf8_text(self.content_type.clone());
resp.header(header::CONTENT_TYPE, ct.to_string());
} else { } else {
self.content_type resp.header(header::CONTENT_TYPE, self.content_type.to_string());
};
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
if self.flags.contains(Flags::CONTENT_DISPOSITION) {
res.insert_header((
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
));
} }
if self.flags.contains(Flags::CONTENT_DISPOSITION) {
resp.header(
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
);
}
// default compressing
if let Some(current_encoding) = self.encoding { if let Some(current_encoding) = self.encoding {
res.insert_header((header::CONTENT_ENCODING, current_encoding.as_str())); resp.encoding(current_encoding);
} }
if let Some(lm) = last_modified { if let Some(lm) = last_modified {
res.insert_header((header::LAST_MODIFIED, lm.to_string())); resp.header(header::LAST_MODIFIED, lm.to_string());
} }
if let Some(etag) = etag { if let Some(etag) = etag {
res.insert_header((header::ETAG, etag.to_string())); resp.header(header::ETAG, etag.to_string());
} }
res.insert_header((header::ACCEPT_RANGES, "bytes")); resp.header(header::ACCEPT_RANGES, "bytes");
let mut length = self.md.len(); let mut length = self.md.len();
let mut offset = 0; let mut offset = 0;
@ -513,41 +399,58 @@ impl NamedFile {
length = ranges[0].length; length = ranges[0].length;
offset = ranges[0].start; offset = ranges[0].start;
// don't allow compression middleware to modify partial content resp.encoding(ContentEncoding::Identity);
res.insert_header(( resp.header(
header::CONTENT_ENCODING,
HeaderValue::from_static("identity"),
));
res.insert_header((
header::CONTENT_RANGE, header::CONTENT_RANGE,
format!("bytes {}-{}/{}", offset, offset + length - 1, self.md.len()), format!(
)); "bytes {}-{}/{}",
offset,
offset + length - 1,
self.md.len()
),
);
} else { } else {
res.insert_header((header::CONTENT_RANGE, format!("bytes */{}", length))); resp.header(header::CONTENT_RANGE, format!("bytes */{}", length));
return res.status(StatusCode::RANGE_NOT_SATISFIABLE).finish(); return Ok(resp.status(StatusCode::RANGE_NOT_SATISFIABLE).finish());
}; };
} else { } else {
return res.status(StatusCode::BAD_REQUEST).finish(); return Ok(resp.status(StatusCode::BAD_REQUEST).finish());
}; };
}; };
if precondition_failed { if precondition_failed {
return res.status(StatusCode::PRECONDITION_FAILED).finish(); return Ok(resp.status(StatusCode::PRECONDITION_FAILED).finish());
} else if not_modified { } else if not_modified {
return res return Ok(resp.status(StatusCode::NOT_MODIFIED).finish());
.status(StatusCode::NOT_MODIFIED)
.body(body::None::new())
.map_into_boxed_body();
} }
let reader = chunked::new_chunked_read(length, offset, self.file); let reader = ChunkedReadFile {
offset,
size: length,
file: Some(self.file),
fut: None,
counter: 0,
};
if offset != 0 || length != self.md.len() { if offset != 0 || length != self.md.len() {
res.status(StatusCode::PARTIAL_CONTENT); resp.status(StatusCode::PARTIAL_CONTENT);
} }
res.body(SizedStream::new(length, reader)) Ok(resp.body(SizedStream::new(length, reader)))
}
}
impl Deref for NamedFile {
type Target = File;
fn deref(&self) -> &File {
&self.file
}
}
impl DerefMut for NamedFile {
fn deref_mut(&mut self) -> &mut File {
&mut self.file
} }
} }
@ -592,62 +495,10 @@ fn none_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
} }
impl Responder for NamedFile { impl Responder for NamedFile {
type Body = BoxBody;
fn respond_to(self, req: &HttpRequest) -> HttpResponse<Self::Body> {
self.into_response(req)
}
}
impl ServiceFactory<ServiceRequest> for NamedFile {
type Response = ServiceResponse;
type Error = Error; type Error = Error;
type Config = (); type Future = Ready<Result<HttpResponse, Error>>;
type Service = NamedFileService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future { fn respond_to(self, req: &HttpRequest) -> Self::Future {
let service = NamedFileService { ready(self.into_response(req))
path: self.path.clone(),
};
Box::pin(async move { Ok(service) })
}
}
#[doc(hidden)]
#[derive(Debug)]
pub struct NamedFileService {
path: PathBuf,
}
impl Service<ServiceRequest> for NamedFileService {
type Response = ServiceResponse;
type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
actix_service::always_ready!();
fn call(&self, req: ServiceRequest) -> Self::Future {
let (req, _) = req.into_parts();
let path = self.path.clone();
Box::pin(async move {
let file = NamedFile::open_async(path).await?;
let res = file.into_response(&req);
Ok(ServiceResponse::new(req, res))
})
}
}
impl HttpServiceFactory for NamedFile {
fn register(self, config: &mut AppService) {
config.register_service(
ResourceDef::root_prefix(self.path.to_string_lossy().as_ref()),
None,
self,
None,
)
} }
} }
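
The new `NamedFile` doc comments above show it registered as a service or returned from a handler. A short hedged sketch combining the builder methods touched in this diff (`open_async`, `set_content_disposition`, `use_etag`, `use_last_modified`); the route and file path are placeholders:

```rust
use actix_files::NamedFile;
use actix_web::{
    get,
    http::header::{ContentDisposition, DispositionParam, DispositionType},
    Responder,
};

// Sketch only; "./static/report.pdf" is a placeholder path.
// Registered like any other handler: App::new().service(report)
#[get("/report")]
async fn report() -> actix_web::Result<impl Responder> {
    let file = NamedFile::open_async("./static/report.pdf").await?;
    Ok(file
        // force a download instead of inline rendering
        .set_content_disposition(ContentDisposition {
            disposition: DispositionType::Attachment,
            parameters: vec![DispositionParam::Filename("report.pdf".to_owned())],
        })
        // skip conditional-request headers for this response
        .use_etag(false)
        .use_last_modified(false))
}
```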

View File

@ -1,50 +1,26 @@
use std::{ use std::{
path::{Component, Path, PathBuf}, path::{Path, PathBuf},
str::FromStr, str::FromStr,
}; };
use actix_utils::future::{ready, Ready};
use actix_web::{dev::Payload, FromRequest, HttpRequest}; use actix_web::{dev::Payload, FromRequest, HttpRequest};
use futures_util::future::{ready, Ready};
use crate::error::UriSegmentError; use crate::error::UriSegmentError;
#[derive(Debug, PartialEq, Eq)] #[derive(Debug)]
pub(crate) struct PathBufWrap(PathBuf); pub(crate) struct PathBufWrap(PathBuf);
impl FromStr for PathBufWrap { impl FromStr for PathBufWrap {
type Err = UriSegmentError; type Err = UriSegmentError;
fn from_str(path: &str) -> Result<Self, Self::Err> { fn from_str(path: &str) -> Result<Self, Self::Err> {
Self::parse_path(path, false)
}
}
impl PathBufWrap {
/// Parse a path, giving the choice of allowing hidden files to be considered valid segments.
///
/// Path traversal is guarded by this method.
pub fn parse_path(path: &str, hidden_files: bool) -> Result<Self, UriSegmentError> {
let mut buf = PathBuf::new(); let mut buf = PathBuf::new();
// equivalent to `path.split('/').count()`
let mut segment_count = path.matches('/').count() + 1;
// we can decode the whole path here (instead of per-segment decoding)
// because we will reject `%2F` in paths using `segment_count`.
let path = percent_encoding::percent_decode_str(path)
.decode_utf8()
.map_err(|_| UriSegmentError::NotValidUtf8)?;
// disallow decoding `%2F` into `/`
if segment_count != path.matches('/').count() + 1 {
return Err(UriSegmentError::BadChar('/'));
}
for segment in path.split('/') { for segment in path.split('/') {
if segment == ".." { if segment == ".." {
segment_count -= 1;
buf.pop(); buf.pop();
} else if !hidden_files && segment.starts_with('.') { } else if segment.starts_with('.') {
return Err(UriSegmentError::BadStart('.')); return Err(UriSegmentError::BadStart('.'));
} else if segment.starts_with('*') { } else if segment.starts_with('*') {
return Err(UriSegmentError::BadStart('*')); return Err(UriSegmentError::BadStart('*'));
@ -55,7 +31,6 @@ impl PathBufWrap {
} else if segment.ends_with('<') { } else if segment.ends_with('<') {
return Err(UriSegmentError::BadEnd('<')); return Err(UriSegmentError::BadEnd('<'));
} else if segment.is_empty() { } else if segment.is_empty() {
segment_count -= 1;
continue; continue;
} else if cfg!(windows) && segment.contains('\\') { } else if cfg!(windows) && segment.contains('\\') {
return Err(UriSegmentError::BadChar('\\')); return Err(UriSegmentError::BadChar('\\'));
@ -64,12 +39,6 @@ impl PathBufWrap {
} }
} }
// make sure we agree with stdlib parser
for (i, component) in buf.components().enumerate() {
assert!(matches!(component, Component::Normal(_)));
assert!(i < segment_count);
}
Ok(PathBufWrap(buf)) Ok(PathBufWrap(buf))
} }
} }
@ -83,6 +52,7 @@ impl AsRef<Path> for PathBufWrap {
impl FromRequest for PathBufWrap { impl FromRequest for PathBufWrap {
type Error = UriSegmentError; type Error = UriSegmentError;
type Future = Ready<Result<Self, Self::Error>>; type Future = Ready<Result<Self, Self::Error>>;
type Config = ();
fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future { fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future {
ready(req.match_info().path().parse()) ready(req.match_info().path().parse())
@ -126,37 +96,4 @@ mod tests {
PathBuf::from_iter(vec!["seg2"]) PathBuf::from_iter(vec!["seg2"])
); );
} }
#[test]
fn test_parse_path() {
assert_eq!(
PathBufWrap::parse_path("/test/.tt", false).map(|t| t.0),
Err(UriSegmentError::BadStart('.'))
);
assert_eq!(
PathBufWrap::parse_path("/test/.tt", true).unwrap().0,
PathBuf::from_iter(vec!["test", ".tt"])
);
}
#[test]
fn path_traversal() {
assert_eq!(
PathBufWrap::parse_path("/../README.md", false).unwrap().0,
PathBuf::from_iter(vec!["README.md"])
);
assert_eq!(
PathBufWrap::parse_path("/../README.md", true).unwrap().0,
PathBuf::from_iter(vec!["README.md"])
);
assert_eq!(
PathBufWrap::parse_path("/../../../../../../../../../../etc/passwd", false)
.unwrap()
.0,
PathBuf::from_iter(vec!["etc/passwd"])
);
}
} }
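
The `parse_path` changes above add whole-path percent-decoding and a `%2F` rejection on top of the existing per-segment checks. A standalone sketch of the same guard idea, assuming the `percent-encoding` crate as a dependency; this is not the crate-private `PathBufWrap::parse_path`, just the logic in miniature:

```rust
use std::path::{Path, PathBuf};

// Illustration only: decode once, refuse smuggled separators, never climb above root.
fn safe_join(root: &Path, raw: &str) -> Option<PathBuf> {
    // count separators before decoding so a decoded `%2F` cannot add a new one
    let before = raw.matches('/').count();
    let decoded = percent_encoding::percent_decode_str(raw).decode_utf8().ok()?;
    if decoded.matches('/').count() != before {
        return None; // `%2F` tried to introduce an extra path separator
    }

    let mut buf = PathBuf::new();
    for segment in decoded.split('/') {
        match segment {
            "" | "." => continue,                   // skip empty / current-dir segments
            ".." => { buf.pop(); }                  // pop within buf; cannot escape root
            s if s.starts_with('.') => return None, // hidden files rejected
            s => buf.push(s),
        }
    }
    Some(root.join(buf))
}

fn main() {
    let root = Path::new("./static");
    assert_eq!(safe_join(root, "/../../etc/passwd"), Some(root.join("etc/passwd")));
    assert_eq!(safe_join(root, "/a%2F..%2F..%2Fpasswd"), None);
    assert!(safe_join(root, "/.hidden").is_none());
}
```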

View File

@ -1,5 +1,3 @@
use derive_more::{Display, Error};
/// HTTP Range header representation. /// HTTP Range header representation.
#[derive(Debug, Clone, Copy)] #[derive(Debug, Clone, Copy)]
pub struct HttpRange { pub struct HttpRange {
@ -10,26 +8,91 @@ pub struct HttpRange {
pub length: u64, pub length: u64,
} }
#[derive(Debug, Clone, Display, Error)] const PREFIX: &str = "bytes=";
#[display(fmt = "Parse HTTP Range failed")] const PREFIX_LEN: usize = 6;
pub struct ParseRangeErr(#[error(not(source))] ());
impl HttpRange { impl HttpRange {
/// Parses Range HTTP header string as per RFC 2616. /// Parses Range HTTP header string as per RFC 2616.
/// ///
/// `header` is HTTP Range header (e.g. `bytes=0-9`). /// `header` is HTTP Range header (e.g. `bytes=0-9`).
/// `size` is full size of response (file). /// `size` is full size of response (file).
pub fn parse(header: &str, size: u64) -> Result<Vec<HttpRange>, ParseRangeErr> { pub fn parse(header: &str, size: u64) -> Result<Vec<HttpRange>, ()> {
match http_range::HttpRange::parse(header, size) { if header.is_empty() {
Ok(ranges) => Ok(ranges return Ok(Vec::new());
.iter()
.map(|range| HttpRange {
start: range.start,
length: range.length,
})
.collect()),
Err(_) => Err(ParseRangeErr(())),
} }
if !header.starts_with(PREFIX) {
return Err(());
}
let size_sig = size as i64;
let mut no_overlap = false;
let all_ranges: Vec<Option<HttpRange>> = header[PREFIX_LEN..]
.split(',')
.map(|x| x.trim())
.filter(|x| !x.is_empty())
.map(|ra| {
let mut start_end_iter = ra.split('-');
let start_str = start_end_iter.next().ok_or(())?.trim();
let end_str = start_end_iter.next().ok_or(())?.trim();
if start_str.is_empty() {
// If no start is specified, end specifies the
// range start relative to the end of the file.
let mut length: i64 = end_str.parse().map_err(|_| ())?;
if length > size_sig {
length = size_sig;
}
Ok(Some(HttpRange {
start: (size_sig - length) as u64,
length: length as u64,
}))
} else {
let start: i64 = start_str.parse().map_err(|_| ())?;
if start < 0 {
return Err(());
}
if start >= size_sig {
no_overlap = true;
return Ok(None);
}
let length = if end_str.is_empty() {
// If no end is specified, range extends to end of the file.
size_sig - start
} else {
let mut end: i64 = end_str.parse().map_err(|_| ())?;
if start > end {
return Err(());
}
if end >= size_sig {
end = size_sig - 1;
}
end - start + 1
};
Ok(Some(HttpRange {
start: start as u64,
length: length as u64,
}))
}
})
.collect::<Result<_, _>>()?;
let ranges: Vec<HttpRange> = all_ranges.into_iter().filter_map(|x| x).collect();
if no_overlap && ranges.is_empty() {
return Err(());
}
Ok(ranges)
} }
} }
@ -270,7 +333,8 @@ mod tests {
if expected.is_empty() { if expected.is_empty() {
continue; continue;
} else { } else {
panic!( assert!(
false,
"parse({}, {}) returned error {:?}", "parse({}, {}) returned error {:?}",
header, header,
size, size,
@ -282,24 +346,28 @@ mod tests {
let got = res.unwrap(); let got = res.unwrap();
if got.len() != expected.len() { if got.len() != expected.len() {
panic!( assert!(
false,
"len(parseRange({}, {})) = {}, want {}", "len(parseRange({}, {})) = {}, want {}",
header, header,
size, size,
got.len(), got.len(),
expected.len() expected.len()
); );
continue;
} }
for i in 0..expected.len() { for i in 0..expected.len() {
if got[i].start != expected[i].start { if got[i].start != expected[i].start {
panic!( assert!(
false,
"parseRange({}, {})[{}].start = {}, want {}", "parseRange({}, {})[{}].start = {}, want {}",
header, size, i, got[i].start, expected[i].start header, size, i, got[i].start, expected[i].start
) )
} }
if got[i].length != expected[i].length { if got[i].length != expected[i].length {
panic!( assert!(
false,
"parseRange({}, {})[{}].length = {}, want {}", "parseRange({}, {})[{}].length = {}, want {}",
header, size, i, got[i].length, expected[i].length header, size, i, got[i].length, expected[i].length
) )

View File

@ -1,33 +1,27 @@
use std::{fmt, io, ops::Deref, path::PathBuf, rc::Rc}; use std::{
fmt, io,
path::PathBuf,
rc::Rc,
task::{Context, Poll},
};
use actix_service::Service;
use actix_web::{ use actix_web::{
body::BoxBody, dev::{ServiceRequest, ServiceResponse},
dev::{Service, ServiceRequest, ServiceResponse},
error::Error, error::Error,
guard::Guard, guard::Guard,
http::{header, Method}, http::{header, Method},
HttpResponse, HttpResponse,
}; };
use futures_core::future::LocalBoxFuture; use futures_util::future::{ok, Either, LocalBoxFuture, Ready};
use crate::{ use crate::{
named, Directory, DirectoryRenderer, FilesError, HttpService, MimeOverride, NamedFile, named, Directory, DirectoryRenderer, FilesError, HttpService, MimeOverride,
PathBufWrap, PathFilter, NamedFile, PathBufWrap,
}; };
/// Assembled file serving service. /// Assembled file serving service.
#[derive(Clone)] pub struct FilesService {
pub struct FilesService(pub(crate) Rc<FilesServiceInner>);
impl Deref for FilesService {
type Target = FilesServiceInner;
fn deref(&self) -> &Self::Target {
&*self.0
}
}
pub struct FilesServiceInner {
pub(crate) directory: PathBuf, pub(crate) directory: PathBuf,
pub(crate) index: Option<String>, pub(crate) index: Option<String>,
pub(crate) show_index: bool, pub(crate) show_index: bool,
@ -35,56 +29,25 @@ pub struct FilesServiceInner {
pub(crate) default: Option<HttpService>, pub(crate) default: Option<HttpService>,
pub(crate) renderer: Rc<DirectoryRenderer>, pub(crate) renderer: Rc<DirectoryRenderer>,
pub(crate) mime_override: Option<Rc<MimeOverride>>, pub(crate) mime_override: Option<Rc<MimeOverride>>,
pub(crate) path_filter: Option<Rc<PathFilter>>,
pub(crate) file_flags: named::Flags, pub(crate) file_flags: named::Flags,
pub(crate) guards: Option<Rc<dyn Guard>>, pub(crate) guards: Option<Rc<dyn Guard>>,
pub(crate) hidden_files: bool,
} }
impl fmt::Debug for FilesServiceInner { type FilesServiceFuture = Either<
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { Ready<Result<ServiceResponse, Error>>,
f.write_str("FilesServiceInner") LocalBoxFuture<'static, Result<ServiceResponse, Error>>,
} >;
}
impl FilesService { impl FilesService {
async fn handle_err( fn handle_err(&mut self, e: io::Error, req: ServiceRequest) -> FilesServiceFuture {
&self, log::debug!("Failed to handle {}: {}", req.path(), e);
err: io::Error,
req: ServiceRequest,
) -> Result<ServiceResponse, Error> {
log::debug!("error handling {}: {}", req.path(), err);
if let Some(ref default) = self.default { if let Some(ref mut default) = self.default {
default.call(req).await Either::Right(default.call(req))
} else { } else {
Ok(req.error_response(err)) Either::Left(ok(req.error_response(e)))
} }
} }
fn serve_named_file(
&self,
req: ServiceRequest,
mut named_file: NamedFile,
) -> ServiceResponse {
if let Some(ref mime_override) = self.mime_override {
let new_disposition = mime_override(&named_file.content_type.type_());
named_file.content_disposition.disposition = new_disposition;
}
named_file.flags = self.file_flags;
let (req, _) = req.into_parts();
let res = named_file.into_response(&req);
ServiceResponse::new(req, res)
}
fn show_index(&self, req: ServiceRequest, path: PathBuf) -> ServiceResponse {
let dir = Directory::new(self.directory.clone(), path);
let (req, _) = req.into_parts();
(self.renderer)(&dir, &req).unwrap_or_else(|e| ServiceResponse::from_err(e, req))
}
} }
impl fmt::Debug for FilesService { impl fmt::Debug for FilesService {
@ -93,101 +56,112 @@ impl fmt::Debug for FilesService {
} }
} }
impl Service<ServiceRequest> for FilesService { impl Service for FilesService {
type Response = ServiceResponse<BoxBody>; type Request = ServiceRequest;
type Response = ServiceResponse;
type Error = Error; type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>; type Future = FilesServiceFuture;
actix_service::always_ready!(); fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&self, req: ServiceRequest) -> Self::Future { fn call(&mut self, req: ServiceRequest) -> Self::Future {
let is_method_valid = if let Some(guard) = &self.guards { let is_method_valid = if let Some(guard) = &self.guards {
// execute user defined guards // execute user defined guards
(**guard).check(&req.guard_ctx()) (**guard).check(req.head())
} else { } else {
// default behavior // default behavior
matches!(*req.method(), Method::HEAD | Method::GET) matches!(*req.method(), Method::HEAD | Method::GET)
}; };
let this = self.clone();
Box::pin(async move {
if !is_method_valid { if !is_method_valid {
return Ok(req.into_response( return Either::Left(ok(req.into_response(
HttpResponse::MethodNotAllowed() actix_web::HttpResponse::MethodNotAllowed()
.insert_header(header::ContentType(mime::TEXT_PLAIN_UTF_8)) .header(header::CONTENT_TYPE, "text/plain")
.body("Request did not meet this resource's requirements."), .body("Request did not meet this resource's requirements."),
)); )));
} }
let real_path = let real_path: PathBufWrap = match req.match_info().path().parse() {
match PathBufWrap::parse_path(req.match_info().path(), this.hidden_files) {
Ok(item) => item, Ok(item) => item,
Err(err) => return Ok(req.error_response(err)), Err(e) => return Either::Left(ok(req.error_response(e))),
}; };
if let Some(filter) = &this.path_filter {
if !filter(real_path.as_ref(), req.head()) {
if let Some(ref default) = this.default {
return default.call(req).await;
} else {
return Ok(req.into_response(HttpResponse::NotFound().finish()));
}
}
}
// full file path // full file path
let path = this.directory.join(&real_path); let path = match self.directory.join(&real_path).canonicalize() {
if let Err(err) = path.canonicalize() { Ok(path) => path,
return this.handle_err(err, req).await; Err(e) => return self.handle_err(e, req),
} };
if path.is_dir() { if path.is_dir() {
if this.redirect_to_slash if let Some(ref redir_index) = self.index {
&& !req.path().ends_with('/') if self.redirect_to_slash && !req.path().ends_with('/') {
&& (this.index.is_some() || this.show_index)
{
let redirect_to = format!("{}/", req.path()); let redirect_to = format!("{}/", req.path());
return Ok(req.into_response( return Either::Left(ok(req.into_response(
HttpResponse::Found() HttpResponse::Found()
.insert_header((header::LOCATION, redirect_to)) .header(header::LOCATION, redirect_to)
.finish(), .body("")
)); .into_body(),
)));
} }
match this.index { let path = path.join(redir_index);
Some(ref index) => {
let named_path = path.join(index); match NamedFile::open(path) {
match NamedFile::open_async(named_path).await {
Ok(named_file) => Ok(this.serve_named_file(req, named_file)),
Err(_) if this.show_index => Ok(this.show_index(req, path)),
Err(err) => this.handle_err(err, req).await,
}
}
None if this.show_index => Ok(this.show_index(req, path)),
_ => Ok(ServiceResponse::from_err(
FilesError::IsDirectory,
req.into_parts().0,
)),
}
} else {
match NamedFile::open_async(&path).await {
Ok(mut named_file) => { Ok(mut named_file) => {
if let Some(ref mime_override) = this.mime_override { if let Some(ref mime_override) = self.mime_override {
let new_disposition = let new_disposition =
mime_override(&named_file.content_type.type_()); mime_override(&named_file.content_type.type_());
named_file.content_disposition.disposition = new_disposition; named_file.content_disposition.disposition = new_disposition;
} }
named_file.flags = this.file_flags; named_file.flags = self.file_flags;
let (req, _) = req.into_parts(); let (req, _) = req.into_parts();
let res = named_file.into_response(&req); Either::Left(ok(match named_file.into_response(&req) {
Ok(ServiceResponse::new(req, res)) Ok(item) => ServiceResponse::new(req, item),
Err(e) => ServiceResponse::from_err(e, req),
}))
} }
Err(err) => this.handle_err(err, req).await, Err(e) => self.handle_err(e, req),
}
} else if self.show_index {
let dir = Directory::new(self.directory.clone(), path);
let (req, _) = req.into_parts();
let x = (self.renderer)(&dir, &req);
match x {
Ok(resp) => Either::Left(ok(resp)),
Err(e) => Either::Left(ok(ServiceResponse::from_err(e, req))),
}
} else {
Either::Left(ok(ServiceResponse::from_err(
FilesError::IsDirectory,
req.into_parts().0,
)))
}
} else {
match NamedFile::open(path) {
Ok(mut named_file) => {
if let Some(ref mime_override) = self.mime_override {
let new_disposition =
mime_override(&named_file.content_type.type_());
named_file.content_disposition.disposition = new_disposition;
}
named_file.flags = self.file_flags;
let (req, _) = req.into_parts();
match named_file.into_response(&req) {
Ok(item) => {
Either::Left(ok(ServiceResponse::new(req.clone(), item)))
}
Err(e) => Either::Left(ok(ServiceResponse::from_err(e, req))),
}
}
Err(e) => self.handle_err(e, req),
} }
} }
})
} }
} }
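
`FilesService` above falls back to the configured default handler (or an error response) whenever the guard, path filter, or filesystem lookup rejects a request. A hedged sketch of wiring those pieces together through the public builder; the mount point, extension filter, and body text are placeholders:

```rust
use actix_files::Files;
use actix_web::{dev::ServiceRequest, guard, HttpResponse};

// Sketch only; registered like any other service: App::new().service(assets())
fn assets() -> Files {
    Files::new("/assets", "./assets")
        // the service checks this guard before touching the filesystem
        .method_guard(guard::Get())
        // paths rejected here fall through to the default handler below
        .path_filter(|path, _head| path.extension().map_or(false, |ext| ext == "png"))
        // called when the guard, filter, or file lookup does not match
        .default_handler(|req: ServiceRequest| async {
            Ok(req.into_response(HttpResponse::NotFound().body("no such asset")))
        })
}
```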

View File

@ -8,13 +8,14 @@ use actix_web::{
App, App,
}; };
#[actix_web::test] #[actix_rt::test]
async fn test_utf8_file_contents() { async fn test_utf8_file_contents() {
// use default ISO-8859-1 encoding // use default ISO-8859-1 encoding
let srv = test::init_service(App::new().service(Files::new("/", "./tests"))).await; let mut srv =
test::init_service(App::new().service(Files::new("/", "./tests"))).await;
let req = TestRequest::with_uri("/utf8.txt").to_request(); let req = TestRequest::with_uri("/utf8.txt").to_request();
let res = test::call_service(&srv, req).await; let res = test::call_service(&mut srv, req).await;
assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.status(), StatusCode::OK);
assert_eq!( assert_eq!(
@ -23,12 +24,13 @@ async fn test_utf8_file_contents() {
); );
// prefer UTF-8 encoding // prefer UTF-8 encoding
let srv = let mut srv = test::init_service(
test::init_service(App::new().service(Files::new("/", "./tests").prefer_utf8(true))) App::new().service(Files::new("/", "./tests").prefer_utf8(true)),
)
.await; .await;
let req = TestRequest::with_uri("/utf8.txt").to_request(); let req = TestRequest::with_uri("/utf8.txt").to_request();
let res = test::call_service(&srv, req).await; let res = test::call_service(&mut srv, req).await;
assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.status(), StatusCode::OK);
assert_eq!( assert_eq!(

View File

@ -1 +0,0 @@
first

View File

@ -1 +0,0 @@
second

View File

@ -1,36 +0,0 @@
use actix_files::Files;
use actix_web::{
guard::Host,
http::StatusCode,
test::{self, TestRequest},
App,
};
use bytes::Bytes;
#[actix_web::test]
async fn test_guard_filter() {
let srv = test::init_service(
App::new()
.service(Files::new("/", "./tests/fixtures/guards/first").guard(Host("first.com")))
.service(
Files::new("/", "./tests/fixtures/guards/second").guard(Host("second.com")),
),
)
.await;
let req = TestRequest::with_uri("/index.txt")
.append_header(("Host", "first.com"))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(test::read_body(res).await, Bytes::from("first"));
let req = TestRequest::with_uri("/index.txt")
.append_header(("Host", "second.com"))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(test::read_body(res).await, Bytes::from("second"));
}

View File

@ -1 +0,0 @@
test.png

View File

@ -1 +0,0 @@
// this file is empty.

View File

@ -1,27 +0,0 @@
use actix_files::Files;
use actix_web::{
http::StatusCode,
test::{self, TestRequest},
App,
};
#[actix_rt::test]
async fn test_directory_traversal_prevention() {
let srv = test::init_service(App::new().service(Files::new("/", "./tests"))).await;
let req =
TestRequest::with_uri("/../../../../../../../../../../../etc/passwd").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
let req = TestRequest::with_uri(
"/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/etc/passwd",
)
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
let req = TestRequest::with_uri("/%00/etc/passwd%00").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
}
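
The traversal test above only asserts outcomes (a 404 for literal `..`, percent-encoded `..`, and NUL segments). As a rough, hypothetical sketch of the kind of per-segment check this implies (not actix-files' actual implementation), one could percent-decode each segment and reject suspicious ones using the `percent-encoding` crate already present in this workspace:

```rust
// Rough sketch only, not the real actix-files sanitizer: percent-decode each path
// segment and reject traversal dots and NUL bytes, which is what the three
// requests in the test above depend on.
fn is_safe_path(path: &str) -> bool {
    path.split('/')
        .filter(|seg| !seg.is_empty())
        .all(|seg| {
            let decoded = percent_encoding::percent_decode_str(seg)
                .decode_utf8_lossy()
                .into_owned();
            decoded != ".." && !decoded.contains('\0')
        })
}

fn main() {
    assert!(is_safe_path("/utf8.txt"));
    assert!(!is_safe_path("/../../../etc/passwd"));
    assert!(!is_safe_path("/%2e%2e/%2e%2e/etc/passwd"));
    assert!(!is_safe_path("/%00/etc/passwd%00"));
}
```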

View File

@ -1,138 +0,0 @@
# Changes
## Unreleased - 2021-xx-xx
## 3.0.0-beta.11 - 2022-01-04
- Minimum supported Rust version (MSRV) is now 1.54.
## 3.0.0-beta.10 - 2021-12-27
- Update `actix-server` to `2.0.0-rc.2`. [#2550]
[#2550]: https://github.com/actix/actix-web/pull/2550
## 3.0.0-beta.9 - 2021-12-11
- No significant changes since `3.0.0-beta.8`.
## 3.0.0-beta.8 - 2021-11-30
- Update `actix-tls` to `3.0.0-rc.1`. [#2474]
[#2474]: https://github.com/actix/actix-web/pull/2474
## 3.0.0-beta.7 - 2021-11-22
- Fix compatibility with experimental `io-uring` feature of `actix-rt`. [#2408]
[#2408]: https://github.com/actix/actix-web/pull/2408
## 3.0.0-beta.6 - 2021-11-15
- `TestServer::stop` is now async and will wait for the server and system to shutdown. [#2442]
- Update `actix-server` to `2.0.0-beta.9`. [#2442]
- Minimum supported Rust version (MSRV) is now 1.52.
[#2442]: https://github.com/actix/actix-web/pull/2442
## 3.0.0-beta.5 - 2021-09-09
- Minimum supported Rust version (MSRV) is now 1.51.
## 3.0.0-beta.4 - 2021-04-02
- Added `TestServer::client_headers` method. [#2097]
[#2097]: https://github.com/actix/actix-web/pull/2097
## 3.0.0-beta.3 - 2021-03-09
- No notable changes.
## 3.0.0-beta.2 - 2021-02-10
- No notable changes.
## 3.0.0-beta.1 - 2021-01-07
- Update `bytes` to `1.0`. [#1813]
[#1813]: https://github.com/actix/actix-web/pull/1813
## 2.1.0 - 2020-11-25
- Add ability to set address for `TestServer`. [#1645]
- Upgrade `base64` to `0.13`.
- Upgrade `serde_urlencoded` to `0.7`. [#1773]
[#1773]: https://github.com/actix/actix-web/pull/1773
[#1645]: https://github.com/actix/actix-web/pull/1645
## 2.0.0 - 2020-09-11
- Update actix-codec and actix-utils dependencies.
## 2.0.0-alpha.1 - 2020-05-23
- Update the `time` dependency to 0.2.7
- Update `actix-connect` dependency to 2.0.0-alpha.2
- Make `test_server` `async` fn.
- Bump minimum supported Rust version to 1.40
- Replace deprecated `net2` crate with `socket2`
- Update `base64` dependency to 0.12
- Update `env_logger` dependency to 0.7
## 1.0.0 - 2019-12-13
- Replaced `TestServer::start()` with `test_server()`
## 1.0.0-alpha.3 - 2019-12-07
- Migrate to `std::future`
## 0.2.5 - 2019-09-17
- Update serde_urlencoded to "0.6.1"
- Increase TestServerRuntime timeouts from 500ms to 3000ms
- Do not override current `System`
## 0.2.4 - 2019-07-18
- Update actix-server to 0.6
## 0.2.3 - 2019-07-16
- Add `delete`, `options`, `patch` methods to `TestServerRunner`
## 0.2.2 - 2019-06-16
- Add .put() and .sput() methods
## 0.2.1 - 2019-06-05
- Add license files
## 0.2.0 - 2019-05-12
- Update awc and actix-http deps
## 0.1.1 - 2019-04-24
- Always make new connection for http client
## 0.1.0 - 2019-04-16
- No changes
## 0.1.0-alpha.3 - 2019-04-02
- Request functions accept path #743
## 0.1.0-alpha.2 - 2019-03-29
- Added TestServerRuntime::load_body() method
- Update actix-http and awc libraries
## 0.1.0-alpha.1 - 2019-03-28
- Initial impl

View File

@ -1,55 +0,0 @@
[package]
name = "actix-http-test"
version = "3.0.0-beta.11"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Various helpers for Actix applications to use during testing"
keywords = ["http", "web", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
categories = [
"network-programming",
"asynchronous",
"web-programming::http-server",
"web-programming::websocket",
]
license = "MIT OR Apache-2.0"
edition = "2018"
[package.metadata.docs.rs]
features = []
[lib]
name = "actix_http_test"
path = "src/lib.rs"
[features]
default = []
# openssl
openssl = ["tls-openssl", "awc/openssl"]
[dependencies]
actix-service = "2.0.0"
actix-codec = "0.4.1"
actix-tls = "3.0.0"
actix-utils = "3.0.0"
actix-rt = "2.2"
actix-server = "2.0.0-rc.2"
awc = { version = "3.0.0-beta.18", default-features = false }
base64 = "0.13"
bytes = "1"
futures-core = { version = "0.3.7", default-features = false }
http = "0.2.5"
log = "0.4"
socket2 = "0.4"
serde = "1.0"
serde_json = "1.0"
slab = "0.4"
serde_urlencoded = "0.7"
tls-openssl = { version = "0.10.9", package = "openssl", optional = true }
tokio = { version = "1.8.4", features = ["sync"] }
[dev-dependencies]
actix-web = { version = "4.0.0-beta.19", default-features = false, features = ["cookies"] }
actix-http = "3.0.0-beta.18"

View File

@ -1,17 +0,0 @@
# actix-http-test
> Various helpers for Actix applications to use during testing.
[![crates.io](https://img.shields.io/crates/v/actix-http-test?label=latest)](https://crates.io/crates/actix-http-test)
[![Documentation](https://docs.rs/actix-http-test/badge.svg?version=3.0.0-beta.11)](https://docs.rs/actix-http-test/3.0.0-beta.11)
[![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http-test)
<br>
[![Dependency Status](https://deps.rs/crate/actix-http-test/3.0.0-beta.11/status.svg)](https://deps.rs/crate/actix-http-test/3.0.0-beta.11)
[![Download](https://img.shields.io/crates/d/actix-http-test.svg)](https://crates.io/crates/actix-http-test)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
## Documentation & Resources
- [API Documentation](https://docs.rs/actix-http-test)
- Minimum Supported Rust Version (MSRV): 1.54

File diff suppressed because it is too large

View File

@ -1,23 +1,21 @@
[package] [package]
name = "actix-http" name = "actix-http"
version = "3.0.0-beta.18" version = "2.2.2"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"] authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "HTTP primitives for the Actix ecosystem" description = "HTTP primitives for the Actix ecosystem"
readme = "README.md"
keywords = ["actix", "http", "framework", "async", "futures"] keywords = ["actix", "http", "framework", "async", "futures"]
homepage = "https://actix.rs" homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git" repository = "https://github.com/actix/actix-web.git"
categories = [ documentation = "https://docs.rs/actix-http/"
"network-programming", categories = ["network-programming", "asynchronous",
"asynchronous",
"web-programming::http-server", "web-programming::http-server",
"web-programming::websocket", "web-programming::websocket"]
]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
edition = "2018" edition = "2018"
[package.metadata.docs.rs] [package.metadata.docs.rs]
# features that docs.rs will build with features = ["openssl", "rustls", "compress", "secure-cookies", "actors"]
features = ["openssl", "rustls", "compress-brotli", "compress-gzip", "compress-zstd"]
[lib] [lib]
name = "actix_http" name = "actix_http"
@ -27,83 +25,79 @@ path = "src/lib.rs"
default = [] default = []
# openssl # openssl
openssl = ["actix-tls/accept", "actix-tls/openssl"] openssl = ["actix-tls/openssl", "actix-connect/openssl"]
# rustls support # rustls support
rustls = ["actix-tls/accept", "actix-tls/rustls"] rustls = ["actix-tls/rustls", "actix-connect/rustls"]
# enable compression support # enable compressison support
compress-brotli = ["brotli2", "__compress"] compress = ["flate2", "brotli"]
compress-gzip = ["flate2", "__compress"]
compress-zstd = ["zstd", "__compress"]
# Internal (PRIVATE!) features used to aid testing and checking feature status. # support for secure cookies
# Don't rely on these whatsoever. They may disappear at anytime. secure-cookies = ["cookie/secure"]
__compress = []
# support for actix Actor messages
actors = ["actix"]
[dependencies] [dependencies]
actix-service = "2.0.0" actix-service = "1.0.6"
actix-codec = "0.4.1" actix-codec = "0.3.0"
actix-utils = "3.0.0" actix-connect = "2.0.0"
actix-rt = { version = "2.2", default-features = false } actix-utils = "2.0.0"
actix-rt = "1.0.0"
actix-threadpool = "0.3.1"
actix-tls = { version = "2.0.0", optional = true }
actix = { version = "0.10.0", optional = true }
ahash = "0.7"
base64 = "0.13" base64 = "0.13"
bitflags = "1.2" bitflags = "1.2"
bytes = "1" bytes = "0.5.3"
bytestring = "1" cookie = { version = "0.14.1", features = ["percent-encode"] }
derive_more = "0.99.5" copyless = "0.1.4"
derive_more = "0.99.2"
either = "1.5.3"
encoding_rs = "0.8" encoding_rs = "0.8"
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] } futures-channel = { version = "0.3.5", default-features = false }
h2 = "0.3.9" futures-core = { version = "0.3.5", default-features = false }
http = "0.2.5" futures-util = { version = "0.3.5", default-features = false }
httparse = "1.5.1" fxhash = "0.2.1"
httpdate = "1.0.1" h2 = "0.2.1"
itoa = "1" http = "0.2.0"
language-tags = "0.3" httparse = "1.3"
local-channel = "0.1" indexmap = "1.3"
itoa = "0.4"
lazy_static = "1.4"
language-tags = "0.2"
log = "0.4" log = "0.4"
mime = "0.3" mime = "0.3"
percent-encoding = "2.1" percent-encoding = "2.1"
pin-project-lite = "0.2" pin-project = "1.0.0"
rand = "0.8" rand = "0.7"
sha-1 = "0.10" regex = "1.3"
smallvec = "1.6.1" serde = "1.0"
serde_json = "1.0"
# tls sha-1 = "0.9"
actix-tls = { version = "3.0.0", default-features = false, optional = true } slab = "0.4"
serde_urlencoded = "0.7"
time = { version = "0.2.7", default-features = false, features = ["std"] }
# compression # compression
brotli2 = { version="0.3.2", optional = true } brotli = { version = "3.3.3", optional = true }
flate2 = { version = "1.0.13", optional = true } flate2 = { version = "1.0.13", optional = true }
zstd = { version = "0.9", optional = true }
[dev-dependencies] [dev-dependencies]
actix-http-test = { version = "3.0.0-beta.11", features = ["openssl"] } actix-server = "1.0.1"
actix-server = "2.0.0-rc.2" actix-connect = { version = "2.0.0", features = ["openssl"] }
actix-tls = { version = "3.0.0", features = ["openssl"] } actix-http-test = { version = "2.0.0", features = ["openssl"] }
actix-web = "4.0.0-beta.19" actix-tls = { version = "2.0.0", features = ["openssl"] }
criterion = "0.3"
async-stream = "0.3" env_logger = "0.7"
criterion = { version = "0.3", features = ["html_reports"] } serde_derive = "1.0"
env_logger = "0.9" open-ssl = { version="0.10", package = "openssl" }
futures-util = { version = "0.3.7", default-features = false, features = ["alloc"] } rust-tls = { version="0.18", package = "rustls" }
rcgen = "0.8"
regex = "1.3"
rustls-pemfile = "0.2"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
static_assertions = "1"
tls-openssl = { package = "openssl", version = "0.10.9" }
tls-rustls = { package = "rustls", version = "0.20.0" }
tokio = { version = "1.8.4", features = ["net", "rt", "macros"] }
[[example]]
name = "ws"
required-features = ["rustls"]
[[bench]] [[bench]]
name = "write-camel-case" name = "content-length"
harness = false harness = false
[[bench]] [[bench]]
@ -113,7 +107,3 @@ harness = false
[[bench]] [[bench]]
name = "uninit-headers" name = "uninit-headers"
harness = false harness = false
[[bench]]
name = "quality-value"
harness = false

View File

@ -3,18 +3,16 @@
> HTTP primitives for the Actix ecosystem. > HTTP primitives for the Actix ecosystem.
[![crates.io](https://img.shields.io/crates/v/actix-http?label=latest)](https://crates.io/crates/actix-http) [![crates.io](https://img.shields.io/crates/v/actix-http?label=latest)](https://crates.io/crates/actix-http)
[![Documentation](https://docs.rs/actix-http/badge.svg?version=3.0.0-beta.18)](https://docs.rs/actix-http/3.0.0-beta.18) [![Documentation](https://docs.rs/actix-http/badge.svg?version=2.2.2)](https://docs.rs/actix-http/2.2.2)
[![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html) ![Apache 2.0 or MIT licensed](https://img.shields.io/crates/l/actix-http)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http.svg) [![Dependency Status](https://deps.rs/crate/actix-http/2.2.2/status.svg)](https://deps.rs/crate/actix-http/2.2.2)
<br /> [![Join the chat at https://gitter.im/actix/actix-web](https://badges.gitter.im/actix/actix-web.svg)](https://gitter.im/actix/actix-web?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![dependency status](https://deps.rs/crate/actix-http/3.0.0-beta.18/status.svg)](https://deps.rs/crate/actix-http/3.0.0-beta.18)
[![Download](https://img.shields.io/crates/d/actix-http.svg)](https://crates.io/crates/actix-http)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
## Documentation & Resources ## Documentation & Resources
- [API Documentation](https://docs.rs/actix-http) - [API Documentation](https://docs.rs/actix-http)
- Minimum Supported Rust Version (MSRV): 1.54 - [Chat on Gitter](https://gitter.im/actix/actix-web)
- Minimum Supported Rust Version (MSRV): 1.42.0
## Example ## Example
@ -54,8 +52,8 @@ async fn main() -> io::Result<()> {
This project is licensed under either of This project is licensed under either of
- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)) * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
- MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT)) * MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
at your option. at your option.

View File

@ -0,0 +1,291 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use bytes::BytesMut;
// benchmark sending all requests at the same time
fn bench_write_content_length(c: &mut Criterion) {
let mut group = c.benchmark_group("write_content_length");
let sizes = [
0, 1, 11, 83, 101, 653, 1001, 6323, 10001, 56329, 100001, 123456, 98724245,
4294967202,
];
for i in sizes.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_content_length(i, &mut b)
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_content_length(i, &mut b)
})
});
group.bench_with_input(BenchmarkId::new("itoa", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_itoa::write_content_length(i, &mut b)
})
});
}
group.finish();
}
criterion_group!(benches, bench_write_content_length);
criterion_main!(benches);
mod _itoa {
use bytes::{BufMut, BytesMut};
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
if n == 0 {
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
return;
}
let mut buf = itoa::Buffer::new();
bytes.put_slice(b"\r\ncontent-length: ");
bytes.put_slice(buf.format(n).as_bytes());
bytes.put_slice(b"\r\n");
}
}
mod _new {
use bytes::{BufMut, BytesMut};
const DIGITS_START: u8 = b'0';
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
if n == 0 {
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
return;
}
bytes.put_slice(b"\r\ncontent-length: ");
if n < 10 {
bytes.put_u8(DIGITS_START + (n as u8));
} else if n < 100 {
let n = n as u8;
let d10 = n / 10;
let d1 = n % 10;
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 1000 {
let n = n as u16;
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 10_000 {
let n = n as u16;
let d1000 = (n / 1000) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 100_000 {
let n = n as u32;
let d10000 = (n / 10000) as u8;
let d1000 = ((n / 1000) % 10) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d10000);
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 1_000_000 {
let n = n as u32;
let d100000 = (n / 100000) as u8;
let d10000 = ((n / 10000) % 10) as u8;
let d1000 = ((n / 1000) % 10) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100000);
bytes.put_u8(DIGITS_START + d10000);
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else {
write_usize(n, bytes);
}
bytes.put_slice(b"\r\n");
}
fn write_usize(n: usize, bytes: &mut BytesMut) {
let mut n = n;
// 20 chars is max length of a usize (2^64)
// digits will be added to the buffer from lsd to msd
let mut buf = BytesMut::with_capacity(20);
while n > 9 {
// "pop" the least-significant digit
let lsd = (n % 10) as u8;
// remove the lsd from n
n = n / 10;
buf.put_u8(DIGITS_START + lsd);
}
// put msd to result buffer
bytes.put_u8(DIGITS_START + (n as u8));
// put, in reverse (msd to lsd), remaining digits to buffer
for i in (0..buf.len()).rev() {
bytes.put_u8(buf[i]);
}
}
}
mod _original {
use std::{mem, ptr, slice};
use bytes::{BufMut, BytesMut};
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
if n < 10 {
let mut buf: [u8; 21] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'\r', b'\n',
];
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else if n < 100 {
let mut buf: [u8; 22] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'\r', b'\n',
];
let d1 = n << 1;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(18),
2,
);
}
bytes.put_slice(&buf);
} else if n < 1000 {
let mut buf: [u8; 23] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'0', b'\r',
b'\n',
];
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(19),
2,
)
};
// decode last 1
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else {
bytes.put_slice(b"\r\ncontent-length: ");
convert_usize(n, bytes);
}
}
pub(crate) fn convert_usize(mut n: usize, bytes: &mut BytesMut) {
let mut curr: isize = 39;
let mut buf: [u8; 41] = unsafe { mem::MaybeUninit::uninit().assume_init() };
buf[39] = b'\r';
buf[40] = b'\n';
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
// eagerly decode 4 characters at a time
while n >= 10_000 {
let rem = (n % 10_000) as isize;
n /= 10_000;
let d1 = (rem / 100) << 1;
let d2 = (rem % 100) << 1;
curr -= 4;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
ptr::copy_nonoverlapping(
lut_ptr.offset(d2),
buf_ptr.offset(curr + 2),
2,
);
}
}
// if we reach here numbers are <= 9999, so at most 4 chars long
let mut n = n as isize; // possibly reduce 64bit math
// decode 2 more chars, if > 2 chars
if n >= 100 {
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
unsafe {
bytes.extend_from_slice(slice::from_raw_parts(
buf_ptr.offset(curr),
41 - curr as usize,
));
}
}
}
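
For orientation, a minimal standalone check (not part of the benchmark file) of the exact bytes these helpers append; it reproduces the `_itoa` variant so it compiles on its own with the `bytes` and `itoa` crates. The measurements themselves are produced with `cargo bench`.

```rust
// Standalone sanity check mirroring the `_itoa` variant above.
use bytes::{BufMut, BytesMut};

fn write_content_length(n: usize, bytes: &mut BytesMut) {
    if n == 0 {
        bytes.put_slice(b"\r\ncontent-length: 0\r\n");
        return;
    }

    let mut buf = itoa::Buffer::new();
    bytes.put_slice(b"\r\ncontent-length: ");
    bytes.put_slice(buf.format(n).as_bytes());
    bytes.put_slice(b"\r\n");
}

fn main() {
    let mut b = BytesMut::with_capacity(35);
    write_content_length(12_345, &mut b);
    assert_eq!(&b[..], b"\r\ncontent-length: 12345\r\n");

    let mut b = BytesMut::with_capacity(35);
    write_content_length(0, &mut b);
    assert_eq!(&b[..], b"\r\ncontent-length: 0\r\n");
}
```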

View File

@ -1,90 +0,0 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
const CODES: &[u16] = &[0, 1000, 201, 800, 550];
fn bench_quality_display_impls(c: &mut Criterion) {
let mut group = c.benchmark_group("quality value display impls");
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("New (fast?)", i), i, |b, &i| {
b.iter(|| _new::Quality(i).to_string())
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| _naive::Quality(i).to_string())
});
}
group.finish();
}
criterion_group!(benches, bench_quality_display_impls);
criterion_main!(benches);
mod _new {
use std::fmt;
pub struct Quality(pub(crate) u16);
impl fmt::Display for Quality {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0 {
0 => f.write_str("0"),
1000 => f.write_str("1"),
// some number in the range 1–999
x => {
f.write_str("0.")?;
// this implementation avoids string allocation otherwise required
// for `.trim_end_matches('0')`
if x < 10 {
f.write_str("00")?;
// 0 is handled so it's not possible to have a trailing 0, we can just return
itoa::fmt(f, x)
} else if x < 100 {
f.write_str("0")?;
if x % 10 == 0 {
// trailing 0, divide by 10 and write
itoa::fmt(f, x / 10)
} else {
itoa::fmt(f, x)
}
} else {
// x is in range 101–999
if x % 100 == 0 {
// two trailing 0s, divide by 100 and write
itoa::fmt(f, x / 100)
} else if x % 10 == 0 {
// one trailing 0, divide by 10 and write
itoa::fmt(f, x / 10)
} else {
itoa::fmt(f, x)
}
}
}
}
}
}
}
mod _naive {
use std::fmt;
pub struct Quality(pub(crate) u16);
impl fmt::Display for Quality {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0 {
0 => f.write_str("0"),
1000 => f.write_str("1"),
x => {
write!(f, "{}", format!("{:03}", x).trim_end_matches('0'))
}
}
}
}
}
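
As a hedged reference point (not from this changeset), an allocating formatter that spells out the q-value strings the fast `Display` impl above is designed to emit:

```rust
// Allocating reference for the quality-value formatting measured above:
// q is in thousandths, "0" and "1" are special-cased, trailing zeros trimmed.
fn q_to_string(q: u16) -> String {
    match q {
        0 => "0".to_owned(),
        1000 => "1".to_owned(),
        x => format!("0.{}", format!("{:03}", x).trim_end_matches('0')),
    }
}

fn main() {
    assert_eq!(q_to_string(0), "0");
    assert_eq!(q_to_string(1000), "1");
    assert_eq!(q_to_string(7), "0.007");
    assert_eq!(q_to_string(201), "0.201");
    assert_eq!(q_to_string(550), "0.55");
    assert_eq!(q_to_string(800), "0.8");
}
```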

View File

@ -176,7 +176,7 @@ mod _original {
buf[5] = b'0'; buf[5] = b'0';
buf[7] = b'9'; buf[7] = b'9';
} }
_ => {} _ => (),
} }
let mut curr: isize = 12; let mut curr: isize = 12;
@ -189,7 +189,11 @@ mod _original {
n /= 100; n /= 100;
curr -= 2; curr -= 2;
unsafe { unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2); ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
} }
// decode last 1 or 2 chars // decode last 1 or 2 chars
@ -202,7 +206,11 @@ mod _original {
let d1 = n << 1; let d1 = n << 1;
curr -= 2; curr -= 2;
unsafe { unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2); ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
} }
} }

View File

@ -54,10 +54,15 @@ const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex {
value: (0, 0), value: (0, 0),
}; };
const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] = [EMPTY_HEADER_INDEX; MAX_HEADERS]; const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] =
[EMPTY_HEADER_INDEX; MAX_HEADERS];
impl HeaderIndex { impl HeaderIndex {
fn record(bytes: &[u8], headers: &[httparse::Header<'_>], indices: &mut [HeaderIndex]) { fn record(
bytes: &[u8],
headers: &[httparse::Header<'_>],
indices: &mut [HeaderIndex],
) {
let bytes_ptr = bytes.as_ptr() as usize; let bytes_ptr = bytes.as_ptr() as usize;
for (header, indices) in headers.iter().zip(indices.iter_mut()) { for (header, indices) in headers.iter().zip(indices.iter_mut()) {
let name_start = header.name.as_ptr() as usize - bytes_ptr; let name_start = header.name.as_ptr() as usize - bytes_ptr;
@ -73,12 +78,12 @@ impl HeaderIndex {
// test cases taken from: // test cases taken from:
// https://github.com/seanmonstar/httparse/blob/master/benches/parse.rs // https://github.com/seanmonstar/httparse/blob/master/benches/parse.rs
const REQ_SHORT: &[u8] = b"\ const REQ_SHORT: &'static [u8] = b"\
GET / HTTP/1.0\r\n\ GET / HTTP/1.0\r\n\
Host: example.com\r\n\ Host: example.com\r\n\
Cookie: session=60; user_id=1\r\n\r\n"; Cookie: session=60; user_id=1\r\n\r\n";
const REQ: &[u8] = b"\ const REQ: &'static [u8] = b"\
GET /wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg HTTP/1.1\r\n\ GET /wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg HTTP/1.1\r\n\
Host: www.kittyhell.com\r\n\ Host: www.kittyhell.com\r\n\
User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 Pathtraq/0.9\r\n\ User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 Pathtraq/0.9\r\n\
@ -114,8 +119,6 @@ mod _original {
use std::mem::MaybeUninit; use std::mem::MaybeUninit;
pub fn parse_headers(src: &mut BytesMut) -> usize { pub fn parse_headers(src: &mut BytesMut) -> usize {
#![allow(clippy::uninit_assumed_init)]
let mut headers: [HeaderIndex; MAX_HEADERS] = let mut headers: [HeaderIndex; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() }; unsafe { MaybeUninit::uninit().assume_init() };

View File

@ -1,93 +0,0 @@
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
fn bench_write_camel_case(c: &mut Criterion) {
let mut group = c.benchmark_group("write_camel_case");
let names = ["connection", "Transfer-Encoding", "transfer-encoding"];
for &i in &names {
let bts = i.as_bytes();
group.bench_with_input(BenchmarkId::new("Original", i), bts, |b, bts| {
b.iter(|| {
let mut buf = black_box([0; 24]);
_original::write_camel_case(black_box(bts), &mut buf)
});
});
group.bench_with_input(BenchmarkId::new("New", i), bts, |b, bts| {
b.iter(|| {
let mut buf = black_box([0; 24]);
let len = black_box(bts.len());
_new::write_camel_case(black_box(bts), buf.as_mut_ptr(), len)
});
});
}
group.finish();
}
criterion_group!(benches, bench_write_camel_case);
criterion_main!(benches);
mod _new {
pub fn write_camel_case(value: &[u8], buf: *mut u8, len: usize) {
// first copy entire (potentially wrong) slice to output
let buffer = unsafe {
std::ptr::copy_nonoverlapping(value.as_ptr(), buf, len);
std::slice::from_raw_parts_mut(buf, len)
};
let mut iter = value.iter();
// first character should be uppercase
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[0] = c & 0b1101_1111;
}
// track 1 ahead of the current position since that's the location being assigned to
let mut index = 2;
// remaining characters after hyphens should also be uppercase
while let Some(&c) = iter.next() {
if c == b'-' {
// advance iter by one and uppercase if needed
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[index] = c & 0b1101_1111;
}
}
index += 1;
}
}
}
mod _original {
pub fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
let mut index = 0;
let key = value;
let mut key_iter = key.iter();
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
} else {
return;
}
while let Some(c) = key_iter.next() {
buffer[index] = *c;
index += 1;
if *c == b'-' {
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
}
}
}
}
}
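
For comparison, a hedged, safe and allocating version of the header-name camel-casing both benchmark variants above implement:

```rust
// Safe reference for the camel-casing measured above: uppercase the first
// character and every character that follows a hyphen.
fn camel_case(name: &str) -> String {
    let mut out = String::with_capacity(name.len());
    let mut upper_next = true;
    for ch in name.chars() {
        if upper_next {
            out.extend(ch.to_uppercase());
        } else {
            out.push(ch);
        }
        upper_next = ch == '-';
    }
    out
}

fn main() {
    assert_eq!(camel_case("connection"), "Connection");
    assert_eq!(camel_case("transfer-encoding"), "Transfer-Encoding");
    assert_eq!(camel_case("Transfer-Encoding"), "Transfer-Encoding");
}
```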

View File

@ -1,26 +0,0 @@
use actix_http::HttpService;
use actix_server::Server;
use actix_service::map_config;
use actix_web::{dev::AppConfig, get, App};
#[get("/")]
async fn index() -> &'static str {
"Hello, world. From Actix Web!"
}
#[tokio::main(flavor = "current_thread")]
async fn main() -> std::io::Result<()> {
Server::build()
.bind("hello-world", "127.0.0.1:8080", || {
// construct actix-web app
let app = App::new().service(index);
HttpService::build()
// pass the app to service builder
// map_config is used to map App's configuration to ServiceBuilder
.finish(map_config(app, |_| AppConfig::default()))
.tcp()
})?
.run()
.await
}

View File

@ -1,17 +1,19 @@
use std::io; use std::{env, io};
use actix_http::{Error, HttpService, Request, Response, StatusCode}; use actix_http::{Error, HttpService, Request, Response};
use actix_server::Server; use actix_server::Server;
use bytes::BytesMut; use bytes::BytesMut;
use futures_util::StreamExt as _; use futures_util::StreamExt;
use http::header::HeaderValue; use http::header::HeaderValue;
use log::info;
#[actix_rt::main] #[actix_rt::main]
async fn main() -> io::Result<()> { async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); env::set_var("RUST_LOG", "echo=info");
env_logger::init();
Server::build() Server::build()
.bind("echo", ("127.0.0.1", 8080), || { .bind("echo", "127.0.0.1:8080", || {
HttpService::build() HttpService::build()
.client_timeout(1000) .client_timeout(1000)
.client_disconnect(1000) .client_disconnect(1000)
@ -21,11 +23,10 @@ async fn main() -> io::Result<()> {
body.extend_from_slice(&item?); body.extend_from_slice(&item?);
} }
log::info!("request body: {:?}", body); info!("request body: {:?}", body);
Ok::<_, Error>( Ok::<_, Error>(
Response::build(StatusCode::OK) Response::Ok()
.insert_header(("x-head", HeaderValue::from_static("dummy value!"))) .header("x-head", HeaderValue::from_static("dummy value!"))
.body(body), .body(body),
) )
}) })

View File

@ -1,31 +1,31 @@
use std::io; use std::{env, io};
use actix_http::{ use actix_http::http::HeaderValue;
body::MessageBody, header::HeaderValue, Error, HttpService, Request, Response, StatusCode, use actix_http::{Error, HttpService, Request, Response};
};
use actix_server::Server; use actix_server::Server;
use bytes::BytesMut; use bytes::BytesMut;
use futures_util::StreamExt as _; use futures_util::StreamExt;
use log::info;
async fn handle_request(mut req: Request) -> Result<Response<impl MessageBody>, Error> { async fn handle_request(mut req: Request) -> Result<Response, Error> {
let mut body = BytesMut::new(); let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await { while let Some(item) = req.payload().next().await {
body.extend_from_slice(&item?) body.extend_from_slice(&item?)
} }
log::info!("request body: {:?}", body); info!("request body: {:?}", body);
Ok(Response::Ok()
Ok(Response::build(StatusCode::OK) .header("x-head", HeaderValue::from_static("dummy value!"))
.insert_header(("x-head", HeaderValue::from_static("dummy value!")))
.body(body)) .body(body))
} }
#[actix_rt::main] #[actix_rt::main]
async fn main() -> io::Result<()> { async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); env::set_var("RUST_LOG", "echo=info");
env_logger::init();
Server::build() Server::build()
.bind("echo", ("127.0.0.1", 8080), || { .bind("echo", "127.0.0.1:8080", || {
HttpService::build().finish(handle_request).tcp() HttpService::build().finish(handle_request).tcp()
})? })?
.run() .run()

View File

@ -1,35 +1,26 @@
use std::{convert::Infallible, io}; use std::{env, io};
use actix_http::{ use actix_http::{HttpService, Response};
header::HeaderValue, HttpMessage, HttpService, Request, Response, StatusCode,
};
use actix_server::Server; use actix_server::Server;
use futures_util::future;
use http::header::HeaderValue;
use log::info;
#[actix_rt::main] #[actix_rt::main]
async fn main() -> io::Result<()> { async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); env::set_var("RUST_LOG", "hello_world=info");
env_logger::init();
Server::build() Server::build()
.bind("hello-world", ("127.0.0.1", 8080), || { .bind("hello-world", "127.0.0.1:8080", || {
HttpService::build() HttpService::build()
.client_timeout(1000) .client_timeout(1000)
.client_disconnect(1000) .client_disconnect(1000)
.on_connect_ext(|_, ext| { .finish(|_req| {
ext.insert(42u32); info!("{:?}", _req);
}) let mut res = Response::Ok();
.finish(|req: Request| async move { res.header("x-head", HeaderValue::from_static("dummy value!"));
log::info!("{:?}", req); future::ok::<_, ()>(res.body("Hello world!"))
let mut res = Response::build(StatusCode::OK);
res.insert_header(("x-head", HeaderValue::from_static("dummy value!")));
let forty_two = req.extensions().get::<u32>().unwrap().to_string();
res.insert_header((
"x-forty-two",
HeaderValue::from_str(&forty_two).unwrap(),
));
Ok::<_, Infallible>(res.body("Hello world!"))
}) })
.tcp() .tcp()
})? })?

View File

@ -1,40 +0,0 @@
//! Example showing response body (chunked) stream erroring.
//!
//! Test using `nc` or `curl`.
//! ```sh
//! $ curl -vN 127.0.0.1:8080
//! $ echo 'GET / HTTP/1.1\n\n' | nc 127.0.0.1 8080
//! ```
use std::{convert::Infallible, io, time::Duration};
use actix_http::{body::BodyStream, HttpService, Response};
use actix_server::Server;
use async_stream::stream;
use bytes::Bytes;
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("streaming-error", ("127.0.0.1", 8080), || {
HttpService::build()
.finish(|req| async move {
log::info!("{:?}", req);
let res = Response::ok();
Ok::<_, Infallible>(res.set_body(BodyStream::new(stream! {
yield Ok(Bytes::from("123"));
yield Ok(Bytes::from("456"));
actix_rt::time::sleep(Duration::from_millis(1000)).await;
yield Err(io::Error::new(io::ErrorKind::Other, ""));
})))
})
.tcp()
})?
.run()
.await
}

View File

@ -1,112 +0,0 @@
//! Sets up a WebSocket server over TCP and TLS.
//! Sends a heartbeat message every 4 seconds but does not respond to any incoming frames.
extern crate tls_rustls as rustls;
use std::{
io,
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use actix_codec::Encoder;
use actix_http::{body::BodyStream, error::Error, ws, HttpService, Request, Response};
use actix_rt::time::{interval, Interval};
use actix_server::Server;
use bytes::{Bytes, BytesMut};
use bytestring::ByteString;
use futures_core::{ready, Stream};
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("tcp", ("127.0.0.1", 8080), || {
HttpService::build().h1(handler).tcp()
})?
.bind("tls", ("127.0.0.1", 8443), || {
HttpService::build().finish(handler).rustls(tls_config())
})?
.run()
.await
}
async fn handler(req: Request) -> Result<Response<BodyStream<Heartbeat>>, Error> {
log::info!("handshaking");
let mut res = ws::handshake(req.head())?;
// handshake will always fail under HTTP/2
log::info!("responding");
Ok(res.message_body(BodyStream::new(Heartbeat::new(ws::Codec::new())))?)
}
struct Heartbeat {
codec: ws::Codec,
interval: Interval,
}
impl Heartbeat {
fn new(codec: ws::Codec) -> Self {
Self {
codec,
interval: interval(Duration::from_secs(4)),
}
}
}
impl Stream for Heartbeat {
type Item = Result<Bytes, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
log::trace!("poll");
ready!(self.as_mut().interval.poll_tick(cx));
let mut buffer = BytesMut::new();
self.as_mut()
.codec
.encode(
ws::Message::Text(ByteString::from_static("hello world")),
&mut buffer,
)
.unwrap();
Poll::Ready(Some(Ok(buffer.freeze())))
}
}
fn tls_config() -> rustls::ServerConfig {
use std::io::BufReader;
use rustls::{Certificate, PrivateKey};
use rustls_pemfile::{certs, pkcs8_private_keys};
let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_owned()]).unwrap();
let cert_file = cert.serialize_pem().unwrap();
let key_file = cert.serialize_private_key_pem();
let cert_file = &mut BufReader::new(cert_file.as_bytes());
let key_file = &mut BufReader::new(key_file.as_bytes());
let cert_chain = certs(cert_file)
.unwrap()
.into_iter()
.map(Certificate)
.collect();
let mut keys = pkcs8_private_keys(key_file).unwrap();
let mut config = rustls::ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(cert_chain, PrivateKey(keys.remove(0)))
.unwrap();
config.alpn_protocols.push(b"http/1.1".to_vec());
config.alpn_protocols.push(b"h2".to_vec());
config
}
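
A hedged companion sketch, not included in this changeset: reading the heartbeat frames from the plain-TCP listener above with `awc`'s WebSocket client. The URL and runtime attribute are assumptions; the TLS listener would additionally need a rustls-enabled connector.

```rust
use futures_util::StreamExt as _;

// Connects to ws://127.0.0.1:8080/ (the TCP listener above) and prints whatever
// frames the Heartbeat stream sends. Assumes the `awc` and `actix-rt` crates.
#[actix_rt::main]
async fn main() {
    let (_resp, mut conn) = awc::Client::new()
        .ws("ws://127.0.0.1:8080/")
        .connect()
        .await
        .expect("WebSocket handshake failed");

    while let Some(frame) = conn.next().await {
        println!("received: {:?}", frame);
    }
}
```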

actix-http/rustfmt.toml (new file, 5 lines)
View File

@ -0,0 +1,5 @@
max_width = 89
reorder_imports = true
#wrap_comments = true
#fn_args_density = "Compressed"
#use_small_heuristics = false

actix-http/src/body.rs (new file, 723 lines)
View File

@ -0,0 +1,723 @@
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, mem};
use bytes::{Bytes, BytesMut};
use futures_core::Stream;
use futures_util::ready;
use pin_project::pin_project;
use crate::error::Error;
#[derive(Debug, PartialEq, Copy, Clone)]
/// Body size hint
pub enum BodySize {
None,
Empty,
Sized(u64),
Stream,
}
impl BodySize {
pub fn is_eof(&self) -> bool {
matches!(self, BodySize::None | BodySize::Empty | BodySize::Sized(0))
}
}
/// Type that provides this trait can be streamed to a peer.
pub trait MessageBody {
fn size(&self) -> BodySize;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>>;
downcast_get_type_id!();
}
downcast!(MessageBody);
impl MessageBody for () {
fn size(&self) -> BodySize {
BodySize::Empty
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
Poll::Ready(None)
}
}
impl<T: MessageBody + Unpin> MessageBody for Box<T> {
fn size(&self) -> BodySize {
self.as_ref().size()
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
Pin::new(self.get_mut().as_mut()).poll_next(cx)
}
}
#[pin_project(project = ResponseBodyProj)]
pub enum ResponseBody<B> {
Body(#[pin] B),
Other(#[pin] Body),
}
impl ResponseBody<Body> {
pub fn into_body<B>(self) -> ResponseBody<B> {
match self {
ResponseBody::Body(b) => ResponseBody::Other(b),
ResponseBody::Other(b) => ResponseBody::Other(b),
}
}
}
impl<B> ResponseBody<B> {
pub fn take_body(&mut self) -> ResponseBody<B> {
std::mem::replace(self, ResponseBody::Other(Body::None))
}
}
impl<B: MessageBody> ResponseBody<B> {
pub fn as_ref(&self) -> Option<&B> {
if let ResponseBody::Body(ref b) = self {
Some(b)
} else {
None
}
}
}
impl<B: MessageBody> MessageBody for ResponseBody<B> {
fn size(&self) -> BodySize {
match self {
ResponseBody::Body(ref body) => body.size(),
ResponseBody::Other(ref body) => body.size(),
}
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
match self.project() {
ResponseBodyProj::Body(body) => body.poll_next(cx),
ResponseBodyProj::Other(body) => body.poll_next(cx),
}
}
}
impl<B: MessageBody> Stream for ResponseBody<B> {
type Item = Result<Bytes, Error>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
match self.project() {
ResponseBodyProj::Body(body) => body.poll_next(cx),
ResponseBodyProj::Other(body) => body.poll_next(cx),
}
}
}
#[pin_project(project = BodyProj)]
/// Represents various types of http message body.
pub enum Body {
/// Empty response. `Content-Length` header is not set.
None,
/// Zero sized response body. `Content-Length` header is set to `0`.
Empty,
/// Specific response body.
Bytes(Bytes),
/// Generic message body.
Message(Box<dyn MessageBody + Unpin>),
}
impl Body {
/// Create body from slice (copy)
pub fn from_slice(s: &[u8]) -> Body {
Body::Bytes(Bytes::copy_from_slice(s))
}
/// Create body from generic message body.
pub fn from_message<B: MessageBody + Unpin + 'static>(body: B) -> Body {
Body::Message(Box::new(body))
}
}
impl MessageBody for Body {
fn size(&self) -> BodySize {
match self {
Body::None => BodySize::None,
Body::Empty => BodySize::Empty,
Body::Bytes(ref bin) => BodySize::Sized(bin.len() as u64),
Body::Message(ref body) => body.size(),
}
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
match self.project() {
BodyProj::None => Poll::Ready(None),
BodyProj::Empty => Poll::Ready(None),
BodyProj::Bytes(ref mut bin) => {
let len = bin.len();
if len == 0 {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(bin))))
}
}
BodyProj::Message(ref mut body) => Pin::new(body.as_mut()).poll_next(cx),
}
}
}
impl PartialEq for Body {
fn eq(&self, other: &Body) -> bool {
match *self {
Body::None => matches!(*other, Body::None),
Body::Empty => matches!(*other, Body::Empty),
Body::Bytes(ref b) => match *other {
Body::Bytes(ref b2) => b == b2,
_ => false,
},
Body::Message(_) => false,
}
}
}
impl fmt::Debug for Body {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Body::None => write!(f, "Body::None"),
Body::Empty => write!(f, "Body::Empty"),
Body::Bytes(ref b) => write!(f, "Body::Bytes({:?})", b),
Body::Message(_) => write!(f, "Body::Message(_)"),
}
}
}
impl From<&'static str> for Body {
fn from(s: &'static str) -> Body {
Body::Bytes(Bytes::from_static(s.as_ref()))
}
}
impl From<&'static [u8]> for Body {
fn from(s: &'static [u8]) -> Body {
Body::Bytes(Bytes::from_static(s))
}
}
impl From<Vec<u8>> for Body {
fn from(vec: Vec<u8>) -> Body {
Body::Bytes(Bytes::from(vec))
}
}
impl From<String> for Body {
fn from(s: String) -> Body {
s.into_bytes().into()
}
}
impl<'a> From<&'a String> for Body {
fn from(s: &'a String) -> Body {
Body::Bytes(Bytes::copy_from_slice(AsRef::<[u8]>::as_ref(&s)))
}
}
impl From<Bytes> for Body {
fn from(s: Bytes) -> Body {
Body::Bytes(s)
}
}
impl From<BytesMut> for Body {
fn from(s: BytesMut) -> Body {
Body::Bytes(s.freeze())
}
}
impl From<serde_json::Value> for Body {
fn from(v: serde_json::Value) -> Body {
Body::Bytes(v.to_string().into())
}
}
impl<S> From<SizedStream<S>> for Body
where
S: Stream<Item = Result<Bytes, Error>> + Unpin + 'static,
{
fn from(s: SizedStream<S>) -> Body {
Body::from_message(s)
}
}
impl<S, E> From<BodyStream<S, E>> for Body
where
S: Stream<Item = Result<Bytes, E>> + Unpin + 'static,
E: Into<Error> + 'static,
{
fn from(s: BodyStream<S, E>) -> Body {
Body::from_message(s)
}
}
impl MessageBody for Bytes {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()))))
}
}
}
impl MessageBody for BytesMut {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()).freeze())))
}
}
}
impl MessageBody for &'static str {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from_static(
mem::take(self.get_mut()).as_ref(),
))))
}
}
}
impl MessageBody for Vec<u8> {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from(mem::take(self.get_mut())))))
}
}
}
impl MessageBody for String {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from(
mem::take(self.get_mut()).into_bytes(),
))))
}
}
}
/// Type represent streaming body.
/// Response does not contain `content-length` header and appropriate transfer encoding is used.
#[pin_project]
pub struct BodyStream<S: Unpin, E> {
#[pin]
stream: S,
_t: PhantomData<E>,
}
impl<S, E> BodyStream<S, E>
where
S: Stream<Item = Result<Bytes, E>> + Unpin,
E: Into<Error>,
{
pub fn new(stream: S) -> Self {
BodyStream {
stream,
_t: PhantomData,
}
}
}
impl<S, E> MessageBody for BodyStream<S, E>
where
S: Stream<Item = Result<Bytes, E>> + Unpin,
E: Into<Error>,
{
fn size(&self) -> BodySize {
BodySize::Stream
}
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped to prevent [`BodyStream`]'s transmission being
/// ended on a zero-length chunk, but rather proceed until the underlying
/// [`Stream`] ends.
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
let mut stream = self.project().stream;
loop {
let stream = stream.as_mut();
return Poll::Ready(match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
opt => opt.map(|res| res.map_err(Into::into)),
});
}
}
}
/// Type represent streaming body. This body implementation should be used
/// if total size of stream is known. Data get sent as is without using transfer encoding.
#[pin_project]
pub struct SizedStream<S: Unpin> {
size: u64,
#[pin]
stream: S,
}
impl<S> SizedStream<S>
where
S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
pub fn new(size: u64, stream: S) -> Self {
SizedStream { size, stream }
}
}
impl<S> MessageBody for SizedStream<S>
where
S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
fn size(&self) -> BodySize {
BodySize::Sized(self.size as u64)
}
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped to prevent [`SizedStream`]'s transmission being
/// ended on a zero-length chunk, but rather proceed until the underlying
/// [`Stream`] ends.
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
let mut stream: Pin<&mut S> = self.project().stream;
loop {
let stream = stream.as_mut();
return Poll::Ready(match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
val => val,
});
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures_util::future::poll_fn;
use futures_util::pin_mut;
use futures_util::stream;
impl Body {
pub(crate) fn get_ref(&self) -> &[u8] {
match *self {
Body::Bytes(ref bin) => &bin,
_ => panic!(),
}
}
}
impl ResponseBody<Body> {
pub(crate) fn get_ref(&self) -> &[u8] {
match *self {
ResponseBody::Body(ref b) => b.get_ref(),
ResponseBody::Other(ref b) => b.get_ref(),
}
}
}
#[actix_rt::test]
async fn test_static_str() {
assert_eq!(Body::from("").size(), BodySize::Sized(0));
assert_eq!(Body::from("test").size(), BodySize::Sized(4));
assert_eq!(Body::from("test").get_ref(), b"test");
assert_eq!("test".size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| Pin::new(&mut "test").poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_static_bytes() {
assert_eq!(Body::from(b"test".as_ref()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b"test".as_ref()).get_ref(), b"test");
assert_eq!(
Body::from_slice(b"test".as_ref()).size(),
BodySize::Sized(4)
);
assert_eq!(Body::from_slice(b"test".as_ref()).get_ref(), b"test");
let sb = Bytes::from(&b"test"[..]);
pin_mut!(sb);
assert_eq!(sb.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| sb.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_vec() {
assert_eq!(Body::from(Vec::from("test")).size(), BodySize::Sized(4));
assert_eq!(Body::from(Vec::from("test")).get_ref(), b"test");
let test_vec = Vec::from("test");
pin_mut!(test_vec);
assert_eq!(test_vec.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| test_vec.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_bytes() {
let b = Bytes::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_bytes_mut() {
let b = BytesMut::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_string() {
let b = "test".to_owned();
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
assert_eq!(Body::from(&b).size(), BodySize::Sized(4));
assert_eq!(Body::from(&b).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_unit() {
assert_eq!(().size(), BodySize::Empty);
assert!(poll_fn(|cx| Pin::new(&mut ()).poll_next(cx))
.await
.is_none());
}
#[actix_rt::test]
async fn test_box() {
let val = Box::new(());
pin_mut!(val);
assert_eq!(val.size(), BodySize::Empty);
assert!(poll_fn(|cx| val.as_mut().poll_next(cx)).await.is_none());
}
#[actix_rt::test]
async fn test_body_eq() {
assert!(
Body::Bytes(Bytes::from_static(b"1"))
== Body::Bytes(Bytes::from_static(b"1"))
);
assert!(Body::Bytes(Bytes::from_static(b"1")) != Body::None);
}
#[actix_rt::test]
async fn test_body_debug() {
assert!(format!("{:?}", Body::None).contains("Body::None"));
assert!(format!("{:?}", Body::Empty).contains("Body::Empty"));
assert!(format!("{:?}", Body::Bytes(Bytes::from_static(b"1"))).contains('1'));
}
#[actix_rt::test]
async fn test_serde_json() {
use serde_json::json;
assert_eq!(
Body::from(serde_json::Value::String("test".into())).size(),
BodySize::Sized(6)
);
assert_eq!(
Body::from(json!({"test-key":"test-value"})).size(),
BodySize::Sized(25)
);
}
mod body_stream {
use super::*;
//use futures::task::noop_waker;
//use futures::stream::once;
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = BodyStream::new(stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok(Bytes::from(v)) as Result<Bytes, ()>),
));
pin_mut!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
/* Now it does not compile as it should
#[actix_rt::test]
async fn move_pinned_pointer() {
let (sender, receiver) = futures::channel::oneshot::channel();
let mut body_stream = Ok(BodyStream::new(once(async {
let x = Box::new(0i32);
let y = &x;
receiver.await.unwrap();
let _z = **y;
Ok::<_, ()>(Bytes::new())
})));
let waker = noop_waker();
let mut context = Context::from_waker(&waker);
pin_mut!(body_stream);
let _ = body_stream.as_mut().unwrap().poll_next(&mut context);
sender.send(()).unwrap();
let _ = std::mem::replace(&mut body_stream, Err([0; 32])).unwrap().poll_next(&mut context);
}*/
}
mod sized_stream {
use super::*;
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = SizedStream::new(
2,
stream::iter(["1", "", "2"].iter().map(|&v| Ok(Bytes::from(v)))),
);
pin_mut!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
}
#[actix_rt::test]
async fn test_body_casting() {
let mut body = String::from("hello cast");
let resp_body: &mut dyn MessageBody = &mut body;
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast");
let body = &mut resp_body.downcast_mut::<String>().unwrap();
body.push('!');
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast!");
let not_body = resp_body.downcast_ref::<()>();
assert!(not_body.is_none());
}
}
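
A hedged usage sketch for the `Body` type added above. The paths assume the module is exposed as `actix_http::body`, matching the actix-http 2.x API; the behavior shown follows directly from the `From` impls and `size` methods in this file.

```rust
use actix_http::body::{Body, BodySize, MessageBody as _};
use bytes::Bytes;

// The From conversions and size hints defined above, in action.
fn main() {
    assert_eq!(Body::from("hello").size(), BodySize::Sized(5));
    assert_eq!(Body::from(vec![1u8, 2, 3]).size(), BodySize::Sized(3));
    assert_eq!(Body::from(Bytes::from_static(b"abc")).size(), BodySize::Sized(3));
    assert_eq!(Body::None.size(), BodySize::None);
    assert_eq!(Body::Empty.size(), BodySize::Empty);
}
```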

View File

@ -1,215 +0,0 @@
use std::{
error::Error as StdError,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use futures_core::{ready, Stream};
use pin_project_lite::pin_project;
use super::{BodySize, MessageBody};
pin_project! {
/// Streaming response wrapper.
///
/// Response does not contain `Content-Length` header and appropriate transfer encoding is used.
pub struct BodyStream<S> {
#[pin]
stream: S,
}
}
// TODO: from_infallible method
impl<S, E> BodyStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Box<dyn StdError>> + 'static,
{
#[inline]
pub fn new(stream: S) -> Self {
BodyStream { stream }
}
}
impl<S, E> MessageBody for BodyStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Box<dyn StdError>> + 'static,
{
type Error = E;
#[inline]
fn size(&self) -> BodySize {
BodySize::Stream
}
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped to prevent [`BodyStream`]'s transmission being
/// ended on a zero-length chunk, but rather proceed until the underlying
/// [`Stream`] ends.
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
loop {
let stream = self.as_mut().project().stream;
let chunk = match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
opt => opt,
};
return Poll::Ready(chunk);
}
}
}
#[cfg(test)]
mod tests {
use std::{convert::Infallible, time::Duration};
use actix_rt::{
pin,
time::{sleep, Sleep},
};
use actix_utils::future::poll_fn;
use derive_more::{Display, Error};
use futures_core::ready;
use futures_util::{stream, FutureExt as _};
use pin_project_lite::pin_project;
use static_assertions::{assert_impl_all, assert_not_impl_all};
use super::*;
use crate::body::to_bytes;
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, crate::Error>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Repeat<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);
assert_not_impl_all!(BodyStream<stream::Empty<Bytes>>: MessageBody);
assert_not_impl_all!(BodyStream<stream::Repeat<Bytes>>: MessageBody);
// crate::Error is not Clone
assert_not_impl_all!(BodyStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = BodyStream::new(stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
));
pin!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
#[actix_rt::test]
async fn read_to_bytes() {
let body = BodyStream::new(stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
));
assert_eq!(to_bytes(body).await.ok(), Some(Bytes::from("12")));
}
#[derive(Debug, Display, Error)]
#[display(fmt = "stream error")]
struct StreamErr;
#[actix_rt::test]
async fn stream_immediate_error() {
let body = BodyStream::new(stream::once(async { Err(StreamErr) }));
assert!(matches!(to_bytes(body).await, Err(StreamErr)));
}
#[actix_rt::test]
async fn stream_string_error() {
// `&'static str` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = BodyStream::new(stream::once(async { Err("stringy error") }));
assert!(matches!(to_bytes(body).await, Err("stringy error")));
}
#[actix_rt::test]
async fn stream_boxed_error() {
// `Box<dyn Error>` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = BodyStream::new(stream::once(async {
Err(Box::<dyn StdError>::from("stringy error"))
}));
assert_eq!(
to_bytes(body).await.unwrap_err().to_string(),
"stringy error"
);
}
#[actix_rt::test]
async fn stream_delayed_error() {
let body = BodyStream::new(stream::iter(vec![Ok(Bytes::from("1")), Err(StreamErr)]));
assert!(matches!(to_bytes(body).await, Err(StreamErr)));
pin_project! {
#[derive(Debug)]
#[project = TimeDelayStreamProj]
enum TimeDelayStream {
Start,
Sleep { delay: Pin<Box<Sleep>> },
Done,
}
}
impl Stream for TimeDelayStream {
type Item = Result<Bytes, StreamErr>;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
match self.as_mut().get_mut() {
TimeDelayStream::Start => {
let sleep = sleep(Duration::from_millis(1));
self.as_mut().set(TimeDelayStream::Sleep {
delay: Box::pin(sleep),
});
cx.waker().wake_by_ref();
Poll::Pending
}
TimeDelayStream::Sleep { ref mut delay } => {
ready!(delay.poll_unpin(cx));
self.set(TimeDelayStream::Done);
cx.waker().wake_by_ref();
Poll::Pending
}
TimeDelayStream::Done => Poll::Ready(Some(Err(StreamErr))),
}
}
}
let body = BodyStream::new(TimeDelayStream::Start);
assert!(matches!(to_bytes(body).await, Err(StreamErr)));
}
}
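
A hedged standalone version of the `read_to_bytes` test above, assuming the public paths `actix_http::body::{to_bytes, BodyStream}` from the 3.0 beta series:

```rust
use std::convert::Infallible;

use actix_http::body::{to_bytes, BodyStream};
use bytes::Bytes;
use futures_util::stream;

#[actix_rt::main]
async fn main() {
    // Wrap a plain futures stream in BodyStream; the empty chunk is skipped
    // rather than terminating the body early.
    let body = BodyStream::new(stream::iter(
        ["chunk-1", "", "chunk-2"]
            .iter()
            .map(|&s| Ok::<_, Infallible>(Bytes::from(s))),
    ));

    let collected = to_bytes(body).await.unwrap();
    assert_eq!(collected, Bytes::from("chunk-1chunk-2"));
}
```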

View File

@ -1,127 +0,0 @@
use std::{
error::Error as StdError,
fmt,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use super::{BodySize, MessageBody, MessageBodyMapErr};
use crate::body;
/// A boxed message body with boxed errors.
#[derive(Debug)]
pub struct BoxBody(BoxBodyInner);
enum BoxBodyInner {
None(body::None),
Bytes(Bytes),
Stream(Pin<Box<dyn MessageBody<Error = Box<dyn StdError>>>>),
}
impl fmt::Debug for BoxBodyInner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::None(arg0) => f.debug_tuple("None").field(arg0).finish(),
Self::Bytes(arg0) => f.debug_tuple("Bytes").field(arg0).finish(),
Self::Stream(_) => f.debug_tuple("Stream").field(&"dyn MessageBody").finish(),
}
}
}
impl BoxBody {
/// Same as `MessageBody::boxed`.
///
/// If the body type to wrap is unknown or generic, it is better to use [`MessageBody::boxed`] to
/// avoid double boxing.
#[inline]
pub fn new<B>(body: B) -> Self
where
B: MessageBody + 'static,
{
match body.size() {
BodySize::None => Self(BoxBodyInner::None(body::None)),
_ => match body.try_into_bytes() {
Ok(bytes) => Self(BoxBodyInner::Bytes(bytes)),
Err(body) => {
let body = MessageBodyMapErr::new(body, Into::into);
Self(BoxBodyInner::Stream(Box::pin(body)))
}
},
}
}
/// Returns a mutable pinned reference to the inner message body type.
#[inline]
pub fn as_pin_mut(&mut self) -> Pin<&mut Self> {
Pin::new(self)
}
}
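// Illustrative sketch (not part of the original file; the helper name is made up):
// constructing a `BoxBody` from a complete body. Because complete bodies are stored
// as `Bytes` in `BoxBodyInner`, no boxed stream is allocated and polling stays cheap.
// For unknown or generic body types, prefer `MessageBody::boxed` so an existing
// `BoxBody` is not wrapped twice.
#[allow(dead_code)]
fn box_body_from_bytes() -> BoxBody {
    // `Bytes` reports `BodySize::Sized(11)`, so this takes the `Bytes` branch.
    BoxBody::new(Bytes::from_static(b"hello world"))
}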
impl MessageBody for BoxBody {
type Error = Box<dyn StdError>;
#[inline]
fn size(&self) -> BodySize {
match &self.0 {
BoxBodyInner::None(none) => none.size(),
BoxBodyInner::Bytes(bytes) => bytes.size(),
BoxBodyInner::Stream(stream) => stream.size(),
}
}
#[inline]
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match &mut self.0 {
BoxBodyInner::None(body) => {
Pin::new(body).poll_next(cx).map_err(|err| match err {})
}
BoxBodyInner::Bytes(body) => {
Pin::new(body).poll_next(cx).map_err(|err| match err {})
}
BoxBodyInner::Stream(body) => Pin::new(body).poll_next(cx),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
match self.0 {
BoxBodyInner::None(body) => Ok(body.try_into_bytes().unwrap()),
BoxBodyInner::Bytes(body) => Ok(body.try_into_bytes().unwrap()),
_ => Err(self),
}
}
#[inline]
fn boxed(self) -> BoxBody {
self
}
}
#[cfg(test)]
mod tests {
use static_assertions::{assert_impl_all, assert_not_impl_all};
use super::*;
use crate::body::to_bytes;
assert_impl_all!(BoxBody: MessageBody, fmt::Debug, Unpin);
assert_not_impl_all!(BoxBody: Send, Sync, Unpin);
#[actix_rt::test]
async fn nested_boxed_body() {
let body = Bytes::from_static(&[1, 2, 3]);
let boxed_body = BoxBody::new(BoxBody::new(body));
assert_eq!(
to_bytes(boxed_body).await.unwrap(),
Bytes::from(vec![1, 2, 3]),
);
}
}

View File

@ -1,108 +0,0 @@
use std::{
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use pin_project_lite::pin_project;
use super::{BodySize, BoxBody, MessageBody};
use crate::Error;
pin_project! {
#[project = EitherBodyProj]
#[derive(Debug, Clone)]
pub enum EitherBody<L, R = BoxBody> {
/// A body of type `L`.
Left { #[pin] body: L },
/// A body of type `R`.
Right { #[pin] body: R },
}
}
impl<L> EitherBody<L, BoxBody> {
/// Creates new `EitherBody` using left variant and boxed right variant.
#[inline]
pub fn new(body: L) -> Self {
Self::Left { body }
}
}
impl<L, R> EitherBody<L, R> {
/// Creates new `EitherBody` using left variant.
#[inline]
pub fn left(body: L) -> Self {
Self::Left { body }
}
/// Creates new `EitherBody` using right variant.
#[inline]
pub fn right(body: R) -> Self {
Self::Right { body }
}
}
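// Illustrative sketch (not part of the original file; the function is hypothetical):
// `EitherBody` lets a single return type carry either of two body types, e.g. a
// service that sometimes substitutes its own bytes and otherwise forwards an
// existing boxed body.
#[allow(dead_code)]
fn intercept(pass_through: bool, inner: BoxBody) -> EitherBody<Bytes, BoxBody> {
    if pass_through {
        // Forward the original body unchanged.
        EitherBody::right(inner)
    } else {
        // Replace it with a fixed payload.
        EitherBody::left(Bytes::from_static(b"intercepted"))
    }
}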
impl<L, R> MessageBody for EitherBody<L, R>
where
L: MessageBody + 'static,
R: MessageBody + 'static,
{
type Error = Error;
#[inline]
fn size(&self) -> BodySize {
match self {
EitherBody::Left { body } => body.size(),
EitherBody::Right { body } => body.size(),
}
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match self.project() {
EitherBodyProj::Left { body } => body
.poll_next(cx)
.map_err(|err| Error::new_body().with_cause(err)),
EitherBodyProj::Right { body } => body
.poll_next(cx)
.map_err(|err| Error::new_body().with_cause(err)),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
match self {
EitherBody::Left { body } => body
.try_into_bytes()
.map_err(|body| EitherBody::Left { body }),
EitherBody::Right { body } => body
.try_into_bytes()
.map_err(|body| EitherBody::Right { body }),
}
}
#[inline]
fn boxed(self) -> BoxBody {
match self {
EitherBody::Left { body } => body.boxed(),
EitherBody::Right { body } => body.boxed(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn type_parameter_inference() {
let _body: EitherBody<(), _> = EitherBody::new(());
let _body: EitherBody<_, ()> = EitherBody::left(());
let _body: EitherBody<(), _> = EitherBody::right(());
}
}

View File

@ -1,554 +0,0 @@
//! [`MessageBody`] trait and foreign implementations.
use std::{
convert::Infallible,
error::Error as StdError,
mem,
pin::Pin,
task::{Context, Poll},
};
use bytes::{Bytes, BytesMut};
use futures_core::ready;
use pin_project_lite::pin_project;
use super::{BodySize, BoxBody};
/// An interface for types that can be converted into bytes and used as response bodies.
// TODO: examples
pub trait MessageBody {
/// The type of error that will be returned if streaming body fails.
///
/// Since it is not appropriate to generate a response mid-stream, it only requires `Error` for
/// internal use and logging.
type Error: Into<Box<dyn StdError>>;
/// Body size hint.
///
/// If [`BodySize::None`] is returned, optimizations that skip reading the body are allowed.
fn size(&self) -> BodySize;
/// Attempt to pull out the next chunk of body bytes.
// TODO: expand documentation
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>>;
/// Try to convert into the complete chunk of body bytes.
///
/// Implement this method if the entire body can be trivially extracted. This is useful for
/// optimizations where `poll_next` calls can be avoided.
///
/// Body types with [`BodySize::None`] are allowed to return empty `Bytes`, although callers of
/// this method are encouraged to check `size` first and return early in that case.
///
/// # Errors
/// The default implementation will error and return the original type back to the caller for
/// further use.
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self>
where
Self: Sized,
{
Err(self)
}
/// Converts this body into `BoxBody`.
#[inline]
fn boxed(self) -> BoxBody
where
Self: Sized + 'static,
{
BoxBody::new(self)
}
}
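// Illustrative sketch (not part of the original file; `OneChunk` is a made-up type):
// a minimal custom `MessageBody` implementation that yields one chunk and then
// completes. Implementations mostly differ in how `poll_next` produces chunks;
// reporting an accurate `size` lets the encoder write a Content-Length header.
#[allow(dead_code)]
struct OneChunk(Option<Bytes>);

impl MessageBody for OneChunk {
    type Error = Infallible;

    fn size(&self) -> BodySize {
        // Report the exact number of remaining bytes.
        BodySize::Sized(self.0.as_ref().map_or(0, |b| b.len() as u64))
    }

    fn poll_next(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Bytes, Self::Error>>> {
        // Hand out the chunk once, then signal end-of-stream with `None`.
        Poll::Ready(self.get_mut().0.take().map(Ok))
    }
}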
mod foreign_impls {
use super::*;
impl MessageBody for Infallible {
type Error = Infallible;
fn size(&self) -> BodySize {
match *self {}
}
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match *self {}
}
}
impl MessageBody for () {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(0)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Poll::Ready(None)
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::new())
}
}
impl<B> MessageBody for Box<B>
where
B: MessageBody + Unpin + ?Sized,
{
type Error = B::Error;
#[inline]
fn size(&self) -> BodySize {
self.as_ref().size()
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Pin::new(self.get_mut().as_mut()).poll_next(cx)
}
}
impl<B> MessageBody for Pin<Box<B>>
where
B: MessageBody + ?Sized,
{
type Error = B::Error;
#[inline]
fn size(&self) -> BodySize {
self.as_ref().size()
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
self.get_mut().as_mut().poll_next(cx)
}
}
impl MessageBody for &'static [u8] {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from_static(mem::take(self.get_mut())))))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from_static(self))
}
}
impl MessageBody for Bytes {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()))))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(self)
}
}
impl MessageBody for BytesMut {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()).freeze())))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(self.freeze())
}
}
impl MessageBody for Vec<u8> {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()).into())))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from(self))
}
}
impl MessageBody for &'static str {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
let string = mem::take(self.get_mut());
let bytes = Bytes::from_static(string.as_bytes());
Poll::Ready(Some(Ok(bytes)))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from_static(self.as_bytes()))
}
}
impl MessageBody for String {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
let string = mem::take(self.get_mut());
Poll::Ready(Some(Ok(Bytes::from(string))))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from(self))
}
}
impl MessageBody for bytestring::ByteString {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
let string = mem::take(self.get_mut());
Poll::Ready(Some(Ok(string.into_bytes())))
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(self.into_bytes())
}
}
}
pin_project! {
pub(crate) struct MessageBodyMapErr<B, F> {
#[pin]
body: B,
mapper: Option<F>,
}
}
impl<B, F, E> MessageBodyMapErr<B, F>
where
B: MessageBody,
F: FnOnce(B::Error) -> E,
{
pub(crate) fn new(body: B, mapper: F) -> Self {
Self {
body,
mapper: Some(mapper),
}
}
}
impl<B, F, E> MessageBody for MessageBodyMapErr<B, F>
where
B: MessageBody,
F: FnOnce(B::Error) -> E,
E: Into<Box<dyn StdError>>,
{
type Error = E;
#[inline]
fn size(&self) -> BodySize {
self.body.size()
}
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
let this = self.as_mut().project();
match ready!(this.body.poll_next(cx)) {
Some(Err(err)) => {
let f = self.as_mut().project().mapper.take().unwrap();
let mapped_err = (f)(err);
Poll::Ready(Some(Err(mapped_err)))
}
Some(Ok(val)) => Poll::Ready(Some(Ok(val))),
None => Poll::Ready(None),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
let Self { body, mapper } = self;
body.try_into_bytes().map_err(|body| Self { body, mapper })
}
}
#[cfg(test)]
mod tests {
use actix_rt::pin;
use actix_utils::future::poll_fn;
use bytes::{Bytes, BytesMut};
use super::*;
use crate::body::{self, EitherBody};
macro_rules! assert_poll_next {
($pin:expr, $exp:expr) => {
assert_eq!(
poll_fn(|cx| $pin.as_mut().poll_next(cx))
.await
.unwrap() // unwrap option
.unwrap(), // unwrap result
$exp
);
};
}
macro_rules! assert_poll_next_none {
($pin:expr) => {
assert!(poll_fn(|cx| $pin.as_mut().poll_next(cx)).await.is_none());
};
}
#[actix_rt::test]
async fn boxing_equivalence() {
assert_eq!(().size(), BodySize::Sized(0));
assert_eq!(().size(), Box::new(()).size());
assert_eq!(().size(), Box::pin(()).size());
let pl = Box::new(());
pin!(pl);
assert_poll_next_none!(pl);
let mut pl = Box::pin(());
assert_poll_next_none!(pl);
}
#[actix_rt::test]
async fn test_unit() {
let pl = ();
assert_eq!(pl.size(), BodySize::Sized(0));
pin!(pl);
assert_poll_next_none!(pl);
}
#[actix_rt::test]
async fn test_static_str() {
assert_eq!("".size(), BodySize::Sized(0));
assert_eq!("test".size(), BodySize::Sized(4));
let pl = "test";
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_static_bytes() {
assert_eq!(b"".as_ref().size(), BodySize::Sized(0));
assert_eq!(b"test".as_ref().size(), BodySize::Sized(4));
let pl = b"test".as_ref();
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_vec() {
assert_eq!(vec![0; 0].size(), BodySize::Sized(0));
assert_eq!(Vec::from("test").size(), BodySize::Sized(4));
let pl = Vec::from("test");
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_bytes() {
assert_eq!(Bytes::new().size(), BodySize::Sized(0));
assert_eq!(Bytes::from_static(b"test").size(), BodySize::Sized(4));
let pl = Bytes::from_static(b"test");
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_bytes_mut() {
assert_eq!(BytesMut::new().size(), BodySize::Sized(0));
assert_eq!(BytesMut::from(b"test".as_ref()).size(), BodySize::Sized(4));
let pl = BytesMut::from("test");
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_string() {
assert_eq!(String::new().size(), BodySize::Sized(0));
assert_eq!("test".to_owned().size(), BodySize::Sized(4));
let pl = "test".to_owned();
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn complete_body_combinators() {
let body = Bytes::from_static(b"test");
let body = BoxBody::new(body);
let body = EitherBody::<_, ()>::left(body);
let body = EitherBody::<(), _>::right(body);
// Do not support try_into_bytes:
// let body = Box::new(body);
// let body = Box::pin(body);
assert_eq!(body.try_into_bytes().unwrap(), Bytes::from("test"));
}
#[actix_rt::test]
async fn complete_body_combinators_poll() {
let body = Bytes::from_static(b"test");
let body = BoxBody::new(body);
let body = EitherBody::<_, ()>::left(body);
let body = EitherBody::<(), _>::right(body);
let mut body = body;
assert_eq!(body.size(), BodySize::Sized(4));
assert_poll_next!(Pin::new(&mut body), Bytes::from("test"));
assert_poll_next_none!(Pin::new(&mut body));
}
#[actix_rt::test]
async fn none_body_combinators() {
fn none_body() -> BoxBody {
let body = body::None;
let body = BoxBody::new(body);
let body = EitherBody::<_, ()>::left(body);
let body = EitherBody::<(), _>::right(body);
body.boxed()
}
assert_eq!(none_body().size(), BodySize::None);
assert_eq!(none_body().try_into_bytes().unwrap(), Bytes::new());
assert_poll_next_none!(Pin::new(&mut none_body()));
}
// down-casting used to be done with a method on MessageBody trait
// test is kept to demonstrate equivalence of Any trait
#[actix_rt::test]
async fn test_body_casting() {
let mut body = String::from("hello cast");
// let mut resp_body: &mut dyn MessageBody<Error = Error> = &mut body;
let resp_body: &mut dyn std::any::Any = &mut body;
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast");
let body = &mut resp_body.downcast_mut::<String>().unwrap();
body.push('!');
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast!");
let not_body = resp_body.downcast_ref::<()>();
assert!(not_body.is_none());
}
}

View File

@ -1,20 +0,0 @@
//! Traits and structures to aid consuming and writing HTTP payloads.
mod body_stream;
mod boxed;
mod either;
mod message_body;
mod none;
mod size;
mod sized_stream;
mod utils;
pub use self::body_stream::BodyStream;
pub use self::boxed::BoxBody;
pub use self::either::EitherBody;
pub use self::message_body::MessageBody;
pub(crate) use self::message_body::MessageBodyMapErr;
pub use self::none::None;
pub use self::size::BodySize;
pub use self::sized_stream::SizedStream;
pub use self::utils::to_bytes;

View File

@ -1,48 +0,0 @@
use std::{
convert::Infallible,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use super::{BodySize, MessageBody};
/// Body type for responses that forbid payloads.
///
/// Distinct from an empty response, which would contain a Content-Length header.
///
/// For an "empty" body, use `()` or `Bytes::new()`.
#[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive]
pub struct None;
impl None {
/// Constructs new "none" body.
#[inline]
pub fn new() -> Self {
None
}
}
impl MessageBody for None {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::None
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Poll::Ready(Option::None)
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::new())
}
}
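// Illustrative sketch (not part of the original file; the helper is hypothetical):
// `body::None` versus an empty body. `None` reports `BodySize::None`, so responses
// using it omit the Content-Length header entirely, while `()` or `Bytes::new()`
// report `BodySize::Sized(0)` and produce `Content-Length: 0`.
#[allow(dead_code)]
fn size_hints() -> (BodySize, BodySize) {
    (None.size(), Bytes::new().size())
}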

View File

@ -1,41 +0,0 @@
/// Body size hint.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BodySize {
/// Implicitly empty body.
///
/// Will omit the Content-Length header. Used for responses to certain methods (e.g., `HEAD`) or
/// with particular status codes (e.g., 204 No Content). Consumers that read this as a body size
/// hint are allowed to make optimizations that skip reading or writing the payload.
None,
/// Known size body.
///
/// Will write `Content-Length: N` header.
Sized(u64),
/// Unknown size body.
///
/// Will not write Content-Length header. Can be used with chunked Transfer-Encoding.
Stream,
}
impl BodySize {
/// Equivalent to `BodySize::Sized(0)`;
pub const ZERO: Self = Self::Sized(0);
/// Returns true if size hint indicates omitted or empty body.
///
/// Streams will return false because the size cannot be known without reading the stream.
///
/// ```
/// # use actix_http::body::BodySize;
/// assert!(BodySize::None.is_eof());
/// assert!(BodySize::Sized(0).is_eof());
///
/// assert!(!BodySize::Sized(64).is_eof());
/// assert!(!BodySize::Stream.is_eof());
/// ```
pub fn is_eof(&self) -> bool {
matches!(self, BodySize::None | BodySize::Sized(0))
}
}
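// Illustrative sketch (not part of the original file; the function and strings are
// made up): how an HTTP/1 encoder typically maps a size hint to message framing.
// This only shows the decision; the actual header writing happens elsewhere in the crate.
#[allow(dead_code)]
fn framing_for(size: BodySize) -> &'static str {
    match size {
        // Implicitly empty: neither Content-Length nor Transfer-Encoding is written.
        BodySize::None => "no framing headers",
        // Known size: advertise it up front.
        BodySize::Sized(_) => "Content-Length: <n>",
        // Unknown size: fall back to chunked transfer encoding.
        BodySize::Stream => "Transfer-Encoding: chunked",
    }
}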

View File

@ -1,171 +0,0 @@
use std::{
error::Error as StdError,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use futures_core::{ready, Stream};
use pin_project_lite::pin_project;
use super::{BodySize, MessageBody};
pin_project! {
/// Known sized streaming response wrapper.
///
/// This body implementation should be used if the total size of the stream is known. Data is
/// sent as-is, without using chunked transfer encoding.
pub struct SizedStream<S> {
size: u64,
#[pin]
stream: S,
}
}
impl<S, E> SizedStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Box<dyn StdError>> + 'static,
{
#[inline]
pub fn new(size: u64, stream: S) -> Self {
SizedStream { size, stream }
}
}
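// Illustrative sketch (not part of the original file; the helper name is made up):
// wrapping a stream whose total byte length is known up front, so the response can
// carry a Content-Length header instead of using chunked transfer encoding. Gated to
// tests so it can reuse `futures_util`, which the existing tests in this file already
// rely on.
#[cfg(test)]
#[allow(dead_code)]
fn from_known_chunks(
) -> SizedStream<impl Stream<Item = Result<Bytes, std::convert::Infallible>>> {
    use std::convert::Infallible;

    let chunks = vec![Bytes::from_static(b"hello "), Bytes::from_static(b"world")];
    // The declared size must match the bytes actually produced by the stream.
    let total: u64 = chunks.iter().map(|c| c.len() as u64).sum();
    let stream = futures_util::stream::iter(chunks.into_iter().map(|b| Ok::<_, Infallible>(b)));
    SizedStream::new(total, stream)
}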
// TODO: from_infallible method
impl<S, E> MessageBody for SizedStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Box<dyn StdError>> + 'static,
{
type Error = E;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.size as u64)
}
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped to prevent [`SizedStream`]'s transmission from ending on a
/// zero-length chunk; polling instead proceeds until the underlying [`Stream`] ends.
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
loop {
let stream = self.as_mut().project().stream;
let chunk = match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
val => val,
};
return Poll::Ready(chunk);
}
}
}
#[cfg(test)]
mod tests {
use std::convert::Infallible;
use actix_rt::pin;
use actix_utils::future::poll_fn;
use futures_util::stream;
use static_assertions::{assert_impl_all, assert_not_impl_all};
use super::*;
use crate::body::to_bytes;
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, crate::Error>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Repeat<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);
assert_not_impl_all!(SizedStream<stream::Empty<Bytes>>: MessageBody);
assert_not_impl_all!(SizedStream<stream::Repeat<Bytes>>: MessageBody);
// crate::Error is not Clone
assert_not_impl_all!(SizedStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = SizedStream::new(
2,
stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
),
);
pin!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
#[actix_rt::test]
async fn read_to_bytes() {
let body = SizedStream::new(
2,
stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
),
);
assert_eq!(to_bytes(body).await.ok(), Some(Bytes::from("12")));
}
#[actix_rt::test]
async fn stream_string_error() {
// `&'static str` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = SizedStream::new(0, stream::once(async { Err("stringy error") }));
assert_eq!(to_bytes(body).await, Ok(Bytes::new()));
let body = SizedStream::new(1, stream::once(async { Err("stringy error") }));
assert!(matches!(to_bytes(body).await, Err("stringy error")));
}
#[actix_rt::test]
async fn stream_boxed_error() {
// `Box<dyn Error>` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = SizedStream::new(
0,
stream::once(async { Err(Box::<dyn StdError>::from("stringy error")) }),
);
assert_eq!(to_bytes(body).await.unwrap(), Bytes::new());
let body = SizedStream::new(
1,
stream::once(async { Err(Box::<dyn StdError>::from("stringy error")) }),
);
assert_eq!(
to_bytes(body).await.unwrap_err().to_string(),
"stringy error"
);
}
}

View File

@ -1,77 +0,0 @@
use std::task::Poll;
use actix_rt::pin;
use actix_utils::future::poll_fn;
use bytes::{Bytes, BytesMut};
use futures_core::ready;
use super::{BodySize, MessageBody};
/// Collects the body produced by a `MessageBody` implementation into `Bytes`.
///
/// Any errors produced by the body stream are returned immediately.
///
/// # Examples
/// ```
/// use actix_http::body::{self, to_bytes};
/// use bytes::Bytes;
///
/// # async fn test_to_bytes() {
/// let body = body::None::new();
/// let bytes = to_bytes(body).await.unwrap();
/// assert!(bytes.is_empty());
///
/// let body = Bytes::from_static(b"123");
/// let bytes = to_bytes(body).await.unwrap();
/// assert_eq!(bytes, b"123"[..]);
/// # }
/// ```
pub async fn to_bytes<B: MessageBody>(body: B) -> Result<Bytes, B::Error> {
let cap = match body.size() {
BodySize::None | BodySize::Sized(0) => return Ok(Bytes::new()),
BodySize::Sized(size) => size as usize,
// good enough first guess for chunk size
BodySize::Stream => 32_768,
};
let mut buf = BytesMut::with_capacity(cap);
pin!(body);
poll_fn(|cx| loop {
let body = body.as_mut();
match ready!(body.poll_next(cx)) {
Some(Ok(bytes)) => buf.extend_from_slice(&*bytes),
None => return Poll::Ready(Ok(())),
Some(Err(err)) => return Poll::Ready(Err(err)),
}
})
.await?;
Ok(buf.freeze())
}
#[cfg(test)]
mod test {
use futures_util::{stream, StreamExt as _};
use super::*;
use crate::{body::BodyStream, Error};
#[actix_rt::test]
async fn test_to_bytes() {
let bytes = to_bytes(()).await.unwrap();
assert!(bytes.is_empty());
let body = Bytes::from_static(b"123");
let bytes = to_bytes(body).await.unwrap();
assert_eq!(bytes, b"123"[..]);
let stream = stream::iter(vec![Bytes::from_static(b"123"), Bytes::from_static(b"abc")])
.map(Ok::<_, Error>);
let body = BodyStream::new(stream);
let bytes = to_bytes(body).await.unwrap();
assert_eq!(bytes, b"123abc"[..]);
}
}

View File

@ -1,22 +1,26 @@
use std::{fmt, marker::PhantomData, net, rc::Rc}; use std::marker::PhantomData;
use std::rc::Rc;
use std::{fmt, net};
use actix_codec::Framed; use actix_codec::Framed;
use actix_service::{IntoServiceFactory, Service, ServiceFactory}; use actix_service::{IntoServiceFactory, Service, ServiceFactory};
use crate::{ use crate::body::MessageBody;
body::{BoxBody, MessageBody}, use crate::config::{KeepAlive, ServiceConfig};
config::{KeepAlive, ServiceConfig}, use crate::error::Error;
h1::{self, ExpectHandler, H1Service, UpgradeHandler}, use crate::h1::{Codec, ExpectHandler, H1Service, UpgradeHandler};
h2::H2Service, use crate::h2::H2Service;
service::HttpService, use crate::helpers::{Data, DataFactory};
ConnectCallback, Extensions, Request, Response, use crate::request::Request;
}; use crate::response::Response;
use crate::service::HttpService;
use crate::{ConnectCallback, Extensions};
/// A HTTP service builder /// A HTTP service builder
/// ///
/// This type can be used to construct an instance of [`HttpService`] through a /// This type can be used to construct an instance of [`HttpService`] through a
/// builder-like pattern. /// builder-like pattern.
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> { pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler<T>> {
keep_alive: KeepAlive, keep_alive: KeepAlive,
client_timeout: u64, client_timeout: u64,
client_disconnect: u64, client_disconnect: u64,
@ -24,19 +28,20 @@ pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> {
local_addr: Option<net::SocketAddr>, local_addr: Option<net::SocketAddr>,
expect: X, expect: X,
upgrade: Option<U>, upgrade: Option<U>,
// DEPRECATED: in favor of on_connect_ext
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
on_connect_ext: Option<Rc<ConnectCallback<T>>>, on_connect_ext: Option<Rc<ConnectCallback<T>>>,
_phantom: PhantomData<S>, _t: PhantomData<(T, S)>,
} }
impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler> impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler<T>>
where where
S: ServiceFactory<Request, Config = ()>, S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Response<BoxBody>> + 'static, S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
<S::Service as Service<Request>>::Future: 'static, <S::Service as Service>::Future: 'static,
{ {
/// Create instance of `ServiceConfigBuilder` /// Create instance of `ServiceConfigBuilder`
#[allow(clippy::new_without_default)]
pub fn new() -> Self { pub fn new() -> Self {
HttpServiceBuilder { HttpServiceBuilder {
keep_alive: KeepAlive::Timeout(5), keep_alive: KeepAlive::Timeout(5),
@ -46,24 +51,27 @@ where
local_addr: None, local_addr: None,
expect: ExpectHandler, expect: ExpectHandler,
upgrade: None, upgrade: None,
on_connect: None,
on_connect_ext: None, on_connect_ext: None,
_phantom: PhantomData, _t: PhantomData,
} }
} }
} }
impl<T, S, X, U> HttpServiceBuilder<T, S, X, U> impl<T, S, X, U> HttpServiceBuilder<T, S, X, U>
where where
S: ServiceFactory<Request, Config = ()>, S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Response<BoxBody>> + 'static, S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
<S::Service as Service<Request>>::Future: 'static, <S::Service as Service>::Future: 'static,
X: ServiceFactory<Request, Config = (), Response = Request>, X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Response<BoxBody>>, X::Error: Into<Error>,
X::InitError: fmt::Debug, X::InitError: fmt::Debug,
U: ServiceFactory<(Request, Framed<T, h1::Codec>), Config = (), Response = ()>, <X::Service as Service>::Future: 'static,
U: ServiceFactory<Config = (), Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display, U::Error: fmt::Display,
U::InitError: fmt::Debug, U::InitError: fmt::Debug,
<U::Service as Service>::Future: 'static,
{ {
/// Set server keep-alive setting. /// Set server keep-alive setting.
/// ///
@ -119,10 +127,11 @@ where
/// request will be forwarded to main service. /// request will be forwarded to main service.
pub fn expect<F, X1>(self, expect: F) -> HttpServiceBuilder<T, S, X1, U> pub fn expect<F, X1>(self, expect: F) -> HttpServiceBuilder<T, S, X1, U>
where where
F: IntoServiceFactory<X1, Request>, F: IntoServiceFactory<X1>,
X1: ServiceFactory<Request, Config = (), Response = Request>, X1: ServiceFactory<Config = (), Request = Request, Response = Request>,
X1::Error: Into<Response<BoxBody>>, X1::Error: Into<Error>,
X1::InitError: fmt::Debug, X1::InitError: fmt::Debug,
<X1::Service as Service>::Future: 'static,
{ {
HttpServiceBuilder { HttpServiceBuilder {
keep_alive: self.keep_alive, keep_alive: self.keep_alive,
@ -132,8 +141,9 @@ where
local_addr: self.local_addr, local_addr: self.local_addr,
expect: expect.into_factory(), expect: expect.into_factory(),
upgrade: self.upgrade, upgrade: self.upgrade,
on_connect: self.on_connect,
on_connect_ext: self.on_connect_ext, on_connect_ext: self.on_connect_ext,
_phantom: PhantomData, _t: PhantomData,
} }
} }
@ -143,10 +153,15 @@ where
/// and this service get called with original request and framed object. /// and this service get called with original request and framed object.
pub fn upgrade<F, U1>(self, upgrade: F) -> HttpServiceBuilder<T, S, X, U1> pub fn upgrade<F, U1>(self, upgrade: F) -> HttpServiceBuilder<T, S, X, U1>
where where
F: IntoServiceFactory<U1, (Request, Framed<T, h1::Codec>)>, F: IntoServiceFactory<U1>,
U1: ServiceFactory<(Request, Framed<T, h1::Codec>), Config = (), Response = ()>, U1: ServiceFactory<
Config = (),
Request = (Request, Framed<T, Codec>),
Response = (),
>,
U1::Error: fmt::Display, U1::Error: fmt::Display,
U1::InitError: fmt::Debug, U1::InitError: fmt::Debug,
<U1::Service as Service>::Future: 'static,
{ {
HttpServiceBuilder { HttpServiceBuilder {
keep_alive: self.keep_alive, keep_alive: self.keep_alive,
@ -156,11 +171,26 @@ where
local_addr: self.local_addr, local_addr: self.local_addr,
expect: self.expect, expect: self.expect,
upgrade: Some(upgrade.into_factory()), upgrade: Some(upgrade.into_factory()),
on_connect: self.on_connect,
on_connect_ext: self.on_connect_ext, on_connect_ext: self.on_connect_ext,
_phantom: PhantomData, _t: PhantomData,
} }
} }
/// Set on-connect callback.
///
/// Called once per connection. Return value of the call is stored in request extensions.
///
/// *SOFT DEPRECATED*: Prefer the `on_connect_ext` style callback.
pub fn on_connect<F, I>(mut self, f: F) -> Self
where
F: Fn(&T) -> I + 'static,
I: Clone + 'static,
{
self.on_connect = Some(Rc::new(move |io| Box::new(Data(f(io)))));
self
}
/// Sets the callback to be run on connection establishment. /// Sets the callback to be run on connection establishment.
/// ///
/// Has mutable access to a data container that will be merged into request extensions. /// Has mutable access to a data container that will be merged into request extensions.
@ -178,8 +208,8 @@ where
pub fn h1<F, B>(self, service: F) -> H1Service<T, S, B, X, U> pub fn h1<F, B>(self, service: F) -> H1Service<T, S, B, X, U>
where where
B: MessageBody, B: MessageBody,
F: IntoServiceFactory<S, Request>, F: IntoServiceFactory<S>,
S::Error: Into<Response<BoxBody>>, S::Error: Into<Error>,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
{ {
@ -194,18 +224,19 @@ where
H1Service::with_config(cfg, service.into_factory()) H1Service::with_config(cfg, service.into_factory())
.expect(self.expect) .expect(self.expect)
.upgrade(self.upgrade) .upgrade(self.upgrade)
.on_connect(self.on_connect)
.on_connect_ext(self.on_connect_ext) .on_connect_ext(self.on_connect_ext)
} }
/// Finish service configuration and create a HTTP service for HTTP/2 protocol. /// Finish service configuration and create a HTTP service for HTTP/2 protocol.
pub fn h2<F, B>(self, service: F) -> H2Service<T, S, B> pub fn h2<F, B>(self, service: F) -> H2Service<T, S, B>
where where
F: IntoServiceFactory<S, Request>, B: MessageBody + 'static,
S::Error: Into<Response<BoxBody>> + 'static, F: IntoServiceFactory<S>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{ {
let cfg = ServiceConfig::new( let cfg = ServiceConfig::new(
self.keep_alive, self.keep_alive,
@ -215,18 +246,20 @@ where
self.local_addr, self.local_addr,
); );
H2Service::with_config(cfg, service.into_factory()).on_connect_ext(self.on_connect_ext) H2Service::with_config(cfg, service.into_factory())
.on_connect(self.on_connect)
.on_connect_ext(self.on_connect_ext)
} }
/// Finish service configuration and create `HttpService` instance. /// Finish service configuration and create `HttpService` instance.
pub fn finish<F, B>(self, service: F) -> HttpService<T, S, B, X, U> pub fn finish<F, B>(self, service: F) -> HttpService<T, S, B, X, U>
where where
F: IntoServiceFactory<S, Request>, B: MessageBody + 'static,
S::Error: Into<Response<BoxBody>> + 'static, F: IntoServiceFactory<S>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{ {
let cfg = ServiceConfig::new( let cfg = ServiceConfig::new(
self.keep_alive, self.keep_alive,
@ -239,6 +272,7 @@ where
HttpService::with_config(cfg, service.into_factory()) HttpService::with_config(cfg, service.into_factory())
.expect(self.expect) .expect(self.expect)
.upgrade(self.upgrade) .upgrade(self.upgrade)
.on_connect(self.on_connect)
.on_connect_ext(self.on_connect_ext) .on_connect_ext(self.on_connect_ext)
} }
} }

View File

@ -1,34 +1,31 @@
use std::{net::IpAddr, time::Duration}; use std::time::Duration;
const DEFAULT_H2_CONN_WINDOW: u32 = 1024 * 1024 * 2; // 2MB // These values are taken from hyper/src/proto/h2/client.rs
const DEFAULT_H2_STREAM_WINDOW: u32 = 1024 * 1024; // 1MB const DEFAULT_H2_CONN_WINDOW: u32 = 1024 * 1024 * 2; // 2mb
const DEFAULT_H2_STREAM_WINDOW: u32 = 1024 * 1024; // 1mb
/// Connector configuration /// Connector configuration
#[derive(Clone)] #[derive(Clone)]
pub(crate) struct ConnectorConfig { pub(crate) struct ConnectorConfig {
pub(crate) timeout: Duration, pub(crate) timeout: Duration,
pub(crate) handshake_timeout: Duration,
pub(crate) conn_lifetime: Duration, pub(crate) conn_lifetime: Duration,
pub(crate) conn_keep_alive: Duration, pub(crate) conn_keep_alive: Duration,
pub(crate) disconnect_timeout: Option<Duration>, pub(crate) disconnect_timeout: Option<Duration>,
pub(crate) limit: usize, pub(crate) limit: usize,
pub(crate) conn_window_size: u32, pub(crate) conn_window_size: u32,
pub(crate) stream_window_size: u32, pub(crate) stream_window_size: u32,
pub(crate) local_address: Option<IpAddr>,
} }
impl Default for ConnectorConfig { impl Default for ConnectorConfig {
fn default() -> Self { fn default() -> Self {
Self { Self {
timeout: Duration::from_secs(5), timeout: Duration::from_secs(1),
handshake_timeout: Duration::from_secs(5),
conn_lifetime: Duration::from_secs(75), conn_lifetime: Duration::from_secs(75),
conn_keep_alive: Duration::from_secs(15), conn_keep_alive: Duration::from_secs(15),
disconnect_timeout: Some(Duration::from_millis(3000)), disconnect_timeout: Some(Duration::from_millis(3000)),
limit: 100, limit: 100,
conn_window_size: DEFAULT_H2_CONN_WINDOW, conn_window_size: DEFAULT_H2_CONN_WINDOW,
stream_window_size: DEFAULT_H2_STREAM_WINDOW, stream_window_size: DEFAULT_H2_STREAM_WINDOW,
local_address: None,
} }
} }
} }

View File

@ -0,0 +1,291 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, io, mem, time};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use bytes::{Buf, Bytes};
use futures_util::future::{err, Either, FutureExt, LocalBoxFuture, Ready};
use h2::client::SendRequest;
use pin_project::pin_project;
use crate::body::MessageBody;
use crate::h1::ClientCodec;
use crate::message::{RequestHeadType, ResponseHead};
use crate::payload::Payload;
use super::error::SendRequestError;
use super::pool::{Acquired, Protocol};
use super::{h1proto, h2proto};
pub(crate) enum ConnectionType<Io> {
H1(Io),
H2(SendRequest<Bytes>),
}
pub trait Connection {
type Io: AsyncRead + AsyncWrite + Unpin;
type Future: Future<Output = Result<(ResponseHead, Payload), SendRequestError>>;
fn protocol(&self) -> Protocol;
/// Send request and body
fn send_request<B: MessageBody + 'static, H: Into<RequestHeadType>>(
self,
head: H,
body: B,
) -> Self::Future;
type TunnelFuture: Future<
Output = Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>;
/// Send request, returns Response and Framed
fn open_tunnel<H: Into<RequestHeadType>>(self, head: H) -> Self::TunnelFuture;
}
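// Illustrative sketch (not part of the original file; the function is hypothetical):
// code that is generic over `Connection` needs only the trait bounds and works the
// same whether the underlying transport speaks HTTP/1 or HTTP/2. `()` is a valid
// zero-length `MessageBody` here.
#[allow(dead_code)]
async fn send_empty<C: Connection>(
    conn: C,
    head: RequestHeadType,
) -> Result<(ResponseHead, Payload), SendRequestError> {
    conn.send_request(head, ()).await
}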
pub(crate) trait ConnectionLifetime: AsyncRead + AsyncWrite + 'static {
/// Close connection
fn close(self: Pin<&mut Self>);
/// Release connection to the connection pool
fn release(self: Pin<&mut Self>);
}
#[doc(hidden)]
/// HTTP client connection
pub struct IoConnection<T> {
io: Option<ConnectionType<T>>,
created: time::Instant,
pool: Option<Acquired<T>>,
}
impl<T> fmt::Debug for IoConnection<T>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.io {
Some(ConnectionType::H1(ref io)) => write!(f, "H1Connection({:?})", io),
Some(ConnectionType::H2(_)) => write!(f, "H2Connection"),
None => write!(f, "Connection(Empty)"),
}
}
}
impl<T: AsyncRead + AsyncWrite + Unpin> IoConnection<T> {
pub(crate) fn new(
io: ConnectionType<T>,
created: time::Instant,
pool: Option<Acquired<T>>,
) -> Self {
IoConnection {
pool,
created,
io: Some(io),
}
}
pub(crate) fn into_inner(self) -> (ConnectionType<T>, time::Instant) {
(self.io.unwrap(), self.created)
}
}
impl<T> Connection for IoConnection<T>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Io = T;
type Future =
LocalBoxFuture<'static, Result<(ResponseHead, Payload), SendRequestError>>;
fn protocol(&self) -> Protocol {
match self.io {
Some(ConnectionType::H1(_)) => Protocol::Http1,
Some(ConnectionType::H2(_)) => Protocol::Http2,
None => Protocol::Http1,
}
}
fn send_request<B: MessageBody + 'static, H: Into<RequestHeadType>>(
mut self,
head: H,
body: B,
) -> Self::Future {
match self.io.take().unwrap() {
ConnectionType::H1(io) => {
h1proto::send_request(io, head.into(), body, self.created, self.pool)
.boxed_local()
}
ConnectionType::H2(io) => {
h2proto::send_request(io, head.into(), body, self.created, self.pool)
.boxed_local()
}
}
}
type TunnelFuture = Either<
LocalBoxFuture<
'static,
Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>,
Ready<Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>>,
>;
/// Send request, returns Response and Framed
fn open_tunnel<H: Into<RequestHeadType>>(mut self, head: H) -> Self::TunnelFuture {
match self.io.take().unwrap() {
ConnectionType::H1(io) => {
Either::Left(h1proto::open_tunnel(io, head.into()).boxed_local())
}
ConnectionType::H2(io) => {
if let Some(mut pool) = self.pool.take() {
pool.release(IoConnection::new(
ConnectionType::H2(io),
self.created,
None,
));
}
Either::Right(err(SendRequestError::TunnelNotSupported))
}
}
}
}
#[allow(dead_code)]
pub(crate) enum EitherConnection<A, B> {
A(IoConnection<A>),
B(IoConnection<B>),
}
impl<A, B> Connection for EitherConnection<A, B>
where
A: AsyncRead + AsyncWrite + Unpin + 'static,
B: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Io = EitherIo<A, B>;
type Future =
LocalBoxFuture<'static, Result<(ResponseHead, Payload), SendRequestError>>;
fn protocol(&self) -> Protocol {
match self {
EitherConnection::A(con) => con.protocol(),
EitherConnection::B(con) => con.protocol(),
}
}
fn send_request<RB: MessageBody + 'static, H: Into<RequestHeadType>>(
self,
head: H,
body: RB,
) -> Self::Future {
match self {
EitherConnection::A(con) => con.send_request(head, body),
EitherConnection::B(con) => con.send_request(head, body),
}
}
type TunnelFuture = LocalBoxFuture<
'static,
Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>;
/// Send request, returns Response and Framed
fn open_tunnel<H: Into<RequestHeadType>>(self, head: H) -> Self::TunnelFuture {
match self {
EitherConnection::A(con) => con
.open_tunnel(head)
.map(|res| {
res.map(|(head, framed)| (head, framed.into_map_io(EitherIo::A)))
})
.boxed_local(),
EitherConnection::B(con) => con
.open_tunnel(head)
.map(|res| {
res.map(|(head, framed)| (head, framed.into_map_io(EitherIo::B)))
})
.boxed_local(),
}
}
}
#[pin_project(project = EitherIoProj)]
pub enum EitherIo<A, B> {
A(#[pin] A),
B(#[pin] B),
}
impl<A, B> AsyncRead for EitherIo<A, B>
where
A: AsyncRead,
B: AsyncRead,
{
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
match self.project() {
EitherIoProj::A(val) => val.poll_read(cx, buf),
EitherIoProj::B(val) => val.poll_read(cx, buf),
}
}
unsafe fn prepare_uninitialized_buffer(
&self,
buf: &mut [mem::MaybeUninit<u8>],
) -> bool {
match self {
EitherIo::A(ref val) => val.prepare_uninitialized_buffer(buf),
EitherIo::B(ref val) => val.prepare_uninitialized_buffer(buf),
}
}
}
impl<A, B> AsyncWrite for EitherIo<A, B>
where
A: AsyncWrite,
B: AsyncWrite,
{
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
match self.project() {
EitherIoProj::A(val) => val.poll_write(cx, buf),
EitherIoProj::B(val) => val.poll_write(cx, buf),
}
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
match self.project() {
EitherIoProj::A(val) => val.poll_flush(cx),
EitherIoProj::B(val) => val.poll_flush(cx),
}
}
fn poll_shutdown(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>> {
match self.project() {
EitherIoProj::A(val) => val.poll_shutdown(cx),
EitherIoProj::B(val) => val.poll_shutdown(cx),
}
}
fn poll_write_buf<U: Buf>(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut U,
) -> Poll<Result<usize, io::Error>>
where
Self: Sized,
{
match self.project() {
EitherIoProj::A(val) => val.poll_write_buf(cx, buf),
EitherIoProj::B(val) => val.poll_write_buf(cx, buf),
}
}
}

View File

@ -0,0 +1,547 @@
use std::fmt;
use std::marker::PhantomData;
use std::time::Duration;
use actix_codec::{AsyncRead, AsyncWrite};
use actix_connect::{
default_connector, Connect as TcpConnect, Connection as TcpConnection,
};
use actix_rt::net::TcpStream;
use actix_service::{apply_fn, Service};
use actix_utils::timeout::{TimeoutError, TimeoutService};
use http::Uri;
use super::config::ConnectorConfig;
use super::connection::Connection;
use super::error::ConnectError;
use super::pool::{ConnectionPool, Protocol};
use super::Connect;
#[cfg(feature = "openssl")]
use actix_connect::ssl::openssl::SslConnector as OpensslConnector;
#[cfg(feature = "rustls")]
use actix_connect::ssl::rustls::ClientConfig;
#[cfg(feature = "rustls")]
use std::sync::Arc;
#[cfg(any(feature = "openssl", feature = "rustls"))]
enum SslConnector {
#[cfg(feature = "openssl")]
Openssl(OpensslConnector),
#[cfg(feature = "rustls")]
Rustls(Arc<ClientConfig>),
}
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
type SslConnector = ();
/// Manages HTTP client network connectivity.
///
/// The `Connector` type uses a builder-like combinator pattern for service
/// construction that finishes by calling the `.finish()` method.
///
/// ```rust,ignore
/// use std::time::Duration;
/// use actix_http::client::Connector;
///
/// let connector = Connector::new()
/// .timeout(Duration::from_secs(5))
/// .finish();
/// ```
pub struct Connector<T, U> {
connector: T,
config: ConnectorConfig,
#[allow(dead_code)]
ssl: SslConnector,
_t: PhantomData<U>,
}
trait Io: AsyncRead + AsyncWrite + Unpin {}
impl<T: AsyncRead + AsyncWrite + Unpin> Io for T {}
impl Connector<(), ()> {
#[allow(clippy::new_ret_no_self, clippy::let_unit_value)]
pub fn new() -> Connector<
impl Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, TcpStream>,
Error = actix_connect::ConnectError,
> + Clone,
TcpStream,
> {
Connector {
ssl: Self::build_ssl(vec![b"h2".to_vec(), b"http/1.1".to_vec()]),
connector: default_connector(),
config: ConnectorConfig::default(),
_t: PhantomData,
}
}
// Build Ssl connector with openssl, based on supplied alpn protocols
#[cfg(feature = "openssl")]
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
use actix_connect::ssl::openssl::SslMethod;
use bytes::{BufMut, BytesMut};
let mut alpn = BytesMut::with_capacity(20);
for proto in protocols.iter() {
alpn.put_u8(proto.len() as u8);
alpn.put(proto.as_slice());
}
let mut ssl = OpensslConnector::builder(SslMethod::tls()).unwrap();
let _ = ssl
.set_alpn_protos(&alpn)
.map_err(|e| error!("Can not set alpn protocol: {:?}", e));
SslConnector::Openssl(ssl.build())
}
// Build Ssl connector with rustls, based on supplied alpn protocols
#[cfg(all(not(feature = "openssl"), feature = "rustls"))]
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
let mut config = ClientConfig::new();
config.set_protocols(&protocols);
config
.root_store
.add_server_trust_anchors(&actix_tls::rustls::TLS_SERVER_ROOTS);
SslConnector::Rustls(Arc::new(config))
}
// ssl turned off, provides empty ssl connector
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
fn build_ssl(_: Vec<Vec<u8>>) -> SslConnector {}
}
impl<T, U> Connector<T, U> {
/// Use custom connector.
pub fn connector<T1, U1>(self, connector: T1) -> Connector<T1, U1>
where
U1: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
T1: Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, U1>,
Error = actix_connect::ConnectError,
> + Clone,
{
Connector {
connector,
config: self.config,
ssl: self.ssl,
_t: PhantomData,
}
}
}
impl<T, U> Connector<T, U>
where
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
T: Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, U>,
Error = actix_connect::ConnectError,
> + Clone
+ 'static,
{
/// Connection timeout, i.e. the maximum time to connect to a remote host, including DNS name resolution.
/// Set to 1 second by default.
pub fn timeout(mut self, timeout: Duration) -> Self {
self.config.timeout = timeout;
self
}
#[cfg(feature = "openssl")]
/// Use custom `SslConnector` instance.
pub fn ssl(mut self, connector: OpensslConnector) -> Self {
self.ssl = SslConnector::Openssl(connector);
self
}
#[cfg(feature = "rustls")]
pub fn rustls(mut self, connector: Arc<ClientConfig>) -> Self {
self.ssl = SslConnector::Rustls(connector);
self
}
/// Maximum supported http major version
/// Supported versions http/1.1, http/2
pub fn max_http_version(mut self, val: http::Version) -> Self {
let versions = match val {
http::Version::HTTP_11 => vec![b"http/1.1".to_vec()],
http::Version::HTTP_2 => vec![b"h2".to_vec(), b"http/1.1".to_vec()],
_ => {
unimplemented!("actix-http:client: supported versions http/1.1, http/2")
}
};
self.ssl = Connector::build_ssl(versions);
self
}
/// Indicates the initial window size (in octets) for
/// HTTP2 stream-level flow control for received data.
///
/// The default value is 65,535 and is good for APIs, but not for big objects.
pub fn initial_window_size(mut self, size: u32) -> Self {
self.config.stream_window_size = size;
self
}
/// Indicates the initial window size (in octets) for
/// HTTP2 connection-level flow control for received data.
///
/// The default value is 65,535 and is good for APIs, but not for big objects.
pub fn initial_connection_window_size(mut self, size: u32) -> Self {
self.config.conn_window_size = size;
self
}
/// Set total number of simultaneous connections per type of scheme.
///
/// If limit is 0, the connector has no limit.
/// The default limit size is 100.
pub fn limit(mut self, limit: usize) -> Self {
self.config.limit = limit;
self
}
/// Set keep-alive period for opened connection.
///
/// Keep-alive period is the period between connection usage. If
/// the delay between repeated usages of the same connection
/// exceeds this period, the connection is closed.
/// Default keep-alive period is 15 seconds.
pub fn conn_keep_alive(mut self, dur: Duration) -> Self {
self.config.conn_keep_alive = dur;
self
}
/// Set max lifetime period for connection.
///
/// Connection lifetime is max lifetime of any opened connection
/// until it is closed regardless of keep-alive period.
/// Default lifetime period is 75 seconds.
pub fn conn_lifetime(mut self, dur: Duration) -> Self {
self.config.conn_lifetime = dur;
self
}
/// Set server connection disconnect timeout in milliseconds.
///
/// Defines a timeout for the disconnect procedure. If the disconnect does not complete within
/// this time, the socket is dropped. This timeout affects only secure connections.
///
/// To disable timeout set value to 0.
///
/// By default disconnect timeout is set to 3000 milliseconds.
pub fn disconnect_timeout(mut self, dur: Duration) -> Self {
self.config.disconnect_timeout = Some(dur);
self
}
/// Finish configuration process and create connector service.
/// The Connector builder always concludes by calling `finish()` last in
/// its combinator chain.
pub fn finish(
self,
) -> impl Service<Request = Connect, Response = impl Connection, Error = ConnectError>
+ Clone {
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
{
let connector = TimeoutService::new(
self.config.timeout,
apply_fn(self.connector, |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
.map_err(ConnectError::from)
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
TimeoutError::Timeout => ConnectError::Timeout,
});
connect_impl::InnerConnector {
tcp_pool: ConnectionPool::new(
connector,
self.config.no_disconnect_timeout(),
),
}
}
#[cfg(any(feature = "openssl", feature = "rustls"))]
{
const H2: &[u8] = b"h2";
#[cfg(feature = "openssl")]
use actix_connect::ssl::openssl::OpensslConnector;
#[cfg(feature = "rustls")]
use actix_connect::ssl::rustls::{RustlsConnector, Session};
use actix_service::{boxed::service, pipeline};
let ssl_service = TimeoutService::new(
self.config.timeout,
pipeline(
apply_fn(self.connector.clone(), |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
.map_err(ConnectError::from),
)
.and_then(match self.ssl {
#[cfg(feature = "openssl")]
SslConnector::Openssl(ssl) => service(
OpensslConnector::service(ssl)
.map(|stream| {
let sock = stream.into_parts().0;
let h2 = sock
.ssl()
.selected_alpn_protocol()
.map(|protos| protos.windows(2).any(|w| w == H2))
.unwrap_or(false);
if h2 {
(Box::new(sock) as Box<dyn Io>, Protocol::Http2)
} else {
(Box::new(sock) as Box<dyn Io>, Protocol::Http1)
}
})
.map_err(ConnectError::from),
),
#[cfg(feature = "rustls")]
SslConnector::Rustls(ssl) => service(
RustlsConnector::service(ssl)
.map_err(ConnectError::from)
.map(|stream| {
let sock = stream.into_parts().0;
let h2 = sock
.get_ref()
.1
.get_alpn_protocol()
.map(|protos| protos.windows(2).any(|w| w == H2))
.unwrap_or(false);
if h2 {
(Box::new(sock) as Box<dyn Io>, Protocol::Http2)
} else {
(Box::new(sock) as Box<dyn Io>, Protocol::Http1)
}
}),
),
}),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
TimeoutError::Timeout => ConnectError::Timeout,
});
let tcp_service = TimeoutService::new(
self.config.timeout,
apply_fn(self.connector, |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
.map_err(ConnectError::from)
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
TimeoutError::Timeout => ConnectError::Timeout,
});
connect_impl::InnerConnector {
tcp_pool: ConnectionPool::new(
tcp_service,
self.config.no_disconnect_timeout(),
),
ssl_pool: ConnectionPool::new(ssl_service, self.config),
}
}
}
}
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
mod connect_impl {
use std::task::{Context, Poll};
use futures_util::future::{err, Either, Ready};
use super::*;
use crate::client::connection::IoConnection;
pub(crate) struct InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
pub(crate) tcp_pool: ConnectionPool<T, Io>,
}
impl<T, Io> Clone for InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
fn clone(&self) -> Self {
InnerConnector {
tcp_pool: self.tcp_pool.clone(),
}
}
}
impl<T, Io> Service for InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
type Request = Connect;
type Response = IoConnection<Io>;
type Error = ConnectError;
type Future = Either<
<ConnectionPool<T, Io> as Service>::Future,
Ready<Result<IoConnection<Io>, ConnectError>>,
>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.tcp_pool.poll_ready(cx)
}
fn call(&mut self, req: Connect) -> Self::Future {
match req.uri.scheme_str() {
Some("https") | Some("wss") => {
Either::Right(err(ConnectError::SslIsNotSupported))
}
_ => Either::Left(self.tcp_pool.call(req)),
}
}
}
}
#[cfg(any(feature = "openssl", feature = "rustls"))]
mod connect_impl {
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use futures_core::ready;
use futures_util::future::Either;
use super::*;
use crate::client::connection::EitherConnection;
pub(crate) struct InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>,
{
pub(crate) tcp_pool: ConnectionPool<T1, Io1>,
pub(crate) ssl_pool: ConnectionPool<T2, Io2>,
}
impl<T1, T2, Io1, Io2> Clone for InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{
fn clone(&self) -> Self {
InnerConnector {
tcp_pool: self.tcp_pool.clone(),
ssl_pool: self.ssl_pool.clone(),
}
}
}
impl<T1, T2, Io1, Io2> Service for InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{
type Request = Connect;
type Response = EitherConnection<Io1, Io2>;
type Error = ConnectError;
type Future = Either<
InnerConnectorResponseA<T1, Io1, Io2>,
InnerConnectorResponseB<T2, Io1, Io2>,
>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.tcp_pool.poll_ready(cx)
}
fn call(&mut self, req: Connect) -> Self::Future {
match req.uri.scheme_str() {
Some("https") | Some("wss") => Either::Right(InnerConnectorResponseB {
fut: self.ssl_pool.call(req),
_t: PhantomData,
}),
_ => Either::Left(InnerConnectorResponseA {
fut: self.tcp_pool.call(req),
_t: PhantomData,
}),
}
}
}
#[pin_project::pin_project]
pub(crate) struct InnerConnectorResponseA<T, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
{
#[pin]
fut: <ConnectionPool<T, Io1> as Service>::Future,
_t: PhantomData<Io2>,
}
impl<T, Io1, Io2> Future for InnerConnectorResponseA<T, Io1, Io2>
where
T: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Output = Result<EitherConnection<Io1, Io2>, ConnectError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(
ready!(Pin::new(&mut self.get_mut().fut).poll(cx))
.map(EitherConnection::A),
)
}
}
#[pin_project::pin_project]
pub(crate) struct InnerConnectorResponseB<T, Io1, Io2>
where
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{
#[pin]
fut: <ConnectionPool<T, Io2> as Service>::Future,
_t: PhantomData<Io1>,
}
impl<T, Io1, Io2> Future for InnerConnectorResponseB<T, Io1, Io2>
where
T: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Output = Result<EitherConnection<Io1, Io2>, ConnectError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(
ready!(Pin::new(&mut self.get_mut().fut).poll(cx))
.map(EitherConnection::B),
)
}
}
}
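
The scheme check above is what decides whether a request goes to the plain-TCP pool or the TLS pool. A minimal standalone sketch of that dispatch, assuming only the http crate (`is_tls` is a hypothetical helper, not part of the crate):

use http::Uri;

fn is_tls(uri: &Uri) -> bool {
    // "https" and "wss" need a TLS-capable connector; everything else is plain TCP.
    matches!(uri.scheme_str(), Some("https") | Some("wss"))
}

fn main() {
    assert!(is_tls(&Uri::from_static("https://example.com")));
    assert!(!is_tls(&Uri::from_static("ws://example.com/socket")));
}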

View File

@ -1,17 +1,16 @@
use std::{fmt, io}; use std::io;
use actix_connect::resolver::ResolveError;
use derive_more::{Display, From}; use derive_more::{Display, From};
use actix_http::error::{HttpError, ParseError};
#[cfg(feature = "openssl")] #[cfg(feature = "openssl")]
use actix_tls::accept::openssl::reexports::Error as OpensslError; use actix_connect::ssl::openssl::{HandshakeError, SslError};
use crate::BoxError; use crate::error::{Error, ParseError, ResponseError};
use crate::http::{Error as HttpError, StatusCode};
/// A set of errors that can occur while connecting to an HTTP host /// A set of errors that can occur while connecting to an HTTP host
#[derive(Debug, Display, From)] #[derive(Debug, Display, From)]
#[non_exhaustive]
pub enum ConnectError { pub enum ConnectError {
/// SSL feature is not enabled /// SSL feature is not enabled
#[display(fmt = "SSL is not supported")] #[display(fmt = "SSL is not supported")]
@ -20,14 +19,19 @@ pub enum ConnectError {
/// SSL error /// SSL error
#[cfg(feature = "openssl")] #[cfg(feature = "openssl")]
#[display(fmt = "{}", _0)] #[display(fmt = "{}", _0)]
SslError(OpensslError), SslError(SslError),
/// SSL Handshake error
#[cfg(feature = "openssl")]
#[display(fmt = "{}", _0)]
SslHandshakeError(String),
/// Failed to resolve the hostname /// Failed to resolve the hostname
#[display(fmt = "Failed resolving hostname: {}", _0)] #[display(fmt = "Failed resolving hostname: {}", _0)]
Resolver(Box<dyn std::error::Error>), Resolver(ResolveError),
/// No dns records /// No dns records
#[display(fmt = "No DNS records found for the input")] #[display(fmt = "No dns records found for the input")]
NoRecords, NoRecords,
/// Http2 error /// Http2 error
@ -53,31 +57,34 @@ pub enum ConnectError {
impl std::error::Error for ConnectError {} impl std::error::Error for ConnectError {}
impl From<actix_tls::connect::ConnectError> for ConnectError { impl From<actix_connect::ConnectError> for ConnectError {
fn from(err: actix_tls::connect::ConnectError) -> ConnectError { fn from(err: actix_connect::ConnectError) -> ConnectError {
match err { match err {
actix_tls::connect::ConnectError::Resolver(e) => ConnectError::Resolver(e), actix_connect::ConnectError::Resolver(e) => ConnectError::Resolver(e),
actix_tls::connect::ConnectError::NoRecords => ConnectError::NoRecords, actix_connect::ConnectError::NoRecords => ConnectError::NoRecords,
actix_tls::connect::ConnectError::InvalidInput => panic!(), actix_connect::ConnectError::InvalidInput => panic!(),
actix_tls::connect::ConnectError::Unresolved => ConnectError::Unresolved, actix_connect::ConnectError::Unresolved => ConnectError::Unresolved,
actix_tls::connect::ConnectError::Io(e) => ConnectError::Io(e), actix_connect::ConnectError::Io(e) => ConnectError::Io(e),
} }
} }
} }
#[cfg(feature = "openssl")]
impl<T: std::fmt::Debug> From<HandshakeError<T>> for ConnectError {
fn from(err: HandshakeError<T>) -> ConnectError {
ConnectError::SslHandshakeError(format!("{:?}", err))
}
}
#[derive(Debug, Display, From)] #[derive(Debug, Display, From)]
#[non_exhaustive]
pub enum InvalidUrl { pub enum InvalidUrl {
#[display(fmt = "Missing URL scheme")] #[display(fmt = "Missing url scheme")]
MissingScheme, MissingScheme,
#[display(fmt = "Unknown url scheme")]
#[display(fmt = "Unknown URL scheme")]
UnknownScheme, UnknownScheme,
#[display(fmt = "Missing host name")] #[display(fmt = "Missing host name")]
MissingHost, MissingHost,
#[display(fmt = "Url parse error: {}", _0)]
#[display(fmt = "URL parse error: {}", _0)]
HttpError(http::Error), HttpError(http::Error),
} }
@ -85,73 +92,66 @@ impl std::error::Error for InvalidUrl {}
/// A set of errors that can occur during request sending and response reading /// A set of errors that can occur during request sending and response reading
#[derive(Debug, Display, From)] #[derive(Debug, Display, From)]
#[non_exhaustive]
pub enum SendRequestError { pub enum SendRequestError {
/// Invalid URL /// Invalid URL
#[display(fmt = "Invalid URL: {}", _0)] #[display(fmt = "Invalid URL: {}", _0)]
Url(InvalidUrl), Url(InvalidUrl),
/// Failed to connect to host /// Failed to connect to host
#[display(fmt = "Failed to connect to host: {}", _0)] #[display(fmt = "Failed to connect to host: {}", _0)]
Connect(ConnectError), Connect(ConnectError),
/// Error sending request /// Error sending request
Send(io::Error), Send(io::Error),
/// Error parsing response /// Error parsing response
Response(ParseError), Response(ParseError),
/// Http error /// Http error
#[display(fmt = "{}", _0)] #[display(fmt = "{}", _0)]
Http(HttpError), Http(HttpError),
/// Http2 error /// Http2 error
#[display(fmt = "{}", _0)] #[display(fmt = "{}", _0)]
H2(h2::Error), H2(h2::Error),
/// Response took too long /// Response took too long
#[display(fmt = "Timeout while waiting for response")] #[display(fmt = "Timeout while waiting for response")]
Timeout, Timeout,
/// Tunnels are not supported for http2 connection
/// Tunnels are not supported for HTTP/2 connection
#[display(fmt = "Tunnels are not supported for http2 connection")] #[display(fmt = "Tunnels are not supported for http2 connection")]
TunnelNotSupported, TunnelNotSupported,
/// Error sending request body /// Error sending request body
Body(BoxError), Body(Error),
/// Other errors that can occur after submitting a request.
#[display(fmt = "{:?}: {}", _1, _0)]
Custom(BoxError, Box<dyn fmt::Debug>),
} }
impl std::error::Error for SendRequestError {} impl std::error::Error for SendRequestError {}
/// Convert `SendRequestError` to a server `Response`
impl ResponseError for SendRequestError {
fn status_code(&self) -> StatusCode {
match *self {
SendRequestError::Connect(ConnectError::Timeout) => {
StatusCode::GATEWAY_TIMEOUT
}
SendRequestError::Connect(_) => StatusCode::BAD_REQUEST,
_ => StatusCode::INTERNAL_SERVER_ERROR,
}
}
}
/// A set of errors that can occur during freezing a request /// A set of errors that can occur during freezing a request
#[derive(Debug, Display, From)] #[derive(Debug, Display, From)]
#[non_exhaustive]
pub enum FreezeRequestError { pub enum FreezeRequestError {
/// Invalid URL /// Invalid URL
#[display(fmt = "Invalid URL: {}", _0)] #[display(fmt = "Invalid URL: {}", _0)]
Url(InvalidUrl), Url(InvalidUrl),
/// Http error
/// HTTP error
#[display(fmt = "{}", _0)] #[display(fmt = "{}", _0)]
Http(HttpError), Http(HttpError),
/// Other errors that can occur after submitting a request.
#[display(fmt = "{:?}: {}", _1, _0)]
Custom(BoxError, Box<dyn fmt::Debug>),
} }
impl std::error::Error for FreezeRequestError {} impl std::error::Error for FreezeRequestError {}
impl From<FreezeRequestError> for SendRequestError { impl From<FreezeRequestError> for SendRequestError {
fn from(err: FreezeRequestError) -> Self { fn from(e: FreezeRequestError) -> Self {
match err { match e {
FreezeRequestError::Url(err) => err.into(), FreezeRequestError::Url(e) => e.into(),
FreezeRequestError::Http(err) => err.into(), FreezeRequestError::Http(e) => e.into(),
FreezeRequestError::Custom(err, msg) => SendRequestError::Custom(err, msg),
} }
} }
} }
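
The error enums in this file lean on derive_more for their Display and From impls. A small self-contained sketch of that pattern, using the derive_more 0.99 `#[display(fmt = ...)]` syntax seen above and a hypothetical `DemoError`:

use derive_more::{Display, From};

#[derive(Debug, Display, From)]
enum DemoError {
    #[display(fmt = "Missing URL scheme")]
    MissingScheme,
    #[display(fmt = "I/O error: {}", _0)]
    Io(std::io::Error),
}

impl std::error::Error for DemoError {}

fn main() {
    // The display attribute becomes the error's human-readable message.
    assert_eq!(DemoError::MissingScheme.to_string(), "Missing URL scheme");
}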

View File

@ -0,0 +1,298 @@
use std::io::Write;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{io, mem, time};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use bytes::buf::BufMutExt;
use bytes::{Bytes, BytesMut};
use futures_core::Stream;
use futures_util::future::poll_fn;
use futures_util::{pin_mut, SinkExt, StreamExt};
use crate::error::PayloadError;
use crate::h1;
use crate::header::HeaderMap;
use crate::http::header::{IntoHeaderValue, HOST};
use crate::message::{RequestHeadType, ResponseHead};
use crate::payload::{Payload, PayloadStream};
use super::connection::{ConnectionLifetime, ConnectionType, IoConnection};
use super::error::{ConnectError, SendRequestError};
use super::pool::Acquired;
use crate::body::{BodySize, MessageBody};
pub(crate) async fn send_request<T, B>(
io: T,
mut head: RequestHeadType,
body: B,
created: time::Instant,
pool: Option<Acquired<T>>,
) -> Result<(ResponseHead, Payload), SendRequestError>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
B: MessageBody,
{
// set request host header
if !head.as_ref().headers.contains_key(HOST)
&& !head.extra_headers().iter().any(|h| h.contains_key(HOST))
{
if let Some(host) = head.as_ref().uri.host() {
let mut wrt = BytesMut::with_capacity(host.len() + 5).writer();
let _ = match head.as_ref().uri.port_u16() {
None | Some(80) | Some(443) => write!(wrt, "{}", host),
Some(port) => write!(wrt, "{}:{}", host, port),
};
match wrt.get_mut().split().freeze().try_into() {
Ok(value) => match head {
RequestHeadType::Owned(ref mut head) => {
head.headers.insert(HOST, value)
}
RequestHeadType::Rc(_, ref mut extra_headers) => {
let headers = extra_headers.get_or_insert(HeaderMap::new());
headers.insert(HOST, value)
}
},
Err(e) => log::error!("Can not set HOST header {}", e),
}
}
}
let io = H1Connection {
created,
pool,
io: Some(io),
};
// create Framed and send request
let mut framed_inner = Framed::new(io, h1::ClientCodec::default());
framed_inner.send((head, body.size()).into()).await?;
// send request body
match body.size() {
BodySize::None | BodySize::Empty | BodySize::Sized(0) => (),
_ => send_body(body, Pin::new(&mut framed_inner)).await?,
};
// read response and init read body
let res = Pin::new(&mut framed_inner).into_future().await;
let (head, framed) = if let (Some(result), framed) = res {
let item = result.map_err(SendRequestError::from)?;
(item, framed)
} else {
return Err(SendRequestError::from(ConnectError::Disconnected));
};
match framed.codec_ref().message_type() {
h1::MessageType::None => {
let force_close = !framed.codec_ref().keepalive();
release_connection(framed, force_close);
Ok((head, Payload::None))
}
_ => {
let pl: PayloadStream = PlStream::new(framed_inner).boxed_local();
Ok((head, pl.into()))
}
}
}
pub(crate) async fn open_tunnel<T>(
io: T,
head: RequestHeadType,
) -> Result<(ResponseHead, Framed<T, h1::ClientCodec>), SendRequestError>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
// create Framed and send request
let mut framed = Framed::new(io, h1::ClientCodec::default());
framed.send((head, BodySize::None).into()).await?;
// read response
if let (Some(result), framed) = framed.into_future().await {
let head = result.map_err(SendRequestError::from)?;
Ok((head, framed))
} else {
Err(SendRequestError::from(ConnectError::Disconnected))
}
}
/// Send the request body to the peer.
pub(crate) async fn send_body<T, B>(
body: B,
mut framed: Pin<&mut Framed<T, h1::ClientCodec>>,
) -> Result<(), SendRequestError>
where
T: ConnectionLifetime + Unpin,
B: MessageBody,
{
pin_mut!(body);
let mut eof = false;
while !eof {
while !eof && !framed.as_ref().is_write_buf_full() {
match poll_fn(|cx| body.as_mut().poll_next(cx)).await {
Some(result) => {
framed.as_mut().write(h1::Message::Chunk(Some(result?)))?;
}
None => {
eof = true;
framed.as_mut().write(h1::Message::Chunk(None))?;
}
}
}
if !framed.as_ref().is_write_buf_empty() {
poll_fn(|cx| match framed.as_mut().flush(cx) {
Poll::Ready(Ok(_)) => Poll::Ready(Ok(())),
Poll::Ready(Err(err)) => Poll::Ready(Err(err)),
Poll::Pending => {
if !framed.as_ref().is_write_buf_full() {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
})
.await?;
}
}
SinkExt::flush(Pin::into_inner(framed)).await?;
Ok(())
}
#[doc(hidden)]
/// HTTP client connection
pub struct H1Connection<T> {
/// T should be `Unpin`
io: Option<T>,
created: time::Instant,
pool: Option<Acquired<T>>,
}
impl<T> ConnectionLifetime for H1Connection<T>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
/// Close connection
fn close(mut self: Pin<&mut Self>) {
if let Some(mut pool) = self.pool.take() {
if let Some(io) = self.io.take() {
pool.close(IoConnection::new(
ConnectionType::H1(io),
self.created,
None,
));
}
}
}
/// Release this connection to the connection pool
fn release(mut self: Pin<&mut Self>) {
if let Some(mut pool) = self.pool.take() {
if let Some(io) = self.io.take() {
pool.release(IoConnection::new(
ConnectionType::H1(io),
self.created,
None,
));
}
}
}
}
impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncRead for H1Connection<T> {
unsafe fn prepare_uninitialized_buffer(
&self,
buf: &mut [mem::MaybeUninit<u8>],
) -> bool {
self.io.as_ref().unwrap().prepare_uninitialized_buffer(buf)
}
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.io.as_mut().unwrap()).poll_read(cx, buf)
}
}
impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncWrite for H1Connection<T> {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.io.as_mut().unwrap()).poll_write(cx, buf)
}
fn poll_flush(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>> {
Pin::new(self.io.as_mut().unwrap()).poll_flush(cx)
}
fn poll_shutdown(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), io::Error>> {
Pin::new(self.io.as_mut().unwrap()).poll_shutdown(cx)
}
}
#[pin_project::pin_project]
pub(crate) struct PlStream<Io> {
#[pin]
framed: Option<Framed<Io, h1::ClientPayloadCodec>>,
}
impl<Io: ConnectionLifetime> PlStream<Io> {
fn new(framed: Framed<Io, h1::ClientCodec>) -> Self {
let framed = framed.into_map_codec(|codec| codec.into_payload_codec());
PlStream {
framed: Some(framed),
}
}
}
impl<Io: ConnectionLifetime> Stream for PlStream<Io> {
type Item = Result<Bytes, PayloadError>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
let mut this = self.project();
match this.framed.as_mut().as_pin_mut().unwrap().next_item(cx)? {
Poll::Pending => Poll::Pending,
Poll::Ready(Some(chunk)) => {
if let Some(chunk) = chunk {
Poll::Ready(Some(Ok(chunk)))
} else {
let framed = this.framed.as_mut().as_pin_mut().unwrap();
let force_close = !framed.codec_ref().keepalive();
release_connection(framed, force_close);
Poll::Ready(None)
}
}
Poll::Ready(None) => Poll::Ready(None),
}
}
}
fn release_connection<T, U>(framed: Pin<&mut Framed<T, U>>, force_close: bool)
where
T: ConnectionLifetime,
{
if !force_close && framed.is_read_buf_empty() && framed.is_write_buf_empty() {
framed.io_pin().release()
} else {
framed.io_pin().close()
}
}
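
send_request above writes the Host header by hand, skipping the port when it is the scheme default. A standalone sketch of that rule (`host_header` is a hypothetical helper; the real code writes into a BytesMut rather than a String):

use std::fmt::Write as _;

fn host_header(host: &str, port: Option<u16>) -> String {
    let mut value = String::with_capacity(host.len() + 6);
    match port {
        // Default ports (80/443) are omitted from the Host header value.
        None | Some(80) | Some(443) => write!(value, "{}", host).unwrap(),
        Some(port) => write!(value, "{}:{}", host, port).unwrap(),
    }
    value
}

fn main() {
    assert_eq!(host_header("example.com", Some(443)), "example.com");
    assert_eq!(host_header("example.com", Some(8080)), "example.com:8080");
}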

View File

@ -1,44 +1,46 @@
use std::convert::TryFrom;
use std::future::Future; use std::future::Future;
use std::time;
use actix_utils::future::poll_fn; use actix_codec::{AsyncRead, AsyncWrite};
use bytes::Bytes; use bytes::Bytes;
use futures_util::future::poll_fn;
use futures_util::pin_mut;
use h2::{ use h2::{
client::{Builder, Connection, SendRequest}, client::{Builder, Connection, SendRequest},
SendStream, SendStream,
}; };
use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, TRANSFER_ENCODING}; use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, TRANSFER_ENCODING};
use http::{request::Request, Method, Version}; use http::{request::Request, Method, Version};
use log::trace;
use actix_http::{ use crate::body::{BodySize, MessageBody};
body::{BodySize, MessageBody}, use crate::header::HeaderMap;
header::HeaderMap, use crate::message::{RequestHeadType, ResponseHead};
Payload, RequestHeadType, ResponseHead, use crate::payload::Payload;
};
use crate::BoxError; use super::config::ConnectorConfig;
use super::connection::{ConnectionType, IoConnection};
use super::error::SendRequestError;
use super::pool::Acquired;
use super::{ pub(crate) async fn send_request<T, B>(
config::ConnectorConfig, mut io: SendRequest<Bytes>,
connection::{ConnectionIo, H2Connection},
error::SendRequestError,
};
pub(crate) async fn send_request<Io, B>(
mut io: H2Connection<Io>,
head: RequestHeadType, head: RequestHeadType,
body: B, body: B,
created: time::Instant,
pool: Option<Acquired<T>>,
) -> Result<(ResponseHead, Payload), SendRequestError> ) -> Result<(ResponseHead, Payload), SendRequestError>
where where
Io: ConnectionIo, T: AsyncRead + AsyncWrite + Unpin + 'static,
B: MessageBody, B: MessageBody,
B::Error: Into<BoxError>,
{ {
trace!("Sending client request: {:?} {:?}", head, body.size()); trace!("Sending client request: {:?} {:?}", head, body.size());
let head_req = head.as_ref().method == Method::HEAD; let head_req = head.as_ref().method == Method::HEAD;
let length = body.size(); let length = body.size();
let eof = matches!(length, BodySize::None | BodySize::Sized(0)); let eof = matches!(
length,
BodySize::None | BodySize::Empty | BodySize::Sized(0)
);
let mut req = Request::new(()); let mut req = Request::new(());
*req.uri_mut() = head.as_ref().uri.clone(); *req.uri_mut() = head.as_ref().uri.clone();
@ -51,26 +53,17 @@ where
// Content length // Content length
let _ = match length { let _ = match length {
BodySize::None => None, BodySize::None => None,
BodySize::Sized(0) => {
#[allow(clippy::declare_interior_mutable_const)]
const HV_ZERO: HeaderValue = HeaderValue::from_static("0");
req.headers_mut().insert(CONTENT_LENGTH, HV_ZERO)
}
BodySize::Sized(len) => {
let mut buf = itoa::Buffer::new();
req.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::from_str(buf.format(len)).unwrap(),
)
}
BodySize::Stream => { BodySize::Stream => {
skip_len = false; skip_len = false;
None None
} }
BodySize::Empty => req
.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static("0")),
BodySize::Sized(len) => req.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
}; };
// Extracting extra headers from RequestHeadType. HeaderMap::new() does not allocate. // Extracting extra headers from RequestHeadType. HeaderMap::new() does not allocate.
@ -93,26 +86,23 @@ where
// copy headers // copy headers
for (key, value) in headers { for (key, value) in headers {
match *key { match *key {
// TODO: consider skipping other headers according to: CONNECTION | TRANSFER_ENCODING => continue, // http2 specific
// https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2
// omit HTTP/1.x only headers
CONNECTION | TRANSFER_ENCODING => continue,
CONTENT_LENGTH if skip_len => continue, CONTENT_LENGTH if skip_len => continue,
// DATE => has_date = true, // DATE => has_date = true,
_ => {} _ => (),
} }
req.headers_mut().append(key, value.clone()); req.headers_mut().append(key, value.clone());
} }
let res = poll_fn(|cx| io.poll_ready(cx)).await; let res = poll_fn(|cx| io.poll_ready(cx)).await;
if let Err(e) = res { if let Err(e) = res {
io.on_release(e.is_io()); release(io, pool, created, e.is_io());
return Err(SendRequestError::from(e)); return Err(SendRequestError::from(e));
} }
let resp = match io.send_request(req, eof) { let resp = match io.send_request(req, eof) {
Ok((fut, send)) => { Ok((fut, send)) => {
io.on_release(false); release(io, pool, created, false);
if !eof { if !eof {
send_body(body, send).await?; send_body(body, send).await?;
@ -120,7 +110,7 @@ where
fut.await.map_err(SendRequestError::from)? fut.await.map_err(SendRequestError::from)?
} }
Err(e) => { Err(e) => {
io.on_release(e.is_io()); release(io, pool, created, e.is_io());
return Err(e.into()); return Err(e.into());
} }
}; };
@ -134,15 +124,12 @@ where
Ok((head, payload)) Ok((head, payload))
} }
async fn send_body<B>(body: B, mut send: SendStream<Bytes>) -> Result<(), SendRequestError> async fn send_body<B: MessageBody>(
where body: B,
B: MessageBody, mut send: SendStream<Bytes>,
B::Error: Into<BoxError>, ) -> Result<(), SendRequestError> {
{
let mut buf = None; let mut buf = None;
pin_mut!(body);
actix_rt::pin!(body);
loop { loop {
if buf.is_none() { if buf.is_none() {
match poll_fn(|cx| body.as_mut().poll_next(cx)).await { match poll_fn(|cx| body.as_mut().poll_next(cx)).await {
@ -150,10 +137,10 @@ where
send.reserve_capacity(b.len()); send.reserve_capacity(b.len());
buf = Some(b); buf = Some(b);
} }
Some(Err(err)) => return Err(SendRequestError::Body(err.into())), Some(Err(e)) => return Err(e.into()),
None => { None => {
if let Err(err) = send.send_data(Bytes::new(), true) { if let Err(e) = send.send_data(Bytes::new(), true) {
return Err(err.into()); return Err(e.into());
} }
send.reserve_capacity(0); send.reserve_capacity(0);
return Ok(()); return Ok(());
@ -170,7 +157,7 @@ where
if let Err(e) = send.send_data(bytes, false) { if let Err(e) = send.send_data(bytes, false) {
return Err(e.into()); return Err(e.into());
} } else {
if !b.is_empty() { if !b.is_empty() {
send.reserve_capacity(b.len()); send.reserve_capacity(b.len());
} else { } else {
@ -178,15 +165,35 @@ where
} }
continue; continue;
} }
}
Some(Err(e)) => return Err(e.into()), Some(Err(e)) => return Err(e.into()),
} }
} }
} }
pub(crate) fn handshake<Io: ConnectionIo>( // release SendRequest object
fn release<T: AsyncRead + AsyncWrite + Unpin + 'static>(
io: SendRequest<Bytes>,
pool: Option<Acquired<T>>,
created: time::Instant,
close: bool,
) {
if let Some(mut pool) = pool {
if close {
pool.close(IoConnection::new(ConnectionType::H2(io), created, None));
} else {
pool.release(IoConnection::new(ConnectionType::H2(io), created, None));
}
}
}
pub(crate) fn handshake<Io>(
io: Io, io: Io,
config: &ConnectorConfig, config: &ConnectorConfig,
) -> impl Future<Output = Result<(SendRequest<Bytes>, Connection<Io, Bytes>), h2::Error>> { ) -> impl Future<Output = Result<(SendRequest<Bytes>, Connection<Io, Bytes>), h2::Error>>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
let mut builder = Builder::new(); let mut builder = Builder::new();
builder builder
.initial_window_size(config.stream_window_size) .initial_window_size(config.stream_window_size)

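One side of the Content-Length hunk above formats the length through an itoa stack buffer instead of `format!`. A short sketch of that approach, assuming itoa 1.x and the http crate:

use http::header::HeaderValue;

fn content_length(len: u64) -> HeaderValue {
    // `format` writes the integer into a stack buffer and returns a &str view of it.
    let mut buf = itoa::Buffer::new();
    HeaderValue::from_str(buf.format(len)).unwrap()
}

fn main() {
    assert_eq!(content_length(1024), HeaderValue::from_static("1024"));
}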
View File

@ -0,0 +1,21 @@
//! HTTP client API.
use http::Uri;
mod config;
mod connection;
mod connector;
mod error;
mod h1proto;
mod h2proto;
mod pool;
pub use self::connection::Connection;
pub use self::connector::Connector;
pub use self::error::{ConnectError, FreezeRequestError, InvalidUrl, SendRequestError};
pub use self::pool::Protocol;
#[derive(Clone)]
pub struct Connect {
pub uri: Uri,
pub addr: Option<std::net::SocketAddr>,
}
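
A short sketch of constructing the `Connect` message defined above; it reuses this module's `http::Uri` import, and the pool keys pooled connections by the URI authority:

fn example_connect() -> Connect {
    Connect {
        uri: Uri::from_static("http://example.com:8080/index.html"),
        // `None` lets the connector resolve the host itself; a pre-resolved
        // SocketAddr can be supplied here instead.
        addr: None,
    }
}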

View File

@ -0,0 +1,644 @@
use std::cell::RefCell;
use std::collections::VecDeque;
use std::future::Future;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::time::{delay_for, Delay};
use actix_service::Service;
use actix_utils::{oneshot, task::LocalWaker};
use bytes::Bytes;
use futures_util::future::{poll_fn, FutureExt, LocalBoxFuture};
use fxhash::FxHashMap;
use h2::client::{Connection, SendRequest};
use http::uri::Authority;
use indexmap::IndexSet;
use pin_project::pin_project;
use slab::Slab;
use super::config::ConnectorConfig;
use super::connection::{ConnectionType, IoConnection};
use super::error::ConnectError;
use super::h2proto::handshake;
use super::Connect;
#[derive(Clone, Copy, PartialEq)]
/// Protocol version
pub enum Protocol {
Http1,
Http2,
}
#[derive(Hash, Eq, PartialEq, Clone, Debug)]
pub(crate) struct Key {
authority: Authority,
}
impl From<Authority> for Key {
fn from(authority: Authority) -> Key {
Key { authority }
}
}
/// Connection pool.
pub(crate) struct ConnectionPool<T, Io: 'static>(Rc<RefCell<T>>, Rc<RefCell<Inner<Io>>>);
impl<T, Io> ConnectionPool<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
pub(crate) fn new(connector: T, config: ConnectorConfig) -> Self {
let connector_rc = Rc::new(RefCell::new(connector));
let inner_rc = Rc::new(RefCell::new(Inner {
config,
acquired: 0,
waiters: Slab::new(),
waiters_queue: IndexSet::new(),
available: FxHashMap::default(),
waker: LocalWaker::new(),
}));
// start support future
actix_rt::spawn(ConnectorPoolSupport {
connector: Rc::clone(&connector_rc),
inner: Rc::clone(&inner_rc),
});
ConnectionPool(connector_rc, inner_rc)
}
}
impl<T, Io> Clone for ConnectionPool<T, Io>
where
Io: 'static,
{
fn clone(&self) -> Self {
ConnectionPool(self.0.clone(), self.1.clone())
}
}
impl<T, Io> Drop for ConnectionPool<T, Io> {
fn drop(&mut self) {
// wake up the ConnectorPoolSupport when dropping so it can exit properly.
self.1.borrow().waker.wake();
}
}
impl<T, Io> Service for ConnectionPool<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
type Request = Connect;
type Response = IoConnection<Io>;
type Error = ConnectError;
type Future = LocalBoxFuture<'static, Result<IoConnection<Io>, ConnectError>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.poll_ready(cx)
}
fn call(&mut self, req: Connect) -> Self::Future {
let mut connector = self.0.clone();
let inner = self.1.clone();
let fut = async move {
let key = if let Some(authority) = req.uri.authority() {
authority.clone().into()
} else {
return Err(ConnectError::Unresolved);
};
// acquire connection
match poll_fn(|cx| Poll::Ready(inner.borrow_mut().acquire(&key, cx))).await {
Acquire::Acquired(io, created) => {
// use existing connection
Ok(IoConnection::new(
io,
created,
Some(Acquired(key, Some(inner))),
))
}
Acquire::Available => {
// open tcp connection
let (io, proto) = connector.call(req).await?;
let config = inner.borrow().config.clone();
let guard = OpenGuard::new(key, inner);
if proto == Protocol::Http1 {
Ok(IoConnection::new(
ConnectionType::H1(io),
Instant::now(),
Some(guard.consume()),
))
} else {
let (snd, connection) = handshake(io, &config).await?;
actix_rt::spawn(connection.map(|_| ()));
Ok(IoConnection::new(
ConnectionType::H2(snd),
Instant::now(),
Some(guard.consume()),
))
}
}
_ => {
// connection is not available, wait
let (rx, token) = inner.borrow_mut().wait_for(req);
let guard = WaiterGuard::new(key, token, inner);
let res = match rx.await {
Err(_) => Err(ConnectError::Disconnected),
Ok(res) => res,
};
guard.consume();
res
}
}
};
fut.boxed_local()
}
}
struct WaiterGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
key: Key,
token: usize,
inner: Option<Rc<RefCell<Inner<Io>>>>,
}
impl<Io> WaiterGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn new(key: Key, token: usize, inner: Rc<RefCell<Inner<Io>>>) -> Self {
Self {
key,
token,
inner: Some(inner),
}
}
fn consume(mut self) {
let _ = self.inner.take();
}
}
impl<Io> Drop for WaiterGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(&mut self) {
if let Some(i) = self.inner.take() {
let mut inner = i.as_ref().borrow_mut();
inner.release_waiter(&self.key, self.token);
inner.check_availability();
}
}
}
struct OpenGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
key: Key,
inner: Option<Rc<RefCell<Inner<Io>>>>,
}
impl<Io> OpenGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn new(key: Key, inner: Rc<RefCell<Inner<Io>>>) -> Self {
Self {
key,
inner: Some(inner),
}
}
fn consume(mut self) -> Acquired<Io> {
Acquired(self.key.clone(), self.inner.take())
}
}
impl<Io> Drop for OpenGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(&mut self) {
if let Some(i) = self.inner.take() {
let mut inner = i.as_ref().borrow_mut();
inner.release();
inner.check_availability();
}
}
}
enum Acquire<T> {
Acquired(ConnectionType<T>, Instant),
Available,
NotAvailable,
}
struct AvailableConnection<Io> {
io: ConnectionType<Io>,
used: Instant,
created: Instant,
}
pub(crate) struct Inner<Io> {
config: ConnectorConfig,
acquired: usize,
available: FxHashMap<Key, VecDeque<AvailableConnection<Io>>>,
waiters: Slab<
Option<(
Connect,
oneshot::Sender<Result<IoConnection<Io>, ConnectError>>,
)>,
>,
waiters_queue: IndexSet<(Key, usize)>,
waker: LocalWaker,
}
impl<Io> Inner<Io> {
fn reserve(&mut self) {
self.acquired += 1;
}
fn release(&mut self) {
self.acquired -= 1;
}
fn release_waiter(&mut self, key: &Key, token: usize) {
self.waiters.remove(token);
let _ = self.waiters_queue.shift_remove(&(key.clone(), token));
}
}
impl<Io> Inner<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
/// connection is not available, wait
fn wait_for(
&mut self,
connect: Connect,
) -> (
oneshot::Receiver<Result<IoConnection<Io>, ConnectError>>,
usize,
) {
let (tx, rx) = oneshot::channel();
let key: Key = connect.uri.authority().unwrap().clone().into();
let entry = self.waiters.vacant_entry();
let token = entry.key();
entry.insert(Some((connect, tx)));
assert!(self.waiters_queue.insert((key, token)));
(rx, token)
}
fn acquire(&mut self, key: &Key, cx: &mut Context<'_>) -> Acquire<Io> {
// check limits
if self.config.limit > 0 && self.acquired >= self.config.limit {
return Acquire::NotAvailable;
}
self.reserve();
// check if open connection is available
// cleanup stale connections at the same time
if let Some(ref mut connections) = self.available.get_mut(key) {
let now = Instant::now();
while let Some(conn) = connections.pop_back() {
// check if it is still usable
if (now - conn.used) > self.config.conn_keep_alive
|| (now - conn.created) > self.config.conn_lifetime
{
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = conn.io {
actix_rt::spawn(CloseConnection::new(io, timeout))
}
}
} else {
let mut io = conn.io;
let mut buf = [0; 2];
if let ConnectionType::H1(ref mut s) = io {
match Pin::new(s).poll_read(cx, &mut buf) {
Poll::Pending => (),
Poll::Ready(Ok(n)) if n > 0 => {
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = io {
actix_rt::spawn(CloseConnection::new(
io, timeout,
))
}
}
continue;
}
_ => continue,
}
}
return Acquire::Acquired(io, conn.created);
}
}
}
Acquire::Available
}
fn release_conn(&mut self, key: &Key, io: ConnectionType<Io>, created: Instant) {
self.acquired -= 1;
self.available
.entry(key.clone())
.or_insert_with(VecDeque::new)
.push_back(AvailableConnection {
io,
created,
used: Instant::now(),
});
self.check_availability();
}
fn release_close(&mut self, io: ConnectionType<Io>) {
self.acquired -= 1;
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = io {
actix_rt::spawn(CloseConnection::new(io, timeout))
}
}
self.check_availability();
}
fn check_availability(&self) {
if !self.waiters_queue.is_empty() && self.acquired < self.config.limit {
self.waker.wake();
}
}
}
struct CloseConnection<T> {
io: T,
timeout: Delay,
}
impl<T> CloseConnection<T>
where
T: AsyncWrite + Unpin,
{
fn new(io: T, timeout: Duration) -> Self {
CloseConnection {
io,
timeout: delay_for(timeout),
}
}
}
impl<T> Future for CloseConnection<T>
where
T: AsyncWrite + Unpin,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
let this = self.get_mut();
match Pin::new(&mut this.timeout).poll(cx) {
Poll::Ready(_) => Poll::Ready(()),
Poll::Pending => match Pin::new(&mut this.io).poll_shutdown(cx) {
Poll::Ready(_) => Poll::Ready(()),
Poll::Pending => Poll::Pending,
},
}
}
}
#[pin_project]
struct ConnectorPoolSupport<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
connector: T,
inner: Rc<RefCell<Inner<Io>>>,
}
impl<T, Io> Future for ConnectorPoolSupport<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>,
T::Future: 'static,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
if Rc::strong_count(this.inner) == 1 {
// If we are the last copy of Inner<Io>, it means the ConnectionPool is already gone
// and we are safe to exit.
return Poll::Ready(());
}
let mut inner = this.inner.borrow_mut();
inner.waker.register(cx.waker());
// check waiters
loop {
let (key, token) = {
if let Some((key, token)) = inner.waiters_queue.get_index(0) {
(key.clone(), *token)
} else {
break;
}
};
if inner.waiters.get(token).unwrap().is_none() {
continue;
}
match inner.acquire(&key, cx) {
Acquire::NotAvailable => break,
Acquire::Acquired(io, created) => {
let tx = inner.waiters.get_mut(token).unwrap().take().unwrap().1;
if let Err(conn) = tx.send(Ok(IoConnection::new(
io,
created,
Some(Acquired(key.clone(), Some(this.inner.clone()))),
))) {
let (io, created) = conn.unwrap().into_inner();
inner.release_conn(&key, io, created);
}
}
Acquire::Available => {
let (connect, tx) =
inner.waiters.get_mut(token).unwrap().take().unwrap();
OpenWaitingConnection::spawn(
key.clone(),
tx,
this.inner.clone(),
this.connector.call(connect),
inner.config.clone(),
);
}
}
let _ = inner.waiters_queue.swap_remove_index(0);
}
Poll::Pending
}
}
#[pin_project::pin_project(PinnedDrop)]
struct OpenWaitingConnection<F, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
#[pin]
fut: F,
key: Key,
h2: Option<
LocalBoxFuture<
'static,
Result<(SendRequest<Bytes>, Connection<Io, Bytes>), h2::Error>,
>,
>,
rx: Option<oneshot::Sender<Result<IoConnection<Io>, ConnectError>>>,
inner: Option<Rc<RefCell<Inner<Io>>>>,
config: ConnectorConfig,
}
impl<F, Io> OpenWaitingConnection<F, Io>
where
F: Future<Output = Result<(Io, Protocol), ConnectError>> + 'static,
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn spawn(
key: Key,
rx: oneshot::Sender<Result<IoConnection<Io>, ConnectError>>,
inner: Rc<RefCell<Inner<Io>>>,
fut: F,
config: ConnectorConfig,
) {
actix_rt::spawn(OpenWaitingConnection {
key,
fut,
h2: None,
rx: Some(rx),
inner: Some(inner),
config,
})
}
}
#[pin_project::pinned_drop]
impl<F, Io> PinnedDrop for OpenWaitingConnection<F, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(self: Pin<&mut Self>) {
if let Some(inner) = self.project().inner.take() {
let mut inner = inner.as_ref().borrow_mut();
inner.release();
inner.check_availability();
}
}
}
impl<F, Io> Future for OpenWaitingConnection<F, Io>
where
F: Future<Output = Result<(Io, Protocol), ConnectError>>,
Io: AsyncRead + AsyncWrite + Unpin,
{
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().project();
if let Some(ref mut h2) = this.h2 {
return match Pin::new(h2).poll(cx) {
Poll::Ready(Ok((snd, connection))) => {
actix_rt::spawn(connection.map(|_| ()));
let rx = this.rx.take().unwrap();
let _ = rx.send(Ok(IoConnection::new(
ConnectionType::H2(snd),
Instant::now(),
Some(Acquired(this.key.clone(), this.inner.take())),
)));
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
Poll::Ready(Err(err)) => {
let _ = this.inner.take();
if let Some(rx) = this.rx.take() {
let _ = rx.send(Err(ConnectError::H2(err)));
}
Poll::Ready(())
}
};
}
match this.fut.poll(cx) {
Poll::Ready(Err(err)) => {
let _ = this.inner.take();
if let Some(rx) = this.rx.take() {
let _ = rx.send(Err(err));
}
Poll::Ready(())
}
Poll::Ready(Ok((io, proto))) => {
if proto == Protocol::Http1 {
let rx = this.rx.take().unwrap();
let _ = rx.send(Ok(IoConnection::new(
ConnectionType::H1(io),
Instant::now(),
Some(Acquired(this.key.clone(), this.inner.take())),
)));
Poll::Ready(())
} else {
*this.h2 = Some(handshake(io, this.config).boxed_local());
self.poll(cx)
}
}
Poll::Pending => Poll::Pending,
}
}
}
pub(crate) struct Acquired<T>(Key, Option<Rc<RefCell<Inner<T>>>>);
impl<T> Acquired<T>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
pub(crate) fn close(&mut self, conn: IoConnection<T>) {
if let Some(inner) = self.1.take() {
let (io, _) = conn.into_inner();
inner.as_ref().borrow_mut().release_close(io);
}
}
pub(crate) fn release(&mut self, conn: IoConnection<T>) {
if let Some(inner) = self.1.take() {
let (io, created) = conn.into_inner();
inner
.as_ref()
.borrow_mut()
.release_conn(&self.0, io, created);
}
}
}
impl<T> Drop for Acquired<T> {
fn drop(&mut self) {
if let Some(inner) = self.1.take() {
inner.as_ref().borrow_mut().release();
}
}
}
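
The OpenGuard/WaiterGuard/Acquired types above all follow the same shape: hold a slot in the shared Inner and give it back on Drop unless the guard is explicitly consumed. A minimal standalone sketch of that guard pattern (all names here are illustrative, not the crate's):

use std::cell::RefCell;
use std::rc::Rc;

struct Counter {
    acquired: usize,
}

struct Guard {
    inner: Option<Rc<RefCell<Counter>>>,
}

impl Guard {
    fn new(inner: Rc<RefCell<Counter>>) -> Self {
        inner.borrow_mut().acquired += 1;
        Guard { inner: Some(inner) }
    }

    // Keep the slot reserved; the caller now owns the eventual release.
    fn consume(mut self) {
        self.inner.take();
    }
}

impl Drop for Guard {
    fn drop(&mut self) {
        // Dropping an unconsumed guard returns the slot, even on early exit.
        if let Some(inner) = self.inner.take() {
            inner.borrow_mut().acquired -= 1;
        }
    }
}

fn main() {
    let inner = Rc::new(RefCell::new(Counter { acquired: 0 }));

    {
        let _guard = Guard::new(Rc::clone(&inner)); // dropped without consume
    }
    assert_eq!(inner.borrow().acquired, 0);

    let guard = Guard::new(Rc::clone(&inner));
    guard.consume(); // slot stays reserved
    assert_eq!(inner.borrow().acquired, 1);
}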

View File

@ -0,0 +1,40 @@
use std::cell::RefCell;
use std::rc::Rc;
use std::task::{Context, Poll};
use actix_service::Service;
#[doc(hidden)]
/// Service that allows turning a non-`Clone` service into a service with a `Clone` impl.
///
/// # Panics
/// `CloneableService` might panic with some creative use of thread-local storage.
/// See https://github.com/actix/actix-web/issues/1295 for an example.
pub(crate) struct CloneableService<T: Service>(Rc<RefCell<T>>);
impl<T: Service> CloneableService<T> {
pub(crate) fn new(service: T) -> Self {
Self(Rc::new(RefCell::new(service)))
}
}
impl<T: Service> Clone for CloneableService<T> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
impl<T: Service> Service for CloneableService<T> {
type Request = T::Request;
type Response = T::Response;
type Error = T::Error;
type Future = T::Future;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.borrow_mut().poll_ready(cx)
}
fn call(&mut self, req: T::Request) -> Self::Future {
self.0.borrow_mut().call(req)
}
}
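
A tiny sketch of why the Rc<RefCell<T>> wrapper above is cheaply cloneable without requiring `T: Clone`, and of where the panic mentioned in the doc comment comes from: RefCell checks borrow rules at runtime, so re-entering `borrow_mut` while a borrow is still alive panics with `BorrowMutError`.

use std::cell::RefCell;
use std::rc::Rc;

fn main() {
    let a = Rc::new(RefCell::new(vec![1, 2, 3]));
    let b = Rc::clone(&a); // "cloning the service" clones only the Rc handle
    b.borrow_mut().push(4);
    // Both handles see the same inner state.
    assert_eq!(a.borrow().len(), 4);
}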

View File

@ -1,29 +1,24 @@
use std::{ use std::cell::Cell;
cell::Cell, use std::fmt::Write;
fmt::{self, Write}, use std::rc::Rc;
net, use std::time::Duration;
rc::Rc, use std::{fmt, net};
time::{Duration, SystemTime},
};
use actix_rt::{ use actix_rt::time::{delay_for, delay_until, Delay, Instant};
task::JoinHandle,
time::{interval, sleep_until, Instant, Sleep},
};
use bytes::BytesMut; use bytes::BytesMut;
use futures_util::{future, FutureExt};
use time::OffsetDateTime;
/// "Sun, 06 Nov 1994 08:49:37 GMT".len() // "Sun, 06 Nov 1994 08:49:37 GMT".len()
pub(crate) const DATE_VALUE_LENGTH: usize = 29; const DATE_VALUE_LENGTH: usize = 29;
#[derive(Debug, PartialEq, Clone, Copy)] #[derive(Debug, PartialEq, Clone, Copy)]
/// Server keep-alive setting /// Server keep-alive setting
pub enum KeepAlive { pub enum KeepAlive {
/// Keep alive in seconds /// Keep alive in seconds
Timeout(usize), Timeout(usize),
/// Rely on OS to shutdown tcp connection /// Rely on OS to shutdown tcp connection
Os, Os,
/// Disabled /// Disabled
Disabled, Disabled,
} }
@ -54,7 +49,7 @@ struct Inner {
ka_enabled: bool, ka_enabled: bool,
secure: bool, secure: bool,
local_addr: Option<std::net::SocketAddr>, local_addr: Option<std::net::SocketAddr>,
date_service: DateService, timer: DateService,
} }
impl Clone for ServiceConfig { impl Clone for ServiceConfig {
@ -96,42 +91,42 @@ impl ServiceConfig {
client_disconnect, client_disconnect,
secure, secure,
local_addr, local_addr,
date_service: DateService::new(), timer: DateService::new(),
})) }))
} }
/// Returns true if connection is secure (HTTPS)
#[inline] #[inline]
/// Returns true if connection is secure(https)
pub fn secure(&self) -> bool { pub fn secure(&self) -> bool {
self.0.secure self.0.secure
} }
/// Returns the local address that this server is bound to.
///
/// Returns `None` for connections via UDS (Unix Domain Socket).
#[inline] #[inline]
/// Returns the local address that this server is bound to.
pub fn local_addr(&self) -> Option<net::SocketAddr> { pub fn local_addr(&self) -> Option<net::SocketAddr> {
self.0.local_addr self.0.local_addr
} }
/// Keep alive duration if configured.
#[inline] #[inline]
/// Keep alive duration if configured.
pub fn keep_alive(&self) -> Option<Duration> { pub fn keep_alive(&self) -> Option<Duration> {
self.0.keep_alive self.0.keep_alive
} }
/// Return state of connection keep-alive functionality
#[inline] #[inline]
/// Return state of connection keep-alive functionality
pub fn keep_alive_enabled(&self) -> bool { pub fn keep_alive_enabled(&self) -> bool {
self.0.ka_enabled self.0.ka_enabled
} }
/// Client timeout for first request.
#[inline] #[inline]
pub fn client_timer(&self) -> Option<Sleep> { /// Client timeout for first request.
pub fn client_timer(&self) -> Option<Delay> {
let delay_time = self.0.client_timeout; let delay_time = self.0.client_timeout;
if delay_time != 0 { if delay_time != 0 {
Some(sleep_until(self.now() + Duration::from_millis(delay_time))) Some(delay_until(
self.0.timer.now() + Duration::from_millis(delay_time),
))
} else { } else {
None None
} }
@ -141,7 +136,7 @@ impl ServiceConfig {
pub fn client_timer_expire(&self) -> Option<Instant> { pub fn client_timer_expire(&self) -> Option<Instant> {
let delay = self.0.client_timeout; let delay = self.0.client_timeout;
if delay != 0 { if delay != 0 {
Some(self.now() + Duration::from_millis(delay)) Some(self.0.timer.now() + Duration::from_millis(delay))
} else { } else {
None None
} }
@ -151,26 +146,34 @@ impl ServiceConfig {
pub fn client_disconnect_timer(&self) -> Option<Instant> { pub fn client_disconnect_timer(&self) -> Option<Instant> {
let delay = self.0.client_disconnect; let delay = self.0.client_disconnect;
if delay != 0 { if delay != 0 {
Some(self.now() + Duration::from_millis(delay)) Some(self.0.timer.now() + Duration::from_millis(delay))
} else { } else {
None None
} }
} }
/// Return keep-alive timer delay is configured.
#[inline] #[inline]
pub fn keep_alive_timer(&self) -> Option<Sleep> { /// Return keep-alive timer delay is configured.
self.keep_alive().map(|ka| sleep_until(self.now() + ka)) pub fn keep_alive_timer(&self) -> Option<Delay> {
if let Some(ka) = self.0.keep_alive {
Some(delay_until(self.0.timer.now() + ka))
} else {
None
}
} }
/// Keep-alive expire time /// Keep-alive expire time
pub fn keep_alive_expire(&self) -> Option<Instant> { pub fn keep_alive_expire(&self) -> Option<Instant> {
self.keep_alive().map(|ka| self.now() + ka) if let Some(ka) = self.0.keep_alive {
Some(self.0.timer.now() + ka)
} else {
None
}
} }
#[inline] #[inline]
pub(crate) fn now(&self) -> Instant { pub(crate) fn now(&self) -> Instant {
self.0.date_service.now() self.0.timer.now()
} }
#[doc(hidden)] #[doc(hidden)]
@ -178,7 +181,7 @@ impl ServiceConfig {
let mut buf: [u8; 39] = [0; 39]; let mut buf: [u8; 39] = [0; 39];
buf[..6].copy_from_slice(b"date: "); buf[..6].copy_from_slice(b"date: ");
self.0 self.0
.date_service .timer
.set_date(|date| buf[6..35].copy_from_slice(&date.bytes)); .set_date(|date| buf[6..35].copy_from_slice(&date.bytes));
buf[35..].copy_from_slice(b"\r\n\r\n"); buf[35..].copy_from_slice(b"\r\n\r\n");
dst.extend_from_slice(&buf); dst.extend_from_slice(&buf);
@ -186,7 +189,7 @@ impl ServiceConfig {
pub(crate) fn set_date_header(&self, dst: &mut BytesMut) { pub(crate) fn set_date_header(&self, dst: &mut BytesMut) {
self.0 self.0
.date_service .timer
.set_date(|date| dst.extend_from_slice(&date.bytes)); .set_date(|date| dst.extend_from_slice(&date.bytes));
} }
} }
@ -209,7 +212,12 @@ impl Date {
fn update(&mut self) { fn update(&mut self) {
self.pos = 0; self.pos = 0;
write!(self, "{}", httpdate::fmt_http_date(SystemTime::now())).unwrap(); write!(
self,
"{}",
OffsetDateTime::now_utc().format("%a, %d %b %Y %H:%M:%S GMT")
)
.unwrap();
} }
} }
@ -222,102 +230,57 @@ impl fmt::Write for Date {
} }
} }
/// Service for update Date and Instant periodically at 500 millis interval. #[derive(Clone)]
struct DateService { struct DateService(Rc<DateServiceInner>);
current: Rc<Cell<(Date, Instant)>>,
handle: JoinHandle<()>, struct DateServiceInner {
current: Cell<Option<(Date, Instant)>>,
} }
impl Drop for DateService { impl DateServiceInner {
fn drop(&mut self) { fn new() -> Self {
// stop the timer update async task on drop. DateServiceInner {
self.handle.abort(); current: Cell::new(None),
}
}
fn reset(&self) {
self.current.take();
}
fn update(&self) {
let now = Instant::now();
let date = Date::new();
self.current.set(Some((date, now)));
} }
} }
impl DateService { impl DateService {
fn new() -> Self { fn new() -> Self {
// shared date and timer for DateService and update async task. DateService(Rc::new(DateServiceInner::new()))
let current = Rc::new(Cell::new((Date::new(), Instant::now())));
let current_clone = Rc::clone(&current);
// spawn an async task sleep for 500 milli and update current date/timer in a loop.
// handle is used to stop the task on DateService drop.
let handle = actix_rt::spawn(async move {
#[cfg(test)]
let _notify = notify_on_drop::NotifyOnDrop::new();
let mut interval = interval(Duration::from_millis(500));
loop {
let now = interval.tick().await;
let date = Date::new();
current_clone.set((date, now));
} }
});
DateService { current, handle } fn check_date(&self) {
if self.0.current.get().is_none() {
self.0.update();
// periodic date update
let s = self.clone();
actix_rt::spawn(delay_for(Duration::from_millis(500)).then(move |_| {
s.0.reset();
future::ready(())
}));
}
} }
fn now(&self) -> Instant { fn now(&self) -> Instant {
self.current.get().1 self.check_date();
self.0.current.get().unwrap().1
} }
fn set_date<F: FnMut(&Date)>(&self, mut f: F) { fn set_date<F: FnMut(&Date)>(&self, mut f: F) {
f(&self.current.get().0); self.check_date();
} f(&self.0.current.get().unwrap().0);
}
// TODO: move to a util module for testing all spawn handle drop style tasks.
/// Test Module for checking the drop state of certain async tasks that are spawned
/// with `actix_rt::spawn`
///
/// The target task must explicitly create a `NotifyOnDrop` when spawning the task
#[cfg(test)]
mod notify_on_drop {
use std::cell::RefCell;
thread_local! {
static NOTIFY_DROPPED: RefCell<Option<bool>> = RefCell::new(None);
}
/// Check if the spawned task is dropped.
///
/// # Panics
/// Panics when there was no `NotifyOnDrop` instance on current thread.
pub(crate) fn is_dropped() -> bool {
NOTIFY_DROPPED.with(|bool| {
bool.borrow()
.expect("No NotifyOnDrop existed on current thread")
})
}
pub(crate) struct NotifyOnDrop;
impl NotifyOnDrop {
/// # Panics
///
/// When constructing multiple instances on any given thread.
pub(crate) fn new() -> Self {
NOTIFY_DROPPED.with(|bool| {
let mut bool = bool.borrow_mut();
if bool.is_some() {
panic!("NotifyOnDrop existed on current thread");
} else {
*bool = Some(false);
}
});
NotifyOnDrop
}
}
impl Drop for NotifyOnDrop {
fn drop(&mut self) {
NOTIFY_DROPPED.with(|bool| {
if let Some(b) = bool.borrow_mut().as_mut() {
*b = true;
}
});
}
} }
} }
@ -325,67 +288,14 @@ mod notify_on_drop {
mod tests { mod tests {
use super::*; use super::*;
use actix_rt::{task::yield_now, time::sleep}; // Test modifying the date from within the closure
// passed to `set_date`
#[actix_rt::test] #[test]
async fn test_date_service_update() { fn test_evil_date() {
let settings = ServiceConfig::new(KeepAlive::Os, 0, 0, false, None); let service = DateService::new();
// Make sure that `check_date` doesn't try to spawn a task
yield_now().await; service.0.update();
service.set_date(|_| service.0.reset());
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf1);
let now1 = settings.now();
sleep_until(Instant::now() + Duration::from_secs(2)).await;
yield_now().await;
let now2 = settings.now();
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf2);
assert_ne!(now1, now2);
assert_ne!(buf1, buf2);
drop(settings);
// Ensure the task will drop eventually
let mut times = 0;
while !notify_on_drop::is_dropped() {
sleep(Duration::from_millis(100)).await;
times += 1;
assert!(times < 10, "Timeout waiting for task drop");
}
}
#[actix_rt::test]
async fn test_date_service_drop() {
let service = Rc::new(DateService::new());
// yield so date service have a chance to register the spawned timer update task.
yield_now().await;
let clone1 = service.clone();
let clone2 = service.clone();
let clone3 = service.clone();
drop(clone1);
assert!(!notify_on_drop::is_dropped());
drop(clone2);
assert!(!notify_on_drop::is_dropped());
drop(clone3);
assert!(!notify_on_drop::is_dropped());
drop(service);
// Ensure the task will drop eventually
let mut times = 0;
while !notify_on_drop::is_dropped() {
sleep(Duration::from_millis(100)).await;
times += 1;
assert!(times < 10, "Timeout waiting for task drop");
}
} }
#[test] #[test]

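The Date header cache above stores a fixed-width IMF-fixdate string (DATE_VALUE_LENGTH = 29, as in "Sun, 06 Nov 1994 08:49:37 GMT"). A small sketch of producing such a value with the httpdate crate, which one side of the diff uses:

use std::time::SystemTime;

fn main() {
    // IMF-fixdate is always 29 bytes, which is what lets the header be cached
    // in a fixed-size buffer.
    let date = httpdate::fmt_http_date(SystemTime::now());
    assert_eq!(date.len(), 29);
    println!("date: {}", date);
}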
View File

@ -1,41 +1,25 @@
//! Stream decoders. use std::future::Future;
use std::io::{self, Write};
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{ use actix_threadpool::{run, CpuFuture};
future::Future, use brotli::DecompressorWriter as BrotliDecoder;
io::{self, Write as _},
pin::Pin,
task::{Context, Poll},
};
use actix_rt::task::{spawn_blocking, JoinHandle};
use bytes::Bytes; use bytes::Bytes;
use flate2::write::{GzDecoder, ZlibDecoder};
use futures_core::{ready, Stream}; use futures_core::{ready, Stream};
#[cfg(feature = "compress-brotli")] use super::Writer;
use brotli2::write::BrotliDecoder; use crate::error::PayloadError;
use crate::http::header::{ContentEncoding, HeaderMap, CONTENT_ENCODING};
#[cfg(feature = "compress-gzip")] const INPLACE: usize = 2049;
use flate2::write::{GzDecoder, ZlibDecoder};
#[cfg(feature = "compress-zstd")] pub struct Decoder<S> {
use zstd::stream::write::Decoder as ZstdDecoder;
use crate::{
encoding::Writer,
error::{BlockingError, PayloadError},
header::{ContentEncoding, HeaderMap, CONTENT_ENCODING},
};
const MAX_CHUNK_SIZE_DECODE_IN_PLACE: usize = 2049;
pin_project_lite::pin_project! {
pub struct Decoder<S> {
decoder: Option<ContentDecoder>, decoder: Option<ContentDecoder>,
#[pin]
stream: S, stream: S,
eof: bool, eof: bool,
fut: Option<JoinHandle<Result<(Option<Bytes>, ContentDecoder), io::Error>>>, fut: Option<CpuFuture<(Option<Bytes>, ContentDecoder), io::Error>>,
}
} }
impl<S> Decoder<S> impl<S> Decoder<S>
@ -46,28 +30,17 @@ where
#[inline] #[inline]
pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> { pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> {
let decoder = match encoding { let decoder = match encoding {
#[cfg(feature = "compress-brotli")] ContentEncoding::Br => Some(ContentDecoder::Br(Box::new(
ContentEncoding::Brotli => Some(ContentDecoder::Brotli(Box::new( BrotliDecoder::new(Writer::new(), 8 * 1024),
BrotliDecoder::new(Writer::new()),
))), ))),
#[cfg(feature = "compress-gzip")]
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new( ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(
ZlibDecoder::new(Writer::new()), ZlibDecoder::new(Writer::new()),
))), ))),
#[cfg(feature = "compress-gzip")] ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(
ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(GzDecoder::new( GzDecoder::new(Writer::new()),
Writer::new(),
)))),
#[cfg(feature = "compress-zstd")]
ContentEncoding::Zstd => Some(ContentDecoder::Zstd(Box::new(
ZstdDecoder::new(Writer::new()).expect(
"Failed to create zstd decoder. This is a bug. \
Please report it to the actix-web repository.",
),
))), ))),
_ => None, _ => None,
}; };
Decoder { Decoder {
decoder, decoder,
stream, stream,
@ -80,11 +53,15 @@ where
#[inline] #[inline]
pub fn from_headers(stream: S, headers: &HeaderMap) -> Decoder<S> { pub fn from_headers(stream: S, headers: &HeaderMap) -> Decoder<S> {
// check content-encoding // check content-encoding
let encoding = headers let encoding = if let Some(enc) = headers.get(&CONTENT_ENCODING) {
.get(&CONTENT_ENCODING) if let Ok(enc) = enc.to_str() {
.and_then(|val| val.to_str().ok()) ContentEncoding::from(enc)
.and_then(|x| x.parse().ok()) } else {
.unwrap_or(ContentEncoding::Identity); ContentEncoding::Identity
}
} else {
ContentEncoding::Identity
};
Self::new(stream, encoding) Self::new(stream, encoding)
} }
@ -92,59 +69,55 @@ where
impl<S> Stream for Decoder<S> impl<S> Stream for Decoder<S>
where where
S: Stream<Item = Result<Bytes, PayloadError>>, S: Stream<Item = Result<Bytes, PayloadError>> + Unpin,
{ {
type Item = Result<Bytes, PayloadError>; type Item = Result<Bytes, PayloadError>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { fn poll_next(
let mut this = self.project(); mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
loop { loop {
if let Some(ref mut fut) = this.fut { if let Some(ref mut fut) = self.fut {
let (chunk, decoder) = let (chunk, decoder) = match ready!(Pin::new(fut).poll(cx)) {
ready!(Pin::new(fut).poll(cx)).map_err(|_| BlockingError)??; Ok(item) => item,
Err(e) => return Poll::Ready(Some(Err(e.into()))),
*this.decoder = Some(decoder); };
this.fut.take(); self.decoder = Some(decoder);
self.fut.take();
if let Some(chunk) = chunk { if let Some(chunk) = chunk {
return Poll::Ready(Some(Ok(chunk))); return Poll::Ready(Some(Ok(chunk)));
} }
} }
if *this.eof { if self.eof {
return Poll::Ready(None); return Poll::Ready(None);
} }
match ready!(this.stream.as_mut().poll_next(cx)) { match Pin::new(&mut self.stream).poll_next(cx) {
Some(Err(err)) => return Poll::Ready(Some(Err(err))), Poll::Ready(Some(Err(err))) => return Poll::Ready(Some(Err(err))),
Poll::Ready(Some(Ok(chunk))) => {
Some(Ok(chunk)) => { if let Some(mut decoder) = self.decoder.take() {
if let Some(mut decoder) = this.decoder.take() { if chunk.len() < INPLACE {
if chunk.len() < MAX_CHUNK_SIZE_DECODE_IN_PLACE {
let chunk = decoder.feed_data(chunk)?; let chunk = decoder.feed_data(chunk)?;
*this.decoder = Some(decoder); self.decoder = Some(decoder);
if let Some(chunk) = chunk { if let Some(chunk) = chunk {
return Poll::Ready(Some(Ok(chunk))); return Poll::Ready(Some(Ok(chunk)));
} }
} else { } else {
*this.fut = Some(spawn_blocking(move || { self.fut = Some(run(move || {
let chunk = decoder.feed_data(chunk)?; let chunk = decoder.feed_data(chunk)?;
Ok((chunk, decoder)) Ok((chunk, decoder))
})); }));
} }
continue; continue;
} else { } else {
return Poll::Ready(Some(Ok(chunk))); return Poll::Ready(Some(Ok(chunk)));
} }
} }
Poll::Ready(None) => {
None => { self.eof = true;
*this.eof = true; return if let Some(mut decoder) = self.decoder.take() {
return if let Some(mut decoder) = this.decoder.take() {
match decoder.feed_eof() { match decoder.feed_eof() {
Ok(Some(res)) => Poll::Ready(Some(Ok(res))), Ok(Some(res)) => Poll::Ready(Some(Ok(res))),
Ok(None) => Poll::Ready(None), Ok(None) => Poll::Ready(None),
@ -154,32 +127,25 @@ where
Poll::Ready(None) Poll::Ready(None)
}; };
} }
Poll::Pending => break,
} }
} }
Poll::Pending
} }
} }
enum ContentDecoder { enum ContentDecoder {
#[cfg(feature = "compress-gzip")]
Deflate(Box<ZlibDecoder<Writer>>), Deflate(Box<ZlibDecoder<Writer>>),
#[cfg(feature = "compress-gzip")]
Gzip(Box<GzDecoder<Writer>>), Gzip(Box<GzDecoder<Writer>>),
#[cfg(feature = "compress-brotli")] Br(Box<BrotliDecoder<Writer>>),
Brotli(Box<BrotliDecoder<Writer>>),
// We need explicit 'static lifetime here because ZstdDecoder need lifetime
// argument, and we use `spawn_blocking` in `Decoder::poll_next` that require `FnOnce() -> R + Send + 'static`
#[cfg(feature = "compress-zstd")]
Zstd(Box<ZstdDecoder<'static, Writer>>),
} }
impl ContentDecoder { impl ContentDecoder {
fn feed_eof(&mut self) -> io::Result<Option<Bytes>> { fn feed_eof(&mut self) -> io::Result<Option<Bytes>> {
match self { match self {
#[cfg(feature = "compress-brotli")] ContentDecoder::Br(ref mut decoder) => match decoder.flush() {
ContentDecoder::Brotli(ref mut decoder) => match decoder.flush() {
Ok(()) => { Ok(()) => {
let b = decoder.get_mut().take(); let b = decoder.get_mut().take();
if !b.is_empty() { if !b.is_empty() {
Ok(Some(b)) Ok(Some(b))
} else { } else {
@ -188,12 +154,9 @@ impl ContentDecoder {
} }
Err(e) => Err(e), Err(e) => Err(e),
}, },
#[cfg(feature = "compress-gzip")]
ContentDecoder::Gzip(ref mut decoder) => match decoder.try_finish() { ContentDecoder::Gzip(ref mut decoder) => match decoder.try_finish() {
Ok(_) => { Ok(_) => {
let b = decoder.get_mut().take(); let b = decoder.get_mut().take();
if !b.is_empty() { if !b.is_empty() {
Ok(Some(b)) Ok(Some(b))
} else { } else {
@ -202,8 +165,6 @@ impl ContentDecoder {
} }
Err(e) => Err(e), Err(e) => Err(e),
}, },
#[cfg(feature = "compress-gzip")]
ContentDecoder::Deflate(ref mut decoder) => match decoder.try_finish() { ContentDecoder::Deflate(ref mut decoder) => match decoder.try_finish() {
Ok(_) => { Ok(_) => {
let b = decoder.get_mut().take(); let b = decoder.get_mut().take();
@ -215,30 +176,15 @@ impl ContentDecoder {
} }
Err(e) => Err(e), Err(e) => Err(e),
}, },
#[cfg(feature = "compress-zstd")]
ContentDecoder::Zstd(ref mut decoder) => match decoder.flush() {
Ok(_) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
} }
} }
fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> { fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> {
match self { match self {
#[cfg(feature = "compress-brotli")] ContentDecoder::Br(ref mut decoder) => match decoder.write_all(&data) {
ContentDecoder::Brotli(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => { Ok(_) => {
decoder.flush()?; decoder.flush()?;
let b = decoder.get_mut().take(); let b = decoder.get_mut().take();
if !b.is_empty() { if !b.is_empty() {
Ok(Some(b)) Ok(Some(b))
} else { } else {
@ -247,13 +193,10 @@ impl ContentDecoder {
} }
Err(e) => Err(e), Err(e) => Err(e),
}, },
#[cfg(feature = "compress-gzip")]
ContentDecoder::Gzip(ref mut decoder) => match decoder.write_all(&data) { ContentDecoder::Gzip(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => { Ok(_) => {
decoder.flush()?; decoder.flush()?;
let b = decoder.get_mut().take(); let b = decoder.get_mut().take();
if !b.is_empty() { if !b.is_empty() {
Ok(Some(b)) Ok(Some(b))
} else { } else {
@ -262,27 +205,9 @@ impl ContentDecoder {
} }
Err(e) => Err(e), Err(e) => Err(e),
}, },
#[cfg(feature = "compress-gzip")]
ContentDecoder::Deflate(ref mut decoder) => match decoder.write_all(&data) { ContentDecoder::Deflate(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => { Ok(_) => {
decoder.flush()?; decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
#[cfg(feature = "compress-zstd")]
ContentDecoder::Zstd(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take(); let b = decoder.get_mut().take();
if !b.is_empty() { if !b.is_empty() {
Ok(Some(b)) Ok(Some(b))


@ -1,210 +1,166 @@
//! Stream encoders. //! Stream encoder
use std::future::Future;
use std::io::{self, Write};
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{ use actix_threadpool::{run, CpuFuture};
error::Error as StdError, use brotli::CompressorWriter as BrotliEncoder;
future::Future,
io::{self, Write as _},
pin::Pin,
task::{Context, Poll},
};
use actix_rt::task::{spawn_blocking, JoinHandle};
use bytes::Bytes; use bytes::Bytes;
use derive_more::Display;
use futures_core::ready;
use pin_project_lite::pin_project;
#[cfg(feature = "compress-brotli")]
use brotli2::write::BrotliEncoder;
#[cfg(feature = "compress-gzip")]
use flate2::write::{GzEncoder, ZlibEncoder}; use flate2::write::{GzEncoder, ZlibEncoder};
use futures_core::ready;
use pin_project::pin_project;
#[cfg(feature = "compress-zstd")] use crate::body::{Body, BodySize, MessageBody, ResponseBody};
use zstd::stream::write::Encoder as ZstdEncoder; use crate::http::header::{ContentEncoding, CONTENT_ENCODING};
use crate::http::{HeaderValue, StatusCode};
use crate::{Error, ResponseHead};
use super::Writer; use super::Writer;
use crate::{
body::{self, BodySize, MessageBody},
error::BlockingError,
header::{self, ContentEncoding, HeaderValue, CONTENT_ENCODING},
ResponseHead, StatusCode,
};
const MAX_CHUNK_SIZE_ENCODE_IN_PLACE: usize = 1024; const INPLACE: usize = 1024;
pin_project! { #[pin_project]
pub struct Encoder<B> { pub struct Encoder<B> {
eof: bool,
#[pin] #[pin]
body: EncoderBody<B>, body: EncoderBody<B>,
encoder: Option<ContentEncoder>, encoder: Option<ContentEncoder>,
fut: Option<JoinHandle<Result<ContentEncoder, io::Error>>>, fut: Option<CpuFuture<ContentEncoder, io::Error>>,
eof: bool,
}
} }
impl<B: MessageBody> Encoder<B> { impl<B: MessageBody> Encoder<B> {
fn none() -> Self { pub fn response(
Encoder { encoding: ContentEncoding,
body: EncoderBody::None { head: &mut ResponseHead,
body: body::None::new(), body: ResponseBody<B>,
}, ) -> ResponseBody<Encoder<B>> {
encoder: None, let can_encode = !(head.headers().contains_key(&CONTENT_ENCODING)
fut: None,
eof: true,
}
}
pub fn response(encoding: ContentEncoding, head: &mut ResponseHead, body: B) -> Self {
// no need to compress an empty body
if matches!(body.size(), BodySize::None) {
return Self::none();
}
let should_encode = !(head.headers().contains_key(&CONTENT_ENCODING)
|| head.status == StatusCode::SWITCHING_PROTOCOLS || head.status == StatusCode::SWITCHING_PROTOCOLS
|| head.status == StatusCode::NO_CONTENT || head.status == StatusCode::NO_CONTENT
|| encoding == ContentEncoding::Identity); || encoding == ContentEncoding::Identity
|| encoding == ContentEncoding::Auto);
let body = match body.try_into_bytes() { let body = match body {
Ok(body) => EncoderBody::Full { body }, ResponseBody::Other(b) => match b {
Err(body) => EncoderBody::Stream { body }, Body::None => return ResponseBody::Other(Body::None),
}; Body::Empty => return ResponseBody::Other(Body::Empty),
Body::Bytes(buf) => {
if should_encode { if can_encode {
// wrap body only if encoder is feature-enabled EncoderBody::Bytes(buf)
if let Some(enc) = ContentEncoder::select(encoding) {
update_head(encoding, head);
return Encoder {
body,
encoder: Some(enc),
fut: None,
eof: false,
};
}
}
Encoder {
body,
encoder: None,
fut: None,
eof: false,
}
}
}
pin_project! {
#[project = EncoderBodyProj]
enum EncoderBody<B> {
None { body: body::None },
Full { body: Bytes },
Stream { #[pin] body: B },
}
}
impl<B> MessageBody for EncoderBody<B>
where
B: MessageBody,
{
type Error = EncoderError;
#[inline]
fn size(&self) -> BodySize {
match self {
EncoderBody::None { body } => body.size(),
EncoderBody::Full { body } => body.size(),
EncoderBody::Stream { body } => body.size(),
}
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match self.project() {
EncoderBodyProj::None { body } => {
Pin::new(body).poll_next(cx).map_err(|err| match err {})
}
EncoderBodyProj::Full { body } => {
Pin::new(body).poll_next(cx).map_err(|err| match err {})
}
EncoderBodyProj::Stream { body } => body
.poll_next(cx)
.map_err(|err| EncoderError::Body(err.into())),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self>
where
Self: Sized,
{
match self {
EncoderBody::None { body } => Ok(body.try_into_bytes().unwrap()),
EncoderBody::Full { body } => Ok(body.try_into_bytes().unwrap()),
_ => Err(self),
}
}
}
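
Both EncoderBody above and Encoder below plug into the MessageBody trait used here (size, poll_next, and the try_into_bytes fast path). As a point of reference, a hedged sketch of a minimal one-shot body against the same trait, assuming an actix-http 3.x dependency where try_into_bytes has a default implementation and Infallible satisfies the error bound:

```rust
// Hedged sketch of a one-shot custom body; assumes actix-http 3.x's
// `body::{BodySize, MessageBody}` as shown in the surrounding code.
use std::{
    convert::Infallible,
    pin::Pin,
    task::{Context, Poll},
};

use actix_http::body::{BodySize, MessageBody};
use bytes::Bytes;

struct Once(Option<Bytes>);

impl MessageBody for Once {
    type Error = Infallible;

    fn size(&self) -> BodySize {
        BodySize::Sized(self.0.as_ref().map_or(0, |b| b.len() as u64))
    }

    fn poll_next(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Bytes, Self::Error>>> {
        // Yield the buffered chunk once, then signal end-of-stream.
        Poll::Ready(self.get_mut().0.take().map(Ok))
    }
}
```
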
impl<B> MessageBody for Encoder<B>
where
B: MessageBody,
{
type Error = EncoderError;
#[inline]
fn size(&self) -> BodySize {
if self.encoder.is_some() {
BodySize::Stream
} else { } else {
self.body.size() return ResponseBody::Other(Body::Bytes(buf));
}
}
Body::Message(stream) => EncoderBody::BoxedStream(stream),
},
ResponseBody::Body(stream) => EncoderBody::Stream(stream),
};
if can_encode {
// Modify response body only if encoder is not None
if let Some(enc) = ContentEncoder::encoder(encoding) {
update_head(encoding, head);
head.no_chunking(false);
return ResponseBody::Body(Encoder {
body,
eof: false,
fut: None,
encoder: Some(enc),
});
}
}
ResponseBody::Body(Encoder {
body,
eof: false,
fut: None,
encoder: None,
})
}
}
#[pin_project(project = EncoderBodyProj)]
enum EncoderBody<B> {
Bytes(Bytes),
Stream(#[pin] B),
BoxedStream(Box<dyn MessageBody + Unpin>),
}
impl<B: MessageBody> MessageBody for EncoderBody<B> {
fn size(&self) -> BodySize {
match self {
EncoderBody::Bytes(ref b) => b.size(),
EncoderBody::Stream(ref b) => b.size(),
EncoderBody::BoxedStream(ref b) => b.size(),
} }
} }
fn poll_next( fn poll_next(
self: Pin<&mut Self>, self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> { ) -> Poll<Option<Result<Bytes, Error>>> {
let mut this = self.project(); match self.project() {
EncoderBodyProj::Bytes(b) => {
if b.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(std::mem::take(b))))
}
}
EncoderBodyProj::Stream(b) => b.poll_next(cx),
EncoderBodyProj::BoxedStream(ref mut b) => {
Pin::new(b.as_mut()).poll_next(cx)
}
}
}
}
impl<B: MessageBody> MessageBody for Encoder<B> {
fn size(&self) -> BodySize {
if self.encoder.is_none() {
self.body.size()
} else {
BodySize::Stream
}
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
let mut this = self.project();
loop { loop {
if *this.eof { if *this.eof {
return Poll::Ready(None); return Poll::Ready(None);
} }
if let Some(ref mut fut) = this.fut { if let Some(ref mut fut) = this.fut {
let mut encoder = ready!(Pin::new(fut).poll(cx)) let mut encoder = match ready!(Pin::new(fut).poll(cx)) {
.map_err(|_| EncoderError::Blocking(BlockingError))? Ok(item) => item,
.map_err(EncoderError::Io)?; Err(e) => return Poll::Ready(Some(Err(e.into()))),
};
let chunk = encoder.take(); let chunk = encoder.take();
*this.encoder = Some(encoder); *this.encoder = Some(encoder);
this.fut.take(); this.fut.take();
if !chunk.is_empty() { if !chunk.is_empty() {
return Poll::Ready(Some(Ok(chunk))); return Poll::Ready(Some(Ok(chunk)));
} }
} }
let result = ready!(this.body.as_mut().poll_next(cx)); let result = this.body.as_mut().poll_next(cx);
match result { match result {
Some(Err(err)) => return Poll::Ready(Some(Err(err))), Poll::Ready(Some(Ok(chunk))) => {
Some(Ok(chunk)) => {
if let Some(mut encoder) = this.encoder.take() { if let Some(mut encoder) = this.encoder.take() {
if chunk.len() < MAX_CHUNK_SIZE_ENCODE_IN_PLACE { if chunk.len() < INPLACE {
encoder.write(&chunk).map_err(EncoderError::Io)?; encoder.write(&chunk)?;
let chunk = encoder.take(); let chunk = encoder.take();
*this.encoder = Some(encoder); *this.encoder = Some(encoder);
if !chunk.is_empty() { if !chunk.is_empty() {
return Poll::Ready(Some(Ok(chunk))); return Poll::Ready(Some(Ok(chunk)));
} }
} else { } else {
*this.fut = Some(spawn_blocking(move || { *this.fut = Some(run(move || {
encoder.write(&chunk)?; encoder.write(&chunk)?;
Ok(encoder) Ok(encoder)
})); }));
@ -213,11 +169,9 @@ where
return Poll::Ready(Some(Ok(chunk))); return Poll::Ready(Some(Ok(chunk)));
} }
} }
Poll::Ready(None) => {
None => {
if let Some(encoder) = this.encoder.take() { if let Some(encoder) = this.encoder.take() {
let chunk = encoder.finish().map_err(EncoderError::Io)?; let chunk = encoder.finish()?;
if chunk.is_empty() { if chunk.is_empty() {
return Poll::Ready(None); return Poll::Ready(None);
} else { } else {
@ -228,80 +182,42 @@ where
return Poll::Ready(None); return Poll::Ready(None);
} }
} }
} val => return val,
}
}
#[inline]
fn try_into_bytes(mut self) -> Result<Bytes, Self>
where
Self: Sized,
{
if self.encoder.is_some() {
Err(self)
} else {
match self.body.try_into_bytes() {
Ok(body) => Ok(body),
Err(body) => {
self.body = body;
Err(self)
}
} }
} }
} }
} }
fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) { fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) {
head.headers_mut() head.headers_mut().insert(
.insert(header::CONTENT_ENCODING, encoding.to_header_value()); CONTENT_ENCODING,
head.headers_mut() HeaderValue::from_static(encoding.as_str()),
.insert(header::VARY, HeaderValue::from_static("accept-encoding")); );
head.no_chunking(false);
} }
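
update_head is what keeps compressed responses cache-safe: it stamps the negotiated Content-Encoding and adds Vary: accept-encoding so a cache does not serve a gzip body to a client that cannot decode it. An illustration of the resulting header pair, built with the http crate alone (nothing from this module):

```rust
// Illustration only: the header pair `update_head` leaves on a compressed
// response, reproduced with the `http` crate outside of this codebase.
use http::header::{HeaderMap, HeaderValue, CONTENT_ENCODING, VARY};

fn main() {
    let mut headers = HeaderMap::new();
    headers.insert(CONTENT_ENCODING, HeaderValue::from_static("gzip"));
    headers.insert(VARY, HeaderValue::from_static("accept-encoding"));

    // Caches must now key on the request's Accept-Encoding as well as the URL.
    assert_eq!(headers.get(CONTENT_ENCODING).unwrap().as_bytes(), b"gzip");
    assert_eq!(headers.get(VARY).unwrap().as_bytes(), b"accept-encoding");
}
```
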
enum ContentEncoder { enum ContentEncoder {
#[cfg(feature = "compress-gzip")]
Deflate(ZlibEncoder<Writer>), Deflate(ZlibEncoder<Writer>),
#[cfg(feature = "compress-gzip")]
Gzip(GzEncoder<Writer>), Gzip(GzEncoder<Writer>),
Br(BrotliEncoder<Writer>),
#[cfg(feature = "compress-brotli")]
Brotli(BrotliEncoder<Writer>),
// We need an explicit 'static lifetime here because ZstdEncoder needs a lifetime argument and we
// use `spawn_blocking` in `Encoder::poll_next` that requires `FnOnce() -> R + Send + 'static`.
#[cfg(feature = "compress-zstd")]
Zstd(ZstdEncoder<'static, Writer>),
} }
impl ContentEncoder { impl ContentEncoder {
fn select(encoding: ContentEncoding) -> Option<Self> { fn encoder(encoding: ContentEncoding) -> Option<Self> {
match encoding { match encoding {
#[cfg(feature = "compress-gzip")]
ContentEncoding::Deflate => Some(ContentEncoder::Deflate(ZlibEncoder::new( ContentEncoding::Deflate => Some(ContentEncoder::Deflate(ZlibEncoder::new(
Writer::new(), Writer::new(),
flate2::Compression::fast(), flate2::Compression::fast(),
))), ))),
#[cfg(feature = "compress-gzip")]
ContentEncoding::Gzip => Some(ContentEncoder::Gzip(GzEncoder::new( ContentEncoding::Gzip => Some(ContentEncoder::Gzip(GzEncoder::new(
Writer::new(), Writer::new(),
flate2::Compression::fast(), flate2::Compression::fast(),
))), ))),
ContentEncoding::Br => Some(ContentEncoder::Br(BrotliEncoder::new(
#[cfg(feature = "compress-brotli")] Writer::new(),
ContentEncoding::Brotli => { 32 * 1024,
Some(ContentEncoder::Brotli(BrotliEncoder::new(Writer::new(), 3))) 3,
} 22,
))),
#[cfg(feature = "compress-zstd")]
ContentEncoding::Zstd => {
let encoder = ZstdEncoder::new(Writer::new(), 3).ok()?;
Some(ContentEncoder::Zstd(encoder))
}
_ => None, _ => None,
} }
} }
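
Because large chunks are handed to spawn_blocking (see poll_next above), every ContentEncoder variant has to be Send + 'static, which is also why the zstd encoder is pinned to a 'static lifetime parameter. A sketch of the same constraint using std::thread::spawn, which imposes the identical bound, with flate2 standing in for whichever encoder was selected:

```rust
// Sketch of why the encoder must be Send + 'static: large chunks get
// compressed off-thread. std::thread::spawn stands in for spawn_blocking here.
use std::{io::Write as _, thread};

use flate2::{write::GzEncoder, Compression};

fn main() -> std::io::Result<()> {
    let mut encoder = GzEncoder::new(Vec::new(), Compression::fast());
    let chunk = vec![0u8; 64 * 1024]; // pretend this came from the body stream

    // The closure takes ownership of both values, so nothing borrowed may
    // outlive the caller -- the same requirement spawn_blocking enforces.
    let handle = thread::spawn(move || -> std::io::Result<GzEncoder<Vec<u8>>> {
        encoder.write_all(&chunk)?;
        Ok(encoder)
    });

    let encoder = handle.join().expect("worker panicked")?;
    let compressed = encoder.finish()?;
    println!("compressed {} bytes down to {}", 64 * 1024, compressed.len());
    Ok(())
}
```
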
@ -309,60 +225,38 @@ impl ContentEncoder {
#[inline] #[inline]
pub(crate) fn take(&mut self) -> Bytes { pub(crate) fn take(&mut self) -> Bytes {
match *self { match *self {
#[cfg(feature = "compress-brotli")] ContentEncoder::Br(ref mut encoder) => encoder.get_mut().take(),
ContentEncoder::Brotli(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-gzip")]
ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(), ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-gzip")]
ContentEncoder::Gzip(ref mut encoder) => encoder.get_mut().take(), ContentEncoder::Gzip(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-zstd")]
ContentEncoder::Zstd(ref mut encoder) => encoder.get_mut().take(),
} }
} }
fn finish(self) -> Result<Bytes, io::Error> { fn finish(self) -> Result<Bytes, io::Error> {
match self { match self {
#[cfg(feature = "compress-brotli")] ContentEncoder::Br(mut encoder) => match encoder.flush() {
ContentEncoder::Brotli(encoder) => match encoder.finish() { Ok(()) => Ok(encoder.into_inner().buf.freeze()),
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err), Err(err) => Err(err),
}, },
#[cfg(feature = "compress-gzip")]
ContentEncoder::Gzip(encoder) => match encoder.finish() { ContentEncoder::Gzip(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()), Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err), Err(err) => Err(err),
}, },
#[cfg(feature = "compress-gzip")]
ContentEncoder::Deflate(encoder) => match encoder.finish() { ContentEncoder::Deflate(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()), Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err), Err(err) => Err(err),
}, },
#[cfg(feature = "compress-zstd")]
ContentEncoder::Zstd(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
} }
} }
fn write(&mut self, data: &[u8]) -> Result<(), io::Error> { fn write(&mut self, data: &[u8]) -> Result<(), io::Error> {
match *self { match *self {
#[cfg(feature = "compress-brotli")] ContentEncoder::Br(ref mut encoder) => match encoder.write_all(data) {
ContentEncoder::Brotli(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()), Ok(_) => Ok(()),
Err(err) => { Err(err) => {
trace!("Error decoding br encoding: {}", err); trace!("Error decoding br encoding: {}", err);
Err(err) Err(err)
} }
}, },
#[cfg(feature = "compress-gzip")]
ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) { ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()), Ok(_) => Ok(()),
Err(err) => { Err(err) => {
@ -370,8 +264,6 @@ impl ContentEncoder {
Err(err) Err(err)
} }
}, },
#[cfg(feature = "compress-gzip")]
ContentEncoder::Deflate(ref mut encoder) => match encoder.write_all(data) { ContentEncoder::Deflate(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()), Ok(_) => Ok(()),
Err(err) => { Err(err) => {
@ -379,44 +271,6 @@ impl ContentEncoder {
Err(err) Err(err)
} }
}, },
#[cfg(feature = "compress-zstd")]
ContentEncoder::Zstd(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
trace!("Error decoding zstd encoding: {}", err);
Err(err)
}
},
} }
} }
} }
#[derive(Debug, Display)]
#[non_exhaustive]
pub enum EncoderError {
#[display(fmt = "body")]
Body(Box<dyn StdError>),
#[display(fmt = "blocking")]
Blocking(BlockingError),
#[display(fmt = "io")]
Io(io::Error),
}
impl StdError for EncoderError {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
EncoderError::Body(err) => Some(&**err),
EncoderError::Blocking(err) => Some(err),
EncoderError::Io(err) => Some(err),
}
}
}
impl From<EncoderError> for crate::Error {
fn from(err: EncoderError) -> Self {
crate::Error::new_encoder().with_cause(err)
}
}


@ -1,5 +1,4 @@
//! Content-Encoding support. //! Content-Encoding support
use std::io; use std::io;
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
@ -10,9 +9,6 @@ mod encoder;
pub use self::decoder::Decoder; pub use self::decoder::Decoder;
pub use self::encoder::Encoder; pub use self::encoder::Encoder;
/// Special-purpose writer for streaming (de-)compression.
///
/// Pre-allocates 8KiB of capacity.
pub(self) struct Writer { pub(self) struct Writer {
buf: BytesMut, buf: BytesMut,
} }
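
For reference, a standalone restatement of what this Writer amounts to: an io::Write sink over BytesMut whose take() splits off everything written so far as a cheap Bytes. This is an independent copy for illustration, not the crate's private type:

```rust
// Standalone re-statement of the Writer idea, mirroring the 8KiB
// pre-allocation mentioned in the doc comment above.
use std::io;

use bytes::{Bytes, BytesMut};

struct Writer {
    buf: BytesMut,
}

impl Writer {
    fn new() -> Writer {
        Writer {
            buf: BytesMut::with_capacity(8 * 1024),
        }
    }

    fn take(&mut self) -> Bytes {
        // Hands back accumulated output without reallocating the buffer.
        self.buf.split().freeze()
    }
}

impl io::Write for Writer {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.buf.extend_from_slice(buf);
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

fn main() -> io::Result<()> {
    use std::io::Write as _;
    let mut w = Writer::new();
    w.write_all(b"abc")?;
    assert_eq!(w.take(), Bytes::from_static(b"abc"));
    Ok(())
}
```
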

File diff suppressed because it is too large


@ -1,118 +1,62 @@
use std::{ use std::any::{Any, TypeId};
any::{Any, TypeId}, use std::{fmt, mem};
fmt,
};
use ahash::AHashMap; use fxhash::FxHashMap;
/// A type map for request extensions.
///
/// All entries into this map must be owned types (or static references).
#[derive(Default)] #[derive(Default)]
/// A type map of request extensions.
pub struct Extensions { pub struct Extensions {
/// Use AHasher with a std HashMap for faster lookups on the small `TypeId` keys. /// Use FxHasher with a std HashMap for faster
map: AHashMap<TypeId, Box<dyn Any>>, /// lookups on the small `TypeId` (u64 equivalent) keys.
map: FxHashMap<TypeId, Box<dyn Any>>,
} }
impl Extensions { impl Extensions {
/// Creates an empty `Extensions`. /// Create an empty `Extensions`.
#[inline] #[inline]
pub fn new() -> Extensions { pub fn new() -> Extensions {
Extensions { Extensions {
map: AHashMap::new(), map: FxHashMap::default(),
} }
} }
/// Insert an item into the map. /// Insert a type into this `Extensions`.
/// ///
/// If an item of this type was already stored, it will be replaced and returned. /// If an extension of this type already existed, it will
/// /// be returned.
/// ``` pub fn insert<T: 'static>(&mut self, val: T) {
/// # use actix_http::Extensions; self.map.insert(TypeId::of::<T>(), Box::new(val));
/// let mut map = Extensions::new();
/// assert_eq!(map.insert(""), None);
/// assert_eq!(map.insert(1u32), None);
/// assert_eq!(map.insert(2u32), Some(1u32));
/// assert_eq!(*map.get::<u32>().unwrap(), 2u32);
/// ```
pub fn insert<T: 'static>(&mut self, val: T) -> Option<T> {
self.map
.insert(TypeId::of::<T>(), Box::new(val))
.and_then(downcast_owned)
} }
/// Check if map contains an item of a given type. /// Check if container contains entry
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// assert!(!map.contains::<u32>());
///
/// assert_eq!(map.insert(1u32), None);
/// assert!(map.contains::<u32>());
/// ```
pub fn contains<T: 'static>(&self) -> bool { pub fn contains<T: 'static>(&self) -> bool {
self.map.contains_key(&TypeId::of::<T>()) self.map.contains_key(&TypeId::of::<T>())
} }
/// Get a reference to an item of a given type. /// Get a reference to a type previously inserted on this `Extensions`.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// map.insert(1u32);
/// assert_eq!(map.get::<u32>(), Some(&1u32));
/// ```
pub fn get<T: 'static>(&self) -> Option<&T> { pub fn get<T: 'static>(&self) -> Option<&T> {
self.map self.map
.get(&TypeId::of::<T>()) .get(&TypeId::of::<T>())
.and_then(|boxed| boxed.downcast_ref()) .and_then(|boxed| boxed.downcast_ref())
} }
/// Get a mutable reference to an item of a given type. /// Get a mutable reference to a type previously inserted on this `Extensions`.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// map.insert(1u32);
/// assert_eq!(map.get_mut::<u32>(), Some(&mut 1u32));
/// ```
pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> { pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> {
self.map self.map
.get_mut(&TypeId::of::<T>()) .get_mut(&TypeId::of::<T>())
.and_then(|boxed| boxed.downcast_mut()) .and_then(|boxed| boxed.downcast_mut())
} }
/// Remove an item from the map of a given type. /// Remove a type from this `Extensions`.
/// ///
/// If an item of this type was already stored, it will be returned. /// If an extension of this type existed, it will be returned.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
///
/// map.insert(1u32);
/// assert_eq!(map.get::<u32>(), Some(&1u32));
///
/// assert_eq!(map.remove::<u32>(), Some(1u32));
/// assert!(!map.contains::<u32>());
/// ```
pub fn remove<T: 'static>(&mut self) -> Option<T> { pub fn remove<T: 'static>(&mut self) -> Option<T> {
self.map.remove(&TypeId::of::<T>()).and_then(downcast_owned) self.map
.remove(&TypeId::of::<T>())
.and_then(|boxed| boxed.downcast().ok().map(|boxed| *boxed))
} }
/// Clear the `Extensions` of all inserted extensions. /// Clear the `Extensions` of all inserted extensions.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
///
/// map.insert(1u32);
/// assert!(map.contains::<u32>());
///
/// map.clear();
/// assert!(!map.contains::<u32>());
/// ```
#[inline] #[inline]
pub fn clear(&mut self) { pub fn clear(&mut self) {
self.map.clear(); self.map.clear();
@ -122,6 +66,11 @@ impl Extensions {
pub fn extend(&mut self, other: Extensions) { pub fn extend(&mut self, other: Extensions) {
self.map.extend(other.map); self.map.extend(other.map);
} }
/// Sets (or overrides) items from `other` into this map.
pub(crate) fn drain_from(&mut self, other: &mut Self) {
self.map.extend(mem::take(&mut other.map));
}
} }
impl fmt::Debug for Extensions { impl fmt::Debug for Extensions {
@ -130,10 +79,6 @@ impl fmt::Debug for Extensions {
} }
} }
fn downcast_owned<T: 'static>(boxed: Box<dyn Any>) -> Option<T> {
boxed.downcast().ok().map(|boxed| *boxed)
}
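
downcast_owned is the usual std::any round trip: the map stores Box<dyn Any> keyed by TypeId, and recovering a value means downcasting the box back to its concrete type. The same trick in isolation:

```rust
// The Any round trip on its own: type-erase a value, then recover it by type.
use std::any::Any;

fn downcast_owned<T: 'static>(boxed: Box<dyn Any>) -> Option<T> {
    boxed.downcast().ok().map(|boxed| *boxed)
}

fn main() {
    let erased: Box<dyn Any> = Box::new(42u32);
    assert_eq!(downcast_owned::<u32>(erased), Some(42u32));

    // Asking for the wrong type yields None instead of panicking.
    let erased: Box<dyn Any> = Box::new("hello");
    assert_eq!(downcast_owned::<u32>(erased), None);
}
```
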
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@ -173,8 +118,6 @@ mod tests {
#[test] #[test]
fn test_integers() { fn test_integers() {
static A: u32 = 8;
let mut map = Extensions::new(); let mut map = Extensions::new();
map.insert::<i8>(8); map.insert::<i8>(8);
@ -187,7 +130,6 @@ mod tests {
map.insert::<u32>(32); map.insert::<u32>(32);
map.insert::<u64>(64); map.insert::<u64>(64);
map.insert::<u128>(128); map.insert::<u128>(128);
map.insert::<&'static u32>(&A);
assert!(map.get::<i8>().is_some()); assert!(map.get::<i8>().is_some());
assert!(map.get::<i16>().is_some()); assert!(map.get::<i16>().is_some());
assert!(map.get::<i32>().is_some()); assert!(map.get::<i32>().is_some());
@ -198,7 +140,6 @@ mod tests {
assert!(map.get::<u32>().is_some()); assert!(map.get::<u32>().is_some());
assert!(map.get::<u64>().is_some()); assert!(map.get::<u64>().is_some());
assert!(map.get::<u128>().is_some()); assert!(map.get::<u128>().is_some());
assert!(map.get::<&'static u32>().is_some());
} }
#[test] #[test]
@ -277,4 +218,27 @@ mod tests {
assert_eq!(extensions.get(), Some(&20u8)); assert_eq!(extensions.get(), Some(&20u8));
assert_eq!(extensions.get_mut(), Some(&mut 20u8)); assert_eq!(extensions.get_mut(), Some(&mut 20u8));
} }
#[test]
fn test_drain_from() {
let mut ext = Extensions::new();
ext.insert(2isize);
let mut more_ext = Extensions::new();
more_ext.insert(5isize);
more_ext.insert(5usize);
assert_eq!(ext.get::<isize>(), Some(&2isize));
assert_eq!(ext.get::<usize>(), None);
assert_eq!(more_ext.get::<isize>(), Some(&5isize));
assert_eq!(more_ext.get::<usize>(), Some(&5usize));
ext.drain_from(&mut more_ext);
assert_eq!(ext.get::<isize>(), Some(&5isize));
assert_eq!(ext.get::<usize>(), Some(&5usize));
assert_eq!(more_ext.get::<isize>(), None);
assert_eq!(more_ext.get::<usize>(), None);
}
} }


@ -1,426 +0,0 @@
use std::{io, task::Poll};
use bytes::{Buf as _, Bytes, BytesMut};
macro_rules! byte (
($rdr:ident) => ({
if $rdr.len() > 0 {
let b = $rdr[0];
$rdr.advance(1);
b
} else {
return Poll::Pending
}
})
);
#[derive(Debug, PartialEq, Clone)]
pub(super) enum ChunkedState {
Size,
SizeLws,
Extension,
SizeLf,
Body,
BodyCr,
BodyLf,
EndCr,
EndLf,
End,
}
impl ChunkedState {
pub(super) fn step(
&self,
body: &mut BytesMut,
size: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<Result<ChunkedState, io::Error>> {
use self::ChunkedState::*;
match *self {
Size => ChunkedState::read_size(body, size),
SizeLws => ChunkedState::read_size_lws(body),
Extension => ChunkedState::read_extension(body),
SizeLf => ChunkedState::read_size_lf(body, *size),
Body => ChunkedState::read_body(body, size, buf),
BodyCr => ChunkedState::read_body_cr(body),
BodyLf => ChunkedState::read_body_lf(body),
EndCr => ChunkedState::read_end_cr(body),
EndLf => ChunkedState::read_end_lf(body),
End => Poll::Ready(Ok(ChunkedState::End)),
}
}
fn read_size(rdr: &mut BytesMut, size: &mut u64) -> Poll<Result<ChunkedState, io::Error>> {
let radix = 16;
let rem = match byte!(rdr) {
b @ b'0'..=b'9' => b - b'0',
b @ b'a'..=b'f' => b + 10 - b'a',
b @ b'A'..=b'F' => b + 10 - b'A',
b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => return Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => {
return Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size line: Invalid Size",
)));
}
};
match size.checked_mul(radix) {
Some(n) => {
*size = n as u64;
*size += rem as u64;
Poll::Ready(Ok(ChunkedState::Size))
}
None => {
log::debug!("chunk size would overflow u64");
Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size line: Size is too big",
)))
}
}
}
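
read_size accumulates the hex chunk size one nibble at a time, and the checked_mul is what turns an oversized size line into an error instead of a silent wrap (see the hrs_chunk_size_overflow test further down). A standalone sketch of the same accumulation, here also using checked_add for the final step:

```rust
// Each hex digit shifts the running total left by 4 bits; checked arithmetic
// rejects anything that no longer fits in a u64.
fn parse_chunk_size(hex: &[u8]) -> Option<u64> {
    let mut size: u64 = 0;
    for &b in hex {
        let digit = match b {
            b'0'..=b'9' => b - b'0',
            b'a'..=b'f' => b + 10 - b'a',
            b'A'..=b'F' => b + 10 - b'A',
            _ => return None,
        };
        size = size.checked_mul(16)?.checked_add(u64::from(digit))?;
    }
    Some(size)
}

fn main() {
    assert_eq!(parse_chunk_size(b"4c"), Some(0x4c));
    // 17 hex digits need 68 bits, so this is rejected instead of wrapping.
    assert_eq!(parse_chunk_size(b"f0000000000000003"), None);
}
```
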
fn read_size_lws(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
// LWS can follow the chunk size, but no more digits can come
b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size linear white space",
))),
}
}
fn read_extension(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
// strictly 0x20 (space) should be disallowed but we don't parse quoted strings here
0x00..=0x08 | 0x0a..=0x1f | 0x7f => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid character in chunk extension",
))),
_ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions
}
}
fn read_size_lf(rdr: &mut BytesMut, size: u64) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' if size > 0 => Poll::Ready(Ok(ChunkedState::Body)),
b'\n' if size == 0 => Poll::Ready(Ok(ChunkedState::EndCr)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size LF",
))),
}
}
fn read_body(
rdr: &mut BytesMut,
rem: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<Result<ChunkedState, io::Error>> {
log::trace!("Chunked read, remaining={:?}", rem);
let len = rdr.len() as u64;
if len == 0 {
Poll::Ready(Ok(ChunkedState::Body))
} else {
let slice;
if *rem > len {
slice = rdr.split().freeze();
*rem -= len;
} else {
slice = rdr.split_to(*rem as usize).freeze();
*rem = 0;
}
*buf = Some(slice);
if *rem > 0 {
Poll::Ready(Ok(ChunkedState::Body))
} else {
Poll::Ready(Ok(ChunkedState::BodyCr))
}
}
}
fn read_body_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk body CR",
))),
}
}
fn read_body_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' => Poll::Ready(Ok(ChunkedState::Size)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk body LF",
))),
}
}
fn read_end_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end CR",
))),
}
}
fn read_end_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' => Poll::Ready(Ok(ChunkedState::End)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end LF",
))),
}
}
}
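
Taken together, the transitions above decode one chunk as size line, body, and trailing CRLF, finishing on the zero-size chunk plus final CRLF. A sketch of an in-module test that walks the machine by hand over a complete body (ChunkedState is pub(super), so this has to sit next to the decoder rather than in an external crate):

```rust
// Hand-driven walk through the chunked state machine over "4\r\ndata\r\n0\r\n\r\n".
#[test]
fn walk_chunked_state_machine() {
    use std::task::Poll;

    use bytes::{Bytes, BytesMut};

    let mut body = BytesMut::from("4\r\ndata\r\n0\r\n\r\n");
    let mut size = 0u64;
    let mut state = ChunkedState::Size;
    let mut chunks = Vec::new();

    while state != ChunkedState::End {
        let mut buf = None;
        state = match state.step(&mut body, &mut size, &mut buf) {
            Poll::Ready(Ok(next)) => next,
            Poll::Ready(Err(err)) => panic!("invalid chunk stream: {}", err),
            // A real decoder would go back to the socket for more bytes here.
            Poll::Pending => break,
        };
        if let Some(chunk) = buf {
            chunks.push(chunk);
        }
    }

    assert_eq!(chunks, vec![Bytes::from_static(b"data")]);
}
```
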
#[cfg(test)]
mod tests {
use actix_codec::Decoder as _;
use bytes::{Bytes, BytesMut};
use http::Method;
use crate::{
error::ParseError,
h1::decoder::{MessageDecoder, PayloadItem},
HttpMessage as _, Request,
};
macro_rules! parse_ready {
($e:expr) => {{
match MessageDecoder::<Request>::default().decode($e) {
Ok(Some((msg, _))) => msg,
Ok(_) => unreachable!("Eof during parsing http request"),
Err(err) => unreachable!("Error during parsing http request: {:?}", err),
}
}};
}
macro_rules! expect_parse_err {
($e:expr) => {{
match MessageDecoder::<Request>::default().decode($e) {
Err(err) => match err {
ParseError::Io(_) => unreachable!("Parse error expected"),
_ => {}
},
_ => unreachable!("Error expected"),
}
}};
}
#[test]
fn test_parse_chunked_payload_chunk_extension() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\
\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(msg.chunked().unwrap());
buf.extend(b"4;test\r\ndata\r\n4\r\nline\r\n0\r\n\r\n"); // test: test\r\n\r\n")
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
assert_eq!(chunk, Bytes::from_static(b"data"));
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
assert_eq!(chunk, Bytes::from_static(b"line"));
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
}
#[test]
fn test_request_chunked() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let req = parse_ready!(&mut buf);
if let Ok(val) = req.chunked() {
assert!(val);
} else {
unreachable!("Error");
}
// intentional typo in "chunked"
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chnked\r\n\r\n",
);
expect_parse_err!(&mut buf);
}
#[test]
fn test_http_request_chunked_payload() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n");
assert_eq!(
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
b"data"
);
assert_eq!(
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
b"line"
);
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
}
#[test]
fn test_http_request_chunked_payload_and_next_message() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(
b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n\
POST /test2 HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n"
.iter(),
);
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"data");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"line");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
assert!(req.chunked().unwrap());
assert_eq!(*req.method(), Method::POST);
assert!(req.chunked().unwrap());
}
#[test]
fn test_http_request_chunked_payload_chunks() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(b"4\r\n1111\r\n");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"1111");
buf.extend(b"4\r\ndata\r");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"data");
buf.extend(b"\n4");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\r");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\n");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"li");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"li");
//trailers
//buf.feed_data("test: test\r\n");
//not_ready!(reader.parse(&mut buf, &mut readbuf));
buf.extend(b"ne\r\n0\r\n");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"ne");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\r\n");
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
}
#[test]
fn chunk_extension_quoted() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
Host: localhost:8080\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
2;hello=b;one=\"1 2 3\"\r\n\
xx",
);
let mut reader = MessageDecoder::<Request>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let chunk = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"xx")));
}
#[test]
fn hrs_chunk_extension_invalid() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: localhost:8080\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
2;x\nx\r\n\
4c\r\n\
0\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let err = pl.decode(&mut buf).unwrap_err();
assert!(err
.to_string()
.contains("Invalid character in chunk extension"));
}
#[test]
fn hrs_chunk_size_overflow() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
f0000000000000003\r\n\
abc\r\n\
0\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let err = pl.decode(&mut buf).unwrap_err();
assert!(err
.to_string()
.contains("Invalid chunk size line: Size is too big"));
}
}


@ -5,15 +5,13 @@ use bitflags::bitflags;
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
use http::{Method, Version}; use http::{Method, Version};
use super::{ use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
decoder::{self, PayloadDecoder, PayloadItem, PayloadType}, use super::{decoder, encoder, reserve_readbuf};
encoder, reserve_readbuf, Message, MessageType, use super::{Message, MessageType};
}; use crate::body::BodySize;
use crate::{ use crate::config::ServiceConfig;
body::BodySize, use crate::error::{ParseError, PayloadError};
error::{ParseError, PayloadError}, use crate::message::{ConnectionType, RequestHeadType, ResponseHead};
ConnectionType, RequestHeadType, ResponseHead, ServiceConfig,
};
bitflags! { bitflags! {
struct Flags: u8 { struct Flags: u8 {
@ -122,7 +120,7 @@ impl Decoder for ClientCodec {
debug_assert!(!self.inner.payload.is_some(), "Payload decoder is set"); debug_assert!(!self.inner.payload.is_some(), "Payload decoder is set");
if let Some((req, payload)) = self.inner.decoder.decode(src)? { if let Some((req, payload)) = self.inner.decoder.decode(src)? {
if let Some(ctype) = req.conn_type() { if let Some(ctype) = req.ctype() {
// do not use peer's keep-alive // do not use peer's keep-alive
self.inner.ctype = if ctype == ConnectionType::KeepAlive { self.inner.ctype = if ctype == ConnectionType::KeepAlive {
self.inner.ctype self.inner.ctype
@ -225,3 +223,15 @@ impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
Ok(()) Ok(())
} }
} }
pub struct Writer<'a>(pub &'a mut BytesMut);
impl<'a> io::Write for Writer<'a> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.extend_from_slice(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
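
This Writer newtype exists so the client codec can write! header lines straight into the shared BytesMut instead of allocating an intermediate String. A small usage sketch, written as a test alongside the type:

```rust
#[test]
fn write_into_shared_buffer() -> std::io::Result<()> {
    use std::io::Write as _;

    use bytes::BytesMut;

    let mut dst = BytesMut::with_capacity(64);
    // Format directly into the buffer that will be flushed to the socket.
    write!(Writer(&mut dst), "GET / HTTP/1.1\r\n")?;
    write!(Writer(&mut dst), "host: example.com\r\n\r\n")?;
    assert_eq!(&dst[..], &b"GET / HTTP/1.1\r\nhost: example.com\r\n\r\n"[..]);
    Ok(())
}
```
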


@ -5,13 +5,15 @@ use bitflags::bitflags;
use bytes::BytesMut; use bytes::BytesMut;
use http::{Method, Version}; use http::{Method, Version};
use super::{ use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
decoder::{self, PayloadDecoder, PayloadItem, PayloadType}, use super::{decoder, encoder};
encoder, Message, MessageType, use super::{Message, MessageType};
}; use crate::body::BodySize;
use crate::{ use crate::config::ServiceConfig;
body::BodySize, error::ParseError, ConnectionType, Request, Response, ServiceConfig, use crate::error::ParseError;
}; use crate::message::ConnectionType;
use crate::request::Request;
use crate::response::Response;
bitflags! { bitflags! {
struct Flags: u8 { struct Flags: u8 {
@ -27,7 +29,7 @@ pub struct Codec {
decoder: decoder::MessageDecoder<Request>, decoder: decoder::MessageDecoder<Request>,
payload: Option<PayloadDecoder>, payload: Option<PayloadDecoder>,
version: Version, version: Version,
conn_type: ConnectionType, ctype: ConnectionType,
// encoder part // encoder part
flags: Flags, flags: Flags,
@ -56,38 +58,37 @@ impl Codec {
} else { } else {
Flags::empty() Flags::empty()
}; };
Codec { Codec {
config, config,
flags, flags,
decoder: decoder::MessageDecoder::default(), decoder: decoder::MessageDecoder::default(),
payload: None, payload: None,
version: Version::HTTP_11, version: Version::HTTP_11,
conn_type: ConnectionType::Close, ctype: ConnectionType::Close,
encoder: encoder::MessageEncoder::default(), encoder: encoder::MessageEncoder::default(),
} }
} }
/// Check if request is upgrade.
#[inline] #[inline]
/// Check if request is upgrade
pub fn upgrade(&self) -> bool { pub fn upgrade(&self) -> bool {
self.conn_type == ConnectionType::Upgrade self.ctype == ConnectionType::Upgrade
} }
/// Check if last response is keep-alive.
#[inline] #[inline]
/// Check if last response is keep-alive
pub fn keepalive(&self) -> bool { pub fn keepalive(&self) -> bool {
self.conn_type == ConnectionType::KeepAlive self.ctype == ConnectionType::KeepAlive
} }
/// Check if keep-alive enabled on server level.
#[inline] #[inline]
/// Check if keep-alive enabled on server level
pub fn keepalive_enabled(&self) -> bool { pub fn keepalive_enabled(&self) -> bool {
self.flags.contains(Flags::KEEPALIVE_ENABLED) self.flags.contains(Flags::KEEPALIVE_ENABLED)
} }
/// Check last request's message type.
#[inline] #[inline]
/// Check last request's message type
pub fn message_type(&self) -> MessageType { pub fn message_type(&self) -> MessageType {
if self.flags.contains(Flags::STREAM) { if self.flags.contains(Flags::STREAM) {
MessageType::Stream MessageType::Stream
@ -109,8 +110,8 @@ impl Decoder for Codec {
type Error = ParseError; type Error = ParseError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> { fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
if let Some(ref mut payload) = self.payload { if self.payload.is_some() {
Ok(match payload.decode(src)? { Ok(match self.payload.as_mut().unwrap().decode(src)? {
Some(PayloadItem::Chunk(chunk)) => Some(Message::Chunk(Some(chunk))), Some(PayloadItem::Chunk(chunk)) => Some(Message::Chunk(Some(chunk))),
Some(PayloadItem::Eof) => { Some(PayloadItem::Eof) => {
self.payload.take(); self.payload.take();
@ -122,11 +123,11 @@ impl Decoder for Codec {
let head = req.head(); let head = req.head();
self.flags.set(Flags::HEAD, head.method == Method::HEAD); self.flags.set(Flags::HEAD, head.method == Method::HEAD);
self.version = head.version; self.version = head.version;
self.conn_type = head.connection_type(); self.ctype = head.connection_type();
if self.conn_type == ConnectionType::KeepAlive if self.ctype == ConnectionType::KeepAlive
&& !self.flags.contains(Flags::KEEPALIVE_ENABLED) && !self.flags.contains(Flags::KEEPALIVE_ENABLED)
{ {
self.conn_type = ConnectionType::Close self.ctype = ConnectionType::Close
} }
match payload { match payload {
PayloadType::None => self.payload = None, PayloadType::None => self.payload = None,
@ -157,14 +158,14 @@ impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
res.head_mut().version = self.version; res.head_mut().version = self.version;
// connection status // connection status
self.conn_type = if let Some(ct) = res.head().conn_type() { self.ctype = if let Some(ct) = res.head().ctype() {
if ct == ConnectionType::KeepAlive { if ct == ConnectionType::KeepAlive {
self.conn_type self.ctype
} else { } else {
ct ct
} }
} else { } else {
self.conn_type self.ctype
}; };
// encode message // encode message
@ -175,9 +176,10 @@ impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
self.flags.contains(Flags::STREAM), self.flags.contains(Flags::STREAM),
self.version, self.version,
length, length,
self.conn_type, self.ctype,
&self.config, &self.config,
)?; )?;
// self.headers_size = (dst.len() - len) as u32;
} }
Message::Chunk(Some(bytes)) => { Message::Chunk(Some(bytes)) => {
self.encoder.encode_chunk(bytes.as_ref(), dst)?; self.encoder.encode_chunk(bytes.as_ref(), dst)?;
@ -186,7 +188,6 @@ impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
self.encoder.encode_eof(dst)?; self.encoder.encode_eof(dst)?;
} }
} }
Ok(()) Ok(())
} }
} }
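
The codec wires the h1 message decoder and encoder into actix-codec's Decoder/Encoder traits, which is exactly how the tests below exercise it. A hedged sketch of decoding a single request from outside the crate, assuming the public h1::Codec export and the actix-codec trait in scope:

```rust
// Hedged sketch mirroring the tests below: feed one HTTP/1.1 request head
// through the codec and confirm it parses without switching protocols.
use actix_codec::Decoder as _;
use actix_http::h1::Codec;
use bytes::BytesMut;

fn main() {
    let mut codec = Codec::default();
    let mut buf = BytesMut::from(
        "GET /test HTTP/1.1\r\n\
         host: example.com\r\n\r\n",
    );

    // A full head is buffered, so decode yields a message rather than None.
    let msg = codec.decode(&mut buf).expect("parse error");
    assert!(msg.is_some());
    assert!(!codec.upgrade());
}
```
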
@ -197,10 +198,10 @@ mod tests {
use http::Method; use http::Method;
use super::*; use super::*;
use crate::HttpMessage as _; use crate::httpmessage::HttpMessage;
#[actix_rt::test] #[test]
async fn test_http_request_chunked_payload_and_next_message() { fn test_http_request_chunked_payload_and_next_message() {
let mut codec = Codec::default(); let mut codec = Codec::default();
let mut buf = BytesMut::from( let mut buf = BytesMut::from(


@ -1,17 +1,20 @@
use std::{convert::TryFrom, io, marker::PhantomData, mem::MaybeUninit, task::Poll}; use std::convert::TryFrom;
use std::io;
use std::marker::PhantomData;
use std::task::Poll;
use actix_codec::Decoder; use actix_codec::Decoder;
use bytes::{Bytes, BytesMut}; use bytes::{Buf, Bytes, BytesMut};
use http::{ use http::header::{HeaderName, HeaderValue};
header::{self, HeaderName, HeaderValue}, use http::{header, Method, StatusCode, Uri, Version};
Method, StatusCode, Uri, Version,
};
use log::{debug, error, trace}; use log::{debug, error, trace};
use super::chunked::ChunkedState; use crate::error::ParseError;
use crate::{error::ParseError, header::HeaderMap, ConnectionType, Request, ResponseHead}; use crate::header::HeaderMap;
use crate::message::{ConnectionType, ResponseHead};
use crate::request::Request;
pub(crate) const MAX_BUFFER_SIZE: usize = 131_072; const MAX_BUFFER_SIZE: usize = 131_072;
const MAX_HEADERS: usize = 96; const MAX_HEADERS: usize = 96;
/// Incoming message decoder /// Incoming message decoder
@ -47,7 +50,7 @@ pub(crate) enum PayloadLength {
} }
pub(crate) trait MessageType: Sized { pub(crate) trait MessageType: Sized {
fn set_connection_type(&mut self, conn_type: Option<ConnectionType>); fn set_connection_type(&mut self, ctype: Option<ConnectionType>);
fn set_expect(&mut self); fn set_expect(&mut self);
@ -71,7 +74,8 @@ pub(crate) trait MessageType: Sized {
let headers = self.headers_mut(); let headers = self.headers_mut();
for idx in raw_headers.iter() { for idx in raw_headers.iter() {
let name = HeaderName::from_bytes(&slice[idx.name.0..idx.name.1]).unwrap(); let name =
HeaderName::from_bytes(&slice[idx.name.0..idx.name.1]).unwrap();
// SAFETY: httparse already checks header value is only visible ASCII bytes // SAFETY: httparse already checks header value is only visible ASCII bytes
// from_maybe_shared_unchecked contains debug assertions so they are omitted here // from_maybe_shared_unchecked contains debug assertions so they are omitted here
@ -88,7 +92,7 @@ pub(crate) trait MessageType: Sized {
} }
header::CONTENT_LENGTH => match value.to_str() { header::CONTENT_LENGTH => match value.to_str() {
Ok(s) if s.trim().starts_with('+') => { Ok(s) if s.trim().starts_with("+") => {
debug!("illegal Content-Length: {:?}", s); debug!("illegal Content-Length: {:?}", s);
return Err(ParseError::Header); return Err(ParseError::Header);
} }
@ -117,7 +121,7 @@ pub(crate) trait MessageType: Sized {
header::TRANSFER_ENCODING => { header::TRANSFER_ENCODING => {
seen_te = true; seen_te = true;
if let Ok(s) = value.to_str().map(str::trim) { if let Ok(s) = value.to_str().map(|s| s.trim()) {
if s.eq_ignore_ascii_case("chunked") { if s.eq_ignore_ascii_case("chunked") {
chunked = true; chunked = true;
} else if s.eq_ignore_ascii_case("identity") { } else if s.eq_ignore_ascii_case("identity") {
@ -132,7 +136,7 @@ pub(crate) trait MessageType: Sized {
} }
// connection keep-alive state // connection keep-alive state
header::CONNECTION => { header::CONNECTION => {
ka = if let Ok(conn) = value.to_str().map(str::trim) { ka = if let Ok(conn) = value.to_str().map(|conn| conn.trim()) {
if conn.eq_ignore_ascii_case("keep-alive") { if conn.eq_ignore_ascii_case("keep-alive") {
Some(ConnectionType::KeepAlive) Some(ConnectionType::KeepAlive)
} else if conn.eq_ignore_ascii_case("close") { } else if conn.eq_ignore_ascii_case("close") {
@ -147,7 +151,7 @@ pub(crate) trait MessageType: Sized {
}; };
} }
header::UPGRADE => { header::UPGRADE => {
if let Ok(val) = value.to_str().map(str::trim) { if let Ok(val) = value.to_str().map(|val| val.trim()) {
if val.eq_ignore_ascii_case("websocket") { if val.eq_ignore_ascii_case("websocket") {
has_upgrade_websocket = true; has_upgrade_websocket = true;
} }
@ -159,7 +163,7 @@ pub(crate) trait MessageType: Sized {
expect = true; expect = true;
} }
} }
_ => {} _ => (),
} }
headers.append(name, value); headers.append(name, value);
@ -170,7 +174,7 @@ pub(crate) trait MessageType: Sized {
self.set_expect() self.set_expect()
} }
// https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.3 // https://tools.ietf.org/html/rfc7230#section-3.3.3
if chunked { if chunked {
// Chunked encoding // Chunked encoding
Ok(PayloadLength::Payload(PayloadType::Payload( Ok(PayloadLength::Payload(PayloadType::Payload(
@ -190,8 +194,8 @@ pub(crate) trait MessageType: Sized {
} }
impl MessageType for Request { impl MessageType for Request {
fn set_connection_type(&mut self, conn_type: Option<ConnectionType>) { fn set_connection_type(&mut self, ctype: Option<ConnectionType>) {
if let Some(ctype) = conn_type { if let Some(ctype) = ctype {
self.head_mut().set_connection_type(ctype); self.head_mut().set_connection_type(ctype);
} }
} }
@ -208,17 +212,10 @@ impl MessageType for Request {
let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY; let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
let (len, method, uri, ver, h_len) = { let (len, method, uri, ver, h_len) = {
// SAFETY: let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
// Create an uninitialized array of `MaybeUninit`. The `assume_init` is
// safe because the type we are claiming to have initialized here is a
// bunch of `MaybeUninit`s, which do not require initialization.
let mut parsed = unsafe {
MaybeUninit::<[MaybeUninit<httparse::Header<'_>>; MAX_HEADERS]>::uninit()
.assume_init()
};
let mut req = httparse::Request::new(&mut []); let mut req = httparse::Request::new(&mut parsed);
match req.parse_with_uninit_headers(src, &mut parsed)? { match req.parse(src)? {
httparse::Status::Complete(len) => { httparse::Status::Complete(len) => {
let method = Method::from_bytes(req.method.unwrap().as_bytes()) let method = Method::from_bytes(req.method.unwrap().as_bytes())
.map_err(|_| ParseError::Method)?; .map_err(|_| ParseError::Method)?;
@ -232,15 +229,7 @@ impl MessageType for Request {
(len, method, uri, version, req.headers.len()) (len, method, uri, version, req.headers.len())
} }
httparse::Status::Partial => { httparse::Status::Partial => return Ok(None),
return if src.len() >= MAX_BUFFER_SIZE {
trace!("MAX_BUFFER_SIZE unprocessed data reached, closing");
Err(ParseError::TooLarge)
} else {
// Return None to notify more read are needed for parsing request
Ok(None)
};
}
} }
}; };
@ -253,12 +242,15 @@ impl MessageType for Request {
let decoder = match length { let decoder = match length {
PayloadLength::Payload(pl) => pl, PayloadLength::Payload(pl) => pl,
PayloadLength::UpgradeWebSocket => { PayloadLength::UpgradeWebSocket => {
// upgrade (WebSocket) // upgrade(websocket)
PayloadType::Stream(PayloadDecoder::eof()) PayloadType::Stream(PayloadDecoder::eof())
} }
PayloadLength::None => { PayloadLength::None => {
if method == Method::CONNECT { if method == Method::CONNECT {
PayloadType::Stream(PayloadDecoder::eof()) PayloadType::Stream(PayloadDecoder::eof())
} else if src.len() >= MAX_BUFFER_SIZE {
trace!("MAX_BUFFER_SIZE unprocessed data reached, closing");
return Err(ParseError::TooLarge);
} else { } else {
PayloadType::None PayloadType::None
} }
@ -275,8 +267,8 @@ impl MessageType for Request {
} }
impl MessageType for ResponseHead { impl MessageType for ResponseHead {
fn set_connection_type(&mut self, conn_type: Option<ConnectionType>) { fn set_connection_type(&mut self, ctype: Option<ConnectionType>) {
if let Some(ctype) = conn_type { if let Some(ctype) = ctype {
ResponseHead::set_connection_type(self, ctype); ResponseHead::set_connection_type(self, ctype);
} }
} }
@ -307,14 +299,7 @@ impl MessageType for ResponseHead {
(len, version, status, res.headers.len()) (len, version, status, res.headers.len())
} }
httparse::Status::Partial => { httparse::Status::Partial => return Ok(None),
return if src.len() >= MAX_BUFFER_SIZE {
error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
Err(ParseError::TooLarge)
} else {
Ok(None)
}
}
} }
}; };
@ -330,6 +315,9 @@ impl MessageType for ResponseHead {
} else if status == StatusCode::SWITCHING_PROTOCOLS { } else if status == StatusCode::SWITCHING_PROTOCOLS {
// switching protocol or connect // switching protocol or connect
PayloadType::Stream(PayloadDecoder::eof()) PayloadType::Stream(PayloadDecoder::eof())
} else if src.len() >= MAX_BUFFER_SIZE {
error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
return Err(ParseError::TooLarge);
} else { } else {
// for HTTP/1.0 read to eof and close connection // for HTTP/1.0 read to eof and close connection
if msg.version == Version::HTTP_10 { if msg.version == Version::HTTP_10 {
@ -437,6 +425,20 @@ enum Kind {
Eof, Eof,
} }
#[derive(Debug, PartialEq, Clone)]
enum ChunkedState {
Size,
SizeLws,
Extension,
SizeLf,
Body,
BodyCr,
BodyLf,
EndCr,
EndLf,
End,
}
impl Decoder for PayloadDecoder { impl Decoder for PayloadDecoder {
type Item = PayloadItem; type Item = PayloadItem;
type Error = io::Error; type Error = io::Error;
@ -466,23 +468,19 @@ impl Decoder for PayloadDecoder {
Kind::Chunked(ref mut state, ref mut size) => { Kind::Chunked(ref mut state, ref mut size) => {
loop { loop {
let mut buf = None; let mut buf = None;
// advances the chunked state // advances the chunked state
*state = match state.step(src, size, &mut buf) { *state = match state.step(src, size, &mut buf) {
Poll::Pending => return Ok(None), Poll::Pending => return Ok(None),
Poll::Ready(Ok(state)) => state, Poll::Ready(Ok(state)) => state,
Poll::Ready(Err(e)) => return Err(e), Poll::Ready(Err(e)) => return Err(e),
}; };
if *state == ChunkedState::End { if *state == ChunkedState::End {
trace!("End of chunked stream"); trace!("End of chunked stream");
return Ok(Some(PayloadItem::Eof)); return Ok(Some(PayloadItem::Eof));
} }
if let Some(buf) = buf { if let Some(buf) = buf {
return Ok(Some(PayloadItem::Chunk(buf))); return Ok(Some(PayloadItem::Chunk(buf)));
} }
if src.is_empty() { if src.is_empty() {
return Ok(None); return Ok(None);
} }
@ -499,40 +497,213 @@ impl Decoder for PayloadDecoder {
} }
} }
macro_rules! byte (
($rdr:ident) => ({
if $rdr.len() > 0 {
let b = $rdr[0];
$rdr.advance(1);
b
} else {
return Poll::Pending
}
})
);
impl ChunkedState {
fn step(
&self,
body: &mut BytesMut,
size: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<Result<ChunkedState, io::Error>> {
use self::ChunkedState::*;
match *self {
Size => ChunkedState::read_size(body, size),
SizeLws => ChunkedState::read_size_lws(body),
Extension => ChunkedState::read_extension(body),
SizeLf => ChunkedState::read_size_lf(body, size),
Body => ChunkedState::read_body(body, size, buf),
BodyCr => ChunkedState::read_body_cr(body),
BodyLf => ChunkedState::read_body_lf(body),
EndCr => ChunkedState::read_end_cr(body),
EndLf => ChunkedState::read_end_lf(body),
End => Poll::Ready(Ok(ChunkedState::End)),
}
}
fn read_size(
rdr: &mut BytesMut,
size: &mut u64,
) -> Poll<Result<ChunkedState, io::Error>> {
let radix = 16;
let rem = match byte!(rdr) {
b @ b'0'..=b'9' => b - b'0',
b @ b'a'..=b'f' => b + 10 - b'a',
b @ b'A'..=b'F' => b + 10 - b'A',
b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => return Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => {
return Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size line: Invalid Size",
)));
}
};
match size.checked_mul(radix) {
Some(n) => {
*size = n as u64;
*size += rem as u64;
Poll::Ready(Ok(ChunkedState::Size))
}
None => {
debug!("chunk size would overflow");
Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size line: Invalid Size",
)))
}
}
}
fn read_size_lws(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
trace!("read_size_lws");
match byte!(rdr) {
// LWS can follow the chunk size, but no more digits can come
b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size linear white space",
))),
}
}
fn read_extension(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
// strictly 0x20 (space) should be disallowed but we don't parse quoted strings here
0x00..=0x08 | 0x0a..=0x1f | 0x7f => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid character in chunk extension",
))),
_ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions
}
}
fn read_size_lf(
rdr: &mut BytesMut,
size: &mut u64,
) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' if *size > 0 => Poll::Ready(Ok(ChunkedState::Body)),
b'\n' if *size == 0 => Poll::Ready(Ok(ChunkedState::EndCr)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size LF",
))),
}
}
fn read_body(
rdr: &mut BytesMut,
rem: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<Result<ChunkedState, io::Error>> {
trace!("Chunked read, remaining={:?}", rem);
let len = rdr.len() as u64;
if len == 0 {
Poll::Ready(Ok(ChunkedState::Body))
} else {
let slice;
if *rem > len {
slice = rdr.split().freeze();
*rem -= len;
} else {
slice = rdr.split_to(*rem as usize).freeze();
*rem = 0;
}
*buf = Some(slice);
if *rem > 0 {
Poll::Ready(Ok(ChunkedState::Body))
} else {
Poll::Ready(Ok(ChunkedState::BodyCr))
}
}
}
fn read_body_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk body CR",
))),
}
}
fn read_body_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' => Poll::Ready(Ok(ChunkedState::Size)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk body LF",
))),
}
}
fn read_end_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end CR",
))),
}
}
fn read_end_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' => Poll::Ready(Ok(ChunkedState::End)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end LF",
))),
}
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
use http::{Method, Version}; use http::{Method, Version};
use super::*; use super::*;
use crate::{ use crate::error::ParseError;
error::ParseError, use crate::http::header::{HeaderName, SET_COOKIE};
header::{HeaderName, SET_COOKIE}, use crate::httpmessage::HttpMessage;
HttpMessage as _,
};
impl PayloadType { impl PayloadType {
pub(crate) fn unwrap(self) -> PayloadDecoder { fn unwrap(self) -> PayloadDecoder {
match self { match self {
PayloadType::Payload(pl) => pl, PayloadType::Payload(pl) => pl,
_ => panic!(), _ => panic!(),
} }
} }
pub(crate) fn is_unhandled(&self) -> bool { fn is_unhandled(&self) -> bool {
matches!(self, PayloadType::Stream(_)) matches!(self, PayloadType::Stream(_))
} }
} }
impl PayloadItem { impl PayloadItem {
pub(crate) fn chunk(self) -> Bytes { fn chunk(self) -> Bytes {
match self { match self {
PayloadItem::Chunk(chunk) => chunk, PayloadItem::Chunk(chunk) => chunk,
_ => panic!("error"), _ => panic!("error"),
} }
} }
fn eof(&self) -> bool {
pub(crate) fn eof(&self) -> bool {
matches!(*self, PayloadItem::Eof) matches!(*self, PayloadItem::Eof)
} }
} }
@ -552,7 +723,7 @@ mod tests {
match MessageDecoder::<Request>::default().decode($e) { match MessageDecoder::<Request>::default().decode($e) {
Err(err) => match err { Err(err) => match err {
ParseError::Io(_) => unreachable!("Parse error expected"), ParseError::Io(_) => unreachable!("Parse error expected"),
_ => {} _ => (),
}, },
_ => unreachable!("Error expected"), _ => unreachable!("Error expected"),
} }
@ -601,7 +772,8 @@ mod tests {
#[test] #[test]
fn test_parse_body() { fn test_parse_body() {
let mut buf = BytesMut::from("GET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody"); let mut buf =
BytesMut::from("GET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody");
let mut reader = MessageDecoder::<Request>::default(); let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap(); let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
@ -617,7 +789,8 @@ mod tests {
#[test] #[test]
fn test_parse_body_crlf() { fn test_parse_body_crlf() {
let mut buf = BytesMut::from("\r\nGET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody"); let mut buf =
BytesMut::from("\r\nGET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody");
let mut reader = MessageDecoder::<Request>::default(); let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap(); let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
@ -686,8 +859,8 @@ mod tests {
.get_all(SET_COOKIE) .get_all(SET_COOKIE)
.map(|v| v.to_str().unwrap().to_owned()) .map(|v| v.to_str().unwrap().to_owned())
.collect(); .collect();
assert_eq!(val[0], "c1=cookie1"); assert_eq!(val[1], "c1=cookie1");
assert_eq!(val[1], "c2=cookie2"); assert_eq!(val[0], "c2=cookie2");
} }
#[test] #[test]
@ -823,6 +996,28 @@ mod tests {
assert!(req.upgrade()); assert!(req.upgrade());
} }
#[test]
fn test_request_chunked() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let req = parse_ready!(&mut buf);
if let Ok(val) = req.chunked() {
assert!(val);
} else {
unreachable!("Error");
}
// intentional typo in "chunked"
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chnked\r\n\r\n",
);
expect_parse_err!(&mut buf);
}
#[test] #[test]
fn test_headers_content_length_err_1() { fn test_headers_content_length_err_1() {
let mut buf = BytesMut::from( let mut buf = BytesMut::from(
@ -940,9 +1135,128 @@ mod tests {
expect_parse_err!(&mut buf); expect_parse_err!(&mut buf);
} }
#[test]
fn test_http_request_chunked_payload() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n");
assert_eq!(
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
b"data"
);
assert_eq!(
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
b"line"
);
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
}
#[test]
fn test_http_request_chunked_payload_and_next_message() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(
b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n\
POST /test2 HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n"
.iter(),
);
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"data");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"line");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
assert!(req.chunked().unwrap());
assert_eq!(*req.method(), Method::POST);
assert!(req.chunked().unwrap());
}
#[test]
fn test_http_request_chunked_payload_chunks() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(b"4\r\n1111\r\n");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"1111");
buf.extend(b"4\r\ndata\r");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"data");
buf.extend(b"\n4");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\r");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\n");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"li");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"li");
//trailers
//buf.feed_data("test: test\r\n");
//not_ready!(reader.parse(&mut buf, &mut readbuf));
buf.extend(b"ne\r\n0\r\n");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"ne");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\r\n");
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
}
#[test]
fn test_parse_chunked_payload_chunk_extension() {
let mut buf = BytesMut::from(
&"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n"[..],
);
let mut reader = MessageDecoder::<Request>::default();
let (msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(msg.chunked().unwrap());
buf.extend(b"4;test\r\ndata\r\n4\r\nline\r\n0\r\n\r\n"); // test: test\r\n\r\n")
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
assert_eq!(chunk, Bytes::from_static(b"data"));
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
assert_eq!(chunk, Bytes::from_static(b"line"));
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
}
#[test] #[test]
fn test_response_http10_read_until_eof() { fn test_response_http10_read_until_eof() {
let mut buf = BytesMut::from("HTTP/1.0 200 Ok\r\n\r\ntest data"); let mut buf = BytesMut::from(&"HTTP/1.0 200 Ok\r\n\r\ntest data"[..]);
let mut reader = MessageDecoder::<ResponseHead>::default(); let mut reader = MessageDecoder::<ResponseHead>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap(); let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
@ -951,84 +1265,4 @@ mod tests {
let chunk = pl.decode(&mut buf).unwrap().unwrap(); let chunk = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"test data"))); assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"test data")));
} }
#[test]
fn hrs_multiple_content_length() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Content-Length: 4\r\n\
Content-Length: 2\r\n\
\r\n\
abcd",
);
expect_parse_err!(&mut buf);
}
#[test]
fn hrs_content_length_plus() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Content-Length: +3\r\n\
\r\n\
000",
);
expect_parse_err!(&mut buf);
}
#[test]
fn hrs_unknown_transfer_encoding() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Transfer-Encoding: JUNK\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
5\r\n\
hello\r\n\
0",
);
expect_parse_err!(&mut buf);
}
#[test]
fn hrs_multiple_transfer_encoding() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Content-Length: 51\r\n\
Transfer-Encoding: identity\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
0\r\n\
\r\n\
GET /forbidden HTTP/1.1\r\n\
Host: example.com\r\n\r\n",
);
expect_parse_err!(&mut buf);
}
#[test]
fn transfer_encoding_agrees() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
Host: example.com\r\n\
Content-Length: 3\r\n\
Transfer-Encoding: identity\r\n\
\r\n\
0\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let chunk = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"0\r\n")));
}
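    // Standalone sketch (not this crate's parser) of the header rule that the
    // `hrs_*` cases above exercise: Content-Length values that disagree, or a
    // value that is not plain ASCII digits (e.g. "+3"), must fail parsing so
    // that no two hops can pick different body lengths (request smuggling).
    // Whether identical duplicates are tolerated is a policy choice; this
    // sketch accepts them.
    fn content_length<'a, I>(values: I) -> Result<Option<u64>, &'static str>
    where
        I: IntoIterator<Item = &'a str>,
    {
        let mut agreed: Option<u64> = None;
        for raw in values {
            let trimmed = raw.trim();
            // only plain ASCII digits are allowed; "+3", "-1" or "" are malformed
            if trimmed.is_empty() || !trimmed.bytes().all(|b| b.is_ascii_digit()) {
                return Err("malformed Content-Length");
            }
            let parsed: u64 = trimmed.parse().map_err(|_| "malformed Content-Length")?;
            match agreed {
                None => agreed = Some(parsed),
                Some(prev) if prev == parsed => {}
                Some(_) => return Err("conflicting Content-Length"),
            }
        }
        Ok(agreed)
    }

    // content_length(["4", "2"]) => Err("conflicting Content-Length")
    // content_length(["+3"])     => Err("malformed Content-Length")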
} }

File diff suppressed because it is too large


@ -1,29 +1,27 @@
use std::{ use std::io::Write;
cmp, use std::marker::PhantomData;
io::{self, Write as _}, use std::ptr::copy_nonoverlapping;
marker::PhantomData, use std::slice::from_raw_parts_mut;
ptr::copy_nonoverlapping, use std::{cmp, io};
slice::from_raw_parts_mut,
};
use bytes::{BufMut, BytesMut}; use bytes::{BufMut, BytesMut};
use crate::{ use crate::body::BodySize;
body::BodySize, use crate::config::ServiceConfig;
header::{ use crate::header::map;
map::Value, HeaderMap, HeaderName, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING, use crate::helpers;
}, use crate::http::header::{CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
helpers, ConnectionType, RequestHeadType, Response, ServiceConfig, StatusCode, Version, use crate::http::{HeaderMap, StatusCode, Version};
}; use crate::message::{ConnectionType, RequestHeadType};
use crate::response::Response;
const AVERAGE_HEADER_SIZE: usize = 30; const AVERAGE_HEADER_SIZE: usize = 30;
#[derive(Debug)] #[derive(Debug)]
pub(crate) struct MessageEncoder<T: MessageType> { pub(crate) struct MessageEncoder<T: MessageType> {
#[allow(dead_code)]
pub length: BodySize, pub length: BodySize,
pub te: TransferEncoding, pub te: TransferEncoding,
_phantom: PhantomData<T>, _t: PhantomData<T>,
} }
impl<T: MessageType> Default for MessageEncoder<T> { impl<T: MessageType> Default for MessageEncoder<T> {
@ -31,7 +29,7 @@ impl<T: MessageType> Default for MessageEncoder<T> {
MessageEncoder { MessageEncoder {
length: BodySize::None, length: BodySize::None,
te: TransferEncoding::empty(), te: TransferEncoding::empty(),
_phantom: PhantomData, _t: PhantomData,
} }
} }
} }
@ -56,7 +54,7 @@ pub(crate) trait MessageType: Sized {
dst: &mut BytesMut, dst: &mut BytesMut,
version: Version, version: Version,
mut length: BodySize, mut length: BodySize,
conn_type: ConnectionType, ctype: ConnectionType,
config: &ServiceConfig, config: &ServiceConfig,
) -> io::Result<()> { ) -> io::Result<()> {
let chunked = self.chunked(); let chunked = self.chunked();
@ -71,24 +69,14 @@ pub(crate) trait MessageType: Sized {
| StatusCode::PROCESSING | StatusCode::PROCESSING
| StatusCode::NO_CONTENT => { | StatusCode::NO_CONTENT => {
// skip content-length and transfer-encoding headers // skip content-length and transfer-encoding headers
// see https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.1 // See https://tools.ietf.org/html/rfc7230#section-3.3.1
// and https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.2 // and https://tools.ietf.org/html/rfc7230#section-3.3.2
skip_len = true; skip_len = true;
length = BodySize::None length = BodySize::None
} }
StatusCode::NOT_MODIFIED => {
// 304 responses should never have a body but should retain a manually set
// content-length header
// see https://datatracker.ietf.org/doc/html/rfc7232#section-4.1
skip_len = false;
length = BodySize::None;
}
_ => {} _ => {}
} }
} }
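        // Illustrative-only summary of the rule applied above (not a function
        // in this crate): informational responses and 204 drop any framing
        // headers the handler set, while 304 keeps a manually set
        // Content-Length even though no body is written.
        fn strips_framing_headers(status: u16) -> bool {
            matches!(status, 100..=199 | 204)
        }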
match length { match length {
BodySize::Stream => { BodySize::Stream => {
if chunked { if chunked {
@ -103,14 +91,19 @@ pub(crate) trait MessageType: Sized {
dst.put_slice(b"\r\n"); dst.put_slice(b"\r\n");
} }
} }
BodySize::Sized(0) if camel_case => dst.put_slice(b"\r\nContent-Length: 0\r\n"), BodySize::Empty => {
BodySize::Sized(0) => dst.put_slice(b"\r\ncontent-length: 0\r\n"), if camel_case {
dst.put_slice(b"\r\nContent-Length: 0\r\n");
} else {
dst.put_slice(b"\r\ncontent-length: 0\r\n");
}
}
BodySize::Sized(len) => helpers::write_content_length(len, dst), BodySize::Sized(len) => helpers::write_content_length(len, dst),
BodySize::None => dst.put_slice(b"\r\n"), BodySize::None => dst.put_slice(b"\r\n"),
} }
// Connection // Connection
match conn_type { match ctype {
ConnectionType::Upgrade => dst.put_slice(b"connection: upgrade\r\n"), ConnectionType::Upgrade => dst.put_slice(b"connection: upgrade\r\n"),
ConnectionType::KeepAlive if version < Version::HTTP_11 => { ConnectionType::KeepAlive if version < Version::HTTP_11 => {
if camel_case { if camel_case {
@ -126,14 +119,24 @@ pub(crate) trait MessageType: Sized {
dst.put_slice(b"connection: close\r\n") dst.put_slice(b"connection: close\r\n")
} }
} }
_ => {} _ => (),
} }
// merging headers from head and extra headers. HeaderMap::new() does not allocate.
let empty_headers = HeaderMap::new();
let extra_headers = self.extra_headers().unwrap_or(&empty_headers);
let headers = self
.headers()
.inner
.iter()
.filter(|(name, _)| !extra_headers.contains_key(*name))
.chain(extra_headers.inner.iter());
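        // The merge above gives the extra headers priority: any name present
        // there hides the same name coming from the regular header map. The
        // same rule with plain HashMaps (hypothetical helper, names assumed
        // pre-normalised):
        fn merged<'a>(
            head: &'a std::collections::HashMap<String, String>,
            extra: &'a std::collections::HashMap<String, String>,
        ) -> impl Iterator<Item = (&'a String, &'a String)> {
            head.iter()
                .filter(move |(name, _)| !extra.contains_key(*name))
                .chain(extra.iter())
        }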
// write headers // write headers
let mut has_date = false; let mut has_date = false;
let mut buf = dst.chunk_mut().as_mut_ptr(); let mut buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
let mut remaining = dst.capacity() - dst.len(); let mut remaining = dst.capacity() - dst.len();
// tracks bytes written since last buffer resize // tracks bytes written since last buffer resize
@ -141,19 +144,19 @@ pub(crate) trait MessageType: Sized {
// container's knowledge, this is used to sync the containers cursor after data is written // container's knowledge, this is used to sync the containers cursor after data is written
let mut pos = 0; let mut pos = 0;
self.write_headers(|key, value| { for (key, value) in headers {
match *key { match *key {
CONNECTION => return, CONNECTION => continue,
TRANSFER_ENCODING | CONTENT_LENGTH if skip_len => return, TRANSFER_ENCODING | CONTENT_LENGTH if skip_len => continue,
DATE => has_date = true, DATE => has_date = true,
_ => {} _ => (),
} }
let k = key.as_str().as_bytes(); let k = key.as_str().as_bytes();
let k_len = k.len(); let k_len = k.len();
// TODO: drain? match value {
for val in value.iter() { map::Value::One(ref val) => {
let v = val.as_ref(); let v = val.as_ref();
let v_len = v.len(); let v_len = v.len();
@ -161,6 +164,8 @@ pub(crate) trait MessageType: Sized {
let len = k_len + v_len + 4; let len = k_len + v_len + 4;
if len > remaining { if len > remaining {
// not enough room in buffer for this header; reserve more space
// SAFETY: all the bytes written up to position "pos" are initialized // SAFETY: all the bytes written up to position "pos" are initialized
// the written byte count and pointer advancement are kept in sync // the written byte count and pointer advancement are kept in sync
unsafe { unsafe {
@ -173,15 +178,61 @@ pub(crate) trait MessageType: Sized {
// re-assign buf raw pointer since it's possible that the buffer was // re-assign buf raw pointer since it's possible that the buffer was
// reallocated and/or resized // reallocated and/or resized
buf = dst.chunk_mut().as_mut_ptr(); buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
}
// SAFETY: on each write, it is enough to ensure that the advancement of the
// cursor matches the number of bytes written
unsafe {
// use upper Camel-Case
if camel_case {
write_camel_case(k, from_raw_parts_mut(buf, k_len))
} else {
write_data(k, buf, k_len)
}
buf = buf.add(k_len);
write_data(b": ", buf, 2);
buf = buf.add(2);
write_data(v, buf, v_len);
buf = buf.add(v_len);
write_data(b"\r\n", buf, 2);
buf = buf.add(2);
}
pos += len;
remaining -= len;
}
map::Value::Multi(ref vec) => {
for val in vec {
let v = val.as_ref();
let v_len = v.len();
let len = k_len + v_len + 4;
if len > remaining {
// SAFETY: all the bytes written up to position "pos" are initialized
// the written byte count and pointer advancement are kept in sync
unsafe {
dst.advance_mut(pos);
}
pos = 0;
dst.reserve(len * 2);
remaining = dst.capacity() - dst.len();
// re-assign buf raw pointer since it's possible that the buffer was
// reallocated and/or resized
buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
} }
// SAFETY: on each write, it is enough to ensure that the advancement of // SAFETY: on each write, it is enough to ensure that the advancement of
// the cursor matches the number of bytes written // the cursor matches the number of bytes written
unsafe { unsafe {
if camel_case { if camel_case {
// use Camel-Case headers write_camel_case(k, from_raw_parts_mut(buf, k_len));
write_camel_case(k, buf, k_len);
} else { } else {
write_data(k, buf, k_len); write_data(k, buf, k_len);
} }
@ -201,7 +252,9 @@ pub(crate) trait MessageType: Sized {
pos += len; pos += len;
remaining -= len; remaining -= len;
} }
}); }
}
}
// final cursor synchronization with the bytes container // final cursor synchronization with the bytes container
// //
@ -221,24 +274,6 @@ pub(crate) trait MessageType: Sized {
Ok(()) Ok(())
} }
fn write_headers<F>(&mut self, mut f: F)
where
F: FnMut(&HeaderName, &Value),
{
match self.extra_headers() {
Some(headers) => {
// merging headers from head and extra headers.
self.headers()
.inner
.iter()
.filter(|(name, _)| !headers.contains_key(*name))
.chain(headers.inner.iter())
.for_each(|(k, v)| f(k, v))
}
None => self.headers().inner.iter().for_each(|(k, v)| f(k, v)),
}
}
} }
impl MessageType for Response<()> { impl MessageType for Response<()> {
@ -295,7 +330,7 @@ impl MessageType for RequestHeadType {
let head = self.as_ref(); let head = self.as_ref();
dst.reserve(256 + head.headers.len() * AVERAGE_HEADER_SIZE); dst.reserve(256 + head.headers.len() * AVERAGE_HEADER_SIZE);
write!( write!(
helpers::MutWriter(dst), Writer(dst),
"{} {} {}", "{} {} {}",
head.method, head.method,
head.uri.path_and_query().map(|u| u.as_str()).unwrap_or("/"), head.uri.path_and_query().map(|u| u.as_str()).unwrap_or("/"),
@ -305,7 +340,11 @@ impl MessageType for RequestHeadType {
Version::HTTP_11 => "HTTP/1.1", Version::HTTP_11 => "HTTP/1.1",
Version::HTTP_2 => "HTTP/2.0", Version::HTTP_2 => "HTTP/2.0",
Version::HTTP_3 => "HTTP/3.0", Version::HTTP_3 => "HTTP/3.0",
_ => return Err(io::Error::new(io::ErrorKind::Other, "unsupported version")), _ =>
return Err(io::Error::new(
io::ErrorKind::Other,
"unsupported version"
)),
} }
) )
.map_err(|e| io::Error::new(io::ErrorKind::Other, e)) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))
@ -331,13 +370,13 @@ impl<T: MessageType> MessageEncoder<T> {
stream: bool, stream: bool,
version: Version, version: Version,
length: BodySize, length: BodySize,
conn_type: ConnectionType, ctype: ConnectionType,
config: &ServiceConfig, config: &ServiceConfig,
) -> io::Result<()> { ) -> io::Result<()> {
// transfer encoding // transfer encoding
if !head { if !head {
self.te = match length { self.te = match length {
BodySize::Sized(0) => TransferEncoding::empty(), BodySize::Empty => TransferEncoding::empty(),
BodySize::Sized(len) => TransferEncoding::length(len), BodySize::Sized(len) => TransferEncoding::length(len),
BodySize::Stream => { BodySize::Stream => {
if message.chunked() && !stream { if message.chunked() && !stream {
@ -353,7 +392,7 @@ impl<T: MessageType> MessageEncoder<T> {
} }
message.encode_status(dst)?; message.encode_status(dst)?;
message.encode_headers(dst, version, length, conn_type, config) message.encode_headers(dst, version, length, ctype, config)
} }
} }
@ -367,12 +406,10 @@ pub(crate) struct TransferEncoding {
enum TransferEncodingKind { enum TransferEncodingKind {
/// An Encoder for when Transfer-Encoding includes `chunked`. /// An Encoder for when Transfer-Encoding includes `chunked`.
Chunked(bool), Chunked(bool),
/// An Encoder for when Content-Length is set. /// An Encoder for when Content-Length is set.
/// ///
/// Enforces that the body is not longer than the Content-Length header. /// Enforces that the body is not longer than the Content-Length header.
Length(u64), Length(u64),
/// An Encoder for when Content-Length is not known. /// An Encoder for when Content-Length is not known.
/// ///
/// Application decides when to stop writing. /// Application decides when to stop writing.
@ -426,7 +463,7 @@ impl TransferEncoding {
*eof = true; *eof = true;
buf.extend_from_slice(b"0\r\n\r\n"); buf.extend_from_slice(b"0\r\n\r\n");
} else { } else {
writeln!(helpers::MutWriter(buf), "{:X}\r", msg.len()) writeln!(Writer(buf), "{:X}\r", msg.len())
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
buf.reserve(msg.len() + 2); buf.reserve(msg.len() + 2);
@ -476,45 +513,51 @@ impl TransferEncoding {
} }
} }
struct Writer<'a>(pub &'a mut BytesMut);
impl<'a> io::Write for Writer<'a> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.extend_from_slice(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
/// # Safety /// # Safety
/// Callers must ensure that the given `len` matches the given `value` length and that `buf` is /// Callers must ensure that the given length matches given value length.
/// valid for writes of at least `len` bytes.
unsafe fn write_data(value: &[u8], buf: *mut u8, len: usize) { unsafe fn write_data(value: &[u8], buf: *mut u8, len: usize) {
debug_assert_eq!(value.len(), len); debug_assert_eq!(value.len(), len);
copy_nonoverlapping(value.as_ptr(), buf, len); copy_nonoverlapping(value.as_ptr(), buf, len);
} }
/// # Safety fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
/// Callers must ensure that the given `len` matches the given `value` length and that `buf` is let mut index = 0;
/// valid for writes of at least `len` bytes. let key = value;
unsafe fn write_camel_case(value: &[u8], buf: *mut u8, len: usize) { let mut key_iter = key.iter();
// first copy entire (potentially wrong) slice to output
write_data(value, buf, len);
// SAFETY: We just initialized the buffer with `value`
let buffer = from_raw_parts_mut(buf, len);
let mut iter = value.iter();
// first character should be uppercase
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[0] = c & 0b1101_1111;
}
// track 1 ahead of the current position since that's the location being assigned to
let mut index = 2;
// remaining characters after hyphens should also be uppercase
while let Some(&c) = iter.next() {
if c == b'-' {
// advance iter by one and uppercase if needed
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[index] = c & 0b1101_1111;
}
}
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1; index += 1;
} }
} else {
return;
}
while let Some(c) = key_iter.next() {
buffer[index] = *c;
index += 1;
if *c == b'-' {
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
}
}
}
} }
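// Quick illustration of the camel-casing applied above: clearing bit 0x20
// (`b & 0b1101_1111`) maps `a`-`z` onto `A`-`Z`, and the loop upper-cases the
// first byte plus every byte that follows a `-`. `camel_case` is a
// hypothetical free function, not part of this crate.
fn camel_case(name: &[u8]) -> Vec<u8> {
    let mut out = name.to_vec();
    let mut upper_next = true;
    for b in &mut out {
        if upper_next && b.is_ascii_lowercase() {
            *b &= 0b1101_1111; // same as subtracting 32 for ASCII a-z
        }
        upper_next = *b == b'-';
    }
    out
}

// camel_case(b"content-length") == b"Content-Length"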
#[cfg(test)] #[cfg(test)]
@ -525,10 +568,8 @@ mod tests {
use http::header::AUTHORIZATION; use http::header::AUTHORIZATION;
use super::*; use super::*;
use crate::{ use crate::http::header::{HeaderValue, CONTENT_TYPE};
header::{HeaderValue, CONTENT_TYPE}, use crate::RequestHead;
RequestHead,
};
#[test] #[test]
fn test_chunked_te() { fn test_chunked_te() {
@ -544,8 +585,8 @@ mod tests {
); );
} }
#[actix_rt::test] #[test]
async fn test_camel_case() { fn test_camel_case() {
let mut bytes = BytesMut::with_capacity(2048); let mut bytes = BytesMut::with_capacity(2048);
let mut head = RequestHead::default(); let mut head = RequestHead::default();
head.set_camel_case_headers(true); head.set_camel_case_headers(true);
@ -558,12 +599,12 @@ mod tests {
let _ = head.encode_headers( let _ = head.encode_headers(
&mut bytes, &mut bytes,
Version::HTTP_11, Version::HTTP_11,
BodySize::Sized(0), BodySize::Empty,
ConnectionType::Close, ConnectionType::Close,
&ServiceConfig::default(), &ServiceConfig::default(),
); );
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap(); let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("Content-Length: 0\r\n")); assert!(data.contains("Content-Length: 0\r\n"));
assert!(data.contains("Connection: close\r\n")); assert!(data.contains("Connection: close\r\n"));
assert!(data.contains("Content-Type: plain/text\r\n")); assert!(data.contains("Content-Type: plain/text\r\n"));
@ -576,7 +617,8 @@ mod tests {
ConnectionType::KeepAlive, ConnectionType::KeepAlive,
&ServiceConfig::default(), &ServiceConfig::default(),
); );
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap(); let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("Transfer-Encoding: chunked\r\n")); assert!(data.contains("Transfer-Encoding: chunked\r\n"));
assert!(data.contains("Content-Type: plain/text\r\n")); assert!(data.contains("Content-Type: plain/text\r\n"));
assert!(data.contains("Date: date\r\n")); assert!(data.contains("Date: date\r\n"));
@ -597,15 +639,16 @@ mod tests {
ConnectionType::KeepAlive, ConnectionType::KeepAlive,
&ServiceConfig::default(), &ServiceConfig::default(),
); );
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap(); let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("transfer-encoding: chunked\r\n")); assert!(data.contains("transfer-encoding: chunked\r\n"));
assert!(data.contains("content-type: xml\r\n")); assert!(data.contains("content-type: xml\r\n"));
assert!(data.contains("content-type: plain/text\r\n")); assert!(data.contains("content-type: plain/text\r\n"));
assert!(data.contains("date: date\r\n")); assert!(data.contains("date: date\r\n"));
} }
#[actix_rt::test] #[test]
async fn test_extra_headers() { fn test_extra_headers() {
let mut bytes = BytesMut::with_capacity(2048); let mut bytes = BytesMut::with_capacity(2048);
let mut head = RequestHead::default(); let mut head = RequestHead::default();
@ -626,25 +669,28 @@ mod tests {
let _ = head.encode_headers( let _ = head.encode_headers(
&mut bytes, &mut bytes,
Version::HTTP_11, Version::HTTP_11,
BodySize::Sized(0), BodySize::Empty,
ConnectionType::Close, ConnectionType::Close,
&ServiceConfig::default(), &ServiceConfig::default(),
); );
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap(); let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("content-length: 0\r\n")); assert!(data.contains("content-length: 0\r\n"));
assert!(data.contains("connection: close\r\n")); assert!(data.contains("connection: close\r\n"));
assert!(data.contains("authorization: another authorization\r\n")); assert!(data.contains("authorization: another authorization\r\n"));
assert!(data.contains("date: date\r\n")); assert!(data.contains("date: date\r\n"));
} }
#[actix_rt::test] #[test]
async fn test_no_content_length() { fn test_no_content_length() {
let mut bytes = BytesMut::with_capacity(2048); let mut bytes = BytesMut::with_capacity(2048);
let mut res = Response::with_body(StatusCode::SWITCHING_PROTOCOLS, ()); let mut res: Response<()> =
res.headers_mut().insert(DATE, HeaderValue::from_static("")); Response::new(StatusCode::SWITCHING_PROTOCOLS).into_body::<()>();
res.headers_mut() res.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static("0")); .insert(DATE, HeaderValue::from_static(&""));
res.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static(&"0"));
let _ = res.encode_headers( let _ = res.encode_headers(
&mut bytes, &mut bytes,
@ -653,7 +699,8 @@ mod tests {
ConnectionType::Upgrade, ConnectionType::Upgrade,
&ServiceConfig::default(), &ServiceConfig::default(),
); );
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap(); let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(!data.contains("content-length: 0\r\n")); assert!(!data.contains("content-length: 0\r\n"));
assert!(!data.contains("transfer-encoding: chunked\r\n")); assert!(!data.contains("transfer-encoding: chunked\r\n"));
} }


@ -1,33 +1,38 @@
use actix_service::{Service, ServiceFactory}; use std::task::{Context, Poll};
use actix_utils::future::{ready, Ready};
use crate::{Error, Request}; use actix_service::{Service, ServiceFactory};
use futures_util::future::{ok, Ready};
use crate::error::Error;
use crate::request::Request;
pub struct ExpectHandler; pub struct ExpectHandler;
impl ServiceFactory<Request> for ExpectHandler { impl ServiceFactory for ExpectHandler {
type Config = ();
type Request = Request;
type Response = Request; type Response = Request;
type Error = Error; type Error = Error;
type Config = ();
type Service = ExpectHandler; type Service = ExpectHandler;
type InitError = Error; type InitError = Error;
type Future = Ready<Result<Self::Service, Self::InitError>>; type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: Self::Config) -> Self::Future { fn new_service(&self, _: ()) -> Self::Future {
ready(Ok(ExpectHandler)) ok(ExpectHandler)
} }
} }
impl Service<Request> for ExpectHandler { impl Service for ExpectHandler {
type Request = Request;
type Response = Request; type Response = Request;
type Error = Error; type Error = Error;
type Future = Ready<Result<Self::Response, Self::Error>>; type Future = Ready<Result<Self::Response, Self::Error>>;
actix_service::always_ready!(); fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&self, req: Request) -> Self::Future { fn call(&mut self, req: Request) -> Self::Future {
ready(Ok(req)) ok(req)
// TODO: add some way to trigger error
// Err(error::ErrorExpectationFailed("test"))
} }
} }


@ -1,8 +1,6 @@
//! HTTP/1 protocol implementation. //! HTTP/1 implementation
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
mod chunked;
mod client; mod client;
mod codec; mod codec;
mod decoder; mod decoder;
@ -19,7 +17,7 @@ pub use self::codec::Codec;
pub use self::dispatcher::Dispatcher; pub use self::dispatcher::Dispatcher;
pub use self::expect::ExpectHandler; pub use self::expect::ExpectHandler;
pub use self::payload::Payload; pub use self::payload::Payload;
pub use self::service::{H1Service, H1ServiceHandler}; pub use self::service::{H1Service, H1ServiceHandler, OneRequest};
pub use self::upgrade::UpgradeHandler; pub use self::upgrade::UpgradeHandler;
pub use self::utils::SendResponse; pub use self::utils::SendResponse;
@ -59,7 +57,7 @@ pub(crate) fn reserve_readbuf(src: &mut BytesMut) {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::Request; use crate::request::Request;
impl Message<Request> { impl Message<Request> {
pub fn message(self) -> Request { pub fn message(self) -> Request {


@ -1,13 +1,11 @@
//! Payload stream //! Payload stream
use std::cell::RefCell;
use std::collections::VecDeque;
use std::pin::Pin;
use std::rc::{Rc, Weak};
use std::task::{Context, Poll};
use std::{ use actix_utils::task::LocalWaker;
cell::RefCell,
collections::VecDeque,
pin::Pin,
rc::{Rc, Weak},
task::{Context, Poll, Waker},
};
use bytes::Bytes; use bytes::Bytes;
use futures_core::Stream; use futures_core::Stream;
@ -25,32 +23,39 @@ pub enum PayloadStatus {
/// Buffered stream of bytes chunks /// Buffered stream of bytes chunks
/// ///
/// Payload stores chunks in a vector. First chunk can be received with `poll_next`. Payload does /// Payload stores chunks in a vector. First chunk can be received with
/// not notify current task when new data is available. /// `.readany()` method. Payload stream is not thread safe. Payload does not
/// notify current task when new data is available.
/// ///
/// Payload can be used as `Response` body stream. /// Payload stream can be used as `Response` body stream.
#[derive(Debug)] #[derive(Debug)]
pub struct Payload { pub struct Payload {
inner: Rc<RefCell<Inner>>, inner: Rc<RefCell<Inner>>,
} }
impl Payload { impl Payload {
/// Creates a payload stream. /// Create payload stream.
/// ///
/// This method construct two objects responsible for bytes stream generation: /// This method construct two objects responsible for bytes stream
/// - `PayloadSender` - *Sender* side of the stream /// generation.
/// - `Payload` - *Receiver* side of the stream ///
/// * `PayloadSender` - *Sender* side of the stream
///
/// * `Payload` - *Receiver* side of the stream
pub fn create(eof: bool) -> (PayloadSender, Payload) { pub fn create(eof: bool) -> (PayloadSender, Payload) {
let shared = Rc::new(RefCell::new(Inner::new(eof))); let shared = Rc::new(RefCell::new(Inner::new(eof)));
( (
PayloadSender::new(Rc::downgrade(&shared)), PayloadSender {
inner: Rc::downgrade(&shared),
},
Payload { inner: shared }, Payload { inner: shared },
) )
} }
/// Creates an empty payload. /// Create empty payload
pub(crate) fn empty() -> Payload { #[doc(hidden)]
pub fn empty() -> Payload {
Payload { Payload {
inner: Rc::new(RefCell::new(Inner::new(true))), inner: Rc::new(RefCell::new(Inner::new(true))),
} }
@ -73,6 +78,14 @@ impl Payload {
pub fn unread_data(&mut self, data: Bytes) { pub fn unread_data(&mut self, data: Bytes) {
self.inner.borrow_mut().unread_data(data); self.inner.borrow_mut().unread_data(data);
} }
#[inline]
pub fn readany(
&mut self,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> {
self.inner.borrow_mut().readany(cx)
}
} }
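// Hedged usage sketch of the sender/receiver pair described above; it relies
// only on items already imported by this file plus the `actix_rt` /
// `futures_util` test helpers used further down. With `eof == false` an
// empty buffer leaves the stream pending instead of ending it.
#[actix_rt::test]
async fn payload_usage_sketch() {
    let (_sender, mut payload) = Payload::create(false);
    payload.unread_data(Bytes::from_static(b"chunk"));

    let first = futures_util::future::poll_fn(|cx| Pin::new(&mut payload).poll_next(cx))
        .await
        .unwrap()
        .unwrap();
    assert_eq!(first, Bytes::from_static(b"chunk"));
    // the internal buffer is now empty and eof was false, so polling again
    // would return Poll::Pending until the sender feeds more data
}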
impl Stream for Payload { impl Stream for Payload {
@ -82,7 +95,7 @@ impl Stream for Payload {
self: Pin<&mut Self>, self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> { ) -> Poll<Option<Result<Bytes, PayloadError>>> {
Pin::new(&mut *self.inner.borrow_mut()).poll_next(cx) self.inner.borrow_mut().readany(cx)
} }
} }
@ -92,10 +105,6 @@ pub struct PayloadSender {
} }
impl PayloadSender { impl PayloadSender {
fn new(inner: Weak<RefCell<Inner>>) -> Self {
Self { inner }
}
#[inline] #[inline]
pub fn set_error(&mut self, err: PayloadError) { pub fn set_error(&mut self, err: PayloadError) {
if let Some(shared) = self.inner.upgrade() { if let Some(shared) = self.inner.upgrade() {
@ -125,7 +134,7 @@ impl PayloadSender {
if shared.borrow().need_read { if shared.borrow().need_read {
PayloadStatus::Read PayloadStatus::Read
} else { } else {
shared.borrow_mut().register_io(cx); shared.borrow_mut().io_task.register(cx.waker());
PayloadStatus::Pause PayloadStatus::Pause
} }
} else { } else {
@ -141,8 +150,8 @@ struct Inner {
err: Option<PayloadError>, err: Option<PayloadError>,
need_read: bool, need_read: bool,
items: VecDeque<Bytes>, items: VecDeque<Bytes>,
task: Option<Waker>, task: LocalWaker,
io_task: Option<Waker>, io_task: LocalWaker,
} }
impl Inner { impl Inner {
@ -153,46 +162,8 @@ impl Inner {
err: None, err: None,
items: VecDeque::new(), items: VecDeque::new(),
need_read: true, need_read: true,
task: None, task: LocalWaker::new(),
io_task: None, io_task: LocalWaker::new(),
}
}
/// Wake up future waiting for payload data to be available.
fn wake(&mut self) {
if let Some(waker) = self.task.take() {
waker.wake();
}
}
/// Wake up future feeding data to Payload.
fn wake_io(&mut self) {
if let Some(waker) = self.io_task.take() {
waker.wake();
}
}
/// Register future waiting data from payload.
/// Waker would be used in `Inner::wake`
fn register(&mut self, cx: &mut Context<'_>) {
if self
.task
.as_ref()
.map_or(true, |w| !cx.waker().will_wake(w))
{
self.task = Some(cx.waker().clone());
}
}
// Register future feeding data to payload.
/// Waker would be used in `Inner::wake_io`
fn register_io(&mut self, cx: &mut Context<'_>) {
if self
.io_task
.as_ref()
.map_or(true, |w| !cx.waker().will_wake(w))
{
self.io_task = Some(cx.waker().clone());
} }
} }
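// The two `register*` helpers above follow a common manual-waker pattern:
// keep only the most recent `Waker`, and skip the clone when the stored one
// would already wake the current task (`will_wake`). Standalone sketch, not
// part of this crate:
struct WakerSlot {
    waker: Option<std::task::Waker>,
}

impl WakerSlot {
    fn register(&mut self, cx: &std::task::Context<'_>) {
        // clone only if the slot is empty or points at a different task
        if self
            .waker
            .as_ref()
            .map_or(true, |w| !cx.waker().will_wake(w))
        {
            self.waker = Some(cx.waker().clone());
        }
    }

    fn wake(&mut self) {
        // consume the waker so a second wake needs a fresh registration
        if let Some(waker) = self.waker.take() {
            waker.wake();
        }
    }
}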
@ -211,7 +182,9 @@ impl Inner {
self.len += data.len(); self.len += data.len();
self.items.push_back(data); self.items.push_back(data);
self.need_read = self.len < MAX_BUFFER_SIZE; self.need_read = self.len < MAX_BUFFER_SIZE;
self.wake(); if let Some(task) = self.task.take() {
task.wake()
}
} }
#[cfg(test)] #[cfg(test)]
@ -219,8 +192,8 @@ impl Inner {
self.len self.len
} }
fn poll_next( fn readany(
mut self: Pin<&mut Self>, &mut self,
cx: &mut Context<'_>, cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> { ) -> Poll<Option<Result<Bytes, PayloadError>>> {
if let Some(data) = self.items.pop_front() { if let Some(data) = self.items.pop_front() {
@ -228,9 +201,9 @@ impl Inner {
self.need_read = self.len < MAX_BUFFER_SIZE; self.need_read = self.len < MAX_BUFFER_SIZE;
if self.need_read && !self.eof { if self.need_read && !self.eof {
self.register(cx); self.task.register(cx.waker());
} }
self.wake_io(); self.io_task.wake();
Poll::Ready(Some(Ok(data))) Poll::Ready(Some(Ok(data)))
} else if let Some(err) = self.err.take() { } else if let Some(err) = self.err.take() {
Poll::Ready(Some(Err(err))) Poll::Ready(Some(Err(err)))
@ -238,8 +211,8 @@ impl Inner {
Poll::Ready(None) Poll::Ready(None)
} else { } else {
self.need_read = true; self.need_read = true;
self.register(cx); self.task.register(cx.waker());
self.wake_io(); self.io_task.wake();
Poll::Pending Poll::Pending
} }
} }
@ -252,18 +225,8 @@ impl Inner {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::panic::{RefUnwindSafe, UnwindSafe};
use actix_utils::future::poll_fn;
use static_assertions::{assert_impl_all, assert_not_impl_any};
use super::*; use super::*;
use futures_util::future::poll_fn;
assert_impl_all!(Payload: Unpin);
assert_not_impl_any!(Payload: Send, Sync, UnwindSafe, RefUnwindSafe);
assert_impl_all!(Inner: Unpin, Send, Sync);
assert_not_impl_any!(Inner: UnwindSafe, RefUnwindSafe);
#[actix_rt::test] #[actix_rt::test]
async fn test_unread_data() { async fn test_unread_data() {
@ -275,10 +238,7 @@ mod tests {
assert_eq!( assert_eq!(
Bytes::from("data"), Bytes::from("data"),
poll_fn(|cx| Pin::new(&mut payload).poll_next(cx)) poll_fn(|cx| payload.readany(cx)).await.unwrap().unwrap()
.await
.unwrap()
.unwrap()
); );
} }
} }


@ -1,49 +1,50 @@
use std::{ use std::future::Future;
fmt, use std::marker::PhantomData;
marker::PhantomData, use std::pin::Pin;
net, use std::rc::Rc;
rc::Rc, use std::task::{Context, Poll};
task::{Context, Poll}, use std::{fmt, net};
};
use actix_codec::{AsyncRead, AsyncWrite, Framed}; use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_rt::net::TcpStream; use actix_rt::net::TcpStream;
use actix_service::{ use actix_service::{pipeline_factory, IntoServiceFactory, Service, ServiceFactory};
fn_service, IntoServiceFactory, Service, ServiceFactory, ServiceFactoryExt as _, use futures_core::ready;
}; use futures_util::future::{ok, Ready};
use actix_utils::future::ready;
use futures_core::future::LocalBoxFuture;
use crate::{ use crate::body::MessageBody;
body::{BoxBody, MessageBody}, use crate::cloneable::CloneableService;
config::ServiceConfig, use crate::config::ServiceConfig;
error::DispatchError, use crate::error::{DispatchError, Error, ParseError};
service::HttpServiceHandler, use crate::helpers::DataFactory;
ConnectCallback, OnConnectData, Request, Response, use crate::request::Request;
}; use crate::response::Response;
use crate::{ConnectCallback, Extensions};
use super::{codec::Codec, dispatcher::Dispatcher, ExpectHandler, UpgradeHandler}; use super::codec::Codec;
use super::dispatcher::Dispatcher;
use super::{ExpectHandler, Message, UpgradeHandler};
/// `ServiceFactory` implementation for HTTP1 transport /// `ServiceFactory` implementation for HTTP1 transport
pub struct H1Service<T, S, B, X = ExpectHandler, U = UpgradeHandler> { pub struct H1Service<T, S, B, X = ExpectHandler, U = UpgradeHandler<T>> {
srv: S, srv: S,
cfg: ServiceConfig, cfg: ServiceConfig,
expect: X, expect: X,
upgrade: Option<U>, upgrade: Option<U>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
on_connect_ext: Option<Rc<ConnectCallback<T>>>, on_connect_ext: Option<Rc<ConnectCallback<T>>>,
_phantom: PhantomData<B>, _t: PhantomData<(T, B)>,
} }
impl<T, S, B> H1Service<T, S, B> impl<T, S, B> H1Service<T, S, B>
where where
S: ServiceFactory<Request, Config = ()>, S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Response<BoxBody>>, S::Error: Into<Error>,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
B: MessageBody, B: MessageBody,
{ {
/// Create new `HttpService` instance with config. /// Create new `HttpService` instance with config.
pub(crate) fn with_config<F: IntoServiceFactory<S, Request>>( pub(crate) fn with_config<F: IntoServiceFactory<S>>(
cfg: ServiceConfig, cfg: ServiceConfig,
service: F, service: F,
) -> Self { ) -> Self {
@ -52,45 +53,44 @@ where
srv: service.into_factory(), srv: service.into_factory(),
expect: ExpectHandler, expect: ExpectHandler,
upgrade: None, upgrade: None,
on_connect: None,
on_connect_ext: None, on_connect_ext: None,
_phantom: PhantomData, _t: PhantomData,
} }
} }
} }
impl<S, B, X, U> H1Service<TcpStream, S, B, X, U> impl<S, B, X, U> H1Service<TcpStream, S, B, X, U>
where where
S: ServiceFactory<Request, Config = ()>, S: ServiceFactory<Config = (), Request = Request>,
S::Future: 'static, S::Error: Into<Error>,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
B: MessageBody, B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X: ServiceFactory<Request, Config = (), Response = Request>, X::Error: Into<Error>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug, X::InitError: fmt::Debug,
U: ServiceFactory<
U: ServiceFactory<(Request, Framed<TcpStream, Codec>), Config = (), Response = ()>, Config = (),
U::Future: 'static, Request = (Request, Framed<TcpStream, Codec>),
U::Error: fmt::Display + Into<Response<BoxBody>>, Response = (),
>,
U::Error: fmt::Display + Into<Error>,
U::InitError: fmt::Debug, U::InitError: fmt::Debug,
{ {
/// Create simple tcp stream service /// Create simple tcp stream service
pub fn tcp( pub fn tcp(
self, self,
) -> impl ServiceFactory< ) -> impl ServiceFactory<
TcpStream,
Config = (), Config = (),
Request = TcpStream,
Response = (), Response = (),
Error = DispatchError, Error = DispatchError,
InitError = (), InitError = (),
> { > {
fn_service(|io: TcpStream| { pipeline_factory(|io: TcpStream| {
let peer_addr = io.peer_addr().ok(); let peer_addr = io.peer_addr().ok();
ready(Ok((io, peer_addr))) ok((io, peer_addr))
}) })
.and_then(self) .and_then(self)
} }
@ -100,57 +100,46 @@ where
mod openssl { mod openssl {
use super::*; use super::*;
use actix_tls::accept::{ use actix_tls::openssl::{Acceptor, SslAcceptor, SslStream};
openssl::{ use actix_tls::{openssl::HandshakeError, TlsError};
reexports::{Error as SslError, SslAcceptor},
Acceptor, TlsStream,
},
TlsError,
};
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U> impl<S, B, X, U> H1Service<SslStream<TcpStream>, S, B, X, U>
where where
S: ServiceFactory<Request, Config = ()>, S: ServiceFactory<Config = (), Request = Request>,
S::Future: 'static, S::Error: Into<Error>,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
B: MessageBody, B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X: ServiceFactory<Request, Config = (), Response = Request>, X::Error: Into<Error>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug, X::InitError: fmt::Debug,
U: ServiceFactory< U: ServiceFactory<
(Request, Framed<TlsStream<TcpStream>, Codec>),
Config = (), Config = (),
Request = (Request, Framed<SslStream<TcpStream>, Codec>),
Response = (), Response = (),
>, >,
U::Future: 'static, U::Error: fmt::Display + Into<Error>,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug, U::InitError: fmt::Debug,
{ {
/// Create OpenSSL based service. /// Create openssl based service
pub fn openssl( pub fn openssl(
self, self,
acceptor: SslAcceptor, acceptor: SslAcceptor,
) -> impl ServiceFactory< ) -> impl ServiceFactory<
TcpStream,
Config = (), Config = (),
Request = TcpStream,
Response = (), Response = (),
Error = TlsError<SslError, DispatchError>, Error = TlsError<HandshakeError<TcpStream>, DispatchError>,
InitError = (), InitError = (),
> { > {
pipeline_factory(
Acceptor::new(acceptor) Acceptor::new(acceptor)
.map_init_err(|_| { .map_err(TlsError::Tls)
unreachable!("TLS acceptor service factory does not error on init") .map_init_err(|_| panic!()),
}) )
.map_err(TlsError::into_service_error) .and_then(|io: SslStream<TcpStream>| {
.map(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().peer_addr().ok(); let peer_addr = io.get_ref().peer_addr().ok();
(io, peer_addr) ok((io, peer_addr))
}) })
.and_then(self.map_err(TlsError::Service)) .and_then(self.map_err(TlsError::Service))
} }
@ -159,60 +148,48 @@ mod openssl {
#[cfg(feature = "rustls")] #[cfg(feature = "rustls")]
mod rustls { mod rustls {
use std::io;
use actix_service::ServiceFactoryExt as _;
use actix_tls::accept::{
rustls::{reexports::ServerConfig, Acceptor, TlsStream},
TlsError,
};
use super::*; use super::*;
use actix_tls::rustls::{Acceptor, ServerConfig, TlsStream};
use actix_tls::TlsError;
use std::{fmt, io};
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U> impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
where where
S: ServiceFactory<Request, Config = ()>, S: ServiceFactory<Config = (), Request = Request>,
S::Future: 'static, S::Error: Into<Error>,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
B: MessageBody, B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X: ServiceFactory<Request, Config = (), Response = Request>, X::Error: Into<Error>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug, X::InitError: fmt::Debug,
U: ServiceFactory< U: ServiceFactory<
(Request, Framed<TlsStream<TcpStream>, Codec>),
Config = (), Config = (),
Request = (Request, Framed<TlsStream<TcpStream>, Codec>),
Response = (), Response = (),
>, >,
U::Future: 'static, U::Error: fmt::Display + Into<Error>,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug, U::InitError: fmt::Debug,
{ {
/// Create Rustls based service. /// Create rustls based service
pub fn rustls( pub fn rustls(
self, self,
config: ServerConfig, config: ServerConfig,
) -> impl ServiceFactory< ) -> impl ServiceFactory<
TcpStream,
Config = (), Config = (),
Request = TcpStream,
Response = (), Response = (),
Error = TlsError<io::Error, DispatchError>, Error = TlsError<io::Error, DispatchError>,
InitError = (), InitError = (),
> { > {
pipeline_factory(
Acceptor::new(config) Acceptor::new(config)
.map_init_err(|_| { .map_err(TlsError::Tls)
unreachable!("TLS acceptor service factory does not error on init") .map_init_err(|_| panic!()),
}) )
.map_err(TlsError::into_service_error) .and_then(|io: TlsStream<TcpStream>| {
.map(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok(); let peer_addr = io.get_ref().0.peer_addr().ok();
(io, peer_addr) ok((io, peer_addr))
}) })
.and_then(self.map_err(TlsError::Service)) .and_then(self.map_err(TlsError::Service))
} }
@ -221,16 +198,16 @@ mod rustls {
impl<T, S, B, X, U> H1Service<T, S, B, X, U> impl<T, S, B, X, U> H1Service<T, S, B, X, U>
where where
S: ServiceFactory<Request, Config = ()>, S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Response<BoxBody>>, S::Error: Into<Error>,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
B: MessageBody, B: MessageBody,
{ {
pub fn expect<X1>(self, expect: X1) -> H1Service<T, S, B, X1, U> pub fn expect<X1>(self, expect: X1) -> H1Service<T, S, B, X1, U>
where where
X1: ServiceFactory<Request, Response = Request>, X1: ServiceFactory<Request = Request, Response = Request>,
X1::Error: Into<Response<BoxBody>>, X1::Error: Into<Error>,
X1::InitError: fmt::Debug, X1::InitError: fmt::Debug,
{ {
H1Service { H1Service {
@ -238,14 +215,15 @@ where
cfg: self.cfg, cfg: self.cfg,
srv: self.srv, srv: self.srv,
upgrade: self.upgrade, upgrade: self.upgrade,
on_connect: self.on_connect,
on_connect_ext: self.on_connect_ext, on_connect_ext: self.on_connect_ext,
_phantom: PhantomData, _t: PhantomData,
} }
} }
pub fn upgrade<U1>(self, upgrade: Option<U1>) -> H1Service<T, S, B, X, U1> pub fn upgrade<U1>(self, upgrade: Option<U1>) -> H1Service<T, S, B, X, U1>
where where
U1: ServiceFactory<(Request, Framed<T, Codec>), Response = ()>, U1: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>,
U1::Error: fmt::Display, U1::Error: fmt::Display,
U1::InitError: fmt::Debug, U1::InitError: fmt::Debug,
{ {
@ -254,11 +232,21 @@ where
cfg: self.cfg, cfg: self.cfg,
srv: self.srv, srv: self.srv,
expect: self.expect, expect: self.expect,
on_connect: self.on_connect,
on_connect_ext: self.on_connect_ext, on_connect_ext: self.on_connect_ext,
_phantom: PhantomData, _t: PhantomData,
} }
} }
/// Set on connect callback.
pub(crate) fn on_connect(
mut self,
f: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
) -> Self {
self.on_connect = f;
self
}
/// Set on connect callback. /// Set on connect callback.
pub(crate) fn on_connect_ext(mut self, f: Option<Rc<ConnectCallback<T>>>) -> Self { pub(crate) fn on_connect_ext(mut self, f: Option<Rc<ConnectCallback<T>>>) -> Self {
self.on_connect_ext = f; self.on_connect_ext = f;
@ -266,104 +254,349 @@ where
} }
} }
impl<T, S, B, X, U> ServiceFactory<(T, Option<net::SocketAddr>)> for H1Service<T, S, B, X, U> impl<T, S, B, X, U> ServiceFactory for H1Service<T, S, B, X, U>
where where
T: AsyncRead + AsyncWrite + Unpin + 'static, T: AsyncRead + AsyncWrite + Unpin,
S: ServiceFactory<Config = (), Request = Request>,
S: ServiceFactory<Request, Config = ()>, S::Error: Into<Error>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>>,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
B: MessageBody, B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X: ServiceFactory<Request, Config = (), Response = Request>, X::Error: Into<Error>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug, X::InitError: fmt::Debug,
U: ServiceFactory<Config = (), Request = (Request, Framed<T, Codec>), Response = ()>,
U: ServiceFactory<(Request, Framed<T, Codec>), Config = (), Response = ()>, U::Error: fmt::Display + Into<Error>,
U::Future: 'static,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug, U::InitError: fmt::Debug,
{ {
type Config = ();
type Request = (T, Option<net::SocketAddr>);
type Response = (); type Response = ();
type Error = DispatchError; type Error = DispatchError;
type Config = ();
type Service = H1ServiceHandler<T, S::Service, B, X::Service, U::Service>;
type InitError = (); type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>; type Service = H1ServiceHandler<T, S::Service, B, X::Service, U::Service>;
type Future = H1ServiceResponse<T, S, B, X, U>;
fn new_service(&self, _: ()) -> Self::Future { fn new_service(&self, _: ()) -> Self::Future {
let service = self.srv.new_service(()); H1ServiceResponse {
let expect = self.expect.new_service(()); fut: self.srv.new_service(()),
let upgrade = self.upgrade.as_ref().map(|s| s.new_service(())); fut_ex: Some(self.expect.new_service(())),
let on_connect_ext = self.on_connect_ext.clone(); fut_upg: self.upgrade.as_ref().map(|f| f.new_service(())),
let cfg = self.cfg.clone(); expect: None,
upgrade: None,
Box::pin(async move { on_connect: self.on_connect.clone(),
let expect = expect on_connect_ext: self.on_connect_ext.clone(),
.await cfg: Some(self.cfg.clone()),
.map_err(|e| log::error!("Init http expect service error: {:?}", e))?; _t: PhantomData,
let upgrade = match upgrade {
Some(upgrade) => {
let upgrade = upgrade
.await
.map_err(|e| log::error!("Init http upgrade service error: {:?}", e))?;
Some(upgrade)
} }
None => None, }
}; }
let service = service #[doc(hidden)]
.await #[pin_project::pin_project]
.map_err(|e| log::error!("Init http service error: {:?}", e))?; pub struct H1ServiceResponse<T, S, B, X, U>
where
S: ServiceFactory<Request = Request>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
X: ServiceFactory<Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
{
#[pin]
fut: S::Future,
#[pin]
fut_ex: Option<X::Future>,
#[pin]
fut_upg: Option<U::Future>,
expect: Option<X::Service>,
upgrade: Option<U::Service>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
cfg: Option<ServiceConfig>,
_t: PhantomData<(T, B)>,
}
Ok(H1ServiceHandler::new( impl<T, S, B, X, U> Future for H1ServiceResponse<T, S, B, X, U>
cfg, where
T: AsyncRead + AsyncWrite + Unpin,
S: ServiceFactory<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
X: ServiceFactory<Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
{
type Output = Result<H1ServiceHandler<T, S::Service, B, X::Service, U::Service>, ()>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
if let Some(fut) = this.fut_ex.as_pin_mut() {
let expect = ready!(fut
.poll(cx)
.map_err(|e| log::error!("Init http service error: {:?}", e)))?;
this = self.as_mut().project();
*this.expect = Some(expect);
this.fut_ex.set(None);
}
if let Some(fut) = this.fut_upg.as_pin_mut() {
let upgrade = ready!(fut
.poll(cx)
.map_err(|e| log::error!("Init http service error: {:?}", e)))?;
this = self.as_mut().project();
*this.upgrade = Some(upgrade);
this.fut_ex.set(None);
}
let result = ready!(this
.fut
.poll(cx)
.map_err(|e| log::error!("Init http service error: {:?}", e)));
Poll::Ready(result.map(|service| {
let this = self.as_mut().project();
H1ServiceHandler::new(
this.cfg.take().unwrap(),
service, service,
expect, this.expect.take().unwrap(),
upgrade, this.upgrade.take(),
on_connect_ext, this.on_connect.clone(),
)) this.on_connect_ext.clone(),
}) )
}))
} }
} }
/// `Service` implementation for HTTP/1 transport /// `Service` implementation for HTTP/1 transport
pub type H1ServiceHandler<T, S, B, X, U> = HttpServiceHandler<T, S, B, X, U>; pub struct H1ServiceHandler<T, S: Service, B, X: Service, U: Service> {
srv: CloneableService<S>,
expect: CloneableService<X>,
upgrade: Option<CloneableService<U>>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
cfg: ServiceConfig,
_t: PhantomData<(T, B)>,
}
impl<T, S, B, X, U> Service<(T, Option<net::SocketAddr>)> for HttpServiceHandler<T, S, B, X, U> impl<T, S, B, X, U> H1ServiceHandler<T, S, B, X, U>
where
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
fn new(
cfg: ServiceConfig,
srv: S,
expect: X,
upgrade: Option<U>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
) -> H1ServiceHandler<T, S, B, X, U> {
H1ServiceHandler {
srv: CloneableService::new(srv),
expect: CloneableService::new(expect),
upgrade: upgrade.map(CloneableService::new),
cfg,
on_connect,
on_connect_ext,
_t: PhantomData,
}
}
}
impl<T, S, B, X, U> Service for H1ServiceHandler<T, S, B, X, U>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S: Service<Request>, S::Error: Into<Error>,
S::Error: Into<Response<BoxBody>>,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
B: MessageBody, B: MessageBody,
X: Service<Request = Request, Response = Request>,
X: Service<Request, Response = Request>, X::Error: Into<Error>,
X::Error: Into<Response<BoxBody>>, U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display + Into<Error>,
U: Service<(Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display + Into<Response<BoxBody>>,
{ {
type Request = (T, Option<net::SocketAddr>);
type Response = (); type Response = ();
type Error = DispatchError; type Error = DispatchError;
type Future = Dispatcher<T, S, B, X, U>; type Future = Dispatcher<T, S, B, X, U>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self._poll_ready(cx).map_err(|err| { let ready = self
log::error!("HTTP/1 service readiness error: {:?}", err); .expect
DispatchError::Service(err) .poll_ready(cx)
}) .map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready();
let ready = self
.srv
.poll_ready(cx)
.map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready()
&& ready;
let ready = if let Some(ref mut upg) = self.upgrade {
upg.poll_ready(cx)
.map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready()
&& ready
} else {
ready
};
if ready {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
} }
fn call(&self, (io, addr): (T, Option<net::SocketAddr>)) -> Self::Future { fn call(&mut self, (io, addr): Self::Request) -> Self::Future {
let conn_data = OnConnectData::from_io(&io, self.on_connect_ext.as_deref()); let deprecated_on_connect = self.on_connect.as_ref().map(|handler| handler(&io));
Dispatcher::new(io, self.flow.clone(), self.cfg.clone(), addr, conn_data)
let mut connect_extensions = Extensions::new();
if let Some(ref handler) = self.on_connect_ext {
// run on_connect_ext callback, populating connect extensions
handler(&io, &mut connect_extensions);
}
Dispatcher::new(
io,
self.cfg.clone(),
self.srv.clone(),
self.expect.clone(),
self.upgrade.clone(),
deprecated_on_connect,
connect_extensions,
addr,
)
}
}
/// `ServiceFactory` implementation for `OneRequestService` service
#[derive(Default)]
pub struct OneRequest<T> {
config: ServiceConfig,
_t: PhantomData<T>,
}
impl<T> OneRequest<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
/// Create new `H1SimpleService` instance.
pub fn new() -> Self {
OneRequest {
config: ServiceConfig::default(),
_t: PhantomData,
}
}
}
impl<T> ServiceFactory for OneRequest<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Config = ();
type Request = T;
type Response = (Request, Framed<T, Codec>);
type Error = ParseError;
type InitError = ();
type Service = OneRequestService<T>;
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(OneRequestService {
_t: PhantomData,
config: self.config.clone(),
})
}
}
/// `Service` implementation for HTTP1 transport. Reads one request and returns
/// request and framed object.
pub struct OneRequestService<T> {
_t: PhantomData<T>,
config: ServiceConfig,
}
impl<T> Service for OneRequestService<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Request = T;
type Response = (Request, Framed<T, Codec>);
type Error = ParseError;
type Future = OneRequestServiceResponse<T>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Self::Request) -> Self::Future {
OneRequestServiceResponse {
framed: Some(Framed::new(req, Codec::new(self.config.clone()))),
}
}
}
#[doc(hidden)]
#[pin_project::pin_project]
pub struct OneRequestServiceResponse<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
#[pin]
framed: Option<Framed<T, Codec>>,
}
impl<T> Future for OneRequestServiceResponse<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Output = Result<(Request, Framed<T, Codec>), ParseError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().project();
match ready!(this.framed.as_pin_mut().unwrap().next_item(cx)) {
Some(Ok(req)) => match req {
Message::Item(req) => {
let mut this = self.as_mut().project();
Poll::Ready(Ok((req, this.framed.take().unwrap())))
}
Message::Chunk(_) => unreachable!("Something is wrong"),
},
Some(Err(err)) => Poll::Ready(Err(err)),
None => Poll::Ready(Err(ParseError::Incomplete)),
}
} }
} }
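A note on the readiness logic in `poll_ready` above: one side of this comparison combines the `expect`, `srv`, and optional `upgrade` readiness checks by hand with an `&& ready` chain, while the other delegates to a `_poll_ready` helper that does the same folding. The sketch below is a minimal, std-only model of that folding; the function name and error type are illustrative and not part of actix-http.

use std::task::Poll;

// Minimal model of "the handler is ready only when every sub-service is ready".
// An error wins immediately, a single Pending keeps the whole handler pending,
// mirroring the `&& ready` chain over already-polled sub-service results.
fn combine_readiness<E>(results: Vec<Poll<Result<(), E>>>) -> Poll<Result<(), E>> {
    let mut all_ready = true;
    for result in results {
        match result {
            Poll::Ready(Err(err)) => return Poll::Ready(Err(err)),
            Poll::Pending => all_ready = false, // keep checking the rest
            Poll::Ready(Ok(())) => {}
        }
    }
    if all_ready {
        Poll::Ready(Ok(()))
    } else {
        Poll::Pending
    }
}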


@ -1,32 +1,41 @@
+use std::marker::PhantomData;
+use std::task::{Context, Poll};
+
 use actix_codec::Framed;
 use actix_service::{Service, ServiceFactory};
-use futures_core::future::LocalBoxFuture;
+use futures_util::future::Ready;
 
-use crate::{h1::Codec, Error, Request};
+use crate::error::Error;
+use crate::h1::Codec;
+use crate::request::Request;
 
-pub struct UpgradeHandler;
+pub struct UpgradeHandler<T>(PhantomData<T>);
 
-impl<T> ServiceFactory<(Request, Framed<T, Codec>)> for UpgradeHandler {
+impl<T> ServiceFactory for UpgradeHandler<T> {
+    type Config = ();
+    type Request = (Request, Framed<T, Codec>);
     type Response = ();
     type Error = Error;
-    type Config = ();
-    type Service = UpgradeHandler;
+    type Service = UpgradeHandler<T>;
     type InitError = Error;
-    type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
+    type Future = Ready<Result<Self::Service, Self::InitError>>;
 
     fn new_service(&self, _: ()) -> Self::Future {
         unimplemented!()
     }
 }
 
-impl<T> Service<(Request, Framed<T, Codec>)> for UpgradeHandler {
+impl<T> Service for UpgradeHandler<T> {
+    type Request = (Request, Framed<T, Codec>);
     type Response = ();
     type Error = Error;
-    type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
+    type Future = Ready<Result<Self::Response, Self::Error>>;
 
-    actix_service::always_ready!();
+    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        Poll::Ready(Ok(()))
+    }
 
-    fn call(&self, _: (Request, Framed<T, Codec>)) -> Self::Future {
+    fn call(&mut self, _: Self::Request) -> Self::Future {
         unimplemented!()
     }
 }
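For context on the `actix_service::always_ready!()` call above: it stands in for the hand-written `poll_ready` that returns `Poll::Ready(Ok(()))` in the other version, and the placeholder upgrade handler can safely leave its methods as `unimplemented!()` because the dispatcher only uses an upgrade service when one was actually configured (it is stored as an `Option`). A hypothetical, std-only sketch of that shape; the types and names here are illustrative only.

use std::task::Poll;

// Hypothetical wrapper: the upgrade path is optional, so a placeholder upgrade
// service is never invoked unless one was installed.
struct Handler<U> {
    upgrade: Option<U>,
}

impl<U: Fn(&str)> Handler<U> {
    // Same effect as the always-ready poll_ready shown above.
    fn poll_ready(&self) -> Poll<Result<(), ()>> {
        Poll::Ready(Ok(()))
    }

    fn on_upgrade(&self, req: &str) {
        if let Some(upgrade) = &self.upgrade {
            upgrade(req); // only reached when an upgrade service exists
        }
    }
}

fn main() {
    let handler = Handler { upgrade: Some(|req: &str| println!("upgrading: {req}")) };
    assert!(matches!(handler.poll_ready(), Poll::Ready(Ok(()))));
    handler.on_upgrade("GET /ws");
}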


@ -1,35 +1,27 @@
use std::{ use std::future::Future;
future::Future, use std::pin::Pin;
pin::Pin, use std::task::{Context, Poll};
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite, Framed}; use actix_codec::{AsyncRead, AsyncWrite, Framed};
use pin_project_lite::pin_project;
use crate::{ use crate::body::{BodySize, MessageBody, ResponseBody};
body::{BodySize, MessageBody}, use crate::error::Error;
h1::{Codec, Message}, use crate::h1::{Codec, Message};
Error, Response, use crate::response::Response;
};
pin_project! { /// Send HTTP/1 response
/// Send HTTP/1 response #[pin_project::pin_project]
pub struct SendResponse<T, B> { pub struct SendResponse<T, B> {
res: Option<Message<(Response<()>, BodySize)>>, res: Option<Message<(Response<()>, BodySize)>>,
#[pin] #[pin]
body: Option<B>, body: Option<ResponseBody<B>>,
#[pin] #[pin]
framed: Option<Framed<T, Codec>>, framed: Option<Framed<T, Codec>>,
}
} }
impl<T, B> SendResponse<T, B> impl<T, B> SendResponse<T, B>
where where
B: MessageBody, B: MessageBody,
B::Error: Into<Error>,
{ {
pub fn new(framed: Framed<T, Codec>, response: Response<B>) -> Self { pub fn new(framed: Framed<T, Codec>, response: Response<B>) -> Self {
let (res, body) = response.into_parts(); let (res, body) = response.into_parts();
@ -45,8 +37,7 @@ where
impl<T, B> Future for SendResponse<T, B> impl<T, B> Future for SendResponse<T, B>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
B: MessageBody, B: MessageBody + Unpin,
B::Error: Into<Error>,
{ {
type Output = Result<Framed<T, Codec>, Error>; type Output = Result<Framed<T, Codec>, Error>;
@ -69,24 +60,15 @@ where
.unwrap() .unwrap()
.is_write_buf_full() .is_write_buf_full()
{ {
let next = match this.body.as_mut().as_pin_mut().unwrap().poll_next(cx) { match this.body.as_mut().as_pin_mut().unwrap().poll_next(cx)? {
Poll::Ready(Some(Ok(item))) => Poll::Ready(Some(item)),
Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err.into())),
Poll::Ready(None) => Poll::Ready(None),
Poll::Pending => Poll::Pending,
};
match next {
Poll::Ready(item) => { Poll::Ready(item) => {
// body is done when item is None // body is done when item is None
body_done = item.is_none(); body_done = item.is_none();
if body_done { if body_done {
this.body.set(None); let _ = this.body.take();
} }
let framed = this.framed.as_mut().as_pin_mut().unwrap(); let framed = this.framed.as_mut().as_pin_mut().unwrap();
framed framed.write(Message::Chunk(item))?;
.write(Message::Chunk(item))
.map_err(|err| Error::new_send_response().with_cause(err))?;
} }
Poll::Pending => body_ready = false, Poll::Pending => body_ready = false,
} }
@ -97,10 +79,7 @@ where
// flush write buffer // flush write buffer
if !framed.is_write_buf_empty() { if !framed.is_write_buf_empty() {
match framed match framed.flush(cx)? {
.flush(cx)
.map_err(|err| Error::new_send_response().with_cause(err))?
{
Poll::Ready(_) => { Poll::Ready(_) => {
if body_ready { if body_ready {
continue; continue;
@ -114,9 +93,7 @@ where
// send response // send response
if let Some(res) = this.res.take() { if let Some(res) = this.res.take() {
framed framed.write(res)?;
.write(res)
.map_err(|err| Error::new_send_response().with_cause(err))?;
continue; continue;
} }
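The `SendResponse` future above keeps three things moving: it writes the response head, feeds body chunks into the codec's write buffer while there is room, and flushes whenever the buffer fills up. Below is a condensed, self-contained sketch of that control flow against a made-up buffered sink; it is not the real `Framed` API, just the same write-until-full-then-flush shape.

// Hypothetical buffered sink used only to illustrate the drive loop.
struct MockSink {
    buf: Vec<u8>,
    high_water: usize,
}

impl MockSink {
    fn is_full(&self) -> bool {
        self.buf.len() >= self.high_water
    }
    fn write(&mut self, chunk: &[u8]) {
        self.buf.extend_from_slice(chunk);
    }
    fn flush(&mut self) {
        self.buf.clear(); // pretend the OS accepted everything
    }
}

// Drain an in-memory body through the sink, flushing whenever the buffer fills,
// mirroring the body_done / body_ready bookkeeping in SendResponse::poll.
fn drive(sink: &mut MockSink, mut chunks: Vec<Vec<u8>>) {
    while !chunks.is_empty() {
        while !sink.is_full() && !chunks.is_empty() {
            let chunk = chunks.remove(0);
            sink.write(&chunk);
        }
        sink.flush();
    }
    sink.flush(); // final flush once the body is done
}

fn main() {
    let mut sink = MockSink { buf: Vec::new(), high_water: 8 };
    drive(&mut sink, vec![b"hello".to_vec(), b", ".to_vec(), b"world".to_vec()]);
    assert!(sink.buf.is_empty()); // everything was flushed
}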


@ -1,271 +1,196 @@
use std::{ use std::convert::TryFrom;
cmp, use std::future::Future;
error::Error as StdError, use std::marker::PhantomData;
future::Future, use std::net;
marker::PhantomData, use std::pin::Pin;
net, use std::task::{Context, Poll};
pin::Pin,
rc::Rc,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite}; use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::time::{sleep, Sleep}; use actix_rt::time::{Delay, Instant};
use actix_service::Service; use actix_service::Service;
use actix_utils::future::poll_fn;
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
use futures_core::ready; use h2::server::{Connection, SendResponse};
use h2::{ use h2::SendStream;
server::{Connection, SendResponse}, use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
Ping, PingPong,
};
use log::{error, trace}; use log::{error, trace};
use pin_project_lite::pin_project;
use crate::{ use crate::body::{BodySize, MessageBody, ResponseBody};
body::{BodySize, BoxBody, MessageBody}, use crate::cloneable::CloneableService;
config::ServiceConfig, use crate::config::ServiceConfig;
header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING}, use crate::error::{DispatchError, Error};
service::HttpFlow, use crate::helpers::DataFactory;
Extensions, OnConnectData, Payload, Request, Response, ResponseHead, use crate::httpmessage::HttpMessage;
}; use crate::message::ResponseHead;
use crate::payload::Payload;
use crate::request::Request;
use crate::response::Response;
use crate::Extensions;
const CHUNK_SIZE: usize = 16_384; const CHUNK_SIZE: usize = 16_384;
pin_project! { /// Dispatcher for HTTP/2 protocol
/// Dispatcher for HTTP/2 protocol. #[pin_project::pin_project]
pub struct Dispatcher<T, S, B, X, U> { pub struct Dispatcher<T, S: Service<Request = Request>, B: MessageBody>
flow: Rc<HttpFlow<S, X, U>>,
connection: Connection<T, Bytes>,
conn_data: Option<Rc<Extensions>>,
config: ServiceConfig,
peer_addr: Option<net::SocketAddr>,
ping_pong: Option<H2PingPong>,
_phantom: PhantomData<B>
}
}
impl<T, S, B, X, U> Dispatcher<T, S, B, X, U>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
{ {
pub(crate) fn new( service: CloneableService<S>,
mut conn: Connection<T, Bytes>, connection: Connection<T, Bytes>,
flow: Rc<HttpFlow<S, X, U>>, on_connect: Option<Box<dyn DataFactory>>,
on_connect_data: Extensions,
config: ServiceConfig, config: ServiceConfig,
peer_addr: Option<net::SocketAddr>, peer_addr: Option<net::SocketAddr>,
conn_data: OnConnectData, ka_expire: Instant,
timer: Option<Pin<Box<Sleep>>>, ka_timer: Option<Delay>,
) -> Self { _t: PhantomData<B>,
let ping_pong = config.keep_alive().map(|dur| H2PingPong {
timer: timer
.map(|mut timer| {
// reset the timer if one was passed into this constructor (re-used from the handshake).
timer.as_mut().reset(config.now() + dur);
timer
})
.unwrap_or_else(|| Box::pin(sleep(dur))),
on_flight: false,
ping_pong: conn.ping_pong().unwrap(),
});
Self {
flow,
config,
peer_addr,
connection: conn,
conn_data: conn_data.0.map(Rc::new),
ping_pong,
_phantom: PhantomData,
}
}
} }
struct H2PingPong { impl<T, S, B> Dispatcher<T, S, B>
timer: Pin<Box<Sleep>>,
on_flight: bool,
ping_pong: PingPong,
}
impl<T, S, B, X, U> Future for Dispatcher<T, S, B, X, U>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S: Service<Request>, S::Error: Into<Error>,
S::Error: Into<Response<BoxBody>>, // S::Future: 'static,
S::Future: 'static,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
B: MessageBody, B: MessageBody,
{ {
type Output = Result<(), crate::error::DispatchError>; pub(crate) fn new(
service: CloneableService<S>,
connection: Connection<T, Bytes>,
on_connect: Option<Box<dyn DataFactory>>,
on_connect_data: Extensions,
config: ServiceConfig,
timeout: Option<Delay>,
peer_addr: Option<net::SocketAddr>,
) -> Self {
// let keepalive = config.keep_alive_enabled();
// let flags = if keepalive {
// Flags::KEEPALIVE | Flags::KEEPALIVE_ENABLED
// } else {
// Flags::empty()
// };
// keep-alive timer
let (ka_expire, ka_timer) = if let Some(delay) = timeout {
(delay.deadline(), Some(delay))
} else if let Some(delay) = config.keep_alive_timer() {
(delay.deadline(), Some(delay))
} else {
(config.now(), None)
};
Dispatcher {
service,
config,
peer_addr,
connection,
on_connect,
on_connect_data,
ka_expire,
ka_timer,
_t: PhantomData,
}
}
}
impl<T, S, B> Future for Dispatcher<T, S, B>
where
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S::Error: Into<Error> + 'static,
S::Future: 'static,
S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static,
{
type Output = Result<(), DispatchError>;
#[inline] #[inline]
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut(); let this = self.get_mut();
loop { loop {
match Pin::new(&mut this.connection).poll_accept(cx)? { match Pin::new(&mut this.connection).poll_accept(cx) {
Poll::Ready(Some((req, tx))) => { Poll::Ready(None) => return Poll::Ready(Ok(())),
let (parts, body) = req.into_parts(); Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err.into())),
let payload = crate::h2::Payload::new(body); Poll::Ready(Some(Ok((req, res)))) => {
let pl = Payload::H2 { payload }; // update keep-alive expire
let mut req = Request::with_payload(pl); if this.ka_timer.is_some() {
if let Some(expire) = this.config.keep_alive_expire() {
this.ka_expire = expire;
}
}
let head = req.head_mut(); let (parts, body) = req.into_parts();
let mut req = Request::with_payload(Payload::<
crate::payload::PayloadStream,
>::H2(
crate::h2::Payload::new(body)
));
let head = &mut req.head_mut();
head.uri = parts.uri; head.uri = parts.uri;
head.method = parts.method; head.method = parts.method;
head.version = parts.version; head.version = parts.version;
head.headers = parts.headers.into(); head.headers = parts.headers.into();
head.peer_addr = this.peer_addr; head.peer_addr = this.peer_addr;
req.conn_data = this.conn_data.as_ref().map(Rc::clone); // DEPRECATED
// set on_connect data
if let Some(ref on_connect) = this.on_connect {
on_connect.set(&mut req.extensions_mut());
}
let fut = this.flow.service.call(req); // merge on_connect_ext data into request extensions
let config = this.config.clone(); req.extensions_mut().drain_from(&mut this.on_connect_data);
// multiplex request handling with spawn task actix_rt::spawn(ServiceResponse::<
actix_rt::spawn(async move { S::Future,
// resolve service call and send response. S::Response,
let res = match fut.await { S::Error,
Ok(res) => handle_response(res.into(), tx, config).await, B,
Err(err) => { > {
let res: Response<BoxBody> = err.into(); state: ServiceResponseState::ServiceCall(
handle_response(res, tx, config).await this.service.call(req),
} Some(res),
}; ),
config: this.config.clone(),
// log error. buffer: None,
if let Err(err) = res { _t: PhantomData,
match err {
DispatchError::SendResponse(err) => {
trace!("Error sending HTTP/2 response: {:?}", err)
}
DispatchError::SendData(err) => warn!("{:?}", err),
DispatchError::ResponseBody(err) => {
error!("Response payload stream error: {:?}", err)
}
}
}
}); });
} }
Poll::Ready(None) => return Poll::Ready(Ok(())), Poll::Pending => return Poll::Pending,
Poll::Pending => match this.ping_pong.as_mut() {
Some(ping_pong) => loop {
if ping_pong.on_flight {
// When a ping is in flight, poll the pong and the keep-alive timer.
// On receiving the pong, update the keep-alive timer to determine the next
// ping-pong timing.
match ping_pong.ping_pong.poll_pong(cx)? {
Poll::Ready(_) => {
ping_pong.on_flight = false;
let dead_line = this.config.keep_alive_expire().unwrap();
ping_pong.timer.as_mut().reset(dead_line);
}
Poll::Pending => {
return ping_pong.timer.as_mut().poll(cx).map(|_| Ok(()))
}
}
} else {
// When no ping is in flight, the keep-alive timer waits for the next
// ping-pong timing, so at this point it serves as an interval instead.
ready!(ping_pong.timer.as_mut().poll(cx));
ping_pong.ping_pong.send_ping(Ping::opaque())?;
let dead_line = this.config.keep_alive_expire().unwrap();
ping_pong.timer.as_mut().reset(dead_line);
ping_pong.on_flight = true;
}
},
None => return Poll::Pending,
},
} }
} }
} }
} }
enum DispatchError { #[pin_project::pin_project]
SendResponse(h2::Error), struct ServiceResponse<F, I, E, B> {
SendData(h2::Error), #[pin]
ResponseBody(Box<dyn StdError>), state: ServiceResponseState<F, B>,
}
async fn handle_response<B>(
res: Response<B>,
mut tx: SendResponse<Bytes>,
config: ServiceConfig, config: ServiceConfig,
) -> Result<(), DispatchError> buffer: Option<Bytes>,
_t: PhantomData<(I, E)>,
}
#[pin_project::pin_project(project = ServiceResponseStateProj)]
enum ServiceResponseState<F, B> {
ServiceCall(#[pin] F, Option<SendResponse<Bytes>>),
SendPayload(SendStream<Bytes>, #[pin] ResponseBody<B>),
}
impl<F, I, E, B> ServiceResponse<F, I, E, B>
where where
F: Future<Output = Result<I, E>>,
E: Into<Error>,
I: Into<Response<B>>,
B: MessageBody, B: MessageBody,
{ {
let (res, body) = res.replace_body(()); fn prepare_response(
&self,
// prepare response.
let mut size = body.size();
let res = prepare_response(config, res.head(), &mut size);
let eof = size.is_eof();
// send response head and return on eof.
let mut stream = tx
.send_response(res, eof)
.map_err(DispatchError::SendResponse)?;
if eof {
return Ok(());
}
// poll response body and send chunks to client
actix_rt::pin!(body);
while let Some(res) = poll_fn(|cx| body.as_mut().poll_next(cx)).await {
let mut chunk = res.map_err(|err| DispatchError::ResponseBody(err.into()))?;
'send: loop {
let chunk_size = cmp::min(chunk.len(), CHUNK_SIZE);
// reserve enough space and wait for stream ready.
stream.reserve_capacity(chunk_size);
match poll_fn(|cx| stream.poll_capacity(cx)).await {
// No capacity left. drop body and return.
None => return Ok(()),
Some(Err(err)) => return Err(DispatchError::SendData(err)),
Some(Ok(cap)) => {
// split chunk to writeable size and send to client
let len = chunk.len();
let bytes = chunk.split_to(cmp::min(len, cap));
stream
.send_data(bytes, false)
.map_err(DispatchError::SendData)?;
// Current chunk completely sent; break the send loop and poll the next one.
if chunk.is_empty() {
break 'send;
}
}
}
}
}
// response body streaming finished. send end of stream and return.
stream
.send_data(Bytes::new(), true)
.map_err(DispatchError::SendData)?;
Ok(())
}
fn prepare_response(
config: ServiceConfig,
head: &ResponseHead, head: &ResponseHead,
size: &mut BodySize, size: &mut BodySize,
) -> http::Response<()> { ) -> http::Response<()> {
let mut has_date = false; let mut has_date = false;
let mut skip_len = size != &BodySize::Stream; let mut skip_len = size != &BodySize::Stream;
@ -282,47 +207,34 @@ fn prepare_response(
skip_len = true; skip_len = true;
*size = BodySize::Stream; *size = BodySize::Stream;
} }
_ => {} _ => (),
} }
let _ = match size { let _ = match size {
BodySize::None | BodySize::Stream => None, BodySize::None | BodySize::Stream => None,
BodySize::Empty => res
BodySize::Sized(0) => { .headers_mut()
#[allow(clippy::declare_interior_mutable_const)] .insert(CONTENT_LENGTH, HeaderValue::from_static("0")),
const HV_ZERO: HeaderValue = HeaderValue::from_static("0"); BodySize::Sized(len) => res.headers_mut().insert(
res.headers_mut().insert(CONTENT_LENGTH, HV_ZERO)
}
BodySize::Sized(len) => {
let mut buf = itoa::Buffer::new();
res.headers_mut().insert(
CONTENT_LENGTH, CONTENT_LENGTH,
HeaderValue::from_str(buf.format(*len)).unwrap(), HeaderValue::try_from(format!("{}", len)).unwrap(),
) ),
}
}; };
// copy headers // copy headers
for (key, value) in head.headers.iter() { for (key, value) in head.headers.iter() {
match *key { match *key {
// TODO: consider skipping other headers according to: CONNECTION | TRANSFER_ENCODING => continue, // http2 specific
// https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2
// omit HTTP/1.x only headers
CONNECTION | TRANSFER_ENCODING => continue,
CONTENT_LENGTH if skip_len => continue, CONTENT_LENGTH if skip_len => continue,
DATE => has_date = true, DATE => has_date = true,
_ => {} _ => (),
} }
res.headers_mut().append(key, value.clone()); res.headers_mut().append(key, value.clone());
} }
// set date header // set date header
if !has_date { if !has_date {
let mut bytes = BytesMut::with_capacity(29); let mut bytes = BytesMut::with_capacity(29);
config.set_date_header(&mut bytes); self.config.set_date_header(&mut bytes);
res.headers_mut().insert( res.headers_mut().insert(
DATE, DATE,
// SAFETY: serialized date-times are known ASCII strings // SAFETY: serialized date-times are known ASCII strings
@ -331,4 +243,129 @@ fn prepare_response(
} }
res res
}
}
impl<F, I, E, B> Future for ServiceResponse<F, I, E, B>
where
F: Future<Output = Result<I, E>>,
E: Into<Error>,
I: Into<Response<B>>,
B: MessageBody,
{
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
match this.state.project() {
ServiceResponseStateProj::ServiceCall(call, send) => match call.poll(cx) {
Poll::Ready(Ok(res)) => {
let (res, body) = res.into().replace_body(());
let mut send = send.take().unwrap();
let mut size = body.size();
let h2_res = self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => {
trace!("Error sending h2 response: {:?}", e);
return Poll::Ready(());
}
Ok(stream) => stream,
};
if size.is_eof() {
Poll::Ready(())
} else {
this.state
.set(ServiceResponseState::SendPayload(stream, body));
self.poll(cx)
}
}
Poll::Pending => Poll::Pending,
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
let mut send = send.take().unwrap();
let mut size = body.size();
let h2_res = self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => {
trace!("Error sending h2 response: {:?}", e);
return Poll::Ready(());
}
Ok(stream) => stream,
};
if size.is_eof() {
Poll::Ready(())
} else {
this.state.set(ServiceResponseState::SendPayload(
stream,
body.into_body(),
));
self.poll(cx)
}
}
},
ServiceResponseStateProj::SendPayload(ref mut stream, ref mut body) => {
loop {
loop {
if let Some(ref mut buffer) = this.buffer {
match stream.poll_capacity(cx) {
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => return Poll::Ready(()),
Poll::Ready(Some(Ok(cap))) => {
let len = buffer.len();
let bytes = buffer.split_to(std::cmp::min(cap, len));
if let Err(e) = stream.send_data(bytes, false) {
warn!("{:?}", e);
return Poll::Ready(());
} else if !buffer.is_empty() {
let cap =
std::cmp::min(buffer.len(), CHUNK_SIZE);
stream.reserve_capacity(cap);
} else {
this.buffer.take();
}
}
Poll::Ready(Some(Err(e))) => {
warn!("{:?}", e);
return Poll::Ready(());
}
}
} else {
match body.as_mut().poll_next(cx) {
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => {
if let Err(e) = stream.send_data(Bytes::new(), true)
{
warn!("{:?}", e);
}
return Poll::Ready(());
}
Poll::Ready(Some(Ok(chunk))) => {
stream.reserve_capacity(std::cmp::min(
chunk.len(),
CHUNK_SIZE,
));
*this.buffer = Some(chunk);
}
Poll::Ready(Some(Err(e))) => {
error!("Response payload stream error: {:?}", e);
return Poll::Ready(());
}
}
}
}
}
}
}
}
} }
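The keep-alive handling in the `H2PingPong` branch above uses a single timer in two modes: while no ping is in flight it acts as an interval that decides when to send the next ping, and once a ping has been sent it becomes a deadline for the pong. Below is a simplified, std-only model of that state machine; the integer timestamps and function names are illustrative only.

// Simplified model of the two-mode keep-alive timer described above.
enum KeepAlive {
    Idle { next_ping_at: u64 },     // timer used as an interval
    AwaitingPong { deadline: u64 }, // timer used as a deadline
}

fn tick(
    state: KeepAlive,
    now: u64,
    pong_received: bool,
    interval: u64,
) -> Result<KeepAlive, &'static str> {
    match state {
        KeepAlive::Idle { next_ping_at } if now >= next_ping_at => {
            // time to send a ping; the timer switches into deadline mode
            Ok(KeepAlive::AwaitingPong { deadline: now + interval })
        }
        KeepAlive::AwaitingPong { .. } if pong_received => {
            // pong arrived: reset the timer and go back to interval mode
            Ok(KeepAlive::Idle { next_ping_at: now + interval })
        }
        KeepAlive::AwaitingPong { deadline } if now >= deadline => {
            Err("keep-alive expired; shut the connection down")
        }
        other => Ok(other), // nothing to do yet
    }
}

fn main() {
    let mut state = KeepAlive::Idle { next_ping_at: 30 };
    state = tick(state, 30, false, 30).unwrap(); // ping sent, waiting for a pong
    state = tick(state, 45, true, 30).unwrap();  // pong arrived, back to interval mode
    assert!(matches!(state, KeepAlive::Idle { next_ping_at: 75 }));
}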


@ -1,111 +1,50 @@
//! HTTP/2 protocol. //! HTTP/2 implementation
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::time::Sleep;
use bytes::Bytes; use bytes::Bytes;
use futures_core::{ready, Stream}; use futures_core::Stream;
use h2::{ use h2::RecvStream;
server::{handshake, Connection, Handshake},
RecvStream,
};
mod dispatcher; mod dispatcher;
mod service; mod service;
pub use self::dispatcher::Dispatcher; pub use self::dispatcher::Dispatcher;
pub use self::service::H2Service; pub use self::service::H2Service;
use crate::error::PayloadError;
use crate::{ /// H2 receive stream
config::ServiceConfig,
error::{DispatchError, PayloadError},
};
/// HTTP/2 peer stream.
pub struct Payload { pub struct Payload {
stream: RecvStream, pl: RecvStream,
} }
impl Payload { impl Payload {
pub(crate) fn new(stream: RecvStream) -> Self { pub(crate) fn new(pl: RecvStream) -> Self {
Self { stream } Self { pl }
} }
} }
impl Stream for Payload { impl Stream for Payload {
type Item = Result<Bytes, PayloadError>; type Item = Result<Bytes, PayloadError>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
let this = self.get_mut(); let this = self.get_mut();
match ready!(Pin::new(&mut this.stream).poll_data(cx)) { match Pin::new(&mut this.pl).poll_data(cx) {
Some(Ok(chunk)) => { Poll::Ready(Some(Ok(chunk))) => {
let len = chunk.len(); let len = chunk.len();
if let Err(err) = this.pl.flow_control().release_capacity(len) {
match this.stream.flow_control().release_capacity(len) { Poll::Ready(Some(Err(err.into())))
Ok(()) => Poll::Ready(Some(Ok(chunk))), } else {
Err(err) => Poll::Ready(Some(Err(err.into()))), Poll::Ready(Some(Ok(chunk)))
} }
} }
Some(Err(err)) => Poll::Ready(Some(Err(err.into()))), Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err(err.into()))),
None => Poll::Ready(None), Poll::Pending => Poll::Pending,
Poll::Ready(None) => Poll::Ready(None),
} }
} }
} }
pub(crate) fn handshake_with_timeout<T>(
io: T,
config: &ServiceConfig,
) -> HandshakeWithTimeout<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
HandshakeWithTimeout {
handshake: handshake(io),
timer: config.client_timer().map(Box::pin),
}
}
pub(crate) struct HandshakeWithTimeout<T: AsyncRead + AsyncWrite + Unpin> {
handshake: Handshake<T>,
timer: Option<Pin<Box<Sleep>>>,
}
impl<T> Future for HandshakeWithTimeout<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Output = Result<(Connection<T, Bytes>, Option<Pin<Box<Sleep>>>), DispatchError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
match Pin::new(&mut this.handshake).poll(cx)? {
// return the timer on successful handshake; it can be re-used for h2 ping-pong.
Poll::Ready(conn) => Poll::Ready(Ok((conn, this.timer.take()))),
Poll::Pending => match this.timer.as_mut() {
Some(timer) => {
ready!(timer.as_mut().poll(cx));
Poll::Ready(Err(DispatchError::SlowRequestTimeout))
}
None => Poll::Pending,
},
}
}
}
#[cfg(test)]
mod tests {
use std::panic::{RefUnwindSafe, UnwindSafe};
use static_assertions::assert_impl_all;
use super::*;
assert_impl_all!(Payload: Unpin, Send, Sync, UnwindSafe, RefUnwindSafe);
}
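One detail worth spelling out from the `Payload` stream above: each received chunk's length is released back to the connection's flow-control window after it is handed to the caller, which is what allows the peer to keep sending data. Below is a toy, std-only illustration of why that release step matters; it is not the h2 crate's API, only the same bookkeeping.

// Toy flow-control window: bytes consumed on receipt must be released again,
// otherwise the window eventually reaches zero and the sender stalls.
struct FlowControl {
    window: usize,
}

impl FlowControl {
    fn consume(&mut self, len: usize) -> Result<(), &'static str> {
        self.window = self
            .window
            .checked_sub(len)
            .ok_or("peer exceeded the advertised window")?;
        Ok(())
    }

    fn release_capacity(&mut self, len: usize) {
        // mirrors `flow_control().release_capacity(len)` after a chunk is yielded
        self.window += len;
    }
}

fn on_chunk(fc: &mut FlowControl, chunk: &[u8]) -> Result<Vec<u8>, &'static str> {
    fc.consume(chunk.len())?;
    let out = chunk.to_vec();
    fc.release_capacity(out.len()); // hand the bytes upward, then refill the window
    Ok(out)
}

fn main() {
    let mut fc = FlowControl { window: 16 };
    for _ in 0..4 {
        // without release_capacity the window would be exhausted after two chunks
        on_chunk(&mut fc, b"12345678").unwrap();
    }
    assert_eq!(fc.window, 16);
}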


@ -1,61 +1,73 @@
use std::{ use std::future::Future;
future::Future, use std::marker::PhantomData;
marker::PhantomData, use std::pin::Pin;
mem, net, use std::task::{Context, Poll};
pin::Pin, use std::{net, rc::Rc};
rc::Rc,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite}; use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::net::TcpStream; use actix_rt::net::TcpStream;
use actix_service::{ use actix_service::{
fn_factory, fn_service, IntoServiceFactory, Service, ServiceFactory, ServiceFactoryExt as _, fn_factory, fn_service, pipeline_factory, IntoServiceFactory, Service,
ServiceFactory,
}; };
use actix_utils::future::ready; use bytes::Bytes;
use futures_core::{future::LocalBoxFuture, ready}; use futures_core::ready;
use futures_util::future::ok;
use h2::server::{self, Handshake};
use log::error; use log::error;
use crate::{ use crate::body::MessageBody;
body::{BoxBody, MessageBody}, use crate::cloneable::CloneableService;
config::ServiceConfig, use crate::config::ServiceConfig;
error::DispatchError, use crate::error::{DispatchError, Error};
service::HttpFlow, use crate::helpers::DataFactory;
ConnectCallback, OnConnectData, Request, Response, use crate::request::Request;
}; use crate::response::Response;
use crate::{ConnectCallback, Extensions};
use super::{dispatcher::Dispatcher, handshake_with_timeout, HandshakeWithTimeout}; use super::dispatcher::Dispatcher;
/// `ServiceFactory` implementation for HTTP/2 transport /// `ServiceFactory` implementation for HTTP2 transport
pub struct H2Service<T, S, B> { pub struct H2Service<T, S, B> {
srv: S, srv: S,
cfg: ServiceConfig, cfg: ServiceConfig,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
on_connect_ext: Option<Rc<ConnectCallback<T>>>, on_connect_ext: Option<Rc<ConnectCallback<T>>>,
_phantom: PhantomData<(T, B)>, _t: PhantomData<(T, B)>,
} }
impl<T, S, B> H2Service<T, S, B> impl<T, S, B> H2Service<T, S, B>
where where
S: ServiceFactory<Request, Config = ()>, S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Response<BoxBody>> + 'static, S::Error: Into<Error> + 'static,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
<S::Service as Service<Request>>::Future: 'static, <S::Service as Service>::Future: 'static,
B: MessageBody + 'static, B: MessageBody + 'static,
{ {
/// Create new `H2Service` instance with config. /// Create new `HttpService` instance with config.
pub(crate) fn with_config<F: IntoServiceFactory<S, Request>>( pub(crate) fn with_config<F: IntoServiceFactory<S>>(
cfg: ServiceConfig, cfg: ServiceConfig,
service: F, service: F,
) -> Self { ) -> Self {
H2Service { H2Service {
cfg, cfg,
on_connect: None,
on_connect_ext: None, on_connect_ext: None,
srv: service.into_factory(), srv: service.into_factory(),
_phantom: PhantomData, _t: PhantomData,
} }
} }
/// Set on connect callback.
pub(crate) fn on_connect(
mut self,
f: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
) -> Self {
self.on_connect = f;
self
}
/// Set on connect callback. /// Set on connect callback.
pub(crate) fn on_connect_ext(mut self, f: Option<Rc<ConnectCallback<T>>>) -> Self { pub(crate) fn on_connect_ext(mut self, f: Option<Rc<ConnectCallback<T>>>) -> Self {
self.on_connect_ext = f; self.on_connect_ext = f;
@ -65,77 +77,70 @@ where
impl<S, B> H2Service<TcpStream, S, B> impl<S, B> H2Service<TcpStream, S, B>
where where
S: ServiceFactory<Request, Config = ()>, S: ServiceFactory<Config = (), Request = Request>,
S::Future: 'static, S::Error: Into<Error> + 'static,
S::Error: Into<Response<BoxBody>> + 'static,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
<S::Service as Service<Request>>::Future: 'static, <S::Service as Service>::Future: 'static,
B: MessageBody + 'static, B: MessageBody + 'static,
{ {
/// Create plain TCP based service /// Create simple tcp based service
pub fn tcp( pub fn tcp(
self, self,
) -> impl ServiceFactory< ) -> impl ServiceFactory<
TcpStream,
Config = (), Config = (),
Request = TcpStream,
Response = (), Response = (),
Error = DispatchError, Error = DispatchError,
InitError = S::InitError, InitError = S::InitError,
> { > {
fn_factory(|| { pipeline_factory(fn_factory(|| async {
ready(Ok::<_, S::InitError>(fn_service(|io: TcpStream| { Ok::<_, S::InitError>(fn_service(|io: TcpStream| {
let peer_addr = io.peer_addr().ok(); let peer_addr = io.peer_addr().ok();
ready(Ok::<_, DispatchError>((io, peer_addr))) ok::<_, DispatchError>((io, peer_addr))
}))) }))
}) }))
.and_then(self) .and_then(self)
} }
} }
#[cfg(feature = "openssl")] #[cfg(feature = "openssl")]
mod openssl { mod openssl {
use actix_service::ServiceFactoryExt as _; use actix_service::{fn_factory, fn_service};
use actix_tls::accept::{ use actix_tls::openssl::{Acceptor, SslAcceptor, SslStream};
openssl::{ use actix_tls::{openssl::HandshakeError, TlsError};
reexports::{Error as SslError, SslAcceptor},
Acceptor, TlsStream,
},
TlsError,
};
use super::*; use super::*;
impl<S, B> H2Service<TlsStream<TcpStream>, S, B> impl<S, B> H2Service<SslStream<TcpStream>, S, B>
where where
S: ServiceFactory<Request, Config = ()>, S: ServiceFactory<Config = (), Request = Request>,
S::Future: 'static, S::Error: Into<Error> + 'static,
S::Error: Into<Response<BoxBody>> + 'static,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
<S::Service as Service<Request>>::Future: 'static, <S::Service as Service>::Future: 'static,
B: MessageBody + 'static, B: MessageBody + 'static,
{ {
/// Create OpenSSL based service. /// Create ssl based service
pub fn openssl( pub fn openssl(
self, self,
acceptor: SslAcceptor, acceptor: SslAcceptor,
) -> impl ServiceFactory< ) -> impl ServiceFactory<
TcpStream,
Config = (), Config = (),
Request = TcpStream,
Response = (), Response = (),
Error = TlsError<SslError, DispatchError>, Error = TlsError<HandshakeError<TcpStream>, DispatchError>,
InitError = S::InitError, InitError = S::InitError,
> { > {
pipeline_factory(
Acceptor::new(acceptor) Acceptor::new(acceptor)
.map_init_err(|_| { .map_err(TlsError::Tls)
unreachable!("TLS acceptor service factory does not error on init") .map_init_err(|_| panic!()),
}) )
.map_err(TlsError::into_service_error) .and_then(fn_factory(|| {
.map(|io: TlsStream<TcpStream>| { ok::<_, S::InitError>(fn_service(|io: SslStream<TcpStream>| {
let peer_addr = io.get_ref().peer_addr().ok(); let peer_addr = io.get_ref().peer_addr().ok();
(io, peer_addr) ok((io, peer_addr))
}) }))
}))
.and_then(self.map_err(TlsError::Service)) .and_then(self.map_err(TlsError::Service))
} }
} }
@ -143,175 +148,212 @@ mod openssl {
#[cfg(feature = "rustls")] #[cfg(feature = "rustls")]
mod rustls { mod rustls {
use std::io;
use actix_service::ServiceFactoryExt as _;
use actix_tls::accept::{
rustls::{reexports::ServerConfig, Acceptor, TlsStream},
TlsError,
};
use super::*; use super::*;
use actix_tls::rustls::{Acceptor, ServerConfig, TlsStream};
use actix_tls::TlsError;
use std::io;
impl<S, B> H2Service<TlsStream<TcpStream>, S, B> impl<S, B> H2Service<TlsStream<TcpStream>, S, B>
where where
S: ServiceFactory<Request, Config = ()>, S: ServiceFactory<Config = (), Request = Request>,
S::Future: 'static, S::Error: Into<Error> + 'static,
S::Error: Into<Response<BoxBody>> + 'static,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
<S::Service as Service<Request>>::Future: 'static, <S::Service as Service>::Future: 'static,
B: MessageBody + 'static, B: MessageBody + 'static,
{ {
/// Create Rustls based service. /// Create openssl based service
pub fn rustls( pub fn rustls(
self, self,
mut config: ServerConfig, mut config: ServerConfig,
) -> impl ServiceFactory< ) -> impl ServiceFactory<
TcpStream,
Config = (), Config = (),
Request = TcpStream,
Response = (), Response = (),
Error = TlsError<io::Error, DispatchError>, Error = TlsError<io::Error, DispatchError>,
InitError = S::InitError, InitError = S::InitError,
> { > {
let mut protos = vec![b"h2".to_vec()]; let protos = vec!["h2".to_string().into()];
protos.extend_from_slice(&config.alpn_protocols); config.set_protocols(&protos);
config.alpn_protocols = protos;
pipeline_factory(
Acceptor::new(config) Acceptor::new(config)
.map_init_err(|_| { .map_err(TlsError::Tls)
unreachable!("TLS acceptor service factory does not error on init") .map_init_err(|_| panic!()),
}) )
.map_err(TlsError::into_service_error) .and_then(fn_factory(|| {
.map(|io: TlsStream<TcpStream>| { ok::<_, S::InitError>(fn_service(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok(); let peer_addr = io.get_ref().0.peer_addr().ok();
(io, peer_addr) ok((io, peer_addr))
}) }))
}))
.and_then(self.map_err(TlsError::Service)) .and_then(self.map_err(TlsError::Service))
} }
} }
} }
impl<T, S, B> ServiceFactory<(T, Option<net::SocketAddr>)> for H2Service<T, S, B> impl<T, S, B> ServiceFactory for H2Service<T, S, B>
where where
T: AsyncRead + AsyncWrite + Unpin + 'static, T: AsyncRead + AsyncWrite + Unpin,
S: ServiceFactory<Config = (), Request = Request>,
S: ServiceFactory<Request, Config = ()>, S::Error: Into<Error> + 'static,
S::Future: 'static,
S::Error: Into<Response<BoxBody>> + 'static,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
<S::Service as Service<Request>>::Future: 'static, <S::Service as Service>::Future: 'static,
B: MessageBody + 'static, B: MessageBody + 'static,
{ {
type Config = ();
type Request = (T, Option<net::SocketAddr>);
type Response = (); type Response = ();
type Error = DispatchError; type Error = DispatchError;
type Config = ();
type Service = H2ServiceHandler<T, S::Service, B>;
type InitError = S::InitError; type InitError = S::InitError;
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>; type Service = H2ServiceHandler<T, S::Service, B>;
type Future = H2ServiceResponse<T, S, B>;
fn new_service(&self, _: ()) -> Self::Future { fn new_service(&self, _: ()) -> Self::Future {
let service = self.srv.new_service(()); H2ServiceResponse {
let cfg = self.cfg.clone(); fut: self.srv.new_service(()),
let on_connect_ext = self.on_connect_ext.clone(); cfg: Some(self.cfg.clone()),
on_connect: self.on_connect.clone(),
Box::pin(async move { on_connect_ext: self.on_connect_ext.clone(),
let service = service.await?; _t: PhantomData,
Ok(H2ServiceHandler::new(cfg, on_connect_ext, service)) }
})
} }
} }
/// `Service` implementation for HTTP/2 transport #[doc(hidden)]
pub struct H2ServiceHandler<T, S, B> #[pin_project::pin_project]
where pub struct H2ServiceResponse<T, S: ServiceFactory, B> {
S: Service<Request>, #[pin]
{ fut: S::Future,
flow: Rc<HttpFlow<S, (), ()>>, cfg: Option<ServiceConfig>,
cfg: ServiceConfig, on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
on_connect_ext: Option<Rc<ConnectCallback<T>>>, on_connect_ext: Option<Rc<ConnectCallback<T>>>,
_phantom: PhantomData<B>, _t: PhantomData<(T, B)>,
}
impl<T, S, B> Future for H2ServiceResponse<T, S, B>
where
T: AsyncRead + AsyncWrite + Unpin,
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
type Output = Result<H2ServiceHandler<T, S::Service, B>, S::InitError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().project();
Poll::Ready(ready!(this.fut.poll(cx)).map(|service| {
let this = self.as_mut().project();
H2ServiceHandler::new(
this.cfg.take().unwrap(),
this.on_connect.clone(),
this.on_connect_ext.clone(),
service,
)
}))
}
}
/// `Service` implementation for http/2 transport
pub struct H2ServiceHandler<T, S: Service, B> {
srv: CloneableService<S>,
cfg: ServiceConfig,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
_t: PhantomData<(T, B)>,
} }
impl<T, S, B> H2ServiceHandler<T, S, B> impl<T, S, B> H2ServiceHandler<T, S, B>
where where
S: Service<Request>, S: Service<Request = Request>,
S::Error: Into<Response<BoxBody>> + 'static, S::Error: Into<Error> + 'static,
S::Future: 'static, S::Future: 'static,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static, B: MessageBody + 'static,
{ {
fn new( fn new(
cfg: ServiceConfig, cfg: ServiceConfig,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
on_connect_ext: Option<Rc<ConnectCallback<T>>>, on_connect_ext: Option<Rc<ConnectCallback<T>>>,
service: S, srv: S,
) -> H2ServiceHandler<T, S, B> { ) -> H2ServiceHandler<T, S, B> {
H2ServiceHandler { H2ServiceHandler {
flow: HttpFlow::new(service, (), None),
cfg, cfg,
on_connect,
on_connect_ext, on_connect_ext,
_phantom: PhantomData, srv: CloneableService::new(srv),
_t: PhantomData,
} }
} }
} }
impl<T, S, B> Service<(T, Option<net::SocketAddr>)> for H2ServiceHandler<T, S, B> impl<T, S, B> Service for H2ServiceHandler<T, S, B>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request>, S: Service<Request = Request>,
S::Error: Into<Response<BoxBody>> + 'static, S::Error: Into<Error> + 'static,
S::Future: 'static, S::Future: 'static,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static, B: MessageBody + 'static,
{ {
type Request = (T, Option<net::SocketAddr>);
type Response = (); type Response = ();
type Error = DispatchError; type Error = DispatchError;
type Future = H2ServiceHandlerResponse<T, S, B>; type Future = H2ServiceHandlerResponse<T, S, B>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.flow.service.poll_ready(cx).map_err(|err| { self.srv.poll_ready(cx).map_err(|e| {
let err = err.into(); let e = e.into();
error!("Service readiness error: {:?}", err); error!("Service readiness error: {:?}", e);
DispatchError::Service(err) DispatchError::Service(e)
}) })
} }
fn call(&self, (io, addr): (T, Option<net::SocketAddr>)) -> Self::Future { fn call(&mut self, (io, addr): Self::Request) -> Self::Future {
let on_connect_data = OnConnectData::from_io(&io, self.on_connect_ext.as_deref()); let deprecated_on_connect = self.on_connect.as_ref().map(|handler| handler(&io));
let mut connect_extensions = Extensions::new();
if let Some(ref handler) = self.on_connect_ext {
// run on_connect_ext callback, populating connect extensions
handler(&io, &mut connect_extensions);
}
H2ServiceHandlerResponse { H2ServiceHandlerResponse {
state: State::Handshake( state: State::Handshake(
Some(self.flow.clone()), Some(self.srv.clone()),
Some(self.cfg.clone()), Some(self.cfg.clone()),
addr, addr,
on_connect_data, deprecated_on_connect,
handshake_with_timeout(io, &self.cfg), Some(connect_extensions),
server::handshake(io),
), ),
} }
} }
} }
enum State<T, S: Service<Request>, B: MessageBody> enum State<T, S: Service<Request = Request>, B: MessageBody>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
S::Future: 'static, S::Future: 'static,
{ {
Incoming(Dispatcher<T, S, B>),
Handshake( Handshake(
Option<Rc<HttpFlow<S, (), ()>>>, Option<CloneableService<S>>,
Option<ServiceConfig>, Option<ServiceConfig>,
Option<net::SocketAddr>, Option<net::SocketAddr>,
OnConnectData, Option<Box<dyn DataFactory>>,
HandshakeWithTimeout<T>, Option<Extensions>,
Handshake<T, Bytes>,
), ),
Established(Dispatcher<T, S, B, (), ()>),
} }
pub struct H2ServiceHandlerResponse<T, S, B> pub struct H2ServiceHandlerResponse<T, S, B>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request>, S: Service<Request = Request>,
S::Error: Into<Response<BoxBody>> + 'static, S::Error: Into<Error> + 'static,
S::Future: 'static, S::Future: 'static,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static, B: MessageBody + 'static,
@ -322,8 +364,8 @@ where
impl<T, S, B> Future for H2ServiceHandlerResponse<T, S, B> impl<T, S, B> Future for H2ServiceHandlerResponse<T, S, B>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request>, S: Service<Request = Request>,
S::Error: Into<Response<BoxBody>> + 'static, S::Error: Into<Error> + 'static,
S::Future: 'static, S::Future: 'static,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
B: MessageBody, B: MessageBody,
@ -332,35 +374,33 @@ where
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.state { match self.state {
State::Incoming(ref mut disp) => Pin::new(disp).poll(cx),
State::Handshake( State::Handshake(
ref mut srv, ref mut srv,
ref mut config, ref mut config,
ref peer_addr, ref peer_addr,
ref mut conn_data, ref mut on_connect,
ref mut on_connect_data,
ref mut handshake, ref mut handshake,
) => match ready!(Pin::new(handshake).poll(cx)) { ) => match Pin::new(handshake).poll(cx) {
Ok((conn, timer)) => { Poll::Ready(Ok(conn)) => {
let on_connect_data = mem::take(conn_data); self.state = State::Incoming(Dispatcher::new(
self.state = State::Established(Dispatcher::new(
conn,
srv.take().unwrap(), srv.take().unwrap(),
conn,
on_connect.take(),
on_connect_data.take().unwrap(),
config.take().unwrap(), config.take().unwrap(),
None,
*peer_addr, *peer_addr,
on_connect_data,
timer,
)); ));
self.poll(cx) self.poll(cx)
} }
Poll::Ready(Err(err)) => {
Err(err) => {
trace!("H2 handshake error: {}", err); trace!("H2 handshake error: {}", err);
Poll::Ready(Err(err)) Poll::Ready(Err(err.into()))
} }
Poll::Pending => Poll::Pending,
}, },
State::Established(ref mut disp) => Pin::new(disp).poll(cx),
} }
} }
} }
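One detail worth calling out in the TLS constructors above: the variant that assigns `config.alpn_protocols` prepends `h2` while keeping whatever ALPN entries the caller already configured, whereas the `set_protocols(&protos)` variant replaces the list outright. The sketch below isolates just that prepend-h2-keep-the-rest step as plain std code; in the real constructor the resulting vector is assigned to the rustls `ServerConfig`.

// Mirrors the ALPN setup above: advertise "h2" first, but keep any protocols
// that were already configured.
fn with_h2_first(existing: &[Vec<u8>]) -> Vec<Vec<u8>> {
    let mut protos: Vec<Vec<u8>> = vec![b"h2".to_vec()];
    protos.extend_from_slice(existing);
    protos
}

fn main() {
    let alpn = with_h2_first(&[b"http/1.1".to_vec()]);
    assert_eq!(alpn, vec![b"h2".to_vec(), b"http/1.1".to_vec()]);
}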


@ -1,56 +0,0 @@
//! Sealed [`AsHeaderName`] trait and implementations.
use std::{borrow::Cow, str::FromStr as _};
use http::header::{HeaderName, InvalidHeaderName};
/// Sealed trait implemented for types that can be effectively borrowed as a [`HeaderValue`].
///
/// [`HeaderValue`]: super::HeaderValue
pub trait AsHeaderName: Sealed {}
pub struct Seal;
pub trait Sealed {
fn try_as_name(&self, seal: Seal) -> Result<Cow<'_, HeaderName>, InvalidHeaderName>;
}
impl Sealed for HeaderName {
#[inline]
fn try_as_name(&self, _: Seal) -> Result<Cow<'_, HeaderName>, InvalidHeaderName> {
Ok(Cow::Borrowed(self))
}
}
impl AsHeaderName for HeaderName {}
impl Sealed for &HeaderName {
#[inline]
fn try_as_name(&self, _: Seal) -> Result<Cow<'_, HeaderName>, InvalidHeaderName> {
Ok(Cow::Borrowed(*self))
}
}
impl AsHeaderName for &HeaderName {}
impl Sealed for &str {
#[inline]
fn try_as_name(&self, _: Seal) -> Result<Cow<'_, HeaderName>, InvalidHeaderName> {
HeaderName::from_str(self).map(Cow::Owned)
}
}
impl AsHeaderName for &str {}
impl Sealed for String {
#[inline]
fn try_as_name(&self, _: Seal) -> Result<Cow<'_, HeaderName>, InvalidHeaderName> {
HeaderName::from_str(self).map(Cow::Owned)
}
}
impl AsHeaderName for String {}
impl Sealed for &String {
#[inline]
fn try_as_name(&self, _: Seal) -> Result<Cow<'_, HeaderName>, InvalidHeaderName> {
HeaderName::from_str(self).map(Cow::Owned)
}
}
impl AsHeaderName for &String {}
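The `AsHeaderName` module above is a textbook sealed-trait setup: the public trait is bounded on a `Sealed` trait (taking a `Seal` token argument) that downstream crates cannot reach, so they can use `AsHeaderName` in bounds but cannot implement it themselves. Below is a generic, self-contained sketch of the same pattern with made-up names; it is not actix-http's actual code.

// Generic sealed-trait sketch: `AsKey` can appear in bounds anywhere, but only
// this crate can implement it, because `Sealed` lives in a private submodule.
mod sealed {
    pub struct Seal;

    pub trait Sealed {
        fn as_key(&self, seal: Seal) -> String;
    }
}

pub trait AsKey: sealed::Sealed {}

impl sealed::Sealed for &str {
    fn as_key(&self, _: sealed::Seal) -> String {
        (*self).to_string()
    }
}
impl AsKey for &str {}

impl sealed::Sealed for String {
    fn as_key(&self, _: sealed::Seal) -> String {
        self.clone()
    }
}
impl AsKey for String {}

// Generic callers name only `AsKey`; the sealing machinery stays internal.
fn lookup<K: AsKey>(key: K) -> String {
    use sealed::Sealed as _;
    key.as_key(sealed::Seal)
}

fn main() {
    assert_eq!(lookup("content-type"), "content-type");
    assert_eq!(lookup(String::from("accept")), "accept");
}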


@ -2,12 +2,11 @@ use std::cmp::Ordering;
use mime::Mime; use mime::Mime;
use super::QualityItem; use crate::header::{qitem, QualityItem};
use crate::http::header; use crate::http::header;
crate::http::header::common_header! { header! {
/// `Accept` header, defined /// `Accept` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.3.2)
/// in [RFC 7231 §5.3.2](https://datatracker.ietf.org/doc/html/rfc7231#section-5.3.2)
/// ///
/// The `Accept` header field can be used by user agents to specify /// The `Accept` header field can be used by user agents to specify
/// response media types that are acceptable. Accept header fields can /// response media types that are acceptable. Accept header fields can
@ -16,7 +15,8 @@ crate::http::header::common_header! {
/// in-line image /// in-line image
/// ///
/// # ABNF /// # ABNF
/// ```plain ///
/// ```text
/// Accept = #( media-range [ accept-params ] ) /// Accept = #( media-range [ accept-params ] )
/// ///
/// media-range = ( "*/*" /// media-range = ( "*/*"
@ -27,94 +27,112 @@ crate::http::header::common_header! {
/// accept-ext = OWS ";" OWS token [ "=" ( token / quoted-string ) ] /// accept-ext = OWS ";" OWS token [ "=" ( token / quoted-string ) ]
/// ``` /// ```
/// ///
/// # Example Values /// # Example values
/// * `audio/*; q=0.2, audio/basic` /// * `audio/*; q=0.2, audio/basic`
/// * `text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c` /// * `text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c`
/// ///
/// # Examples /// # Examples
/// ``` /// ```rust
/// use actix_web::HttpResponse; /// # extern crate actix_http;
/// use actix_web::http::header::{Accept, QualityItem}; /// extern crate mime;
/// use actix_http::Response;
/// use actix_http::http::header::{Accept, qitem};
/// ///
/// let mut builder = HttpResponse::Ok(); /// # fn main() {
/// builder.insert_header( /// let mut builder = Response::Ok();
///
/// builder.set(
/// Accept(vec![ /// Accept(vec![
/// QualityItem::max(mime::TEXT_HTML), /// qitem(mime::TEXT_HTML),
/// ]) /// ])
/// ); /// );
/// # }
/// ``` /// ```
/// ///
/// ``` /// ```rust
/// use actix_web::HttpResponse; /// # extern crate actix_http;
/// use actix_web::http::header::{Accept, QualityItem}; /// extern crate mime;
/// use actix_http::Response;
/// use actix_http::http::header::{Accept, qitem};
/// ///
/// let mut builder = HttpResponse::Ok(); /// # fn main() {
/// builder.insert_header( /// let mut builder = Response::Ok();
///
/// builder.set(
/// Accept(vec![ /// Accept(vec![
/// QualityItem::max(mime::APPLICATION_JSON), /// qitem(mime::APPLICATION_JSON),
/// ]) /// ])
/// ); /// );
/// # }
/// ``` /// ```
/// ///
/// ``` /// ```rust
/// use actix_web::HttpResponse; /// # extern crate actix_http;
/// use actix_web::http::header::{Accept, QualityItem, q}; /// extern crate mime;
/// use actix_http::Response;
/// use actix_http::http::header::{Accept, QualityItem, q, qitem};
/// ///
/// let mut builder = HttpResponse::Ok(); /// # fn main() {
/// builder.insert_header( /// let mut builder = Response::Ok();
///
/// builder.set(
/// Accept(vec![ /// Accept(vec![
/// QualityItem::max(mime::TEXT_HTML), /// qitem(mime::TEXT_HTML),
/// QualityItem::max("application/xhtml+xml".parse().unwrap()), /// qitem("application/xhtml+xml".parse().unwrap()),
/// QualityItem::new(mime::TEXT_XML, q(0.9)), /// QualityItem::new(
/// QualityItem::max("image/webp".parse().unwrap()), /// mime::TEXT_XML,
/// QualityItem::new(mime::STAR_STAR, q(0.8)), /// q(900)
/// ),
/// qitem("image/webp".parse().unwrap()),
/// QualityItem::new(
/// mime::STAR_STAR,
/// q(800)
/// ),
/// ]) /// ])
/// ); /// );
/// # }
/// ``` /// ```
(Accept, header::ACCEPT) => (QualityItem<Mime>)* (Accept, header::ACCEPT) => (QualityItem<Mime>)+
test_parse_and_format { test_accept {
// Tests from the RFC // Tests from the RFC
crate::http::header::common_header_test!( test_header!(
test1, test1,
vec![b"audio/*; q=0.2, audio/basic"], vec![b"audio/*; q=0.2, audio/basic"],
Some(Accept(vec![ Some(Accept(vec![
QualityItem::new("audio/*".parse().unwrap(), q(0.2)), QualityItem::new("audio/*".parse().unwrap(), q(200)),
QualityItem::max("audio/basic".parse().unwrap()), qitem("audio/basic".parse().unwrap()),
]))); ])));
test_header!(
crate::http::header::common_header_test!(
test2, test2,
vec![b"text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c"], vec![b"text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c"],
Some(Accept(vec![ Some(Accept(vec![
QualityItem::new(mime::TEXT_PLAIN, q(0.5)), QualityItem::new(mime::TEXT_PLAIN, q(500)),
QualityItem::max(mime::TEXT_HTML), qitem(mime::TEXT_HTML),
QualityItem::new( QualityItem::new(
"text/x-dvi".parse().unwrap(), "text/x-dvi".parse().unwrap(),
q(0.8)), q(800)),
QualityItem::max("text/x-c".parse().unwrap()), qitem("text/x-c".parse().unwrap()),
]))); ])));
// Custom tests // Custom tests
crate::http::header::common_header_test!( test_header!(
test3, test3,
vec![b"text/plain; charset=utf-8"], vec![b"text/plain; charset=utf-8"],
Some(Accept(vec![ Some(Accept(vec![
QualityItem::max(mime::TEXT_PLAIN_UTF_8), qitem(mime::TEXT_PLAIN_UTF_8),
]))); ])));
crate::http::header::common_header_test!( test_header!(
test4, test4,
vec![b"text/plain; charset=utf-8; q=0.5"], vec![b"text/plain; charset=utf-8; q=0.5"],
Some(Accept(vec![ Some(Accept(vec![
QualityItem::new(mime::TEXT_PLAIN_UTF_8, QualityItem::new(mime::TEXT_PLAIN_UTF_8,
q(0.5)), q(500)),
]))); ])));
#[test] #[test]
fn test_fuzzing1() { fn test_fuzzing1() {
let req = test::TestRequest::default() use crate::test::TestRequest;
.insert_header((header::ACCEPT, "chunk#;e")) let req = TestRequest::with_header(crate::header::ACCEPT, "chunk#;e").finish();
.finish();
let header = Accept::parse(&req); let header = Accept::parse(&req);
assert!(header.is_ok()); assert!(header.is_ok());
} }
@ -124,71 +142,34 @@ crate::http::header::common_header! {
impl Accept { impl Accept {
/// Construct `Accept: */*`. /// Construct `Accept: */*`.
pub fn star() -> Accept { pub fn star() -> Accept {
Accept(vec![QualityItem::max(mime::STAR_STAR)]) Accept(vec![qitem(mime::STAR_STAR)])
} }
/// Construct `Accept: application/json`. /// Construct `Accept: application/json`.
pub fn json() -> Accept { pub fn json() -> Accept {
Accept(vec![QualityItem::max(mime::APPLICATION_JSON)]) Accept(vec![qitem(mime::APPLICATION_JSON)])
} }
/// Construct `Accept: text/*`. /// Construct `Accept: text/*`.
pub fn text() -> Accept { pub fn text() -> Accept {
Accept(vec![QualityItem::max(mime::TEXT_STAR)]) Accept(vec![qitem(mime::TEXT_STAR)])
} }
/// Construct `Accept: image/*`. /// Construct `Accept: image/*`.
pub fn image() -> Accept { pub fn image() -> Accept {
Accept(vec![QualityItem::max(mime::IMAGE_STAR)]) Accept(vec![qitem(mime::IMAGE_STAR)])
} }
/// Construct `Accept: text/html`. /// Construct `Accept: text/html`.
pub fn html() -> Accept { pub fn html() -> Accept {
Accept(vec![QualityItem::max(mime::TEXT_HTML)]) Accept(vec![qitem(mime::TEXT_HTML)])
}
// TODO: method for getting best content encoding based on q-factors, available from server side
// and if none are acceptable return None
/// Extracts the most preferable mime type, accounting for [q-factor weighting].
///
/// If no q-factors are provided, the first mime type is chosen. Note that items without
/// q-factors are given the maximum preference value.
///
/// As per the spec, will return [`mime::STAR_STAR`] (indicating no preference) if the contained
/// list is empty.
///
/// [q-factor weighting]: https://datatracker.ietf.org/doc/html/rfc7231#section-5.3.2
pub fn preference(&self) -> Mime {
use actix_http::header::Quality;
let mut max_item = None;
let mut max_pref = Quality::ZERO;
// uses manual max lookup loop since we want the first occurrence in the case of same
// preference but `Iterator::max_by_key` would give us the last occurrence
for pref in &self.0 {
// only change if strictly greater
// equal items, even while unsorted, still have higher preference if they appear first
if pref.quality > max_pref {
max_pref = pref.quality;
max_item = Some(pref.item.clone());
}
}
max_item.unwrap_or(mime::STAR_STAR)
} }
/// Returns a sorted list of mime types from highest to lowest preference, accounting for /// Returns a sorted list of mime types from highest to lowest preference, accounting for
/// [q-factor weighting] and specificity. /// [q-factor weighting] and specificity.
/// ///
/// [q-factor weighting]: https://datatracker.ietf.org/doc/html/rfc7231#section-5.3.2 /// [q-factor weighting]: https://tools.ietf.org/html/rfc7231#section-5.3.2
pub fn ranked(&self) -> Vec<Mime> { pub fn mime_precedence(&self) -> Vec<Mime> {
if self.is_empty() {
return vec![];
}
let mut types = self.0.clone(); let mut types = self.0.clone();
// use stable sort so items with equal q-factor and specificity retain listed order // use stable sort so items with equal q-factor and specificity retain listed order
@ -229,29 +210,42 @@ impl Accept {
types.into_iter().map(|qitem| qitem.item).collect() types.into_iter().map(|qitem| qitem.item).collect()
} }
/// Extracts the most preferable mime type, accounting for [q-factor weighting].
///
/// If no q-factors are provided, the first mime type is chosen. Note that items without
/// q-factors are given the maximum preference value.
///
/// Returns `None` if contained list is empty.
///
/// [q-factor weighting]: https://tools.ietf.org/html/rfc7231#section-5.3.2
pub fn mime_preference(&self) -> Option<Mime> {
let types = self.mime_precedence();
types.first().cloned()
}
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::http::header::q; use crate::header::q;
#[test] #[test]
fn ranking_precedence() { fn test_mime_precedence() {
let test = Accept(vec![]); let test = Accept(vec![]);
assert!(test.ranked().is_empty()); assert!(test.mime_precedence().is_empty());
let test = Accept(vec![QualityItem::max(mime::APPLICATION_JSON)]); let test = Accept(vec![qitem(mime::APPLICATION_JSON)]);
assert_eq!(test.ranked(), vec![mime::APPLICATION_JSON]); assert_eq!(test.mime_precedence(), vec!(mime::APPLICATION_JSON));
let test = Accept(vec![ let test = Accept(vec![
QualityItem::max(mime::TEXT_HTML), qitem(mime::TEXT_HTML),
"application/xhtml+xml".parse().unwrap(), "application/xhtml+xml".parse().unwrap(),
QualityItem::new("application/xml".parse().unwrap(), q(0.9)), QualityItem::new("application/xml".parse().unwrap(), q(0.9)),
QualityItem::new(mime::STAR_STAR, q(0.8)), QualityItem::new(mime::STAR_STAR, q(0.8)),
]); ]);
assert_eq!( assert_eq!(
test.ranked(), test.mime_precedence(),
vec![ vec![
mime::TEXT_HTML, mime::TEXT_HTML,
"application/xhtml+xml".parse().unwrap(), "application/xhtml+xml".parse().unwrap(),
@ -261,33 +255,33 @@ mod tests {
); );
let test = Accept(vec![ let test = Accept(vec![
QualityItem::max(mime::STAR_STAR), qitem(mime::STAR_STAR),
QualityItem::max(mime::IMAGE_STAR), qitem(mime::IMAGE_STAR),
QualityItem::max(mime::IMAGE_PNG), qitem(mime::IMAGE_PNG),
]); ]);
assert_eq!( assert_eq!(
test.ranked(), test.mime_precedence(),
vec![mime::IMAGE_PNG, mime::IMAGE_STAR, mime::STAR_STAR] vec![mime::IMAGE_PNG, mime::IMAGE_STAR, mime::STAR_STAR]
); );
} }
#[test] #[test]
fn preference_selection() { fn test_mime_preference() {
let test = Accept(vec![ let test = Accept(vec![
QualityItem::max(mime::TEXT_HTML), qitem(mime::TEXT_HTML),
"application/xhtml+xml".parse().unwrap(), "application/xhtml+xml".parse().unwrap(),
QualityItem::new("application/xml".parse().unwrap(), q(0.9)), QualityItem::new("application/xml".parse().unwrap(), q(0.9)),
QualityItem::new(mime::STAR_STAR, q(0.8)), QualityItem::new(mime::STAR_STAR, q(0.8)),
]); ]);
assert_eq!(test.preference(), mime::TEXT_HTML); assert_eq!(test.mime_preference(), Some(mime::TEXT_HTML));
let test = Accept(vec![ let test = Accept(vec![
QualityItem::new("video/*".parse().unwrap(), q(0.8)), QualityItem::new("video/*".parse().unwrap(), q(0.8)),
QualityItem::max(mime::IMAGE_PNG), qitem(mime::IMAGE_PNG),
QualityItem::new(mime::STAR_STAR, q(0.5)), QualityItem::new(mime::STAR_STAR, q(0.5)),
QualityItem::max(mime::IMAGE_SVG), qitem(mime::IMAGE_SVG),
QualityItem::new(mime::IMAGE_STAR, q(0.8)), QualityItem::new(mime::IMAGE_STAR, q(0.8)),
]); ]);
assert_eq!(test.preference(), mime::IMAGE_PNG); assert_eq!(test.mime_preference(), Some(mime::IMAGE_PNG));
} }
} }
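The `preference()` method added in the newer version scans for the strictly highest q-value, so on equal q-values the first listed type wins. Below is a toy, self-contained version of that scan using string/integer pairs instead of `Mime` and `Quality`; the 0..=1000 scale is purely illustrative.

// Toy version of the preference scan: highest q-factor wins, and on equal
// q-factors the earlier entry is kept because only strictly-greater replaces it.
fn preferred<'a>(items: &[(&'a str, u16)]) -> Option<&'a str> {
    let mut best = None;
    let mut best_q = 0u16;

    for &(mime, q) in items {
        if q > best_q {
            best_q = q;
            best = Some(mime);
        }
    }

    best
}

fn main() {
    let accept = [
        ("text/html", 1000),
        ("application/xhtml+xml", 1000), // same q as text/html but listed later, so it loses
        ("application/xml", 900),
        ("*/*", 800),
    ];
    assert_eq!(preferred(&accept), Some("text/html"));
}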

Some files were not shown because too many files have changed in this diff.