mirror of https://github.com/fafhrd91/actix-web synced 2025-04-23 21:53:22 +02:00

Compare commits


2 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Rob Ede | c9c36679e4 | bump actix http version | 2021-08-11 19:21:40 +01:00 |
| Rob Ede | 655d7b4f05 | sec fixes | 2021-08-08 21:21:48 +01:00 |
487 changed files with 37279 additions and 71740 deletions


@ -1,7 +0,0 @@
disallowed-names = [
"e", # no single letter error bindings
]
disallowed-methods = [
"std::cell::RefCell::default()",
"std::rc::Rc::default()",
]


@ -1,15 +0,0 @@
comment: false
coverage:
status:
project:
default:
threshold: 100% # make CI green
patch:
default:
threshold: 100% # make CI green
ignore: # ignore code coverage on following paths
- "**/tests"
- "**/benches"
- "**/examples"


@ -1,3 +0,0 @@
version: "0.2"
words:
- actix

.github/FUNDING.yml vendored

@ -1,3 +0,0 @@
# These are supported funding model platforms
github: [robjtede]


@ -3,41 +3,35 @@ name: Bug Report
about: Create a bug report.
---
-Your issue may already be reported! Please search on the [Actix Web issue tracker](https://github.com/actix/actix-web/issues) before creating one.
+Your issue may already be reported!
+Please search on the [Actix Web issue tracker](https://github.com/actix/actix-web/issues) before creating one.
## Expected Behavior
<!--- If you're describing a bug, tell us what should happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->
## Current Behavior
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
## Possible Solution
<!--- Not obligatory, but suggest a fix/reason for the bug, -->
<!--- or ideas how to implement the addition or change -->
## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
1.
2.
3.
4.
## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->
## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
-- Rust Version (I.e, output of `rustc -V`):
-- Actix Web Version:
+* Rust Version (I.e, output of `rustc -V`):
+* Actix Web Version:


@ -1,8 +1,15 @@
blank_issues_enabled: true
contact_links:
-  - name: Actix Discord
-    url: https://discord.gg/NWpN5mmg3x
-    about: Actix developer discussion and community chat
  - name: GitHub Discussions
    url: https://github.com/actix/actix-web/discussions
    about: Actix Web Q&A
+  - name: Gitter chat (actix-web)
+    url: https://gitter.im/actix/actix-web
+    about: Actix Web Q&A
+  - name: Gitter chat (actix)
+    url: https://gitter.im/actix/actix
+    about: Actix (actor framework) Q&A
+  - name: Actix Discord
+    url: https://discord.gg/NWpN5mmg3x
+    about: Actix developer discussion and community chat


@ -1,28 +1,27 @@
<!-- Thanks for considering contributing actix! -->
-<!-- Please fill out the following to get your PR reviewed quicker. -->
+<!-- Please fill out the following to make our reviews easy. -->
## PR Type
<!-- What kind of change does this PR make? -->
<!-- Bug Fix / Feature / Refactor / Code Style / Other -->
-INSERT_PR_TYPE
+PR_TYPE
## PR Checklist
-Check your PR fulfills the following:
+<!-- Check your PR fulfills the following items. -->
<!-- For draft PRs check the boxes as you complete them. -->
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] A changelog entry has been made for the appropriate packages.
-- [ ] Format code with the latest stable rustfmt.
+- [ ] Format code with the latest stable rustfmt
-- [ ] (Team) Label with affected crates and semver status.
## Overview
<!-- Describe the current and new behavior. -->
<!-- Emphasize any breaking changes. -->
<!-- If this PR fixes or closes an issue, reference it here. -->
<!-- Closes #000 -->


@ -1,10 +0,0 @@
version: 2
updates:
- package-ecosystem: cargo
directory: /
schedule:
interval: weekly
- package-ecosystem: github-actions
directory: /
schedule:
interval: weekly


@ -1,28 +1,28 @@
-name: Benchmark
+name: Benchmark (Linux)
on:
+  pull_request:
+    types: [opened, synchronize, reopened]
  push:
-    branches: [master]
+    branches:
+      - master
-permissions:
-  contents: read
-concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
-  cancel-in-progress: true
jobs:
  check_benchmark:
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v2
      - name: Install Rust
-        run: |
-          rustup set profile minimal
-          rustup install nightly
-          rustup override set nightly
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: nightly
+          profile: minimal
+          override: true
      - name: Check benchmark
-        run: cargo bench --bench=server -- --sample-size=15
+        uses: actions-rs/cargo@v1
+        with:
+          command: bench
+          args: --bench=server -- --sample-size=15


@ -1,91 +0,0 @@
name: CI (post-merge)
on:
push:
branches: [master]
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build_and_test_nightly:
strategy:
fail-fast: false
matrix:
# prettier-ignore
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
version:
- { name: nightly, version: nightly }
name: ${{ matrix.target.name }} / ${{ matrix.version.name }}
runs-on: ${{ matrix.target.os }}
steps:
- uses: actions/checkout@v4
- name: Install nasm
if: matrix.target.os == 'windows-latest'
uses: ilammy/setup-nasm@v1.5.2
- name: Install OpenSSL
if: matrix.target.os == 'windows-latest'
shell: bash
run: |
set -e
choco install openssl --version=1.1.1.2100 -y --no-progress
echo 'OPENSSL_DIR=C:\Program Files\OpenSSL' >> $GITHUB_ENV
echo "RUSTFLAGS=-C target-feature=+crt-static" >> $GITHUB_ENV
- name: Install Rust (${{ matrix.version.name }})
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: ${{ matrix.version.version }}
- name: Install just, cargo-hack, cargo-nextest, cargo-ci-cache-clean
uses: taiki-e/install-action@v2.49.50
with:
tool: just,cargo-hack,cargo-nextest,cargo-ci-cache-clean
- name: check minimal
run: just check-min
- name: check default
run: just check-default
- name: tests
timeout-minutes: 60
run: just test
- name: CI cache clean
run: cargo-ci-cache-clean
ci_feature_powerset_check:
name: Verify Feature Combinations
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Free Disk Space
run: ./scripts/free-disk-space.sh
- name: Setup mold linker
uses: rui314/setup-mold@v1
- name: Install Rust
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
- name: Install just, cargo-hack
uses: taiki-e/install-action@v2.49.50
with:
tool: just,cargo-hack
- name: Check feature combinations
run: just check-feature-combinations


@ -1,121 +0,0 @@
name: CI
on:
pull_request:
types: [opened, synchronize, reopened]
merge_group:
types: [checks_requested]
push:
branches: [master]
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
read_msrv:
name: Read MSRV
uses: actions-rust-lang/msrv/.github/workflows/msrv.yml@v0.1.0
build_and_test:
needs: read_msrv
strategy:
fail-fast: false
matrix:
# prettier-ignore
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
version:
- { name: msrv, version: "${{ needs.read_msrv.outputs.msrv }}" }
- { name: stable, version: stable }
name: ${{ matrix.target.name }} / ${{ matrix.version.name }}
runs-on: ${{ matrix.target.os }}
steps:
- uses: actions/checkout@v4
- name: Install nasm
if: matrix.target.os == 'windows-latest'
uses: ilammy/setup-nasm@v1.5.2
- name: Install OpenSSL
if: matrix.target.os == 'windows-latest'
shell: bash
run: |
set -e
choco install openssl --version=1.1.1.2100 -y --no-progress
echo 'OPENSSL_DIR=C:\Program Files\OpenSSL' >> $GITHUB_ENV
echo "RUSTFLAGS=-C target-feature=+crt-static" >> $GITHUB_ENV
- name: Setup mold linker
if: matrix.target.os == 'ubuntu-latest'
uses: rui314/setup-mold@v1
- name: Install Rust (${{ matrix.version.name }})
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: ${{ matrix.version.version }}
- name: Install just, cargo-hack, cargo-nextest, cargo-ci-cache-clean
uses: taiki-e/install-action@v2.49.50
with:
tool: just,cargo-hack,cargo-nextest,cargo-ci-cache-clean
- name: workaround MSRV issues
if: matrix.version.name == 'msrv'
run: just downgrade-for-msrv
- name: check minimal
run: just check-min
- name: check default
run: just check-default
- name: tests
timeout-minutes: 60
run: just test
- name: CI cache clean
run: cargo-ci-cache-clean
io-uring:
name: io-uring tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: nightly
- name: tests (io-uring)
timeout-minutes: 60
run: >
sudo bash -c "ulimit -Sl 512 && ulimit -Hl 512 && PATH=$PATH:/usr/share/rust/.cargo/bin && RUSTUP_TOOLCHAIN=stable cargo test --lib --tests -p=actix-files --all-features"
rustdoc:
name: doc tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust (nightly)
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: nightly
- name: Install just
uses: taiki-e/install-action@v2.49.50
with:
tool: just
- name: doc tests
run: just test-docs

.github/workflows/clippy-fmt.yml vendored Normal file

@ -0,0 +1,32 @@
on:
pull_request:
types: [opened, synchronize, reopened]
name: Clippy and rustfmt Check
jobs:
clippy_check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
components: rustfmt
override: true
- name: Check with rustfmt
uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly
components: clippy
override: true
- name: Check with Clippy
uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --all-features --all --tests


@ -1,40 +0,0 @@
name: Coverage
on:
push:
branches: [master]
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
coverage:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust (nightly)
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: nightly
components: llvm-tools
- name: Install just, cargo-llvm-cov, cargo-nextest
uses: taiki-e/install-action@v2.49.50
with:
tool: just,cargo-llvm-cov,cargo-nextest
- name: Generate code coverage
run: just test-coverage-codecov
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v5.4.2
with:
files: codecov.json
fail_ci_if_error: true
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}


@ -1,90 +0,0 @@
name: Lint
on:
pull_request:
types: [opened, synchronize, reopened]
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust (nightly)
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: nightly
components: rustfmt
- name: Check with Rustfmt
run: cargo fmt --all -- --check
clippy:
permissions:
contents: read
checks: write # to add clippy checks to PR diffs
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
components: clippy
- name: Check with Clippy
uses: giraffate/clippy-action@v1.0.1
with:
reporter: github-pr-check
github_token: ${{ secrets.GITHUB_TOKEN }}
clippy_flags: >-
--workspace --all-features --tests --examples --bins --
-A unknown_lints -D clippy::todo -D clippy::dbg_macro
lint-docs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust (nightly)
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: nightly
components: rust-docs
- name: Check for broken intra-doc links
env:
RUSTDOCFLAGS: -D warnings
run: cargo +nightly doc --no-deps --workspace --all-features
check-external-types:
if: false # rustdoc mismatch currently
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust (${{ vars.RUST_VERSION_EXTERNAL_TYPES }})
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: ${{ vars.RUST_VERSION_EXTERNAL_TYPES }}
- name: Install just
uses: taiki-e/install-action@v2.49.50
with:
tool: just
- name: Install cargo-check-external-types
uses: taiki-e/cache-cargo-install-action@v2.1.1
with:
tool: cargo-check-external-types
- name: check external types
run: just check-external-types-all +${{ vars.RUST_VERSION_EXTERNAL_TYPES }}

.github/workflows/linux.yml vendored Normal file

@ -0,0 +1,69 @@
name: CI (Linux)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- 1.42.0 # MSRV
- stable
- nightly
name: ${{ matrix.version }} - x86_64-unknown-linux-gnu
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
- name: tests (actix-http)
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --package=actix-http --no-default-features --features=rustls -- --nocapture
- name: tests (awc)
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --package=awc --no-default-features --features=rustls -- --nocapture
- name: Generate coverage file
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
run: |
cargo install cargo-tarpaulin --vers "^0.13"
cargo tarpaulin --out Xml
- name: Upload to Codecov
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
uses: codecov/codecov-action@v1
with:
file: cobertura.xml

.github/workflows/macos.yml vendored Normal file

@ -0,0 +1,44 @@
name: CI (macOS)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
name: ${{ matrix.version }} - x86_64-apple-darwin
runs-on: macOS-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-apple-darwin
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
--skip=test_h2_content_length
--skip=test_reading_deflate_encoding_large_random_rustls

.github/workflows/upload-doc.yml vendored Normal file

@ -0,0 +1,37 @@
name: Upload documentation
on:
push:
branches:
- master
jobs:
build:
runs-on: ubuntu-latest
if: github.repository == 'actix/actix-web'
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: doc
args: --no-deps --workspace --all-features
- name: Tweak HTML
run: echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html
- name: Deploy to GitHub Pages
uses: JamesIves/github-pages-deploy-action@3.7.1
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages
FOLDER: target/doc

.github/workflows/windows.yml vendored Normal file

@ -0,0 +1,64 @@
name: CI (Windows)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
env:
VCPKGRS_DYNAMIC: 1
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
name: ${{ matrix.version }} - x86_64-pc-windows-msvc
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-pc-windows-msvc
profile: minimal
override: true
- name: Install OpenSSL
run: |
vcpkg integrate install
vcpkg install openssl:x64-windows
Copy-Item C:\vcpkg\installed\x64-windows\bin\libcrypto-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libcrypto.dll
Copy-Item C:\vcpkg\installed\x64-windows\bin\libssl-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libssl.dll
Get-ChildItem C:\vcpkg\installed\x64-windows\bin
Get-ChildItem C:\vcpkg\installed\x64-windows\lib
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
--skip=test_h2_content_length
--skip=test_reading_deflate_encoding_large_random_rustls
--skip=test_params
--skip=test_simple
--skip=test_expect_continue
--skip=test_http10_keepalive
--skip=test_slow_request
--skip=test_connection_force_close
--skip=test_connection_server_close
--skip=test_connection_wait_queue_force_close

.gitignore vendored

@ -1,3 +1,4 @@
+Cargo.lock
target/
guide/build/
/gh-pages
@ -15,10 +16,3 @@ guide/build/
# Configuration directory generated by CLion
.idea
-# Configuration directory generated by VSCode
-.vscode
-# code coverage
-/lcov.info
-/codecov.json


@ -1,5 +0,0 @@
overrides:
- files: "*.md"
options:
printWidth: 9999
proseWrap: never


@ -1,3 +0,0 @@
group_imports = "StdExternalCrate"
imports_granularity = "Crate"
use_field_init_shorthand = true


@ -1,5 +1,600 @@
-# Changelog
+# Changes
-Changelogs are kept separately for each crate in this repo.
-Actix Web changelog [is now here &rarr;](./actix-web/CHANGES.md).
+## Unreleased - 2020-xx-xx
## 3.3.2 - 2020-12-01
### Fixed
* Removed an occasional `unwrap` on `None` panic in `NormalizePathNormalization`. [#1762]
* Fix `match_pattern()` returning `None` for scope with empty path resource. [#1798]
* Increase minimum `socket2` version. [#1803]
[#1762]: https://github.com/actix/actix-web/pull/1762
[#1798]: https://github.com/actix/actix-web/pull/1798
[#1803]: https://github.com/actix/actix-web/pull/1803
## 3.3.1 - 2020-11-29
* Ensure `actix-http` dependency uses same `serde_urlencoded`.
## 3.3.0 - 2020-11-25
### Added
* Add `Either<A, B>` extractor helper. [#1788]
### Changed
* Upgrade `serde_urlencoded` to `0.7`. [#1773]
[#1773]: https://github.com/actix/actix-web/pull/1773
[#1788]: https://github.com/actix/actix-web/pull/1788
## 3.2.0 - 2020-10-30
### Added
* Implement `exclude_regex` for Logger middleware. [#1723]
* Add request-local data extractor `web::ReqData`. [#1748]
* Add ability to register closure for request middleware logging. [#1749]
* Add `app_data` to `ServiceConfig`. [#1757]
* Expose `on_connect` for access to the connection stream before request is handled. [#1754]
### Changed
* Updated actix-web-codegen dependency for access to new `#[route(...)]` multi-method macro.
* Print non-configured `Data<T>` type when attempting extraction. [#1743]
* Re-export bytes::Buf{Mut} in web module. [#1750]
* Upgrade `pin-project` to `1.0`.
[#1723]: https://github.com/actix/actix-web/pull/1723
[#1743]: https://github.com/actix/actix-web/pull/1743
[#1748]: https://github.com/actix/actix-web/pull/1748
[#1750]: https://github.com/actix/actix-web/pull/1750
[#1754]: https://github.com/actix/actix-web/pull/1754
[#1749]: https://github.com/actix/actix-web/pull/1749
## 3.1.0 - 2020-09-29
### Changed
* Add `TrailingSlash::MergeOnly` behaviour to `NormalizePath`, which allows `NormalizePath`
to retain any trailing slashes. [#1695]
* Remove bound `std::marker::Sized` from `web::Data` to support storing `Arc<dyn Trait>`
via `web::Data::from` [#1710]
### Fixed
* `ResourceMap` debug printing is no longer infinitely recursive. [#1708]
[#1695]: https://github.com/actix/actix-web/pull/1695
[#1708]: https://github.com/actix/actix-web/pull/1708
[#1710]: https://github.com/actix/actix-web/pull/1710
## 3.0.2 - 2020-09-15
### Fixed
* `NormalizePath` when used with `TrailingSlash::Trim` no longer trims the root path "/". [#1678]
[#1678]: https://github.com/actix/actix-web/pull/1678
## 3.0.1 - 2020-09-13
### Changed
* `middleware::normalize::TrailingSlash` enum is now accessible. [#1673]
[#1673]: https://github.com/actix/actix-web/pull/1673
## 3.0.0 - 2020-09-11
* No significant changes from `3.0.0-beta.4`.
## 3.0.0-beta.4 - 2020-09-09
### Added
* `middleware::NormalizePath` now has configurable behaviour for either always having a trailing
slash, or as the new addition, always trimming trailing slashes. [#1639]
### Changed
* Update actix-codec and actix-utils dependencies. [#1634]
* `FormConfig` and `JsonConfig` configurations are now also considered when set
using `App::data`. [#1641]
* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`. [#1655]
* `HttpServer::maxconnrate` is renamed to the more expressive
`HttpServer::max_connection_rate`. [#1655]
[#1639]: https://github.com/actix/actix-web/pull/1639
[#1641]: https://github.com/actix/actix-web/pull/1641
[#1634]: https://github.com/actix/actix-web/pull/1634
[#1655]: https://github.com/actix/actix-web/pull/1655
## 3.0.0-beta.3 - 2020-08-17
### Changed
* Update `rustls` to 0.18
## 3.0.0-beta.2 - 2020-08-17
### Changed
* `PayloadConfig` is now also considered in `Bytes` and `String` extractors when set
using `App::data`. [#1610]
* `web::Path` now has a public representation: `web::Path(pub T)` that enables
destructuring. [#1594]
* `ServiceRequest::app_data` allows retrieval of non-Data data without splitting into parts to
access `HttpRequest` which already allows this. [#1618]
* Re-export all error types from `awc`. [#1621]
* MSRV is now 1.42.0.
### Fixed
* Memory leak of app data in pooled requests. [#1609]
[#1594]: https://github.com/actix/actix-web/pull/1594
[#1609]: https://github.com/actix/actix-web/pull/1609
[#1610]: https://github.com/actix/actix-web/pull/1610
[#1618]: https://github.com/actix/actix-web/pull/1618
[#1621]: https://github.com/actix/actix-web/pull/1621
## 3.0.0-beta.1 - 2020-07-13
### Added
* Re-export `actix_rt::main` as `actix_web::main`.
* `HttpRequest::match_pattern` and `ServiceRequest::match_pattern` for extracting the matched
resource pattern.
* `HttpRequest::match_name` and `ServiceRequest::match_name` for extracting matched resource name.
### Changed
* Fix actix_http::h1::dispatcher so it returns when HW_BUFFER_SIZE is reached. Should reduce peak memory consumption during large uploads. [#1550]
* Migrate cookie handling to `cookie` crate. Actix-web no longer requires `ring` dependency.
* MSRV is now 1.41.1
### Fixed
* `NormalizePath` improved consistency when path needs slashes added _and_ removed.
## 3.0.0-alpha.3 - 2020-05-21
### Added
* Add option to create `Data<T>` from `Arc<T>` [#1509]
### Changed
* Resources and Scopes can now access non-overridden data types set on App (or containing scopes) when setting their own data. [#1486]
* Fix audit issue logging by default peer address [#1485]
* Bump minimum supported Rust version to 1.40
* Replace deprecated `net2` crate with `socket2`
[#1485]: https://github.com/actix/actix-web/pull/1485
[#1509]: https://github.com/actix/actix-web/pull/1509
## [3.0.0-alpha.2] - 2020-05-08
### Changed
* `{Resource,Scope}::default_service(f)` handlers now support app data extraction. [#1452]
* Implement `std::error::Error` for our custom errors [#1422]
* NormalizePath middleware now appends trailing / so that routes of form /example/ respond to /example requests. [#1433]
* Remove the `failure` feature and support.
[#1422]: https://github.com/actix/actix-web/pull/1422
[#1433]: https://github.com/actix/actix-web/pull/1433
[#1452]: https://github.com/actix/actix-web/pull/1452
[#1486]: https://github.com/actix/actix-web/pull/1486
## [3.0.0-alpha.1] - 2020-03-11
### Added
* Add helper function for creating routes with `TRACE` method guard `web::trace()`
* Add convenience functions `test::read_body_json()` and `test::TestRequest::send_request()` for testing.
### Changed
* Use `sha-1` crate instead of unmaintained `sha1` crate
* Skip empty chunks when returning response from a `Stream` [#1308]
* Update the `time` dependency to 0.2.7
* Update `actix-tls` dependency to 2.0.0-alpha.1
* Update `rustls` dependency to 0.17
[#1308]: https://github.com/actix/actix-web/pull/1308
## [2.0.0] - 2019-12-25
### Changed
* Rename `HttpServer::start()` to `HttpServer::run()`
* Allow to gracefully stop test server via `TestServer::stop()`
* Allow to specify multi-patterns for resources
## [2.0.0-rc] - 2019-12-20
### Changed
* Move `BodyEncoding` to `dev` module #1220
* Allow to set `peer_addr` for TestRequest #1074
* Make web::Data deref to Arc<T> #1214
* Rename `App::register_data()` to `App::app_data()`
* `HttpRequest::app_data<T>()` returns `Option<&T>` instead of `Option<&Data<T>>`
### Fixed
* Fix `AppConfig::secure()` is always false. #1202
## [2.0.0-alpha.6] - 2019-12-15
### Fixed
* Fixed compilation with default features off
## [2.0.0-alpha.5] - 2019-12-13
### Added
* Add test server, `test::start()` and `test::start_with()`
## [2.0.0-alpha.4] - 2019-12-08
### Deleted
* Delete HttpServer::run(), it is not useful with async/await
## [2.0.0-alpha.3] - 2019-12-07
### Changed
* Migrate to tokio 0.2
## [2.0.0-alpha.1] - 2019-11-22
### Changed
* Migrated to `std::future`
* Remove implementation of `Responder` for `()`. (#1167)
## [1.0.9] - 2019-11-14
### Added
* Add `Payload::into_inner` method and make stored `dev::Payload` public. (#1110)
### Changed
* Support `Host` guards when the `Host` header is unset (e.g. HTTP/2 requests) (#1129)
## [1.0.8] - 2019-09-25
### Added
* Add `Scope::register_data` and `Resource::register_data` methods, parallel to
`App::register_data`.
* Add `middleware::Condition` that conditionally enables another middleware
* Allow to re-construct `ServiceRequest` from `HttpRequest` and `Payload`
* Add `HttpServer::listen_uds` for ability to listen on UDS FD rather than path,
which is useful for example with systemd.
### Changed
* Make UrlEncodedError::Overflow more informative
* Use actix-testing for testing utils
## [1.0.7] - 2019-08-29
### Fixed
* Request Extensions leak #1062
## [1.0.6] - 2019-08-28
### Added
* Re-implement Host predicate (#989)
* Form implements Responder, returning a `application/x-www-form-urlencoded` response
* Add `into_inner` to `Data`
* Add `test::TestRequest::set_form()` convenience method to automatically serialize data and set
the header in test requests.
### Changed
* `Query` payload made `pub`. Allows user to pattern-match the payload.
* Enable `rust-tls` feature for client #1045
* Update serde_urlencoded to 0.6.1
* Update url to 2.1
## [1.0.5] - 2019-07-18
### Added
* Unix domain sockets (HttpServer::bind_uds) #92
* Actix now logs errors resulting in "internal server error" responses always, with the `error`
logging level
### Fixed
* Restored logging of errors through the `Logger` middleware
## [1.0.4] - 2019-07-17
### Added
* Add `Responder` impl for `(T, StatusCode) where T: Responder`
* Allow to access app's resource map via
`ServiceRequest::resource_map()` and `HttpRequest::resource_map()` methods.
### Changed
* Upgrade `rand` dependency version to 0.7
## [1.0.3] - 2019-06-28
### Added
* Support asynchronous data factories #850
### Changed
* Use `encoding_rs` crate instead of unmaintained `encoding` crate
## [1.0.2] - 2019-06-17
### Changed
* Move cors middleware to `actix-cors` crate.
* Move identity middleware to `actix-identity` crate.
## [1.0.1] - 2019-06-17
### Added
* Add support for PathConfig #903
* Add `middleware::identity::RequestIdentity` trait to `get_identity` from `HttpMessage`.
### Changed
* Move cors middleware to `actix-cors` crate.
* Move identity middleware to `actix-identity` crate.
* Disable default feature `secure-cookies`.
* Allow to test an app that uses async actors #897
* Re-apply patch from #637 #894
### Fixed
* HttpRequest::url_for is broken with nested scopes #915
## [1.0.0] - 2019-06-05
### Added
* Add `Scope::configure()` method.
* Add `ServiceRequest::set_payload()` method.
* Add `test::TestRequest::set_json()` convenience method to automatically
serialize data and set header in test requests.
* Add macros for head, options, trace, connect and patch http methods
### Changed
* Drop an unnecessary `Option<_>` indirection around `ServerBuilder` from `HttpServer`. #863
### Fixed
* Fix Logger request time format, and use rfc3339. #867
* Clear http requests pool on app service drop #860
## [1.0.0-rc] - 2019-05-18
### Add
* Add `Query<T>::from_query()` to extract parameters from a query string. #846
* `QueryConfig`, similar to `JsonConfig` for customizing error handling of query extractors.
### Changed
* `JsonConfig` is now `Send + Sync`, this implies that `error_handler` must be `Send + Sync` too.
### Fixed
* Codegen with parameters in the path only resolves the first registered endpoint #841
## [1.0.0-beta.4] - 2019-05-12
### Add
* Allow to set/override app data on scope level
### Changed
* `App::configure` take an `FnOnce` instead of `Fn`
* Upgrade actix-net crates
## [1.0.0-beta.3] - 2019-05-04
### Added
* Add helper function for executing futures `test::block_fn()`
### Changed
* Extractor configuration could be registered with `App::data()`
or with `Resource::data()` #775
* Route data is unified with app data, `Route::data()` moved to resource
level to `Resource::data()`
* CORS handling without headers #702
* Allow to construct `Data` instances to avoid double `Arc` for `Send + Sync` types.
### Fixed
* Fix `NormalizePath` middleware impl #806
### Deleted
* `App::data_factory()` is deleted.
## [1.0.0-beta.2] - 2019-04-24
### Added
* Add raw services support via `web::service()`
* Add helper functions for reading response body `test::read_body()`
* Add support for `remainder match` (i.e "/path/{tail}*")
* Extend `Responder` trait, allow to override status code and headers.
* Store visit and login timestamp in the identity cookie #502
### Changed
* `.to_async()` handler can return `Responder` type #792
### Fixed
* Fix async web::Data factory handling
## [1.0.0-beta.1] - 2019-04-20
### Added
* Add helper functions for reading test response body,
`test::read_response()` and `test::read_response_json()`
* Add `.peer_addr()` #744
* Add `NormalizePath` middleware
### Changed
* Rename `RouterConfig` to `ServiceConfig`
* Rename `test::call_success` to `test::call_service`
* Removed `ServiceRequest::from_parts()` as it is unsafe to create from parts.
* `CookieIdentityPolicy::max_age()` accepts value in seconds
### Fixed
* Fixed `TestRequest::app_data()`
## [1.0.0-alpha.6] - 2019-04-14
### Changed
* Allow to use any service as default service.
* Remove generic type for request payload, always use default.
* Removed `Decompress` middleware. Bytes, String, Json, Form extractors
automatically decompress payload.
* Make extractor config type explicit. Add `FromRequest::Config` associated type.
## [1.0.0-alpha.5] - 2019-04-12
### Added
* Added async io `TestBuffer` for testing.
### Deleted
* Removed native-tls support
## [1.0.0-alpha.4] - 2019-04-08
### Added
* `App::configure()` allow to offload app configuration to different methods
* Added `URLPath` option for logger
* Added `ServiceRequest::app_data()`, returns `Data<T>`
* Added `ServiceFromRequest::app_data()`, returns `Data<T>`
### Changed
* `FromRequest` trait refactoring
* Move multipart support to actix-multipart crate
### Fixed
* Fix body propagation in Response::from_error. #760
## [1.0.0-alpha.3] - 2019-04-02
### Changed
* Renamed `TestRequest::to_service()` to `TestRequest::to_srv_request()`
* Renamed `TestRequest::to_response()` to `TestRequest::to_srv_response()`
* Removed `Deref` impls
### Removed
* Removed unused `actix_web::web::md()`
## [1.0.0-alpha.2] - 2019-03-29
### Added
* rustls support
### Changed
* use forked cookie
* multipart::Field renamed to MultipartField
## [1.0.0-alpha.1] - 2019-03-28
### Changed
* Complete architecture re-design.
* Return 405 response if no matching route found within resource #538


@ -8,19 +8,19 @@ In the interest of fostering an open and welcoming environment, we as contributo
Examples of behavior that contributes to creating a positive environment include:
-- Using welcoming and inclusive language
-- Being respectful of differing viewpoints and experiences
-- Gracefully accepting constructive criticism
-- Focusing on what is best for the community
-- Showing empathy towards other community members
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
-- The use of sexualized language or imagery and unwelcome sexual attention or advances
-- Trolling, insulting/derogatory comments, and personal or political attacks
-- Public or private harassment
-- Publishing others' private information, such as a physical or electronic address, without explicit permission
-- Other conduct which could reasonably be considered inappropriate in a professional setting
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities

Cargo.lock generated

File diff suppressed because it is too large.


@ -1,29 +1,123 @@
[package]
name = "actix-web"
version = "3.3.2"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust"
readme = "README.md"
keywords = ["actix", "http", "web", "framework", "async"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-web/"
categories = ["network-programming", "asynchronous",
"web-programming::http-server",
"web-programming::websocket"]
license = "MIT OR Apache-2.0"
edition = "2018"
[package.metadata.docs.rs]
features = ["openssl", "rustls", "compress", "secure-cookies"]
[badges]
travis-ci = { repository = "actix/actix-web", branch = "master" }
codecov = { repository = "actix/actix-web", branch = "master", service = "github" }
[lib]
name = "actix_web"
path = "src/lib.rs"
[workspace]
-resolver = "2"
members = [
-  "actix-files",
-  "actix-http-test",
-  "actix-http",
-  "actix-multipart",
-  "actix-multipart-derive",
-  "actix-router",
-  "actix-test",
-  "actix-web-actors",
-  "actix-web-codegen",
-  "actix-web",
-  "awc",
+  ".",
+  "awc",
+  "actix-http",
+  "actix-files",
+  "actix-multipart",
+  "actix-web-actors",
+  "actix-web-codegen",
+  "test-server",
]
-[workspace.package]
-homepage = "https://actix.rs"
-repository = "https://github.com/actix/actix-web"
-license = "MIT OR Apache-2.0"
-edition = "2021"
-rust-version = "1.75"
-[profile.dev]
-# Disabling debug info speeds up builds a bunch and we don't rely on it for debugging that much.
-debug = 0
+[features]
+default = ["compress"]
+# content-encoding support
+compress = ["actix-http/compress", "awc/compress"]
+# sessions feature
+secure-cookies = ["actix-http/secure-cookies"]
+# openssl
+openssl = ["actix-tls/openssl", "awc/openssl", "open-ssl"]
+# rustls
+rustls = ["actix-tls/rustls", "awc/rustls", "rust-tls"]
[[example]]
name = "basic"
required-features = ["compress"]
[[example]]
name = "uds"
required-features = ["compress"]
[[test]]
name = "test_server"
required-features = ["compress"]
[[example]]
name = "on_connect"
required-features = []
[[example]]
name = "client"
required-features = ["rustls"]
[dependencies]
actix-codec = "0.3.0"
actix-service = "1.0.6"
actix-utils = "2.0.0"
actix-router = "0.2.4"
actix-rt = "1.1.1"
actix-server = "1.0.0"
actix-testing = "1.0.0"
actix-macros = "0.1.0"
actix-threadpool = "0.3.1"
actix-tls = "2.0.0"
actix-web-codegen = "0.4.0"
actix-http = "2.2.0"
awc = { version = "2.0.3", default-features = false }
bytes = "0.5.3"
derive_more = "0.99.5"
encoding_rs = "0.8"
futures-channel = { version = "0.3.5", default-features = false }
futures-core = { version = "0.3.5", default-features = false }
futures-util = { version = "0.3.5", default-features = false }
fxhash = "0.2.1"
log = "0.4"
mime = "0.3"
socket2 = "0.3.16"
pin-project = "1.0.0"
regex = "1.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_urlencoded = "0.7"
time = { version = "0.2.7", default-features = false, features = ["std"] }
url = "2.1"
open-ssl = { package = "openssl", version = "0.10", optional = true }
rust-tls = { package = "rustls", version = "0.18.0", optional = true }
tinyvec = { version = "1", features = ["alloc"] }
[dev-dependencies]
actix = "0.10.0"
actix-http = { version = "2.1.0", features = ["actors"] }
rand = "0.7"
env_logger = "0.8"
serde_derive = "1.0"
brotli2 = "0.3.2"
flate2 = "1.0.13"
criterion = "0.3"
[profile.release]
lto = true
@ -31,31 +125,18 @@ opt-level = 3
codegen-units = 1
[patch.crates-io]
-actix-files = { path = "actix-files" }
actix-http = { path = "actix-http" }
-actix-http-test = { path = "actix-http-test" }
+actix-http-test = { path = "test-server" }
-actix-multipart = { path = "actix-multipart" }
-actix-multipart-derive = { path = "actix-multipart-derive" }
-actix-router = { path = "actix-router" }
-actix-test = { path = "actix-test" }
-actix-web = { path = "actix-web" }
-actix-web-actors = { path = "actix-web-actors" }
+actix-web = { path = "." }
actix-web-codegen = { path = "actix-web-codegen" }
+actix-files = { path = "actix-files" }
+actix-multipart = { path = "actix-multipart" }
awc = { path = "awc" }
-# uncomment for quick testing against local actix-net repo
-# actix-service = { path = "../actix-net/actix-service" }
-# actix-macros = { path = "../actix-net/actix-macros" }
-# actix-rt = { path = "../actix-net/actix-rt" }
-# actix-codec = { path = "../actix-net/actix-codec" }
-# actix-utils = { path = "../actix-net/actix-utils" }
-# actix-tls = { path = "../actix-net/actix-tls" }
-# actix-server = { path = "../actix-net/actix-server" }
-[workspace.lints.rust]
-rust_2018_idioms = { level = "deny" }
-future_incompatible = { level = "deny" }
-nonstandard_style = { level = "deny" }
-[workspace.lints.clippy]
-# clone_on_ref_ptr = { level = "deny" }
+[[bench]]
+name = "server"
+harness = false
+[[bench]]
+name = "service"
+harness = false


@ -1,4 +1,4 @@
-Copyright (c) 2017-NOW Actix Team
+Copyright (c) 2017 Actix Team
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated

MIGRATION.md Normal file

@ -0,0 +1,652 @@
## Unreleased
## 3.0.0
* The return type for `ServiceRequest::app_data::<T>()` was changed from returning a `Data<T>` to
simply a `T`. To access a `Data<T>` use `ServiceRequest::app_data::<Data<T>>()`.
* Cookie handling has been offloaded to the `cookie` crate:
* `USERINFO_ENCODE_SET` is no longer exposed. Percent-encoding is still supported; check docs.
* Some types now require lifetime parameters.
* The time crate was updated to `v0.2`, a major breaking change to the time crate, which affects
any `actix-web` method previously expecting a time v0.1 input.
* Setting a cookie's SameSite property, explicitly, to `SameSite::None` will now
result in `SameSite=None` being sent with the response Set-Cookie header.
To create a cookie without a SameSite attribute, remove any calls setting same_site.
* actix-http support for Actors messages was moved to actix-http crate and is enabled
with feature `actors`
* content_length function is removed from actix-http.
You can set Content-Length by normally setting the response body or calling no_chunking function.
* `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
`u64` instead of a `usize`.
* Code that was using `path.<index>` to access a `web::Path<(A, B, C)>`s elements now needs to use
destructuring or `.into_inner()`. For example:
```rust
// Previously:
async fn some_route(path: web::Path<(String, String)>) -> String {
format!("Hello, {} {}", path.0, path.1)
}
// Now (this also worked before):
async fn some_route(path: web::Path<(String, String)>) -> String {
let (first_name, last_name) = path.into_inner();
format!("Hello, {} {}", first_name, last_name)
}
// Or (this wasn't previously supported):
async fn some_route(web::Path((first_name, last_name)): web::Path<(String, String)>) -> String {
format!("Hello, {} {}", first_name, last_name)
}
```
* `middleware::NormalizePath` can now also be configured to trim trailing slashes instead of always keeping one.
It will need `middleware::normalize::TrailingSlash` when being constructed with `NormalizePath::new(...)`,
or for an easier migration you can replace `wrap(middleware::NormalizePath)` with `wrap(middleware::NormalizePath::new(TrailingSlash::MergeOnly))` (see the sketch after this list).
* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`.
* `HttpServer::maxconnrate` is renamed to the more expressive `HttpServer::max_connection_rate`.
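As an editorial illustration (not part of the original MIGRATION.md), a minimal sketch of the `NormalizePath` change described above, assuming actix-web 3.1+ where `TrailingSlash::MergeOnly` is available:
```rust
use actix_web::middleware::{normalize::TrailingSlash, NormalizePath};
use actix_web::{web, App, HttpResponse, HttpServer};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new()
            // Keep the pre-3.0 "merge only" behaviour instead of always appending a slash.
            .wrap(NormalizePath::new(TrailingSlash::MergeOnly))
            .route("/example", web::get().to(|| async { HttpResponse::Ok().finish() }))
    })
    .bind("127.0.0.1:8080")?
    .run()
    .await
}
```
The other variants mentioned in the changelog (`Always`, `Trim`) plug into the same constructor.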
## 2.0.0
* `HttpServer::start()` renamed to `HttpServer::run()`. It is also possible to
`.await` on `run` method result, in that case it awaits server exit.
* `App::register_data()` renamed to `App::app_data()` and accepts any type `T: 'static`.
Stored data is available via `HttpRequest::app_data()` method at runtime.
* Extractor configuration must be registered with `App::app_data()` instead of `App::data()`
* Sync handlers have been removed. The `.to_async()` method has been renamed to `.to()`;
replace `fn` with `async fn` to convert a sync handler to async (see the sketch at the end of this section).
* `actix_http_test::TestServer` moved to `actix_web::test` module. To start
test server use `test::start()` or `test::start_with()` methods
* `ResponseError` trait has been refactored. `ResponseError::error_response()` renders the
HTTP response.
* Feature `rust-tls` renamed to `rustls`
instead of
```rust
actix-web = { version = "2.0.0", features = ["rust-tls"] }
```
use
```rust
actix-web = { version = "2.0.0", features = ["rustls"] }
```
* Feature `ssl` renamed to `openssl`
instead of
```rust
actix-web = { version = "2.0.0", features = ["ssl"] }
```
use
```rust
actix-web = { version = "2.0.0", features = ["openssl"] }
```
* `Cors` builder now requires that you call `.finish()` to construct the middleware
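As an editorial illustration (not part of the original MIGRATION.md), the sync-to-async handler conversion mentioned above; `index` and `configure` are hypothetical names used only for this sketch:
```rust
use actix_web::{web, HttpResponse, Responder};

// 1.x allowed a synchronous handler registered with `.to()`:
// fn index() -> impl Responder { HttpResponse::Ok().body("hello") }

// In 2.0 the same handler becomes an `async fn`; registration stays `web::get().to(index)`.
async fn index() -> impl Responder {
    HttpResponse::Ok().body("hello")
}

// Hypothetical registration helper, just to show `.to()` now takes the async handler directly.
fn configure(cfg: &mut web::ServiceConfig) {
    cfg.route("/", web::get().to(index));
}
```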
## 1.0.1
* Cors middleware has been moved to `actix-cors` crate
instead of
```rust
use actix_web::middleware::cors::Cors;
```
use
```rust
use actix_cors::Cors;
```
* Identity middleware has been moved to `actix-identity` crate
instead of
```rust
use actix_web::middleware::identity::{Identity, CookieIdentityPolicy, IdentityService};
```
use
```rust
use actix_identity::{Identity, CookieIdentityPolicy, IdentityService};
```
## 1.0.0
* Extractor configuration. In version 1.0 this is handled with the new `Data` mechanism for both setting and retrieving the configuration
instead of
```rust
#[derive(Default)]
struct ExtractorConfig {
config: String,
}
impl FromRequest for YourExtractor {
type Config = ExtractorConfig;
type Result = Result<YourExtractor, Error>;
fn from_request(req: &HttpRequest, cfg: &Self::Config) -> Self::Result {
println!("use the config: {:?}", cfg.config);
...
}
}
App::new().resource("/route_with_config", |r| {
r.post().with_config(handler_fn, |cfg| {
cfg.0.config = "test".to_string();
})
})
```
use the HttpRequest to get the configuration like any other `Data` with `req.app_data::<C>()` and set it with the `data()` method on the `resource`
```rust
#[derive(Default)]
struct ExtractorConfig {
config: String,
}
impl FromRequest for YourExtractor {
type Error = Error;
type Future = Result<Self, Self::Error>;
type Config = ExtractorConfig;
fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future {
let cfg = req.app_data::<ExtractorConfig>();
println!("config data?: {:?}", cfg.unwrap().config);
...
}
}
App::new().service(
resource("/route_with_config")
.data(ExtractorConfig {
config: "test".to_string(),
})
.route(post().to(handler_fn)),
)
```
* Resource registration. 1.0 version uses generalized resource
registration via `.service()` method.
instead of
```rust
App::new().resource("/welcome", |r| r.f(welcome))
```
use App's or Scope's `.service()` method. `.service()` method accepts
object that implements `HttpServiceFactory` trait. By default
actix-web provides `Resource` and `Scope` services.
```rust
App::new().service(
web::resource("/welcome")
.route(web::get().to(welcome))
.route(web::post().to(post_handler)),
)
```
* Scope registration.
instead of
```rust
let app = App::new().scope("/{project_id}", |scope| {
scope
.resource("/path1", |r| r.f(|_| HttpResponse::Ok()))
.resource("/path2", |r| r.f(|_| HttpResponse::Ok()))
.resource("/path3", |r| r.f(|_| HttpResponse::MethodNotAllowed()))
});
```
use `.service()` for registration and `web::scope()` as scope object factory.
```rust
let app = App::new().service(
web::scope("/{project_id}")
.service(web::resource("/path1").to(|| HttpResponse::Ok()))
.service(web::resource("/path2").to(|| HttpResponse::Ok()))
.service(web::resource("/path3").to(|| HttpResponse::MethodNotAllowed()))
);
```
* `.with()`, `.with_async()` registration methods have been renamed to `.to()` and `.to_async()`.
instead of
```rust
App::new().resource("/welcome", |r| r.with(welcome))
```
use `.to()` or `.to_async()` methods
```rust
App::new().service(web::resource("/welcome").to(welcome))
```
* Passing arguments to handler with extractors, multiple arguments are allowed
instead of
```rust
fn welcome((body, req): (Bytes, HttpRequest)) -> ... {
...
}
```
use multiple arguments
```rust
fn welcome(body: Bytes, req: HttpRequest) -> ... {
...
}
```
* `.f()`, `.a()` and `.h()` handler registration methods have been removed.
Use `.to()` for handlers and `.to_async()` for async handlers. Handler function
must use extractors.
instead of
```rust
App::new().resource("/welcome", |r| r.f(welcome))
```
use App's `to()` or `to_async()` methods
```rust
App::new().service(web::resource("/welcome").to(welcome))
```
* `HttpRequest` does not provide access to request's payload stream.
instead of
```rust
fn index(req: &HttpRequest) -> Box<Future<Item=HttpResponse, Error=Error>> {
req
.payload()
.from_err()
.fold((), |_, chunk| {
...
})
.map(|_| HttpResponse::Ok().finish())
.responder()
}
```
use `Payload` extractor
```rust
fn index(stream: web::Payload) -> impl Future<Item=HttpResponse, Error=Error> {
stream
.from_err()
.fold((), |_, chunk| {
...
})
.map(|_| HttpResponse::Ok().finish())
}
```
* `State` is now `Data`. You register Data during the App initialization process
and then access it from handlers either using a Data extractor or using
HttpRequest's api.
instead of
```rust
App::with_state(T)
```
use App's `data` method
```rust
App::new()
.data(T)
```
and either use the Data extractor within your handler
```rust
use actix_web::web::Data;
fn endpoint_handler(data: Data<T>) {
...
}
```
.. or access your Data element from the HttpRequest
```rust
fn endpoint_handler(req: HttpRequest) {
let data: Option<Data<T>> = req.app_data::<T>();
}
```
* AsyncResponder is removed, use `.to_async()` registration method and `impl Future<>` as result type.
instead of
```rust
use actix_web::AsyncResponder;
fn endpoint_handler(...) -> impl Future<Item=HttpResponse, Error=Error>{
...
.responder()
}
```
.. simply omit AsyncResponder and the corresponding responder() finish method
* Middleware
instead of
```rust
let app = App::new()
.middleware(middleware::Logger::default())
```
use `.wrap()` method
```rust
let app = App::new()
.wrap(middleware::Logger::default())
.route("/index.html", web::get().to(index));
```
* `HttpRequest::body()`, `HttpRequest::urlencoded()`, `HttpRequest::json()`, `HttpRequest::multipart()`
method have been removed. Use `Bytes`, `String`, `Form`, `Json`, `Multipart` extractors instead.
instead of
```rust
fn index(req: &HttpRequest) -> Responder {
req.body()
.and_then(|body| {
...
})
}
```
use
```rust
fn index(body: Bytes) -> Responder {
...
}
```
* `actix_web::server` module has been removed. To start http server use `actix_web::HttpServer` type
* StaticFiles and NamedFile have been moved to a separate crate.
instead of `use actix_web::fs::StaticFile`
use `use actix_files::Files`
instead of `use actix_web::fs::Namedfile`
use `use actix_files::NamedFile`
* Multipart has been moved to a separate crate.
instead of `use actix_web::multipart::Multipart`
use `use actix_multipart::Multipart`
* Response compression is not enabled by default.
To enable, use `Compress` middleware, `App::new().wrap(Compress::default())`.
* Session middleware moved to actix-session crate
* Actors support have been moved to `actix-web-actors` crate
* Custom Error
Instead of the `error_response` method alone, `ResponseError` now provides two methods: `error_response` and `render_response`. `error_response` creates the error response, and `render_response` returns the error response to the caller.
Simplest migration from 0.7 to 1.0 shall include below method to the custom implementation of ResponseError:
```rust
fn render_response(&self) -> HttpResponse {
self.error_response()
}
```
## 0.7.15
* The `' '` character is not percent decoded anymore before matching routes. If you need to use it in
your routes, you should use `%20`.
instead of
```rust
fn main() {
let app = App::new().resource("/my index", |r| {
r.method(http::Method::GET)
.with(index);
});
}
```
use
```rust
fn main() {
let app = App::new().resource("/my%20index", |r| {
r.method(http::Method::GET)
.with(index);
});
}
```
* If you used `AsyncResult::async` you need to replace it with `AsyncResult::future`
## 0.7.4
* `Route::with_config()`/`Route::with_async_config()` always passes configuration objects as tuple
even for handler with one parameter.
## 0.7
* `HttpRequest` does not implement `Stream` anymore. If you need to read request payload
use `HttpMessage::payload()` method.
instead of
```rust
fn index(req: HttpRequest) -> impl Responder {
req
.from_err()
.fold(...)
....
}
```
use `.payload()`
```rust
fn index(req: HttpRequest) -> impl Responder {
req
.payload() // <- get request payload stream
.from_err()
.fold(...)
....
}
```
* [Middleware](https://actix.rs/actix-web/actix_web/middleware/trait.Middleware.html)
trait uses `&HttpRequest` instead of `&mut HttpRequest`.
* Removed `Route::with2()` and `Route::with3()` use tuple of extractors instead.
instead of
```rust
fn index(query: Query<..>, info: Json<MyStruct>) -> impl Responder {}
```
use tuple of extractors and use `.with()` for registration:
```rust
fn index((query, json): (Query<..>, Json<MyStruct>)) -> impl Responder {}
```
* `Handler::handle()` uses `&self` instead of `&mut self`
* `Handler::handle()` accepts reference to `HttpRequest<_>` instead of value
* Removed deprecated `HttpServer::threads()`, use
[HttpServer::workers()](https://actix.rs/actix-web/actix_web/server/struct.HttpServer.html#method.workers) instead.
* Renamed `client::ClientConnectorError::Connector` to
`client::ClientConnectorError::Resolver`
* `Route::with()` does not return `ExtractorConfig`, to configure
extractor use `Route::with_config()`
instead of
```rust
fn main() {
let app = App::new().resource("/index.html", |r| {
r.method(http::Method::GET)
.with(index)
.limit(4096); // <- limit size of the payload
});
}
```
use
```rust
fn main() {
let app = App::new().resource("/index.html", |r| {
r.method(http::Method::GET)
.with_config(index, |cfg| { // <- register handler
cfg.limit(4096); // <- limit size of the payload
})
});
}
```
* `Route::with_async()` does not return `ExtractorConfig`, to configure
extractor use `Route::with_async_config()`
## 0.6
* `Path<T>` extractor return `ErrorNotFound` on failure instead of `ErrorBadRequest`
* `ws::Message::Close` now includes optional close reason.
`ws::CloseCode::Status` and `ws::CloseCode::Empty` have been removed.
* `HttpServer::threads()` renamed to `HttpServer::workers()`.
* `HttpServer::start_ssl()` and `HttpServer::start_tls()` deprecated.
Use `HttpServer::bind_ssl()` and `HttpServer::bind_tls()` instead.
* `HttpRequest::extensions()` returns read only reference to the request's Extension
`HttpRequest::extensions_mut()` returns mutable reference.
* Instead of
`use actix_web::middleware::{
CookieSessionBackend, CookieSessionError, RequestSession,
Session, SessionBackend, SessionImpl, SessionStorage};`
use `actix_web::middleware::session`
`use actix_web::middleware::session{CookieSessionBackend, CookieSessionError,
RequestSession, Session, SessionBackend, SessionImpl, SessionStorage};`
* `FromRequest::from_request()` accepts mutable reference to a request
* `FromRequest::Result` has to implement `Into<Reply<Self>>`
* [`Responder::respond_to()`](
https://actix.rs/actix-web/actix_web/trait.Responder.html#tymethod.respond_to)
is generic over `S`
* Use `Query` extractor instead of `HttpRequest::query()`.
```rust
fn index(q: Query<HashMap<String, String>>) -> Result<..> {
...
}
```
or
```rust
let q = Query::<HashMap<String, String>>::extract(req);
```
* Websocket operations are implemented as `WsWriter` trait.
you need to use `use actix_web::ws::WsWriter`
## 0.5
* `HttpResponseBuilder::body()`, `.finish()`, `.json()`
methods return `HttpResponse` instead of `Result<HttpResponse>`
* `actix_web::Method`, `actix_web::StatusCode`, `actix_web::Version`
moved to `actix_web::http` module
* `actix_web::header` moved to `actix_web::http::header`
* `NormalizePath` moved to `actix_web::http` module
* `HttpServer` moved to `actix_web::server`, added new `actix_web::server::new()` function,
shortcut for `actix_web::server::HttpServer::new()`
* `DefaultHeaders` middleware does not use separate builder, all builder methods moved to type itself
* `StaticFiles::new()`'s `show_index` parameter removed; use the `show_files_listing()` method instead.
* `CookieSessionBackendBuilder` removed, all methods moved to `CookieSessionBackend` type
* `actix_web::httpcodes` module is deprecated; use `HttpResponse::Ok()`, `HttpResponse::Found()`, and the other `HttpResponse::XXX()`
constructors instead
* `ClientRequestBuilder::body()` returns `Result<_, actix_web::Error>`
instead of `Result<_, http::Error>`
* `Application` renamed to `App`
* `actix_web::Reply`, `actix_web::Resource` moved to `actix_web::dev`
@ -1 +0,0 @@
actix-web/README.md
README.md (new file, 111 lines)
@ -0,0 +1,111 @@
<div align="center">
<h1>Actix web</h1>
<p>
<strong>Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust</strong>
</p>
<p>
[![crates.io](https://img.shields.io/crates/v/actix-web?label=latest)](https://crates.io/crates/actix-web)
[![Documentation](https://docs.rs/actix-web/badge.svg?version=3.3.2)](https://docs.rs/actix-web/3.3.2)
[![Version](https://img.shields.io/badge/rustc-1.42+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.42.html)
![License](https://img.shields.io/crates/l/actix-web.svg)
[![Dependency Status](https://deps.rs/crate/actix-web/3.3.2/status.svg)](https://deps.rs/crate/actix-web/3.3.2)
<br />
[![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web)
[![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web)
[![Download](https://img.shields.io/crates/d/actix-web.svg)](https://crates.io/crates/actix-web)
[![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
</p>
</div>
## Features
* Supports *HTTP/1.x* and *HTTP/2*
* Streaming and pipelining
* Keep-alive and slow requests handling
* Client/server [WebSockets](https://actix.rs/docs/websockets/) support
* Transparent content compression/decompression (br, gzip, deflate)
* Powerful [request routing](https://actix.rs/docs/url-dispatch/)
* Multipart streams
* Static assets
* SSL support using OpenSSL or Rustls
* Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/))
* Includes an async [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html)
* Supports [Actix actor framework](https://github.com/actix/actix)
* Runs on stable Rust 1.42+
## Documentation
* [Website & User Guide](https://actix.rs)
* [Examples Repository](https://github.com/actix/examples)
* [API Documentation](https://docs.rs/actix-web)
* [API Documentation (master branch)](https://actix.rs/actix-web/actix_web)
## Example
Dependencies:
```toml
[dependencies]
actix-web = "3"
```
Code:
```rust
use actix_web::{get, web, App, HttpServer, Responder};
#[get("/{id}/{name}/index.html")]
async fn index(web::Path((id, name)): web::Path<(u32, String)>) -> impl Responder {
format!("Hello {}! id:{}", name, id)
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
HttpServer::new(|| App::new().service(index))
.bind("127.0.0.1:8080")?
.run()
.await
}
```
### More examples
* [Basic Setup](https://github.com/actix/examples/tree/master/basics/)
* [Application State](https://github.com/actix/examples/tree/master/state/)
* [JSON Handling](https://github.com/actix/examples/tree/master/json/)
* [Multipart Streams](https://github.com/actix/examples/tree/master/multipart/)
* [Diesel Integration](https://github.com/actix/examples/tree/master/diesel/)
* [r2d2 Integration](https://github.com/actix/examples/tree/master/r2d2/)
* [Simple WebSocket](https://github.com/actix/examples/tree/master/websocket/)
* [Tera Templates](https://github.com/actix/examples/tree/master/template_tera/)
* [Askama Templates](https://github.com/actix/examples/tree/master/template_askama/)
* [HTTPS using Rustls](https://github.com/actix/examples/tree/master/rustls/)
* [HTTPS using OpenSSL](https://github.com/actix/examples/tree/master/openssl/)
* [WebSocket Chat](https://github.com/actix/examples/tree/master/websocket-chat/)
You may consider checking out
[this directory](https://github.com/actix/examples/tree/master/) for more examples.
## Benchmarks
One of the fastest web frameworks available according to the
[TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r19).
## License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
[http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
at your option.
## Code of Conduct
Contribution to the actix-web crate is organized under the terms of the Contributor Covenant.
The maintainers of Actix Web promise to intervene to uphold that code of conduct.
@ -1,252 +1,102 @@
# Changes # Changes
## Unreleased ## Unreleased - 2020-xx-xx
- Minimum supported Rust version (MSRV) is now 1.75.
## 0.6.6 ## 0.4.1 - 2020-11-24
* Clarify order of parameters in `Files::new` and improve docs.
- Update `tokio-uring` dependency to `0.4`.
- Minimum supported Rust version (MSRV) is now 1.72.
## 0.6.5 ## 0.4.0 - 2020-10-06
* Add `Files::prefer_utf8` option that adds UTF-8 charset on certain response types. [#1714]
- Fix handling of special characters in filenames.
## 0.6.4
- Fix handling of newlines in filenames.
- Minimum supported Rust version (MSRV) is now 1.68 due to transitive `time` dependency.
## 0.6.3
- XHTML files now use `Content-Disposition: inline` instead of `attachment`. [#2903]
- Minimum supported Rust version (MSRV) is now 1.59 due to transitive `time` dependency.
- Update `tokio-uring` dependency to `0.4`.
[#2903]: https://github.com/actix/actix-web/pull/2903
## 0.6.2
- Allow partial range responses for video content to start streaming sooner. [#2817]
- Minimum supported Rust version (MSRV) is now 1.57 due to transitive `time` dependency.
[#2817]: https://github.com/actix/actix-web/pull/2817
## 0.6.1
- Add `NamedFile::{modified, metadata, content_type, content_disposition, encoding}()` getters. [#2021]
- Update `tokio-uring` dependency to `0.3`.
- Audio files now use `Content-Disposition: inline` instead of `attachment`. [#2645]
- Minimum supported Rust version (MSRV) is now 1.56 due to transitive `hashbrown` dependency.
[#2021]: https://github.com/actix/actix-web/pull/2021
[#2645]: https://github.com/actix/actix-web/pull/2645
## 0.6.0
- No significant changes since `0.6.0-beta.16`.
## 0.6.0-beta.16
- No significant changes since `0.6.0-beta.15`.
## 0.6.0-beta.15
- No significant changes since `0.6.0-beta.14`.
## 0.6.0-beta.14
- The `prefer_utf8` option introduced in `0.4.0` is now true by default. [#2583]
[#2583]: https://github.com/actix/actix-web/pull/2583
## 0.6.0-beta.13
- The `Files` service now rejects requests with URL paths that include `%2F` (decoded: `/`). [#2398]
- The `Files` service now correctly decodes `%25` in the URL path to `%` for the file path. [#2398]
- Minimum supported Rust version (MSRV) is now 1.54.
[#2398]: https://github.com/actix/actix-web/pull/2398
## 0.6.0-beta.12
- No significant changes since `0.6.0-beta.11`.
## 0.6.0-beta.11
- No significant changes since `0.6.0-beta.10`.
## 0.6.0-beta.10
- No significant changes since `0.6.0-beta.9`.
## 0.6.0-beta.9
- Add crate feature `experimental-io-uring`, enabling async file I/O to be utilized. This feature is only available on Linux OSes with recent kernel versions. This feature is semver-exempt. [#2408]
- Add `NamedFile::open_async`. [#2408]
- Fix 304 Not Modified responses to omit the Content-Length header, as per the spec. [#2453]
- The `Responder` impl for `NamedFile` now has a boxed future associated type. [#2408]
- The `Service` impl for `NamedFileService` now has a boxed future associated type. [#2408]
- Add `impl Clone` for `FilesService`. [#2408]
[#2408]: https://github.com/actix/actix-web/pull/2408
[#2453]: https://github.com/actix/actix-web/pull/2453
## 0.6.0-beta.8
- Minimum supported Rust version (MSRV) is now 1.52.
## 0.6.0-beta.7
- Minimum supported Rust version (MSRV) is now 1.51.
## 0.6.0-beta.6
- Added `Files::path_filter()`. [#2274]
- `Files::show_files_listing()` can now be used with `Files::index_file()` to show files listing as a fallback when the index file is not found. [#2228]
[#2274]: https://github.com/actix/actix-web/pull/2274
[#2228]: https://github.com/actix/actix-web/pull/2228
## 0.6.0-beta.5
- `NamedFile` now implements `ServiceFactory` and `HttpServiceFactory` making it much more useful in routing. For example, it can be used directly as a default service. [#2135]
- For symbolic links, `Content-Disposition` header no longer shows the filename of the original file. [#2156]
- `Files::redirect_to_slash_directory()` now works as expected when used with `Files::show_files_listing()`. [#2225]
- `application/{javascript, json, wasm}` mime type now have `inline` disposition by default. [#2257]
[#2135]: https://github.com/actix/actix-web/pull/2135
[#2156]: https://github.com/actix/actix-web/pull/2156
[#2225]: https://github.com/actix/actix-web/pull/2225
[#2257]: https://github.com/actix/actix-web/pull/2257
## 0.6.0-beta.4
- Add support for `.guard` in `Files` to selectively filter `Files` services. [#2046]
[#2046]: https://github.com/actix/actix-web/pull/2046
## 0.6.0-beta.3
- No notable changes.
## 0.6.0-beta.2
- Fix If-Modified-Since and If-Unmodified-Since to not compare using sub-second timestamps. [#1887]
- Replace `v_htmlescape` with `askama_escape`. [#1953]
[#1887]: https://github.com/actix/actix-web/pull/1887
[#1953]: https://github.com/actix/actix-web/pull/1953
## 0.6.0-beta.1
- `HttpRange::parse` now has its own error type.
- Update `bytes` to `1.0`. [#1813]
[#1813]: https://github.com/actix/actix-web/pull/1813
## 0.5.0
- Optionally support hidden files/directories. [#1811]
[#1811]: https://github.com/actix/actix-web/pull/1811
## 0.4.1
- Clarify order of parameters in `Files::new` and improve docs.
## 0.4.0
- Add `Files::prefer_utf8` option that adds UTF-8 charset on certain response types. [#1714]
[#1714]: https://github.com/actix/actix-web/pull/1714 [#1714]: https://github.com/actix/actix-web/pull/1714
## 0.3.0
- No significant changes from 0.3.0-beta.1. ## 0.3.0 - 2020-09-11
* No significant changes from 0.3.0-beta.1.
## 0.3.0-beta.1
- Update `v_htmlescape` to 0.10 ## 0.3.0-beta.1 - 2020-07-15
- Update `actix-web` and `actix-http` dependencies to beta.1 * Update `v_htmlescape` to 0.10
* Update `actix-web` and `actix-http` dependencies to beta.1
## 0.3.0-alpha.1
- Update `actix-web` and `actix-http` dependencies to alpha ## 0.3.0-alpha.1 - 2020-05-23
- Fix some typos in the docs * Update `actix-web` and `actix-http` dependencies to alpha
- Bump minimum supported Rust version to 1.40 * Fix some typos in the docs
- Support sending Content-Length when Content-Range is specified [#1384] * Bump minimum supported Rust version to 1.40
* Support sending Content-Length when Content-Range is specified [#1384]
[#1384]: https://github.com/actix/actix-web/pull/1384 [#1384]: https://github.com/actix/actix-web/pull/1384
## 0.2.1
- Use the same format for file URLs regardless of platforms ## 0.2.1 - 2019-12-22
* Use the same format for file URLs regardless of platforms
## 0.2.0
- Fix BodyEncoding trait import #1220 ## 0.2.0 - 2019-12-20
* Fix BodyEncoding trait import #1220
## 0.2.0-alpha.1
- Migrate to `std::future` ## 0.2.0-alpha.1 - 2019-12-07
* Migrate to `std::future`
## 0.1.7
- Add an additional `filename*` param in the `Content-Disposition` header of `actix_files::NamedFile` to be more compatible. (#1151) ## 0.1.7 - 2019-11-06
* Add an additional `filename*` param in the `Content-Disposition` header of
`actix_files::NamedFile` to be more compatible. (#1151)
## 0.1.6 ## 0.1.6 - 2019-10-14
* Add option to redirect to a slash-ended path `Files` #1132
- Add option to redirect to a slash-ended path `Files` #1132
## 0.1.5 ## 0.1.5 - 2019-10-08
* Bump up `mime_guess` crate version to 2.0.1
* Bump up `percent-encoding` crate version to 2.1
* Allow user defined request guards for `Files` #1113
- Bump up `mime_guess` crate version to 2.0.1
- Bump up `percent-encoding` crate version to 2.1
- Allow user defined request guards for `Files` #1113
## 0.1.4 ## 0.1.4 - 2019-07-20
* Allow to disable `Content-Disposition` header #686
- Allow to disable `Content-Disposition` header #686
## 0.1.3 ## 0.1.3 - 2019-06-28
* Do not set `Content-Length` header, let actix-http set it #930
- Do not set `Content-Length` header, let actix-http set it #930
## 0.1.2 ## 0.1.2 - 2019-06-13
* Content-Length is 0 for NamedFile HEAD request #914
* Fix ring dependency from actix-web default features for #741
- Content-Length is 0 for NamedFile HEAD request #914
- Fix ring dependency from actix-web default features for #741
## 0.1.1 ## 0.1.1 - 2019-06-01
* Static files are incorrectly served as both chunked and with length #812
- Static files are incorrectly served as both chunked and with length #812
## 0.1.0 ## 0.1.0 - 2019-05-25
* NamedFile last-modified check always fails due to nano-seconds in file modified date #820
- NamedFile last-modified check always fails due to nano-seconds in file modified date #820
## 0.1.0-beta.4 ## 0.1.0-beta.4 - 2019-05-12
* Update actix-web to beta.4
- Update actix-web to beta.4
## 0.1.0-beta.1 ## 0.1.0-beta.1 - 2019-04-20
* Update actix-web to beta.1
- Update actix-web to beta.1
## 0.1.0-alpha.6 ## 0.1.0-alpha.6 - 2019-04-14
* Update actix-web to alpha6
- Update actix-web to alpha6
## 0.1.0-alpha.4 ## 0.1.0-alpha.4 - 2019-04-08
* Update actix-web to alpha4
- Update actix-web to alpha4
## 0.1.0-alpha.2 ## 0.1.0-alpha.2 - 2019-04-02
* Add default handler support
- Add default handler support
## 0.1.0-alpha.1 ## 0.1.0-alpha.1 - 2019-03-28
* Initial impl
- Initial impl
@ -1,59 +1,35 @@
[package] [package]
name = "actix-files" name = "actix-files"
version = "0.6.6" version = "0.4.1"
authors = [ authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "Static file serving for Actix Web" description = "Static file serving for Actix Web"
readme = "README.md"
keywords = ["actix", "http", "async", "futures"] keywords = ["actix", "http", "async", "futures"]
homepage = "https://actix.rs" homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web" repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-files/"
categories = ["asynchronous", "web-programming::http-server"] categories = ["asynchronous", "web-programming::http-server"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
edition = "2021" edition = "2018"
[package.metadata.cargo_check_external_types] [lib]
allowed_external_types = [ name = "actix_files"
"actix_http::*", path = "src/lib.rs"
"actix_service::*",
"actix_web::*",
"http::*",
"mime::*",
]
[features]
experimental-io-uring = ["actix-web/experimental-io-uring", "tokio-uring"]
[dependencies] [dependencies]
actix-http = "3" actix-web = { version = "3.0.0", default-features = false }
actix-service = "2" actix-service = "1.0.6"
actix-utils = "3" bitflags = "1"
actix-web = { version = "4", default-features = false } bytes = "0.5.3"
futures-core = { version = "0.3.7", default-features = false }
bitflags = "2" futures-util = { version = "0.3.7", default-features = false }
bytes = "1" derive_more = "0.99.2"
derive_more = { version = "2", features = ["display", "error", "from"] }
futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] }
http-range = "0.1.4"
log = "0.4" log = "0.4"
mime = "0.3.9" mime = "0.3"
mime_guess = "2.0.1" mime_guess = "2.0.1"
percent-encoding = "2.1" percent-encoding = "2.1"
pin-project-lite = "0.2.7" v_htmlescape = "0.11"
v_htmlescape = "0.15.5"
# experimental-io-uring
[target.'cfg(target_os = "linux")'.dependencies]
tokio-uring = { version = "0.5", optional = true, features = ["bytes"] }
actix-server = { version = "2.4", optional = true } # ensure matching tokio-uring versions
[dev-dependencies] [dev-dependencies]
actix-rt = "2.7" actix-rt = "1.0.0"
actix-test = "0.1" actix-web = { version = "3.0.0", features = ["openssl"] }
actix-web = "4"
env_logger = "0.11"
tempfile = "3.2"
[lints]
workspace = true
@ -1,32 +1,19 @@
# `actix-files` # actix-files
<!-- prettier-ignore-start --> > Static file serving for Actix Web
[![crates.io](https://img.shields.io/crates/v/actix-files?label=latest)](https://crates.io/crates/actix-files) [![crates.io](https://img.shields.io/crates/v/actix-files?label=latest)](https://crates.io/crates/actix-files)
[![Documentation](https://docs.rs/actix-files/badge.svg?version=0.6.6)](https://docs.rs/actix-files/0.6.6) [![Documentation](https://docs.rs/actix-files/badge.svg?version=0.4.1)](https://docs.rs/actix-files/0.4.1)
![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg) [![Version](https://img.shields.io/badge/rustc-1.42+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.42.html)
![License](https://img.shields.io/crates/l/actix-files.svg) ![License](https://img.shields.io/crates/l/actix-files.svg)
<br /> <br />
[![dependency status](https://deps.rs/crate/actix-files/0.6.6/status.svg)](https://deps.rs/crate/actix-files/0.6.6) [![dependency status](https://deps.rs/crate/actix-files/0.4.1/status.svg)](https://deps.rs/crate/actix-files/0.4.1)
[![Download](https://img.shields.io/crates/d/actix-files.svg)](https://crates.io/crates/actix-files) [![Download](https://img.shields.io/crates/d/actix-files.svg)](https://crates.io/crates/actix-files)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
<!-- prettier-ignore-end --> ## Documentation & Resources
<!-- cargo-rdme start --> - [API Documentation](https://docs.rs/actix-files/)
- [Example Project](https://github.com/actix/examples/tree/master/static_index)
Static file serving for Actix Web. - [Chat on Gitter](https://gitter.im/actix/actix-web)
- Minimum supported Rust version: 1.42 or later
Provides a non-blocking service for serving static files from disk.
## Examples
```rust
use actix_web::App;
use actix_files::Files;
let app = App::new()
.service(Files::new("/static", ".").prefer_utf8(true));
```
<!-- cargo-rdme end -->
@ -1,33 +0,0 @@
use actix_files::Files;
use actix_web::{get, guard, middleware, App, HttpServer, Responder};
const EXAMPLES_DIR: &str = concat![env!("CARGO_MANIFEST_DIR"), "/examples"];
#[get("/")]
async fn index() -> impl Responder {
"Hello world!"
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
log::info!("starting HTTP server at http://localhost:8080");
HttpServer::new(|| {
App::new()
.service(index)
.service(
Files::new("/assets", EXAMPLES_DIR)
.show_files_listing()
.guard(guard::Header("show-listing", "?1")),
)
.service(Files::new("/assets", EXAMPLES_DIR))
.wrap(middleware::Compress::default())
.wrap(middleware::Logger::default())
})
.bind(("127.0.0.1", 8080))?
.workers(2)
.run()
.await
}
@ -1,217 +1,94 @@
use std::{ use std::{
cmp, fmt, cmp, fmt,
fs::File,
future::Future, future::Future,
io, io::{self, Read, Seek},
pin::Pin, pin::Pin,
task::{Context, Poll}, task::{Context, Poll},
}; };
use actix_web::{error::Error, web::Bytes}; use actix_web::{
#[cfg(feature = "experimental-io-uring")] error::{BlockingError, Error},
use bytes::BytesMut; web,
};
use bytes::Bytes;
use futures_core::{ready, Stream}; use futures_core::{ready, Stream};
use pin_project_lite::pin_project; use futures_util::future::{FutureExt, LocalBoxFuture};
use super::named::File; use crate::handle_error;
pin_project! { type ChunkedBoxFuture =
/// Adapter to read a `std::file::File` in chunks. LocalBoxFuture<'static, Result<(File, Bytes), BlockingError<io::Error>>>;
#[doc(hidden)]
pub struct ChunkedReadFile<F, Fut> { #[doc(hidden)]
size: u64, /// A helper created from a `std::fs::File` which reads the file
offset: u64, /// chunk-by-chunk on a `ThreadPool`.
#[pin] pub struct ChunkedReadFile {
state: ChunkedReadFileState<Fut>, pub(crate) size: u64,
counter: u64, pub(crate) offset: u64,
callback: F, pub(crate) file: Option<File>,
} pub(crate) fut: Option<ChunkedBoxFuture>,
pub(crate) counter: u64,
} }
#[cfg(not(feature = "experimental-io-uring"))] impl fmt::Debug for ChunkedReadFile {
pin_project! {
#[project = ChunkedReadFileStateProj]
#[project_replace = ChunkedReadFileStateProjReplace]
enum ChunkedReadFileState<Fut> {
File { file: Option<File>, },
Future { #[pin] fut: Fut },
}
}
#[cfg(feature = "experimental-io-uring")]
pin_project! {
#[project = ChunkedReadFileStateProj]
#[project_replace = ChunkedReadFileStateProjReplace]
enum ChunkedReadFileState<Fut> {
File { file: Option<(File, BytesMut)> },
Future { #[pin] fut: Fut },
}
}
impl<F, Fut> fmt::Debug for ChunkedReadFile<F, Fut> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("ChunkedReadFile") f.write_str("ChunkedReadFile")
} }
} }
pub(crate) fn new_chunked_read( impl Stream for ChunkedReadFile {
size: u64, type Item = Result<Bytes, Error>;
offset: u64,
file: File,
) -> impl Stream<Item = Result<Bytes, Error>> {
ChunkedReadFile {
size,
offset,
#[cfg(not(feature = "experimental-io-uring"))]
state: ChunkedReadFileState::File { file: Some(file) },
#[cfg(feature = "experimental-io-uring")]
state: ChunkedReadFileState::File {
file: Some((file, BytesMut::new())),
},
counter: 0,
callback: chunked_read_file_callback,
}
}
#[cfg(not(feature = "experimental-io-uring"))] fn poll_next(
async fn chunked_read_file_callback( mut self: Pin<&mut Self>,
mut file: File, cx: &mut Context<'_>,
offset: u64, ) -> Poll<Option<Self::Item>> {
max_bytes: usize, if let Some(ref mut fut) = self.fut {
) -> Result<(File, Bytes), Error> { return match ready!(Pin::new(fut).poll(cx)) {
use io::{Read as _, Seek as _}; Ok((file, bytes)) => {
self.fut.take();
self.file = Some(file);
let res = actix_web::web::block(move || { self.offset += bytes.len() as u64;
let mut buf = Vec::with_capacity(max_bytes); self.counter += bytes.len() as u64;
file.seek(io::SeekFrom::Start(offset))?; Poll::Ready(Some(Ok(bytes)))
}
Err(e) => Poll::Ready(Some(Err(handle_error(e)))),
};
}
let n_bytes = file.by_ref().take(max_bytes as u64).read_to_end(&mut buf)?; let size = self.size;
let offset = self.offset;
let counter = self.counter;
if n_bytes == 0 { if size == counter {
Err(io::Error::from(io::ErrorKind::UnexpectedEof)) Poll::Ready(None)
} else { } else {
Ok((file, Bytes::from(buf))) let mut file = self.file.take().expect("Use after completion");
}
})
.await??;
Ok(res) self.fut = Some(
} web::block(move || {
let max_bytes =
cmp::min(size.saturating_sub(counter), 65_536) as usize;
#[cfg(feature = "experimental-io-uring")] let mut buf = Vec::with_capacity(max_bytes);
async fn chunked_read_file_callback( file.seek(io::SeekFrom::Start(offset))?;
file: File,
offset: u64,
max_bytes: usize,
mut bytes_mut: BytesMut,
) -> io::Result<(File, Bytes, BytesMut)> {
bytes_mut.reserve(max_bytes);
let (res, mut bytes_mut) = file.read_at(bytes_mut, offset).await; let n_bytes =
let n_bytes = res?; file.by_ref().take(max_bytes as u64).read_to_end(&mut buf)?;
if n_bytes == 0 { if n_bytes == 0 {
return Err(io::ErrorKind::UnexpectedEof.into()); return Err(io::ErrorKind::UnexpectedEof.into());
} }
let bytes = bytes_mut.split_to(n_bytes).freeze(); Ok((file, Bytes::from(buf)))
})
.boxed_local(),
);
Ok((file, bytes, bytes_mut)) self.poll_next(cx)
}
#[cfg(feature = "experimental-io-uring")]
impl<F, Fut> Stream for ChunkedReadFile<F, Fut>
where
F: Fn(File, u64, usize, BytesMut) -> Fut,
Fut: Future<Output = io::Result<(File, Bytes, BytesMut)>>,
{
type Item = Result<Bytes, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
ChunkedReadFileStateProj::File { file } => {
let size = *this.size;
let offset = *this.offset;
let counter = *this.counter;
if size == counter {
Poll::Ready(None)
} else {
let max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize;
let (file, bytes_mut) = file
.take()
.expect("ChunkedReadFile polled after completion");
let fut = (this.callback)(file, offset, max_bytes, bytes_mut);
this.state
.project_replace(ChunkedReadFileState::Future { fut });
self.poll_next(cx)
}
}
ChunkedReadFileStateProj::Future { fut } => {
let (file, bytes, bytes_mut) = ready!(fut.poll(cx))?;
this.state.project_replace(ChunkedReadFileState::File {
file: Some((file, bytes_mut)),
});
*this.offset += bytes.len() as u64;
*this.counter += bytes.len() as u64;
Poll::Ready(Some(Ok(bytes)))
}
}
}
}
#[cfg(not(feature = "experimental-io-uring"))]
impl<F, Fut> Stream for ChunkedReadFile<F, Fut>
where
F: Fn(File, u64, usize) -> Fut,
Fut: Future<Output = Result<(File, Bytes), Error>>,
{
type Item = Result<Bytes, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
ChunkedReadFileStateProj::File { file } => {
let size = *this.size;
let offset = *this.offset;
let counter = *this.counter;
if size == counter {
Poll::Ready(None)
} else {
let max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize;
let file = file
.take()
.expect("ChunkedReadFile polled after completion");
let fut = (this.callback)(file, offset, max_bytes);
this.state
.project_replace(ChunkedReadFileState::Future { fut });
self.poll_next(cx)
}
}
ChunkedReadFileStateProj::Future { fut } => {
let (file, bytes) = ready!(fut.poll(cx))?;
this.state
.project_replace(ChunkedReadFileState::File { file: Some(file) });
*this.offset += bytes.len() as u64;
*this.counter += bytes.len() as u64;
Poll::Ready(Some(Ok(bytes)))
}
} }
} }
} }
@ -1,9 +1,4 @@
use std::{ use std::{fmt::Write, fs::DirEntry, io, path::Path, path::PathBuf};
fmt::Write,
fs::DirEntry,
io,
path::{Path, PathBuf},
};
use actix_web::{dev::ServiceResponse, HttpRequest, HttpResponse}; use actix_web::{dev::ServiceResponse, HttpRequest, HttpResponse};
use percent_encoding::{utf8_percent_encode, CONTROLS}; use percent_encoding::{utf8_percent_encode, CONTROLS};
@ -45,23 +40,14 @@ impl Directory {
pub(crate) type DirectoryRenderer = pub(crate) type DirectoryRenderer =
dyn Fn(&Directory, &HttpRequest) -> Result<ServiceResponse, io::Error>; dyn Fn(&Directory, &HttpRequest) -> Result<ServiceResponse, io::Error>;
/// Returns percent encoded file URL path. // show file url as relative to static path
macro_rules! encode_file_url { macro_rules! encode_file_url {
($path:ident) => { ($path:ident) => {
utf8_percent_encode(&$path, CONTROLS) utf8_percent_encode(&$path, CONTROLS)
}; };
} }
/// Returns HTML entity encoded formatter. // " -- &quot; & -- &amp; ' -- &#x27; < -- &lt; > -- &gt; / -- &#x2f;
///
/// ```plain
/// " => &quot;
/// & => &amp;
/// ' => &#x27;
/// < => &lt;
/// > => &gt;
/// / => &#x2f;
/// ```
macro_rules! encode_file_name { macro_rules! encode_file_name {
($entry:ident) => { ($entry:ident) => {
escape_html_entity(&$entry.file_name().to_string_lossy()) escape_html_entity(&$entry.file_name().to_string_lossy())
@ -80,7 +66,9 @@ pub(crate) fn directory_listing(
if dir.is_visible(&entry) { if dir.is_visible(&entry) {
let entry = entry.unwrap(); let entry = entry.unwrap();
let p = match entry.path().strip_prefix(&dir.path) { let p = match entry.path().strip_prefix(&dir.path) {
Ok(p) if cfg!(windows) => base.join(p).to_string_lossy().replace('\\', "/"), Ok(p) if cfg!(windows) => {
base.join(p).to_string_lossy().replace("\\", "/")
}
Ok(p) => base.join(p).to_string_lossy().into_owned(), Ok(p) => base.join(p).to_string_lossy().into_owned(),
Err(_) => continue, Err(_) => continue,
}; };
@ -1,48 +1,41 @@
use actix_web::{http::StatusCode, ResponseError}; use actix_web::{http::StatusCode, HttpResponse, ResponseError};
use derive_more::Display; use derive_more::Display;
/// Errors which can occur when serving static files. /// Errors which can occur when serving static files.
#[derive(Debug, PartialEq, Eq, Display)] #[derive(Display, Debug, PartialEq)]
pub enum FilesError { pub enum FilesError {
/// Path is not a directory. /// Path is not a directory
#[allow(dead_code)] #[allow(dead_code)]
#[display("path is not a directory. Unable to serve static files")] #[display(fmt = "Path is not a directory. Unable to serve static files")]
IsNotDirectory, IsNotDirectory,
/// Cannot render directory. /// Cannot render directory
#[display("unable to render directory without index file")] #[display(fmt = "Unable to render directory without index file")]
IsDirectory, IsDirectory,
} }
/// Return `NotFound` for `FilesError`
impl ResponseError for FilesError { impl ResponseError for FilesError {
/// Returns `404 Not Found`. fn error_response(&self) -> HttpResponse {
fn status_code(&self) -> StatusCode { HttpResponse::new(StatusCode::NOT_FOUND)
StatusCode::NOT_FOUND
} }
} }
#[derive(Debug, PartialEq, Eq, Display)] #[derive(Display, Debug, PartialEq)]
#[non_exhaustive]
pub enum UriSegmentError { pub enum UriSegmentError {
/// Segment started with the wrapped invalid character. /// The segment started with the wrapped invalid character.
#[display("segment started with invalid character: ('{_0}')")] #[display(fmt = "The segment started with the wrapped invalid character")]
BadStart(char), BadStart(char),
/// The segment contained the wrapped invalid character.
/// Segment contained the wrapped invalid character. #[display(fmt = "The segment contained the wrapped invalid character")]
#[display("segment contained invalid character ('{_0}')")]
BadChar(char), BadChar(char),
/// The segment ended with the wrapped invalid character.
/// Segment ended with the wrapped invalid character. #[display(fmt = "The segment ended with the wrapped invalid character")]
#[display("segment ended with invalid character: ('{_0}')")]
BadEnd(char), BadEnd(char),
/// Path is not a valid UTF-8 string after percent-decoding.
#[display("path is not a valid UTF-8 string after percent-decoding")]
NotValidUtf8,
} }
/// Return `BadRequest` for `UriSegmentError`
impl ResponseError for UriSegmentError { impl ResponseError for UriSegmentError {
/// Returns `400 Bad Request`.
fn status_code(&self) -> StatusCode { fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST StatusCode::BAD_REQUEST
} }
@ -1,34 +1,27 @@
use std::{ use std::{cell::RefCell, fmt, io, path::PathBuf, rc::Rc};
cell::RefCell,
fmt, io,
path::{Path, PathBuf},
rc::Rc,
};
use actix_service::{boxed, IntoServiceFactory, ServiceFactory, ServiceFactoryExt}; use actix_service::{boxed, IntoServiceFactory, ServiceFactory};
use actix_web::{ use actix_web::{
dev::{ dev::{
AppService, HttpServiceFactory, RequestHead, ResourceDef, ServiceRequest, ServiceResponse, AppService, HttpServiceFactory, ResourceDef, ServiceRequest, ServiceResponse,
}, },
error::Error, error::Error,
guard::Guard, guard::Guard,
http::header::DispositionType, http::header::DispositionType,
HttpRequest, HttpRequest,
}; };
use futures_core::future::LocalBoxFuture; use futures_util::future::{ok, FutureExt, LocalBoxFuture};
use crate::{ use crate::{
directory_listing, named, directory_listing, named, Directory, DirectoryRenderer, FilesService,
service::{FilesService, FilesServiceInner}, HttpNewService, MimeOverride,
Directory, DirectoryRenderer, HttpNewService, MimeOverride, PathFilter,
}; };
/// Static files handling service. /// Static files handling service.
/// ///
/// `Files` service must be registered with `App::service()` method. /// `Files` service must be registered with `App::service()` method.
/// ///
/// # Examples /// ```rust
/// ```
/// use actix_web::App; /// use actix_web::App;
/// use actix_files::Files; /// use actix_files::Files;
/// ///
@ -36,7 +29,7 @@ use crate::{
/// .service(Files::new("/static", ".")); /// .service(Files::new("/static", "."));
/// ``` /// ```
pub struct Files { pub struct Files {
mount_path: String, path: String,
directory: PathBuf, directory: PathBuf,
index: Option<String>, index: Option<String>,
show_index: bool, show_index: bool,
@ -44,11 +37,8 @@ pub struct Files {
default: Rc<RefCell<Option<Rc<HttpNewService>>>>, default: Rc<RefCell<Option<Rc<HttpNewService>>>>,
renderer: Rc<DirectoryRenderer>, renderer: Rc<DirectoryRenderer>,
mime_override: Option<Rc<MimeOverride>>, mime_override: Option<Rc<MimeOverride>>,
path_filter: Option<Rc<PathFilter>>,
file_flags: named::Flags, file_flags: named::Flags,
use_guards: Option<Rc<dyn Guard>>, guards: Option<Rc<dyn Guard>>,
guards: Vec<Rc<dyn Guard>>,
hidden_files: bool,
} }
impl fmt::Debug for Files { impl fmt::Debug for Files {
@ -67,12 +57,9 @@ impl Clone for Files {
default: self.default.clone(), default: self.default.clone(),
renderer: self.renderer.clone(), renderer: self.renderer.clone(),
file_flags: self.file_flags, file_flags: self.file_flags,
mount_path: self.mount_path.clone(), path: self.path.clone(),
mime_override: self.mime_override.clone(), mime_override: self.mime_override.clone(),
path_filter: self.path_filter.clone(),
use_guards: self.use_guards.clone(),
guards: self.guards.clone(), guards: self.guards.clone(),
hidden_files: self.hidden_files,
} }
} }
} }
@ -92,9 +79,9 @@ impl Files {
/// If the mount path is set as the root path `/`, services registered after this one will /// If the mount path is set as the root path `/`, services registered after this one will
/// be inaccessible. Register more specific handlers and services first. /// be inaccessible. Register more specific handlers and services first.
/// ///
/// `Files` utilizes the existing Tokio thread-pool for blocking filesystem operations. /// `Files` uses a threadpool for blocking filesystem operations. By default, the pool uses a
/// The number of running threads is adjusted over time as needed, up to a maximum of 512 times /// number of threads equal to 5x the number of available logical CPUs. Pool size can be changed
/// the number of server [workers](actix_web::HttpServer::workers), by default. /// by setting ACTIX_THREADPOOL environment variable.
pub fn new<T: Into<PathBuf>>(mount_path: &str, serve_from: T) -> Files { pub fn new<T: Into<PathBuf>>(mount_path: &str, serve_from: T) -> Files {
let orig_dir = serve_from.into(); let orig_dir = serve_from.into();
let dir = match orig_dir.canonicalize() { let dir = match orig_dir.canonicalize() {
@ -106,7 +93,7 @@ impl Files {
}; };
Files { Files {
mount_path: mount_path.trim_end_matches('/').to_owned(), path: mount_path.to_owned(),
directory: dir, directory: dir,
index: None, index: None,
show_index: false, show_index: false,
@ -114,20 +101,14 @@ impl Files {
default: Rc::new(RefCell::new(None)), default: Rc::new(RefCell::new(None)),
renderer: Rc::new(directory_listing), renderer: Rc::new(directory_listing),
mime_override: None, mime_override: None,
path_filter: None,
file_flags: named::Flags::default(), file_flags: named::Flags::default(),
use_guards: None, guards: None,
guards: Vec::new(),
hidden_files: false,
} }
} }
/// Show files listing for directories. /// Show files listing for directories.
/// ///
/// By default show files listing is disabled. /// By default show files listing is disabled.
///
/// When used with [`Files::index_file()`], files listing is shown as a fallback
/// when the index file is not found.
pub fn show_files_listing(mut self) -> Self { pub fn show_files_listing(mut self) -> Self {
self.show_index = true; self.show_index = true;
self self
@ -141,17 +122,17 @@ impl Files {
self self
} }
/// Set custom directory renderer. /// Set custom directory renderer
pub fn files_listing_renderer<F>(mut self, f: F) -> Self pub fn files_listing_renderer<F>(mut self, f: F) -> Self
where where
for<'r, 's> F: for<'r, 's> F: Fn(&'r Directory, &'s HttpRequest) -> Result<ServiceResponse, io::Error>
Fn(&'r Directory, &'s HttpRequest) -> Result<ServiceResponse, io::Error> + 'static, + 'static,
{ {
self.renderer = Rc::new(f); self.renderer = Rc::new(f);
self self
} }
/// Specifies MIME override callback. /// Specifies mime override callback
pub fn mime_override<F>(mut self, f: F) -> Self pub fn mime_override<F>(mut self, f: F) -> Self
where where
F: Fn(&mime::Name<'_>) -> DispositionType + 'static, F: Fn(&mime::Name<'_>) -> DispositionType + 'static,
@ -160,45 +141,10 @@ impl Files {
self self
} }
/// Sets path filtering closure.
///
/// The path provided to the closure is relative to `serve_from` path.
/// You can safely join this path with the `serve_from` path to get the real path.
/// However, the real path may not exist since the filter is called before checking path existence.
///
/// When a path doesn't pass the filter, [`Files::default_handler`] is called if set, otherwise,
/// `404 Not Found` is returned.
///
/// # Examples
/// ```
/// use std::path::Path;
/// use actix_files::Files;
///
/// // prevent searching subdirectories and following symlinks
/// let files_service = Files::new("/", "./static").path_filter(|path, _| {
/// path.components().count() == 1
/// && Path::new("./static")
/// .join(path)
/// .symlink_metadata()
/// .map(|m| !m.file_type().is_symlink())
/// .unwrap_or(false)
/// });
/// ```
pub fn path_filter<F>(mut self, f: F) -> Self
where
F: Fn(&Path, &RequestHead) -> bool + 'static,
{
self.path_filter = Some(Rc::new(f));
self
}
/// Set index file /// Set index file
/// ///
/// Shows specific index file for directories instead of /// Shows specific index file for directory "/" instead of
/// showing files listing. /// showing files listing.
///
/// If the index file is not found, files listing is shown as a fallback if
/// [`Files::show_files_listing()`] is set.
pub fn index_file<T: Into<String>>(mut self, index: T) -> Self { pub fn index_file<T: Into<String>>(mut self, index: T) -> Self {
self.index = Some(index.into()); self.index = Some(index.into());
self self
@ -207,6 +153,7 @@ impl Files {
/// Specifies whether to use ETag or not. /// Specifies whether to use ETag or not.
/// ///
/// Default is true. /// Default is true.
#[inline]
pub fn use_etag(mut self, value: bool) -> Self { pub fn use_etag(mut self, value: bool) -> Self {
self.file_flags.set(named::Flags::ETAG, value); self.file_flags.set(named::Flags::ETAG, value);
self self
@ -215,6 +162,7 @@ impl Files {
/// Specifies whether to use Last-Modified or not. /// Specifies whether to use Last-Modified or not.
/// ///
/// Default is true. /// Default is true.
#[inline]
pub fn use_last_modified(mut self, value: bool) -> Self { pub fn use_last_modified(mut self, value: bool) -> Self {
self.file_flags.set(named::Flags::LAST_MD, value); self.file_flags.set(named::Flags::LAST_MD, value);
self self
@ -223,85 +171,40 @@ impl Files {
/// Specifies whether text responses should signal a UTF-8 encoding. /// Specifies whether text responses should signal a UTF-8 encoding.
/// ///
/// Default is false (but will default to true in a future version). /// Default is false (but will default to true in a future version).
#[inline]
pub fn prefer_utf8(mut self, value: bool) -> Self { pub fn prefer_utf8(mut self, value: bool) -> Self {
self.file_flags.set(named::Flags::PREFER_UTF8, value); self.file_flags.set(named::Flags::PREFER_UTF8, value);
self self
} }
/// Adds a routing guard. /// Specifies custom guards to use for directory listings and files.
/// ///
/// Use this to allow multiple chained file services that respond to strictly different /// Default behaviour allows GET and HEAD.
/// properties of a request. Due to the way routing works, if a guard check returns true and the #[inline]
/// request starts being handled by the file service, it will not be able to back-out and try pub fn use_guards<G: Guard + 'static>(mut self, guards: G) -> Self {
/// the next service, you will simply get a 404 (or 405) error response. self.guards = Some(Rc::new(guards));
///
/// To allow `POST` requests to retrieve files, see [`Files::method_guard()`].
///
/// # Examples
/// ```
/// use actix_web::{guard::Header, App};
/// use actix_files::Files;
///
/// App::new().service(
/// Files::new("/","/my/site/files")
/// .guard(Header("Host", "example.com"))
/// );
/// ```
pub fn guard<G: Guard + 'static>(mut self, guard: G) -> Self {
self.guards.push(Rc::new(guard));
self self
} }
/// Specifies guard to check before fetching directory listings or files.
///
/// Note that this guard has no effect on routing; it's main use is to guard on the request's
/// method just before serving the file, only allowing `GET` and `HEAD` requests by default.
/// See [`Files::guard`] for routing guards.
pub fn method_guard<G: Guard + 'static>(mut self, guard: G) -> Self {
self.use_guards = Some(Rc::new(guard));
self
}
/// See [`Files::method_guard`].
#[doc(hidden)]
#[deprecated(since = "0.6.0", note = "Renamed to `method_guard`.")]
pub fn use_guards<G: Guard + 'static>(self, guard: G) -> Self {
self.method_guard(guard)
}
/// Disable `Content-Disposition` header. /// Disable `Content-Disposition` header.
/// ///
/// By default Content-Disposition` header is enabled. /// By default Content-Disposition` header is enabled.
#[inline]
pub fn disable_content_disposition(mut self) -> Self { pub fn disable_content_disposition(mut self) -> Self {
self.file_flags.remove(named::Flags::CONTENT_DISPOSITION); self.file_flags.remove(named::Flags::CONTENT_DISPOSITION);
self self
} }
/// Sets default handler which is used when no matched file could be found. /// Sets default handler which is used when no matched file could be found.
///
/// # Examples
/// Setting a fallback static file handler:
/// ```
/// use actix_files::{Files, NamedFile};
/// use actix_web::dev::{ServiceRequest, ServiceResponse, fn_service};
///
/// # fn run() -> Result<(), actix_web::Error> {
/// let files = Files::new("/", "./static")
/// .index_file("index.html")
/// .default_handler(fn_service(|req: ServiceRequest| async {
/// let (req, _) = req.into_parts();
/// let file = NamedFile::open_async("./static/404.html").await?;
/// let res = file.into_response(&req);
/// Ok(ServiceResponse::new(req, res))
/// }));
/// # Ok(())
/// # }
/// ```
pub fn default_handler<F, U>(mut self, f: F) -> Self pub fn default_handler<F, U>(mut self, f: F) -> Self
where where
F: IntoServiceFactory<U, ServiceRequest>, F: IntoServiceFactory<U>,
U: ServiceFactory<ServiceRequest, Config = (), Response = ServiceResponse, Error = Error> U: ServiceFactory<
+ 'static, Config = (),
Request = ServiceRequest,
Response = ServiceResponse,
Error = Error,
> + 'static,
{ {
// create and configure default resource // create and configure default resource
self.default = Rc::new(RefCell::new(Some(Rc::new(boxed::factory( self.default = Rc::new(RefCell::new(Some(Rc::new(boxed::factory(
@ -310,43 +213,26 @@ impl Files {
self self
} }
/// Enables serving hidden files and directories, allowing a leading dots in url fragments.
pub fn use_hidden_files(mut self) -> Self {
self.hidden_files = true;
self
}
} }
impl HttpServiceFactory for Files { impl HttpServiceFactory for Files {
fn register(mut self, config: &mut AppService) { fn register(self, config: &mut AppService) {
let guards = if self.guards.is_empty() {
None
} else {
let guards = std::mem::take(&mut self.guards);
Some(
guards
.into_iter()
.map(|guard| -> Box<dyn Guard> { Box::new(guard) })
.collect::<Vec<_>>(),
)
};
if self.default.borrow().is_none() { if self.default.borrow().is_none() {
*self.default.borrow_mut() = Some(config.default_service()); *self.default.borrow_mut() = Some(config.default_service());
} }
let rdef = if config.is_root() { let rdef = if config.is_root() {
ResourceDef::root_prefix(&self.mount_path) ResourceDef::root_prefix(&self.path)
} else { } else {
ResourceDef::prefix(&self.mount_path) ResourceDef::prefix(&self.path)
}; };
config.register_service(rdef, guards, self, None) config.register_service(rdef, None, self, None)
} }
} }
impl ServiceFactory<ServiceRequest> for Files { impl ServiceFactory for Files {
type Request = ServiceRequest;
type Response = ServiceResponse; type Response = ServiceResponse;
type Error = Error; type Error = Error;
type Config = (); type Config = ();
@ -355,7 +241,7 @@ impl ServiceFactory<ServiceRequest> for Files {
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>; type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future { fn new_service(&self, _: ()) -> Self::Future {
let mut inner = FilesServiceInner { let mut srv = FilesService {
directory: self.directory.clone(), directory: self.directory.clone(),
index: self.index.clone(), index: self.index.clone(),
show_index: self.show_index, show_index: self.show_index,
@ -363,68 +249,23 @@ impl ServiceFactory<ServiceRequest> for Files {
default: None, default: None,
renderer: self.renderer.clone(), renderer: self.renderer.clone(),
mime_override: self.mime_override.clone(), mime_override: self.mime_override.clone(),
path_filter: self.path_filter.clone(),
file_flags: self.file_flags, file_flags: self.file_flags,
guards: self.use_guards.clone(), guards: self.guards.clone(),
hidden_files: self.hidden_files,
}; };
if let Some(ref default) = *self.default.borrow() { if let Some(ref default) = *self.default.borrow() {
let fut = default.new_service(()); default
Box::pin(async { .new_service(())
match fut.await { .map(move |result| match result {
Ok(default) => { Ok(default) => {
inner.default = Some(default); srv.default = Some(default);
Ok(FilesService(Rc::new(inner))) Ok(srv)
} }
Err(_) => Err(()), Err(_) => Err(()),
} })
}) .boxed_local()
} else { } else {
Box::pin(async move { Ok(FilesService(Rc::new(inner))) }) ok(srv).boxed_local()
} }
} }
} }
#[cfg(test)]
mod tests {
use actix_web::{
http::StatusCode,
test::{self, TestRequest},
App, HttpResponse,
};
use super::*;
#[actix_web::test]
async fn custom_files_listing_renderer() {
let srv = test::init_service(
App::new().service(
Files::new("/", "./tests")
.show_files_listing()
.files_listing_renderer(|dir, req| {
Ok(ServiceResponse::new(
req.clone(),
HttpResponse::Ok().body(dir.path.to_str().unwrap().to_owned()),
))
}),
),
)
.await;
let req = TestRequest::with_uri("/").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
let body = test::read_body(res).await;
let body_str = std::str::from_utf8(&body).unwrap();
let actual_path = Path::new(&body_str);
let expected_path = Path::new("actix-files/tests");
assert!(
actual_path.ends_with(expected_path),
"body {:?} does not end with {:?}",
actual_path,
expected_path
);
}
}
(File diff suppressed because it is too large.)
@ -1,34 +1,31 @@
use std::{ use std::fs::{File, Metadata};
fs::Metadata, use std::io;
io, use std::ops::{Deref, DerefMut};
path::{Path, PathBuf}, use std::path::{Path, PathBuf};
time::{SystemTime, UNIX_EPOCH}, use std::time::{SystemTime, UNIX_EPOCH};
};
#[cfg(unix)]
use std::os::unix::fs::MetadataExt;
use actix_web::{ use actix_web::{
body::{self, BoxBody, SizedStream}, dev::{BodyEncoding, SizedStream},
dev::{
self, AppService, HttpServiceFactory, ResourceDef, Service, ServiceFactory, ServiceRequest,
ServiceResponse,
},
http::{ http::{
header::{ header::{
self, Charset, ContentDisposition, ContentEncoding, DispositionParam, DispositionType, self, Charset, ContentDisposition, DispositionParam, DispositionType,
ExtendedValue, HeaderValue, ExtendedValue,
}, },
StatusCode, ContentEncoding, StatusCode,
}, },
Error, HttpMessage, HttpRequest, HttpResponse, Responder, Error, HttpMessage, HttpRequest, HttpResponse, Responder,
}; };
use bitflags::bitflags; use bitflags::bitflags;
use derive_more::{Deref, DerefMut}; use futures_util::future::{ready, Ready};
use futures_core::future::LocalBoxFuture; use mime_guess::from_path;
use mime::Mime;
use crate::ChunkedReadFile;
use crate::{encoding::equiv_utf8_text, range::HttpRange}; use crate::{encoding::equiv_utf8_text, range::HttpRange};
bitflags! { bitflags! {
#[derive(Debug, Clone, Copy)]
pub(crate) struct Flags: u8 { pub(crate) struct Flags: u8 {
const ETAG = 0b0000_0001; const ETAG = 0b0000_0001;
const LAST_MD = 0b0000_0010; const LAST_MD = 0b0000_0010;
@ -39,57 +36,24 @@ bitflags! {
impl Default for Flags { impl Default for Flags {
fn default() -> Self { fn default() -> Self {
Flags::from_bits_truncate(0b0000_1111) Flags::from_bits_truncate(0b0000_0111)
} }
} }
/// A file with an associated name. /// A file with an associated name.
/// #[derive(Debug)]
/// `NamedFile` can be registered as services:
/// ```
/// use actix_web::App;
/// use actix_files::NamedFile;
///
/// # async fn run() -> Result<(), Box<dyn std::error::Error>> {
/// let file = NamedFile::open_async("./static/index.html").await?;
/// let app = App::new().service(file);
/// # Ok(())
/// # }
/// ```
///
/// They can also be returned from handlers:
/// ```
/// use actix_web::{Responder, get};
/// use actix_files::NamedFile;
///
/// #[get("/")]
/// async fn index() -> impl Responder {
/// NamedFile::open_async("./static/index.html").await
/// }
/// ```
#[derive(Debug, Deref, DerefMut)]
pub struct NamedFile { pub struct NamedFile {
#[deref]
#[deref_mut]
file: File,
path: PathBuf, path: PathBuf,
file: File,
modified: Option<SystemTime>, modified: Option<SystemTime>,
pub(crate) md: Metadata, pub(crate) md: Metadata,
pub(crate) flags: Flags, pub(crate) flags: Flags,
pub(crate) status_code: StatusCode, pub(crate) status_code: StatusCode,
pub(crate) content_type: Mime, pub(crate) content_type: mime::Mime,
pub(crate) content_disposition: ContentDisposition, pub(crate) content_disposition: header::ContentDisposition,
pub(crate) encoding: Option<ContentEncoding>, pub(crate) encoding: Option<ContentEncoding>,
} }
#[cfg(not(feature = "experimental-io-uring"))]
pub(crate) use std::fs::File;
#[cfg(feature = "experimental-io-uring")]
pub(crate) use tokio_uring::fs::File;
use super::chunked;
impl NamedFile { impl NamedFile {
/// Creates an instance from a previously opened file. /// Creates an instance from a previously opened file.
/// ///
@ -97,19 +61,20 @@ impl NamedFile {
/// `ContentDisposition` headers. /// `ContentDisposition` headers.
/// ///
/// # Examples /// # Examples
/// ```ignore
/// use std::{
/// io::{self, Write as _},
/// env,
/// fs::File
/// };
/// use actix_files::NamedFile;
/// ///
/// let mut file = File::create("foo.txt")?; /// ```rust
/// file.write_all(b"Hello, world!")?; /// use actix_files::NamedFile;
/// let named_file = NamedFile::from_file(file, "bar.txt")?; /// use std::io::{self, Write};
/// # std::fs::remove_file("foo.txt"); /// use std::env;
/// Ok(()) /// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut file = File::create("foo.txt")?;
/// file.write_all(b"Hello, world!")?;
/// let named_file = NamedFile::from_file(file, "bar.txt")?;
/// # std::fs::remove_file("foo.txt");
/// Ok(())
/// }
/// ``` /// ```
pub fn from_file<P: AsRef<Path>>(file: File, path: P) -> io::Result<NamedFile> { pub fn from_file<P: AsRef<Path>>(file: File, path: P) -> io::Result<NamedFile> {
let path = path.as_ref().to_path_buf(); let path = path.as_ref().to_path_buf();
@ -127,25 +92,15 @@ impl NamedFile {
} }
}; };
let ct = mime_guess::from_path(&path).first_or_octet_stream(); let ct = from_path(&path).first_or_octet_stream();
let disposition = match ct.type_() { let disposition = match ct.type_() {
mime::IMAGE | mime::TEXT | mime::AUDIO | mime::VIDEO => DispositionType::Inline, mime::IMAGE | mime::TEXT | mime::VIDEO => DispositionType::Inline,
mime::APPLICATION => match ct.subtype() {
mime::JAVASCRIPT | mime::JSON => DispositionType::Inline,
name if name == "wasm" || name == "xhtml" => DispositionType::Inline,
_ => DispositionType::Attachment,
},
_ => DispositionType::Attachment, _ => DispositionType::Attachment,
}; };
// replace special characters in filenames which could occur on some filesystems let mut parameters =
let filename_s = filename vec![DispositionParam::Filename(String::from(filename.as_ref()))];
.replace('\n', "%0A") // \n line break
.replace('\x0B', "%0B") // \v vertical tab
.replace('\x0C', "%0C") // \f form feed
.replace('\r', "%0D"); // \r carriage return
let mut parameters = vec![DispositionParam::Filename(filename_s)];
if !filename.is_ascii() { if !filename.is_ascii() {
parameters.push(DispositionParam::FilenameExt(ExtendedValue { parameters.push(DispositionParam::FilenameExt(ExtendedValue {
@ -163,30 +118,7 @@ impl NamedFile {
(ct, cd) (ct, cd)
}; };
let md = { let md = file.metadata()?;
#[cfg(not(feature = "experimental-io-uring"))]
{
file.metadata()?
}
#[cfg(feature = "experimental-io-uring")]
{
use std::os::unix::prelude::{AsRawFd, FromRawFd};
let fd = file.as_raw_fd();
// SAFETY: fd is borrowed and lives longer than the unsafe block
unsafe {
let file = std::fs::File::from_raw_fd(fd);
let md = file.metadata();
// SAFETY: forget the fd before exiting block in success or error case but don't
// run destructor (that would close file handle)
std::mem::forget(file);
md?
}
}
};
let modified = md.modified().ok(); let modified = md.modified().ok();
let encoding = None; let encoding = None;
@ -206,59 +138,32 @@ impl NamedFile {
/// Attempts to open a file in read-only mode. /// Attempts to open a file in read-only mode.
/// ///
/// # Examples /// # Examples
/// ``` ///
/// ```rust
/// use actix_files::NamedFile; /// use actix_files::NamedFile;
///
/// let file = NamedFile::open("foo.txt"); /// let file = NamedFile::open("foo.txt");
/// ``` /// ```
#[cfg(not(feature = "experimental-io-uring"))]
pub fn open<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> { pub fn open<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> {
let file = File::open(&path)?; Self::from_file(File::open(&path)?, path)
Self::from_file(file, path)
} }
/// Attempts to open a file asynchronously in read-only mode. /// Returns reference to the underlying `File` object.
///
/// When the `experimental-io-uring` crate feature is enabled, this will be async. Otherwise, it
/// will behave just like `open`.
///
/// # Examples
/// ```
/// use actix_files::NamedFile;
/// # async fn open() {
/// let file = NamedFile::open_async("foo.txt").await.unwrap();
/// # }
/// ```
pub async fn open_async<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> {
let file = {
#[cfg(not(feature = "experimental-io-uring"))]
{
File::open(&path)?
}
#[cfg(feature = "experimental-io-uring")]
{
File::open(&path).await?
}
};
Self::from_file(file, path)
}
/// Returns reference to the underlying file object.
#[inline] #[inline]
pub fn file(&self) -> &File { pub fn file(&self) -> &File {
&self.file &self.file
} }
/// Returns the filesystem path to this file. /// Retrieve the path of this file.
/// ///
/// # Examples /// # Examples
/// ``` ///
/// ```rust
/// # use std::io; /// # use std::io;
/// use actix_files::NamedFile; /// use actix_files::NamedFile;
/// ///
/// # async fn path() -> io::Result<()> { /// # fn path() -> io::Result<()> {
/// let file = NamedFile::open_async("test.txt").await?; /// let file = NamedFile::open("test.txt")?;
/// assert_eq!(file.path().as_os_str(), "foo.txt"); /// assert_eq!(file.path().as_os_str(), "foo.txt");
/// # Ok(()) /// # Ok(())
/// # } /// # }
@ -268,92 +173,51 @@ impl NamedFile {
self.path.as_path() self.path.as_path()
} }
/// Returns the time the file was last modified. /// Set response **Status Code**
///
/// Returns `None` only on unsupported platforms; see [`std::fs::Metadata::modified()`].
/// Therefore, it is usually safe to unwrap this.
#[inline]
pub fn modified(&self) -> Option<SystemTime> {
self.modified
}
/// Returns the filesystem metadata associated with this file.
#[inline]
pub fn metadata(&self) -> &Metadata {
&self.md
}
/// Returns the `Content-Type` header that will be used when serving this file.
#[inline]
pub fn content_type(&self) -> &Mime {
&self.content_type
}
/// Returns the `Content-Disposition` that will be used when serving this file.
#[inline]
pub fn content_disposition(&self) -> &ContentDisposition {
&self.content_disposition
}
/// Returns the `Content-Encoding` that will be used when serving this file.
///
/// A return value of `None` indicates that the content is not already using a compressed
/// representation and may be subject to compression downstream.
#[inline]
pub fn content_encoding(&self) -> Option<ContentEncoding> {
self.encoding
}
/// Set response status code.
#[deprecated(since = "0.7.0", note = "Prefer `Responder::customize()`.")]
pub fn set_status_code(mut self, status: StatusCode) -> Self { pub fn set_status_code(mut self, status: StatusCode) -> Self {
self.status_code = status; self.status_code = status;
self self
} }
/// Sets the `Content-Type` header that will be used when serving this file. By default the /// Set the MIME Content-Type for serving this file. By default
/// `Content-Type` is inferred from the filename extension. /// the Content-Type is inferred from the filename extension.
#[inline] #[inline]
pub fn set_content_type(mut self, mime_type: Mime) -> Self { pub fn set_content_type(mut self, mime_type: mime::Mime) -> Self {
self.content_type = mime_type; self.content_type = mime_type;
self self
} }
/// Set the Content-Disposition for serving this file. This allows changing the /// Set the Content-Disposition for serving this file. This allows
/// `inline/attachment` disposition as well as the filename sent to the peer. /// changing the inline/attachment disposition as well as the filename
/// /// sent to the peer. By default the disposition is `inline` for text,
/// By default the disposition is `inline` for `text/*`, `image/*`, `video/*` and /// image, and video content types, and `attachment` otherwise, and
/// `application/{javascript, json, wasm}` mime types, and `attachment` otherwise, and the /// the filename is taken from the path provided in the `open` method
/// filename is taken from the path provided in the `open` method after converting it to UTF-8 /// after converting it to UTF-8 using.
/// (using `to_string_lossy`). /// [`std::ffi::OsStr::to_string_lossy`]
#[inline] #[inline]
pub fn set_content_disposition(mut self, cd: ContentDisposition) -> Self { pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self {
self.content_disposition = cd; self.content_disposition = cd;
self.flags.insert(Flags::CONTENT_DISPOSITION); self.flags.insert(Flags::CONTENT_DISPOSITION);
self self
} }
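For illustration, a hedged sketch of the disposition override described above, forcing an attachment download (the handler, file path, and filename are invented; the header types are assumed to come from `actix_web::http::header`):

```rust
use actix_files::NamedFile;
use actix_web::http::header::{ContentDisposition, DispositionParam, DispositionType};

async fn report() -> std::io::Result<NamedFile> {
    let cd = ContentDisposition {
        disposition: DispositionType::Attachment,
        parameters: vec![DispositionParam::Filename("report.pdf".to_owned())],
    };

    // Serve ./static/report.pdf as a download instead of inline.
    Ok(NamedFile::open_async("./static/report.pdf")
        .await?
        .set_content_disposition(cd))
}
```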
/// Disables `Content-Disposition` header. /// Disable `Content-Disposition` header.
/// ///
/// By default, the `Content-Disposition` header is sent. /// By default the `Content-Disposition` header is enabled.
#[inline] #[inline]
pub fn disable_content_disposition(mut self) -> Self { pub fn disable_content_disposition(mut self) -> Self {
self.flags.remove(Flags::CONTENT_DISPOSITION); self.flags.remove(Flags::CONTENT_DISPOSITION);
self self
} }
/// Sets content encoding for this file. /// Set content encoding for serving this file
///
/// This prevents the `Compress` middleware from modifying the file contents and signals to
/// browsers/clients how to decode it. For example, if serving a compressed HTML file (e.g.,
/// `index.html.gz`) then use `.set_content_encoding(ContentEncoding::Gzip)`.
#[inline] #[inline]
pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self { pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self {
self.encoding = Some(enc); self.encoding = Some(enc);
self self
} }
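A minimal sketch of the pre-compressed-file case mentioned in the doc comment above (the path is made up, and the import locations assume a recent actix-web 4 plus the `mime` crate):

```rust
use actix_files::NamedFile;
use actix_web::http::header::ContentEncoding;

async fn precompressed_index() -> std::io::Result<NamedFile> {
    // The bytes on disk are already gzip-compressed: declare the encoding so
    // clients decode it and the Compress middleware leaves the body alone,
    // and set the type explicitly since it cannot be guessed from `.gz`.
    Ok(NamedFile::open_async("./static/index.html.gz")
        .await?
        .set_content_encoding(ContentEncoding::Gzip)
        .set_content_type(mime::TEXT_HTML_UTF_8))
}
```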
/// Specifies whether to return `ETag` header in response. /// Specifies whether to use ETag or not.
/// ///
/// Default is true. /// Default is true.
#[inline] #[inline]
@ -362,7 +226,7 @@ impl NamedFile {
self self
} }
/// Specifies whether to return `Last-Modified` header in response. /// Specifies whether to use Last-Modified or not.
/// ///
/// Default is true. /// Default is true.
#[inline] #[inline]
@ -380,18 +244,14 @@ impl NamedFile {
self self
} }
/// Creates an `ETag` in a format similar to Apache's.
pub(crate) fn etag(&self) -> Option<header::EntityTag> { pub(crate) fn etag(&self) -> Option<header::EntityTag> {
// This etag format is similar to Apache's.
self.modified.as_ref().map(|mtime| { self.modified.as_ref().map(|mtime| {
let ino = { let ino = {
#[cfg(unix)] #[cfg(unix)]
{ {
#[cfg(unix)]
use std::os::unix::fs::MetadataExt as _;
self.md.ino() self.md.ino()
} }
#[cfg(not(unix))] #[cfg(not(unix))]
{ {
0 0
@ -402,7 +262,7 @@ impl NamedFile {
.duration_since(UNIX_EPOCH) .duration_since(UNIX_EPOCH)
.expect("modification time must be after epoch"); .expect("modification time must be after epoch");
header::EntityTag::new_strong(format!( header::EntityTag::strong(format!(
"{:x}:{:x}:{:x}:{:x}", "{:x}:{:x}:{:x}:{:x}",
ino, ino,
self.md.len(), self.md.len(),
@ -417,32 +277,37 @@ impl NamedFile {
} }
/// Creates an `HttpResponse` with file as a streaming body. /// Creates an `HttpResponse` with file as a streaming body.
pub fn into_response(self, req: &HttpRequest) -> HttpResponse<BoxBody> { pub fn into_response(self, req: &HttpRequest) -> Result<HttpResponse, Error> {
if self.status_code != StatusCode::OK { if self.status_code != StatusCode::OK {
let mut res = HttpResponse::build(self.status_code); let mut res = HttpResponse::build(self.status_code);
let ct = if self.flags.contains(Flags::PREFER_UTF8) { if self.flags.contains(Flags::PREFER_UTF8) {
equiv_utf8_text(self.content_type.clone()) let ct = equiv_utf8_text(self.content_type.clone());
res.header(header::CONTENT_TYPE, ct.to_string());
} else { } else {
self.content_type res.header(header::CONTENT_TYPE, self.content_type.to_string());
}; }
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
if self.flags.contains(Flags::CONTENT_DISPOSITION) { if self.flags.contains(Flags::CONTENT_DISPOSITION) {
res.insert_header(( res.header(
header::CONTENT_DISPOSITION, header::CONTENT_DISPOSITION,
self.content_disposition.to_string(), self.content_disposition.to_string(),
)); );
} }
if let Some(current_encoding) = self.encoding { if let Some(current_encoding) = self.encoding {
res.insert_header((header::CONTENT_ENCODING, current_encoding.as_str())); res.encoding(current_encoding);
} }
let reader = chunked::new_chunked_read(self.md.len(), 0, self.file); let reader = ChunkedReadFile {
size: self.md.len(),
offset: 0,
file: Some(self.file),
fut: None,
counter: 0,
};
return res.streaming(reader); return Ok(res.streaming(reader));
} }
let etag = if self.flags.contains(Flags::ETAG) { let etag = if self.flags.contains(Flags::ETAG) {
@ -463,11 +328,11 @@ impl NamedFile {
} else if let (Some(ref m), Some(header::IfUnmodifiedSince(ref since))) = } else if let (Some(ref m), Some(header::IfUnmodifiedSince(ref since))) =
(last_modified, req.get_header()) (last_modified, req.get_header())
{ {
let t1: SystemTime = (*m).into(); let t1: SystemTime = m.clone().into();
let t2: SystemTime = (*since).into(); let t2: SystemTime = since.clone().into();
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) { match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
(Ok(t1), Ok(t2)) => t1.as_secs() > t2.as_secs(), (Ok(t1), Ok(t2)) => t1 > t2,
_ => false, _ => false,
} }
} else { } else {
@ -482,47 +347,47 @@ impl NamedFile {
} else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) = } else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) =
(last_modified, req.get_header()) (last_modified, req.get_header())
{ {
let t1: SystemTime = (*m).into(); let t1: SystemTime = m.clone().into();
let t2: SystemTime = (*since).into(); let t2: SystemTime = since.clone().into();
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) { match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
(Ok(t1), Ok(t2)) => t1.as_secs() <= t2.as_secs(), (Ok(t1), Ok(t2)) => t1 <= t2,
_ => false, _ => false,
} }
} else { } else {
false false
}; };
let mut res = HttpResponse::build(self.status_code); let mut resp = HttpResponse::build(self.status_code);
let ct = if self.flags.contains(Flags::PREFER_UTF8) { if self.flags.contains(Flags::PREFER_UTF8) {
equiv_utf8_text(self.content_type.clone()) let ct = equiv_utf8_text(self.content_type.clone());
resp.header(header::CONTENT_TYPE, ct.to_string());
} else { } else {
self.content_type resp.header(header::CONTENT_TYPE, self.content_type.to_string());
};
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
if self.flags.contains(Flags::CONTENT_DISPOSITION) {
res.insert_header((
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
));
} }
if self.flags.contains(Flags::CONTENT_DISPOSITION) {
resp.header(
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
);
}
// default compressing
if let Some(current_encoding) = self.encoding { if let Some(current_encoding) = self.encoding {
res.insert_header((header::CONTENT_ENCODING, current_encoding.as_str())); resp.encoding(current_encoding);
} }
if let Some(lm) = last_modified { if let Some(lm) = last_modified {
res.insert_header((header::LAST_MODIFIED, lm.to_string())); resp.header(header::LAST_MODIFIED, lm.to_string());
} }
if let Some(etag) = etag { if let Some(etag) = etag {
res.insert_header((header::ETAG, etag.to_string())); resp.header(header::ETAG, etag.to_string());
} }
res.insert_header((header::ACCEPT_RANGES, "bytes")); resp.header(header::ACCEPT_RANGES, "bytes");
let mut length = self.md.len(); let mut length = self.md.len();
let mut offset = 0; let mut offset = 0;
@ -534,56 +399,58 @@ impl NamedFile {
length = ranges[0].length; length = ranges[0].length;
offset = ranges[0].start; offset = ranges[0].start;
// When a Content-Encoding header is present in a 206 partial content response resp.encoding(ContentEncoding::Identity);
// for video content, it prevents browser video players from starting playback resp.header(
// before loading the whole video and also prevents seeking.
//
// See: https://github.com/actix/actix-web/issues/2815
//
// The assumption of this fix is that the video player knows to not send an
// Accept-Encoding header for this request and that downstream middleware will
// not attempt compression for requests without it.
//
// TODO: Solve question around what to do if self.encoding is set and partial
// range is requested. Reject request? Ignoring self.encoding seems wrong, too.
// In practice, it should not come up.
if req.headers().contains_key(&header::ACCEPT_ENCODING) {
// don't allow compression middleware to modify partial content
res.insert_header((
header::CONTENT_ENCODING,
HeaderValue::from_static("identity"),
));
}
res.insert_header((
header::CONTENT_RANGE, header::CONTENT_RANGE,
format!("bytes {}-{}/{}", offset, offset + length - 1, self.md.len()), format!(
)); "bytes {}-{}/{}",
offset,
offset + length - 1,
self.md.len()
),
);
} else { } else {
res.insert_header((header::CONTENT_RANGE, format!("bytes */{}", length))); resp.header(header::CONTENT_RANGE, format!("bytes */{}", length));
return res.status(StatusCode::RANGE_NOT_SATISFIABLE).finish(); return Ok(resp.status(StatusCode::RANGE_NOT_SATISFIABLE).finish());
}; };
} else { } else {
return res.status(StatusCode::BAD_REQUEST).finish(); return Ok(resp.status(StatusCode::BAD_REQUEST).finish());
}; };
}; };
if precondition_failed { if precondition_failed {
return res.status(StatusCode::PRECONDITION_FAILED).finish(); return Ok(resp.status(StatusCode::PRECONDITION_FAILED).finish());
} else if not_modified { } else if not_modified {
return res return Ok(resp.status(StatusCode::NOT_MODIFIED).finish());
.status(StatusCode::NOT_MODIFIED)
.body(body::None::new())
.map_into_boxed_body();
} }
let reader = chunked::new_chunked_read(length, offset, self.file); let reader = ChunkedReadFile {
offset,
size: length,
file: Some(self.file),
fut: None,
counter: 0,
};
if offset != 0 || length != self.md.len() { if offset != 0 || length != self.md.len() {
res.status(StatusCode::PARTIAL_CONTENT); resp.status(StatusCode::PARTIAL_CONTENT);
} }
res.body(SizedStream::new(length, reader)) Ok(resp.body(SizedStream::new(length, reader)))
}
}
impl Deref for NamedFile {
type Target = File;
fn deref(&self) -> &File {
&self.file
}
}
impl DerefMut for NamedFile {
fn deref_mut(&mut self) -> &mut File {
&mut self.file
} }
} }
@ -628,62 +495,10 @@ fn none_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
} }
impl Responder for NamedFile { impl Responder for NamedFile {
type Body = BoxBody;
fn respond_to(self, req: &HttpRequest) -> HttpResponse<Self::Body> {
self.into_response(req)
}
}
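For context, this `Responder` impl is what lets a `NamedFile` be returned straight from a handler; a small sketch (route and file path invented):

```rust
use actix_files::NamedFile;
use actix_web::{get, Result};

#[get("/download")]
async fn download() -> Result<NamedFile> {
    // Conditional and range headers are handled by `into_response` when this
    // value is converted into the final `HttpResponse`.
    Ok(NamedFile::open_async("./assets/manual.pdf").await?)
}
```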
impl ServiceFactory<ServiceRequest> for NamedFile {
type Response = ServiceResponse;
type Error = Error; type Error = Error;
type Config = (); type Future = Ready<Result<HttpResponse, Error>>;
type Service = NamedFileService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future { fn respond_to(self, req: &HttpRequest) -> Self::Future {
let service = NamedFileService { ready(self.into_response(req))
path: self.path.clone(),
};
Box::pin(async move { Ok(service) })
}
}
#[doc(hidden)]
#[derive(Debug)]
pub struct NamedFileService {
path: PathBuf,
}
impl Service<ServiceRequest> for NamedFileService {
type Response = ServiceResponse;
type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
dev::always_ready!();
fn call(&self, req: ServiceRequest) -> Self::Future {
let (req, _) = req.into_parts();
let path = self.path.clone();
Box::pin(async move {
let file = NamedFile::open_async(path).await?;
let res = file.into_response(&req);
Ok(ServiceResponse::new(req, res))
})
}
}
impl HttpServiceFactory for NamedFile {
fn register(self, config: &mut AppService) {
config.register_service(
ResourceDef::root_prefix(self.path.to_string_lossy().as_ref()),
None,
self,
None,
)
} }
} }

View File

@ -1,50 +1,26 @@
use std::{ use std::{
path::{Component, Path, PathBuf}, path::{Path, PathBuf},
str::FromStr, str::FromStr,
}; };
use actix_utils::future::{ready, Ready};
use actix_web::{dev::Payload, FromRequest, HttpRequest}; use actix_web::{dev::Payload, FromRequest, HttpRequest};
use futures_util::future::{ready, Ready};
use crate::error::UriSegmentError; use crate::error::UriSegmentError;
#[derive(Debug, PartialEq, Eq)] #[derive(Debug)]
pub(crate) struct PathBufWrap(PathBuf); pub(crate) struct PathBufWrap(PathBuf);
impl FromStr for PathBufWrap { impl FromStr for PathBufWrap {
type Err = UriSegmentError; type Err = UriSegmentError;
fn from_str(path: &str) -> Result<Self, Self::Err> { fn from_str(path: &str) -> Result<Self, Self::Err> {
Self::parse_path(path, false)
}
}
impl PathBufWrap {
/// Parse a path, giving the choice of allowing hidden files to be considered valid segments.
///
/// Path traversal is guarded by this method.
pub fn parse_path(path: &str, hidden_files: bool) -> Result<Self, UriSegmentError> {
let mut buf = PathBuf::new(); let mut buf = PathBuf::new();
// equivalent to `path.split('/').count()`
let mut segment_count = path.matches('/').count() + 1;
// we can decode the whole path here (instead of per-segment decoding)
// because we will reject `%2F` in paths using `segment_count`.
let path = percent_encoding::percent_decode_str(path)
.decode_utf8()
.map_err(|_| UriSegmentError::NotValidUtf8)?;
// disallow decoding `%2F` into `/`
if segment_count != path.matches('/').count() + 1 {
return Err(UriSegmentError::BadChar('/'));
}
for segment in path.split('/') { for segment in path.split('/') {
if segment == ".." { if segment == ".." {
segment_count -= 1;
buf.pop(); buf.pop();
} else if !hidden_files && segment.starts_with('.') { } else if segment.starts_with('.') {
return Err(UriSegmentError::BadStart('.')); return Err(UriSegmentError::BadStart('.'));
} else if segment.starts_with('*') { } else if segment.starts_with('*') {
return Err(UriSegmentError::BadStart('*')); return Err(UriSegmentError::BadStart('*'));
@ -55,27 +31,14 @@ impl PathBufWrap {
} else if segment.ends_with('<') { } else if segment.ends_with('<') {
return Err(UriSegmentError::BadEnd('<')); return Err(UriSegmentError::BadEnd('<'));
} else if segment.is_empty() { } else if segment.is_empty() {
segment_count -= 1;
continue; continue;
} else if cfg!(windows) && segment.contains('\\') { } else if cfg!(windows) && segment.contains('\\') {
return Err(UriSegmentError::BadChar('\\')); return Err(UriSegmentError::BadChar('\\'));
} else if cfg!(windows) && segment.contains(':') {
return Err(UriSegmentError::BadChar(':'));
} else { } else {
buf.push(segment) buf.push(segment)
} }
} }
// make sure we agree with stdlib parser
for (i, component) in buf.components().enumerate() {
assert!(
matches!(component, Component::Normal(_)),
"component `{:?}` is not normal",
component
);
assert!(i < segment_count);
}
Ok(PathBufWrap(buf)) Ok(PathBufWrap(buf))
} }
} }
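A sketch of the guarantees the guard above is meant to give, written in the style of the tests below (it uses the crate-private `PathBufWrap`, so it is illustrative rather than public API):

```rust
#[test]
fn traversal_guard_sketch() {
    use std::path::PathBuf;

    // An encoded slash cannot smuggle in an extra path separator.
    assert!(PathBufWrap::parse_path("/a%2Fb", false).is_err());

    // `..` segments are popped, so traversal cannot climb above the mount root.
    assert_eq!(
        PathBufWrap::parse_path("/../secret.txt", false).unwrap().0,
        PathBuf::from("secret.txt"),
    );

    // Hidden files are rejected unless explicitly allowed.
    assert!(PathBufWrap::parse_path("/.env", false).is_err());
    assert!(PathBufWrap::parse_path("/.env", true).is_ok());
}
```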
@ -89,14 +52,17 @@ impl AsRef<Path> for PathBufWrap {
impl FromRequest for PathBufWrap { impl FromRequest for PathBufWrap {
type Error = UriSegmentError; type Error = UriSegmentError;
type Future = Ready<Result<Self, Self::Error>>; type Future = Ready<Result<Self, Self::Error>>;
type Config = ();
fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future { fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future {
ready(req.match_info().unprocessed().parse()) ready(req.match_info().path().parse())
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::iter::FromIterator;
use super::*; use super::*;
#[test] #[test]
@ -130,59 +96,4 @@ mod tests {
PathBuf::from_iter(vec!["seg2"]) PathBuf::from_iter(vec!["seg2"])
); );
} }
#[test]
fn test_parse_path() {
assert_eq!(
PathBufWrap::parse_path("/test/.tt", false).map(|t| t.0),
Err(UriSegmentError::BadStart('.'))
);
assert_eq!(
PathBufWrap::parse_path("/test/.tt", true).unwrap().0,
PathBuf::from_iter(vec!["test", ".tt"])
);
}
#[test]
fn path_traversal() {
assert_eq!(
PathBufWrap::parse_path("/../README.md", false).unwrap().0,
PathBuf::from_iter(vec!["README.md"])
);
assert_eq!(
PathBufWrap::parse_path("/../README.md", true).unwrap().0,
PathBuf::from_iter(vec!["README.md"])
);
assert_eq!(
PathBufWrap::parse_path("/../../../../../../../../../../etc/passwd", false)
.unwrap()
.0,
PathBuf::from_iter(vec!["etc/passwd"])
);
}
#[test]
#[cfg_attr(windows, should_panic)]
fn windows_drive_traversal() {
// detect issues in windows that could lead to path traversal
// see <https://github.com/SergioBenitez/Rocket/issues/1949
assert_eq!(
PathBufWrap::parse_path("C:test.txt", false).unwrap().0,
PathBuf::from_iter(vec!["C:test.txt"])
);
assert_eq!(
PathBufWrap::parse_path("C:../whatever", false).unwrap().0,
PathBuf::from_iter(vec!["C:../whatever"])
);
assert_eq!(
PathBufWrap::parse_path(":test.txt", false).unwrap().0,
PathBuf::from_iter(vec![":test.txt"])
);
}
} }

View File

@ -1,37 +1,3 @@
use std::fmt;
use derive_more::Error;
/// Copy of `http_range::HttpRangeParseError`.
#[derive(Debug, Clone)]
enum HttpRangeParseError {
InvalidRange,
NoOverlap,
}
impl From<http_range::HttpRangeParseError> for HttpRangeParseError {
fn from(err: http_range::HttpRangeParseError) -> Self {
match err {
http_range::HttpRangeParseError::InvalidRange => Self::InvalidRange,
http_range::HttpRangeParseError::NoOverlap => Self::NoOverlap,
}
}
}
#[derive(Debug, Clone, Error)]
#[non_exhaustive]
pub struct ParseRangeErr(#[error(not(source))] HttpRangeParseError);
impl fmt::Display for ParseRangeErr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("invalid Range header: ")?;
f.write_str(match self.0 {
HttpRangeParseError::InvalidRange => "invalid syntax",
HttpRangeParseError::NoOverlap => "range starts after end of content",
})
}
}
/// HTTP Range header representation. /// HTTP Range header representation.
#[derive(Debug, Clone, Copy)] #[derive(Debug, Clone, Copy)]
pub struct HttpRange { pub struct HttpRange {
@ -42,22 +8,91 @@ pub struct HttpRange {
pub length: u64, pub length: u64,
} }
const PREFIX: &str = "bytes=";
const PREFIX_LEN: usize = 6;
impl HttpRange { impl HttpRange {
/// Parses Range HTTP header string as per RFC 2616. /// Parses Range HTTP header string as per RFC 2616.
/// ///
/// `header` is the HTTP Range header (e.g. `bytes=0-9`). /// `header` is the HTTP Range header (e.g. `bytes=0-9`).
/// `size` is the full size of the response (file). /// `size` is the full size of the response (file).
pub fn parse(header: &str, size: u64) -> Result<Vec<HttpRange>, ParseRangeErr> { pub fn parse(header: &str, size: u64) -> Result<Vec<HttpRange>, ()> {
let ranges = if header.is_empty() {
http_range::HttpRange::parse(header, size).map_err(|err| ParseRangeErr(err.into()))?; return Ok(Vec::new());
}
if !header.starts_with(PREFIX) {
return Err(());
}
Ok(ranges let size_sig = size as i64;
.iter() let mut no_overlap = false;
.map(|range| HttpRange {
start: range.start, let all_ranges: Vec<Option<HttpRange>> = header[PREFIX_LEN..]
length: range.length, .split(',')
.map(|x| x.trim())
.filter(|x| !x.is_empty())
.map(|ra| {
let mut start_end_iter = ra.split('-');
let start_str = start_end_iter.next().ok_or(())?.trim();
let end_str = start_end_iter.next().ok_or(())?.trim();
if start_str.is_empty() {
// If no start is specified, end specifies the
// range start relative to the end of the file.
let mut length: i64 = end_str.parse().map_err(|_| ())?;
if length > size_sig {
length = size_sig;
}
Ok(Some(HttpRange {
start: (size_sig - length) as u64,
length: length as u64,
}))
} else {
let start: i64 = start_str.parse().map_err(|_| ())?;
if start < 0 {
return Err(());
}
if start >= size_sig {
no_overlap = true;
return Ok(None);
}
let length = if end_str.is_empty() {
// If no end is specified, range extends to end of the file.
size_sig - start
} else {
let mut end: i64 = end_str.parse().map_err(|_| ())?;
if start > end {
return Err(());
}
if end >= size_sig {
end = size_sig - 1;
}
end - start + 1
};
Ok(Some(HttpRange {
start: start as u64,
length: length as u64,
}))
}
}) })
.collect()) .collect::<Result<_, _>>()?;
let ranges: Vec<HttpRange> = all_ranges.into_iter().filter_map(|x| x).collect();
if no_overlap && ranges.is_empty() {
return Err(());
}
Ok(ranges)
} }
} }
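A brief sketch of how the parser resolves the common range forms against a resource size (values picked for illustration; the test table below exercises many more cases):

```rust
fn range_parse_sketch() {
    // Explicit range: bytes 0 through 9 inclusive.
    let ranges = HttpRange::parse("bytes=0-9", 1000).unwrap();
    assert_eq!((ranges[0].start, ranges[0].length), (0, 10));

    // Suffix range: the last 500 bytes of a 1000-byte resource.
    let suffix = HttpRange::parse("bytes=-500", 1000).unwrap();
    assert_eq!((suffix[0].start, suffix[0].length), (500, 500));

    // Open-ended range: from byte 990 to the end.
    let open = HttpRange::parse("bytes=990-", 1000).unwrap();
    assert_eq!((open[0].start, open[0].length), (990, 10));
}
```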
@ -298,7 +333,8 @@ mod tests {
if expected.is_empty() { if expected.is_empty() {
continue; continue;
} else { } else {
panic!( assert!(
false,
"parse({}, {}) returned error {:?}", "parse({}, {}) returned error {:?}",
header, header,
size, size,
@ -310,24 +346,28 @@ mod tests {
let got = res.unwrap(); let got = res.unwrap();
if got.len() != expected.len() { if got.len() != expected.len() {
panic!( assert!(
false,
"len(parseRange({}, {})) = {}, want {}", "len(parseRange({}, {})) = {}, want {}",
header, header,
size, size,
got.len(), got.len(),
expected.len() expected.len()
); );
continue;
} }
for i in 0..expected.len() { for i in 0..expected.len() {
if got[i].start != expected[i].start { if got[i].start != expected[i].start {
panic!( assert!(
false,
"parseRange({}, {})[{}].start = {}, want {}", "parseRange({}, {})[{}].start = {}, want {}",
header, size, i, got[i].start, expected[i].start header, size, i, got[i].start, expected[i].start
) )
} }
if got[i].length != expected[i].length { if got[i].length != expected[i].length {
panic!( assert!(
false,
"parseRange({}, {})[{}].length = {}, want {}", "parseRange({}, {})[{}].length = {}, want {}",
header, size, i, got[i].length, expected[i].length header, size, i, got[i].length, expected[i].length
) )

View File

@ -1,33 +1,27 @@
use std::{fmt, io, ops::Deref, path::PathBuf, rc::Rc}; use std::{
fmt, io,
path::PathBuf,
rc::Rc,
task::{Context, Poll},
};
use actix_service::Service;
use actix_web::{ use actix_web::{
body::BoxBody, dev::{ServiceRequest, ServiceResponse},
dev::{self, Service, ServiceRequest, ServiceResponse},
error::Error, error::Error,
guard::Guard, guard::Guard,
http::{header, Method}, http::{header, Method},
HttpResponse, HttpResponse,
}; };
use futures_core::future::LocalBoxFuture; use futures_util::future::{ok, Either, LocalBoxFuture, Ready};
use crate::{ use crate::{
named, Directory, DirectoryRenderer, FilesError, HttpService, MimeOverride, NamedFile, named, Directory, DirectoryRenderer, FilesError, HttpService, MimeOverride,
PathBufWrap, PathFilter, NamedFile, PathBufWrap,
}; };
/// Assembled file serving service. /// Assembled file serving service.
#[derive(Clone)] pub struct FilesService {
pub struct FilesService(pub(crate) Rc<FilesServiceInner>);
impl Deref for FilesService {
type Target = FilesServiceInner;
fn deref(&self) -> &Self::Target {
&self.0
}
}
pub struct FilesServiceInner {
pub(crate) directory: PathBuf, pub(crate) directory: PathBuf,
pub(crate) index: Option<String>, pub(crate) index: Option<String>,
pub(crate) show_index: bool, pub(crate) show_index: bool,
@ -35,52 +29,25 @@ pub struct FilesServiceInner {
pub(crate) default: Option<HttpService>, pub(crate) default: Option<HttpService>,
pub(crate) renderer: Rc<DirectoryRenderer>, pub(crate) renderer: Rc<DirectoryRenderer>,
pub(crate) mime_override: Option<Rc<MimeOverride>>, pub(crate) mime_override: Option<Rc<MimeOverride>>,
pub(crate) path_filter: Option<Rc<PathFilter>>,
pub(crate) file_flags: named::Flags, pub(crate) file_flags: named::Flags,
pub(crate) guards: Option<Rc<dyn Guard>>, pub(crate) guards: Option<Rc<dyn Guard>>,
pub(crate) hidden_files: bool,
} }
impl fmt::Debug for FilesServiceInner { type FilesServiceFuture = Either<
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { Ready<Result<ServiceResponse, Error>>,
f.write_str("FilesServiceInner") LocalBoxFuture<'static, Result<ServiceResponse, Error>>,
} >;
}
impl FilesService { impl FilesService {
async fn handle_err( fn handle_err(&mut self, e: io::Error, req: ServiceRequest) -> FilesServiceFuture {
&self, log::debug!("Failed to handle {}: {}", req.path(), e);
err: io::Error,
req: ServiceRequest,
) -> Result<ServiceResponse, Error> {
log::debug!("error handling {}: {}", req.path(), err);
if let Some(ref default) = self.default { if let Some(ref mut default) = self.default {
default.call(req).await Either::Right(default.call(req))
} else { } else {
Ok(req.error_response(err)) Either::Left(ok(req.error_response(e)))
} }
} }
fn serve_named_file(&self, req: ServiceRequest, mut named_file: NamedFile) -> ServiceResponse {
if let Some(ref mime_override) = self.mime_override {
let new_disposition = mime_override(&named_file.content_type.type_());
named_file.content_disposition.disposition = new_disposition;
}
named_file.flags = self.file_flags;
let (req, _) = req.into_parts();
let res = named_file.into_response(&req);
ServiceResponse::new(req, res)
}
fn show_index(&self, req: ServiceRequest, path: PathBuf) -> ServiceResponse {
let dir = Directory::new(self.directory.clone(), path);
let (req, _) = req.into_parts();
(self.renderer)(&dir, &req).unwrap_or_else(|err| ServiceResponse::from_err(err, req))
}
} }
impl fmt::Debug for FilesService { impl fmt::Debug for FilesService {
@ -89,100 +56,112 @@ impl fmt::Debug for FilesService {
} }
} }
impl Service<ServiceRequest> for FilesService { impl Service for FilesService {
type Response = ServiceResponse<BoxBody>; type Request = ServiceRequest;
type Response = ServiceResponse;
type Error = Error; type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>; type Future = FilesServiceFuture;
dev::always_ready!(); fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&self, req: ServiceRequest) -> Self::Future { fn call(&mut self, req: ServiceRequest) -> Self::Future {
let is_method_valid = if let Some(guard) = &self.guards { let is_method_valid = if let Some(guard) = &self.guards {
// execute user defined guards // execute user defined guards
(**guard).check(&req.guard_ctx()) (**guard).check(req.head())
} else { } else {
// default behavior // default behavior
matches!(*req.method(), Method::HEAD | Method::GET) matches!(*req.method(), Method::HEAD | Method::GET)
}; };
let this = self.clone(); if !is_method_valid {
return Either::Left(ok(req.into_response(
actix_web::HttpResponse::MethodNotAllowed()
.header(header::CONTENT_TYPE, "text/plain")
.body("Request did not meet this resource's requirements."),
)));
}
Box::pin(async move { let real_path: PathBufWrap = match req.match_info().path().parse() {
if !is_method_valid { Ok(item) => item,
return Ok(req.into_response( Err(e) => return Either::Left(ok(req.error_response(e))),
HttpResponse::MethodNotAllowed() };
.insert_header(header::ContentType(mime::TEXT_PLAIN_UTF_8))
.body("Request did not meet this resource's requirements."),
));
}
let path_on_disk = // full file path
match PathBufWrap::parse_path(req.match_info().unprocessed(), this.hidden_files) { let path = match self.directory.join(&real_path).canonicalize() {
Ok(item) => item, Ok(path) => path,
Err(err) => return Ok(req.error_response(err)), Err(e) => return self.handle_err(e, req),
}; };
if let Some(filter) = &this.path_filter { if path.is_dir() {
if !filter(path_on_disk.as_ref(), req.head()) { if let Some(ref redir_index) = self.index {
if let Some(ref default) = this.default { if self.redirect_to_slash && !req.path().ends_with('/') {
return default.call(req).await;
} else {
return Ok(req.into_response(HttpResponse::NotFound().finish()));
}
}
}
// full file path
let path = this.directory.join(&path_on_disk);
if let Err(err) = path.canonicalize() {
return this.handle_err(err, req).await;
}
if path.is_dir() {
if this.redirect_to_slash
&& !req.path().ends_with('/')
&& (this.index.is_some() || this.show_index)
{
let redirect_to = format!("{}/", req.path()); let redirect_to = format!("{}/", req.path());
return Ok(req.into_response( return Either::Left(ok(req.into_response(
HttpResponse::Found() HttpResponse::Found()
.insert_header((header::LOCATION, redirect_to)) .header(header::LOCATION, redirect_to)
.finish(), .body("")
)); .into_body(),
)));
} }
match this.index { let path = path.join(redir_index);
Some(ref index) => {
let named_path = path.join(index); match NamedFile::open(path) {
match NamedFile::open_async(named_path).await {
Ok(named_file) => Ok(this.serve_named_file(req, named_file)),
Err(_) if this.show_index => Ok(this.show_index(req, path)),
Err(err) => this.handle_err(err, req).await,
}
}
None if this.show_index => Ok(this.show_index(req, path)),
None => Ok(ServiceResponse::from_err(
FilesError::IsDirectory,
req.into_parts().0,
)),
}
} else {
match NamedFile::open_async(&path).await {
Ok(mut named_file) => { Ok(mut named_file) => {
if let Some(ref mime_override) = this.mime_override { if let Some(ref mime_override) = self.mime_override {
let new_disposition = mime_override(&named_file.content_type.type_()); let new_disposition =
mime_override(&named_file.content_type.type_());
named_file.content_disposition.disposition = new_disposition; named_file.content_disposition.disposition = new_disposition;
} }
named_file.flags = this.file_flags; named_file.flags = self.file_flags;
let (req, _) = req.into_parts(); let (req, _) = req.into_parts();
let res = named_file.into_response(&req); Either::Left(ok(match named_file.into_response(&req) {
Ok(ServiceResponse::new(req, res)) Ok(item) => ServiceResponse::new(req, item),
Err(e) => ServiceResponse::from_err(e, req),
}))
} }
Err(err) => this.handle_err(err, req).await, Err(e) => self.handle_err(e, req),
} }
} else if self.show_index {
let dir = Directory::new(self.directory.clone(), path);
let (req, _) = req.into_parts();
let x = (self.renderer)(&dir, &req);
match x {
Ok(resp) => Either::Left(ok(resp)),
Err(e) => Either::Left(ok(ServiceResponse::from_err(e, req))),
}
} else {
Either::Left(ok(ServiceResponse::from_err(
FilesError::IsDirectory,
req.into_parts().0,
)))
} }
}) } else {
match NamedFile::open(path) {
Ok(mut named_file) => {
if let Some(ref mime_override) = self.mime_override {
let new_disposition =
mime_override(&named_file.content_type.type_());
named_file.content_disposition.disposition = new_disposition;
}
named_file.flags = self.file_flags;
let (req, _) = req.into_parts();
match named_file.into_response(&req) {
Ok(item) => {
Either::Left(ok(ServiceResponse::new(req.clone(), item)))
}
Err(e) => Either::Left(ok(ServiceResponse::from_err(e, req))),
}
}
Err(e) => self.handle_err(e, req),
}
}
} }
} }
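This service is normally assembled by the `Files` builder rather than constructed by hand. A hedged configuration sketch that exercises the branches handled in `call` above (directory redirect, index file, listing fallback); directory names are invented:

```rust
use actix_files::Files;
use actix_web::App;

let app = App::new().service(
    Files::new("/static", "./static")
        // `GET /static` is redirected to `GET /static/` before resolving.
        .redirect_to_slash_directory()
        // Serve `index.html` for directory requests when present...
        .index_file("index.html")
        // ...and fall back to a generated directory listing otherwise.
        .show_files_listing(),
);
```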

View File

@ -1,65 +1,40 @@
use actix_files::{Files, NamedFile}; use actix_files::Files;
use actix_web::{ use actix_web::{
http::{ http::{
header::{self, HeaderValue}, header::{self, HeaderValue},
StatusCode, StatusCode,
}, },
test::{self, TestRequest}, test::{self, TestRequest},
web, App, App,
}; };
#[actix_web::test] #[actix_rt::test]
async fn test_utf8_file_contents() { async fn test_utf8_file_contents() {
// use default ISO-8859-1 encoding // use default ISO-8859-1 encoding
let srv = test::init_service(App::new().service(Files::new("/", "./tests"))).await; let mut srv =
test::init_service(App::new().service(Files::new("/", "./tests"))).await;
let req = TestRequest::with_uri("/utf8.txt").to_request(); let req = TestRequest::with_uri("/utf8.txt").to_request();
let res = test::call_service(&srv, req).await; let res = test::call_service(&mut srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(
res.headers().get(header::CONTENT_TYPE),
Some(&HeaderValue::from_static("text/plain; charset=utf-8")),
);
// disable UTF-8 attribute
let srv =
test::init_service(App::new().service(Files::new("/", "./tests").prefer_utf8(false))).await;
let req = TestRequest::with_uri("/utf8.txt").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.status(), StatusCode::OK);
assert_eq!( assert_eq!(
res.headers().get(header::CONTENT_TYPE), res.headers().get(header::CONTENT_TYPE),
Some(&HeaderValue::from_static("text/plain")), Some(&HeaderValue::from_static("text/plain")),
); );
}
#[actix_web::test] // prefer UTF-8 encoding
async fn partial_range_response_encoding() { let mut srv = test::init_service(
let srv = test::init_service(App::new().default_service(web::to(|| async { App::new().service(Files::new("/", "./tests").prefer_utf8(true)),
NamedFile::open_async("./tests/test.binary").await.unwrap() )
})))
.await; .await;
// range request without accept-encoding returns no content-encoding header let req = TestRequest::with_uri("/utf8.txt").to_request();
let req = TestRequest::with_uri("/") let res = test::call_service(&mut srv, req).await;
.append_header((header::RANGE, "bytes=10-20"))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::PARTIAL_CONTENT);
assert!(!res.headers().contains_key(header::CONTENT_ENCODING));
// range request with accept-encoding returns a content-encoding header assert_eq!(res.status(), StatusCode::OK);
let req = TestRequest::with_uri("/")
.append_header((header::RANGE, "bytes=10-20"))
.append_header((header::ACCEPT_ENCODING, "identity"))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::PARTIAL_CONTENT);
assert_eq!( assert_eq!(
res.headers().get(header::CONTENT_ENCODING).unwrap(), res.headers().get(header::CONTENT_TYPE),
"identity" Some(&HeaderValue::from_static("text/plain; charset=utf-8")),
); );
} }

View File

@ -1 +0,0 @@
first

View File

@ -1 +0,0 @@
second

View File

@ -1,34 +0,0 @@
use actix_files::Files;
use actix_web::{
guard::Host,
http::StatusCode,
test::{self, TestRequest},
App,
};
use bytes::Bytes;
#[actix_web::test]
async fn test_guard_filter() {
let srv = test::init_service(
App::new()
.service(Files::new("/", "./tests/fixtures/guards/first").guard(Host("first.com")))
.service(Files::new("/", "./tests/fixtures/guards/second").guard(Host("second.com"))),
)
.await;
let req = TestRequest::with_uri("/index.txt")
.append_header(("Host", "first.com"))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(test::read_body(res).await, Bytes::from("first"));
let req = TestRequest::with_uri("/index.txt")
.append_header(("Host", "second.com"))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(test::read_body(res).await, Bytes::from("second"));
}

View File

@ -1 +0,0 @@
test.png

View File

@ -1 +0,0 @@
// this file is empty.

View File

@ -1,26 +0,0 @@
use actix_files::Files;
use actix_web::{
http::StatusCode,
test::{self, TestRequest},
App,
};
#[actix_rt::test]
async fn test_directory_traversal_prevention() {
let srv = test::init_service(App::new().service(Files::new("/", "./tests"))).await;
let req = TestRequest::with_uri("/../../../../../../../../../../../etc/passwd").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
let req = TestRequest::with_uri(
"/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/etc/passwd",
)
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
let req = TestRequest::with_uri("/%00/etc/passwd%00").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
}

View File

@ -1,175 +0,0 @@
# Changes
## Unreleased
- Minimum supported Rust version (MSRV) is now 1.72.
## 3.2.0
- Minimum supported Rust version (MSRV) is now 1.68 due to transitive `time` dependency.
## 3.1.0
- Minimum supported Rust version (MSRV) is now 1.59.
## 3.0.0
- `TestServer::stop` is now async and will wait for the server and system to shutdown. [#2442]
- Added `TestServer::client_headers` method. [#2097]
- Update `actix-server` dependency to `2`.
- Update `actix-tls` dependency to `3`.
- Update `bytes` to `1.0`. [#1813]
- Minimum supported Rust version (MSRV) is now 1.57.
[#2442]: https://github.com/actix/actix-web/pull/2442
[#2097]: https://github.com/actix/actix-web/pull/2097
[#1813]: https://github.com/actix/actix-web/pull/1813
<details>
<summary>3.0.0 Pre-Releases</summary>
## 3.0.0-beta.13
- No significant changes since `3.0.0-beta.12`.
## 3.0.0-beta.12
- No significant changes since `3.0.0-beta.11`.
## 3.0.0-beta.11
- Minimum supported Rust version (MSRV) is now 1.54.
## 3.0.0-beta.10
- Update `actix-server` to `2.0.0-rc.2`. [#2550]
[#2550]: https://github.com/actix/actix-web/pull/2550
## 3.0.0-beta.9
- No significant changes since `3.0.0-beta.8`.
## 3.0.0-beta.8
- Update `actix-tls` to `3.0.0-rc.1`. [#2474]
[#2474]: https://github.com/actix/actix-web/pull/2474
## 3.0.0-beta.7
- Fix compatibility with experimental `io-uring` feature of `actix-rt`. [#2408]
[#2408]: https://github.com/actix/actix-web/pull/2408
## 3.0.0-beta.6
- `TestServer::stop` is now async and will wait for the server and system to shutdown. [#2442]
- Update `actix-server` to `2.0.0-beta.9`. [#2442]
- Minimum supported Rust version (MSRV) is now 1.52.
[#2442]: https://github.com/actix/actix-web/pull/2442
## 3.0.0-beta.5
- Minimum supported Rust version (MSRV) is now 1.51.
## 3.0.0-beta.4
- Added `TestServer::client_headers` method. [#2097]
[#2097]: https://github.com/actix/actix-web/pull/2097
## 3.0.0-beta.3
- No notable changes.
## 3.0.0-beta.2
- No notable changes.
## 3.0.0-beta.1
- Update `bytes` to `1.0`. [#1813]
[#1813]: https://github.com/actix/actix-web/pull/1813
</details>
## 2.1.0
- Add ability to set address for `TestServer`. [#1645]
- Upgrade `base64` to `0.13`.
- Upgrade `serde_urlencoded` to `0.7`. [#1773]
[#1773]: https://github.com/actix/actix-web/pull/1773
[#1645]: https://github.com/actix/actix-web/pull/1645
## 2.0.0
- Update actix-codec and actix-utils dependencies.
## 2.0.0-alpha.1
- Update the `time` dependency to 0.2.7
- Update `actix-connect` dependency to 2.0.0-alpha.2
- Make `test_server` `async` fn.
- Bump minimum supported Rust version to 1.40
- Replace deprecated `net2` crate with `socket2`
- Update `base64` dependency to 0.12
- Update `env_logger` dependency to 0.7
## 1.0.0
- Replaced `TestServer::start()` with `test_server()`
## 1.0.0-alpha.3
- Migrate to `std::future`
## 0.2.5
- Update serde_urlencoded to "0.6.1"
- Increase TestServerRuntime timeouts from 500ms to 3000ms
- Do not override current `System`
## 0.2.4
- Update actix-server to 0.6
## 0.2.3
- Add `delete`, `options`, `patch` methods to `TestServerRunner`
## 0.2.2
- Add .put() and .sput() methods
## 0.2.1
- Add license files
## 0.2.0
- Update awc and actix-http deps
## 0.1.1
- Always make new connection for http client
## 0.1.0
- No changes
## 0.1.0-alpha.3
- Request functions accept path #743
## 0.1.0-alpha.2
- Added TestServerRuntime::load_body() method
- Update actix-http and awc libraries
## 0.1.0-alpha.1
- Initial impl

View File

@ -1,64 +0,0 @@
[package]
name = "actix-http-test"
version = "3.2.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Various helpers for Actix applications to use during testing"
keywords = ["http", "web", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web"
categories = [
"network-programming",
"asynchronous",
"web-programming::http-server",
"web-programming::websocket",
]
license = "MIT OR Apache-2.0"
edition = "2021"
[package.metadata.docs.rs]
features = []
[package.metadata.cargo_check_external_types]
allowed_external_types = [
"actix_codec::*",
"actix_http::*",
"actix_server::*",
"awc::*",
"bytes::*",
"futures_core::*",
"http::*",
"tokio::*",
]
[features]
default = []
# openssl
openssl = ["tls-openssl", "awc/openssl"]
[dependencies]
actix-service = "2"
actix-codec = "0.5"
actix-tls = "3"
actix-utils = "3"
actix-rt = "2.2"
actix-server = "2"
awc = { version = "3", default-features = false }
bytes = "1"
futures-core = { version = "0.3.17", default-features = false }
http = "0.2.7"
log = "0.4"
socket2 = "0.5"
serde = "1"
serde_json = "1"
slab = "0.4"
serde_urlencoded = "0.7"
tls-openssl = { version = "0.10.55", package = "openssl", optional = true }
tokio = { version = "1.24.2", features = ["sync"] }
[dev-dependencies]
actix-http = "3"
[lints]
workspace = true

View File

@ -1,20 +0,0 @@
# `actix-http-test`
<!-- prettier-ignore-start -->
[![crates.io](https://img.shields.io/crates/v/actix-http-test?label=latest)](https://crates.io/crates/actix-http-test)
[![Documentation](https://docs.rs/actix-http-test/badge.svg?version=3.2.0)](https://docs.rs/actix-http-test/3.2.0)
![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http-test)
<br>
[![Dependency Status](https://deps.rs/crate/actix-http-test/3.2.0/status.svg)](https://deps.rs/crate/actix-http-test/3.2.0)
[![Download](https://img.shields.io/crates/d/actix-http-test.svg)](https://crates.io/crates/actix-http-test)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
<!-- prettier-ignore-end -->
<!-- cargo-rdme start -->
Various helpers for Actix applications to use during testing.
<!-- cargo-rdme end -->

File diff suppressed because it is too large

View File

@ -1,188 +1,109 @@
[package] [package]
name = "actix-http" name = "actix-http"
version = "3.10.0" version = "2.2.1"
authors = [ authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
"Nikolay Kim <fafhrd91@gmail.com>", description = "HTTP primitives for the Actix ecosystem"
"Rob Ede <robjtede@icloud.com>", readme = "README.md"
]
description = "HTTP types and services for the Actix ecosystem"
keywords = ["actix", "http", "framework", "async", "futures"] keywords = ["actix", "http", "framework", "async", "futures"]
homepage = "https://actix.rs" homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web" repository = "https://github.com/actix/actix-web.git"
categories = [ documentation = "https://docs.rs/actix-http/"
"network-programming", categories = ["network-programming", "asynchronous",
"asynchronous", "web-programming::http-server",
"web-programming::http-server", "web-programming::websocket"]
"web-programming::websocket", license = "MIT OR Apache-2.0"
] edition = "2018"
license.workspace = true
edition.workspace = true
rust-version.workspace = true
[package.metadata.docs.rs] [package.metadata.docs.rs]
rustdoc-args = ["--cfg", "docsrs"] features = ["openssl", "rustls", "compress", "secure-cookies", "actors"]
features = [
"http2",
"ws",
"openssl",
"rustls-0_20",
"rustls-0_21",
"rustls-0_22",
"rustls-0_23",
"compress-brotli",
"compress-gzip",
"compress-zstd",
]
[package.metadata.cargo_check_external_types] [lib]
allowed_external_types = [ name = "actix_http"
"actix_codec::*", path = "src/lib.rs"
"actix_service::*",
"actix_tls::*",
"actix_utils::*",
"bytes::*",
"bytestring::*",
"encoding_rs::*",
"futures_core::*",
"h2::*",
"http::*",
"httparse::*",
"language_tags::*",
"mime::*",
"openssl::*",
"rustls::*",
"tokio_util::*",
"tokio::*",
]
[features] [features]
default = [] default = []
# HTTP/2 protocol support # openssl
http2 = ["dep:h2"] openssl = ["actix-tls/openssl", "actix-connect/openssl"]
# WebSocket protocol implementation # rustls support
ws = [ rustls = ["actix-tls/rustls", "actix-connect/rustls"]
"dep:local-channel",
"dep:base64",
"dep:rand",
"dep:sha1",
]
# TLS via OpenSSL # enable compressison support
openssl = ["__tls", "actix-tls/accept", "actix-tls/openssl"] compress = ["flate2", "brotli2"]
# TLS via Rustls v0.20 # support for secure cookies
rustls = ["__tls", "rustls-0_20"] secure-cookies = ["cookie/secure"]
# TLS via Rustls v0.20 # support for actix Actor messages
rustls-0_20 = ["__tls", "actix-tls/accept", "actix-tls/rustls-0_20"] actors = ["actix"]
# TLS via Rustls v0.21
rustls-0_21 = ["__tls", "actix-tls/accept", "actix-tls/rustls-0_21"]
# TLS via Rustls v0.22
rustls-0_22 = ["__tls", "actix-tls/accept", "actix-tls/rustls-0_22"]
# TLS via Rustls v0.23
rustls-0_23 = ["__tls", "actix-tls/accept", "actix-tls/rustls-0_23"]
# Compression codecs
compress-brotli = ["__compress", "dep:brotli"]
compress-gzip = ["__compress", "dep:flate2"]
compress-zstd = ["__compress", "dep:zstd"]
# Internal (PRIVATE!) features used to aid testing and checking feature status.
# Don't rely on these whatsoever. They are semver-exempt and may disappear at anytime.
__compress = []
# Internal (PRIVATE!) features used to aid checking feature status.
# Don't rely on these whatsoever. They may disappear at anytime.
__tls = []
[dependencies] [dependencies]
actix-service = "2" actix-service = "1.0.6"
actix-codec = "0.5" actix-codec = "0.3.0"
actix-utils = "3" actix-connect = "2.0.0"
actix-rt = { version = "2.2", default-features = false } actix-utils = "2.0.0"
actix-rt = "1.0.0"
actix-threadpool = "0.3.1"
actix-tls = { version = "2.0.0", optional = true }
actix = { version = "0.10.0", optional = true }
bitflags = "2" base64 = "0.13"
bytes = "1" bitflags = "1.2"
bytestring = "1" bytes = "0.5.3"
derive_more = { version = "2", features = ["as_ref", "deref", "deref_mut", "display", "error", "from"] } cookie = { version = "0.14.1", features = ["percent-encode"] }
copyless = "0.1.4"
derive_more = "0.99.2"
either = "1.5.3"
encoding_rs = "0.8" encoding_rs = "0.8"
foldhash = "0.1" futures-channel = { version = "0.3.5", default-features = false }
futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] } futures-core = { version = "0.3.5", default-features = false }
http = "0.2.7" futures-util = { version = "0.3.5", default-features = false }
httparse = "1.5.1" fxhash = "0.2.1"
httpdate = "1.0.1" h2 = "0.2.1"
itoa = "1" http = "0.2.0"
language-tags = "0.3" httparse = "1.3"
mime = "0.3.4" indexmap = "1.3"
itoa = "0.4"
lazy_static = "1.4"
language-tags = "0.2"
log = "0.4"
mime = "0.3"
percent-encoding = "2.1" percent-encoding = "2.1"
pin-project-lite = "0.2" pin-project = "1.0.0"
smallvec = "1.6.1" rand = "0.7"
tokio = { version = "1.24.2", features = [] } regex = "1.3"
tokio-util = { version = "0.7", features = ["io", "codec"] } serde = "1.0"
tracing = { version = "0.1.30", default-features = false, features = ["log"] } serde_json = "1.0"
sha-1 = "0.9"
slab = "0.4"
serde_urlencoded = "0.7"
time = { version = "0.2.7", default-features = false, features = ["std"] }
# http2 # compression
h2 = { version = "0.3.26", optional = true } brotli2 = { version="0.3.2", optional = true }
# websockets
local-channel = { version = "0.1", optional = true }
base64 = { version = "0.22", optional = true }
rand = { version = "0.9", optional = true }
sha1 = { version = "0.10", optional = true }
# openssl/rustls
actix-tls = { version = "3.4", default-features = false, optional = true }
# compress-*
brotli = { version = "7", optional = true }
flate2 = { version = "1.0.13", optional = true } flate2 = { version = "1.0.13", optional = true }
zstd = { version = "0.13", optional = true }
[dev-dependencies] [dev-dependencies]
actix-http-test = { version = "3", features = ["openssl"] } actix-server = "1.0.1"
actix-server = "2" actix-connect = { version = "2.0.0", features = ["openssl"] }
actix-tls = { version = "3.4", features = ["openssl", "rustls-0_23-webpki-roots"] } actix-http-test = { version = "2.0.0", features = ["openssl"] }
actix-web = "4" actix-tls = { version = "2.0.0", features = ["openssl"] }
criterion = "0.3"
async-stream = "0.3" env_logger = "0.7"
criterion = { version = "0.5", features = ["html_reports"] } serde_derive = "1.0"
divan = "0.1.8" open-ssl = { version="0.10", package = "openssl" }
env_logger = "0.11" rust-tls = { version="0.18", package = "rustls" }
futures-util = { version = "0.3.17", default-features = false, features = ["alloc"] }
memchr = "2.4"
once_cell = "1.21"
rcgen = "0.13"
regex = "1.3"
rustversion = "1"
rustls-pemfile = "2"
serde = { version = "1", features = ["derive"] }
serde_json = "1.0"
static_assertions = "1"
tls-openssl = { package = "openssl", version = "0.10.55" }
tls-rustls_023 = { package = "rustls", version = "0.23" }
tokio = { version = "1.24.2", features = ["net", "rt", "macros"] }
[lints]
workspace = true
[[example]]
name = "ws"
required-features = ["ws", "rustls-0_23"]
[[example]]
name = "tls_rustls"
required-features = ["http2", "rustls-0_23"]
[[bench]] [[bench]]
name = "response-body-compression" name = "content-length"
harness = false harness = false
required-features = ["compress-brotli", "compress-gzip", "compress-zstd"]
[[bench]] [[bench]]
name = "date-formatting" name = "status-line"
harness = false
[[bench]]
name = "uninit-headers"
harness = false harness = false

View File

@ -1,21 +1,20 @@
# `actix-http` # actix-http
> HTTP types and services for the Actix ecosystem. > HTTP primitives for the Actix ecosystem.
<!-- prettier-ignore-start -->
[![crates.io](https://img.shields.io/crates/v/actix-http?label=latest)](https://crates.io/crates/actix-http) [![crates.io](https://img.shields.io/crates/v/actix-http?label=latest)](https://crates.io/crates/actix-http)
[![Documentation](https://docs.rs/actix-http/badge.svg?version=3.10.0)](https://docs.rs/actix-http/3.10.0) [![Documentation](https://docs.rs/actix-http/badge.svg?version=2.2.1)](https://docs.rs/actix-http/2.2.1)
![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg) ![Apache 2.0 or MIT licensed](https://img.shields.io/crates/l/actix-http)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http.svg) [![Dependency Status](https://deps.rs/crate/actix-http/2.2.1/status.svg)](https://deps.rs/crate/actix-http/2.2.1)
<br /> [![Join the chat at https://gitter.im/actix/actix-web](https://badges.gitter.im/actix/actix-web.svg)](https://gitter.im/actix/actix-web?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![dependency status](https://deps.rs/crate/actix-http/3.10.0/status.svg)](https://deps.rs/crate/actix-http/3.10.0)
[![Download](https://img.shields.io/crates/d/actix-http.svg)](https://crates.io/crates/actix-http)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
<!-- prettier-ignore-end --> ## Documentation & Resources
## Examples - [API Documentation](https://docs.rs/actix-http)
- [Chat on Gitter](https://gitter.im/actix/actix-web)
- Minimum Supported Rust Version (MSRV): 1.42.0
## Example
```rust ```rust
use std::{env, io}; use std::{env, io};
@ -24,7 +23,7 @@ use actix_http::{HttpService, Response};
use actix_server::Server; use actix_server::Server;
use futures_util::future; use futures_util::future;
use http::header::HeaderValue; use http::header::HeaderValue;
use tracing::info; use log::info;
#[actix_rt::main] #[actix_rt::main]
async fn main() -> io::Result<()> { async fn main() -> io::Result<()> {
@ -48,3 +47,18 @@ async fn main() -> io::Result<()> {
.await .await
} }
``` ```
## License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
* MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
at your option.
## Code of Conduct
Contribution to the actix-http crate is organized under the terms of the
Contributor Covenant; the maintainer of actix-http, @fafhrd91, promises to
intervene to uphold that code of conduct.

View File

@ -0,0 +1,291 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use bytes::BytesMut;
// benchmark sending all requests at the same time
fn bench_write_content_length(c: &mut Criterion) {
let mut group = c.benchmark_group("write_content_length");
let sizes = [
0, 1, 11, 83, 101, 653, 1001, 6323, 10001, 56329, 100001, 123456, 98724245,
4294967202,
];
for i in sizes.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_content_length(i, &mut b)
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_content_length(i, &mut b)
})
});
group.bench_with_input(BenchmarkId::new("itoa", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_itoa::write_content_length(i, &mut b)
})
});
}
group.finish();
}
criterion_group!(benches, bench_write_content_length);
criterion_main!(benches);
mod _itoa {
use bytes::{BufMut, BytesMut};
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
if n == 0 {
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
return;
}
let mut buf = itoa::Buffer::new();
bytes.put_slice(b"\r\ncontent-length: ");
bytes.put_slice(buf.format(n).as_bytes());
bytes.put_slice(b"\r\n");
}
}
mod _new {
use bytes::{BufMut, BytesMut};
const DIGITS_START: u8 = b'0';
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
if n == 0 {
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
return;
}
bytes.put_slice(b"\r\ncontent-length: ");
if n < 10 {
bytes.put_u8(DIGITS_START + (n as u8));
} else if n < 100 {
let n = n as u8;
let d10 = n / 10;
let d1 = n % 10;
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 1000 {
let n = n as u16;
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 10_000 {
let n = n as u16;
let d1000 = (n / 1000) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 100_000 {
let n = n as u32;
let d10000 = (n / 10000) as u8;
let d1000 = ((n / 1000) % 10) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d10000);
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 1_000_000 {
let n = n as u32;
let d100000 = (n / 100000) as u8;
let d10000 = ((n / 10000) % 10) as u8;
let d1000 = ((n / 1000) % 10) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100000);
bytes.put_u8(DIGITS_START + d10000);
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else {
write_usize(n, bytes);
}
bytes.put_slice(b"\r\n");
}
fn write_usize(n: usize, bytes: &mut BytesMut) {
let mut n = n;
// 20 chars is max length of a usize (2^64)
// digits will be added to the buffer from lsd to msd
let mut buf = BytesMut::with_capacity(20);
while n > 9 {
// "pop" the least-significant digit
let lsd = (n % 10) as u8;
// remove the lsd from n
n = n / 10;
buf.put_u8(DIGITS_START + lsd);
}
// put msd to result buffer
bytes.put_u8(DIGITS_START + (n as u8));
// put, in reverse (msd to lsd), remaining digits to buffer
for i in (0..buf.len()).rev() {
bytes.put_u8(buf[i]);
}
}
}
mod _original {
use std::{mem, ptr, slice};
use bytes::{BufMut, BytesMut};
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
if n < 10 {
let mut buf: [u8; 21] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'\r', b'\n',
];
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else if n < 100 {
let mut buf: [u8; 22] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'\r', b'\n',
];
let d1 = n << 1;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(18),
2,
);
}
bytes.put_slice(&buf);
} else if n < 1000 {
let mut buf: [u8; 23] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'0', b'\r',
b'\n',
];
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(19),
2,
)
};
// decode last 1
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else {
bytes.put_slice(b"\r\ncontent-length: ");
convert_usize(n, bytes);
}
}
pub(crate) fn convert_usize(mut n: usize, bytes: &mut BytesMut) {
let mut curr: isize = 39;
let mut buf: [u8; 41] = unsafe { mem::MaybeUninit::uninit().assume_init() };
buf[39] = b'\r';
buf[40] = b'\n';
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
// eagerly decode 4 characters at a time
while n >= 10_000 {
let rem = (n % 10_000) as isize;
n /= 10_000;
let d1 = (rem / 100) << 1;
let d2 = (rem % 100) << 1;
curr -= 4;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
ptr::copy_nonoverlapping(
lut_ptr.offset(d2),
buf_ptr.offset(curr + 2),
2,
);
}
}
// if we reach here numbers are <= 9999, so at most 4 chars long
let mut n = n as isize; // possibly reduce 64bit math
// decode 2 more chars, if > 2 chars
if n >= 100 {
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
unsafe {
bytes.extend_from_slice(slice::from_raw_parts(
buf_ptr.offset(curr),
41 - curr as usize,
));
}
}
}
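A quick equivalence check pairs well with this speed comparison. The sketch below is not part of the original bench file; it assumes it sits alongside the three modules above (and would need a harness-enabled target to actually run), and simply asserts that all three writers emit identical header bytes:
```rust
#[cfg(test)]
mod equivalence {
    use bytes::BytesMut;

    #[test]
    fn writers_agree() {
        // values straddling each branch boundary of the `_new` implementation
        let values = [0usize, 9, 10, 99, 100, 999, 1_000, 9_999, 10_000, 99_999, 100_000, 1_048_576];

        for &n in &values {
            let mut a = BytesMut::with_capacity(40);
            let mut b = BytesMut::with_capacity(40);
            let mut c = BytesMut::with_capacity(40);

            crate::_itoa::write_content_length(n, &mut a);
            crate::_new::write_content_length(n, &mut b);
            crate::_original::write_content_length(n, &mut c);

            assert_eq!(a, b, "itoa vs new for {n}");
            assert_eq!(b, c, "new vs original for {n}");
        }
    }
}
```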

View File

@ -1,20 +0,0 @@
use std::time::SystemTime;
use actix_http::header::HttpDate;
use divan::{black_box, AllocProfiler, Bencher};
#[global_allocator]
static ALLOC: AllocProfiler = AllocProfiler::system();
#[divan::bench]
fn date_formatting(b: Bencher<'_, '_>) {
let now = SystemTime::now();
b.bench(|| {
black_box(HttpDate::from(black_box(now)).to_string());
})
}
fn main() {
divan::main();
}

View File

@ -1,88 +0,0 @@
use std::convert::Infallible;
use actix_http::{encoding::Encoder, ContentEncoding, Request, Response, StatusCode};
use actix_service::{fn_service, Service as _};
use criterion::{black_box, criterion_group, criterion_main, Criterion};
static BODY: &[u8] = include_bytes!("../Cargo.toml");
fn compression_responses(c: &mut Criterion) {
let mut group = c.benchmark_group("compression responses");
group.bench_function("identity", |b| {
let rt = actix_rt::Runtime::new().unwrap();
let identity_svc = fn_service(|_: Request| async move {
let mut res = Response::with_body(StatusCode::OK, ());
let body = black_box(Encoder::response(
ContentEncoding::Identity,
res.head_mut(),
BODY,
));
Ok::<_, Infallible>(black_box(res.set_body(black_box(body))))
});
b.iter(|| {
rt.block_on(identity_svc.call(Request::new())).unwrap();
});
});
group.bench_function("gzip", |b| {
let rt = actix_rt::Runtime::new().unwrap();
let identity_svc = fn_service(|_: Request| async move {
let mut res = Response::with_body(StatusCode::OK, ());
let body = black_box(Encoder::response(
ContentEncoding::Gzip,
res.head_mut(),
BODY,
));
Ok::<_, Infallible>(black_box(res.set_body(black_box(body))))
});
b.iter(|| {
rt.block_on(identity_svc.call(Request::new())).unwrap();
});
});
group.bench_function("br", |b| {
let rt = actix_rt::Runtime::new().unwrap();
let identity_svc = fn_service(|_: Request| async move {
let mut res = Response::with_body(StatusCode::OK, ());
let body = black_box(Encoder::response(
ContentEncoding::Brotli,
res.head_mut(),
BODY,
));
Ok::<_, Infallible>(black_box(res.set_body(black_box(body))))
});
b.iter(|| {
rt.block_on(identity_svc.call(Request::new())).unwrap();
});
});
group.bench_function("zstd", |b| {
let rt = actix_rt::Runtime::new().unwrap();
let identity_svc = fn_service(|_: Request| async move {
let mut res = Response::with_body(StatusCode::OK, ());
let body = black_box(Encoder::response(
ContentEncoding::Zstd,
res.head_mut(),
BODY,
));
Ok::<_, Infallible>(black_box(res.set_body(black_box(body))))
});
b.iter(|| {
rt.block_on(identity_svc.call(Request::new())).unwrap();
});
});
group.finish();
}
criterion_group!(benches, compression_responses);
criterion_main!(benches);

View File

@ -0,0 +1,222 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use bytes::BytesMut;
use http::Version;
const CODES: &[u16] = &[201, 303, 404, 515];
fn bench_write_status_line_11(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v1.1");
let version = Version::HTTP_11;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
fn bench_write_status_line_10(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v1.0");
let version = Version::HTTP_10;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
fn bench_write_status_line_09(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v0.9");
let version = Version::HTTP_09;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
criterion_group!(
benches,
bench_write_status_line_11,
bench_write_status_line_10,
bench_write_status_line_09
);
criterion_main!(benches);
mod _naive {
use bytes::{BufMut, BytesMut};
use http::Version;
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
match version {
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
_ => {
// other HTTP version handlers do not use this method
}
}
bytes.put_slice(n.to_string().as_bytes());
}
}
mod _new {
use bytes::{BufMut, BytesMut};
use http::Version;
const DIGITS_START: u8 = b'0';
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
match version {
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
_ => {
// other HTTP version handlers do not use this method
}
}
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
bytes.put_u8(b' ');
}
}
mod _original {
use std::ptr;
use bytes::{BufMut, BytesMut};
use http::Version;
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
pub(crate) const STATUS_LINE_BUF_SIZE: usize = 13;
pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesMut) {
let mut buf: [u8; STATUS_LINE_BUF_SIZE] = *b"HTTP/1.1     "; // 13 bytes: "HTTP/1.1" followed by 5 spaces
match version {
Version::HTTP_2 => buf[5] = b'2',
Version::HTTP_10 => buf[7] = b'0',
Version::HTTP_09 => {
buf[5] = b'0';
buf[7] = b'9';
}
_ => (),
}
let mut curr: isize = 12;
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
let four = n > 999;
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
}
}
bytes.put_slice(&buf);
if four {
bytes.put_u8(b' ');
}
}
}
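A similar sanity check clarifies what each status-line writer actually emits. This is a sketch, not part of the original file: note that `_naive` stops after the digits, while `_new` and `_original` also append the space that separates the status code from the reason phrase:
```rust
#[cfg(test)]
mod output_shape {
    use bytes::BytesMut;
    use http::Version;

    #[test]
    fn three_digit_code() {
        let mut original = BytesMut::with_capacity(35);
        let mut new = BytesMut::with_capacity(35);
        let mut naive = BytesMut::with_capacity(35);

        crate::_original::write_status_line(Version::HTTP_11, 404, &mut original);
        crate::_new::write_status_line(Version::HTTP_11, 404, &mut new);
        crate::_naive::write_status_line(Version::HTTP_11, 404, &mut naive);

        assert_eq!(&original[..], &b"HTTP/1.1 404 "[..]);
        assert_eq!(&new[..], &b"HTTP/1.1 404 "[..]);
        assert_eq!(&naive[..], &b"HTTP/1.1 404"[..]);
    }
}
```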

View File

@ -0,0 +1,137 @@
use criterion::{criterion_group, criterion_main, Criterion};
use bytes::BytesMut;
// A Miri run detects UB, seen on this playground:
// https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=f5d9aa166aa48df8dca05fce2b6c3915
fn bench_header_parsing(c: &mut Criterion) {
c.bench_function("Original (Unsound) [short]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ_SHORT);
_original::parse_headers(&mut buf);
})
});
c.bench_function("New (safe) [short]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ_SHORT);
_new::parse_headers(&mut buf);
})
});
c.bench_function("Original (Unsound) [realistic]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ);
_original::parse_headers(&mut buf);
})
});
c.bench_function("New (safe) [realistic]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ);
_new::parse_headers(&mut buf);
})
});
}
criterion_group!(benches, bench_header_parsing);
criterion_main!(benches);
const MAX_HEADERS: usize = 96;
const EMPTY_HEADER_ARRAY: [httparse::Header<'static>; MAX_HEADERS] =
[httparse::EMPTY_HEADER; MAX_HEADERS];
#[derive(Clone, Copy)]
struct HeaderIndex {
name: (usize, usize),
value: (usize, usize),
}
const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex {
name: (0, 0),
value: (0, 0),
};
const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] =
[EMPTY_HEADER_INDEX; MAX_HEADERS];
impl HeaderIndex {
fn record(
bytes: &[u8],
headers: &[httparse::Header<'_>],
indices: &mut [HeaderIndex],
) {
let bytes_ptr = bytes.as_ptr() as usize;
for (header, indices) in headers.iter().zip(indices.iter_mut()) {
let name_start = header.name.as_ptr() as usize - bytes_ptr;
let name_end = name_start + header.name.len();
indices.name = (name_start, name_end);
let value_start = header.value.as_ptr() as usize - bytes_ptr;
let value_end = value_start + header.value.len();
indices.value = (value_start, value_end);
}
}
}
// test cases taken from:
// https://github.com/seanmonstar/httparse/blob/master/benches/parse.rs
const REQ_SHORT: &'static [u8] = b"\
GET / HTTP/1.0\r\n\
Host: example.com\r\n\
Cookie: session=60; user_id=1\r\n\r\n";
const REQ: &'static [u8] = b"\
GET /wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg HTTP/1.1\r\n\
Host: www.kittyhell.com\r\n\
User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 Pathtraq/0.9\r\n\
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n\
Accept-Language: ja,en-us;q=0.7,en;q=0.3\r\n\
Accept-Encoding: gzip,deflate\r\n\
Accept-Charset: Shift_JIS,utf-8;q=0.7,*;q=0.7\r\n\
Keep-Alive: 115\r\n\
Connection: keep-alive\r\n\
Cookie: wp_ozh_wsa_visits=2; wp_ozh_wsa_visit_lasttime=xxxxxxxxxx; __utma=xxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.x; __utmz=xxxxxxxxx.xxxxxxxxxx.x.x.utmccn=(referral)|utmcsr=reader.livedoor.com|utmcct=/reader/|utmcmd=referral|padding=under256\r\n\r\n";
mod _new {
use super::*;
pub fn parse_headers(src: &mut BytesMut) -> usize {
let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
let mut req = httparse::Request::new(&mut parsed);
match req.parse(src).unwrap() {
httparse::Status::Complete(_len) => {
HeaderIndex::record(src, req.headers, &mut headers);
req.headers.len()
}
_ => unreachable!(),
}
}
}
mod _original {
use super::*;
use std::mem::MaybeUninit;
pub fn parse_headers(src: &mut BytesMut) -> usize {
let mut headers: [HeaderIndex; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() };
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() };
let mut req = httparse::Request::new(&mut parsed);
match req.parse(src).unwrap() {
httparse::Status::Complete(_len) => {
HeaderIndex::record(src, req.headers, &mut headers);
req.headers.len()
}
_ => unreachable!(),
}
}
}
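The comment at the top of this file is the motivation for the safe rewrite; a small smoke test (a sketch, not in the original bench) shows the safe parser producing the expected header counts for the two fixtures above:
```rust
#[cfg(test)]
mod smoke {
    use super::*;

    #[test]
    fn safe_parser_counts_headers() {
        // REQ_SHORT carries Host and Cookie; REQ carries nine headers in total
        assert_eq!(_new::parse_headers(&mut BytesMut::from(REQ_SHORT)), 2);
        assert_eq!(_new::parse_headers(&mut BytesMut::from(REQ)), 9);
    }
}
```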

View File

@ -1,27 +0,0 @@
use actix_http::HttpService;
use actix_server::Server;
use actix_service::map_config;
use actix_web::{dev::AppConfig, get, App, Responder};
#[get("/")]
async fn index() -> impl Responder {
"Hello, world. From Actix Web!"
}
#[tokio::main(flavor = "current_thread")]
async fn main() -> std::io::Result<()> {
Server::build()
.bind("hello-world", "127.0.0.1:8080", || {
// construct actix-web app
let app = App::new().service(index);
HttpService::build()
// pass the app to service builder
// map_config is used to map App's configuration to ServiceBuilder
// h1 will configure server to only use HTTP/1.1
.h1(map_config(app, |_| AppConfig::default()))
.tcp()
})?
.run()
.await
}

View File

@ -1,27 +0,0 @@
use std::{convert::Infallible, io, time::Duration};
use actix_http::{HttpService, Request, Response, StatusCode};
use actix_server::Server;
use once_cell::sync::Lazy;
static STR: Lazy<String> = Lazy::new(|| "HELLO WORLD ".repeat(20));
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("dispatcher-benchmark", ("127.0.0.1", 8080), || {
HttpService::build()
.client_request_timeout(Duration::from_secs(1))
.finish(|_: Request| async move {
let mut res = Response::build(StatusCode::OK);
Ok::<_, Infallible>(res.body(&**STR))
})
.tcp()
})?
// limiting number of workers so that bench client is not sharing as many resources
.workers(4)
.run()
.await
}

View File

@ -1,37 +1,36 @@
- use std::{io, time::Duration};
- use actix_http::{Error, HttpService, Request, Response, StatusCode};
- use actix_server::Server;
- use bytes::BytesMut;
- use futures_util::StreamExt as _;
- use http::header::HeaderValue;
- use tracing::info;
- #[actix_rt::main]
- async fn main() -> io::Result<()> {
- env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
- Server::build()
- .bind("echo", ("127.0.0.1", 8080), || {
- HttpService::build()
- .client_request_timeout(Duration::from_secs(1))
- .client_disconnect_timeout(Duration::from_secs(1))
- // handles HTTP/1.1 and HTTP/2
- .finish(|mut req: Request| async move {
- let mut body = BytesMut::new();
- while let Some(item) = req.payload().next().await {
- body.extend_from_slice(&item?);
- }
- info!("request body: {body:?}");
- let res = Response::build(StatusCode::OK)
- .insert_header(("x-head", HeaderValue::from_static("dummy value!")))
- .body(body);
- Ok::<_, Error>(res)
- })
- .tcp() // No TLS
- })?
- .run()
- .await
+ use std::{env, io};
+ use actix_http::{Error, HttpService, Request, Response};
+ use actix_server::Server;
+ use bytes::BytesMut;
+ use futures_util::StreamExt;
+ use http::header::HeaderValue;
+ use log::info;
+ #[actix_rt::main]
+ async fn main() -> io::Result<()> {
+ env::set_var("RUST_LOG", "echo=info");
+ env_logger::init();
+ Server::build()
+ .bind("echo", "127.0.0.1:8080", || {
+ HttpService::build()
+ .client_timeout(1000)
+ .client_disconnect(1000)
+ .finish(|mut req: Request| async move {
+ let mut body = BytesMut::new();
+ while let Some(item) = req.payload().next().await {
+ body.extend_from_slice(&item?);
+ }
+ info!("request body: {:?}", body);
+ Ok::<_, Error>(
+ Response::Ok()
+ .header("x-head", HeaderValue::from_static("dummy value!"))
+ .body(body),
+ )
+ })
+ .tcp()
+ })?
+ .run()
+ .await

View File

@ -1,34 +1,32 @@
- use std::io;
- use actix_http::{
- body::{BodyStream, MessageBody},
- header, Error, HttpMessage, HttpService, Request, Response, StatusCode,
- };
- async fn handle_request(mut req: Request) -> Result<Response<impl MessageBody>, Error> {
- let mut res = Response::build(StatusCode::OK);
- if let Some(ct) = req.headers().get(header::CONTENT_TYPE) {
- res.insert_header((header::CONTENT_TYPE, ct));
- }
- // echo request payload stream as (chunked) response body
- let res = res.message_body(BodyStream::new(req.payload().take()))?;
- Ok(res)
- }
- #[actix_rt::main]
- async fn main() -> io::Result<()> {
- env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
- actix_server::Server::build()
- .bind("echo", ("127.0.0.1", 8080), || {
- HttpService::build()
- // handles HTTP/1.1 only
- .h1(handle_request)
- // No TLS
- .tcp()
- })?
- .run()
- .await
+ use std::{env, io};
+ use actix_http::http::HeaderValue;
+ use actix_http::{Error, HttpService, Request, Response};
+ use actix_server::Server;
+ use bytes::BytesMut;
+ use futures_util::StreamExt;
+ use log::info;
+ async fn handle_request(mut req: Request) -> Result<Response, Error> {
+ let mut body = BytesMut::new();
+ while let Some(item) = req.payload().next().await {
+ body.extend_from_slice(&item?)
+ }
+ info!("request body: {:?}", body);
+ Ok(Response::Ok()
+ .header("x-head", HeaderValue::from_static("dummy value!"))
+ .body(body))
+ }
+ #[actix_rt::main]
+ async fn main() -> io::Result<()> {
+ env::set_var("RUST_LOG", "echo=info");
+ env_logger::init();
+ Server::build()
+ .bind("echo", "127.0.0.1:8080", || {
+ HttpService::build().finish(handle_request).tcp()
+ })?
+ .run()
+ .await

View File

@ -1,34 +0,0 @@
//! An example that supports automatic selection of plaintext h1/h2c connections.
//!
//! Notably, both the following commands will work.
//! ```console
//! $ curl --http1.1 'http://localhost:8080/'
//! $ curl --http2-prior-knowledge 'http://localhost:8080/'
//! ```
use std::{convert::Infallible, io};
use actix_http::{body::BodyStream, HttpService, Request, Response, StatusCode};
use actix_server::Server;
#[tokio::main(flavor = "current_thread")]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("h2c-detect", ("127.0.0.1", 8080), || {
HttpService::build()
.finish(|_req: Request| async move {
Ok::<_, Infallible>(Response::build(StatusCode::OK).body(BodyStream::new(
futures_util::stream::iter([
Ok::<_, String>("123".into()),
Err("wertyuikmnbvcxdfty6t".to_owned()),
]),
)))
})
.tcp_auto_h2c()
})?
.workers(2)
.run()
.await
}

View File

@ -1,25 +0,0 @@
use std::{convert::Infallible, io};
use actix_http::{HttpService, Request, Response, StatusCode};
use actix_server::Server;
use once_cell::sync::Lazy;
static STR: Lazy<String> = Lazy::new(|| "HELLO WORLD ".repeat(100));
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("h2spec", ("127.0.0.1", 8080), || {
HttpService::build()
.h2(|_: Request| async move {
let mut res = Response::build(StatusCode::OK);
Ok::<_, Infallible>(res.body(&**STR))
})
.tcp()
})?
.workers(4)
.run()
.await
}

View File

@ -1,31 +1,26 @@
use std::{convert::Infallible, io, time::Duration}; use std::{env, io};
use actix_http::{header::HeaderValue, HttpService, Request, Response, StatusCode}; use actix_http::{HttpService, Response};
use actix_server::Server; use actix_server::Server;
use tracing::info; use futures_util::future;
use http::header::HeaderValue;
use log::info;
#[actix_rt::main] #[actix_rt::main]
async fn main() -> io::Result<()> { async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); env::set_var("RUST_LOG", "hello_world=info");
env_logger::init();
Server::build() Server::build()
.bind("hello-world", ("127.0.0.1", 8080), || { .bind("hello-world", "127.0.0.1:8080", || {
HttpService::build() HttpService::build()
.client_request_timeout(Duration::from_secs(1)) .client_timeout(1000)
.client_disconnect_timeout(Duration::from_secs(1)) .client_disconnect(1000)
.on_connect_ext(|_, ext| { .finish(|_req| {
ext.insert(42u32); info!("{:?}", _req);
}) let mut res = Response::Ok();
.finish(|req: Request| async move { res.header("x-head", HeaderValue::from_static("dummy value!"));
info!("{req:?}"); future::ok::<_, ()>(res.body("Hello world!"))
let mut res = Response::build(StatusCode::OK);
res.insert_header(("x-head", HeaderValue::from_static("dummy value!")));
let forty_two = req.conn_data::<u32>().unwrap().to_string();
res.insert_header(("x-forty-two", HeaderValue::from_str(&forty_two).unwrap()));
Ok::<_, Infallible>(res.body("Hello world!"))
}) })
.tcp() .tcp()
})? })?

View File

@ -1,41 +0,0 @@
//! Example showing response body (chunked) stream erroring.
//!
//! Test using `nc` or `curl`.
//! ```sh
//! $ curl -vN 127.0.0.1:8080
//! $ echo 'GET / HTTP/1.1\n\n' | nc 127.0.0.1 8080
//! ```
use std::{convert::Infallible, io, time::Duration};
use actix_http::{body::BodyStream, HttpService, Response};
use actix_server::Server;
use async_stream::stream;
use bytes::Bytes;
use tracing::info;
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("streaming-error", ("127.0.0.1", 8080), || {
HttpService::build()
.finish(|req| async move {
info!("{req:?}");
let res = Response::ok();
Ok::<_, Infallible>(res.set_body(BodyStream::new(stream! {
yield Ok(Bytes::from("123"));
yield Ok(Bytes::from("456"));
actix_rt::time::sleep(Duration::from_secs(1)).await;
yield Err(io::Error::new(io::ErrorKind::Other, "abc"));
})))
})
.tcp()
})?
.run()
.await
}

View File

@ -1,76 +0,0 @@
//! Demonstrates TLS configuration (via Rustls) for HTTP/1.1 and HTTP/2 connections.
//!
//! Test using cURL:
//!
//! ```console
//! $ curl --insecure https://127.0.0.1:8443
//! Hello World!
//! Protocol: HTTP/2.0
//!
//! $ curl --insecure --http1.1 https://127.0.0.1:8443
//! Hello World!
//! Protocol: HTTP/1.1
//! ```
extern crate tls_rustls_023 as rustls;
use std::io;
use actix_http::{Error, HttpService, Request, Response};
use actix_utils::future::ok;
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
tracing::info!("starting HTTP server at https://127.0.0.1:8443");
actix_server::Server::build()
.bind("echo", ("127.0.0.1", 8443), || {
HttpService::build()
.finish(|req: Request| {
let body = format!(
"Hello World!\n\
Protocol: {:?}",
req.head().version
);
ok::<_, Error>(Response::ok().set_body(body))
})
.rustls_0_23(rustls_config())
})?
.run()
.await
}
fn rustls_config() -> rustls::ServerConfig {
let rcgen::CertifiedKey { cert, key_pair } =
rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap();
let cert_file = cert.pem();
let key_file = key_pair.serialize_pem();
let cert_file = &mut io::BufReader::new(cert_file.as_bytes());
let key_file = &mut io::BufReader::new(key_file.as_bytes());
let cert_chain = rustls_pemfile::certs(cert_file)
.collect::<Result<Vec<_>, _>>()
.unwrap();
let mut keys = rustls_pemfile::pkcs8_private_keys(key_file)
.collect::<Result<Vec<_>, _>>()
.unwrap();
let mut config = rustls::ServerConfig::builder()
.with_no_client_auth()
.with_single_cert(
cert_chain,
rustls::pki_types::PrivateKeyDer::Pkcs8(keys.remove(0)),
)
.unwrap();
const H1_ALPN: &[u8] = b"http/1.1";
const H2_ALPN: &[u8] = b"h2";
config.alpn_protocols.push(H2_ALPN.to_vec());
config.alpn_protocols.push(H1_ALPN.to_vec());
config
}

View File

@ -1,114 +0,0 @@
//! Sets up a WebSocket server over TCP and TLS.
//! Sends a heartbeat message every 4 seconds but does not respond to any incoming frames.
extern crate tls_rustls_023 as rustls;
use std::{
io,
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use actix_http::{body::BodyStream, error::Error, ws, HttpService, Request, Response};
use actix_rt::time::{interval, Interval};
use actix_server::Server;
use bytes::{Bytes, BytesMut};
use bytestring::ByteString;
use futures_core::{ready, Stream};
use tokio_util::codec::Encoder;
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("tcp", ("127.0.0.1", 8080), || {
HttpService::build().h1(handler).tcp()
})?
.bind("tls", ("127.0.0.1", 8443), || {
HttpService::build()
.finish(handler)
.rustls_0_23(tls_config())
})?
.run()
.await
}
async fn handler(req: Request) -> Result<Response<BodyStream<Heartbeat>>, Error> {
tracing::info!("handshaking");
let mut res = ws::handshake(req.head())?;
// handshake will always fail under HTTP/2
tracing::info!("responding");
res.message_body(BodyStream::new(Heartbeat::new(ws::Codec::new())))
}
struct Heartbeat {
codec: ws::Codec,
interval: Interval,
}
impl Heartbeat {
fn new(codec: ws::Codec) -> Self {
Self {
codec,
interval: interval(Duration::from_secs(4)),
}
}
}
impl Stream for Heartbeat {
type Item = Result<Bytes, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
tracing::trace!("poll");
ready!(self.as_mut().interval.poll_tick(cx));
let mut buffer = BytesMut::new();
self.as_mut()
.codec
.encode(
ws::Message::Text(ByteString::from_static("hello world")),
&mut buffer,
)
.unwrap();
Poll::Ready(Some(Ok(buffer.freeze())))
}
}
fn tls_config() -> rustls::ServerConfig {
use std::io::BufReader;
use rustls_pemfile::{certs, pkcs8_private_keys};
let rcgen::CertifiedKey { cert, key_pair } =
rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap();
let cert_file = cert.pem();
let key_file = key_pair.serialize_pem();
let cert_file = &mut BufReader::new(cert_file.as_bytes());
let key_file = &mut BufReader::new(key_file.as_bytes());
let cert_chain = certs(cert_file).collect::<Result<Vec<_>, _>>().unwrap();
let mut keys = pkcs8_private_keys(key_file)
.collect::<Result<Vec<_>, _>>()
.unwrap();
let mut config = rustls::ServerConfig::builder()
.with_no_client_auth()
.with_single_cert(
cert_chain,
rustls::pki_types::PrivateKeyDer::Pkcs8(keys.remove(0)),
)
.unwrap();
config.alpn_protocols.push(b"http/1.1".to_vec());
config.alpn_protocols.push(b"h2".to_vec());
config
}

5
actix-http/rustfmt.toml Normal file
View File

@ -0,0 +1,5 @@
max_width = 89
reorder_imports = true
#wrap_comments = true
#fn_args_density = "Compressed"
#use_small_heuristics = false

723
actix-http/src/body.rs Normal file
View File

@ -0,0 +1,723 @@
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, mem};
use bytes::{Bytes, BytesMut};
use futures_core::Stream;
use futures_util::ready;
use pin_project::pin_project;
use crate::error::Error;
#[derive(Debug, PartialEq, Copy, Clone)]
/// Body size hint
pub enum BodySize {
None,
Empty,
Sized(u64),
Stream,
}
impl BodySize {
pub fn is_eof(&self) -> bool {
matches!(self, BodySize::None | BodySize::Empty | BodySize::Sized(0))
}
}
/// Types that implement this trait can be streamed to a peer.
pub trait MessageBody {
fn size(&self) -> BodySize;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>>;
downcast_get_type_id!();
}
downcast!(MessageBody);
impl MessageBody for () {
fn size(&self) -> BodySize {
BodySize::Empty
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
Poll::Ready(None)
}
}
impl<T: MessageBody + Unpin> MessageBody for Box<T> {
fn size(&self) -> BodySize {
self.as_ref().size()
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
Pin::new(self.get_mut().as_mut()).poll_next(cx)
}
}
#[pin_project(project = ResponseBodyProj)]
pub enum ResponseBody<B> {
Body(#[pin] B),
Other(#[pin] Body),
}
impl ResponseBody<Body> {
pub fn into_body<B>(self) -> ResponseBody<B> {
match self {
ResponseBody::Body(b) => ResponseBody::Other(b),
ResponseBody::Other(b) => ResponseBody::Other(b),
}
}
}
impl<B> ResponseBody<B> {
pub fn take_body(&mut self) -> ResponseBody<B> {
std::mem::replace(self, ResponseBody::Other(Body::None))
}
}
impl<B: MessageBody> ResponseBody<B> {
pub fn as_ref(&self) -> Option<&B> {
if let ResponseBody::Body(ref b) = self {
Some(b)
} else {
None
}
}
}
impl<B: MessageBody> MessageBody for ResponseBody<B> {
fn size(&self) -> BodySize {
match self {
ResponseBody::Body(ref body) => body.size(),
ResponseBody::Other(ref body) => body.size(),
}
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
match self.project() {
ResponseBodyProj::Body(body) => body.poll_next(cx),
ResponseBodyProj::Other(body) => body.poll_next(cx),
}
}
}
impl<B: MessageBody> Stream for ResponseBody<B> {
type Item = Result<Bytes, Error>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
match self.project() {
ResponseBodyProj::Body(body) => body.poll_next(cx),
ResponseBodyProj::Other(body) => body.poll_next(cx),
}
}
}
#[pin_project(project = BodyProj)]
/// Represents various types of http message body.
pub enum Body {
/// Empty response. `Content-Length` header is not set.
None,
/// Zero sized response body. `Content-Length` header is set to `0`.
Empty,
/// Specific response body.
Bytes(Bytes),
/// Generic message body.
Message(Box<dyn MessageBody + Unpin>),
}
impl Body {
/// Create body from slice (copy)
pub fn from_slice(s: &[u8]) -> Body {
Body::Bytes(Bytes::copy_from_slice(s))
}
/// Create body from generic message body.
pub fn from_message<B: MessageBody + Unpin + 'static>(body: B) -> Body {
Body::Message(Box::new(body))
}
}
impl MessageBody for Body {
fn size(&self) -> BodySize {
match self {
Body::None => BodySize::None,
Body::Empty => BodySize::Empty,
Body::Bytes(ref bin) => BodySize::Sized(bin.len() as u64),
Body::Message(ref body) => body.size(),
}
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
match self.project() {
BodyProj::None => Poll::Ready(None),
BodyProj::Empty => Poll::Ready(None),
BodyProj::Bytes(ref mut bin) => {
let len = bin.len();
if len == 0 {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(bin))))
}
}
BodyProj::Message(ref mut body) => Pin::new(body.as_mut()).poll_next(cx),
}
}
}
impl PartialEq for Body {
fn eq(&self, other: &Body) -> bool {
match *self {
Body::None => matches!(*other, Body::None),
Body::Empty => matches!(*other, Body::Empty),
Body::Bytes(ref b) => match *other {
Body::Bytes(ref b2) => b == b2,
_ => false,
},
Body::Message(_) => false,
}
}
}
impl fmt::Debug for Body {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Body::None => write!(f, "Body::None"),
Body::Empty => write!(f, "Body::Empty"),
Body::Bytes(ref b) => write!(f, "Body::Bytes({:?})", b),
Body::Message(_) => write!(f, "Body::Message(_)"),
}
}
}
impl From<&'static str> for Body {
fn from(s: &'static str) -> Body {
Body::Bytes(Bytes::from_static(s.as_ref()))
}
}
impl From<&'static [u8]> for Body {
fn from(s: &'static [u8]) -> Body {
Body::Bytes(Bytes::from_static(s))
}
}
impl From<Vec<u8>> for Body {
fn from(vec: Vec<u8>) -> Body {
Body::Bytes(Bytes::from(vec))
}
}
impl From<String> for Body {
fn from(s: String) -> Body {
s.into_bytes().into()
}
}
impl<'a> From<&'a String> for Body {
fn from(s: &'a String) -> Body {
Body::Bytes(Bytes::copy_from_slice(AsRef::<[u8]>::as_ref(&s)))
}
}
impl From<Bytes> for Body {
fn from(s: Bytes) -> Body {
Body::Bytes(s)
}
}
impl From<BytesMut> for Body {
fn from(s: BytesMut) -> Body {
Body::Bytes(s.freeze())
}
}
impl From<serde_json::Value> for Body {
fn from(v: serde_json::Value) -> Body {
Body::Bytes(v.to_string().into())
}
}
impl<S> From<SizedStream<S>> for Body
where
S: Stream<Item = Result<Bytes, Error>> + Unpin + 'static,
{
fn from(s: SizedStream<S>) -> Body {
Body::from_message(s)
}
}
impl<S, E> From<BodyStream<S, E>> for Body
where
S: Stream<Item = Result<Bytes, E>> + Unpin + 'static,
E: Into<Error> + 'static,
{
fn from(s: BodyStream<S, E>) -> Body {
Body::from_message(s)
}
}
impl MessageBody for Bytes {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()))))
}
}
}
impl MessageBody for BytesMut {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()).freeze())))
}
}
}
impl MessageBody for &'static str {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from_static(
mem::take(self.get_mut()).as_ref(),
))))
}
}
}
impl MessageBody for Vec<u8> {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from(mem::take(self.get_mut())))))
}
}
}
impl MessageBody for String {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from(
mem::take(self.get_mut()).into_bytes(),
))))
}
}
}
/// Type representing a streaming body.
/// The response does not contain a `Content-Length` header; an appropriate transfer encoding is used instead.
#[pin_project]
pub struct BodyStream<S: Unpin, E> {
#[pin]
stream: S,
_t: PhantomData<E>,
}
impl<S, E> BodyStream<S, E>
where
S: Stream<Item = Result<Bytes, E>> + Unpin,
E: Into<Error>,
{
pub fn new(stream: S) -> Self {
BodyStream {
stream,
_t: PhantomData,
}
}
}
impl<S, E> MessageBody for BodyStream<S, E>
where
S: Stream<Item = Result<Bytes, E>> + Unpin,
E: Into<Error>,
{
fn size(&self) -> BodySize {
BodySize::Stream
}
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped so that [`BodyStream`]'s transmission is not ended
/// by a zero-length chunk; polling instead proceeds until the underlying
/// [`Stream`] ends.
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
let mut stream = self.project().stream;
loop {
let stream = stream.as_mut();
return Poll::Ready(match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
opt => opt.map(|res| res.map_err(Into::into)),
});
}
}
}
/// Type representing a streaming body. This body implementation should be used
/// when the total size of the stream is known. Data is sent as-is, without a transfer encoding.
#[pin_project]
pub struct SizedStream<S: Unpin> {
size: u64,
#[pin]
stream: S,
}
impl<S> SizedStream<S>
where
S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
pub fn new(size: u64, stream: S) -> Self {
SizedStream { size, stream }
}
}
impl<S> MessageBody for SizedStream<S>
where
S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
fn size(&self) -> BodySize {
BodySize::Sized(self.size as u64)
}
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped so that [`SizedStream`]'s transmission is not ended
/// by a zero-length chunk; polling instead proceeds until the underlying
/// [`Stream`] ends.
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
let mut stream: Pin<&mut S> = self.project().stream;
loop {
let stream = stream.as_mut();
return Poll::Ready(match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
val => val,
});
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures_util::future::poll_fn;
use futures_util::pin_mut;
use futures_util::stream;
impl Body {
pub(crate) fn get_ref(&self) -> &[u8] {
match *self {
Body::Bytes(ref bin) => &bin,
_ => panic!(),
}
}
}
impl ResponseBody<Body> {
pub(crate) fn get_ref(&self) -> &[u8] {
match *self {
ResponseBody::Body(ref b) => b.get_ref(),
ResponseBody::Other(ref b) => b.get_ref(),
}
}
}
#[actix_rt::test]
async fn test_static_str() {
assert_eq!(Body::from("").size(), BodySize::Sized(0));
assert_eq!(Body::from("test").size(), BodySize::Sized(4));
assert_eq!(Body::from("test").get_ref(), b"test");
assert_eq!("test".size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| Pin::new(&mut "test").poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_static_bytes() {
assert_eq!(Body::from(b"test".as_ref()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b"test".as_ref()).get_ref(), b"test");
assert_eq!(
Body::from_slice(b"test".as_ref()).size(),
BodySize::Sized(4)
);
assert_eq!(Body::from_slice(b"test".as_ref()).get_ref(), b"test");
let sb = Bytes::from(&b"test"[..]);
pin_mut!(sb);
assert_eq!(sb.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| sb.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_vec() {
assert_eq!(Body::from(Vec::from("test")).size(), BodySize::Sized(4));
assert_eq!(Body::from(Vec::from("test")).get_ref(), b"test");
let test_vec = Vec::from("test");
pin_mut!(test_vec);
assert_eq!(test_vec.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| test_vec.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_bytes() {
let b = Bytes::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_bytes_mut() {
let b = BytesMut::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_string() {
let b = "test".to_owned();
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
assert_eq!(Body::from(&b).size(), BodySize::Sized(4));
assert_eq!(Body::from(&b).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_unit() {
assert_eq!(().size(), BodySize::Empty);
assert!(poll_fn(|cx| Pin::new(&mut ()).poll_next(cx))
.await
.is_none());
}
#[actix_rt::test]
async fn test_box() {
let val = Box::new(());
pin_mut!(val);
assert_eq!(val.size(), BodySize::Empty);
assert!(poll_fn(|cx| val.as_mut().poll_next(cx)).await.is_none());
}
#[actix_rt::test]
async fn test_body_eq() {
assert!(
Body::Bytes(Bytes::from_static(b"1"))
== Body::Bytes(Bytes::from_static(b"1"))
);
assert!(Body::Bytes(Bytes::from_static(b"1")) != Body::None);
}
#[actix_rt::test]
async fn test_body_debug() {
assert!(format!("{:?}", Body::None).contains("Body::None"));
assert!(format!("{:?}", Body::Empty).contains("Body::Empty"));
assert!(format!("{:?}", Body::Bytes(Bytes::from_static(b"1"))).contains('1'));
}
#[actix_rt::test]
async fn test_serde_json() {
use serde_json::json;
assert_eq!(
Body::from(serde_json::Value::String("test".into())).size(),
BodySize::Sized(6)
);
assert_eq!(
Body::from(json!({"test-key":"test-value"})).size(),
BodySize::Sized(25)
);
}
mod body_stream {
use super::*;
//use futures::task::noop_waker;
//use futures::stream::once;
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = BodyStream::new(stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok(Bytes::from(v)) as Result<Bytes, ()>),
));
pin_mut!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
/* Now it does not compile as it should
#[actix_rt::test]
async fn move_pinned_pointer() {
let (sender, receiver) = futures::channel::oneshot::channel();
let mut body_stream = Ok(BodyStream::new(once(async {
let x = Box::new(0i32);
let y = &x;
receiver.await.unwrap();
let _z = **y;
Ok::<_, ()>(Bytes::new())
})));
let waker = noop_waker();
let mut context = Context::from_waker(&waker);
pin_mut!(body_stream);
let _ = body_stream.as_mut().unwrap().poll_next(&mut context);
sender.send(()).unwrap();
let _ = std::mem::replace(&mut body_stream, Err([0; 32])).unwrap().poll_next(&mut context);
}*/
}
mod sized_stream {
use super::*;
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = SizedStream::new(
2,
stream::iter(["1", "", "2"].iter().map(|&v| Ok(Bytes::from(v)))),
);
pin_mut!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
}
#[actix_rt::test]
async fn test_body_casting() {
let mut body = String::from("hello cast");
let resp_body: &mut dyn MessageBody = &mut body;
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast");
let body = &mut resp_body.downcast_mut::<String>().unwrap();
body.push('!');
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast!");
let not_body = resp_body.downcast_ref::<()>();
assert!(not_body.is_none());
}
}

View File

@ -1,214 +0,0 @@
use std::{
error::Error as StdError,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use futures_core::{ready, Stream};
use pin_project_lite::pin_project;
use super::{BodySize, MessageBody};
pin_project! {
/// Streaming response wrapper.
///
/// Response does not contain `Content-Length` header and appropriate transfer encoding is used.
pub struct BodyStream<S> {
#[pin]
stream: S,
}
}
// TODO: from_infallible method
impl<S, E> BodyStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Box<dyn StdError>> + 'static,
{
#[inline]
pub fn new(stream: S) -> Self {
BodyStream { stream }
}
}
impl<S, E> MessageBody for BodyStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Box<dyn StdError>> + 'static,
{
type Error = E;
#[inline]
fn size(&self) -> BodySize {
BodySize::Stream
}
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped so that [`BodyStream`]'s transmission is not ended on a
/// zero-length chunk; polling instead proceeds until the underlying [`Stream`] ends.
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
loop {
let stream = self.as_mut().project().stream;
let chunk = match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
opt => opt,
};
return Poll::Ready(chunk);
}
}
}
#[cfg(test)]
mod tests {
use std::{convert::Infallible, time::Duration};
use actix_rt::{
pin,
time::{sleep, Sleep},
};
use actix_utils::future::poll_fn;
use derive_more::{Display, Error};
use futures_core::ready;
use futures_util::{stream, FutureExt as _};
use pin_project_lite::pin_project;
use static_assertions::{assert_impl_all, assert_not_impl_any};
use super::*;
use crate::body::to_bytes;
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, crate::Error>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Repeat<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);
assert_not_impl_any!(BodyStream<stream::Empty<Bytes>>: MessageBody);
assert_not_impl_any!(BodyStream<stream::Repeat<Bytes>>: MessageBody);
// crate::Error is not Clone
assert_not_impl_any!(BodyStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = BodyStream::new(stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
));
pin!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
#[actix_rt::test]
async fn read_to_bytes() {
let body = BodyStream::new(stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
));
assert_eq!(to_bytes(body).await.ok(), Some(Bytes::from("12")));
}
#[derive(Debug, Display, Error)]
#[display("stream error")]
struct StreamErr;
#[actix_rt::test]
async fn stream_immediate_error() {
let body = BodyStream::new(stream::once(async { Err(StreamErr) }));
assert!(matches!(to_bytes(body).await, Err(StreamErr)));
}
#[actix_rt::test]
async fn stream_string_error() {
// `&'static str` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = BodyStream::new(stream::once(async { Err("stringy error") }));
assert!(matches!(to_bytes(body).await, Err("stringy error")));
}
#[actix_rt::test]
async fn stream_boxed_error() {
// `Box<dyn Error>` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = BodyStream::new(stream::once(async {
Err(Box::<dyn StdError>::from("stringy error"))
}));
assert_eq!(
to_bytes(body).await.unwrap_err().to_string(),
"stringy error"
);
}
#[actix_rt::test]
async fn stream_delayed_error() {
let body = BodyStream::new(stream::iter(vec![Ok(Bytes::from("1")), Err(StreamErr)]));
assert!(matches!(to_bytes(body).await, Err(StreamErr)));
pin_project! {
#[derive(Debug)]
#[project = TimeDelayStreamProj]
enum TimeDelayStream {
Start,
Sleep { delay: Pin<Box<Sleep>> },
Done,
}
}
impl Stream for TimeDelayStream {
type Item = Result<Bytes, StreamErr>;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
match self.as_mut().get_mut() {
TimeDelayStream::Start => {
let sleep = sleep(Duration::from_millis(1));
self.as_mut().set(TimeDelayStream::Sleep {
delay: Box::pin(sleep),
});
cx.waker().wake_by_ref();
Poll::Pending
}
TimeDelayStream::Sleep { ref mut delay } => {
ready!(delay.poll_unpin(cx));
self.set(TimeDelayStream::Done);
cx.waker().wake_by_ref();
Poll::Pending
}
TimeDelayStream::Done => Poll::Ready(Some(Err(StreamErr))),
}
}
}
let body = BodyStream::new(TimeDelayStream::Start);
assert!(matches!(to_bytes(body).await, Err(StreamErr)));
}
}

View File

@ -1,121 +0,0 @@
use std::{
error::Error as StdError,
fmt,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use super::{BodySize, MessageBody, MessageBodyMapErr};
use crate::body;
/// A boxed message body with boxed errors.
#[derive(Debug)]
pub struct BoxBody(BoxBodyInner);
enum BoxBodyInner {
None(body::None),
Bytes(Bytes),
Stream(Pin<Box<dyn MessageBody<Error = Box<dyn StdError>>>>),
}
impl fmt::Debug for BoxBodyInner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::None(arg0) => f.debug_tuple("None").field(arg0).finish(),
Self::Bytes(arg0) => f.debug_tuple("Bytes").field(arg0).finish(),
Self::Stream(_) => f.debug_tuple("Stream").field(&"dyn MessageBody").finish(),
}
}
}
impl BoxBody {
/// Boxes body type, erasing type information.
///
/// If the body type to wrap is unknown or generic it is better to use [`MessageBody::boxed`] to
/// avoid double boxing.
#[inline]
pub fn new<B>(body: B) -> Self
where
B: MessageBody + 'static,
{
match body.size() {
BodySize::None => Self(BoxBodyInner::None(body::None)),
_ => match body.try_into_bytes() {
Ok(bytes) => Self(BoxBodyInner::Bytes(bytes)),
Err(body) => {
let body = MessageBodyMapErr::new(body, Into::into);
Self(BoxBodyInner::Stream(Box::pin(body)))
}
},
}
}
/// Returns a mutable pinned reference to the inner message body type.
#[inline]
pub fn as_pin_mut(&mut self) -> Pin<&mut Self> {
Pin::new(self)
}
}
impl MessageBody for BoxBody {
type Error = Box<dyn StdError>;
#[inline]
fn size(&self) -> BodySize {
match &self.0 {
BoxBodyInner::None(none) => none.size(),
BoxBodyInner::Bytes(bytes) => bytes.size(),
BoxBodyInner::Stream(stream) => stream.size(),
}
}
#[inline]
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match &mut self.0 {
BoxBodyInner::None(body) => Pin::new(body).poll_next(cx).map_err(|err| match err {}),
BoxBodyInner::Bytes(body) => Pin::new(body).poll_next(cx).map_err(|err| match err {}),
BoxBodyInner::Stream(body) => Pin::new(body).poll_next(cx),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
match self.0 {
BoxBodyInner::None(body) => Ok(body.try_into_bytes().unwrap()),
BoxBodyInner::Bytes(body) => Ok(body.try_into_bytes().unwrap()),
_ => Err(self),
}
}
#[inline]
fn boxed(self) -> BoxBody {
self
}
}
#[cfg(test)]
mod tests {
use static_assertions::{assert_impl_all, assert_not_impl_any};
use super::*;
use crate::body::to_bytes;
assert_impl_all!(BoxBody: fmt::Debug, MessageBody, Unpin);
assert_not_impl_any!(BoxBody: Send, Sync);
#[actix_rt::test]
async fn nested_boxed_body() {
let body = Bytes::from_static(&[1, 2, 3]);
let boxed_body = BoxBody::new(BoxBody::new(body));
assert_eq!(
to_bytes(boxed_body).await.unwrap(),
Bytes::from(vec![1, 2, 3]),
);
}
}
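The constructor docs above warn about double boxing; a minimal sketch (not from the crate itself) of the preferred pattern for erasing a generic body type:
```rust
// Assumes `actix_http::body::{BoxBody, MessageBody}` are in scope.
fn erase<B>(body: B) -> BoxBody
where
    B: MessageBody + 'static,
{
    // `MessageBody::boxed` is a no-op when `B` is already `BoxBody`, so this
    // never double-boxes, whereas `BoxBody::new(body)` would wrap it again.
    body.boxed()
}
```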

View File

@ -1,122 +0,0 @@
use std::{
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use pin_project_lite::pin_project;
use super::{BodySize, BoxBody, MessageBody};
use crate::Error;
pin_project! {
/// An "either" type specialized for body types.
///
/// It is common, in middleware especially, to conditionally return an inner service's unknown/
/// generic body `B` type or return early with a new response. This type's "right" variant
/// defaults to `BoxBody` since error responses are the common case.
///
/// For example, middleware will often have `type Response = ServiceResponse<EitherBody<B>>`.
/// This means that the inner service's response body type maps to the `Left` variant and the
/// middleware's own error responses use the default `Right` variant of `BoxBody`. Of course,
/// there's no reason it couldn't use `EitherBody<B, String>` instead if its alternative
/// responses have a known type.
#[project = EitherBodyProj]
#[derive(Debug, Clone)]
pub enum EitherBody<L, R = BoxBody> {
/// A body of type `L`.
Left { #[pin] body: L },
/// A body of type `R`.
Right { #[pin] body: R },
}
}
impl<L> EitherBody<L, BoxBody> {
/// Creates new `EitherBody` left variant with a boxed right variant.
///
/// If the expected `R` type will be inferred and is not `BoxBody` then use the
/// [`left`](Self::left) constructor instead.
#[inline]
pub fn new(body: L) -> Self {
Self::Left { body }
}
}
impl<L, R> EitherBody<L, R> {
/// Creates new `EitherBody` using left variant.
#[inline]
pub fn left(body: L) -> Self {
Self::Left { body }
}
/// Creates new `EitherBody` using right variant.
#[inline]
pub fn right(body: R) -> Self {
Self::Right { body }
}
}
impl<L, R> MessageBody for EitherBody<L, R>
where
L: MessageBody + 'static,
R: MessageBody + 'static,
{
type Error = Error;
#[inline]
fn size(&self) -> BodySize {
match self {
EitherBody::Left { body } => body.size(),
EitherBody::Right { body } => body.size(),
}
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match self.project() {
EitherBodyProj::Left { body } => body
.poll_next(cx)
.map_err(|err| Error::new_body().with_cause(err)),
EitherBodyProj::Right { body } => body
.poll_next(cx)
.map_err(|err| Error::new_body().with_cause(err)),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
match self {
EitherBody::Left { body } => body
.try_into_bytes()
.map_err(|body| EitherBody::Left { body }),
EitherBody::Right { body } => body
.try_into_bytes()
.map_err(|body| EitherBody::Right { body }),
}
}
#[inline]
fn boxed(self) -> BoxBody {
match self {
EitherBody::Left { body } => body.boxed(),
EitherBody::Right { body } => body.boxed(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn type_parameter_inference() {
let _body: EitherBody<(), _> = EitherBody::new(());
let _body: EitherBody<_, ()> = EitherBody::left(());
let _body: EitherBody<(), _> = EitherBody::right(());
}
}
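A minimal sketch (not part of this file) of the middleware pattern described in the type-level docs, where the inner service's body maps to the `Left` variant and an early response from the middleware uses the default boxed `Right` variant:
```rust
// Assumes `EitherBody`, `BoxBody` and `MessageBody` from `actix_http::body` are in scope.
fn wrap_or_reject<B>(allowed: bool, inner_body: B) -> EitherBody<B>
where
    B: MessageBody + 'static,
{
    if allowed {
        // pass the inner service's body through untouched
        EitherBody::left(inner_body)
    } else {
        // early response body produced by the middleware itself
        EitherBody::right(BoxBody::new("request rejected"))
    }
}
```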

View File

@ -1,740 +0,0 @@
//! [`MessageBody`] trait and foreign implementations.
use std::{
convert::Infallible,
error::Error as StdError,
mem,
pin::Pin,
task::{Context, Poll},
};
use bytes::{Bytes, BytesMut};
use futures_core::ready;
use pin_project_lite::pin_project;
use super::{BodySize, BoxBody};
/// An interface for types that can be used as a response body.
///
/// It is not usually necessary to create custom body types; this trait is already [implemented for
/// a large number of sensible body types](#foreign-impls) including:
/// - Empty body: `()`
/// - Text-based: `String`, `&'static str`, [`ByteString`](https://docs.rs/bytestring/1).
/// - Byte-based: `Bytes`, `BytesMut`, `Vec<u8>`, `&'static [u8]`;
/// - Streams: [`BodyStream`](super::BodyStream), [`SizedStream`](super::SizedStream)
///
/// # Examples
/// ```
/// # use std::convert::Infallible;
/// # use std::task::{Poll, Context};
/// # use std::pin::Pin;
/// # use bytes::Bytes;
/// # use actix_http::body::{BodySize, MessageBody};
/// struct Repeat {
/// chunk: String,
/// n_times: usize,
/// }
///
/// impl MessageBody for Repeat {
/// type Error = Infallible;
///
/// fn size(&self) -> BodySize {
/// BodySize::Sized((self.chunk.len() * self.n_times) as u64)
/// }
///
/// fn poll_next(
/// self: Pin<&mut Self>,
/// _cx: &mut Context<'_>,
/// ) -> Poll<Option<Result<Bytes, Self::Error>>> {
/// let payload_string = self.chunk.repeat(self.n_times);
/// let payload_bytes = Bytes::from(payload_string);
/// Poll::Ready(Some(Ok(payload_bytes)))
/// }
/// }
/// ```
pub trait MessageBody {
/// The type of error that will be returned if streaming body fails.
///
/// Since it is not appropriate to generate a response mid-stream, it only requires `Error` for
/// internal use and logging.
type Error: Into<Box<dyn StdError>>;
/// Body size hint.
///
/// If [`BodySize::None`] is returned, optimizations that skip reading the body are allowed.
fn size(&self) -> BodySize;
/// Attempt to pull out the next chunk of body bytes.
///
/// # Return Value
/// Similar to the `Stream` interface, there are several possible return values, each indicating
/// a distinct state:
/// - `Poll::Pending` means that this body's next chunk is not ready yet. Implementations must
/// ensure that the current task will be notified when the next chunk may be ready.
/// - `Poll::Ready(Some(val))` means that the body has successfully produced a chunk, `val`,
/// and may produce further values on subsequent `poll_next` calls.
/// - `Poll::Ready(None)` means that the body is complete, and `poll_next` should not be
/// invoked again.
///
/// # Panics
/// Once a body is complete (i.e., `poll_next` returned `Ready(None)`), calling its `poll_next`
/// method again may panic, block forever, or cause other kinds of problems; this trait places
/// no requirements on the effects of such a call. However, as the `poll_next` method is not
/// marked unsafe, Rust's usual rules apply: calls must never cause UB, regardless of its state.
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>>;
/// Try to convert into the complete chunk of body bytes.
///
/// Override this method if the complete body can be trivially extracted. This is useful for
/// optimizations where `poll_next` calls can be avoided.
///
/// Body types with [`BodySize::None`] are allowed to return empty `Bytes`. However, if calling
/// this method, it is recommended to check `size` first and return early.
///
/// # Errors
/// The default implementation will error and return the original type back to the caller for
/// further use.
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self>
where
Self: Sized,
{
Err(self)
}
/// Wraps this body into a `BoxBody`.
///
/// No-op when called on a `BoxBody`, meaning there is no risk of double boxing when calling
/// this on a generic `MessageBody`. Prefer this over [`BoxBody::new`] when a boxed body
/// is required.
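///
/// # Examples
/// A short sketch, assuming the foreign impl for `&'static str` provided in this module:
/// ```
/// use actix_http::body::{BodySize, BoxBody, MessageBody as _};
///
/// let boxed: BoxBody = "hello world".boxed();
/// assert_eq!(boxed.size(), BodySize::Sized(11));
/// ```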
#[inline]
fn boxed(self) -> BoxBody
where
Self: Sized + 'static,
{
BoxBody::new(self)
}
}
mod foreign_impls {
use std::{borrow::Cow, ops::DerefMut};
use super::*;
impl<B> MessageBody for &mut B
where
B: MessageBody + Unpin + ?Sized,
{
type Error = B::Error;
fn size(&self) -> BodySize {
(**self).size()
}
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Pin::new(&mut **self).poll_next(cx)
}
}
impl MessageBody for Infallible {
type Error = Infallible;
fn size(&self) -> BodySize {
match *self {}
}
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match *self {}
}
}
impl MessageBody for () {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(0)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Poll::Ready(None)
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::new())
}
}
impl<B> MessageBody for Box<B>
where
B: MessageBody + Unpin + ?Sized,
{
type Error = B::Error;
#[inline]
fn size(&self) -> BodySize {
self.as_ref().size()
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Pin::new(self.get_mut().as_mut()).poll_next(cx)
}
}
impl<T, B> MessageBody for Pin<T>
where
T: DerefMut<Target = B> + Unpin,
B: MessageBody + ?Sized,
{
type Error = B::Error;
#[inline]
fn size(&self) -> BodySize {
self.as_ref().size()
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
self.get_mut().as_mut().poll_next(cx)
}
}
impl MessageBody for &'static [u8] {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from_static(mem::take(self.get_mut())))))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from_static(self))
}
}
impl MessageBody for Bytes {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()))))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(self)
}
}
impl MessageBody for BytesMut {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()).freeze())))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(self.freeze())
}
}
impl MessageBody for Vec<u8> {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()).into())))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from(self))
}
}
impl MessageBody for Cow<'static, [u8]> {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
let bytes = match mem::take(self.get_mut()) {
Cow::Borrowed(b) => Bytes::from_static(b),
Cow::Owned(b) => Bytes::from(b),
};
Poll::Ready(Some(Ok(bytes)))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
match self {
Cow::Borrowed(b) => Ok(Bytes::from_static(b)),
Cow::Owned(b) => Ok(Bytes::from(b)),
}
}
}
impl MessageBody for &'static str {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
let string = mem::take(self.get_mut());
let bytes = Bytes::from_static(string.as_bytes());
Poll::Ready(Some(Ok(bytes)))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from_static(self.as_bytes()))
}
}
impl MessageBody for String {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
let string = mem::take(self.get_mut());
Poll::Ready(Some(Ok(Bytes::from(string))))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from(self))
}
}
impl MessageBody for Cow<'static, str> {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
let bytes = match mem::take(self.get_mut()) {
Cow::Borrowed(s) => Bytes::from_static(s.as_bytes()),
Cow::Owned(s) => Bytes::from(s.into_bytes()),
};
Poll::Ready(Some(Ok(bytes)))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
match self {
Cow::Borrowed(s) => Ok(Bytes::from_static(s.as_bytes())),
Cow::Owned(s) => Ok(Bytes::from(s.into_bytes())),
}
}
}
impl MessageBody for bytestring::ByteString {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
let string = mem::take(self.get_mut());
Poll::Ready(Some(Ok(string.into_bytes())))
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(self.into_bytes())
}
}
}
pin_project! {
pub(crate) struct MessageBodyMapErr<B, F> {
#[pin]
body: B,
mapper: Option<F>,
}
}
impl<B, F, E> MessageBodyMapErr<B, F>
where
B: MessageBody,
F: FnOnce(B::Error) -> E,
{
pub(crate) fn new(body: B, mapper: F) -> Self {
Self {
body,
mapper: Some(mapper),
}
}
}
impl<B, F, E> MessageBody for MessageBodyMapErr<B, F>
where
B: MessageBody,
F: FnOnce(B::Error) -> E,
E: Into<Box<dyn StdError>>,
{
type Error = E;
#[inline]
fn size(&self) -> BodySize {
self.body.size()
}
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
let this = self.as_mut().project();
match ready!(this.body.poll_next(cx)) {
Some(Err(err)) => {
let f = self.as_mut().project().mapper.take().unwrap();
let mapped_err = (f)(err);
Poll::Ready(Some(Err(mapped_err)))
}
Some(Ok(val)) => Poll::Ready(Some(Ok(val))),
None => Poll::Ready(None),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
let Self { body, mapper } = self;
body.try_into_bytes().map_err(|body| Self { body, mapper })
}
}
#[cfg(test)]
mod tests {
use actix_rt::pin;
use actix_utils::future::poll_fn;
use futures_util::stream;
use super::*;
use crate::body::{self, EitherBody};
macro_rules! assert_poll_next {
($pin:expr, $exp:expr) => {
assert_eq!(
poll_fn(|cx| $pin.as_mut().poll_next(cx))
.await
.unwrap() // unwrap option
.unwrap(), // unwrap result
$exp
);
};
}
macro_rules! assert_poll_next_none {
($pin:expr) => {
assert!(poll_fn(|cx| $pin.as_mut().poll_next(cx)).await.is_none());
};
}
#[allow(unused_allocation)] // triggered by `Box::new(()).size()`
#[actix_rt::test]
async fn boxing_equivalence() {
assert_eq!(().size(), BodySize::Sized(0));
assert_eq!(().size(), Box::new(()).size());
assert_eq!(().size(), Box::pin(()).size());
let pl = Box::new(());
pin!(pl);
assert_poll_next_none!(pl);
let mut pl = Box::pin(());
assert_poll_next_none!(pl);
}
#[actix_rt::test]
async fn mut_equivalence() {
assert_eq!(().size(), BodySize::Sized(0));
assert_eq!(().size(), (&(&mut ())).size());
let pl = &mut ();
pin!(pl);
assert_poll_next_none!(pl);
let pl = &mut Box::new(());
pin!(pl);
assert_poll_next_none!(pl);
let mut body = body::SizedStream::new(
8,
stream::iter([
Ok::<_, std::io::Error>(Bytes::from("1234")),
Ok(Bytes::from("5678")),
]),
);
let body = &mut body;
assert_eq!(body.size(), BodySize::Sized(8));
pin!(body);
assert_poll_next!(body, Bytes::from_static(b"1234"));
assert_poll_next!(body, Bytes::from_static(b"5678"));
assert_poll_next_none!(body);
}
#[allow(clippy::let_unit_value)]
#[actix_rt::test]
async fn test_unit() {
let pl = ();
assert_eq!(pl.size(), BodySize::Sized(0));
pin!(pl);
assert_poll_next_none!(pl);
}
#[actix_rt::test]
async fn test_static_str() {
assert_eq!("".size(), BodySize::Sized(0));
assert_eq!("test".size(), BodySize::Sized(4));
let pl = "test";
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_static_bytes() {
assert_eq!(b"".as_ref().size(), BodySize::Sized(0));
assert_eq!(b"test".as_ref().size(), BodySize::Sized(4));
let pl = b"test".as_ref();
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_vec() {
assert_eq!(vec![0; 0].size(), BodySize::Sized(0));
assert_eq!(Vec::from("test").size(), BodySize::Sized(4));
let pl = Vec::from("test");
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_bytes() {
assert_eq!(Bytes::new().size(), BodySize::Sized(0));
assert_eq!(Bytes::from_static(b"test").size(), BodySize::Sized(4));
let pl = Bytes::from_static(b"test");
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_bytes_mut() {
assert_eq!(BytesMut::new().size(), BodySize::Sized(0));
assert_eq!(BytesMut::from(b"test".as_ref()).size(), BodySize::Sized(4));
let pl = BytesMut::from("test");
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_string() {
assert_eq!(String::new().size(), BodySize::Sized(0));
assert_eq!("test".to_owned().size(), BodySize::Sized(4));
let pl = "test".to_owned();
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn complete_body_combinators() {
let body = Bytes::from_static(b"test");
let body = BoxBody::new(body);
let body = EitherBody::<_, ()>::left(body);
let body = EitherBody::<(), _>::right(body);
// Do not support try_into_bytes:
// let body = Box::new(body);
// let body = Box::pin(body);
assert_eq!(body.try_into_bytes().unwrap(), Bytes::from("test"));
}
#[actix_rt::test]
async fn complete_body_combinators_poll() {
let body = Bytes::from_static(b"test");
let body = BoxBody::new(body);
let body = EitherBody::<_, ()>::left(body);
let body = EitherBody::<(), _>::right(body);
let mut body = body;
assert_eq!(body.size(), BodySize::Sized(4));
assert_poll_next!(Pin::new(&mut body), Bytes::from("test"));
assert_poll_next_none!(Pin::new(&mut body));
}
#[actix_rt::test]
async fn none_body_combinators() {
fn none_body() -> BoxBody {
let body = body::None;
let body = BoxBody::new(body);
let body = EitherBody::<_, ()>::left(body);
let body = EitherBody::<(), _>::right(body);
body.boxed()
}
assert_eq!(none_body().size(), BodySize::None);
assert_eq!(none_body().try_into_bytes().unwrap(), Bytes::new());
assert_poll_next_none!(Pin::new(&mut none_body()));
}
// down-casting used to be done with a method on MessageBody trait
// test is kept to demonstrate equivalence of Any trait
#[actix_rt::test]
async fn test_body_casting() {
let mut body = String::from("hello cast");
// let mut resp_body: &mut dyn MessageBody<Error = Error> = &mut body;
let resp_body: &mut dyn std::any::Any = &mut body;
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast");
let body = &mut resp_body.downcast_mut::<String>().unwrap();
body.push('!');
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast!");
let not_body = resp_body.downcast_ref::<()>();
assert!(not_body.is_none());
}
#[actix_rt::test]
async fn non_owning_to_bytes() {
let mut body = BoxBody::new(());
let bytes = body::to_bytes(&mut body).await.unwrap();
assert_eq!(bytes, Bytes::new());
let mut body = body::BodyStream::new(stream::iter([
Ok::<_, std::io::Error>(Bytes::from("1234")),
Ok(Bytes::from("5678")),
]));
let bytes = body::to_bytes(&mut body).await.unwrap();
assert_eq!(bytes, Bytes::from_static(b"12345678"));
}
}

View File

@ -1,27 +0,0 @@
//! Traits and structures to aid consuming and writing HTTP payloads.
//!
//! "Body" and "payload" are used somewhat interchangeably in this documentation.
// Though the spec kinda reads like "payload" is the possibly-transfer-encoded part of the message
// and the "body" is the intended possibly-decoded version of that.
mod body_stream;
mod boxed;
mod either;
mod message_body;
mod none;
mod size;
mod sized_stream;
mod utils;
pub(crate) use self::message_body::MessageBodyMapErr;
pub use self::{
body_stream::BodyStream,
boxed::BoxBody,
either::EitherBody,
message_body::MessageBody,
none::None,
size::BodySize,
sized_stream::SizedStream,
utils::{to_bytes, to_bytes_limited, BodyLimitExceeded},
};

View File

@ -1,51 +0,0 @@
use std::{
convert::Infallible,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use super::{BodySize, MessageBody};
/// Body type for responses that forbid payloads.
///
/// This is distinct from an "empty" response which _would_ contain a `Content-Length` header.
/// For an "empty" body, use `()` or `Bytes::new()`.
///
/// For example, the HTTP spec forbids sending a payload with a `204 No Content` response.
/// In this case, the payload (or lack thereof) is implicit from the status code, so a
/// `Content-Length` header is not required.
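///
/// # Examples
/// A short sketch contrasting the two size hints, using impls provided by this crate:
/// ```
/// use actix_http::body::{self, BodySize, MessageBody as _};
/// use bytes::Bytes;
///
/// // `body::None` advertises that no payload exists at all.
/// assert_eq!(body::None::new().size(), BodySize::None);
///
/// // An empty `Bytes` is an "empty" body and would still carry a `Content-Length: 0` header.
/// assert_eq!(Bytes::new().size(), BodySize::Sized(0));
/// ```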
#[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive]
pub struct None;
impl None {
/// Constructs new "none" body.
#[inline]
pub fn new() -> Self {
None
}
}
impl MessageBody for None {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::None
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Poll::Ready(Option::None)
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::new())
}
}

View File

@ -1,41 +0,0 @@
/// Body size hint.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BodySize {
/// Implicitly empty body.
///
/// Will omit the Content-Length header. Used for responses to certain methods (e.g., `HEAD`) or
/// with particular status codes (e.g., 204 No Content). Consumers that read this as a body size
/// hint are allowed to make optimizations that skip reading or writing the payload.
None,
/// Known size body.
///
/// Will write `Content-Length: N` header.
Sized(u64),
/// Unknown size body.
///
/// Will not write Content-Length header. Can be used with chunked Transfer-Encoding.
Stream,
}
impl BodySize {
/// Equivalent to `BodySize::Sized(0)`.
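///
/// A one-line check of that equivalence:
/// ```
/// # use actix_http::body::BodySize;
/// assert_eq!(BodySize::ZERO, BodySize::Sized(0));
/// ```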
pub const ZERO: Self = Self::Sized(0);
/// Returns true if size hint indicates omitted or empty body.
///
/// Streams will return false because their size cannot be known without reading the stream.
///
/// ```
/// # use actix_http::body::BodySize;
/// assert!(BodySize::None.is_eof());
/// assert!(BodySize::Sized(0).is_eof());
///
/// assert!(!BodySize::Sized(64).is_eof());
/// assert!(!BodySize::Stream.is_eof());
/// ```
pub fn is_eof(&self) -> bool {
matches!(self, BodySize::None | BodySize::Sized(0))
}
}

View File

@ -1,171 +0,0 @@
use std::{
error::Error as StdError,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use futures_core::{ready, Stream};
use pin_project_lite::pin_project;
use super::{BodySize, MessageBody};
pin_project! {
/// Known sized streaming response wrapper.
///
/// This body implementation should be used if the total size of the stream is known. Data is
/// sent as-is, without using chunked transfer encoding.
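///
/// # Examples
/// A minimal sketch, assuming a stream whose chunks add up to the declared size:
/// ```
/// use std::convert::Infallible;
///
/// use actix_http::body::SizedStream;
/// use bytes::Bytes;
/// use futures_util::stream;
///
/// // The chunks yielded by the stream must total exactly the 8 bytes declared up front.
/// let body = SizedStream::new(
///     8,
///     stream::iter([
///         Ok::<_, Infallible>(Bytes::from_static(b"1234")),
///         Ok(Bytes::from_static(b"5678")),
///     ]),
/// );
/// ```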
pub struct SizedStream<S> {
size: u64,
#[pin]
stream: S,
}
}
impl<S, E> SizedStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Box<dyn StdError>> + 'static,
{
#[inline]
pub fn new(size: u64, stream: S) -> Self {
SizedStream { size, stream }
}
}
// TODO: from_infallible method
impl<S, E> MessageBody for SizedStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Box<dyn StdError>> + 'static,
{
type Error = E;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.size)
}
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped so that [`SizedStream`]'s transmission is not ended by a
/// zero-length chunk; polling instead proceeds until the underlying [`Stream`] ends.
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
loop {
let stream = self.as_mut().project().stream;
let chunk = match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
val => val,
};
return Poll::Ready(chunk);
}
}
}
#[cfg(test)]
mod tests {
use std::convert::Infallible;
use actix_rt::pin;
use actix_utils::future::poll_fn;
use futures_util::stream;
use static_assertions::{assert_impl_all, assert_not_impl_any};
use super::*;
use crate::body::to_bytes;
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, crate::Error>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Repeat<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);
assert_not_impl_any!(SizedStream<stream::Empty<Bytes>>: MessageBody);
assert_not_impl_any!(SizedStream<stream::Repeat<Bytes>>: MessageBody);
// crate::Error is not Clone
assert_not_impl_any!(SizedStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = SizedStream::new(
2,
stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
),
);
pin!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
#[actix_rt::test]
async fn read_to_bytes() {
let body = SizedStream::new(
2,
stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
),
);
assert_eq!(to_bytes(body).await.ok(), Some(Bytes::from("12")));
}
#[actix_rt::test]
async fn stream_string_error() {
// `&'static str` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = SizedStream::new(0, stream::once(async { Err("stringy error") }));
assert_eq!(to_bytes(body).await, Ok(Bytes::new()));
let body = SizedStream::new(1, stream::once(async { Err("stringy error") }));
assert!(matches!(to_bytes(body).await, Err("stringy error")));
}
#[actix_rt::test]
async fn stream_boxed_error() {
// `Box<dyn Error>` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = SizedStream::new(
0,
stream::once(async { Err(Box::<dyn StdError>::from("stringy error")) }),
);
assert_eq!(to_bytes(body).await.unwrap(), Bytes::new());
let body = SizedStream::new(
1,
stream::once(async { Err(Box::<dyn StdError>::from("stringy error")) }),
);
assert_eq!(
to_bytes(body).await.unwrap_err().to_string(),
"stringy error"
);
}
}

View File

@ -1,198 +0,0 @@
use std::task::Poll;
use actix_rt::pin;
use actix_utils::future::poll_fn;
use bytes::{Bytes, BytesMut};
use derive_more::{Display, Error};
use futures_core::ready;
use super::{BodySize, MessageBody};
/// Collects all the bytes produced by `body`.
///
/// Any errors produced by the body stream are returned immediately.
///
/// Consider using [`to_bytes_limited`] instead to protect against memory exhaustion.
///
/// # Examples
///
/// ```
/// use actix_http::body::{self, to_bytes};
/// use bytes::Bytes;
///
/// # actix_rt::System::new().block_on(async {
/// let body = body::None::new();
/// let bytes = to_bytes(body).await.unwrap();
/// assert!(bytes.is_empty());
///
/// let body = Bytes::from_static(b"123");
/// let bytes = to_bytes(body).await.unwrap();
/// assert_eq!(bytes, "123");
/// # });
/// ```
pub async fn to_bytes<B: MessageBody>(body: B) -> Result<Bytes, B::Error> {
to_bytes_limited(body, usize::MAX)
.await
.expect("body should never yield more than usize::MAX bytes")
}
/// Error type returned from [`to_bytes_limited`] when the body produced exceeds the limit.
#[derive(Debug, Display, Error)]
#[display("limit exceeded while collecting body bytes")]
#[non_exhaustive]
pub struct BodyLimitExceeded;
/// Collects the bytes produced by `body`, up to `limit` bytes.
///
/// If a chunk read from `poll_next` causes the total number of bytes read to exceed `limit`, an
/// `Err(BodyLimitExceeded)` is returned.
///
/// Any errors produced by the body stream are returned immediately as `Ok(Err(B::Error))`.
///
/// # Examples
///
/// ```
/// use actix_http::body::{self, to_bytes_limited};
/// use bytes::Bytes;
///
/// # actix_rt::System::new().block_on(async {
/// let body = body::None::new();
/// let bytes = to_bytes_limited(body, 10).await.unwrap().unwrap();
/// assert!(bytes.is_empty());
///
/// let body = Bytes::from_static(b"123");
/// let bytes = to_bytes_limited(body, 10).await.unwrap().unwrap();
/// assert_eq!(bytes, "123");
///
/// let body = Bytes::from_static(b"123");
/// assert!(to_bytes_limited(body, 2).await.is_err());
/// # });
/// ```
pub async fn to_bytes_limited<B: MessageBody>(
body: B,
limit: usize,
) -> Result<Result<Bytes, B::Error>, BodyLimitExceeded> {
/// Sensible default (32kB) for initial, bounded allocation when collecting body bytes.
const INITIAL_ALLOC_BYTES: usize = 32 * 1024;
let cap = match body.size() {
BodySize::None | BodySize::Sized(0) => return Ok(Ok(Bytes::new())),
BodySize::Sized(size) if size as usize > limit => return Err(BodyLimitExceeded),
BodySize::Sized(size) => (size as usize).min(INITIAL_ALLOC_BYTES),
BodySize::Stream => INITIAL_ALLOC_BYTES,
};
let mut exceeded_limit = false;
let mut buf = BytesMut::with_capacity(cap);
pin!(body);
match poll_fn(|cx| loop {
let body = body.as_mut();
match ready!(body.poll_next(cx)) {
Some(Ok(bytes)) => {
// if limit is exceeded...
if buf.len() + bytes.len() > limit {
// ...set flag to true and break out of poll_fn
exceeded_limit = true;
return Poll::Ready(Ok(()));
}
buf.extend_from_slice(&bytes)
}
None => return Poll::Ready(Ok(())),
Some(Err(err)) => return Poll::Ready(Err(err)),
}
})
.await
{
// propagate error returned from body poll
Err(err) => Ok(Err(err)),
// limit was exceeded while reading body
Ok(()) if exceeded_limit => Err(BodyLimitExceeded),
// otherwise return body buffer
Ok(()) => Ok(Ok(buf.freeze())),
}
}
#[cfg(test)]
mod tests {
use std::io;
use futures_util::{stream, StreamExt as _};
use super::*;
use crate::{
body::{BodyStream, SizedStream},
Error,
};
#[actix_rt::test]
async fn to_bytes_complete() {
let bytes = to_bytes(()).await.unwrap();
assert!(bytes.is_empty());
let body = Bytes::from_static(b"123");
let bytes = to_bytes(body).await.unwrap();
assert_eq!(bytes, b"123"[..]);
}
#[actix_rt::test]
async fn to_bytes_streams() {
let stream = stream::iter(vec![Bytes::from_static(b"123"), Bytes::from_static(b"abc")])
.map(Ok::<_, Error>);
let body = BodyStream::new(stream);
let bytes = to_bytes(body).await.unwrap();
assert_eq!(bytes, b"123abc"[..]);
}
#[actix_rt::test]
async fn to_bytes_limited_complete() {
let bytes = to_bytes_limited((), 0).await.unwrap().unwrap();
assert!(bytes.is_empty());
let bytes = to_bytes_limited((), 1).await.unwrap().unwrap();
assert!(bytes.is_empty());
assert!(to_bytes_limited(Bytes::from_static(b"12"), 0)
.await
.is_err());
assert!(to_bytes_limited(Bytes::from_static(b"12"), 1)
.await
.is_err());
assert!(to_bytes_limited(Bytes::from_static(b"12"), 2).await.is_ok());
assert!(to_bytes_limited(Bytes::from_static(b"12"), 3).await.is_ok());
}
#[actix_rt::test]
async fn to_bytes_limited_streams() {
// hinting a larger body fails
let body = SizedStream::new(8, stream::empty().map(Ok::<_, Error>));
assert!(to_bytes_limited(body, 3).await.is_err());
// hinting a smaller body is okay
let body = SizedStream::new(3, stream::empty().map(Ok::<_, Error>));
assert!(to_bytes_limited(body, 3).await.unwrap().unwrap().is_empty());
// hinting a smaller body then returning a larger one fails
let stream = stream::iter(vec![Bytes::from_static(b"1234")]).map(Ok::<_, Error>);
let body = SizedStream::new(3, stream);
assert!(to_bytes_limited(body, 3).await.is_err());
let stream = stream::iter(vec![Bytes::from_static(b"123"), Bytes::from_static(b"abc")])
.map(Ok::<_, Error>);
let body = BodyStream::new(stream);
assert!(to_bytes_limited(body, 3).await.is_err());
}
#[actix_rt::test]
async fn to_body_limit_error() {
let err_stream = stream::once(async { Err(io::Error::new(io::ErrorKind::Other, "")) });
let body = SizedStream::new(8, err_stream);
// not too big, but propagates error from body stream
assert!(to_bytes_limited(body, 10).await.unwrap().is_err());
}
}

View File

@ -1,73 +1,81 @@
use std::{fmt, marker::PhantomData, net, rc::Rc, time::Duration}; use std::marker::PhantomData;
use std::rc::Rc;
use std::{fmt, net};
use actix_codec::Framed; use actix_codec::Framed;
use actix_service::{IntoServiceFactory, Service, ServiceFactory}; use actix_service::{IntoServiceFactory, Service, ServiceFactory};
use crate::{ use crate::body::MessageBody;
body::{BoxBody, MessageBody}, use crate::config::{KeepAlive, ServiceConfig};
h1::{self, ExpectHandler, H1Service, UpgradeHandler}, use crate::error::Error;
service::HttpService, use crate::h1::{Codec, ExpectHandler, H1Service, UpgradeHandler};
ConnectCallback, Extensions, KeepAlive, Request, Response, ServiceConfig, use crate::h2::H2Service;
}; use crate::helpers::{Data, DataFactory};
use crate::request::Request;
use crate::response::Response;
use crate::service::HttpService;
use crate::{ConnectCallback, Extensions};
/// An HTTP service builder. /// A HTTP service builder
/// ///
/// This type can construct an instance of [`HttpService`] through a builder-like pattern. /// This type can be used to construct an instance of [`HttpService`] through a
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> { /// builder-like pattern.
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler<T>> {
keep_alive: KeepAlive, keep_alive: KeepAlive,
client_request_timeout: Duration, client_timeout: u64,
client_disconnect_timeout: Duration, client_disconnect: u64,
secure: bool, secure: bool,
local_addr: Option<net::SocketAddr>, local_addr: Option<net::SocketAddr>,
expect: X, expect: X,
upgrade: Option<U>, upgrade: Option<U>,
// DEPRECATED: in favor of on_connect_ext
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
on_connect_ext: Option<Rc<ConnectCallback<T>>>, on_connect_ext: Option<Rc<ConnectCallback<T>>>,
_phantom: PhantomData<S>, _t: PhantomData<(T, S)>,
} }
impl<T, S> Default for HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler> impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler<T>>
where where
S: ServiceFactory<Request, Config = ()>, S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Response<BoxBody>> + 'static, S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
<S::Service as Service<Request>>::Future: 'static, <S::Service as Service>::Future: 'static,
{ {
fn default() -> Self { /// Create instance of `ServiceConfigBuilder`
pub fn new() -> Self {
HttpServiceBuilder { HttpServiceBuilder {
// ServiceConfig parts (make sure defaults match) keep_alive: KeepAlive::Timeout(5),
keep_alive: KeepAlive::default(), client_timeout: 5000,
client_request_timeout: Duration::from_secs(5), client_disconnect: 0,
client_disconnect_timeout: Duration::ZERO,
secure: false, secure: false,
local_addr: None, local_addr: None,
// dispatcher parts
expect: ExpectHandler, expect: ExpectHandler,
upgrade: None, upgrade: None,
on_connect: None,
on_connect_ext: None, on_connect_ext: None,
_phantom: PhantomData, _t: PhantomData,
} }
} }
} }
impl<T, S, X, U> HttpServiceBuilder<T, S, X, U> impl<T, S, X, U> HttpServiceBuilder<T, S, X, U>
where where
S: ServiceFactory<Request, Config = ()>, S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Response<BoxBody>> + 'static, S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
<S::Service as Service<Request>>::Future: 'static, <S::Service as Service>::Future: 'static,
X: ServiceFactory<Request, Config = (), Response = Request>, X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Response<BoxBody>>, X::Error: Into<Error>,
X::InitError: fmt::Debug, X::InitError: fmt::Debug,
U: ServiceFactory<(Request, Framed<T, h1::Codec>), Config = (), Response = ()>, <X::Service as Service>::Future: 'static,
U: ServiceFactory<Config = (), Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display, U::Error: fmt::Display,
U::InitError: fmt::Debug, U::InitError: fmt::Debug,
<U::Service as Service>::Future: 'static,
{ {
/// Set connection keep-alive setting. /// Set server keep-alive setting.
/// ///
/// Applies to HTTP/1.1 keep-alive and HTTP/2 ping-pong. /// By default keep alive is set to a 5 seconds.
///
/// By default keep-alive is 5 seconds.
pub fn keep_alive<W: Into<KeepAlive>>(mut self, val: W) -> Self { pub fn keep_alive<W: Into<KeepAlive>>(mut self, val: W) -> Self {
self.keep_alive = val.into(); self.keep_alive = val.into();
self self
@ -85,45 +93,33 @@ where
self self
} }
/// Set client request timeout (for first request). /// Set server client timeout in milliseconds for first request.
/// ///
/// Defines a timeout for reading client request header. If the client does not transmit the /// Defines a timeout for reading client request header. If a client does not transmit
/// request head within this duration, the connection is terminated with a `408 Request Timeout` /// the entire set headers within this time, the request is terminated with
/// response error. /// the 408 (Request Time-out) error.
/// ///
/// A duration of zero disables the timeout. /// To disable timeout set value to 0.
/// ///
/// By default, the client timeout is 5 seconds. /// By default client timeout is set to 5000 milliseconds.
pub fn client_request_timeout(mut self, dur: Duration) -> Self { pub fn client_timeout(mut self, val: u64) -> Self {
self.client_request_timeout = dur; self.client_timeout = val;
self self
} }
#[doc(hidden)] /// Set server connection disconnect timeout in milliseconds.
#[deprecated(since = "3.0.0", note = "Renamed to `client_request_timeout`.")]
pub fn client_timeout(self, dur: Duration) -> Self {
self.client_request_timeout(dur)
}
/// Set client connection disconnect timeout.
/// ///
/// Defines a timeout for disconnect connection. If a disconnect procedure does not complete /// Defines a timeout for disconnect connection. If a disconnect procedure does not complete
/// within this time, the request get dropped. This timeout affects secure connections. /// within this time, the request get dropped. This timeout affects secure connections.
/// ///
/// A duration of zero disables the timeout. /// To disable timeout set value to 0.
/// ///
/// By default, the disconnect timeout is disabled. /// By default disconnect timeout is set to 0.
pub fn client_disconnect_timeout(mut self, dur: Duration) -> Self { pub fn client_disconnect(mut self, val: u64) -> Self {
self.client_disconnect_timeout = dur; self.client_disconnect = val;
self self
} }
#[doc(hidden)]
#[deprecated(since = "3.0.0", note = "Renamed to `client_disconnect_timeout`.")]
pub fn client_disconnect(self, dur: Duration) -> Self {
self.client_disconnect_timeout(dur)
}
/// Provide service for `EXPECT: 100-Continue` support. /// Provide service for `EXPECT: 100-Continue` support.
/// ///
/// Service get called with request that contains `EXPECT` header. /// Service get called with request that contains `EXPECT` header.
@ -131,21 +127,23 @@ where
/// request will be forwarded to main service. /// request will be forwarded to main service.
pub fn expect<F, X1>(self, expect: F) -> HttpServiceBuilder<T, S, X1, U> pub fn expect<F, X1>(self, expect: F) -> HttpServiceBuilder<T, S, X1, U>
where where
F: IntoServiceFactory<X1, Request>, F: IntoServiceFactory<X1>,
X1: ServiceFactory<Request, Config = (), Response = Request>, X1: ServiceFactory<Config = (), Request = Request, Response = Request>,
X1::Error: Into<Response<BoxBody>>, X1::Error: Into<Error>,
X1::InitError: fmt::Debug, X1::InitError: fmt::Debug,
<X1::Service as Service>::Future: 'static,
{ {
HttpServiceBuilder { HttpServiceBuilder {
keep_alive: self.keep_alive, keep_alive: self.keep_alive,
client_request_timeout: self.client_request_timeout, client_timeout: self.client_timeout,
client_disconnect_timeout: self.client_disconnect_timeout, client_disconnect: self.client_disconnect,
secure: self.secure, secure: self.secure,
local_addr: self.local_addr, local_addr: self.local_addr,
expect: expect.into_factory(), expect: expect.into_factory(),
upgrade: self.upgrade, upgrade: self.upgrade,
on_connect: self.on_connect,
on_connect_ext: self.on_connect_ext, on_connect_ext: self.on_connect_ext,
_phantom: PhantomData, _t: PhantomData,
} }
} }
@ -155,24 +153,44 @@ where
/// and this service get called with original request and framed object. /// and this service get called with original request and framed object.
pub fn upgrade<F, U1>(self, upgrade: F) -> HttpServiceBuilder<T, S, X, U1> pub fn upgrade<F, U1>(self, upgrade: F) -> HttpServiceBuilder<T, S, X, U1>
where where
F: IntoServiceFactory<U1, (Request, Framed<T, h1::Codec>)>, F: IntoServiceFactory<U1>,
U1: ServiceFactory<(Request, Framed<T, h1::Codec>), Config = (), Response = ()>, U1: ServiceFactory<
Config = (),
Request = (Request, Framed<T, Codec>),
Response = (),
>,
U1::Error: fmt::Display, U1::Error: fmt::Display,
U1::InitError: fmt::Debug, U1::InitError: fmt::Debug,
<U1::Service as Service>::Future: 'static,
{ {
HttpServiceBuilder { HttpServiceBuilder {
keep_alive: self.keep_alive, keep_alive: self.keep_alive,
client_request_timeout: self.client_request_timeout, client_timeout: self.client_timeout,
client_disconnect_timeout: self.client_disconnect_timeout, client_disconnect: self.client_disconnect,
secure: self.secure, secure: self.secure,
local_addr: self.local_addr, local_addr: self.local_addr,
expect: self.expect, expect: self.expect,
upgrade: Some(upgrade.into_factory()), upgrade: Some(upgrade.into_factory()),
on_connect: self.on_connect,
on_connect_ext: self.on_connect_ext, on_connect_ext: self.on_connect_ext,
_phantom: PhantomData, _t: PhantomData,
} }
} }
/// Set on-connect callback.
///
/// Called once per connection. Return value of the call is stored in request extensions.
///
/// *SOFT DEPRECATED*: Prefer the `on_connect_ext` style callback.
pub fn on_connect<F, I>(mut self, f: F) -> Self
where
F: Fn(&T) -> I + 'static,
I: Clone + 'static,
{
self.on_connect = Some(Rc::new(move |io| Box::new(Data(f(io)))));
self
}
/// Sets the callback to be run on connection establishment. /// Sets the callback to be run on connection establishment.
/// ///
/// Has mutable access to a data container that will be merged into request extensions. /// Has mutable access to a data container that will be merged into request extensions.
@ -186,19 +204,19 @@ where
self self
} }
/// Finish service configuration and create a service for the HTTP/1 protocol. /// Finish service configuration and create a HTTP Service for HTTP/1 protocol.
pub fn h1<F, B>(self, service: F) -> H1Service<T, S, B, X, U> pub fn h1<F, B>(self, service: F) -> H1Service<T, S, B, X, U>
where where
B: MessageBody, B: MessageBody,
F: IntoServiceFactory<S, Request>, F: IntoServiceFactory<S>,
S::Error: Into<Response<BoxBody>>, S::Error: Into<Error>,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
{ {
let cfg = ServiceConfig::new( let cfg = ServiceConfig::new(
self.keep_alive, self.keep_alive,
self.client_request_timeout, self.client_timeout,
self.client_disconnect_timeout, self.client_disconnect,
self.secure, self.secure,
self.local_addr, self.local_addr,
); );
@ -206,46 +224,47 @@ where
H1Service::with_config(cfg, service.into_factory()) H1Service::with_config(cfg, service.into_factory())
.expect(self.expect) .expect(self.expect)
.upgrade(self.upgrade) .upgrade(self.upgrade)
.on_connect(self.on_connect)
.on_connect_ext(self.on_connect_ext) .on_connect_ext(self.on_connect_ext)
} }
/// Finish service configuration and create a service for the HTTP/2 protocol. /// Finish service configuration and create a HTTP service for HTTP/2 protocol.
#[cfg(feature = "http2")] pub fn h2<F, B>(self, service: F) -> H2Service<T, S, B>
pub fn h2<F, B>(self, service: F) -> crate::h2::H2Service<T, S, B>
where where
F: IntoServiceFactory<S, Request>, B: MessageBody + 'static,
S::Error: Into<Response<BoxBody>> + 'static, F: IntoServiceFactory<S>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{ {
let cfg = ServiceConfig::new( let cfg = ServiceConfig::new(
self.keep_alive, self.keep_alive,
self.client_request_timeout, self.client_timeout,
self.client_disconnect_timeout, self.client_disconnect,
self.secure, self.secure,
self.local_addr, self.local_addr,
); );
crate::h2::H2Service::with_config(cfg, service.into_factory()) H2Service::with_config(cfg, service.into_factory())
.on_connect(self.on_connect)
.on_connect_ext(self.on_connect_ext) .on_connect_ext(self.on_connect_ext)
} }
/// Finish service configuration and create `HttpService` instance. /// Finish service configuration and create `HttpService` instance.
pub fn finish<F, B>(self, service: F) -> HttpService<T, S, B, X, U> pub fn finish<F, B>(self, service: F) -> HttpService<T, S, B, X, U>
where where
F: IntoServiceFactory<S, Request>, B: MessageBody + 'static,
S::Error: Into<Response<BoxBody>> + 'static, F: IntoServiceFactory<S>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{ {
let cfg = ServiceConfig::new( let cfg = ServiceConfig::new(
self.keep_alive, self.keep_alive,
self.client_request_timeout, self.client_timeout,
self.client_disconnect_timeout, self.client_disconnect,
self.secure, self.secure,
self.local_addr, self.local_addr,
); );
@ -253,6 +272,7 @@ where
HttpService::with_config(cfg, service.into_factory()) HttpService::with_config(cfg, service.into_factory())
.expect(self.expect) .expect(self.expect)
.upgrade(self.upgrade) .upgrade(self.upgrade)
.on_connect(self.on_connect)
.on_connect_ext(self.on_connect_ext) .on_connect_ext(self.on_connect_ext)
} }
} }

View File

@ -1,34 +1,31 @@
use std::{net::IpAddr, time::Duration}; use std::time::Duration;
const DEFAULT_H2_CONN_WINDOW: u32 = 1024 * 1024 * 2; // 2MB // These values are taken from hyper/src/proto/h2/client.rs
const DEFAULT_H2_STREAM_WINDOW: u32 = 1024 * 1024; // 1MB const DEFAULT_H2_CONN_WINDOW: u32 = 1024 * 1024 * 2; // 2mb
const DEFAULT_H2_STREAM_WINDOW: u32 = 1024 * 1024; // 1mb
/// Connector configuration /// Connector configuration
#[derive(Clone)] #[derive(Clone)]
pub(crate) struct ConnectorConfig { pub(crate) struct ConnectorConfig {
pub(crate) timeout: Duration, pub(crate) timeout: Duration,
pub(crate) handshake_timeout: Duration,
pub(crate) conn_lifetime: Duration, pub(crate) conn_lifetime: Duration,
pub(crate) conn_keep_alive: Duration, pub(crate) conn_keep_alive: Duration,
pub(crate) disconnect_timeout: Option<Duration>, pub(crate) disconnect_timeout: Option<Duration>,
pub(crate) limit: usize, pub(crate) limit: usize,
pub(crate) conn_window_size: u32, pub(crate) conn_window_size: u32,
pub(crate) stream_window_size: u32, pub(crate) stream_window_size: u32,
pub(crate) local_address: Option<IpAddr>,
} }
impl Default for ConnectorConfig { impl Default for ConnectorConfig {
fn default() -> Self { fn default() -> Self {
Self { Self {
timeout: Duration::from_secs(5), timeout: Duration::from_secs(1),
handshake_timeout: Duration::from_secs(5),
conn_lifetime: Duration::from_secs(75), conn_lifetime: Duration::from_secs(75),
conn_keep_alive: Duration::from_secs(15), conn_keep_alive: Duration::from_secs(15),
disconnect_timeout: Some(Duration::from_millis(3000)), disconnect_timeout: Some(Duration::from_millis(3000)),
limit: 100, limit: 100,
conn_window_size: DEFAULT_H2_CONN_WINDOW, conn_window_size: DEFAULT_H2_CONN_WINDOW,
stream_window_size: DEFAULT_H2_STREAM_WINDOW, stream_window_size: DEFAULT_H2_STREAM_WINDOW,
local_address: None,
} }
} }
} }

View File

@ -0,0 +1,291 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, io, mem, time};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use bytes::{Buf, Bytes};
use futures_util::future::{err, Either, FutureExt, LocalBoxFuture, Ready};
use h2::client::SendRequest;
use pin_project::pin_project;
use crate::body::MessageBody;
use crate::h1::ClientCodec;
use crate::message::{RequestHeadType, ResponseHead};
use crate::payload::Payload;
use super::error::SendRequestError;
use super::pool::{Acquired, Protocol};
use super::{h1proto, h2proto};
pub(crate) enum ConnectionType<Io> {
H1(Io),
H2(SendRequest<Bytes>),
}
pub trait Connection {
type Io: AsyncRead + AsyncWrite + Unpin;
type Future: Future<Output = Result<(ResponseHead, Payload), SendRequestError>>;
fn protocol(&self) -> Protocol;
/// Send request and body
fn send_request<B: MessageBody + 'static, H: Into<RequestHeadType>>(
self,
head: H,
body: B,
) -> Self::Future;
type TunnelFuture: Future<
Output = Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>;
/// Send request, returns Response and Framed
fn open_tunnel<H: Into<RequestHeadType>>(self, head: H) -> Self::TunnelFuture;
}
pub(crate) trait ConnectionLifetime: AsyncRead + AsyncWrite + 'static {
/// Close connection
fn close(self: Pin<&mut Self>);
/// Release connection to the connection pool
fn release(self: Pin<&mut Self>);
}
#[doc(hidden)]
/// HTTP client connection
pub struct IoConnection<T> {
io: Option<ConnectionType<T>>,
created: time::Instant,
pool: Option<Acquired<T>>,
}
impl<T> fmt::Debug for IoConnection<T>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.io {
Some(ConnectionType::H1(ref io)) => write!(f, "H1Connection({:?})", io),
Some(ConnectionType::H2(_)) => write!(f, "H2Connection"),
None => write!(f, "Connection(Empty)"),
}
}
}
impl<T: AsyncRead + AsyncWrite + Unpin> IoConnection<T> {
pub(crate) fn new(
io: ConnectionType<T>,
created: time::Instant,
pool: Option<Acquired<T>>,
) -> Self {
IoConnection {
pool,
created,
io: Some(io),
}
}
pub(crate) fn into_inner(self) -> (ConnectionType<T>, time::Instant) {
(self.io.unwrap(), self.created)
}
}
impl<T> Connection for IoConnection<T>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Io = T;
type Future =
LocalBoxFuture<'static, Result<(ResponseHead, Payload), SendRequestError>>;
fn protocol(&self) -> Protocol {
match self.io {
Some(ConnectionType::H1(_)) => Protocol::Http1,
Some(ConnectionType::H2(_)) => Protocol::Http2,
None => Protocol::Http1,
}
}
fn send_request<B: MessageBody + 'static, H: Into<RequestHeadType>>(
mut self,
head: H,
body: B,
) -> Self::Future {
match self.io.take().unwrap() {
ConnectionType::H1(io) => {
h1proto::send_request(io, head.into(), body, self.created, self.pool)
.boxed_local()
}
ConnectionType::H2(io) => {
h2proto::send_request(io, head.into(), body, self.created, self.pool)
.boxed_local()
}
}
}
type TunnelFuture = Either<
LocalBoxFuture<
'static,
Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>,
Ready<Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>>,
>;
/// Send request, returns Response and Framed
fn open_tunnel<H: Into<RequestHeadType>>(mut self, head: H) -> Self::TunnelFuture {
match self.io.take().unwrap() {
ConnectionType::H1(io) => {
Either::Left(h1proto::open_tunnel(io, head.into()).boxed_local())
}
ConnectionType::H2(io) => {
if let Some(mut pool) = self.pool.take() {
pool.release(IoConnection::new(
ConnectionType::H2(io),
self.created,
None,
));
}
Either::Right(err(SendRequestError::TunnelNotSupported))
}
}
}
}
#[allow(dead_code)]
pub(crate) enum EitherConnection<A, B> {
A(IoConnection<A>),
B(IoConnection<B>),
}
impl<A, B> Connection for EitherConnection<A, B>
where
A: AsyncRead + AsyncWrite + Unpin + 'static,
B: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Io = EitherIo<A, B>;
type Future =
LocalBoxFuture<'static, Result<(ResponseHead, Payload), SendRequestError>>;
fn protocol(&self) -> Protocol {
match self {
EitherConnection::A(con) => con.protocol(),
EitherConnection::B(con) => con.protocol(),
}
}
fn send_request<RB: MessageBody + 'static, H: Into<RequestHeadType>>(
self,
head: H,
body: RB,
) -> Self::Future {
match self {
EitherConnection::A(con) => con.send_request(head, body),
EitherConnection::B(con) => con.send_request(head, body),
}
}
type TunnelFuture = LocalBoxFuture<
'static,
Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>;
/// Send request, returns Response and Framed
fn open_tunnel<H: Into<RequestHeadType>>(self, head: H) -> Self::TunnelFuture {
match self {
EitherConnection::A(con) => con
.open_tunnel(head)
.map(|res| {
res.map(|(head, framed)| (head, framed.into_map_io(EitherIo::A)))
})
.boxed_local(),
EitherConnection::B(con) => con
.open_tunnel(head)
.map(|res| {
res.map(|(head, framed)| (head, framed.into_map_io(EitherIo::B)))
})
.boxed_local(),
}
}
}
#[pin_project(project = EitherIoProj)]
pub enum EitherIo<A, B> {
A(#[pin] A),
B(#[pin] B),
}
impl<A, B> AsyncRead for EitherIo<A, B>
where
A: AsyncRead,
B: AsyncRead,
{
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
match self.project() {
EitherIoProj::A(val) => val.poll_read(cx, buf),
EitherIoProj::B(val) => val.poll_read(cx, buf),
}
}
unsafe fn prepare_uninitialized_buffer(
&self,
buf: &mut [mem::MaybeUninit<u8>],
) -> bool {
match self {
EitherIo::A(ref val) => val.prepare_uninitialized_buffer(buf),
EitherIo::B(ref val) => val.prepare_uninitialized_buffer(buf),
}
}
}
impl<A, B> AsyncWrite for EitherIo<A, B>
where
A: AsyncWrite,
B: AsyncWrite,
{
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
match self.project() {
EitherIoProj::A(val) => val.poll_write(cx, buf),
EitherIoProj::B(val) => val.poll_write(cx, buf),
}
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
match self.project() {
EitherIoProj::A(val) => val.poll_flush(cx),
EitherIoProj::B(val) => val.poll_flush(cx),
}
}
fn poll_shutdown(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>> {
match self.project() {
EitherIoProj::A(val) => val.poll_shutdown(cx),
EitherIoProj::B(val) => val.poll_shutdown(cx),
}
}
fn poll_write_buf<U: Buf>(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut U,
) -> Poll<Result<usize, io::Error>>
where
Self: Sized,
{
match self.project() {
EitherIoProj::A(val) => val.poll_write_buf(cx, buf),
EitherIoProj::B(val) => val.poll_write_buf(cx, buf),
}
}
}

View File

@ -0,0 +1,547 @@
use std::fmt;
use std::marker::PhantomData;
use std::time::Duration;
use actix_codec::{AsyncRead, AsyncWrite};
use actix_connect::{
default_connector, Connect as TcpConnect, Connection as TcpConnection,
};
use actix_rt::net::TcpStream;
use actix_service::{apply_fn, Service};
use actix_utils::timeout::{TimeoutError, TimeoutService};
use http::Uri;
use super::config::ConnectorConfig;
use super::connection::Connection;
use super::error::ConnectError;
use super::pool::{ConnectionPool, Protocol};
use super::Connect;
#[cfg(feature = "openssl")]
use actix_connect::ssl::openssl::SslConnector as OpensslConnector;
#[cfg(feature = "rustls")]
use actix_connect::ssl::rustls::ClientConfig;
#[cfg(feature = "rustls")]
use std::sync::Arc;
#[cfg(any(feature = "openssl", feature = "rustls"))]
enum SslConnector {
#[cfg(feature = "openssl")]
Openssl(OpensslConnector),
#[cfg(feature = "rustls")]
Rustls(Arc<ClientConfig>),
}
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
type SslConnector = ();
/// Manages HTTP client network connectivity.
///
/// The `Connector` type uses a builder-like combinator pattern for service
/// construction that finishes by calling the `.finish()` method.
///
/// ```rust,ignore
/// use std::time::Duration;
/// use actix_http::client::Connector;
///
/// let connector = Connector::new()
/// .timeout(Duration::from_secs(5))
/// .finish();
/// ```
pub struct Connector<T, U> {
connector: T,
config: ConnectorConfig,
#[allow(dead_code)]
ssl: SslConnector,
_t: PhantomData<U>,
}
trait Io: AsyncRead + AsyncWrite + Unpin {}
impl<T: AsyncRead + AsyncWrite + Unpin> Io for T {}
impl Connector<(), ()> {
#[allow(clippy::new_ret_no_self, clippy::let_unit_value)]
pub fn new() -> Connector<
impl Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, TcpStream>,
Error = actix_connect::ConnectError,
> + Clone,
TcpStream,
> {
Connector {
ssl: Self::build_ssl(vec![b"h2".to_vec(), b"http/1.1".to_vec()]),
connector: default_connector(),
config: ConnectorConfig::default(),
_t: PhantomData,
}
}
// Build Ssl connector with openssl, based on supplied alpn protocols
#[cfg(feature = "openssl")]
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
use actix_connect::ssl::openssl::SslMethod;
use bytes::{BufMut, BytesMut};
let mut alpn = BytesMut::with_capacity(20);
for proto in protocols.iter() {
alpn.put_u8(proto.len() as u8);
alpn.put(proto.as_slice());
}
let mut ssl = OpensslConnector::builder(SslMethod::tls()).unwrap();
let _ = ssl
.set_alpn_protos(&alpn)
.map_err(|e| error!("Can not set alpn protocol: {:?}", e));
SslConnector::Openssl(ssl.build())
}
// Build Ssl connector with rustls, based on supplied alpn protocols
#[cfg(all(not(feature = "openssl"), feature = "rustls"))]
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
let mut config = ClientConfig::new();
config.set_protocols(&protocols);
config
.root_store
.add_server_trust_anchors(&actix_tls::rustls::TLS_SERVER_ROOTS);
SslConnector::Rustls(Arc::new(config))
}
// ssl turned off, provides empty ssl connector
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
fn build_ssl(_: Vec<Vec<u8>>) -> SslConnector {}
}
impl<T, U> Connector<T, U> {
/// Use custom connector.
pub fn connector<T1, U1>(self, connector: T1) -> Connector<T1, U1>
where
U1: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
T1: Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, U1>,
Error = actix_connect::ConnectError,
> + Clone,
{
Connector {
connector,
config: self.config,
ssl: self.ssl,
_t: PhantomData,
}
}
}
impl<T, U> Connector<T, U>
where
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
T: Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, U>,
Error = actix_connect::ConnectError,
> + Clone
+ 'static,
{
/// Connection timeout, i.e. the maximum time allowed to connect to the remote host, including
/// DNS name resolution.
///
/// Set to 1 second by default.
pub fn timeout(mut self, timeout: Duration) -> Self {
self.config.timeout = timeout;
self
}
#[cfg(feature = "openssl")]
/// Use custom `SslConnector` instance.
pub fn ssl(mut self, connector: OpensslConnector) -> Self {
self.ssl = SslConnector::Openssl(connector);
self
}
#[cfg(feature = "rustls")]
pub fn rustls(mut self, connector: Arc<ClientConfig>) -> Self {
self.ssl = SslConnector::Rustls(connector);
self
}
/// Maximum supported HTTP major version.
///
/// Supported versions are HTTP/1.1 and HTTP/2.
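///
/// # Examples
/// A sketch following the builder pattern shown on [`Connector`]; names mirror the example above:
/// ```rust,ignore
/// use actix_http::client::Connector;
///
/// let connector = Connector::new()
///     .max_http_version(http::Version::HTTP_11)
///     .finish();
/// ```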
pub fn max_http_version(mut self, val: http::Version) -> Self {
let versions = match val {
http::Version::HTTP_11 => vec![b"http/1.1".to_vec()],
http::Version::HTTP_2 => vec![b"h2".to_vec(), b"http/1.1".to_vec()],
_ => {
unimplemented!("actix-http:client: supported versions http/1.1, http/2")
}
};
self.ssl = Connector::build_ssl(versions);
self
}
/// Indicates the initial window size (in octets) for
/// HTTP2 stream-level flow control for received data.
///
/// The default value is 65,535 and is good for APIs, but not for big objects.
pub fn initial_window_size(mut self, size: u32) -> Self {
self.config.stream_window_size = size;
self
}
/// Indicates the initial window size (in octets) for
/// HTTP2 connection-level flow control for received data.
///
/// The default value is 65,535 and is good for APIs, but not for big objects.
pub fn initial_connection_window_size(mut self, size: u32) -> Self {
self.config.conn_window_size = size;
self
}
/// Set total number of simultaneous connections per type of scheme.
///
/// If limit is 0, the connector has no limit.
/// The default limit size is 100.
pub fn limit(mut self, limit: usize) -> Self {
self.config.limit = limit;
self
}
/// Set keep-alive period for opened connections.
///
/// The keep-alive period is the maximum idle time between uses of the same connection. If the
/// delay between repeated usages of a connection exceeds this period, the connection is closed.
///
/// Default keep-alive period is 15 seconds.
pub fn conn_keep_alive(mut self, dur: Duration) -> Self {
self.config.conn_keep_alive = dur;
self
}
/// Set max lifetime period for connections.
///
/// Connection lifetime is the maximum time any opened connection may stay in use before it is
/// closed, regardless of the keep-alive period.
///
/// Default lifetime period is 75 seconds.
pub fn conn_lifetime(mut self, dur: Duration) -> Self {
self.config.conn_lifetime = dur;
self
}
/// Set server connection disconnect timeout.
///
/// Defines a timeout for the disconnect procedure. If it does not complete within this time,
/// the socket gets dropped. This timeout affects only secure connections.
///
/// To disable the timeout, set the value to 0.
///
/// By default, the disconnect timeout is set to 3000 milliseconds.
pub fn disconnect_timeout(mut self, dur: Duration) -> Self {
self.config.disconnect_timeout = Some(dur);
self
}
/// Finish configuration process and create connector service.
/// The Connector builder always concludes by calling `finish()` last in
/// its combinator chain.
pub fn finish(
self,
) -> impl Service<Request = Connect, Response = impl Connection, Error = ConnectError>
+ Clone {
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
{
let connector = TimeoutService::new(
self.config.timeout,
apply_fn(self.connector, |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
.map_err(ConnectError::from)
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
TimeoutError::Timeout => ConnectError::Timeout,
});
connect_impl::InnerConnector {
tcp_pool: ConnectionPool::new(
connector,
self.config.no_disconnect_timeout(),
),
}
}
#[cfg(any(feature = "openssl", feature = "rustls"))]
{
const H2: &[u8] = b"h2";
#[cfg(feature = "openssl")]
use actix_connect::ssl::openssl::OpensslConnector;
#[cfg(feature = "rustls")]
use actix_connect::ssl::rustls::{RustlsConnector, Session};
use actix_service::{boxed::service, pipeline};
let ssl_service = TimeoutService::new(
self.config.timeout,
pipeline(
apply_fn(self.connector.clone(), |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
.map_err(ConnectError::from),
)
.and_then(match self.ssl {
#[cfg(feature = "openssl")]
SslConnector::Openssl(ssl) => service(
OpensslConnector::service(ssl)
.map(|stream| {
let sock = stream.into_parts().0;
let h2 = sock
.ssl()
.selected_alpn_protocol()
.map(|protos| protos.windows(2).any(|w| w == H2))
.unwrap_or(false);
if h2 {
(Box::new(sock) as Box<dyn Io>, Protocol::Http2)
} else {
(Box::new(sock) as Box<dyn Io>, Protocol::Http1)
}
})
.map_err(ConnectError::from),
),
#[cfg(feature = "rustls")]
SslConnector::Rustls(ssl) => service(
RustlsConnector::service(ssl)
.map_err(ConnectError::from)
.map(|stream| {
let sock = stream.into_parts().0;
let h2 = sock
.get_ref()
.1
.get_alpn_protocol()
.map(|protos| protos.windows(2).any(|w| w == H2))
.unwrap_or(false);
if h2 {
(Box::new(sock) as Box<dyn Io>, Protocol::Http2)
} else {
(Box::new(sock) as Box<dyn Io>, Protocol::Http1)
}
}),
),
}),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
TimeoutError::Timeout => ConnectError::Timeout,
});
let tcp_service = TimeoutService::new(
self.config.timeout,
apply_fn(self.connector, |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
.map_err(ConnectError::from)
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
TimeoutError::Timeout => ConnectError::Timeout,
});
connect_impl::InnerConnector {
tcp_pool: ConnectionPool::new(
tcp_service,
self.config.no_disconnect_timeout(),
),
ssl_pool: ConnectionPool::new(ssl_service, self.config),
}
}
}
}
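// Editor's note: illustrative sketch (not part of this file) showing how the builder
// methods above are typically chained. `Connector::new()` is assumed to be the
// constructor defined earlier in this module; everything else is defined above.
//
//     use std::time::Duration;
//
//     let connector = Connector::new()
//         .limit(100)                                      // cap pooled connections
//         .conn_keep_alive(Duration::from_secs(15))        // idle period before close
//         .conn_lifetime(Duration::from_secs(75))          // hard cap on connection age
//         .disconnect_timeout(Duration::from_millis(3000)) // TLS shutdown timeout
//         .finish();                                       // build the connector service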
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
mod connect_impl {
use std::task::{Context, Poll};
use futures_util::future::{err, Either, Ready};
use super::*;
use crate::client::connection::IoConnection;
pub(crate) struct InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
pub(crate) tcp_pool: ConnectionPool<T, Io>,
}
impl<T, Io> Clone for InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
fn clone(&self) -> Self {
InnerConnector {
tcp_pool: self.tcp_pool.clone(),
}
}
}
impl<T, Io> Service for InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
type Request = Connect;
type Response = IoConnection<Io>;
type Error = ConnectError;
type Future = Either<
<ConnectionPool<T, Io> as Service>::Future,
Ready<Result<IoConnection<Io>, ConnectError>>,
>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.tcp_pool.poll_ready(cx)
}
fn call(&mut self, req: Connect) -> Self::Future {
match req.uri.scheme_str() {
Some("https") | Some("wss") => {
Either::Right(err(ConnectError::SslIsNotSupported))
}
_ => Either::Left(self.tcp_pool.call(req)),
}
}
}
}
#[cfg(any(feature = "openssl", feature = "rustls"))]
mod connect_impl {
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use futures_core::ready;
use futures_util::future::Either;
use super::*;
use crate::client::connection::EitherConnection;
pub(crate) struct InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>,
{
pub(crate) tcp_pool: ConnectionPool<T1, Io1>,
pub(crate) ssl_pool: ConnectionPool<T2, Io2>,
}
impl<T1, T2, Io1, Io2> Clone for InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{
fn clone(&self) -> Self {
InnerConnector {
tcp_pool: self.tcp_pool.clone(),
ssl_pool: self.ssl_pool.clone(),
}
}
}
impl<T1, T2, Io1, Io2> Service for InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{
type Request = Connect;
type Response = EitherConnection<Io1, Io2>;
type Error = ConnectError;
type Future = Either<
InnerConnectorResponseA<T1, Io1, Io2>,
InnerConnectorResponseB<T2, Io1, Io2>,
>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.tcp_pool.poll_ready(cx)
}
fn call(&mut self, req: Connect) -> Self::Future {
match req.uri.scheme_str() {
Some("https") | Some("wss") => Either::Right(InnerConnectorResponseB {
fut: self.ssl_pool.call(req),
_t: PhantomData,
}),
_ => Either::Left(InnerConnectorResponseA {
fut: self.tcp_pool.call(req),
_t: PhantomData,
}),
}
}
}
#[pin_project::pin_project]
pub(crate) struct InnerConnectorResponseA<T, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
{
#[pin]
fut: <ConnectionPool<T, Io1> as Service>::Future,
_t: PhantomData<Io2>,
}
impl<T, Io1, Io2> Future for InnerConnectorResponseA<T, Io1, Io2>
where
T: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Output = Result<EitherConnection<Io1, Io2>, ConnectError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(
ready!(Pin::new(&mut self.get_mut().fut).poll(cx))
.map(EitherConnection::A),
)
}
}
#[pin_project::pin_project]
pub(crate) struct InnerConnectorResponseB<T, Io1, Io2>
where
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{
#[pin]
fut: <ConnectionPool<T, Io2> as Service>::Future,
_t: PhantomData<Io1>,
}
impl<T, Io1, Io2> Future for InnerConnectorResponseB<T, Io1, Io2>
where
T: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Output = Result<EitherConnection<Io1, Io2>, ConnectError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(
ready!(Pin::new(&mut self.get_mut().fut).poll(cx))
.map(EitherConnection::B),
)
}
}
}

View File

@ -0,0 +1,157 @@
use std::io;
use actix_connect::resolver::ResolveError;
use derive_more::{Display, From};
#[cfg(feature = "openssl")]
use actix_connect::ssl::openssl::{HandshakeError, SslError};
use crate::error::{Error, ParseError, ResponseError};
use crate::http::{Error as HttpError, StatusCode};
/// A set of errors that can occur while connecting to an HTTP host
#[derive(Debug, Display, From)]
pub enum ConnectError {
/// SSL feature is not enabled
#[display(fmt = "SSL is not supported")]
SslIsNotSupported,
/// SSL error
#[cfg(feature = "openssl")]
#[display(fmt = "{}", _0)]
SslError(SslError),
/// SSL Handshake error
#[cfg(feature = "openssl")]
#[display(fmt = "{}", _0)]
SslHandshakeError(String),
/// Failed to resolve the hostname
#[display(fmt = "Failed resolving hostname: {}", _0)]
Resolver(ResolveError),
/// No DNS records
#[display(fmt = "No dns records found for the input")]
NoRecords,
/// Http2 error
#[display(fmt = "{}", _0)]
H2(h2::Error),
/// Connecting took too long
#[display(fmt = "Timeout while establishing connection")]
Timeout,
/// Connector has been disconnected
#[display(fmt = "Internal error: connector has been disconnected")]
Disconnected,
/// Unresolved host name
#[display(fmt = "Connector received `Connect` method with unresolved host")]
Unresolved,
/// Connection io error
#[display(fmt = "{}", _0)]
Io(io::Error),
}
impl std::error::Error for ConnectError {}
impl From<actix_connect::ConnectError> for ConnectError {
fn from(err: actix_connect::ConnectError) -> ConnectError {
match err {
actix_connect::ConnectError::Resolver(e) => ConnectError::Resolver(e),
actix_connect::ConnectError::NoRecords => ConnectError::NoRecords,
actix_connect::ConnectError::InvalidInput => panic!(),
actix_connect::ConnectError::Unresolved => ConnectError::Unresolved,
actix_connect::ConnectError::Io(e) => ConnectError::Io(e),
}
}
}
#[cfg(feature = "openssl")]
impl<T: std::fmt::Debug> From<HandshakeError<T>> for ConnectError {
fn from(err: HandshakeError<T>) -> ConnectError {
ConnectError::SslHandshakeError(format!("{:?}", err))
}
}
#[derive(Debug, Display, From)]
pub enum InvalidUrl {
#[display(fmt = "Missing url scheme")]
MissingScheme,
#[display(fmt = "Unknown url scheme")]
UnknownScheme,
#[display(fmt = "Missing host name")]
MissingHost,
#[display(fmt = "Url parse error: {}", _0)]
HttpError(http::Error),
}
impl std::error::Error for InvalidUrl {}
/// A set of errors that can occur during request sending and response reading
#[derive(Debug, Display, From)]
pub enum SendRequestError {
/// Invalid URL
#[display(fmt = "Invalid URL: {}", _0)]
Url(InvalidUrl),
/// Failed to connect to host
#[display(fmt = "Failed to connect to host: {}", _0)]
Connect(ConnectError),
/// Error sending request
Send(io::Error),
/// Error parsing response
Response(ParseError),
/// Http error
#[display(fmt = "{}", _0)]
Http(HttpError),
/// Http2 error
#[display(fmt = "{}", _0)]
H2(h2::Error),
/// Response took too long
#[display(fmt = "Timeout while waiting for response")]
Timeout,
/// Tunnels are not supported for http2 connection
#[display(fmt = "Tunnels are not supported for http2 connection")]
TunnelNotSupported,
/// Error sending request body
Body(Error),
}
impl std::error::Error for SendRequestError {}
/// Convert `SendRequestError` to a server `Response`
impl ResponseError for SendRequestError {
fn status_code(&self) -> StatusCode {
match *self {
SendRequestError::Connect(ConnectError::Timeout) => {
StatusCode::GATEWAY_TIMEOUT
}
SendRequestError::Connect(_) => StatusCode::BAD_REQUEST,
_ => StatusCode::INTERNAL_SERVER_ERROR,
}
}
}
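// Illustrative mapping produced by `status_code()` above, derived from the match arms:
//   SendRequestError::Connect(ConnectError::Timeout) -> 504 Gateway Timeout
//   SendRequestError::Connect(_)                     -> 400 Bad Request
//   any other SendRequestError variant               -> 500 Internal Server Error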
/// A set of errors that can occur during freezing a request
#[derive(Debug, Display, From)]
pub enum FreezeRequestError {
/// Invalid URL
#[display(fmt = "Invalid URL: {}", _0)]
Url(InvalidUrl),
/// Http error
#[display(fmt = "{}", _0)]
Http(HttpError),
}
impl std::error::Error for FreezeRequestError {}
impl From<FreezeRequestError> for SendRequestError {
fn from(e: FreezeRequestError) -> Self {
match e {
FreezeRequestError::Url(e) => e.into(),
FreezeRequestError::Http(e) => e.into(),
}
}
}

View File

@ -0,0 +1,298 @@
use std::io::Write;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{io, mem, time};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use bytes::buf::BufMutExt;
use bytes::{Bytes, BytesMut};
use futures_core::Stream;
use futures_util::future::poll_fn;
use futures_util::{pin_mut, SinkExt, StreamExt};
use crate::error::PayloadError;
use crate::h1;
use crate::header::HeaderMap;
use crate::http::header::{IntoHeaderValue, HOST};
use crate::message::{RequestHeadType, ResponseHead};
use crate::payload::{Payload, PayloadStream};
use super::connection::{ConnectionLifetime, ConnectionType, IoConnection};
use super::error::{ConnectError, SendRequestError};
use super::pool::Acquired;
use crate::body::{BodySize, MessageBody};
pub(crate) async fn send_request<T, B>(
io: T,
mut head: RequestHeadType,
body: B,
created: time::Instant,
pool: Option<Acquired<T>>,
) -> Result<(ResponseHead, Payload), SendRequestError>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
B: MessageBody,
{
// set request host header
if !head.as_ref().headers.contains_key(HOST)
&& !head.extra_headers().iter().any(|h| h.contains_key(HOST))
{
if let Some(host) = head.as_ref().uri.host() {
let mut wrt = BytesMut::with_capacity(host.len() + 5).writer();
let _ = match head.as_ref().uri.port_u16() {
None | Some(80) | Some(443) => write!(wrt, "{}", host),
Some(port) => write!(wrt, "{}:{}", host, port),
};
match wrt.get_mut().split().freeze().try_into() {
Ok(value) => match head {
RequestHeadType::Owned(ref mut head) => {
head.headers.insert(HOST, value)
}
RequestHeadType::Rc(_, ref mut extra_headers) => {
let headers = extra_headers.get_or_insert(HeaderMap::new());
headers.insert(HOST, value)
}
},
Err(e) => log::error!("Can not set HOST header {}", e),
}
}
}
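// Illustrative results of the Host header logic above, for hypothetical request URIs:
//   http://example.com/index.html      -> host: "example.com"      (default port 80 omitted)
//   https://example.com/index.html     -> host: "example.com"      (default port 443 omitted)
//   http://example.com:8080/index.html -> host: "example.com:8080" (non-default port kept)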
let io = H1Connection {
created,
pool,
io: Some(io),
};
// create Framed and send request
let mut framed_inner = Framed::new(io, h1::ClientCodec::default());
framed_inner.send((head, body.size()).into()).await?;
// send request body
match body.size() {
BodySize::None | BodySize::Empty | BodySize::Sized(0) => (),
_ => send_body(body, Pin::new(&mut framed_inner)).await?,
};
// read response and init read body
let res = Pin::new(&mut framed_inner).into_future().await;
let (head, framed) = if let (Some(result), framed) = res {
let item = result.map_err(SendRequestError::from)?;
(item, framed)
} else {
return Err(SendRequestError::from(ConnectError::Disconnected));
};
match framed.codec_ref().message_type() {
h1::MessageType::None => {
let force_close = !framed.codec_ref().keepalive();
release_connection(framed, force_close);
Ok((head, Payload::None))
}
_ => {
let pl: PayloadStream = PlStream::new(framed_inner).boxed_local();
Ok((head, pl.into()))
}
}
}
pub(crate) async fn open_tunnel<T>(
io: T,
head: RequestHeadType,
) -> Result<(ResponseHead, Framed<T, h1::ClientCodec>), SendRequestError>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
// create Framed and send request
let mut framed = Framed::new(io, h1::ClientCodec::default());
framed.send((head, BodySize::None).into()).await?;
// read response
if let (Some(result), framed) = framed.into_future().await {
let head = result.map_err(SendRequestError::from)?;
Ok((head, framed))
} else {
Err(SendRequestError::from(ConnectError::Disconnected))
}
}
/// Send the request body to the peer.
pub(crate) async fn send_body<T, B>(
body: B,
mut framed: Pin<&mut Framed<T, h1::ClientCodec>>,
) -> Result<(), SendRequestError>
where
T: ConnectionLifetime + Unpin,
B: MessageBody,
{
pin_mut!(body);
let mut eof = false;
while !eof {
while !eof && !framed.as_ref().is_write_buf_full() {
match poll_fn(|cx| body.as_mut().poll_next(cx)).await {
Some(result) => {
framed.as_mut().write(h1::Message::Chunk(Some(result?)))?;
}
None => {
eof = true;
framed.as_mut().write(h1::Message::Chunk(None))?;
}
}
}
if !framed.as_ref().is_write_buf_empty() {
poll_fn(|cx| match framed.as_mut().flush(cx) {
Poll::Ready(Ok(_)) => Poll::Ready(Ok(())),
Poll::Ready(Err(err)) => Poll::Ready(Err(err)),
Poll::Pending => {
if !framed.as_ref().is_write_buf_full() {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
})
.await?;
}
}
SinkExt::flush(Pin::into_inner(framed)).await?;
Ok(())
}
#[doc(hidden)]
/// HTTP client connection
pub struct H1Connection<T> {
/// T should be `Unpin`
io: Option<T>,
created: time::Instant,
pool: Option<Acquired<T>>,
}
impl<T> ConnectionLifetime for H1Connection<T>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
/// Close connection
fn close(mut self: Pin<&mut Self>) {
if let Some(mut pool) = self.pool.take() {
if let Some(io) = self.io.take() {
pool.close(IoConnection::new(
ConnectionType::H1(io),
self.created,
None,
));
}
}
}
/// Release this connection to the connection pool
fn release(mut self: Pin<&mut Self>) {
if let Some(mut pool) = self.pool.take() {
if let Some(io) = self.io.take() {
pool.release(IoConnection::new(
ConnectionType::H1(io),
self.created,
None,
));
}
}
}
}
impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncRead for H1Connection<T> {
unsafe fn prepare_uninitialized_buffer(
&self,
buf: &mut [mem::MaybeUninit<u8>],
) -> bool {
self.io.as_ref().unwrap().prepare_uninitialized_buffer(buf)
}
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.io.as_mut().unwrap()).poll_read(cx, buf)
}
}
impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncWrite for H1Connection<T> {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.io.as_mut().unwrap()).poll_write(cx, buf)
}
fn poll_flush(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>> {
Pin::new(self.io.as_mut().unwrap()).poll_flush(cx)
}
fn poll_shutdown(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), io::Error>> {
Pin::new(self.io.as_mut().unwrap()).poll_shutdown(cx)
}
}
#[pin_project::pin_project]
pub(crate) struct PlStream<Io> {
#[pin]
framed: Option<Framed<Io, h1::ClientPayloadCodec>>,
}
impl<Io: ConnectionLifetime> PlStream<Io> {
fn new(framed: Framed<Io, h1::ClientCodec>) -> Self {
let framed = framed.into_map_codec(|codec| codec.into_payload_codec());
PlStream {
framed: Some(framed),
}
}
}
impl<Io: ConnectionLifetime> Stream for PlStream<Io> {
type Item = Result<Bytes, PayloadError>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
let mut this = self.project();
match this.framed.as_mut().as_pin_mut().unwrap().next_item(cx)? {
Poll::Pending => Poll::Pending,
Poll::Ready(Some(chunk)) => {
if let Some(chunk) = chunk {
Poll::Ready(Some(Ok(chunk)))
} else {
let framed = this.framed.as_mut().as_pin_mut().unwrap();
let force_close = !framed.codec_ref().keepalive();
release_connection(framed, force_close);
Poll::Ready(None)
}
}
Poll::Ready(None) => Poll::Ready(None),
}
}
}
fn release_connection<T, U>(framed: Pin<&mut Framed<T, U>>, force_close: bool)
where
T: ConnectionLifetime,
{
if !force_close && framed.is_read_buf_empty() && framed.is_write_buf_empty() {
framed.io_pin().release()
} else {
framed.io_pin().close()
}
}

View File

@ -1,45 +1,46 @@
use std::convert::TryFrom;
use std::future::Future; use std::future::Future;
use std::time;
use actix_http::{ use actix_codec::{AsyncRead, AsyncWrite};
body::{BodySize, MessageBody},
header::HeaderMap,
Payload, RequestHeadType, ResponseHead,
};
use actix_utils::future::poll_fn;
use bytes::Bytes; use bytes::Bytes;
use futures_util::future::poll_fn;
use futures_util::pin_mut;
use h2::{ use h2::{
client::{Builder, Connection, SendRequest}, client::{Builder, Connection, SendRequest},
SendStream, SendStream,
}; };
use http::{ use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, TRANSFER_ENCODING};
header::{HeaderValue, CONNECTION, CONTENT_LENGTH, HOST, TRANSFER_ENCODING}, use http::{request::Request, Method, Version};
request::Request,
Method, Version,
};
use log::trace;
use super::{ use crate::body::{BodySize, MessageBody};
config::ConnectorConfig, use crate::header::HeaderMap;
connection::{ConnectionIo, H2Connection}, use crate::message::{RequestHeadType, ResponseHead};
error::SendRequestError, use crate::payload::Payload;
};
use crate::BoxError;
pub(crate) async fn send_request<Io, B>( use super::config::ConnectorConfig;
mut io: H2Connection<Io>, use super::connection::{ConnectionType, IoConnection};
use super::error::SendRequestError;
use super::pool::Acquired;
pub(crate) async fn send_request<T, B>(
mut io: SendRequest<Bytes>,
head: RequestHeadType, head: RequestHeadType,
body: B, body: B,
created: time::Instant,
pool: Option<Acquired<T>>,
) -> Result<(ResponseHead, Payload), SendRequestError> ) -> Result<(ResponseHead, Payload), SendRequestError>
where where
Io: ConnectionIo, T: AsyncRead + AsyncWrite + Unpin + 'static,
B: MessageBody, B: MessageBody,
B::Error: Into<BoxError>,
{ {
trace!("Sending client request: {:?} {:?}", head, body.size()); trace!("Sending client request: {:?} {:?}", head, body.size());
let head_req = head.as_ref().method == Method::HEAD; let head_req = head.as_ref().method == Method::HEAD;
let length = body.size(); let length = body.size();
let eof = matches!(length, BodySize::None | BodySize::Sized(0)); let eof = matches!(
length,
BodySize::None | BodySize::Empty | BodySize::Sized(0)
);
let mut req = Request::new(()); let mut req = Request::new(());
*req.uri_mut() = head.as_ref().uri.clone(); *req.uri_mut() = head.as_ref().uri.clone();
@ -52,26 +53,17 @@ where
// Content length // Content length
let _ = match length { let _ = match length {
BodySize::None => None, BodySize::None => None,
BodySize::Sized(0) => {
#[allow(clippy::declare_interior_mutable_const)]
const HV_ZERO: HeaderValue = HeaderValue::from_static("0");
req.headers_mut().insert(CONTENT_LENGTH, HV_ZERO)
}
BodySize::Sized(len) => {
let mut buf = itoa::Buffer::new();
req.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::from_str(buf.format(len)).unwrap(),
)
}
BodySize::Stream => { BodySize::Stream => {
skip_len = false; skip_len = false;
None None
} }
BodySize::Empty => req
.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static("0")),
BodySize::Sized(len) => req.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
}; };
// Extracting extra headers from RequestHeadType. HeaderMap::new() does not allocate. // Extracting extra headers from RequestHeadType. HeaderMap::new() does not allocate.
@ -94,35 +86,32 @@ where
// copy headers // copy headers
for (key, value) in headers { for (key, value) in headers {
match *key { match *key {
// TODO: consider skipping other headers according to: CONNECTION | TRANSFER_ENCODING => continue, // http2 specific
// https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2
// omit HTTP/1.x only headers
CONNECTION | TRANSFER_ENCODING | HOST => continue,
CONTENT_LENGTH if skip_len => continue, CONTENT_LENGTH if skip_len => continue,
// DATE => has_date = true, // DATE => has_date = true,
_ => {} _ => (),
} }
req.headers_mut().append(key, value.clone()); req.headers_mut().append(key, value.clone());
} }
let res = poll_fn(|cx| io.poll_ready(cx)).await; let res = poll_fn(|cx| io.poll_ready(cx)).await;
if let Err(err) = res { if let Err(e) = res {
io.on_release(err.is_io()); release(io, pool, created, e.is_io());
return Err(SendRequestError::from(err)); return Err(SendRequestError::from(e));
} }
let resp = match io.send_request(req, eof) { let resp = match io.send_request(req, eof) {
Ok((fut, send)) => { Ok((fut, send)) => {
io.on_release(false); release(io, pool, created, false);
if !eof { if !eof {
send_body(body, send).await?; send_body(body, send).await?;
} }
fut.await.map_err(SendRequestError::from)? fut.await.map_err(SendRequestError::from)?
} }
Err(err) => { Err(e) => {
io.on_release(err.is_io()); release(io, pool, created, e.is_io());
return Err(err.into()); return Err(e.into());
} }
}; };
@ -135,15 +124,12 @@ where
Ok((head, payload)) Ok((head, payload))
} }
async fn send_body<B>(body: B, mut send: SendStream<Bytes>) -> Result<(), SendRequestError> async fn send_body<B: MessageBody>(
where body: B,
B: MessageBody, mut send: SendStream<Bytes>,
B::Error: Into<BoxError>, ) -> Result<(), SendRequestError> {
{
let mut buf = None; let mut buf = None;
pin_mut!(body);
actix_rt::pin!(body);
loop { loop {
if buf.is_none() { if buf.is_none() {
match poll_fn(|cx| body.as_mut().poll_next(cx)).await { match poll_fn(|cx| body.as_mut().poll_next(cx)).await {
@ -151,10 +137,10 @@ where
send.reserve_capacity(b.len()); send.reserve_capacity(b.len());
buf = Some(b); buf = Some(b);
} }
Some(Err(err)) => return Err(SendRequestError::Body(err.into())), Some(Err(e)) => return Err(e.into()),
None => { None => {
if let Err(err) = send.send_data(Bytes::new(), true) { if let Err(e) = send.send_data(Bytes::new(), true) {
return Err(err.into()); return Err(e.into());
} }
send.reserve_capacity(0); send.reserve_capacity(0);
return Ok(()); return Ok(());
@ -169,25 +155,45 @@ where
let len = b.len(); let len = b.len();
let bytes = b.split_to(std::cmp::min(cap, len)); let bytes = b.split_to(std::cmp::min(cap, len));
if let Err(err) = send.send_data(bytes, false) { if let Err(e) = send.send_data(bytes, false) {
return Err(err.into()); return Err(e.into());
}
if !b.is_empty() {
send.reserve_capacity(b.len());
} else { } else {
buf = None; if !b.is_empty() {
send.reserve_capacity(b.len());
} else {
buf = None;
}
continue;
} }
continue;
} }
Some(Err(err)) => return Err(err.into()), Some(Err(e)) => return Err(e.into()),
} }
} }
} }
pub(crate) fn handshake<Io: ConnectionIo>( // release SendRequest object
fn release<T: AsyncRead + AsyncWrite + Unpin + 'static>(
io: SendRequest<Bytes>,
pool: Option<Acquired<T>>,
created: time::Instant,
close: bool,
) {
if let Some(mut pool) = pool {
if close {
pool.close(IoConnection::new(ConnectionType::H2(io), created, None));
} else {
pool.release(IoConnection::new(ConnectionType::H2(io), created, None));
}
}
}
pub(crate) fn handshake<Io>(
io: Io, io: Io,
config: &ConnectorConfig, config: &ConnectorConfig,
) -> impl Future<Output = Result<(SendRequest<Bytes>, Connection<Io, Bytes>), h2::Error>> { ) -> impl Future<Output = Result<(SendRequest<Bytes>, Connection<Io, Bytes>), h2::Error>>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
let mut builder = Builder::new(); let mut builder = Builder::new();
builder builder
.initial_window_size(config.stream_window_size) .initial_window_size(config.stream_window_size)

View File

@ -0,0 +1,21 @@
//! HTTP client API
use http::Uri;
mod config;
mod connection;
mod connector;
mod error;
mod h1proto;
mod h2proto;
mod pool;
pub use self::connection::Connection;
pub use self::connector::Connector;
pub use self::error::{ConnectError, FreezeRequestError, InvalidUrl, SendRequestError};
pub use self::pool::Protocol;
#[derive(Clone)]
pub struct Connect {
pub uri: Uri,
pub addr: Option<std::net::SocketAddr>,
}

View File

@ -0,0 +1,644 @@
use std::cell::RefCell;
use std::collections::VecDeque;
use std::future::Future;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::time::{delay_for, Delay};
use actix_service::Service;
use actix_utils::{oneshot, task::LocalWaker};
use bytes::Bytes;
use futures_util::future::{poll_fn, FutureExt, LocalBoxFuture};
use fxhash::FxHashMap;
use h2::client::{Connection, SendRequest};
use http::uri::Authority;
use indexmap::IndexSet;
use pin_project::pin_project;
use slab::Slab;
use super::config::ConnectorConfig;
use super::connection::{ConnectionType, IoConnection};
use super::error::ConnectError;
use super::h2proto::handshake;
use super::Connect;
#[derive(Clone, Copy, PartialEq)]
/// Protocol version
pub enum Protocol {
Http1,
Http2,
}
#[derive(Hash, Eq, PartialEq, Clone, Debug)]
pub(crate) struct Key {
authority: Authority,
}
impl From<Authority> for Key {
fn from(authority: Authority) -> Key {
Key { authority }
}
}
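// Illustrative examples (hypothetical URIs): connections are pooled per authority, so
//   https://example.com:8443/a  and  https://example.com:8443/b
// map to the same Key { authority: "example.com:8443" } and share pooled connections,
// while a different host or port produces a distinct key and a separate pool entry.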
/// Connections pool
pub(crate) struct ConnectionPool<T, Io: 'static>(Rc<RefCell<T>>, Rc<RefCell<Inner<Io>>>);
impl<T, Io> ConnectionPool<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
pub(crate) fn new(connector: T, config: ConnectorConfig) -> Self {
let connector_rc = Rc::new(RefCell::new(connector));
let inner_rc = Rc::new(RefCell::new(Inner {
config,
acquired: 0,
waiters: Slab::new(),
waiters_queue: IndexSet::new(),
available: FxHashMap::default(),
waker: LocalWaker::new(),
}));
// start support future
actix_rt::spawn(ConnectorPoolSupport {
connector: Rc::clone(&connector_rc),
inner: Rc::clone(&inner_rc),
});
ConnectionPool(connector_rc, inner_rc)
}
}
impl<T, Io> Clone for ConnectionPool<T, Io>
where
Io: 'static,
{
fn clone(&self) -> Self {
ConnectionPool(self.0.clone(), self.1.clone())
}
}
impl<T, Io> Drop for ConnectionPool<T, Io> {
fn drop(&mut self) {
// wake up the ConnectorPoolSupport when dropping so it can exit properly.
self.1.borrow().waker.wake();
}
}
impl<T, Io> Service for ConnectionPool<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
type Request = Connect;
type Response = IoConnection<Io>;
type Error = ConnectError;
type Future = LocalBoxFuture<'static, Result<IoConnection<Io>, ConnectError>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.poll_ready(cx)
}
fn call(&mut self, req: Connect) -> Self::Future {
let mut connector = self.0.clone();
let inner = self.1.clone();
let fut = async move {
let key = if let Some(authority) = req.uri.authority() {
authority.clone().into()
} else {
return Err(ConnectError::Unresolved);
};
// acquire connection
match poll_fn(|cx| Poll::Ready(inner.borrow_mut().acquire(&key, cx))).await {
Acquire::Acquired(io, created) => {
// use existing connection
Ok(IoConnection::new(
io,
created,
Some(Acquired(key, Some(inner))),
))
}
Acquire::Available => {
// open tcp connection
let (io, proto) = connector.call(req).await?;
let config = inner.borrow().config.clone();
let guard = OpenGuard::new(key, inner);
if proto == Protocol::Http1 {
Ok(IoConnection::new(
ConnectionType::H1(io),
Instant::now(),
Some(guard.consume()),
))
} else {
let (snd, connection) = handshake(io, &config).await?;
actix_rt::spawn(connection.map(|_| ()));
Ok(IoConnection::new(
ConnectionType::H2(snd),
Instant::now(),
Some(guard.consume()),
))
}
}
_ => {
// connection is not available, wait
let (rx, token) = inner.borrow_mut().wait_for(req);
let guard = WaiterGuard::new(key, token, inner);
let res = match rx.await {
Err(_) => Err(ConnectError::Disconnected),
Ok(res) => res,
};
guard.consume();
res
}
}
};
fut.boxed_local()
}
}
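// Editor's note: summary of the acquisition flow implemented in `call()` above:
//   Acquire::Acquired(io, created) -> a pooled connection was found and is reused as-is;
//   Acquire::Available             -> nothing pooled but the limit is not reached, so a new
//                                     TCP connection is opened (plus an HTTP/2 handshake
//                                     when the connector reports Protocol::Http2);
//   Acquire::NotAvailable          -> the limit is reached; a waiter is registered and the
//                                     future resolves once ConnectorPoolSupport delivers a
//                                     connection through the oneshot channel.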
struct WaiterGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
key: Key,
token: usize,
inner: Option<Rc<RefCell<Inner<Io>>>>,
}
impl<Io> WaiterGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn new(key: Key, token: usize, inner: Rc<RefCell<Inner<Io>>>) -> Self {
Self {
key,
token,
inner: Some(inner),
}
}
fn consume(mut self) {
let _ = self.inner.take();
}
}
impl<Io> Drop for WaiterGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(&mut self) {
if let Some(i) = self.inner.take() {
let mut inner = i.as_ref().borrow_mut();
inner.release_waiter(&self.key, self.token);
inner.check_availability();
}
}
}
struct OpenGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
key: Key,
inner: Option<Rc<RefCell<Inner<Io>>>>,
}
impl<Io> OpenGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn new(key: Key, inner: Rc<RefCell<Inner<Io>>>) -> Self {
Self {
key,
inner: Some(inner),
}
}
fn consume(mut self) -> Acquired<Io> {
Acquired(self.key.clone(), self.inner.take())
}
}
impl<Io> Drop for OpenGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(&mut self) {
if let Some(i) = self.inner.take() {
let mut inner = i.as_ref().borrow_mut();
inner.release();
inner.check_availability();
}
}
}
enum Acquire<T> {
Acquired(ConnectionType<T>, Instant),
Available,
NotAvailable,
}
struct AvailableConnection<Io> {
io: ConnectionType<Io>,
used: Instant,
created: Instant,
}
pub(crate) struct Inner<Io> {
config: ConnectorConfig,
acquired: usize,
available: FxHashMap<Key, VecDeque<AvailableConnection<Io>>>,
waiters: Slab<
Option<(
Connect,
oneshot::Sender<Result<IoConnection<Io>, ConnectError>>,
)>,
>,
waiters_queue: IndexSet<(Key, usize)>,
waker: LocalWaker,
}
impl<Io> Inner<Io> {
fn reserve(&mut self) {
self.acquired += 1;
}
fn release(&mut self) {
self.acquired -= 1;
}
fn release_waiter(&mut self, key: &Key, token: usize) {
self.waiters.remove(token);
let _ = self.waiters_queue.shift_remove(&(key.clone(), token));
}
}
impl<Io> Inner<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
/// Connection is not available; register a waiter for one.
fn wait_for(
&mut self,
connect: Connect,
) -> (
oneshot::Receiver<Result<IoConnection<Io>, ConnectError>>,
usize,
) {
let (tx, rx) = oneshot::channel();
let key: Key = connect.uri.authority().unwrap().clone().into();
let entry = self.waiters.vacant_entry();
let token = entry.key();
entry.insert(Some((connect, tx)));
assert!(self.waiters_queue.insert((key, token)));
(rx, token)
}
fn acquire(&mut self, key: &Key, cx: &mut Context<'_>) -> Acquire<Io> {
// check limits
if self.config.limit > 0 && self.acquired >= self.config.limit {
return Acquire::NotAvailable;
}
self.reserve();
// check if open connection is available
// cleanup stale connections at the same time
if let Some(ref mut connections) = self.available.get_mut(key) {
let now = Instant::now();
while let Some(conn) = connections.pop_back() {
// check if it is still usable
if (now - conn.used) > self.config.conn_keep_alive
|| (now - conn.created) > self.config.conn_lifetime
{
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = conn.io {
actix_rt::spawn(CloseConnection::new(io, timeout))
}
}
} else {
let mut io = conn.io;
let mut buf = [0; 2];
if let ConnectionType::H1(ref mut s) = io {
match Pin::new(s).poll_read(cx, &mut buf) {
Poll::Pending => (),
Poll::Ready(Ok(n)) if n > 0 => {
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = io {
actix_rt::spawn(CloseConnection::new(
io, timeout,
))
}
}
continue;
}
_ => continue,
}
}
return Acquire::Acquired(io, conn.created);
}
}
}
Acquire::Available
}
fn release_conn(&mut self, key: &Key, io: ConnectionType<Io>, created: Instant) {
self.acquired -= 1;
self.available
.entry(key.clone())
.or_insert_with(VecDeque::new)
.push_back(AvailableConnection {
io,
created,
used: Instant::now(),
});
self.check_availability();
}
fn release_close(&mut self, io: ConnectionType<Io>) {
self.acquired -= 1;
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = io {
actix_rt::spawn(CloseConnection::new(io, timeout))
}
}
self.check_availability();
}
fn check_availability(&self) {
if !self.waiters_queue.is_empty() && self.acquired < self.config.limit {
self.waker.wake();
}
}
}
struct CloseConnection<T> {
io: T,
timeout: Delay,
}
impl<T> CloseConnection<T>
where
T: AsyncWrite + Unpin,
{
fn new(io: T, timeout: Duration) -> Self {
CloseConnection {
io,
timeout: delay_for(timeout),
}
}
}
impl<T> Future for CloseConnection<T>
where
T: AsyncWrite + Unpin,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
let this = self.get_mut();
match Pin::new(&mut this.timeout).poll(cx) {
Poll::Ready(_) => Poll::Ready(()),
Poll::Pending => match Pin::new(&mut this.io).poll_shutdown(cx) {
Poll::Ready(_) => Poll::Ready(()),
Poll::Pending => Poll::Pending,
},
}
}
}
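// Note: the poll above drives `poll_shutdown()` on the connection until it completes, but
// gives up as soon as the `timeout` delay fires, whichever happens first.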
#[pin_project]
struct ConnectorPoolSupport<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
connector: T,
inner: Rc<RefCell<Inner<Io>>>,
}
impl<T, Io> Future for ConnectorPoolSupport<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>,
T::Future: 'static,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
if Rc::strong_count(this.inner) == 1 {
// If we are the last copy of Inner<Io>, it means the ConnectionPool is already gone
// and we are safe to exit.
return Poll::Ready(());
}
let mut inner = this.inner.borrow_mut();
inner.waker.register(cx.waker());
// check waiters
loop {
let (key, token) = {
if let Some((key, token)) = inner.waiters_queue.get_index(0) {
(key.clone(), *token)
} else {
break;
}
};
if inner.waiters.get(token).unwrap().is_none() {
continue;
}
match inner.acquire(&key, cx) {
Acquire::NotAvailable => break,
Acquire::Acquired(io, created) => {
let tx = inner.waiters.get_mut(token).unwrap().take().unwrap().1;
if let Err(conn) = tx.send(Ok(IoConnection::new(
io,
created,
Some(Acquired(key.clone(), Some(this.inner.clone()))),
))) {
let (io, created) = conn.unwrap().into_inner();
inner.release_conn(&key, io, created);
}
}
Acquire::Available => {
let (connect, tx) =
inner.waiters.get_mut(token).unwrap().take().unwrap();
OpenWaitingConnection::spawn(
key.clone(),
tx,
this.inner.clone(),
this.connector.call(connect),
inner.config.clone(),
);
}
}
let _ = inner.waiters_queue.swap_remove_index(0);
}
Poll::Pending
}
}
#[pin_project::pin_project(PinnedDrop)]
struct OpenWaitingConnection<F, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
#[pin]
fut: F,
key: Key,
h2: Option<
LocalBoxFuture<
'static,
Result<(SendRequest<Bytes>, Connection<Io, Bytes>), h2::Error>,
>,
>,
rx: Option<oneshot::Sender<Result<IoConnection<Io>, ConnectError>>>,
inner: Option<Rc<RefCell<Inner<Io>>>>,
config: ConnectorConfig,
}
impl<F, Io> OpenWaitingConnection<F, Io>
where
F: Future<Output = Result<(Io, Protocol), ConnectError>> + 'static,
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn spawn(
key: Key,
rx: oneshot::Sender<Result<IoConnection<Io>, ConnectError>>,
inner: Rc<RefCell<Inner<Io>>>,
fut: F,
config: ConnectorConfig,
) {
actix_rt::spawn(OpenWaitingConnection {
key,
fut,
h2: None,
rx: Some(rx),
inner: Some(inner),
config,
})
}
}
#[pin_project::pinned_drop]
impl<F, Io> PinnedDrop for OpenWaitingConnection<F, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(self: Pin<&mut Self>) {
if let Some(inner) = self.project().inner.take() {
let mut inner = inner.as_ref().borrow_mut();
inner.release();
inner.check_availability();
}
}
}
impl<F, Io> Future for OpenWaitingConnection<F, Io>
where
F: Future<Output = Result<(Io, Protocol), ConnectError>>,
Io: AsyncRead + AsyncWrite + Unpin,
{
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().project();
if let Some(ref mut h2) = this.h2 {
return match Pin::new(h2).poll(cx) {
Poll::Ready(Ok((snd, connection))) => {
actix_rt::spawn(connection.map(|_| ()));
let rx = this.rx.take().unwrap();
let _ = rx.send(Ok(IoConnection::new(
ConnectionType::H2(snd),
Instant::now(),
Some(Acquired(this.key.clone(), this.inner.take())),
)));
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
Poll::Ready(Err(err)) => {
let _ = this.inner.take();
if let Some(rx) = this.rx.take() {
let _ = rx.send(Err(ConnectError::H2(err)));
}
Poll::Ready(())
}
};
}
match this.fut.poll(cx) {
Poll::Ready(Err(err)) => {
let _ = this.inner.take();
if let Some(rx) = this.rx.take() {
let _ = rx.send(Err(err));
}
Poll::Ready(())
}
Poll::Ready(Ok((io, proto))) => {
if proto == Protocol::Http1 {
let rx = this.rx.take().unwrap();
let _ = rx.send(Ok(IoConnection::new(
ConnectionType::H1(io),
Instant::now(),
Some(Acquired(this.key.clone(), this.inner.take())),
)));
Poll::Ready(())
} else {
*this.h2 = Some(handshake(io, this.config).boxed_local());
self.poll(cx)
}
}
Poll::Pending => Poll::Pending,
}
}
}
pub(crate) struct Acquired<T>(Key, Option<Rc<RefCell<Inner<T>>>>);
impl<T> Acquired<T>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
pub(crate) fn close(&mut self, conn: IoConnection<T>) {
if let Some(inner) = self.1.take() {
let (io, _) = conn.into_inner();
inner.as_ref().borrow_mut().release_close(io);
}
}
pub(crate) fn release(&mut self, conn: IoConnection<T>) {
if let Some(inner) = self.1.take() {
let (io, created) = conn.into_inner();
inner
.as_ref()
.borrow_mut()
.release_conn(&self.0, io, created);
}
}
}
impl<T> Drop for Acquired<T> {
fn drop(&mut self) {
if let Some(inner) = self.1.take() {
inner.as_ref().borrow_mut().release();
}
}
}
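// Editor's note: lifecycle of the `Acquired` guard above:
//   release(conn) -> returns the connection to the pool for reuse;
//   close(conn)   -> shuts the connection down (H1 sockets get a graceful CloseConnection task);
//   plain drop    -> neither reuses nor explicitly closes, but still decrements the pool's
//                    `acquired` counter via `Inner::release()`.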

View File

@ -0,0 +1,40 @@
use std::cell::RefCell;
use std::rc::Rc;
use std::task::{Context, Poll};
use actix_service::Service;
#[doc(hidden)]
/// Service that allows turning a non-`Clone` service into a service with a `Clone` impl.
///
/// # Panics
/// `CloneableService` might panic with some creative use of thread local storage.
/// See https://github.com/actix/actix-web/issues/1295 for an example.
pub(crate) struct CloneableService<T: Service>(Rc<RefCell<T>>);
impl<T: Service> CloneableService<T> {
pub(crate) fn new(service: T) -> Self {
Self(Rc::new(RefCell::new(service)))
}
}
impl<T: Service> Clone for CloneableService<T> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
impl<T: Service> Service for CloneableService<T> {
type Request = T::Request;
type Response = T::Response;
type Error = T::Error;
type Future = T::Future;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.borrow_mut().poll_ready(cx)
}
fn call(&mut self, req: T::Request) -> Self::Future {
self.0.borrow_mut().call(req)
}
}
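// Editor's note: minimal usage sketch (not part of this file). `my_service` is a
// hypothetical non-`Clone` Service; both handles drive the same inner service
// through the shared `Rc<RefCell<_>>`.
//
//     let svc = CloneableService::new(my_service);
//     let svc2 = svc.clone(); // cheap: clones the Rc handle, not the service itself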

View File

@ -1,206 +1,301 @@
use std::{ use std::cell::Cell;
net, use std::fmt::Write;
rc::Rc, use std::rc::Rc;
time::{Duration, Instant}, use std::time::Duration;
}; use std::{fmt, net};
use actix_rt::time::{delay_for, delay_until, Delay, Instant};
use bytes::BytesMut; use bytes::BytesMut;
use futures_util::{future, FutureExt};
use time::OffsetDateTime;
use crate::{date::DateService, KeepAlive}; // "Sun, 06 Nov 1994 08:49:37 GMT".len()
const DATE_VALUE_LENGTH: usize = 29;
/// HTTP service configuration. #[derive(Debug, PartialEq, Clone, Copy)]
#[derive(Debug, Clone)] /// Server keep-alive setting
pub enum KeepAlive {
/// Keep alive in seconds
Timeout(usize),
/// Rely on OS to shutdown tcp connection
Os,
/// Disabled
Disabled,
}
impl From<usize> for KeepAlive {
fn from(keepalive: usize) -> Self {
KeepAlive::Timeout(keepalive)
}
}
impl From<Option<usize>> for KeepAlive {
fn from(keepalive: Option<usize>) -> Self {
if let Some(keepalive) = keepalive {
KeepAlive::Timeout(keepalive)
} else {
KeepAlive::Disabled
}
}
}
/// Http service configuration
pub struct ServiceConfig(Rc<Inner>); pub struct ServiceConfig(Rc<Inner>);
#[derive(Debug)]
struct Inner { struct Inner {
keep_alive: KeepAlive, keep_alive: Option<Duration>,
client_request_timeout: Duration, client_timeout: u64,
client_disconnect_timeout: Duration, client_disconnect: u64,
ka_enabled: bool,
secure: bool, secure: bool,
local_addr: Option<std::net::SocketAddr>, local_addr: Option<std::net::SocketAddr>,
date_service: DateService, timer: DateService,
}
impl Clone for ServiceConfig {
fn clone(&self) -> Self {
ServiceConfig(self.0.clone())
}
} }
impl Default for ServiceConfig { impl Default for ServiceConfig {
fn default() -> Self { fn default() -> Self {
Self::new( Self::new(KeepAlive::Timeout(5), 0, 0, false, None)
KeepAlive::default(),
Duration::from_secs(5),
Duration::ZERO,
false,
None,
)
} }
} }
impl ServiceConfig { impl ServiceConfig {
/// Create instance of `ServiceConfig`. /// Create instance of `ServiceConfig`
pub fn new( pub fn new(
keep_alive: KeepAlive, keep_alive: KeepAlive,
client_request_timeout: Duration, client_timeout: u64,
client_disconnect_timeout: Duration, client_disconnect: u64,
secure: bool, secure: bool,
local_addr: Option<net::SocketAddr>, local_addr: Option<net::SocketAddr>,
) -> ServiceConfig { ) -> ServiceConfig {
let (keep_alive, ka_enabled) = match keep_alive {
KeepAlive::Timeout(val) => (val as u64, true),
KeepAlive::Os => (0, true),
KeepAlive::Disabled => (0, false),
};
let keep_alive = if ka_enabled && keep_alive > 0 {
Some(Duration::from_secs(keep_alive))
} else {
None
};
ServiceConfig(Rc::new(Inner { ServiceConfig(Rc::new(Inner {
keep_alive: keep_alive.normalize(), keep_alive,
client_request_timeout, ka_enabled,
client_disconnect_timeout, client_timeout,
client_disconnect,
secure, secure,
local_addr, local_addr,
date_service: DateService::new(), timer: DateService::new(),
})) }))
} }
/// Returns `true` if connection is secure (i.e., using TLS / HTTPS).
#[inline] #[inline]
/// Returns true if connection is secure(https)
pub fn secure(&self) -> bool { pub fn secure(&self) -> bool {
self.0.secure self.0.secure
} }
/// Returns the local address that this server is bound to.
///
/// Returns `None` for connections via UDS (Unix Domain Socket).
#[inline] #[inline]
/// Returns the local address that this server is bound to.
pub fn local_addr(&self) -> Option<net::SocketAddr> { pub fn local_addr(&self) -> Option<net::SocketAddr> {
self.0.local_addr self.0.local_addr
} }
/// Connection keep-alive setting.
#[inline] #[inline]
pub fn keep_alive(&self) -> KeepAlive { /// Keep alive duration if configured.
pub fn keep_alive(&self) -> Option<Duration> {
self.0.keep_alive self.0.keep_alive
} }
/// Creates a time object representing the deadline for this connection's keep-alive period, if #[inline]
/// enabled. /// Return state of connection keep-alive functionality
/// pub fn keep_alive_enabled(&self) -> bool {
/// When [`KeepAlive::Os`] or [`KeepAlive::Disabled`] is set, this will return `None`. self.0.ka_enabled
pub fn keep_alive_deadline(&self) -> Option<Instant> { }
match self.keep_alive() {
KeepAlive::Timeout(dur) => Some(self.now() + dur), #[inline]
KeepAlive::Os => None, /// Client timeout for first request.
KeepAlive::Disabled => None, pub fn client_timer(&self) -> Option<Delay> {
let delay_time = self.0.client_timeout;
if delay_time != 0 {
Some(delay_until(
self.0.timer.now() + Duration::from_millis(delay_time),
))
} else {
None
} }
} }
/// Creates a time object representing the deadline for the client to finish sending the head of /// Client timeout for first request.
/// its first request. pub fn client_timer_expire(&self) -> Option<Instant> {
/// let delay = self.0.client_timeout;
/// Returns `None` if this `ServiceConfig was` constructed with `client_request_timeout: 0`. if delay != 0 {
pub fn client_request_deadline(&self) -> Option<Instant> { Some(self.0.timer.now() + Duration::from_millis(delay))
let timeout = self.0.client_request_timeout; } else {
(timeout != Duration::ZERO).then(|| self.now() + timeout) None
}
} }
/// Creates a time object representing the deadline for the client to disconnect. /// Client disconnect timer
pub fn client_disconnect_deadline(&self) -> Option<Instant> { pub fn client_disconnect_timer(&self) -> Option<Instant> {
let timeout = self.0.client_disconnect_timeout; let delay = self.0.client_disconnect;
(timeout != Duration::ZERO).then(|| self.now() + timeout) if delay != 0 {
Some(self.0.timer.now() + Duration::from_millis(delay))
} else {
None
}
} }
#[inline]
/// Return keep-alive timer delay is configured.
pub fn keep_alive_timer(&self) -> Option<Delay> {
if let Some(ka) = self.0.keep_alive {
Some(delay_until(self.0.timer.now() + ka))
} else {
None
}
}
/// Keep-alive expire time
pub fn keep_alive_expire(&self) -> Option<Instant> {
if let Some(ka) = self.0.keep_alive {
Some(self.0.timer.now() + ka)
} else {
None
}
}
#[inline]
pub(crate) fn now(&self) -> Instant { pub(crate) fn now(&self) -> Instant {
self.0.date_service.now() self.0.timer.now()
} }
/// Writes date header to `dst` buffer.
///
/// Low-level method that utilizes the built-in efficient date service, requiring fewer syscalls
/// than normal. Note that a CRLF (`\r\n`) is included in what is written.
#[doc(hidden)] #[doc(hidden)]
pub fn write_date_header(&self, dst: &mut BytesMut, camel_case: bool) { pub fn set_date(&self, dst: &mut BytesMut) {
let mut buf: [u8; 37] = [0; 37]; let mut buf: [u8; 39] = [0; 39];
buf[..6].copy_from_slice(b"date: ");
buf[..6].copy_from_slice(if camel_case { b"Date: " } else { b"date: " });
self.0 self.0
.date_service .timer
.with_date(|date| buf[6..35].copy_from_slice(&date.bytes)); .set_date(|date| buf[6..35].copy_from_slice(&date.bytes));
buf[35..].copy_from_slice(b"\r\n\r\n");
buf[35..].copy_from_slice(b"\r\n");
dst.extend_from_slice(&buf); dst.extend_from_slice(&buf);
} }
#[allow(unused)] // used with `http2` feature flag pub(crate) fn set_date_header(&self, dst: &mut BytesMut) {
pub(crate) fn write_date_header_value(&self, dst: &mut BytesMut) {
self.0 self.0
.date_service .timer
.with_date(|date| dst.extend_from_slice(&date.bytes)); .set_date(|date| dst.extend_from_slice(&date.bytes));
}
}
#[derive(Copy, Clone)]
struct Date {
bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
}
impl Date {
fn new() -> Date {
let mut date = Date {
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
};
date.update();
date
}
fn update(&mut self) {
self.pos = 0;
write!(
self,
"{}",
OffsetDateTime::now_utc().format("%a, %d %b %Y %H:%M:%S GMT")
)
.unwrap();
}
}
impl fmt::Write for Date {
fn write_str(&mut self, s: &str) -> fmt::Result {
let len = s.len();
self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
self.pos += len;
Ok(())
}
}
#[derive(Clone)]
struct DateService(Rc<DateServiceInner>);
struct DateServiceInner {
current: Cell<Option<(Date, Instant)>>,
}
impl DateServiceInner {
fn new() -> Self {
DateServiceInner {
current: Cell::new(None),
}
}
fn reset(&self) {
self.current.take();
}
fn update(&self) {
let now = Instant::now();
let date = Date::new();
self.current.set(Some((date, now)));
}
}
impl DateService {
fn new() -> Self {
DateService(Rc::new(DateServiceInner::new()))
}
fn check_date(&self) {
if self.0.current.get().is_none() {
self.0.update();
// periodic date update
let s = self.clone();
actix_rt::spawn(delay_for(Duration::from_millis(500)).then(move |_| {
s.0.reset();
future::ready(())
}));
}
}
fn now(&self) -> Instant {
self.check_date();
self.0.current.get().unwrap().1
}
fn set_date<F: FnMut(&Date)>(&self, mut f: F) {
self.check_date();
f(&self.0.current.get().unwrap().0);
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use actix_rt::{
task::yield_now,
time::{sleep, sleep_until},
};
use memchr::memmem;
use super::*; use super::*;
use crate::{date::DATE_VALUE_LENGTH, notify_on_drop};
#[actix_rt::test] // Test modifying the date from within the closure
async fn test_date_service_update() { // passed to `set_date`
let settings = #[test]
ServiceConfig::new(KeepAlive::Os, Duration::ZERO, Duration::ZERO, false, None); fn test_evil_date() {
let service = DateService::new();
yield_now().await; // Make sure that `check_date` doesn't try to spawn a task
service.0.update();
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); service.set_date(|_| service.0.reset());
settings.write_date_header(&mut buf1, false);
let now1 = settings.now();
sleep_until((Instant::now() + Duration::from_secs(2)).into()).await;
yield_now().await;
let now2 = settings.now();
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf2, false);
assert_ne!(now1, now2);
assert_ne!(buf1, buf2);
drop(settings);
// Ensure the task will drop eventually
let mut times = 0;
while !notify_on_drop::is_dropped() {
sleep(Duration::from_millis(100)).await;
times += 1;
assert!(times < 10, "Timeout waiting for task drop");
}
}
#[actix_rt::test]
async fn test_date_service_drop() {
let service = Rc::new(DateService::new());
// yield so date service have a chance to register the spawned timer update task.
yield_now().await;
let clone1 = service.clone();
let clone2 = service.clone();
let clone3 = service.clone();
drop(clone1);
assert!(!notify_on_drop::is_dropped());
drop(clone2);
assert!(!notify_on_drop::is_dropped());
drop(clone3);
assert!(!notify_on_drop::is_dropped());
drop(service);
// Ensure the task will drop eventually
let mut times = 0;
while !notify_on_drop::is_dropped() {
sleep(Duration::from_millis(100)).await;
times += 1;
assert!(times < 10, "Timeout waiting for task drop");
}
} }
#[test] #[test]
@ -210,27 +305,11 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_date() { async fn test_date() {
let settings = ServiceConfig::default(); let settings = ServiceConfig::new(KeepAlive::Os, 0, 0, false, None);
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf1, false); settings.set_date(&mut buf1);
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf2, false); settings.set_date(&mut buf2);
assert_eq!(buf1, buf2); assert_eq!(buf1, buf2);
} }
#[actix_rt::test]
async fn test_date_camel_case() {
let settings = ServiceConfig::default();
let mut buf = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf, false);
assert!(memmem::find(&buf, b"date:").is_some());
let mut buf = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf, true);
assert!(memmem::find(&buf, b"Date:").is_some());
}
} }

View File

@ -1,92 +0,0 @@
use std::{
cell::Cell,
fmt::{self, Write},
rc::Rc,
time::{Duration, Instant, SystemTime},
};
use actix_rt::{task::JoinHandle, time::interval};
/// "Thu, 01 Jan 1970 00:00:00 GMT".len()
pub(crate) const DATE_VALUE_LENGTH: usize = 29;
#[derive(Clone, Copy)]
pub(crate) struct Date {
pub(crate) bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
}
impl Date {
fn new() -> Date {
let mut date = Date {
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
};
date.update();
date
}
fn update(&mut self) {
self.pos = 0;
write!(self, "{}", httpdate::HttpDate::from(SystemTime::now())).unwrap();
}
}
impl fmt::Write for Date {
fn write_str(&mut self, s: &str) -> fmt::Result {
let len = s.len();
self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
self.pos += len;
Ok(())
}
}
/// Service that updates the cached `Date` and `Instant` periodically, at a 500 millisecond interval.
pub(crate) struct DateService {
current: Rc<Cell<(Date, Instant)>>,
handle: JoinHandle<()>,
}
impl DateService {
pub(crate) fn new() -> Self {
// date and timer shared between DateService and the update async task.
let current = Rc::new(Cell::new((Date::new(), Instant::now())));
let current_clone = Rc::clone(&current);
// spawn an async task that sleeps for 500 ms and updates the current date/timer in a loop.
// the handle is used to stop the task when DateService is dropped.
let handle = actix_rt::spawn(async move {
#[cfg(test)]
let _notify = crate::notify_on_drop::NotifyOnDrop::new();
let mut interval = interval(Duration::from_millis(500));
loop {
let now = interval.tick().await;
let date = Date::new();
current_clone.set((date, now.into_std()));
}
});
DateService { current, handle }
}
pub(crate) fn now(&self) -> Instant {
self.current.get().1
}
pub(crate) fn with_date<F: FnMut(&Date)>(&self, mut f: F) {
f(&self.current.get().0);
}
}
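// Editor's note: illustrative sketch (not part of this file) of how this service is used
// to write Date headers cheaply. `buf` is a hypothetical BytesMut; `now()` returns the
// cached Instant without a syscall and `with_date` exposes the pre-formatted date bytes.
//
//     let date_service = DateService::new();
//     let _cached_now = date_service.now();
//     date_service.with_date(|date| buf.extend_from_slice(&date.bytes));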
impl fmt::Debug for DateService {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DateService").finish_non_exhaustive()
}
}
impl Drop for DateService {
fn drop(&mut self) {
// stop the timer update async task on drop.
self.handle.abort();
}
}

View File

@ -1,36 +1,25 @@
//! Stream decoders. use std::future::Future;
use std::io::{self, Write};
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{ use actix_threadpool::{run, CpuFuture};
future::Future, use brotli2::write::BrotliDecoder;
io::{self, Write as _},
pin::Pin,
task::{Context, Poll},
};
use actix_rt::task::{spawn_blocking, JoinHandle};
use bytes::Bytes; use bytes::Bytes;
#[cfg(feature = "compress-gzip")]
use flate2::write::{GzDecoder, ZlibDecoder}; use flate2::write::{GzDecoder, ZlibDecoder};
use futures_core::{ready, Stream}; use futures_core::{ready, Stream};
#[cfg(feature = "compress-zstd")]
use zstd::stream::write::Decoder as ZstdDecoder;
use crate::{ use super::Writer;
encoding::Writer, use crate::error::PayloadError;
error::PayloadError, use crate::http::header::{ContentEncoding, HeaderMap, CONTENT_ENCODING};
header::{ContentEncoding, HeaderMap, CONTENT_ENCODING},
};
const MAX_CHUNK_SIZE_DECODE_IN_PLACE: usize = 2049; const INPLACE: usize = 2049;
pin_project_lite::pin_project! { pub struct Decoder<S> {
pub struct Decoder<S> { decoder: Option<ContentDecoder>,
decoder: Option<ContentDecoder>, stream: S,
#[pin] eof: bool,
stream: S, fut: Option<CpuFuture<(Option<Bytes>, ContentDecoder), io::Error>>,
eof: bool,
fut: Option<JoinHandle<Result<(Option<Bytes>, ContentDecoder), io::Error>>>,
}
} }
impl<S> Decoder<S> impl<S> Decoder<S>
@ -41,31 +30,17 @@ where
#[inline] #[inline]
pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> { pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> {
let decoder = match encoding { let decoder = match encoding {
#[cfg(feature = "compress-brotli")] ContentEncoding::Br => Some(ContentDecoder::Br(Box::new(
ContentEncoding::Brotli => Some(ContentDecoder::Brotli(Box::new( BrotliDecoder::new(Writer::new()),
brotli::DecompressorWriter::new(Writer::new(), 8_096),
))), ))),
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(
#[cfg(feature = "compress-gzip")] ZlibDecoder::new(Writer::new()),
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(ZlibDecoder::new( ))),
Writer::new(), ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(
)))), GzDecoder::new(Writer::new()),
#[cfg(feature = "compress-gzip")]
ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(GzDecoder::new(
Writer::new(),
)))),
#[cfg(feature = "compress-zstd")]
ContentEncoding::Zstd => Some(ContentDecoder::Zstd(Box::new(
ZstdDecoder::new(Writer::new()).expect(
"Failed to create zstd decoder. This is a bug. \
Please report it to the actix-web repository.",
),
))), ))),
_ => None, _ => None,
}; };
Decoder { Decoder {
decoder, decoder,
stream, stream,
@ -78,11 +53,15 @@ where
#[inline] #[inline]
pub fn from_headers(stream: S, headers: &HeaderMap) -> Decoder<S> { pub fn from_headers(stream: S, headers: &HeaderMap) -> Decoder<S> {
// check content-encoding // check content-encoding
let encoding = headers let encoding = if let Some(enc) = headers.get(&CONTENT_ENCODING) {
.get(&CONTENT_ENCODING) if let Ok(enc) = enc.to_str() {
.and_then(|val| val.to_str().ok()) ContentEncoding::from(enc)
.and_then(|x| x.parse().ok()) } else {
.unwrap_or(ContentEncoding::Identity); ContentEncoding::Identity
}
} else {
ContentEncoding::Identity
};
Self::new(stream, encoding) Self::new(stream, encoding)
} }
@ -90,63 +69,55 @@ where
impl<S> Stream for Decoder<S> impl<S> Stream for Decoder<S>
where where
S: Stream<Item = Result<Bytes, PayloadError>>, S: Stream<Item = Result<Bytes, PayloadError>> + Unpin,
{ {
type Item = Result<Bytes, PayloadError>; type Item = Result<Bytes, PayloadError>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { fn poll_next(
let mut this = self.project(); mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
loop { loop {
if let Some(ref mut fut) = this.fut { if let Some(ref mut fut) = self.fut {
let (chunk, decoder) = ready!(Pin::new(fut).poll(cx)).map_err(|_| { let (chunk, decoder) = match ready!(Pin::new(fut).poll(cx)) {
PayloadError::Io(io::Error::new( Ok(item) => item,
io::ErrorKind::Other, Err(e) => return Poll::Ready(Some(Err(e.into()))),
"Blocking task was cancelled unexpectedly", };
)) self.decoder = Some(decoder);
})??; self.fut.take();
*this.decoder = Some(decoder);
this.fut.take();
if let Some(chunk) = chunk { if let Some(chunk) = chunk {
return Poll::Ready(Some(Ok(chunk))); return Poll::Ready(Some(Ok(chunk)));
} }
} }
if *this.eof { if self.eof {
return Poll::Ready(None); return Poll::Ready(None);
} }
match ready!(this.stream.as_mut().poll_next(cx)) { match Pin::new(&mut self.stream).poll_next(cx) {
Some(Err(err)) => return Poll::Ready(Some(Err(err))), Poll::Ready(Some(Err(err))) => return Poll::Ready(Some(Err(err))),
Poll::Ready(Some(Ok(chunk))) => {
Some(Ok(chunk)) => { if let Some(mut decoder) = self.decoder.take() {
if let Some(mut decoder) = this.decoder.take() { if chunk.len() < INPLACE {
if chunk.len() < MAX_CHUNK_SIZE_DECODE_IN_PLACE {
let chunk = decoder.feed_data(chunk)?; let chunk = decoder.feed_data(chunk)?;
*this.decoder = Some(decoder); self.decoder = Some(decoder);
if let Some(chunk) = chunk { if let Some(chunk) = chunk {
return Poll::Ready(Some(Ok(chunk))); return Poll::Ready(Some(Ok(chunk)));
} }
} else { } else {
*this.fut = Some(spawn_blocking(move || { self.fut = Some(run(move || {
let chunk = decoder.feed_data(chunk)?; let chunk = decoder.feed_data(chunk)?;
Ok((chunk, decoder)) Ok((chunk, decoder))
})); }));
} }
continue; continue;
} else { } else {
return Poll::Ready(Some(Ok(chunk))); return Poll::Ready(Some(Ok(chunk)));
} }
} }
Poll::Ready(None) => {
None => { self.eof = true;
*this.eof = true; return if let Some(mut decoder) = self.decoder.take() {
return if let Some(mut decoder) = this.decoder.take() {
match decoder.feed_eof() { match decoder.feed_eof() {
Ok(Some(res)) => Poll::Ready(Some(Ok(res))), Ok(Some(res)) => Poll::Ready(Some(Ok(res))),
Ok(None) => Poll::Ready(None), Ok(None) => Poll::Ready(None),
@ -156,59 +127,44 @@ where
Poll::Ready(None) Poll::Ready(None)
}; };
} }
Poll::Pending => break,
} }
} }
Poll::Pending
} }
} }
enum ContentDecoder { enum ContentDecoder {
#[cfg(feature = "compress-gzip")]
Deflate(Box<ZlibDecoder<Writer>>), Deflate(Box<ZlibDecoder<Writer>>),
#[cfg(feature = "compress-gzip")]
Gzip(Box<GzDecoder<Writer>>), Gzip(Box<GzDecoder<Writer>>),
Br(Box<BrotliDecoder<Writer>>),
#[cfg(feature = "compress-brotli")]
Brotli(Box<brotli::DecompressorWriter<Writer>>),
// We need an explicit 'static lifetime here because ZstdDecoder needs a lifetime
// argument, and we use `spawn_blocking` in `Decoder::poll_next`, which requires `FnOnce() -> R + Send + 'static`.
#[cfg(feature = "compress-zstd")]
Zstd(Box<ZstdDecoder<'static, Writer>>),
} }
impl ContentDecoder { impl ContentDecoder {
fn feed_eof(&mut self) -> io::Result<Option<Bytes>> { fn feed_eof(&mut self) -> io::Result<Option<Bytes>> {
match self { match self {
#[cfg(feature = "compress-brotli")] ContentDecoder::Br(ref mut decoder) => match decoder.flush() {
ContentDecoder::Brotli(ref mut decoder) => match decoder.flush() {
Ok(()) => { Ok(()) => {
let b = decoder.get_mut().take(); let b = decoder.get_mut().take();
if !b.is_empty() { if !b.is_empty() {
Ok(Some(b)) Ok(Some(b))
} else { } else {
Ok(None) Ok(None)
} }
} }
Err(err) => Err(err), Err(e) => Err(e),
}, },
#[cfg(feature = "compress-gzip")]
ContentDecoder::Gzip(ref mut decoder) => match decoder.try_finish() { ContentDecoder::Gzip(ref mut decoder) => match decoder.try_finish() {
Ok(_) => { Ok(_) => {
let b = decoder.get_mut().take(); let b = decoder.get_mut().take();
if !b.is_empty() { if !b.is_empty() {
Ok(Some(b)) Ok(Some(b))
} else { } else {
Ok(None) Ok(None)
} }
} }
Err(err) => Err(err), Err(e) => Err(e),
}, },
#[cfg(feature = "compress-gzip")]
ContentDecoder::Deflate(ref mut decoder) => match decoder.try_finish() { ContentDecoder::Deflate(ref mut decoder) => match decoder.try_finish() {
Ok(_) => { Ok(_) => {
let b = decoder.get_mut().take(); let b = decoder.get_mut().take();
@ -218,61 +174,40 @@ impl ContentDecoder {
Ok(None) Ok(None)
} }
} }
Err(err) => Err(err), Err(e) => Err(e),
},
#[cfg(feature = "compress-zstd")]
ContentDecoder::Zstd(ref mut decoder) => match decoder.flush() {
Ok(_) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(err) => Err(err),
}, },
} }
} }
fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> { fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> {
match self { match self {
#[cfg(feature = "compress-brotli")] ContentDecoder::Br(ref mut decoder) => match decoder.write_all(&data) {
ContentDecoder::Brotli(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => { Ok(_) => {
decoder.flush()?; decoder.flush()?;
let b = decoder.get_mut().take(); let b = decoder.get_mut().take();
if !b.is_empty() { if !b.is_empty() {
Ok(Some(b)) Ok(Some(b))
} else { } else {
Ok(None) Ok(None)
} }
} }
Err(err) => Err(err), Err(e) => Err(e),
}, },
#[cfg(feature = "compress-gzip")]
ContentDecoder::Gzip(ref mut decoder) => match decoder.write_all(&data) { ContentDecoder::Gzip(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => { Ok(_) => {
decoder.flush()?; decoder.flush()?;
let b = decoder.get_mut().take(); let b = decoder.get_mut().take();
if !b.is_empty() { if !b.is_empty() {
Ok(Some(b)) Ok(Some(b))
} else { } else {
Ok(None) Ok(None)
} }
} }
Err(err) => Err(err), Err(e) => Err(e),
}, },
#[cfg(feature = "compress-gzip")]
ContentDecoder::Deflate(ref mut decoder) => match decoder.write_all(&data) { ContentDecoder::Deflate(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => { Ok(_) => {
decoder.flush()?; decoder.flush()?;
let b = decoder.get_mut().take(); let b = decoder.get_mut().take();
if !b.is_empty() { if !b.is_empty() {
Ok(Some(b)) Ok(Some(b))
@ -280,22 +215,7 @@ impl ContentDecoder {
Ok(None) Ok(None)
} }
} }
Err(err) => Err(err), Err(e) => Err(e),
},
#[cfg(feature = "compress-zstd")]
ContentDecoder::Zstd(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(err) => Err(err),
}, },
} }
} }
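// Sketch (not part of the diff): driving the Decoder above over a one-chunk gzip
// payload. Assumes actix-http with the `compress-gzip` feature, plus `flate2`,
// `futures-util`, and `actix-rt` as dev-dependencies; names follow the new side of
// this diff. Chunks smaller than MAX_CHUNK_SIZE_DECODE_IN_PLACE are decoded inline,
// larger ones are handed to `spawn_blocking`.
use std::io::Write as _;

use actix_http::{encoding::Decoder, error::PayloadError, header::ContentEncoding};
use bytes::Bytes;
use flate2::{write::GzEncoder, Compression};
use futures_util::{stream, StreamExt as _};

#[actix_rt::main]
async fn main() {
    // Compress a small body so there is something to decode.
    let mut gz = GzEncoder::new(Vec::new(), Compression::fast());
    gz.write_all(b"hello world").unwrap();
    let compressed = Bytes::from(gz.finish().unwrap());

    // A single-chunk payload stream, as the HTTP/1 dispatcher would yield it.
    let payload = stream::iter(vec![Ok::<_, PayloadError>(compressed)]);
    let mut decoder = Decoder::new(payload, ContentEncoding::Gzip);

    let mut out = Vec::new();
    while let Some(chunk) = decoder.next().await {
        out.extend_from_slice(&chunk.unwrap());
    }
    assert_eq!(out, b"hello world");
}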
@ -1,221 +1,166 @@
//! Stream encoders. //! Stream encoder
use std::future::Future;
use std::io::{self, Write};
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{ use actix_threadpool::{run, CpuFuture};
error::Error as StdError, use brotli2::write::BrotliEncoder;
future::Future,
io::{self, Write as _},
pin::Pin,
task::{Context, Poll},
};
use actix_rt::task::{spawn_blocking, JoinHandle};
use bytes::Bytes; use bytes::Bytes;
use derive_more::Display;
#[cfg(feature = "compress-gzip")]
use flate2::write::{GzEncoder, ZlibEncoder}; use flate2::write::{GzEncoder, ZlibEncoder};
use futures_core::ready; use futures_core::ready;
use pin_project_lite::pin_project; use pin_project::pin_project;
use tracing::trace;
#[cfg(feature = "compress-zstd")] use crate::body::{Body, BodySize, MessageBody, ResponseBody};
use zstd::stream::write::Encoder as ZstdEncoder; use crate::http::header::{ContentEncoding, CONTENT_ENCODING};
use crate::http::{HeaderValue, StatusCode};
use crate::{Error, ResponseHead};
use super::Writer; use super::Writer;
use crate::{
body::{self, BodySize, MessageBody},
header::{self, ContentEncoding, HeaderValue, CONTENT_ENCODING},
ResponseHead, StatusCode,
};
const MAX_CHUNK_SIZE_ENCODE_IN_PLACE: usize = 1024; const INPLACE: usize = 1024;
pin_project! { #[pin_project]
pub struct Encoder<B> { pub struct Encoder<B> {
#[pin] eof: bool,
body: EncoderBody<B>, #[pin]
encoder: Option<ContentEncoder>, body: EncoderBody<B>,
fut: Option<JoinHandle<Result<ContentEncoder, io::Error>>>, encoder: Option<ContentEncoder>,
eof: bool, fut: Option<CpuFuture<ContentEncoder, io::Error>>,
}
} }
impl<B: MessageBody> Encoder<B> { impl<B: MessageBody> Encoder<B> {
fn none() -> Self { pub fn response(
Encoder { encoding: ContentEncoding,
body: EncoderBody::None { head: &mut ResponseHead,
body: body::None::new(), body: ResponseBody<B>,
}, ) -> ResponseBody<Encoder<B>> {
encoder: None, let can_encode = !(head.headers().contains_key(&CONTENT_ENCODING)
fut: None,
eof: true,
}
}
fn empty() -> Self {
Encoder {
body: EncoderBody::Full { body: Bytes::new() },
encoder: None,
fut: None,
eof: true,
}
}
pub fn response(encoding: ContentEncoding, head: &mut ResponseHead, body: B) -> Self {
// no need to compress empty bodies
match body.size() {
BodySize::None => return Self::none(),
BodySize::Sized(0) => return Self::empty(),
_ => {}
}
let should_encode = !(head.headers().contains_key(&CONTENT_ENCODING)
|| head.status == StatusCode::SWITCHING_PROTOCOLS || head.status == StatusCode::SWITCHING_PROTOCOLS
|| head.status == StatusCode::NO_CONTENT || head.status == StatusCode::NO_CONTENT
|| encoding == ContentEncoding::Identity); || encoding == ContentEncoding::Identity
|| encoding == ContentEncoding::Auto);
let body = match body.try_into_bytes() { let body = match body {
Ok(body) => EncoderBody::Full { body }, ResponseBody::Other(b) => match b {
Err(body) => EncoderBody::Stream { body }, Body::None => return ResponseBody::Other(Body::None),
Body::Empty => return ResponseBody::Other(Body::Empty),
Body::Bytes(buf) => {
if can_encode {
EncoderBody::Bytes(buf)
} else {
return ResponseBody::Other(Body::Bytes(buf));
}
}
Body::Message(stream) => EncoderBody::BoxedStream(stream),
},
ResponseBody::Body(stream) => EncoderBody::Stream(stream),
}; };
if should_encode { if can_encode {
// wrap body only if encoder is feature-enabled // Modify response body only if encoder is not None
if let Some(enc) = ContentEncoder::select(encoding) { if let Some(enc) = ContentEncoder::encoder(encoding) {
update_head(encoding, head); update_head(encoding, head);
head.no_chunking(false);
return Encoder { return ResponseBody::Body(Encoder {
body, body,
encoder: Some(enc),
fut: None,
eof: false, eof: false,
}; fut: None,
encoder: Some(enc),
});
} }
} }
ResponseBody::Body(Encoder {
Encoder {
body, body,
encoder: None,
fut: None,
eof: false, eof: false,
} fut: None,
encoder: None,
})
} }
} }
pin_project! { #[pin_project(project = EncoderBodyProj)]
#[project = EncoderBodyProj] enum EncoderBody<B> {
enum EncoderBody<B> { Bytes(Bytes),
None { body: body::None }, Stream(#[pin] B),
Full { body: Bytes }, BoxedStream(Box<dyn MessageBody + Unpin>),
Stream { #[pin] body: B },
}
} }
impl<B> MessageBody for EncoderBody<B> impl<B: MessageBody> MessageBody for EncoderBody<B> {
where
B: MessageBody,
{
type Error = EncoderError;
#[inline]
fn size(&self) -> BodySize { fn size(&self) -> BodySize {
match self { match self {
EncoderBody::None { body } => body.size(), EncoderBody::Bytes(ref b) => b.size(),
EncoderBody::Full { body } => body.size(), EncoderBody::Stream(ref b) => b.size(),
EncoderBody::Stream { body } => body.size(), EncoderBody::BoxedStream(ref b) => b.size(),
} }
} }
fn poll_next( fn poll_next(
self: Pin<&mut Self>, self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> { ) -> Poll<Option<Result<Bytes, Error>>> {
match self.project() { match self.project() {
EncoderBodyProj::None { body } => { EncoderBodyProj::Bytes(b) => {
Pin::new(body).poll_next(cx).map_err(|err| match err {}) if b.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(std::mem::take(b))))
}
} }
EncoderBodyProj::Full { body } => { EncoderBodyProj::Stream(b) => b.poll_next(cx),
Pin::new(body).poll_next(cx).map_err(|err| match err {}) EncoderBodyProj::BoxedStream(ref mut b) => {
Pin::new(b.as_mut()).poll_next(cx)
} }
EncoderBodyProj::Stream { body } => body
.poll_next(cx)
.map_err(|err| EncoderError::Body(err.into())),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self>
where
Self: Sized,
{
match self {
EncoderBody::None { body } => Ok(body.try_into_bytes().unwrap()),
EncoderBody::Full { body } => Ok(body.try_into_bytes().unwrap()),
_ => Err(self),
} }
} }
} }
impl<B> MessageBody for Encoder<B> impl<B: MessageBody> MessageBody for Encoder<B> {
where
B: MessageBody,
{
type Error = EncoderError;
#[inline]
fn size(&self) -> BodySize { fn size(&self) -> BodySize {
if self.encoder.is_some() { if self.encoder.is_none() {
BodySize::Stream
} else {
self.body.size() self.body.size()
} else {
BodySize::Stream
} }
} }
fn poll_next( fn poll_next(
self: Pin<&mut Self>, self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> { ) -> Poll<Option<Result<Bytes, Error>>> {
let mut this = self.project(); let mut this = self.project();
loop { loop {
if *this.eof { if *this.eof {
return Poll::Ready(None); return Poll::Ready(None);
} }
if let Some(ref mut fut) = this.fut { if let Some(ref mut fut) = this.fut {
let mut encoder = ready!(Pin::new(fut).poll(cx)) let mut encoder = match ready!(Pin::new(fut).poll(cx)) {
.map_err(|_| { Ok(item) => item,
EncoderError::Io(io::Error::new( Err(e) => return Poll::Ready(Some(Err(e.into()))),
io::ErrorKind::Other, };
"Blocking task was cancelled unexpectedly",
))
})?
.map_err(EncoderError::Io)?;
let chunk = encoder.take(); let chunk = encoder.take();
*this.encoder = Some(encoder); *this.encoder = Some(encoder);
this.fut.take(); this.fut.take();
if !chunk.is_empty() { if !chunk.is_empty() {
return Poll::Ready(Some(Ok(chunk))); return Poll::Ready(Some(Ok(chunk)));
} }
} }
let result = ready!(this.body.as_mut().poll_next(cx)); let result = this.body.as_mut().poll_next(cx);
match result { match result {
Some(Err(err)) => return Poll::Ready(Some(Err(err))), Poll::Ready(Some(Ok(chunk))) => {
Some(Ok(chunk)) => {
if let Some(mut encoder) = this.encoder.take() { if let Some(mut encoder) = this.encoder.take() {
if chunk.len() < MAX_CHUNK_SIZE_ENCODE_IN_PLACE { if chunk.len() < INPLACE {
encoder.write(&chunk).map_err(EncoderError::Io)?; encoder.write(&chunk)?;
let chunk = encoder.take(); let chunk = encoder.take();
*this.encoder = Some(encoder); *this.encoder = Some(encoder);
if !chunk.is_empty() { if !chunk.is_empty() {
return Poll::Ready(Some(Ok(chunk))); return Poll::Ready(Some(Ok(chunk)));
} }
} else { } else {
*this.fut = Some(spawn_blocking(move || { *this.fut = Some(run(move || {
encoder.write(&chunk)?; encoder.write(&chunk)?;
Ok(encoder) Ok(encoder)
})); }));
@ -224,11 +169,9 @@ where
return Poll::Ready(Some(Ok(chunk))); return Poll::Ready(Some(Ok(chunk)));
} }
} }
Poll::Ready(None) => {
None => {
if let Some(encoder) = this.encoder.take() { if let Some(encoder) = this.encoder.take() {
let chunk = encoder.finish().map_err(EncoderError::Io)?; let chunk = encoder.finish()?;
if chunk.is_empty() { if chunk.is_empty() {
return Poll::Ready(None); return Poll::Ready(None);
} else { } else {
@ -239,78 +182,39 @@ where
return Poll::Ready(None); return Poll::Ready(None);
} }
} }
} val => return val,
}
}
#[inline]
fn try_into_bytes(mut self) -> Result<Bytes, Self>
where
Self: Sized,
{
if self.encoder.is_some() {
Err(self)
} else {
match self.body.try_into_bytes() {
Ok(body) => Ok(body),
Err(body) => {
self.body = body;
Err(self)
}
} }
} }
} }
} }
fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) { fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) {
head.headers_mut() head.headers_mut().insert(
.insert(header::CONTENT_ENCODING, encoding.to_header_value()); CONTENT_ENCODING,
head.headers_mut() HeaderValue::from_static(encoding.as_str()),
.append(header::VARY, HeaderValue::from_static("accept-encoding")); );
head.no_chunking(false);
} }
enum ContentEncoder { enum ContentEncoder {
#[cfg(feature = "compress-gzip")]
Deflate(ZlibEncoder<Writer>), Deflate(ZlibEncoder<Writer>),
#[cfg(feature = "compress-gzip")]
Gzip(GzEncoder<Writer>), Gzip(GzEncoder<Writer>),
Br(BrotliEncoder<Writer>),
#[cfg(feature = "compress-brotli")]
Brotli(Box<brotli::CompressorWriter<Writer>>),
// We need an explicit 'static lifetime here because ZstdEncoder needs a lifetime argument and we
// use `spawn_blocking` in `Encoder::poll_next`, which requires `FnOnce() -> R + Send + 'static`.
#[cfg(feature = "compress-zstd")]
Zstd(ZstdEncoder<'static, Writer>),
} }
impl ContentEncoder { impl ContentEncoder {
fn select(encoding: ContentEncoding) -> Option<Self> { fn encoder(encoding: ContentEncoding) -> Option<Self> {
match encoding { match encoding {
#[cfg(feature = "compress-gzip")]
ContentEncoding::Deflate => Some(ContentEncoder::Deflate(ZlibEncoder::new( ContentEncoding::Deflate => Some(ContentEncoder::Deflate(ZlibEncoder::new(
Writer::new(), Writer::new(),
flate2::Compression::fast(), flate2::Compression::fast(),
))), ))),
#[cfg(feature = "compress-gzip")]
ContentEncoding::Gzip => Some(ContentEncoder::Gzip(GzEncoder::new( ContentEncoding::Gzip => Some(ContentEncoder::Gzip(GzEncoder::new(
Writer::new(), Writer::new(),
flate2::Compression::fast(), flate2::Compression::fast(),
))), ))),
ContentEncoding::Br => {
#[cfg(feature = "compress-brotli")] Some(ContentEncoder::Br(BrotliEncoder::new(Writer::new(), 3)))
ContentEncoding::Brotli => Some(ContentEncoder::Brotli(new_brotli_compressor())),
#[cfg(feature = "compress-zstd")]
ContentEncoding::Zstd => {
let encoder = ZstdEncoder::new(Writer::new(), 3).ok()?;
Some(ContentEncoder::Zstd(encoder))
} }
_ => None, _ => None,
} }
} }
@ -318,60 +222,38 @@ impl ContentEncoder {
#[inline] #[inline]
pub(crate) fn take(&mut self) -> Bytes { pub(crate) fn take(&mut self) -> Bytes {
match *self { match *self {
#[cfg(feature = "compress-brotli")] ContentEncoder::Br(ref mut encoder) => encoder.get_mut().take(),
ContentEncoder::Brotli(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-gzip")]
ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(), ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-gzip")]
ContentEncoder::Gzip(ref mut encoder) => encoder.get_mut().take(), ContentEncoder::Gzip(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-zstd")]
ContentEncoder::Zstd(ref mut encoder) => encoder.get_mut().take(),
} }
} }
fn finish(self) -> Result<Bytes, io::Error> { fn finish(self) -> Result<Bytes, io::Error> {
match self { match self {
#[cfg(feature = "compress-brotli")] ContentEncoder::Br(encoder) => match encoder.finish() {
ContentEncoder::Brotli(mut encoder) => match encoder.flush() { Ok(writer) => Ok(writer.buf.freeze()),
Ok(()) => Ok(encoder.into_inner().buf.freeze()),
Err(err) => Err(err), Err(err) => Err(err),
}, },
#[cfg(feature = "compress-gzip")]
ContentEncoder::Gzip(encoder) => match encoder.finish() { ContentEncoder::Gzip(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()), Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err), Err(err) => Err(err),
}, },
#[cfg(feature = "compress-gzip")]
ContentEncoder::Deflate(encoder) => match encoder.finish() { ContentEncoder::Deflate(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()), Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err), Err(err) => Err(err),
}, },
#[cfg(feature = "compress-zstd")]
ContentEncoder::Zstd(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
} }
} }
fn write(&mut self, data: &[u8]) -> Result<(), io::Error> { fn write(&mut self, data: &[u8]) -> Result<(), io::Error> {
match *self { match *self {
#[cfg(feature = "compress-brotli")] ContentEncoder::Br(ref mut encoder) => match encoder.write_all(data) {
ContentEncoder::Brotli(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()), Ok(_) => Ok(()),
Err(err) => { Err(err) => {
trace!("Error decoding br encoding: {}", err); trace!("Error decoding br encoding: {}", err);
Err(err) Err(err)
} }
}, },
#[cfg(feature = "compress-gzip")]
ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) { ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()), Ok(_) => Ok(()),
Err(err) => { Err(err) => {
@ -379,8 +261,6 @@ impl ContentEncoder {
Err(err) Err(err)
} }
}, },
#[cfg(feature = "compress-gzip")]
ContentEncoder::Deflate(ref mut encoder) => match encoder.write_all(data) { ContentEncoder::Deflate(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()), Ok(_) => Ok(()),
Err(err) => { Err(err) => {
@ -388,52 +268,6 @@ impl ContentEncoder {
Err(err) Err(err)
} }
}, },
#[cfg(feature = "compress-zstd")]
ContentEncoder::Zstd(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
trace!("Error decoding zstd encoding: {}", err);
Err(err)
}
},
} }
} }
} }
#[cfg(feature = "compress-brotli")]
fn new_brotli_compressor() -> Box<brotli::CompressorWriter<Writer>> {
Box::new(brotli::CompressorWriter::new(
Writer::new(),
32 * 1024, // 32 KiB buffer
3, // BROTLI_PARAM_QUALITY
22, // BROTLI_PARAM_LGWIN
))
}
#[derive(Debug, Display)]
#[non_exhaustive]
pub enum EncoderError {
/// Wrapped body stream error.
#[display("body")]
Body(Box<dyn StdError>),
/// Generic I/O error.
#[display("io")]
Io(io::Error),
}
impl StdError for EncoderError {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
EncoderError::Body(err) => Some(&**err),
EncoderError::Io(err) => Some(err),
}
}
}
impl From<EncoderError> for crate::Error {
fn from(err: EncoderError) -> Self {
crate::Error::new_encoder().with_cause(err)
}
}
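// Sketch (not part of the diff): what `Encoder::response` above does to a simple
// sized body, following the new-side signature `Encoder::response(encoding, &mut head, body)`.
// Assumes actix-http with the `compress-gzip` feature; `"hello world"` is just a
// stand-in &'static str body.
use actix_http::{
    body::{BodySize, MessageBody as _},
    encoding::Encoder,
    header::ContentEncoding,
    ResponseHead, StatusCode,
};

fn main() {
    let mut head = ResponseHead::new(StatusCode::OK);

    // Non-empty body + no existing Content-Encoding header => the body gets wrapped.
    let body = Encoder::response(ContentEncoding::Gzip, &mut head, "hello world");

    // update_head advertised the selected encoding (and appended a Vary header),
    // and the wrapped body now reports a streaming (unknown-length) size.
    assert_eq!(head.headers().get("content-encoding").unwrap(), "gzip");
    assert!(matches!(body.size(), BodySize::Stream));
}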
@ -1,5 +1,4 @@
//! Content-Encoding support. //! Content-Encoding support
use std::io; use std::io;
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
@ -7,12 +6,10 @@ use bytes::{Bytes, BytesMut};
mod decoder; mod decoder;
mod encoder; mod encoder;
pub use self::{decoder::Decoder, encoder::Encoder}; pub use self::decoder::Decoder;
pub use self::encoder::Encoder;
/// Special-purpose writer for streaming (de-)compression. pub(self) struct Writer {
///
/// Pre-allocates 8KiB of capacity.
struct Writer {
buf: BytesMut, buf: BytesMut,
} }
File diff suppressed because it is too large.
@ -1,179 +1,62 @@
use std::{ use std::any::{Any, TypeId};
any::{Any, TypeId}, use std::{fmt, mem};
collections::HashMap,
fmt,
hash::{BuildHasherDefault, Hasher},
};
/// A hasher for `TypeId`s that takes advantage of its known characteristics. use fxhash::FxHashMap;
///
/// The author of the `anymap` crate has done research on the topic:
/// https://github.com/chris-morgan/anymap/blob/2e9a5704/src/lib.rs#L599
#[derive(Debug, Default)]
struct NoOpHasher(u64);
impl Hasher for NoOpHasher {
fn write(&mut self, _bytes: &[u8]) {
unimplemented!("This NoOpHasher can only handle u64s")
}
fn write_u64(&mut self, i: u64) {
self.0 = i;
}
fn finish(&self) -> u64 {
self.0
}
}
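// Sketch (not part of the diff): the hasher above does no mixing at all; it simply
// echoes back the u64 it is given, which is all a pre-hashed `TypeId` key needs
// (TypeId currently feeds the hasher a single u64, per the anymap research linked above).
fn noop_hasher_passthrough_demo() {
    use std::hash::Hasher as _;

    let mut hasher = NoOpHasher::default();
    hasher.write_u64(0xdead_beef_cafe_f00d);
    assert_eq!(hasher.finish(), 0xdead_beef_cafe_f00d);
}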
/// A type map for request extensions.
///
/// All entries into this map must be owned types (or static references).
#[derive(Default)] #[derive(Default)]
/// A type map of request extensions.
pub struct Extensions { pub struct Extensions {
// use no-op hasher with a std HashMap for faster lookups on the small `TypeId` keys /// Use FxHasher with a std HashMap for faster
map: HashMap<TypeId, Box<dyn Any>, BuildHasherDefault<NoOpHasher>>, /// lookups on the small `TypeId` (u64 equivalent) keys.
map: FxHashMap<TypeId, Box<dyn Any>>,
} }
impl Extensions { impl Extensions {
/// Creates an empty `Extensions`. /// Create an empty `Extensions`.
#[inline] #[inline]
pub fn new() -> Extensions { pub fn new() -> Extensions {
Extensions { Extensions {
map: HashMap::default(), map: FxHashMap::default(),
} }
} }
/// Insert an item into the map. /// Insert a type into this `Extensions`.
/// ///
/// If an item of this type was already stored, it will be replaced and returned. /// If an extension of this type already existed, it will
/// /// be returned.
/// ``` pub fn insert<T: 'static>(&mut self, val: T) {
/// # use actix_http::Extensions; self.map.insert(TypeId::of::<T>(), Box::new(val));
/// let mut map = Extensions::new();
/// assert_eq!(map.insert(""), None);
/// assert_eq!(map.insert(1u32), None);
/// assert_eq!(map.insert(2u32), Some(1u32));
/// assert_eq!(*map.get::<u32>().unwrap(), 2u32);
/// ```
pub fn insert<T: 'static>(&mut self, val: T) -> Option<T> {
self.map
.insert(TypeId::of::<T>(), Box::new(val))
.and_then(downcast_owned)
} }
/// Check if map contains an item of a given type. /// Check if container contains entry
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// assert!(!map.contains::<u32>());
///
/// assert_eq!(map.insert(1u32), None);
/// assert!(map.contains::<u32>());
/// ```
pub fn contains<T: 'static>(&self) -> bool { pub fn contains<T: 'static>(&self) -> bool {
self.map.contains_key(&TypeId::of::<T>()) self.map.contains_key(&TypeId::of::<T>())
} }
/// Get a reference to an item of a given type. /// Get a reference to a type previously inserted on this `Extensions`.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// map.insert(1u32);
/// assert_eq!(map.get::<u32>(), Some(&1u32));
/// ```
pub fn get<T: 'static>(&self) -> Option<&T> { pub fn get<T: 'static>(&self) -> Option<&T> {
self.map self.map
.get(&TypeId::of::<T>()) .get(&TypeId::of::<T>())
.and_then(|boxed| boxed.downcast_ref()) .and_then(|boxed| boxed.downcast_ref())
} }
/// Get a mutable reference to an item of a given type. /// Get a mutable reference to a type previously inserted on this `Extensions`.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// map.insert(1u32);
/// assert_eq!(map.get_mut::<u32>(), Some(&mut 1u32));
/// ```
pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> { pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> {
self.map self.map
.get_mut(&TypeId::of::<T>()) .get_mut(&TypeId::of::<T>())
.and_then(|boxed| boxed.downcast_mut()) .and_then(|boxed| boxed.downcast_mut())
} }
/// Inserts the given `value` into the extensions if it is not present, then returns a reference /// Remove a type from this `Extensions`.
/// to the value in the extensions.
/// ///
/// ``` /// If an extension of this type existed, it will be returned.
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// assert_eq!(map.get::<Vec<u32>>(), None);
///
/// map.get_or_insert(Vec::<u32>::new()).push(1);
/// assert_eq!(map.get::<Vec<u32>>(), Some(&vec![1]));
///
/// map.get_or_insert(Vec::<u32>::new()).push(2);
/// assert_eq!(map.get::<Vec<u32>>(), Some(&vec![1,2]));
/// ```
pub fn get_or_insert<T: 'static>(&mut self, value: T) -> &mut T {
self.get_or_insert_with(|| value)
}
/// Inserts a value computed from `f` into the extensions if the given `value` is not present,
/// then returns a reference to the value in the extensions.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// assert_eq!(map.get::<Vec<u32>>(), None);
///
/// map.get_or_insert_with(Vec::<u32>::new).push(1);
/// assert_eq!(map.get::<Vec<u32>>(), Some(&vec![1]));
///
/// map.get_or_insert_with(Vec::<u32>::new).push(2);
/// assert_eq!(map.get::<Vec<u32>>(), Some(&vec![1,2]));
/// ```
pub fn get_or_insert_with<T: 'static, F: FnOnce() -> T>(&mut self, default: F) -> &mut T {
self.map
.entry(TypeId::of::<T>())
.or_insert_with(|| Box::new(default()))
.downcast_mut()
.expect("extensions map should now contain a T value")
}
/// Remove an item from the map of a given type.
///
/// If an item of this type was already stored, it will be returned.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
///
/// map.insert(1u32);
/// assert_eq!(map.get::<u32>(), Some(&1u32));
///
/// assert_eq!(map.remove::<u32>(), Some(1u32));
/// assert!(!map.contains::<u32>());
/// ```
pub fn remove<T: 'static>(&mut self) -> Option<T> { pub fn remove<T: 'static>(&mut self) -> Option<T> {
self.map.remove(&TypeId::of::<T>()).and_then(downcast_owned) self.map
.remove(&TypeId::of::<T>())
.and_then(|boxed| boxed.downcast().ok().map(|boxed| *boxed))
} }
/// Clear the `Extensions` of all inserted extensions. /// Clear the `Extensions` of all inserted extensions.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
///
/// map.insert(1u32);
/// assert!(map.contains::<u32>());
///
/// map.clear();
/// assert!(!map.contains::<u32>());
/// ```
#[inline] #[inline]
pub fn clear(&mut self) { pub fn clear(&mut self) {
self.map.clear(); self.map.clear();
@ -183,6 +66,11 @@ impl Extensions {
pub fn extend(&mut self, other: Extensions) { pub fn extend(&mut self, other: Extensions) {
self.map.extend(other.map); self.map.extend(other.map);
} }
/// Sets (or overrides) items from `other` into this map.
pub(crate) fn drain_from(&mut self, other: &mut Self) {
self.map.extend(mem::take(&mut other.map));
}
} }
impl fmt::Debug for Extensions { impl fmt::Debug for Extensions {
@ -191,10 +79,6 @@ impl fmt::Debug for Extensions {
} }
} }
fn downcast_owned<T: 'static>(boxed: Box<dyn Any>) -> Option<T> {
boxed.downcast().ok().map(|boxed| *boxed)
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@ -234,8 +118,6 @@ mod tests {
#[test] #[test]
fn test_integers() { fn test_integers() {
static A: u32 = 8;
let mut map = Extensions::new(); let mut map = Extensions::new();
map.insert::<i8>(8); map.insert::<i8>(8);
@ -248,7 +130,6 @@ mod tests {
map.insert::<u32>(32); map.insert::<u32>(32);
map.insert::<u64>(64); map.insert::<u64>(64);
map.insert::<u128>(128); map.insert::<u128>(128);
map.insert::<&'static u32>(&A);
assert!(map.get::<i8>().is_some()); assert!(map.get::<i8>().is_some());
assert!(map.get::<i16>().is_some()); assert!(map.get::<i16>().is_some());
assert!(map.get::<i32>().is_some()); assert!(map.get::<i32>().is_some());
@ -259,7 +140,6 @@ mod tests {
assert!(map.get::<u32>().is_some()); assert!(map.get::<u32>().is_some());
assert!(map.get::<u64>().is_some()); assert!(map.get::<u64>().is_some());
assert!(map.get::<u128>().is_some()); assert!(map.get::<u128>().is_some());
assert!(map.get::<&'static u32>().is_some());
} }
#[test] #[test]
@ -338,4 +218,27 @@ mod tests {
assert_eq!(extensions.get(), Some(&20u8)); assert_eq!(extensions.get(), Some(&20u8));
assert_eq!(extensions.get_mut(), Some(&mut 20u8)); assert_eq!(extensions.get_mut(), Some(&mut 20u8));
} }
#[test]
fn test_drain_from() {
let mut ext = Extensions::new();
ext.insert(2isize);
let mut more_ext = Extensions::new();
more_ext.insert(5isize);
more_ext.insert(5usize);
assert_eq!(ext.get::<isize>(), Some(&2isize));
assert_eq!(ext.get::<usize>(), None);
assert_eq!(more_ext.get::<isize>(), Some(&5isize));
assert_eq!(more_ext.get::<usize>(), Some(&5usize));
ext.drain_from(&mut more_ext);
assert_eq!(ext.get::<isize>(), Some(&5isize));
assert_eq!(ext.get::<usize>(), Some(&5usize));
assert_eq!(more_ext.get::<isize>(), None);
assert_eq!(more_ext.get::<usize>(), None);
}
} }
@ -1,427 +0,0 @@
use std::{io, task::Poll};
use bytes::{Buf as _, Bytes, BytesMut};
use tracing::{debug, trace};
macro_rules! byte (
($rdr:ident) => ({
if $rdr.len() > 0 {
let b = $rdr[0];
$rdr.advance(1);
b
} else {
return Poll::Pending
}
})
);
#[derive(Debug, Clone, PartialEq, Eq)]
pub(super) enum ChunkedState {
Size,
SizeLws,
Extension,
SizeLf,
Body,
BodyCr,
BodyLf,
EndCr,
EndLf,
End,
}
impl ChunkedState {
pub(super) fn step(
&self,
body: &mut BytesMut,
size: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<Result<ChunkedState, io::Error>> {
use self::ChunkedState::*;
match *self {
Size => ChunkedState::read_size(body, size),
SizeLws => ChunkedState::read_size_lws(body),
Extension => ChunkedState::read_extension(body),
SizeLf => ChunkedState::read_size_lf(body, *size),
Body => ChunkedState::read_body(body, size, buf),
BodyCr => ChunkedState::read_body_cr(body),
BodyLf => ChunkedState::read_body_lf(body),
EndCr => ChunkedState::read_end_cr(body),
EndLf => ChunkedState::read_end_lf(body),
End => Poll::Ready(Ok(ChunkedState::End)),
}
}
fn read_size(rdr: &mut BytesMut, size: &mut u64) -> Poll<Result<ChunkedState, io::Error>> {
let radix = 16;
let rem = match byte!(rdr) {
b @ b'0'..=b'9' => b - b'0',
b @ b'a'..=b'f' => b + 10 - b'a',
b @ b'A'..=b'F' => b + 10 - b'A',
b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => return Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => {
return Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size line: Invalid Size",
)));
}
};
match size.checked_mul(radix) {
Some(n) => {
*size = n;
*size += rem as u64;
Poll::Ready(Ok(ChunkedState::Size))
}
None => {
debug!("chunk size would overflow u64");
Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size line: Size is too big",
)))
}
}
}
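// Sketch (not part of the diff): the same hex accumulation and overflow guard as
// `read_size` above, in isolation. Each digit multiplies the running size by 16,
// so any size needing more than 16 hex digits trips `checked_mul`; `checked_add`
// is used here as well for good measure.
fn parse_chunk_size(hex: &[u8]) -> Option<u64> {
    let mut size: u64 = 0;
    for &b in hex {
        let digit = match b {
            b'0'..=b'9' => b - b'0',
            b'a'..=b'f' => b + 10 - b'a',
            b'A'..=b'F' => b + 10 - b'A',
            _ => return None,
        };
        size = size.checked_mul(16)?.checked_add(u64::from(digit))?;
    }
    Some(size)
}
// parse_chunk_size(b"4")                 == Some(4)
// parse_chunk_size(b"f0000000000000003") == None, since the size would overflow u64
// (this is exactly what the hrs_chunk_size_overflow test below exercises)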
fn read_size_lws(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
// LWS can follow the chunk size, but no more digits can come
b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size linear white space",
))),
}
}
fn read_extension(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
// strictly 0x20 (space) should be disallowed but we don't parse quoted strings here
0x00..=0x08 | 0x0a..=0x1f | 0x7f => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid character in chunk extension",
))),
_ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions
}
}
fn read_size_lf(rdr: &mut BytesMut, size: u64) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' if size > 0 => Poll::Ready(Ok(ChunkedState::Body)),
b'\n' if size == 0 => Poll::Ready(Ok(ChunkedState::EndCr)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size LF",
))),
}
}
fn read_body(
rdr: &mut BytesMut,
rem: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<Result<ChunkedState, io::Error>> {
trace!("Chunked read, remaining={:?}", rem);
let len = rdr.len() as u64;
if len == 0 {
Poll::Ready(Ok(ChunkedState::Body))
} else {
let slice;
if *rem > len {
slice = rdr.split().freeze();
*rem -= len;
} else {
slice = rdr.split_to(*rem as usize).freeze();
*rem = 0;
}
*buf = Some(slice);
if *rem > 0 {
Poll::Ready(Ok(ChunkedState::Body))
} else {
Poll::Ready(Ok(ChunkedState::BodyCr))
}
}
}
fn read_body_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk body CR",
))),
}
}
fn read_body_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' => Poll::Ready(Ok(ChunkedState::Size)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk body LF",
))),
}
}
fn read_end_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end CR",
))),
}
}
fn read_end_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' => Poll::Ready(Ok(ChunkedState::End)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end LF",
))),
}
}
}
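// Sketch (not part of the diff): how the h1 payload decoder drives the state machine
// above. `ChunkedState` is pub(super), so this mirrors the loop inside
// `PayloadDecoder::decode` rather than a public API; state and size persist across
// calls so that partial input can resume. Uses the imports already at the top of this file.
struct ChunkedPayload {
    state: ChunkedState,
    size: u64,
}

impl ChunkedPayload {
    fn new() -> Self {
        Self {
            state: ChunkedState::Size,
            size: 0,
        }
    }

    fn decode(&mut self, src: &mut BytesMut) -> Poll<Result<Option<Bytes>, io::Error>> {
        loop {
            let mut buf = None;

            // `step` consumes bytes from `src`, yielding the next state,
            // Pending when it runs out of input, or an error on bad framing.
            self.state = match self.state.step(src, &mut self.size, &mut buf) {
                Poll::Pending => return Poll::Pending,
                Poll::Ready(Ok(state)) => state,
                Poll::Ready(Err(err)) => return Poll::Ready(Err(err)),
            };

            // A body chunk was split off; hand it to the caller.
            if let Some(chunk) = buf {
                return Poll::Ready(Ok(Some(chunk)));
            }

            // Terminal state: the zero-sized chunk and trailing CRLF were consumed.
            if self.state == ChunkedState::End {
                return Poll::Ready(Ok(None));
            }
        }
    }
}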
#[cfg(test)]
mod tests {
use actix_codec::Decoder as _;
use bytes::{Bytes, BytesMut};
use http::Method;
use crate::{
error::ParseError,
h1::decoder::{MessageDecoder, PayloadItem},
HttpMessage as _, Request,
};
macro_rules! parse_ready {
($e:expr) => {{
match MessageDecoder::<Request>::default().decode($e) {
Ok(Some((msg, _))) => msg,
Ok(_) => unreachable!("Eof during parsing http request"),
Err(err) => unreachable!("Error during parsing http request: {:?}", err),
}
}};
}
macro_rules! expect_parse_err {
($e:expr) => {{
match MessageDecoder::<Request>::default().decode($e) {
Err(err) => match err {
ParseError::Io(_) => unreachable!("Parse error expected"),
_ => {}
},
_ => unreachable!("Error expected"),
}
}};
}
#[test]
fn test_parse_chunked_payload_chunk_extension() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\
\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(msg.chunked().unwrap());
buf.extend(b"4;test\r\ndata\r\n4\r\nline\r\n0\r\n\r\n"); // test: test\r\n\r\n")
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
assert_eq!(chunk, Bytes::from_static(b"data"));
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
assert_eq!(chunk, Bytes::from_static(b"line"));
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
}
#[test]
fn test_request_chunked() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let req = parse_ready!(&mut buf);
if let Ok(val) = req.chunked() {
assert!(val);
} else {
unreachable!("Error");
}
// intentional typo in "chunked"
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chnked\r\n\r\n",
);
expect_parse_err!(&mut buf);
}
#[test]
fn test_http_request_chunked_payload() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n");
assert_eq!(
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
b"data"
);
assert_eq!(
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
b"line"
);
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
}
#[test]
fn test_http_request_chunked_payload_and_next_message() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(
b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n\
POST /test2 HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n"
.iter(),
);
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"data");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"line");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
assert!(req.chunked().unwrap());
assert_eq!(*req.method(), Method::POST);
assert!(req.chunked().unwrap());
}
#[test]
fn test_http_request_chunked_payload_chunks() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(b"4\r\n1111\r\n");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"1111");
buf.extend(b"4\r\ndata\r");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"data");
buf.extend(b"\n4");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\r");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\n");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"li");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"li");
//trailers
//buf.feed_data("test: test\r\n");
//not_ready!(reader.parse(&mut buf, &mut readbuf));
buf.extend(b"ne\r\n0\r\n");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"ne");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\r\n");
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
}
#[test]
fn chunk_extension_quoted() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
Host: localhost:8080\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
2;hello=b;one=\"1 2 3\"\r\n\
xx",
);
let mut reader = MessageDecoder::<Request>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let chunk = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"xx")));
}
#[test]
fn hrs_chunk_extension_invalid() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: localhost:8080\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
2;x\nx\r\n\
4c\r\n\
0\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let err = pl.decode(&mut buf).unwrap_err();
assert!(err
.to_string()
.contains("Invalid character in chunk extension"));
}
#[test]
fn hrs_chunk_size_overflow() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
f0000000000000003\r\n\
abc\r\n\
0\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let err = pl.decode(&mut buf).unwrap_err();
assert!(err
.to_string()
.contains("Invalid chunk size line: Size is too big"));
}
}
@ -1,26 +1,23 @@
use std::{fmt, io}; use std::io;
use actix_codec::{Decoder, Encoder};
use bitflags::bitflags; use bitflags::bitflags;
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
use http::{Method, Version}; use http::{Method, Version};
use tokio_util::codec::{Decoder, Encoder};
use super::{ use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
decoder::{self, PayloadDecoder, PayloadItem, PayloadType}, use super::{decoder, encoder, reserve_readbuf};
encoder, reserve_readbuf, Message, MessageType, use super::{Message, MessageType};
}; use crate::body::BodySize;
use crate::{ use crate::config::ServiceConfig;
body::BodySize, use crate::error::{ParseError, PayloadError};
error::{ParseError, PayloadError}, use crate::message::{ConnectionType, RequestHeadType, ResponseHead};
ConnectionType, RequestHeadType, ResponseHead, ServiceConfig,
};
bitflags! { bitflags! {
#[derive(Debug, Clone, Copy)]
struct Flags: u8 { struct Flags: u8 {
const HEAD = 0b0000_0001; const HEAD = 0b0000_0001;
const KEEP_ALIVE_ENABLED = 0b0000_1000; const KEEPALIVE_ENABLED = 0b0000_1000;
const STREAM = 0b0001_0000; const STREAM = 0b0001_0000;
} }
} }
@ -39,7 +36,7 @@ struct ClientCodecInner {
decoder: decoder::MessageDecoder<ResponseHead>, decoder: decoder::MessageDecoder<ResponseHead>,
payload: Option<PayloadDecoder>, payload: Option<PayloadDecoder>,
version: Version, version: Version,
conn_type: ConnectionType, ctype: ConnectionType,
// encoder part // encoder part
flags: Flags, flags: Flags,
@ -52,32 +49,23 @@ impl Default for ClientCodec {
} }
} }
impl fmt::Debug for ClientCodec {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("h1::ClientCodec")
.field("flags", &self.inner.flags)
.finish_non_exhaustive()
}
}
impl ClientCodec { impl ClientCodec {
/// Create HTTP/1 codec. /// Create HTTP/1 codec.
/// ///
/// `keepalive_enabled` controls how the response `connection` header is generated.
pub fn new(config: ServiceConfig) -> Self { pub fn new(config: ServiceConfig) -> Self {
let flags = if config.keep_alive().enabled() { let flags = if config.keep_alive_enabled() {
Flags::KEEP_ALIVE_ENABLED Flags::KEEPALIVE_ENABLED
} else { } else {
Flags::empty() Flags::empty()
}; };
ClientCodec { ClientCodec {
inner: ClientCodecInner { inner: ClientCodecInner {
config, config,
decoder: decoder::MessageDecoder::default(), decoder: decoder::MessageDecoder::default(),
payload: None, payload: None,
version: Version::HTTP_11, version: Version::HTTP_11,
conn_type: ConnectionType::Close, ctype: ConnectionType::Close,
flags, flags,
encoder: encoder::MessageEncoder::default(), encoder: encoder::MessageEncoder::default(),
@ -87,12 +75,12 @@ impl ClientCodec {
/// Check if request is upgrade /// Check if request is upgrade
pub fn upgrade(&self) -> bool { pub fn upgrade(&self) -> bool {
self.inner.conn_type == ConnectionType::Upgrade self.inner.ctype == ConnectionType::Upgrade
} }
/// Check if last response is keep-alive /// Check if last response is keep-alive
pub fn keep_alive(&self) -> bool { pub fn keepalive(&self) -> bool {
self.inner.conn_type == ConnectionType::KeepAlive self.inner.ctype == ConnectionType::KeepAlive
} }
/// Check last request's message type /// Check last request's message type
@ -114,8 +102,8 @@ impl ClientCodec {
impl ClientPayloadCodec { impl ClientPayloadCodec {
/// Check if last response is keep-alive /// Check if last response is keep-alive
pub fn keep_alive(&self) -> bool { pub fn keepalive(&self) -> bool {
self.inner.conn_type == ConnectionType::KeepAlive self.inner.ctype == ConnectionType::KeepAlive
} }
/// Transform payload codec to a message codec /// Transform payload codec to a message codec
@ -129,18 +117,15 @@ impl Decoder for ClientCodec {
type Error = ParseError; type Error = ParseError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> { fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
debug_assert!( debug_assert!(!self.inner.payload.is_some(), "Payload decoder is set");
self.inner.payload.is_none(),
"Payload decoder should not be set"
);
if let Some((req, payload)) = self.inner.decoder.decode(src)? { if let Some((req, payload)) = self.inner.decoder.decode(src)? {
if let Some(conn_type) = req.conn_type() { if let Some(ctype) = req.ctype() {
// do not use peer's keep-alive // do not use peer's keep-alive
self.inner.conn_type = if conn_type == ConnectionType::KeepAlive { self.inner.ctype = if ctype == ConnectionType::KeepAlive {
self.inner.conn_type self.inner.ctype
} else { } else {
conn_type ctype
}; };
} }
@ -205,9 +190,9 @@ impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
.set(Flags::HEAD, head.as_ref().method == Method::HEAD); .set(Flags::HEAD, head.as_ref().method == Method::HEAD);
// connection status // connection status
inner.conn_type = match head.as_ref().connection_type() { inner.ctype = match head.as_ref().connection_type() {
ConnectionType::KeepAlive => { ConnectionType::KeepAlive => {
if inner.flags.contains(Flags::KEEP_ALIVE_ENABLED) { if inner.flags.contains(Flags::KEEPALIVE_ENABLED) {
ConnectionType::KeepAlive ConnectionType::KeepAlive
} else { } else {
ConnectionType::Close ConnectionType::Close
@ -224,7 +209,7 @@ impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
false, false,
inner.version, inner.version,
length, length,
inner.conn_type, inner.ctype,
&inner.config, &inner.config,
)?; )?;
} }
@ -238,3 +223,15 @@ impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
Ok(()) Ok(())
} }
} }
pub struct Writer<'a>(pub &'a mut BytesMut);
impl<'a> io::Write for Writer<'a> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.extend_from_slice(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
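// Sketch (not part of the diff): the Writer adapter shown just above lets
// std::io::Write-based formatting (e.g. write!) append straight into a BytesMut,
// which is how the h1 encoder emits status lines and headers without an
// intermediate Vec.
use bytes::BytesMut;
use std::io::Write as _;

fn main() -> std::io::Result<()> {
    let mut buf = BytesMut::with_capacity(64);

    {
        let mut dst = Writer(&mut buf);
        write!(dst, "HTTP/1.1 {} {}\r\n", 200, "OK")?;
        dst.write_all(b"content-length: 0\r\n\r\n")?;
    }

    assert_eq!(&buf[..], b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n");
    Ok(())
}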
Some files were not shown because too many files have changed in this diff.