diff --git a/.cargo/config.toml b/.cargo/config.toml index 0bab205cd..a2345e184 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,7 +1,10 @@ [alias] -chk = "hack check --workspace --all-features --tests --examples" -lint = "hack --clean-per-run clippy --workspace --tests --examples" -ci-min = "hack check --workspace --no-default-features" -ci-min-test = "hack check --workspace --no-default-features --tests --examples" -ci-default = "hack check --workspace" -ci-full = "check --workspace --bins --examples --tests" +lint = "clippy --workspace --all-targets -- -Dclippy::todo" +lint-all = "clippy --workspace --all-features --all-targets -- -Dclippy::todo" + +# lib checking +ci-check-min = "hack --workspace check --no-default-features" +ci-check-default = "hack --workspace check" +ci-check-default-tests = "check --workspace --tests" +ci-check-all-feature-powerset="hack --workspace --feature-powerset --depth=4 --skip=__compress,experimental-io-uring check" +ci-check-all-feature-powerset-linux="hack --workspace --feature-powerset --depth=4 --skip=__compress check" diff --git a/codecov.yml b/.codecov.yml similarity index 100% rename from codecov.yml rename to .codecov.yml diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 000000000..6164c657c --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,3 @@ +# These are supported funding model platforms + +github: [robjtede] diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 2df863ae8..a26842245 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -3,35 +3,41 @@ name: Bug Report about: Create a bug report. --- -Your issue may already be reported! -Please search on the [Actix Web issue tracker](https://github.com/actix/actix-web/issues) before creating one. +Your issue may already be reported! Please search on the [Actix Web issue tracker](https://github.com/actix/actix-web/issues) before creating one. ## Expected Behavior + ## Current Behavior + ## Possible Solution + ## Steps to Reproduce (for bugs) + + 1. 2. 3. 4. ## Context + ## Your Environment + -* Rust Version (I.e, output of `rustc -V`): -* Actix Web Version: +- Rust Version (I.e, output of `rustc -V`): +- Actix Web Version: diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index d8c6d66ca..bfa124ffd 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,15 +1,8 @@ blank_issues_enabled: true contact_links: - - name: GitHub Discussions - url: https://github.com/actix/actix-web/discussions - about: Actix Web Q&A - - name: Gitter chat (actix-web) - url: https://gitter.im/actix/actix-web - about: Actix Web Q&A - - name: Gitter chat (actix) - url: https://gitter.im/actix/actix - about: Actix (actor framework) Q&A - name: Actix Discord url: https://discord.gg/NWpN5mmg3x about: Actix developer discussion and community chat - + - name: GitHub Discussions + url: https://github.com/actix/actix-web/discussions + about: Actix Web Q&A diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 42deadf5a..e0d17fb27 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -2,13 +2,15 @@ ## PR Type + + PR_TYPE - ## PR Checklist - - [ ] Tests for the changes have been added / updated. @@ -17,11 +19,10 @@ PR_TYPE - [ ] Format code with the latest stable rustfmt. - [ ] (Team) Label with affected crates and semver status. 
- ## Overview + - diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..c7ecf5eaa --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,10 @@ +version: 2 +updates: + - package-ecosystem: cargo + directory: / + schedule: + interval: weekly + - package-ecosystem: github-actions + directory: / + schedule: + interval: weekly diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 828d62561..fd6bc6d73 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -1,28 +1,28 @@ name: Benchmark on: - pull_request: - types: [opened, synchronize, reopened] push: - branches: - - master + branches: [master] + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true jobs: check_benchmark: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Install Rust - uses: actions-rs/toolchain@v1 - with: - toolchain: nightly - profile: minimal - override: true + run: | + rustup set profile minimal + rustup install nightly + rustup override set nightly - name: Check benchmark - uses: actions-rs/cargo@v1 - with: - command: bench - args: --bench=server -- --sample-size=15 + run: cargo bench --bench=server -- --sample-size=15 diff --git a/.github/workflows/ci-post-merge.yml b/.github/workflows/ci-post-merge.yml new file mode 100644 index 000000000..1729d9a07 --- /dev/null +++ b/.github/workflows/ci-post-merge.yml @@ -0,0 +1,91 @@ +name: CI (post-merge) + +on: + push: + branches: [master] + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build_and_test_nightly: + strategy: + fail-fast: false + matrix: + # prettier-ignore + target: + - { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu } + - { name: macOS, os: macos-latest, triple: x86_64-apple-darwin } + - { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc } + version: + - { name: nightly, version: nightly } + + name: ${{ matrix.target.name }} / ${{ matrix.version.name }} + runs-on: ${{ matrix.target.os }} + + steps: + - uses: actions/checkout@v4 + + - name: Install nasm + if: matrix.target.os == 'windows-latest' + uses: ilammy/setup-nasm@v1.5.1 + + - name: Install OpenSSL + if: matrix.target.os == 'windows-latest' + shell: bash + run: | + set -e + choco install openssl --version=1.1.1.2100 -y --no-progress + echo 'OPENSSL_DIR=C:\Program Files\OpenSSL' >> $GITHUB_ENV + echo "RUSTFLAGS=-C target-feature=+crt-static" >> $GITHUB_ENV + + - name: Install Rust (${{ matrix.version.name }}) + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 + with: + toolchain: ${{ matrix.version.version }} + + - name: Install just, cargo-hack, cargo-nextest, cargo-ci-cache-clean + uses: taiki-e/install-action@v2.38.0 + with: + tool: just,cargo-hack,cargo-nextest,cargo-ci-cache-clean + + - name: check minimal + run: cargo ci-check-min + + - name: check default + run: cargo ci-check-default + + - name: tests + timeout-minutes: 60 + run: just test + + - name: CI cache clean + run: cargo-ci-cache-clean + + ci_feature_powerset_check: + name: Verify Feature Combinations + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Free Disk Space + run: ./scripts/free-disk-space.sh + + - name: Install Rust + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 + + - name: Install cargo-hack + uses: taiki-e/install-action@v2.38.0 + with: + tool: cargo-hack + + - name: 
check feature combinations + run: cargo ci-check-all-feature-powerset + + - name: check feature combinations + run: cargo ci-check-all-feature-powerset-linux diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 585b3f497..1b6f7b460 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,125 +3,119 @@ name: CI on: pull_request: types: [opened, synchronize, reopened] + merge_group: + types: [checks_requested] push: branches: [master] +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: + read_msrv: + name: Read MSRV + uses: actions-rust-lang/msrv/.github/workflows/msrv.yml@v0.1.0 + build_and_test: + needs: read_msrv + strategy: fail-fast: false matrix: + # prettier-ignore target: - { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu } - { name: macOS, os: macos-latest, triple: x86_64-apple-darwin } - { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc } version: - - 1.46.0 # MSRV - - stable - - nightly + - { name: msrv, version: "${{ needs.read_msrv.outputs.msrv }}" } + - { name: stable, version: stable } - name: ${{ matrix.target.name }} / ${{ matrix.version }} + name: ${{ matrix.target.name }} / ${{ matrix.version.name }} runs-on: ${{ matrix.target.os }} - env: - VCPKGRS_DYNAMIC: 1 - steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 + + - name: Install nasm + if: matrix.target.os == 'windows-latest' + uses: ilammy/setup-nasm@v1.5.1 - # install OpenSSL on Windows - - name: Set vcpkg root - if: matrix.target.triple == 'x86_64-pc-windows-msvc' - run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append - name: Install OpenSSL - if: matrix.target.triple == 'x86_64-pc-windows-msvc' - run: vcpkg install openssl:x64-windows + if: matrix.target.os == 'windows-latest' + shell: bash + run: | + set -e + choco install openssl --version=1.1.1.2100 -y --no-progress + echo 'OPENSSL_DIR=C:\Program Files\OpenSSL' >> $GITHUB_ENV + echo "RUSTFLAGS=-C target-feature=+crt-static" >> $GITHUB_ENV - - name: Install ${{ matrix.version }} - uses: actions-rs/toolchain@v1 - with: - toolchain: ${{ matrix.version }}-${{ matrix.target.triple }} - profile: minimal - override: true + - name: Setup mold linker + if: matrix.target.os == 'ubuntu-latest' + uses: rui314/setup-mold@v1 - - name: Install ${{ matrix.version }} - uses: actions-rs/toolchain@v1 + - name: Install Rust (${{ matrix.version.name }}) + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 with: - toolchain: ${{ matrix.version }}-${{ matrix.target.triple }} - profile: minimal - override: true + toolchain: ${{ matrix.version.version }} - - name: Generate Cargo.lock - uses: actions-rs/cargo@v1 + - name: Install just, cargo-hack, cargo-nextest, cargo-ci-cache-clean + uses: taiki-e/install-action@v2.38.0 with: - command: generate-lockfile - - name: Cache Dependencies - uses: Swatinem/rust-cache@v1.2.0 + tool: just,cargo-hack,cargo-nextest,cargo-ci-cache-clean - - name: Install cargo-hack - uses: actions-rs/cargo@v1 - with: - command: install - args: cargo-hack + - name: workaround MSRV issues + if: matrix.version.name == 'msrv' + run: just downgrade-for-msrv - name: check minimal - uses: actions-rs/cargo@v1 - with: - command: hack - args: check --workspace --no-default-features + run: cargo ci-check-min - - name: check minimal + tests - uses: actions-rs/cargo@v1 - with: - command: hack - args: check --workspace --no-default-features --tests --examples - - - name: 
check full - uses: actions-rs/cargo@v1 - with: - command: check - args: --workspace --bins --examples --tests + - name: check default + run: cargo ci-check-default - name: tests - uses: actions-rs/cargo@v1 - with: - command: test - args: --workspace --all-features --no-fail-fast -- --nocapture - --skip=test_h2_content_length - --skip=test_reading_deflate_encoding_large_random_rustls + timeout-minutes: 60 + run: just test - - name: tests (actix-http) - uses: actions-rs/cargo@v1 - timeout-minutes: 40 - with: - command: test - args: --package=actix-http --no-default-features --features=rustls -- --nocapture + - name: CI cache clean + run: cargo-ci-cache-clean - - name: tests (awc) - uses: actions-rs/cargo@v1 - timeout-minutes: 40 - with: - command: test - args: --package=awc --no-default-features --features=rustls -- --nocapture + io-uring: + name: io-uring tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 - - name: Generate coverage file - if: > - matrix.target.os == 'ubuntu-latest' - && matrix.version == 'stable' - && github.ref == 'refs/heads/master' - run: | - cargo install cargo-tarpaulin --vers "^0.13" - cargo tarpaulin --out Xml --verbose - - name: Upload to Codecov - if: > - matrix.target.os == 'ubuntu-latest' - && matrix.version == 'stable' - && github.ref == 'refs/heads/master' - uses: codecov/codecov-action@v1 + - name: Install Rust + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 with: - file: cobertura.xml + toolchain: nightly - - name: Clear the cargo caches - run: | - cargo install cargo-cache --no-default-features --features ci-autoclean - cargo-cache + - name: tests (io-uring) + timeout-minutes: 60 + run: > + sudo bash -c "ulimit -Sl 512 && ulimit -Hl 512 && PATH=$PATH:/usr/share/rust/.cargo/bin && RUSTUP_TOOLCHAIN=stable cargo test --lib --tests -p=actix-files --all-features" + + rustdoc: + name: doc tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Install Rust (nightly) + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 + with: + toolchain: nightly + + - name: Install just + uses: taiki-e/install-action@v2.38.0 + with: + tool: just + + - name: doc tests + run: just test-docs diff --git a/.github/workflows/clippy-fmt.yml b/.github/workflows/clippy-fmt.yml deleted file mode 100644 index e966fa4ab..000000000 --- a/.github/workflows/clippy-fmt.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: Lint - -on: - pull_request: - types: [opened, synchronize, reopened] - -jobs: - fmt: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - name: Install Rust - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - components: rustfmt - - name: Check with rustfmt - uses: actions-rs/cargo@v1 - with: - command: fmt - args: --all -- --check - - clippy: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - name: Install Rust - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - components: clippy - override: true - - name: Check with Clippy - uses: actions-rs/clippy-check@v1 - with: - token: ${{ secrets.GITHUB_TOKEN }} - args: --workspace --tests --all-features diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml new file mode 100644 index 000000000..ca3115713 --- /dev/null +++ b/.github/workflows/coverage.yml @@ -0,0 +1,40 @@ +name: Coverage + +on: + push: + branches: [master] + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + coverage: + runs-on: ubuntu-latest + steps: + - uses: 
actions/checkout@v4 + + - name: Install Rust (nightly) + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 + with: + toolchain: nightly + components: llvm-tools + + - name: Install just, cargo-llvm-cov, cargo-nextest + uses: taiki-e/install-action@v2.38.0 + with: + tool: just,cargo-llvm-cov,cargo-nextest + + - name: Generate code coverage + run: just test-coverage-codecov + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4.4.1 + with: + files: codecov.json + fail_ci_if_error: true + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 000000000..ca9d2bbeb --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,116 @@ +name: Lint + +on: + pull_request: + types: [opened, synchronize, reopened] + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + fmt: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Install Rust (nightly) + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 + with: + toolchain: nightly + components: rustfmt + + - name: Check with Rustfmt + run: cargo fmt --all -- --check + + clippy: + permissions: + contents: read + checks: write # to add clippy checks to PR diffs + + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Install Rust + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 + with: + components: clippy + + - name: Check with Clippy + uses: giraffate/clippy-action@v1.0.1 + with: + reporter: github-pr-check + github_token: ${{ secrets.GITHUB_TOKEN }} + clippy_flags: >- + --workspace --all-features --tests --examples --bins -- + -A unknown_lints -D clippy::todo -D clippy::dbg_macro + + lint-docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Install Rust (nightly) + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 + with: + toolchain: nightly + components: rust-docs + + - name: Check for broken intra-doc links + env: + RUSTDOCFLAGS: -D warnings + run: cargo +nightly doc --no-deps --workspace --all-features + + check-external-types: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Install Rust (nightly-2024-05-01) + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 + with: + toolchain: nightly-2024-05-01 + + - name: Install just + uses: taiki-e/install-action@v2.38.0 + with: + tool: just + + - name: Install cargo-check-external-types + uses: taiki-e/cache-cargo-install-action@v1.2.2 + with: + tool: cargo-check-external-types + + - name: check external types + run: just check-external-types-all +nightly-2024-05-01 + + public-api-diff: + runs-on: ubuntu-latest + steps: + - name: Checkout main branch + uses: actions/checkout@v4 + with: + ref: ${{ github.base_ref }} + + - name: Checkout PR branch + uses: actions/checkout@v4 + + - name: Install Rust (nightly-2024-06-07) + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 + with: + toolchain: nightly-2024-06-07 + + - name: Install cargo-public-api + uses: taiki-e/install-action@v2.38.0 + with: + tool: cargo-public-api + + - name: Generate API diff + run: | + for f in $(find -mindepth 2 -maxdepth 2 -name Cargo.toml); do + cargo public-api --manifest-path "$f" --simplified diff ${{ github.event.pull_request.base.sha }}..${{ github.sha }} + done diff --git a/.github/workflows/upload-doc.yml b/.github/workflows/upload-doc.yml deleted file mode 100644 index 94a2ddfbe..000000000 --- a/.github/workflows/upload-doc.yml 
+++ /dev/null @@ -1,35 +0,0 @@ -name: Upload Documentation - -on: - push: - branches: [master] - -jobs: - build: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - - name: Install Rust - uses: actions-rs/toolchain@v1 - with: - toolchain: nightly-x86_64-unknown-linux-gnu - profile: minimal - override: true - - - name: Build Docs - uses: actions-rs/cargo@v1 - with: - command: doc - args: --workspace --all-features --no-deps - - - name: Tweak HTML - run: echo '' > target/doc/index.html - - - name: Deploy to GitHub Pages - uses: JamesIves/github-pages-deploy-action@3.7.1 - with: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - BRANCH: gh-pages - FOLDER: target/doc diff --git a/.gitignore b/.gitignore index 638a4397a..48ccccb92 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,10 @@ guide/build/ # Configuration directory generated by CLion .idea + +# Configuration directory generated by VSCode +.vscode + +# code coverage +/lcov.info +/codecov.json diff --git a/.prettierrc.yml b/.prettierrc.yml new file mode 100644 index 000000000..d70303479 --- /dev/null +++ b/.prettierrc.yml @@ -0,0 +1,5 @@ +overrides: + - files: "*.md" + options: + printWidth: 9999 + proseWrap: never diff --git a/.rustfmt.toml b/.rustfmt.toml new file mode 100644 index 000000000..71b9be3ae --- /dev/null +++ b/.rustfmt.toml @@ -0,0 +1,3 @@ +group_imports = "StdExternalCrate" +imports_granularity = "Crate" +use_field_init_shorthand = true diff --git a/CHANGES.md b/CHANGES.md index 162f9f61b..7bec65640 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,737 +1,5 @@ -# Changes +# Changelog -## Unreleased - 2021-xx-xx -### Added -* `HttpServer::worker_max_blocking_threads` for setting block thread pool. [#2200] +Changelogs are kept separately for each crate in this repo. -### Changed -* `ServiceResponse::error_response` now uses body type of `Body`. [#2201] -* `ServiceResponse::checked_expr` now returns a `Result`. [#2201] -* Update `language-tags` to `0.3`. -* `ServiceResponse::take_body`. [#2201] -* `ServiceResponse::map_body` closure receives and returns `B` instead of `ResponseBody` types. [#2201] - -### Removed -* `HttpResponse::take_body` and old `HttpResponse::into_body` method that casted body type. [#2201] - -[#2200]: https://github.com/actix/actix-web/pull/2200 -[#2201]: https://github.com/actix/actix-web/pull/2201 - - -## 4.0.0-beta.6 - 2021-04-17 -### Added -* `HttpResponse` and `HttpResponseBuilder` structs. [#2065] - -### Changed -* Most error types are now marked `#[non_exhaustive]`. [#2148] -* Methods on `ContentDisposition` that took `T: AsRef` now take `impl AsRef`. - -[#2065]: https://github.com/actix/actix-web/pull/2065 -[#2148]: https://github.com/actix/actix-web/pull/2148 - - -## 4.0.0-beta.5 - 2021-04-02 -### Added -* `Header` extractor for extracting common HTTP headers in handlers. [#2094] -* Added `TestServer::client_headers` method. [#2097] - -### Fixed -* Double ampersand in Logger format is escaped correctly. [#2067] - -### Changed -* `CustomResponder` would return error as `HttpResponse` when `CustomResponder::with_header` failed - instead of skipping. (Only the first error is kept when multiple error occur) [#2093] - -### Removed -* The `client` mod was removed. Clients should now use `awc` directly. - [871ca5e4](https://github.com/actix/actix-web/commit/871ca5e4ae2bdc22d1ea02701c2992fa8d04aed7) -* Integration testing was moved to new `actix-test` crate. Namely these items from the `test` - module: `TestServer`, `TestServerConfig`, `start`, `start_with`, and `unused_addr`. 
[#2112] - -[#2067]: https://github.com/actix/actix-web/pull/2067 -[#2093]: https://github.com/actix/actix-web/pull/2093 -[#2094]: https://github.com/actix/actix-web/pull/2094 -[#2097]: https://github.com/actix/actix-web/pull/2097 -[#2112]: https://github.com/actix/actix-web/pull/2112 - - -## 4.0.0-beta.4 - 2021-03-09 -### Changed -* Feature `cookies` is now optional and enabled by default. [#1981] -* `JsonBody::new` returns a default limit of 32kB to be consistent with `JsonConfig` and the default - behaviour of the `web::Json` extractor. [#2010] - -[#1981]: https://github.com/actix/actix-web/pull/1981 -[#2010]: https://github.com/actix/actix-web/pull/2010 - - -## 4.0.0-beta.3 - 2021-02-10 -* Update `actix-web-codegen` to `0.5.0-beta.1`. - - -## 4.0.0-beta.2 - 2021-02-10 -### Added -* The method `Either, web::Form>::into_inner()` which returns the inner type for - whichever variant was created. Also works for `Either, web::Json>`. [#1894] -* Add `services!` macro for helping register multiple services to `App`. [#1933] -* Enable registering a vec of services of the same type to `App` [#1933] - -### Changed -* Rework `Responder` trait to be sync and returns `Response`/`HttpResponse` directly. - Making it simpler and more performant. [#1891] -* `ServiceRequest::into_parts` and `ServiceRequest::from_parts` can no longer fail. [#1893] -* `ServiceRequest::from_request` can no longer fail. [#1893] -* Our `Either` type now uses `Left`/`Right` variants (instead of `A`/`B`) [#1894] -* `test::{call_service, read_response, read_response_json, send_request}` take `&Service` - in argument [#1905] -* `App::wrap_fn`, `Resource::wrap_fn` and `Scope::wrap_fn` provide `&Service` in closure - argument. [#1905] -* `web::block` no longer requires the output is a Result. [#1957] - -### Fixed -* Multiple calls to `App::data` with the same type now keeps the latest call's data. [#1906] - -### Removed -* Public field of `web::Path` has been made private. [#1894] -* Public field of `web::Query` has been made private. [#1894] -* `TestRequest::with_header`; use `TestRequest::default().insert_header()`. [#1869] -* `AppService::set_service_data`; for custom HTTP service factories adding application data, use the - layered data model by calling `ServiceRequest::add_data_container` when handling - requests instead. [#1906] - -[#1891]: https://github.com/actix/actix-web/pull/1891 -[#1893]: https://github.com/actix/actix-web/pull/1893 -[#1894]: https://github.com/actix/actix-web/pull/1894 -[#1869]: https://github.com/actix/actix-web/pull/1869 -[#1905]: https://github.com/actix/actix-web/pull/1905 -[#1906]: https://github.com/actix/actix-web/pull/1906 -[#1933]: https://github.com/actix/actix-web/pull/1933 -[#1957]: https://github.com/actix/actix-web/pull/1957 - - -## 4.0.0-beta.1 - 2021-01-07 -### Added -* `Compat` middleware enabling generic response body/error type of middlewares like `Logger` and - `Compress` to be used in `middleware::Condition` and `Resource`, `Scope` services. [#1865] - -### Changed -* Update `actix-*` dependencies to tokio `1.0` based versions. [#1813] -* Bumped `rand` to `0.8`. -* Update `rust-tls` to `0.19`. [#1813] -* Rename `Handler` to `HandlerService` and rename `Factory` to `Handler`. [#1852] -* The default `TrailingSlash` is now `Trim`, in line with existing documentation. See migration - guide for implications. [#1875] -* Rename `DefaultHeaders::{content_type => add_content_type}`. [#1875] -* MSRV is now 1.46.0. 
- -### Fixed -* Added the underlying parse error to `test::read_body_json`'s panic message. [#1812] - -### Removed -* Public modules `middleware::{normalize, err_handlers}`. All necessary middleware structs are now - exposed directly by the `middleware` module. -* Remove `actix-threadpool` as dependency. `actix_threadpool::BlockingError` error type can be imported - from `actix_web::error` module. [#1878] - -[#1812]: https://github.com/actix/actix-web/pull/1812 -[#1813]: https://github.com/actix/actix-web/pull/1813 -[#1852]: https://github.com/actix/actix-web/pull/1852 -[#1865]: https://github.com/actix/actix-web/pull/1865 -[#1875]: https://github.com/actix/actix-web/pull/1875 -[#1878]: https://github.com/actix/actix-web/pull/1878 - -## 3.3.2 - 2020-12-01 -### Fixed -* Removed an occasional `unwrap` on `None` panic in `NormalizePathNormalization`. [#1762] -* Fix `match_pattern()` returning `None` for scope with empty path resource. [#1798] -* Increase minimum `socket2` version. [#1803] - -[#1762]: https://github.com/actix/actix-web/pull/1762 -[#1798]: https://github.com/actix/actix-web/pull/1798 -[#1803]: https://github.com/actix/actix-web/pull/1803 - - -## 3.3.1 - 2020-11-29 -* Ensure `actix-http` dependency uses same `serde_urlencoded`. - - -## 3.3.0 - 2020-11-25 -### Added -* Add `Either` extractor helper. [#1788] - -### Changed -* Upgrade `serde_urlencoded` to `0.7`. [#1773] - -[#1773]: https://github.com/actix/actix-web/pull/1773 -[#1788]: https://github.com/actix/actix-web/pull/1788 - - -## 3.2.0 - 2020-10-30 -### Added -* Implement `exclude_regex` for Logger middleware. [#1723] -* Add request-local data extractor `web::ReqData`. [#1748] -* Add ability to register closure for request middleware logging. [#1749] -* Add `app_data` to `ServiceConfig`. [#1757] -* Expose `on_connect` for access to the connection stream before request is handled. [#1754] - -### Changed -* Updated actix-web-codegen dependency for access to new `#[route(...)]` multi-method macro. -* Print non-configured `Data` type when attempting extraction. [#1743] -* Re-export bytes::Buf{Mut} in web module. [#1750] -* Upgrade `pin-project` to `1.0`. - -[#1723]: https://github.com/actix/actix-web/pull/1723 -[#1743]: https://github.com/actix/actix-web/pull/1743 -[#1748]: https://github.com/actix/actix-web/pull/1748 -[#1750]: https://github.com/actix/actix-web/pull/1750 -[#1754]: https://github.com/actix/actix-web/pull/1754 -[#1749]: https://github.com/actix/actix-web/pull/1749 - - -## 3.1.0 - 2020-09-29 -### Changed -* Add `TrailingSlash::MergeOnly` behaviour to `NormalizePath`, which allows `NormalizePath` - to retain any trailing slashes. [#1695] -* Remove bound `std::marker::Sized` from `web::Data` to support storing `Arc` - via `web::Data::from` [#1710] - -### Fixed -* `ResourceMap` debug printing is no longer infinitely recursive. [#1708] - -[#1695]: https://github.com/actix/actix-web/pull/1695 -[#1708]: https://github.com/actix/actix-web/pull/1708 -[#1710]: https://github.com/actix/actix-web/pull/1710 - - -## 3.0.2 - 2020-09-15 -### Fixed -* `NormalizePath` when used with `TrailingSlash::Trim` no longer trims the root path "/". [#1678] - -[#1678]: https://github.com/actix/actix-web/pull/1678 - - -## 3.0.1 - 2020-09-13 -### Changed -* `middleware::normalize::TrailingSlash` enum is now accessible. [#1673] - -[#1673]: https://github.com/actix/actix-web/pull/1673 - - -## 3.0.0 - 2020-09-11 -* No significant changes from `3.0.0-beta.4`. 
- - -## 3.0.0-beta.4 - 2020-09-09 -### Added -* `middleware::NormalizePath` now has configurable behavior for either always having a trailing - slash, or as the new addition, always trimming trailing slashes. [#1639] - -### Changed -* Update actix-codec and actix-utils dependencies. [#1634] -* `FormConfig` and `JsonConfig` configurations are now also considered when set - using `App::data`. [#1641] -* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`. [#1655] -* `HttpServer::maxconnrate` is renamed to the more expressive - `HttpServer::max_connection_rate`. [#1655] - -[#1639]: https://github.com/actix/actix-web/pull/1639 -[#1641]: https://github.com/actix/actix-web/pull/1641 -[#1634]: https://github.com/actix/actix-web/pull/1634 -[#1655]: https://github.com/actix/actix-web/pull/1655 - -## 3.0.0-beta.3 - 2020-08-17 -### Changed -* Update `rustls` to 0.18 - - -## 3.0.0-beta.2 - 2020-08-17 -### Changed -* `PayloadConfig` is now also considered in `Bytes` and `String` extractors when set - using `App::data`. [#1610] -* `web::Path` now has a public representation: `web::Path(pub T)` that enables - destructuring. [#1594] -* `ServiceRequest::app_data` allows retrieval of non-Data data without splitting into parts to - access `HttpRequest` which already allows this. [#1618] -* Re-export all error types from `awc`. [#1621] -* MSRV is now 1.42.0. - -### Fixed -* Memory leak of app data in pooled requests. [#1609] - -[#1594]: https://github.com/actix/actix-web/pull/1594 -[#1609]: https://github.com/actix/actix-web/pull/1609 -[#1610]: https://github.com/actix/actix-web/pull/1610 -[#1618]: https://github.com/actix/actix-web/pull/1618 -[#1621]: https://github.com/actix/actix-web/pull/1621 - - -## 3.0.0-beta.1 - 2020-07-13 -### Added -* Re-export `actix_rt::main` as `actix_web::main`. -* `HttpRequest::match_pattern` and `ServiceRequest::match_pattern` for extracting the matched - resource pattern. -* `HttpRequest::match_name` and `ServiceRequest::match_name` for extracting matched resource name. - -### Changed -* Fix actix_http::h1::dispatcher so it returns when HW_BUFFER_SIZE is reached. Should reduce peak memory consumption during large uploads. [#1550] -* Migrate cookie handling to `cookie` crate. Actix-web no longer requires `ring` dependency. -* MSRV is now 1.41.1 - -### Fixed -* `NormalizePath` improved consistency when path needs slashes added _and_ removed. - - -## 3.0.0-alpha.3 - 2020-05-21 -### Added -* Add option to create `Data` from `Arc` [#1509] - -### Changed -* Resources and Scopes can now access non-overridden data types set on App (or containing scopes) when setting their own data. [#1486] -* Fix audit issue logging by default peer address [#1485] -* Bump minimum supported Rust version to 1.40 -* Replace deprecated `net2` crate with `socket2` - -[#1485]: https://github.com/actix/actix-web/pull/1485 -[#1509]: https://github.com/actix/actix-web/pull/1509 - -## [3.0.0-alpha.2] - 2020-05-08 - -### Changed - -* `{Resource,Scope}::default_service(f)` handlers now support app data extraction. [#1452] -* Implement `std::error::Error` for our custom errors [#1422] -* NormalizePath middleware now appends trailing / so that routes of form /example/ respond to /example requests. [#1433] -* Remove the `failure` feature and support. 
- -[#1422]: https://github.com/actix/actix-web/pull/1422 -[#1433]: https://github.com/actix/actix-web/pull/1433 -[#1452]: https://github.com/actix/actix-web/pull/1452 -[#1486]: https://github.com/actix/actix-web/pull/1486 - - -## [3.0.0-alpha.1] - 2020-03-11 - -### Added - -* Add helper function for creating routes with `TRACE` method guard `web::trace()` -* Add convenience functions `test::read_body_json()` and `test::TestRequest::send_request()` for testing. - -### Changed - -* Use `sha-1` crate instead of unmaintained `sha1` crate -* Skip empty chunks when returning response from a `Stream` [#1308] -* Update the `time` dependency to 0.2.7 -* Update `actix-tls` dependency to 2.0.0-alpha.1 -* Update `rustls` dependency to 0.17 - -[#1308]: https://github.com/actix/actix-web/pull/1308 - -## [2.0.0] - 2019-12-25 - -### Changed - -* Rename `HttpServer::start()` to `HttpServer::run()` - -* Allow to gracefully stop test server via `TestServer::stop()` - -* Allow to specify multi-patterns for resources - -## [2.0.0-rc] - 2019-12-20 - -### Changed - -* Move `BodyEncoding` to `dev` module #1220 - -* Allow to set `peer_addr` for TestRequest #1074 - -* Make web::Data deref to Arc #1214 - -* Rename `App::register_data()` to `App::app_data()` - -* `HttpRequest::app_data()` returns `Option<&T>` instead of `Option<&Data>` - -### Fixed - -* Fix `AppConfig::secure()` is always false. #1202 - - -## [2.0.0-alpha.6] - 2019-12-15 - -### Fixed - -* Fixed compilation with default features off - -## [2.0.0-alpha.5] - 2019-12-13 - -### Added - -* Add test server, `test::start()` and `test::start_with()` - -## [2.0.0-alpha.4] - 2019-12-08 - -### Deleted - -* Delete HttpServer::run(), it is not useful with async/await - -## [2.0.0-alpha.3] - 2019-12-07 - -### Changed - -* Migrate to tokio 0.2 - - -## [2.0.0-alpha.1] - 2019-11-22 - -### Changed - -* Migrated to `std::future` - -* Remove implementation of `Responder` for `()`. (#1167) - - -## [1.0.9] - 2019-11-14 - -### Added - -* Add `Payload::into_inner` method and make stored `def::Payload` public. (#1110) - -### Changed - -* Support `Host` guards when the `Host` header is unset (e.g. HTTP/2 requests) (#1129) - - -## [1.0.8] - 2019-09-25 - -### Added - -* Add `Scope::register_data` and `Resource::register_data` methods, parallel to - `App::register_data`. - -* Add `middleware::Condition` that conditionally enables another middleware - -* Allow to re-construct `ServiceRequest` from `HttpRequest` and `Payload` - -* Add `HttpServer::listen_uds` for ability to listen on UDS FD rather than path, - which is useful for example with systemd. - -### Changed - -* Make UrlEncodedError::Overflow more informative - -* Use actix-testing for testing utils - - -## [1.0.7] - 2019-08-29 - -### Fixed - -* Request Extensions leak #1062 - - -## [1.0.6] - 2019-08-28 - -### Added - -* Re-implement Host predicate (#989) - -* Form implements Responder, returning a `application/x-www-form-urlencoded` response - -* Add `into_inner` to `Data` - -* Add `test::TestRequest::set_form()` convenience method to automatically serialize data and set - the header in test requests. - -### Changed - -* `Query` payload made `pub`. Allows user to pattern-match the payload. 
- -* Enable `rust-tls` feature for client #1045 - -* Update serde_urlencoded to 0.6.1 - -* Update url to 2.1 - - -## [1.0.5] - 2019-07-18 - -### Added - -* Unix domain sockets (HttpServer::bind_uds) #92 - -* Actix now logs errors resulting in "internal server error" responses always, with the `error` - logging level - -### Fixed - -* Restored logging of errors through the `Logger` middleware - - -## [1.0.4] - 2019-07-17 - -### Added - -* Add `Responder` impl for `(T, StatusCode) where T: Responder` - -* Allow to access app's resource map via - `ServiceRequest::resource_map()` and `HttpRequest::resource_map()` methods. - -### Changed - -* Upgrade `rand` dependency version to 0.7 - - -## [1.0.3] - 2019-06-28 - -### Added - -* Support asynchronous data factories #850 - -### Changed - -* Use `encoding_rs` crate instead of unmaintained `encoding` crate - - -## [1.0.2] - 2019-06-17 - -### Changed - -* Move cors middleware to `actix-cors` crate. - -* Move identity middleware to `actix-identity` crate. - - -## [1.0.1] - 2019-06-17 - -### Added - -* Add support for PathConfig #903 - -* Add `middleware::identity::RequestIdentity` trait to `get_identity` from `HttpMessage`. - -### Changed - -* Move cors middleware to `actix-cors` crate. - -* Move identity middleware to `actix-identity` crate. - -* Disable default feature `secure-cookies`. - -* Allow to test an app that uses async actors #897 - -* Re-apply patch from #637 #894 - -### Fixed - -* HttpRequest::url_for is broken with nested scopes #915 - - -## [1.0.0] - 2019-06-05 - -### Added - -* Add `Scope::configure()` method. - -* Add `ServiceRequest::set_payload()` method. - -* Add `test::TestRequest::set_json()` convenience method to automatically - serialize data and set header in test requests. - -* Add macros for head, options, trace, connect and patch http methods - -### Changed - -* Drop an unnecessary `Option<_>` indirection around `ServerBuilder` from `HttpServer`. #863 - -### Fixed - -* Fix Logger request time format, and use rfc3339. #867 - -* Clear http requests pool on app service drop #860 - - -## [1.0.0-rc] - 2019-05-18 - -### Added - -* Add `Query::from_query()` to extract parameters from a query string. #846 -* `QueryConfig`, similar to `JsonConfig` for customizing error handling of query extractors. - -### Changed - -* `JsonConfig` is now `Send + Sync`, this implies that `error_handler` must be `Send + Sync` too. - -### Fixed - -* Codegen with parameters in the path only resolves the first registered endpoint #841 - - -## [1.0.0-beta.4] - 2019-05-12 - -### Added - -* Allow to set/override app data on scope level - -### Changed - -* `App::configure` take an `FnOnce` instead of `Fn` -* Upgrade actix-net crates - - -## [1.0.0-beta.3] - 2019-05-04 - -### Added - -* Add helper function for executing futures `test::block_fn()` - -### Changed - -* Extractor configuration could be registered with `App::data()` - or with `Resource::data()` #775 - -* Route data is unified with app data, `Route::data()` moved to resource - level to `Resource::data()` - -* CORS handling without headers #702 - -* Allow constructing `Data` instances to avoid double `Arc` for `Send + Sync` types. - -### Fixed - -* Fix `NormalizePath` middleware impl #806 - -### Deleted - -* `App::data_factory()` is deleted. 
- - -## [1.0.0-beta.2] - 2019-04-24 - -### Added - -* Add raw services support via `web::service()` - -* Add helper functions for reading response body `test::read_body()` - -* Add support for `remainder match` (i.e "/path/{tail}*") - -* Extend `Responder` trait, allow to override status code and headers. - -* Store visit and login timestamp in the identity cookie #502 - -### Changed - -* `.to_async()` handler can return `Responder` type #792 - -### Fixed - -* Fix async web::Data factory handling - - -## [1.0.0-beta.1] - 2019-04-20 - -### Added - -* Add helper functions for reading test response body, - `test::read_response()` and test::read_response_json()` - -* Add `.peer_addr()` #744 - -* Add `NormalizePath` middleware - -### Changed - -* Rename `RouterConfig` to `ServiceConfig` - -* Rename `test::call_success` to `test::call_service` - -* Removed `ServiceRequest::from_parts()` as it is unsafe to create from parts. - -* `CookieIdentityPolicy::max_age()` accepts value in seconds - -### Fixed - -* Fixed `TestRequest::app_data()` - - -## [1.0.0-alpha.6] - 2019-04-14 - -### Changed - -* Allow using any service as default service. - -* Remove generic type for request payload, always use default. - -* Removed `Decompress` middleware. Bytes, String, Json, Form extractors - automatically decompress payload. - -* Make extractor config type explicit. Add `FromRequest::Config` associated type. - - -## [1.0.0-alpha.5] - 2019-04-12 - -### Added - -* Added async io `TestBuffer` for testing. - -### Deleted - -* Removed native-tls support - - -## [1.0.0-alpha.4] - 2019-04-08 - -### Added - -* `App::configure()` allow to offload app configuration to different methods - -* Added `URLPath` option for logger - -* Added `ServiceRequest::app_data()`, returns `Data` - -* Added `ServiceFromRequest::app_data()`, returns `Data` - -### Changed - -* `FromRequest` trait refactoring - -* Move multipart support to actix-multipart crate - -### Fixed - -* Fix body propagation in Response::from_error. #760 - - -## [1.0.0-alpha.3] - 2019-04-02 - -### Changed - -* Renamed `TestRequest::to_service()` to `TestRequest::to_srv_request()` - -* Renamed `TestRequest::to_response()` to `TestRequest::to_srv_response()` - -* Removed `Deref` impls - -### Removed - -* Removed unused `actix_web::web::md()` - - -## [1.0.0-alpha.2] - 2019-03-29 - -### Added - -* Rustls support - -### Changed - -* Use forked cookie - -* Multipart::Field renamed to MultipartField - -## [1.0.0-alpha.1] - 2019-03-28 - -### Changed - -* Complete architecture re-design. - -* Return 405 response if no matching route found within resource #538 +Actix Web changelog [is now here →](./actix-web/CHANGES.md). 
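The `.rustfmt.toml` added earlier in this diff enables `group_imports = "StdExternalCrate"` and `imports_granularity = "Crate"`, both currently unstable rustfmt options, which is consistent with the Lint workflow's fmt job installing a nightly toolchain with the `rustfmt` component. A minimal sketch of the import layout these options produce; the module paths are illustrative rather than taken from the codebase:

```rust
// Group 1: standard library imports, merged into one `use` per crate by
// `imports_granularity = "Crate"`.
use std::{collections::HashMap, io};

// Group 2: external crates.
use actix_web::{web, HttpResponse};

// Group 3: `crate::`, `self::` and `super::` paths (the `config` module here
// is hypothetical, shown only to illustrate the third group).
use crate::config::AppConfig;
```

`use_field_init_shorthand = true` additionally rewrites struct literals such as `AppConfig { workers: workers }` to `AppConfig { workers }`.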
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index ae97b3240..dbd092095 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -8,19 +8,19 @@ In the interest of fostering an open and welcoming environment, we as contributo Examples of behavior that contributes to creating a positive environment include: -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members Examples of unacceptable behavior by participants include: -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting +- The use of sexualized language or imagery and unwelcome sexual attention or advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a professional setting ## Our Responsibilities diff --git a/Cargo.toml b/Cargo.toml index 5aa302333..19d5dd116 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,111 +1,29 @@ -[package] -name = "actix-web" -version = "4.0.0-beta.6" -authors = ["Nikolay Kim "] -description = "Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust" -keywords = ["actix", "http", "web", "framework", "async"] -categories = [ - "network-programming", - "asynchronous", - "web-programming::http-server", - "web-programming::websocket" +[workspace] +resolver = "2" +members = [ + "actix-files", + "actix-http-test", + "actix-http", + "actix-multipart", + "actix-multipart-derive", + "actix-router", + "actix-test", + "actix-web-actors", + "actix-web-codegen", + "actix-web", + "awc", ] + +[workspace.package] homepage = "https://actix.rs" repository = "https://github.com/actix/actix-web" license = "MIT OR Apache-2.0" -edition = "2018" +edition = "2021" +rust-version = "1.72" -[package.metadata.docs.rs] -# features that docs.rs will build with -features = ["openssl", "rustls", "compress", "cookies", "secure-cookies"] - -[lib] -name = "actix_web" -path = "src/lib.rs" - -[workspace] -members = [ - ".", - "awc", - "actix-http", - "actix-files", - "actix-multipart", - "actix-web-actors", - "actix-web-codegen", - "actix-http-test", - "actix-test", -] -# enable when MSRV is 1.51+ -# resolver = "2" - -[features] -default = ["compress", "cookies"] - -# content-encoding support -compress = ["actix-http/compress"] - -# support for cookies -cookies = ["cookie"] - -# secure cookies feature -secure-cookies = ["cookie/secure"] - -# openssl -openssl = ["actix-http/openssl", "actix-tls/accept", "actix-tls/openssl"] - -# rustls -rustls = ["actix-http/rustls", "actix-tls/accept", "actix-tls/rustls"] - -[dependencies] -actix-codec = "0.4.0" -actix-macros = "0.2.0" -actix-router = "0.2.7" -actix-rt = 
"2.2" -actix-server = "2.0.0-beta.3" -actix-service = "2.0.0" -actix-utils = "3.0.0" -actix-tls = { version = "3.0.0-beta.5", default-features = false, optional = true } - -actix-web-codegen = "0.5.0-beta.2" -actix-http = "3.0.0-beta.6" - -ahash = "0.7" -bytes = "1" -cookie = { version = "0.15", features = ["percent-encode"], optional = true } -derive_more = "0.99.5" -either = "1.5.3" -encoding_rs = "0.8" -futures-core = { version = "0.3.7", default-features = false } -futures-util = { version = "0.3.7", default-features = false } -itoa = "0.4" -language-tags = "0.3" -once_cell = "1.5" -log = "0.4" -mime = "0.3" -paste = "1" -pin-project = "1.0.0" -regex = "1.4" -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" -serde_urlencoded = "0.7" -smallvec = "1.6" -socket2 = "0.4.0" -time = { version = "0.2.23", default-features = false, features = ["std"] } -url = "2.1" - -[dev-dependencies] -actix-test = { version = "0.1.0-beta.2", features = ["openssl", "rustls"] } -awc = { version = "3.0.0-beta.5", features = ["openssl"] } - -brotli2 = "0.3.2" -criterion = "0.3" -env_logger = "0.8" -flate2 = "1.0.13" -rand = "0.8" -rcgen = "0.8" -serde_derive = "1.0" -tls-openssl = { package = "openssl", version = "0.10.9" } -tls-rustls = { package = "rustls", version = "0.19.0" } +[profile.dev] +# Disabling debug info speeds up builds a bunch and we don't rely on it for debugging that much. +debug = 0 [profile.release] lto = true @@ -117,36 +35,19 @@ actix-files = { path = "actix-files" } actix-http = { path = "actix-http" } actix-http-test = { path = "actix-http-test" } actix-multipart = { path = "actix-multipart" } +actix-multipart-derive = { path = "actix-multipart-derive" } +actix-router = { path = "actix-router" } actix-test = { path = "actix-test" } -actix-web = { path = "." } +actix-web = { path = "actix-web" } actix-web-actors = { path = "actix-web-actors" } actix-web-codegen = { path = "actix-web-codegen" } awc = { path = "awc" } -[[test]] -name = "test_server" -required-features = ["compress", "cookies"] - -[[example]] -name = "basic" -required-features = ["compress"] - -[[example]] -name = "uds" -required-features = ["compress"] - -[[example]] -name = "on_connect" -required-features = [] - -[[bench]] -name = "server" -harness = false - -[[bench]] -name = "service" -harness = false - -[[bench]] -name = "responder" -harness = false +# uncomment for quick testing against local actix-net repo +# actix-service = { path = "../actix-net/actix-service" } +# actix-macros = { path = "../actix-net/actix-macros" } +# actix-rt = { path = "../actix-net/actix-rt" } +# actix-codec = { path = "../actix-net/actix-codec" } +# actix-utils = { path = "../actix-net/actix-utils" } +# actix-tls = { path = "../actix-net/actix-tls" } +# actix-server = { path = "../actix-net/actix-server" } diff --git a/MIGRATION.md b/MIGRATION.md deleted file mode 100644 index e01702868..000000000 --- a/MIGRATION.md +++ /dev/null @@ -1,662 +0,0 @@ -## Unreleased - -* The default `NormalizePath` behavior now strips trailing slashes by default. This was - previously documented to be the case in v3 but the behavior now matches. The effect is that - routes defined with trailing slashes will become inaccessible when - using `NormalizePath::default()`. - - Before: `#[get("/test/")` - After: `#[get("/test")` - - Alternatively, explicitly require trailing slashes: `NormalizePath::new(TrailingSlash::Always)`. 
- - -## 3.0.0 - -* The return type for `ServiceRequest::app_data::()` was changed from returning a `Data` to - simply a `T`. To access a `Data` use `ServiceRequest::app_data::>()`. - -* Cookie handling has been offloaded to the `cookie` crate: - * `USERINFO_ENCODE_SET` is no longer exposed. Percent-encoding is still supported; check docs. - * Some types now require lifetime parameters. - -* The time crate was updated to `v0.2`, a major breaking change to the time crate, which affects - any `actix-web` method previously expecting a time v0.1 input. - -* Setting a cookie's SameSite property, explicitly, to `SameSite::None` will now - result in `SameSite=None` being sent with the response Set-Cookie header. - To create a cookie without a SameSite attribute, remove any calls setting same_site. - -* actix-http support for Actors messages was moved to actix-http crate and is enabled - with feature `actors` - -* content_length function is removed from actix-http. - You can set Content-Length by normally setting the response body or calling no_chunking function. - -* `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a - `u64` instead of a `usize`. - -* Code that was using `path.` to access a `web::Path<(A, B, C)>`s elements now needs to use - destructuring or `.into_inner()`. For example: - - ```rust - // Previously: - async fn some_route(path: web::Path<(String, String)>) -> String { - format!("Hello, {} {}", path.0, path.1) - } - - // Now (this also worked before): - async fn some_route(path: web::Path<(String, String)>) -> String { - let (first_name, last_name) = path.into_inner(); - format!("Hello, {} {}", first_name, last_name) - } - // Or (this wasn't previously supported): - async fn some_route(web::Path((first_name, last_name)): web::Path<(String, String)>) -> String { - format!("Hello, {} {}", first_name, last_name) - } - ``` - -* `middleware::NormalizePath` can now also be configured to trim trailing slashes instead of always keeping one. - It will need `middleware::normalize::TrailingSlash` when being constructed with `NormalizePath::new(...)`, - or for an easier migration you can replace `wrap(middleware::NormalizePath)` with `wrap(middleware::NormalizePath::new(TrailingSlash::MergeOnly))`. - -* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`. - -* `HttpServer::maxconnrate` is renamed to the more expressive `HttpServer::max_connection_rate`. - - -## 2.0.0 - -* `HttpServer::start()` renamed to `HttpServer::run()`. It also possible to - `.await` on `run` method result, in that case it awaits server exit. - -* `App::register_data()` renamed to `App::app_data()` and accepts any type `T: 'static`. - Stored data is available via `HttpRequest::app_data()` method at runtime. - -* Extractor configuration must be registered with `App::app_data()` instead of `App::data()` - -* Sync handlers has been removed. `.to_async()` method has been renamed to `.to()` - replace `fn` with `async fn` to convert sync handler to async - -* `actix_http_test::TestServer` moved to `actix_web::test` module. To start - test server use `test::start()` or `test_start_with_config()` methods - -* `ResponseError` trait has been reafctored. `ResponseError::error_response()` renders - http response. 
- -* Feature `rust-tls` renamed to `rustls` - - instead of - - ```rust - actix-web = { version = "2.0.0", features = ["rust-tls"] } - ``` - - use - - ```rust - actix-web = { version = "2.0.0", features = ["rustls"] } - ``` - -* Feature `ssl` renamed to `openssl` - - instead of - - ```rust - actix-web = { version = "2.0.0", features = ["ssl"] } - ``` - - use - - ```rust - actix-web = { version = "2.0.0", features = ["openssl"] } - ``` -* `Cors` builder now requires that you call `.finish()` to construct the middleware - -## 1.0.1 - -* Cors middleware has been moved to `actix-cors` crate - - instead of - - ```rust - use actix_web::middleware::cors::Cors; - ``` - - use - - ```rust - use actix_cors::Cors; - ``` - -* Identity middleware has been moved to `actix-identity` crate - - instead of - - ```rust - use actix_web::middleware::identity::{Identity, CookieIdentityPolicy, IdentityService}; - ``` - - use - - ```rust - use actix_identity::{Identity, CookieIdentityPolicy, IdentityService}; - ``` - - -## 1.0.0 - -* Extractor configuration. In version 1.0 this is handled with the new `Data` mechanism for both setting and retrieving the configuration - - instead of - - ```rust - - #[derive(Default)] - struct ExtractorConfig { - config: String, - } - - impl FromRequest for YourExtractor { - type Config = ExtractorConfig; - type Result = Result; - - fn from_request(req: &HttpRequest, cfg: &Self::Config) -> Self::Result { - println!("use the config: {:?}", cfg.config); - ... - } - } - - App::new().resource("/route_with_config", |r| { - r.post().with_config(handler_fn, |cfg| { - cfg.0.config = "test".to_string(); - }) - }) - - ``` - - use the HttpRequest to get the configuration like any other `Data` with `req.app_data::()` and set it with the `data()` method on the `resource` - - ```rust - #[derive(Default)] - struct ExtractorConfig { - config: String, - } - - impl FromRequest for YourExtractor { - type Error = Error; - type Future = Result; - type Config = ExtractorConfig; - - fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future { - let cfg = req.app_data::(); - println!("config data?: {:?}", cfg.unwrap().role); - ... - } - } - - App::new().service( - resource("/route_with_config") - .data(ExtractorConfig { - config: "test".to_string(), - }) - .route(post().to(handler_fn)), - ) - ``` - -* Resource registration. 1.0 version uses generalized resource - registration via `.service()` method. - - instead of - - ```rust - App.new().resource("/welcome", |r| r.f(welcome)) - ``` - - use App's or Scope's `.service()` method. `.service()` method accepts - object that implements `HttpServiceFactory` trait. By default - actix-web provides `Resource` and `Scope` services. - - ```rust - App.new().service( - web::resource("/welcome") - .route(web::get().to(welcome)) - .route(web::post().to(post_handler)) - ``` - -* Scope registration. - - instead of - - ```rust - let app = App::new().scope("/{project_id}", |scope| { - scope - .resource("/path1", |r| r.f(|_| HttpResponse::Ok())) - .resource("/path2", |r| r.f(|_| HttpResponse::Ok())) - .resource("/path3", |r| r.f(|_| HttpResponse::MethodNotAllowed())) - }); - ``` - - use `.service()` for registration and `web::scope()` as scope object factory. 
- - ```rust - let app = App::new().service( - web::scope("/{project_id}") - .service(web::resource("/path1").to(|| HttpResponse::Ok())) - .service(web::resource("/path2").to(|| HttpResponse::Ok())) - .service(web::resource("/path3").to(|| HttpResponse::MethodNotAllowed())) - ); - ``` - -* `.with()`, `.with_async()` registration methods have been renamed to `.to()` and `.to_async()`. - - instead of - - ```rust - App.new().resource("/welcome", |r| r.with(welcome)) - ``` - - use `.to()` or `.to_async()` methods - - ```rust - App.new().service(web::resource("/welcome").to(welcome)) - ``` - -* Passing arguments to handler with extractors, multiple arguments are allowed - - instead of - - ```rust - fn welcome((body, req): (Bytes, HttpRequest)) -> ... { - ... - } - ``` - - use multiple arguments - - ```rust - fn welcome(body: Bytes, req: HttpRequest) -> ... { - ... - } - ``` - -* `.f()`, `.a()` and `.h()` handler registration methods have been removed. - Use `.to()` for handlers and `.to_async()` for async handlers. Handler function - must use extractors. - - instead of - - ```rust - App.new().resource("/welcome", |r| r.f(welcome)) - ``` - - use App's `to()` or `to_async()` methods - - ```rust - App.new().service(web::resource("/welcome").to(welcome)) - ``` - -* `HttpRequest` does not provide access to request's payload stream. - - instead of - - ```rust - fn index(req: &HttpRequest) -> Box> { - req - .payload() - .from_err() - .fold((), |_, chunk| { - ... - }) - .map(|_| HttpResponse::Ok().finish()) - .responder() - } - ``` - - use `Payload` extractor - - ```rust - fn index(stream: web::Payload) -> impl Future { - stream - .from_err() - .fold((), |_, chunk| { - ... - }) - .map(|_| HttpResponse::Ok().finish()) - } - ``` - -* `State` is now `Data`. You register Data during the App initialization process - and then access it from handlers either using a Data extractor or using - HttpRequest's api. - - instead of - - ```rust - App.with_state(T) - ``` - - use App's `data` method - - ```rust - App.new() - .data(T) - ``` - - and either use the Data extractor within your handler - - ```rust - use actix_web::web::Data; - - fn endpoint_handler(Data)){ - ... - } - ``` - - .. or access your Data element from the HttpRequest - - ```rust - fn endpoint_handler(req: HttpRequest) { - let data: Option> = req.app_data::(); - } - ``` - - -* AsyncResponder is removed, use `.to_async()` registration method and `impl Future<>` as result type. - - instead of - - ```rust - use actix_web::AsyncResponder; - - fn endpoint_handler(...) -> impl Future{ - ... - .responder() - } - ``` - - .. simply omit AsyncResponder and the corresponding responder() finish method - - -* Middleware - - instead of - - ```rust - let app = App::new() - .middleware(middleware::Logger::default()) - ``` - - use `.wrap()` method - - ```rust - let app = App::new() - .wrap(middleware::Logger::default()) - .route("/index.html", web::get().to(index)); - ``` - -* `HttpRequest::body()`, `HttpRequest::urlencoded()`, `HttpRequest::json()`, `HttpRequest::multipart()` - method have been removed. Use `Bytes`, `String`, `Form`, `Json`, `Multipart` extractors instead. - - instead of - - ```rust - fn index(req: &HttpRequest) -> Responder { - req.body() - .and_then(|body| { - ... - }) - } - ``` - - use - - ```rust - fn index(body: Bytes) -> Responder { - ... - } - ``` - -* `actix_web::server` module has been removed. To start http server use `actix_web::HttpServer` type - -* StaticFiles and NamedFile have been moved to a separate crate. 
- - instead of `use actix_web::fs::StaticFile` - - use `use actix_files::Files` - - instead of `use actix_web::fs::Namedfile` - - use `use actix_files::NamedFile` - -* Multipart has been moved to a separate crate. - - instead of `use actix_web::multipart::Multipart` - - use `use actix_multipart::Multipart` - -* Response compression is not enabled by default. - To enable, use `Compress` middleware, `App::new().wrap(Compress::default())`. - -* Session middleware moved to actix-session crate - -* Actors support have been moved to `actix-web-actors` crate - -* Custom Error - - Instead of error_response method alone, ResponseError now provides two methods: error_response and render_response respectively. Where, error_response creates the error response and render_response returns the error response to the caller. - - Simplest migration from 0.7 to 1.0 shall include below method to the custom implementation of ResponseError: - - ```rust - fn render_response(&self) -> HttpResponse { - self.error_response() - } - ``` - -## 0.7.15 - -* The `' '` character is not percent decoded anymore before matching routes. If you need to use it in - your routes, you should use `%20`. - - instead of - - ```rust - fn main() { - let app = App::new().resource("/my index", |r| { - r.method(http::Method::GET) - .with(index); - }); - } - ``` - - use - - ```rust - fn main() { - let app = App::new().resource("/my%20index", |r| { - r.method(http::Method::GET) - .with(index); - }); - } - ``` - -* If you used `AsyncResult::async` you need to replace it with `AsyncResult::future` - - -## 0.7.4 - -* `Route::with_config()`/`Route::with_async_config()` always passes configuration objects as tuple - even for handler with one parameter. - - -## 0.7 - -* `HttpRequest` does not implement `Stream` anymore. If you need to read request payload - use `HttpMessage::payload()` method. - - instead of - - ```rust - fn index(req: HttpRequest) -> impl Responder { - req - .from_err() - .fold(...) - .... - } - ``` - - use `.payload()` - - ```rust - fn index(req: HttpRequest) -> impl Responder { - req - .payload() // <- get request payload stream - .from_err() - .fold(...) - .... - } - ``` - -* [Middleware](https://actix.rs/actix-web/actix_web/middleware/trait.Middleware.html) - trait uses `&HttpRequest` instead of `&mut HttpRequest`. - -* Removed `Route::with2()` and `Route::with3()` use tuple of extractors instead. - - instead of - - ```rust - fn index(query: Query<..>, info: Json impl Responder {} - ``` - - use tuple of extractors and use `.with()` for registration: - - ```rust - fn index((query, json): (Query<..>, Json impl Responder {} - ``` - -* `Handler::handle()` uses `&self` instead of `&mut self` - -* `Handler::handle()` accepts reference to `HttpRequest<_>` instead of value - -* Removed deprecated `HttpServer::threads()`, use - [HttpServer::workers()](https://actix.rs/actix-web/actix_web/server/struct.HttpServer.html#method.workers) instead. 
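To make the `threads()` → `workers()` rename above concrete, here is a minimal sketch of a 0.7-era server that sets the worker count explicitly; the handler, route, and port are illustrative and not taken from the original guide:

```rust
extern crate actix_web;

use actix_web::{server, App, HttpRequest};

// 0.7-style handler: takes a reference to the request.
fn index(_req: &HttpRequest) -> &'static str {
    "Hello world!"
}

fn main() {
    server::new(|| App::new().resource("/", |r| r.f(index)))
        .workers(4) // replaces the removed, deprecated `.threads(4)`
        .bind("127.0.0.1:8080")
        .expect("cannot bind to 127.0.0.1:8080")
        .run();
}
```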
- -* Renamed `client::ClientConnectorError::Connector` to - `client::ClientConnectorError::Resolver` - -* `Route::with()` does not return `ExtractorConfig`, to configure - extractor use `Route::with_config()` - - instead of - - ```rust - fn main() { - let app = App::new().resource("/index.html", |r| { - r.method(http::Method::GET) - .with(index) - .limit(4096); // <- limit size of the payload - }); - } - ``` - - use - - ```rust - - fn main() { - let app = App::new().resource("/index.html", |r| { - r.method(http::Method::GET) - .with_config(index, |cfg| { // <- register handler - cfg.limit(4096); // <- limit size of the payload - }) - }); - } - ``` - -* `Route::with_async()` does not return `ExtractorConfig`, to configure - extractor use `Route::with_async_config()` - - -## 0.6 - -* `Path` extractor return `ErrorNotFound` on failure instead of `ErrorBadRequest` - -* `ws::Message::Close` now includes optional close reason. - `ws::CloseCode::Status` and `ws::CloseCode::Empty` have been removed. - -* `HttpServer::threads()` renamed to `HttpServer::workers()`. - -* `HttpServer::start_ssl()` and `HttpServer::start_tls()` deprecated. - Use `HttpServer::bind_ssl()` and `HttpServer::bind_tls()` instead. - -* `HttpRequest::extensions()` returns read only reference to the request's Extension - `HttpRequest::extensions_mut()` returns mutable reference. - -* Instead of - - `use actix_web::middleware::{ - CookieSessionBackend, CookieSessionError, RequestSession, - Session, SessionBackend, SessionImpl, SessionStorage};` - - use `actix_web::middleware::session` - - `use actix_web::middleware::session{CookieSessionBackend, CookieSessionError, - RequestSession, Session, SessionBackend, SessionImpl, SessionStorage};` - -* `FromRequest::from_request()` accepts mutable reference to a request - -* `FromRequest::Result` has to implement `Into>` - -* [`Responder::respond_to()`]( - https://actix.rs/actix-web/actix_web/trait.Responder.html#tymethod.respond_to) - is generic over `S` - -* Use `Query` extractor instead of HttpRequest::query()`. - - ```rust - fn index(q: Query>) -> Result<..> { - ... - } - ``` - - or - - ```rust - let q = Query::>::extract(req); - ``` - -* Websocket operations are implemented as `WsWriter` trait. - you need to use `use actix_web::ws::WsWriter` - - -## 0.5 - -* `HttpResponseBuilder::body()`, `.finish()`, `.json()` - methods return `HttpResponse` instead of `Result` - -* `actix_web::Method`, `actix_web::StatusCode`, `actix_web::Version` - moved to `actix_web::http` module - -* `actix_web::header` moved to `actix_web::http::header` - -* `NormalizePath` moved to `actix_web::http` module - -* `HttpServer` moved to `actix_web::server`, added new `actix_web::server::new()` function, - shortcut for `actix_web::server::HttpServer::new()` - -* `DefaultHeaders` middleware does not use separate builder, all builder methods moved to type itself - -* `StaticFiles::new()`'s show_index parameter removed, use `show_files_listing()` method instead. 
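For comparison with the `show_files_listing()` change above, the same behaviour in today's actix-files crate is enabled with the method of the same name; a minimal sketch with an illustrative mount path and directory:

```rust
use actix_files::Files;
use actix_web::{App, HttpServer};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        // Serve ./static under /static and render a directory listing,
        // since no index file is configured.
        App::new().service(Files::new("/static", "./static").show_files_listing())
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}
```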
- -* `CookieSessionBackendBuilder` removed, all methods moved to `CookieSessionBackend` type - -* `actix_web::httpcodes` module is deprecated, `HttpResponse::Ok()`, `HttpResponse::Found()` and other `HttpResponse::XXX()` - functions should be used instead - -* `ClientRequestBuilder::body()` returns `Result<_, actix_web::Error>` - instead of `Result<_, http::Error>` - -* `Application` renamed to `App` - -* `actix_web::Reply`, `actix_web::Resource` moved to `actix_web::dev` diff --git a/README.md b/README.md deleted file mode 100644 index 60ec57c60..000000000 --- a/README.md +++ /dev/null @@ -1,109 +0,0 @@ -
-# Actix Web
-
-Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust
-
- -[![crates.io](https://img.shields.io/crates/v/actix-web?label=latest)](https://crates.io/crates/actix-web) -[![Documentation](https://docs.rs/actix-web/badge.svg?version=4.0.0-beta.5)](https://docs.rs/actix-web/4.0.0-beta.5) -[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html) -![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-web.svg) -[![Dependency Status](https://deps.rs/crate/actix-web/4.0.0-beta.5/status.svg)](https://deps.rs/crate/actix-web/4.0.0-beta.5) -
-[![build status](https://github.com/actix/actix-web/workflows/CI%20%28Linux%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-web/actions) -[![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) -![downloads](https://img.shields.io/crates/d/actix-web.svg) -[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) - -

-
- -## Features - -* Supports *HTTP/1.x* and *HTTP/2* -* Streaming and pipelining -* Keep-alive and slow requests handling -* Client/server [WebSockets](https://actix.rs/docs/websockets/) support -* Transparent content compression/decompression (br, gzip, deflate) -* Powerful [request routing](https://actix.rs/docs/url-dispatch/) -* Multipart streams -* Static assets -* SSL support using OpenSSL or Rustls -* Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/)) -* Includes an async [HTTP client](https://docs.rs/awc/) -* Runs on stable Rust 1.46+ - -## Documentation - -* [Website & User Guide](https://actix.rs) -* [Examples Repository](https://github.com/actix/examples) -* [API Documentation](https://docs.rs/actix-web) -* [API Documentation (master branch)](https://actix.rs/actix-web/actix_web) - -## Example - -Dependencies: - -```toml -[dependencies] -actix-web = "3" -``` - -Code: - -```rust -use actix_web::{get, web, App, HttpServer, Responder}; - -#[get("/{id}/{name}/index.html")] -async fn index(web::Path((id, name)): web::Path<(u32, String)>) -> impl Responder { - format!("Hello {}! id:{}", name, id) -} - -#[actix_web::main] -async fn main() -> std::io::Result<()> { - HttpServer::new(|| App::new().service(index)) - .bind("127.0.0.1:8080")? - .run() - .await -} -``` - -### More examples - -* [Basic Setup](https://github.com/actix/examples/tree/master/basics/basics/) -* [Application State](https://github.com/actix/examples/tree/master/basics/state/) -* [JSON Handling](https://github.com/actix/examples/tree/master/json/json/) -* [Multipart Streams](https://github.com/actix/examples/tree/master/forms/multipart/) -* [Diesel Integration](https://github.com/actix/examples/tree/master/database_interactions/diesel/) -* [r2d2 Integration](https://github.com/actix/examples/tree/master/database_interactions/r2d2/) -* [Simple WebSocket](https://github.com/actix/examples/tree/master/websockets/websocket/) -* [Tera Templates](https://github.com/actix/examples/tree/master/template_engines/tera/) -* [Askama Templates](https://github.com/actix/examples/tree/master/template_engines/askama/) -* [HTTPS using Rustls](https://github.com/actix/examples/tree/master/security/rustls/) -* [HTTPS using OpenSSL](https://github.com/actix/examples/tree/master/security/openssl/) -* [WebSocket Chat](https://github.com/actix/examples/tree/master/websockets/chat/) - -You may consider checking out -[this directory](https://github.com/actix/examples/tree/master/) for more examples. - -## Benchmarks - -One of the fastest web frameworks available according to the -[TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r20&test=composite). - -## License - -This project is licensed under either of - -* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or - [http://www.apache.org/licenses/LICENSE-2.0]) -* MIT license ([LICENSE-MIT](LICENSE-MIT) or - [http://opensource.org/licenses/MIT]) - -at your option. - -## Code of Conduct - -Contribution to the actix-web repo is organized under the terms of the Contributor Covenant. -The Actix team promises to intervene to uphold that code of conduct. 
diff --git a/README.md b/README.md new file mode 120000 index 000000000..16b750c17 --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +actix-web/README.md \ No newline at end of file diff --git a/actix-files/CHANGES.md b/actix-files/CHANGES.md index 12b70c135..e94f43907 100644 --- a/actix-files/CHANGES.md +++ b/actix-files/CHANGES.md @@ -1,141 +1,250 @@ # Changes -## Unreleased - 2021-xx-xx -* `NamedFile` now implements `ServiceFactory` and `HttpServiceFactory` making it much more useful in routing. For example, it can be used directly as a default service. [#2135] -* For symbolic links, `Content-Disposition` header no longer shows the filename of the original file. [#2156] -* `Files::redirect_to_slash_directory()` now works as expected when used with `Files::show_files_listing()`. [#2225] +## Unreleased + +## 0.6.6 + +- Update `tokio-uring` dependency to `0.4`. +- Minimum supported Rust version (MSRV) is now 1.72. + +## 0.6.5 + +- Fix handling of special characters in filenames. + +## 0.6.4 + +- Fix handling of newlines in filenames. +- Minimum supported Rust version (MSRV) is now 1.68 due to transitive `time` dependency. + +## 0.6.3 + +- XHTML files now use `Content-Disposition: inline` instead of `attachment`. [#2903] +- Minimum supported Rust version (MSRV) is now 1.59 due to transitive `time` dependency. +- Update `tokio-uring` dependency to `0.4`. + +[#2903]: https://github.com/actix/actix-web/pull/2903 + +## 0.6.2 + +- Allow partial range responses for video content to start streaming sooner. [#2817] +- Minimum supported Rust version (MSRV) is now 1.57 due to transitive `time` dependency. + +[#2817]: https://github.com/actix/actix-web/pull/2817 + +## 0.6.1 + +- Add `NamedFile::{modified, metadata, content_type, content_disposition, encoding}()` getters. [#2021] +- Update `tokio-uring` dependency to `0.3`. +- Audio files now use `Content-Disposition: inline` instead of `attachment`. [#2645] +- Minimum supported Rust version (MSRV) is now 1.56 due to transitive `hashbrown` dependency. + +[#2021]: https://github.com/actix/actix-web/pull/2021 +[#2645]: https://github.com/actix/actix-web/pull/2645 + +## 0.6.0 + +- No significant changes since `0.6.0-beta.16`. + +## 0.6.0-beta.16 + +- No significant changes since `0.6.0-beta.15`. + +## 0.6.0-beta.15 + +- No significant changes since `0.6.0-beta.14`. + +## 0.6.0-beta.14 + +- The `prefer_utf8` option introduced in `0.4.0` is now true by default. [#2583] + +[#2583]: https://github.com/actix/actix-web/pull/2583 + +## 0.6.0-beta.13 + +- The `Files` service now rejects requests with URL paths that include `%2F` (decoded: `/`). [#2398] +- The `Files` service now correctly decodes `%25` in the URL path to `%` for the file path. [#2398] +- Minimum supported Rust version (MSRV) is now 1.54. + +[#2398]: https://github.com/actix/actix-web/pull/2398 + +## 0.6.0-beta.12 + +- No significant changes since `0.6.0-beta.11`. + +## 0.6.0-beta.11 + +- No significant changes since `0.6.0-beta.10`. + +## 0.6.0-beta.10 + +- No significant changes since `0.6.0-beta.9`. + +## 0.6.0-beta.9 + +- Add crate feature `experimental-io-uring`, enabling async file I/O to be utilized. This feature is only available on Linux OSes with recent kernel versions. This feature is semver-exempt. [#2408] +- Add `NamedFile::open_async`. [#2408] +- Fix 304 Not Modified responses to omit the Content-Length header, as per the spec. [#2453] +- The `Responder` impl for `NamedFile` now has a boxed future associated type. 
[#2408] +- The `Service` impl for `NamedFileService` now has a boxed future associated type. [#2408] +- Add `impl Clone` for `FilesService`. [#2408] + +[#2408]: https://github.com/actix/actix-web/pull/2408 +[#2453]: https://github.com/actix/actix-web/pull/2453 + +## 0.6.0-beta.8 + +- Minimum supported Rust version (MSRV) is now 1.52. + +## 0.6.0-beta.7 + +- Minimum supported Rust version (MSRV) is now 1.51. + +## 0.6.0-beta.6 + +- Added `Files::path_filter()`. [#2274] +- `Files::show_files_listing()` can now be used with `Files::index_file()` to show files listing as a fallback when the index file is not found. [#2228] + +[#2274]: https://github.com/actix/actix-web/pull/2274 +[#2228]: https://github.com/actix/actix-web/pull/2228 + +## 0.6.0-beta.5 + +- `NamedFile` now implements `ServiceFactory` and `HttpServiceFactory` making it much more useful in routing. For example, it can be used directly as a default service. [#2135] +- For symbolic links, `Content-Disposition` header no longer shows the filename of the original file. [#2156] +- `Files::redirect_to_slash_directory()` now works as expected when used with `Files::show_files_listing()`. [#2225] +- `application/{javascript, json, wasm}` mime type now have `inline` disposition by default. [#2257] [#2135]: https://github.com/actix/actix-web/pull/2135 [#2156]: https://github.com/actix/actix-web/pull/2156 [#2225]: https://github.com/actix/actix-web/pull/2225 +[#2257]: https://github.com/actix/actix-web/pull/2257 +## 0.6.0-beta.4 -## 0.6.0-beta.4 - 2021-04-02 -* No notable changes. - -* Add support for `.guard` in `Files` to selectively filter `Files` services. [#2046] +- Add support for `.guard` in `Files` to selectively filter `Files` services. [#2046] [#2046]: https://github.com/actix/actix-web/pull/2046 -## 0.6.0-beta.3 - 2021-03-09 -* No notable changes. +## 0.6.0-beta.3 +- No notable changes. -## 0.6.0-beta.2 - 2021-02-10 -* Fix If-Modified-Since and If-Unmodified-Since to not compare using sub-second timestamps. [#1887] -* Replace `v_htmlescape` with `askama_escape`. [#1953] +## 0.6.0-beta.2 + +- Fix If-Modified-Since and If-Unmodified-Since to not compare using sub-second timestamps. [#1887] +- Replace `v_htmlescape` with `askama_escape`. [#1953] [#1887]: https://github.com/actix/actix-web/pull/1887 [#1953]: https://github.com/actix/actix-web/pull/1953 +## 0.6.0-beta.1 -## 0.6.0-beta.1 - 2021-01-07 -* `HttpRange::parse` now has its own error type. -* Update `bytes` to `1.0`. [#1813] +- `HttpRange::parse` now has its own error type. +- Update `bytes` to `1.0`. [#1813] [#1813]: https://github.com/actix/actix-web/pull/1813 +## 0.5.0 -## 0.5.0 - 2020-12-26 -* Optionally support hidden files/directories. [#1811] +- Optionally support hidden files/directories. [#1811] [#1811]: https://github.com/actix/actix-web/pull/1811 +## 0.4.1 -## 0.4.1 - 2020-11-24 -* Clarify order of parameters in `Files::new` and improve docs. +- Clarify order of parameters in `Files::new` and improve docs. +## 0.4.0 -## 0.4.0 - 2020-10-06 -* Add `Files::prefer_utf8` option that adds UTF-8 charset on certain response types. [#1714] +- Add `Files::prefer_utf8` option that adds UTF-8 charset on certain response types. [#1714] [#1714]: https://github.com/actix/actix-web/pull/1714 +## 0.3.0 -## 0.3.0 - 2020-09-11 -* No significant changes from 0.3.0-beta.1. +- No significant changes from 0.3.0-beta.1. 
+## 0.3.0-beta.1 -## 0.3.0-beta.1 - 2020-07-15 -* Update `v_htmlescape` to 0.10 -* Update `actix-web` and `actix-http` dependencies to beta.1 +- Update `v_htmlescape` to 0.10 +- Update `actix-web` and `actix-http` dependencies to beta.1 +## 0.3.0-alpha.1 -## 0.3.0-alpha.1 - 2020-05-23 -* Update `actix-web` and `actix-http` dependencies to alpha -* Fix some typos in the docs -* Bump minimum supported Rust version to 1.40 -* Support sending Content-Length when Content-Range is specified [#1384] +- Update `actix-web` and `actix-http` dependencies to alpha +- Fix some typos in the docs +- Bump minimum supported Rust version to 1.40 +- Support sending Content-Length when Content-Range is specified [#1384] [#1384]: https://github.com/actix/actix-web/pull/1384 +## 0.2.1 -## 0.2.1 - 2019-12-22 -* Use the same format for file URLs regardless of platforms +- Use the same format for file URLs regardless of platforms +## 0.2.0 -## 0.2.0 - 2019-12-20 -* Fix BodyEncoding trait import #1220 +- Fix BodyEncoding trait import #1220 +## 0.2.0-alpha.1 -## 0.2.0-alpha.1 - 2019-12-07 -* Migrate to `std::future` +- Migrate to `std::future` +## 0.1.7 -## 0.1.7 - 2019-11-06 -* Add an additional `filename*` param in the `Content-Disposition` header of - `actix_files::NamedFile` to be more compatible. (#1151) +- Add an additional `filename*` param in the `Content-Disposition` header of `actix_files::NamedFile` to be more compatible. (#1151) -## 0.1.6 - 2019-10-14 -* Add option to redirect to a slash-ended path `Files` #1132 +## 0.1.6 +- Add option to redirect to a slash-ended path `Files` #1132 -## 0.1.5 - 2019-10-08 -* Bump up `mime_guess` crate version to 2.0.1 -* Bump up `percent-encoding` crate version to 2.1 -* Allow user defined request guards for `Files` #1113 +## 0.1.5 +- Bump up `mime_guess` crate version to 2.0.1 +- Bump up `percent-encoding` crate version to 2.1 +- Allow user defined request guards for `Files` #1113 -## 0.1.4 - 2019-07-20 -* Allow to disable `Content-Disposition` header #686 +## 0.1.4 +- Allow to disable `Content-Disposition` header #686 -## 0.1.3 - 2019-06-28 -* Do not set `Content-Length` header, let actix-http set it #930 +## 0.1.3 +- Do not set `Content-Length` header, let actix-http set it #930 -## 0.1.2 - 2019-06-13 -* Content-Length is 0 for NamedFile HEAD request #914 -* Fix ring dependency from actix-web default features for #741 +## 0.1.2 +- Content-Length is 0 for NamedFile HEAD request #914 +- Fix ring dependency from actix-web default features for #741 -## 0.1.1 - 2019-06-01 -* Static files are incorrectly served as both chunked and with length #812 +## 0.1.1 +- Static files are incorrectly served as both chunked and with length #812 -## 0.1.0 - 2019-05-25 -* NamedFile last-modified check always fails due to nano-seconds in file modified date #820 +## 0.1.0 +- NamedFile last-modified check always fails due to nano-seconds in file modified date #820 -## 0.1.0-beta.4 - 2019-05-12 -* Update actix-web to beta.4 +## 0.1.0-beta.4 +- Update actix-web to beta.4 -## 0.1.0-beta.1 - 2019-04-20 -* Update actix-web to beta.1 +## 0.1.0-beta.1 +- Update actix-web to beta.1 -## 0.1.0-alpha.6 - 2019-04-14 -* Update actix-web to alpha6 +## 0.1.0-alpha.6 +- Update actix-web to alpha6 -## 0.1.0-alpha.4 - 2019-04-08 -* Update actix-web to alpha4 +## 0.1.0-alpha.4 +- Update actix-web to alpha4 -## 0.1.0-alpha.2 - 2019-04-02 -* Add default handler support +## 0.1.0-alpha.2 +- Add default handler support -## 0.1.0-alpha.1 - 2019-03-28 -* Initial impl +## 0.1.0-alpha.1 + +- Initial impl diff --git 
a/actix-files/Cargo.toml b/actix-files/Cargo.toml index b97badd3e..57cd4e913 100644 --- a/actix-files/Cargo.toml +++ b/actix-files/Cargo.toml @@ -1,38 +1,56 @@ [package] name = "actix-files" -version = "0.6.0-beta.4" -authors = ["Nikolay Kim "] +version = "0.6.6" +authors = [ + "Nikolay Kim ", + "Rob Ede ", +] description = "Static file serving for Actix Web" -readme = "README.md" keywords = ["actix", "http", "async", "futures"] homepage = "https://actix.rs" -repository = "https://github.com/actix/actix-web.git" -documentation = "https://docs.rs/actix-files/" +repository = "https://github.com/actix/actix-web" categories = ["asynchronous", "web-programming::http-server"] license = "MIT OR Apache-2.0" -edition = "2018" +edition = "2021" -[lib] -name = "actix_files" -path = "src/lib.rs" +[package.metadata.cargo_check_external_types] +allowed_external_types = [ + "actix_http::*", + "actix_service::*", + "actix_web::*", + "http::*", + "mime::*", +] + +[features] +experimental-io-uring = ["actix-web/experimental-io-uring", "tokio-uring"] [dependencies] -actix-web = { version = "4.0.0-beta.6", default-features = false } -actix-service = "2.0.0" -actix-utils = "3.0.0" +actix-http = "3" +actix-service = "2" +actix-utils = "3" +actix-web = { version = "4", default-features = false } -askama_escape = "0.10" -bitflags = "1" +bitflags = "2" bytes = "1" -futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] } -http-range = "0.1.4" derive_more = "0.99.5" +futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] } +http-range = "0.1.4" log = "0.4" -mime = "0.3" +mime = "0.3.9" mime_guess = "2.0.1" percent-encoding = "2.1" +pin-project-lite = "0.2.7" +v_htmlescape = "0.15.5" + +# experimental-io-uring +[target.'cfg(target_os = "linux")'.dependencies] +tokio-uring = { version = "0.5", optional = true, features = ["bytes"] } +actix-server = { version = "2.4", optional = true } # ensure matching tokio-uring versions [dev-dependencies] -actix-rt = "2.2" -actix-web = "4.0.0-beta.6" -actix-test = "0.1.0-beta.2" +actix-rt = "2.7" +actix-test = "0.1" +actix-web = "4" +env_logger = "0.11" +tempfile = "3.2" diff --git a/actix-files/README.md b/actix-files/README.md index 895d5e687..f6d5143f5 100644 --- a/actix-files/README.md +++ b/actix-files/README.md @@ -1,19 +1,32 @@ -# actix-files +# `actix-files` -> Static file serving for Actix Web + [![crates.io](https://img.shields.io/crates/v/actix-files?label=latest)](https://crates.io/crates/actix-files) -[![Documentation](https://docs.rs/actix-files/badge.svg?version=0.6.0-beta.4)](https://docs.rs/actix-files/0.6.0-beta.4) -[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html) +[![Documentation](https://docs.rs/actix-files/badge.svg?version=0.6.6)](https://docs.rs/actix-files/0.6.6) +![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg) ![License](https://img.shields.io/crates/l/actix-files.svg)
-[![dependency status](https://deps.rs/crate/actix-files/0.6.0-beta.4/status.svg)](https://deps.rs/crate/actix-files/0.6.0-beta.4) +[![dependency status](https://deps.rs/crate/actix-files/0.6.6/status.svg)](https://deps.rs/crate/actix-files/0.6.6) [![Download](https://img.shields.io/crates/d/actix-files.svg)](https://crates.io/crates/actix-files) -[![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) -## Documentation & Resources + -- [API Documentation](https://docs.rs/actix-files/) -- [Example Project](https://github.com/actix/examples/tree/master/basics/static_index) -- [Chat on Gitter](https://gitter.im/actix/actix-web) -- Minimum supported Rust version: 1.46 or later + + +Static file serving for Actix Web. + +Provides a non-blocking service for serving static files from disk. + +## Examples + +```rust +use actix_web::App; +use actix_files::Files; + +let app = App::new() + .service(Files::new("/static", ".").prefer_utf8(true)); +``` + + diff --git a/actix-files/examples/guarded-listing.rs b/actix-files/examples/guarded-listing.rs new file mode 100644 index 000000000..e8cde0c85 --- /dev/null +++ b/actix-files/examples/guarded-listing.rs @@ -0,0 +1,33 @@ +use actix_files::Files; +use actix_web::{get, guard, middleware, App, HttpServer, Responder}; + +const EXAMPLES_DIR: &str = concat![env!("CARGO_MANIFEST_DIR"), "/examples"]; + +#[get("/")] +async fn index() -> impl Responder { + "Hello world!" +} + +#[actix_web::main] +async fn main() -> std::io::Result<()> { + env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); + + log::info!("starting HTTP server at http://localhost:8080"); + + HttpServer::new(|| { + App::new() + .service(index) + .service( + Files::new("/assets", EXAMPLES_DIR) + .show_files_listing() + .guard(guard::Header("show-listing", "?1")), + ) + .service(Files::new("/assets", EXAMPLES_DIR)) + .wrap(middleware::Compress::default()) + .wrap(middleware::Logger::default()) + }) + .bind(("127.0.0.1", 8080))? + .workers(2) + .run() + .await +} diff --git a/actix-files/src/chunked.rs b/actix-files/src/chunked.rs index f639848c9..c6c019038 100644 --- a/actix-files/src/chunked.rs +++ b/actix-files/src/chunked.rs @@ -1,95 +1,214 @@ use std::{ cmp, fmt, - fs::File, future::Future, - io::{self, Read, Seek}, + io, pin::Pin, task::{Context, Poll}, }; -use actix_web::{ - error::{BlockingError, Error}, - rt::task::{spawn_blocking, JoinHandle}, -}; -use bytes::Bytes; +use actix_web::{error::Error, web::Bytes}; +#[cfg(feature = "experimental-io-uring")] +use bytes::BytesMut; use futures_core::{ready, Stream}; +use pin_project_lite::pin_project; -#[doc(hidden)] -/// A helper created from a `std::fs::File` which reads the file -/// chunk-by-chunk on a `ThreadPool`. -pub struct ChunkedReadFile { - size: u64, - offset: u64, - state: ChunkedReadFileState, - counter: u64, -} +use super::named::File; -enum ChunkedReadFileState { - File(Option), - Future(JoinHandle>), -} - -impl ChunkedReadFile { - pub(crate) fn new(size: u64, offset: u64, file: File) -> Self { - Self { - size, - offset, - state: ChunkedReadFileState::File(Some(file)), - counter: 0, - } +pin_project! { + /// Adapter to read a `std::file::File` in chunks. 
+ #[doc(hidden)] + pub struct ChunkedReadFile { + size: u64, + offset: u64, + #[pin] + state: ChunkedReadFileState, + counter: u64, + callback: F, } } -impl fmt::Debug for ChunkedReadFile { +#[cfg(not(feature = "experimental-io-uring"))] +pin_project! { + #[project = ChunkedReadFileStateProj] + #[project_replace = ChunkedReadFileStateProjReplace] + enum ChunkedReadFileState { + File { file: Option, }, + Future { #[pin] fut: Fut }, + } +} + +#[cfg(feature = "experimental-io-uring")] +pin_project! { + #[project = ChunkedReadFileStateProj] + #[project_replace = ChunkedReadFileStateProjReplace] + enum ChunkedReadFileState { + File { file: Option<(File, BytesMut)> }, + Future { #[pin] fut: Fut }, + } +} + +impl fmt::Debug for ChunkedReadFile { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("ChunkedReadFile") } } -impl Stream for ChunkedReadFile { +pub(crate) fn new_chunked_read( + size: u64, + offset: u64, + file: File, +) -> impl Stream> { + ChunkedReadFile { + size, + offset, + #[cfg(not(feature = "experimental-io-uring"))] + state: ChunkedReadFileState::File { file: Some(file) }, + #[cfg(feature = "experimental-io-uring")] + state: ChunkedReadFileState::File { + file: Some((file, BytesMut::new())), + }, + counter: 0, + callback: chunked_read_file_callback, + } +} + +#[cfg(not(feature = "experimental-io-uring"))] +async fn chunked_read_file_callback( + mut file: File, + offset: u64, + max_bytes: usize, +) -> Result<(File, Bytes), Error> { + use io::{Read as _, Seek as _}; + + let res = actix_web::web::block(move || { + let mut buf = Vec::with_capacity(max_bytes); + + file.seek(io::SeekFrom::Start(offset))?; + + let n_bytes = file.by_ref().take(max_bytes as u64).read_to_end(&mut buf)?; + + if n_bytes == 0 { + Err(io::Error::from(io::ErrorKind::UnexpectedEof)) + } else { + Ok((file, Bytes::from(buf))) + } + }) + .await??; + + Ok(res) +} + +#[cfg(feature = "experimental-io-uring")] +async fn chunked_read_file_callback( + file: File, + offset: u64, + max_bytes: usize, + mut bytes_mut: BytesMut, +) -> io::Result<(File, Bytes, BytesMut)> { + bytes_mut.reserve(max_bytes); + + let (res, mut bytes_mut) = file.read_at(bytes_mut, offset).await; + let n_bytes = res?; + + if n_bytes == 0 { + return Err(io::ErrorKind::UnexpectedEof.into()); + } + + let bytes = bytes_mut.split_to(n_bytes).freeze(); + + Ok((file, bytes, bytes_mut)) +} + +#[cfg(feature = "experimental-io-uring")] +impl Stream for ChunkedReadFile +where + F: Fn(File, u64, usize, BytesMut) -> Fut, + Fut: Future>, +{ type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.as_mut().get_mut(); - match this.state { - ChunkedReadFileState::File(ref mut file) => { - let size = this.size; - let offset = this.offset; - let counter = this.counter; + let mut this = self.as_mut().project(); + match this.state.as_mut().project() { + ChunkedReadFileStateProj::File { file } => { + let size = *this.size; + let offset = *this.offset; + let counter = *this.counter; if size == counter { Poll::Ready(None) } else { - let mut file = file + let max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize; + + let (file, bytes_mut) = file .take() .expect("ChunkedReadFile polled after completion"); - let fut = spawn_blocking(move || { - let max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize; + let fut = (this.callback)(file, offset, max_bytes, bytes_mut); - let mut buf = Vec::with_capacity(max_bytes); - file.seek(io::SeekFrom::Start(offset))?; + this.state + 
.project_replace(ChunkedReadFileState::Future { fut }); - let n_bytes = - file.by_ref().take(max_bytes as u64).read_to_end(&mut buf)?; - - if n_bytes == 0 { - return Err(io::ErrorKind::UnexpectedEof.into()); - } - - Ok((file, Bytes::from(buf))) - }); - this.state = ChunkedReadFileState::Future(fut); self.poll_next(cx) } } - ChunkedReadFileState::Future(ref mut fut) => { - let (file, bytes) = - ready!(Pin::new(fut).poll(cx)).map_err(|_| BlockingError)??; - this.state = ChunkedReadFileState::File(Some(file)); + ChunkedReadFileStateProj::Future { fut } => { + let (file, bytes, bytes_mut) = ready!(fut.poll(cx))?; - this.offset += bytes.len() as u64; - this.counter += bytes.len() as u64; + this.state.project_replace(ChunkedReadFileState::File { + file: Some((file, bytes_mut)), + }); + + *this.offset += bytes.len() as u64; + *this.counter += bytes.len() as u64; + + Poll::Ready(Some(Ok(bytes))) + } + } + } +} + +#[cfg(not(feature = "experimental-io-uring"))] +impl Stream for ChunkedReadFile +where + F: Fn(File, u64, usize) -> Fut, + Fut: Future>, +{ + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.as_mut().project(); + match this.state.as_mut().project() { + ChunkedReadFileStateProj::File { file } => { + let size = *this.size; + let offset = *this.offset; + let counter = *this.counter; + + if size == counter { + Poll::Ready(None) + } else { + let max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize; + + let file = file + .take() + .expect("ChunkedReadFile polled after completion"); + + let fut = (this.callback)(file, offset, max_bytes); + + this.state + .project_replace(ChunkedReadFileState::Future { fut }); + + self.poll_next(cx) + } + } + ChunkedReadFileStateProj::Future { fut } => { + let (file, bytes) = ready!(fut.poll(cx))?; + + this.state + .project_replace(ChunkedReadFileState::File { file: Some(file) }); + + *this.offset += bytes.len() as u64; + *this.counter += bytes.len() as u64; Poll::Ready(Some(Ok(bytes))) } diff --git a/actix-files/src/directory.rs b/actix-files/src/directory.rs index 80e0c98d0..6ade424b9 100644 --- a/actix-files/src/directory.rs +++ b/actix-files/src/directory.rs @@ -1,8 +1,13 @@ -use std::{fmt::Write, fs::DirEntry, io, path::Path, path::PathBuf}; +use std::{ + fmt::Write, + fs::DirEntry, + io, + path::{Path, PathBuf}, +}; use actix_web::{dev::ServiceResponse, HttpRequest, HttpResponse}; -use askama_escape::{escape as escape_html_entity, Html}; use percent_encoding::{utf8_percent_encode, CONTROLS}; +use v_htmlescape::escape as escape_html_entity; /// A directory; responds with the generated directory listing. #[derive(Debug)] @@ -40,17 +45,26 @@ impl Directory { pub(crate) type DirectoryRenderer = dyn Fn(&Directory, &HttpRequest) -> Result; -// show file url as relative to static path +/// Returns percent encoded file URL path. macro_rules! encode_file_url { ($path:ident) => { utf8_percent_encode(&$path, CONTROLS) }; } -// " -- " & -- & ' -- ' < -- < > -- > / -- / +/// Returns HTML entity encoded formatter. +/// +/// ```plain +/// " => " +/// & => & +/// ' => ' +/// < => < +/// > => > +/// / => / +/// ``` macro_rules! 
encode_file_name { ($entry:ident) => { - escape_html_entity(&$entry.file_name().to_string_lossy(), Html) + escape_html_entity(&$entry.file_name().to_string_lossy()) }; } @@ -66,7 +80,7 @@ pub(crate) fn directory_listing( if dir.is_visible(&entry) { let entry = entry.unwrap(); let p = match entry.path().strip_prefix(&dir.path) { - Ok(p) if cfg!(windows) => base.join(p).to_string_lossy().replace("\\", "/"), + Ok(p) if cfg!(windows) => base.join(p).to_string_lossy().replace('\\', "/"), Ok(p) => base.join(p).to_string_lossy().into_owned(), Err(_) => continue, }; diff --git a/actix-files/src/error.rs b/actix-files/src/error.rs index e5f2d4779..d614651fc 100644 --- a/actix-files/src/error.rs +++ b/actix-files/src/error.rs @@ -2,40 +2,47 @@ use actix_web::{http::StatusCode, ResponseError}; use derive_more::Display; /// Errors which can occur when serving static files. -#[derive(Display, Debug, PartialEq)] +#[derive(Debug, PartialEq, Eq, Display)] pub enum FilesError { - /// Path is not a directory + /// Path is not a directory. #[allow(dead_code)] - #[display(fmt = "Path is not a directory. Unable to serve static files")] + #[display(fmt = "path is not a directory. Unable to serve static files")] IsNotDirectory, - /// Cannot render directory - #[display(fmt = "Unable to render directory without index file")] + /// Cannot render directory. + #[display(fmt = "unable to render directory without index file")] IsDirectory, } -/// Return `NotFound` for `FilesError` impl ResponseError for FilesError { + /// Returns `404 Not Found`. fn status_code(&self) -> StatusCode { StatusCode::NOT_FOUND } } -#[derive(Display, Debug, PartialEq)] +#[derive(Debug, PartialEq, Eq, Display)] +#[non_exhaustive] pub enum UriSegmentError { - /// The segment started with the wrapped invalid character. - #[display(fmt = "The segment started with the wrapped invalid character")] + /// Segment started with the wrapped invalid character. + #[display(fmt = "segment started with invalid character: ('{_0}')")] BadStart(char), - /// The segment contained the wrapped invalid character. - #[display(fmt = "The segment contained the wrapped invalid character")] + + /// Segment contained the wrapped invalid character. + #[display(fmt = "segment contained invalid character ('{_0}')")] BadChar(char), - /// The segment ended with the wrapped invalid character. - #[display(fmt = "The segment ended with the wrapped invalid character")] + + /// Segment ended with the wrapped invalid character. + #[display(fmt = "segment ended with invalid character: ('{_0}')")] BadEnd(char), + + /// Path is not a valid UTF-8 string after percent-decoding. + #[display(fmt = "path is not a valid UTF-8 string after percent-decoding")] + NotValidUtf8, } -/// Return `BadRequest` for `UriSegmentError` impl ResponseError for UriSegmentError { + /// Returns `400 Bad Request`. 
fn status_code(&self) -> StatusCode { StatusCode::BAD_REQUEST } diff --git a/actix-files/src/files.rs b/actix-files/src/files.rs index fc8fd2531..cfd3b9c22 100644 --- a/actix-files/src/files.rs +++ b/actix-files/src/files.rs @@ -1,9 +1,15 @@ -use std::{cell::RefCell, fmt, io, path::PathBuf, rc::Rc}; +use std::{ + cell::RefCell, + fmt, io, + path::{Path, PathBuf}, + rc::Rc, +}; use actix_service::{boxed, IntoServiceFactory, ServiceFactory, ServiceFactoryExt}; -use actix_utils::future::ok; use actix_web::{ - dev::{AppService, HttpServiceFactory, ResourceDef, ServiceRequest, ServiceResponse}, + dev::{ + AppService, HttpServiceFactory, RequestHead, ResourceDef, ServiceRequest, ServiceResponse, + }, error::Error, guard::Guard, http::header::DispositionType, @@ -12,14 +18,16 @@ use actix_web::{ use futures_core::future::LocalBoxFuture; use crate::{ - directory_listing, named, Directory, DirectoryRenderer, FilesService, HttpNewService, - MimeOverride, + directory_listing, named, + service::{FilesService, FilesServiceInner}, + Directory, DirectoryRenderer, HttpNewService, MimeOverride, PathFilter, }; /// Static files handling service. /// /// `Files` service must be registered with `App::service()` method. /// +/// # Examples /// ``` /// use actix_web::App; /// use actix_files::Files; @@ -28,7 +36,7 @@ use crate::{ /// .service(Files::new("/static", ".")); /// ``` pub struct Files { - path: String, + mount_path: String, directory: PathBuf, index: Option, show_index: bool, @@ -36,6 +44,7 @@ pub struct Files { default: Rc>>>, renderer: Rc, mime_override: Option>, + path_filter: Option>, file_flags: named::Flags, use_guards: Option>, guards: Vec>, @@ -58,8 +67,9 @@ impl Clone for Files { default: self.default.clone(), renderer: self.renderer.clone(), file_flags: self.file_flags, - path: self.path.clone(), + mount_path: self.mount_path.clone(), mime_override: self.mime_override.clone(), + path_filter: self.path_filter.clone(), use_guards: self.use_guards.clone(), guards: self.guards.clone(), hidden_files: self.hidden_files, @@ -96,7 +106,7 @@ impl Files { }; Files { - path: mount_path.to_owned(), + mount_path: mount_path.trim_end_matches('/').to_owned(), directory: dir, index: None, show_index: false, @@ -104,6 +114,7 @@ impl Files { default: Rc::new(RefCell::new(None)), renderer: Rc::new(directory_listing), mime_override: None, + path_filter: None, file_flags: named::Flags::default(), use_guards: None, guards: Vec::new(), @@ -114,6 +125,9 @@ impl Files { /// Show files listing for directories. /// /// By default show files listing is disabled. + /// + /// When used with [`Files::index_file()`], files listing is shown as a fallback + /// when the index file is not found. pub fn show_files_listing(mut self) -> Self { self.show_index = true; self @@ -127,7 +141,7 @@ impl Files { self } - /// Set custom directory renderer + /// Set custom directory renderer. pub fn files_listing_renderer(mut self, f: F) -> Self where for<'r, 's> F: @@ -137,7 +151,7 @@ impl Files { self } - /// Specifies mime override callback + /// Specifies MIME override callback. pub fn mime_override(mut self, f: F) -> Self where F: Fn(&mime::Name<'_>) -> DispositionType + 'static, @@ -146,10 +160,45 @@ impl Files { self } + /// Sets path filtering closure. + /// + /// The path provided to the closure is relative to `serve_from` path. + /// You can safely join this path with the `serve_from` path to get the real path. + /// However, the real path may not exist since the filter is called before checking path existence. 
+ /// + /// When a path doesn't pass the filter, [`Files::default_handler`] is called if set, otherwise, + /// `404 Not Found` is returned. + /// + /// # Examples + /// ``` + /// use std::path::Path; + /// use actix_files::Files; + /// + /// // prevent searching subdirectories and following symlinks + /// let files_service = Files::new("/", "./static").path_filter(|path, _| { + /// path.components().count() == 1 + /// && Path::new("./static") + /// .join(path) + /// .symlink_metadata() + /// .map(|m| !m.file_type().is_symlink()) + /// .unwrap_or(false) + /// }); + /// ``` + pub fn path_filter(mut self, f: F) -> Self + where + F: Fn(&Path, &RequestHead) -> bool + 'static, + { + self.path_filter = Some(Rc::new(f)); + self + } + /// Set index file /// - /// Shows specific index file for directory "/" instead of + /// Shows specific index file for directories instead of /// showing files listing. + /// + /// If the index file is not found, files listing is shown as a fallback if + /// [`Files::show_files_listing()`] is set. pub fn index_file>(mut self, index: T) -> Self { self.index = Some(index.into()); self @@ -186,7 +235,7 @@ impl Files { /// request starts being handled by the file service, it will not be able to back-out and try /// the next service, you will simply get a 404 (or 405) error response. /// - /// To allow `POST` requests to retrieve files, see [`Files::use_guards`]. + /// To allow `POST` requests to retrieve files, see [`Files::method_guard()`]. /// /// # Examples /// ``` @@ -213,9 +262,9 @@ impl Files { self } + /// See [`Files::method_guard`]. #[doc(hidden)] #[deprecated(since = "0.6.0", note = "Renamed to `method_guard`.")] - /// See [`Files::method_guard`]. pub fn use_guards(self, guard: G) -> Self { self.method_guard(guard) } @@ -234,23 +283,25 @@ impl Files { /// Setting a fallback static file handler: /// ``` /// use actix_files::{Files, NamedFile}; + /// use actix_web::dev::{ServiceRequest, ServiceResponse, fn_service}; /// /// # fn run() -> Result<(), actix_web::Error> { /// let files = Files::new("/", "./static") /// .index_file("index.html") - /// .default_handler(NamedFile::open("./static/404.html")?); + /// .default_handler(fn_service(|req: ServiceRequest| async { + /// let (req, _) = req.into_parts(); + /// let file = NamedFile::open_async("./static/404.html").await?; + /// let res = file.into_response(&req); + /// Ok(ServiceResponse::new(req, res)) + /// })); /// # Ok(()) /// # } /// ``` pub fn default_handler(mut self, f: F) -> Self where F: IntoServiceFactory, - U: ServiceFactory< - ServiceRequest, - Config = (), - Response = ServiceResponse, - Error = Error, - > + 'static, + U: ServiceFactory + + 'static, { // create and configure default resource self.default = Rc::new(RefCell::new(Some(Rc::new(boxed::factory( @@ -286,9 +337,9 @@ impl HttpServiceFactory for Files { } let rdef = if config.is_root() { - ResourceDef::root_prefix(&self.path) + ResourceDef::root_prefix(&self.mount_path) } else { - ResourceDef::prefix(&self.path) + ResourceDef::prefix(&self.mount_path) }; config.register_service(rdef, guards, self, None) @@ -304,7 +355,7 @@ impl ServiceFactory for Files { type Future = LocalBoxFuture<'static, Result>; fn new_service(&self, _: ()) -> Self::Future { - let mut srv = FilesService { + let mut inner = FilesServiceInner { directory: self.directory.clone(), index: self.index.clone(), show_index: self.show_index, @@ -312,6 +363,7 @@ impl ServiceFactory for Files { default: None, renderer: self.renderer.clone(), mime_override: self.mime_override.clone(), 
+ path_filter: self.path_filter.clone(), file_flags: self.file_flags, guards: self.use_guards.clone(), hidden_files: self.hidden_files, @@ -322,14 +374,57 @@ impl ServiceFactory for Files { Box::pin(async { match fut.await { Ok(default) => { - srv.default = Some(default); - Ok(srv) + inner.default = Some(default); + Ok(FilesService(Rc::new(inner))) } Err(_) => Err(()), } }) } else { - Box::pin(ok(srv)) + Box::pin(async move { Ok(FilesService(Rc::new(inner))) }) } } } + +#[cfg(test)] +mod tests { + use actix_web::{ + http::StatusCode, + test::{self, TestRequest}, + App, HttpResponse, + }; + + use super::*; + + #[actix_web::test] + async fn custom_files_listing_renderer() { + let srv = test::init_service( + App::new().service( + Files::new("/", "./tests") + .show_files_listing() + .files_listing_renderer(|dir, req| { + Ok(ServiceResponse::new( + req.clone(), + HttpResponse::Ok().body(dir.path.to_str().unwrap().to_owned()), + )) + }), + ), + ) + .await; + + let req = TestRequest::with_uri("/").to_request(); + let res = test::call_service(&srv, req).await; + + assert_eq!(res.status(), StatusCode::OK); + let body = test::read_body(res).await; + let body_str = std::str::from_utf8(&body).unwrap(); + let actual_path = Path::new(&body_str); + let expected_path = Path::new("actix-files/tests"); + assert!( + actual_path.ends_with(expected_path), + "body {:?} does not end with {:?}", + actual_path, + expected_path + ); + } +} diff --git a/actix-files/src/lib.rs b/actix-files/src/lib.rs index aa5960b5b..167f996c0 100644 --- a/actix-files/src/lib.rs +++ b/actix-files/src/lib.rs @@ -2,7 +2,7 @@ //! //! Provides a non-blocking service for serving static files from disk. //! -//! # Example +//! # Examples //! ``` //! use actix_web::App; //! use actix_files::Files; @@ -11,12 +11,17 @@ //! .service(Files::new("/static", ".").prefer_utf8(true)); //! 
``` -#![deny(rust_2018_idioms)] -#![warn(missing_docs, missing_debug_implementations)] +#![deny(rust_2018_idioms, nonstandard_style)] +#![warn(future_incompatible, missing_docs, missing_debug_implementations)] +#![doc(html_logo_url = "https://actix.rs/img/logo.png")] +#![doc(html_favicon_url = "https://actix.rs/favicon.ico")] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] + +use std::path::Path; use actix_service::boxed::{BoxService, BoxServiceFactory}; use actix_web::{ - dev::{ServiceRequest, ServiceResponse}, + dev::{RequestHead, ServiceRequest, ServiceResponse}, error::Error, http::header::DispositionType, }; @@ -32,16 +37,15 @@ mod path_buf; mod range; mod service; -pub use crate::chunked::ChunkedReadFile; -pub use crate::directory::Directory; -pub use crate::files::Files; -pub use crate::named::NamedFile; -pub use crate::range::HttpRange; -pub use crate::service::FilesService; - -use self::directory::{directory_listing, DirectoryRenderer}; -use self::error::FilesError; -use self::path_buf::PathBufWrap; +pub use self::{ + chunked::ChunkedReadFile, directory::Directory, files::Files, named::NamedFile, + range::HttpRange, service::FilesService, +}; +use self::{ + directory::{directory_listing, DirectoryRenderer}, + error::FilesError, + path_buf::PathBufWrap, +}; type HttpService = BoxService; type HttpNewService = BoxServiceFactory<(), ServiceRequest, ServiceResponse, Error, ()>; @@ -56,20 +60,22 @@ pub fn file_extension_to_mime(ext: &str) -> mime::Mime { type MimeOverride = dyn Fn(&mime::Name<'_>) -> DispositionType; +type PathFilter = dyn Fn(&Path, &RequestHead) -> bool; + #[cfg(test)] mod tests { use std::{ - fs::{self, File}, + fmt::Write as _, + fs::{self}, ops::Add, time::{Duration, SystemTime}, }; - use actix_service::ServiceFactory; - use actix_utils::future::ok; use actix_web::{ + dev::ServiceFactory, guard, http::{ - header::{self, ContentDisposition, DispositionParam, DispositionType}, + header::{self, ContentDisposition, DispositionParam}, Method, StatusCode, }, middleware::Compress, @@ -79,8 +85,9 @@ mod tests { }; use super::*; + use crate::named::File; - #[actix_rt::test] + #[actix_web::test] async fn test_file_extension_to_mime() { let m = file_extension_to_mime(""); assert_eq!(m, mime::APPLICATION_OCTET_STREAM); @@ -97,69 +104,69 @@ mod tests { #[actix_rt::test] async fn test_if_modified_since_without_if_none_match() { - let file = NamedFile::open("Cargo.toml").unwrap(); + let file = NamedFile::open_async("Cargo.toml").await.unwrap(); let since = header::HttpDate::from(SystemTime::now().add(Duration::from_secs(60))); let req = TestRequest::default() .insert_header((header::IF_MODIFIED_SINCE, since)) .to_http_request(); - let resp = file.respond_to(&req).await.unwrap(); + let resp = file.respond_to(&req); assert_eq!(resp.status(), StatusCode::NOT_MODIFIED); } #[actix_rt::test] async fn test_if_modified_since_without_if_none_match_same() { - let file = NamedFile::open("Cargo.toml").unwrap(); + let file = NamedFile::open_async("Cargo.toml").await.unwrap(); let since = file.last_modified().unwrap(); let req = TestRequest::default() .insert_header((header::IF_MODIFIED_SINCE, since)) .to_http_request(); - let resp = file.respond_to(&req).await.unwrap(); + let resp = file.respond_to(&req); assert_eq!(resp.status(), StatusCode::NOT_MODIFIED); } #[actix_rt::test] async fn test_if_modified_since_with_if_none_match() { - let file = NamedFile::open("Cargo.toml").unwrap(); + let file = NamedFile::open_async("Cargo.toml").await.unwrap(); let since = 
header::HttpDate::from(SystemTime::now().add(Duration::from_secs(60))); let req = TestRequest::default() .insert_header((header::IF_NONE_MATCH, "miss_etag")) .insert_header((header::IF_MODIFIED_SINCE, since)) .to_http_request(); - let resp = file.respond_to(&req).await.unwrap(); + let resp = file.respond_to(&req); assert_ne!(resp.status(), StatusCode::NOT_MODIFIED); } #[actix_rt::test] async fn test_if_unmodified_since() { - let file = NamedFile::open("Cargo.toml").unwrap(); + let file = NamedFile::open_async("Cargo.toml").await.unwrap(); let since = file.last_modified().unwrap(); let req = TestRequest::default() .insert_header((header::IF_UNMODIFIED_SINCE, since)) .to_http_request(); - let resp = file.respond_to(&req).await.unwrap(); + let resp = file.respond_to(&req); assert_eq!(resp.status(), StatusCode::OK); } #[actix_rt::test] async fn test_if_unmodified_since_failed() { - let file = NamedFile::open("Cargo.toml").unwrap(); + let file = NamedFile::open_async("Cargo.toml").await.unwrap(); let since = header::HttpDate::from(SystemTime::UNIX_EPOCH); let req = TestRequest::default() .insert_header((header::IF_UNMODIFIED_SINCE, since)) .to_http_request(); - let resp = file.respond_to(&req).await.unwrap(); + let resp = file.respond_to(&req); assert_eq!(resp.status(), StatusCode::PRECONDITION_FAILED); } #[actix_rt::test] async fn test_named_file_text() { - assert!(NamedFile::open("test--").is_err()); - let mut file = NamedFile::open("Cargo.toml").unwrap(); + assert!(NamedFile::open_async("test--").await.is_err()); + let mut file = NamedFile::open_async("Cargo.toml").await.unwrap(); { file.file(); let _f: &File = &file; @@ -169,7 +176,7 @@ mod tests { } let req = TestRequest::default().to_http_request(); - let resp = file.respond_to(&req).await.unwrap(); + let resp = file.respond_to(&req); assert_eq!( resp.headers().get(header::CONTENT_TYPE).unwrap(), "text/x-toml" @@ -182,8 +189,8 @@ mod tests { #[actix_rt::test] async fn test_named_file_content_disposition() { - assert!(NamedFile::open("test--").is_err()); - let mut file = NamedFile::open("Cargo.toml").unwrap(); + assert!(NamedFile::open_async("test--").await.is_err()); + let mut file = NamedFile::open_async("Cargo.toml").await.unwrap(); { file.file(); let _f: &File = &file; @@ -193,24 +200,36 @@ mod tests { } let req = TestRequest::default().to_http_request(); - let resp = file.respond_to(&req).await.unwrap(); + let resp = file.respond_to(&req); assert_eq!( resp.headers().get(header::CONTENT_DISPOSITION).unwrap(), "inline; filename=\"Cargo.toml\"" ); - let file = NamedFile::open("Cargo.toml") + let file = NamedFile::open_async("Cargo.toml") + .await .unwrap() .disable_content_disposition(); let req = TestRequest::default().to_http_request(); - let resp = file.respond_to(&req).await.unwrap(); + let resp = file.respond_to(&req); assert!(resp.headers().get(header::CONTENT_DISPOSITION).is_none()); } #[actix_rt::test] async fn test_named_file_non_ascii_file_name() { - let mut file = - NamedFile::from_file(File::open("Cargo.toml").unwrap(), "貨物.toml").unwrap(); + let file = { + #[cfg(feature = "experimental-io-uring")] + { + crate::named::File::open("Cargo.toml").await.unwrap() + } + + #[cfg(not(feature = "experimental-io-uring"))] + { + crate::named::File::open("Cargo.toml").unwrap() + } + }; + + let mut file = NamedFile::from_file(file, "貨物.toml").unwrap(); { file.file(); let _f: &File = &file; @@ -220,7 +239,7 @@ mod tests { } let req = TestRequest::default().to_http_request(); - let resp = file.respond_to(&req).await.unwrap(); + let resp = 
file.respond_to(&req); assert_eq!( resp.headers().get(header::CONTENT_TYPE).unwrap(), "text/x-toml" @@ -233,7 +252,8 @@ mod tests { #[actix_rt::test] async fn test_named_file_set_content_type() { - let mut file = NamedFile::open("Cargo.toml") + let mut file = NamedFile::open_async("Cargo.toml") + .await .unwrap() .set_content_type(mime::TEXT_XML); { @@ -245,7 +265,7 @@ mod tests { } let req = TestRequest::default().to_http_request(); - let resp = file.respond_to(&req).await.unwrap(); + let resp = file.respond_to(&req); assert_eq!( resp.headers().get(header::CONTENT_TYPE).unwrap(), "text/xml" @@ -258,7 +278,7 @@ mod tests { #[actix_rt::test] async fn test_named_file_image() { - let mut file = NamedFile::open("tests/test.png").unwrap(); + let mut file = NamedFile::open_async("tests/test.png").await.unwrap(); { file.file(); let _f: &File = &file; @@ -268,7 +288,7 @@ mod tests { } let req = TestRequest::default().to_http_request(); - let resp = file.respond_to(&req).await.unwrap(); + let resp = file.respond_to(&req); assert_eq!( resp.headers().get(header::CONTENT_TYPE).unwrap(), "image/png" @@ -279,13 +299,30 @@ mod tests { ); } + #[actix_rt::test] + async fn test_named_file_javascript() { + let file = NamedFile::open_async("tests/test.js").await.unwrap(); + + let req = TestRequest::default().to_http_request(); + let resp = file.respond_to(&req); + assert_eq!( + resp.headers().get(header::CONTENT_TYPE).unwrap(), + "application/javascript; charset=utf-8" + ); + assert_eq!( + resp.headers().get(header::CONTENT_DISPOSITION).unwrap(), + "inline; filename=\"test.js\"" + ); + } + #[actix_rt::test] async fn test_named_file_image_attachment() { let cd = ContentDisposition { disposition: DispositionType::Attachment, parameters: vec![DispositionParam::Filename(String::from("test.png"))], }; - let mut file = NamedFile::open("tests/test.png") + let mut file = NamedFile::open_async("tests/test.png") + .await .unwrap() .set_content_disposition(cd); { @@ -297,7 +334,7 @@ mod tests { } let req = TestRequest::default().to_http_request(); - let resp = file.respond_to(&req).await.unwrap(); + let resp = file.respond_to(&req); assert_eq!( resp.headers().get(header::CONTENT_TYPE).unwrap(), "image/png" @@ -310,7 +347,7 @@ mod tests { #[actix_rt::test] async fn test_named_file_binary() { - let mut file = NamedFile::open("tests/test.binary").unwrap(); + let mut file = NamedFile::open_async("tests/test.binary").await.unwrap(); { file.file(); let _f: &File = &file; @@ -320,7 +357,7 @@ mod tests { } let req = TestRequest::default().to_http_request(); - let resp = file.respond_to(&req).await.unwrap(); + let resp = file.respond_to(&req); assert_eq!( resp.headers().get(header::CONTENT_TYPE).unwrap(), "application/octet-stream" @@ -331,21 +368,45 @@ mod tests { ); } + #[allow(deprecated)] #[actix_rt::test] - async fn test_named_file_status_code_text() { - let mut file = NamedFile::open("Cargo.toml") + async fn status_code_customize_same_output() { + let file1 = NamedFile::open_async("Cargo.toml") + .await .unwrap() .set_status_code(StatusCode::NOT_FOUND); + + let file2 = NamedFile::open_async("Cargo.toml") + .await + .unwrap() + .customize() + .with_status(StatusCode::NOT_FOUND); + + let req = TestRequest::default().to_http_request(); + let res1 = file1.respond_to(&req); + let res2 = file2.respond_to(&req); + + assert_eq!(res1.status(), StatusCode::NOT_FOUND); + assert_eq!(res2.status(), StatusCode::NOT_FOUND); + } + + #[actix_rt::test] + async fn test_named_file_status_code_text() { + let mut file = 
NamedFile::open_async("Cargo.toml").await.unwrap(); + { file.file(); let _f: &File = &file; } + { let _f: &mut File = &mut file; } + let file = file.customize().with_status(StatusCode::NOT_FOUND); + let req = TestRequest::default().to_http_request(); - let resp = file.respond_to(&req).await.unwrap(); + let resp = file.respond_to(&req); assert_eq!( resp.headers().get(header::CONTENT_TYPE).unwrap(), "text/x-toml" @@ -493,10 +554,9 @@ mod tests { #[actix_rt::test] async fn test_static_files_with_spaces() { - let srv = test::init_service( - App::new().service(Files::new("/", ".").index_file("Cargo.toml")), - ) - .await; + let srv = + test::init_service(App::new().service(Files::new("/", ".").index_file("Cargo.toml"))) + .await; let request = TestRequest::get() .uri("/tests/test%20space.binary") .to_request(); @@ -508,6 +568,30 @@ mod tests { assert_eq!(bytes, data); } + #[cfg(not(target_os = "windows"))] + #[actix_rt::test] + async fn test_static_files_with_special_characters() { + // Create the file we want to test against ad-hoc. We can't check it in as otherwise + // Windows can't even checkout this repository. + let temp_dir = tempfile::tempdir().unwrap(); + let file_with_newlines = temp_dir.path().join("test\n\x0B\x0C\rnewline.text"); + fs::write(&file_with_newlines, "Look at my newlines").unwrap(); + + let srv = test::init_service( + App::new().service(Files::new("/", temp_dir.path()).index_file("Cargo.toml")), + ) + .await; + let request = TestRequest::get() + .uri("/test%0A%0B%0C%0Dnewline.text") + .to_request(); + let response = test::call_service(&srv, request).await; + assert_eq!(response.status(), StatusCode::OK); + + let bytes = test::read_body(response).await; + let data = web::Bytes::from(fs::read(file_with_newlines).unwrap()); + assert_eq!(bytes, data); + } + #[actix_rt::test] async fn test_files_not_allowed() { let srv = test::init_service(App::new().service(Files::new("/", "."))).await; @@ -549,7 +633,8 @@ mod tests { async fn test_named_file_content_encoding() { let srv = test::init_service(App::new().wrap(Compress::default()).service( web::resource("/").to(|| async { - NamedFile::open("Cargo.toml") + NamedFile::open_async("Cargo.toml") + .await .unwrap() .set_content_encoding(header::ContentEncoding::Identity) }), @@ -562,14 +647,16 @@ mod tests { .to_request(); let res = test::call_service(&srv, request).await; assert_eq!(res.status(), StatusCode::OK); - assert!(!res.headers().contains_key(header::CONTENT_ENCODING)); + assert!(res.headers().contains_key(header::CONTENT_ENCODING)); + assert!(!test::read_body(res).await.is_empty()); } #[actix_rt::test] async fn test_named_file_content_encoding_gzip() { let srv = test::init_service(App::new().wrap(Compress::default()).service( web::resource("/").to(|| async { - NamedFile::open("Cargo.toml") + NamedFile::open_async("Cargo.toml") + .await .unwrap() .set_content_encoding(header::ContentEncoding::Gzip) }), @@ -595,16 +682,15 @@ mod tests { #[actix_rt::test] async fn test_named_file_allowed_method() { let req = TestRequest::default().method(Method::GET).to_http_request(); - let file = NamedFile::open("Cargo.toml").unwrap(); - let resp = file.respond_to(&req).await.unwrap(); + let file = NamedFile::open_async("Cargo.toml").await.unwrap(); + let resp = file.respond_to(&req); assert_eq!(resp.status(), StatusCode::OK); } #[actix_rt::test] async fn test_static_files() { let srv = - test::init_service(App::new().service(Files::new("/", ".").show_files_listing())) - .await; + test::init_service(App::new().service(Files::new("/", 
".").show_files_listing())).await; let req = TestRequest::with_uri("/missing").to_request(); let resp = test::call_service(&srv, req).await; @@ -617,8 +703,7 @@ mod tests { assert_eq!(resp.status(), StatusCode::NOT_FOUND); let srv = - test::init_service(App::new().service(Files::new("/", ".").show_files_listing())) - .await; + test::init_service(App::new().service(Files::new("/", ".").show_files_listing())).await; let req = TestRequest::with_uri("/tests").to_request(); let resp = test::call_service(&srv, req).await; assert_eq!( @@ -686,8 +771,8 @@ mod tests { #[actix_rt::test] async fn test_default_handler_file_missing() { let st = Files::new("/", ".") - .default_handler(|req: ServiceRequest| { - ok(req.into_response(HttpResponse::Ok().body("default content"))) + .default_handler(|req: ServiceRequest| async { + Ok(req.into_response(HttpResponse::Ok().body("default content"))) }) .new_service(()) .await @@ -766,13 +851,46 @@ mod tests { let req = TestRequest::get().uri("/test/%43argo.toml").to_request(); let res = test::call_service(&srv, req).await; assert_eq!(res.status(), StatusCode::OK); + + // `%2F` == `/` + let req = TestRequest::get().uri("/test%2Ftest.binary").to_request(); + let res = test::call_service(&srv, req).await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + let req = TestRequest::get().uri("/test/Cargo.toml%00").to_request(); + let res = test::call_service(&srv, req).await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + } + + #[actix_rt::test] + async fn test_percent_encoding_2() { + let temp_dir = tempfile::tempdir().unwrap(); + let filename = match cfg!(unix) { + true => "ض:?#[]{}<>()@!$&'`|*+,;= %20\n.test", + false => "ض#[]{}()@!$&'`+,;= %20.test", + }; + let filename_encoded = filename + .as_bytes() + .iter() + .fold(String::new(), |mut buf, c| { + write!(&mut buf, "%{:02X}", c).unwrap(); + buf + }); + std::fs::File::create(temp_dir.path().join(filename)).unwrap(); + + let srv = test::init_service(App::new().service(Files::new("/", temp_dir.path()))).await; + + let req = TestRequest::get() + .uri(&format!("/{}", filename_encoded)) + .to_request(); + let res = test::call_service(&srv, req).await; + assert_eq!(res.status(), StatusCode::OK); } #[actix_rt::test] async fn test_serve_named_file() { - let srv = - test::init_service(App::new().service(NamedFile::open("Cargo.toml").unwrap())) - .await; + let factory = NamedFile::open_async("Cargo.toml").await.unwrap(); + let srv = test::init_service(App::new().service(factory)).await; let req = TestRequest::get().uri("/Cargo.toml").to_request(); let res = test::call_service(&srv, req).await; @@ -789,11 +907,9 @@ mod tests { #[actix_rt::test] async fn test_serve_named_file_prefix() { - let srv = test::init_service( - App::new() - .service(web::scope("/test").service(NamedFile::open("Cargo.toml").unwrap())), - ) - .await; + let factory = NamedFile::open_async("Cargo.toml").await.unwrap(); + let srv = + test::init_service(App::new().service(web::scope("/test").service(factory))).await; let req = TestRequest::get().uri("/test/Cargo.toml").to_request(); let res = test::call_service(&srv, req).await; @@ -810,10 +926,8 @@ mod tests { #[actix_rt::test] async fn test_named_file_default_service() { - let srv = test::init_service( - App::new().default_service(NamedFile::open("Cargo.toml").unwrap()), - ) - .await; + let factory = NamedFile::open_async("Cargo.toml").await.unwrap(); + let srv = test::init_service(App::new().default_service(factory)).await; for route in ["/foobar", "/baz", "/"].iter() { let req = 
TestRequest::get().uri(route).to_request(); @@ -828,8 +942,9 @@ mod tests { #[actix_rt::test] async fn test_default_handler_named_file() { + let factory = NamedFile::open_async("Cargo.toml").await.unwrap(); let st = Files::new("/", ".") - .default_handler(NamedFile::open("Cargo.toml").unwrap()) + .default_handler(factory) .new_service(()) .await .unwrap(); @@ -856,4 +971,69 @@ mod tests { "inline; filename=\"symlink-test.png\"" ); } + + #[actix_rt::test] + async fn test_index_with_show_files_listing() { + let service = Files::new(".", ".") + .index_file("lib.rs") + .show_files_listing() + .new_service(()) + .await + .unwrap(); + + // Serve the index if exists + let req = TestRequest::default().uri("/src").to_srv_request(); + let resp = test::call_service(&service, req).await; + assert_eq!(resp.status(), StatusCode::OK); + assert_eq!( + resp.headers().get(header::CONTENT_TYPE).unwrap(), + "text/x-rust" + ); + + // Show files listing, otherwise. + let req = TestRequest::default().uri("/tests").to_srv_request(); + let resp = test::call_service(&service, req).await; + assert_eq!( + resp.headers().get(header::CONTENT_TYPE).unwrap(), + "text/html; charset=utf-8" + ); + let bytes = test::read_body(resp).await; + assert!(format!("{:?}", bytes).contains("/tests/test.png")); + } + + #[actix_rt::test] + async fn test_path_filter() { + // prevent searching subdirectories + let st = Files::new("/", ".") + .path_filter(|path, _| path.components().count() == 1) + .new_service(()) + .await + .unwrap(); + + let req = TestRequest::with_uri("/Cargo.toml").to_srv_request(); + let resp = test::call_service(&st, req).await; + assert_eq!(resp.status(), StatusCode::OK); + + let req = TestRequest::with_uri("/src/lib.rs").to_srv_request(); + let resp = test::call_service(&st, req).await; + assert_eq!(resp.status(), StatusCode::NOT_FOUND); + } + + #[actix_rt::test] + async fn test_default_handler_filter() { + let st = Files::new("/", ".") + .default_handler(|req: ServiceRequest| async { + Ok(req.into_response(HttpResponse::Ok().body("default content"))) + }) + .path_filter(|path, _| path.extension() == Some("png".as_ref())) + .new_service(()) + .await + .unwrap(); + let req = TestRequest::with_uri("/Cargo.toml").to_srv_request(); + let resp = test::call_service(&st, req).await; + + assert_eq!(resp.status(), StatusCode::OK); + let bytes = test::read_body(resp).await; + assert_eq!(bytes, web::Bytes::from_static(b"default content")); + } } diff --git a/actix-files/src/named.rs b/actix-files/src/named.rs index 519234f0d..9e4a37737 100644 --- a/actix-files/src/named.rs +++ b/actix-files/src/named.rs @@ -1,32 +1,34 @@ -use actix_service::{Service, ServiceFactory}; -use actix_utils::future::{ok, ready, Ready}; -use actix_web::dev::{AppService, HttpServiceFactory, ResourceDef}; -use std::fs::{File, Metadata}; -use std::io; -use std::ops::{Deref, DerefMut}; -use std::path::{Path, PathBuf}; -use std::time::{SystemTime, UNIX_EPOCH}; - -#[cfg(unix)] -use std::os::unix::fs::MetadataExt; +use std::{ + fs::Metadata, + io, + path::{Path, PathBuf}, + time::{SystemTime, UNIX_EPOCH}, +}; use actix_web::{ - dev::{BodyEncoding, ServiceRequest, ServiceResponse, SizedStream}, + body::{self, BoxBody, SizedStream}, + dev::{ + self, AppService, HttpServiceFactory, ResourceDef, Service, ServiceFactory, ServiceRequest, + ServiceResponse, + }, http::{ header::{ - self, Charset, ContentDisposition, DispositionParam, DispositionType, ExtendedValue, + self, Charset, ContentDisposition, ContentEncoding, DispositionParam, DispositionType, + 
ExtendedValue, HeaderValue, }, - ContentEncoding, StatusCode, + StatusCode, }, Error, HttpMessage, HttpRequest, HttpResponse, Responder, }; use bitflags::bitflags; -use mime_guess::from_path; +use derive_more::{Deref, DerefMut}; +use futures_core::future::LocalBoxFuture; +use mime::Mime; -use crate::ChunkedReadFile; use crate::{encoding::equiv_utf8_text, range::HttpRange}; bitflags! { + #[derive(Debug, Clone, Copy)] pub(crate) struct Flags: u8 { const ETAG = 0b0000_0001; const LAST_MD = 0b0000_0010; @@ -37,7 +39,7 @@ bitflags! { impl Default for Flags { fn default() -> Self { - Flags::from_bits_truncate(0b0000_0111) + Flags::from_bits_truncate(0b0000_1111) } } @@ -48,9 +50,9 @@ impl Default for Flags { /// use actix_web::App; /// use actix_files::NamedFile; /// -/// # fn run() -> Result<(), Box> { -/// let app = App::new() -/// .service(NamedFile::open("./static/index.html")?); +/// # async fn run() -> Result<(), Box> { +/// let file = NamedFile::open_async("./static/index.html").await?; +/// let app = App::new().service(file); /// # Ok(()) /// # } /// ``` @@ -62,22 +64,32 @@ impl Default for Flags { /// /// #[get("/")] /// async fn index() -> impl Responder { -/// NamedFile::open("./static/index.html") +/// NamedFile::open_async("./static/index.html").await /// } /// ``` -#[derive(Debug)] +#[derive(Debug, Deref, DerefMut)] pub struct NamedFile { - path: PathBuf, + #[deref] + #[deref_mut] file: File, + path: PathBuf, modified: Option, pub(crate) md: Metadata, pub(crate) flags: Flags, pub(crate) status_code: StatusCode, - pub(crate) content_type: mime::Mime, - pub(crate) content_disposition: header::ContentDisposition, + pub(crate) content_type: Mime, + pub(crate) content_disposition: ContentDisposition, pub(crate) encoding: Option, } +#[cfg(not(feature = "experimental-io-uring"))] +pub(crate) use std::fs::File; + +#[cfg(feature = "experimental-io-uring")] +pub(crate) use tokio_uring::fs::File; + +use super::chunked; + impl NamedFile { /// Creates an instance from a previously opened file. /// @@ -85,20 +97,19 @@ impl NamedFile { /// `ContentDisposition` headers. 
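The `NamedFile` struct above switches from hand-written `Deref`/`DerefMut` impls (removed further down in `named.rs`) to `derive_more`, with the `#[deref]`/`#[deref_mut]` field attributes selecting the `file` field. A standalone sketch of that pattern; the `Wrapper` and `label` names are illustrative, not from the patch:

```rust
// Sketch: derive_more forwards Deref/DerefMut to the field marked with the attributes,
// which is how NamedFile above keeps exposing its inner File.
use derive_more::{Deref, DerefMut};

#[derive(Debug, Deref, DerefMut)]
struct Wrapper {
    #[deref]
    #[deref_mut]
    inner: String,
    label: &'static str,
}

fn main() {
    let mut w = Wrapper {
        inner: "hello".to_owned(),
        label: "greeting",
    };

    w.push('!'); // resolves through DerefMut to the `inner` String
    assert_eq!(w.len(), 6); // Deref likewise targets `inner`
    assert_eq!(w.label, "greeting"); // other fields stay directly accessible
}
```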
/// /// # Examples - /// - /// ``` + /// ```ignore + /// use std::{ + /// io::{self, Write as _}, + /// env, + /// fs::File + /// }; /// use actix_files::NamedFile; - /// use std::io::{self, Write}; - /// use std::env; - /// use std::fs::File; /// - /// fn main() -> io::Result<()> { - /// let mut file = File::create("foo.txt")?; - /// file.write_all(b"Hello, world!")?; - /// let named_file = NamedFile::from_file(file, "bar.txt")?; - /// # std::fs::remove_file("foo.txt"); - /// Ok(()) - /// } + /// let mut file = File::create("foo.txt")?; + /// file.write_all(b"Hello, world!")?; + /// let named_file = NamedFile::from_file(file, "bar.txt")?; + /// # std::fs::remove_file("foo.txt"); + /// Ok(()) /// ``` pub fn from_file>(file: File, path: P) -> io::Result { let path = path.as_ref().to_path_buf(); @@ -116,15 +127,25 @@ impl NamedFile { } }; - let ct = from_path(&path).first_or_octet_stream(); + let ct = mime_guess::from_path(&path).first_or_octet_stream(); let disposition = match ct.type_() { - mime::IMAGE | mime::TEXT | mime::VIDEO => DispositionType::Inline, + mime::IMAGE | mime::TEXT | mime::AUDIO | mime::VIDEO => DispositionType::Inline, + mime::APPLICATION => match ct.subtype() { + mime::JAVASCRIPT | mime::JSON => DispositionType::Inline, + name if name == "wasm" || name == "xhtml" => DispositionType::Inline, + _ => DispositionType::Attachment, + }, _ => DispositionType::Attachment, }; - let mut parameters = - vec![DispositionParam::Filename(String::from(filename.as_ref()))]; + // replace special characters in filenames which could occur on some filesystems + let filename_s = filename + .replace('\n', "%0A") // \n line break + .replace('\x0B', "%0B") // \v vertical tab + .replace('\x0C', "%0C") // \f form feed + .replace('\r', "%0D"); // \r carriage return + let mut parameters = vec![DispositionParam::Filename(filename_s)]; if !filename.is_ascii() { parameters.push(DispositionParam::FilenameExt(ExtendedValue { @@ -142,7 +163,30 @@ impl NamedFile { (ct, cd) }; - let md = file.metadata()?; + let md = { + #[cfg(not(feature = "experimental-io-uring"))] + { + file.metadata()? + } + + #[cfg(feature = "experimental-io-uring")] + { + use std::os::unix::prelude::{AsRawFd, FromRawFd}; + + let fd = file.as_raw_fd(); + + // SAFETY: fd is borrowed and lives longer than the unsafe block + unsafe { + let file = std::fs::File::from_raw_fd(fd); + let md = file.metadata(); + // SAFETY: forget the fd before exiting block in success or error case but don't + // run destructor (that would close file handle) + std::mem::forget(file); + md? + } + } + }; + let modified = md.modified().ok(); let encoding = None; @@ -162,32 +206,59 @@ impl NamedFile { /// Attempts to open a file in read-only mode. /// /// # Examples - /// /// ``` /// use actix_files::NamedFile; - /// /// let file = NamedFile::open("foo.txt"); /// ``` + #[cfg(not(feature = "experimental-io-uring"))] pub fn open>(path: P) -> io::Result { - Self::from_file(File::open(&path)?, path) + let file = File::open(&path)?; + Self::from_file(file, path) } - /// Returns reference to the underlying `File` object. + /// Attempts to open a file asynchronously in read-only mode. + /// + /// When the `experimental-io-uring` crate feature is enabled, this will be async. Otherwise, it + /// will behave just like `open`. 
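The `from_file` hunk above percent-escapes ASCII control characters in the on-disk file name before it is placed in a `Content-Disposition` header, so a crafted file name cannot inject header line breaks. A standalone sketch of that mapping; the helper name is illustrative:

```rust
// Sketch of the replacement table used in from_file above; only these four control
// characters are rewritten, everything else passes through unchanged.
fn sanitize_disposition_filename(filename: &str) -> String {
    filename
        .replace('\n', "%0A") // line feed
        .replace('\x0B', "%0B") // vertical tab
        .replace('\x0C', "%0C") // form feed
        .replace('\r', "%0D") // carriage return
}

fn main() {
    // mirrors the file name used by test_static_files_with_special_characters
    // earlier in this diff
    assert_eq!(
        sanitize_disposition_filename("test\n\x0B\x0C\rnewline.text"),
        "test%0A%0B%0C%0Dnewline.text"
    );
}
```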
+ /// + /// # Examples + /// ``` + /// use actix_files::NamedFile; + /// # async fn open() { + /// let file = NamedFile::open_async("foo.txt").await.unwrap(); + /// # } + /// ``` + pub async fn open_async>(path: P) -> io::Result { + let file = { + #[cfg(not(feature = "experimental-io-uring"))] + { + File::open(&path)? + } + + #[cfg(feature = "experimental-io-uring")] + { + File::open(&path).await? + } + }; + + Self::from_file(file, path) + } + + /// Returns reference to the underlying file object. #[inline] pub fn file(&self) -> &File { &self.file } - /// Retrieve the path of this file. + /// Returns the filesystem path to this file. /// /// # Examples - /// /// ``` /// # use std::io; /// use actix_files::NamedFile; /// - /// # fn path() -> io::Result<()> { - /// let file = NamedFile::open("test.txt")?; + /// # async fn path() -> io::Result<()> { + /// let file = NamedFile::open_async("test.txt").await?; /// assert_eq!(file.path().as_os_str(), "foo.txt"); /// # Ok(()) /// # } @@ -197,51 +268,92 @@ impl NamedFile { self.path.as_path() } - /// Set response **Status Code** + /// Returns the time the file was last modified. + /// + /// Returns `None` only on unsupported platforms; see [`std::fs::Metadata::modified()`]. + /// Therefore, it is usually safe to unwrap this. + #[inline] + pub fn modified(&self) -> Option { + self.modified + } + + /// Returns the filesystem metadata associated with this file. + #[inline] + pub fn metadata(&self) -> &Metadata { + &self.md + } + + /// Returns the `Content-Type` header that will be used when serving this file. + #[inline] + pub fn content_type(&self) -> &Mime { + &self.content_type + } + + /// Returns the `Content-Disposition` that will be used when serving this file. + #[inline] + pub fn content_disposition(&self) -> &ContentDisposition { + &self.content_disposition + } + + /// Returns the `Content-Encoding` that will be used when serving this file. + /// + /// A return value of `None` indicates that the content is not already using a compressed + /// representation and may be subject to compression downstream. + #[inline] + pub fn content_encoding(&self) -> Option { + self.encoding + } + + /// Set response status code. + #[deprecated(since = "0.7.0", note = "Prefer `Responder::customize()`.")] pub fn set_status_code(mut self, status: StatusCode) -> Self { self.status_code = status; self } - /// Set the MIME Content-Type for serving this file. By default - /// the Content-Type is inferred from the filename extension. + /// Sets the `Content-Type` header that will be used when serving this file. By default the + /// `Content-Type` is inferred from the filename extension. #[inline] - pub fn set_content_type(mut self, mime_type: mime::Mime) -> Self { + pub fn set_content_type(mut self, mime_type: Mime) -> Self { self.content_type = mime_type; self } - /// Set the Content-Disposition for serving this file. This allows - /// changing the inline/attachment disposition as well as the filename - /// sent to the peer. By default the disposition is `inline` for text, - /// image, and video content types, and `attachment` otherwise, and - /// the filename is taken from the path provided in the `open` method - /// after converting it to UTF-8 using. - /// [`std::ffi::OsStr::to_string_lossy`] + /// Set the Content-Disposition for serving this file. This allows changing the + /// `inline/attachment` disposition as well as the filename sent to the peer. 
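`set_status_code` above is deprecated in favour of `Responder::customize()`. A minimal sketch of the replacement pattern, mirroring the `status_code_customize_same_output` test earlier in this diff:

```rust
use actix_files::NamedFile;
use actix_web::{http::StatusCode, test::TestRequest, Responder as _};

#[actix_web::test]
async fn customized_status() {
    // previously: NamedFile::open("Cargo.toml")?.set_status_code(StatusCode::NOT_FOUND)
    let file = NamedFile::open_async("Cargo.toml").await.unwrap();
    let req = TestRequest::default().to_http_request();

    // customize() wraps any Responder and lets the status be overridden
    let res = file
        .customize()
        .with_status(StatusCode::NOT_FOUND)
        .respond_to(&req);

    assert_eq!(res.status(), StatusCode::NOT_FOUND);
}
```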
+ /// + /// By default the disposition is `inline` for `text/*`, `image/*`, `video/*` and + /// `application/{javascript, json, wasm}` mime types, and `attachment` otherwise, and the + /// filename is taken from the path provided in the `open` method after converting it to UTF-8 + /// (using `to_string_lossy`). #[inline] - pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self { + pub fn set_content_disposition(mut self, cd: ContentDisposition) -> Self { self.content_disposition = cd; self.flags.insert(Flags::CONTENT_DISPOSITION); self } - /// Disable `Content-Disposition` header. + /// Disables `Content-Disposition` header. /// - /// By default Content-Disposition` header is enabled. + /// By default, the `Content-Disposition` header is sent. #[inline] pub fn disable_content_disposition(mut self) -> Self { self.flags.remove(Flags::CONTENT_DISPOSITION); self } - /// Set content encoding for serving this file + /// Sets content encoding for this file. + /// + /// This prevents the `Compress` middleware from modifying the file contents and signals to + /// browsers/clients how to decode it. For example, if serving a compressed HTML file (e.g., + /// `index.html.gz`) then use `.set_content_encoding(ContentEncoding::Gzip)`. #[inline] pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self { self.encoding = Some(enc); self } - /// Specifies whether to use ETag or not. + /// Specifies whether to return `ETag` header in response. /// /// Default is true. #[inline] @@ -250,7 +362,7 @@ impl NamedFile { self } - /// Specifies whether to use Last-Modified or not. + /// Specifies whether to return `Last-Modified` header in response. /// /// Default is true. #[inline] @@ -268,14 +380,18 @@ impl NamedFile { self } + /// Creates an `ETag` in a format is similar to Apache's. pub(crate) fn etag(&self) -> Option { - // This etag format is similar to Apache's. self.modified.as_ref().map(|mtime| { let ino = { #[cfg(unix)] { + #[cfg(unix)] + use std::os::unix::fs::MetadataExt as _; + self.md.ino() } + #[cfg(not(unix))] { 0 @@ -286,7 +402,7 @@ impl NamedFile { .duration_since(UNIX_EPOCH) .expect("modification time must be after epoch"); - header::EntityTag::strong(format!( + header::EntityTag::new_strong(format!( "{:x}:{:x}:{:x}:{:x}", ino, self.md.len(), @@ -301,16 +417,17 @@ impl NamedFile { } /// Creates an `HttpResponse` with file as a streaming body. 
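The `etag()` hunk above builds an Apache-style strong ETag from the inode, file size, and modification time (the inode is simply `0` off Unix). A standalone sketch of the same format string; the helper name is illustrative, and the last two fields are assumed to be the mtime's whole seconds and subsecond nanoseconds, which the truncated hunk only partly shows:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

// Same four-field, lowercase-hex layout as the EntityTag::new_strong call above.
fn apache_style_etag(ino: u64, len: u64, modified: SystemTime) -> String {
    let mtime = modified
        .duration_since(UNIX_EPOCH)
        .expect("modification time must be after epoch");

    format!(
        "{:x}:{:x}:{:x}:{:x}",
        ino,
        len,
        mtime.as_secs(),
        mtime.subsec_nanos()
    )
}

fn main() {
    let etag = apache_style_etag(0, 1024, UNIX_EPOCH + std::time::Duration::from_secs(5));
    assert_eq!(etag, "0:400:5:0");
}
```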
- pub fn into_response(self, req: &HttpRequest) -> HttpResponse { + pub fn into_response(self, req: &HttpRequest) -> HttpResponse { if self.status_code != StatusCode::OK { let mut res = HttpResponse::build(self.status_code); - if self.flags.contains(Flags::PREFER_UTF8) { - let ct = equiv_utf8_text(self.content_type.clone()); - res.insert_header((header::CONTENT_TYPE, ct.to_string())); + let ct = if self.flags.contains(Flags::PREFER_UTF8) { + equiv_utf8_text(self.content_type.clone()) } else { - res.insert_header((header::CONTENT_TYPE, self.content_type.to_string())); - } + self.content_type + }; + + res.insert_header((header::CONTENT_TYPE, ct.to_string())); if self.flags.contains(Flags::CONTENT_DISPOSITION) { res.insert_header(( @@ -320,10 +437,10 @@ impl NamedFile { } if let Some(current_encoding) = self.encoding { - res.encoding(current_encoding); + res.insert_header((header::CONTENT_ENCODING, current_encoding.as_str())); } - let reader = ChunkedReadFile::new(self.md.len(), 0, self.file); + let reader = chunked::new_chunked_read(self.md.len(), 0, self.file); return res.streaming(reader); } @@ -346,8 +463,8 @@ impl NamedFile { } else if let (Some(ref m), Some(header::IfUnmodifiedSince(ref since))) = (last_modified, req.get_header()) { - let t1: SystemTime = m.clone().into(); - let t2: SystemTime = since.clone().into(); + let t1: SystemTime = (*m).into(); + let t2: SystemTime = (*since).into(); match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) { (Ok(t1), Ok(t2)) => t1.as_secs() > t2.as_secs(), @@ -365,8 +482,8 @@ impl NamedFile { } else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) = (last_modified, req.get_header()) { - let t1: SystemTime = m.clone().into(); - let t2: SystemTime = since.clone().into(); + let t1: SystemTime = (*m).into(); + let t2: SystemTime = (*since).into(); match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) { (Ok(t1), Ok(t2)) => t1.as_secs() <= t2.as_secs(), @@ -376,36 +493,36 @@ impl NamedFile { false }; - let mut resp = HttpResponse::build(self.status_code); + let mut res = HttpResponse::build(self.status_code); - if self.flags.contains(Flags::PREFER_UTF8) { - let ct = equiv_utf8_text(self.content_type.clone()); - resp.insert_header((header::CONTENT_TYPE, ct.to_string())); + let ct = if self.flags.contains(Flags::PREFER_UTF8) { + equiv_utf8_text(self.content_type.clone()) } else { - resp.insert_header((header::CONTENT_TYPE, self.content_type.to_string())); - } + self.content_type + }; + + res.insert_header((header::CONTENT_TYPE, ct.to_string())); if self.flags.contains(Flags::CONTENT_DISPOSITION) { - resp.insert_header(( + res.insert_header(( header::CONTENT_DISPOSITION, self.content_disposition.to_string(), )); } - // default compressing if let Some(current_encoding) = self.encoding { - resp.encoding(current_encoding); + res.insert_header((header::CONTENT_ENCODING, current_encoding.as_str())); } if let Some(lm) = last_modified { - resp.insert_header((header::LAST_MODIFIED, lm.to_string())); + res.insert_header((header::LAST_MODIFIED, lm.to_string())); } if let Some(etag) = etag { - resp.insert_header((header::ETAG, etag.to_string())); + res.insert_header((header::ETAG, etag.to_string())); } - resp.insert_header((header::ACCEPT_RANGES, "bytes")); + res.insert_header((header::ACCEPT_RANGES, "bytes")); let mut length = self.md.len(); let mut offset = 0; @@ -417,47 +534,56 @@ impl NamedFile { length = ranges[0].length; offset = ranges[0].start; - resp.encoding(ContentEncoding::Identity); - resp.insert_header(( + // 
When a Content-Encoding header is present in a 206 partial content response + // for video content, it prevents browser video players from starting playback + // before loading the whole video and also prevents seeking. + // + // See: https://github.com/actix/actix-web/issues/2815 + // + // The assumption of this fix is that the video player knows to not send an + // Accept-Encoding header for this request and that downstream middleware will + // not attempt compression for requests without it. + // + // TODO: Solve question around what to do if self.encoding is set and partial + // range is requested. Reject request? Ignoring self.encoding seems wrong, too. + // In practice, it should not come up. + if req.headers().contains_key(&header::ACCEPT_ENCODING) { + // don't allow compression middleware to modify partial content + res.insert_header(( + header::CONTENT_ENCODING, + HeaderValue::from_static("identity"), + )); + } + + res.insert_header(( header::CONTENT_RANGE, format!("bytes {}-{}/{}", offset, offset + length - 1, self.md.len()), )); } else { - resp.insert_header((header::CONTENT_RANGE, format!("bytes */{}", length))); - return resp.status(StatusCode::RANGE_NOT_SATISFIABLE).finish(); + res.insert_header((header::CONTENT_RANGE, format!("bytes */{}", length))); + return res.status(StatusCode::RANGE_NOT_SATISFIABLE).finish(); }; } else { - return resp.status(StatusCode::BAD_REQUEST).finish(); + return res.status(StatusCode::BAD_REQUEST).finish(); }; }; if precondition_failed { - return resp.status(StatusCode::PRECONDITION_FAILED).finish(); + return res.status(StatusCode::PRECONDITION_FAILED).finish(); } else if not_modified { - return resp.status(StatusCode::NOT_MODIFIED).finish(); + return res + .status(StatusCode::NOT_MODIFIED) + .body(body::None::new()) + .map_into_boxed_body(); } - let reader = ChunkedReadFile::new(length, offset, self.file); + let reader = chunked::new_chunked_read(length, offset, self.file); if offset != 0 || length != self.md.len() { - resp.status(StatusCode::PARTIAL_CONTENT); + res.status(StatusCode::PARTIAL_CONTENT); } - resp.body(SizedStream::new(length, reader)) - } -} - -impl Deref for NamedFile { - type Target = File; - - fn deref(&self) -> &File { - &self.file - } -} - -impl DerefMut for NamedFile { - fn deref_mut(&mut self) -> &mut File { - &mut self.file + res.body(SizedStream::new(length, reader)) } } @@ -502,7 +628,9 @@ fn none_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool { } impl Responder for NamedFile { - fn respond_to(self, req: &HttpRequest) -> HttpResponse { + type Body = BoxBody; + + fn respond_to(self, req: &HttpRequest) -> HttpResponse { self.into_response(req) } } @@ -511,14 +639,16 @@ impl ServiceFactory for NamedFile { type Response = ServiceResponse; type Error = Error; type Config = (); - type InitError = (); type Service = NamedFileService; - type Future = Ready>; + type InitError = (); + type Future = LocalBoxFuture<'static, Result>; fn new_service(&self, _: ()) -> Self::Future { - ok(NamedFileService { + let service = NamedFileService { path: self.path.clone(), - }) + }; + + Box::pin(async move { Ok(service) }) } } @@ -531,18 +661,19 @@ pub struct NamedFileService { impl Service for NamedFileService { type Response = ServiceResponse; type Error = Error; - type Future = Ready>; + type Future = LocalBoxFuture<'static, Result>; - actix_service::always_ready!(); + dev::always_ready!(); fn call(&self, req: ServiceRequest) -> Self::Future { let (req, _) = req.into_parts(); - ready( - NamedFile::open(&self.path) - 
.map_err(|e| e.into()) - .map(|f| f.into_response(&req)) - .map(|res| ServiceResponse::new(req, res)), - ) + + let path = self.path.clone(); + Box::pin(async move { + let file = NamedFile::open_async(path).await?; + let res = file.into_response(&req); + Ok(ServiceResponse::new(req, res)) + }) } } diff --git a/actix-files/src/path_buf.rs b/actix-files/src/path_buf.rs index 8a87acd5d..c1983279b 100644 --- a/actix-files/src/path_buf.rs +++ b/actix-files/src/path_buf.rs @@ -1,5 +1,5 @@ use std::{ - path::{Path, PathBuf}, + path::{Component, Path, PathBuf}, str::FromStr, }; @@ -8,7 +8,7 @@ use actix_web::{dev::Payload, FromRequest, HttpRequest}; use crate::error::UriSegmentError; -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq)] pub(crate) struct PathBufWrap(PathBuf); impl FromStr for PathBufWrap { @@ -21,11 +21,28 @@ impl FromStr for PathBufWrap { impl PathBufWrap { /// Parse a path, giving the choice of allowing hidden files to be considered valid segments. + /// + /// Path traversal is guarded by this method. pub fn parse_path(path: &str, hidden_files: bool) -> Result { let mut buf = PathBuf::new(); + // equivalent to `path.split('/').count()` + let mut segment_count = path.matches('/').count() + 1; + + // we can decode the whole path here (instead of per-segment decoding) + // because we will reject `%2F` in paths using `segment_count`. + let path = percent_encoding::percent_decode_str(path) + .decode_utf8() + .map_err(|_| UriSegmentError::NotValidUtf8)?; + + // disallow decoding `%2F` into `/` + if segment_count != path.matches('/').count() + 1 { + return Err(UriSegmentError::BadChar('/')); + } + for segment in path.split('/') { if segment == ".." { + segment_count -= 1; buf.pop(); } else if !hidden_files && segment.starts_with('.') { return Err(UriSegmentError::BadStart('.')); @@ -38,14 +55,27 @@ impl PathBufWrap { } else if segment.ends_with('<') { return Err(UriSegmentError::BadEnd('<')); } else if segment.is_empty() { + segment_count -= 1; continue; } else if cfg!(windows) && segment.contains('\\') { return Err(UriSegmentError::BadChar('\\')); + } else if cfg!(windows) && segment.contains(':') { + return Err(UriSegmentError::BadChar(':')); } else { buf.push(segment) } } + // make sure we agree with stdlib parser + for (i, component) in buf.components().enumerate() { + assert!( + matches!(component, Component::Normal(_)), + "component `{:?}` is not normal", + component + ); + assert!(i < segment_count); + } + Ok(PathBufWrap(buf)) } } @@ -59,17 +89,14 @@ impl AsRef for PathBufWrap { impl FromRequest for PathBufWrap { type Error = UriSegmentError; type Future = Ready>; - type Config = (); fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future { - ready(req.match_info().path().parse()) + ready(req.match_info().unprocessed().parse()) } } #[cfg(test)] mod tests { - use std::iter::FromIterator; - use super::*; #[test] @@ -116,4 +143,46 @@ mod tests { PathBuf::from_iter(vec!["test", ".tt"]) ); } + + #[test] + fn path_traversal() { + assert_eq!( + PathBufWrap::parse_path("/../README.md", false).unwrap().0, + PathBuf::from_iter(vec!["README.md"]) + ); + + assert_eq!( + PathBufWrap::parse_path("/../README.md", true).unwrap().0, + PathBuf::from_iter(vec!["README.md"]) + ); + + assert_eq!( + PathBufWrap::parse_path("/../../../../../../../../../../etc/passwd", false) + .unwrap() + .0, + PathBuf::from_iter(vec!["etc/passwd"]) + ); + } + + #[test] + #[cfg_attr(windows, should_panic)] + fn windows_drive_traversal() { + // detect issues in windows that could lead to path traversal + // see 
for HttpRangeParseError { + fn from(err: http_range::HttpRangeParseError) -> Self { + match err { + http_range::HttpRangeParseError::InvalidRange => Self::InvalidRange, + http_range::HttpRangeParseError::NoOverlap => Self::NoOverlap, + } + } +} + +#[derive(Debug, Clone, Error)] +#[non_exhaustive] +pub struct ParseRangeErr(#[error(not(source))] HttpRangeParseError); + +impl fmt::Display for ParseRangeErr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("invalid Range header: ")?; + f.write_str(match self.0 { + HttpRangeParseError::InvalidRange => "invalid syntax", + HttpRangeParseError::NoOverlap => "range starts after end of content", + }) + } +} /// HTTP Range header representation. #[derive(Debug, Clone, Copy)] @@ -10,26 +42,22 @@ pub struct HttpRange { pub length: u64, } -#[derive(Debug, Clone, Display, Error)] -#[display(fmt = "Parse HTTP Range failed")] -pub struct ParseRangeErr(#[error(not(source))] ()); - impl HttpRange { /// Parses Range HTTP header string as per RFC 2616. /// /// `header` is HTTP Range header (e.g. `bytes=bytes=0-9`). /// `size` is full size of response (file). pub fn parse(header: &str, size: u64) -> Result, ParseRangeErr> { - match http_range::HttpRange::parse(header, size) { - Ok(ranges) => Ok(ranges - .iter() - .map(|range| HttpRange { - start: range.start, - length: range.length, - }) - .collect()), - Err(_) => Err(ParseRangeErr(())), - } + let ranges = + http_range::HttpRange::parse(header, size).map_err(|err| ParseRangeErr(err.into()))?; + + Ok(ranges + .iter() + .map(|range| HttpRange { + start: range.start, + length: range.length, + }) + .collect()) } } diff --git a/actix-files/src/service.rs b/actix-files/src/service.rs index 831115dc6..3d3b36c40 100644 --- a/actix-files/src/service.rs +++ b/actix-files/src/service.rs @@ -1,9 +1,8 @@ -use std::{fmt, io, path::PathBuf, rc::Rc}; +use std::{fmt, io, ops::Deref, path::PathBuf, rc::Rc}; -use actix_service::Service; -use actix_utils::future::ok; use actix_web::{ - dev::{ServiceRequest, ServiceResponse}, + body::BoxBody, + dev::{self, Service, ServiceRequest, ServiceResponse}, error::Error, guard::Guard, http::{header, Method}, @@ -13,11 +12,22 @@ use futures_core::future::LocalBoxFuture; use crate::{ named, Directory, DirectoryRenderer, FilesError, HttpService, MimeOverride, NamedFile, - PathBufWrap, + PathBufWrap, PathFilter, }; /// Assembled file serving service. 
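The reworked `HttpRange::parse` above delegates to the `http_range` crate and wraps its errors in `ParseRangeErr`. A usage sketch, assuming `HttpRange` is re-exported from `actix_files` and reusing the `Content-Range` format string from `named.rs` above:

```rust
use actix_files::HttpRange;

fn main() {
    let size = 100u64;

    // "bytes=10-20" is an inclusive byte range: 11 bytes starting at offset 10
    let ranges = HttpRange::parse("bytes=10-20", size).unwrap();
    let range = ranges[0];
    assert_eq!((range.start, range.length), (10, 11));

    // the Content-Range value derived from it, as in into_response
    let content_range = format!(
        "bytes {}-{}/{}",
        range.start,
        range.start + range.length - 1,
        size
    );
    assert_eq!(content_range, "bytes 10-20/100");

    // a range starting past the end of the content maps to an error (NoOverlap)
    assert!(HttpRange::parse("bytes=200-300", size).is_err());
}
```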
-pub struct FilesService { +#[derive(Clone)] +pub struct FilesService(pub(crate) Rc); + +impl Deref for FilesService { + type Target = FilesServiceInner; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +pub struct FilesServiceInner { pub(crate) directory: PathBuf, pub(crate) index: Option, pub(crate) show_index: bool, @@ -25,25 +35,52 @@ pub struct FilesService { pub(crate) default: Option, pub(crate) renderer: Rc, pub(crate) mime_override: Option>, + pub(crate) path_filter: Option>, pub(crate) file_flags: named::Flags, pub(crate) guards: Option>, pub(crate) hidden_files: bool, } +impl fmt::Debug for FilesServiceInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("FilesServiceInner") + } +} + impl FilesService { - fn handle_err( + async fn handle_err( &self, err: io::Error, req: ServiceRequest, - ) -> LocalBoxFuture<'static, Result> { + ) -> Result { log::debug!("error handling {}: {}", req.path(), err); if let Some(ref default) = self.default { - Box::pin(default.call(req)) + default.call(req).await } else { - Box::pin(ok(req.error_response(err))) + Ok(req.error_response(err)) } } + + fn serve_named_file(&self, req: ServiceRequest, mut named_file: NamedFile) -> ServiceResponse { + if let Some(ref mime_override) = self.mime_override { + let new_disposition = mime_override(&named_file.content_type.type_()); + named_file.content_disposition.disposition = new_disposition; + } + named_file.flags = self.file_flags; + + let (req, _) = req.into_parts(); + let res = named_file.into_response(&req); + ServiceResponse::new(req, res) + } + + fn show_index(&self, req: ServiceRequest, path: PathBuf) -> ServiceResponse { + let dir = Directory::new(self.directory.clone(), path); + + let (req, _) = req.into_parts(); + + (self.renderer)(&dir, &req).unwrap_or_else(|e| ServiceResponse::from_err(e, req)) + } } impl fmt::Debug for FilesService { @@ -53,104 +90,99 @@ impl fmt::Debug for FilesService { } impl Service for FilesService { - type Response = ServiceResponse; + type Response = ServiceResponse; type Error = Error; - type Future = LocalBoxFuture<'static, Result>; + type Future = LocalBoxFuture<'static, Result>; - actix_service::always_ready!(); + dev::always_ready!(); fn call(&self, req: ServiceRequest) -> Self::Future { let is_method_valid = if let Some(guard) = &self.guards { // execute user defined guards - (**guard).check(req.head()) + (**guard).check(&req.guard_ctx()) } else { // default behavior matches!(*req.method(), Method::HEAD | Method::GET) }; - if !is_method_valid { - return Box::pin(ok(req.into_response( - actix_web::HttpResponse::MethodNotAllowed() - .insert_header(header::ContentType(mime::TEXT_PLAIN_UTF_8)) - .body("Request did not meet this resource's requirements."), - ))); - } + let this = self.clone(); - let real_path = - match PathBufWrap::parse_path(req.match_info().path(), self.hidden_files) { - Ok(item) => item, - Err(e) => return Box::pin(ok(req.error_response(e))), - }; - - // full file path - let path = self.directory.join(&real_path); - if let Err(err) = path.canonicalize() { - return Box::pin(self.handle_err(err, req)); - } - - if path.is_dir() { - if self.redirect_to_slash - && !req.path().ends_with('/') - && (self.index.is_some() || self.show_index) - { - let redirect_to = format!("{}/", req.path()); - - return Box::pin(ok(req.into_response( - HttpResponse::Found() - .insert_header((header::LOCATION, redirect_to)) - .finish(), - ))); + Box::pin(async move { + if !is_method_valid { + return Ok(req.into_response( + 
HttpResponse::MethodNotAllowed() + .insert_header(header::ContentType(mime::TEXT_PLAIN_UTF_8)) + .body("Request did not meet this resource's requirements."), + )); } - if let Some(ref redir_index) = self.index { - let path = path.join(redir_index); + let path_on_disk = + match PathBufWrap::parse_path(req.match_info().unprocessed(), this.hidden_files) { + Ok(item) => item, + Err(err) => return Ok(req.error_response(err)), + }; - match NamedFile::open(path) { + if let Some(filter) = &this.path_filter { + if !filter(path_on_disk.as_ref(), req.head()) { + if let Some(ref default) = this.default { + return default.call(req).await; + } else { + return Ok(req.into_response(HttpResponse::NotFound().finish())); + } + } + } + + // full file path + let path = this.directory.join(&path_on_disk); + if let Err(err) = path.canonicalize() { + return this.handle_err(err, req).await; + } + + if path.is_dir() { + if this.redirect_to_slash + && !req.path().ends_with('/') + && (this.index.is_some() || this.show_index) + { + let redirect_to = format!("{}/", req.path()); + + return Ok(req.into_response( + HttpResponse::Found() + .insert_header((header::LOCATION, redirect_to)) + .finish(), + )); + } + + match this.index { + Some(ref index) => { + let named_path = path.join(index); + match NamedFile::open_async(named_path).await { + Ok(named_file) => Ok(this.serve_named_file(req, named_file)), + Err(_) if this.show_index => Ok(this.show_index(req, path)), + Err(err) => this.handle_err(err, req).await, + } + } + None if this.show_index => Ok(this.show_index(req, path)), + None => Ok(ServiceResponse::from_err( + FilesError::IsDirectory, + req.into_parts().0, + )), + } + } else { + match NamedFile::open_async(&path).await { Ok(mut named_file) => { - if let Some(ref mime_override) = self.mime_override { - let new_disposition = - mime_override(&named_file.content_type.type_()); + if let Some(ref mime_override) = this.mime_override { + let new_disposition = mime_override(&named_file.content_type.type_()); named_file.content_disposition.disposition = new_disposition; } - named_file.flags = self.file_flags; + named_file.flags = this.file_flags; let (req, _) = req.into_parts(); let res = named_file.into_response(&req); - Box::pin(ok(ServiceResponse::new(req, res))) + Ok(ServiceResponse::new(req, res)) } - Err(err) => self.handle_err(err, req), + Err(err) => this.handle_err(err, req).await, } - } else if self.show_index { - let dir = Directory::new(self.directory.clone(), path); - - let (req, _) = req.into_parts(); - let x = (self.renderer)(&dir, &req); - - Box::pin(match x { - Ok(resp) => ok(resp), - Err(err) => ok(ServiceResponse::from_err(err, req)), - }) - } else { - Box::pin(ok(ServiceResponse::from_err( - FilesError::IsDirectory, - req.into_parts().0, - ))) } - } else { - match NamedFile::open(path) { - Ok(mut named_file) => { - if let Some(ref mime_override) = self.mime_override { - let new_disposition = mime_override(&named_file.content_type.type_()); - named_file.content_disposition.disposition = new_disposition; - } - named_file.flags = self.file_flags; - - let (req, _) = req.into_parts(); - let res = named_file.into_response(&req); - Box::pin(ok(ServiceResponse::new(req, res))) - } - Err(err) => self.handle_err(err, req), - } - } + }) } } diff --git a/actix-files/tests/encoding.rs b/actix-files/tests/encoding.rs index d21d4f8fd..3c8bdb59b 100644 --- a/actix-files/tests/encoding.rs +++ b/actix-files/tests/encoding.rs @@ -1,14 +1,14 @@ -use actix_files::Files; +use actix_files::{Files, NamedFile}; use actix_web::{ 
http::{ header::{self, HeaderValue}, StatusCode, }, test::{self, TestRequest}, - App, + web, App, }; -#[actix_rt::test] +#[actix_web::test] async fn test_utf8_file_contents() { // use default ISO-8859-1 encoding let srv = test::init_service(App::new().service(Files::new("/", "./tests"))).await; @@ -19,13 +19,12 @@ async fn test_utf8_file_contents() { assert_eq!(res.status(), StatusCode::OK); assert_eq!( res.headers().get(header::CONTENT_TYPE), - Some(&HeaderValue::from_static("text/plain")), + Some(&HeaderValue::from_static("text/plain; charset=utf-8")), ); - // prefer UTF-8 encoding + // disable UTF-8 attribute let srv = - test::init_service(App::new().service(Files::new("/", "./tests").prefer_utf8(true))) - .await; + test::init_service(App::new().service(Files::new("/", "./tests").prefer_utf8(false))).await; let req = TestRequest::with_uri("/utf8.txt").to_request(); let res = test::call_service(&srv, req).await; @@ -33,6 +32,34 @@ async fn test_utf8_file_contents() { assert_eq!(res.status(), StatusCode::OK); assert_eq!( res.headers().get(header::CONTENT_TYPE), - Some(&HeaderValue::from_static("text/plain; charset=utf-8")), + Some(&HeaderValue::from_static("text/plain")), + ); +} + +#[actix_web::test] +async fn partial_range_response_encoding() { + let srv = test::init_service(App::new().default_service(web::to(|| async { + NamedFile::open_async("./tests/test.binary").await.unwrap() + }))) + .await; + + // range request without accept-encoding returns no content-encoding header + let req = TestRequest::with_uri("/") + .append_header((header::RANGE, "bytes=10-20")) + .to_request(); + let res = test::call_service(&srv, req).await; + assert_eq!(res.status(), StatusCode::PARTIAL_CONTENT); + assert!(!res.headers().contains_key(header::CONTENT_ENCODING)); + + // range request with accept-encoding returns a content-encoding header + let req = TestRequest::with_uri("/") + .append_header((header::RANGE, "bytes=10-20")) + .append_header((header::ACCEPT_ENCODING, "identity")) + .to_request(); + let res = test::call_service(&srv, req).await; + assert_eq!(res.status(), StatusCode::PARTIAL_CONTENT); + assert_eq!( + res.headers().get(header::CONTENT_ENCODING).unwrap(), + "identity" ); } diff --git a/actix-files/tests/guard.rs b/actix-files/tests/guard.rs index 8b1785e7f..5a97f75d6 100644 --- a/actix-files/tests/guard.rs +++ b/actix-files/tests/guard.rs @@ -7,14 +7,12 @@ use actix_web::{ }; use bytes::Bytes; -#[actix_rt::test] +#[actix_web::test] async fn test_guard_filter() { let srv = test::init_service( App::new() .service(Files::new("/", "./tests/fixtures/guards/first").guard(Host("first.com"))) - .service( - Files::new("/", "./tests/fixtures/guards/second").guard(Host("second.com")), - ), + .service(Files::new("/", "./tests/fixtures/guards/second").guard(Host("second.com"))), ) .await; diff --git a/actix-files/tests/test.js b/actix-files/tests/test.js new file mode 100644 index 000000000..2ee135561 --- /dev/null +++ b/actix-files/tests/test.js @@ -0,0 +1 @@ +// this file is empty. 
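The `parse_path` changes in `path_buf.rs` above percent-decode the whole path up front and reject it when decoding changes the number of `/`-separated segments, so `%2F` can never smuggle in a path separator; the `traversal.rs` tests added next exercise this. A standalone sketch of that check; this is a hypothetical helper, not the crate's actual function, which also handles `..`, hidden files, and other forbidden characters:

```rust
// Returns true when percent-decoding the raw request path would introduce new '/'
// separators, i.e. when the request must be rejected.
fn decoding_adds_separators(path: &str) -> bool {
    // equivalent to `path.split('/').count()`
    let segments_before = path.matches('/').count() + 1;

    match percent_encoding::percent_decode_str(path).decode_utf8() {
        // non-UTF-8 after decoding is rejected outright by the real parser
        Err(_) => true,
        Ok(decoded) => segments_before != decoded.matches('/').count() + 1,
    }
}

fn main() {
    assert!(!decoding_adds_separators("/test/Cargo.toml"));
    // %2F decodes to '/', adding a segment, so the request is rejected
    assert!(decoding_adds_separators("/test%2Ftest.binary"));
    // %2e%2e decodes to ".." but adds no separator; the ".." handling catches it later
    assert!(!decoding_adds_separators("/%2e%2e/etc/passwd"));
}
```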
diff --git a/actix-files/tests/traversal.rs b/actix-files/tests/traversal.rs new file mode 100644 index 000000000..4eecb8dde --- /dev/null +++ b/actix-files/tests/traversal.rs @@ -0,0 +1,26 @@ +use actix_files::Files; +use actix_web::{ + http::StatusCode, + test::{self, TestRequest}, + App, +}; + +#[actix_rt::test] +async fn test_directory_traversal_prevention() { + let srv = test::init_service(App::new().service(Files::new("/", "./tests"))).await; + + let req = TestRequest::with_uri("/../../../../../../../../../../../etc/passwd").to_request(); + let res = test::call_service(&srv, req).await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + let req = TestRequest::with_uri( + "/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/etc/passwd", + ) + .to_request(); + let res = test::call_service(&srv, req).await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + let req = TestRequest::with_uri("/%00/etc/passwd%00").to_request(); + let res = test::call_service(&srv, req).await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); +} diff --git a/actix-http-test/CHANGES.md b/actix-http-test/CHANGES.md index 1dbd9a15b..4d133e3ec 100644 --- a/actix-http-test/CHANGES.md +++ b/actix-http-test/CHANGES.md @@ -1,100 +1,175 @@ # Changes -## Unreleased - 2021-xx-xx +## Unreleased +- Minimum supported Rust version (MSRV) is now 1.72. -## 3.0.0-beta.4 - 2021-04-02 -* Added `TestServer::client_headers` method. [#2097] +## 3.2.0 + +- Minimum supported Rust version (MSRV) is now 1.68 due to transitive `time` dependency. + +## 3.1.0 + +- Minimum supported Rust version (MSRV) is now 1.59. + +## 3.0.0 + +- `TestServer::stop` is now async and will wait for the server and system to shutdown. [#2442] +- Added `TestServer::client_headers` method. [#2097] +- Update `actix-server` dependency to `2`. +- Update `actix-tls` dependency to `3`. +- Update `bytes` to `1.0`. [#1813] +- Minimum supported Rust version (MSRV) is now 1.57. + +[#2442]: https://github.com/actix/actix-web/pull/2442 +[#2097]: https://github.com/actix/actix-web/pull/2097 +[#1813]: https://github.com/actix/actix-web/pull/1813 + +
+3.0.0 Pre-Releases + +## 3.0.0-beta.13 + +- No significant changes since `3.0.0-beta.12`. + +## 3.0.0-beta.12 + +- No significant changes since `3.0.0-beta.11`. + +## 3.0.0-beta.11 + +- Minimum supported Rust version (MSRV) is now 1.54. + +## 3.0.0-beta.10 + +- Update `actix-server` to `2.0.0-rc.2`. [#2550] + +[#2550]: https://github.com/actix/actix-web/pull/2550 + +## 3.0.0-beta.9 + +- No significant changes since `3.0.0-beta.8`. + +## 3.0.0-beta.8 + +- Update `actix-tls` to `3.0.0-rc.1`. [#2474] + +[#2474]: https://github.com/actix/actix-web/pull/2474 + +## 3.0.0-beta.7 + +- Fix compatibility with experimental `io-uring` feature of `actix-rt`. [#2408] + +[#2408]: https://github.com/actix/actix-web/pull/2408 + +## 3.0.0-beta.6 + +- `TestServer::stop` is now async and will wait for the server and system to shutdown. [#2442] +- Update `actix-server` to `2.0.0-beta.9`. [#2442] +- Minimum supported Rust version (MSRV) is now 1.52. + +[#2442]: https://github.com/actix/actix-web/pull/2442 + +## 3.0.0-beta.5 + +- Minimum supported Rust version (MSRV) is now 1.51. + +## 3.0.0-beta.4 + +- Added `TestServer::client_headers` method. [#2097] [#2097]: https://github.com/actix/actix-web/pull/2097 +## 3.0.0-beta.3 -## 3.0.0-beta.3 - 2021-03-09 -* No notable changes. +- No notable changes. +## 3.0.0-beta.2 -## 3.0.0-beta.2 - 2021-02-10 -* No notable changes. +- No notable changes. +## 3.0.0-beta.1 -## 3.0.0-beta.1 - 2021-01-07 -* Update `bytes` to `1.0`. [#1813] +- Update `bytes` to `1.0`. [#1813] [#1813]: https://github.com/actix/actix-web/pull/1813 +
-## 2.1.0 - 2020-11-25 -* Add ability to set address for `TestServer`. [#1645] -* Upgrade `base64` to `0.13`. -* Upgrade `serde_urlencoded` to `0.7`. [#1773] +## 2.1.0 + +- Add ability to set address for `TestServer`. [#1645] +- Upgrade `base64` to `0.13`. +- Upgrade `serde_urlencoded` to `0.7`. [#1773] [#1773]: https://github.com/actix/actix-web/pull/1773 [#1645]: https://github.com/actix/actix-web/pull/1645 +## 2.0.0 -## 2.0.0 - 2020-09-11 -* Update actix-codec and actix-utils dependencies. +- Update actix-codec and actix-utils dependencies. +## 2.0.0-alpha.1 -## 2.0.0-alpha.1 - 2020-05-23 -* Update the `time` dependency to 0.2.7 -* Update `actix-connect` dependency to 2.0.0-alpha.2 -* Make `test_server` `async` fn. -* Bump minimum supported Rust version to 1.40 -* Replace deprecated `net2` crate with `socket2` -* Update `base64` dependency to 0.12 -* Update `env_logger` dependency to 0.7 +- Update the `time` dependency to 0.2.7 +- Update `actix-connect` dependency to 2.0.0-alpha.2 +- Make `test_server` `async` fn. +- Bump minimum supported Rust version to 1.40 +- Replace deprecated `net2` crate with `socket2` +- Update `base64` dependency to 0.12 +- Update `env_logger` dependency to 0.7 -## 1.0.0 - 2019-12-13 -* Replaced `TestServer::start()` with `test_server()` +## 1.0.0 +- Replaced `TestServer::start()` with `test_server()` -## 1.0.0-alpha.3 - 2019-12-07 -* Migrate to `std::future` +## 1.0.0-alpha.3 +- Migrate to `std::future` -## 0.2.5 - 2019-09-17 -* Update serde_urlencoded to "0.6.1" -* Increase TestServerRuntime timeouts from 500ms to 3000ms -* Do not override current `System` +## 0.2.5 +- Update serde_urlencoded to "0.6.1" +- Increase TestServerRuntime timeouts from 500ms to 3000ms +- Do not override current `System` -## 0.2.4 - 2019-07-18 -* Update actix-server to 0.6 +## 0.2.4 +- Update actix-server to 0.6 -## 0.2.3 - 2019-07-16 -* Add `delete`, `options`, `patch` methods to `TestServerRunner` +## 0.2.3 +- Add `delete`, `options`, `patch` methods to `TestServerRunner` -## 0.2.2 - 2019-06-16 -* Add .put() and .sput() methods +## 0.2.2 +- Add .put() and .sput() methods -## 0.2.1 - 2019-06-05 -* Add license files +## 0.2.1 +- Add license files -## 0.2.0 - 2019-05-12 -* Update awc and actix-http deps +## 0.2.0 +- Update awc and actix-http deps -## 0.1.1 - 2019-04-24 -* Always make new connection for http client +## 0.1.1 +- Always make new connection for http client -## 0.1.0 - 2019-04-16 -* No changes +## 0.1.0 +- No changes -## 0.1.0-alpha.3 - 2019-04-02 -* Request functions accept path #743 +## 0.1.0-alpha.3 +- Request functions accept path #743 -## 0.1.0-alpha.2 - 2019-03-29 -* Added TestServerRuntime::load_body() method -* Update actix-http and awc libraries +## 0.1.0-alpha.2 +- Added TestServerRuntime::load_body() method +- Update actix-http and awc libraries -## 0.1.0-alpha.1 - 2019-03-28 -* Initial impl +## 0.1.0-alpha.1 + +- Initial impl diff --git a/actix-http-test/Cargo.toml b/actix-http-test/Cargo.toml index 88b4bd04a..0947579a5 100644 --- a/actix-http-test/Cargo.toml +++ b/actix-http-test/Cargo.toml @@ -1,26 +1,34 @@ [package] name = "actix-http-test" -version = "3.0.0-beta.4" +version = "3.2.0" authors = ["Nikolay Kim "] description = "Various helpers for Actix applications to use during testing" -readme = "README.md" keywords = ["http", "web", "framework", "async", "futures"] homepage = "https://actix.rs" -repository = "https://github.com/actix/actix-web.git" -documentation = "https://docs.rs/actix-http-test/" -categories = ["network-programming", "asynchronous", - 
"web-programming::http-server", - "web-programming::websocket"] +repository = "https://github.com/actix/actix-web" +categories = [ + "network-programming", + "asynchronous", + "web-programming::http-server", + "web-programming::websocket", +] license = "MIT OR Apache-2.0" -exclude = [".gitignore", ".cargo/config"] -edition = "2018" +edition = "2021" [package.metadata.docs.rs] features = [] -[lib] -name = "actix_http_test" -path = "src/lib.rs" +[package.metadata.cargo_check_external_types] +allowed_external_types = [ + "actix_codec::*", + "actix_http::*", + "actix_server::*", + "awc::*", + "bytes::*", + "futures_core::*", + "http::*", + "tokio::*", +] [features] default = [] @@ -29,27 +37,25 @@ default = [] openssl = ["tls-openssl", "awc/openssl"] [dependencies] -actix-service = "2.0.0" -actix-codec = "0.4.0" -actix-tls = "3.0.0-beta.5" -actix-utils = "3.0.0" +actix-service = "2" +actix-codec = "0.5" +actix-tls = "3" +actix-utils = "3" actix-rt = "2.2" -actix-server = "2.0.0-beta.3" -awc = { version = "3.0.0-beta.5", default-features = false } +actix-server = "2" +awc = { version = "3", default-features = false } -base64 = "0.13" bytes = "1" -futures-core = { version = "0.3.7", default-features = false } -http = "0.2.2" +futures-core = { version = "0.3.17", default-features = false } +http = "0.2.7" log = "0.4" -socket2 = "0.4" -serde = "1.0" -serde_json = "1.0" +socket2 = "0.5" +serde = "1" +serde_json = "1" slab = "0.4" serde_urlencoded = "0.7" -time = { version = "0.2.23", default-features = false, features = ["std"] } -tls-openssl = { version = "0.10.9", package = "openssl", optional = true } +tls-openssl = { version = "0.10.55", package = "openssl", optional = true } +tokio = { version = "1.24.2", features = ["sync"] } [dev-dependencies] -actix-web = { version = "4.0.0-beta.6", default-features = false, features = ["cookies"] } -actix-http = "3.0.0-beta.6" +actix-http = "3" diff --git a/actix-http-test/README.md b/actix-http-test/README.md index b8cf450d4..939028121 100644 --- a/actix-http-test/README.md +++ b/actix-http-test/README.md @@ -1,15 +1,20 @@ -# actix-http-test +# `actix-http-test` -> Various helpers for Actix applications to use during testing. + [![crates.io](https://img.shields.io/crates/v/actix-http-test?label=latest)](https://crates.io/crates/actix-http-test) -[![Documentation](https://docs.rs/actix-http-test/badge.svg?version=3.0.0-beta.4)](https://docs.rs/actix-http-test/3.0.0-beta.4) +[![Documentation](https://docs.rs/actix-http-test/badge.svg?version=3.2.0)](https://docs.rs/actix-http-test/3.2.0) +![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg) ![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http-test) -[![Dependency Status](https://deps.rs/crate/actix-http-test/3.0.0-beta.4/status.svg)](https://deps.rs/crate/actix-http-test/3.0.0-beta.4) -[![Join the chat at https://gitter.im/actix/actix-web](https://badges.gitter.im/actix/actix-web.svg)](https://gitter.im/actix/actix-web?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +
+[![Dependency Status](https://deps.rs/crate/actix-http-test/3.2.0/status.svg)](https://deps.rs/crate/actix-http-test/3.2.0) +[![Download](https://img.shields.io/crates/d/actix-http-test.svg)](https://crates.io/crates/actix-http-test) +[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) -## Documentation & Resources + -- [API Documentation](https://docs.rs/actix-http-test) -- [Chat on Gitter](https://gitter.im/actix/actix-web) -- Minimum Supported Rust Version (MSRV): 1.46.0 + + +Various helpers for Actix applications to use during testing. + + diff --git a/actix-http-test/src/lib.rs b/actix-http-test/src/lib.rs index 0f126c99a..554af9102 100644 --- a/actix-http-test/src/lib.rs +++ b/actix-http-test/src/lib.rs @@ -1,127 +1,147 @@ //! Various helpers for Actix applications to use during testing. -#![deny(rust_2018_idioms)] +#![deny(rust_2018_idioms, nonstandard_style)] +#![warn(future_incompatible)] #![doc(html_logo_url = "https://actix.rs/img/logo.png")] #![doc(html_favicon_url = "https://actix.rs/favicon.ico")] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] #[cfg(feature = "openssl")] extern crate tls_openssl as openssl; -use std::sync::mpsc; -use std::{net, thread, time}; +use std::{net, thread, time::Duration}; use actix_codec::{AsyncRead, AsyncWrite, Framed}; use actix_rt::{net::TcpStream, System}; -use actix_server::{Server, ServiceFactory}; +use actix_server::{Server, ServerServiceFactory}; use awc::{ - error::PayloadError, http::HeaderMap, ws, Client, ClientRequest, ClientResponse, Connector, + error::PayloadError, http::header::HeaderMap, ws, Client, ClientRequest, ClientResponse, + Connector, }; use bytes::Bytes; use futures_core::stream::Stream; use http::Method; use socket2::{Domain, Protocol, Socket, Type}; +use tokio::sync::mpsc; -/// Start test server +/// Start test server. /// -/// `TestServer` is very simple test server that simplify process of writing -/// integration tests cases for actix web applications. +/// `TestServer` is very simple test server that simplify process of writing integration tests cases +/// for HTTP applications. 
 ///
 /// # Examples
 ///
 /// ```
-/// use actix_http::HttpService;
-/// use actix_http_test::TestServer;
-/// use actix_web::{web, App, HttpResponse, Error};
-///
-/// async fn my_handler() -> Result<HttpResponse, Error> {
-///     Ok(HttpResponse::Ok().into())
-/// }
+/// use actix_http::{HttpService, Response, Error, StatusCode};
+/// use actix_http_test::test_server;
+/// use actix_service::{fn_service, map_config, ServiceFactoryExt as _};
 ///
 /// #[actix_rt::test]
+/// # async fn hidden_test() {}
 /// async fn test_example() {
-///     let mut srv = TestServer::start(
-///         || HttpService::new(
-///             App::new().service(
-///                 web::resource("/").to(my_handler))
-///         )
-///     );
+///     let srv = test_server(|| {
+///         HttpService::build()
+///             .h1(fn_service(|req| async move {
+///                 Ok::<_, Error>(Response::ok())
+///             }))
+///             .tcp()
+///             .map_err(|_| ())
+///     })
+///     .await;
 ///
 ///     let req = srv.get("/");
 ///     let response = req.send().await.unwrap();
-///     assert!(response.status().is_success());
+///
+///     assert_eq!(response.status(), StatusCode::OK);
 /// }
+/// # actix_rt::System::new().block_on(test_example());
 /// ```
-pub async fn test_server<F: ServiceFactory<TcpStream>>(factory: F) -> TestServer {
+pub async fn test_server<F: ServerServiceFactory<TcpStream>>(factory: F) -> TestServer {
     let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
     test_server_with_addr(tcp, factory).await
 }
-/// Start [`test server`](test_server()) on a concrete Address
-pub async fn test_server_with_addr<F: ServiceFactory<TcpStream>>(
+/// Start [`test server`](test_server()) on an existing address binding.
+pub async fn test_server_with_addr<F: ServerServiceFactory<TcpStream>>(
     tcp: net::TcpListener,
     factory: F,
 ) -> TestServer {
-    let (tx, rx) = mpsc::channel();
+    let (started_tx, started_rx) = std::sync::mpsc::channel();
+    let (thread_stop_tx, thread_stop_rx) = mpsc::channel(1);
     // run server in separate thread
     thread::spawn(move || {
-        let sys = System::new();
-        let local_addr = tcp.local_addr().unwrap();
+        System::new().block_on(async move {
+            let local_addr = tcp.local_addr().unwrap();
-        let srv = Server::build()
-            .listen("test", tcp, factory)?
- .workers(1) - .disable_signals(); + let srv = Server::build() + .workers(1) + .disable_signals() + .system_exit() + .listen("test", tcp, factory) + .expect("test server could not be created"); - sys.block_on(async { - srv.run(); - tx.send((System::current(), local_addr)).unwrap(); + let srv = srv.run(); + started_tx + .send((System::current(), srv.handle(), local_addr)) + .unwrap(); + + // drive server loop + srv.await.unwrap(); }); - sys.run() + // notify TestServer that server and system have shut down + // all thread managed resources should be dropped at this point + #[allow(clippy::let_underscore_future)] + let _ = thread_stop_tx.send(()); }); - let (system, addr) = rx.recv().unwrap(); + let (system, server, addr) = started_rx.recv().unwrap(); let client = { + #[cfg(feature = "openssl")] let connector = { - #[cfg(feature = "openssl")] - { - use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode}; + use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode}; - let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); - builder.set_verify(SslVerifyMode::NONE); - let _ = builder - .set_alpn_protos(b"\x02h2\x08http/1.1") - .map_err(|e| log::error!("Can not set alpn protocol: {:?}", e)); - Connector::new() - .conn_lifetime(time::Duration::from_secs(0)) - .timeout(time::Duration::from_millis(30000)) - .ssl(builder.build()) - } - #[cfg(not(feature = "openssl"))] - { - Connector::new() - .conn_lifetime(time::Duration::from_secs(0)) - .timeout(time::Duration::from_millis(30000)) - } + let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); + + builder.set_verify(SslVerifyMode::NONE); + let _ = builder + .set_alpn_protos(b"\x02h2\x08http/1.1") + .map_err(|e| log::error!("Can not set alpn protocol: {:?}", e)); + + Connector::new() + .conn_lifetime(Duration::from_secs(0)) + .timeout(Duration::from_millis(30000)) + .openssl(builder.build()) + }; + + #[cfg(not(feature = "openssl"))] + let connector = { + Connector::new() + .conn_lifetime(Duration::from_secs(0)) + .timeout(Duration::from_millis(30000)) }; Client::builder().connector(connector).finish() }; TestServer { - addr, + server, client, system, + addr, + thread_stop_rx, } } /// Test server controller pub struct TestServer { + server: actix_server::ServerHandle, + client: awc::Client, + system: actix_rt::System, addr: net::SocketAddr, - client: Client, - system: System, + thread_stop_rx: mpsc::Receiver<()>, } impl TestServer { @@ -258,15 +278,33 @@ impl TestServer { self.client.headers() } - /// Stop HTTP server - fn stop(&mut self) { + /// Stop HTTP server. + /// + /// Waits for spawned `Server` and `System` to (force) shutdown. 
+ pub async fn stop(&mut self) { + // signal server to stop + self.server.stop(false).await; + + // also signal system to stop + // though this is handled by `ServerBuilder::exit_system` too self.system.stop(); + + // wait for thread to be stopped but don't care about result + let _ = self.thread_stop_rx.recv().await; } } impl Drop for TestServer { fn drop(&mut self) { - self.stop() + // calls in this Drop impl should be enough to shut down the server, system, and thread + // without needing to await anything + + // signal server to stop + #[allow(clippy::let_underscore_future)] + let _ = self.server.stop(true); + + // signal system to stop + self.system.stop(); } } diff --git a/actix-http/CHANGES.md b/actix-http/CHANGES.md index ec7d8ee3b..85ba03100 100644 --- a/actix-http/CHANGES.md +++ b/actix-http/CHANGES.md @@ -1,33 +1,816 @@ # Changes -## Unreleased - 2021-xx-xx +## Unreleased + ### Added -* Alias `body::Body` as `body::AnyBody`. [#2215] -* `BoxAnyBody`: a boxed message body with boxed errors. [#2183] -* Re-export `http` crate's `Error` type as `error::HttpError`. [#2171] -* Re-export `StatusCode`, `Method`, `Version` and `Uri` at the crate root. [#2171] -* Re-export `ContentEncoding` and `ConnectionType` at the crate root. [#2171] -* `Response::into_body` that consumes response and returns body type. [#2201] -* `impl Default` for `Response`. [#2201] + +- Add `error::InvalidStatusCode` re-export. + +## 3.7.0 + +### Added + +- Add `rustls-0_23` crate feature +- Add `{h1::H1Service, h2::H2Service, HttpService}::rustls_0_23()` and `HttpService::rustls_0_23_with_config()` service constructors. ### Changed -* The `MessageBody` trait now has an associated `Error` type. [#2183] -* Places in `Response` where `ResponseBody` was received or returned now simply use `B`. [#2201] -* `header` mod is now public. [#2171] -* `uri` mod is now public. [#2171] -* Update `language-tags` to `0.3`. -* Reduce the level from `error` to `debug` for the log line that is emitted when a `500 Internal Server Error` is built using `HttpResponse::from_error`. [#2201] -* `ResponseBuilder::message_body` now returns a `Result`. [#2201] + +- Update `brotli` dependency to `6`. +- Minimum supported Rust version (MSRV) is now 1.72. + +## 3.6.0 + +### Added + +- Add `rustls-0_22` crate feature. +- Add `{h1::H1Service, h2::H2Service, HttpService}::rustls_0_22()` and `HttpService::rustls_0_22_with_config()` service constructors. +- Implement `From<&HeaderMap>` for `http::HeaderMap`. + +## 3.5.1 + +### Fixed + +- Prevent hang when returning zero-sized response bodies through compression layer. + +## 3.5.0 + +### Added + +- Implement `From` for `http::HeaderMap`. + +### Changed + +- Updated `zstd` dependency to `0.13`. + +### Fixed + +- Prevent compression of zero-sized response bodies. + +## 3.4.0 + +### Added + +- Add `rustls-0_20` crate feature. +- Add `{h1::H1Service, h2::H2Service, HttpService}::rustls_021()` and `HttpService::rustls_021_with_config()` service constructors. +- Add `body::to_bytes_limited()` function. +- Add `body::BodyLimitExceeded` error type. + +### Changed + +- Minimum supported Rust version (MSRV) is now 1.68 due to transitive `time` dependency. + +## 3.3.1 + +### Fixed + +- Use correct `http` version requirement to ensure support for const `HeaderName` definitions. + +## 3.3.0 + +### Added + +- Implement `MessageBody` for `Cow<'static, str>` and `Cow<'static, [u8]>`. [#2959] +- Implement `MessageBody` for `&mut B` where `B: MessageBody + Unpin`. 
[#2868] +- Implement `MessageBody` for `Pin` where `B::Target: MessageBody`. [#2868] +- Automatic h2c detection via new service finalizer `HttpService::tcp_auto_h2c()`. [#2957] +- `HeaderMap::retain()`. [#2955] +- Header name constants in `header` module. [#2956] [#2968] + - `CACHE_STATUS` + - `CDN_CACHE_CONTROL` + - `CROSS_ORIGIN_EMBEDDER_POLICY` + - `CROSS_ORIGIN_OPENER_POLICY` + - `PERMISSIONS_POLICY` + - `X_FORWARDED_FOR` + - `X_FORWARDED_HOST` + - `X_FORWARDED_PROTO` + +### Fixed + +- Fix non-empty body of HTTP/2 HEAD responses. [#2920] + +### Performance + +- Improve overall performance of operations on `Extensions`. [#2890] + +[#2959]: https://github.com/actix/actix-web/pull/2959 +[#2868]: https://github.com/actix/actix-web/pull/2868 +[#2890]: https://github.com/actix/actix-web/pull/2890 +[#2920]: https://github.com/actix/actix-web/pull/2920 +[#2957]: https://github.com/actix/actix-web/pull/2957 +[#2955]: https://github.com/actix/actix-web/pull/2955 +[#2956]: https://github.com/actix/actix-web/pull/2956 +[#2968]: https://github.com/actix/actix-web/pull/2968 + +## 3.2.2 + +### Changed + +- Minimum supported Rust version (MSRV) is now 1.59 due to transitive `time` dependency. + +### Fixed + +- Avoid possibility of dispatcher getting stuck while back-pressuring I/O. [#2369] + +[#2369]: https://github.com/actix/actix-web/pull/2369 + +## 3.2.1 + +### Fixed + +- Fix parsing ambiguity in Transfer-Encoding and Content-Length headers for HTTP/1.0 requests. [#2794] + +[#2794]: https://github.com/actix/actix-web/pull/2794 + +## 3.2.0 + +### Changed + +- Minimum supported Rust version (MSRV) is now 1.57 due to transitive `time` dependency. + +### Fixed + +- Websocket parser no longer throws endless overflow errors after receiving an oversized frame. [#2790] +- Retain previously set Vary headers when using compression encoder. [#2798] + +[#2790]: https://github.com/actix/actix-web/pull/2790 +[#2798]: https://github.com/actix/actix-web/pull/2798 + +## 3.1.0 + +### Changed + +- Minimum supported Rust version (MSRV) is now 1.56 due to transitive `hashbrown` dependency. + +### Fixed + +- Revert broken fix in [#2624] that caused erroneous 500 error responses. Temporarily re-introduces [#2357] bug. [#2779] + +[#2624]: https://github.com/actix/actix-web/pull/2624 +[#2357]: https://github.com/actix/actix-web/issues/2357 +[#2779]: https://github.com/actix/actix-web/pull/2779 + +## 3.0.4 + +### Fixed + +- Document on docs.rs with `ws` feature enabled. + +## 3.0.3 + +### Fixed + +- Allow spaces between header name and colon when parsing responses. [#2684] + +[#2684]: https://github.com/actix/actix-web/pull/2684 + +## 3.0.2 + +### Fixed + +- Fix encoding camel-case header names with more than one hyphen. [#2683] + +[#2683]: https://github.com/actix/actix-web/pull/2683 + +## 3.0.1 + +- Fix panic in H1 dispatcher when pipelining is used with keep-alive. [#2678] + +[#2678]: https://github.com/actix/actix-web/issues/2678 + +## 3.0.0 + +### Dependencies + +- Updated `actix-*` to Tokio v1-based versions. [#1813] +- Updated `bytes` to `1.0`. [#1813] +- Updated `h2` to `0.3`. [#1813] +- Updated `rustls` to `0.20.0`. [#2414] +- Updated `language-tags` to `0.3`. +- Updated `tokio` to `1`. + +### Added + +- Crate Features: + - `ws`; disabled by default. [#2618] + - `http2`; disabled by default. [#2618] + - `compress-brotli`; disabled by default. [#2618] + - `compress-gzip`; disabled by default. [#2618] + - `compress-zstd`; disabled by default. 
[#2618] +- Functions: + - `body::to_bytes` for async collecting message body into Bytes. [#2158] +- Traits: + - `TryIntoHeaderPair`; allows using typed and untyped headers in the same methods. [#1869] +- Types: + - `body::BoxBody`; a boxed message body with boxed errors. [#2183] + - `body::EitherBody` enum. [#2468] + - `body::None` struct. [#2468] + - Re-export `http` crate's `Error` type as `error::HttpError`. [#2171] +- Variants: + - `ContentEncoding::Zstd` along with . [#2244] + - `Protocol::Http3` for future compatibility and also mark `#[non_exhaustive]`. [00ba8d55] +- Methods: + - `ContentEncoding::to_header_value()`. [#2501] + - `header::QualityItem::{max, min}()`. [#2486] + - `header::QualityItem::zero()` that uses `Quality::ZERO`. [#2501] + - `HeaderMap::drain()` as an efficient draining iterator. [#1964] + - `HeaderMap::len_keys()` has the behavior of the old `len` method. [#1964] + - `MessageBody::boxed` trait method for wrapping boxing types efficiently. [#2520] + - `MessageBody::try_into_bytes` trait method, with default implementation, for optimizations on body types that complete in exactly one poll. [#2522] + - `Request::conn_data()`. [#2491] + - `Request::take_conn_data()`. [#2491] + - `Request::take_req_data()`. [#2487] + - `Response::{ok, bad_request, not_found, internal_server_error}()`. [#2159] + - `Response::into_body()` that consumes response and returns body type. [#2201] + - `Response::map_into_boxed_body()`. [#2468] + - `ResponseBuilder::append_header()` method which allows using typed and untyped headers. [#1869] + - `ResponseBuilder::insert_header()` method which allows using typed and untyped headers. [#1869] + - `ResponseHead::set_camel_case_headers()`. [#2587] + - `TestRequest::insert_header()` method which allows using typed and untyped headers. [#1869] +- Implementations: + - Implement `Clone for ws::HandshakeError`. [#2468] + - Implement `Clone` for `body::AnyBody where S: Clone`. [#2448] + - Implement `Clone` for `RequestHead`. [#2487] + - Implement `Clone` for `ResponseHead`. [#2585] + - Implement `Copy` for `QualityItem where T: Copy`. [#2501] + - Implement `Default` for `ContentEncoding`. [#1912] + - Implement `Default` for `HttpServiceBuilder`. [#2611] + - Implement `Default` for `KeepAlive`. [#2611] + - Implement `Default` for `Response`. [#2201] + - Implement `Default` for `ws::Codec`. [#1920] + - Implement `Display` for `header::Quality`. [#2486] + - Implement `Eq` for `header::ContentEncoding`. [#2501] + - Implement `ExactSizeIterator` and `FusedIterator` for all `HeaderMap` iterators. [#2470] + - Implement `From` for `KeepAlive`. [#2611] + - Implement `From>` for `KeepAlive`. [#2611] + - Implement `From>` for `Response>`. [#2625] + - Implement `FromStr` for `ContentEncoding`. [#1912] + - Implement `Header` for `ContentEncoding`. [#1912] + - Implement `IntoHeaderValue` for `ContentEncoding`. [#1912] + - Implement `IntoIterator` for `HeaderMap`. [#1964] + - Implement `MessageBody` for `bytestring::ByteString`. [#2468] + - Implement `MessageBody` for `Pin> where T: MessageBody`. [#2152] +- Misc: + - Re-export `StatusCode`, `Method`, `Version` and `Uri` at the crate root. [#2171] + - Re-export `ContentEncoding` and `ConnectionType` at the crate root. [#2171] + - `Quality::ZERO` associated constant equivalent to `q=0`. [#2501] + - `header::Quality::{MAX, MIN}` associated constants equivalent to `q=1` and `q=0.001`, respectively. [#2486] + - Timeout for canceling HTTP/2 server side connection handshake. 
Configurable with `ServiceConfig::client_timeout`; defaults to 5 seconds. [#2483] + - `#[must_use]` for `ws::Codec` to prevent subtle bugs. [#1920] + +### Changed + +- Traits: + - Rename `IntoHeaderValue => TryIntoHeaderValue`. [#2510] + - `MessageBody` now has an associated `Error` type. [#2183] +- Types: + - `Protocol` enum is now marked `#[non_exhaustive]`. + - `error::DispatcherError` enum is now marked `#[non_exhaustive]`. [#2624] + - `ContentEncoding` is now marked `#[non_exhaustive]`. [#2377] + - Error enums are marked `#[non_exhaustive]`. [#2161] + - Rename `PayloadStream` to `BoxedPayloadStream`. [#2545] + - The body type parameter of `Response` no longer has a default. [#2152] +- Enum Variants: + - Rename `ContentEncoding::{Br => Brotli}`. [#2501] + - `Payload` inner fields are now named. [#2545] + - `ws::Message::Text` now contains a `bytestring::ByteString`. [#1864] +- Methods: + - Rename `ServiceConfig::{client_timer_expire => client_request_deadline}`. [#2611] + - Rename `ServiceConfig::{client_disconnect_timer => client_disconnect_deadline}`. [#2611] + - Rename `h1::Codec::{keepalive => keep_alive}`. [#2611] + - Rename `h1::Codec::{keepalive_enabled => keep_alive_enabled}`. [#2611] + - Rename `h1::ClientCodec::{keepalive => keep_alive}`. [#2611] + - Rename `h1::ClientPayloadCodec::{keepalive => keep_alive}`. [#2611] + - Rename `header::EntityTag::{weak => new_weak, strong => new_strong}`. [#2565] + - Rename `TryIntoHeaderValue::{try_into => try_into_value}` to avoid ambiguity with std `TryInto` trait. [#1894] + - Deadline methods in `ServiceConfig` now return `std::time::Instant`s instead of Tokio's wrapper type. [#2611] + - Places in `Response` where `ResponseBody` was received or returned now simply use `B`. [#2201] + - `encoding::Encoder::response` now returns `AnyBody>`. [#2448] + - `Extensions::insert` returns replaced item. [#1904] + - `HeaderMap::get_all` now returns a `std::slice::Iter`. [#2527] + - `HeaderMap::insert` now returns iterator of removed values. [#1964] + - `HeaderMap::len` now returns number of values instead of number of keys. [#1964] + - `HeaderMap::remove` now returns iterator of removed values. [#1964] + - `ResponseBuilder::body(B)` now returns `Response>`. [#2468] + - `ResponseBuilder::content_type` now takes an `impl TryIntoHeaderValue` to support using typed `mime` types. [#1894] + - `ResponseBuilder::finish()` now returns `Response>`. [#2468] + - `ResponseBuilder::json` now takes `impl Serialize`. [#2052] + - `ResponseBuilder::message_body` now returns a `Result`. [#2201]∑ + - `ServiceConfig::keep_alive` now returns a `KeepAlive`. [#2611] + - `ws::hash_key` now returns array. [#2035] +- Trait Implementations: + - Implementation of `Stream` for `Payload` no longer requires the `Stream` variant be `Unpin`. [#2545] + - Implementation of `Future` for `h1::SendResponse` no longer requires the body type be `Unpin`. [#2545] + - Implementation of `Stream` for `encoding::Decoder` no longer requires the stream type be `Unpin`. [#2545] + - Implementation of `From` for error types now return a `Response`. [#2468] +- Misc: + - `header` module is now public. [#2171] + - `uri` module is now public. [#2171] + - Request-local data container is no longer part of a `RequestHead`. Instead it is a distinct part of a `Request`. [#2487] + - All error trait bounds in server service builders have changed from `Into` to `Into>`. [#2253] + - All error trait bounds in message body and stream impls changed from `Into` to `Into>`. 
[#2253] + - Guarantee ordering of `header::GetAll` iterator to be same as insertion order. [#2467] + - Connection data set through the `on_connect_ext` callbacks is now accessible only from the new `Request::conn_data()` method. [#2491] + - Brotli (de)compression support is now provided by the `brotli` crate. [#2538] + - Minimum supported Rust version (MSRV) is now 1.54. + +### Fixed + +- A `Vary` header is now correctly sent along with compressed content. [#2501] +- HTTP/1.1 dispatcher correctly uses client request timeout. [#2611] +- Fixed issue where handlers that took payload but then dropped without reading it to EOF it would cause keep-alive connections to become stuck. [#2624] +- `ContentEncoding`'s `Identity` variant can now be parsed from a string. [#2501] +- `HttpServer::{listen_rustls(), bind_rustls()}` now honor the ALPN protocols in the configuration parameter. [#2226] +- Remove unnecessary `Into` bound on `Encoder` body types. [#2375] +- Remove unnecessary `Unpin` bound on `ResponseBuilder::streaming`. [#2253] +- `BodyStream` and `SizedStream` are no longer restricted to `Unpin` types. [#2152] +- Fixed slice creation pointing to potential uninitialized data on h1 encoder. [#2364] +- Fixed quality parse error in Accept-Encoding header. [#2344] ### Removed -* Stop re-exporting `http` crate's `HeaderMap` types in addition to ours. [#2171] -* Down-casting for `MessageBody` types. [#2183] -* `error::Result` alias. [#2201] -* Error field from `Response` and `Response::error`. [#2205] -* `impl Future` for `Response`. [#2201] -* `Response::take_body` and old `Response::into_body` method that casted body type. [#2201] -* `InternalError` and all the error types it constructed. [#2215] -* Conversion (`impl Into`) of `Response` and `ResponseBuilder` to `Error`. [#2215] + +- Crate Features: + - `compress` feature. [#2065] + - `cookies` feature. [#2065] + - `trust-dns` feature. [#2425] + - `actors` optional feature and trait implementation for `actix` types. [#1969] +- Functions: + - `header::qitem` helper. Replaced with `header::QualityItem::max`. [#2486] +- Types: + - `body::Body`; replaced with `EitherBody` and `BoxBody`. [#2468] + - `body::ResponseBody`. [#2446] + - `ConnectError::SslHandshakeError` and re-export of `HandshakeError`. Due to the removal of this type from `tokio-openssl` crate. OpenSSL handshake error now returns `ConnectError::SslError`. [#1813] + - `error::Canceled` re-export. [#1994] + - `error::Result` type alias. [#2201] + - `error::BlockingError` [#2660] + - `InternalError` and all the error types it constructed were moved up to `actix-web`. [#2215] + - Typed HTTP headers; they have moved up to `actix-web`. [2094] + - Re-export of `http` crate's `HeaderMap` types in addition to ours. [#2171] +- Enum Variants: + - `body::BodySize::Empty`; an empty body can now only be represented as a `Sized(0)` variant. [#2446] + - `ContentEncoding::Auto`. [#2501] + - `EncoderError::Boxed`. [#2446] +- Methods: + - `ContentEncoding::is_compression()`. [#2501] + - `h1::Payload::readany()`. [#2545] + - `HttpMessage::cookie[s]()` trait methods. [#2065] + - `HttpServiceBuilder::new()`; use `default` instead. [#2611] + - `on_connect` (previously deprecated) methods have been removed; use `on_connect_ext`. [#1857] + - `Response::build_from()`. [#2159] + - `Response::error()` [#2205] + - `Response::take_body()` and old `Response::into_body()` method that casted body type. [#2201] + - `Response`'s status code builders. 
[#2159] + - `ResponseBuilder::{if_true, if_some}()` (previously deprecated). [#2148] + - `ResponseBuilder::{set, set_header}()`; use `ResponseBuilder::insert_header()`. [#1869] + - `ResponseBuilder::extensions[_mut]()`. [#2585] + - `ResponseBuilder::header()`; use `ResponseBuilder::append_header()`. [#1869] + - `ResponseBuilder::json()`. [#2148] + - `ResponseBuilder::json2()`. [#1903] + - `ResponseBuilder::streaming()`. [#2468] + - `ResponseHead::extensions[_mut]()`. [#2585] + - `ServiceConfig::{client_timer, keep_alive_timer}()`. [#2611] + - `TestRequest::with_hdr()`; use `TestRequest::default().insert_header()`. [#1869] + - `TestRequest::with_header()`; use `TestRequest::default().insert_header()`. [#1869] +- Trait implementations: + - Implementation of `Copy` for `ws::Codec`. [#1920] + - Implementation of `From> for KeepAlive`; use `Duration`s instead. [#2611] + - Implementation of `From` for `Body`. [#2148] + - Implementation of `From for KeepAlive`; use `Duration`s instead. [#2611] + - Implementation of `Future` for `Response`. [#2201] + - Implementation of `Future` for `ResponseBuilder`. [#2468] + - Implementation of `Into` for `Response`. [#2215] + - Implementation of `Into` for `ResponseBuilder`. [#2215] + - Implementation of `ResponseError` for `actix_utils::timeout::TimeoutError`. [#2127] + - Implementation of `ResponseError` for `CookieParseError`. [#2065] + - Implementation of `TryFrom` for `header::Quality`. [#2486] +- Misc: + - `http` module; most everything it contained is exported at the crate root. [#2488] + - `cookies` module (re-export). [#2065] + - `client` module. Connector types now live in `awc`. [#2425] + - `error` field from `Response`. [#2205] + - `downcast` and `downcast_get_type_id` macros. [#2291] + - Down-casting for `MessageBody` types; use standard `Any` trait. 
[#2183] + +[#1813]: https://github.com/actix/actix-web/pull/1813 +[#1845]: https://github.com/actix/actix-web/pull/1845 +[#1857]: https://github.com/actix/actix-web/pull/1857 +[#1864]: https://github.com/actix/actix-web/pull/1864 +[#1869]: https://github.com/actix/actix-web/pull/1869 +[#1878]: https://github.com/actix/actix-web/pull/1878 +[#1894]: https://github.com/actix/actix-web/pull/1894 +[#1903]: https://github.com/actix/actix-web/pull/1903 +[#1904]: https://github.com/actix/actix-web/pull/1904 +[#1912]: https://github.com/actix/actix-web/pull/1912 +[#1920]: https://github.com/actix/actix-web/pull/1920 +[#1964]: https://github.com/actix/actix-web/pull/1964 +[#1969]: https://github.com/actix/actix-web/pull/1969 +[#1981]: https://github.com/actix/actix-web/pull/1981 +[#1994]: https://github.com/actix/actix-web/pull/1994 +[#2035]: https://github.com/actix/actix-web/pull/2035 +[#2052]: https://github.com/actix/actix-web/pull/2052 +[#2065]: https://github.com/actix/actix-web/pull/2065 +[#2094]: https://github.com/actix/actix-web/pull/2094 +[#2127]: https://github.com/actix/actix-web/pull/2127 +[#2148]: https://github.com/actix/actix-web/pull/2148 +[#2152]: https://github.com/actix/actix-web/pull/2152 +[#2158]: https://github.com/actix/actix-web/pull/2158 +[#2159]: https://github.com/actix/actix-web/pull/2159 +[#2161]: https://github.com/actix/actix-web/pull/2161 +[#2171]: https://github.com/actix/actix-web/pull/2171 +[#2183]: https://github.com/actix/actix-web/pull/2183 +[#2196]: https://github.com/actix/actix-web/pull/2196 +[#2201]: https://github.com/actix/actix-web/pull/2201 +[#2205]: https://github.com/actix/actix-web/pull/2205 +[#2215]: https://github.com/actix/actix-web/pull/2215 +[#2244]: https://github.com/actix/actix-web/pull/2244 +[#2250]: https://github.com/actix/actix-web/pull/2250 +[#2253]: https://github.com/actix/actix-web/pull/2253 +[#2291]: https://github.com/actix/actix-web/pull/2291 +[#2344]: https://github.com/actix/actix-web/pull/2344 +[#2364]: https://github.com/actix/actix-web/pull/2364 +[#2375]: https://github.com/actix/actix-web/pull/2375 +[#2377]: https://github.com/actix/actix-web/pull/2377 +[#2414]: https://github.com/actix/actix-web/pull/2414 +[#2425]: https://github.com/actix/actix-web/pull/2425 +[#2442]: https://github.com/actix/actix-web/pull/2442 +[#2446]: https://github.com/actix/actix-web/pull/2446 +[#2448]: https://github.com/actix/actix-web/pull/2448 +[#2456]: https://github.com/actix/actix-web/pull/2456 +[#2467]: https://github.com/actix/actix-web/pull/2467 +[#2468]: https://github.com/actix/actix-web/pull/2468 +[#2470]: https://github.com/actix/actix-web/pull/2470 +[#2474]: https://github.com/actix/actix-web/pull/2474 +[#2483]: https://github.com/actix/actix-web/pull/2483 +[#2486]: https://github.com/actix/actix-web/pull/2486 +[#2487]: https://github.com/actix/actix-web/pull/2487 +[#2488]: https://github.com/actix/actix-web/pull/2488 +[#2491]: https://github.com/actix/actix-web/pull/2491 +[#2497]: https://github.com/actix/actix-web/pull/2497 +[#2501]: https://github.com/actix/actix-web/pull/2501 +[#2510]: https://github.com/actix/actix-web/pull/2510 +[#2520]: https://github.com/actix/actix-web/pull/2520 +[#2522]: https://github.com/actix/actix-web/pull/2522 +[#2527]: https://github.com/actix/actix-web/pull/2527 +[#2538]: https://github.com/actix/actix-web/pull/2538 +[#2545]: https://github.com/actix/actix-web/pull/2545 +[#2565]: https://github.com/actix/actix-web/pull/2565 +[#2585]: https://github.com/actix/actix-web/pull/2585 +[#2587]: 
https://github.com/actix/actix-web/pull/2587 +[#2611]: https://github.com/actix/actix-web/pull/2611 +[#2618]: https://github.com/actix/actix-web/pull/2618 +[#2624]: https://github.com/actix/actix-web/pull/2624 +[#2625]: https://github.com/actix/actix-web/pull/2625 +[#2660]: https://github.com/actix/actix-web/pull/2660 +[00ba8d55]: https://github.com/actix/actix-web/commit/00ba8d55492284581695d824648590715a8bd386 + +
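To ground the 3.0.0 list above, here is a minimal, illustrative sketch (not part of the diff) of how a few of the added APIs fit together: `Response::ok()` now yields a `Response<BoxBody>`, any `MessageBody` can be type-erased with `.boxed()`, and `body::to_bytes()` collects a body asynchronously. It assumes `actix-http` 3.x with `actix-rt` 2.x available as a dev-dependency; the literal body and assertions are made up for demonstration.

```rust
use actix_http::{
    body::{self, BoxBody, MessageBody as _},
    Response, StatusCode,
};

fn main() {
    actix_rt::System::new().block_on(async {
        // `Response::{ok, not_found, ...}` build a `Response<BoxBody>` directly.
        let res: Response<BoxBody> = Response::ok();
        assert_eq!(res.status(), StatusCode::OK);

        // `MessageBody::boxed()` type-erases a body; `body::to_bytes()` collects it.
        let boxed = "hello world".boxed();
        let bytes = body::to_bytes(boxed).await.unwrap();
        assert_eq!(bytes, "hello world");
    });
}
```

Note that `to_bytes()` surfaces the body's own `MessageBody::Error` on failure, which for `BoxBody` is a boxed `std::error::Error`, in line with the relaxed error bounds described above.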
+3.0.0 Pre-Releases + +## 3.0.0-rc.4 + +### Fixed + +- Fix h1 dispatcher panic. [1ce58ecb] + +[1ce58ecb]: https://github.com/actix/actix-web/commit/1ce58ecb305c60e51db06e6c913b7a1344e229ca + +## 3.0.0-rc.3 + +- No significant changes since `3.0.0-rc.2`. + +## 3.0.0-rc.2 + +### Added + +- Implement `From>` for `Response>`. [#2625] + +### Changed + +- `error::DispatcherError` enum is now marked `#[non_exhaustive]`. [#2624] + +### Fixed + +- Issue where handlers that took payload but then dropped without reading it to EOF it would cause keep-alive connections to become stuck. [#2624] + +[#2624]: https://github.com/actix/actix-web/pull/2624 +[#2625]: https://github.com/actix/actix-web/pull/2625 + +## 3.0.0-rc.1 + +### Added + +- Implement `Default` for `KeepAlive`. [#2611] +- Implement `From` for `KeepAlive`. [#2611] +- Implement `From>` for `KeepAlive`. [#2611] +- Implement `Default` for `HttpServiceBuilder`. [#2611] +- Crate `ws` feature flag, disabled by default. [#2618] +- Crate `http2` feature flag, disabled by default. [#2618] + +### Changed + +- Rename `ServiceConfig::{client_timer_expire => client_request_deadline}`. [#2611] +- Rename `ServiceConfig::{client_disconnect_timer => client_disconnect_deadline}`. [#2611] +- Deadline methods in `ServiceConfig` now return `std::time::Instant`s instead of Tokio's wrapper type. [#2611] +- Rename `h1::Codec::{keepalive => keep_alive}`. [#2611] +- Rename `h1::Codec::{keepalive_enabled => keep_alive_enabled}`. [#2611] +- Rename `h1::ClientCodec::{keepalive => keep_alive}`. [#2611] +- Rename `h1::ClientPayloadCodec::{keepalive => keep_alive}`. [#2611] +- `ServiceConfig::keep_alive` now returns a `KeepAlive`. [#2611] + +### Fixed + +- HTTP/1.1 dispatcher correctly uses client request timeout. [#2611] + +### Removed + +- `ServiceConfig::{client_timer, keep_alive_timer}`. [#2611] +- `impl From for KeepAlive`; use `Duration`s instead. [#2611] +- `impl From> for KeepAlive`; use `Duration`s instead. [#2611] +- `HttpServiceBuilder::new`; use `default` instead. [#2611] + +[#2611]: https://github.com/actix/actix-web/pull/2611 +[#2618]: https://github.com/actix/actix-web/pull/2618 + +## 3.0.0-beta.19 + +### Added + +- Response headers can be sent as camel case using `res.head_mut().set_camel_case_headers(true)`. [#2587] +- `ResponseHead` now implements `Clone`. [#2585] + +### Changed + +- Brotli (de)compression support is now provided by the `brotli` crate. [#2538] + +### Removed + +- `ResponseHead::extensions[_mut]()`. [#2585] +- `ResponseBuilder::extensions[_mut]()`. [#2585] + +[#2538]: https://github.com/actix/actix-web/pull/2538 +[#2585]: https://github.com/actix/actix-web/pull/2585 +[#2587]: https://github.com/actix/actix-web/pull/2587 + +## 3.0.0-beta.18 + +### Added + +- `impl Eq` for `header::ContentEncoding`. [#2501] +- `impl Copy` for `QualityItem` where `T: Copy`. [#2501] +- `Quality::ZERO` equivalent to `q=0`. [#2501] +- `QualityItem::zero` that uses `Quality::ZERO`. [#2501] +- `ContentEncoding::to_header_value()`. [#2501] + +### Changed + +- `Quality::MIN` is now the smallest non-zero value. [#2501] +- `QualityItem::min` semantics changed with `QualityItem::MIN`. [#2501] +- Rename `ContentEncoding::{Br => Brotli}`. [#2501] +- Rename `header::EntityTag::{weak => new_weak, strong => new_strong}`. [#2565] +- Minimum supported Rust version (MSRV) is now 1.54. + +### Fixed + +- `ContentEncoding::Identity` can now be parsed from a string. [#2501] +- A `Vary` header is now correctly sent along with compressed content. 
[#2501] + +### Removed + +- `ContentEncoding::Auto` variant. [#2501] +- `ContentEncoding::is_compression()`. [#2501] + +[#2501]: https://github.com/actix/actix-web/pull/2501 +[#2565]: https://github.com/actix/actix-web/pull/2565 + +## 3.0.0-beta.17 + +### Changed + +- `HeaderMap::get_all` now returns a `std::slice::Iter`. [#2527] +- `Payload` inner fields are now named. [#2545] +- `impl Stream` for `Payload` no longer requires the `Stream` variant be `Unpin`. [#2545] +- `impl Future` for `h1::SendResponse` no longer requires the body type be `Unpin`. [#2545] +- `impl Stream` for `encoding::Decoder` no longer requires the stream type be `Unpin`. [#2545] +- Rename `PayloadStream` to `BoxedPayloadStream`. [#2545] + +### Removed + +- `h1::Payload::readany`. [#2545] + +[#2527]: https://github.com/actix/actix-web/pull/2527 +[#2545]: https://github.com/actix/actix-web/pull/2545 + +## 3.0.0-beta.16 + +### Added + +- New method on `MessageBody` trait, `try_into_bytes`, with default implementation, for optimizations on body types that complete in exactly one poll. Replaces `is_complete_body` and `take_complete_body`. [#2522] + +### Changed + +- Rename trait `IntoHeaderPair => TryIntoHeaderPair`. [#2510] +- Rename `TryIntoHeaderPair::{try_into_header_pair => try_into_pair}`. [#2510] +- Rename trait `IntoHeaderValue => TryIntoHeaderValue`. [#2510] + +### Removed + +- `MessageBody::{is_complete_body,take_complete_body}`. [#2522] + +[#2510]: https://github.com/actix/actix-web/pull/2510 +[#2522]: https://github.com/actix/actix-web/pull/2522 + +## 3.0.0-beta.15 + +### Added + +- Add timeout for canceling HTTP/2 server side connection handshake. Default to 5 seconds. [#2483] +- HTTP/2 handshake timeout can be configured with `ServiceConfig::client_timeout`. [#2483] +- `Response::map_into_boxed_body`. [#2468] +- `body::EitherBody` enum. [#2468] +- `body::None` struct. [#2468] +- Impl `MessageBody` for `bytestring::ByteString`. [#2468] +- `impl Clone for ws::HandshakeError`. [#2468] +- `#[must_use]` for `ws::Codec` to prevent subtle bugs. [#1920] +- `impl Default ` for `ws::Codec`. [#1920] +- `header::QualityItem::{max, min}`. [#2486] +- `header::Quality::{MAX, MIN}`. [#2486] +- `impl Display` for `header::Quality`. [#2486] +- Connection data set through the `on_connect_ext` callbacks is now accessible only from the new `Request::conn_data()` method. [#2491] +- `Request::take_conn_data()`. [#2491] +- `Request::take_req_data()`. [#2487] +- `impl Clone` for `RequestHead`. [#2487] +- New methods on `MessageBody` trait, `is_complete_body` and `take_complete_body`, both with default implementations, for optimizations on body types that are done in exactly one poll/chunk. [#2497] +- New `boxed` method on `MessageBody` trait for wrapping body type. [#2520] + +### Changed + +- Rename `body::BoxBody::{from_body => new}`. [#2468] +- Body type for `Responses` returned from `Response::{new, ok, etc...}` is now `BoxBody`. [#2468] +- The `Error` associated type on `MessageBody` type now requires `impl Error` (or similar). [#2468] +- Error types using in service builders now require `Into>`. [#2468] +- `From` implementations on error types now return a `Response`. [#2468] +- `ResponseBuilder::body(B)` now returns `Response>`. [#2468] +- `ResponseBuilder::finish()` now returns `Response>`. [#2468] + +### Removed + +- `ResponseBuilder::streaming`. [#2468] +- `impl Future` for `ResponseBuilder`. [#2468] +- Remove unnecessary `MessageBody` bound on types passed to `body::AnyBody::new`. [#2468] +- Move `body::AnyBody` to `awc`. 
Replaced with `EitherBody` and `BoxBody`. [#2468] +- `impl Copy` for `ws::Codec`. [#1920] +- `header::qitem` helper. Replaced with `header::QualityItem::max`. [#2486] +- `impl TryFrom` for `header::Quality`. [#2486] +- `http` module. Most everything it contained is exported at the crate root. [#2488] + +[#2483]: https://github.com/actix/actix-web/pull/2483 +[#2468]: https://github.com/actix/actix-web/pull/2468 +[#1920]: https://github.com/actix/actix-web/pull/1920 +[#2486]: https://github.com/actix/actix-web/pull/2486 +[#2487]: https://github.com/actix/actix-web/pull/2487 +[#2488]: https://github.com/actix/actix-web/pull/2488 +[#2491]: https://github.com/actix/actix-web/pull/2491 +[#2497]: https://github.com/actix/actix-web/pull/2497 +[#2520]: https://github.com/actix/actix-web/pull/2520 + +## 3.0.0-beta.14 + +### Changed + +- Guarantee ordering of `header::GetAll` iterator to be same as insertion order. [#2467] +- Expose `header::map` module. [#2467] +- Implement `ExactSizeIterator` and `FusedIterator` for all `HeaderMap` iterators. [#2470] +- Update `actix-tls` to `3.0.0-rc.1`. [#2474] + +[#2467]: https://github.com/actix/actix-web/pull/2467 +[#2470]: https://github.com/actix/actix-web/pull/2470 +[#2474]: https://github.com/actix/actix-web/pull/2474 + +## 3.0.0-beta.13 + +### Added + +- `body::AnyBody::empty` for quickly creating an empty body. [#2446] +- `body::AnyBody::none` for quickly creating a "none" body. [#2456] +- `impl Clone` for `body::AnyBody where S: Clone`. [#2448] +- `body::AnyBody::into_boxed` for quickly converting to a type-erased, boxed body type. [#2448] + +### Changed + +- Rename `body::AnyBody::{Message => Body}`. [#2446] +- Rename `body::AnyBody::{from_message => new_boxed}`. [#2448] +- Rename `body::AnyBody::{from_slice => copy_from_slice}`. [#2448] +- Rename `body::{BoxAnyBody => BoxBody}`. [#2448] +- Change representation of `AnyBody` to include a type parameter in `Body` variant. Defaults to `BoxBody`. [#2448] +- `Encoder::response` now returns `AnyBody>`. [#2448] + +### Removed + +- `body::AnyBody::Empty`; an empty body can now only be represented as a zero-length `Bytes` variant. [#2446] +- `body::BodySize::Empty`; an empty body can now only be represented as a `Sized(0)` variant. [#2446] +- `EncoderError::Boxed`; it is no longer required. [#2446] +- `body::ResponseBody`; is function is replaced by the new `body::AnyBody` enum. [#2446] + +[#2446]: https://github.com/actix/actix-web/pull/2446 +[#2448]: https://github.com/actix/actix-web/pull/2448 +[#2456]: https://github.com/actix/actix-web/pull/2456 + +## 3.0.0-beta.12 + +### Changed + +- Update `actix-server` to `2.0.0-beta.9`. [#2442] + +### Removed + +- `client` module. [#2425] +- `trust-dns` feature. [#2425] + +[#2425]: https://github.com/actix/actix-web/pull/2425 +[#2442]: https://github.com/actix/actix-web/pull/2442 + +## 3.0.0-beta.11 + +### Changed + +- Updated rustls to v0.20. [#2414] +- Minimum supported Rust version (MSRV) is now 1.52. + +[#2414]: https://github.com/actix/actix-web/pull/2414 + +## 3.0.0-beta.10 + +### Changed + +- `ContentEncoding` is now marked `#[non_exhaustive]`. [#2377] +- Minimum supported Rust version (MSRV) is now 1.51. + +### Fixed + +- Remove slice creation pointing to potential uninitialized data on h1 encoder. [#2364] +- Remove `Into` bound on `Encoder` body types. [#2375] +- Fix quality parse error in Accept-Encoding header. 
[#2344] + +[#2364]: https://github.com/actix/actix-web/pull/2364 +[#2375]: https://github.com/actix/actix-web/pull/2375 +[#2344]: https://github.com/actix/actix-web/pull/2344 +[#2377]: https://github.com/actix/actix-web/pull/2377 + +## 3.0.0-beta.9 + +### Fixed + +- Potential HTTP request smuggling vulnerabilities. [RUSTSEC-2021-0081](https://github.com/rustsec/advisory-db/pull/977) + +## 3.0.0-beta.8 + +### Changed + +- Change compression algorithm features flags. [#2250] + +### Removed + +- `downcast` and `downcast_get_type_id` macros. [#2291] + +[#2291]: https://github.com/actix/actix-web/pull/2291 +[#2250]: https://github.com/actix/actix-web/pull/2250 + +## 3.0.0-beta.7 + +### Added + +- Alias `body::Body` as `body::AnyBody`. [#2215] +- `BoxAnyBody`: a boxed message body with boxed errors. [#2183] +- Re-export `http` crate's `Error` type as `error::HttpError`. [#2171] +- Re-export `StatusCode`, `Method`, `Version` and `Uri` at the crate root. [#2171] +- Re-export `ContentEncoding` and `ConnectionType` at the crate root. [#2171] +- `Response::into_body` that consumes response and returns body type. [#2201] +- `impl Default` for `Response`. [#2201] +- Add zstd support for `ContentEncoding`. [#2244] + +### Changed + +- The `MessageBody` trait now has an associated `Error` type. [#2183] +- All error trait bounds in server service builders have changed from `Into` to `Into>`. [#2253] +- All error trait bounds in message body and stream impls changed from `Into` to `Into>`. [#2253] +- Places in `Response` where `ResponseBody` was received or returned now simply use `B`. [#2201] +- `header` mod is now public. [#2171] +- `uri` mod is now public. [#2171] +- Update `language-tags` to `0.3`. +- Reduce the level from `error` to `debug` for the log line that is emitted when a `500 Internal Server Error` is built using `HttpResponse::from_error`. [#2201] +- `ResponseBuilder::message_body` now returns a `Result`. [#2201] +- Remove `Unpin` bound on `ResponseBuilder::streaming`. [#2253] +- `HttpServer::{listen_rustls(), bind_rustls()}` now honor the ALPN protocols in the configuration parameter. [#2226] + +### Removed + +- Stop re-exporting `http` crate's `HeaderMap` types in addition to ours. [#2171] +- Down-casting for `MessageBody` types. [#2183] +- `error::Result` alias. [#2201] +- Error field from `Response` and `Response::error`. [#2205] +- `impl Future` for `Response`. [#2201] +- `Response::take_body` and old `Response::into_body` method that casted body type. [#2201] +- `InternalError` and all the error types it constructed. [#2215] +- Conversion (`impl Into`) of `Response` and `ResponseBuilder` to `Error`. [#2215] [#2171]: https://github.com/actix/actix-web/pull/2171 [#2183]: https://github.com/actix/actix-web/pull/2183 @@ -35,31 +818,36 @@ [#2201]: https://github.com/actix/actix-web/pull/2201 [#2205]: https://github.com/actix/actix-web/pull/2205 [#2215]: https://github.com/actix/actix-web/pull/2215 +[#2253]: https://github.com/actix/actix-web/pull/2253 +[#2244]: https://github.com/actix/actix-web/pull/2244 +## 3.0.0-beta.6 -## 3.0.0-beta.6 - 2021-04-17 ### Added -* `impl MessageBody for Pin>`. [#2152] -* `Response::{ok, bad_request, not_found, internal_server_error}`. [#2159] -* Helper `body::to_bytes` for async collecting message body into Bytes. [#2158] -### Changes -* The type parameter of `Response` no longer has a default. [#2152] -* The `Message` variant of `body::Body` is now `Pin>`. [#2152] -* `BodyStream` and `SizedStream` are no longer restricted to Unpin types. 
[#2152] -* Error enum types are marked `#[non_exhaustive]`. [#2161] +- `impl MessageBody for Pin>`. [#2152] +- `Response::{ok, bad_request, not_found, internal_server_error}`. [#2159] +- Helper `body::to_bytes` for async collecting message body into Bytes. [#2158] + +### Changed + +- The type parameter of `Response` no longer has a default. [#2152] +- The `Message` variant of `body::Body` is now `Pin>`. [#2152] +- `BodyStream` and `SizedStream` are no longer restricted to Unpin types. [#2152] +- Error enum types are marked `#[non_exhaustive]`. [#2161] ### Removed -* `cookies` feature flag. [#2065] -* Top-level `cookies` mod (re-export). [#2065] -* `HttpMessage` trait loses the `cookies` and `cookie` methods. [#2065] -* `impl ResponseError for CookieParseError`. [#2065] -* Deprecated methods on `ResponseBuilder`: `if_true`, `if_some`. [#2148] -* `ResponseBuilder::json`. [#2148] -* `ResponseBuilder::{set_header, header}`. [#2148] -* `impl From for Body`. [#2148] -* `Response::build_from`. [#2159] -* Most of the status code builders on `Response`. [#2159] + +- `cookies` feature flag. [#2065] +- Top-level `cookies` mod (re-export). [#2065] +- `HttpMessage` trait loses the `cookies` and `cookie` methods. [#2065] +- `impl ResponseError for CookieParseError`. [#2065] +- Deprecated methods on `ResponseBuilder`: `if_true`, `if_some`. [#2148] +- `ResponseBuilder::json`. [#2148] +- `ResponseBuilder::{set_header, header}`. [#2148] +- `impl From for Body`. [#2148] +- `Response::build_from`. [#2159] +- Most of the status code builders on `Response`. [#2159] [#2065]: https://github.com/actix/actix-web/pull/2065 [#2148]: https://github.com/actix/actix-web/pull/2148 @@ -68,85 +856,91 @@ [#2158]: https://github.com/actix/actix-web/pull/2158 [#2161]: https://github.com/actix/actix-web/pull/2161 +## 3.0.0-beta.5 -## 3.0.0-beta.5 - 2021-04-02 ### Added -* `client::Connector::handshake_timeout` method for customizing TLS connection handshake timeout. [#2081] -* `client::ConnectorService` as `client::Connector::finish` method's return type [#2081] -* `client::ConnectionIo` trait alias [#2081] + +- `client::Connector::handshake_timeout` method for customizing TLS connection handshake timeout. [#2081] +- `client::ConnectorService` as `client::Connector::finish` method's return type [#2081] +- `client::ConnectionIo` trait alias [#2081] ### Changed -* `client::Connector` type now only have one generic type for `actix_service::Service`. [#2063] + +- `client::Connector` type now only have one generic type for `actix_service::Service`. [#2063] ### Removed -* Common typed HTTP headers were moved to actix-web. [2094] -* `ResponseError` impl for `actix_utils::timeout::TimeoutError`. [#2127] + +- Common typed HTTP headers were moved to actix-web. [2094] +- `ResponseError` impl for `actix_utils::timeout::TimeoutError`. [#2127] [#2063]: https://github.com/actix/actix-web/pull/2063 [#2081]: https://github.com/actix/actix-web/pull/2081 [#2094]: https://github.com/actix/actix-web/pull/2094 [#2127]: https://github.com/actix/actix-web/pull/2127 +## 3.0.0-beta.4 -## 3.0.0-beta.4 - 2021-03-08 ### Changed -* Feature `cookies` is now optional and disabled by default. [#1981] -* `ws::hash_key` now returns array. [#2035] -* `ResponseBuilder::json` now takes `impl Serialize`. [#2052] + +- Feature `cookies` is now optional and disabled by default. [#1981] +- `ws::hash_key` now returns array. [#2035] +- `ResponseBuilder::json` now takes `impl Serialize`. 
[#2052] ### Removed -* Re-export of `futures_channel::oneshot::Canceled` is removed from `error` mod. [#1994] -* `ResponseError` impl for `futures_channel::oneshot::Canceled` is removed. [#1994] + +- Re-export of `futures_channel::oneshot::Canceled` is removed from `error` mod. [#1994] +- `ResponseError` impl for `futures_channel::oneshot::Canceled` is removed. [#1994] [#1981]: https://github.com/actix/actix-web/pull/1981 [#1994]: https://github.com/actix/actix-web/pull/1994 [#2035]: https://github.com/actix/actix-web/pull/2035 [#2052]: https://github.com/actix/actix-web/pull/2052 +## 3.0.0-beta.3 -## 3.0.0-beta.3 - 2021-02-10 -* No notable changes. +- No notable changes. +## 3.0.0-beta.2 -## 3.0.0-beta.2 - 2021-02-10 ### Added -* `IntoHeaderPair` trait that allows using typed and untyped headers in the same methods. [#1869] -* `ResponseBuilder::insert_header` method which allows using typed headers. [#1869] -* `ResponseBuilder::append_header` method which allows using typed headers. [#1869] -* `TestRequest::insert_header` method which allows using typed headers. [#1869] -* `ContentEncoding` implements all necessary header traits. [#1912] -* `HeaderMap::len_keys` has the behavior of the old `len` method. [#1964] -* `HeaderMap::drain` as an efficient draining iterator. [#1964] -* Implement `IntoIterator` for owned `HeaderMap`. [#1964] -* `trust-dns` optional feature to enable `trust-dns-resolver` as client dns resolver. [#1969] + +- `TryIntoHeaderPair` trait that allows using typed and untyped headers in the same methods. [#1869] +- `ResponseBuilder::insert_header` method which allows using typed headers. [#1869] +- `ResponseBuilder::append_header` method which allows using typed headers. [#1869] +- `TestRequest::insert_header` method which allows using typed headers. [#1869] +- `ContentEncoding` implements all necessary header traits. [#1912] +- `HeaderMap::len_keys` has the behavior of the old `len` method. [#1964] +- `HeaderMap::drain` as an efficient draining iterator. [#1964] +- Implement `IntoIterator` for owned `HeaderMap`. [#1964] +- `trust-dns` optional feature to enable `trust-dns-resolver` as client dns resolver. [#1969] ### Changed -* `ResponseBuilder::content_type` now takes an `impl IntoHeaderValue` to support using typed - `mime` types. [#1894] -* Renamed `IntoHeaderValue::{try_into => try_into_value}` to avoid ambiguity with std - `TryInto` trait. [#1894] -* `Extensions::insert` returns Option of replaced item. [#1904] -* Remove `HttpResponseBuilder::json2()`. [#1903] -* Enable `HttpResponseBuilder::json()` to receive data by value and reference. [#1903] -* `client::error::ConnectError` Resolver variant contains `Box` type. [#1905] -* `client::ConnectorConfig` default timeout changed to 5 seconds. [#1905] -* Simplify `BlockingError` type to a unit struct. It's now only triggered when blocking thread pool - is dead. [#1957] -* `HeaderMap::len` now returns number of values instead of number of keys. [#1964] -* `HeaderMap::insert` now returns iterator of removed values. [#1964] -* `HeaderMap::remove` now returns iterator of removed values. [#1964] + +- `ResponseBuilder::content_type` now takes an `impl TryIntoHeaderValue` to support using typed `mime` types. [#1894] +- Renamed `TryIntoHeaderValue::{try_into => try_into_value}` to avoid ambiguity with std `TryInto` trait. [#1894] +- `Extensions::insert` returns Option of replaced item. [#1904] +- Remove `HttpResponseBuilder::json2()`. [#1903] +- Enable `HttpResponseBuilder::json()` to receive data by value and reference. 
[#1903] +- `client::error::ConnectError` Resolver variant contains `Box` type. [#1905] +- `client::ConnectorConfig` default timeout changed to 5 seconds. [#1905] +- Simplify `BlockingError` type to a unit struct. It's now only triggered when blocking thread pool is dead. [#1957] +- `HeaderMap::len` now returns number of values instead of number of keys. [#1964] +- `HeaderMap::insert` now returns iterator of removed values. [#1964] +- `HeaderMap::remove` now returns iterator of removed values. [#1964] ### Removed -* `ResponseBuilder::set`; use `ResponseBuilder::insert_header`. [#1869] -* `ResponseBuilder::set_header`; use `ResponseBuilder::insert_header`. [#1869] -* `ResponseBuilder::header`; use `ResponseBuilder::append_header`. [#1869] -* `TestRequest::with_hdr`; use `TestRequest::default().insert_header()`. [#1869] -* `TestRequest::with_header`; use `TestRequest::default().insert_header()`. [#1869] -* `actors` optional feature. [#1969] -* `ResponseError` impl for `actix::MailboxError`. [#1969] + +- `ResponseBuilder::set`; use `ResponseBuilder::insert_header`. [#1869] +- `ResponseBuilder::set_header`; use `ResponseBuilder::insert_header`. [#1869] +- `ResponseBuilder::header`; use `ResponseBuilder::append_header`. [#1869] +- `TestRequest::with_hdr`; use `TestRequest::default().insert_header()`. [#1869] +- `TestRequest::with_header`; use `TestRequest::default().insert_header()`. [#1869] +- `actors` optional feature. [#1969] +- `ResponseError` impl for `actix::MailboxError`. [#1969] ### Documentation -* Vastly improve docs and add examples for `HeaderMap`. [#1964] + +- Vastly improve docs and add examples for `HeaderMap`. [#1964] [#1869]: https://github.com/actix/actix-web/pull/1869 [#1894]: https://github.com/actix/actix-web/pull/1894 @@ -158,47 +952,63 @@ [#1964]: https://github.com/actix/actix-web/pull/1964 [#1969]: https://github.com/actix/actix-web/pull/1969 +## 3.0.0-beta.1 -## 3.0.0-beta.1 - 2021-01-07 ### Added -* Add `Http3` to `Protocol` enum for future compatibility and also mark `#[non_exhaustive]`. + +- Add `Http3` to `Protocol` enum for future compatibility and also mark `#[non_exhaustive]`. ### Changed -* Update `actix-*` dependencies to tokio `1.0` based versions. [#1813] -* Bumped `rand` to `0.8`. -* Update `bytes` to `1.0`. [#1813] -* Update `h2` to `0.3`. [#1813] -* The `ws::Message::Text` enum variant now contains a `bytestring::ByteString`. [#1864] + +- Update `actix-*` dependencies to tokio `1.0` based versions. [#1813] +- Bumped `rand` to `0.8`. +- Update `bytes` to `1.0`. [#1813] +- Update `h2` to `0.3`. [#1813] +- The `ws::Message::Text` enum variant now contains a `bytestring::ByteString`. [#1864] ### Removed -* Deprecated `on_connect` methods have been removed. Prefer the new - `on_connect_ext` technique. [#1857] -* Remove `ResponseError` impl for `actix::actors::resolver::ResolverError` - due to deprecate of resolver actor. [#1813] -* Remove `ConnectError::SslHandshakeError` and re-export of `HandshakeError`. - due to the removal of this type from `tokio-openssl` crate. openssl handshake - error would return as `ConnectError::SslError`. [#1813] -* Remove `actix-threadpool` dependency. Use `actix_rt::task::spawn_blocking`. - Due to this change `actix_threadpool::BlockingError` type is moved into - `actix_http::error` module. [#1878] + +- Deprecated `on_connect` methods have been removed. Prefer the new `on_connect_ext` technique. [#1857] +- Remove `ResponseError` impl for `actix::actors::resolver::ResolverError` due to deprecate of resolver actor. 
[#1813] +- Remove `ConnectError::SslHandshakeError` and re-export of `HandshakeError`. due to the removal of this type from `tokio-openssl` crate. openssl handshake error would return as `ConnectError::SslError`. [#1813] +- Remove `actix-threadpool` dependency. Use `actix_rt::task::spawn_blocking`. Due to this change `actix_threadpool::BlockingError` type is moved into `actix_http::error` module. [#1878] [#1813]: https://github.com/actix/actix-web/pull/1813 [#1857]: https://github.com/actix/actix-web/pull/1857 [#1864]: https://github.com/actix/actix-web/pull/1864 [#1878]: https://github.com/actix/actix-web/pull/1878 +
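As a concrete illustration of the `HeaderMap` rework described in the beta.2 notes above (`len` counting values rather than keys, `len_keys` preserving the old behavior, and `insert`/`remove` returning the removed values), here is a small sketch. It is not part of the diff and assumes `actix-http` 3.x; the `Accept` values are arbitrary examples.

```rust
use actix_http::header::{self, HeaderMap, HeaderValue};

fn main() {
    let mut map = HeaderMap::new();

    // `append` adds a value without touching existing values for the same name.
    map.append(header::ACCEPT, HeaderValue::from_static("text/plain"));
    map.append(header::ACCEPT, HeaderValue::from_static("text/html"));

    // `len` counts values; `len_keys` keeps the old key-counting behavior.
    assert_eq!(map.len(), 2);
    assert_eq!(map.len_keys(), 1);

    // `insert` replaces every value for the name and returns the removed ones as an iterator.
    let removed = map.insert(header::ACCEPT, HeaderValue::from_static("*/*"));
    assert_eq!(removed.count(), 2);
    assert_eq!(map.len(), 1);
}
```

Because `insert` and `remove` hand back the displaced values, callers that previously had to read `get_all` before overwriting a header can now do it in a single call.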
-## 2.2.0 - 2020-11-25 -### Added -* HttpResponse builders for 1xx status codes. [#1768] -* `Accept::mime_precedence` and `Accept::mime_preference`. [#1793] -* `TryFrom` and `TryFrom` for `http::header::Quality`. [#1797] - -### Fixed -* Started dropping `transfer-encoding: chunked` and `Content-Length` for 1XX and 204 responses. [#1767] +## 2.2.2 ### Changed -* Upgrade `serde_urlencoded` to `0.7`. [#1773] + +- Migrate to `brotli` crate. [ad7e3c06] + +[ad7e3c06]: https://github.com/actix/actix-web/commit/ad7e3c06 + +## 2.2.1 + +### Fixed + +- Potential HTTP request smuggling vulnerabilities. [RUSTSEC-2021-0081](https://github.com/rustsec/advisory-db/pull/977) + +## 2.2.0 + +### Added + +- HttpResponse builders for 1xx status codes. [#1768] +- `Accept::mime_precedence` and `Accept::mime_preference`. [#1793] +- `TryFrom` and `TryFrom` for `http::header::Quality`. [#1797] + +### Fixed + +- Started dropping `transfer-encoding: chunked` and `Content-Length` for 1XX and 204 responses. [#1767] + +### Changed + +- Upgrade `serde_urlencoded` to `0.7`. [#1773] [#1773]: https://github.com/actix/actix-web/pull/1773 [#1767]: https://github.com/actix/actix-web/pull/1767 @@ -206,333 +1016,381 @@ [#1793]: https://github.com/actix/actix-web/pull/1793 [#1797]: https://github.com/actix/actix-web/pull/1797 +## 2.1.0 -## 2.1.0 - 2020-10-30 ### Added -* Added more flexible `on_connect_ext` methods for on-connect handling. [#1754] + +- Added more flexible `on_connect_ext` methods for on-connect handling. [#1754] ### Changed -* Upgrade `base64` to `0.13`. [#1744] -* Upgrade `pin-project` to `1.0`. [#1733] -* Deprecate `ResponseBuilder::{if_some, if_true}`. [#1760] + +- Upgrade `base64` to `0.13`. [#1744] +- Upgrade `pin-project` to `1.0`. [#1733] +- Deprecate `ResponseBuilder::{if_some, if_true}`. [#1760] [#1760]: https://github.com/actix/actix-web/pull/1760 [#1754]: https://github.com/actix/actix-web/pull/1754 [#1733]: https://github.com/actix/actix-web/pull/1733 [#1744]: https://github.com/actix/actix-web/pull/1744 +## 2.0.0 -## 2.0.0 - 2020-09-11 -* No significant changes from `2.0.0-beta.4`. +- No significant changes from `2.0.0-beta.4`. +## 2.0.0-beta.4 -## 2.0.0-beta.4 - 2020-09-09 ### Changed -* Update actix-codec and actix-utils dependencies. -* Update actix-connect and actix-tls dependencies. +- Update actix-codec and actix-utils dependencies. +- Update actix-connect and actix-tls dependencies. + +## 2.0.0-beta.3 -## 2.0.0-beta.3 - 2020-08-14 ### Fixed -* Memory leak of `client::pool::ConnectorPoolSupport`. [#1626] + +- Memory leak of `client::pool::ConnectorPoolSupport`. [#1626] [#1626]: https://github.com/actix/actix-web/pull/1626 +## 2.0.0-beta.2 -## 2.0.0-beta.2 - 2020-07-21 ### Fixed -* Potential UB in h1 decoder using uninitialized memory. [#1614] + +- Potential UB in h1 decoder using uninitialized memory. [#1614] ### Changed -* Fix illegal chunked encoding. [#1615] + +- Fix illegal chunked encoding. [#1615] [#1614]: https://github.com/actix/actix-web/pull/1614 [#1615]: https://github.com/actix/actix-web/pull/1615 +## 2.0.0-beta.1 -## 2.0.0-beta.1 - 2020-07-11 ### Changed -* Migrate cookie handling to `cookie` crate. [#1558] -* Update `sha-1` to 0.9. [#1586] -* Fix leak in client pool. [#1580] -* MSRV is now 1.41.1. + +- Migrate cookie handling to `cookie` crate. [#1558] +- Update `sha-1` to 0.9. [#1586] +- Fix leak in client pool. [#1580] +- MSRV is now 1.41.1. 
[#1558]: https://github.com/actix/actix-web/pull/1558 [#1586]: https://github.com/actix/actix-web/pull/1586 [#1580]: https://github.com/actix/actix-web/pull/1580 +## 2.0.0-alpha.4 -## 2.0.0-alpha.4 - 2020-05-21 ### Changed -* Bump minimum supported Rust version to 1.40 -* content_length function is removed, and you can set Content-Length by calling - no_chunking function [#1439] -* `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a - `u64` instead of a `usize`. -* Update `base64` dependency to 0.12 + +- Bump minimum supported Rust version to 1.40 +- content_length function is removed, and you can set Content-Length by calling no_chunking function [#1439] +- `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a `u64` instead of a `usize`. +- Update `base64` dependency to 0.12 ### Fixed -* Support parsing of `SameSite=None` [#1503] + +- Support parsing of `SameSite=None` [#1503] [#1439]: https://github.com/actix/actix-web/pull/1439 [#1503]: https://github.com/actix/actix-web/pull/1503 +## 2.0.0-alpha.3 -## 2.0.0-alpha.3 - 2020-05-08 ### Fixed -* Correct spelling of ConnectError::Unresolved [#1487] -* Fix a mistake in the encoding of websocket continuation messages wherein - Item::FirstText and Item::FirstBinary are each encoded as the other. + +- Correct spelling of ConnectError::Unresolved [#1487] +- Fix a mistake in the encoding of websocket continuation messages wherein Item::FirstText and Item::FirstBinary are each encoded as the other. ### Changed -* Implement `std::error::Error` for our custom errors [#1422] -* Remove `failure` support for `ResponseError` since that crate - will be deprecated in the near future. + +- Implement `std::error::Error` for our custom errors [#1422] +- Remove `failure` support for `ResponseError` since that crate will be deprecated in the near future. [#1422]: https://github.com/actix/actix-web/pull/1422 [#1487]: https://github.com/actix/actix-web/pull/1487 +## 2.0.0-alpha.2 -## 2.0.0-alpha.2 - 2020-03-07 ### Changed -* Update `actix-connect` and `actix-tls` dependency to 2.0.0-alpha.1. [#1395] -* Change default initial window size and connection window size for HTTP2 to 2MB and 1MB - respectively to improve download speed for awc when downloading large objects. [#1394] -* client::Connector accepts initial_window_size and initial_connection_window_size - HTTP2 configuration. [#1394] -* client::Connector allowing to set max_http_version to limit HTTP version to be used. [#1394] + +- Update `actix-connect` and `actix-tls` dependency to 2.0.0-alpha.1. [#1395] +- Change default initial window size and connection window size for HTTP2 to 2MB and 1MB respectively to improve download speed for awc when downloading large objects. [#1394] +- client::Connector accepts initial_window_size and initial_connection_window_size HTTP2 configuration. [#1394] +- client::Connector allowing to set max_http_version to limit HTTP version to be used. [#1394] [#1394]: https://github.com/actix/actix-web/pull/1394 [#1395]: https://github.com/actix/actix-web/pull/1395 +## 2.0.0-alpha.1 -## 2.0.0-alpha.1 - 2020-02-27 ### Changed -* Update the `time` dependency to 0.2.7. -* Moved actors messages support from actix crate, enabled with feature `actors`. -* Breaking change: trait MessageBody requires Unpin and accepting `Pin<&mut Self>` instead of - `&mut self` in the poll_next(). -* MessageBody is not implemented for &'static [u8] anymore. + +- Update the `time` dependency to 0.2.7. 
+- Moved actors messages support from actix crate, enabled with feature `actors`. +- Breaking change: trait MessageBody requires Unpin and accepting `Pin<&mut Self>` instead of `&mut self` in the poll_next(). +- MessageBody is not implemented for &'static [u8] anymore. ### Fixed -* Allow `SameSite=None` cookies to be sent in a response. +- Allow `SameSite=None` cookies to be sent in a response. + +## 1.0.1 -## 1.0.1 - 2019-12-20 ### Fixed -* Poll upgrade service's readiness from HTTP service handlers -* Replace brotli with brotli2 #1224 +- Poll upgrade service's readiness from HTTP service handlers +- Replace brotli with brotli2 #1224 + +## 1.0.0 -## 1.0.0 - 2019-12-13 ### Added -* Add websockets continuation frame support + +- Add websockets continuation frame support ### Changed -* Replace `flate2-xxx` features with `compress` +- Replace `flate2-xxx` features with `compress` -## 1.0.0-alpha.5 - 2019-12-09 -### Fixed -* Check `Upgrade` service readiness before calling it -* Fix buffer remaining capacity calculation - -### Changed -* Websockets: Ping and Pong should have binary data #1049 - - -## 1.0.0-alpha.4 - 2019-12-08 -### Added -* Add impl ResponseBuilder for Error - -### Changed -* Use rust based brotli compression library - -## 1.0.0-alpha.3 - 2019-12-07 -### Changed -* Migrate to tokio 0.2 -* Migrate to `std::future` - - -## 0.2.11 - 2019-11-06 -### Added -* Add support for serde_json::Value to be passed as argument to ResponseBuilder.body() -* Add an additional `filename*` param in the `Content-Disposition` header of - `actix_files::NamedFile` to be more compatible. (#1151) -* Allow to use `std::convert::Infallible` as `actix_http::error::Error` +## 1.0.0-alpha.5 ### Fixed -* To be compatible with non-English error responses, `ResponseError` rendered with `text/plain; - charset=utf-8` header [#1118] + +- Check `Upgrade` service readiness before calling it +- Fix buffer remaining capacity calculation + +### Changed + +- Websockets: Ping and Pong should have binary data #1049 + +## 1.0.0-alpha.4 + +### Added + +- Add impl ResponseBuilder for Error + +### Changed + +- Use rust based brotli compression library + +## 1.0.0-alpha.3 + +### Changed + +- Migrate to tokio 0.2 +- Migrate to `std::future` + +## 0.2.11 + +### Added + +- Add support for serde_json::Value to be passed as argument to ResponseBuilder.body() +- Add an additional `filename*` param in the `Content-Disposition` header of `actix_files::NamedFile` to be more compatible. 
(#1151) +- Allow to use `std::convert::Infallible` as `actix_http::error::Error` + +### Fixed + +- To be compatible with non-English error responses, `ResponseError` rendered with `text/plain; charset=utf-8` header [#1118] [#1878]: https://github.com/actix/actix-web/pull/1878 +## 0.2.10 -## 0.2.10 - 2019-09-11 ### Added -* Add support for sending HTTP requests with `Rc` in addition to sending HTTP requests - with `RequestHead` + +- Add support for sending HTTP requests with `Rc` in addition to sending HTTP requests with `RequestHead` ### Fixed -* h2 will use error response #1080 -* on_connect result isn't added to request extensions for http2 requests #1009 +- h2 will use error response #1080 +- on_connect result isn't added to request extensions for http2 requests #1009 -## 0.2.9 - 2019-08-13 -### Changed -* Dropped the `byteorder`-dependency in favor of `stdlib`-implementation -* Update percent-encoding to 2.1 -* Update serde_urlencoded to 0.6.1 - -### Fixed -* Fixed a panic in the HTTP2 handshake in client HTTP requests (#1031) - - -## 0.2.8 - 2019-08-01 -### Added -* Add `rustls` support -* Add `Clone` impl for `HeaderMap` - -### Fixed -* awc client panic #1016 -* Invalid response with compression middleware enabled, but compression-related features - disabled #997 - - -## 0.2.7 - 2019-07-18 -### Added -* Add support for downcasting response errors #986 - - -## 0.2.6 - 2019-07-17 -### Changed -* Replace `ClonableService` with local copy -* Upgrade `rand` dependency version to 0.7 - - -## 0.2.5 - 2019-06-28 -### Added -* Add `on-connect` callback, `HttpServiceBuilder::on_connect()` #946 +## 0.2.9 ### Changed -* Use `encoding_rs` crate instead of unmaintained `encoding` crate -* Add `Copy` and `Clone` impls for `ws::Codec` +- Dropped the `byteorder`-dependency in favor of `stdlib`-implementation +- Update percent-encoding to 2.1 +- Update serde_urlencoded to 0.6.1 -## 0.2.4 - 2019-06-16 ### Fixed -* Do not compress NoContent (204) responses #918 +- Fixed a panic in the HTTP2 handshake in client HTTP requests (#1031) + +## 0.2.8 -## 0.2.3 - 2019-06-02 ### Added -* Debug impl for ResponseBuilder -* From SizedStream and BodyStream for Body + +- Add `rustls` support +- Add `Clone` impl for `HeaderMap` + +### Fixed + +- awc client panic #1016 +- Invalid response with compression middleware enabled, but compression-related features disabled #997 + +## 0.2.7 + +### Added + +- Add support for downcasting response errors #986 + +## 0.2.6 ### Changed -* SizedStream uses u64 +- Replace `ClonableService` with local copy +- Upgrade `rand` dependency version to 0.7 -## 0.2.2 - 2019-05-29 -### Fixed -* Parse incoming stream before closing stream on disconnect #868 +## 0.2.5 +### Added -## 0.2.1 - 2019-05-25 -### Fixed -* Handle socket read disconnect +- Add `on-connect` callback, `HttpServiceBuilder::on_connect()` #946 - -## 0.2.0 - 2019-05-12 ### Changed -* Update actix-service to 0.4 -* Expect and upgrade services accept `ServerConfig` config. 
+ +- Use `encoding_rs` crate instead of unmaintained `encoding` crate +- Add `Copy` and `Clone` impls for `ws::Codec` + +## 0.2.4 + +### Fixed + +- Do not compress NoContent (204) responses #918 + +## 0.2.3 + +### Added + +- Debug impl for ResponseBuilder +- From SizedStream and BodyStream for Body + +### Changed + +- SizedStream uses u64 + +## 0.2.2 + +### Fixed + +- Parse incoming stream before closing stream on disconnect #868 + +## 0.2.1 + +### Fixed + +- Handle socket read disconnect + +## 0.2.0 + +### Changed + +- Update actix-service to 0.4 +- Expect and upgrade services accept `ServerConfig` config. ### Deleted -* `OneRequest` service +- `OneRequest` service -## 0.1.5 - 2019-05-04 -### Fixed -* Clean up response extensions in response pool #817 - - -## 0.1.4 - 2019-04-24 -### Added -* Allow to render h1 request headers in `Camel-Case` +## 0.1.5 ### Fixed -* Read until eof for http/1.0 responses #771 +- Clean up response extensions in response pool #817 + +## 0.1.4 + +### Added + +- Allow to render h1 request headers in `Camel-Case` -## 0.1.3 - 2019-04-23 ### Fixed -* Fix http client pool management -* Fix http client wait queue management #794 +- Read until eof for http/1.0 responses #771 + +## 0.1.3 -## 0.1.2 - 2019-04-23 ### Fixed -* Fix BorrowMutError panic in client connector #793 +- Fix http client pool management +- Fix http client wait queue management #794 + +## 0.1.2 + +### Fixed + +- Fix BorrowMutError panic in client connector #793 + +## 0.1.1 -## 0.1.1 - 2019-04-19 ### Changed -* Cookie::max_age() accepts value in seconds -* Cookie::max_age_time() accepts value in time::Duration -* Allow to specify server address for client connector +- Cookie::max_age() accepts value in seconds +- Cookie::max_age_time() accepts value in time::Duration +- Allow to specify server address for client connector + +## 0.1.0 -## 0.1.0 - 2019-04-16 ### Added -* Expose peer addr via `Request::peer_addr()` and `RequestHead::peer_addr` + +- Expose peer addr via `Request::peer_addr()` and `RequestHead::peer_addr` ### Changed -* `actix_http::encoding` always available -* use trust-dns-resolver 0.11.0 +- `actix_http::encoding` always available +- use trust-dns-resolver 0.11.0 + +## 0.1.0-alpha.5 -## 0.1.0-alpha.5 - 2019-04-12 ### Added -* Allow to use custom service for upgrade requests -* Added `h1::SendResponse` future. + +- Allow to use custom service for upgrade requests +- Added `h1::SendResponse` future. 
### Changed -* MessageBody::length() renamed to MessageBody::size() for consistency -* ws handshake verification functions take RequestHead instead of Request +- MessageBody::length() renamed to MessageBody::size() for consistency +- ws handshake verification functions take RequestHead instead of Request + +## 0.1.0-alpha.4 -## 0.1.0-alpha.4 - 2019-04-08 ### Added -* Allow to use custom `Expect` handler -* Add minimal `std::error::Error` impl for `Error` + +- Allow to use custom `Expect` handler +- Add minimal `std::error::Error` impl for `Error` ### Changed -* Export IntoHeaderValue -* Render error and return as response body -* Use thread pool for response body compression + +- Export IntoHeaderValue +- Render error and return as response body +- Use thread pool for response body compression ### Deleted -* Removed PayloadBuffer +- Removed PayloadBuffer + +## 0.1.0-alpha.3 -## 0.1.0-alpha.3 - 2019-04-02 ### Added -* Warn when an unsealed private cookie isn't valid UTF-8 + +- Warn when an unsealed private cookie isn't valid UTF-8 ### Fixed -* Rust 1.31.0 compatibility -* Preallocate read buffer for h1 codec -* Detect socket disconnection during protocol selection +- Rust 1.31.0 compatibility +- Preallocate read buffer for h1 codec +- Detect socket disconnection during protocol selection + +## 0.1.0-alpha.2 -## 0.1.0-alpha.2 - 2019-03-29 ### Added -* Added ws::Message::Nop, no-op websockets message + +- Added ws::Message::Nop, no-op websockets message ### Changed -* Do not use thread pool for decompression if chunk size is smaller than 2048. +- Do not use thread pool for decompression if chunk size is smaller than 2048. -## 0.1.0-alpha.1 - 2019-03-28 -* Initial impl +## 0.1.0-alpha.1 + +- Initial impl diff --git a/actix-http/Cargo.toml b/actix-http/Cargo.toml index 1f7df39a6..4dc0f0bd8 100644 --- a/actix-http/Cargo.toml +++ b/actix-http/Cargo.toml @@ -1,108 +1,181 @@ [package] name = "actix-http" -version = "3.0.0-beta.6" -authors = ["Nikolay Kim "] -description = "HTTP primitives for the Actix ecosystem" -readme = "README.md" +version = "3.7.0" +authors = [ + "Nikolay Kim ", + "Rob Ede ", +] +description = "HTTP types and services for the Actix ecosystem" keywords = ["actix", "http", "framework", "async", "futures"] homepage = "https://actix.rs" -repository = "https://github.com/actix/actix-web.git" -documentation = "https://docs.rs/actix-http/" -categories = ["network-programming", "asynchronous", - "web-programming::http-server", - "web-programming::websocket"] -license = "MIT OR Apache-2.0" -edition = "2018" +repository = "https://github.com/actix/actix-web" +categories = [ + "network-programming", + "asynchronous", + "web-programming::http-server", + "web-programming::websocket", +] +license.workspace = true +edition.workspace = true +rust-version.workspace = true [package.metadata.docs.rs] -# features that docs.rs will build with -features = ["openssl", "rustls", "compress"] +rustdoc-args = ["--cfg", "docsrs"] +features = [ + "http2", + "ws", + "openssl", + "rustls-0_20", + "rustls-0_21", + "rustls-0_22", + "rustls-0_23", + "compress-brotli", + "compress-gzip", + "compress-zstd", +] -[lib] -name = "actix_http" -path = "src/lib.rs" +[package.metadata.cargo_check_external_types] +allowed_external_types = [ + "actix_codec::*", + "actix_service::*", + "actix_tls::*", + "actix_utils::*", + "bytes::*", + "bytestring::*", + "encoding_rs::*", + "futures_core::*", + "h2::*", + "http::*", + "httparse::*", + "language_tags::*", + "mime::*", + "openssl::*", + "rustls::*", + "tokio_util::*", + 
"tokio::*", +] [features] default = [] -# openssl -openssl = ["actix-tls/openssl"] +# HTTP/2 protocol support +http2 = ["h2"] -# rustls support -rustls = ["actix-tls/rustls"] +# WebSocket protocol implementation +ws = [ + "local-channel", + "base64", + "rand", + "sha1", +] -# enable compression support -compress = ["flate2", "brotli2"] +# TLS via OpenSSL +openssl = ["actix-tls/accept", "actix-tls/openssl"] -# trust-dns as client dns resolver -trust-dns = ["trust-dns-resolver"] +# TLS via Rustls v0.20 +rustls = ["rustls-0_20"] + +# TLS via Rustls v0.20 +rustls-0_20 = ["actix-tls/accept", "actix-tls/rustls-0_20"] + +# TLS via Rustls v0.21 +rustls-0_21 = ["actix-tls/accept", "actix-tls/rustls-0_21"] + +# TLS via Rustls v0.22 +rustls-0_22 = ["actix-tls/accept", "actix-tls/rustls-0_22"] + +# TLS via Rustls v0.23 +rustls-0_23 = ["actix-tls/accept", "actix-tls/rustls-0_23"] + +# Compression codecs +compress-brotli = ["__compress", "brotli"] +compress-gzip = ["__compress", "flate2"] +compress-zstd = ["__compress", "zstd"] + +# Internal (PRIVATE!) features used to aid testing and checking feature status. +# Don't rely on these whatsoever. They are semver-exempt and may disappear at anytime. +__compress = [] [dependencies] -actix-service = "2.0.0" -actix-codec = "0.4.0" -actix-utils = "3.0.0" -actix-rt = "2.2" -actix-tls = { version = "3.0.0-beta.5", features = ["accept", "connect"] } +actix-service = "2" +actix-codec = "0.5" +actix-utils = "3" +actix-rt = { version = "2.2", default-features = false } -ahash = "0.7" -base64 = "0.13" -bitflags = "1.2" +ahash = "0.8" +bitflags = "2" bytes = "1" bytestring = "1" derive_more = "0.99.5" encoding_rs = "0.8" -futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] } -futures-util = { version = "0.3.7", default-features = false, features = ["alloc", "sink"] } -h2 = "0.3.1" -http = "0.2.2" -httparse = "1.3" -itoa = "0.4" +futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] } +http = "0.2.7" +httparse = "1.5.1" +httpdate = "1.0.1" +itoa = "1" language-tags = "0.3" -local-channel = "0.1" -once_cell = "1.5" -log = "0.4" -mime = "0.3" +mime = "0.3.4" percent-encoding = "2.1" -pin-project = "1.0.0" pin-project-lite = "0.2" -rand = "0.8" -regex = "1.3" -serde = "1.0" -sha-1 = "0.9" -smallvec = "1.6" -time = { version = "0.2.23", default-features = false, features = ["std"] } -tokio = { version = "1.2", features = ["sync"] } +smallvec = "1.6.1" +tokio = { version = "1.24.2", features = [] } +tokio-util = { version = "0.7", features = ["io", "codec"] } +tracing = { version = "0.1.30", default-features = false, features = ["log"] } -# compression -brotli2 = { version="0.3.2", optional = true } +# http2 +h2 = { version = "0.3.26", optional = true } + +# websockets +local-channel = { version = "0.1", optional = true } +base64 = { version = "0.22", optional = true } +rand = { version = "0.8", optional = true } +sha1 = { version = "0.10", optional = true } + +# openssl/rustls +actix-tls = { version = "3.4", default-features = false, optional = true } + +# compress-* +brotli = { version = "6", optional = true } flate2 = { version = "1.0.13", optional = true } - -trust-dns-resolver = { version = "0.20.0", optional = true } +zstd = { version = "0.13", optional = true } [dev-dependencies] -actix-server = "2.0.0-beta.3" -actix-http-test = { version = "3.0.0-beta.4", features = ["openssl"] } -actix-tls = { version = "3.0.0-beta.5", features = ["openssl"] } -criterion = "0.3" -env_logger = "0.8" -rcgen = "0.8" 
+actix-http-test = { version = "3", features = ["openssl"] } +actix-server = "2" +actix-tls = { version = "3.4", features = ["openssl", "rustls-0_23-webpki-roots"] } +actix-web = "4" + +async-stream = "0.3" +criterion = { version = "0.5", features = ["html_reports"] } +divan = "0.1.8" +env_logger = "0.11" +futures-util = { version = "0.3.17", default-features = false, features = ["alloc"] } +memchr = "2.4" +once_cell = "1.9" +rcgen = "0.13" +regex = "1.3" +rustversion = "1" +rustls-pemfile = "2" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -tls-openssl = { version = "0.10", package = "openssl" } -tls-rustls = { version = "0.19", package = "rustls" } +static_assertions = "1" +tls-openssl = { package = "openssl", version = "0.10.55" } +tls-rustls_023 = { package = "rustls", version = "0.23" } +tokio = { version = "1.24.2", features = ["net", "rt", "macros"] } [[example]] name = "ws" -required-features = ["rustls"] +required-features = ["ws", "rustls-0_23"] + +[[example]] +name = "tls_rustls" +required-features = ["http2", "rustls-0_23"] [[bench]] -name = "write-camel-case" +name = "response-body-compression" harness = false +required-features = ["compress-brotli", "compress-gzip", "compress-zstd"] [[bench]] -name = "status-line" -harness = false - -[[bench]] -name = "uninit-headers" +name = "date-formatting" harness = false diff --git a/actix-http/README.md b/actix-http/README.md index 87eb38e5d..0ba3fdcac 100644 --- a/actix-http/README.md +++ b/actix-http/README.md @@ -1,23 +1,21 @@ -# actix-http +# `actix-http` -> HTTP primitives for the Actix ecosystem. +> HTTP types and services for the Actix ecosystem. + + [![crates.io](https://img.shields.io/crates/v/actix-http?label=latest)](https://crates.io/crates/actix-http) -[![Documentation](https://docs.rs/actix-http/badge.svg?version=3.0.0-beta.6)](https://docs.rs/actix-http/3.0.0-beta.6) -[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html) +[![Documentation](https://docs.rs/actix-http/badge.svg?version=3.7.0)](https://docs.rs/actix-http/3.7.0) +![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg) ![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http.svg)
-[![dependency status](https://deps.rs/crate/actix-http/3.0.0-beta.6/status.svg)](https://deps.rs/crate/actix-http/3.0.0-beta.6) +[![dependency status](https://deps.rs/crate/actix-http/3.7.0/status.svg)](https://deps.rs/crate/actix-http/3.7.0) [![Download](https://img.shields.io/crates/d/actix-http.svg)](https://crates.io/crates/actix-http) -[![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) -## Documentation & Resources + -- [API Documentation](https://docs.rs/actix-http) -- [Chat on Gitter](https://gitter.im/actix/actix-web) -- Minimum Supported Rust Version (MSRV): 1.46.0 - -## Example +## Examples ```rust use std::{env, io}; @@ -26,7 +24,7 @@ use actix_http::{HttpService, Response}; use actix_server::Server; use futures_util::future; use http::header::HeaderValue; -use log::info; +use tracing::info; #[actix_rt::main] async fn main() -> io::Result<()> { @@ -50,18 +48,3 @@ async fn main() -> io::Result<()> { .await } ``` - -## License - -This project is licensed under either of - -* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)) -* MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT)) - -at your option. - -## Code of Conduct - -Contribution to the actix-http crate is organized under the terms of the -Contributor Covenant, the maintainer of actix-http, @fafhrd91, promises to -intervene to uphold that code of conduct. 
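The updated README example above, and the `hello-world`, `echo`, and `bench` examples added later in this diff, all follow the same `Server::build()` + `HttpService::build()` shape. A condensed, self-contained sketch of that pattern, using only builder calls that appear elsewhere in this diff; the binding name, port, and response text are arbitrary placeholders:

```rust
use std::{convert::Infallible, io};

use actix_http::{HttpService, Request, Response, StatusCode};
use actix_server::Server;

#[actix_rt::main]
async fn main() -> io::Result<()> {
    Server::build()
        // bind a named service factory to a local address
        .bind("readme-sketch", ("127.0.0.1", 8080), || {
            HttpService::build()
                // `finish` accepts an async request handler
                .finish(|_req: Request| async {
                    Ok::<_, Infallible>(Response::build(StatusCode::OK).body("Hello world!"))
                })
                // plain TCP, no TLS (see the `tls_rustls` example in this diff for TLS setup)
                .tcp()
        })?
        .run()
        .await
}
```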
diff --git a/actix-http/benches/date-formatting.rs b/actix-http/benches/date-formatting.rs new file mode 100644 index 000000000..26d0f3daa --- /dev/null +++ b/actix-http/benches/date-formatting.rs @@ -0,0 +1,20 @@ +use std::time::SystemTime; + +use actix_http::header::HttpDate; +use divan::{black_box, AllocProfiler, Bencher}; + +#[global_allocator] +static ALLOC: AllocProfiler = AllocProfiler::system(); + +#[divan::bench] +fn date_formatting(b: Bencher<'_, '_>) { + let now = SystemTime::now(); + + b.bench(|| { + black_box(HttpDate::from(black_box(now)).to_string()); + }) +} + +fn main() { + divan::main(); +} diff --git a/actix-http/benches/response-body-compression.rs b/actix-http/benches/response-body-compression.rs new file mode 100644 index 000000000..53279e312 --- /dev/null +++ b/actix-http/benches/response-body-compression.rs @@ -0,0 +1,88 @@ +use std::convert::Infallible; + +use actix_http::{encoding::Encoder, ContentEncoding, Request, Response, StatusCode}; +use actix_service::{fn_service, Service as _}; +use criterion::{black_box, criterion_group, criterion_main, Criterion}; + +static BODY: &[u8] = include_bytes!("../Cargo.toml"); + +fn compression_responses(c: &mut Criterion) { + let mut group = c.benchmark_group("compression responses"); + + group.bench_function("identity", |b| { + let rt = actix_rt::Runtime::new().unwrap(); + + let identity_svc = fn_service(|_: Request| async move { + let mut res = Response::with_body(StatusCode::OK, ()); + let body = black_box(Encoder::response( + ContentEncoding::Identity, + res.head_mut(), + BODY, + )); + Ok::<_, Infallible>(black_box(res.set_body(black_box(body)))) + }); + + b.iter(|| { + rt.block_on(identity_svc.call(Request::new())).unwrap(); + }); + }); + + group.bench_function("gzip", |b| { + let rt = actix_rt::Runtime::new().unwrap(); + + let identity_svc = fn_service(|_: Request| async move { + let mut res = Response::with_body(StatusCode::OK, ()); + let body = black_box(Encoder::response( + ContentEncoding::Gzip, + res.head_mut(), + BODY, + )); + Ok::<_, Infallible>(black_box(res.set_body(black_box(body)))) + }); + + b.iter(|| { + rt.block_on(identity_svc.call(Request::new())).unwrap(); + }); + }); + + group.bench_function("br", |b| { + let rt = actix_rt::Runtime::new().unwrap(); + + let identity_svc = fn_service(|_: Request| async move { + let mut res = Response::with_body(StatusCode::OK, ()); + let body = black_box(Encoder::response( + ContentEncoding::Brotli, + res.head_mut(), + BODY, + )); + Ok::<_, Infallible>(black_box(res.set_body(black_box(body)))) + }); + + b.iter(|| { + rt.block_on(identity_svc.call(Request::new())).unwrap(); + }); + }); + + group.bench_function("zstd", |b| { + let rt = actix_rt::Runtime::new().unwrap(); + + let identity_svc = fn_service(|_: Request| async move { + let mut res = Response::with_body(StatusCode::OK, ()); + let body = black_box(Encoder::response( + ContentEncoding::Zstd, + res.head_mut(), + BODY, + )); + Ok::<_, Infallible>(black_box(res.set_body(black_box(body)))) + }); + + b.iter(|| { + rt.block_on(identity_svc.call(Request::new())).unwrap(); + }); + }); + + group.finish(); +} + +criterion_group!(benches, compression_responses); +criterion_main!(benches); diff --git a/actix-http/benches/status-line.rs b/actix-http/benches/status-line.rs deleted file mode 100644 index f62d18ed8..000000000 --- a/actix-http/benches/status-line.rs +++ /dev/null @@ -1,222 +0,0 @@ -use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; - -use bytes::BytesMut; -use http::Version; - -const CODES: 
&[u16] = &[201, 303, 404, 515]; - -fn bench_write_status_line_11(c: &mut Criterion) { - let mut group = c.benchmark_group("write_status_line v1.1"); - - let version = Version::HTTP_11; - - for i in CODES.iter() { - group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| { - b.iter(|| { - let mut b = BytesMut::with_capacity(35); - _original::write_status_line(version, i, &mut b); - }) - }); - - group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| { - b.iter(|| { - let mut b = BytesMut::with_capacity(35); - _new::write_status_line(version, i, &mut b); - }) - }); - - group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| { - b.iter(|| { - let mut b = BytesMut::with_capacity(35); - _naive::write_status_line(version, i, &mut b); - }) - }); - } - - group.finish(); -} - -fn bench_write_status_line_10(c: &mut Criterion) { - let mut group = c.benchmark_group("write_status_line v1.0"); - - let version = Version::HTTP_10; - - for i in CODES.iter() { - group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| { - b.iter(|| { - let mut b = BytesMut::with_capacity(35); - _original::write_status_line(version, i, &mut b); - }) - }); - - group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| { - b.iter(|| { - let mut b = BytesMut::with_capacity(35); - _new::write_status_line(version, i, &mut b); - }) - }); - - group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| { - b.iter(|| { - let mut b = BytesMut::with_capacity(35); - _naive::write_status_line(version, i, &mut b); - }) - }); - } - - group.finish(); -} - -fn bench_write_status_line_09(c: &mut Criterion) { - let mut group = c.benchmark_group("write_status_line v0.9"); - - let version = Version::HTTP_09; - - for i in CODES.iter() { - group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| { - b.iter(|| { - let mut b = BytesMut::with_capacity(35); - _original::write_status_line(version, i, &mut b); - }) - }); - - group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| { - b.iter(|| { - let mut b = BytesMut::with_capacity(35); - _new::write_status_line(version, i, &mut b); - }) - }); - - group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| { - b.iter(|| { - let mut b = BytesMut::with_capacity(35); - _naive::write_status_line(version, i, &mut b); - }) - }); - } - - group.finish(); -} - -criterion_group!( - benches, - bench_write_status_line_11, - bench_write_status_line_10, - bench_write_status_line_09 -); -criterion_main!(benches); - -mod _naive { - use bytes::{BufMut, BytesMut}; - use http::Version; - - pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) { - match version { - Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "), - Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "), - Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "), - _ => { - // other HTTP version handlers do not use this method - } - } - - bytes.put_slice(n.to_string().as_bytes()); - } -} - -mod _new { - use bytes::{BufMut, BytesMut}; - use http::Version; - - const DIGITS_START: u8 = b'0'; - - pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) { - match version { - Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "), - Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "), - Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "), - _ => { - // other HTTP version handlers do not use this method - } - } - - let d100 = (n / 100) as u8; - let d10 = ((n / 10) % 10) as u8; - let d1 = (n % 10) as u8; - - 
bytes.put_u8(DIGITS_START + d100); - bytes.put_u8(DIGITS_START + d10); - bytes.put_u8(DIGITS_START + d1); - - bytes.put_u8(b' '); - } -} - -mod _original { - use std::ptr; - - use bytes::{BufMut, BytesMut}; - use http::Version; - - const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\ - 2021222324252627282930313233343536373839\ - 4041424344454647484950515253545556575859\ - 6061626364656667686970717273747576777879\ - 8081828384858687888990919293949596979899"; - - pub(crate) const STATUS_LINE_BUF_SIZE: usize = 13; - - pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesMut) { - let mut buf: [u8; STATUS_LINE_BUF_SIZE] = *b"HTTP/1.1 "; - - match version { - Version::HTTP_2 => buf[5] = b'2', - Version::HTTP_10 => buf[7] = b'0', - Version::HTTP_09 => { - buf[5] = b'0'; - buf[7] = b'9'; - } - _ => {} - } - - let mut curr: isize = 12; - let buf_ptr = buf.as_mut_ptr(); - let lut_ptr = DEC_DIGITS_LUT.as_ptr(); - let four = n > 999; - - // decode 2 more chars, if > 2 chars - let d1 = (n % 100) << 1; - n /= 100; - curr -= 2; - unsafe { - ptr::copy_nonoverlapping( - lut_ptr.offset(d1 as isize), - buf_ptr.offset(curr), - 2, - ); - } - - // decode last 1 or 2 chars - if n < 10 { - curr -= 1; - unsafe { - *buf_ptr.offset(curr) = (n as u8) + b'0'; - } - } else { - let d1 = n << 1; - curr -= 2; - unsafe { - ptr::copy_nonoverlapping( - lut_ptr.offset(d1 as isize), - buf_ptr.offset(curr), - 2, - ); - } - } - - bytes.put_slice(&buf); - if four { - bytes.put_u8(b' '); - } - } -} diff --git a/actix-http/benches/uninit-headers.rs b/actix-http/benches/uninit-headers.rs deleted file mode 100644 index 83e74171c..000000000 --- a/actix-http/benches/uninit-headers.rs +++ /dev/null @@ -1,137 +0,0 @@ -use criterion::{criterion_group, criterion_main, Criterion}; - -use bytes::BytesMut; - -// A Miri run detects UB, seen on this playground: -// https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=f5d9aa166aa48df8dca05fce2b6c3915 - -fn bench_header_parsing(c: &mut Criterion) { - c.bench_function("Original (Unsound) [short]", |b| { - b.iter(|| { - let mut buf = BytesMut::from(REQ_SHORT); - _original::parse_headers(&mut buf); - }) - }); - - c.bench_function("New (safe) [short]", |b| { - b.iter(|| { - let mut buf = BytesMut::from(REQ_SHORT); - _new::parse_headers(&mut buf); - }) - }); - - c.bench_function("Original (Unsound) [realistic]", |b| { - b.iter(|| { - let mut buf = BytesMut::from(REQ); - _original::parse_headers(&mut buf); - }) - }); - - c.bench_function("New (safe) [realistic]", |b| { - b.iter(|| { - let mut buf = BytesMut::from(REQ); - _new::parse_headers(&mut buf); - }) - }); -} - -criterion_group!(benches, bench_header_parsing); -criterion_main!(benches); - -const MAX_HEADERS: usize = 96; - -const EMPTY_HEADER_ARRAY: [httparse::Header<'static>; MAX_HEADERS] = - [httparse::EMPTY_HEADER; MAX_HEADERS]; - -#[derive(Clone, Copy)] -struct HeaderIndex { - name: (usize, usize), - value: (usize, usize), -} - -const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex { - name: (0, 0), - value: (0, 0), -}; - -const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] = - [EMPTY_HEADER_INDEX; MAX_HEADERS]; - -impl HeaderIndex { - fn record( - bytes: &[u8], - headers: &[httparse::Header<'_>], - indices: &mut [HeaderIndex], - ) { - let bytes_ptr = bytes.as_ptr() as usize; - for (header, indices) in headers.iter().zip(indices.iter_mut()) { - let name_start = header.name.as_ptr() as usize - bytes_ptr; - let name_end = name_start + header.name.len(); - indices.name = 
(name_start, name_end); - let value_start = header.value.as_ptr() as usize - bytes_ptr; - let value_end = value_start + header.value.len(); - indices.value = (value_start, value_end); - } - } -} - -// test cases taken from: -// https://github.com/seanmonstar/httparse/blob/master/benches/parse.rs - -const REQ_SHORT: &'static [u8] = b"\ -GET / HTTP/1.0\r\n\ -Host: example.com\r\n\ -Cookie: session=60; user_id=1\r\n\r\n"; - -const REQ: &'static [u8] = b"\ -GET /wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg HTTP/1.1\r\n\ -Host: www.kittyhell.com\r\n\ -User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 Pathtraq/0.9\r\n\ -Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n\ -Accept-Language: ja,en-us;q=0.7,en;q=0.3\r\n\ -Accept-Encoding: gzip,deflate\r\n\ -Accept-Charset: Shift_JIS,utf-8;q=0.7,*;q=0.7\r\n\ -Keep-Alive: 115\r\n\ -Connection: keep-alive\r\n\ -Cookie: wp_ozh_wsa_visits=2; wp_ozh_wsa_visit_lasttime=xxxxxxxxxx; __utma=xxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.x; __utmz=xxxxxxxxx.xxxxxxxxxx.x.x.utmccn=(referral)|utmcsr=reader.livedoor.com|utmcct=/reader/|utmcmd=referral|padding=under256\r\n\r\n"; - -mod _new { - use super::*; - - pub fn parse_headers(src: &mut BytesMut) -> usize { - let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY; - let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY; - - let mut req = httparse::Request::new(&mut parsed); - match req.parse(src).unwrap() { - httparse::Status::Complete(_len) => { - HeaderIndex::record(src, req.headers, &mut headers); - req.headers.len() - } - _ => unreachable!(), - } - } -} - -mod _original { - use super::*; - - use std::mem::MaybeUninit; - - pub fn parse_headers(src: &mut BytesMut) -> usize { - let mut headers: [HeaderIndex; MAX_HEADERS] = - unsafe { MaybeUninit::uninit().assume_init() }; - - let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = - unsafe { MaybeUninit::uninit().assume_init() }; - - let mut req = httparse::Request::new(&mut parsed); - match req.parse(src).unwrap() { - httparse::Status::Complete(_len) => { - HeaderIndex::record(src, req.headers, &mut headers); - req.headers.len() - } - _ => unreachable!(), - } - } -} diff --git a/actix-http/benches/write-camel-case.rs b/actix-http/benches/write-camel-case.rs deleted file mode 100644 index fa4930eb9..000000000 --- a/actix-http/benches/write-camel-case.rs +++ /dev/null @@ -1,89 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; - -fn bench_write_camel_case(c: &mut Criterion) { - let mut group = c.benchmark_group("write_camel_case"); - - let names = ["connection", "Transfer-Encoding", "transfer-encoding"]; - - for &i in &names { - let bts = i.as_bytes(); - - group.bench_with_input(BenchmarkId::new("Original", i), bts, |b, bts| { - b.iter(|| { - let mut buf = black_box([0; 24]); - _original::write_camel_case(black_box(bts), &mut buf) - }); - }); - - group.bench_with_input(BenchmarkId::new("New", i), bts, |b, bts| { - b.iter(|| { - let mut buf = black_box([0; 24]); - _new::write_camel_case(black_box(bts), &mut buf) - }); - }); - } - - group.finish(); -} - -criterion_group!(benches, bench_write_camel_case); -criterion_main!(benches); - -mod _new { - pub fn write_camel_case(value: &[u8], buffer: &mut [u8]) { - // first copy entire (potentially wrong) slice to output - buffer[..value.len()].copy_from_slice(value); - - let mut iter = value.iter(); - - // first character should be 
uppercase - if let Some(c @ b'a'..=b'z') = iter.next() { - buffer[0] = c & 0b1101_1111; - } - - // track 1 ahead of the current position since that's the location being assigned to - let mut index = 2; - - // remaining characters after hyphens should also be uppercase - while let Some(&c) = iter.next() { - if c == b'-' { - // advance iter by one and uppercase if needed - if let Some(c @ b'a'..=b'z') = iter.next() { - buffer[index] = c & 0b1101_1111; - } - } - - index += 1; - } - } -} - -mod _original { - pub fn write_camel_case(value: &[u8], buffer: &mut [u8]) { - let mut index = 0; - let key = value; - let mut key_iter = key.iter(); - - if let Some(c) = key_iter.next() { - if *c >= b'a' && *c <= b'z' { - buffer[index] = *c ^ b' '; - index += 1; - } - } else { - return; - } - - while let Some(c) = key_iter.next() { - buffer[index] = *c; - index += 1; - if *c == b'-' { - if let Some(c) = key_iter.next() { - if *c >= b'a' && *c <= b'z' { - buffer[index] = *c ^ b' '; - index += 1; - } - } - } - } - } -} diff --git a/actix-http/examples/actix-web.rs b/actix-http/examples/actix-web.rs new file mode 100644 index 000000000..449e5899b --- /dev/null +++ b/actix-http/examples/actix-web.rs @@ -0,0 +1,27 @@ +use actix_http::HttpService; +use actix_server::Server; +use actix_service::map_config; +use actix_web::{dev::AppConfig, get, App}; + +#[get("/")] +async fn index() -> &'static str { + "Hello, world. From Actix Web!" +} + +#[tokio::main(flavor = "current_thread")] +async fn main() -> std::io::Result<()> { + Server::build() + .bind("hello-world", "127.0.0.1:8080", || { + // construct actix-web app + let app = App::new().service(index); + + HttpService::build() + // pass the app to service builder + // map_config is used to map App's configuration to ServiceBuilder + // h1 will configure server to only use HTTP/1.1 + .h1(map_config(app, |_| AppConfig::default())) + .tcp() + })? + .run() + .await +} diff --git a/actix-http/examples/bench.rs b/actix-http/examples/bench.rs new file mode 100644 index 000000000..e41c0bb4f --- /dev/null +++ b/actix-http/examples/bench.rs @@ -0,0 +1,27 @@ +use std::{convert::Infallible, io, time::Duration}; + +use actix_http::{HttpService, Request, Response, StatusCode}; +use actix_server::Server; +use once_cell::sync::Lazy; + +static STR: Lazy = Lazy::new(|| "HELLO WORLD ".repeat(20)); + +#[actix_rt::main] +async fn main() -> io::Result<()> { + env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); + + Server::build() + .bind("dispatcher-benchmark", ("127.0.0.1", 8080), || { + HttpService::build() + .client_request_timeout(Duration::from_secs(1)) + .finish(|_: Request| async move { + let mut res = Response::build(StatusCode::OK); + Ok::<_, Infallible>(res.body(&**STR)) + }) + .tcp() + })? 
+ // limiting number of workers so that bench client is not sharing as many resources + .workers(4) + .run() + .await +} diff --git a/actix-http/examples/echo.rs b/actix-http/examples/echo.rs index 54a71a106..ae6f00cce 100644 --- a/actix-http/examples/echo.rs +++ b/actix-http/examples/echo.rs @@ -1,21 +1,22 @@ -use std::io; +use std::{io, time::Duration}; -use actix_http::{http::StatusCode, Error, HttpService, Request, Response}; +use actix_http::{Error, HttpService, Request, Response, StatusCode}; use actix_server::Server; use bytes::BytesMut; use futures_util::StreamExt as _; use http::header::HeaderValue; -use log::info; +use tracing::info; #[actix_rt::main] async fn main() -> io::Result<()> { env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); Server::build() - .bind("echo", "127.0.0.1:8080", || { + .bind("echo", ("127.0.0.1", 8080), || { HttpService::build() - .client_timeout(1000) - .client_disconnect(1000) + .client_request_timeout(Duration::from_secs(1)) + .client_disconnect_timeout(Duration::from_secs(1)) + // handles HTTP/1.1 and HTTP/2 .finish(|mut req: Request| async move { let mut body = BytesMut::new(); while let Some(item) = req.payload().next().await { @@ -23,15 +24,14 @@ async fn main() -> io::Result<()> { } info!("request body: {:?}", body); - Ok::<_, Error>( - Response::build(StatusCode::OK) - .insert_header(( - "x-head", - HeaderValue::from_static("dummy value!"), - )) - .body(body), - ) + + let res = Response::build(StatusCode::OK) + .insert_header(("x-head", HeaderValue::from_static("dummy value!"))) + .body(body); + + Ok::<_, Error>(res) }) + // No TLS .tcp() })? .run() diff --git a/actix-http/examples/echo2.rs b/actix-http/examples/echo2.rs index 3974cf20b..605572d8b 100644 --- a/actix-http/examples/echo2.rs +++ b/actix-http/examples/echo2.rs @@ -1,31 +1,34 @@ use std::io; -use actix_http::{body::Body, http::HeaderValue, http::StatusCode}; -use actix_http::{Error, HttpService, Request, Response}; -use actix_server::Server; -use bytes::BytesMut; -use futures_util::StreamExt as _; -use log::info; +use actix_http::{ + body::{BodyStream, MessageBody}, + header, Error, HttpMessage, HttpService, Request, Response, StatusCode, +}; -async fn handle_request(mut req: Request) -> Result, Error> { - let mut body = BytesMut::new(); - while let Some(item) = req.payload().next().await { - body.extend_from_slice(&item?) +async fn handle_request(mut req: Request) -> Result, Error> { + let mut res = Response::build(StatusCode::OK); + + if let Some(ct) = req.headers().get(header::CONTENT_TYPE) { + res.insert_header((header::CONTENT_TYPE, ct)); } - info!("request body: {:?}", body); - Ok(Response::build(StatusCode::OK) - .insert_header(("x-head", HeaderValue::from_static("dummy value!"))) - .body(body)) + // echo request payload stream as (chunked) response body + let res = res.message_body(BodyStream::new(req.payload().take()))?; + + Ok(res) } #[actix_rt::main] async fn main() -> io::Result<()> { env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); - Server::build() - .bind("echo", "127.0.0.1:8080", || { - HttpService::build().finish(handle_request).tcp() + actix_server::Server::build() + .bind("echo", ("127.0.0.1", 8080), || { + HttpService::build() + // handles HTTP/1.1 only + .h1(handle_request) + // No TLS + .tcp() })? 
.run() .await diff --git a/actix-http/examples/h2c-detect.rs b/actix-http/examples/h2c-detect.rs new file mode 100644 index 000000000..b0bde3fe6 --- /dev/null +++ b/actix-http/examples/h2c-detect.rs @@ -0,0 +1,34 @@ +//! An example that supports automatic selection of plaintext h1/h2c connections. +//! +//! Notably, both the following commands will work. +//! ```console +//! $ curl --http1.1 'http://localhost:8080/' +//! $ curl --http2-prior-knowledge 'http://localhost:8080/' +//! ``` + +use std::{convert::Infallible, io}; + +use actix_http::{body::BodyStream, HttpService, Request, Response, StatusCode}; +use actix_server::Server; + +#[tokio::main(flavor = "current_thread")] +async fn main() -> io::Result<()> { + env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); + + Server::build() + .bind("h2c-detect", ("127.0.0.1", 8080), || { + HttpService::build() + .finish(|_req: Request| async move { + Ok::<_, Infallible>(Response::build(StatusCode::OK).body(BodyStream::new( + futures_util::stream::iter([ + Ok::<_, String>("123".into()), + Err("wertyuikmnbvcxdfty6t".to_owned()), + ]), + ))) + }) + .tcp_auto_h2c() + })? + .workers(2) + .run() + .await +} diff --git a/actix-http/examples/h2spec.rs b/actix-http/examples/h2spec.rs new file mode 100644 index 000000000..4ab426c6c --- /dev/null +++ b/actix-http/examples/h2spec.rs @@ -0,0 +1,25 @@ +use std::{convert::Infallible, io}; + +use actix_http::{HttpService, Request, Response, StatusCode}; +use actix_server::Server; +use once_cell::sync::Lazy; + +static STR: Lazy = Lazy::new(|| "HELLO WORLD ".repeat(100)); + +#[actix_rt::main] +async fn main() -> io::Result<()> { + env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); + + Server::build() + .bind("h2spec", ("127.0.0.1", 8080), || { + HttpService::build() + .h2(|_: Request| async move { + let mut res = Response::build(StatusCode::OK); + Ok::<_, Infallible>(res.body(&**STR)) + }) + .tcp() + })? 
+ .workers(4) + .run() + .await +} diff --git a/actix-http/examples/hello-world.rs b/actix-http/examples/hello-world.rs index d51de6f4e..cf10beddf 100644 --- a/actix-http/examples/hello-world.rs +++ b/actix-http/examples/hello-world.rs @@ -1,28 +1,31 @@ -use std::io; +use std::{convert::Infallible, io, time::Duration}; -use actix_http::{http::StatusCode, HttpService, Response}; +use actix_http::{header::HeaderValue, HttpService, Request, Response, StatusCode}; use actix_server::Server; -use actix_utils::future; -use http::header::HeaderValue; -use log::info; +use tracing::info; #[actix_rt::main] async fn main() -> io::Result<()> { env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); Server::build() - .bind("hello-world", "127.0.0.1:8080", || { + .bind("hello-world", ("127.0.0.1", 8080), || { HttpService::build() - .client_timeout(1000) - .client_disconnect(1000) - .finish(|_req| { - info!("{:?}", _req); + .client_request_timeout(Duration::from_secs(1)) + .client_disconnect_timeout(Duration::from_secs(1)) + .on_connect_ext(|_, ext| { + ext.insert(42u32); + }) + .finish(|req: Request| async move { + info!("{:?}", req); + let mut res = Response::build(StatusCode::OK); - res.insert_header(( - "x-head", - HeaderValue::from_static("dummy value!"), - )); - future::ok::<_, ()>(res.body("Hello world!")) + res.insert_header(("x-head", HeaderValue::from_static("dummy value!"))); + + let forty_two = req.conn_data::().unwrap().to_string(); + res.insert_header(("x-forty-two", HeaderValue::from_str(&forty_two).unwrap())); + + Ok::<_, Infallible>(res.body("Hello world!")) }) .tcp() })? diff --git a/actix-http/examples/streaming-error.rs b/actix-http/examples/streaming-error.rs new file mode 100644 index 000000000..8c8a249cb --- /dev/null +++ b/actix-http/examples/streaming-error.rs @@ -0,0 +1,41 @@ +//! Example showing response body (chunked) stream erroring. +//! +//! Test using `nc` or `curl`. +//! ```sh +//! $ curl -vN 127.0.0.1:8080 +//! $ echo 'GET / HTTP/1.1\n\n' | nc 127.0.0.1 8080 +//! ``` + +use std::{convert::Infallible, io, time::Duration}; + +use actix_http::{body::BodyStream, HttpService, Response}; +use actix_server::Server; +use async_stream::stream; +use bytes::Bytes; +use tracing::info; + +#[actix_rt::main] +async fn main() -> io::Result<()> { + env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); + + Server::build() + .bind("streaming-error", ("127.0.0.1", 8080), || { + HttpService::build() + .finish(|req| async move { + info!("{:?}", req); + let res = Response::ok(); + + Ok::<_, Infallible>(res.set_body(BodyStream::new(stream! { + yield Ok(Bytes::from("123")); + yield Ok(Bytes::from("456")); + + actix_rt::time::sleep(Duration::from_millis(1000)).await; + + yield Err(io::Error::new(io::ErrorKind::Other, "")); + }))) + }) + .tcp() + })? + .run() + .await +} diff --git a/actix-http/examples/tls_rustls.rs b/actix-http/examples/tls_rustls.rs new file mode 100644 index 000000000..17303c556 --- /dev/null +++ b/actix-http/examples/tls_rustls.rs @@ -0,0 +1,76 @@ +//! Demonstrates TLS configuration (via Rustls) for HTTP/1.1 and HTTP/2 connections. +//! +//! Test using cURL: +//! +//! ```console +//! $ curl --insecure https://127.0.0.1:8443 +//! Hello World! +//! Protocol: HTTP/2.0 +//! +//! $ curl --insecure --http1.1 https://127.0.0.1:8443 +//! Hello World! +//! Protocol: HTTP/1.1 +//! 
``` + +extern crate tls_rustls_023 as rustls; + +use std::io; + +use actix_http::{Error, HttpService, Request, Response}; +use actix_utils::future::ok; + +#[actix_rt::main] +async fn main() -> io::Result<()> { + env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); + + tracing::info!("starting HTTP server at https://127.0.0.1:8443"); + + actix_server::Server::build() + .bind("echo", ("127.0.0.1", 8443), || { + HttpService::build() + .finish(|req: Request| { + let body = format!( + "Hello World!\n\ + Protocol: {:?}", + req.head().version + ); + ok::<_, Error>(Response::ok().set_body(body)) + }) + .rustls_0_23(rustls_config()) + })? + .run() + .await +} + +fn rustls_config() -> rustls::ServerConfig { + let rcgen::CertifiedKey { cert, key_pair } = + rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap(); + let cert_file = cert.pem(); + let key_file = key_pair.serialize_pem(); + + let cert_file = &mut io::BufReader::new(cert_file.as_bytes()); + let key_file = &mut io::BufReader::new(key_file.as_bytes()); + + let cert_chain = rustls_pemfile::certs(cert_file) + .collect::, _>>() + .unwrap(); + let mut keys = rustls_pemfile::pkcs8_private_keys(key_file) + .collect::, _>>() + .unwrap(); + + let mut config = rustls::ServerConfig::builder() + .with_no_client_auth() + .with_single_cert( + cert_chain, + rustls::pki_types::PrivateKeyDer::Pkcs8(keys.remove(0)), + ) + .unwrap(); + + const H1_ALPN: &[u8] = b"http/1.1"; + const H2_ALPN: &[u8] = b"h2"; + + config.alpn_protocols.push(H2_ALPN.to_vec()); + config.alpn_protocols.push(H1_ALPN.to_vec()); + + config +} diff --git a/actix-http/examples/ws.rs b/actix-http/examples/ws.rs index d3cedf870..fb86bc5ea 100644 --- a/actix-http/examples/ws.rs +++ b/actix-http/examples/ws.rs @@ -1,7 +1,7 @@ //! Sets up a WebSocket server over TCP and TLS. //! Sends a heartbeat message every 4 seconds but does not respond to any incoming frames. -extern crate tls_rustls as rustls; +extern crate tls_rustls_023 as rustls; use std::{ io, @@ -10,13 +10,14 @@ use std::{ time::Duration, }; -use actix_codec::Encoder; use actix_http::{body::BodyStream, error::Error, ws, HttpService, Request, Response}; use actix_rt::time::{interval, Interval}; use actix_server::Server; use bytes::{Bytes, BytesMut}; use bytestring::ByteString; use futures_core::{ready, Stream}; +use tokio_util::codec::Encoder; +use tracing::{info, trace}; #[actix_rt::main] async fn main() -> io::Result<()> { @@ -27,20 +28,22 @@ async fn main() -> io::Result<()> { HttpService::build().h1(handler).tcp() })? .bind("tls", ("127.0.0.1", 8443), || { - HttpService::build().finish(handler).rustls(tls_config()) + HttpService::build() + .finish(handler) + .rustls_0_23(tls_config()) })? .run() .await } async fn handler(req: Request) -> Result>, Error> { - log::info!("handshaking"); + info!("handshaking"); let mut res = ws::handshake(req.head())?; // handshake will always fail under HTTP/2 - log::info!("responding"); - Ok(res.message_body(BodyStream::new(Heartbeat::new(ws::Codec::new())))?) 
+ info!("responding"); + res.message_body(BodyStream::new(Heartbeat::new(ws::Codec::new()))) } struct Heartbeat { @@ -60,11 +63,8 @@ impl Heartbeat { impl Stream for Heartbeat { type Item = Result; - fn poll_next( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - log::trace!("poll"); + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + trace!("poll"); ready!(self.as_mut().interval.poll_tick(cx)); @@ -85,22 +85,31 @@ impl Stream for Heartbeat { fn tls_config() -> rustls::ServerConfig { use std::io::BufReader; - use rustls::{ - internal::pemfile::{certs, pkcs8_private_keys}, - NoClientAuth, ServerConfig, - }; + use rustls_pemfile::{certs, pkcs8_private_keys}; - let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_owned()]).unwrap(); - let cert_file = cert.serialize_pem().unwrap(); - let key_file = cert.serialize_private_key_pem(); + let rcgen::CertifiedKey { cert, key_pair } = + rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap(); + let cert_file = cert.pem(); + let key_file = key_pair.serialize_pem(); - let mut config = ServerConfig::new(NoClientAuth::new()); let cert_file = &mut BufReader::new(cert_file.as_bytes()); let key_file = &mut BufReader::new(key_file.as_bytes()); - let cert_chain = certs(cert_file).unwrap(); - let mut keys = pkcs8_private_keys(key_file).unwrap(); - config.set_single_cert(cert_chain, keys.remove(0)).unwrap(); + let cert_chain = certs(cert_file).collect::, _>>().unwrap(); + let mut keys = pkcs8_private_keys(key_file) + .collect::, _>>() + .unwrap(); + + let mut config = rustls::ServerConfig::builder() + .with_no_client_auth() + .with_single_cert( + cert_chain, + rustls::pki_types::PrivateKeyDer::Pkcs8(keys.remove(0)), + ) + .unwrap(); + + config.alpn_protocols.push(b"http/1.1".to_vec()); + config.alpn_protocols.push(b"h2".to_vec()); config } diff --git a/actix-http/rustfmt.toml b/actix-http/rustfmt.toml deleted file mode 100644 index 5fcaaca0f..000000000 --- a/actix-http/rustfmt.toml +++ /dev/null @@ -1,5 +0,0 @@ -max_width = 89 -reorder_imports = true -#wrap_comments = true -#fn_args_density = "Compressed" -#use_small_heuristics = false diff --git a/actix-http/src/body/body.rs b/actix-http/src/body/body.rs deleted file mode 100644 index 3fda8ae11..000000000 --- a/actix-http/src/body/body.rs +++ /dev/null @@ -1,230 +0,0 @@ -use std::{ - borrow::Cow, - error::Error as StdError, - fmt, mem, - pin::Pin, - task::{Context, Poll}, -}; - -use bytes::{Bytes, BytesMut}; -use futures_core::{ready, Stream}; - -use crate::error::Error; - -use super::{BodySize, BodyStream, MessageBody, MessageBodyMapErr, SizedStream}; - -pub type Body = AnyBody; - -/// Represents various types of HTTP message body. -pub enum AnyBody { - /// Empty response. `Content-Length` header is not set. - None, - - /// Zero sized response body. `Content-Length` header is set to `0`. - Empty, - - /// Specific response body. - Bytes(Bytes), - - /// Generic message body. - Message(BoxAnyBody), -} - -impl AnyBody { - /// Create body from slice (copy) - pub fn from_slice(s: &[u8]) -> Self { - Self::Bytes(Bytes::copy_from_slice(s)) - } - - /// Create body from generic message body. 
- pub fn from_message(body: B) -> Self - where - B: MessageBody + 'static, - B::Error: Into>, - { - Self::Message(BoxAnyBody::from_body(body)) - } -} - -impl MessageBody for AnyBody { - type Error = Error; - - fn size(&self) -> BodySize { - match self { - AnyBody::None => BodySize::None, - AnyBody::Empty => BodySize::Empty, - AnyBody::Bytes(ref bin) => BodySize::Sized(bin.len() as u64), - AnyBody::Message(ref body) => body.size(), - } - } - - fn poll_next( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - match self.get_mut() { - AnyBody::None => Poll::Ready(None), - AnyBody::Empty => Poll::Ready(None), - AnyBody::Bytes(ref mut bin) => { - let len = bin.len(); - if len == 0 { - Poll::Ready(None) - } else { - Poll::Ready(Some(Ok(mem::take(bin)))) - } - } - - // TODO: MSRV 1.51: poll_map_err - AnyBody::Message(body) => match ready!(body.as_pin_mut().poll_next(cx)) { - Some(Err(err)) => Poll::Ready(Some(Err(err.into()))), - Some(Ok(val)) => Poll::Ready(Some(Ok(val))), - None => Poll::Ready(None), - }, - } - } -} - -impl PartialEq for AnyBody { - fn eq(&self, other: &Body) -> bool { - match *self { - AnyBody::None => matches!(*other, AnyBody::None), - AnyBody::Empty => matches!(*other, AnyBody::Empty), - AnyBody::Bytes(ref b) => match *other { - AnyBody::Bytes(ref b2) => b == b2, - _ => false, - }, - AnyBody::Message(_) => false, - } - } -} - -impl fmt::Debug for AnyBody { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - AnyBody::None => write!(f, "AnyBody::None"), - AnyBody::Empty => write!(f, "AnyBody::Empty"), - AnyBody::Bytes(ref b) => write!(f, "AnyBody::Bytes({:?})", b), - AnyBody::Message(_) => write!(f, "AnyBody::Message(_)"), - } - } -} - -impl From<&'static str> for AnyBody { - fn from(s: &'static str) -> Body { - AnyBody::Bytes(Bytes::from_static(s.as_ref())) - } -} - -impl From<&'static [u8]> for AnyBody { - fn from(s: &'static [u8]) -> Body { - AnyBody::Bytes(Bytes::from_static(s)) - } -} - -impl From> for AnyBody { - fn from(vec: Vec) -> Body { - AnyBody::Bytes(Bytes::from(vec)) - } -} - -impl From for AnyBody { - fn from(s: String) -> Body { - s.into_bytes().into() - } -} - -impl From<&'_ String> for AnyBody { - fn from(s: &String) -> Body { - AnyBody::Bytes(Bytes::copy_from_slice(AsRef::<[u8]>::as_ref(&s))) - } -} - -impl From> for AnyBody { - fn from(s: Cow<'_, str>) -> Body { - match s { - Cow::Owned(s) => AnyBody::from(s), - Cow::Borrowed(s) => { - AnyBody::Bytes(Bytes::copy_from_slice(AsRef::<[u8]>::as_ref(s))) - } - } - } -} - -impl From for AnyBody { - fn from(s: Bytes) -> Body { - AnyBody::Bytes(s) - } -} - -impl From for AnyBody { - fn from(s: BytesMut) -> Body { - AnyBody::Bytes(s.freeze()) - } -} - -impl From> for AnyBody -where - S: Stream> + 'static, -{ - fn from(s: SizedStream) -> Body { - AnyBody::from_message(s) - } -} - -impl From> for AnyBody -where - S: Stream> + 'static, - E: Into + 'static, -{ - fn from(s: BodyStream) -> Body { - AnyBody::from_message(s) - } -} - -/// A boxed message body with boxed errors. -pub struct BoxAnyBody(Pin>>>); - -impl BoxAnyBody { - /// Boxes a `MessageBody` and any errors it generates. - pub fn from_body(body: B) -> Self - where - B: MessageBody + 'static, - B::Error: Into>, - { - let body = MessageBodyMapErr::new(body, Into::into); - Self(Box::pin(body)) - } - - /// Returns a mutable pinned reference to the inner message body type. 
- pub fn as_pin_mut( - &mut self, - ) -> Pin<&mut (dyn MessageBody>)> { - self.0.as_mut() - } -} - -impl fmt::Debug for BoxAnyBody { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("BoxAnyBody(dyn MessageBody)") - } -} - -impl MessageBody for BoxAnyBody { - type Error = Error; - - fn size(&self) -> BodySize { - self.0.size() - } - - fn poll_next( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - // TODO: MSRV 1.51: poll_map_err - match ready!(self.0.as_mut().poll_next(cx)) { - Some(Err(err)) => Poll::Ready(Some(Err(err.into()))), - Some(Ok(val)) => Poll::Ready(Some(Ok(val))), - None => Poll::Ready(None), - } - } -} diff --git a/actix-http/src/body/body_stream.rs b/actix-http/src/body/body_stream.rs index ebe872022..4574b2519 100644 --- a/actix-http/src/body/body_stream.rs +++ b/actix-http/src/body/body_stream.rs @@ -1,4 +1,5 @@ use std::{ + error::Error as StdError, pin::Pin, task::{Context, Poll}, }; @@ -7,8 +8,6 @@ use bytes::Bytes; use futures_core::{ready, Stream}; use pin_project_lite::pin_project; -use crate::error::Error; - use super::{BodySize, MessageBody}; pin_project! { @@ -21,11 +20,14 @@ pin_project! { } } +// TODO: from_infallible method + impl BodyStream where S: Stream>, - E: Into, + E: Into> + 'static, { + #[inline] pub fn new(stream: S) -> Self { BodyStream { stream } } @@ -34,19 +36,19 @@ where impl MessageBody for BodyStream where S: Stream>, - E: Into, + E: Into> + 'static, { - type Error = Error; + type Error = E; + #[inline] fn size(&self) -> BodySize { BodySize::Stream } /// Attempts to pull out the next value of the underlying [`Stream`]. /// - /// Empty values are skipped to prevent [`BodyStream`]'s transmission being - /// ended on a zero-length chunk, but rather proceed until the underlying - /// [`Stream`] ends. + /// Empty values are skipped to prevent [`BodyStream`]'s transmission being ended on a + /// zero-length chunk, but rather proceed until the underlying [`Stream`] ends. 
fn poll_next( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -56,7 +58,7 @@ where let chunk = match ready!(stream.poll_next(cx)) { Some(Ok(ref bytes)) if bytes.is_empty() => continue, - opt => opt.map(|res| res.map_err(Into::into)), + opt => opt, }; return Poll::Ready(chunk); @@ -66,19 +68,39 @@ where #[cfg(test)] mod tests { - use actix_rt::pin; + use std::{convert::Infallible, time::Duration}; + + use actix_rt::{ + pin, + time::{sleep, Sleep}, + }; use actix_utils::future::poll_fn; - use futures_util::stream; + use derive_more::{Display, Error}; + use futures_core::ready; + use futures_util::{stream, FutureExt as _}; + use pin_project_lite::pin_project; + use static_assertions::{assert_impl_all, assert_not_impl_any}; use super::*; use crate::body::to_bytes; + assert_impl_all!(BodyStream>>: MessageBody); + assert_impl_all!(BodyStream>>: MessageBody); + assert_impl_all!(BodyStream>>: MessageBody); + assert_impl_all!(BodyStream>>: MessageBody); + assert_impl_all!(BodyStream>>: MessageBody); + + assert_not_impl_any!(BodyStream>: MessageBody); + assert_not_impl_any!(BodyStream>: MessageBody); + // crate::Error is not Clone + assert_not_impl_any!(BodyStream>>: MessageBody); + #[actix_rt::test] async fn skips_empty_chunks() { let body = BodyStream::new(stream::iter( ["1", "", "2"] .iter() - .map(|&v| Ok(Bytes::from(v)) as Result), + .map(|&v| Ok::<_, Infallible>(Bytes::from(v))), )); pin!(body); @@ -103,9 +125,90 @@ mod tests { let body = BodyStream::new(stream::iter( ["1", "", "2"] .iter() - .map(|&v| Ok(Bytes::from(v)) as Result), + .map(|&v| Ok::<_, Infallible>(Bytes::from(v))), )); assert_eq!(to_bytes(body).await.ok(), Some(Bytes::from("12"))); } + #[derive(Debug, Display, Error)] + #[display(fmt = "stream error")] + struct StreamErr; + + #[actix_rt::test] + async fn stream_immediate_error() { + let body = BodyStream::new(stream::once(async { Err(StreamErr) })); + assert!(matches!(to_bytes(body).await, Err(StreamErr))); + } + + #[actix_rt::test] + async fn stream_string_error() { + // `&'static str` does not impl `Error` + // but it does impl `Into>` + + let body = BodyStream::new(stream::once(async { Err("stringy error") })); + assert!(matches!(to_bytes(body).await, Err("stringy error"))); + } + + #[actix_rt::test] + async fn stream_boxed_error() { + // `Box` does not impl `Error` + // but it does impl `Into>` + + let body = BodyStream::new(stream::once(async { + Err(Box::::from("stringy error")) + })); + + assert_eq!( + to_bytes(body).await.unwrap_err().to_string(), + "stringy error" + ); + } + + #[actix_rt::test] + async fn stream_delayed_error() { + let body = BodyStream::new(stream::iter(vec![Ok(Bytes::from("1")), Err(StreamErr)])); + assert!(matches!(to_bytes(body).await, Err(StreamErr))); + + pin_project! 
{ + #[derive(Debug)] + #[project = TimeDelayStreamProj] + enum TimeDelayStream { + Start, + Sleep { delay: Pin> }, + Done, + } + } + + impl Stream for TimeDelayStream { + type Item = Result; + + fn poll_next( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + match self.as_mut().get_mut() { + TimeDelayStream::Start => { + let sleep = sleep(Duration::from_millis(1)); + self.as_mut().set(TimeDelayStream::Sleep { + delay: Box::pin(sleep), + }); + cx.waker().wake_by_ref(); + Poll::Pending + } + + TimeDelayStream::Sleep { ref mut delay } => { + ready!(delay.poll_unpin(cx)); + self.set(TimeDelayStream::Done); + cx.waker().wake_by_ref(); + Poll::Pending + } + + TimeDelayStream::Done => Poll::Ready(Some(Err(StreamErr))), + } + } + } + + let body = BodyStream::new(TimeDelayStream::Start); + assert!(matches!(to_bytes(body).await, Err(StreamErr))); + } } diff --git a/actix-http/src/body/boxed.rs b/actix-http/src/body/boxed.rs new file mode 100644 index 000000000..0151ff3a5 --- /dev/null +++ b/actix-http/src/body/boxed.rs @@ -0,0 +1,121 @@ +use std::{ + error::Error as StdError, + fmt, + pin::Pin, + task::{Context, Poll}, +}; + +use bytes::Bytes; + +use super::{BodySize, MessageBody, MessageBodyMapErr}; +use crate::body; + +/// A boxed message body with boxed errors. +#[derive(Debug)] +pub struct BoxBody(BoxBodyInner); + +enum BoxBodyInner { + None(body::None), + Bytes(Bytes), + Stream(Pin>>>), +} + +impl fmt::Debug for BoxBodyInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::None(arg0) => f.debug_tuple("None").field(arg0).finish(), + Self::Bytes(arg0) => f.debug_tuple("Bytes").field(arg0).finish(), + Self::Stream(_) => f.debug_tuple("Stream").field(&"dyn MessageBody").finish(), + } + } +} + +impl BoxBody { + /// Boxes body type, erasing type information. + /// + /// If the body type to wrap is unknown or generic it is better to use [`MessageBody::boxed`] to + /// avoid double boxing. + #[inline] + pub fn new(body: B) -> Self + where + B: MessageBody + 'static, + { + match body.size() { + BodySize::None => Self(BoxBodyInner::None(body::None)), + _ => match body.try_into_bytes() { + Ok(bytes) => Self(BoxBodyInner::Bytes(bytes)), + Err(body) => { + let body = MessageBodyMapErr::new(body, Into::into); + Self(BoxBodyInner::Stream(Box::pin(body))) + } + }, + } + } + + /// Returns a mutable pinned reference to the inner message body type. 
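For reference, a minimal caller-side sketch of the new `BoxBody` type introduced in this file: `MessageBody::boxed` erases the concrete body type, and re-boxing is a no-op, so generic code can box unconditionally. Only items added in this diff plus `bytes` are used.

```rust
use actix_http::body::{BodySize, BoxBody, MessageBody as _};
use bytes::Bytes;

fn main() {
    // Erase the concrete body type.
    let body: BoxBody = Bytes::from_static(b"abc").boxed();
    assert_eq!(body.size(), BodySize::Sized(3));

    // Boxing an already-boxed body is a no-op, so there is no double boxing.
    let body: BoxBody = body.boxed();
    assert_eq!(body.size(), BodySize::Sized(3));
}
```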
+ #[inline] + pub fn as_pin_mut(&mut self) -> Pin<&mut Self> { + Pin::new(self) + } +} + +impl MessageBody for BoxBody { + type Error = Box; + + #[inline] + fn size(&self) -> BodySize { + match &self.0 { + BoxBodyInner::None(none) => none.size(), + BoxBodyInner::Bytes(bytes) => bytes.size(), + BoxBodyInner::Stream(stream) => stream.size(), + } + } + + #[inline] + fn poll_next( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + match &mut self.0 { + BoxBodyInner::None(body) => Pin::new(body).poll_next(cx).map_err(|err| match err {}), + BoxBodyInner::Bytes(body) => Pin::new(body).poll_next(cx).map_err(|err| match err {}), + BoxBodyInner::Stream(body) => Pin::new(body).poll_next(cx), + } + } + + #[inline] + fn try_into_bytes(self) -> Result { + match self.0 { + BoxBodyInner::None(body) => Ok(body.try_into_bytes().unwrap()), + BoxBodyInner::Bytes(body) => Ok(body.try_into_bytes().unwrap()), + _ => Err(self), + } + } + + #[inline] + fn boxed(self) -> BoxBody { + self + } +} + +#[cfg(test)] +mod tests { + use static_assertions::{assert_impl_all, assert_not_impl_any}; + + use super::*; + use crate::body::to_bytes; + + assert_impl_all!(BoxBody: fmt::Debug, MessageBody, Unpin); + assert_not_impl_any!(BoxBody: Send, Sync); + + #[actix_rt::test] + async fn nested_boxed_body() { + let body = Bytes::from_static(&[1, 2, 3]); + let boxed_body = BoxBody::new(BoxBody::new(body)); + + assert_eq!( + to_bytes(boxed_body).await.unwrap(), + Bytes::from(vec![1, 2, 3]), + ); + } +} diff --git a/actix-http/src/body/either.rs b/actix-http/src/body/either.rs new file mode 100644 index 000000000..92bd89984 --- /dev/null +++ b/actix-http/src/body/either.rs @@ -0,0 +1,122 @@ +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + +use bytes::Bytes; +use pin_project_lite::pin_project; + +use super::{BodySize, BoxBody, MessageBody}; +use crate::Error; + +pin_project! { + /// An "either" type specialized for body types. + /// + /// It is common, in middleware especially, to conditionally return an inner service's unknown/ + /// generic body `B` type or return early with a new response. This type's "right" variant + /// defaults to `BoxBody` since error responses are the common case. + /// + /// For example, middleware will often have `type Response = ServiceResponse>`. + /// This means that the inner service's response body type maps to the `Left` variant and the + /// middleware's own error responses use the default `Right` variant of `BoxBody`. Of course, + /// there's no reason it couldn't use `EitherBody` instead if its alternative + /// responses have a known type. + #[project = EitherBodyProj] + #[derive(Debug, Clone)] + pub enum EitherBody { + /// A body of type `L`. + Left { #[pin] body: L }, + + /// A body of type `R`. + Right { #[pin] body: R }, + } +} + +impl EitherBody { + /// Creates new `EitherBody` left variant with a boxed right variant. + /// + /// If the expected `R` type will be inferred and is not `BoxBody` then use the + /// [`left`](Self::left) constructor instead. + #[inline] + pub fn new(body: L) -> Self { + Self::Left { body } + } +} + +impl EitherBody { + /// Creates new `EitherBody` using left variant. + #[inline] + pub fn left(body: L) -> Self { + Self::Left { body } + } + + /// Creates new `EitherBody` using right variant. 
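To make the middleware shape described in the `EitherBody` doc comment concrete, a small sketch using only the constructors added here (plus `bytes`): `new` builds the `Left` variant with the `Right` type parameter defaulted to `BoxBody`, while `right` carries an early (typically boxed error) response.

```rust
use actix_http::body::{BodySize, BoxBody, EitherBody, MessageBody as _};
use bytes::Bytes;

fn main() {
    // The inner service's body type maps to the Left variant; Right defaults to BoxBody.
    let passthrough: EitherBody<Bytes> = EitherBody::new(Bytes::from_static(b"ok"));
    assert_eq!(passthrough.size(), BodySize::Sized(2));

    // An early (e.g., error) response with an already-boxed body uses the Right variant.
    let early: EitherBody<Bytes> = EitherBody::right(BoxBody::new(Bytes::from_static(b"nope")));
    assert_eq!(early.size(), BodySize::Sized(4));
}
```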
+ #[inline] + pub fn right(body: R) -> Self { + Self::Right { body } + } +} + +impl MessageBody for EitherBody +where + L: MessageBody + 'static, + R: MessageBody + 'static, +{ + type Error = Error; + + #[inline] + fn size(&self) -> BodySize { + match self { + EitherBody::Left { body } => body.size(), + EitherBody::Right { body } => body.size(), + } + } + + #[inline] + fn poll_next( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + match self.project() { + EitherBodyProj::Left { body } => body + .poll_next(cx) + .map_err(|err| Error::new_body().with_cause(err)), + EitherBodyProj::Right { body } => body + .poll_next(cx) + .map_err(|err| Error::new_body().with_cause(err)), + } + } + + #[inline] + fn try_into_bytes(self) -> Result { + match self { + EitherBody::Left { body } => body + .try_into_bytes() + .map_err(|body| EitherBody::Left { body }), + EitherBody::Right { body } => body + .try_into_bytes() + .map_err(|body| EitherBody::Right { body }), + } + } + + #[inline] + fn boxed(self) -> BoxBody { + match self { + EitherBody::Left { body } => body.boxed(), + EitherBody::Right { body } => body.boxed(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn type_parameter_inference() { + let _body: EitherBody<(), _> = EitherBody::new(()); + + let _body: EitherBody<_, ()> = EitherBody::left(()); + let _body: EitherBody<(), _> = EitherBody::right(()); + } +} diff --git a/actix-http/src/body/message_body.rs b/actix-http/src/body/message_body.rs index 2d2642ba7..739fe5027 100644 --- a/actix-http/src/body/message_body.rs +++ b/actix-http/src/body/message_body.rs @@ -2,6 +2,7 @@ use std::{ convert::Infallible, + error::Error as StdError, mem, pin::Pin, task::{Context, Poll}, @@ -11,172 +12,459 @@ use bytes::{Bytes, BytesMut}; use futures_core::ready; use pin_project_lite::pin_project; -use crate::error::Error; +use super::{BodySize, BoxBody}; -use super::BodySize; - -/// An interface for response bodies. +/// An interface for types that can be used as a response body. +/// +/// It is not usually necessary to create custom body types, this trait is already [implemented for +/// a large number of sensible body types](#foreign-impls) including: +/// - Empty body: `()` +/// - Text-based: `String`, `&'static str`, [`ByteString`](https://docs.rs/bytestring/1). +/// - Byte-based: `Bytes`, `BytesMut`, `Vec`, `&'static [u8]`; +/// - Streams: [`BodyStream`](super::BodyStream), [`SizedStream`](super::SizedStream) +/// +/// # Examples +/// ``` +/// # use std::convert::Infallible; +/// # use std::task::{Poll, Context}; +/// # use std::pin::Pin; +/// # use bytes::Bytes; +/// # use actix_http::body::{BodySize, MessageBody}; +/// struct Repeat { +/// chunk: String, +/// n_times: usize, +/// } +/// +/// impl MessageBody for Repeat { +/// type Error = Infallible; +/// +/// fn size(&self) -> BodySize { +/// BodySize::Sized((self.chunk.len() * self.n_times) as u64) +/// } +/// +/// fn poll_next( +/// self: Pin<&mut Self>, +/// _cx: &mut Context<'_>, +/// ) -> Poll>> { +/// let payload_string = self.chunk.repeat(self.n_times); +/// let payload_bytes = Bytes::from(payload_string); +/// Poll::Ready(Some(Ok(payload_bytes))) +/// } +/// } +/// ``` pub trait MessageBody { - type Error; + /// The type of error that will be returned if streaming body fails. + /// + /// Since it is not appropriate to generate a response mid-stream, it only requires `Error` for + /// internal use and logging. + type Error: Into>; /// Body size hint. 
+ /// + /// If [`BodySize::None`] is returned, optimizations that skip reading the body are allowed. fn size(&self) -> BodySize; /// Attempt to pull out the next chunk of body bytes. + /// + /// # Return Value + /// Similar to the `Stream` interface, there are several possible return values, each indicating + /// a distinct state: + /// - `Poll::Pending` means that this body's next chunk is not ready yet. Implementations must + /// ensure that the current task will be notified when the next chunk may be ready. + /// - `Poll::Ready(Some(val))` means that the body has successfully produced a chunk, `val`, + /// and may produce further values on subsequent `poll_next` calls. + /// - `Poll::Ready(None)` means that the body is complete, and `poll_next` should not be + /// invoked again. + /// + /// # Panics + /// Once a body is complete (i.e., `poll_next` returned `Ready(None)`), calling its `poll_next` + /// method again may panic, block forever, or cause other kinds of problems; this trait places + /// no requirements on the effects of such a call. However, as the `poll_next` method is not + /// marked unsafe, Rust’s usual rules apply: calls must never cause UB, regardless of its state. fn poll_next( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll>>; -} -impl MessageBody for () { - type Error = Infallible; - - fn size(&self) -> BodySize { - BodySize::Empty + /// Try to convert into the complete chunk of body bytes. + /// + /// Override this method if the complete body can be trivially extracted. This is useful for + /// optimizations where `poll_next` calls can be avoided. + /// + /// Body types with [`BodySize::None`] are allowed to return empty `Bytes`. Although, if calling + /// this method, it is recommended to check `size` first and return early. + /// + /// # Errors + /// The default implementation will error and return the original type back to the caller for + /// further use. + #[inline] + fn try_into_bytes(self) -> Result + where + Self: Sized, + { + Err(self) } - fn poll_next( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll>> { - Poll::Ready(None) + /// Wraps this body into a `BoxBody`. + /// + /// No-op when called on a `BoxBody`, meaning there is no risk of double boxing when calling + /// this on a generic `MessageBody`. Prefer this over [`BoxBody::new`] when a boxed body + /// is required. 
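A short sketch of the `try_into_bytes` fast path documented above: complete bodies hand their bytes back without any polling, while streaming bodies fall back to the default implementation and return themselves unchanged. The `futures-util` dev-dependency from the tests is assumed.

```rust
use std::convert::Infallible;

use actix_http::body::{BodyStream, MessageBody as _};
use bytes::Bytes;
use futures_util::stream;

fn main() {
    // Complete body: bytes are extracted without any `poll_next` calls.
    let body = Bytes::from_static(b"complete");
    assert_eq!(body.try_into_bytes().unwrap(), Bytes::from_static(b"complete"));

    // Streaming body: the default implementation returns the body back to the caller.
    let body = BodyStream::new(stream::empty::<Result<Bytes, Infallible>>());
    assert!(body.try_into_bytes().is_err());
}
```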
+ #[inline] + fn boxed(self) -> BoxBody + where + Self: Sized + 'static, + { + BoxBody::new(self) } } -impl MessageBody for Box -where - B: MessageBody + Unpin, - B::Error: Into, -{ - type Error = B::Error; +mod foreign_impls { + use std::{borrow::Cow, ops::DerefMut}; - fn size(&self) -> BodySize { - self.as_ref().size() - } + use super::*; - fn poll_next( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - Pin::new(self.get_mut().as_mut()).poll_next(cx) - } -} + impl MessageBody for &mut B + where + B: MessageBody + Unpin + ?Sized, + { + type Error = B::Error; -impl MessageBody for Pin> -where - B: MessageBody, - B::Error: Into, -{ - type Error = B::Error; + fn size(&self) -> BodySize { + (**self).size() + } - fn size(&self) -> BodySize { - self.as_ref().size() - } - - fn poll_next( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - self.as_mut().poll_next(cx) - } -} - -impl MessageBody for Bytes { - type Error = Infallible; - - fn size(&self) -> BodySize { - BodySize::Sized(self.len() as u64) - } - - fn poll_next( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll>> { - if self.is_empty() { - Poll::Ready(None) - } else { - Poll::Ready(Some(Ok(mem::take(self.get_mut())))) + fn poll_next( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + Pin::new(&mut **self).poll_next(cx) } } -} -impl MessageBody for BytesMut { - type Error = Infallible; + impl MessageBody for Infallible { + type Error = Infallible; - fn size(&self) -> BodySize { - BodySize::Sized(self.len() as u64) - } + fn size(&self) -> BodySize { + match *self {} + } - fn poll_next( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll>> { - if self.is_empty() { - Poll::Ready(None) - } else { - Poll::Ready(Some(Ok(mem::take(self.get_mut()).freeze()))) + fn poll_next( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + match *self {} } } -} -impl MessageBody for &'static str { - type Error = Infallible; + impl MessageBody for () { + type Error = Infallible; - fn size(&self) -> BodySize { - BodySize::Sized(self.len() as u64) - } + #[inline] + fn size(&self) -> BodySize { + BodySize::Sized(0) + } - fn poll_next( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll>> { - if self.is_empty() { + #[inline] + fn poll_next( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { Poll::Ready(None) - } else { - Poll::Ready(Some(Ok(Bytes::from_static( - mem::take(self.get_mut()).as_ref(), - )))) + } + + #[inline] + fn try_into_bytes(self) -> Result { + Ok(Bytes::new()) } } -} -impl MessageBody for Vec { - type Error = Infallible; + impl MessageBody for Box + where + B: MessageBody + Unpin + ?Sized, + { + type Error = B::Error; - fn size(&self) -> BodySize { - BodySize::Sized(self.len() as u64) - } + #[inline] + fn size(&self) -> BodySize { + self.as_ref().size() + } - fn poll_next( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll>> { - if self.is_empty() { - Poll::Ready(None) - } else { - Poll::Ready(Some(Ok(Bytes::from(mem::take(self.get_mut()))))) + #[inline] + fn poll_next( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + Pin::new(self.get_mut().as_mut()).poll_next(cx) } } -} -impl MessageBody for String { - type Error = Infallible; + impl MessageBody for Pin + where + T: DerefMut + Unpin, + B: MessageBody + ?Sized, + { + type Error = B::Error; - fn size(&self) -> BodySize { - BodySize::Sized(self.len() as u64) + #[inline] + fn size(&self) -> BodySize { + self.as_ref().size() + } + + #[inline] + fn poll_next( + self: 
Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + self.get_mut().as_mut().poll_next(cx) + } } - fn poll_next( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll>> { - if self.is_empty() { - Poll::Ready(None) - } else { - Poll::Ready(Some(Ok(Bytes::from( - mem::take(self.get_mut()).into_bytes(), - )))) + impl MessageBody for &'static [u8] { + type Error = Infallible; + + #[inline] + fn size(&self) -> BodySize { + BodySize::Sized(self.len() as u64) + } + + #[inline] + fn poll_next( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + if self.is_empty() { + Poll::Ready(None) + } else { + Poll::Ready(Some(Ok(Bytes::from_static(mem::take(self.get_mut()))))) + } + } + + #[inline] + fn try_into_bytes(self) -> Result { + Ok(Bytes::from_static(self)) + } + } + + impl MessageBody for Bytes { + type Error = Infallible; + + #[inline] + fn size(&self) -> BodySize { + BodySize::Sized(self.len() as u64) + } + + #[inline] + fn poll_next( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + if self.is_empty() { + Poll::Ready(None) + } else { + Poll::Ready(Some(Ok(mem::take(self.get_mut())))) + } + } + + #[inline] + fn try_into_bytes(self) -> Result { + Ok(self) + } + } + + impl MessageBody for BytesMut { + type Error = Infallible; + + #[inline] + fn size(&self) -> BodySize { + BodySize::Sized(self.len() as u64) + } + + #[inline] + fn poll_next( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + if self.is_empty() { + Poll::Ready(None) + } else { + Poll::Ready(Some(Ok(mem::take(self.get_mut()).freeze()))) + } + } + + #[inline] + fn try_into_bytes(self) -> Result { + Ok(self.freeze()) + } + } + + impl MessageBody for Vec { + type Error = Infallible; + + #[inline] + fn size(&self) -> BodySize { + BodySize::Sized(self.len() as u64) + } + + #[inline] + fn poll_next( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + if self.is_empty() { + Poll::Ready(None) + } else { + Poll::Ready(Some(Ok(mem::take(self.get_mut()).into()))) + } + } + + #[inline] + fn try_into_bytes(self) -> Result { + Ok(Bytes::from(self)) + } + } + + impl MessageBody for Cow<'static, [u8]> { + type Error = Infallible; + + #[inline] + fn size(&self) -> BodySize { + BodySize::Sized(self.len() as u64) + } + + #[inline] + fn poll_next( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + if self.is_empty() { + Poll::Ready(None) + } else { + let bytes = match mem::take(self.get_mut()) { + Cow::Borrowed(b) => Bytes::from_static(b), + Cow::Owned(b) => Bytes::from(b), + }; + Poll::Ready(Some(Ok(bytes))) + } + } + + #[inline] + fn try_into_bytes(self) -> Result { + match self { + Cow::Borrowed(b) => Ok(Bytes::from_static(b)), + Cow::Owned(b) => Ok(Bytes::from(b)), + } + } + } + + impl MessageBody for &'static str { + type Error = Infallible; + + #[inline] + fn size(&self) -> BodySize { + BodySize::Sized(self.len() as u64) + } + + #[inline] + fn poll_next( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + if self.is_empty() { + Poll::Ready(None) + } else { + let string = mem::take(self.get_mut()); + let bytes = Bytes::from_static(string.as_bytes()); + Poll::Ready(Some(Ok(bytes))) + } + } + + #[inline] + fn try_into_bytes(self) -> Result { + Ok(Bytes::from_static(self.as_bytes())) + } + } + + impl MessageBody for String { + type Error = Infallible; + + #[inline] + fn size(&self) -> BodySize { + BodySize::Sized(self.len() as u64) + } + + #[inline] + fn poll_next( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + if self.is_empty() { 
+ Poll::Ready(None) + } else { + let string = mem::take(self.get_mut()); + Poll::Ready(Some(Ok(Bytes::from(string)))) + } + } + + #[inline] + fn try_into_bytes(self) -> Result { + Ok(Bytes::from(self)) + } + } + + impl MessageBody for Cow<'static, str> { + type Error = Infallible; + + #[inline] + fn size(&self) -> BodySize { + BodySize::Sized(self.len() as u64) + } + + #[inline] + fn poll_next( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + if self.is_empty() { + Poll::Ready(None) + } else { + let bytes = match mem::take(self.get_mut()) { + Cow::Borrowed(s) => Bytes::from_static(s.as_bytes()), + Cow::Owned(s) => Bytes::from(s.into_bytes()), + }; + Poll::Ready(Some(Ok(bytes))) + } + } + + #[inline] + fn try_into_bytes(self) -> Result { + match self { + Cow::Borrowed(s) => Ok(Bytes::from_static(s.as_bytes())), + Cow::Owned(s) => Ok(Bytes::from(s.into_bytes())), + } + } + } + + impl MessageBody for bytestring::ByteString { + type Error = Infallible; + + #[inline] + fn size(&self) -> BodySize { + BodySize::Sized(self.len() as u64) + } + + #[inline] + fn poll_next( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + let string = mem::take(self.get_mut()); + Poll::Ready(Some(Ok(string.into_bytes()))) + } + + #[inline] + fn try_into_bytes(self) -> Result { + Ok(self.into_bytes()) } } } @@ -206,9 +494,11 @@ impl MessageBody for MessageBodyMapErr where B: MessageBody, F: FnOnce(B::Error) -> E, + E: Into>, { type Error = E; + #[inline] fn size(&self) -> BodySize { self.body.size() } @@ -229,4 +519,222 @@ where None => Poll::Ready(None), } } + + #[inline] + fn try_into_bytes(self) -> Result { + let Self { body, mapper } = self; + body.try_into_bytes().map_err(|body| Self { body, mapper }) + } +} + +#[cfg(test)] +mod tests { + use actix_rt::pin; + use actix_utils::future::poll_fn; + use futures_util::stream; + + use super::*; + use crate::body::{self, EitherBody}; + + macro_rules! assert_poll_next { + ($pin:expr, $exp:expr) => { + assert_eq!( + poll_fn(|cx| $pin.as_mut().poll_next(cx)) + .await + .unwrap() // unwrap option + .unwrap(), // unwrap result + $exp + ); + }; + } + + macro_rules! 
assert_poll_next_none { + ($pin:expr) => { + assert!(poll_fn(|cx| $pin.as_mut().poll_next(cx)).await.is_none()); + }; + } + + #[allow(unused_allocation)] // triggered by `Box::new(()).size()` + #[actix_rt::test] + async fn boxing_equivalence() { + assert_eq!(().size(), BodySize::Sized(0)); + assert_eq!(().size(), Box::new(()).size()); + assert_eq!(().size(), Box::pin(()).size()); + + let pl = Box::new(()); + pin!(pl); + assert_poll_next_none!(pl); + + let mut pl = Box::pin(()); + assert_poll_next_none!(pl); + } + + #[actix_rt::test] + async fn mut_equivalence() { + assert_eq!(().size(), BodySize::Sized(0)); + assert_eq!(().size(), (&(&mut ())).size()); + + let pl = &mut (); + pin!(pl); + assert_poll_next_none!(pl); + + let pl = &mut Box::new(()); + pin!(pl); + assert_poll_next_none!(pl); + + let mut body = body::SizedStream::new( + 8, + stream::iter([ + Ok::<_, std::io::Error>(Bytes::from("1234")), + Ok(Bytes::from("5678")), + ]), + ); + let body = &mut body; + assert_eq!(body.size(), BodySize::Sized(8)); + pin!(body); + assert_poll_next!(body, Bytes::from_static(b"1234")); + assert_poll_next!(body, Bytes::from_static(b"5678")); + assert_poll_next_none!(body); + } + + #[allow(clippy::let_unit_value)] + #[actix_rt::test] + async fn test_unit() { + let pl = (); + assert_eq!(pl.size(), BodySize::Sized(0)); + pin!(pl); + assert_poll_next_none!(pl); + } + + #[actix_rt::test] + async fn test_static_str() { + assert_eq!("".size(), BodySize::Sized(0)); + assert_eq!("test".size(), BodySize::Sized(4)); + + let pl = "test"; + pin!(pl); + assert_poll_next!(pl, Bytes::from("test")); + } + + #[actix_rt::test] + async fn test_static_bytes() { + assert_eq!(b"".as_ref().size(), BodySize::Sized(0)); + assert_eq!(b"test".as_ref().size(), BodySize::Sized(4)); + + let pl = b"test".as_ref(); + pin!(pl); + assert_poll_next!(pl, Bytes::from("test")); + } + + #[actix_rt::test] + async fn test_vec() { + assert_eq!(vec![0; 0].size(), BodySize::Sized(0)); + assert_eq!(Vec::from("test").size(), BodySize::Sized(4)); + + let pl = Vec::from("test"); + pin!(pl); + assert_poll_next!(pl, Bytes::from("test")); + } + + #[actix_rt::test] + async fn test_bytes() { + assert_eq!(Bytes::new().size(), BodySize::Sized(0)); + assert_eq!(Bytes::from_static(b"test").size(), BodySize::Sized(4)); + + let pl = Bytes::from_static(b"test"); + pin!(pl); + assert_poll_next!(pl, Bytes::from("test")); + } + + #[actix_rt::test] + async fn test_bytes_mut() { + assert_eq!(BytesMut::new().size(), BodySize::Sized(0)); + assert_eq!(BytesMut::from(b"test".as_ref()).size(), BodySize::Sized(4)); + + let pl = BytesMut::from("test"); + pin!(pl); + assert_poll_next!(pl, Bytes::from("test")); + } + + #[actix_rt::test] + async fn test_string() { + assert_eq!(String::new().size(), BodySize::Sized(0)); + assert_eq!("test".to_owned().size(), BodySize::Sized(4)); + + let pl = "test".to_owned(); + pin!(pl); + assert_poll_next!(pl, Bytes::from("test")); + } + + #[actix_rt::test] + async fn complete_body_combinators() { + let body = Bytes::from_static(b"test"); + let body = BoxBody::new(body); + let body = EitherBody::<_, ()>::left(body); + let body = EitherBody::<(), _>::right(body); + // Do not support try_into_bytes: + // let body = Box::new(body); + // let body = Box::pin(body); + + assert_eq!(body.try_into_bytes().unwrap(), Bytes::from("test")); + } + + #[actix_rt::test] + async fn complete_body_combinators_poll() { + let body = Bytes::from_static(b"test"); + let body = BoxBody::new(body); + let body = EitherBody::<_, ()>::left(body); + let body = 
EitherBody::<(), _>::right(body); + let mut body = body; + + assert_eq!(body.size(), BodySize::Sized(4)); + assert_poll_next!(Pin::new(&mut body), Bytes::from("test")); + assert_poll_next_none!(Pin::new(&mut body)); + } + + #[actix_rt::test] + async fn none_body_combinators() { + fn none_body() -> BoxBody { + let body = body::None; + let body = BoxBody::new(body); + let body = EitherBody::<_, ()>::left(body); + let body = EitherBody::<(), _>::right(body); + body.boxed() + } + + assert_eq!(none_body().size(), BodySize::None); + assert_eq!(none_body().try_into_bytes().unwrap(), Bytes::new()); + assert_poll_next_none!(Pin::new(&mut none_body())); + } + + // down-casting used to be done with a method on MessageBody trait + // test is kept to demonstrate equivalence of Any trait + #[actix_rt::test] + async fn test_body_casting() { + let mut body = String::from("hello cast"); + // let mut resp_body: &mut dyn MessageBody = &mut body; + let resp_body: &mut dyn std::any::Any = &mut body; + let body = resp_body.downcast_ref::().unwrap(); + assert_eq!(body, "hello cast"); + let body = &mut resp_body.downcast_mut::().unwrap(); + body.push('!'); + let body = resp_body.downcast_ref::().unwrap(); + assert_eq!(body, "hello cast!"); + let not_body = resp_body.downcast_ref::<()>(); + assert!(not_body.is_none()); + } + + #[actix_rt::test] + async fn non_owning_to_bytes() { + let mut body = BoxBody::new(()); + let bytes = body::to_bytes(&mut body).await.unwrap(); + assert_eq!(bytes, Bytes::new()); + + let mut body = body::BodyStream::new(stream::iter([ + Ok::<_, std::io::Error>(Bytes::from("1234")), + Ok(Bytes::from("5678")), + ])); + let bytes = body::to_bytes(&mut body).await.unwrap(); + assert_eq!(bytes, Bytes::from_static(b"12345678")); + } } diff --git a/actix-http/src/body/mod.rs b/actix-http/src/body/mod.rs index 11aff039e..1a12f3336 100644 --- a/actix-http/src/body/mod.rs +++ b/actix-http/src/body/mod.rs @@ -1,259 +1,27 @@ //! Traits and structures to aid consuming and writing HTTP payloads. +//! +//! "Body" and "payload" are used somewhat interchangeably in this documentation. -use std::task::Poll; +// Though the spec kinda reads like "payload" is the possibly-transfer-encoded part of the message +// and the "body" is the intended possibly-decoded version of that. -use actix_rt::pin; -use actix_utils::future::poll_fn; -use bytes::{Bytes, BytesMut}; -use futures_core::ready; - -#[allow(clippy::module_inception)] -mod body; mod body_stream; +mod boxed; +mod either; mod message_body; -mod response_body; +mod none; mod size; mod sized_stream; +mod utils; -pub use self::body::{AnyBody, Body, BoxAnyBody}; -pub use self::body_stream::BodyStream; -pub use self::message_body::MessageBody; pub(crate) use self::message_body::MessageBodyMapErr; -pub use self::response_body::ResponseBody; -pub use self::size::BodySize; -pub use self::sized_stream::SizedStream; - -/// Collects the body produced by a `MessageBody` implementation into `Bytes`. -/// -/// Any errors produced by the body stream are returned immediately. 
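The `&mut B` implementation added in this file (and exercised by the `non_owning_to_bytes` test above) means a body can be drained without giving up ownership. A minimal sketch, reusing only items from this diff:

```rust
use actix_http::body::{self, BoxBody};
use bytes::Bytes;

fn main() {
    actix_rt::System::new().block_on(async {
        // `MessageBody` is implemented for `&mut B`, so the body is not consumed.
        let mut body = BoxBody::new(Bytes::from_static(b"abc"));

        let bytes = body::to_bytes(&mut body).await.unwrap();
        assert_eq!(bytes, Bytes::from_static(b"abc"));
    });
}
```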
-/// -/// # Examples -/// ``` -/// use actix_http::body::{Body, to_bytes}; -/// use bytes::Bytes; -/// -/// # async fn test_to_bytes() { -/// let body = Body::Empty; -/// let bytes = to_bytes(body).await.unwrap(); -/// assert!(bytes.is_empty()); -/// -/// let body = Body::Bytes(Bytes::from_static(b"123")); -/// let bytes = to_bytes(body).await.unwrap(); -/// assert_eq!(bytes, b"123"[..]); -/// # } -/// ``` -pub async fn to_bytes(body: B) -> Result { - let cap = match body.size() { - BodySize::None | BodySize::Empty | BodySize::Sized(0) => return Ok(Bytes::new()), - BodySize::Sized(size) => size as usize, - BodySize::Stream => 32_768, - }; - - let mut buf = BytesMut::with_capacity(cap); - - pin!(body); - - poll_fn(|cx| loop { - let body = body.as_mut(); - - match ready!(body.poll_next(cx)) { - Some(Ok(bytes)) => buf.extend_from_slice(&*bytes), - None => return Poll::Ready(Ok(())), - Some(Err(err)) => return Poll::Ready(Err(err)), - } - }) - .await?; - - Ok(buf.freeze()) -} - -#[cfg(test)] -mod tests { - use std::pin::Pin; - - use actix_rt::pin; - use actix_utils::future::poll_fn; - use bytes::{Bytes, BytesMut}; - - use super::*; - - impl Body { - pub(crate) fn get_ref(&self) -> &[u8] { - match *self { - Body::Bytes(ref bin) => &bin, - _ => panic!(), - } - } - } - - #[actix_rt::test] - async fn test_static_str() { - assert_eq!(Body::from("").size(), BodySize::Sized(0)); - assert_eq!(Body::from("test").size(), BodySize::Sized(4)); - assert_eq!(Body::from("test").get_ref(), b"test"); - - assert_eq!("test".size(), BodySize::Sized(4)); - assert_eq!( - poll_fn(|cx| Pin::new(&mut "test").poll_next(cx)) - .await - .unwrap() - .ok(), - Some(Bytes::from("test")) - ); - } - - #[actix_rt::test] - async fn test_static_bytes() { - assert_eq!(Body::from(b"test".as_ref()).size(), BodySize::Sized(4)); - assert_eq!(Body::from(b"test".as_ref()).get_ref(), b"test"); - assert_eq!( - Body::from_slice(b"test".as_ref()).size(), - BodySize::Sized(4) - ); - assert_eq!(Body::from_slice(b"test".as_ref()).get_ref(), b"test"); - let sb = Bytes::from(&b"test"[..]); - pin!(sb); - - assert_eq!(sb.size(), BodySize::Sized(4)); - assert_eq!( - poll_fn(|cx| sb.as_mut().poll_next(cx)).await.unwrap().ok(), - Some(Bytes::from("test")) - ); - } - - #[actix_rt::test] - async fn test_vec() { - assert_eq!(Body::from(Vec::from("test")).size(), BodySize::Sized(4)); - assert_eq!(Body::from(Vec::from("test")).get_ref(), b"test"); - let test_vec = Vec::from("test"); - pin!(test_vec); - - assert_eq!(test_vec.size(), BodySize::Sized(4)); - assert_eq!( - poll_fn(|cx| test_vec.as_mut().poll_next(cx)) - .await - .unwrap() - .ok(), - Some(Bytes::from("test")) - ); - } - - #[actix_rt::test] - async fn test_bytes() { - let b = Bytes::from("test"); - assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4)); - assert_eq!(Body::from(b.clone()).get_ref(), b"test"); - pin!(b); - - assert_eq!(b.size(), BodySize::Sized(4)); - assert_eq!( - poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(), - Some(Bytes::from("test")) - ); - } - - #[actix_rt::test] - async fn test_bytes_mut() { - let b = BytesMut::from("test"); - assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4)); - assert_eq!(Body::from(b.clone()).get_ref(), b"test"); - pin!(b); - - assert_eq!(b.size(), BodySize::Sized(4)); - assert_eq!( - poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(), - Some(Bytes::from("test")) - ); - } - - #[actix_rt::test] - async fn test_string() { - let b = "test".to_owned(); - assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4)); - 
assert_eq!(Body::from(b.clone()).get_ref(), b"test"); - assert_eq!(Body::from(&b).size(), BodySize::Sized(4)); - assert_eq!(Body::from(&b).get_ref(), b"test"); - pin!(b); - - assert_eq!(b.size(), BodySize::Sized(4)); - assert_eq!( - poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(), - Some(Bytes::from("test")) - ); - } - - #[actix_rt::test] - async fn test_unit() { - assert_eq!(().size(), BodySize::Empty); - assert!(poll_fn(|cx| Pin::new(&mut ()).poll_next(cx)) - .await - .is_none()); - } - - #[actix_rt::test] - async fn test_box() { - let val = Box::new(()); - pin!(val); - assert_eq!(val.size(), BodySize::Empty); - assert!(poll_fn(|cx| val.as_mut().poll_next(cx)).await.is_none()); - } - - #[actix_rt::test] - async fn test_body_eq() { - assert!( - Body::Bytes(Bytes::from_static(b"1")) - == Body::Bytes(Bytes::from_static(b"1")) - ); - assert!(Body::Bytes(Bytes::from_static(b"1")) != Body::None); - } - - #[actix_rt::test] - async fn test_body_debug() { - assert!(format!("{:?}", Body::None).contains("Body::None")); - assert!(format!("{:?}", Body::Empty).contains("Body::Empty")); - assert!(format!("{:?}", Body::Bytes(Bytes::from_static(b"1"))).contains('1')); - } - - #[actix_rt::test] - async fn test_serde_json() { - use serde_json::{json, Value}; - assert_eq!( - Body::from(serde_json::to_vec(&Value::String("test".to_owned())).unwrap()) - .size(), - BodySize::Sized(6) - ); - assert_eq!( - Body::from(serde_json::to_vec(&json!({"test-key":"test-value"})).unwrap()) - .size(), - BodySize::Sized(25) - ); - } - - // down-casting used to be done with a method on MessageBody trait - // test is kept to demonstrate equivalence of Any trait - #[actix_rt::test] - async fn test_body_casting() { - let mut body = String::from("hello cast"); - // let mut resp_body: &mut dyn MessageBody = &mut body; - let resp_body: &mut dyn std::any::Any = &mut body; - let body = resp_body.downcast_ref::().unwrap(); - assert_eq!(body, "hello cast"); - let body = &mut resp_body.downcast_mut::().unwrap(); - body.push('!'); - let body = resp_body.downcast_ref::().unwrap(); - assert_eq!(body, "hello cast!"); - let not_body = resp_body.downcast_ref::<()>(); - assert!(not_body.is_none()); - } - - #[actix_rt::test] - async fn test_to_bytes() { - let body = Body::Empty; - let bytes = to_bytes(body).await.unwrap(); - assert!(bytes.is_empty()); - - let body = Body::Bytes(Bytes::from_static(b"123")); - let bytes = to_bytes(body).await.unwrap(); - assert_eq!(bytes, b"123"[..]); - } -} +pub use self::{ + body_stream::BodyStream, + boxed::BoxBody, + either::EitherBody, + message_body::MessageBody, + none::None, + size::BodySize, + sized_stream::SizedStream, + utils::{to_bytes, to_bytes_limited, BodyLimitExceeded}, +}; diff --git a/actix-http/src/body/none.rs b/actix-http/src/body/none.rs new file mode 100644 index 000000000..b1d3f7f2a --- /dev/null +++ b/actix-http/src/body/none.rs @@ -0,0 +1,51 @@ +use std::{ + convert::Infallible, + pin::Pin, + task::{Context, Poll}, +}; + +use bytes::Bytes; + +use super::{BodySize, MessageBody}; + +/// Body type for responses that forbid payloads. +/// +/// This is distinct from an "empty" response which _would_ contain a `Content-Length` header. +/// For an "empty" body, use `()` or `Bytes::new()`. +/// +/// For example, the HTTP spec forbids a payload to be sent with a `204 No Content` response. +/// In this case, the payload (or lack thereof) is implicit from the status code, so a +/// `Content-Length` header is not required. 
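A sketch of the distinction drawn in the `body::None` docs, using only types from this diff: `None` advertises the absence of a payload, whereas `()` and `Bytes::new()` are zero-length payloads that still warrant a `Content-Length: 0` header.

```rust
use actix_http::body::{self, BodySize, MessageBody as _};
use bytes::Bytes;

fn main() {
    // No payload at all: consumers may skip reading or writing the body entirely.
    assert_eq!(body::None::new().size(), BodySize::None);

    // Empty payloads: a `Content-Length: 0` header is still appropriate.
    assert_eq!(().size(), BodySize::Sized(0));
    assert_eq!(Bytes::new().size(), BodySize::Sized(0));
}
```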
+#[derive(Debug, Clone, Copy, Default)] +#[non_exhaustive] +pub struct None; + +impl None { + /// Constructs new "none" body. + #[inline] + pub fn new() -> Self { + None + } +} + +impl MessageBody for None { + type Error = Infallible; + + #[inline] + fn size(&self) -> BodySize { + BodySize::None + } + + #[inline] + fn poll_next( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + Poll::Ready(Option::None) + } + + #[inline] + fn try_into_bytes(self) -> Result { + Ok(Bytes::new()) + } +} diff --git a/actix-http/src/body/response_body.rs b/actix-http/src/body/response_body.rs deleted file mode 100644 index 855c742f2..000000000 --- a/actix-http/src/body/response_body.rs +++ /dev/null @@ -1,89 +0,0 @@ -use std::{ - mem, - pin::Pin, - task::{Context, Poll}, -}; - -use bytes::Bytes; -use futures_core::{ready, Stream}; -use pin_project::pin_project; - -use crate::error::Error; - -use super::{Body, BodySize, MessageBody}; - -#[pin_project(project = ResponseBodyProj)] -pub enum ResponseBody { - Body(#[pin] B), - Other(Body), -} - -impl ResponseBody { - pub fn into_body(self) -> ResponseBody { - match self { - ResponseBody::Body(b) => ResponseBody::Other(b), - ResponseBody::Other(b) => ResponseBody::Other(b), - } - } -} - -impl ResponseBody { - pub fn take_body(&mut self) -> ResponseBody { - mem::replace(self, ResponseBody::Other(Body::None)) - } -} - -impl ResponseBody { - pub fn as_ref(&self) -> Option<&B> { - if let ResponseBody::Body(ref b) = self { - Some(b) - } else { - None - } - } -} - -impl MessageBody for ResponseBody -where - B: MessageBody, - B::Error: Into, -{ - type Error = Error; - - fn size(&self) -> BodySize { - match self { - ResponseBody::Body(ref body) => body.size(), - ResponseBody::Other(ref body) => body.size(), - } - } - - fn poll_next( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - Stream::poll_next(self, cx) - } -} - -impl Stream for ResponseBody -where - B: MessageBody, - B::Error: Into, -{ - type Item = Result; - - fn poll_next( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - match self.project() { - // TODO: MSRV 1.51: poll_map_err - ResponseBodyProj::Body(body) => match ready!(body.poll_next(cx)) { - Some(Err(err)) => Poll::Ready(Some(Err(err.into()))), - Some(Ok(val)) => Poll::Ready(Some(Ok(val))), - None => Poll::Ready(None), - }, - ResponseBodyProj::Other(body) => Pin::new(body).poll_next(cx), - } - } -} diff --git a/actix-http/src/body/size.rs b/actix-http/src/body/size.rs index 775d5b8f1..ec7873ca5 100644 --- a/actix-http/src/body/size.rs +++ b/actix-http/src/body/size.rs @@ -1,19 +1,16 @@ /// Body size hint. -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum BodySize { - /// Absence of body can be assumed from method or status code. + /// Implicitly empty body. /// - /// Will skip writing Content-Length header. + /// Will omit the Content-Length header. Used for responses to certain methods (e.g., `HEAD`) or + /// with particular status codes (e.g., 204 No Content). Consumers that read this as a body size + /// hint are allowed to make optimizations that skip reading or writing the payload. None, - /// Zero size body. - /// - /// Will write `Content-Length: 0` header. - Empty, - /// Known size body. /// - /// Will write `Content-Length: N` header. `Sized(0)` is treated the same as `Empty`. + /// Will write `Content-Length: N` header. Sized(u64), /// Unknown size body. 
@@ -23,18 +20,22 @@ pub enum BodySize { } impl BodySize { - /// Returns true if size hint indicates no or empty body. + /// Equivalent to `BodySize::Sized(0)`; + pub const ZERO: Self = Self::Sized(0); + + /// Returns true if size hint indicates omitted or empty body. + /// + /// Streams will return false because it cannot be known without reading the stream. /// /// ``` /// # use actix_http::body::BodySize; /// assert!(BodySize::None.is_eof()); - /// assert!(BodySize::Empty.is_eof()); /// assert!(BodySize::Sized(0).is_eof()); /// /// assert!(!BodySize::Sized(64).is_eof()); /// assert!(!BodySize::Stream.is_eof()); /// ``` pub fn is_eof(&self) -> bool { - matches!(self, BodySize::None | BodySize::Empty | BodySize::Sized(0)) + matches!(self, BodySize::None | BodySize::Sized(0)) } } diff --git a/actix-http/src/body/sized_stream.rs b/actix-http/src/body/sized_stream.rs index 4af132389..08cd81a0d 100644 --- a/actix-http/src/body/sized_stream.rs +++ b/actix-http/src/body/sized_stream.rs @@ -1,4 +1,5 @@ use std::{ + error::Error as StdError, pin::Pin, task::{Context, Poll}, }; @@ -7,15 +8,13 @@ use bytes::Bytes; use futures_core::{ready, Stream}; use pin_project_lite::pin_project; -use crate::error::Error; - use super::{BodySize, MessageBody}; pin_project! { /// Known sized streaming response wrapper. /// - /// This body implementation should be used if total size of stream is known. Data get sent as is - /// without using transfer encoding. + /// This body implementation should be used if total size of stream is known. Data is sent as-is + /// without using chunked transfer encoding. pub struct SizedStream { size: u64, #[pin] @@ -23,23 +22,29 @@ pin_project! { } } -impl SizedStream +impl SizedStream where - S: Stream>, + S: Stream>, + E: Into> + 'static, { + #[inline] pub fn new(size: u64, stream: S) -> Self { SizedStream { size, stream } } } -impl MessageBody for SizedStream -where - S: Stream>, -{ - type Error = Error; +// TODO: from_infallible method +impl MessageBody for SizedStream +where + S: Stream>, + E: Into> + 'static, +{ + type Error = E; + + #[inline] fn size(&self) -> BodySize { - BodySize::Sized(self.size as u64) + BodySize::Sized(self.size) } /// Attempts to pull out the next value of the underlying [`Stream`]. 
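A minimal usage sketch of `SizedStream` as now bounded: the declared size drives the `Content-Length` hint, chunked transfer encoding is not used, and the stream's error type only needs to convert into a boxed error. The `futures-util` and `actix-rt` dev-dependencies are assumed.

```rust
use std::convert::Infallible;

use actix_http::body::{to_bytes, BodySize, MessageBody as _, SizedStream};
use bytes::Bytes;
use futures_util::stream;

fn main() {
    actix_rt::System::new().block_on(async {
        let body = SizedStream::new(
            5,
            stream::iter([
                Ok::<_, Infallible>(Bytes::from_static(b"hel")),
                Ok(Bytes::from_static(b"lo")),
            ]),
        );

        // The size hint comes from the declared length, not from the stream itself.
        assert_eq!(body.size(), BodySize::Sized(5));
        assert_eq!(to_bytes(body).await.unwrap(), Bytes::from_static(b"hello"));
    });
}
```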
@@ -66,18 +71,36 @@ where #[cfg(test)] mod tests { + use std::convert::Infallible; + use actix_rt::pin; use actix_utils::future::poll_fn; use futures_util::stream; + use static_assertions::{assert_impl_all, assert_not_impl_any}; use super::*; use crate::body::to_bytes; + assert_impl_all!(SizedStream>>: MessageBody); + assert_impl_all!(SizedStream>>: MessageBody); + assert_impl_all!(SizedStream>>: MessageBody); + assert_impl_all!(SizedStream>>: MessageBody); + assert_impl_all!(SizedStream>>: MessageBody); + + assert_not_impl_any!(SizedStream>: MessageBody); + assert_not_impl_any!(SizedStream>: MessageBody); + // crate::Error is not Clone + assert_not_impl_any!(SizedStream>>: MessageBody); + #[actix_rt::test] async fn skips_empty_chunks() { let body = SizedStream::new( 2, - stream::iter(["1", "", "2"].iter().map(|&v| Ok(Bytes::from(v)))), + stream::iter( + ["1", "", "2"] + .iter() + .map(|&v| Ok::<_, Infallible>(Bytes::from(v))), + ), ); pin!(body); @@ -103,9 +126,46 @@ mod tests { async fn read_to_bytes() { let body = SizedStream::new( 2, - stream::iter(["1", "", "2"].iter().map(|&v| Ok(Bytes::from(v)))), + stream::iter( + ["1", "", "2"] + .iter() + .map(|&v| Ok::<_, Infallible>(Bytes::from(v))), + ), ); assert_eq!(to_bytes(body).await.ok(), Some(Bytes::from("12"))); } + + #[actix_rt::test] + async fn stream_string_error() { + // `&'static str` does not impl `Error` + // but it does impl `Into>` + + let body = SizedStream::new(0, stream::once(async { Err("stringy error") })); + assert_eq!(to_bytes(body).await, Ok(Bytes::new())); + + let body = SizedStream::new(1, stream::once(async { Err("stringy error") })); + assert!(matches!(to_bytes(body).await, Err("stringy error"))); + } + + #[actix_rt::test] + async fn stream_boxed_error() { + // `Box` does not impl `Error` + // but it does impl `Into>` + + let body = SizedStream::new( + 0, + stream::once(async { Err(Box::::from("stringy error")) }), + ); + assert_eq!(to_bytes(body).await.unwrap(), Bytes::new()); + + let body = SizedStream::new( + 1, + stream::once(async { Err(Box::::from("stringy error")) }), + ); + assert_eq!( + to_bytes(body).await.unwrap_err().to_string(), + "stringy error" + ); + } } diff --git a/actix-http/src/body/utils.rs b/actix-http/src/body/utils.rs new file mode 100644 index 000000000..d1449179f --- /dev/null +++ b/actix-http/src/body/utils.rs @@ -0,0 +1,198 @@ +use std::task::Poll; + +use actix_rt::pin; +use actix_utils::future::poll_fn; +use bytes::{Bytes, BytesMut}; +use derive_more::{Display, Error}; +use futures_core::ready; + +use super::{BodySize, MessageBody}; + +/// Collects all the bytes produced by `body`. +/// +/// Any errors produced by the body stream are returned immediately. +/// +/// Consider using [`to_bytes_limited`] instead to protect against memory exhaustion. +/// +/// # Examples +/// +/// ``` +/// use actix_http::body::{self, to_bytes}; +/// use bytes::Bytes; +/// +/// # actix_rt::System::new().block_on(async { +/// let body = body::None::new(); +/// let bytes = to_bytes(body).await.unwrap(); +/// assert!(bytes.is_empty()); +/// +/// let body = Bytes::from_static(b"123"); +/// let bytes = to_bytes(body).await.unwrap(); +/// assert_eq!(bytes, "123"); +/// # }); +/// ``` +pub async fn to_bytes(body: B) -> Result { + to_bytes_limited(body, usize::MAX) + .await + .expect("body should never yield more than usize::MAX bytes") +} + +/// Error type returned from [`to_bytes_limited`] when body produced exceeds limit. 
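A sketch of the two-level result returned by the new `to_bytes_limited`: the outer `Result` reports whether the limit was exceeded, while the inner one propagates the body's own error type (here `Infallible`).

```rust
use actix_http::body::to_bytes_limited;
use bytes::Bytes;

fn main() {
    actix_rt::System::new().block_on(async {
        // Within the limit: outer Ok (limit respected), inner Ok (body produced no error).
        let bytes = to_bytes_limited(Bytes::from_static(b"12345"), 8)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(bytes, Bytes::from_static(b"12345"));

        // Over the limit: the outer Err(BodyLimitExceeded) is returned.
        assert!(to_bytes_limited(Bytes::from_static(b"12345"), 4).await.is_err());
    });
}
```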
+#[derive(Debug, Display, Error)] +#[display(fmt = "limit exceeded while collecting body bytes")] +#[non_exhaustive] +pub struct BodyLimitExceeded; + +/// Collects the bytes produced by `body`, up to `limit` bytes. +/// +/// If a chunk read from `poll_next` causes the total number of bytes read to exceed `limit`, an +/// `Err(BodyLimitExceeded)` is returned. +/// +/// Any errors produced by the body stream are returned immediately as `Ok(Err(B::Error))`. +/// +/// # Examples +/// +/// ``` +/// use actix_http::body::{self, to_bytes_limited}; +/// use bytes::Bytes; +/// +/// # actix_rt::System::new().block_on(async { +/// let body = body::None::new(); +/// let bytes = to_bytes_limited(body, 10).await.unwrap().unwrap(); +/// assert!(bytes.is_empty()); +/// +/// let body = Bytes::from_static(b"123"); +/// let bytes = to_bytes_limited(body, 10).await.unwrap().unwrap(); +/// assert_eq!(bytes, "123"); +/// +/// let body = Bytes::from_static(b"123"); +/// assert!(to_bytes_limited(body, 2).await.is_err()); +/// # }); +/// ``` +pub async fn to_bytes_limited( + body: B, + limit: usize, +) -> Result, BodyLimitExceeded> { + /// Sensible default (32kB) for initial, bounded allocation when collecting body bytes. + const INITIAL_ALLOC_BYTES: usize = 32 * 1024; + + let cap = match body.size() { + BodySize::None | BodySize::Sized(0) => return Ok(Ok(Bytes::new())), + BodySize::Sized(size) if size as usize > limit => return Err(BodyLimitExceeded), + BodySize::Sized(size) => (size as usize).min(INITIAL_ALLOC_BYTES), + BodySize::Stream => INITIAL_ALLOC_BYTES, + }; + + let mut exceeded_limit = false; + let mut buf = BytesMut::with_capacity(cap); + + pin!(body); + + match poll_fn(|cx| loop { + let body = body.as_mut(); + + match ready!(body.poll_next(cx)) { + Some(Ok(bytes)) => { + // if limit is exceeded... 
+ if buf.len() + bytes.len() > limit { + // ...set flag to true and break out of poll_fn + exceeded_limit = true; + return Poll::Ready(Ok(())); + } + + buf.extend_from_slice(&bytes) + } + None => return Poll::Ready(Ok(())), + Some(Err(err)) => return Poll::Ready(Err(err)), + } + }) + .await + { + // propagate error returned from body poll + Err(err) => Ok(Err(err)), + + // limit was exceeded while reading body + Ok(()) if exceeded_limit => Err(BodyLimitExceeded), + + // otherwise return body buffer + Ok(()) => Ok(Ok(buf.freeze())), + } +} + +#[cfg(test)] +mod tests { + use std::io; + + use futures_util::{stream, StreamExt as _}; + + use super::*; + use crate::{ + body::{BodyStream, SizedStream}, + Error, + }; + + #[actix_rt::test] + async fn to_bytes_complete() { + let bytes = to_bytes(()).await.unwrap(); + assert!(bytes.is_empty()); + + let body = Bytes::from_static(b"123"); + let bytes = to_bytes(body).await.unwrap(); + assert_eq!(bytes, b"123"[..]); + } + + #[actix_rt::test] + async fn to_bytes_streams() { + let stream = stream::iter(vec![Bytes::from_static(b"123"), Bytes::from_static(b"abc")]) + .map(Ok::<_, Error>); + let body = BodyStream::new(stream); + let bytes = to_bytes(body).await.unwrap(); + assert_eq!(bytes, b"123abc"[..]); + } + + #[actix_rt::test] + async fn to_bytes_limited_complete() { + let bytes = to_bytes_limited((), 0).await.unwrap().unwrap(); + assert!(bytes.is_empty()); + + let bytes = to_bytes_limited((), 1).await.unwrap().unwrap(); + assert!(bytes.is_empty()); + + assert!(to_bytes_limited(Bytes::from_static(b"12"), 0) + .await + .is_err()); + assert!(to_bytes_limited(Bytes::from_static(b"12"), 1) + .await + .is_err()); + assert!(to_bytes_limited(Bytes::from_static(b"12"), 2).await.is_ok()); + assert!(to_bytes_limited(Bytes::from_static(b"12"), 3).await.is_ok()); + } + + #[actix_rt::test] + async fn to_bytes_limited_streams() { + // hinting a larger body fails + let body = SizedStream::new(8, stream::empty().map(Ok::<_, Error>)); + assert!(to_bytes_limited(body, 3).await.is_err()); + + // hinting a smaller body is okay + let body = SizedStream::new(3, stream::empty().map(Ok::<_, Error>)); + assert!(to_bytes_limited(body, 3).await.unwrap().unwrap().is_empty()); + + // hinting a smaller body then returning a larger one fails + let stream = stream::iter(vec![Bytes::from_static(b"1234")]).map(Ok::<_, Error>); + let body = SizedStream::new(3, stream); + assert!(to_bytes_limited(body, 3).await.is_err()); + + let stream = stream::iter(vec![Bytes::from_static(b"123"), Bytes::from_static(b"abc")]) + .map(Ok::<_, Error>); + let body = BodyStream::new(stream); + assert!(to_bytes_limited(body, 3).await.is_err()); + } + + #[actix_rt::test] + async fn to_body_limit_error() { + let err_stream = stream::once(async { Err(io::Error::new(io::ErrorKind::Other, "")) }); + let body = SizedStream::new(8, err_stream); + // not too big, but propagates error from body stream + assert!(to_bytes_limited(body, 10).await.unwrap().is_err()); + } +} diff --git a/actix-http/src/builder.rs b/actix-http/src/builder.rs index 660cd9817..916083a98 100644 --- a/actix-http/src/builder.rs +++ b/actix-http/src/builder.rs @@ -1,28 +1,22 @@ -use std::marker::PhantomData; -use std::rc::Rc; -use std::{fmt, net}; +use std::{fmt, marker::PhantomData, net, rc::Rc, time::Duration}; use actix_codec::Framed; use actix_service::{IntoServiceFactory, Service, ServiceFactory}; -use crate::body::MessageBody; -use crate::config::{KeepAlive, ServiceConfig}; -use crate::error::Error; -use crate::h1::{Codec, ExpectHandler, 
H1Service, UpgradeHandler}; -use crate::h2::H2Service; -use crate::request::Request; -use crate::response::Response; -use crate::service::HttpService; -use crate::{ConnectCallback, Extensions}; +use crate::{ + body::{BoxBody, MessageBody}, + h1::{self, ExpectHandler, H1Service, UpgradeHandler}, + service::HttpService, + ConnectCallback, Extensions, KeepAlive, Request, Response, ServiceConfig, +}; -/// A HTTP service builder +/// An HTTP service builder. /// -/// This type can be used to construct an instance of [`HttpService`] through a -/// builder-like pattern. +/// This type can construct an instance of [`HttpService`] through a builder-like pattern. pub struct HttpServiceBuilder { keep_alive: KeepAlive, - client_timeout: u64, - client_disconnect: u64, + client_request_timeout: Duration, + client_disconnect_timeout: Duration, secure: bool, local_addr: Option, expect: X, @@ -31,21 +25,23 @@ pub struct HttpServiceBuilder { _phantom: PhantomData, } -impl HttpServiceBuilder +impl Default for HttpServiceBuilder where S: ServiceFactory, - S::Error: Into + 'static, + S::Error: Into> + 'static, S::InitError: fmt::Debug, >::Future: 'static, { - /// Create instance of `ServiceConfigBuilder` - pub fn new() -> Self { + fn default() -> Self { HttpServiceBuilder { - keep_alive: KeepAlive::Timeout(5), - client_timeout: 5000, - client_disconnect: 0, + // ServiceConfig parts (make sure defaults match) + keep_alive: KeepAlive::default(), + client_request_timeout: Duration::from_secs(5), + client_disconnect_timeout: Duration::ZERO, secure: false, local_addr: None, + + // dispatcher parts expect: ExpectHandler, upgrade: None, on_connect_ext: None, @@ -57,19 +53,21 @@ where impl HttpServiceBuilder where S: ServiceFactory, - S::Error: Into + 'static, + S::Error: Into> + 'static, S::InitError: fmt::Debug, >::Future: 'static, X: ServiceFactory, - X::Error: Into, + X::Error: Into>, X::InitError: fmt::Debug, - U: ServiceFactory<(Request, Framed), Config = (), Response = ()>, + U: ServiceFactory<(Request, Framed), Config = (), Response = ()>, U::Error: fmt::Display, U::InitError: fmt::Debug, { - /// Set server keep-alive setting. + /// Set connection keep-alive setting. /// - /// By default keep alive is set to a 5 seconds. + /// Applies to HTTP/1.1 keep-alive and HTTP/2 ping-pong. + /// + /// By default keep-alive is 5 seconds. pub fn keep_alive>(mut self, val: W) -> Self { self.keep_alive = val.into(); self @@ -87,33 +85,45 @@ where self } - /// Set server client timeout in milliseconds for first request. + /// Set client request timeout (for first request). /// - /// Defines a timeout for reading client request header. If a client does not transmit - /// the entire set headers within this time, the request is terminated with - /// the 408 (Request Time-out) error. + /// Defines a timeout for reading client request header. If the client does not transmit the + /// request head within this duration, the connection is terminated with a `408 Request Timeout` + /// response error. /// - /// To disable timeout set value to 0. + /// A duration of zero disables the timeout. /// - /// By default client timeout is set to 5000 milliseconds. - pub fn client_timeout(mut self, val: u64) -> Self { - self.client_timeout = val; + /// By default, the client timeout is 5 seconds. + pub fn client_request_timeout(mut self, dur: Duration) -> Self { + self.client_request_timeout = dur; self } - /// Set server connection disconnect timeout in milliseconds. 
+ #[doc(hidden)] + #[deprecated(since = "3.0.0", note = "Renamed to `client_request_timeout`.")] + pub fn client_timeout(self, dur: Duration) -> Self { + self.client_request_timeout(dur) + } + + /// Set client connection disconnect timeout. /// /// Defines a timeout for disconnect connection. If a disconnect procedure does not complete /// within this time, the request get dropped. This timeout affects secure connections. /// - /// To disable timeout set value to 0. + /// A duration of zero disables the timeout. /// - /// By default disconnect timeout is set to 0. - pub fn client_disconnect(mut self, val: u64) -> Self { - self.client_disconnect = val; + /// By default, the disconnect timeout is disabled. + pub fn client_disconnect_timeout(mut self, dur: Duration) -> Self { + self.client_disconnect_timeout = dur; self } + #[doc(hidden)] + #[deprecated(since = "3.0.0", note = "Renamed to `client_disconnect_timeout`.")] + pub fn client_disconnect(self, dur: Duration) -> Self { + self.client_disconnect_timeout(dur) + } + /// Provide service for `EXPECT: 100-Continue` support. /// /// Service get called with request that contains `EXPECT` header. @@ -123,13 +133,13 @@ where where F: IntoServiceFactory, X1: ServiceFactory, - X1::Error: Into, + X1::Error: Into>, X1::InitError: fmt::Debug, { HttpServiceBuilder { keep_alive: self.keep_alive, - client_timeout: self.client_timeout, - client_disconnect: self.client_disconnect, + client_request_timeout: self.client_request_timeout, + client_disconnect_timeout: self.client_disconnect_timeout, secure: self.secure, local_addr: self.local_addr, expect: expect.into_factory(), @@ -145,15 +155,15 @@ where /// and this service get called with original request and framed object. pub fn upgrade(self, upgrade: F) -> HttpServiceBuilder where - F: IntoServiceFactory)>, - U1: ServiceFactory<(Request, Framed), Config = (), Response = ()>, + F: IntoServiceFactory)>, + U1: ServiceFactory<(Request, Framed), Config = (), Response = ()>, U1::Error: fmt::Display, U1::InitError: fmt::Debug, { HttpServiceBuilder { keep_alive: self.keep_alive, - client_timeout: self.client_timeout, - client_disconnect: self.client_disconnect, + client_request_timeout: self.client_request_timeout, + client_disconnect_timeout: self.client_disconnect_timeout, secure: self.secure, local_addr: self.local_addr, expect: self.expect, @@ -176,19 +186,19 @@ where self } - /// Finish service configuration and create a HTTP Service for HTTP/1 protocol. + /// Finish service configuration and create a service for the HTTP/1 protocol. pub fn h1(self, service: F) -> H1Service where B: MessageBody, F: IntoServiceFactory, - S::Error: Into, + S::Error: Into>, S::InitError: fmt::Debug, S::Response: Into>, { let cfg = ServiceConfig::new( self.keep_alive, - self.client_timeout, - self.client_disconnect, + self.client_request_timeout, + self.client_disconnect_timeout, self.secure, self.local_addr, ); @@ -199,26 +209,26 @@ where .on_connect_ext(self.on_connect_ext) } - /// Finish service configuration and create a HTTP service for HTTP/2 protocol. - pub fn h2(self, service: F) -> H2Service + /// Finish service configuration and create a service for the HTTP/2 protocol. 
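For orientation, a hedged sketch of the builder with the renamed, `Duration`-based setters. It assumes the usual `HttpService::build()` entry point, the closure-as-service-factory convenience, and the `Response::ok()` helper, none of which are part of this hunk; exact trait bounds may differ.

```rust
use std::time::Duration;

use actix_http::{Error, HttpService, Request, Response};

fn main() {
    // The renamed setters take `Duration`s; `Duration::ZERO` disables a timeout.
    let _h1 = HttpService::build()
        .client_request_timeout(Duration::from_secs(5))
        .client_disconnect_timeout(Duration::ZERO)
        .h1(|_req: Request| async { Ok::<_, Error>(Response::ok()) });
}
```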
+ #[cfg(feature = "http2")] + pub fn h2(self, service: F) -> crate::h2::H2Service where F: IntoServiceFactory, - S::Error: Into + 'static, + S::Error: Into> + 'static, S::InitError: fmt::Debug, S::Response: Into> + 'static, B: MessageBody + 'static, - B::Error: Into, { let cfg = ServiceConfig::new( self.keep_alive, - self.client_timeout, - self.client_disconnect, + self.client_request_timeout, + self.client_disconnect_timeout, self.secure, self.local_addr, ); - H2Service::with_config(cfg, service.into_factory()) + crate::h2::H2Service::with_config(cfg, service.into_factory()) .on_connect_ext(self.on_connect_ext) } @@ -226,17 +236,16 @@ where pub fn finish(self, service: F) -> HttpService where F: IntoServiceFactory, - S::Error: Into + 'static, + S::Error: Into> + 'static, S::InitError: fmt::Debug, S::Response: Into> + 'static, B: MessageBody + 'static, - B::Error: Into, { let cfg = ServiceConfig::new( self.keep_alive, - self.client_timeout, - self.client_disconnect, + self.client_request_timeout, + self.client_disconnect_timeout, self.secure, self.local_addr, ); diff --git a/actix-http/src/client/connector.rs b/actix-http/src/client/connector.rs deleted file mode 100644 index 508fe748b..000000000 --- a/actix-http/src/client/connector.rs +++ /dev/null @@ -1,766 +0,0 @@ -use std::{ - fmt, - future::Future, - net::IpAddr, - pin::Pin, - rc::Rc, - task::{Context, Poll}, - time::Duration, -}; - -use actix_rt::{ - net::{ActixStream, TcpStream}, - time::{sleep, Sleep}, -}; -use actix_service::Service; -use actix_tls::connect::{ - new_connector, Connect as TcpConnect, ConnectError as TcpConnectError, - Connection as TcpConnection, Resolver, -}; -use futures_core::{future::LocalBoxFuture, ready}; -use http::Uri; -use pin_project::pin_project; - -use super::config::ConnectorConfig; -use super::connection::{Connection, ConnectionIo}; -use super::error::ConnectError; -use super::pool::ConnectionPool; -use super::Connect; -use super::Protocol; - -#[cfg(feature = "openssl")] -use actix_tls::connect::ssl::openssl::SslConnector as OpensslConnector; -#[cfg(feature = "rustls")] -use actix_tls::connect::ssl::rustls::ClientConfig; - -enum SslConnector { - #[allow(dead_code)] - None, - #[cfg(feature = "openssl")] - Openssl(OpensslConnector), - #[cfg(feature = "rustls")] - Rustls(std::sync::Arc), -} - -/// Manages HTTP client network connectivity. -/// -/// The `Connector` type uses a builder-like combinator pattern for service -/// construction that finishes by calling the `.finish()` method. 
-/// -/// ```ignore -/// use std::time::Duration; -/// use actix_http::client::Connector; -/// -/// let connector = Connector::new() -/// .timeout(Duration::from_secs(5)) -/// .finish(); -/// ``` -pub struct Connector { - connector: T, - config: ConnectorConfig, - #[allow(dead_code)] - ssl: SslConnector, -} - -impl Connector<()> { - #[allow(clippy::new_ret_no_self, clippy::let_unit_value)] - pub fn new() -> Connector< - impl Service< - TcpConnect, - Response = TcpConnection, - Error = actix_tls::connect::ConnectError, - > + Clone, - > { - Connector { - ssl: Self::build_ssl(vec![b"h2".to_vec(), b"http/1.1".to_vec()]), - connector: new_connector(resolver::resolver()), - config: ConnectorConfig::default(), - } - } - - // Build Ssl connector with openssl, based on supplied alpn protocols - #[cfg(feature = "openssl")] - fn build_ssl(protocols: Vec>) -> SslConnector { - use actix_tls::connect::ssl::openssl::SslMethod; - use bytes::{BufMut, BytesMut}; - - let mut alpn = BytesMut::with_capacity(20); - for proto in protocols.iter() { - alpn.put_u8(proto.len() as u8); - alpn.put(proto.as_slice()); - } - - let mut ssl = OpensslConnector::builder(SslMethod::tls()).unwrap(); - let _ = ssl - .set_alpn_protos(&alpn) - .map_err(|e| error!("Can not set alpn protocol: {:?}", e)); - SslConnector::Openssl(ssl.build()) - } - - // Build Ssl connector with rustls, based on supplied alpn protocols - #[cfg(all(not(feature = "openssl"), feature = "rustls"))] - fn build_ssl(protocols: Vec>) -> SslConnector { - let mut config = ClientConfig::new(); - config.set_protocols(&protocols); - config.root_store.add_server_trust_anchors( - &actix_tls::connect::ssl::rustls::TLS_SERVER_ROOTS, - ); - SslConnector::Rustls(std::sync::Arc::new(config)) - } - - // ssl turned off, provides empty ssl connector - #[cfg(not(any(feature = "openssl", feature = "rustls")))] - fn build_ssl(_: Vec>) -> SslConnector { - SslConnector::None - } -} - -impl Connector { - /// Use custom connector. - pub fn connector(self, connector: S1) -> Connector - where - Io1: ActixStream + fmt::Debug + 'static, - S1: Service< - TcpConnect, - Response = TcpConnection, - Error = TcpConnectError, - > + Clone, - { - Connector { - connector, - config: self.config, - ssl: self.ssl, - } - } -} - -impl Connector -where - // Note: - // Input Io type is bound to ActixStream trait but internally in client module they - // are bound to ConnectionIo trait alias. And latter is the trait exposed to public - // in the form of Box type. - // - // This remap is to hide ActixStream's trait methods. They are not meant to be called - // from user code. - Io: ActixStream + fmt::Debug + 'static, - S: Service< - TcpConnect, - Response = TcpConnection, - Error = TcpConnectError, - > + Clone - + 'static, -{ - /// Tcp connection timeout, i.e. max time to connect to remote host including dns name - /// resolution. Set to 5 second by default. - pub fn timeout(mut self, timeout: Duration) -> Self { - self.config.timeout = timeout; - self - } - - /// Tls handshake timeout, i.e. max time to do tls handshake with remote host after tcp - /// connection established. Set to 5 second by default. - pub fn handshake_timeout(mut self, timeout: Duration) -> Self { - self.config.handshake_timeout = timeout; - self - } - - #[cfg(feature = "openssl")] - /// Use custom `SslConnector` instance. - pub fn ssl(mut self, connector: OpensslConnector) -> Self { - self.ssl = SslConnector::Openssl(connector); - self - } - - #[cfg(feature = "rustls")] - /// Use custom `SslConnector` instance. 
- pub fn rustls(mut self, connector: std::sync::Arc) -> Self { - self.ssl = SslConnector::Rustls(connector); - self - } - - /// Maximum supported HTTP major version. - /// - /// Supported versions are HTTP/1.1 and HTTP/2. - pub fn max_http_version(mut self, val: http::Version) -> Self { - let versions = match val { - http::Version::HTTP_11 => vec![b"http/1.1".to_vec()], - http::Version::HTTP_2 => vec![b"h2".to_vec(), b"http/1.1".to_vec()], - _ => { - unimplemented!("actix-http:client: supported versions http/1.1, http/2") - } - }; - self.ssl = Connector::build_ssl(versions); - self - } - - /// Indicates the initial window size (in octets) for - /// HTTP2 stream-level flow control for received data. - /// - /// The default value is 65,535 and is good for APIs, but not for big objects. - pub fn initial_window_size(mut self, size: u32) -> Self { - self.config.stream_window_size = size; - self - } - - /// Indicates the initial window size (in octets) for - /// HTTP2 connection-level flow control for received data. - /// - /// The default value is 65,535 and is good for APIs, but not for big objects. - pub fn initial_connection_window_size(mut self, size: u32) -> Self { - self.config.conn_window_size = size; - self - } - - /// Set total number of simultaneous connections per type of scheme. - /// - /// If limit is 0, the connector has no limit. - /// The default limit size is 100. - pub fn limit(mut self, limit: usize) -> Self { - self.config.limit = limit; - self - } - - /// Set keep-alive period for opened connection. - /// - /// Keep-alive period is the period between connection usage. If - /// the delay between repeated usages of the same connection - /// exceeds this period, the connection is closed. - /// Default keep-alive period is 15 seconds. - pub fn conn_keep_alive(mut self, dur: Duration) -> Self { - self.config.conn_keep_alive = dur; - self - } - - /// Set max lifetime period for connection. - /// - /// Connection lifetime is max lifetime of any opened connection - /// until it is closed regardless of keep-alive period. - /// Default lifetime period is 75 seconds. - pub fn conn_lifetime(mut self, dur: Duration) -> Self { - self.config.conn_lifetime = dur; - self - } - - /// Set server connection disconnect timeout in milliseconds. - /// - /// Defines a timeout for disconnect connection. If a disconnect procedure does not complete - /// within this time, the socket get dropped. This timeout affects only secure connections. - /// - /// To disable timeout set value to 0. - /// - /// By default disconnect timeout is set to 3000 milliseconds. - pub fn disconnect_timeout(mut self, dur: Duration) -> Self { - self.config.disconnect_timeout = Some(dur); - self - } - - /// Set local IP Address the connector would use for establishing connection. - pub fn local_address(mut self, addr: IpAddr) -> Self { - self.config.local_address = Some(addr); - self - } - - /// Finish configuration process and create connector service. - /// The Connector builder always concludes by calling `finish()` last in - /// its combinator chain. 
- pub fn finish(self) -> ConnectorService { - let local_address = self.config.local_address; - let timeout = self.config.timeout; - - let tcp_service_inner = - TcpConnectorInnerService::new(self.connector, timeout, local_address); - - #[allow(clippy::redundant_clone)] - let tcp_service = TcpConnectorService { - service: tcp_service_inner.clone(), - }; - - let tls_service = match self.ssl { - SslConnector::None => None, - #[cfg(feature = "openssl")] - SslConnector::Openssl(tls) => { - const H2: &[u8] = b"h2"; - - use actix_tls::connect::ssl::openssl::{OpensslConnector, SslStream}; - - impl IntoConnectionIo for TcpConnection> { - fn into_connection_io(self) -> (Box, Protocol) { - let sock = self.into_parts().0; - let h2 = sock - .ssl() - .selected_alpn_protocol() - .map(|protos| protos.windows(2).any(|w| w == H2)) - .unwrap_or(false); - if h2 { - (Box::new(sock), Protocol::Http2) - } else { - (Box::new(sock), Protocol::Http1) - } - } - } - - let handshake_timeout = self.config.handshake_timeout; - - let tls_service = TlsConnectorService { - tcp_service: tcp_service_inner, - tls_service: OpensslConnector::service(tls), - timeout: handshake_timeout, - }; - - Some(actix_service::boxed::rc_service(tls_service)) - } - #[cfg(feature = "rustls")] - SslConnector::Rustls(tls) => { - const H2: &[u8] = b"h2"; - - use actix_tls::connect::ssl::rustls::{ - RustlsConnector, Session, TlsStream, - }; - - impl IntoConnectionIo for TcpConnection> { - fn into_connection_io(self) -> (Box, Protocol) { - let sock = self.into_parts().0; - let h2 = sock - .get_ref() - .1 - .get_alpn_protocol() - .map(|protos| protos.windows(2).any(|w| w == H2)) - .unwrap_or(false); - if h2 { - (Box::new(sock), Protocol::Http2) - } else { - (Box::new(sock), Protocol::Http1) - } - } - } - - let handshake_timeout = self.config.handshake_timeout; - - let tls_service = TlsConnectorService { - tcp_service: tcp_service_inner, - tls_service: RustlsConnector::service(tls), - timeout: handshake_timeout, - }; - - Some(actix_service::boxed::rc_service(tls_service)) - } - }; - - let tcp_config = self.config.no_disconnect_timeout(); - - let tcp_pool = ConnectionPool::new(tcp_service, tcp_config); - - let tls_config = self.config; - let tls_pool = tls_service - .map(move |tls_service| ConnectionPool::new(tls_service, tls_config)); - - ConnectorServicePriv { tcp_pool, tls_pool } - } -} - -/// tcp service for map `TcpConnection` type to `(Io, Protocol)` -#[derive(Clone)] -pub struct TcpConnectorService { - service: S, -} - -impl Service for TcpConnectorService -where - S: Service, Error = ConnectError> - + Clone - + 'static, -{ - type Response = (Io, Protocol); - type Error = ConnectError; - type Future = TcpConnectorFuture; - - actix_service::forward_ready!(service); - - fn call(&self, req: Connect) -> Self::Future { - TcpConnectorFuture { - fut: self.service.call(req), - } - } -} - -#[pin_project] -pub struct TcpConnectorFuture { - #[pin] - fut: Fut, -} - -impl Future for TcpConnectorFuture -where - Fut: Future, ConnectError>>, -{ - type Output = Result<(Io, Protocol), ConnectError>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - self.project() - .fut - .poll(cx) - .map_ok(|res| (res.into_parts().0, Protocol::Http1)) - } -} - -/// service for establish tcp connection and do client tls handshake. -/// operation is canceled when timeout limit reached. -struct TlsConnectorService { - /// tcp connection is canceled on `TcpConnectorInnerService`'s timeout setting. 
- tcp_service: S, - /// tls connection is canceled on `TlsConnectorService`'s timeout setting. - tls_service: St, - timeout: Duration, -} - -impl Service for TlsConnectorService -where - S: Service, Error = ConnectError> - + Clone - + 'static, - St: Service, Error = std::io::Error> + Clone + 'static, - Io: ConnectionIo, - St::Response: IntoConnectionIo, -{ - type Response = (Box, Protocol); - type Error = ConnectError; - type Future = TlsConnectorFuture; - - fn poll_ready(&self, cx: &mut Context<'_>) -> Poll> { - ready!(self.tcp_service.poll_ready(cx))?; - ready!(self.tls_service.poll_ready(cx))?; - Poll::Ready(Ok(())) - } - - fn call(&self, req: Connect) -> Self::Future { - let fut = self.tcp_service.call(req); - let tls_service = self.tls_service.clone(); - let timeout = self.timeout; - - TlsConnectorFuture::TcpConnect { - fut, - tls_service: Some(tls_service), - timeout, - } - } -} - -#[pin_project(project = TlsConnectorProj)] -#[allow(clippy::large_enum_variant)] -enum TlsConnectorFuture { - TcpConnect { - #[pin] - fut: Fut1, - tls_service: Option, - timeout: Duration, - }, - TlsConnect { - #[pin] - fut: Fut2, - #[pin] - timeout: Sleep, - }, -} - -/// helper trait for generic over different TlsStream types between tls crates. -trait IntoConnectionIo { - fn into_connection_io(self) -> (Box, Protocol); -} - -impl Future for TlsConnectorFuture -where - S: Service< - TcpConnection, - Response = Res, - Error = std::io::Error, - Future = Fut2, - >, - S::Response: IntoConnectionIo, - Fut1: Future, ConnectError>>, - Fut2: Future>, - Io: ConnectionIo, -{ - type Output = Result<(Box, Protocol), ConnectError>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match self.as_mut().project() { - TlsConnectorProj::TcpConnect { - fut, - tls_service, - timeout, - } => { - let res = ready!(fut.poll(cx))?; - let fut = tls_service - .take() - .expect("TlsConnectorFuture polled after complete") - .call(res); - let timeout = sleep(*timeout); - self.set(TlsConnectorFuture::TlsConnect { fut, timeout }); - self.poll(cx) - } - TlsConnectorProj::TlsConnect { fut, timeout } => match fut.poll(cx)? { - Poll::Ready(res) => Poll::Ready(Ok(res.into_connection_io())), - Poll::Pending => timeout.poll(cx).map(|_| Err(ConnectError::Timeout)), - }, - } - } -} - -/// service for establish tcp connection. -/// operation is canceled when timeout limit reached. 
-#[derive(Clone)] -pub struct TcpConnectorInnerService { - service: S, - timeout: Duration, - local_address: Option, -} - -impl TcpConnectorInnerService { - fn new( - service: S, - timeout: Duration, - local_address: Option, - ) -> Self { - Self { - service, - timeout, - local_address, - } - } -} - -impl Service for TcpConnectorInnerService -where - S: Service< - TcpConnect, - Response = TcpConnection, - Error = TcpConnectError, - > + Clone - + 'static, -{ - type Response = S::Response; - type Error = ConnectError; - type Future = TcpConnectorInnerFuture; - - actix_service::forward_ready!(service); - - fn call(&self, req: Connect) -> Self::Future { - let mut req = TcpConnect::new(req.uri).set_addr(req.addr); - - if let Some(local_addr) = self.local_address { - req = req.set_local_addr(local_addr); - } - - TcpConnectorInnerFuture { - fut: self.service.call(req), - timeout: sleep(self.timeout), - } - } -} - -#[pin_project] -pub struct TcpConnectorInnerFuture { - #[pin] - fut: Fut, - #[pin] - timeout: Sleep, -} - -impl Future for TcpConnectorInnerFuture -where - Fut: Future, TcpConnectError>>, -{ - type Output = Result, ConnectError>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.project(); - match this.fut.poll(cx) { - Poll::Ready(res) => Poll::Ready(res.map_err(ConnectError::from)), - Poll::Pending => this.timeout.poll(cx).map(|_| Err(ConnectError::Timeout)), - } - } -} - -/// Connector service for pooled Plain/Tls Tcp connections. -pub type ConnectorService = ConnectorServicePriv< - TcpConnectorService>, - Rc< - dyn Service< - Connect, - Response = (Box, Protocol), - Error = ConnectError, - Future = LocalBoxFuture< - 'static, - Result<(Box, Protocol), ConnectError>, - >, - >, - >, - Io, - Box, ->; - -pub struct ConnectorServicePriv -where - S1: Service, - S2: Service, - Io1: ConnectionIo, - Io2: ConnectionIo, -{ - tcp_pool: ConnectionPool, - tls_pool: Option>, -} - -impl Service for ConnectorServicePriv -where - S1: Service - + Clone - + 'static, - S2: Service - + Clone - + 'static, - Io1: ConnectionIo, - Io2: ConnectionIo, -{ - type Response = Connection; - type Error = ConnectError; - type Future = ConnectorServiceFuture; - - fn poll_ready(&self, cx: &mut Context<'_>) -> Poll> { - ready!(self.tcp_pool.poll_ready(cx))?; - if let Some(ref tls_pool) = self.tls_pool { - ready!(tls_pool.poll_ready(cx))?; - } - Poll::Ready(Ok(())) - } - - fn call(&self, req: Connect) -> Self::Future { - match req.uri.scheme_str() { - Some("https") | Some("wss") => match self.tls_pool { - None => ConnectorServiceFuture::SslIsNotSupported, - Some(ref pool) => ConnectorServiceFuture::Tls(pool.call(req)), - }, - _ => ConnectorServiceFuture::Tcp(self.tcp_pool.call(req)), - } - } -} - -#[pin_project(project = ConnectorServiceProj)] -pub enum ConnectorServiceFuture -where - S1: Service - + Clone - + 'static, - S2: Service - + Clone - + 'static, - Io1: ConnectionIo, - Io2: ConnectionIo, -{ - Tcp(#[pin] as Service>::Future), - Tls(#[pin] as Service>::Future), - SslIsNotSupported, -} - -impl Future for ConnectorServiceFuture -where - S1: Service - + Clone - + 'static, - S2: Service - + Clone - + 'static, - Io1: ConnectionIo, - Io2: ConnectionIo, -{ - type Output = Result, ConnectError>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match self.project() { - ConnectorServiceProj::Tcp(fut) => fut.poll(cx).map_ok(Connection::Tcp), - ConnectorServiceProj::Tls(fut) => fut.poll(cx).map_ok(Connection::Tls), - ConnectorServiceProj::SslIsNotSupported => { - 
Poll::Ready(Err(ConnectError::SslIsNotSupported)) - } - } - } -} - -#[cfg(not(feature = "trust-dns"))] -mod resolver { - use super::*; - - pub(super) fn resolver() -> Resolver { - Resolver::Default - } -} - -#[cfg(feature = "trust-dns")] -mod resolver { - use std::{cell::RefCell, net::SocketAddr}; - - use actix_tls::connect::Resolve; - use futures_core::future::LocalBoxFuture; - use trust_dns_resolver::{ - config::{ResolverConfig, ResolverOpts}, - system_conf::read_system_conf, - TokioAsyncResolver, - }; - - use super::*; - - pub(super) fn resolver() -> Resolver { - // new type for impl Resolve trait for TokioAsyncResolver. - struct TrustDnsResolver(TokioAsyncResolver); - - impl Resolve for TrustDnsResolver { - fn lookup<'a>( - &'a self, - host: &'a str, - port: u16, - ) -> LocalBoxFuture<'a, Result, Box>> - { - Box::pin(async move { - let res = self - .0 - .lookup_ip(host) - .await? - .iter() - .map(|ip| SocketAddr::new(ip, port)) - .collect(); - Ok(res) - }) - } - } - - // dns struct is cached in thread local. - // so new client constructor can reuse the existing dns resolver. - thread_local! { - static TRUST_DNS_RESOLVER: RefCell> = RefCell::new(None); - } - - // get from thread local or construct a new trust-dns resolver. - TRUST_DNS_RESOLVER.with(|local| { - let resolver = local.borrow().as_ref().map(Clone::clone); - match resolver { - Some(resolver) => resolver, - None => { - let (cfg, opts) = match read_system_conf() { - Ok((cfg, opts)) => (cfg, opts), - Err(e) => { - log::error!("TRust-DNS can not load system config: {}", e); - (ResolverConfig::default(), ResolverOpts::default()) - } - }; - - let resolver = TokioAsyncResolver::tokio(cfg, opts).unwrap(); - - // box trust dns resolver and put it in thread local. - let resolver = Resolver::new_custom(TrustDnsResolver(resolver)); - *local.borrow_mut() = Some(resolver.clone()); - resolver - } - } - }) - } -} diff --git a/actix-http/src/client/mod.rs b/actix-http/src/client/mod.rs deleted file mode 100644 index 41d5fef2a..000000000 --- a/actix-http/src/client/mod.rs +++ /dev/null @@ -1,26 +0,0 @@ -//! HTTP client. 
- -use http::Uri; - -mod config; -mod connection; -mod connector; -mod error; -mod h1proto; -mod h2proto; -mod pool; - -pub use actix_tls::connect::{ - Connect as TcpConnect, ConnectError as TcpConnectError, Connection as TcpConnection, -}; - -pub use self::connection::{Connection, ConnectionIo}; -pub use self::connector::{Connector, ConnectorService}; -pub use self::error::{ConnectError, FreezeRequestError, InvalidUrl, SendRequestError}; -pub use crate::Protocol; - -#[derive(Clone)] -pub struct Connect { - pub uri: Uri, - pub addr: Option, -} diff --git a/actix-http/src/config.rs b/actix-http/src/config.rs index 9a2293e92..b3b215da4 100644 --- a/actix-http/src/config.rs +++ b/actix-http/src/config.rs @@ -1,356 +1,177 @@ -use std::cell::Cell; -use std::fmt::Write; -use std::rc::Rc; -use std::time::Duration; -use std::{fmt, net}; - -use actix_rt::{ - task::JoinHandle, - time::{interval, sleep_until, Instant, Sleep}, +use std::{ + net, + rc::Rc, + time::{Duration, Instant}, }; + use bytes::BytesMut; -use time::OffsetDateTime; -/// "Sun, 06 Nov 1994 08:49:37 GMT".len() -const DATE_VALUE_LENGTH: usize = 29; +use crate::{date::DateService, KeepAlive}; -#[derive(Debug, PartialEq, Clone, Copy)] -/// Server keep-alive setting -pub enum KeepAlive { - /// Keep alive in seconds - Timeout(usize), - /// Rely on OS to shutdown tcp connection - Os, - /// Disabled - Disabled, -} - -impl From for KeepAlive { - fn from(keepalive: usize) -> Self { - KeepAlive::Timeout(keepalive) - } -} - -impl From> for KeepAlive { - fn from(keepalive: Option) -> Self { - if let Some(keepalive) = keepalive { - KeepAlive::Timeout(keepalive) - } else { - KeepAlive::Disabled - } - } -} - -/// Http service configuration +/// HTTP service configuration. +#[derive(Debug, Clone)] pub struct ServiceConfig(Rc); +#[derive(Debug)] struct Inner { - keep_alive: Option, - client_timeout: u64, - client_disconnect: u64, - ka_enabled: bool, + keep_alive: KeepAlive, + client_request_timeout: Duration, + client_disconnect_timeout: Duration, secure: bool, local_addr: Option, date_service: DateService, } -impl Clone for ServiceConfig { - fn clone(&self) -> Self { - ServiceConfig(self.0.clone()) - } -} - impl Default for ServiceConfig { fn default() -> Self { - Self::new(KeepAlive::Timeout(5), 0, 0, false, None) + Self::new( + KeepAlive::default(), + Duration::from_secs(5), + Duration::ZERO, + false, + None, + ) } } impl ServiceConfig { - /// Create instance of `ServiceConfig` + /// Create instance of `ServiceConfig`. pub fn new( keep_alive: KeepAlive, - client_timeout: u64, - client_disconnect: u64, + client_request_timeout: Duration, + client_disconnect_timeout: Duration, secure: bool, local_addr: Option, ) -> ServiceConfig { - let (keep_alive, ka_enabled) = match keep_alive { - KeepAlive::Timeout(val) => (val as u64, true), - KeepAlive::Os => (0, true), - KeepAlive::Disabled => (0, false), - }; - let keep_alive = if ka_enabled && keep_alive > 0 { - Some(Duration::from_secs(keep_alive)) - } else { - None - }; - ServiceConfig(Rc::new(Inner { - keep_alive, - ka_enabled, - client_timeout, - client_disconnect, + keep_alive: keep_alive.normalize(), + client_request_timeout, + client_disconnect_timeout, secure, local_addr, date_service: DateService::new(), })) } - /// Returns true if connection is secure (HTTPS) + /// Returns `true` if connection is secure (i.e., using TLS / HTTPS). #[inline] pub fn secure(&self) -> bool { self.0.secure } /// Returns the local address that this server is bound to. 
+ /// + /// Returns `None` for connections via UDS (Unix Domain Socket). #[inline] pub fn local_addr(&self) -> Option { self.0.local_addr } - /// Keep alive duration if configured. + /// Connection keep-alive setting. #[inline] - pub fn keep_alive(&self) -> Option { + pub fn keep_alive(&self) -> KeepAlive { self.0.keep_alive } - /// Return state of connection keep-alive functionality - #[inline] - pub fn keep_alive_enabled(&self) -> bool { - self.0.ka_enabled - } - - /// Client timeout for first request. - #[inline] - pub fn client_timer(&self) -> Option { - let delay_time = self.0.client_timeout; - if delay_time != 0 { - Some(sleep_until(self.now() + Duration::from_millis(delay_time))) - } else { - None + /// Creates a time object representing the deadline for this connection's keep-alive period, if + /// enabled. + /// + /// When [`KeepAlive::Os`] or [`KeepAlive::Disabled`] is set, this will return `None`. + pub fn keep_alive_deadline(&self) -> Option { + match self.keep_alive() { + KeepAlive::Timeout(dur) => Some(self.now() + dur), + KeepAlive::Os => None, + KeepAlive::Disabled => None, + } } - /// Client timeout for first request. - pub fn client_timer_expire(&self) -> Option { - let delay = self.0.client_timeout; - if delay != 0 { - Some(self.now() + Duration::from_millis(delay)) - } else { - None - } + /// Creates a time object representing the deadline for the client to finish sending the head of + /// its first request. + /// + /// Returns `None` if this `ServiceConfig` was constructed with a zero `client_request_timeout`. + pub fn client_request_deadline(&self) -> Option { + let timeout = self.0.client_request_timeout; + (timeout != Duration::ZERO).then(|| self.now() + timeout) } - /// Client disconnect timer - pub fn client_disconnect_timer(&self) -> Option { - let delay = self.0.client_disconnect; - if delay != 0 { - Some(self.now() + Duration::from_millis(delay)) - } else { - None - } + /// Creates a time object representing the deadline for the client to disconnect. + pub fn client_disconnect_deadline(&self) -> Option { + let timeout = self.0.client_disconnect_timeout; + (timeout != Duration::ZERO).then(|| self.now() + timeout) } - #[inline] - /// Return keep-alive timer delay is configured. - pub fn keep_alive_timer(&self) -> Option { - self.keep_alive().map(|ka| sleep_until(self.now() + ka)) - } - - /// Keep-alive expire time - pub fn keep_alive_expire(&self) -> Option { - self.keep_alive().map(|ka| self.now() + ka) - } - - #[inline] pub(crate) fn now(&self) -> Instant { self.0.date_service.now() } + /// Writes date header to `dst` buffer. + /// + /// Low-level method that utilizes the built-in efficient date service, requiring fewer syscalls + /// than normal. Note that a CRLF (`\r\n`) is included in what is written.
#[doc(hidden)] - pub fn set_date(&self, dst: &mut BytesMut) { - let mut buf: [u8; 39] = [0; 39]; - buf[..6].copy_from_slice(b"date: "); + pub fn write_date_header(&self, dst: &mut BytesMut, camel_case: bool) { + let mut buf: [u8; 37] = [0; 37]; + + buf[..6].copy_from_slice(if camel_case { b"Date: " } else { b"date: " }); + self.0 .date_service - .set_date(|date| buf[6..35].copy_from_slice(&date.bytes)); - buf[35..].copy_from_slice(b"\r\n\r\n"); + .with_date(|date| buf[6..35].copy_from_slice(&date.bytes)); + + buf[35..].copy_from_slice(b"\r\n"); dst.extend_from_slice(&buf); } - pub(crate) fn set_date_header(&self, dst: &mut BytesMut) { + #[allow(unused)] // used with `http2` feature flag + pub(crate) fn write_date_header_value(&self, dst: &mut BytesMut) { self.0 .date_service - .set_date(|date| dst.extend_from_slice(&date.bytes)); - } -} - -#[derive(Copy, Clone)] -struct Date { - bytes: [u8; DATE_VALUE_LENGTH], - pos: usize, -} - -impl Date { - fn new() -> Date { - let mut date = Date { - bytes: [0; DATE_VALUE_LENGTH], - pos: 0, - }; - date.update(); - date - } - - fn update(&mut self) { - self.pos = 0; - write!( - self, - "{}", - OffsetDateTime::now_utc().format("%a, %d %b %Y %H:%M:%S GMT") - ) - .unwrap(); - } -} - -impl fmt::Write for Date { - fn write_str(&mut self, s: &str) -> fmt::Result { - let len = s.len(); - self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes()); - self.pos += len; - Ok(()) - } -} - -/// Service for update Date and Instant periodically at 500 millis interval. -struct DateService { - current: Rc>, - handle: JoinHandle<()>, -} - -impl Drop for DateService { - fn drop(&mut self) { - // stop the timer update async task on drop. - self.handle.abort(); - } -} - -impl DateService { - fn new() -> Self { - // shared date and timer for DateService and update async task. - let current = Rc::new(Cell::new((Date::new(), Instant::now()))); - let current_clone = Rc::clone(¤t); - // spawn an async task sleep for 500 milli and update current date/timer in a loop. - // handle is used to stop the task on DateService drop. - let handle = actix_rt::spawn(async move { - #[cfg(test)] - let _notify = notify_on_drop::NotifyOnDrop::new(); - - let mut interval = interval(Duration::from_millis(500)); - loop { - let now = interval.tick().await; - let date = Date::new(); - current_clone.set((date, now)); - } - }); - - DateService { current, handle } - } - - fn now(&self) -> Instant { - self.current.get().1 - } - - fn set_date(&self, mut f: F) { - f(&self.current.get().0); - } -} - -// TODO: move to a util module for testing all spawn handle drop style tasks. -#[cfg(test)] -/// Test Module for checking the drop state of certain async tasks that are spawned -/// with `actix_rt::spawn` -/// -/// The target task must explicitly generate `NotifyOnDrop` when spawn the task -mod notify_on_drop { - use std::cell::RefCell; - - thread_local! { - static NOTIFY_DROPPED: RefCell> = RefCell::new(None); - } - - /// Check if the spawned task is dropped. - /// - /// # Panic: - /// - /// When there was no `NotifyOnDrop` instance on current thread - pub(crate) fn is_dropped() -> bool { - NOTIFY_DROPPED.with(|bool| { - bool.borrow() - .expect("No NotifyOnDrop existed on current thread") - }) - } - - pub(crate) struct NotifyOnDrop; - - impl NotifyOnDrop { - /// # Panic: - /// - /// When construct multiple instances on any given thread. 
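Taken together, the reworked `ServiceConfig` above replaces the old `*_timer`/`*_expire` helpers with deadline methods and exposes `write_date_header`. A rough sketch based only on the signatures in this hunk; the assertions and the `actix_rt::main` wrapper are assumptions for illustration, since the date service spawns a background task and needs a runtime:

```rust
use std::time::Duration;

use actix_http::{KeepAlive, ServiceConfig};
use bytes::BytesMut;

#[actix_rt::main]
async fn main() {
    // Timeouts are plain `Duration`s now; `Duration::ZERO` disables a timeout.
    let cfg = ServiceConfig::new(
        KeepAlive::Timeout(Duration::from_secs(5)),
        Duration::from_secs(5), // client_request_timeout
        Duration::ZERO,         // client_disconnect_timeout (disabled)
        false,                  // secure
        None,                   // local_addr
    );

    // Deadline helpers replace the old timer/expire methods.
    assert!(cfg.client_request_deadline().is_some());
    assert!(cfg.client_disconnect_deadline().is_none());

    // `write_date_header` emits a full header line, including the trailing CRLF.
    let mut buf = BytesMut::new();
    cfg.write_date_header(&mut buf, false);
    assert!(buf.starts_with(b"date: "));
}
```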
- pub(crate) fn new() -> Self { - NOTIFY_DROPPED.with(|bool| { - let mut bool = bool.borrow_mut(); - if bool.is_some() { - panic!("NotifyOnDrop existed on current thread"); - } else { - *bool = Some(false); - } - }); - - NotifyOnDrop - } - } - - impl Drop for NotifyOnDrop { - fn drop(&mut self) { - NOTIFY_DROPPED.with(|bool| { - if let Some(b) = bool.borrow_mut().as_mut() { - *b = true; - } - }); - } + .with_date(|date| dst.extend_from_slice(&date.bytes)); } } #[cfg(test)] mod tests { - use super::*; + use actix_rt::{ + task::yield_now, + time::{sleep, sleep_until}, + }; + use memchr::memmem; - use actix_rt::task::yield_now; + use super::*; + use crate::{date::DATE_VALUE_LENGTH, notify_on_drop}; #[actix_rt::test] async fn test_date_service_update() { - let settings = ServiceConfig::new(KeepAlive::Os, 0, 0, false, None); + let settings = + ServiceConfig::new(KeepAlive::Os, Duration::ZERO, Duration::ZERO, false, None); yield_now().await; let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); - settings.set_date(&mut buf1); + settings.write_date_header(&mut buf1, false); let now1 = settings.now(); - sleep_until(Instant::now() + Duration::from_secs(2)).await; + sleep_until((Instant::now() + Duration::from_secs(2)).into()).await; yield_now().await; let now2 = settings.now(); let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); - settings.set_date(&mut buf2); + settings.write_date_header(&mut buf2, false); assert_ne!(now1, now2); assert_ne!(buf1, buf2); drop(settings); - assert!(notify_on_drop::is_dropped()); + + // Ensure the task will drop eventually + let mut times = 0; + while !notify_on_drop::is_dropped() { + sleep(Duration::from_millis(100)).await; + times += 1; + assert!(times < 10, "Timeout waiting for task drop"); + } } #[actix_rt::test] @@ -365,14 +186,21 @@ mod tests { let clone3 = service.clone(); drop(clone1); - assert_eq!(false, notify_on_drop::is_dropped()); + assert!(!notify_on_drop::is_dropped()); drop(clone2); - assert_eq!(false, notify_on_drop::is_dropped()); + assert!(!notify_on_drop::is_dropped()); drop(clone3); - assert_eq!(false, notify_on_drop::is_dropped()); + assert!(!notify_on_drop::is_dropped()); drop(service); - assert!(notify_on_drop::is_dropped()); + + // Ensure the task will drop eventually + let mut times = 0; + while !notify_on_drop::is_dropped() { + sleep(Duration::from_millis(100)).await; + times += 1; + assert!(times < 10, "Timeout waiting for task drop"); + } } #[test] @@ -382,11 +210,27 @@ mod tests { #[actix_rt::test] async fn test_date() { - let settings = ServiceConfig::new(KeepAlive::Os, 0, 0, false, None); + let settings = ServiceConfig::default(); + let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); - settings.set_date(&mut buf1); + settings.write_date_header(&mut buf1, false); + let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); - settings.set_date(&mut buf2); + settings.write_date_header(&mut buf2, false); + assert_eq!(buf1, buf2); } + + #[actix_rt::test] + async fn test_date_camel_case() { + let settings = ServiceConfig::default(); + + let mut buf = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); + settings.write_date_header(&mut buf, false); + assert!(memmem::find(&buf, b"date:").is_some()); + + let mut buf = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); + settings.write_date_header(&mut buf, true); + assert!(memmem::find(&buf, b"Date:").is_some()); + } } diff --git a/actix-http/src/date.rs b/actix-http/src/date.rs new file mode 100644 index 000000000..735dd9100 --- /dev/null +++ 
b/actix-http/src/date.rs @@ -0,0 +1,92 @@ +use std::{ + cell::Cell, + fmt::{self, Write}, + rc::Rc, + time::{Duration, Instant, SystemTime}, +}; + +use actix_rt::{task::JoinHandle, time::interval}; + +/// "Thu, 01 Jan 1970 00:00:00 GMT".len() +pub(crate) const DATE_VALUE_LENGTH: usize = 29; + +#[derive(Clone, Copy)] +pub(crate) struct Date { + pub(crate) bytes: [u8; DATE_VALUE_LENGTH], + pos: usize, +} + +impl Date { + fn new() -> Date { + let mut date = Date { + bytes: [0; DATE_VALUE_LENGTH], + pos: 0, + }; + date.update(); + date + } + + fn update(&mut self) { + self.pos = 0; + write!(self, "{}", httpdate::HttpDate::from(SystemTime::now())).unwrap(); + } +} + +impl fmt::Write for Date { + fn write_str(&mut self, s: &str) -> fmt::Result { + let len = s.len(); + self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes()); + self.pos += len; + Ok(()) + } +} + +/// Service for update Date and Instant periodically at 500 millis interval. +pub(crate) struct DateService { + current: Rc>, + handle: JoinHandle<()>, +} + +impl DateService { + pub(crate) fn new() -> Self { + // shared date and timer for DateService and update async task. + let current = Rc::new(Cell::new((Date::new(), Instant::now()))); + let current_clone = Rc::clone(¤t); + // spawn an async task sleep for 500 millis and update current date/timer in a loop. + // handle is used to stop the task on DateService drop. + let handle = actix_rt::spawn(async move { + #[cfg(test)] + let _notify = crate::notify_on_drop::NotifyOnDrop::new(); + + let mut interval = interval(Duration::from_millis(500)); + loop { + let now = interval.tick().await; + let date = Date::new(); + current_clone.set((date, now.into_std())); + } + }); + + DateService { current, handle } + } + + pub(crate) fn now(&self) -> Instant { + self.current.get().1 + } + + pub(crate) fn with_date(&self, mut f: F) { + f(&self.current.get().0); + } +} + +impl fmt::Debug for DateService { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("DateService").finish_non_exhaustive() + } +} + +impl Drop for DateService { + fn drop(&mut self) { + // stop the timer update async task on drop. + self.handle.abort(); + } +} diff --git a/actix-http/src/encoding/decoder.rs b/actix-http/src/encoding/decoder.rs index f0abae865..cda534d60 100644 --- a/actix-http/src/encoding/decoder.rs +++ b/actix-http/src/encoding/decoder.rs @@ -8,24 +8,29 @@ use std::{ }; use actix_rt::task::{spawn_blocking, JoinHandle}; -use brotli2::write::BrotliDecoder; use bytes::Bytes; +#[cfg(feature = "compress-gzip")] use flate2::write::{GzDecoder, ZlibDecoder}; use futures_core::{ready, Stream}; +#[cfg(feature = "compress-zstd")] +use zstd::stream::write::Decoder as ZstdDecoder; use crate::{ encoding::Writer, - error::{BlockingError, PayloadError}, - http::header::{ContentEncoding, HeaderMap, CONTENT_ENCODING}, + error::PayloadError, + header::{ContentEncoding, HeaderMap, CONTENT_ENCODING}, }; const MAX_CHUNK_SIZE_DECODE_IN_PLACE: usize = 2049; -pub struct Decoder { - decoder: Option, - stream: S, - eof: bool, - fut: Option, ContentDecoder), io::Error>>>, +pin_project_lite::pin_project! 
{ + pub struct Decoder { + decoder: Option, + #[pin] + stream: S, + eof: bool, + fut: Option, ContentDecoder), io::Error>>>, + } } impl Decoder @@ -36,14 +41,27 @@ where #[inline] pub fn new(stream: S, encoding: ContentEncoding) -> Decoder { let decoder = match encoding { - ContentEncoding::Br => Some(ContentDecoder::Br(Box::new( - BrotliDecoder::new(Writer::new()), + #[cfg(feature = "compress-brotli")] + ContentEncoding::Brotli => Some(ContentDecoder::Brotli(Box::new( + brotli::DecompressorWriter::new(Writer::new(), 8_096), ))), - ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new( - ZlibDecoder::new(Writer::new()), - ))), - ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new( - GzDecoder::new(Writer::new()), + + #[cfg(feature = "compress-gzip")] + ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(ZlibDecoder::new( + Writer::new(), + )))), + + #[cfg(feature = "compress-gzip")] + ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(GzDecoder::new( + Writer::new(), + )))), + + #[cfg(feature = "compress-zstd")] + ContentEncoding::Zstd => Some(ContentDecoder::Zstd(Box::new( + ZstdDecoder::new(Writer::new()).expect( + "Failed to create zstd decoder. This is a bug. \ + Please report it to the actix-web repository.", + ), ))), _ => None, }; @@ -63,7 +81,7 @@ where let encoding = headers .get(&CONTENT_ENCODING) .and_then(|val| val.to_str().ok()) - .map(ContentEncoding::from) + .and_then(|x| x.parse().ok()) .unwrap_or(ContentEncoding::Identity); Self::new(stream, encoding) @@ -72,45 +90,48 @@ where impl Stream for Decoder where - S: Stream> + Unpin, + S: Stream>, { type Item = Result; - fn poll_next( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - loop { - if let Some(ref mut fut) = self.fut { - let (chunk, decoder) = - ready!(Pin::new(fut).poll(cx)).map_err(|_| BlockingError)??; + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.project(); - self.decoder = Some(decoder); - self.fut.take(); + loop { + if let Some(ref mut fut) = this.fut { + let (chunk, decoder) = ready!(Pin::new(fut).poll(cx)).map_err(|_| { + PayloadError::Io(io::Error::new( + io::ErrorKind::Other, + "Blocking task was cancelled unexpectedly", + )) + })??; + + *this.decoder = Some(decoder); + this.fut.take(); if let Some(chunk) = chunk { return Poll::Ready(Some(Ok(chunk))); } } - if self.eof { + if *this.eof { return Poll::Ready(None); } - match ready!(Pin::new(&mut self.stream).poll_next(cx)) { + match ready!(this.stream.as_mut().poll_next(cx)) { Some(Err(err)) => return Poll::Ready(Some(Err(err))), Some(Ok(chunk)) => { - if let Some(mut decoder) = self.decoder.take() { + if let Some(mut decoder) = this.decoder.take() { if chunk.len() < MAX_CHUNK_SIZE_DECODE_IN_PLACE { let chunk = decoder.feed_data(chunk)?; - self.decoder = Some(decoder); + *this.decoder = Some(decoder); if let Some(chunk) = chunk { return Poll::Ready(Some(Ok(chunk))); } } else { - self.fut = Some(spawn_blocking(move || { + *this.fut = Some(spawn_blocking(move || { let chunk = decoder.feed_data(chunk)?; Ok((chunk, decoder)) })); @@ -123,9 +144,9 @@ where } None => { - self.eof = true; + *this.eof = true; - return if let Some(mut decoder) = self.decoder.take() { + return if let Some(mut decoder) = this.decoder.take() { match decoder.feed_eof() { Ok(Some(res)) => Poll::Ready(Some(Ok(res))), Ok(None) => Poll::Ready(None), @@ -141,15 +162,26 @@ where } enum ContentDecoder { + #[cfg(feature = "compress-gzip")] Deflate(Box>), + + #[cfg(feature = "compress-gzip")] 
Gzip(Box>), - Br(Box>), + + #[cfg(feature = "compress-brotli")] + Brotli(Box>), + + // We need explicit 'static lifetime here because ZstdDecoder need lifetime + // argument, and we use `spawn_blocking` in `Decoder::poll_next` that require `FnOnce() -> R + Send + 'static` + #[cfg(feature = "compress-zstd")] + Zstd(Box>), } impl ContentDecoder { fn feed_eof(&mut self) -> io::Result> { match self { - ContentDecoder::Br(ref mut decoder) => match decoder.flush() { + #[cfg(feature = "compress-brotli")] + ContentDecoder::Brotli(ref mut decoder) => match decoder.flush() { Ok(()) => { let b = decoder.get_mut().take(); @@ -159,9 +191,10 @@ impl ContentDecoder { Ok(None) } } - Err(e) => Err(e), + Err(err) => Err(err), }, + #[cfg(feature = "compress-gzip")] ContentDecoder::Gzip(ref mut decoder) => match decoder.try_finish() { Ok(_) => { let b = decoder.get_mut().take(); @@ -172,9 +205,10 @@ impl ContentDecoder { Ok(None) } } - Err(e) => Err(e), + Err(err) => Err(err), }, + #[cfg(feature = "compress-gzip")] ContentDecoder::Deflate(ref mut decoder) => match decoder.try_finish() { Ok(_) => { let b = decoder.get_mut().take(); @@ -184,14 +218,28 @@ impl ContentDecoder { Ok(None) } } - Err(e) => Err(e), + Err(err) => Err(err), + }, + + #[cfg(feature = "compress-zstd")] + ContentDecoder::Zstd(ref mut decoder) => match decoder.flush() { + Ok(_) => { + let b = decoder.get_mut().take(); + if !b.is_empty() { + Ok(Some(b)) + } else { + Ok(None) + } + } + Err(err) => Err(err), }, } } fn feed_data(&mut self, data: Bytes) -> io::Result> { match self { - ContentDecoder::Br(ref mut decoder) => match decoder.write_all(&data) { + #[cfg(feature = "compress-brotli")] + ContentDecoder::Brotli(ref mut decoder) => match decoder.write_all(&data) { Ok(_) => { decoder.flush()?; let b = decoder.get_mut().take(); @@ -202,9 +250,10 @@ impl ContentDecoder { Ok(None) } } - Err(e) => Err(e), + Err(err) => Err(err), }, + #[cfg(feature = "compress-gzip")] ContentDecoder::Gzip(ref mut decoder) => match decoder.write_all(&data) { Ok(_) => { decoder.flush()?; @@ -216,9 +265,10 @@ impl ContentDecoder { Ok(None) } } - Err(e) => Err(e), + Err(err) => Err(err), }, + #[cfg(feature = "compress-gzip")] ContentDecoder::Deflate(ref mut decoder) => match decoder.write_all(&data) { Ok(_) => { decoder.flush()?; @@ -230,7 +280,22 @@ impl ContentDecoder { Ok(None) } } - Err(e) => Err(e), + Err(err) => Err(err), + }, + + #[cfg(feature = "compress-zstd")] + ContentDecoder::Zstd(ref mut decoder) => match decoder.write_all(&data) { + Ok(_) => { + decoder.flush()?; + + let b = decoder.get_mut().take(); + if !b.is_empty() { + Ok(Some(b)) + } else { + Ok(None) + } + } + Err(err) => Err(err), }, } } diff --git a/actix-http/src/encoding/encoder.rs b/actix-http/src/encoding/encoder.rs index b8bc8b68d..180927ac6 100644 --- a/actix-http/src/encoding/encoder.rs +++ b/actix-http/src/encoding/encoder.rs @@ -9,106 +9,118 @@ use std::{ }; use actix_rt::task::{spawn_blocking, JoinHandle}; -use brotli2::write::BrotliEncoder; use bytes::Bytes; use derive_more::Display; +#[cfg(feature = "compress-gzip")] use flate2::write::{GzEncoder, ZlibEncoder}; use futures_core::ready; -use pin_project::pin_project; - -use crate::{ - body::{Body, BodySize, BoxAnyBody, MessageBody, ResponseBody}, - http::{ - header::{ContentEncoding, CONTENT_ENCODING}, - HeaderValue, StatusCode, - }, - Error, ResponseHead, -}; +use pin_project_lite::pin_project; +use tracing::trace; +#[cfg(feature = "compress-zstd")] +use zstd::stream::write::Encoder as ZstdEncoder; use super::Writer; -use 
crate::error::BlockingError; +use crate::{ + body::{self, BodySize, MessageBody}, + header::{self, ContentEncoding, HeaderValue, CONTENT_ENCODING}, + ResponseHead, StatusCode, +}; const MAX_CHUNK_SIZE_ENCODE_IN_PLACE: usize = 1024; -#[pin_project] -pub struct Encoder { - eof: bool, - #[pin] - body: EncoderBody, - encoder: Option, - fut: Option>>, -} - -impl Encoder { - pub fn response( - encoding: ContentEncoding, - head: &mut ResponseHead, - body: ResponseBody, - ) -> ResponseBody> { - let can_encode = !(head.headers().contains_key(&CONTENT_ENCODING) - || head.status == StatusCode::SWITCHING_PROTOCOLS - || head.status == StatusCode::NO_CONTENT - || encoding == ContentEncoding::Identity - || encoding == ContentEncoding::Auto); - - let body = match body { - ResponseBody::Other(b) => match b { - Body::None => return ResponseBody::Other(Body::None), - Body::Empty => return ResponseBody::Other(Body::Empty), - Body::Bytes(buf) => { - if can_encode { - EncoderBody::Bytes(buf) - } else { - return ResponseBody::Other(Body::Bytes(buf)); - } - } - Body::Message(stream) => EncoderBody::BoxedStream(stream), - }, - ResponseBody::Body(stream) => EncoderBody::Stream(stream), - }; - - if can_encode { - // Modify response body only if encoder is not None - if let Some(enc) = ContentEncoder::encoder(encoding) { - update_head(encoding, head); - head.no_chunking(false); - return ResponseBody::Body(Encoder { - body, - eof: false, - fut: None, - encoder: Some(enc), - }); - } - } - - ResponseBody::Body(Encoder { - body, - eof: false, - fut: None, - encoder: None, - }) +pin_project! { + pub struct Encoder { + #[pin] + body: EncoderBody, + encoder: Option, + fut: Option>>, + eof: bool, } } -#[pin_project(project = EncoderBodyProj)] -enum EncoderBody { - Bytes(Bytes), - Stream(#[pin] B), - BoxedStream(BoxAnyBody), +impl Encoder { + fn none() -> Self { + Encoder { + body: EncoderBody::None { + body: body::None::new(), + }, + encoder: None, + fut: None, + eof: true, + } + } + + fn empty() -> Self { + Encoder { + body: EncoderBody::Full { body: Bytes::new() }, + encoder: None, + fut: None, + eof: true, + } + } + + pub fn response(encoding: ContentEncoding, head: &mut ResponseHead, body: B) -> Self { + // no need to compress empty bodies + match body.size() { + BodySize::None => return Self::none(), + BodySize::Sized(0) => return Self::empty(), + _ => {} + } + + let should_encode = !(head.headers().contains_key(&CONTENT_ENCODING) + || head.status == StatusCode::SWITCHING_PROTOCOLS + || head.status == StatusCode::NO_CONTENT + || encoding == ContentEncoding::Identity); + + let body = match body.try_into_bytes() { + Ok(body) => EncoderBody::Full { body }, + Err(body) => EncoderBody::Stream { body }, + }; + + if should_encode { + // wrap body only if encoder is feature-enabled + if let Some(enc) = ContentEncoder::select(encoding) { + update_head(encoding, head); + + return Encoder { + body, + encoder: Some(enc), + fut: None, + eof: false, + }; + } + } + + Encoder { + body, + encoder: None, + fut: None, + eof: false, + } + } +} + +pin_project! 
{ + #[project = EncoderBodyProj] + enum EncoderBody { + None { body: body::None }, + Full { body: Bytes }, + Stream { #[pin] body: B }, + } } impl MessageBody for EncoderBody where B: MessageBody, - B::Error: Into, { - type Error = EncoderError; + type Error = EncoderError; + #[inline] fn size(&self) -> BodySize { match self { - EncoderBody::Bytes(ref b) => b.size(), - EncoderBody::Stream(ref b) => b.size(), - EncoderBody::BoxedStream(ref b) => b.size(), + EncoderBody::None { body } => body.size(), + EncoderBody::Full { body } => body.size(), + EncoderBody::Stream { body } => body.size(), } } @@ -117,28 +129,27 @@ where cx: &mut Context<'_>, ) -> Poll>> { match self.project() { - EncoderBodyProj::Bytes(b) => { - if b.is_empty() { - Poll::Ready(None) - } else { - Poll::Ready(Some(Ok(std::mem::take(b)))) - } + EncoderBodyProj::None { body } => { + Pin::new(body).poll_next(cx).map_err(|err| match err {}) } - // TODO: MSRV 1.51: poll_map_err - EncoderBodyProj::Stream(b) => match ready!(b.poll_next(cx)) { - Some(Err(err)) => Poll::Ready(Some(Err(EncoderError::Body(err)))), - Some(Ok(val)) => Poll::Ready(Some(Ok(val))), - None => Poll::Ready(None), - }, - EncoderBodyProj::BoxedStream(ref mut b) => { - match ready!(b.as_pin_mut().poll_next(cx)) { - Some(Err(err)) => { - Poll::Ready(Some(Err(EncoderError::Boxed(err.into())))) - } - Some(Ok(val)) => Poll::Ready(Some(Ok(val))), - None => Poll::Ready(None), - } + EncoderBodyProj::Full { body } => { + Pin::new(body).poll_next(cx).map_err(|err| match err {}) } + EncoderBodyProj::Stream { body } => body + .poll_next(cx) + .map_err(|err| EncoderError::Body(err.into())), + } + } + + #[inline] + fn try_into_bytes(self) -> Result + where + Self: Sized, + { + match self { + EncoderBody::None { body } => Ok(body.try_into_bytes().unwrap()), + EncoderBody::Full { body } => Ok(body.try_into_bytes().unwrap()), + _ => Err(self), } } } @@ -146,15 +157,15 @@ where impl MessageBody for Encoder where B: MessageBody, - B::Error: Into, { - type Error = EncoderError; + type Error = EncoderError; + #[inline] fn size(&self) -> BodySize { - if self.encoder.is_none() { - self.body.size() - } else { + if self.encoder.is_some() { BodySize::Stream + } else { + self.body.size() } } @@ -163,6 +174,7 @@ where cx: &mut Context<'_>, ) -> Poll>> { let mut this = self.project(); + loop { if *this.eof { return Poll::Ready(None); @@ -170,7 +182,12 @@ where if let Some(ref mut fut) = this.fut { let mut encoder = ready!(Pin::new(fut).poll(cx)) - .map_err(|_| EncoderError::Blocking(BlockingError))? + .map_err(|_| { + EncoderError::Io(io::Error::new( + io::ErrorKind::Other, + "Blocking task was cancelled unexpectedly", + )) + })? 
.map_err(EncoderError::Io)?; let chunk = encoder.take(); @@ -211,6 +228,7 @@ where None => { if let Some(encoder) = this.encoder.take() { let chunk = encoder.finish().map_err(EncoderError::Io)?; + if chunk.is_empty() { return Poll::Ready(None); } else { @@ -224,35 +242,75 @@ where } } } + + #[inline] + fn try_into_bytes(mut self) -> Result + where + Self: Sized, + { + if self.encoder.is_some() { + Err(self) + } else { + match self.body.try_into_bytes() { + Ok(body) => Ok(body), + Err(body) => { + self.body = body; + Err(self) + } + } + } + } } fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) { - head.headers_mut().insert( - CONTENT_ENCODING, - HeaderValue::from_static(encoding.as_str()), - ); + head.headers_mut() + .insert(header::CONTENT_ENCODING, encoding.to_header_value()); + head.headers_mut() + .append(header::VARY, HeaderValue::from_static("accept-encoding")); + + head.no_chunking(false); } enum ContentEncoder { + #[cfg(feature = "compress-gzip")] Deflate(ZlibEncoder), + + #[cfg(feature = "compress-gzip")] Gzip(GzEncoder), - Br(BrotliEncoder), + + #[cfg(feature = "compress-brotli")] + Brotli(Box>), + + // We need an explicit 'static lifetime here because ZstdEncoder needs a lifetime argument and we + // use `spawn_blocking` in `Encoder::poll_next` that requires `FnOnce() -> R + Send + 'static`. + #[cfg(feature = "compress-zstd")] + Zstd(ZstdEncoder<'static, Writer>), } impl ContentEncoder { - fn encoder(encoding: ContentEncoding) -> Option { + fn select(encoding: ContentEncoding) -> Option { match encoding { + #[cfg(feature = "compress-gzip")] ContentEncoding::Deflate => Some(ContentEncoder::Deflate(ZlibEncoder::new( Writer::new(), flate2::Compression::fast(), ))), + + #[cfg(feature = "compress-gzip")] ContentEncoding::Gzip => Some(ContentEncoder::Gzip(GzEncoder::new( Writer::new(), flate2::Compression::fast(), ))), - ContentEncoding::Br => { - Some(ContentEncoder::Br(BrotliEncoder::new(Writer::new(), 3))) + + #[cfg(feature = "compress-brotli")] + ContentEncoding::Brotli => Some(ContentEncoder::Brotli(new_brotli_compressor())), + + #[cfg(feature = "compress-zstd")] + ContentEncoding::Zstd => { + let encoder = ZstdEncoder::new(Writer::new(), 3).ok()?; + Some(ContentEncoder::Zstd(encoder)) } + _ => None, } } @@ -260,38 +318,60 @@ impl ContentEncoder { #[inline] pub(crate) fn take(&mut self) -> Bytes { match *self { - ContentEncoder::Br(ref mut encoder) => encoder.get_mut().take(), + #[cfg(feature = "compress-brotli")] + ContentEncoder::Brotli(ref mut encoder) => encoder.get_mut().take(), + + #[cfg(feature = "compress-gzip")] ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(), + + #[cfg(feature = "compress-gzip")] ContentEncoder::Gzip(ref mut encoder) => encoder.get_mut().take(), + + #[cfg(feature = "compress-zstd")] + ContentEncoder::Zstd(ref mut encoder) => encoder.get_mut().take(), } } fn finish(self) -> Result { match self { - ContentEncoder::Br(encoder) => match encoder.finish() { - Ok(writer) => Ok(writer.buf.freeze()), + #[cfg(feature = "compress-brotli")] + ContentEncoder::Brotli(mut encoder) => match encoder.flush() { + Ok(()) => Ok(encoder.into_inner().buf.freeze()), Err(err) => Err(err), }, + + #[cfg(feature = "compress-gzip")] ContentEncoder::Gzip(encoder) => match encoder.finish() { Ok(writer) => Ok(writer.buf.freeze()), Err(err) => Err(err), }, + + #[cfg(feature = "compress-gzip")] ContentEncoder::Deflate(encoder) => match encoder.finish() { Ok(writer) => Ok(writer.buf.freeze()), Err(err) => Err(err), }, + + #[cfg(feature = "compress-zstd")] + ContentEncoder::Zstd(encoder) => match encoder.finish() { + Ok(writer) => Ok(writer.buf.freeze()), + Err(err) => Err(err), + }, } } fn write(&mut self, data: &[u8]) -> Result<(), io::Error> { match *self { - ContentEncoder::Br(ref mut encoder) => match encoder.write_all(data) { + #[cfg(feature = "compress-brotli")] + ContentEncoder::Brotli(ref mut encoder) => match encoder.write_all(data) { Ok(_) => Ok(()), Err(err) => { trace!("Error decoding br encoding: {}", err); Err(err) } }, + + #[cfg(feature = "compress-gzip")] ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) { Ok(_) => Ok(()), Err(err) => { @@ -299,6 +379,8 @@ impl ContentEncoder { Err(err) } }, + + #[cfg(feature = "compress-gzip")] ContentEncoder::Deflate(ref mut encoder) => match encoder.write_all(data) { Ok(_) => Ok(()), Err(err) => { @@ -306,39 +388,52 @@ impl ContentEncoder { Err(err) } }, + + #[cfg(feature = "compress-zstd")] + ContentEncoder::Zstd(ref mut encoder) => match encoder.write_all(data) { + Ok(_) => Ok(()), + Err(err) => { + trace!("Error decoding zstd encoding: {}", err); + Err(err) + } + }, } } } +#[cfg(feature = "compress-brotli")] +fn new_brotli_compressor() -> Box> { + Box::new(brotli::CompressorWriter::new( + Writer::new(), + 32 * 1024, // 32 KiB buffer + 3, // BROTLI_PARAM_QUALITY + 22, // BROTLI_PARAM_LGWIN + )) +} + #[derive(Debug, Display)] #[non_exhaustive] -pub enum EncoderError { +pub enum EncoderError { + /// Wrapped body stream error. #[display(fmt = "body")] - Body(E), - - #[display(fmt = "boxed")] - Boxed(Error), - - #[display(fmt = "blocking")] - Blocking(BlockingError), + Body(Box), + /// Generic I/O error. #[display(fmt = "io")] Io(io::Error), } -impl StdError for EncoderError { +impl StdError for EncoderError { fn source(&self) -> Option<&(dyn StdError + 'static)> { - None - } -} - -impl> From> for Error { - fn from(err: EncoderError) -> Self { - match err { - EncoderError::Body(err) => err.into(), - EncoderError::Boxed(err) => err, - EncoderError::Blocking(err) => err.into(), - EncoderError::Io(err) => err.into(), + match self { + EncoderError::Body(err) => Some(&**err), + EncoderError::Io(err) => Some(err), } } } + +impl From for crate::Error { + fn from(err: EncoderError) -> Self { + crate::Error::new_encoder().with_cause(err) + } +} diff --git a/actix-http/src/encoding/mod.rs b/actix-http/src/encoding/mod.rs index cb271c638..6801b5fb0 100644 --- a/actix-http/src/encoding/mod.rs +++ b/actix-http/src/encoding/mod.rs @@ -7,10 +7,12 @@ use bytes::{Bytes, BytesMut}; mod decoder; mod encoder; -pub use self::decoder::Decoder; -pub use self::encoder::Encoder; +pub use self::{decoder::Decoder, encoder::Encoder}; -pub(self) struct Writer { +/// Special-purpose writer for streaming (de-)compression. +/// +/// Pre-allocates 8KiB of capacity. +struct Writer { buf: BytesMut, } diff --git a/actix-http/src/error.rs b/actix-http/src/error.rs index 92efd572d..6f332118e 100644 --- a/actix-http/src/error.rs +++ b/actix-http/src/error.rs @@ -1,174 +1,157 @@ //!
Error and Result module -use std::{ - error::Error as StdError, - fmt, - io::{self, Write as _}, - str::Utf8Error, - string::FromUtf8Error, -}; +use std::{error::Error as StdError, fmt, io, str::Utf8Error, string::FromUtf8Error}; -use bytes::BytesMut; use derive_more::{Display, Error, From}; -use http::{header, uri::InvalidUri, StatusCode}; -use serde::de::value::Error as DeError; +pub use http::{status::InvalidStatusCode, Error as HttpError}; +use http::{uri::InvalidUri, StatusCode}; -use crate::{body::Body, helpers::Writer, Response}; +use crate::{body::BoxBody, Response}; -pub use http::Error as HttpError; - -/// General purpose actix web error. -/// -/// An actix web error is used to carry errors from `std::error` -/// through actix in a convenient way. It can be created through -/// converting errors with `into()`. -/// -/// Whenever it is created from an external object a response error is created -/// for it that can be used to create an HTTP response from it this means that -/// if you have access to an actix `Error` you can always get a -/// `ResponseError` reference from it. pub struct Error { - cause: Box, + inner: Box, +} + +pub(crate) struct ErrorInner { + #[allow(dead_code)] + kind: Kind, + cause: Option>, } impl Error { - /// Returns the reference to the underlying `ResponseError`. - pub fn as_response_error(&self) -> &dyn ResponseError { - self.cause.as_ref() + fn new(kind: Kind) -> Self { + Self { + inner: Box::new(ErrorInner { kind, cause: None }), + } } - /// Similar to `as_response_error` but downcasts. - pub fn as_error(&self) -> Option<&T> { - ::downcast_ref(self.cause.as_ref()) + pub(crate) fn with_cause(mut self, cause: impl Into>) -> Self { + self.inner.cause = Some(cause.into()); + self + } + + pub(crate) fn new_http() -> Self { + Self::new(Kind::Http) + } + + pub(crate) fn new_parse() -> Self { + Self::new(Kind::Parse) + } + + pub(crate) fn new_payload() -> Self { + Self::new(Kind::Payload) + } + + pub(crate) fn new_body() -> Self { + Self::new(Kind::Body) + } + + pub(crate) fn new_send_response() -> Self { + Self::new(Kind::SendResponse) + } + + #[allow(unused)] // available for future use + pub(crate) fn new_io() -> Self { + Self::new(Kind::Io) + } + + #[allow(unused)] // used in encoder behind feature flag so ignore unused warning + pub(crate) fn new_encoder() -> Self { + Self::new(Kind::Encoder) + } + + #[allow(unused)] // used with `ws` feature flag + pub(crate) fn new_ws() -> Self { + Self::new(Kind::Ws) } } -/// Errors that can generate responses. -pub trait ResponseError: fmt::Debug + fmt::Display { - /// Returns appropriate status code for error. - /// - /// A 500 Internal Server Error is used by default. If [error_response](Self::error_response) is - /// also implemented and does not call `self.status_code()`, then this will not be used. - fn status_code(&self) -> StatusCode { - StatusCode::INTERNAL_SERVER_ERROR - } +impl From for Response { + fn from(err: Error) -> Self { + // TODO: more appropriate error status codes, usage assessment needed + let status_code = match err.inner.kind { + Kind::Parse => StatusCode::BAD_REQUEST, + _ => StatusCode::INTERNAL_SERVER_ERROR, + }; - /// Creates full response for error. - /// - /// By default, the generated response uses a 500 Internal Server Error status code, a - /// `Content-Type` of `text/plain`, and the body is set to `Self`'s `Display` impl. 
- fn error_response(&self) -> Response { - let mut resp = Response::new(self.status_code()); - let mut buf = BytesMut::new(); - let _ = write!(Writer(&mut buf), "{}", self); - resp.headers_mut().insert( - header::CONTENT_TYPE, - header::HeaderValue::from_static("text/plain; charset=utf-8"), - ); - resp.set_body(Body::from(buf)) + Response::new(status_code).set_body(BoxBody::new(err.to_string())) } - - downcast_get_type_id!(); } -downcast!(ResponseError); +#[derive(Debug, Clone, Copy, PartialEq, Eq, Display)] +pub(crate) enum Kind { + #[display(fmt = "error processing HTTP")] + Http, -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&self.cause, f) - } + #[display(fmt = "error parsing HTTP message")] + Parse, + + #[display(fmt = "request payload read error")] + Payload, + + #[display(fmt = "response body write error")] + Body, + + #[display(fmt = "send response error")] + SendResponse, + + #[display(fmt = "error in WebSocket process")] + Ws, + + #[display(fmt = "connection error")] + Io, + + #[display(fmt = "encoder error")] + Encoder, } impl fmt::Debug for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", &self.cause) + f.debug_struct("actix_http::Error") + .field("kind", &self.inner.kind) + .field("cause", &self.inner.cause) + .finish() } } -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - None +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.inner.cause.as_ref() { + Some(err) => write!(f, "{}: {}", &self.inner.kind, err), + None => write!(f, "{}", &self.inner.kind), + } } } -impl From<()> for Error { - fn from(_: ()) -> Self { - Error::from(UnitError) +impl StdError for Error { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + self.inner.cause.as_ref().map(Box::as_ref) } } impl From for Error { - fn from(_: std::convert::Infallible) -> Self { - // hint that an error that will never happen - unreachable!() + fn from(err: std::convert::Infallible) -> Self { + match err {} } } -/// Convert `Error` to a `Response` instance -impl From for Response { - fn from(err: Error) -> Self { - Response::from_error(err) +impl From for Error { + fn from(err: HttpError) -> Self { + Self::new_http().with_cause(err) } } -/// `Error` for any error that implements `ResponseError` -impl From for Error { - fn from(err: T) -> Error { - Error { - cause: Box::new(err), - } +#[cfg(feature = "ws")] +impl From for Error { + fn from(err: crate::ws::HandshakeError) -> Self { + Self::new_ws().with_cause(err) } } -#[derive(Debug, Display, Error)] -#[display(fmt = "Unknown Error")] -struct UnitError; - -impl ResponseError for Box {} - -/// Returns [`StatusCode::INTERNAL_SERVER_ERROR`] for [`UnitError`]. -impl ResponseError for UnitError {} - -/// Returns [`StatusCode::INTERNAL_SERVER_ERROR`] for [`actix_tls::accept::openssl::SslError`]. -#[cfg(feature = "openssl")] -impl ResponseError for actix_tls::accept::openssl::SslError {} - -/// Returns [`StatusCode::BAD_REQUEST`] for [`DeError`]. -impl ResponseError for DeError { - fn status_code(&self) -> StatusCode { - StatusCode::BAD_REQUEST - } -} - -/// Returns [`StatusCode::BAD_REQUEST`] for [`Utf8Error`]. -impl ResponseError for Utf8Error { - fn status_code(&self) -> StatusCode { - StatusCode::BAD_REQUEST - } -} - -/// Returns [`StatusCode::INTERNAL_SERVER_ERROR`] for [`HttpError`]. 
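// [editor's sketch, not part of this patch] Crate-internal illustration of the
// kind + cause flow defined above; it mirrors the updated `test_error_display`
// further down. `new_io()` and `with_cause()` are pub(crate), so this only
// compiles inside actix-http.
#[test]
fn error_carries_kind_and_cause() {
    use std::{error::Error as _, io};

    let err = crate::Error::new_io()
        .with_cause(io::Error::new(io::ErrorKind::Other, "broken pipe"));

    // Display is "{kind}: {cause}"; Kind::Io renders as "connection error"
    assert_eq!(err.to_string(), "connection error: broken pipe");

    // the boxed cause is exposed through the standard source() chain
    assert!(err.source().is_some());
}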
-impl ResponseError for HttpError {} - -/// Inspects the underlying [`io::ErrorKind`] and returns an appropriate status code. -/// -/// If the error is [`io::ErrorKind::NotFound`], [`StatusCode::NOT_FOUND`] is returned. If the -/// error is [`io::ErrorKind::PermissionDenied`], [`StatusCode::FORBIDDEN`] is returned. Otherwise, -/// [`StatusCode::INTERNAL_SERVER_ERROR`] is returned. -impl ResponseError for io::Error { - fn status_code(&self) -> StatusCode { - match self.kind() { - io::ErrorKind::NotFound => StatusCode::NOT_FOUND, - io::ErrorKind::PermissionDenied => StatusCode::FORBIDDEN, - _ => StatusCode::INTERNAL_SERVER_ERROR, - } - } -} - -/// Returns [`StatusCode::BAD_REQUEST`] for [`header::InvalidHeaderValue`]. -impl ResponseError for header::InvalidHeaderValue { - fn status_code(&self) -> StatusCode { - StatusCode::BAD_REQUEST +#[cfg(feature = "ws")] +impl From for Error { + fn from(err: crate::ws::ProtocolError) -> Self { + Self::new_ws().with_cause(err) } } @@ -177,54 +160,47 @@ impl ResponseError for header::InvalidHeaderValue { #[non_exhaustive] pub enum ParseError { /// An invalid `Method`, such as `GE.T`. - #[display(fmt = "Invalid Method specified")] + #[display(fmt = "invalid method specified")] Method, /// An invalid `Uri`, such as `exam ple.domain`. - #[display(fmt = "Uri error: {}", _0)] + #[display(fmt = "URI error: {}", _0)] Uri(InvalidUri), /// An invalid `HttpVersion`, such as `HTP/1.1` - #[display(fmt = "Invalid HTTP version specified")] + #[display(fmt = "invalid HTTP version specified")] Version, /// An invalid `Header`. - #[display(fmt = "Invalid Header provided")] + #[display(fmt = "invalid Header provided")] Header, /// A message head is too large to be reasonable. - #[display(fmt = "Message head is too large")] + #[display(fmt = "message head is too large")] TooLarge, /// A message reached EOF, but is not complete. - #[display(fmt = "Message is incomplete")] + #[display(fmt = "message is incomplete")] Incomplete, /// An invalid `Status`, such as `1337 ELITE`. - #[display(fmt = "Invalid Status provided")] + #[display(fmt = "invalid status provided")] Status, /// A timeout occurred waiting for an IO event. #[allow(dead_code)] - #[display(fmt = "Timeout")] + #[display(fmt = "timeout")] Timeout, - /// An `io::Error` that occurred while trying to read or write to a network stream. - #[display(fmt = "IO error: {}", _0)] + /// An I/O error that occurred while trying to read or write to a network stream. + #[display(fmt = "I/O error: {}", _0)] Io(io::Error), - /// Parsing a field as string failed - #[display(fmt = "UTF8 error: {}", _0)] + /// Parsing a field as string failed. + #[display(fmt = "UTF-8 error: {}", _0)] Utf8(Utf8Error), } -/// Return `BadRequest` for `ParseError` -impl ResponseError for ParseError { - fn status_code(&self) -> StatusCode { - StatusCode::BAD_REQUEST - } -} - impl From for ParseError { fn from(err: io::Error) -> ParseError { ParseError::Io(err) @@ -263,40 +239,42 @@ impl From for ParseError { } } -/// A set of errors that can occur running blocking tasks in thread pool. -#[derive(Debug, Display, Error)] -#[display(fmt = "Blocking thread pool is gone")] -pub struct BlockingError; +impl From for Error { + fn from(err: ParseError) -> Self { + Self::new_parse().with_cause(err) + } +} -/// `InternalServerError` for `BlockingError` -impl ResponseError for BlockingError {} +impl From for Response { + fn from(err: ParseError) -> Self { + Error::from(err).into() + } +} /// A set of errors that can occur during payload parsing. 
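// [editor's sketch, not part of this patch] With the `ResponseError` impls
// removed from this crate, a parse failure now reaches a response by converting
// through `Error` (Kind::Parse), which the `From<Error> for Response` impl maps
// to 400 Bad Request, as the updated tests below also show.
#[test]
fn parse_error_maps_to_bad_request() {
    use crate::{body::BoxBody, error::ParseError, Response};

    let resp: Response<BoxBody> = ParseError::TooLarge.into();
    assert_eq!(resp.status(), http::StatusCode::BAD_REQUEST);
}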
#[derive(Debug, Display)] #[non_exhaustive] pub enum PayloadError { /// A payload reached EOF, but is not complete. - #[display( - fmt = "A payload reached EOF, but is not complete. Inner error: {:?}", - _0 - )] + #[display(fmt = "payload reached EOF before completing: {:?}", _0)] Incomplete(Option), /// Content encoding stream corruption. - #[display(fmt = "Can not decode content-encoding.")] + #[display(fmt = "can not decode content-encoding")] EncodingCorrupted, /// Payload reached size limit. - #[display(fmt = "Payload reached size limit.")] + #[display(fmt = "payload reached size limit")] Overflow, /// Payload length is unknown. - #[display(fmt = "Payload length is unknown.")] + #[display(fmt = "payload length is unknown")] UnknownLength, /// HTTP/2 payload error. + #[cfg(feature = "http2")] #[display(fmt = "{}", _0)] - Http2Payload(h2::Error), + Http2Payload(::h2::Error), /// Generic I/O error. #[display(fmt = "{}", _0)] @@ -307,18 +285,20 @@ impl std::error::Error for PayloadError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { PayloadError::Incomplete(None) => None, - PayloadError::Incomplete(Some(err)) => Some(err as &dyn std::error::Error), + PayloadError::Incomplete(Some(err)) => Some(err), PayloadError::EncodingCorrupted => None, PayloadError::Overflow => None, PayloadError::UnknownLength => None, - PayloadError::Http2Payload(err) => Some(err as &dyn std::error::Error), - PayloadError::Io(err) => Some(err as &dyn std::error::Error), + #[cfg(feature = "http2")] + PayloadError::Http2Payload(err) => Some(err), + PayloadError::Io(err) => Some(err), } } } -impl From for PayloadError { - fn from(err: h2::Error) -> Self { +#[cfg(feature = "http2")] +impl From<::h2::Error> for PayloadError { + fn from(err: ::h2::Error) -> Self { PayloadError::Http2Payload(err) } } @@ -335,168 +315,138 @@ impl From for PayloadError { } } -impl From for PayloadError { - fn from(_: BlockingError) -> Self { - PayloadError::Io(io::Error::new( - io::ErrorKind::Other, - "Operation is canceled", - )) - } -} - -/// `PayloadError` returns two possible results: -/// -/// - `Overflow` returns `PayloadTooLarge` -/// - Other errors returns `BadRequest` -impl ResponseError for PayloadError { - fn status_code(&self) -> StatusCode { - match *self { - PayloadError::Overflow => StatusCode::PAYLOAD_TOO_LARGE, - _ => StatusCode::BAD_REQUEST, - } +impl From for Error { + fn from(err: PayloadError) -> Self { + Self::new_payload().with_cause(err) } } /// A set of errors that can occur during dispatching HTTP requests. -#[derive(Debug, Display, Error, From)] +#[derive(Debug, Display, From)] #[non_exhaustive] pub enum DispatchError { - /// Service error - Service(Error), + /// Service error. + #[display(fmt = "service error")] + Service(Response), - /// Upgrade service error + /// Body streaming error. + #[display(fmt = "body error: {}", _0)] + Body(Box), + + /// Upgrade service error. + #[display(fmt = "upgrade error")] Upgrade, - /// An `io::Error` that occurred while trying to read or write to a network - /// stream. - #[display(fmt = "IO error: {}", _0)] + /// An `io::Error` that occurred while trying to read or write to a network stream. + #[display(fmt = "I/O error: {}", _0)] Io(io::Error), - /// Http request parse error. - #[display(fmt = "Parse error: {}", _0)] + /// Request parse error. + #[display(fmt = "request parse error: {}", _0)] Parse(ParseError), - /// Http/2 error + /// HTTP/2 error. 
#[display(fmt = "{}", _0)] + #[cfg(feature = "http2")] H2(h2::Error), /// The first request did not complete within the specified timeout. - #[display(fmt = "The first request did not complete within the specified timeout")] + #[display(fmt = "request did not complete within the specified timeout")] SlowRequestTimeout, - /// Disconnect timeout. Makes sense for ssl streams. - #[display(fmt = "Connection shutdown timeout")] + /// Disconnect timeout. Makes sense for TLS streams. + #[display(fmt = "connection shutdown timeout")] DisconnectTimeout, - /// Payload is not consumed - #[display(fmt = "Task is completed but request's payload is not consumed")] - PayloadIsNotConsumed, + /// Handler dropped payload before reading EOF. + #[display(fmt = "handler dropped payload before reading EOF")] + HandlerDroppedPayload, - /// Malformed request - #[display(fmt = "Malformed request")] - MalformedRequest, - - /// Internal error - #[display(fmt = "Internal error")] + /// Internal error. + #[display(fmt = "internal error")] InternalError, - - /// Unknown error - #[display(fmt = "Unknown error")] - Unknown, } -/// A set of error that can occur during parsing content type. -#[derive(Debug, Display, Error)] -#[non_exhaustive] -pub enum ContentTypeError { - /// Can not parse content type - #[display(fmt = "Can not parse content type")] - ParseError, +impl StdError for DispatchError { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + DispatchError::Service(_res) => None, + DispatchError::Body(err) => Some(&**err), + DispatchError::Io(err) => Some(err), + DispatchError::Parse(err) => Some(err), - /// Unknown content encoding - #[display(fmt = "Unknown content encoding")] - UnknownEncoding, -} + #[cfg(feature = "http2")] + DispatchError::H2(err) => Some(err), -#[cfg(test)] -mod content_type_test_impls { - use super::*; - - impl std::cmp::PartialEq for ContentTypeError { - fn eq(&self, other: &Self) -> bool { - match self { - Self::ParseError => matches!(other, ContentTypeError::ParseError), - Self::UnknownEncoding => { - matches!(other, ContentTypeError::UnknownEncoding) - } - } + _ => None, } } } -impl ResponseError for ContentTypeError { - fn status_code(&self) -> StatusCode { - StatusCode::BAD_REQUEST - } +/// A set of error that can occur during parsing content type. +#[derive(Debug, Display, Error)] +#[cfg_attr(test, derive(PartialEq, Eq))] +#[non_exhaustive] +pub enum ContentTypeError { + /// Can not parse content type. + #[display(fmt = "could not parse content type")] + ParseError, + + /// Unknown content encoding. 
+ #[display(fmt = "unknown content encoding")] + UnknownEncoding, } #[cfg(test)] mod tests { + use http::Error as HttpError; + use super::*; - use http::{Error as HttpError, StatusCode}; - use std::io; #[test] fn test_into_response() { - let resp: Response = ParseError::Incomplete.error_response(); + let resp: Response = ParseError::Incomplete.into(); assert_eq!(resp.status(), StatusCode::BAD_REQUEST); let err: HttpError = StatusCode::from_u16(10000).err().unwrap().into(); - let resp: Response = err.error_response(); + let resp: Response = Error::new_http().with_cause(err).into(); assert_eq!(resp.status(), StatusCode::INTERNAL_SERVER_ERROR); } #[test] fn test_as_response() { let orig = io::Error::new(io::ErrorKind::Other, "other"); - let e: Error = ParseError::Io(orig).into(); - assert_eq!(format!("{}", e.as_response_error()), "IO error: other"); - } - - #[test] - fn test_error_cause() { - let orig = io::Error::new(io::ErrorKind::Other, "other"); - let desc = orig.to_string(); - let e = Error::from(orig); - assert_eq!(format!("{}", e.as_response_error()), desc); + let err: Error = ParseError::Io(orig).into(); + assert_eq!( + format!("{}", err), + "error parsing HTTP message: I/O error: other" + ); } #[test] fn test_error_display() { let orig = io::Error::new(io::ErrorKind::Other, "other"); - let desc = orig.to_string(); - let e = Error::from(orig); - assert_eq!(format!("{}", e), desc); + let err = Error::new_io().with_cause(orig); + assert_eq!("connection error: other", err.to_string()); } #[test] fn test_error_http_response() { let orig = io::Error::new(io::ErrorKind::Other, "other"); - let e = Error::from(orig); - let resp: Response = e.into(); + let err = Error::new_io().with_cause(orig); + let resp: Response = err.into(); assert_eq!(resp.status(), StatusCode::INTERNAL_SERVER_ERROR); } #[test] fn test_payload_error() { - let err: PayloadError = - io::Error::new(io::ErrorKind::Other, "ParseError").into(); + let err: PayloadError = io::Error::new(io::ErrorKind::Other, "ParseError").into(); assert!(err.to_string().contains("ParseError")); let err = PayloadError::Incomplete(None); assert_eq!( err.to_string(), - "A payload reached EOF, but is not complete. Inner error: None" + "payload reached EOF before completing: None" ); } @@ -516,7 +466,7 @@ mod tests { match ParseError::from($from) { e @ $error => { let desc = format!("{}", e); - assert_eq!(desc, format!("IO error: {}", $from)); + assert_eq!(desc, format!("I/O error: {}", $from)); } _ => unreachable!("{:?}", $from), } @@ -535,14 +485,4 @@ mod tests { from!(httparse::Error::TooManyHeaders => ParseError::TooLarge); from!(httparse::Error::Version => ParseError::Version); } - - #[test] - fn test_error_casting() { - let err = PayloadError::Overflow; - let resp_err: &dyn ResponseError = &err; - let err = resp_err.downcast_ref::().unwrap(); - assert_eq!(err.to_string(), "Payload reached size limit."); - let not_err = resp_err.downcast_ref::(); - assert!(not_err.is_none()); - } } diff --git a/actix-http/src/extensions.rs b/actix-http/src/extensions.rs index 5fdcefd6d..f2047a9ce 100644 --- a/actix-http/src/extensions.rs +++ b/actix-http/src/extensions.rs @@ -1,18 +1,38 @@ use std::{ any::{Any, TypeId}, - fmt, mem, + collections::HashMap, + fmt, + hash::{BuildHasherDefault, Hasher}, }; -use ahash::AHashMap; +/// A hasher for `TypeId`s that takes advantage of its known characteristics. 
+/// +/// Author of `anymap` crate has done research on the topic: +/// https://github.com/chris-morgan/anymap/blob/2e9a5704/src/lib.rs#L599 +#[derive(Debug, Default)] +struct NoOpHasher(u64); + +impl Hasher for NoOpHasher { + fn write(&mut self, _bytes: &[u8]) { + unimplemented!("This NoOpHasher can only handle u64s") + } + + fn write_u64(&mut self, i: u64) { + self.0 = i; + } + + fn finish(&self) -> u64 { + self.0 + } +} /// A type map for request extensions. /// /// All entries into this map must be owned types (or static references). #[derive(Default)] pub struct Extensions { - /// Use FxHasher with a std HashMap with for faster - /// lookups on the small `TypeId` (u64 equivalent) keys. - map: AHashMap>, + /// Use AHasher with a std HashMap with for faster lookups on the small `TypeId` keys. + map: HashMap, BuildHasherDefault>, } impl Extensions { @@ -20,7 +40,7 @@ impl Extensions { #[inline] pub fn new() -> Extensions { Extensions { - map: AHashMap::default(), + map: HashMap::default(), } } @@ -123,11 +143,6 @@ impl Extensions { pub fn extend(&mut self, other: Extensions) { self.map.extend(other.map); } - - /// Sets (or overrides) items from `other` into this map. - pub(crate) fn drain_from(&mut self, other: &mut Self) { - self.map.extend(mem::take(&mut other.map)); - } } impl fmt::Debug for Extensions { @@ -179,6 +194,8 @@ mod tests { #[test] fn test_integers() { + static A: u32 = 8; + let mut map = Extensions::new(); map.insert::(8); @@ -191,6 +208,7 @@ mod tests { map.insert::(32); map.insert::(64); map.insert::(128); + map.insert::<&'static u32>(&A); assert!(map.get::().is_some()); assert!(map.get::().is_some()); assert!(map.get::().is_some()); @@ -201,6 +219,7 @@ mod tests { assert!(map.get::().is_some()); assert!(map.get::().is_some()); assert!(map.get::().is_some()); + assert!(map.get::<&'static u32>().is_some()); } #[test] @@ -279,27 +298,4 @@ mod tests { assert_eq!(extensions.get(), Some(&20u8)); assert_eq!(extensions.get_mut(), Some(&mut 20u8)); } - - #[test] - fn test_drain_from() { - let mut ext = Extensions::new(); - ext.insert(2isize); - - let mut more_ext = Extensions::new(); - - more_ext.insert(5isize); - more_ext.insert(5usize); - - assert_eq!(ext.get::(), Some(&2isize)); - assert_eq!(ext.get::(), None); - assert_eq!(more_ext.get::(), Some(&5isize)); - assert_eq!(more_ext.get::(), Some(&5usize)); - - ext.drain_from(&mut more_ext); - - assert_eq!(ext.get::(), Some(&5isize)); - assert_eq!(ext.get::(), Some(&5usize)); - assert_eq!(more_ext.get::(), None); - assert_eq!(more_ext.get::(), None); - } } diff --git a/actix-http/src/h1/chunked.rs b/actix-http/src/h1/chunked.rs new file mode 100644 index 000000000..fc9081b81 --- /dev/null +++ b/actix-http/src/h1/chunked.rs @@ -0,0 +1,427 @@ +use std::{io, task::Poll}; + +use bytes::{Buf as _, Bytes, BytesMut}; +use tracing::{debug, trace}; + +macro_rules! 
byte ( + ($rdr:ident) => ({ + if $rdr.len() > 0 { + let b = $rdr[0]; + $rdr.advance(1); + b + } else { + return Poll::Pending + } + }) +); + +#[derive(Debug, Clone, PartialEq, Eq)] +pub(super) enum ChunkedState { + Size, + SizeLws, + Extension, + SizeLf, + Body, + BodyCr, + BodyLf, + EndCr, + EndLf, + End, +} + +impl ChunkedState { + pub(super) fn step( + &self, + body: &mut BytesMut, + size: &mut u64, + buf: &mut Option, + ) -> Poll> { + use self::ChunkedState::*; + match *self { + Size => ChunkedState::read_size(body, size), + SizeLws => ChunkedState::read_size_lws(body), + Extension => ChunkedState::read_extension(body), + SizeLf => ChunkedState::read_size_lf(body, *size), + Body => ChunkedState::read_body(body, size, buf), + BodyCr => ChunkedState::read_body_cr(body), + BodyLf => ChunkedState::read_body_lf(body), + EndCr => ChunkedState::read_end_cr(body), + EndLf => ChunkedState::read_end_lf(body), + End => Poll::Ready(Ok(ChunkedState::End)), + } + } + + fn read_size(rdr: &mut BytesMut, size: &mut u64) -> Poll> { + let radix = 16; + + let rem = match byte!(rdr) { + b @ b'0'..=b'9' => b - b'0', + b @ b'a'..=b'f' => b + 10 - b'a', + b @ b'A'..=b'F' => b + 10 - b'A', + b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)), + b';' => return Poll::Ready(Ok(ChunkedState::Extension)), + b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)), + _ => { + return Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid chunk size line: Invalid Size", + ))); + } + }; + + match size.checked_mul(radix) { + Some(n) => { + *size = n; + *size += rem as u64; + + Poll::Ready(Ok(ChunkedState::Size)) + } + None => { + debug!("chunk size would overflow u64"); + Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid chunk size line: Size is too big", + ))) + } + } + } + + fn read_size_lws(rdr: &mut BytesMut) -> Poll> { + match byte!(rdr) { + // LWS can follow the chunk size, but no more digits can come + b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)), + b';' => Poll::Ready(Ok(ChunkedState::Extension)), + b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)), + _ => Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid chunk size linear white space", + ))), + } + } + fn read_extension(rdr: &mut BytesMut) -> Poll> { + match byte!(rdr) { + b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)), + // strictly 0x20 (space) should be disallowed but we don't parse quoted strings here + 0x00..=0x08 | 0x0a..=0x1f | 0x7f => Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid character in chunk extension", + ))), + _ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions + } + } + fn read_size_lf(rdr: &mut BytesMut, size: u64) -> Poll> { + match byte!(rdr) { + b'\n' if size > 0 => Poll::Ready(Ok(ChunkedState::Body)), + b'\n' if size == 0 => Poll::Ready(Ok(ChunkedState::EndCr)), + _ => Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid chunk size LF", + ))), + } + } + + fn read_body( + rdr: &mut BytesMut, + rem: &mut u64, + buf: &mut Option, + ) -> Poll> { + trace!("Chunked read, remaining={:?}", rem); + + let len = rdr.len() as u64; + if len == 0 { + Poll::Ready(Ok(ChunkedState::Body)) + } else { + let slice; + if *rem > len { + slice = rdr.split().freeze(); + *rem -= len; + } else { + slice = rdr.split_to(*rem as usize).freeze(); + *rem = 0; + } + *buf = Some(slice); + if *rem > 0 { + Poll::Ready(Ok(ChunkedState::Body)) + } else { + Poll::Ready(Ok(ChunkedState::BodyCr)) + } + } + } + + fn 
read_body_cr(rdr: &mut BytesMut) -> Poll> { + match byte!(rdr) { + b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)), + _ => Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid chunk body CR", + ))), + } + } + fn read_body_lf(rdr: &mut BytesMut) -> Poll> { + match byte!(rdr) { + b'\n' => Poll::Ready(Ok(ChunkedState::Size)), + _ => Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid chunk body LF", + ))), + } + } + fn read_end_cr(rdr: &mut BytesMut) -> Poll> { + match byte!(rdr) { + b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)), + _ => Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid chunk end CR", + ))), + } + } + fn read_end_lf(rdr: &mut BytesMut) -> Poll> { + match byte!(rdr) { + b'\n' => Poll::Ready(Ok(ChunkedState::End)), + _ => Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid chunk end LF", + ))), + } + } +} + +#[cfg(test)] +mod tests { + use actix_codec::Decoder as _; + use bytes::{Bytes, BytesMut}; + use http::Method; + + use crate::{ + error::ParseError, + h1::decoder::{MessageDecoder, PayloadItem}, + HttpMessage as _, Request, + }; + + macro_rules! parse_ready { + ($e:expr) => {{ + match MessageDecoder::::default().decode($e) { + Ok(Some((msg, _))) => msg, + Ok(_) => unreachable!("Eof during parsing http request"), + Err(err) => unreachable!("Error during parsing http request: {:?}", err), + } + }}; + } + + macro_rules! expect_parse_err { + ($e:expr) => {{ + match MessageDecoder::::default().decode($e) { + Err(err) => match err { + ParseError::Io(_) => unreachable!("Parse error expected"), + _ => {} + }, + _ => unreachable!("Error expected"), + } + }}; + } + + #[test] + fn test_parse_chunked_payload_chunk_extension() { + let mut buf = BytesMut::from( + "GET /test HTTP/1.1\r\n\ + transfer-encoding: chunked\r\n\ + \r\n", + ); + + let mut reader = MessageDecoder::::default(); + let (msg, pl) = reader.decode(&mut buf).unwrap().unwrap(); + let mut pl = pl.unwrap(); + assert!(msg.chunked().unwrap()); + + buf.extend(b"4;test\r\ndata\r\n4\r\nline\r\n0\r\n\r\n"); // test: test\r\n\r\n") + let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk(); + assert_eq!(chunk, Bytes::from_static(b"data")); + let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk(); + assert_eq!(chunk, Bytes::from_static(b"line")); + let msg = pl.decode(&mut buf).unwrap().unwrap(); + assert!(msg.eof()); + } + + #[test] + fn test_request_chunked() { + let mut buf = BytesMut::from( + "GET /test HTTP/1.1\r\n\ + transfer-encoding: chunked\r\n\r\n", + ); + let req = parse_ready!(&mut buf); + + if let Ok(val) = req.chunked() { + assert!(val); + } else { + unreachable!("Error"); + } + + // intentional typo in "chunked" + let mut buf = BytesMut::from( + "GET /test HTTP/1.1\r\n\ + transfer-encoding: chnked\r\n\r\n", + ); + expect_parse_err!(&mut buf); + } + + #[test] + fn test_http_request_chunked_payload() { + let mut buf = BytesMut::from( + "GET /test HTTP/1.1\r\n\ + transfer-encoding: chunked\r\n\r\n", + ); + let mut reader = MessageDecoder::::default(); + let (req, pl) = reader.decode(&mut buf).unwrap().unwrap(); + let mut pl = pl.unwrap(); + assert!(req.chunked().unwrap()); + + buf.extend(b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n"); + assert_eq!( + pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(), + b"data" + ); + assert_eq!( + pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(), + b"line" + ); + assert!(pl.decode(&mut buf).unwrap().unwrap().eof()); + } + + #[test] + fn test_http_request_chunked_payload_and_next_message() { + 
let mut buf = BytesMut::from( + "GET /test HTTP/1.1\r\n\ + transfer-encoding: chunked\r\n\r\n", + ); + let mut reader = MessageDecoder::::default(); + let (req, pl) = reader.decode(&mut buf).unwrap().unwrap(); + let mut pl = pl.unwrap(); + assert!(req.chunked().unwrap()); + + buf.extend( + b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n\ + POST /test2 HTTP/1.1\r\n\ + transfer-encoding: chunked\r\n\r\n" + .iter(), + ); + let msg = pl.decode(&mut buf).unwrap().unwrap(); + assert_eq!(msg.chunk().as_ref(), b"data"); + let msg = pl.decode(&mut buf).unwrap().unwrap(); + assert_eq!(msg.chunk().as_ref(), b"line"); + let msg = pl.decode(&mut buf).unwrap().unwrap(); + assert!(msg.eof()); + + let (req, _) = reader.decode(&mut buf).unwrap().unwrap(); + assert!(req.chunked().unwrap()); + assert_eq!(*req.method(), Method::POST); + assert!(req.chunked().unwrap()); + } + + #[test] + fn test_http_request_chunked_payload_chunks() { + let mut buf = BytesMut::from( + "GET /test HTTP/1.1\r\n\ + transfer-encoding: chunked\r\n\r\n", + ); + + let mut reader = MessageDecoder::::default(); + let (req, pl) = reader.decode(&mut buf).unwrap().unwrap(); + let mut pl = pl.unwrap(); + assert!(req.chunked().unwrap()); + + buf.extend(b"4\r\n1111\r\n"); + let msg = pl.decode(&mut buf).unwrap().unwrap(); + assert_eq!(msg.chunk().as_ref(), b"1111"); + + buf.extend(b"4\r\ndata\r"); + let msg = pl.decode(&mut buf).unwrap().unwrap(); + assert_eq!(msg.chunk().as_ref(), b"data"); + + buf.extend(b"\n4"); + assert!(pl.decode(&mut buf).unwrap().is_none()); + + buf.extend(b"\r"); + assert!(pl.decode(&mut buf).unwrap().is_none()); + buf.extend(b"\n"); + assert!(pl.decode(&mut buf).unwrap().is_none()); + + buf.extend(b"li"); + let msg = pl.decode(&mut buf).unwrap().unwrap(); + assert_eq!(msg.chunk().as_ref(), b"li"); + + //trailers + //buf.feed_data("test: test\r\n"); + //not_ready!(reader.parse(&mut buf, &mut readbuf)); + + buf.extend(b"ne\r\n0\r\n"); + let msg = pl.decode(&mut buf).unwrap().unwrap(); + assert_eq!(msg.chunk().as_ref(), b"ne"); + assert!(pl.decode(&mut buf).unwrap().is_none()); + + buf.extend(b"\r\n"); + assert!(pl.decode(&mut buf).unwrap().unwrap().eof()); + } + + #[test] + fn chunk_extension_quoted() { + let mut buf = BytesMut::from( + "GET /test HTTP/1.1\r\n\ + Host: localhost:8080\r\n\ + Transfer-Encoding: chunked\r\n\ + \r\n\ + 2;hello=b;one=\"1 2 3\"\r\n\ + xx", + ); + + let mut reader = MessageDecoder::::default(); + let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap(); + let mut pl = pl.unwrap(); + + let chunk = pl.decode(&mut buf).unwrap().unwrap(); + assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"xx"))); + } + + #[test] + fn hrs_chunk_extension_invalid() { + let mut buf = BytesMut::from( + "GET / HTTP/1.1\r\n\ + Host: localhost:8080\r\n\ + Transfer-Encoding: chunked\r\n\ + \r\n\ + 2;x\nx\r\n\ + 4c\r\n\ + 0\r\n", + ); + + let mut reader = MessageDecoder::::default(); + let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap(); + let mut pl = pl.unwrap(); + + let err = pl.decode(&mut buf).unwrap_err(); + assert!(err + .to_string() + .contains("Invalid character in chunk extension")); + } + + #[test] + fn hrs_chunk_size_overflow() { + let mut buf = BytesMut::from( + "GET / HTTP/1.1\r\n\ + Host: example.com\r\n\ + Transfer-Encoding: chunked\r\n\ + \r\n\ + f0000000000000003\r\n\ + abc\r\n\ + 0\r\n", + ); + + let mut reader = MessageDecoder::::default(); + let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap(); + let mut pl = pl.unwrap(); + + let err = pl.decode(&mut buf).unwrap_err(); + assert!(err + 
.to_string() + .contains("Invalid chunk size line: Size is too big")); + } +} diff --git a/actix-http/src/h1/client.rs b/actix-http/src/h1/client.rs index 4a6104688..f3947dd12 100644 --- a/actix-http/src/h1/client.rs +++ b/actix-http/src/h1/client.rs @@ -1,23 +1,26 @@ -use std::io; +use std::{fmt, io}; -use actix_codec::{Decoder, Encoder}; use bitflags::bitflags; use bytes::{Bytes, BytesMut}; use http::{Method, Version}; +use tokio_util::codec::{Decoder, Encoder}; -use super::decoder::{PayloadDecoder, PayloadItem, PayloadType}; -use super::{decoder, encoder, reserve_readbuf}; -use super::{Message, MessageType}; -use crate::body::BodySize; -use crate::config::ServiceConfig; -use crate::error::{ParseError, PayloadError}; -use crate::message::{ConnectionType, RequestHeadType, ResponseHead}; +use super::{ + decoder::{self, PayloadDecoder, PayloadItem, PayloadType}, + encoder, reserve_readbuf, Message, MessageType, +}; +use crate::{ + body::BodySize, + error::{ParseError, PayloadError}, + ConnectionType, RequestHeadType, ResponseHead, ServiceConfig, +}; bitflags! { + #[derive(Debug, Clone, Copy)] struct Flags: u8 { - const HEAD = 0b0000_0001; - const KEEPALIVE_ENABLED = 0b0000_1000; - const STREAM = 0b0001_0000; + const HEAD = 0b0000_0001; + const KEEP_ALIVE_ENABLED = 0b0000_1000; + const STREAM = 0b0001_0000; } } @@ -36,7 +39,7 @@ struct ClientCodecInner { decoder: decoder::MessageDecoder, payload: Option, version: Version, - ctype: ConnectionType, + conn_type: ConnectionType, // encoder part flags: Flags, @@ -49,23 +52,32 @@ impl Default for ClientCodec { } } +impl fmt::Debug for ClientCodec { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("h1::ClientCodec") + .field("flags", &self.inner.flags) + .finish_non_exhaustive() + } +} + impl ClientCodec { /// Create HTTP/1 codec. /// /// `keepalive_enabled` how response `connection` header get generated. pub fn new(config: ServiceConfig) -> Self { - let flags = if config.keep_alive_enabled() { - Flags::KEEPALIVE_ENABLED + let flags = if config.keep_alive().enabled() { + Flags::KEEP_ALIVE_ENABLED } else { Flags::empty() }; + ClientCodec { inner: ClientCodecInner { config, decoder: decoder::MessageDecoder::default(), payload: None, version: Version::HTTP_11, - ctype: ConnectionType::Close, + conn_type: ConnectionType::Close, flags, encoder: encoder::MessageEncoder::default(), @@ -75,12 +87,12 @@ impl ClientCodec { /// Check if request is upgrade pub fn upgrade(&self) -> bool { - self.inner.ctype == ConnectionType::Upgrade + self.inner.conn_type == ConnectionType::Upgrade } /// Check if last response is keep-alive - pub fn keepalive(&self) -> bool { - self.inner.ctype == ConnectionType::KeepAlive + pub fn keep_alive(&self) -> bool { + self.inner.conn_type == ConnectionType::KeepAlive } /// Check last request's message type @@ -102,8 +114,8 @@ impl ClientCodec { impl ClientPayloadCodec { /// Check if last response is keep-alive - pub fn keepalive(&self) -> bool { - self.inner.ctype == ConnectionType::KeepAlive + pub fn keep_alive(&self) -> bool { + self.inner.conn_type == ConnectionType::KeepAlive } /// Transform payload codec to a message codec @@ -117,15 +129,18 @@ impl Decoder for ClientCodec { type Error = ParseError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - debug_assert!(!self.inner.payload.is_some(), "Payload decoder is set"); + debug_assert!( + self.inner.payload.is_none(), + "Payload decoder should not be set" + ); if let Some((req, payload)) = self.inner.decoder.decode(src)? 
{ - if let Some(ctype) = req.ctype() { + if let Some(conn_type) = req.conn_type() { // do not use peer's keep-alive - self.inner.ctype = if ctype == ConnectionType::KeepAlive { - self.inner.ctype + self.inner.conn_type = if conn_type == ConnectionType::KeepAlive { + self.inner.conn_type } else { - ctype + conn_type }; } @@ -190,9 +205,9 @@ impl Encoder> for ClientCodec { .set(Flags::HEAD, head.as_ref().method == Method::HEAD); // connection status - inner.ctype = match head.as_ref().connection_type() { + inner.conn_type = match head.as_ref().connection_type() { ConnectionType::KeepAlive => { - if inner.flags.contains(Flags::KEEPALIVE_ENABLED) { + if inner.flags.contains(Flags::KEEP_ALIVE_ENABLED) { ConnectionType::KeepAlive } else { ConnectionType::Close @@ -209,7 +224,7 @@ impl Encoder> for ClientCodec { false, inner.version, length, - inner.ctype, + inner.conn_type, &inner.config, )?; } diff --git a/actix-http/src/h1/codec.rs b/actix-http/src/h1/codec.rs index 634ca25e8..2b452f8f8 100644 --- a/actix-http/src/h1/codec.rs +++ b/actix-http/src/h1/codec.rs @@ -1,25 +1,22 @@ use std::{fmt, io}; -use actix_codec::{Decoder, Encoder}; use bitflags::bitflags; use bytes::BytesMut; use http::{Method, Version}; +use tokio_util::codec::{Decoder, Encoder}; -use super::decoder::{PayloadDecoder, PayloadItem, PayloadType}; -use super::{decoder, encoder}; -use super::{Message, MessageType}; -use crate::body::BodySize; -use crate::config::ServiceConfig; -use crate::error::ParseError; -use crate::message::ConnectionType; -use crate::request::Request; -use crate::response::Response; +use super::{ + decoder::{self, PayloadDecoder, PayloadItem, PayloadType}, + encoder, Message, MessageType, +}; +use crate::{body::BodySize, error::ParseError, ConnectionType, Request, Response, ServiceConfig}; bitflags! { + #[derive(Debug, Clone, Copy)] struct Flags: u8 { - const HEAD = 0b0000_0001; - const KEEPALIVE_ENABLED = 0b0000_0010; - const STREAM = 0b0000_0100; + const HEAD = 0b0000_0001; + const KEEP_ALIVE_ENABLED = 0b0000_0010; + const STREAM = 0b0000_0100; } } @@ -29,7 +26,7 @@ pub struct Codec { decoder: decoder::MessageDecoder, payload: Option, version: Version, - ctype: ConnectionType, + conn_type: ConnectionType, // encoder part flags: Flags, @@ -44,7 +41,9 @@ impl Default for Codec { impl fmt::Debug for Codec { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "h1::Codec({:?})", self.flags) + f.debug_struct("h1::Codec") + .field("flags", &self.flags) + .finish_non_exhaustive() } } @@ -53,8 +52,8 @@ impl Codec { /// /// `keepalive_enabled` how response `connection` header get generated. pub fn new(config: ServiceConfig) -> Self { - let flags = if config.keep_alive_enabled() { - Flags::KEEPALIVE_ENABLED + let flags = if config.keep_alive().enabled() { + Flags::KEEP_ALIVE_ENABLED } else { Flags::empty() }; @@ -65,7 +64,7 @@ impl Codec { decoder: decoder::MessageDecoder::default(), payload: None, version: Version::HTTP_11, - ctype: ConnectionType::Close, + conn_type: ConnectionType::Close, encoder: encoder::MessageEncoder::default(), } } @@ -73,19 +72,19 @@ impl Codec { /// Check if request is upgrade. #[inline] pub fn upgrade(&self) -> bool { - self.ctype == ConnectionType::Upgrade + self.conn_type == ConnectionType::Upgrade } /// Check if last response is keep-alive. #[inline] - pub fn keepalive(&self) -> bool { - self.ctype == ConnectionType::KeepAlive + pub fn keep_alive(&self) -> bool { + self.conn_type == ConnectionType::KeepAlive } /// Check if keep-alive enabled on server level. 
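// [editor's sketch, not part of this patch] Quick illustration of the renamed
// keep-alive accessors on the h1 codec (here and just below). Assumes the
// crate's `ServiceConfig::default()`, which leaves keep-alive enabled.
#[test]
fn codec_keep_alive_accessors() {
    use crate::{h1::Codec, ServiceConfig};

    let codec = Codec::new(ServiceConfig::default());

    // server-level setting taken from the config at construction time
    assert!(codec.keep_alive_enabled());

    // per-connection state starts as Close until a decoded request says otherwise
    assert!(!codec.keep_alive());
}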
#[inline] - pub fn keepalive_enabled(&self) -> bool { - self.flags.contains(Flags::KEEPALIVE_ENABLED) + pub fn keep_alive_enabled(&self) -> bool { + self.flags.contains(Flags::KEEP_ALIVE_ENABLED) } /// Check last request's message type. @@ -124,12 +123,14 @@ impl Decoder for Codec { let head = req.head(); self.flags.set(Flags::HEAD, head.method == Method::HEAD); self.version = head.version; - self.ctype = head.connection_type(); - if self.ctype == ConnectionType::KeepAlive - && !self.flags.contains(Flags::KEEPALIVE_ENABLED) + self.conn_type = head.connection_type(); + + if self.conn_type == ConnectionType::KeepAlive + && !self.flags.contains(Flags::KEEP_ALIVE_ENABLED) { - self.ctype = ConnectionType::Close + self.conn_type = ConnectionType::Close } + match payload { PayloadType::None => self.payload = None, PayloadType::Payload(pl) => self.payload = Some(pl), @@ -159,14 +160,14 @@ impl Encoder, BodySize)>> for Codec { res.head_mut().version = self.version; // connection status - self.ctype = if let Some(ct) = res.head().ctype() { + self.conn_type = if let Some(ct) = res.head().conn_type() { if ct == ConnectionType::KeepAlive { - self.ctype + self.conn_type } else { ct } } else { - self.ctype + self.conn_type }; // encode message @@ -177,29 +178,28 @@ impl Encoder, BodySize)>> for Codec { self.flags.contains(Flags::STREAM), self.version, length, - self.ctype, + self.conn_type, &self.config, )?; - // self.headers_size = (dst.len() - len) as u32; } + Message::Chunk(Some(bytes)) => { self.encoder.encode_chunk(bytes.as_ref(), dst)?; } + Message::Chunk(None) => { self.encoder.encode_eof(dst)?; } } + Ok(()) } } #[cfg(test)] mod tests { - use bytes::BytesMut; - use http::Method; - use super::*; - use crate::HttpMessage; + use crate::HttpMessage as _; #[actix_rt::test] async fn test_http_request_chunked_payload_and_next_message() { diff --git a/actix-http/src/h1/decoder.rs b/actix-http/src/h1/decoder.rs index 8aba9f623..af64e8802 100644 --- a/actix-http/src/h1/decoder.rs +++ b/actix-http/src/h1/decoder.rs @@ -1,18 +1,15 @@ -use std::convert::TryFrom; -use std::io; -use std::marker::PhantomData; -use std::task::Poll; +use std::{io, marker::PhantomData, mem::MaybeUninit, task::Poll}; use actix_codec::Decoder; -use bytes::{Buf, Bytes, BytesMut}; -use http::header::{HeaderName, HeaderValue}; -use http::{header, Method, StatusCode, Uri, Version}; -use log::{debug, error, trace}; +use bytes::{Bytes, BytesMut}; +use http::{ + header::{self, HeaderName, HeaderValue}, + Method, StatusCode, Uri, Version, +}; +use tracing::{debug, error, trace}; -use crate::error::ParseError; -use crate::header::HeaderMap; -use crate::message::{ConnectionType, ResponseHead}; -use crate::request::Request; +use super::chunked::ChunkedState; +use crate::{error::ParseError, header::HeaderMap, ConnectionType, Request, ResponseHead}; pub(crate) const MAX_BUFFER_SIZE: usize = 131_072; const MAX_HEADERS: usize = 96; @@ -49,8 +46,25 @@ pub(crate) enum PayloadLength { None, } +impl PayloadLength { + /// Returns true if variant is `None`. + fn is_none(&self) -> bool { + matches!(self, Self::None) + } + + /// Returns true if variant is represents zero-length (not none) payload. 
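// [editor's sketch, not part of this patch] How the two new PayloadLength
// helpers behave (is_none above, is_zero just below): only an explicit
// `Content-Length: 0` payload counts as zero-length, which decode() later uses
// to normalize such messages to "no payload" once all headers are processed.
// Written as if placed in this file's test module, so names resolve locally.
#[test]
fn zero_length_payload_detection() {
    let zero = PayloadLength::Payload(PayloadType::Payload(PayloadDecoder::length(0)));
    assert!(zero.is_zero());
    assert!(!zero.is_none());

    let sized = PayloadLength::Payload(PayloadType::Payload(PayloadDecoder::length(3)));
    assert!(!sized.is_zero());

    assert!(PayloadLength::None.is_none());
}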
+ fn is_zero(&self) -> bool { + matches!( + self, + PayloadLength::Payload(PayloadType::Payload(PayloadDecoder { + kind: Kind::Length(0) + })) + ) + } +} + pub(crate) trait MessageType: Sized { - fn set_connection_type(&mut self, ctype: Option); + fn set_connection_type(&mut self, conn_type: Option); fn set_expect(&mut self); @@ -62,55 +76,82 @@ pub(crate) trait MessageType: Sized { &mut self, slice: &Bytes, raw_headers: &[HeaderIndex], + version: Version, ) -> Result { let mut ka = None; let mut has_upgrade_websocket = false; let mut expect = false; let mut chunked = false; + let mut seen_te = false; let mut content_length = None; { let headers = self.headers_mut(); for idx in raw_headers.iter() { - let name = - HeaderName::from_bytes(&slice[idx.name.0..idx.name.1]).unwrap(); + let name = HeaderName::from_bytes(&slice[idx.name.0..idx.name.1]).unwrap(); // SAFETY: httparse already checks header value is only visible ASCII bytes // from_maybe_shared_unchecked contains debug assertions so they are omitted here let value = unsafe { - HeaderValue::from_maybe_shared_unchecked( - slice.slice(idx.value.0..idx.value.1), - ) + HeaderValue::from_maybe_shared_unchecked(slice.slice(idx.value.0..idx.value.1)) }; match name { - header::CONTENT_LENGTH => { - if let Ok(s) = value.to_str() { - if let Ok(len) = s.parse::() { - if len != 0 { - content_length = Some(len); - } + header::CONTENT_LENGTH if content_length.is_some() => { + debug!("multiple Content-Length"); + return Err(ParseError::Header); + } + + header::CONTENT_LENGTH => match value.to_str().map(str::trim) { + Ok(val) if val.starts_with('+') => { + debug!("illegal Content-Length: {:?}", val); + return Err(ParseError::Header); + } + + Ok(val) => { + if let Ok(len) = val.parse::() { + // accept 0 lengths here and remove them in `decode` after all + // headers have been processed to prevent request smuggling issues + content_length = Some(len); } else { - debug!("illegal Content-Length: {:?}", s); + debug!("illegal Content-Length: {:?}", val); return Err(ParseError::Header); } - } else { + } + + Err(_) => { debug!("illegal Content-Length: {:?}", value); return Err(ParseError::Header); } - } + }, + // transfer-encoding - header::TRANSFER_ENCODING => { - if let Ok(s) = value.to_str().map(|s| s.trim()) { - chunked = s.eq_ignore_ascii_case("chunked"); + header::TRANSFER_ENCODING if seen_te => { + debug!("multiple Transfer-Encoding not allowed"); + return Err(ParseError::Header); + } + + header::TRANSFER_ENCODING if version == Version::HTTP_11 => { + seen_te = true; + + if let Ok(val) = value.to_str().map(str::trim) { + if val.eq_ignore_ascii_case("chunked") { + chunked = true; + } else if val.eq_ignore_ascii_case("identity") { + // allow silently since multiple TE headers are already checked + } else { + debug!("illegal Transfer-Encoding: {:?}", val); + return Err(ParseError::Header); + } } else { return Err(ParseError::Header); } } + // connection keep-alive state header::CONNECTION => { - ka = if let Ok(conn) = value.to_str().map(|conn| conn.trim()) { + ka = if let Ok(conn) = value.to_str().map(str::trim) { if conn.eq_ignore_ascii_case("keep-alive") { Some(ConnectionType::KeepAlive) } else if conn.eq_ignore_ascii_case("close") { @@ -124,31 +165,36 @@ pub(crate) trait MessageType: Sized { None }; } + header::UPGRADE => { - if let Ok(val) = value.to_str().map(|val| val.trim()) { + if let Ok(val) = value.to_str().map(str::trim) { if val.eq_ignore_ascii_case("websocket") { has_upgrade_websocket = true; } } } + header::EXPECT => { let bytes = 
value.as_bytes(); if bytes.len() >= 4 && &bytes[0..4] == b"100-" { expect = true; } } + _ => {} } headers.append(name, value); } } + self.set_connection_type(ka); + if expect { self.set_expect() } - // https://tools.ietf.org/html/rfc7230#section-3.3.3 + // https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.3 if chunked { // Chunked encoding Ok(PayloadLength::Payload(PayloadType::Payload( @@ -168,8 +214,8 @@ pub(crate) trait MessageType: Sized { } impl MessageType for Request { - fn set_connection_type(&mut self, ctype: Option) { - if let Some(ctype) = ctype { + fn set_connection_type(&mut self, conn_type: Option) { + if let Some(ctype) = conn_type { self.head_mut().set_connection_type(ctype); } } @@ -186,10 +232,18 @@ impl MessageType for Request { let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY; let (len, method, uri, ver, h_len) = { - let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY; + // SAFETY: + // Create an uninitialized array of `MaybeUninit`. The `assume_init` is safe because the + // type we are claiming to have initialized here is a bunch of `MaybeUninit`s, which + // do not require initialization. + let mut parsed = unsafe { + MaybeUninit::<[MaybeUninit>; MAX_HEADERS]>::uninit() + .assume_init() + }; - let mut req = httparse::Request::new(&mut parsed); - match req.parse(src)? { + let mut req = httparse::Request::new(&mut []); + + match req.parse_with_uninit_headers(src, &mut parsed)? { httparse::Status::Complete(len) => { let method = Method::from_bytes(req.method.unwrap().as_bytes()) .map_err(|_| ParseError::Method)?; @@ -203,6 +257,7 @@ impl MessageType for Request { (len, method, uri, version, req.headers.len()) } + httparse::Status::Partial => { return if src.len() >= MAX_BUFFER_SIZE { trace!("MAX_BUFFER_SIZE unprocessed data reached, closing"); @@ -218,7 +273,21 @@ impl MessageType for Request { let mut msg = Request::new(); // convert headers - let length = msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len])?; + let mut length = msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len], ver)?; + + // disallow HTTP/1.0 POST requests that do not contain a Content-Length headers + // see https://datatracker.ietf.org/doc/html/rfc1945#section-7.2.2 + if ver == Version::HTTP_10 && method == Method::POST && length.is_none() { + debug!("no Content-Length specified for HTTP/1.0 POST request"); + return Err(ParseError::Header); + } + + // Remove CL value if 0 now that all headers and HTTP/1.0 special cases are processed. + // Protects against some request smuggling attacks. + // See https://github.com/actix/actix-web/issues/2767. + if length.is_zero() { + length = PayloadLength::None; + } // payload decoder let decoder = match length { @@ -246,8 +315,8 @@ impl MessageType for Request { } impl MessageType for ResponseHead { - fn set_connection_type(&mut self, ctype: Option) { - if let Some(ctype) = ctype { + fn set_connection_type(&mut self, conn_type: Option) { + if let Some(ctype) = conn_type { ResponseHead::set_connection_type(self, ctype); } } @@ -262,22 +331,35 @@ impl MessageType for ResponseHead { let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY; let (len, ver, status, h_len) = { - let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY; + // SAFETY: + // Create an uninitialized array of `MaybeUninit`. The `assume_init` is safe because the + // type we are claiming to have initialized here is a bunch of `MaybeUninit`s, which + // do not require initialization. 
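// [editor's note, not part of this patch] The uninitialized header slots are
// written by `parse_response_with_uninit_headers` below before they are ever
// read, which is what makes dropping the old EMPTY_HEADER array sound here.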
+ let mut parsed = unsafe { + MaybeUninit::<[MaybeUninit>; MAX_HEADERS]>::uninit() + .assume_init() + }; - let mut res = httparse::Response::new(&mut parsed); - match res.parse(src)? { + let mut res = httparse::Response::new(&mut []); + + let mut config = httparse::ParserConfig::default(); + config.allow_spaces_after_header_name_in_responses(true); + + match config.parse_response_with_uninit_headers(&mut res, src, &mut parsed)? { httparse::Status::Complete(len) => { let version = if res.version.unwrap() == 1 { Version::HTTP_11 } else { Version::HTTP_10 }; - let status = StatusCode::from_u16(res.code.unwrap()) - .map_err(|_| ParseError::Status)?; + + let status = + StatusCode::from_u16(res.code.unwrap()).map_err(|_| ParseError::Status)?; HeaderIndex::record(src, res.headers, &mut headers); (len, version, status, res.headers.len()) } + httparse::Status::Partial => { return if src.len() >= MAX_BUFFER_SIZE { error!("MAX_BUFFER_SIZE unprocessed data reached, closing"); @@ -293,7 +375,14 @@ impl MessageType for ResponseHead { msg.version = ver; // convert headers - let length = msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len])?; + let mut length = msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len], ver)?; + + // Remove CL value if 0 now that all headers and HTTP/1.0 special cases are processed. + // Protects against some request smuggling attacks. + // See https://github.com/actix/actix-web/issues/2767. + if length.is_zero() { + length = PayloadLength::None; + } // message payload let decoder = if let PayloadLength::Payload(pl) = length { @@ -329,9 +418,6 @@ pub(crate) const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex { pub(crate) const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] = [EMPTY_HEADER_INDEX; MAX_HEADERS]; -pub(crate) const EMPTY_HEADER_ARRAY: [httparse::Header<'static>; MAX_HEADERS] = - [httparse::EMPTY_HEADER; MAX_HEADERS]; - impl HeaderIndex { pub(crate) fn record( bytes: &[u8], @@ -350,76 +436,65 @@ impl HeaderIndex { } } -#[derive(Debug, Clone, PartialEq)] -/// Http payload item +#[derive(Debug, Clone, PartialEq, Eq)] +/// Chunk type yielded while decoding a payload. pub enum PayloadItem { Chunk(Bytes), Eof, } -/// Decoders to handle different Transfer-Encodings. +/// Decoder that can handle different payload types. /// -/// If a message body does not include a Transfer-Encoding, it *should* -/// include a Content-Length header. -#[derive(Debug, Clone, PartialEq)] +/// If a message body does not use `Transfer-Encoding`, it should include a `Content-Length`. +#[derive(Debug, Clone, PartialEq, Eq)] pub struct PayloadDecoder { kind: Kind, } impl PayloadDecoder { + /// Constructs a fixed-length payload decoder. pub fn length(x: u64) -> PayloadDecoder { PayloadDecoder { kind: Kind::Length(x), } } + /// Constructs a chunked encoding decoder. pub fn chunked() -> PayloadDecoder { PayloadDecoder { kind: Kind::Chunked(ChunkedState::Size, 0), } } + /// Creates an decoder that yields chunks until the stream returns EOF. pub fn eof() -> PayloadDecoder { PayloadDecoder { kind: Kind::Eof } } } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] enum Kind { - /// A Reader used when a Content-Length header is passed with a positive - /// integer. + /// A reader used when a `Content-Length` header is passed with a positive integer. Length(u64), - /// A Reader used when Transfer-Encoding is `chunked`. - Chunked(ChunkedState, u64), - /// A Reader used for responses that don't indicate a length or chunked. 
- /// - /// Note: This should only used for `Response`s. It is illegal for a - /// `Request` to be made with both `Content-Length` and - /// `Transfer-Encoding: chunked` missing, as explained from the spec: - /// - /// > If a Transfer-Encoding header field is present in a response and - /// > the chunked transfer coding is not the final encoding, the - /// > message body length is determined by reading the connection until - /// > it is closed by the server. If a Transfer-Encoding header field - /// > is present in a request and the chunked transfer coding is not - /// > the final encoding, the message body length cannot be determined - /// > reliably; the server MUST respond with the 400 (Bad Request) - /// > status code and then close the connection. - Eof, -} -#[derive(Debug, PartialEq, Clone)] -enum ChunkedState { - Size, - SizeLws, - Extension, - SizeLf, - Body, - BodyCr, - BodyLf, - EndCr, - EndLf, - End, + /// A reader used when `Transfer-Encoding` is `chunked`. + Chunked(ChunkedState, u64), + + /// A reader used for responses that don't indicate a length or chunked. + /// + /// Note: This should only used for `Response`s. It is illegal for a `Request` to be made + /// without either of `Content-Length` and `Transfer-Encoding: chunked` missing, as explained + /// in [RFC 7230 §3.3.3]: + /// + /// > If a Transfer-Encoding header field is present in a response and the chunked transfer + /// > coding is not the final encoding, the message body length is determined by reading the + /// > connection until it is closed by the server. If a Transfer-Encoding header field is + /// > present in a request and the chunked transfer coding is not the final encoding, the + /// > message body length cannot be determined reliably; the server MUST respond with the 400 + /// > (Bad Request) status code and then close the connection. + /// + /// [RFC 7230 §3.3.3]: https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.3 + Eof, } impl Decoder for PayloadDecoder { @@ -448,27 +523,33 @@ impl Decoder for PayloadDecoder { Ok(Some(PayloadItem::Chunk(buf))) } } + Kind::Chunked(ref mut state, ref mut size) => { loop { let mut buf = None; + // advances the chunked state *state = match state.step(src, size, &mut buf) { Poll::Pending => return Ok(None), Poll::Ready(Ok(state)) => state, - Poll::Ready(Err(e)) => return Err(e), + Poll::Ready(Err(err)) => return Err(err), }; + if *state == ChunkedState::End { trace!("End of chunked stream"); return Ok(Some(PayloadItem::Eof)); } + if let Some(buf) = buf { return Ok(Some(PayloadItem::Chunk(buf))); } + if src.is_empty() { return Ok(None); } } } + Kind::Eof => { if src.is_empty() { Ok(None) @@ -480,201 +561,33 @@ impl Decoder for PayloadDecoder { } } -macro_rules! 
byte ( - ($rdr:ident) => ({ - if $rdr.len() > 0 { - let b = $rdr[0]; - $rdr.advance(1); - b - } else { - return Poll::Pending - } - }) -); - -impl ChunkedState { - fn step( - &self, - body: &mut BytesMut, - size: &mut u64, - buf: &mut Option, - ) -> Poll> { - use self::ChunkedState::*; - match *self { - Size => ChunkedState::read_size(body, size), - SizeLws => ChunkedState::read_size_lws(body), - Extension => ChunkedState::read_extension(body), - SizeLf => ChunkedState::read_size_lf(body, size), - Body => ChunkedState::read_body(body, size, buf), - BodyCr => ChunkedState::read_body_cr(body), - BodyLf => ChunkedState::read_body_lf(body), - EndCr => ChunkedState::read_end_cr(body), - EndLf => ChunkedState::read_end_lf(body), - End => Poll::Ready(Ok(ChunkedState::End)), - } - } - - fn read_size( - rdr: &mut BytesMut, - size: &mut u64, - ) -> Poll> { - let radix = 16; - match byte!(rdr) { - b @ b'0'..=b'9' => { - *size *= radix; - *size += u64::from(b - b'0'); - } - b @ b'a'..=b'f' => { - *size *= radix; - *size += u64::from(b + 10 - b'a'); - } - b @ b'A'..=b'F' => { - *size *= radix; - *size += u64::from(b + 10 - b'A'); - } - b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)), - b';' => return Poll::Ready(Ok(ChunkedState::Extension)), - b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)), - _ => { - return Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Invalid chunk size line: Invalid Size", - ))); - } - } - Poll::Ready(Ok(ChunkedState::Size)) - } - - fn read_size_lws(rdr: &mut BytesMut) -> Poll> { - trace!("read_size_lws"); - match byte!(rdr) { - // LWS can follow the chunk size, but no more digits can come - b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)), - b';' => Poll::Ready(Ok(ChunkedState::Extension)), - b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)), - _ => Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Invalid chunk size linear white space", - ))), - } - } - fn read_extension(rdr: &mut BytesMut) -> Poll> { - match byte!(rdr) { - b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)), - _ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions - } - } - fn read_size_lf( - rdr: &mut BytesMut, - size: &mut u64, - ) -> Poll> { - match byte!(rdr) { - b'\n' if *size > 0 => Poll::Ready(Ok(ChunkedState::Body)), - b'\n' if *size == 0 => Poll::Ready(Ok(ChunkedState::EndCr)), - _ => Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Invalid chunk size LF", - ))), - } - } - - fn read_body( - rdr: &mut BytesMut, - rem: &mut u64, - buf: &mut Option, - ) -> Poll> { - trace!("Chunked read, remaining={:?}", rem); - - let len = rdr.len() as u64; - if len == 0 { - Poll::Ready(Ok(ChunkedState::Body)) - } else { - let slice; - if *rem > len { - slice = rdr.split().freeze(); - *rem -= len; - } else { - slice = rdr.split_to(*rem as usize).freeze(); - *rem = 0; - } - *buf = Some(slice); - if *rem > 0 { - Poll::Ready(Ok(ChunkedState::Body)) - } else { - Poll::Ready(Ok(ChunkedState::BodyCr)) - } - } - } - - fn read_body_cr(rdr: &mut BytesMut) -> Poll> { - match byte!(rdr) { - b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)), - _ => Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Invalid chunk body CR", - ))), - } - } - fn read_body_lf(rdr: &mut BytesMut) -> Poll> { - match byte!(rdr) { - b'\n' => Poll::Ready(Ok(ChunkedState::Size)), - _ => Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Invalid chunk body LF", - ))), - } - } - fn read_end_cr(rdr: &mut BytesMut) -> Poll> { - match byte!(rdr) { - 
b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)), - _ => Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Invalid chunk end CR", - ))), - } - } - fn read_end_lf(rdr: &mut BytesMut) -> Poll> { - match byte!(rdr) { - b'\n' => Poll::Ready(Ok(ChunkedState::End)), - _ => Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Invalid chunk end LF", - ))), - } - } -} - #[cfg(test)] mod tests { - use bytes::{Bytes, BytesMut}; - use http::{Method, Version}; - use super::*; - use crate::error::ParseError; - use crate::http::header::{HeaderName, SET_COOKIE}; - use crate::HttpMessage; + use crate::{header::SET_COOKIE, HttpMessage as _}; impl PayloadType { - fn unwrap(self) -> PayloadDecoder { + pub(crate) fn unwrap(self) -> PayloadDecoder { match self { PayloadType::Payload(pl) => pl, _ => panic!(), } } - fn is_unhandled(&self) -> bool { + pub(crate) fn is_unhandled(&self) -> bool { matches!(self, PayloadType::Stream(_)) } } impl PayloadItem { - fn chunk(self) -> Bytes { + pub(crate) fn chunk(self) -> Bytes { match self { PayloadItem::Chunk(chunk) => chunk, _ => panic!("error"), } } - fn eof(&self) -> bool { + + pub(crate) fn eof(&self) -> bool { matches!(*self, PayloadItem::Eof) } } @@ -731,20 +644,105 @@ mod tests { } #[test] - fn test_parse_post() { - let mut buf = BytesMut::from("POST /test2 HTTP/1.0\r\n\r\n"); + fn parse_h09_reject() { + let mut buf = BytesMut::from( + "GET /test1 HTTP/0.9\r\n\ + \r\n", + ); + + let mut reader = MessageDecoder::::default(); + reader.decode(&mut buf).unwrap_err(); + + let mut buf = BytesMut::from( + "POST /test2 HTTP/0.9\r\n\ + Content-Length: 3\r\n\ + \r\n + abc", + ); + + let mut reader = MessageDecoder::::default(); + reader.decode(&mut buf).unwrap_err(); + } + + #[test] + fn parse_h10_get() { + let mut buf = BytesMut::from( + "GET /test1 HTTP/1.0\r\n\ + \r\n", + ); + + let mut reader = MessageDecoder::::default(); + let (req, _) = reader.decode(&mut buf).unwrap().unwrap(); + assert_eq!(req.version(), Version::HTTP_10); + assert_eq!(*req.method(), Method::GET); + assert_eq!(req.path(), "/test1"); + + let mut buf = BytesMut::from( + "GET /test2 HTTP/1.0\r\n\ + Content-Length: 0\r\n\ + \r\n", + ); + + let mut reader = MessageDecoder::::default(); + let (req, _) = reader.decode(&mut buf).unwrap().unwrap(); + assert_eq!(req.version(), Version::HTTP_10); + assert_eq!(*req.method(), Method::GET); + assert_eq!(req.path(), "/test2"); + + let mut buf = BytesMut::from( + "GET /test3 HTTP/1.0\r\n\ + Content-Length: 3\r\n\ + \r\n + abc", + ); + + let mut reader = MessageDecoder::::default(); + let (req, _) = reader.decode(&mut buf).unwrap().unwrap(); + assert_eq!(req.version(), Version::HTTP_10); + assert_eq!(*req.method(), Method::GET); + assert_eq!(req.path(), "/test3"); + } + + #[test] + fn parse_h10_post() { + let mut buf = BytesMut::from( + "POST /test1 HTTP/1.0\r\n\ + Content-Length: 3\r\n\ + \r\n\ + abc", + ); + + let mut reader = MessageDecoder::::default(); + let (req, _) = reader.decode(&mut buf).unwrap().unwrap(); + assert_eq!(req.version(), Version::HTTP_10); + assert_eq!(*req.method(), Method::POST); + assert_eq!(req.path(), "/test1"); + + let mut buf = BytesMut::from( + "POST /test2 HTTP/1.0\r\n\ + Content-Length: 0\r\n\ + \r\n", + ); let mut reader = MessageDecoder::::default(); let (req, _) = reader.decode(&mut buf).unwrap().unwrap(); assert_eq!(req.version(), Version::HTTP_10); assert_eq!(*req.method(), Method::POST); assert_eq!(req.path(), "/test2"); + + let mut buf = BytesMut::from( + "POST /test3 HTTP/1.0\r\n\ + \r\n", + ); + + 
let mut reader = MessageDecoder::::default(); + let err = reader.decode(&mut buf).unwrap_err(); + assert!(err.to_string().contains("Header")) } #[test] fn test_parse_body() { - let mut buf = - BytesMut::from("GET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody"); + let mut buf = BytesMut::from("GET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody"); let mut reader = MessageDecoder::::default(); let (req, pl) = reader.decode(&mut buf).unwrap().unwrap(); @@ -760,8 +758,7 @@ mod tests { #[test] fn test_parse_body_crlf() { - let mut buf = - BytesMut::from("\r\nGET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody"); + let mut buf = BytesMut::from("\r\nGET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody"); let mut reader = MessageDecoder::::default(); let (req, pl) = reader.decode(&mut buf).unwrap().unwrap(); @@ -836,121 +833,98 @@ mod tests { #[test] fn test_conn_default_1_0() { - let mut buf = BytesMut::from("GET /test HTTP/1.0\r\n\r\n"); - let req = parse_ready!(&mut buf); - + let req = parse_ready!(&mut BytesMut::from("GET /test HTTP/1.0\r\n\r\n")); assert_eq!(req.head().connection_type(), ConnectionType::Close); } #[test] fn test_conn_default_1_1() { - let mut buf = BytesMut::from("GET /test HTTP/1.1\r\n\r\n"); - let req = parse_ready!(&mut buf); - + let req = parse_ready!(&mut BytesMut::from("GET /test HTTP/1.1\r\n\r\n")); assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive); } #[test] fn test_conn_close() { - let mut buf = BytesMut::from( + let req = parse_ready!(&mut BytesMut::from( "GET /test HTTP/1.1\r\n\ connection: close\r\n\r\n", - ); - let req = parse_ready!(&mut buf); - + )); assert_eq!(req.head().connection_type(), ConnectionType::Close); - let mut buf = BytesMut::from( + let req = parse_ready!(&mut BytesMut::from( "GET /test HTTP/1.1\r\n\ connection: Close\r\n\r\n", - ); - let req = parse_ready!(&mut buf); - + )); assert_eq!(req.head().connection_type(), ConnectionType::Close); } #[test] fn test_conn_close_1_0() { - let mut buf = BytesMut::from( + let req = parse_ready!(&mut BytesMut::from( "GET /test HTTP/1.0\r\n\ connection: close\r\n\r\n", - ); - - let req = parse_ready!(&mut buf); - + )); assert_eq!(req.head().connection_type(), ConnectionType::Close); } #[test] fn test_conn_keep_alive_1_0() { - let mut buf = BytesMut::from( + let req = parse_ready!(&mut BytesMut::from( "GET /test HTTP/1.0\r\n\ connection: keep-alive\r\n\r\n", - ); - let req = parse_ready!(&mut buf); - + )); assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive); - let mut buf = BytesMut::from( + let req = parse_ready!(&mut BytesMut::from( "GET /test HTTP/1.0\r\n\ connection: Keep-Alive\r\n\r\n", - ); - let req = parse_ready!(&mut buf); - + )); assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive); } #[test] fn test_conn_keep_alive_1_1() { - let mut buf = BytesMut::from( + let req = parse_ready!(&mut BytesMut::from( "GET /test HTTP/1.1\r\n\ connection: keep-alive\r\n\r\n", - ); - let req = parse_ready!(&mut buf); - + )); assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive); } #[test] fn test_conn_other_1_0() { - let mut buf = BytesMut::from( + let req = parse_ready!(&mut BytesMut::from( "GET /test HTTP/1.0\r\n\ connection: other\r\n\r\n", - ); - let req = parse_ready!(&mut buf); - + )); assert_eq!(req.head().connection_type(), ConnectionType::Close); } #[test] fn test_conn_other_1_1() { - let mut buf = BytesMut::from( + let req = parse_ready!(&mut BytesMut::from( "GET /test HTTP/1.1\r\n\ connection: other\r\n\r\n", - ); - let req = parse_ready!(&mut 
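// Editor's note, not part of the patch: the connection-type rules that the
// `test_conn_*` cases above pin down, written out as a standalone sketch.
// HTTP/1.0 defaults to closing the connection and HTTP/1.1 to keeping it alive;
// an explicit `Connection: close` / `Connection: keep-alive` header overrides
// the default, while unrecognised tokens fall back to it. (The `Upgrade` case
// tested below is deliberately left out of this sketch.)
#[derive(Debug, PartialEq)]
enum SketchConnectionType {
    Close,
    KeepAlive,
}

fn sketch_connection_type(http_11: bool, connection: Option<&str>) -> SketchConnectionType {
    match connection.map(|v| v.trim().to_ascii_lowercase()) {
        Some(v) if v == "close" => SketchConnectionType::Close,
        Some(v) if v == "keep-alive" => SketchConnectionType::KeepAlive,
        // unknown token or missing header: use the version default
        _ if http_11 => SketchConnectionType::KeepAlive,
        _ => SketchConnectionType::Close,
    }
}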
buf); - + )); assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive); } #[test] fn test_conn_upgrade() { - let mut buf = BytesMut::from( + let req = parse_ready!(&mut BytesMut::from( "GET /test HTTP/1.1\r\n\ upgrade: websockets\r\n\ connection: upgrade\r\n\r\n", - ); - let req = parse_ready!(&mut buf); + )); assert!(req.upgrade()); assert_eq!(req.head().connection_type(), ConnectionType::Upgrade); - let mut buf = BytesMut::from( + let req = parse_ready!(&mut BytesMut::from( "GET /test HTTP/1.1\r\n\ upgrade: Websockets\r\n\ connection: Upgrade\r\n\r\n", - ); - let req = parse_ready!(&mut buf); + )); assert!(req.upgrade()); assert_eq!(req.head().connection_type(), ConnectionType::Upgrade); @@ -958,87 +932,62 @@ mod tests { #[test] fn test_conn_upgrade_connect_method() { - let mut buf = BytesMut::from( + let req = parse_ready!(&mut BytesMut::from( "CONNECT /test HTTP/1.1\r\n\ content-type: text/plain\r\n\r\n", - ); - let req = parse_ready!(&mut buf); + )); assert!(req.upgrade()); } #[test] - fn test_request_chunked() { - let mut buf = BytesMut::from( - "GET /test HTTP/1.1\r\n\ - transfer-encoding: chunked\r\n\r\n", - ); - let req = parse_ready!(&mut buf); - - if let Ok(val) = req.chunked() { - assert!(val); - } else { - unreachable!("Error"); - } - - // intentional typo in "chunked" - let mut buf = BytesMut::from( - "GET /test HTTP/1.1\r\n\ - transfer-encoding: chnked\r\n\r\n", - ); - let req = parse_ready!(&mut buf); - - if let Ok(val) = req.chunked() { - assert!(!val); - } else { - unreachable!("Error"); - } - } - - #[test] - fn test_headers_content_length_err_1() { - let mut buf = BytesMut::from( + fn test_headers_bad_content_length() { + // string CL + expect_parse_err!(&mut BytesMut::from( "GET /test HTTP/1.1\r\n\ content-length: line\r\n\r\n", - ); + )); - expect_parse_err!(&mut buf) + // negative CL + expect_parse_err!(&mut BytesMut::from( + "GET /test HTTP/1.1\r\n\ + content-length: -1\r\n\r\n", + )); } #[test] - fn test_headers_content_length_err_2() { + fn octal_ish_cl_parsed_as_decimal() { let mut buf = BytesMut::from( - "GET /test HTTP/1.1\r\n\ - content-length: -1\r\n\r\n", + "POST /test HTTP/1.1\r\n\ + content-length: 011\r\n\r\n", ); - - expect_parse_err!(&mut buf); + let mut reader = MessageDecoder::::default(); + let (_req, pl) = reader.decode(&mut buf).unwrap().unwrap(); + assert!(matches!( + pl, + PayloadType::Payload(pl) if pl == PayloadDecoder::length(11) + )); } #[test] fn test_invalid_header() { - let mut buf = BytesMut::from( + expect_parse_err!(&mut BytesMut::from( "GET /test HTTP/1.1\r\n\ test line\r\n\r\n", - ); - - expect_parse_err!(&mut buf); + )); } #[test] fn test_invalid_name() { - let mut buf = BytesMut::from( + expect_parse_err!(&mut BytesMut::from( "GET /test HTTP/1.1\r\n\ test[]: line\r\n\r\n", - ); - - expect_parse_err!(&mut buf); + )); } #[test] fn test_http_request_bad_status_line() { - let mut buf = BytesMut::from("getpath \r\n\r\n"); - expect_parse_err!(&mut buf); + expect_parse_err!(&mut BytesMut::from("getpath \r\n\r\n")); } #[test] @@ -1078,11 +1027,10 @@ mod tests { #[test] fn test_http_request_parser_utf8() { - let mut buf = BytesMut::from( + let req = parse_ready!(&mut BytesMut::from( "GET /test HTTP/1.1\r\n\ x-test: тест\r\n\r\n", - ); - let req = parse_ready!(&mut buf); + )); assert_eq!( req.headers().get("x-test").unwrap().as_bytes(), @@ -1092,144 +1040,18 @@ mod tests { #[test] fn test_http_request_parser_two_slashes() { - let mut buf = BytesMut::from("GET //path HTTP/1.1\r\n\r\n"); - let req = parse_ready!(&mut buf); - + let req 
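// Editor's note, not part of the patch: the Content-Length validation that
// `test_headers_bad_content_length` and `octal_ish_cl_parsed_as_decimal` above
// rely on, as a standalone sketch. Only plain ASCII decimal is accepted, so
// "line" and "-1" are rejected (and a later `hrs_content_length_plus` test also
// rejects a leading `+`), while "011" still parses as decimal 11 rather than
// octal 9.
fn sketch_parse_content_length(value: &str) -> Option<u64> {
    if value.is_empty() || !value.bytes().all(|b| b.is_ascii_digit()) {
        return None;
    }

    value.parse::<u64>().ok()
}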
= parse_ready!(&mut BytesMut::from("GET //path HTTP/1.1\r\n\r\n")); assert_eq!(req.path(), "//path"); } #[test] fn test_http_request_parser_bad_method() { - let mut buf = BytesMut::from("!12%()+=~$ /get HTTP/1.1\r\n\r\n"); - - expect_parse_err!(&mut buf); + expect_parse_err!(&mut BytesMut::from("!12%()+=~$ /get HTTP/1.1\r\n\r\n")); } #[test] fn test_http_request_parser_bad_version() { - let mut buf = BytesMut::from("GET //get HT/11\r\n\r\n"); - - expect_parse_err!(&mut buf); - } - - #[test] - fn test_http_request_chunked_payload() { - let mut buf = BytesMut::from( - "GET /test HTTP/1.1\r\n\ - transfer-encoding: chunked\r\n\r\n", - ); - let mut reader = MessageDecoder::::default(); - let (req, pl) = reader.decode(&mut buf).unwrap().unwrap(); - let mut pl = pl.unwrap(); - assert!(req.chunked().unwrap()); - - buf.extend(b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n"); - assert_eq!( - pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(), - b"data" - ); - assert_eq!( - pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(), - b"line" - ); - assert!(pl.decode(&mut buf).unwrap().unwrap().eof()); - } - - #[test] - fn test_http_request_chunked_payload_and_next_message() { - let mut buf = BytesMut::from( - "GET /test HTTP/1.1\r\n\ - transfer-encoding: chunked\r\n\r\n", - ); - let mut reader = MessageDecoder::::default(); - let (req, pl) = reader.decode(&mut buf).unwrap().unwrap(); - let mut pl = pl.unwrap(); - assert!(req.chunked().unwrap()); - - buf.extend( - b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n\ - POST /test2 HTTP/1.1\r\n\ - transfer-encoding: chunked\r\n\r\n" - .iter(), - ); - let msg = pl.decode(&mut buf).unwrap().unwrap(); - assert_eq!(msg.chunk().as_ref(), b"data"); - let msg = pl.decode(&mut buf).unwrap().unwrap(); - assert_eq!(msg.chunk().as_ref(), b"line"); - let msg = pl.decode(&mut buf).unwrap().unwrap(); - assert!(msg.eof()); - - let (req, _) = reader.decode(&mut buf).unwrap().unwrap(); - assert!(req.chunked().unwrap()); - assert_eq!(*req.method(), Method::POST); - assert!(req.chunked().unwrap()); - } - - #[test] - fn test_http_request_chunked_payload_chunks() { - let mut buf = BytesMut::from( - "GET /test HTTP/1.1\r\n\ - transfer-encoding: chunked\r\n\r\n", - ); - - let mut reader = MessageDecoder::::default(); - let (req, pl) = reader.decode(&mut buf).unwrap().unwrap(); - let mut pl = pl.unwrap(); - assert!(req.chunked().unwrap()); - - buf.extend(b"4\r\n1111\r\n"); - let msg = pl.decode(&mut buf).unwrap().unwrap(); - assert_eq!(msg.chunk().as_ref(), b"1111"); - - buf.extend(b"4\r\ndata\r"); - let msg = pl.decode(&mut buf).unwrap().unwrap(); - assert_eq!(msg.chunk().as_ref(), b"data"); - - buf.extend(b"\n4"); - assert!(pl.decode(&mut buf).unwrap().is_none()); - - buf.extend(b"\r"); - assert!(pl.decode(&mut buf).unwrap().is_none()); - buf.extend(b"\n"); - assert!(pl.decode(&mut buf).unwrap().is_none()); - - buf.extend(b"li"); - let msg = pl.decode(&mut buf).unwrap().unwrap(); - assert_eq!(msg.chunk().as_ref(), b"li"); - - //trailers - //buf.feed_data("test: test\r\n"); - //not_ready!(reader.parse(&mut buf, &mut readbuf)); - - buf.extend(b"ne\r\n0\r\n"); - let msg = pl.decode(&mut buf).unwrap().unwrap(); - assert_eq!(msg.chunk().as_ref(), b"ne"); - assert!(pl.decode(&mut buf).unwrap().is_none()); - - buf.extend(b"\r\n"); - assert!(pl.decode(&mut buf).unwrap().unwrap().eof()); - } - - #[test] - fn test_parse_chunked_payload_chunk_extension() { - let mut buf = BytesMut::from( - "GET /test HTTP/1.1\r\n\ - transfer-encoding: chunked\r\n\ - \r\n", - ); - - let mut reader = 
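// Editor's note, not part of the patch: the wire format exercised by the
// chunked-payload tests removed above, spelled out as data. Each chunk is
// `<hex size>[;extension]CRLF`, followed by exactly `size` bytes and another
// CRLF; a zero-sized chunk plus a blank line terminates the body.
const SKETCH_CHUNKED_BODY: &[u8] = b"4\r\ndata\r\n4;ext=1\r\nline\r\n0\r\n\r\n";
// decodes to the chunks b"data" and b"line", then `PayloadItem::Eof`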
MessageDecoder::::default(); - let (msg, pl) = reader.decode(&mut buf).unwrap().unwrap(); - let mut pl = pl.unwrap(); - assert!(msg.chunked().unwrap()); - - buf.extend(b"4;test\r\ndata\r\n4\r\nline\r\n0\r\n\r\n"); // test: test\r\n\r\n") - let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk(); - assert_eq!(chunk, Bytes::from_static(b"data")); - let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk(); - assert_eq!(chunk, Bytes::from_static(b"line")); - let msg = pl.decode(&mut buf).unwrap().unwrap(); - assert!(msg.eof()); + expect_parse_err!(&mut BytesMut::from("GET //get HT/11\r\n\r\n")); } #[test] @@ -1243,4 +1065,121 @@ mod tests { let chunk = pl.decode(&mut buf).unwrap().unwrap(); assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"test data"))); } + + #[test] + fn hrs_multiple_content_length() { + expect_parse_err!(&mut BytesMut::from( + "GET / HTTP/1.1\r\n\ + Host: example.com\r\n\ + Content-Length: 4\r\n\ + Content-Length: 2\r\n\ + \r\n\ + abcd", + )); + + expect_parse_err!(&mut BytesMut::from( + "GET / HTTP/1.1\r\n\ + Host: example.com\r\n\ + Content-Length: 0\r\n\ + Content-Length: 2\r\n\ + \r\n\ + ab", + )); + } + + #[test] + fn hrs_content_length_plus() { + expect_parse_err!(&mut BytesMut::from( + "GET / HTTP/1.1\r\n\ + Host: example.com\r\n\ + Content-Length: +3\r\n\ + \r\n\ + 000", + )); + } + + #[test] + fn hrs_te_http10() { + // in HTTP/1.0 transfer encoding is ignored and must therefore contain a CL header + + expect_parse_err!(&mut BytesMut::from( + "POST / HTTP/1.0\r\n\ + Host: example.com\r\n\ + Transfer-Encoding: chunked\r\n\ + \r\n\ + 3\r\n\ + aaa\r\n\ + 0\r\n\ + ", + )); + } + + #[test] + fn hrs_cl_and_te_http10() { + // in HTTP/1.0 transfer encoding is simply ignored so it's fine to have both + + let mut buf = BytesMut::from( + "GET / HTTP/1.0\r\n\ + Host: example.com\r\n\ + Content-Length: 3\r\n\ + Transfer-Encoding: chunked\r\n\ + \r\n\ + 000", + ); + + parse_ready!(&mut buf); + } + + #[test] + fn hrs_unknown_transfer_encoding() { + let mut buf = BytesMut::from( + "GET / HTTP/1.1\r\n\ + Host: example.com\r\n\ + Transfer-Encoding: JUNK\r\n\ + Transfer-Encoding: chunked\r\n\ + \r\n\ + 5\r\n\ + hello\r\n\ + 0", + ); + + expect_parse_err!(&mut buf); + } + + #[test] + fn hrs_multiple_transfer_encoding() { + let mut buf = BytesMut::from( + "GET / HTTP/1.1\r\n\ + Host: example.com\r\n\ + Content-Length: 51\r\n\ + Transfer-Encoding: identity\r\n\ + Transfer-Encoding: chunked\r\n\ + \r\n\ + 0\r\n\ + \r\n\ + GET /forbidden HTTP/1.1\r\n\ + Host: example.com\r\n\r\n", + ); + + expect_parse_err!(&mut buf); + } + + #[test] + fn transfer_encoding_agrees() { + let mut buf = BytesMut::from( + "GET /test HTTP/1.1\r\n\ + Host: example.com\r\n\ + Content-Length: 3\r\n\ + Transfer-Encoding: identity\r\n\ + \r\n\ + 0\r\n", + ); + + let mut reader = MessageDecoder::::default(); + let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap(); + let mut pl = pl.unwrap(); + + let chunk = pl.decode(&mut buf).unwrap().unwrap(); + assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"0\r\n"))); + } } diff --git a/actix-http/src/h1/dispatcher.rs b/actix-http/src/h1/dispatcher.rs index c81d0b3bc..00b51360e 100644 --- a/actix-http/src/h1/dispatcher.rs +++ b/actix-http/src/h1/dispatcher.rs @@ -8,119 +8,166 @@ use std::{ task::{Context, Poll}, }; -use actix_codec::{AsyncRead, AsyncWrite, Decoder, Encoder, Framed, FramedParts}; -use actix_rt::time::{sleep_until, Instant, Sleep}; +use actix_codec::{Framed, FramedParts}; +use actix_rt::time::sleep_until; use actix_service::Service; use 
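// Editor's note, not part of the patch: the duplicate Content-Length requirement
// that the `hrs_multiple_content_length` test above enforces, as a standalone
// sketch; when the header is repeated, conflicting values must cause the request
// to be rejected before any body bytes are trusted.
fn sketch_content_lengths_agree(values: &[u64]) -> bool {
    values.windows(2).all(|pair| pair[0] == pair[1])
}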
bitflags::bitflags; use bytes::{Buf, BytesMut}; use futures_core::ready; -use log::{error, trace}; -use pin_project::pin_project; +use pin_project_lite::pin_project; +use tokio::io::{AsyncRead, AsyncWrite}; +use tokio_util::codec::{Decoder as _, Encoder as _}; +use tracing::{error, trace}; -use crate::body::{Body, BodySize, MessageBody}; -use crate::config::ServiceConfig; -use crate::error::{DispatchError, Error}; -use crate::error::{ParseError, PayloadError}; -use crate::http::StatusCode; -use crate::request::Request; -use crate::response::Response; -use crate::service::HttpFlow; -use crate::OnConnectData; - -use super::codec::Codec; -use super::payload::{Payload, PayloadSender, PayloadStatus}; -use super::{Message, MessageType}; +use super::{ + codec::Codec, + decoder::MAX_BUFFER_SIZE, + payload::{Payload, PayloadSender, PayloadStatus}, + timer::TimerState, + Message, MessageType, +}; +use crate::{ + body::{BodySize, BoxBody, MessageBody}, + config::ServiceConfig, + error::{DispatchError, ParseError, PayloadError}, + service::HttpFlow, + Error, Extensions, OnConnectData, Request, Response, StatusCode, +}; const LW_BUFFER_SIZE: usize = 1024; const HW_BUFFER_SIZE: usize = 1024 * 8; const MAX_PIPELINED_MESSAGES: usize = 16; bitflags! { + #[derive(Debug, Clone, Copy)] pub struct Flags: u8 { - const STARTED = 0b0000_0001; - const KEEPALIVE = 0b0000_0010; - const SHUTDOWN = 0b0000_0100; - const READ_DISCONNECT = 0b0000_1000; - const WRITE_DISCONNECT = 0b0001_0000; + /// Set when stream is read for first time. + const STARTED = 0b0000_0001; + + /// Set when full request-response cycle has occurred. + const FINISHED = 0b0000_0010; + + /// Set if connection is in keep-alive (inactive) state. + const KEEP_ALIVE = 0b0000_0100; + + /// Set if in shutdown procedure. + const SHUTDOWN = 0b0000_1000; + + /// Set if read-half is disconnected. + const READ_DISCONNECT = 0b0001_0000; + + /// Set if write-half is disconnected. + const WRITE_DISCONNECT = 0b0010_0000; } } -#[pin_project] -/// Dispatcher for HTTP/1.1 protocol -pub struct Dispatcher -where - S: Service, - S::Error: Into, +// there's 2 versions of Dispatcher state because of: +// https://github.com/taiki-e/pin-project-lite/issues/3 +// +// tl;dr: pin-project-lite doesn't play well with other attribute macros - B: MessageBody, - B::Error: Into, +#[cfg(not(test))] +pin_project! { + /// Dispatcher for HTTP/1.1 protocol + pub struct Dispatcher + where + S: Service, + S::Error: Into>, - X: Service, - X::Error: Into, + B: MessageBody, - U: Service<(Request, Framed), Response = ()>, - U::Error: fmt::Display, -{ - #[pin] - inner: DispatcherState, + X: Service, + X::Error: Into>, - #[cfg(test)] - poll_count: u64, + U: Service<(Request, Framed), Response = ()>, + U::Error: fmt::Display, + { + #[pin] + inner: DispatcherState, + } } -#[pin_project(project = DispatcherStateProj)] -enum DispatcherState -where - S: Service, - S::Error: Into, +#[cfg(test)] +pin_project! 
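// Editor's note, not part of the patch: a tiny sketch of how the `Flags` above
// are combined later in this file, using plain `bitflags` operations
// (`contains`, `insert`, `remove`). The helper name is invented for illustration.
fn sketch_should_shutdown(flags: Flags) -> bool {
    // a finished request/response cycle with no keep-alive grant means shutdown
    flags.contains(Flags::FINISHED) && !flags.contains(Flags::KEEP_ALIVE)
}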
{ + /// Dispatcher for HTTP/1.1 protocol + pub struct Dispatcher + where + S: Service, + S::Error: Into>, - B: MessageBody, - B::Error: Into, + B: MessageBody, - X: Service, - X::Error: Into, + X: Service, + X::Error: Into>, - U: Service<(Request, Framed), Response = ()>, - U::Error: fmt::Display, -{ - Normal(#[pin] InnerDispatcher), - Upgrade(#[pin] U::Future), + U: Service<(Request, Framed), Response = ()>, + U::Error: fmt::Display, + { + #[pin] + pub(super) inner: DispatcherState, + + // used in tests + pub(super) poll_count: u64, + } } -#[pin_project(project = InnerDispatcherProj)] -struct InnerDispatcher -where - S: Service, - S::Error: Into, +pin_project! { + #[project = DispatcherStateProj] + pub(super) enum DispatcherState + where + S: Service, + S::Error: Into>, - B: MessageBody, - B::Error: Into, + B: MessageBody, - X: Service, - X::Error: Into, + X: Service, + X::Error: Into>, - U: Service<(Request, Framed), Response = ()>, - U::Error: fmt::Display, -{ - flow: Rc>, - on_connect_data: OnConnectData, - flags: Flags, - peer_addr: Option, - error: Option, + U: Service<(Request, Framed), Response = ()>, + U::Error: fmt::Display, + { + Normal { #[pin] inner: InnerDispatcher }, + Upgrade { #[pin] fut: U::Future }, + } +} - #[pin] - state: State, - payload: Option, - messages: VecDeque, +pin_project! { + #[project = InnerDispatcherProj] + pub(super) struct InnerDispatcher + where + S: Service, + S::Error: Into>, - ka_expire: Instant, - #[pin] - ka_timer: Option, + B: MessageBody, - io: Option, - read_buf: BytesMut, - write_buf: BytesMut, - codec: Codec, + X: Service, + X::Error: Into>, + + U: Service<(Request, Framed), Response = ()>, + U::Error: fmt::Display, + { + flow: Rc>, + pub(super) flags: Flags, + peer_addr: Option, + conn_data: Option>, + config: ServiceConfig, + error: Option, + + #[pin] + pub(super) state: State, + // when Some(_) dispatcher is in state of receiving request payload + payload: Option, + messages: VecDeque, + + head_timer: TimerState, + ka_timer: TimerState, + shutdown_timer: TimerState, + + pub(super) io: Option, + read_buf: BytesMut, + write_buf: BytesMut, + codec: Codec, + } } enum DispatcherMessage { @@ -129,36 +176,57 @@ enum DispatcherMessage { Error(Response<()>), } -#[pin_project(project = StateProj)] -enum State -where - S: Service, - X: Service, - - B: MessageBody, - B::Error: Into, -{ - None, - ExpectCall(#[pin] X::Future), - ServiceCall(#[pin] S::Future), - SendPayload(#[pin] B), - SendErrorPayload(#[pin] Body), +pin_project! { + #[project = StateProj] + pub(super) enum State + where + S: Service, + X: Service, + B: MessageBody, + { + None, + ExpectCall { #[pin] fut: X::Future }, + ServiceCall { #[pin] fut: S::Future }, + SendPayload { #[pin] body: B }, + SendErrorPayload { #[pin] body: BoxBody }, + } } impl State where S: Service, - X: Service, - B: MessageBody, - B::Error: Into, { - fn is_empty(&self) -> bool { + pub(super) fn is_none(&self) -> bool { matches!(self, State::None) } } +impl fmt::Debug for State +where + S: Service, + X: Service, + B: MessageBody, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::None => write!(f, "State::None"), + Self::ExpectCall { .. } => f.debug_struct("State::ExpectCall").finish_non_exhaustive(), + Self::ServiceCall { .. } => { + f.debug_struct("State::ServiceCall").finish_non_exhaustive() + } + Self::SendPayload { .. } => { + f.debug_struct("State::SendPayload").finish_non_exhaustive() + } + Self::SendErrorPayload { .. 
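// Editor's note, not part of the patch: a self-contained example of the
// `pin_project_lite` enum-projection pattern that the `DispatcherState` and
// `State` definitions above rely on. All names here are invented for
// illustration; only the macro usage mirrors the real code.
use std::pin::Pin;

pin_project_lite::pin_project! {
    #[project = SketchStateProj]
    enum SketchState<F> {
        Idle,
        Busy { #[pin] fut: F },
    }
}

fn sketch_is_busy<F>(state: Pin<&mut SketchState<F>>) -> bool {
    // projecting gives pinned access to the `fut` field without unsafe code
    matches!(state.project(), SketchStateProj::Busy { .. })
}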
} => f + .debug_struct("State::SendErrorPayload") + .finish_non_exhaustive(), + } + } +} + +#[derive(Debug)] enum PollResponse { Upgrade(Request), DoNothing, @@ -170,14 +238,13 @@ where T: AsyncRead + AsyncWrite + Unpin, S: Service, - S::Error: Into, + S::Error: Into>, S::Response: Into>, B: MessageBody, - B::Error: Into, X: Service, - X::Error: Into, + X::Error: Into>, U: Service<(Request, Framed), Response = ()>, U::Error: fmt::Display, @@ -185,40 +252,35 @@ where /// Create HTTP/1 dispatcher. pub(crate) fn new( io: T, - config: ServiceConfig, flow: Rc>, - on_connect_data: OnConnectData, + config: ServiceConfig, peer_addr: Option, + conn_data: OnConnectData, ) -> Self { - let flags = if config.keep_alive_enabled() { - Flags::KEEPALIVE - } else { - Flags::empty() - }; - - // keep-alive timer - let (ka_expire, ka_timer) = match config.keep_alive_timer() { - Some(delay) => (delay.deadline(), Some(delay)), - None => (config.now(), None), - }; - Dispatcher { - inner: DispatcherState::Normal(InnerDispatcher { - read_buf: BytesMut::with_capacity(HW_BUFFER_SIZE), - write_buf: BytesMut::with_capacity(HW_BUFFER_SIZE), - payload: None, - state: State::None, - error: None, - messages: VecDeque::new(), - io: Some(io), - codec: Codec::new(config), - flow, - on_connect_data, - flags, - peer_addr, - ka_expire, - ka_timer, - }), + inner: DispatcherState::Normal { + inner: InnerDispatcher { + flow, + flags: Flags::empty(), + peer_addr, + conn_data: conn_data.0.map(Rc::new), + config: config.clone(), + error: None, + + state: State::None, + payload: None, + messages: VecDeque::new(), + + head_timer: TimerState::new(config.client_request_deadline().is_some()), + ka_timer: TimerState::new(config.keep_alive().enabled()), + shutdown_timer: TimerState::new(config.client_disconnect_deadline().is_some()), + + io: Some(io), + read_buf: BytesMut::with_capacity(HW_BUFFER_SIZE), + write_buf: BytesMut::with_capacity(HW_BUFFER_SIZE), + codec: Codec::new(config), + }, + }, #[cfg(test)] poll_count: 0, @@ -231,14 +293,13 @@ where T: AsyncRead + AsyncWrite + Unpin, S: Service, - S::Error: Into, + S::Error: Into>, S::Response: Into>, B: MessageBody, - B::Error: Into, X: Service, - X::Error: Into, + X::Error: Into>, U: Service<(Request, Framed), Response = ()>, U::Error: fmt::Display, @@ -253,20 +314,18 @@ where } } - // if checked is set to true, delay disconnect until all tasks have finished. fn client_disconnected(self: Pin<&mut Self>) { let this = self.project(); + this.flags .insert(Flags::READ_DISCONNECT | Flags::WRITE_DISCONNECT); + if let Some(mut payload) = this.payload.take() { payload.set_error(PayloadError::Incomplete(None)); } } - fn poll_flush( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let InnerDispatcherProj { io, write_buf, .. } = self.project(); let mut io = Pin::new(io.as_mut().unwrap()); @@ -276,12 +335,12 @@ where while written < len { match io.as_mut().poll_write(cx, &write_buf[written..])? { Poll::Ready(0) => { - return Poll::Ready(Err(io::Error::new( - io::ErrorKind::WriteZero, - "", - ))) + error!("write zero; closing"); + return Poll::Ready(Err(io::Error::new(io::ErrorKind::WriteZero, ""))); } + Poll::Ready(n) => written += n, + Poll::Pending => { write_buf.advance(written); return Poll::Pending; @@ -289,59 +348,68 @@ where } } - // everything has written to io. clear buffer. + // everything has written to I/O; clear buffer write_buf.clear(); - // flush the io and check if get blocked. 
+ // flush the I/O and check if get blocked io.poll_flush(cx) } fn send_response_inner( self: Pin<&mut Self>, - message: Response<()>, + res: Response<()>, body: &impl MessageBody, ) -> Result { + let this = self.project(); + let size = body.size(); - let mut this = self.project(); + this.codec - .encode(Message::Item((message, size)), &mut this.write_buf) + .encode(Message::Item((res, size)), this.write_buf) .map_err(|err| { if let Some(mut payload) = this.payload.take() { payload.set_error(PayloadError::Incomplete(None)); } + DispatchError::Io(err) })?; - this.flags.set(Flags::KEEPALIVE, this.codec.keepalive()); - Ok(size) } fn send_response( mut self: Pin<&mut Self>, - message: Response<()>, + res: Response<()>, body: B, ) -> Result<(), DispatchError> { - let size = self.as_mut().send_response_inner(message, &body)?; - let state = match size { - BodySize::None | BodySize::Empty => State::None, - _ => State::SendPayload(body), - }; - self.project().state.set(state); + let size = self.as_mut().send_response_inner(res, &body)?; + let mut this = self.project(); + this.state.set(match size { + BodySize::None | BodySize::Sized(0) => { + this.flags.insert(Flags::FINISHED); + State::None + } + _ => State::SendPayload { body }, + }); + Ok(()) } fn send_error_response( mut self: Pin<&mut Self>, - message: Response<()>, - body: Body, + res: Response<()>, + body: BoxBody, ) -> Result<(), DispatchError> { - let size = self.as_mut().send_response_inner(message, &body)?; - let state = match size { - BodySize::None | BodySize::Empty => State::None, - _ => State::SendErrorPayload(body), - }; - self.project().state.set(state); + let size = self.as_mut().send_response_inner(res, &body)?; + let mut this = self.project(); + this.state.set(match size { + BodySize::None | BodySize::Sized(0) => { + this.flags.insert(Flags::FINISHED); + State::None + } + _ => State::SendErrorPayload { body }, + }); + Ok(()) } @@ -358,152 +426,171 @@ where 'res: loop { let mut this = self.as_mut().project(); match this.state.as_mut().project() { - // no future is in InnerDispatcher state. pop next message. + // no future is in InnerDispatcher state; pop next message StateProj::None => match this.messages.pop_front() { - // handle request message. + // handle request message Some(DispatcherMessage::Item(req)) => { // Handle `EXPECT: 100-Continue` header if req.head().expect() { - // set InnerDispatcher state and continue loop to poll it. - let task = this.flow.expect.call(req); - this.state.set(State::ExpectCall(task)); + // set InnerDispatcher state and continue loop to poll it + let fut = this.flow.expect.call(req); + this.state.set(State::ExpectCall { fut }); } else { - // the same as expect call. - let task = this.flow.service.call(req); - this.state.set(State::ServiceCall(task)); + // set InnerDispatcher state and continue loop to poll it + let fut = this.flow.service.call(req); + this.state.set(State::ServiceCall { fut }); }; } - // handle error message. + // handle error message Some(DispatcherMessage::Error(res)) => { - // send_response would update InnerDispatcher state to SendPayload or - // None(If response body is empty). - // continue loop to poll it. - self.as_mut().send_error_response(res, Body::Empty)?; + // send_response would update InnerDispatcher state to SendPayload or None + // (If response body is empty) + // continue loop to poll it + self.as_mut().send_error_response(res, BoxBody::new(()))?; } - // return with upgrade request and poll it exclusively. 
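// Editor's note, not part of the patch: the state-selection rule that
// `send_response` / `send_error_response` above apply once the response head has
// been encoded, restated as a sketch (the return value is just a description).
fn sketch_state_after_head(size: BodySize) -> &'static str {
    match size {
        // empty responses finish the cycle immediately
        BodySize::None | BodySize::Sized(0) => "State::None, Flags::FINISHED set",
        // anything with a body moves into the payload-streaming state
        _ => "State::SendPayload { body }",
    }
}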
- Some(DispatcherMessage::Upgrade(req)) => { - return Ok(PollResponse::Upgrade(req)); - } + // return with upgrade request and poll it exclusively + Some(DispatcherMessage::Upgrade(req)) => return Ok(PollResponse::Upgrade(req)), - // all messages are dealt with. - None => return Ok(PollResponse::DoNothing), + // all messages are dealt with + None => { + // start keep-alive if last request allowed it + this.flags.set(Flags::KEEP_ALIVE, this.codec.keep_alive()); + + return Ok(PollResponse::DoNothing); + } }, - StateProj::ServiceCall(fut) => match fut.poll(cx) { - // service call resolved. send response. - Poll::Ready(Ok(res)) => { - let (res, body) = res.into().replace_body(()); - self.as_mut().send_response(res, body)?; - } - // send service call error as response - Poll::Ready(Err(err)) => { - let res = Response::from_error(err); - let (res, body) = res.replace_body(()); - self.as_mut().send_error_response(res, body)?; - } - - // service call pending and could be waiting for more chunk messages. - // (pipeline message limit and/or payload can_read limit) - Poll::Pending => { - // no new message is decoded and no new payload is feed. - // nothing to do except waiting for new incoming data from client. - if !self.as_mut().poll_request(cx)? { - return Ok(PollResponse::DoNothing); + StateProj::ServiceCall { fut } => { + match fut.poll(cx) { + // service call resolved. send response. + Poll::Ready(Ok(res)) => { + let (res, body) = res.into().replace_body(()); + self.as_mut().send_response(res, body)?; } - // otherwise keep loop. - } - }, - StateProj::SendPayload(mut stream) => { + // send service call error as response + Poll::Ready(Err(err)) => { + let res: Response = err.into(); + let (res, body) = res.replace_body(()); + self.as_mut().send_error_response(res, body)?; + } + + // service call pending and could be waiting for more chunk messages + // (pipeline message limit and/or payload can_read limit) + Poll::Pending => { + // no new message is decoded and no new payload is fed + // nothing to do except waiting for new incoming data from client + if !self.as_mut().poll_request(cx)? { + return Ok(PollResponse::DoNothing); + } + // else loop + } + } + } + + StateProj::SendPayload { mut body } => { // keep populate writer buffer until buffer size limit hit, // get blocked or finished. while this.write_buf.len() < super::payload::MAX_BUFFER_SIZE { - match stream.as_mut().poll_next(cx) { + match body.as_mut().poll_next(cx) { Poll::Ready(Some(Ok(item))) => { - this.codec.encode( - Message::Chunk(Some(item)), - &mut this.write_buf, - )?; + this.codec + .encode(Message::Chunk(Some(item)), this.write_buf)?; } Poll::Ready(None) => { - this.codec - .encode(Message::Chunk(None), &mut this.write_buf)?; + this.codec.encode(Message::Chunk(None), this.write_buf)?; + // payload stream finished. // set state to None and handle next message this.state.set(State::None); + this.flags.insert(Flags::FINISHED); + continue 'res; } Poll::Ready(Some(Err(err))) => { - return Err(DispatchError::Service(err.into())) + let err = err.into(); + tracing::error!("Response payload stream error: {err:?}"); + this.flags.insert(Flags::FINISHED); + return Err(DispatchError::Body(err)); } Poll::Pending => return Ok(PollResponse::DoNothing), } } - // buffer is beyond max size. - // return and try to write the whole buffer to io stream. + + // buffer is beyond max size + // return and try to write the whole buffer to I/O stream. 
return Ok(PollResponse::DrainWriteBuf); } - StateProj::SendErrorPayload(mut stream) => { + StateProj::SendErrorPayload { mut body } => { // TODO: de-dupe impl with SendPayload // keep populate writer buffer until buffer size limit hit, // get blocked or finished. while this.write_buf.len() < super::payload::MAX_BUFFER_SIZE { - match stream.as_mut().poll_next(cx) { + match body.as_mut().poll_next(cx) { Poll::Ready(Some(Ok(item))) => { - this.codec.encode( - Message::Chunk(Some(item)), - &mut this.write_buf, - )?; + this.codec + .encode(Message::Chunk(Some(item)), this.write_buf)?; } Poll::Ready(None) => { - this.codec - .encode(Message::Chunk(None), &mut this.write_buf)?; - // payload stream finished. + this.codec.encode(Message::Chunk(None), this.write_buf)?; + + // payload stream finished // set state to None and handle next message this.state.set(State::None); + this.flags.insert(Flags::FINISHED); + continue 'res; } Poll::Ready(Some(Err(err))) => { - return Err(DispatchError::Service(err)) + tracing::error!("Response payload stream error: {err:?}"); + this.flags.insert(Flags::FINISHED); + return Err(DispatchError::Body( + Error::new_body().with_cause(err).into(), + )); } Poll::Pending => return Ok(PollResponse::DoNothing), } } - // buffer is beyond max size. - // return and try to write the whole buffer to io stream. + + // buffer is beyond max size + // return and try to write the whole buffer to stream return Ok(PollResponse::DrainWriteBuf); } - StateProj::ExpectCall(fut) => match fut.poll(cx) { - // expect resolved. write continue to buffer and set InnerDispatcher state - // to service call. - Poll::Ready(Ok(req)) => { - this.write_buf - .extend_from_slice(b"HTTP/1.1 100 Continue\r\n\r\n"); - let fut = this.flow.service.call(req); - this.state.set(State::ServiceCall(fut)); - } + StateProj::ExpectCall { fut } => { + trace!(" calling expect service"); - // send expect error as response - Poll::Ready(Err(err)) => { - let res = Response::from_error(err); - let (res, body) = res.replace_body(()); - self.as_mut().send_error_response(res, body)?; - } + match fut.poll(cx) { + // expect resolved. write continue to buffer and set InnerDispatcher state + // to service call. + Poll::Ready(Ok(req)) => { + this.write_buf + .extend_from_slice(b"HTTP/1.1 100 Continue\r\n\r\n"); + let fut = this.flow.service.call(req); + this.state.set(State::ServiceCall { fut }); + } - // expect must be solved before progress can be made. - Poll::Pending => return Ok(PollResponse::DoNothing), - }, + // send expect error as response + Poll::Ready(Err(err)) => { + let res: Response = err.into(); + let (res, body) = res.replace_body(()); + self.as_mut().send_error_response(res, body)?; + } + + // expect must be solved before progress can be made. + Poll::Pending => return Ok(PollResponse::DoNothing), + } + } } } } @@ -513,143 +600,153 @@ where req: Request, cx: &mut Context<'_>, ) -> Result<(), DispatchError> { - // Handle `EXPECT: 100-Continue` header - if req.head().expect() { - // set dispatcher state so the future is pinned. + // initialize dispatcher state + { let mut this = self.as_mut().project(); - let task = this.flow.expect.call(req); - this.state.set(State::ExpectCall(task)); - } else { - // the same as above. 
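// Editor's note, not part of the patch: the interim response that the
// `ExpectCall` arm above writes into the write buffer when the expect service
// resolves successfully, before `State::ServiceCall` is entered.
const SKETCH_CONTINUE: &[u8] = b"HTTP/1.1 100 Continue\r\n\r\n";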
- let mut this = self.as_mut().project(); - let task = this.flow.service.call(req); - this.state.set(State::ServiceCall(task)); + + // Handle `EXPECT: 100-Continue` header + if req.head().expect() { + // set dispatcher state to call expect handler + let fut = this.flow.expect.call(req); + this.state.set(State::ExpectCall { fut }); + } else { + // set dispatcher state to call service handler + let fut = this.flow.service.call(req); + this.state.set(State::ServiceCall { fut }); + }; }; - // eagerly poll the future for once(or twice if expect is resolved immediately). + // eagerly poll the future once (or twice if expect is resolved immediately). loop { match self.as_mut().project().state.project() { - StateProj::ExpectCall(fut) => { + StateProj::ExpectCall { fut } => { match fut.poll(cx) { - // expect is resolved. continue loop and poll the service call branch. + // expect is resolved; continue loop and poll the service call branch. Poll::Ready(Ok(req)) => { self.as_mut().send_continue(); + let mut this = self.as_mut().project(); - let task = this.flow.service.call(req); - this.state.set(State::ServiceCall(task)); + let fut = this.flow.service.call(req); + this.state.set(State::ServiceCall { fut }); + continue; } - // future is pending. return Ok(()) to notify that a new state is - // set and the outer loop should be continue. - Poll::Pending => return Ok(()), - // future is error. send response and return a result. On success - // to notify the dispatcher a new state is set and the outer loop - // should be continue. + + // future is error; send response and return a result + // on success to notify the dispatcher a new state is set and the outer loop + // should be continued Poll::Ready(Err(err)) => { - let res = Response::from_error(err); + let res: Response = err.into(); let (res, body) = res.replace_body(()); return self.send_error_response(res, body); } + + // future is pending; return Ok(()) to notify that a new state is + // set and the outer loop should be continue. + Poll::Pending => return Ok(()), } } - StateProj::ServiceCall(fut) => { + + StateProj::ServiceCall { fut } => { // return no matter the service call future's result. return match fut.poll(cx) { - // future is resolved. send response and return a result. On success + // Future is resolved. Send response and return a result. On success // to notify the dispatcher a new state is set and the outer loop // should be continue. Poll::Ready(Ok(res)) => { let (res, body) = res.into().replace_body(()); - self.send_response(res, body) + self.as_mut().send_response(res, body) } - // see the comment on ExpectCall state branch's Pending. + + // see the comment on ExpectCall state branch's Pending Poll::Pending => Ok(()), - // see the comment on ExpectCall state branch's Ready(Err(err)). + + // see the comment on ExpectCall state branch's Ready(Err(_)) Poll::Ready(Err(err)) => { - let res = Response::from_error(err); + let res: Response = err.into(); let (res, body) = res.replace_body(()); - self.send_error_response(res, body) + self.as_mut().send_error_response(res, body) } }; } - _ => unreachable!( - "State must be set to ServiceCall or ExceptCall in handle_request" - ), + + _ => { + unreachable!("State must be set to ServiceCall or ExceptCall in handle_request") + } } } } /// Process one incoming request. - fn poll_request( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Result { + /// + /// Returns true if any meaningful work was done. 
+ fn poll_request(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Result { + let pipeline_queue_full = self.messages.len() >= MAX_PIPELINED_MESSAGES; + let can_not_read = !self.can_read(cx); + // limit amount of non-processed requests - if self.messages.len() >= MAX_PIPELINED_MESSAGES || !self.can_read(cx) { + if pipeline_queue_full || can_not_read { return Ok(false); } - let mut updated = false; let mut this = self.as_mut().project(); + + let mut updated = false; + + // decode from read buf as many full requests as possible loop { - match this.codec.decode(&mut this.read_buf) { + match this.codec.decode(this.read_buf) { Ok(Some(msg)) => { updated = true; - this.flags.insert(Flags::STARTED); match msg { Message::Item(mut req) => { + // head timer only applies to first request on connection + this.head_timer.clear(line!()); + req.head_mut().peer_addr = *this.peer_addr; - // merge on_connect_ext data into request extensions - this.on_connect_data.merge_into(&mut req); + req.conn_data.clone_from(this.conn_data); match this.codec.message_type() { - // Request is upgradable. add upgrade message and break. - // everything remain in read buffer would be handed to + // request has no payload + MessageType::None => {} + + // Request is upgradable. Add upgrade message and break. + // Everything remaining in read buffer will be handed to // upgraded Request. MessageType::Stream if this.flow.upgrade.is_some() => { - this.messages - .push_back(DispatcherMessage::Upgrade(req)); + this.messages.push_back(DispatcherMessage::Upgrade(req)); break; } - // Request is not upgradable. + // request is not upgradable MessageType::Payload | MessageType::Stream => { - /* - PayloadSender and Payload are smart pointers share the - same state. - PayloadSender is attached to dispatcher and used to sink - new chunked request data to state. - Payload is attached to Request and passed to Service::call - where the state can be collected and consumed. - */ - let (ps, pl) = Payload::create(false); - let (req1, _) = - req.replace_payload(crate::Payload::H1(pl)); - req = req1; - *this.payload = Some(ps); + // PayloadSender and Payload are smart pointers share the + // same state. PayloadSender is attached to dispatcher and used + // to sink new chunked request data to state. Payload is + // attached to Request and passed to Service::call where the + // state can be collected and consumed. + let (sender, payload) = Payload::create(false); + *req.payload() = crate::Payload::H1 { payload }; + *this.payload = Some(sender); } - - // Request has no payload. - MessageType::None => {} } // handle request early when no future in InnerDispatcher state. - if this.state.is_empty() { + if this.state.is_none() { self.as_mut().handle_request(req, cx)?; this = self.as_mut().project(); } else { this.messages.push_back(DispatcherMessage::Item(req)); } } + Message::Chunk(Some(chunk)) => { if let Some(ref mut payload) = this.payload { payload.feed_data(chunk); } else { - error!( - "Internal server error: unexpected payload chunk" - ); + error!("Internal server error: unexpected payload chunk"); this.flags.insert(Flags::READ_DISCONNECT); this.messages.push_back(DispatcherMessage::Error( Response::internal_server_error().drop_body(), @@ -658,6 +755,7 @@ where break; } } + Message::Chunk(None) => { if let Some(mut payload) = this.payload.take() { payload.feed_eof(); @@ -673,39 +771,51 @@ where } } } - // decode is partial and buffer is not full yet. - // break and wait for more read. 
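// Editor's note, not part of the patch: the gate at the top of `poll_request`
// above, as a sketch. Decoding is skipped entirely when too many pipelined
// requests are already queued or when the payload consumer has signalled
// back-pressure via `can_read`.
fn sketch_may_decode(queued_messages: usize, can_read: bool) -> bool {
    queued_messages < MAX_PIPELINED_MESSAGES && can_read
}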
+ + // decode is partial and buffer is not full yet + // break and wait for more read Ok(None) => break, + Err(ParseError::Io(err)) => { + trace!("I/O error: {}", &err); self.as_mut().client_disconnected(); this = self.as_mut().project(); *this.error = Some(DispatchError::Io(err)); break; } + Err(ParseError::TooLarge) => { + trace!("request head was too big; returning 431 response"); + if let Some(mut payload) = this.payload.take() { payload.set_error(PayloadError::Overflow); } - // Requests overflow buffer size should be responded with 431 - this.messages.push_back(DispatcherMessage::Error( - Response::with_body( + + // request heads that overflow buffer size return a 431 error + this.messages + .push_back(DispatcherMessage::Error(Response::with_body( StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE, (), - ), - )); + ))); + this.flags.insert(Flags::READ_DISCONNECT); *this.error = Some(ParseError::TooLarge.into()); + break; } + Err(err) => { + trace!("parse error {}", &err); + if let Some(mut payload) = this.payload.take() { payload.set_error(PayloadError::EncodingCorrupted); } - // Malformed requests should be responded with 400 + // malformed requests should be responded with 400 this.messages.push_back(DispatcherMessage::Error( Response::bad_request().drop_body(), )); + this.flags.insert(Flags::READ_DISCONNECT); *this.error = Some(err.into()); break; @@ -713,100 +823,113 @@ where } } - if updated && this.ka_timer.is_some() { - if let Some(expire) = this.codec.config().keep_alive_expire() { - *this.ka_expire = expire; - } - } Ok(updated) } - /// keep-alive timer - fn poll_keepalive( + fn poll_head_timer( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Result<(), DispatchError> { - let mut this = self.as_mut().project(); + let this = self.as_mut().project(); - // when a branch is not explicit return early it's meant to fall through - // and return as Ok(()) - match this.ka_timer.as_mut().as_pin_mut() { - None => { - // conditionally go into shutdown timeout - if this.flags.contains(Flags::SHUTDOWN) { - if let Some(deadline) = this.codec.config().client_disconnect_timer() - { - // write client disconnect time out and poll again to - // go into Some> branch - this.ka_timer.set(Some(sleep_until(deadline))); - return self.poll_keepalive(cx); - } - } - } - Some(mut timer) => { - // only operate when keep-alive timer is resolved. - if timer.as_mut().poll(cx).is_ready() { - // got timeout during shutdown, drop connection - if this.flags.contains(Flags::SHUTDOWN) { - return Err(DispatchError::DisconnectTimeout); - // exceed deadline. check for any outstanding tasks - } else if timer.deadline() >= *this.ka_expire { - // have no task at hand. 
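// Editor's note, not part of the patch: the error-to-status mapping implemented
// above, pulled out as a sketch. Oversized request heads get 431 and otherwise
// malformed requests get 400; in both cases the read half is marked disconnected
// so no further pipelined requests are accepted. I/O errors are not mapped to a
// response at all; the client is simply treated as disconnected.
fn sketch_status_for_parse_error(err: &ParseError) -> StatusCode {
    match err {
        ParseError::TooLarge => StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE,
        _ => StatusCode::BAD_REQUEST,
    }
}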
- if this.state.is_empty() && this.write_buf.is_empty() { - if this.flags.contains(Flags::STARTED) { - trace!("Keep-alive timeout, close connection"); - this.flags.insert(Flags::SHUTDOWN); + if let TimerState::Active { timer } = this.head_timer { + if timer.as_mut().poll(cx).is_ready() { + // timeout on first request (slow request) return 408 - // start shutdown timeout - if let Some(deadline) = - this.codec.config().client_disconnect_timer() - { - timer.as_mut().reset(deadline); - let _ = timer.poll(cx); - } else { - // no shutdown timeout, drop socket - this.flags.insert(Flags::WRITE_DISCONNECT); - } - } else { - // timeout on first request (slow request) return 408 - trace!("Slow request timeout"); - let _ = self.as_mut().send_error_response( - Response::with_body(StatusCode::REQUEST_TIMEOUT, ()), - Body::Empty, - ); - this = self.project(); - this.flags.insert(Flags::STARTED | Flags::SHUTDOWN); - } - // still have unfinished task. try to reset and register keep-alive. - } else if let Some(deadline) = - this.codec.config().keep_alive_expire() - { - timer.as_mut().reset(deadline); - let _ = timer.poll(cx); - } - // timer resolved but still have not met the keep-alive expire deadline. - // reset and register for later wakeup. - } else { - timer.as_mut().reset(*this.ka_expire); - let _ = timer.poll(cx); - } - } + trace!("timed out on slow request; replying with 408 and closing connection"); + + let _ = self.as_mut().send_error_response( + Response::with_body(StatusCode::REQUEST_TIMEOUT, ()), + BoxBody::new(()), + ); + + self.project().flags.insert(Flags::SHUTDOWN); } - } + }; + Ok(()) } - /// Returns true when io stream can be disconnected after write to it. + fn poll_ka_timer(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Result<(), DispatchError> { + let this = self.as_mut().project(); + if let TimerState::Active { timer } = this.ka_timer { + debug_assert!( + this.flags.contains(Flags::KEEP_ALIVE), + "keep-alive flag should be set when timer is active", + ); + debug_assert!( + this.state.is_none(), + "dispatcher should not be in keep-alive phase if state is not none: {:?}", + this.state, + ); + + // Assert removed by @robjtede on account of issue #2655. There are cases where an I/O + // flush can be pending after entering the keep-alive state causing the subsequent flush + // wake up to panic here. This appears to be a Linux-only problem. Leaving original code + // below for posterity because a simple and reliable test could not be found to trigger + // the behavior. 
+ // debug_assert!( + // this.write_buf.is_empty(), + // "dispatcher should not be in keep-alive phase if write_buf is not empty", + // ); + + // keep-alive timer has timed out + if timer.as_mut().poll(cx).is_ready() { + // no tasks at hand + trace!("timer timed out; closing connection"); + this.flags.insert(Flags::SHUTDOWN); + + if let Some(deadline) = this.config.client_disconnect_deadline() { + // start shutdown timeout if enabled + this.shutdown_timer + .set_and_init(cx, sleep_until(deadline.into()), line!()); + } else { + // no shutdown timeout, drop socket + this.flags.insert(Flags::WRITE_DISCONNECT); + } + } + } + + Ok(()) + } + + fn poll_shutdown_timer( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Result<(), DispatchError> { + let this = self.as_mut().project(); + if let TimerState::Active { timer } = this.shutdown_timer { + debug_assert!( + this.flags.contains(Flags::SHUTDOWN), + "shutdown flag should be set when timer is active", + ); + + // timed-out during shutdown; drop connection + if timer.as_mut().poll(cx).is_ready() { + trace!("timed-out during shutdown"); + return Err(DispatchError::DisconnectTimeout); + } + } + + Ok(()) + } + + /// Poll head, keep-alive, and disconnect timer. + fn poll_timers(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Result<(), DispatchError> { + self.as_mut().poll_head_timer(cx)?; + self.as_mut().poll_ka_timer(cx)?; + self.as_mut().poll_shutdown_timer(cx)?; + + Ok(()) + } + + /// Returns true when I/O stream can be disconnected after write to it. /// /// It covers these conditions: - /// - /// - `std::io::ErrorKind::ConnectionReset` after partial read. + /// - `std::io::ErrorKind::ConnectionReset` after partial read; /// - all data read done. - #[inline(always)] - fn read_available( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Result { + #[inline(always)] // TODO: bench this inline + fn read_available(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Result { let this = self.project(); if this.flags.contains(Flags::READ_DISCONNECT) { @@ -819,47 +942,42 @@ where loop { // Return early when read buf exceed decoder's max buffer size. - if this.read_buf.len() >= super::decoder::MAX_BUFFER_SIZE { - /* - At this point it's not known IO stream is still scheduled - to be waked up. so force wake up dispatcher just in case. + if this.read_buf.len() >= MAX_BUFFER_SIZE { + // At this point it's not known IO stream is still scheduled to be waked up so + // force wake up dispatcher just in case. + // + // Reason: + // AsyncRead mostly would only have guarantee wake up when the poll_read + // return Poll::Pending. + // + // Case: + // When read_buf is beyond max buffer size the early return could be successfully + // be parsed as a new Request. This case would not generate ParseError::TooLarge and + // at this point IO stream is not fully read to Pending and would result in + // dispatcher stuck until timeout (keep-alive). + // + // Note: + // This is a perf choice to reduce branch on ::decode. + // + // A Request head too large to parse is only checked on `httparse::Status::Partial`. - Reason: - AsyncRead mostly would only have guarantee wake up - when the poll_read return Poll::Pending. - - Case: - When read_buf is beyond max buffer size the early return - could be successfully be parsed as a new Request. - This case would not generate ParseError::TooLarge - and at this point IO stream is not fully read to Pending - and would result in dispatcher stuck until timeout (KA) - - Note: - This is a perf choice to reduce branch on - ::decode. 
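// Editor's note, not part of the patch: the division of labour between the three
// timers polled by `poll_timers` above, written as a sketch. The enum is
// invented for illustration; the real code keeps three separate `TimerState`
// fields.
enum SketchTimerRole {
    Head,      // deadline for reading the first request head
    KeepAlive, // idle deadline between requests
    Shutdown,  // deadline for the graceful-shutdown flush
}

fn sketch_action_when_fired(role: SketchTimerRole) -> &'static str {
    match role {
        SketchTimerRole::Head => "send 408 Request Timeout, then enter shutdown",
        SketchTimerRole::KeepAlive => "enter shutdown, arming the shutdown timer if configured",
        SketchTimerRole::Shutdown => "fail with DispatchError::DisconnectTimeout",
    }
}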
- - A Request head too large to parse is only checked on - httparse::Status::Partial condition. - */ - if this.payload.is_none() { - /* - When dispatcher has a payload the responsibility of - wake up it would be shift to h1::payload::Payload. - - Reason: - Self wake up when there is payload would waste poll - and/or result in over read. - - Case: - When payload is (partial) dropped by user there is - no need to do read anymore. - At this case read_buf could always remain beyond - MAX_BUFFER_SIZE and self wake up would be busy poll - dispatcher and waste resource. - - */ - cx.waker().wake_by_ref(); + match this.payload { + // When dispatcher has a payload the responsibility of wake ups is shifted to + // `h1::payload::Payload` unless the payload is needing a read, in which case it + // might not have access to the waker and could result in the dispatcher + // getting stuck until timeout. + // + // Reason: + // Self wake up when there is payload would waste poll and/or result in + // over read. + // + // Case: + // When payload is (partial) dropped by user there is no need to do + // read anymore. At this case read_buf could always remain beyond + // MAX_BUFFER_SIZE and self wake up would be busy poll dispatcher and + // waste resources. + Some(ref p) if p.need_read(cx) != PayloadStatus::Read => {} + _ => cx.waker().wake_by_ref(), } return Ok(false); @@ -871,20 +989,31 @@ where this.read_buf.reserve(HW_BUFFER_SIZE - remaining); } - match actix_codec::poll_read_buf(io.as_mut(), cx, this.read_buf) { + match tokio_util::io::poll_read_buf(io.as_mut(), cx, this.read_buf) { Poll::Ready(Ok(n)) => { + this.flags.remove(Flags::FINISHED); + if n == 0 { return Ok(true); } + read_some = true; } - Poll::Pending => return Ok(false), + + Poll::Pending => { + return Ok(false); + } + Poll::Ready(Err(err)) => { return match err.kind() { + // convert WouldBlock error to the same as Pending return io::ErrorKind::WouldBlock => Ok(false), + + // connection reset after partial read io::ErrorKind::ConnectionReset if read_some => Ok(true), + _ => Err(DispatchError::Io(err)), - } + }; } } } @@ -909,14 +1038,13 @@ where T: AsyncRead + AsyncWrite + Unpin, S: Service, - S::Error: Into, + S::Error: Into>, S::Response: Into>, B: MessageBody, - B::Error: Into, X: Service, - X::Error: Into, + X::Error: Into>, U: Service<(Request, Framed), Response = ()>, U::Error: fmt::Display, @@ -933,27 +1061,60 @@ where } match this.inner.project() { - DispatcherStateProj::Normal(mut inner) => { - inner.as_mut().poll_keepalive(cx)?; + DispatcherStateProj::Upgrade { fut: upgrade } => upgrade.poll(cx).map_err(|err| { + error!("Upgrade handler error: {}", err); + DispatchError::Upgrade + }), - if inner.flags.contains(Flags::SHUTDOWN) { + DispatcherStateProj::Normal { mut inner } => { + trace!("start flags: {:?}", &inner.flags); + + trace_timer_states( + "start", + &inner.head_timer, + &inner.ka_timer, + &inner.shutdown_timer, + ); + + inner.as_mut().poll_timers(cx)?; + + let poll = if inner.flags.contains(Flags::SHUTDOWN) { if inner.flags.contains(Flags::WRITE_DISCONNECT) { Poll::Ready(Ok(())) } else { - // flush buffer and wait on blocked. + // flush buffer and wait on blocked ready!(inner.as_mut().poll_flush(cx))?; - Pin::new(inner.project().io.as_mut().unwrap()) + Pin::new(inner.as_mut().project().io.as_mut().unwrap()) .poll_shutdown(cx) .map_err(DispatchError::from) } } else { - // read from io stream and fill read buffer. 
+ // read from I/O stream and fill read buffer let should_disconnect = inner.as_mut().read_available(cx)?; + // after reading something from stream, clear keep-alive timer + if !inner.read_buf.is_empty() && inner.flags.contains(Flags::KEEP_ALIVE) { + let inner = inner.as_mut().project(); + inner.flags.remove(Flags::KEEP_ALIVE); + inner.ka_timer.clear(line!()); + } + + if !inner.flags.contains(Flags::STARTED) { + inner.as_mut().project().flags.insert(Flags::STARTED); + + if let Some(deadline) = inner.config.client_request_deadline() { + inner.as_mut().project().head_timer.set_and_init( + cx, + sleep_until(deadline.into()), + line!(), + ); + } + } + inner.as_mut().poll_request(cx)?; - // io stream should to be closed. if should_disconnect { + // I/O stream should to be closed let inner = inner.as_mut().project(); inner.flags.insert(Flags::READ_DISCONNECT); if let Some(mut payload) = inner.payload.take() { @@ -962,467 +1123,128 @@ where }; loop { - // poll_response and populate write buffer. - // drain indicate if write buffer should be emptied before next run. + // poll response to populate write buffer + // drain indicates whether write buffer should be emptied before next run let drain = match inner.as_mut().poll_response(cx)? { PollResponse::DrainWriteBuf => true, - PollResponse::DoNothing => false, + + PollResponse::DoNothing => { + // KEEP_ALIVE is set in send_response_inner if client allows it + // FINISHED is set after writing last chunk of response + if inner.flags.contains(Flags::KEEP_ALIVE | Flags::FINISHED) { + if let Some(timer) = inner.config.keep_alive_deadline() { + inner.as_mut().project().ka_timer.set_and_init( + cx, + sleep_until(timer.into()), + line!(), + ); + } + } + + false + } + // upgrade request and goes Upgrade variant of DispatcherState. PollResponse::Upgrade(req) => { let upgrade = inner.upgrade(req); self.as_mut() .project() .inner - .set(DispatcherState::Upgrade(upgrade)); + .set(DispatcherState::Upgrade { fut: upgrade }); return self.poll(cx); } }; - // we didn't get WouldBlock from write operation, - // so data get written to kernel completely (macOS) - // and we have to write again otherwise response can get stuck + // we didn't get WouldBlock from write operation, so data get written to + // kernel completely (macOS) and we have to write again otherwise response + // can get stuck // - // TODO: what? is WouldBlock good or bad? 
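A note on the flag checks above: with the `bitflags`-generated API (assumed to be what backs `Flags` here), `contains(A | B)` only passes when both bits are set, while `intersects(A | B)` passes on any overlap. A standalone sketch:

use bitflags::bitflags;

bitflags! {
    struct Flags: u8 {
        const KEEP_ALIVE = 0b01;
        const FINISHED   = 0b10;
    }
}

fn main() {
    let flags = Flags::KEEP_ALIVE;

    // `contains` of a combined value requires every bit in it to be set...
    assert!(!flags.contains(Flags::KEEP_ALIVE | Flags::FINISHED));

    // ...while `intersects` is satisfied by any single overlapping bit.
    assert!(flags.intersects(Flags::KEEP_ALIVE | Flags::FINISHED));
}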
- // want to find a reference for this macOS behavior - if inner.as_mut().poll_flush(cx)?.is_pending() || !drain { + // TODO: want to find a reference for this behavior + // see introduced commit: 3872d3ba + let flush_was_ready = inner.as_mut().poll_flush(cx)?.is_ready(); + + // this assert seems to always be true but not willing to commit to it until + // we understand what Nikolay meant when writing the above comment + // debug_assert!(flush_was_ready); + + if !flush_was_ready || !drain { break; } } // client is gone if inner.flags.contains(Flags::WRITE_DISCONNECT) { + trace!("client is gone; disconnecting"); return Poll::Ready(Ok(())); } - let is_empty = inner.state.is_empty(); - let inner_p = inner.as_mut().project(); - // read half is closed and we do not processing any responses - if inner_p.flags.contains(Flags::READ_DISCONNECT) && is_empty { + let state_is_none = inner_p.state.is_none(); + + // read half is closed; we do not process any responses + if inner_p.flags.contains(Flags::READ_DISCONNECT) && state_is_none { + trace!("read half closed; start shutdown"); inner_p.flags.insert(Flags::SHUTDOWN); } // keep-alive and stream errors - if is_empty && inner_p.write_buf.is_empty() { + if state_is_none && inner_p.write_buf.is_empty() { if let Some(err) = inner_p.error.take() { - Poll::Ready(Err(err)) + error!("stream error: {}", &err); + return Poll::Ready(Err(err)); } + // disconnect if keep-alive is not enabled - else if inner_p.flags.contains(Flags::STARTED) - && !inner_p.flags.intersects(Flags::KEEPALIVE) + if inner_p.flags.contains(Flags::FINISHED) + && !inner_p.flags.contains(Flags::KEEP_ALIVE) { + inner_p.flags.remove(Flags::FINISHED); inner_p.flags.insert(Flags::SHUTDOWN); - self.poll(cx) + return self.poll(cx); } + // disconnect if shutdown - else if inner_p.flags.contains(Flags::SHUTDOWN) { - self.poll(cx) - } else { - Poll::Pending + if inner_p.flags.contains(Flags::SHUTDOWN) { + return self.poll(cx); } - } else { - Poll::Pending } - } + + trace_timer_states( + "end", + inner_p.head_timer, + inner_p.ka_timer, + inner_p.shutdown_timer, + ); + + Poll::Pending + }; + + trace!("end flags: {:?}", &inner.flags); + + poll } - DispatcherStateProj::Upgrade(fut) => fut.poll(cx).map_err(|e| { - error!("Upgrade handler error: {}", e); - DispatchError::Upgrade - }), } } } -#[cfg(test)] -mod tests { - use std::str; +#[allow(dead_code)] +fn trace_timer_states( + label: &str, + head_timer: &TimerState, + ka_timer: &TimerState, + shutdown_timer: &TimerState, +) { + trace!("{} timers:", label); - use actix_service::fn_service; - use actix_utils::future::{ready, Ready}; - use bytes::Bytes; - use futures_util::future::lazy; - - use super::*; - use crate::{ - error::Error, - h1::{ExpectHandler, UpgradeHandler}, - http::Method, - test::{TestBuffer, TestSeqBuffer}, - HttpMessage, KeepAlive, - }; - - fn find_slice(haystack: &[u8], needle: &[u8], from: usize) -> Option { - haystack[from..] 
- .windows(needle.len()) - .position(|window| window == needle) + if head_timer.is_enabled() { + trace!(" head {}", &head_timer); } - fn stabilize_date_header(payload: &mut [u8]) { - let mut from = 0; - - while let Some(pos) = find_slice(&payload, b"date", from) { - payload[(from + pos)..(from + pos + 35)] - .copy_from_slice(b"date: Thu, 01 Jan 1970 12:34:56 UTC"); - from += 35; - } + if ka_timer.is_enabled() { + trace!(" keep-alive {}", &ka_timer); } - fn ok_service() -> impl Service, Error = Error> { - fn_service(|_req: Request| ready(Ok::<_, Error>(Response::ok()))) - } - - fn echo_path_service( - ) -> impl Service, Error = Error> { - fn_service(|req: Request| { - let path = req.path().as_bytes(); - ready(Ok::<_, Error>( - Response::ok().set_body(Body::from_slice(path)), - )) - }) - } - - fn echo_payload_service( - ) -> impl Service, Error = Error> { - fn_service(|mut req: Request| { - Box::pin(async move { - use futures_util::stream::StreamExt as _; - - let mut pl = req.take_payload(); - let mut body = BytesMut::new(); - while let Some(chunk) = pl.next().await { - body.extend_from_slice(chunk.unwrap().chunk()) - } - - Ok::<_, Error>(Response::ok().set_body(body.freeze())) - }) - }) - } - - #[actix_rt::test] - async fn test_req_parse_err() { - lazy(|cx| { - let buf = TestBuffer::new("GET /test HTTP/1\r\n\r\n"); - - let services = HttpFlow::new(ok_service(), ExpectHandler, None); - - let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new( - buf, - ServiceConfig::default(), - services, - OnConnectData::default(), - None, - ); - - actix_rt::pin!(h1); - - match h1.as_mut().poll(cx) { - Poll::Pending => panic!(), - Poll::Ready(res) => assert!(res.is_err()), - } - - if let DispatcherStateProj::Normal(inner) = h1.project().inner.project() { - assert!(inner.flags.contains(Flags::READ_DISCONNECT)); - assert_eq!( - &inner.project().io.take().unwrap().write_buf[..26], - b"HTTP/1.1 400 Bad Request\r\n" - ); - } - }) - .await; - } - - #[actix_rt::test] - async fn test_pipelining() { - lazy(|cx| { - let buf = TestBuffer::new( - "\ - GET /abcd HTTP/1.1\r\n\r\n\ - GET /def HTTP/1.1\r\n\r\n\ - ", - ); - - let cfg = ServiceConfig::new(KeepAlive::Disabled, 1, 1, false, None); - - let services = HttpFlow::new(echo_path_service(), ExpectHandler, None); - - let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new( - buf, - cfg, - services, - OnConnectData::default(), - None, - ); - - actix_rt::pin!(h1); - - assert!(matches!(&h1.inner, DispatcherState::Normal(_))); - - match h1.as_mut().poll(cx) { - Poll::Pending => panic!("first poll should not be pending"), - Poll::Ready(res) => assert!(res.is_ok()), - } - - // polls: initial => shutdown - assert_eq!(h1.poll_count, 2); - - if let DispatcherStateProj::Normal(inner) = h1.project().inner.project() { - let res = &mut inner.project().io.take().unwrap().write_buf[..]; - stabilize_date_header(res); - - let exp = b"\ - HTTP/1.1 200 OK\r\n\ - content-length: 5\r\n\ - connection: close\r\n\ - date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\ - /abcd\ - HTTP/1.1 200 OK\r\n\ - content-length: 4\r\n\ - connection: close\r\n\ - date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\ - /def\ - "; - - assert_eq!(res.to_vec(), exp.to_vec()); - } - }) - .await; - - lazy(|cx| { - let buf = TestBuffer::new( - "\ - GET /abcd HTTP/1.1\r\n\r\n\ - GET /def HTTP/1\r\n\r\n\ - ", - ); - - let cfg = ServiceConfig::new(KeepAlive::Disabled, 1, 1, false, None); - - let services = HttpFlow::new(echo_path_service(), ExpectHandler, None); - - let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new( - buf, - 
cfg, - services, - OnConnectData::default(), - None, - ); - - actix_rt::pin!(h1); - - assert!(matches!(&h1.inner, DispatcherState::Normal(_))); - - match h1.as_mut().poll(cx) { - Poll::Pending => panic!("first poll should not be pending"), - Poll::Ready(res) => assert!(res.is_err()), - } - - // polls: initial => shutdown - assert_eq!(h1.poll_count, 1); - - if let DispatcherStateProj::Normal(inner) = h1.project().inner.project() { - let res = &mut inner.project().io.take().unwrap().write_buf[..]; - stabilize_date_header(res); - - let exp = b"\ - HTTP/1.1 200 OK\r\n\ - content-length: 5\r\n\ - connection: close\r\n\ - date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\ - /abcd\ - HTTP/1.1 400 Bad Request\r\n\ - content-length: 0\r\n\ - connection: close\r\n\ - date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\ - "; - - assert_eq!(res.to_vec(), exp.to_vec()); - } - }) - .await; - } - - #[actix_rt::test] - async fn test_expect() { - lazy(|cx| { - let mut buf = TestSeqBuffer::empty(); - let cfg = ServiceConfig::new(KeepAlive::Disabled, 0, 0, false, None); - - let services = HttpFlow::new(echo_payload_service(), ExpectHandler, None); - - let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new( - buf.clone(), - cfg, - services, - OnConnectData::default(), - None, - ); - - buf.extend_read_buf( - "\ - POST /upload HTTP/1.1\r\n\ - Content-Length: 5\r\n\ - Expect: 100-continue\r\n\ - \r\n\ - ", - ); - - actix_rt::pin!(h1); - - assert!(h1.as_mut().poll(cx).is_pending()); - assert!(matches!(&h1.inner, DispatcherState::Normal(_))); - - // polls: manual - assert_eq!(h1.poll_count, 1); - eprintln!("poll count: {}", h1.poll_count); - - if let DispatcherState::Normal(ref inner) = h1.inner { - let io = inner.io.as_ref().unwrap(); - let res = &io.write_buf()[..]; - assert_eq!( - str::from_utf8(res).unwrap(), - "HTTP/1.1 100 Continue\r\n\r\n" - ); - } - - buf.extend_read_buf("12345"); - assert!(h1.as_mut().poll(cx).is_ready()); - - // polls: manual manual shutdown - assert_eq!(h1.poll_count, 3); - - if let DispatcherState::Normal(ref inner) = h1.inner { - let io = inner.io.as_ref().unwrap(); - let mut res = (&io.write_buf()[..]).to_owned(); - stabilize_date_header(&mut res); - - assert_eq!( - str::from_utf8(&res).unwrap(), - "\ - HTTP/1.1 100 Continue\r\n\ - \r\n\ - HTTP/1.1 200 OK\r\n\ - content-length: 5\r\n\ - connection: close\r\n\ - date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\ - \r\n\ - 12345\ - " - ); - } - }) - .await; - } - - #[actix_rt::test] - async fn test_eager_expect() { - lazy(|cx| { - let mut buf = TestSeqBuffer::empty(); - let cfg = ServiceConfig::new(KeepAlive::Disabled, 0, 0, false, None); - - let services = HttpFlow::new(echo_path_service(), ExpectHandler, None); - - let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new( - buf.clone(), - cfg, - services, - OnConnectData::default(), - None, - ); - - buf.extend_read_buf( - "\ - POST /upload HTTP/1.1\r\n\ - Content-Length: 5\r\n\ - Expect: 100-continue\r\n\ - \r\n\ - ", - ); - - actix_rt::pin!(h1); - - assert!(h1.as_mut().poll(cx).is_ready()); - assert!(matches!(&h1.inner, DispatcherState::Normal(_))); - - // polls: manual shutdown - assert_eq!(h1.poll_count, 2); - - if let DispatcherState::Normal(ref inner) = h1.inner { - let io = inner.io.as_ref().unwrap(); - let mut res = (&io.write_buf()[..]).to_owned(); - stabilize_date_header(&mut res); - - // Despite the content-length header and even though the request payload has not - // been sent, this test expects a complete service response since the payload - // is not used at all. 
The service passed to dispatcher is path echo and doesn't - // consume payload bytes. - assert_eq!( - str::from_utf8(&res).unwrap(), - "\ - HTTP/1.1 100 Continue\r\n\ - \r\n\ - HTTP/1.1 200 OK\r\n\ - content-length: 7\r\n\ - connection: close\r\n\ - date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\ - \r\n\ - /upload\ - " - ); - } - }) - .await; - } - - #[actix_rt::test] - async fn test_upgrade() { - struct TestUpgrade; - - impl Service<(Request, Framed)> for TestUpgrade { - type Response = (); - type Error = Error; - type Future = Ready>; - - actix_service::always_ready!(); - - fn call(&self, (req, _framed): (Request, Framed)) -> Self::Future { - assert_eq!(req.method(), Method::GET); - assert!(req.upgrade()); - assert_eq!(req.headers().get("upgrade").unwrap(), "websocket"); - ready(Ok(())) - } - } - - lazy(|cx| { - let mut buf = TestSeqBuffer::empty(); - let cfg = ServiceConfig::new(KeepAlive::Disabled, 0, 0, false, None); - - let services = HttpFlow::new(ok_service(), ExpectHandler, Some(TestUpgrade)); - - let h1 = Dispatcher::<_, _, _, _, TestUpgrade>::new( - buf.clone(), - cfg, - services, - OnConnectData::default(), - None, - ); - - buf.extend_read_buf( - "\ - GET /ws HTTP/1.1\r\n\ - Connection: Upgrade\r\n\ - Upgrade: websocket\r\n\ - \r\n\ - ", - ); - - actix_rt::pin!(h1); - - assert!(h1.as_mut().poll(cx).is_ready()); - assert!(matches!(&h1.inner, DispatcherState::Upgrade(_))); - - // polls: manual shutdown - assert_eq!(h1.poll_count, 2); - }) - .await; + if shutdown_timer.is_enabled() { + trace!(" shutdown {}", &shutdown_timer); } } diff --git a/actix-http/src/h1/dispatcher_tests.rs b/actix-http/src/h1/dispatcher_tests.rs new file mode 100644 index 000000000..50259e6ce --- /dev/null +++ b/actix-http/src/h1/dispatcher_tests.rs @@ -0,0 +1,972 @@ +use std::{future::Future, str, task::Poll, time::Duration}; + +use actix_codec::Framed; +use actix_rt::{pin, time::sleep}; +use actix_service::{fn_service, Service}; +use actix_utils::future::{ready, Ready}; +use bytes::{Buf, Bytes, BytesMut}; +use futures_util::future::lazy; + +use super::dispatcher::{Dispatcher, DispatcherState, DispatcherStateProj, Flags}; +use crate::{ + body::MessageBody, + config::ServiceConfig, + h1::{Codec, ExpectHandler, UpgradeHandler}, + service::HttpFlow, + test::{TestBuffer, TestSeqBuffer}, + Error, HttpMessage, KeepAlive, Method, OnConnectData, Request, Response, StatusCode, +}; + +fn find_slice(haystack: &[u8], needle: &[u8], from: usize) -> Option { + memchr::memmem::find(&haystack[from..], needle) +} + +fn stabilize_date_header(payload: &mut [u8]) { + let mut from = 0; + while let Some(pos) = find_slice(payload, b"date", from) { + payload[(from + pos)..(from + pos + 35)] + .copy_from_slice(b"date: Thu, 01 Jan 1970 12:34:56 UTC"); + from += 35; + } +} + +fn ok_service() -> impl Service, Error = Error> { + status_service(StatusCode::OK) +} + +fn status_service( + status: StatusCode, +) -> impl Service, Error = Error> { + fn_service(move |_req: Request| ready(Ok::<_, Error>(Response::new(status)))) +} + +fn echo_path_service() -> impl Service, Error = Error> +{ + fn_service(|req: Request| { + let path = req.path().as_bytes(); + ready(Ok::<_, Error>( + Response::ok().set_body(Bytes::copy_from_slice(path)), + )) + }) +} + +fn drop_payload_service() -> impl Service, Error = Error> +{ + fn_service(|mut req: Request| async move { + let _ = req.take_payload(); + Ok::<_, Error>(Response::with_body(StatusCode::OK, "payload dropped")) + }) +} + +fn echo_payload_service() -> impl Service, Error = Error> { + fn_service(|mut req: 
Request| { + Box::pin(async move { + use futures_util::StreamExt as _; + + let mut pl = req.take_payload(); + let mut body = BytesMut::new(); + while let Some(chunk) = pl.next().await { + body.extend_from_slice(chunk.unwrap().chunk()) + } + + Ok::<_, Error>(Response::ok().set_body(body.freeze())) + }) + }) +} + +#[actix_rt::test] +async fn late_request() { + let mut buf = TestBuffer::empty(); + + let cfg = ServiceConfig::new( + KeepAlive::Disabled, + Duration::from_millis(100), + Duration::ZERO, + false, + None, + ); + let services = HttpFlow::new(ok_service(), ExpectHandler, None); + + let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new( + buf.clone(), + services, + cfg, + None, + OnConnectData::default(), + ); + pin!(h1); + + lazy(|cx| { + assert!(matches!(&h1.inner, DispatcherState::Normal { .. })); + + match h1.as_mut().poll(cx) { + Poll::Ready(_) => panic!("first poll should not be ready"), + Poll::Pending => {} + } + + // polls: initial + assert_eq!(h1.poll_count, 1); + + buf.extend_read_buf("GET /abcd HTTP/1.1\r\nConnection: close\r\n\r\n"); + + match h1.as_mut().poll(cx) { + Poll::Pending => panic!("second poll should not be pending"), + Poll::Ready(res) => assert!(res.is_ok()), + } + + // polls: initial pending => handle req => shutdown + assert_eq!(h1.poll_count, 3); + + let mut res = buf.take_write_buf().to_vec(); + stabilize_date_header(&mut res); + let res = &res[..]; + + let exp = b"\ + HTTP/1.1 200 OK\r\n\ + content-length: 0\r\n\ + connection: close\r\n\ + date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\ + "; + + assert_eq!( + res, + exp, + "\nexpected response not in write buffer:\n\ + response: {:?}\n\ + expected: {:?}", + String::from_utf8_lossy(res), + String::from_utf8_lossy(exp) + ); + }) + .await; +} + +#[actix_rt::test] +async fn oneshot_connection() { + let buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n"); + + let cfg = ServiceConfig::new( + KeepAlive::Disabled, + Duration::from_millis(100), + Duration::ZERO, + false, + None, + ); + let services = HttpFlow::new(echo_path_service(), ExpectHandler, None); + + let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new( + buf.clone(), + services, + cfg, + None, + OnConnectData::default(), + ); + pin!(h1); + + lazy(|cx| { + assert!(matches!(&h1.inner, DispatcherState::Normal { .. })); + + match h1.as_mut().poll(cx) { + Poll::Pending => panic!("first poll should not be pending"), + Poll::Ready(res) => assert!(res.is_ok()), + } + + // polls: initial => shutdown + assert_eq!(h1.poll_count, 2); + + let mut res = buf.take_write_buf().to_vec(); + stabilize_date_header(&mut res); + let res = &res[..]; + + let exp = http_msg( + r" + HTTP/1.1 200 OK + content-length: 5 + connection: close + date: Thu, 01 Jan 1970 12:34:56 UTC + + /abcd + ", + ); + + assert_eq!( + res, + exp, + "\nexpected response not in write buffer:\n\ + response: {:?}\n\ + expected: {:?}", + String::from_utf8_lossy(res), + String::from_utf8_lossy(&exp) + ); + }) + .await; +} + +#[actix_rt::test] +async fn keep_alive_timeout() { + let buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n"); + + let cfg = ServiceConfig::new( + KeepAlive::Timeout(Duration::from_millis(200)), + Duration::from_millis(100), + Duration::ZERO, + false, + None, + ); + let services = HttpFlow::new(echo_path_service(), ExpectHandler, None); + + let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new( + buf.clone(), + services, + cfg, + None, + OnConnectData::default(), + ); + pin!(h1); + + lazy(|cx| { + assert!(matches!(&h1.inner, DispatcherState::Normal { .. 
})); + + assert!( + h1.as_mut().poll(cx).is_pending(), + "keep-alive should prevent poll from resolving" + ); + + // polls: initial + assert_eq!(h1.poll_count, 1); + + let mut res = buf.take_write_buf().to_vec(); + stabilize_date_header(&mut res); + let res = &res[..]; + + let exp = b"\ + HTTP/1.1 200 OK\r\n\ + content-length: 5\r\n\ + date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\ + /abcd\ + "; + + assert_eq!( + res, + exp, + "\nexpected response not in write buffer:\n\ + response: {:?}\n\ + expected: {:?}", + String::from_utf8_lossy(res), + String::from_utf8_lossy(exp) + ); + }) + .await; + + // sleep slightly longer than keep-alive timeout + sleep(Duration::from_millis(250)).await; + + lazy(|cx| { + assert!( + h1.as_mut().poll(cx).is_ready(), + "keep-alive should have resolved", + ); + + // polls: initial => keep-alive wake-up shutdown + assert_eq!(h1.poll_count, 2); + + if let DispatcherStateProj::Normal { inner } = h1.project().inner.project() { + // connection closed + assert!(inner.flags.contains(Flags::SHUTDOWN)); + assert!(inner.flags.contains(Flags::WRITE_DISCONNECT)); + // and nothing added to write buffer + assert!(buf.write_buf_slice().is_empty()); + } + }) + .await; +} + +#[actix_rt::test] +async fn keep_alive_follow_up_req() { + let mut buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n"); + + let cfg = ServiceConfig::new( + KeepAlive::Timeout(Duration::from_millis(500)), + Duration::from_millis(100), + Duration::ZERO, + false, + None, + ); + let services = HttpFlow::new(echo_path_service(), ExpectHandler, None); + + let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new( + buf.clone(), + services, + cfg, + None, + OnConnectData::default(), + ); + pin!(h1); + + lazy(|cx| { + assert!(matches!(&h1.inner, DispatcherState::Normal { .. })); + + assert!( + h1.as_mut().poll(cx).is_pending(), + "keep-alive should prevent poll from resolving" + ); + + // polls: initial + assert_eq!(h1.poll_count, 1); + + let mut res = buf.take_write_buf().to_vec(); + stabilize_date_header(&mut res); + let res = &res[..]; + + let exp = b"\ + HTTP/1.1 200 OK\r\n\ + content-length: 5\r\n\ + date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\ + /abcd\ + "; + + assert_eq!( + res, + exp, + "\nexpected response not in write buffer:\n\ + response: {:?}\n\ + expected: {:?}", + String::from_utf8_lossy(res), + String::from_utf8_lossy(exp) + ); + }) + .await; + + // sleep for less than KA timeout + sleep(Duration::from_millis(100)).await; + + lazy(|cx| { + assert!( + h1.as_mut().poll(cx).is_pending(), + "keep-alive should not have resolved dispatcher yet", + ); + + // polls: initial => manual + assert_eq!(h1.poll_count, 2); + + if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() { + // connection not closed + assert!(!inner.flags.contains(Flags::SHUTDOWN)); + assert!(!inner.flags.contains(Flags::WRITE_DISCONNECT)); + // and nothing added to write buffer + assert!(buf.write_buf_slice().is_empty()); + } + }) + .await; + + lazy(|cx| { + buf.extend_read_buf( + "\ + GET /efg HTTP/1.1\r\n\ + Connection: close\r\n\ + \r\n\r\n", + ); + + assert!( + h1.as_mut().poll(cx).is_ready(), + "connection close header should override keep-alive setting", + ); + + // polls: initial => manual => follow-up req => shutdown + assert_eq!(h1.poll_count, 4); + + if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() { + // connection closed + assert!(inner.flags.contains(Flags::SHUTDOWN)); + assert!(!inner.flags.contains(Flags::WRITE_DISCONNECT)); + } + + let mut res = 
buf.take_write_buf().to_vec(); + stabilize_date_header(&mut res); + let res = &res[..]; + + let exp = b"\ + HTTP/1.1 200 OK\r\n\ + content-length: 4\r\n\ + connection: close\r\n\ + date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\ + /efg\ + "; + + assert_eq!( + res, + exp, + "\nexpected response not in write buffer:\n\ + response: {:?}\n\ + expected: {:?}", + String::from_utf8_lossy(res), + String::from_utf8_lossy(exp) + ); + }) + .await; +} + +#[actix_rt::test] +async fn req_parse_err() { + lazy(|cx| { + let buf = TestBuffer::new("GET /test HTTP/1\r\n\r\n"); + + let services = HttpFlow::new(ok_service(), ExpectHandler, None); + + let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new( + buf.clone(), + services, + ServiceConfig::default(), + None, + OnConnectData::default(), + ); + + pin!(h1); + + match h1.as_mut().poll(cx) { + Poll::Pending => panic!(), + Poll::Ready(res) => assert!(res.is_err()), + } + + if let DispatcherStateProj::Normal { inner } = h1.project().inner.project() { + assert!(inner.flags.contains(Flags::READ_DISCONNECT)); + assert_eq!( + &buf.write_buf_slice()[..26], + b"HTTP/1.1 400 Bad Request\r\n" + ); + } + }) + .await; +} + +#[actix_rt::test] +async fn pipelining_ok_then_ok() { + lazy(|cx| { + let buf = TestBuffer::new( + "\ + GET /abcd HTTP/1.1\r\n\r\n\ + GET /def HTTP/1.1\r\n\r\n\ + ", + ); + + let cfg = ServiceConfig::new( + KeepAlive::Disabled, + Duration::from_millis(1), + Duration::from_millis(1), + false, + None, + ); + + let services = HttpFlow::new(echo_path_service(), ExpectHandler, None); + + let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new( + buf.clone(), + services, + cfg, + None, + OnConnectData::default(), + ); + + pin!(h1); + + assert!(matches!(&h1.inner, DispatcherState::Normal { .. })); + + match h1.as_mut().poll(cx) { + Poll::Pending => panic!("first poll should not be pending"), + Poll::Ready(res) => assert!(res.is_ok()), + } + + // polls: initial => shutdown + assert_eq!(h1.poll_count, 2); + + let mut res = buf.write_buf_slice_mut(); + stabilize_date_header(&mut res); + let res = &res[..]; + + let exp = b"\ + HTTP/1.1 200 OK\r\n\ + content-length: 5\r\n\ + connection: close\r\n\ + date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\ + /abcd\ + HTTP/1.1 200 OK\r\n\ + content-length: 4\r\n\ + connection: close\r\n\ + date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\ + /def\ + "; + + assert_eq!( + res, + exp, + "\nexpected response not in write buffer:\n\ + response: {:?}\n\ + expected: {:?}", + String::from_utf8_lossy(res), + String::from_utf8_lossy(exp) + ); + }) + .await; +} + +#[actix_rt::test] +async fn pipelining_ok_then_bad() { + lazy(|cx| { + let buf = TestBuffer::new( + "\ + GET /abcd HTTP/1.1\r\n\r\n\ + GET /def HTTP/1\r\n\r\n\ + ", + ); + + let cfg = ServiceConfig::new( + KeepAlive::Disabled, + Duration::from_millis(1), + Duration::from_millis(1), + false, + None, + ); + + let services = HttpFlow::new(echo_path_service(), ExpectHandler, None); + + let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new( + buf.clone(), + services, + cfg, + None, + OnConnectData::default(), + ); + + pin!(h1); + + assert!(matches!(&h1.inner, DispatcherState::Normal { .. 
})); + + match h1.as_mut().poll(cx) { + Poll::Pending => panic!("first poll should not be pending"), + Poll::Ready(res) => assert!(res.is_err()), + } + + // polls: initial => shutdown + assert_eq!(h1.poll_count, 1); + + let mut res = buf.write_buf_slice_mut(); + stabilize_date_header(&mut res); + let res = &res[..]; + + let exp = b"\ + HTTP/1.1 200 OK\r\n\ + content-length: 5\r\n\ + connection: close\r\n\ + date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\ + /abcd\ + HTTP/1.1 400 Bad Request\r\n\ + content-length: 0\r\n\ + connection: close\r\n\ + date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\ + "; + + assert_eq!( + res, + exp, + "\nexpected response not in write buffer:\n\ + response: {:?}\n\ + expected: {:?}", + String::from_utf8_lossy(res), + String::from_utf8_lossy(exp) + ); + }) + .await; +} + +#[actix_rt::test] +async fn expect_handling() { + lazy(|cx| { + let mut buf = TestSeqBuffer::empty(); + let cfg = ServiceConfig::new( + KeepAlive::Disabled, + Duration::ZERO, + Duration::ZERO, + false, + None, + ); + + let services = HttpFlow::new(echo_payload_service(), ExpectHandler, None); + + let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new( + buf.clone(), + services, + cfg, + None, + OnConnectData::default(), + ); + + buf.extend_read_buf( + "\ + POST /upload HTTP/1.1\r\n\ + Content-Length: 5\r\n\ + Expect: 100-continue\r\n\ + \r\n\ + ", + ); + + pin!(h1); + + assert!(h1.as_mut().poll(cx).is_pending()); + assert!(matches!(&h1.inner, DispatcherState::Normal { .. })); + + // polls: manual + assert_eq!(h1.poll_count, 1); + + if let DispatcherState::Normal { ref inner } = h1.inner { + let io = inner.io.as_ref().unwrap(); + let res = &io.write_buf()[..]; + assert_eq!( + str::from_utf8(res).unwrap(), + "HTTP/1.1 100 Continue\r\n\r\n" + ); + } + + buf.extend_read_buf("12345"); + assert!(h1.as_mut().poll(cx).is_ready()); + + // polls: manual manual shutdown + assert_eq!(h1.poll_count, 3); + + if let DispatcherState::Normal { ref inner } = h1.inner { + let io = inner.io.as_ref().unwrap(); + let mut res = io.write_buf()[..].to_owned(); + stabilize_date_header(&mut res); + + assert_eq!( + str::from_utf8(&res).unwrap(), + "\ + HTTP/1.1 100 Continue\r\n\ + \r\n\ + HTTP/1.1 200 OK\r\n\ + content-length: 5\r\n\ + connection: close\r\n\ + date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\ + \r\n\ + 12345\ + " + ); + } + }) + .await; +} + +#[actix_rt::test] +async fn expect_eager() { + lazy(|cx| { + let mut buf = TestSeqBuffer::empty(); + let cfg = ServiceConfig::new( + KeepAlive::Disabled, + Duration::ZERO, + Duration::ZERO, + false, + None, + ); + + let services = HttpFlow::new(echo_path_service(), ExpectHandler, None); + + let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new( + buf.clone(), + services, + cfg, + None, + OnConnectData::default(), + ); + + buf.extend_read_buf( + "\ + POST /upload HTTP/1.1\r\n\ + Content-Length: 5\r\n\ + Expect: 100-continue\r\n\ + \r\n\ + ", + ); + + pin!(h1); + + assert!(h1.as_mut().poll(cx).is_ready()); + assert!(matches!(&h1.inner, DispatcherState::Normal { .. })); + + // polls: manual shutdown + assert_eq!(h1.poll_count, 2); + + if let DispatcherState::Normal { ref inner } = h1.inner { + let io = inner.io.as_ref().unwrap(); + let mut res = io.write_buf()[..].to_owned(); + stabilize_date_header(&mut res); + + // Despite the content-length header and even though the request payload has not + // been sent, this test expects a complete service response since the payload + // is not used at all. The service passed to dispatcher is path echo and doesn't + // consume payload bytes. 
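The tests above drive the dispatcher by hand; `futures_util::future::lazy` is what hands them a real `&mut Context` so a pinned future can be polled one step at a time. A minimal standalone sketch of that testing pattern (`futures-util` and `futures-executor` assumed as dev-dependencies):

use std::{future::Future as _, task::Poll};

use futures_util::future::lazy;

fn main() {
    futures_executor::block_on(lazy(|cx| {
        let fut = async { 41 + 1 };
        futures_util::pin_mut!(fut);

        // an async block with no await points resolves on its first poll
        match fut.as_mut().poll(cx) {
            Poll::Ready(n) => assert_eq!(n, 42),
            Poll::Pending => unreachable!(),
        }
    }));
}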
+ assert_eq!( + str::from_utf8(&res).unwrap(), + "\ + HTTP/1.1 100 Continue\r\n\ + \r\n\ + HTTP/1.1 200 OK\r\n\ + content-length: 7\r\n\ + connection: close\r\n\ + date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\ + \r\n\ + /upload\ + " + ); + } + }) + .await; +} + +#[actix_rt::test] +async fn upgrade_handling() { + struct TestUpgrade; + + impl Service<(Request, Framed)> for TestUpgrade { + type Response = (); + type Error = Error; + type Future = Ready>; + + actix_service::always_ready!(); + + fn call(&self, (req, _framed): (Request, Framed)) -> Self::Future { + assert_eq!(req.method(), Method::GET); + assert!(req.upgrade()); + assert_eq!(req.headers().get("upgrade").unwrap(), "websocket"); + ready(Ok(())) + } + } + + lazy(|cx| { + let mut buf = TestSeqBuffer::empty(); + let cfg = ServiceConfig::new( + KeepAlive::Disabled, + Duration::ZERO, + Duration::ZERO, + false, + None, + ); + + let services = HttpFlow::new(ok_service(), ExpectHandler, Some(TestUpgrade)); + + let h1 = Dispatcher::<_, _, _, _, TestUpgrade>::new( + buf.clone(), + services, + cfg, + None, + OnConnectData::default(), + ); + + buf.extend_read_buf( + "\ + GET /ws HTTP/1.1\r\n\ + Connection: Upgrade\r\n\ + Upgrade: websocket\r\n\ + \r\n\ + ", + ); + + pin!(h1); + + assert!(h1.as_mut().poll(cx).is_ready()); + assert!(matches!(&h1.inner, DispatcherState::Upgrade { .. })); + + // polls: manual shutdown + assert_eq!(h1.poll_count, 2); + }) + .await; +} + +// fix in #2624 reverted temporarily +// complete fix tracked in #2745 +#[ignore] +#[actix_rt::test] +async fn handler_drop_payload() { + let _ = env_logger::try_init(); + + let mut buf = TestBuffer::new(http_msg( + r" + POST /drop-payload HTTP/1.1 + Content-Length: 3 + + abc + ", + )); + + let services = HttpFlow::new( + drop_payload_service(), + ExpectHandler, + None::, + ); + + let h1 = Dispatcher::new( + buf.clone(), + services, + ServiceConfig::default(), + None, + OnConnectData::default(), + ); + pin!(h1); + + lazy(|cx| { + assert!(h1.as_mut().poll(cx).is_pending()); + + // polls: manual + assert_eq!(h1.poll_count, 1); + + let mut res = BytesMut::from(buf.take_write_buf().as_ref()); + stabilize_date_header(&mut res); + let res = &res[..]; + + let exp = http_msg( + r" + HTTP/1.1 200 OK + content-length: 15 + date: Thu, 01 Jan 1970 12:34:56 UTC + + payload dropped + ", + ); + + assert_eq!( + res, + exp, + "\nexpected response not in write buffer:\n\ + response: {:?}\n\ + expected: {:?}", + String::from_utf8_lossy(res), + String::from_utf8_lossy(&exp) + ); + + if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() { + assert!(inner.state.is_none()); + } + }) + .await; + + lazy(|cx| { + // add message that claims to have payload longer than provided + buf.extend_read_buf(http_msg( + r" + POST /drop-payload HTTP/1.1 + Content-Length: 200 + + abc + ", + )); + + assert!(h1.as_mut().poll(cx).is_pending()); + + // polls: manual => manual + assert_eq!(h1.poll_count, 2); + + let mut res = BytesMut::from(buf.take_write_buf().as_ref()); + stabilize_date_header(&mut res); + let res = &res[..]; + + // expect response immediately even though request side has not finished reading payload + let exp = http_msg( + r" + HTTP/1.1 200 OK + content-length: 15 + date: Thu, 01 Jan 1970 12:34:56 UTC + + payload dropped + ", + ); + + assert_eq!( + res, + exp, + "\nexpected response not in write buffer:\n\ + response: {:?}\n\ + expected: {:?}", + String::from_utf8_lossy(res), + String::from_utf8_lossy(&exp) + ); + }) + .await; + + lazy(|cx| { + 
assert!(h1.as_mut().poll(cx).is_ready()); + + // polls: manual => manual => manual + assert_eq!(h1.poll_count, 3); + + let mut res = BytesMut::from(buf.take_write_buf().as_ref()); + stabilize_date_header(&mut res); + let res = &res[..]; + + // expect that unrequested error response is sent back since connection could not be cleaned + let exp = http_msg( + r" + HTTP/1.1 500 Internal Server Error + content-length: 0 + connection: close + date: Thu, 01 Jan 1970 12:34:56 UTC + + ", + ); + + assert_eq!( + res, + exp, + "\nexpected response not in write buffer:\n\ + response: {:?}\n\ + expected: {:?}", + String::from_utf8_lossy(res), + String::from_utf8_lossy(&exp) + ); + }) + .await; +} + +fn http_msg(msg: impl AsRef) -> BytesMut { + let mut msg = msg + .as_ref() + .trim() + .split('\n') + .map(|line| [line.trim_start(), "\r"].concat()) + .collect::>() + .join("\n"); + + // remove trailing \r + msg.pop(); + + if !msg.is_empty() && !msg.contains("\r\n\r\n") { + msg.push_str("\r\n\r\n"); + } + + BytesMut::from(msg.as_bytes()) +} + +#[test] +fn http_msg_creates_msg() { + assert_eq!(http_msg(r""), ""); + + assert_eq!( + http_msg( + r" + POST / HTTP/1.1 + Content-Length: 3 + + abc + " + ), + "POST / HTTP/1.1\r\nContent-Length: 3\r\n\r\nabc" + ); + + assert_eq!( + http_msg( + r" + GET / HTTP/1.1 + Content-Length: 3 + + " + ), + "GET / HTTP/1.1\r\nContent-Length: 3\r\n\r\n" + ); +} diff --git a/actix-http/src/h1/encoder.rs b/actix-http/src/h1/encoder.rs index eaabcb687..abe396ce2 100644 --- a/actix-http/src/h1/encoder.rs +++ b/actix-http/src/h1/encoder.rs @@ -1,24 +1,26 @@ -use std::io::Write; -use std::marker::PhantomData; -use std::ptr::copy_nonoverlapping; -use std::slice::from_raw_parts_mut; -use std::{cmp, io}; +use std::{ + cmp, + io::{self, Write as _}, + marker::PhantomData, + ptr::copy_nonoverlapping, + slice::from_raw_parts_mut, +}; use bytes::{BufMut, BytesMut}; -use crate::body::BodySize; -use crate::config::ServiceConfig; -use crate::header::{map::Value, HeaderName}; -use crate::helpers; -use crate::http::header::{CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING}; -use crate::http::{HeaderMap, StatusCode, Version}; -use crate::message::{ConnectionType, RequestHeadType}; -use crate::response::Response; +use crate::{ + body::BodySize, + header::{ + map::Value, HeaderMap, HeaderName, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING, + }, + helpers, ConnectionType, RequestHeadType, Response, ServiceConfig, StatusCode, Version, +}; const AVERAGE_HEADER_SIZE: usize = 30; #[derive(Debug)] pub(crate) struct MessageEncoder { + #[allow(dead_code)] pub length: BodySize, pub te: TransferEncoding, _phantom: PhantomData, @@ -54,7 +56,7 @@ pub(crate) trait MessageType: Sized { dst: &mut BytesMut, version: Version, mut length: BodySize, - ctype: ConnectionType, + conn_type: ConnectionType, config: &ServiceConfig, ) -> io::Result<()> { let chunked = self.chunked(); @@ -69,17 +71,28 @@ pub(crate) trait MessageType: Sized { | StatusCode::PROCESSING | StatusCode::NO_CONTENT => { // skip content-length and transfer-encoding headers - // See https://tools.ietf.org/html/rfc7230#section-3.3.1 - // and https://tools.ietf.org/html/rfc7230#section-3.3.2 + // see https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.1 + // and https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.2 skip_len = true; length = BodySize::None } + + StatusCode::NOT_MODIFIED => { + // 304 responses should never have a body but should retain a manually set + // content-length header + // see 
https://datatracker.ietf.org/doc/html/rfc7232#section-4.1 + skip_len = false; + length = BodySize::None; + } + _ => {} } } + match length { BodySize::Stream => { if chunked { + skip_len = true; if camel_case { dst.put_slice(b"\r\nTransfer-Encoding: chunked\r\n") } else { @@ -90,19 +103,14 @@ pub(crate) trait MessageType: Sized { dst.put_slice(b"\r\n"); } } - BodySize::Empty => { - if camel_case { - dst.put_slice(b"\r\nContent-Length: 0\r\n"); - } else { - dst.put_slice(b"\r\ncontent-length: 0\r\n"); - } - } - BodySize::Sized(len) => helpers::write_content_length(len, dst), + BodySize::Sized(0) if camel_case => dst.put_slice(b"\r\nContent-Length: 0\r\n"), + BodySize::Sized(0) => dst.put_slice(b"\r\ncontent-length: 0\r\n"), + BodySize::Sized(len) => helpers::write_content_length(len, dst, camel_case), BodySize::None => dst.put_slice(b"\r\n"), } // Connection - match ctype { + match conn_type { ConnectionType::Upgrade => dst.put_slice(b"connection: upgrade\r\n"), ConnectionType::KeepAlive if version < Version::HTTP_11 => { if camel_case { @@ -144,7 +152,6 @@ pub(crate) trait MessageType: Sized { let k = key.as_str().as_bytes(); let k_len = k.len(); - // TODO: drain? for val in value.iter() { let v = val.as_ref(); let v_len = v.len(); @@ -173,7 +180,7 @@ pub(crate) trait MessageType: Sized { unsafe { if camel_case { // use Camel-Case headers - write_camel_case(k, from_raw_parts_mut(buf, k_len)); + write_camel_case(k, buf, k_len); } else { write_data(k, buf, k_len); } @@ -203,14 +210,14 @@ pub(crate) trait MessageType: Sized { dst.advance_mut(pos); } - // optimized date header, set_date writes \r\n if !has_date { - config.set_date(dst); - } else { - // msg eof - dst.extend_from_slice(b"\r\n"); + // optimized date header, write_date_header writes its own \r\n + config.write_date_header(dst, camel_case); } + // end-of-headers marker + dst.extend_from_slice(b"\r\n"); + Ok(()) } @@ -250,6 +257,12 @@ impl MessageType for Response<()> { None } + fn camel_case(&self) -> bool { + self.head() + .flags + .contains(crate::message::Flags::CAMEL_CASE) + } + fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()> { let head = self.head(); let reason = head.reason().as_bytes(); @@ -287,7 +300,7 @@ impl MessageType for RequestHeadType { let head = self.as_ref(); dst.reserve(256 + head.headers.len() * AVERAGE_HEADER_SIZE); write!( - helpers::Writer(dst), + helpers::MutWriter(dst), "{} {} {}", head.method, head.uri.path_and_query().map(|u| u.as_str()).unwrap_or("/"), @@ -297,11 +310,7 @@ impl MessageType for RequestHeadType { Version::HTTP_11 => "HTTP/1.1", Version::HTTP_2 => "HTTP/2.0", Version::HTTP_3 => "HTTP/3.0", - _ => - return Err(io::Error::new( - io::ErrorKind::Other, - "unsupported version" - )), + _ => return Err(io::Error::new(io::ErrorKind::Other, "unsupported version")), } ) .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) @@ -309,16 +318,17 @@ impl MessageType for RequestHeadType { } impl MessageEncoder { - /// Encode message + /// Encode chunk. pub fn encode_chunk(&mut self, msg: &[u8], buf: &mut BytesMut) -> io::Result { self.te.encode(msg, buf) } - /// Encode eof + /// Encode EOF. pub fn encode_eof(&mut self, buf: &mut BytesMut) -> io::Result<()> { self.te.encode_eof(buf) } + /// Encode message. 
pub fn encode( &mut self, dst: &mut BytesMut, @@ -327,13 +337,13 @@ impl MessageEncoder { stream: bool, version: Version, length: BodySize, - ctype: ConnectionType, + conn_type: ConnectionType, config: &ServiceConfig, ) -> io::Result<()> { // transfer encoding if !head { self.te = match length { - BodySize::Empty => TransferEncoding::empty(), + BodySize::Sized(0) => TransferEncoding::empty(), BodySize::Sized(len) => TransferEncoding::length(len), BodySize::Stream => { if message.chunked() && !stream { @@ -349,7 +359,7 @@ impl MessageEncoder { } message.encode_status(dst)?; - message.encode_headers(dst, version, length, ctype, config) + message.encode_headers(dst, version, length, conn_type, config) } } @@ -363,10 +373,12 @@ pub(crate) struct TransferEncoding { enum TransferEncodingKind { /// An Encoder for when Transfer-Encoding includes `chunked`. Chunked(bool), + /// An Encoder for when Content-Length is set. /// /// Enforces that the body is not longer than the Content-Length header. Length(u64), + /// An Encoder for when Content-Length is not known. /// /// Application decides when to stop writing. @@ -420,7 +432,7 @@ impl TransferEncoding { *eof = true; buf.extend_from_slice(b"0\r\n\r\n"); } else { - writeln!(helpers::Writer(buf), "{:X}\r", msg.len()) + writeln!(helpers::MutWriter(buf), "{:X}\r", msg.len()) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; buf.reserve(msg.len() + 2); @@ -438,7 +450,7 @@ impl TransferEncoding { buf.extend_from_slice(&msg[..len as usize]); - *remaining -= len as u64; + *remaining -= len; Ok(*remaining == 0) } else { Ok(true) @@ -471,15 +483,22 @@ impl TransferEncoding { } /// # Safety -/// Callers must ensure that the given length matches given value length. +/// Callers must ensure that the given `len` matches the given `value` length and that `buf` is +/// valid for writes of at least `len` bytes. unsafe fn write_data(value: &[u8], buf: *mut u8, len: usize) { debug_assert_eq!(value.len(), len); copy_nonoverlapping(value.as_ptr(), buf, len); } -fn write_camel_case(value: &[u8], buffer: &mut [u8]) { +/// # Safety +/// Callers must ensure that the given `len` matches the given `value` length and that `buf` is +/// valid for writes of at least `len` bytes. 
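For reference, the chunked `TransferEncoding` above frames each chunk as a hexadecimal length line, the chunk bytes, and a trailing CRLF, then terminates the stream with a zero-length chunk. A standalone sketch of that wire format (not the crate's encoder):

fn chunk(msg: &[u8], out: &mut Vec<u8>) {
    // hex length prefix, then the payload, each terminated by CRLF
    out.extend_from_slice(format!("{:X}\r\n", msg.len()).as_bytes());
    out.extend_from_slice(msg);
    out.extend_from_slice(b"\r\n");
}

fn main() {
    let mut out = Vec::new();
    chunk(b"hello", &mut out);
    chunk(b", world", &mut out);
    out.extend_from_slice(b"0\r\n\r\n"); // end-of-stream marker
    assert_eq!(out, b"5\r\nhello\r\n7\r\n, world\r\n0\r\n\r\n");
}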
+unsafe fn write_camel_case(value: &[u8], buf: *mut u8, len: usize) { // first copy entire (potentially wrong) slice to output - buffer[..value.len()].copy_from_slice(value); + write_data(value, buf, len); + + // SAFETY: We just initialized the buffer with `value` + let buffer = from_raw_parts_mut(buf, len); let mut iter = value.iter(); @@ -498,6 +517,7 @@ fn write_camel_case(value: &[u8], buffer: &mut [u8]) { if let Some(c @ b'a'..=b'z') = iter.next() { buffer[index] = c & 0b1101_1111; } + index += 1; } index += 1; @@ -509,11 +529,13 @@ mod tests { use std::rc::Rc; use bytes::Bytes; - use http::header::AUTHORIZATION; + use http::header::{AUTHORIZATION, UPGRADE_INSECURE_REQUESTS}; use super::*; - use crate::http::header::{HeaderValue, CONTENT_TYPE}; - use crate::RequestHead; + use crate::{ + header::{HeaderValue, CONTENT_TYPE}, + RequestHead, + }; #[test] fn test_chunked_te() { @@ -538,22 +560,25 @@ mod tests { head.headers .insert(CONTENT_TYPE, HeaderValue::from_static("plain/text")); + head.headers + .insert(UPGRADE_INSECURE_REQUESTS, HeaderValue::from_static("1")); + let mut head = RequestHeadType::Owned(head); let _ = head.encode_headers( &mut bytes, Version::HTTP_11, - BodySize::Empty, + BodySize::Sized(0), ConnectionType::Close, &ServiceConfig::default(), ); - let data = - String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap(); + let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap(); assert!(data.contains("Content-Length: 0\r\n")); assert!(data.contains("Connection: close\r\n")); assert!(data.contains("Content-Type: plain/text\r\n")); assert!(data.contains("Date: date\r\n")); + assert!(data.contains("Upgrade-Insecure-Requests: 1\r\n")); let _ = head.encode_headers( &mut bytes, @@ -562,8 +587,7 @@ mod tests { ConnectionType::KeepAlive, &ServiceConfig::default(), ); - let data = - String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap(); + let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap(); assert!(data.contains("Transfer-Encoding: chunked\r\n")); assert!(data.contains("Content-Type: plain/text\r\n")); assert!(data.contains("Date: date\r\n")); @@ -584,8 +608,7 @@ mod tests { ConnectionType::KeepAlive, &ServiceConfig::default(), ); - let data = - String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap(); + let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap(); assert!(data.contains("transfer-encoding: chunked\r\n")); assert!(data.contains("content-type: xml\r\n")); assert!(data.contains("content-type: plain/text\r\n")); @@ -614,12 +637,11 @@ mod tests { let _ = head.encode_headers( &mut bytes, Version::HTTP_11, - BodySize::Empty, + BodySize::Sized(0), ConnectionType::Close, &ServiceConfig::default(), ); - let data = - String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap(); + let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap(); assert!(data.contains("content-length: 0\r\n")); assert!(data.contains("connection: close\r\n")); assert!(data.contains("authorization: another authorization\r\n")); @@ -642,8 +664,7 @@ mod tests { ConnectionType::Upgrade, &ServiceConfig::default(), ); - let data = - String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap(); + let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap(); assert!(!data.contains("content-length: 0\r\n")); assert!(!data.contains("transfer-encoding: chunked\r\n")); } diff --git a/actix-http/src/h1/expect.rs 
b/actix-http/src/h1/expect.rs index bb8e28e95..bef281e59 100644 --- a/actix-http/src/h1/expect.rs +++ b/actix-http/src/h1/expect.rs @@ -1,8 +1,7 @@ use actix_service::{Service, ServiceFactory}; use actix_utils::future::{ready, Ready}; -use crate::error::Error; -use crate::request::Request; +use crate::{Error, Request}; pub struct ExpectHandler; diff --git a/actix-http/src/h1/mod.rs b/actix-http/src/h1/mod.rs index 7e6df6ceb..9e44608d8 100644 --- a/actix-http/src/h1/mod.rs +++ b/actix-http/src/h1/mod.rs @@ -1,32 +1,40 @@ //! HTTP/1 protocol implementation. + use bytes::{Bytes, BytesMut}; +mod chunked; mod client; mod codec; mod decoder; mod dispatcher; +#[cfg(test)] +mod dispatcher_tests; mod encoder; mod expect; mod payload; mod service; +mod timer; mod upgrade; mod utils; -pub use self::client::{ClientCodec, ClientPayloadCodec}; -pub use self::codec::Codec; -pub use self::dispatcher::Dispatcher; -pub use self::expect::ExpectHandler; -pub use self::payload::Payload; -pub use self::service::{H1Service, H1ServiceHandler}; -pub use self::upgrade::UpgradeHandler; -pub use self::utils::SendResponse; +pub use self::{ + client::{ClientCodec, ClientPayloadCodec}, + codec::Codec, + dispatcher::Dispatcher, + expect::ExpectHandler, + payload::Payload, + service::{H1Service, H1ServiceHandler}, + upgrade::UpgradeHandler, + utils::SendResponse, +}; #[derive(Debug)] /// Codec message pub enum Message { - /// Http message + /// HTTP message. Item(T), - /// Payload chunk + + /// Payload chunk. Chunk(Option), } @@ -57,7 +65,7 @@ pub(crate) fn reserve_readbuf(src: &mut BytesMut) { #[cfg(test)] mod tests { use super::*; - use crate::request::Request; + use crate::Request; impl Message { pub fn message(self) -> Request { diff --git a/actix-http/src/h1/payload.rs b/actix-http/src/h1/payload.rs index e72493fa2..2ad3a14a3 100644 --- a/actix-http/src/h1/payload.rs +++ b/actix-http/src/h1/payload.rs @@ -1,9 +1,12 @@ //! Payload stream -use std::cell::RefCell; -use std::collections::VecDeque; -use std::pin::Pin; -use std::rc::{Rc, Weak}; -use std::task::{Context, Poll, Waker}; + +use std::{ + cell::RefCell, + collections::VecDeque, + pin::Pin, + rc::{Rc, Weak}, + task::{Context, Poll, Waker}, +}; use bytes::Bytes; use futures_core::Stream; @@ -13,7 +16,7 @@ use crate::error::PayloadError; /// max buffer size 32k pub(crate) const MAX_BUFFER_SIZE: usize = 32_768; -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Eq)] pub enum PayloadStatus { Read, Pause, @@ -22,39 +25,32 @@ pub enum PayloadStatus { /// Buffered stream of bytes chunks /// -/// Payload stores chunks in a vector. First chunk can be received with -/// `.readany()` method. Payload stream is not thread safe. Payload does not -/// notify current task when new data is available. +/// Payload stores chunks in a vector. First chunk can be received with `poll_next`. Payload does +/// not notify current task when new data is available. /// -/// Payload stream can be used as `Response` body stream. +/// Payload can be used as `Response` body stream. #[derive(Debug)] pub struct Payload { inner: Rc>, } impl Payload { - /// Create payload stream. + /// Creates a payload stream. /// - /// This method construct two objects responsible for bytes stream - /// generation. 
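A standalone sketch of the sender/receiver split described in the doc comment above, assuming `h1::Payload::create` and the `feed_data`/`feed_eof` methods on the sender half keep the shape implied here (those method names are not shown in this hunk; `futures-executor` is assumed only to drive the example):

use actix_http::h1::Payload;
use bytes::Bytes;
use futures_util::StreamExt as _;

fn main() {
    futures_executor::block_on(async {
        // sender half feeds bytes in; receiver half is a `Stream` of chunks
        let (mut sender, mut payload) = Payload::create(false);

        sender.feed_data(Bytes::from_static(b"chunk"));
        sender.feed_eof();

        assert_eq!(
            payload.next().await.unwrap().unwrap(),
            Bytes::from_static(b"chunk")
        );
        assert!(payload.next().await.is_none());
    });
}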
- /// - /// * `PayloadSender` - *Sender* side of the stream - /// - /// * `Payload` - *Receiver* side of the stream + /// This method construct two objects responsible for bytes stream generation: + /// - `PayloadSender` - *Sender* side of the stream + /// - `Payload` - *Receiver* side of the stream pub fn create(eof: bool) -> (PayloadSender, Payload) { let shared = Rc::new(RefCell::new(Inner::new(eof))); ( - PayloadSender { - inner: Rc::downgrade(&shared), - }, + PayloadSender::new(Rc::downgrade(&shared)), Payload { inner: shared }, ) } - /// Create empty payload - #[doc(hidden)] - pub fn empty() -> Payload { + /// Creates an empty payload. + pub(crate) fn empty() -> Payload { Payload { inner: Rc::new(RefCell::new(Inner::new(true))), } @@ -77,14 +73,6 @@ impl Payload { pub fn unread_data(&mut self, data: Bytes) { self.inner.borrow_mut().unread_data(data); } - - #[inline] - pub fn readany( - &mut self, - cx: &mut Context<'_>, - ) -> Poll>> { - self.inner.borrow_mut().readany(cx) - } } impl Stream for Payload { @@ -94,7 +82,7 @@ impl Stream for Payload { self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll>> { - self.inner.borrow_mut().readany(cx) + Pin::new(&mut *self.inner.borrow_mut()).poll_next(cx) } } @@ -104,6 +92,10 @@ pub struct PayloadSender { } impl PayloadSender { + fn new(inner: Weak>) -> Self { + Self { inner } + } + #[inline] pub fn set_error(&mut self, err: PayloadError) { if let Some(shared) = self.inner.upgrade() { @@ -125,6 +117,7 @@ impl PayloadSender { } } + #[allow(clippy::needless_pass_by_ref_mut)] #[inline] pub fn need_read(&self, cx: &mut Context<'_>) -> PayloadStatus { // we check need_read only if Payload (other side) is alive, @@ -182,12 +175,11 @@ impl Inner { /// Register future waiting data from payload. /// Waker would be used in `Inner::wake` - fn register(&mut self, cx: &mut Context<'_>) { + fn register(&mut self, cx: &Context<'_>) { if self .task .as_ref() - .map(|w| !cx.waker().will_wake(w)) - .unwrap_or(true) + .map_or(true, |w| !cx.waker().will_wake(w)) { self.task = Some(cx.waker().clone()); } @@ -195,12 +187,11 @@ impl Inner { // Register future feeding data to payload. 
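The `register`/`register_io` helpers here only clone the waker when `will_wake` reports that the stored one would not reach the current task. A standalone sketch of that idiom (`noop_waker` from `futures-util` is used only to drive the demo):

use std::task::{Context, Waker};

use futures_util::task::noop_waker;

/// Only replace the stored waker when it would not wake the current task,
/// avoiding a `Waker::clone` on every poll.
fn register(slot: &mut Option<Waker>, cx: &Context<'_>) {
    if slot.as_ref().map_or(true, |w| !cx.waker().will_wake(w)) {
        *slot = Some(cx.waker().clone());
    }
}

fn main() {
    let waker = noop_waker();
    let cx = Context::from_waker(&waker);

    let mut slot = None;
    register(&mut slot, &cx);
    assert!(slot.is_some());

    // same task again: `will_wake` is intended to short-circuit the clone
    register(&mut slot, &cx);
}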
/// Waker would be used in `Inner::wake_io` - fn register_io(&mut self, cx: &mut Context<'_>) { + fn register_io(&mut self, cx: &Context<'_>) { if self .io_task .as_ref() - .map(|w| !cx.waker().will_wake(w)) - .unwrap_or(true) + .map_or(true, |w| !cx.waker().will_wake(w)) { self.io_task = Some(cx.waker().clone()); } @@ -229,9 +220,9 @@ impl Inner { self.len } - fn readany( - &mut self, - cx: &mut Context<'_>, + fn poll_next( + mut self: Pin<&mut Self>, + cx: &Context<'_>, ) -> Poll>> { if let Some(data) = self.items.pop_front() { self.len -= data.len(); @@ -262,8 +253,15 @@ impl Inner { #[cfg(test)] mod tests { - use super::*; use actix_utils::future::poll_fn; + use static_assertions::{assert_impl_all, assert_not_impl_any}; + + use super::*; + + assert_impl_all!(Payload: Unpin); + assert_not_impl_any!(Payload: Send, Sync); + + assert_impl_all!(Inner: Unpin, Send, Sync); #[actix_rt::test] async fn test_unread_data() { @@ -275,7 +273,10 @@ mod tests { assert_eq!( Bytes::from("data"), - poll_fn(|cx| payload.readany(cx)).await.unwrap().unwrap() + poll_fn(|cx| Pin::new(&mut payload).poll_next(cx)) + .await + .unwrap() + .unwrap() ); } } diff --git a/actix-http/src/h1/service.rs b/actix-http/src/h1/service.rs index 1ab85cbf3..f2f8a0e48 100644 --- a/actix-http/src/h1/service.rs +++ b/actix-http/src/h1/service.rs @@ -1,7 +1,10 @@ -use std::marker::PhantomData; -use std::rc::Rc; -use std::task::{Context, Poll}; -use std::{fmt, net}; +use std::{ + fmt, + marker::PhantomData, + net, + rc::Rc, + task::{Context, Poll}, +}; use actix_codec::{AsyncRead, AsyncWrite, Framed}; use actix_rt::net::TcpStream; @@ -10,18 +13,16 @@ use actix_service::{ }; use actix_utils::future::ready; use futures_core::future::LocalBoxFuture; +use tracing::error; -use crate::body::MessageBody; -use crate::config::ServiceConfig; -use crate::error::{DispatchError, Error}; -use crate::request::Request; -use crate::response::Response; -use crate::service::HttpServiceHandler; -use crate::{ConnectCallback, OnConnectData}; - -use super::codec::Codec; -use super::dispatcher::Dispatcher; -use super::{ExpectHandler, UpgradeHandler}; +use super::{codec::Codec, dispatcher::Dispatcher, ExpectHandler, UpgradeHandler}; +use crate::{ + body::{BoxBody, MessageBody}, + config::ServiceConfig, + error::DispatchError, + service::HttpServiceHandler, + ConnectCallback, OnConnectData, Request, Response, +}; /// `ServiceFactory` implementation for HTTP1 transport pub struct H1Service { @@ -36,7 +37,7 @@ pub struct H1Service { impl H1Service where S: ServiceFactory, - S::Error: Into, + S::Error: Into>, S::InitError: fmt::Debug, S::Response: Into>, B: MessageBody, @@ -61,33 +62,27 @@ impl H1Service where S: ServiceFactory, S::Future: 'static, - S::Error: Into, + S::Error: Into>, S::InitError: fmt::Debug, S::Response: Into>, B: MessageBody, - B::Error: Into, X: ServiceFactory, X::Future: 'static, - X::Error: Into, + X::Error: Into>, X::InitError: fmt::Debug, U: ServiceFactory<(Request, Framed), Config = (), Response = ()>, U::Future: 'static, - U::Error: fmt::Display + Into, + U::Error: fmt::Display + Into>, U::InitError: fmt::Debug, { /// Create simple tcp stream service pub fn tcp( self, - ) -> impl ServiceFactory< - TcpStream, - Config = (), - Response = (), - Error = DispatchError, - InitError = (), - > { + ) -> impl ServiceFactory + { fn_service(|io: TcpStream| { let peer_addr = io.peer_addr().ok(); ready(Ok((io, peer_addr))) @@ -98,28 +93,29 @@ where #[cfg(feature = "openssl")] mod openssl { - use super::*; - - use actix_service::ServiceFactoryExt; 
use actix_tls::accept::{ - openssl::{Acceptor, SslAcceptor, SslError, TlsStream}, + openssl::{ + reexports::{Error as SslError, SslAcceptor}, + Acceptor, TlsStream, + }, TlsError, }; + use super::*; + impl H1Service, S, B, X, U> where S: ServiceFactory, S::Future: 'static, - S::Error: Into, + S::Error: Into>, S::InitError: fmt::Debug, S::Response: Into>, B: MessageBody, - B::Error: Into, X: ServiceFactory, X::Future: 'static, - X::Error: Into, + X::Error: Into>, X::InitError: fmt::Debug, U: ServiceFactory< @@ -128,10 +124,10 @@ mod openssl { Response = (), >, U::Future: 'static, - U::Error: fmt::Display + Into, + U::Error: fmt::Display + Into>, U::InitError: fmt::Debug, { - /// Create openssl based service + /// Create OpenSSL based service. pub fn openssl( self, acceptor: SslAcceptor, @@ -143,43 +139,44 @@ mod openssl { InitError = (), > { Acceptor::new(acceptor) - .map_err(TlsError::Tls) - .map_init_err(|_| panic!()) - .and_then(|io: TlsStream| { + .map_init_err(|_| { + unreachable!("TLS acceptor service factory does not error on init") + }) + .map_err(TlsError::into_service_error) + .map(|io: TlsStream| { let peer_addr = io.get_ref().peer_addr().ok(); - ready(Ok((io, peer_addr))) + (io, peer_addr) }) .and_then(self.map_err(TlsError::Service)) } } } -#[cfg(feature = "rustls")] -mod rustls { - use super::*; - +#[cfg(feature = "rustls-0_20")] +mod rustls_0_20 { use std::io; - use actix_service::ServiceFactoryExt; + use actix_service::ServiceFactoryExt as _; use actix_tls::accept::{ - rustls::{Acceptor, ServerConfig, TlsStream}, + rustls_0_20::{reexports::ServerConfig, Acceptor, TlsStream}, TlsError, }; + use super::*; + impl H1Service, S, B, X, U> where S: ServiceFactory, S::Future: 'static, - S::Error: Into, + S::Error: Into>, S::InitError: fmt::Debug, S::Response: Into>, B: MessageBody, - B::Error: Into, X: ServiceFactory, X::Future: 'static, - X::Error: Into, + X::Error: Into>, X::InitError: fmt::Debug, U: ServiceFactory< @@ -188,10 +185,10 @@ mod rustls { Response = (), >, U::Future: 'static, - U::Error: fmt::Display + Into, + U::Error: fmt::Display + Into>, U::InitError: fmt::Debug, { - /// Create rustls based service + /// Create Rustls v0.20 based service. pub fn rustls( self, config: ServerConfig, @@ -203,11 +200,196 @@ mod rustls { InitError = (), > { Acceptor::new(config) - .map_err(TlsError::Tls) - .map_init_err(|_| panic!()) - .and_then(|io: TlsStream| { + .map_init_err(|_| { + unreachable!("TLS acceptor service factory does not error on init") + }) + .map_err(TlsError::into_service_error) + .map(|io: TlsStream| { let peer_addr = io.get_ref().0.peer_addr().ok(); - ready(Ok((io, peer_addr))) + (io, peer_addr) + }) + .and_then(self.map_err(TlsError::Service)) + } + } +} + +#[cfg(feature = "rustls-0_21")] +mod rustls_0_21 { + use std::io; + + use actix_service::ServiceFactoryExt as _; + use actix_tls::accept::{ + rustls_0_21::{reexports::ServerConfig, Acceptor, TlsStream}, + TlsError, + }; + + use super::*; + + impl H1Service, S, B, X, U> + where + S: ServiceFactory, + S::Future: 'static, + S::Error: Into>, + S::InitError: fmt::Debug, + S::Response: Into>, + + B: MessageBody, + + X: ServiceFactory, + X::Future: 'static, + X::Error: Into>, + X::InitError: fmt::Debug, + + U: ServiceFactory< + (Request, Framed, Codec>), + Config = (), + Response = (), + >, + U::Future: 'static, + U::Error: fmt::Display + Into>, + U::InitError: fmt::Debug, + { + /// Create Rustls v0.21 based service. 
+ pub fn rustls_021( + self, + config: ServerConfig, + ) -> impl ServiceFactory< + TcpStream, + Config = (), + Response = (), + Error = TlsError, + InitError = (), + > { + Acceptor::new(config) + .map_init_err(|_| { + unreachable!("TLS acceptor service factory does not error on init") + }) + .map_err(TlsError::into_service_error) + .map(|io: TlsStream| { + let peer_addr = io.get_ref().0.peer_addr().ok(); + (io, peer_addr) + }) + .and_then(self.map_err(TlsError::Service)) + } + } +} + +#[cfg(feature = "rustls-0_22")] +mod rustls_0_22 { + use std::io; + + use actix_service::ServiceFactoryExt as _; + use actix_tls::accept::{ + rustls_0_22::{reexports::ServerConfig, Acceptor, TlsStream}, + TlsError, + }; + + use super::*; + + impl H1Service, S, B, X, U> + where + S: ServiceFactory, + S::Future: 'static, + S::Error: Into>, + S::InitError: fmt::Debug, + S::Response: Into>, + + B: MessageBody, + + X: ServiceFactory, + X::Future: 'static, + X::Error: Into>, + X::InitError: fmt::Debug, + + U: ServiceFactory< + (Request, Framed, Codec>), + Config = (), + Response = (), + >, + U::Future: 'static, + U::Error: fmt::Display + Into>, + U::InitError: fmt::Debug, + { + /// Create Rustls v0.22 based service. + pub fn rustls_0_22( + self, + config: ServerConfig, + ) -> impl ServiceFactory< + TcpStream, + Config = (), + Response = (), + Error = TlsError, + InitError = (), + > { + Acceptor::new(config) + .map_init_err(|_| { + unreachable!("TLS acceptor service factory does not error on init") + }) + .map_err(TlsError::into_service_error) + .map(|io: TlsStream| { + let peer_addr = io.get_ref().0.peer_addr().ok(); + (io, peer_addr) + }) + .and_then(self.map_err(TlsError::Service)) + } + } +} + +#[cfg(feature = "rustls-0_23")] +mod rustls_0_23 { + use std::io; + + use actix_service::ServiceFactoryExt as _; + use actix_tls::accept::{ + rustls_0_23::{reexports::ServerConfig, Acceptor, TlsStream}, + TlsError, + }; + + use super::*; + + impl H1Service, S, B, X, U> + where + S: ServiceFactory, + S::Future: 'static, + S::Error: Into>, + S::InitError: fmt::Debug, + S::Response: Into>, + + B: MessageBody, + + X: ServiceFactory, + X::Future: 'static, + X::Error: Into>, + X::InitError: fmt::Debug, + + U: ServiceFactory< + (Request, Framed, Codec>), + Config = (), + Response = (), + >, + U::Future: 'static, + U::Error: fmt::Display + Into>, + U::InitError: fmt::Debug, + { + /// Create Rustls v0.23 based service. 
+ pub fn rustls_0_23( + self, + config: ServerConfig, + ) -> impl ServiceFactory< + TcpStream, + Config = (), + Response = (), + Error = TlsError, + InitError = (), + > { + Acceptor::new(config) + .map_init_err(|_| { + unreachable!("TLS acceptor service factory does not error on init") + }) + .map_err(TlsError::into_service_error) + .map(|io: TlsStream| { + let peer_addr = io.get_ref().0.peer_addr().ok(); + (io, peer_addr) }) .and_then(self.map_err(TlsError::Service)) } @@ -217,7 +399,7 @@ mod rustls { impl H1Service where S: ServiceFactory, - S::Error: Into, + S::Error: Into>, S::Response: Into>, S::InitError: fmt::Debug, B: MessageBody, @@ -225,7 +407,7 @@ where pub fn expect(self, expect: X1) -> H1Service where X1: ServiceFactory, - X1::Error: Into, + X1::Error: Into>, X1::InitError: fmt::Debug, { H1Service { @@ -261,28 +443,26 @@ where } } -impl ServiceFactory<(T, Option)> - for H1Service +impl ServiceFactory<(T, Option)> for H1Service where T: AsyncRead + AsyncWrite + Unpin + 'static, S: ServiceFactory, S::Future: 'static, - S::Error: Into, + S::Error: Into>, S::Response: Into>, S::InitError: fmt::Debug, B: MessageBody, - B::Error: Into, X: ServiceFactory, X::Future: 'static, - X::Error: Into, + X::Error: Into>, X::InitError: fmt::Debug, U: ServiceFactory<(Request, Framed), Config = (), Response = ()>, U::Future: 'static, - U::Error: fmt::Display + Into, + U::Error: fmt::Display + Into>, U::InitError: fmt::Debug, { type Response = (); @@ -302,13 +482,13 @@ where Box::pin(async move { let expect = expect .await - .map_err(|e| log::error!("Init http expect service error: {:?}", e))?; + .map_err(|e| error!("Init http expect service error: {:?}", e))?; let upgrade = match upgrade { Some(upgrade) => { - let upgrade = upgrade.await.map_err(|e| { - log::error!("Init http upgrade service error: {:?}", e) - })?; + let upgrade = upgrade + .await + .map_err(|e| error!("Init http upgrade service error: {:?}", e))?; Some(upgrade) } None => None, @@ -316,7 +496,7 @@ where let service = service .await - .map_err(|e| log::error!("Init http service error: {:?}", e))?; + .map_err(|e| error!("Init http service error: {:?}", e))?; Ok(H1ServiceHandler::new( cfg, @@ -332,45 +512,35 @@ where /// `Service` implementation for HTTP/1 transport pub type H1ServiceHandler = HttpServiceHandler; -impl Service<(T, Option)> - for HttpServiceHandler +impl Service<(T, Option)> for HttpServiceHandler where T: AsyncRead + AsyncWrite + Unpin, S: Service, - S::Error: Into, + S::Error: Into>, S::Response: Into>, B: MessageBody, - B::Error: Into, X: Service, - X::Error: Into, + X::Error: Into>, U: Service<(Request, Framed), Response = ()>, - U::Error: fmt::Display + Into, + U::Error: fmt::Display + Into>, { type Response = (); type Error = DispatchError; type Future = Dispatcher; fn poll_ready(&self, cx: &mut Context<'_>) -> Poll> { - self._poll_ready(cx).map_err(|e| { - log::error!("HTTP/1 service readiness error: {:?}", e); - DispatchError::Service(e) + self._poll_ready(cx).map_err(|err| { + error!("HTTP/1 service readiness error: {:?}", err); + DispatchError::Service(err) }) } fn call(&self, (io, addr): (T, Option)) -> Self::Future { - let on_connect_data = - OnConnectData::from_io(&io, self.on_connect_ext.as_deref()); - - Dispatcher::new( - io, - self.cfg.clone(), - self.flow.clone(), - on_connect_data, - addr, - ) + let conn_data = OnConnectData::from_io(&io, self.on_connect_ext.as_deref()); + Dispatcher::new(io, self.flow.clone(), self.cfg.clone(), addr, conn_data) } } diff --git a/actix-http/src/h1/timer.rs 
b/actix-http/src/h1/timer.rs new file mode 100644 index 000000000..0ea70a319 --- /dev/null +++ b/actix-http/src/h1/timer.rs @@ -0,0 +1,81 @@ +use std::{fmt, future::Future, pin::Pin, task::Context}; + +use actix_rt::time::{Instant, Sleep}; +use tracing::trace; + +#[derive(Debug)] +pub(super) enum TimerState { + Disabled, + Inactive, + Active { timer: Pin> }, +} + +impl TimerState { + pub(super) fn new(enabled: bool) -> Self { + if enabled { + Self::Inactive + } else { + Self::Disabled + } + } + + pub(super) fn is_enabled(&self) -> bool { + matches!(self, Self::Active { .. } | Self::Inactive) + } + + pub(super) fn set(&mut self, timer: Sleep, line: u32) { + if matches!(self, Self::Disabled) { + trace!("setting disabled timer from line {}", line); + } + + *self = Self::Active { + timer: Box::pin(timer), + }; + } + + pub(super) fn set_and_init(&mut self, cx: &mut Context<'_>, timer: Sleep, line: u32) { + self.set(timer, line); + self.init(cx); + } + + pub(super) fn clear(&mut self, line: u32) { + if matches!(self, Self::Disabled) { + trace!("trying to clear a disabled timer from line {}", line); + } + + if matches!(self, Self::Inactive) { + trace!("trying to clear an inactive timer from line {}", line); + } + + *self = Self::Inactive; + } + + pub(super) fn init(&mut self, cx: &mut Context<'_>) { + if let TimerState::Active { timer } = self { + let _ = timer.as_mut().poll(cx); + } + } +} + +impl fmt::Display for TimerState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + TimerState::Disabled => f.write_str("timer is disabled"), + TimerState::Inactive => f.write_str("timer is inactive"), + TimerState::Active { timer } => { + let deadline = timer.deadline(); + let now = Instant::now(); + + if deadline < now { + f.write_str("timer is active and has reached deadline") + } else { + write!( + f, + "timer is active and due to expire in {} milliseconds", + ((deadline - now).as_secs_f32() * 1000.0) + ) + } + } + } + } +} diff --git a/actix-http/src/h1/upgrade.rs b/actix-http/src/h1/upgrade.rs index e57ea8ae9..f25b0718b 100644 --- a/actix-http/src/h1/upgrade.rs +++ b/actix-http/src/h1/upgrade.rs @@ -2,9 +2,7 @@ use actix_codec::Framed; use actix_service::{Service, ServiceFactory}; use futures_core::future::LocalBoxFuture; -use crate::error::Error; -use crate::h1::Codec; -use crate::request::Request; +use crate::{h1::Codec, Error, Request}; pub struct UpgradeHandler; diff --git a/actix-http/src/h1/utils.rs b/actix-http/src/h1/utils.rs index 90e44daa4..5c11b1dab 100644 --- a/actix-http/src/h1/utils.rs +++ b/actix-http/src/h1/utils.rs @@ -1,22 +1,29 @@ -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; use actix_codec::{AsyncRead, AsyncWrite, Framed}; +use pin_project_lite::pin_project; -use crate::body::{BodySize, MessageBody}; -use crate::error::Error; -use crate::h1::{Codec, Message}; -use crate::response::Response; +use crate::{ + body::{BodySize, MessageBody}, + h1::{Codec, Message}, + Error, Response, +}; -/// Send HTTP/1 response -#[pin_project::pin_project] -pub struct SendResponse { - res: Option, BodySize)>>, - #[pin] - body: Option, - #[pin] - framed: Option>, +pin_project! 
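`TimerState::Active` stores the timer as `Pin<Box<Sleep>>` so a single allocation can be polled, reset, and reused across keep-alive cycles. A small sketch of that boxed-`Sleep` handling in isolation (the delays are arbitrary):

    use std::{pin::Pin, time::Duration};

    use actix_rt::time::{sleep, Instant, Sleep};

    #[actix_rt::main]
    async fn main() {
        // Box and pin the timer once, as TimerState::set does, so it can live in a struct
        // field and be polled repeatedly.
        let mut timer: Pin<Box<Sleep>> = Box::pin(sleep(Duration::from_millis(5)));
        timer.as_mut().await;

        // Later deadlines reuse the same allocation via `reset` instead of re-boxing.
        timer.as_mut().reset(Instant::now() + Duration::from_millis(5));
        timer.as_mut().await;

        println!("timer fired twice");
    }
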
{ + /// Send HTTP/1 response + pub struct SendResponse { + res: Option, BodySize)>>, + + #[pin] + body: Option, + + #[pin] + framed: Option>, + } } impl SendResponse @@ -38,7 +45,7 @@ where impl Future for SendResponse where T: AsyncRead + AsyncWrite + Unpin, - B: MessageBody + Unpin, + B: MessageBody, B::Error: Into, { type Output = Result, Error>; @@ -62,26 +69,24 @@ where .unwrap() .is_write_buf_full() { - let next = - // TODO: MSRV 1.51: poll_map_err - match this.body.as_mut().as_pin_mut().unwrap().poll_next(cx) { - Poll::Ready(Some(Ok(item))) => Poll::Ready(Some(item)), - Poll::Ready(Some(Err(err))) => { - return Poll::Ready(Err(err.into())) - } - Poll::Ready(None) => Poll::Ready(None), - Poll::Pending => Poll::Pending, - }; + let next = match this.body.as_mut().as_pin_mut().unwrap().poll_next(cx) { + Poll::Ready(Some(Ok(item))) => Poll::Ready(Some(item)), + Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err.into())), + Poll::Ready(None) => Poll::Ready(None), + Poll::Pending => Poll::Pending, + }; match next { Poll::Ready(item) => { // body is done when item is None body_done = item.is_none(); if body_done { - let _ = this.body.take(); + this.body.set(None); } let framed = this.framed.as_mut().as_pin_mut().unwrap(); - framed.write(Message::Chunk(item))?; + framed + .write(Message::Chunk(item)) + .map_err(|err| Error::new_send_response().with_cause(err))?; } Poll::Pending => body_ready = false, } @@ -92,7 +97,10 @@ where // flush write buffer if !framed.is_write_buf_empty() { - match framed.flush(cx)? { + match framed + .flush(cx) + .map_err(|err| Error::new_send_response().with_cause(err))? + { Poll::Ready(_) => { if body_ready { continue; @@ -106,7 +114,9 @@ where // send response if let Some(res) = this.res.take() { - framed.write(res)?; + framed + .write(res) + .map_err(|err| Error::new_send_response().with_cause(err))?; continue; } diff --git a/actix-http/src/h2/dispatcher.rs b/actix-http/src/h2/dispatcher.rs index baff20e51..400476c88 100644 --- a/actix-http/src/h2/dispatcher.rs +++ b/actix-http/src/h2/dispatcher.rs @@ -1,32 +1,35 @@ use std::{ cmp, + error::Error as StdError, future::Future, marker::PhantomData, net, - pin::Pin, + pin::{pin, Pin}, rc::Rc, task::{Context, Poll}, }; use actix_codec::{AsyncRead, AsyncWrite}; +use actix_rt::time::{sleep, Sleep}; use actix_service::Service; use actix_utils::future::poll_fn; use bytes::{Bytes, BytesMut}; use futures_core::ready; -use h2::server::{Connection, SendResponse}; -use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING}; -use log::{error, trace}; +use h2::{ + server::{Connection, SendResponse}, + Ping, PingPong, +}; use pin_project_lite::pin_project; -use crate::body::{BodySize, MessageBody}; -use crate::config::ServiceConfig; -use crate::error::Error; -use crate::message::ResponseHead; -use crate::payload::Payload; -use crate::request::Request; -use crate::response::Response; -use crate::service::HttpFlow; -use crate::OnConnectData; +use crate::{ + body::{BodySize, BoxBody, MessageBody}, + config::ServiceConfig, + header::{ + HeaderName, HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING, UPGRADE, + }, + service::HttpFlow, + Extensions, Method, OnConnectData, Payload, Request, Response, ResponseHead, +}; const CHUNK_SIZE: usize = 16_384; @@ -35,43 +38,71 @@ pin_project! 
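`SendResponse` switches from the `pin_project` proc-macro to the lighter `pin_project_lite::pin_project!` macro, which generates a `project()` method for structurally pinned fields. A toy version of that layout with illustrative names:

    use std::{
        future::Future,
        pin::Pin,
        task::{Context, Poll},
    };

    use pin_project_lite::pin_project;

    pin_project! {
        struct Labelled<F> {
            // the inner future needs structural pinning ...
            #[pin]
            fut: F,
            // ... while plain fields are borrowed normally through project()
            label: &'static str,
        }
    }

    impl<F: Future> Future for Labelled<F> {
        type Output = F::Output;

        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
            let this = self.project();
            println!("polling {}", this.label);
            this.fut.poll(cx)
        }
    }

    #[actix_rt::main]
    async fn main() {
        let fut = Labelled { fut: async { 2 + 2 }, label: "adder" };
        assert_eq!(fut.await, 4);
    }
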
{ pub struct Dispatcher { flow: Rc>, connection: Connection, - on_connect_data: OnConnectData, + conn_data: Option>, config: ServiceConfig, peer_addr: Option, - _phantom: PhantomData, + ping_pong: Option, + _phantom: PhantomData } } -impl Dispatcher { +impl Dispatcher +where + T: AsyncRead + AsyncWrite + Unpin, +{ pub(crate) fn new( + mut conn: Connection, flow: Rc>, - connection: Connection, - on_connect_data: OnConnectData, config: ServiceConfig, peer_addr: Option, + conn_data: OnConnectData, + timer: Option>>, ) -> Self { + let ping_pong = config.keep_alive().duration().map(|dur| H2PingPong { + timer: timer + .map(|mut timer| { + // reuse timer slot if it was initialized for handshake + timer.as_mut().reset((config.now() + dur).into()); + timer + }) + .unwrap_or_else(|| Box::pin(sleep(dur))), + in_flight: false, + ping_pong: conn.ping_pong().unwrap(), + }); + Self { flow, config, peer_addr, - connection, - on_connect_data, + connection: conn, + conn_data: conn_data.0.map(Rc::new), + ping_pong, _phantom: PhantomData, } } } +struct H2PingPong { + /// Handle to send ping frames from the peer. + ping_pong: PingPong, + + /// True when a ping has been sent and is waiting for a reply. + in_flight: bool, + + /// Timeout for pong response. + timer: Pin>, +} + impl Future for Dispatcher where T: AsyncRead + AsyncWrite + Unpin, S: Service, - S::Error: Into, + S::Error: Into>, S::Future: 'static, S::Response: Into>, B: MessageBody, - B::Error: Into, { type Output = Result<(), crate::error::DispatchError>; @@ -79,107 +110,147 @@ where fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); - while let Some((req, tx)) = - ready!(Pin::new(&mut this.connection).poll_accept(cx)?) - { - let (parts, body) = req.into_parts(); - let pl = crate::h2::Payload::new(body); - let pl = Payload::::H2(pl); - let mut req = Request::with_payload(pl); + loop { + match Pin::new(&mut this.connection).poll_accept(cx)? { + Poll::Ready(Some((req, tx))) => { + let (parts, body) = req.into_parts(); + let payload = crate::h2::Payload::new(body); + let pl = Payload::H2 { payload }; + let mut req = Request::with_payload(pl); + let head_req = parts.method == Method::HEAD; - let head = req.head_mut(); - head.uri = parts.uri; - head.method = parts.method; - head.version = parts.version; - head.headers = parts.headers.into(); - head.peer_addr = this.peer_addr; + let head = req.head_mut(); + head.uri = parts.uri; + head.method = parts.method; + head.version = parts.version; + head.headers = parts.headers.into(); + head.peer_addr = this.peer_addr; - // merge on_connect_ext data into request extensions - this.on_connect_data.merge_into(&mut req); + req.conn_data.clone_from(&this.conn_data); - let fut = this.flow.service.call(req); - let config = this.config.clone(); + let fut = this.flow.service.call(req); + let config = this.config.clone(); - // multiplex request handling with spawn task - actix_rt::spawn(async move { - // resolve service call and send response. - let res = match fut.await { - Ok(res) => handle_response(res.into(), tx, config).await, - Err(err) => { - let res = Response::from_error(err.into()); - handle_response(res, tx, config).await - } - }; + // multiplex request handling with spawn task + actix_rt::spawn(async move { + // resolve service call and send response. 
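As the "multiplex request handling with spawn task" comment notes, each accepted HTTP/2 request is handled on its own `actix_rt::spawn` task so the accept loop can keep polling the connection, with errors logged inside the task rather than propagated. A minimal sketch of that spawn-and-log shape (the handler body is a placeholder):

    #[actix_rt::main]
    async fn main() {
        let handle = actix_rt::spawn(async {
            // placeholder for `flow.service.call(req)` + `handle_response(..)`
            Ok::<_, std::io::Error>(())
        });

        // The real dispatcher never awaits the handle; it only logs failures inside the task.
        match handle.await {
            Ok(Ok(())) => println!("response sent"),
            Ok(Err(err)) => eprintln!("dispatch error: {err}"),
            Err(err) => eprintln!("task failed: {err}"),
        }
    }
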
+ let res = match fut.await { + Ok(res) => handle_response(res.into(), tx, config, head_req).await, + Err(err) => { + let res: Response = err.into(); + handle_response(res, tx, config, head_req).await + } + }; - // log error. - if let Err(err) = res { - match err { - DispatchError::SendResponse(err) => { - trace!("Error sending HTTP/2 response: {:?}", err) + // log error. + if let Err(err) = res { + match err { + DispatchError::SendResponse(err) => { + tracing::trace!("Error sending response: {err:?}"); + } + DispatchError::SendData(err) => { + tracing::warn!("Send data error: {err:?}"); + } + DispatchError::ResponseBody(err) => { + tracing::error!("Response payload stream error: {err:?}"); + } + } } - DispatchError::SendData(err) => warn!("{:?}", err), - DispatchError::ResponseBody(err) => { - error!("Response payload stream error: {:?}", err) - } - } + }); } - }); - } + Poll::Ready(None) => return Poll::Ready(Ok(())), - Poll::Ready(Ok(())) + Poll::Pending => match this.ping_pong.as_mut() { + Some(ping_pong) => loop { + if ping_pong.in_flight { + // When there is an in-flight ping-pong, poll pong and and keep-alive + // timer. On successful pong received, update keep-alive timer to + // determine the next timing of ping pong. + match ping_pong.ping_pong.poll_pong(cx)? { + Poll::Ready(_) => { + ping_pong.in_flight = false; + + let dead_line = this.config.keep_alive_deadline().unwrap(); + ping_pong.timer.as_mut().reset(dead_line.into()); + } + Poll::Pending => { + return ping_pong.timer.as_mut().poll(cx).map(|_| Ok(())); + } + } + } else { + // When there is no in-flight ping-pong, keep-alive timer is used to + // wait for next timing of ping-pong. Therefore, at this point it serves + // as an interval instead. + ready!(ping_pong.timer.as_mut().poll(cx)); + + ping_pong.ping_pong.send_ping(Ping::opaque())?; + + let dead_line = this.config.keep_alive_deadline().unwrap(); + ping_pong.timer.as_mut().reset(dead_line.into()); + + ping_pong.in_flight = true; + } + }, + None => return Poll::Pending, + }, + } + } } } enum DispatchError { SendResponse(h2::Error), SendData(h2::Error), - ResponseBody(Error), + ResponseBody(Box), } async fn handle_response( res: Response, mut tx: SendResponse, config: ServiceConfig, + head_req: bool, ) -> Result<(), DispatchError> where B: MessageBody, - B::Error: Into, { let (res, body) = res.replace_body(()); // prepare response. let mut size = body.size(); let res = prepare_response(config, res.head(), &mut size); - let eof = size.is_eof(); + let eof_or_head = size.is_eof() || head_req; // send response head and return on eof. let mut stream = tx - .send_response(res, eof) + .send_response(res, eof_or_head) .map_err(DispatchError::SendResponse)?; - if eof { + if eof_or_head { return Ok(()); } - // poll response body and send chunks to client. - actix_rt::pin!(body); + let mut body = pin!(body); + // poll response body and send chunks to client while let Some(res) = poll_fn(|cx| body.as_mut().poll_next(cx)).await { let mut chunk = res.map_err(|err| DispatchError::ResponseBody(err.into()))?; 'send: loop { + let chunk_size = cmp::min(chunk.len(), CHUNK_SIZE); + // reserve enough space and wait for stream ready. - stream.reserve_capacity(cmp::min(chunk.len(), CHUNK_SIZE)); + stream.reserve_capacity(chunk_size); match poll_fn(|cx| stream.poll_capacity(cx)).await { // No capacity left. drop body and return. None => return Ok(()), - Some(res) => { - // Split chuck to writeable size and send to client. 
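`handle_response` now stack-pins the body with `std::pin::pin!` (replacing `actix_rt::pin!`) and drains it through `poll_fn`. A self-contained sketch of that draining loop over an ordinary `Bytes` stream, assuming `futures-util` for the test stream and a toolchain with `std::pin::pin!` (Rust 1.68+):

    use std::{convert::Infallible, pin::pin};

    use actix_utils::future::poll_fn;
    use bytes::Bytes;
    use futures_core::Stream;

    #[actix_rt::main]
    async fn main() {
        // Stand-in for the response body: any Stream of Result<Bytes, E> works here.
        let body = futures_util::stream::iter([
            Ok::<_, Infallible>(Bytes::from_static(b"hello ")),
            Ok(Bytes::from_static(b"world")),
        ]);

        // Pin on the stack (no Box), then drive the stream chunk by chunk with poll_fn.
        let mut body = pin!(body);

        while let Some(chunk) = poll_fn(|cx| body.as_mut().poll_next(cx)).await {
            let chunk = chunk.expect("infallible");
            println!("sending {} bytes", chunk.len());
        }
    }
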
- let cap = res.map_err(DispatchError::SendData)?; + Some(Err(err)) => return Err(DispatchError::SendData(err)), + + Some(Ok(cap)) => { + // split chunk to writeable size and send to client let len = chunk.len(); - let bytes = chunk.split_to(cmp::min(cap, len)); + let bytes = chunk.split_to(cmp::min(len, cap)); stream .send_data(bytes, false) @@ -226,30 +297,43 @@ fn prepare_response( _ => {} } - let _ = match size { - BodySize::None | BodySize::Stream => None, - BodySize::Empty => res - .headers_mut() - .insert(CONTENT_LENGTH, HeaderValue::from_static("0")), + match size { + BodySize::None | BodySize::Stream => {} + + BodySize::Sized(0) => { + #[allow(clippy::declare_interior_mutable_const)] + const HV_ZERO: HeaderValue = HeaderValue::from_static("0"); + res.headers_mut().insert(CONTENT_LENGTH, HV_ZERO); + } + BodySize::Sized(len) => { let mut buf = itoa::Buffer::new(); res.headers_mut().insert( CONTENT_LENGTH, HeaderValue::from_str(buf.format(*len)).unwrap(), - ) + ); } }; // copy headers for (key, value) in head.headers.iter() { - match *key { - // TODO: consider skipping other headers according to: - // https://tools.ietf.org/html/rfc7540#section-8.1.2.2 - // omit HTTP/1.x only headers - CONNECTION | TRANSFER_ENCODING => continue, - CONTENT_LENGTH if skip_len => continue, - DATE => has_date = true, + match key { + // omit HTTP/1.x only headers according to: + // https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2 + &CONNECTION | &TRANSFER_ENCODING | &UPGRADE => continue, + + &CONTENT_LENGTH if skip_len => continue, + &DATE => has_date = true, + + // omit HTTP/1.x only headers according to: + // https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2 + hdr if hdr == HeaderName::from_static("keep-alive") + || hdr == HeaderName::from_static("proxy-connection") => + { + continue + } + _ => {} } @@ -259,7 +343,7 @@ fn prepare_response( // set date header if !has_date { let mut bytes = BytesMut::with_capacity(29); - config.set_date_header(&mut bytes); + config.write_date_header_value(&mut bytes); res.headers_mut().insert( DATE, // SAFETY: serialized date-times are known ASCII strings diff --git a/actix-http/src/h2/mod.rs b/actix-http/src/h2/mod.rs index 7eff44ac1..e47099cac 100644 --- a/actix-http/src/h2/mod.rs +++ b/actix-http/src/h2/mod.rs @@ -1,20 +1,29 @@ //! HTTP/2 protocol. use std::{ + future::Future, pin::Pin, task::{Context, Poll}, }; +use actix_codec::{AsyncRead, AsyncWrite}; +use actix_rt::time::{sleep_until, Sleep}; use bytes::Bytes; use futures_core::{ready, Stream}; -use h2::RecvStream; +use h2::{ + server::{handshake, Connection, Handshake}, + RecvStream, +}; + +use crate::{ + config::ServiceConfig, + error::{DispatchError, PayloadError}, +}; mod dispatcher; mod service; -pub use self::dispatcher::Dispatcher; -pub use self::service::H2Service; -use crate::error::PayloadError; +pub use self::{dispatcher::Dispatcher, service::H2Service}; /// HTTP/2 peer stream. 
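`prepare_response` formats the `Content-Length` value with an `itoa::Buffer`, avoiding a heap allocation per response. The same idea in isolation, using only the `http` and `itoa` crates:

    use http::header::{HeaderMap, HeaderValue, CONTENT_LENGTH};

    fn main() {
        let len: u64 = 16_384;

        // itoa formats the integer into a stack buffer and hands back a &str.
        let mut buf = itoa::Buffer::new();
        let value = HeaderValue::from_str(buf.format(len)).unwrap();

        let mut headers = HeaderMap::new();
        headers.insert(CONTENT_LENGTH, value);

        assert_eq!(headers.get(CONTENT_LENGTH).unwrap().to_str().unwrap(), "16384");
    }
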
pub struct Payload { @@ -30,10 +39,7 @@ impl Payload { impl Stream for Payload { type Item = Result; - fn poll_next( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); match ready!(Pin::new(&mut this.stream).poll_data(cx)) { @@ -50,3 +56,52 @@ impl Stream for Payload { } } } + +pub(crate) fn handshake_with_timeout(io: T, config: &ServiceConfig) -> HandshakeWithTimeout +where + T: AsyncRead + AsyncWrite + Unpin, +{ + HandshakeWithTimeout { + handshake: handshake(io), + timer: config + .client_request_deadline() + .map(|deadline| Box::pin(sleep_until(deadline.into()))), + } +} + +pub(crate) struct HandshakeWithTimeout { + handshake: Handshake, + timer: Option>>, +} + +impl Future for HandshakeWithTimeout +where + T: AsyncRead + AsyncWrite + Unpin, +{ + type Output = Result<(Connection, Option>>), DispatchError>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.get_mut(); + + match Pin::new(&mut this.handshake).poll(cx)? { + // return the timer on success handshake; its slot can be re-used for h2 ping-pong + Poll::Ready(conn) => Poll::Ready(Ok((conn, this.timer.take()))), + Poll::Pending => match this.timer.as_mut() { + Some(timer) => { + ready!(timer.as_mut().poll(cx)); + Poll::Ready(Err(DispatchError::SlowRequestTimeout)) + } + None => Poll::Pending, + }, + } + } +} + +#[cfg(test)] +mod tests { + use static_assertions::assert_impl_all; + + use super::*; + + assert_impl_all!(Payload: Unpin, Send, Sync); +} diff --git a/actix-http/src/h2/service.rs b/actix-http/src/h2/service.rs index a75abef7d..636ac3161 100644 --- a/actix-http/src/h2/service.rs +++ b/actix-http/src/h2/service.rs @@ -1,30 +1,29 @@ -use std::future::Future; -use std::marker::PhantomData; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::{net, rc::Rc}; +use std::{ + future::Future, + marker::PhantomData, + mem, net, + pin::Pin, + rc::Rc, + task::{Context, Poll}, +}; use actix_codec::{AsyncRead, AsyncWrite}; use actix_rt::net::TcpStream; use actix_service::{ - fn_factory, fn_service, IntoServiceFactory, Service, ServiceFactory, - ServiceFactoryExt as _, + fn_factory, fn_service, IntoServiceFactory, Service, ServiceFactory, ServiceFactoryExt as _, }; use actix_utils::future::ready; -use bytes::Bytes; use futures_core::{future::LocalBoxFuture, ready}; -use h2::server::{handshake, Handshake}; -use log::error; +use tracing::{error, trace}; -use crate::body::MessageBody; -use crate::config::ServiceConfig; -use crate::error::{DispatchError, Error}; -use crate::request::Request; -use crate::response::Response; -use crate::service::HttpFlow; -use crate::{ConnectCallback, OnConnectData}; - -use super::dispatcher::Dispatcher; +use super::{dispatcher::Dispatcher, handshake_with_timeout, HandshakeWithTimeout}; +use crate::{ + body::{BoxBody, MessageBody}, + config::ServiceConfig, + error::DispatchError, + service::HttpFlow, + ConnectCallback, OnConnectData, Request, Response, +}; /// `ServiceFactory` implementation for HTTP/2 transport pub struct H2Service { @@ -37,12 +36,11 @@ pub struct H2Service { impl H2Service where S: ServiceFactory, - S::Error: Into + 'static, + S::Error: Into> + 'static, S::Response: Into> + 'static, >::Future: 'static, B: MessageBody + 'static, - B::Error: Into, { /// Create new `H2Service` instance with config. 
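`handshake_with_timeout` races the HTTP/2 handshake against the client-request deadline and surfaces expiry as `DispatchError::SlowRequestTimeout`. The same deadline idea expressed with a stock combinator, assuming `actix_rt::time` re-exports Tokio's `timeout` helper (durations here are arbitrary):

    use std::time::Duration;

    use actix_rt::time::{sleep, timeout};

    #[actix_rt::main]
    async fn main() {
        // Stand-in for the h2 handshake future; here it simply takes too long.
        let slow_handshake = async {
            sleep(Duration::from_secs(5)).await;
            "h2 connection ready"
        };

        match timeout(Duration::from_millis(20), slow_handshake).await {
            Ok(conn) => println!("{conn}"),
            Err(_elapsed) => eprintln!("slow request timeout"),
        }
    }
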
pub(crate) fn with_config>( @@ -68,12 +66,11 @@ impl H2Service where S: ServiceFactory, S::Future: 'static, - S::Error: Into + 'static, + S::Error: Into> + 'static, S::Response: Into> + 'static, >::Future: 'static, B: MessageBody + 'static, - B::Error: Into, { /// Create plain TCP based service pub fn tcp( @@ -97,9 +94,14 @@ where #[cfg(feature = "openssl")] mod openssl { - use actix_service::{fn_factory, fn_service, ServiceFactoryExt}; - use actix_tls::accept::openssl::{Acceptor, SslAcceptor, SslError, TlsStream}; - use actix_tls::accept::TlsError; + use actix_service::ServiceFactoryExt as _; + use actix_tls::accept::{ + openssl::{ + reexports::{Error as SslError, SslAcceptor}, + Acceptor, TlsStream, + }, + TlsError, + }; use super::*; @@ -107,14 +109,13 @@ mod openssl { where S: ServiceFactory, S::Future: 'static, - S::Error: Into + 'static, + S::Error: Into> + 'static, S::Response: Into> + 'static, >::Future: 'static, B: MessageBody + 'static, - B::Error: Into, { - /// Create OpenSSL based service + /// Create OpenSSL based service. pub fn openssl( self, acceptor: SslAcceptor, @@ -126,41 +127,42 @@ mod openssl { InitError = S::InitError, > { Acceptor::new(acceptor) - .map_err(TlsError::Tls) - .map_init_err(|_| panic!()) - .and_then(fn_factory(|| { - ready(Ok::<_, S::InitError>(fn_service( - |io: TlsStream| { - let peer_addr = io.get_ref().peer_addr().ok(); - ready(Ok((io, peer_addr))) - }, - ))) - })) + .map_init_err(|_| { + unreachable!("TLS acceptor service factory does not error on init") + }) + .map_err(TlsError::into_service_error) + .map(|io: TlsStream| { + let peer_addr = io.get_ref().peer_addr().ok(); + (io, peer_addr) + }) .and_then(self.map_err(TlsError::Service)) } } } -#[cfg(feature = "rustls")] -mod rustls { - use super::*; - use actix_service::ServiceFactoryExt; - use actix_tls::accept::rustls::{Acceptor, ServerConfig, TlsStream}; - use actix_tls::accept::TlsError; +#[cfg(feature = "rustls-0_20")] +mod rustls_0_20 { use std::io; + use actix_service::ServiceFactoryExt as _; + use actix_tls::accept::{ + rustls::{reexports::ServerConfig, Acceptor, TlsStream}, + TlsError, + }; + + use super::*; + impl H2Service, S, B> where S: ServiceFactory, S::Future: 'static, - S::Error: Into + 'static, + S::Error: Into> + 'static, S::Response: Into> + 'static, >::Future: 'static, B: MessageBody + 'static, - B::Error: Into, { - /// Create Rustls based service + /// Create Rustls v0.20 based service. 
pub fn rustls( self, mut config: ServerConfig, @@ -171,20 +173,172 @@ mod rustls { Error = TlsError, InitError = S::InitError, > { - let protos = vec!["h2".to_string().into()]; - config.set_protocols(&protos); + let mut protos = vec![b"h2".to_vec()]; + protos.extend_from_slice(&config.alpn_protocols); + config.alpn_protocols = protos; Acceptor::new(config) - .map_err(TlsError::Tls) - .map_init_err(|_| panic!()) - .and_then(fn_factory(|| { - ready(Ok::<_, S::InitError>(fn_service( - |io: TlsStream| { - let peer_addr = io.get_ref().0.peer_addr().ok(); - ready(Ok((io, peer_addr))) - }, - ))) - })) + .map_init_err(|_| { + unreachable!("TLS acceptor service factory does not error on init") + }) + .map_err(TlsError::into_service_error) + .map(|io: TlsStream| { + let peer_addr = io.get_ref().0.peer_addr().ok(); + (io, peer_addr) + }) + .and_then(self.map_err(TlsError::Service)) + } + } +} + +#[cfg(feature = "rustls-0_21")] +mod rustls_0_21 { + use std::io; + + use actix_service::ServiceFactoryExt as _; + use actix_tls::accept::{ + rustls_0_21::{reexports::ServerConfig, Acceptor, TlsStream}, + TlsError, + }; + + use super::*; + + impl H2Service, S, B> + where + S: ServiceFactory, + S::Future: 'static, + S::Error: Into> + 'static, + S::Response: Into> + 'static, + >::Future: 'static, + + B: MessageBody + 'static, + { + /// Create Rustls v0.21 based service. + pub fn rustls_021( + self, + mut config: ServerConfig, + ) -> impl ServiceFactory< + TcpStream, + Config = (), + Response = (), + Error = TlsError, + InitError = S::InitError, + > { + let mut protos = vec![b"h2".to_vec()]; + protos.extend_from_slice(&config.alpn_protocols); + config.alpn_protocols = protos; + + Acceptor::new(config) + .map_init_err(|_| { + unreachable!("TLS acceptor service factory does not error on init") + }) + .map_err(TlsError::into_service_error) + .map(|io: TlsStream| { + let peer_addr = io.get_ref().0.peer_addr().ok(); + (io, peer_addr) + }) + .and_then(self.map_err(TlsError::Service)) + } + } +} + +#[cfg(feature = "rustls-0_22")] +mod rustls_0_22 { + use std::io; + + use actix_service::ServiceFactoryExt as _; + use actix_tls::accept::{ + rustls_0_22::{reexports::ServerConfig, Acceptor, TlsStream}, + TlsError, + }; + + use super::*; + + impl H2Service, S, B> + where + S: ServiceFactory, + S::Future: 'static, + S::Error: Into> + 'static, + S::Response: Into> + 'static, + >::Future: 'static, + + B: MessageBody + 'static, + { + /// Create Rustls v0.22 based service. 
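The Rustls constructors for the HTTP/2 service prepend `h2` to `ServerConfig::alpn_protocols` while preserving anything the caller already configured. The bookkeeping itself is plain `Vec<Vec<u8>>` manipulation, shown here with a bare vector standing in for the real `ServerConfig` (which needs certificates to construct):

    fn main() {
        // Caller-supplied ALPN list, e.g. already offering HTTP/1.1.
        let mut alpn_protocols: Vec<Vec<u8>> = vec![b"http/1.1".to_vec()];

        // Prepend "h2" without dropping the existing entries, as the constructors above do.
        let mut protos = vec![b"h2".to_vec()];
        protos.extend_from_slice(&alpn_protocols);
        alpn_protocols = protos;

        assert_eq!(alpn_protocols, [b"h2".to_vec(), b"http/1.1".to_vec()]);
    }
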
+ pub fn rustls_0_22( + self, + mut config: ServerConfig, + ) -> impl ServiceFactory< + TcpStream, + Config = (), + Response = (), + Error = TlsError, + InitError = S::InitError, + > { + let mut protos = vec![b"h2".to_vec()]; + protos.extend_from_slice(&config.alpn_protocols); + config.alpn_protocols = protos; + + Acceptor::new(config) + .map_init_err(|_| { + unreachable!("TLS acceptor service factory does not error on init") + }) + .map_err(TlsError::into_service_error) + .map(|io: TlsStream| { + let peer_addr = io.get_ref().0.peer_addr().ok(); + (io, peer_addr) + }) + .and_then(self.map_err(TlsError::Service)) + } + } +} + +#[cfg(feature = "rustls-0_23")] +mod rustls_0_23 { + use std::io; + + use actix_service::ServiceFactoryExt as _; + use actix_tls::accept::{ + rustls_0_23::{reexports::ServerConfig, Acceptor, TlsStream}, + TlsError, + }; + + use super::*; + + impl H2Service, S, B> + where + S: ServiceFactory, + S::Future: 'static, + S::Error: Into> + 'static, + S::Response: Into> + 'static, + >::Future: 'static, + + B: MessageBody + 'static, + { + /// Create Rustls v0.23 based service. + pub fn rustls_0_23( + self, + mut config: ServerConfig, + ) -> impl ServiceFactory< + TcpStream, + Config = (), + Response = (), + Error = TlsError, + InitError = S::InitError, + > { + let mut protos = vec![b"h2".to_vec()]; + protos.extend_from_slice(&config.alpn_protocols); + config.alpn_protocols = protos; + + Acceptor::new(config) + .map_init_err(|_| { + unreachable!("TLS acceptor service factory does not error on init") + }) + .map_err(TlsError::into_service_error) + .map(|io: TlsStream| { + let peer_addr = io.get_ref().0.peer_addr().ok(); + (io, peer_addr) + }) .and_then(self.map_err(TlsError::Service)) } } @@ -196,12 +350,11 @@ where S: ServiceFactory, S::Future: 'static, - S::Error: Into + 'static, + S::Error: Into> + 'static, S::Response: Into> + 'static, >::Future: 'static, B: MessageBody + 'static, - B::Error: Into, { type Response = (); type Error = DispatchError; @@ -236,7 +389,7 @@ where impl H2ServiceHandler where S: Service, - S::Error: Into + 'static, + S::Error: Into> + 'static, S::Future: 'static, S::Response: Into> + 'static, B: MessageBody + 'static, @@ -259,27 +412,25 @@ impl Service<(T, Option)> for H2ServiceHandler, - S::Error: Into + 'static, + S::Error: Into> + 'static, S::Future: 'static, S::Response: Into> + 'static, B: MessageBody + 'static, - B::Error: Into, { type Response = (); type Error = DispatchError; type Future = H2ServiceHandlerResponse; fn poll_ready(&self, cx: &mut Context<'_>) -> Poll> { - self.flow.service.poll_ready(cx).map_err(|e| { - let e = e.into(); - error!("Service readiness error: {:?}", e); - DispatchError::Service(e) + self.flow.service.poll_ready(cx).map_err(|err| { + let err = err.into(); + error!("Service readiness error: {:?}", err); + DispatchError::Service(err) }) } fn call(&self, (io, addr): (T, Option)) -> Self::Future { - let on_connect_data = - OnConnectData::from_io(&io, self.on_connect_ext.as_deref()); + let on_connect_data = OnConnectData::from_io(&io, self.on_connect_ext.as_deref()); H2ServiceHandlerResponse { state: State::Handshake( @@ -287,7 +438,7 @@ where Some(self.cfg.clone()), addr, on_connect_data, - handshake(io), + handshake_with_timeout(io, &self.cfg), ), } } @@ -298,21 +449,21 @@ where T: AsyncRead + AsyncWrite + Unpin, S::Future: 'static, { - Incoming(Dispatcher), Handshake( Option>>, Option, Option, OnConnectData, - Handshake, + HandshakeWithTimeout, ), + Established(Dispatcher), } pub struct H2ServiceHandlerResponse where 
T: AsyncRead + AsyncWrite + Unpin, S: Service, - S::Error: Into + 'static, + S::Error: Into> + 'static, S::Future: 'static, S::Response: Into> + 'static, B: MessageBody + 'static, @@ -324,40 +475,44 @@ impl Future for H2ServiceHandlerResponse where T: AsyncRead + AsyncWrite + Unpin, S: Service, - S::Error: Into + 'static, + S::Error: Into> + 'static, S::Future: 'static, S::Response: Into> + 'static, B: MessageBody, - B::Error: Into, { type Output = Result<(), DispatchError>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { match self.state { - State::Incoming(ref mut disp) => Pin::new(disp).poll(cx), State::Handshake( ref mut srv, ref mut config, ref peer_addr, - ref mut on_connect_data, + ref mut conn_data, ref mut handshake, ) => match ready!(Pin::new(handshake).poll(cx)) { - Ok(conn) => { - let on_connect_data = std::mem::take(on_connect_data); - self.state = State::Incoming(Dispatcher::new( - srv.take().unwrap(), + Ok((conn, timer)) => { + let on_connect_data = mem::take(conn_data); + + self.state = State::Established(Dispatcher::new( conn, - on_connect_data, + srv.take().unwrap(), config.take().unwrap(), *peer_addr, + on_connect_data, + timer, )); + self.poll(cx) } + Err(err) => { trace!("H2 handshake error: {}", err); - Poll::Ready(Err(err.into())) + Poll::Ready(Err(err)) } }, + + State::Established(ref mut disp) => Pin::new(disp).poll(cx), } } } diff --git a/actix-http/src/header/as_name.rs b/actix-http/src/header/as_name.rs index af81ff7f2..a895010b1 100644 --- a/actix-http/src/header/as_name.rs +++ b/actix-http/src/header/as_name.rs @@ -1,47 +1,55 @@ -//! Helper trait for types that can be effectively borrowed as a [HeaderValue]. -//! -//! [HeaderValue]: crate::http::HeaderValue +//! Sealed [`AsHeaderName`] trait and implementations. -use std::{borrow::Cow, str::FromStr}; +use std::{borrow::Cow, str::FromStr as _}; use http::header::{HeaderName, InvalidHeaderName}; +/// Sealed trait implemented for types that can be effectively borrowed as a [`HeaderValue`]. 
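`H2ServiceHandlerResponse` is a hand-rolled state machine: it polls the handshake, swaps its own state to `Established` with the new dispatcher, and immediately re-polls itself so the registered wakeup is not lost. An illustrative two-phase future with the same shape (types and names are not the actix-http ones):

    use std::{
        future::{ready, Future},
        pin::Pin,
        task::{Context, Poll},
    };

    enum State<H, D> {
        Handshake(H),
        Established(D),
    }

    struct ConnTask<H, D> {
        state: State<H, D>,
    }

    impl<H, D> Future for ConnTask<H, D>
    where
        H: Future<Output = D> + Unpin,
        D: Future + Unpin,
    {
        type Output = D::Output;

        fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
            match self.state {
                State::Handshake(ref mut handshake) => match Pin::new(handshake).poll(cx) {
                    Poll::Ready(dispatcher) => {
                        // transition, then re-poll so the dispatcher registers its own wakeups
                        self.state = State::Established(dispatcher);
                        self.poll(cx)
                    }
                    Poll::Pending => Poll::Pending,
                },
                State::Established(ref mut dispatcher) => Pin::new(dispatcher).poll(cx),
            }
        }
    }

    #[actix_rt::main]
    async fn main() {
        let task = ConnTask {
            state: State::Handshake(ready(ready("connection closed cleanly"))),
        };
        assert_eq!(task.await, "connection closed cleanly");
    }
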
+/// +/// [`HeaderValue`]: super::HeaderValue pub trait AsHeaderName: Sealed {} +pub struct Seal; + pub trait Sealed { - fn try_as_name(&self) -> Result, InvalidHeaderName>; + fn try_as_name(&self, seal: Seal) -> Result, InvalidHeaderName>; } impl Sealed for HeaderName { - fn try_as_name(&self) -> Result, InvalidHeaderName> { + #[inline] + fn try_as_name(&self, _: Seal) -> Result, InvalidHeaderName> { Ok(Cow::Borrowed(self)) } } impl AsHeaderName for HeaderName {} impl Sealed for &HeaderName { - fn try_as_name(&self) -> Result, InvalidHeaderName> { + #[inline] + fn try_as_name(&self, _: Seal) -> Result, InvalidHeaderName> { Ok(Cow::Borrowed(*self)) } } impl AsHeaderName for &HeaderName {} impl Sealed for &str { - fn try_as_name(&self) -> Result, InvalidHeaderName> { + #[inline] + fn try_as_name(&self, _: Seal) -> Result, InvalidHeaderName> { HeaderName::from_str(self).map(Cow::Owned) } } impl AsHeaderName for &str {} impl Sealed for String { - fn try_as_name(&self) -> Result, InvalidHeaderName> { + #[inline] + fn try_as_name(&self, _: Seal) -> Result, InvalidHeaderName> { HeaderName::from_str(self).map(Cow::Owned) } } impl AsHeaderName for String {} impl Sealed for &String { - fn try_as_name(&self) -> Result, InvalidHeaderName> { + #[inline] + fn try_as_name(&self, _: Seal) -> Result, InvalidHeaderName> { HeaderName::from_str(self).map(Cow::Owned) } } diff --git a/actix-http/src/header/common.rs b/actix-http/src/header/common.rs new file mode 100644 index 000000000..6942dc26a --- /dev/null +++ b/actix-http/src/header/common.rs @@ -0,0 +1,53 @@ +//! Common header names not defined in [`http`]. +//! +//! Any headers added to this file will need to be re-exported from the list at `crate::headers`. + +use http::header::HeaderName; + +/// Response header field that indicates how caches have handled that response and its corresponding +/// request. +/// +/// See [RFC 9211](https://www.rfc-editor.org/rfc/rfc9211) for full semantics. +// TODO(breaking): replace with http's version +pub const CACHE_STATUS: HeaderName = HeaderName::from_static("cache-status"); + +/// Response header field that allows origin servers to control the behavior of CDN caches +/// interposed between them and clients separately from other caches that might handle the response. +/// +/// See [RFC 9213](https://www.rfc-editor.org/rfc/rfc9213) for full semantics. +// TODO(breaking): replace with http's version +pub const CDN_CACHE_CONTROL: HeaderName = HeaderName::from_static("cdn-cache-control"); + +/// Response header that prevents a document from loading any cross-origin resources that don't +/// explicitly grant the document permission (using [CORP] or [CORS]). +/// +/// [CORP]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Cross-Origin_Resource_Policy_(CORP) +/// [CORS]: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS +pub const CROSS_ORIGIN_EMBEDDER_POLICY: HeaderName = + HeaderName::from_static("cross-origin-embedder-policy"); + +/// Response header that allows you to ensure a top-level document does not share a browsing context +/// group with cross-origin documents. +pub const CROSS_ORIGIN_OPENER_POLICY: HeaderName = + HeaderName::from_static("cross-origin-opener-policy"); + +/// Response header that conveys a desire that the browser blocks no-cors cross-origin/cross-site +/// requests to the given resource. 
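The reworked `AsHeaderName` uses the seal-token variant of the sealed-trait pattern: the trait stays public, but its method takes a `Seal` value that only this crate can construct, so downstream crates can neither call nor meaningfully implement it. A minimal sketch with illustrative names:

    mod sealed {
        pub struct Seal;

        pub trait Sealed {
            fn as_label(&self, seal: Seal) -> String;
        }
    }

    // Public trait; its supertrait lives in a private module, so it cannot be implemented
    // (or its method called) outside this crate.
    pub trait AsLabel: sealed::Sealed {}

    impl sealed::Sealed for &str {
        fn as_label(&self, _: sealed::Seal) -> String {
            self.to_lowercase()
        }
    }
    impl AsLabel for &str {}

    fn lookup(key: impl AsLabel) -> String {
        // Only code in this crate can mint a Seal value.
        sealed::Sealed::as_label(&key, sealed::Seal)
    }

    fn main() {
        assert_eq!(lookup("Cache-Status"), "cache-status");
    }
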
+pub const CROSS_ORIGIN_RESOURCE_POLICY: HeaderName = + HeaderName::from_static("cross-origin-resource-policy"); + +/// Response header that provides a mechanism to allow and deny the use of browser features in a +/// document or within any `